From 00398e1d518309328e8ba7dff00881538ac22c6a Mon Sep 17 00:00:00 2001 From: Alain Michaud Date: Thu, 11 Jun 2020 19:50:41 +0000 Subject: Bluetooth: Add support for BT_PKT_STATUS CMSG data for SCO connections This change adds support for reporting the BT_PKT_STATUS to the socket CMSG data to allow the implementation of a packet loss correction on erroneous data received on the SCO socket. The patch was partially developed by Marcel Holtmann and validated by Hsin-yu Chao. Signed-off-by: Alain Michaud Signed-off-by: Marcel Holtmann --- include/net/bluetooth/bluetooth.h | 10 ++++++++++ include/net/bluetooth/sco.h | 2 ++ 2 files changed, 12 insertions(+) (limited to 'include') diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 18190055374c..7ee8041af803 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -147,6 +147,10 @@ struct bt_voice { #define BT_MODE_LE_FLOWCTL 0x03 #define BT_MODE_EXT_FLOWCTL 0x04 +#define BT_PKT_STATUS 16 + +#define BT_SCM_PKT_STATUS 0x03 + __printf(1, 2) void bt_info(const char *fmt, ...); __printf(1, 2) @@ -286,6 +290,7 @@ struct bt_sock { struct sock *parent; unsigned long flags; void (*skb_msg_name)(struct sk_buff *, void *, int *); + void (*skb_put_cmsg)(struct sk_buff *, struct msghdr *, struct sock *); }; enum { @@ -335,6 +340,10 @@ struct l2cap_ctrl { struct l2cap_chan *chan; }; +struct sco_ctrl { + u8 pkt_status; +}; + struct hci_dev; typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode); @@ -361,6 +370,7 @@ struct bt_skb_cb { u8 incoming:1; union { struct l2cap_ctrl l2cap; + struct sco_ctrl sco; struct hci_ctrl hci; }; }; diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h index f40ddb4264fc..1aa2e14b6c94 100644 --- a/include/net/bluetooth/sco.h +++ b/include/net/bluetooth/sco.h @@ -46,4 +46,6 @@ struct sco_conninfo { __u8 dev_class[3]; }; +#define SCO_CMSG_PKT_STATUS 0x01 + #endif /* __SCO_H */ -- cgit v1.2.3 From 32929e1f4ad9adf71f655028e4dd5d87adb97f52 Mon Sep 17 00:00:00 2001 From: Alain Michaud Date: Thu, 11 Jun 2020 14:26:10 +0000 Subject: Bluetooth: Use only 8 bits for the HCI CMSG state flags This change implements suggestions from the code review of the SCO CMSG state flag patch. 
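A brief user-space usage sketch for the definitions above (not part of either patch; the net/bluetooth/sco.c changes that actually deliver the control message are outside this include-only view): enable reporting with the BT_PKT_STATUS socket option, then pick the per-packet status out of the SOL_BLUETOOTH/BT_SCM_PKT_STATUS ancillary data returned by recvmsg().

  /* Sketch only: assumes a connected SCO socket and the usual socket and
   * Bluetooth headers; BT_PKT_STATUS (16) and BT_SCM_PKT_STATUS (0x03) can be
   * defined locally if the installed headers predate this change.  Enabling
   * the option is normally done once per connection; it is shown inline here
   * only to keep the sketch self-contained. */
  static ssize_t sco_recv_with_status(int sco_fd, void *buf, size_t len,
                                      uint8_t *pkt_status)
  {
          int one = 1;
          char ctrl[CMSG_SPACE(sizeof(uint8_t))];
          struct iovec iov = { .iov_base = buf, .iov_len = len };
          struct msghdr msg = {
                  .msg_iov = &iov, .msg_iovlen = 1,
                  .msg_control = ctrl, .msg_controllen = sizeof(ctrl),
          };
          struct cmsghdr *c;
          ssize_t ret;

          if (setsockopt(sco_fd, SOL_BLUETOOTH, BT_PKT_STATUS,
                         &one, sizeof(one)) < 0)
                  return -1;

          ret = recvmsg(sco_fd, &msg, 0);
          if (ret < 0)
                  return ret;

          for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c)) {
                  if (c->cmsg_level == SOL_BLUETOOTH &&
                      c->cmsg_type == BT_SCM_PKT_STATUS)
                          *pkt_status = *(uint8_t *)CMSG_DATA(c);
          }

          /* A non-zero status marks possibly-erroneous data, i.e. a candidate
           * for packet loss concealment instead of normal playback. */
          return ret;
  }
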
Signed-off-by: Alain Michaud Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci_sock.h | 4 ++-- net/bluetooth/hci_sock.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci_sock.h b/include/net/bluetooth/hci_sock.h index 9352bb1bf34c..9949870f7d78 100644 --- a/include/net/bluetooth/hci_sock.h +++ b/include/net/bluetooth/hci_sock.h @@ -31,8 +31,8 @@ #define HCI_TIME_STAMP 3 /* CMSG flags */ -#define HCI_CMSG_DIR 0x0001 -#define HCI_CMSG_TSTAMP 0x0002 +#define HCI_CMSG_DIR 0x01 +#define HCI_CMSG_TSTAMP 0x02 struct sockaddr_hci { sa_family_t hci_family; diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index caf38a8ea6a8..d5627967fc25 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -52,7 +52,7 @@ struct hci_pinfo { struct bt_sock bt; struct hci_dev *hdev; struct hci_filter filter; - __u32 cmsg_mask; + __u8 cmsg_mask; unsigned short channel; unsigned long flags; __u32 cookie; @@ -1399,7 +1399,7 @@ done: static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) { - __u32 mask = hci_pi(sk)->cmsg_mask; + __u8 mask = hci_pi(sk)->cmsg_mask; if (mask & HCI_CMSG_DIR) { int incoming = bt_cb(skb)->incoming; -- cgit v1.2.3 From 7e90de4ac1099d3f4e26023853d4aefd0d2a1dea Mon Sep 17 00:00:00 2001 From: Alain Michaud Date: Thu, 11 Jun 2020 02:01:55 +0000 Subject: Bluetooth: mgmt: read/set system parameter definitions This patch submits the corresponding kernel definitions to mgmt.h. This is submitted before the implementation to avoid any conflicts in values allocations. Signed-off-by: Alain Michaud Reviewed-by: Abhishek Pandit-Subedi Reviewed-by: Yu Liu Signed-off-by: Marcel Holtmann --- include/net/bluetooth/mgmt.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'include') diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index 16e0d87bd8fa..e515288f328f 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -52,6 +52,12 @@ struct mgmt_hdr { __le16 len; } __packed; +struct mgmt_tlv { + __le16 type; + __u8 length; + __u8 value[]; +} __packed; + struct mgmt_addr_info { bdaddr_t bdaddr; __u8 type; @@ -702,6 +708,18 @@ struct mgmt_rp_set_exp_feature { __le32 flags; } __packed; +#define MGMT_OP_READ_DEF_SYSTEM_CONFIG 0x004b +#define MGMT_READ_DEF_SYSTEM_CONFIG_SIZE 0 + +#define MGMT_OP_SET_DEF_SYSTEM_CONFIG 0x004c +#define MGMT_SET_DEF_SYSTEM_CONFIG_SIZE 0 + +#define MGMT_OP_READ_DEF_RUNTIME_CONFIG 0x004d +#define MGMT_READ_DEF_RUNTIME_CONFIG_SIZE 0 + +#define MGMT_OP_SET_DEF_RUNTIME_CONFIG 0x004e +#define MGMT_SET_DEF_RUNTIME_CONFIG_SIZE 0 + #define MGMT_EV_CMD_COMPLETE 0x0001 struct mgmt_ev_cmd_complete { __le16 opcode; -- cgit v1.2.3 From 10873f99ced274cbfc119f55e7e57a0f047a0799 Mon Sep 17 00:00:00 2001 From: Alain Michaud Date: Thu, 11 Jun 2020 02:01:56 +0000 Subject: Bluetooth: centralize default value initialization. This patch centralized the initialization of default parameters. This is required to allow clients to more easily customize the default system parameters. 
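To illustrate how the struct mgmt_tlv defined above is meant to be consumed, here is a sketch of a single parameter as it could appear in a MGMT_OP_SET_DEF_SYSTEM_CONFIG payload. The parameter type code is a placeholder: the actual type allocations belong to the implementation, which is deliberately not part of this patch, and the natural expectation is simply that a Set payload concatenates such TLVs and the Read command returns them in the same format.

  /* Sketch with kernel-style types; the 0x0000 type code is hypothetical. */
  struct {
          struct mgmt_tlv tlv;
          __le16 value;
  } __packed param = {
          .tlv = {
                  .type   = cpu_to_le16(0x0000),  /* e.g. "default page scan interval" */
                  .length = sizeof(__le16),
          },
          .value  = cpu_to_le16(0x0800),          /* 0x0800 * 0.625 ms = 1.28 s */
  };
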
Signed-off-by: Alain Michaud Reviewed-by: Abhishek Pandit-Subedi Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci_core.h | 18 ++++++++++++++++++ net/bluetooth/hci_conn.c | 14 ++++---------- net/bluetooth/hci_core.c | 14 +++++++++++++- net/bluetooth/hci_request.c | 15 +++++---------- 4 files changed, 40 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index cdd4f1db8670..0d5dbb6cb5a0 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -295,6 +295,14 @@ struct hci_dev { __u8 le_scan_type; __u16 le_scan_interval; __u16 le_scan_window; + __u16 le_scan_int_suspend; + __u16 le_scan_window_suspend; + __u16 le_scan_int_discovery; + __u16 le_scan_window_discovery; + __u16 le_scan_int_adv_monitor; + __u16 le_scan_window_adv_monitor; + __u16 le_scan_int_connect; + __u16 le_scan_window_connect; __u16 le_conn_min_interval; __u16 le_conn_max_interval; __u16 le_conn_latency; @@ -323,6 +331,16 @@ struct hci_dev { __u16 devid_product; __u16 devid_version; + __u8 def_page_scan_type; + __u16 def_page_scan_int; + __u16 def_page_scan_window; + __u8 def_inq_scan_type; + __u16 def_inq_scan_int; + __u16 def_inq_scan_window; + __u16 def_br_lsto; + __u16 def_page_timeout; + __u16 def_multi_adv_rotation_duration; + __u16 pkt_type; __u16 esco_type; __u16 link_policy; diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 307800fd18e6..9bdffc4e79b0 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -789,11 +789,8 @@ static void set_ext_conn_params(struct hci_conn *conn, memset(p, 0, sizeof(*p)); - /* Set window to be the same value as the interval to - * enable continuous scanning. - */ - p->scan_interval = cpu_to_le16(hdev->le_scan_interval); - p->scan_window = p->scan_interval; + p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect); + p->scan_window = cpu_to_le16(hdev->le_scan_window_connect); p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); p->conn_latency = cpu_to_le16(conn->le_conn_latency); @@ -875,11 +872,8 @@ static void hci_req_add_le_create_conn(struct hci_request *req, memset(&cp, 0, sizeof(cp)); - /* Set window to be the same value as the interval to enable - * continuous scanning. 
- */ - cp.scan_interval = cpu_to_le16(hdev->le_scan_interval); - cp.scan_window = cp.scan_interval; + cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect); + cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect); bacpy(&cp.peer_addr, &conn->dst); cp.peer_addr_type = conn->dst_type; diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 00458a8c26f8..4f1052a7c488 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -2982,7 +2982,7 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, adv_instance->remaining_time = timeout; if (duration == 0) - adv_instance->duration = HCI_DEFAULT_ADV_DURATION; + adv_instance->duration = hdev->def_multi_adv_rotation_duration; else adv_instance->duration = duration; @@ -3400,6 +3400,12 @@ struct hci_dev *hci_alloc_dev(void) hdev->le_adv_max_interval = 0x0800; hdev->le_scan_interval = 0x0060; hdev->le_scan_window = 0x0030; + hdev->le_scan_int_suspend = 0x0400; + hdev->le_scan_window_suspend = 0x0012; + hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT; + hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN; + hdev->le_scan_int_connect = 0x0060; + hdev->le_scan_window_connect = 0x0060; hdev->le_conn_min_interval = 0x0018; hdev->le_conn_max_interval = 0x0028; hdev->le_conn_latency = 0x0000; @@ -3415,6 +3421,7 @@ struct hci_dev *hci_alloc_dev(void) hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M; hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES; + hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION; hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT; @@ -3423,6 +3430,11 @@ struct hci_dev *hci_alloc_dev(void) hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT; hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE; + /* default 1.28 sec page scan */ + hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD; + hdev->def_page_scan_int = 0x0800; + hdev->def_page_scan_window = 0x0012; + mutex_init(&hdev->lock); mutex_init(&hdev->req_lock); diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 1acf5b8e0910..a7f572ad38ef 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -34,9 +34,6 @@ #define HCI_REQ_PEND 1 #define HCI_REQ_CANCELED 2 -#define LE_SUSPEND_SCAN_WINDOW 0x0012 -#define LE_SUSPEND_SCAN_INTERVAL 0x0400 - void hci_req_init(struct hci_request *req, struct hci_dev *hdev) { skb_queue_head_init(&req->cmd_q); @@ -366,13 +363,11 @@ void __hci_req_write_fast_connectable(struct hci_request *req, bool enable) /* 160 msec page scan interval */ acp.interval = cpu_to_le16(0x0100); } else { - type = PAGE_SCAN_TYPE_STANDARD; /* default */ - - /* default 1.28 sec page scan */ - acp.interval = cpu_to_le16(0x0800); + type = hdev->def_page_scan_type; + acp.interval = cpu_to_le16(hdev->def_page_scan_int); } - acp.window = cpu_to_le16(0x0012); + acp.window = cpu_to_le16(hdev->def_page_scan_window); if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval || __cpu_to_le16(hdev->page_scan_window) != acp.window) @@ -927,8 +922,8 @@ void hci_req_add_le_passive_scan(struct hci_request *req) filter_policy |= 0x02; if (hdev->suspended) { - window = LE_SUSPEND_SCAN_WINDOW; - interval = LE_SUSPEND_SCAN_INTERVAL; + window = hdev->le_scan_window_suspend; + interval = hdev->le_scan_int_suspend; } else { window = hdev->le_scan_window; interval = hdev->le_scan_interval; -- cgit v1.2.3 From d1492bbd470280c687e6677a355f923435bdb3ac Mon Sep 17 00:00:00 2001 From: Jishnu Prakash Date: 
Thu, 28 May 2020 22:24:24 +0530 Subject: iio: adc: Add PMIC7 ADC bindings Add documentation for PMIC7 ADC peripheral. For the PMIC7-type PMICs, ADC peripheral is present in HW for the following PMICs: PMK8350, PM8350, PM8350b, PMR735a and PMR735b. Of these, only the ADC peripheral on PMK8350 is exposed directly to SW. If SW needs to communicate with ADCs on other PMICs, it specifies the PMIC to PMK8350 through the newly added SID register and communication between PMK8350 ADC and other PMIC ADCs is carried out through PBS(Programmable Boot Sequence) at the firmware level. In addition, add definitions for ADC channels and virtual channel definitions (combination of ADC channel number and PMIC SID number) per PMIC, to be used by ADC clients for PMIC7. Signed-off-by: Jishnu Prakash Reviewed-by: Amit Kucheria Reviewed-by: Rob Herring Signed-off-by: Jonathan Cameron --- .../bindings/iio/adc/qcom,spmi-vadc.yaml | 38 ++++++++-- include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h | 67 ++++++++++++++++ include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h | 88 ++++++++++++++++++++++ include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h | 46 +++++++++++ include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h | 28 +++++++ include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h | 28 +++++++ include/dt-bindings/iio/qcom,spmi-vadc.h | 78 ++++++++++++++++++- 7 files changed, 366 insertions(+), 7 deletions(-) create mode 100644 include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h create mode 100644 include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h create mode 100644 include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h create mode 100644 include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h create mode 100644 include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h (limited to 'include') diff --git a/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.yaml b/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.yaml index de8d243f01c2..e6263b617941 100644 --- a/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.yaml @@ -13,7 +13,7 @@ maintainers: description: | SPMI PMIC voltage ADC (VADC) provides interface to clients to read voltage. The VADC is a 15-bit sigma-delta ADC. - SPMI PMIC5 voltage ADC (ADC) provides interface to clients to read + SPMI PMIC5/PMIC7 voltage ADC (ADC) provides interface to clients to read voltage. The VADC is a 16-bit sigma-delta ADC. properties: @@ -28,6 +28,7 @@ properties: - qcom,spmi-vadc - qcom,spmi-adc5 - qcom,spmi-adc-rev2 + - qcom,spmi-adc7 reg: description: VADC base address in the SPMI PMIC register map @@ -70,6 +71,8 @@ patternProperties: description: | ADC channel number. See include/dt-bindings/iio/qcom,spmi-vadc.h + For PMIC7 ADC, the channel numbers are specified separately per PMIC + in the PMIC-specific files in include/dt-bindings/iio/. label: $ref: /schemas/types.yaml#/definitions/string @@ -113,11 +116,11 @@ patternProperties: channel calibration. If property is not found, channel will be calibrated with 0.625V and 1.25V reference channels, also known as absolute calibration. - - For compatible property "qcom,spmi-adc5" and "qcom,spmi-adc-rev2", - if this property is specified VADC will use the VDD reference (1.875V) - and GND for channel calibration. If property is not found, channel - will be calibrated with 0V and 1.25V reference channels, also known - as absolute calibration. 
+ - For compatible property "qcom,spmi-adc5", "qcom,spmi-adc7" and + "qcom,spmi-adc-rev2", if this property is specified VADC will use + the VDD reference (1.875V) and GND for channel calibration. If + property is not found, channel will be calibrated with 0V and 1.25V + reference channels, also known as absolute calibration. type: boolean qcom,hw-settle-time: @@ -208,6 +211,29 @@ allOf: enum: [ 1, 2, 4, 8, 16 ] default: 1 + - if: + properties: + compatible: + contains: + const: qcom,spmi-adc7 + + then: + patternProperties: + "^.*@[0-9a-f]+$": + properties: + qcom,decimation: + enum: [ 85, 340, 1360 ] + default: 1360 + + qcom,hw-settle-time: + enum: [ 15, 100, 200, 300, 400, 500, 600, 700, 1000, 2000, 4000, + 8000, 16000, 32000, 64000, 128000 ] + default: 15 + + qcom,avg-samples: + enum: [ 1, 2, 4, 8, 16 ] + default: 1 + examples: - | spmi_bus { diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h new file mode 100644 index 000000000000..9426f27a1946 --- /dev/null +++ b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H +#define _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H + +#ifndef PM8350_SID +#define PM8350_SID 1 +#endif + +/* ADC channels for PM8350_ADC for PMIC7 */ +#define PM8350_ADC7_REF_GND (PM8350_SID << 8 | 0x0) +#define PM8350_ADC7_1P25VREF (PM8350_SID << 8 | 0x01) +#define PM8350_ADC7_VREF_VADC (PM8350_SID << 8 | 0x02) +#define PM8350_ADC7_DIE_TEMP (PM8350_SID << 8 | 0x03) + +#define PM8350_ADC7_AMUX_THM1 (PM8350_SID << 8 | 0x04) +#define PM8350_ADC7_AMUX_THM2 (PM8350_SID << 8 | 0x05) +#define PM8350_ADC7_AMUX_THM3 (PM8350_SID << 8 | 0x06) +#define PM8350_ADC7_AMUX_THM4 (PM8350_SID << 8 | 0x07) +#define PM8350_ADC7_AMUX_THM5 (PM8350_SID << 8 | 0x08) +#define PM8350_ADC7_GPIO1 (PM8350_SID << 8 | 0x0a) +#define PM8350_ADC7_GPIO2 (PM8350_SID << 8 | 0x0b) +#define PM8350_ADC7_GPIO3 (PM8350_SID << 8 | 0x0c) +#define PM8350_ADC7_GPIO4 (PM8350_SID << 8 | 0x0d) + +/* 30k pull-up1 */ +#define PM8350_ADC7_AMUX_THM1_30K_PU (PM8350_SID << 8 | 0x24) +#define PM8350_ADC7_AMUX_THM2_30K_PU (PM8350_SID << 8 | 0x25) +#define PM8350_ADC7_AMUX_THM3_30K_PU (PM8350_SID << 8 | 0x26) +#define PM8350_ADC7_AMUX_THM4_30K_PU (PM8350_SID << 8 | 0x27) +#define PM8350_ADC7_AMUX_THM5_30K_PU (PM8350_SID << 8 | 0x28) +#define PM8350_ADC7_GPIO1_30K_PU (PM8350_SID << 8 | 0x2a) +#define PM8350_ADC7_GPIO2_30K_PU (PM8350_SID << 8 | 0x2b) +#define PM8350_ADC7_GPIO3_30K_PU (PM8350_SID << 8 | 0x2c) +#define PM8350_ADC7_GPIO4_30K_PU (PM8350_SID << 8 | 0x2d) + +/* 100k pull-up2 */ +#define PM8350_ADC7_AMUX_THM1_100K_PU (PM8350_SID << 8 | 0x44) +#define PM8350_ADC7_AMUX_THM2_100K_PU (PM8350_SID << 8 | 0x45) +#define PM8350_ADC7_AMUX_THM3_100K_PU (PM8350_SID << 8 | 0x46) +#define PM8350_ADC7_AMUX_THM4_100K_PU (PM8350_SID << 8 | 0x47) +#define PM8350_ADC7_AMUX_THM5_100K_PU (PM8350_SID << 8 | 0x48) +#define PM8350_ADC7_GPIO1_100K_PU (PM8350_SID << 8 | 0x4a) +#define PM8350_ADC7_GPIO2_100K_PU (PM8350_SID << 8 | 0x4b) +#define PM8350_ADC7_GPIO3_100K_PU (PM8350_SID << 8 | 0x4c) +#define PM8350_ADC7_GPIO4_100K_PU (PM8350_SID << 8 | 0x4d) + +/* 400k pull-up3 */ +#define PM8350_ADC7_AMUX_THM1_400K_PU (PM8350_SID << 8 | 0x64) +#define PM8350_ADC7_AMUX_THM2_400K_PU (PM8350_SID << 8 | 0x65) +#define PM8350_ADC7_AMUX_THM3_400K_PU (PM8350_SID << 8 | 0x66) +#define PM8350_ADC7_AMUX_THM4_400K_PU (PM8350_SID 
<< 8 | 0x67) +#define PM8350_ADC7_AMUX_THM5_400K_PU (PM8350_SID << 8 | 0x68) +#define PM8350_ADC7_GPIO1_400K_PU (PM8350_SID << 8 | 0x6a) +#define PM8350_ADC7_GPIO2_400K_PU (PM8350_SID << 8 | 0x6b) +#define PM8350_ADC7_GPIO3_400K_PU (PM8350_SID << 8 | 0x6c) +#define PM8350_ADC7_GPIO4_400K_PU (PM8350_SID << 8 | 0x6d) + +/* 1/3 Divider */ +#define PM8350_ADC7_GPIO4_DIV3 (PM8350_SID << 8 | 0x8d) + +#define PM8350_ADC7_VPH_PWR (PM8350_SID << 8 | 0x8e) + +#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H */ diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h new file mode 100644 index 000000000000..dc2497c27e16 --- /dev/null +++ b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PM8350B_H +#define _DT_BINDINGS_QCOM_SPMI_VADC_PM8350B_H + +#ifndef PM8350B_SID +#define PM8350B_SID 3 +#endif + +/* ADC channels for PM8350B_ADC for PMIC7 */ +#define PM8350B_ADC7_REF_GND (PM8350B_SID << 8 | 0x0) +#define PM8350B_ADC7_1P25VREF (PM8350B_SID << 8 | 0x01) +#define PM8350B_ADC7_VREF_VADC (PM8350B_SID << 8 | 0x02) +#define PM8350B_ADC7_DIE_TEMP (PM8350B_SID << 8 | 0x03) + +#define PM8350B_ADC7_AMUX_THM1 (PM8350B_SID << 8 | 0x04) +#define PM8350B_ADC7_AMUX_THM2 (PM8350B_SID << 8 | 0x05) +#define PM8350B_ADC7_AMUX_THM3 (PM8350B_SID << 8 | 0x06) +#define PM8350B_ADC7_AMUX_THM4 (PM8350B_SID << 8 | 0x07) +#define PM8350B_ADC7_AMUX_THM5 (PM8350B_SID << 8 | 0x08) +#define PM8350B_ADC7_AMUX_THM6 (PM8350B_SID << 8 | 0x09) +#define PM8350B_ADC7_GPIO1 (PM8350B_SID << 8 | 0x0a) +#define PM8350B_ADC7_GPIO2 (PM8350B_SID << 8 | 0x0b) +#define PM8350B_ADC7_GPIO3 (PM8350B_SID << 8 | 0x0c) +#define PM8350B_ADC7_GPIO4 (PM8350B_SID << 8 | 0x0d) + +#define PM8350B_ADC7_CHG_TEMP (PM8350B_SID << 8 | 0x10) +#define PM8350B_ADC7_USB_IN_V_16 (PM8350B_SID << 8 | 0x11) +#define PM8350B_ADC7_VDC_16 (PM8350B_SID << 8 | 0x12) +#define PM8350B_ADC7_CC1_ID (PM8350B_SID << 8 | 0x13) +#define PM8350B_ADC7_VREF_BAT_THERM (PM8350B_SID << 8 | 0x15) +#define PM8350B_ADC7_IIN_FB (PM8350B_SID << 8 | 0x17) + +/* 30k pull-up1 */ +#define PM8350B_ADC7_AMUX_THM1_30K_PU (PM8350B_SID << 8 | 0x24) +#define PM8350B_ADC7_AMUX_THM2_30K_PU (PM8350B_SID << 8 | 0x25) +#define PM8350B_ADC7_AMUX_THM3_30K_PU (PM8350B_SID << 8 | 0x26) +#define PM8350B_ADC7_AMUX_THM4_30K_PU (PM8350B_SID << 8 | 0x27) +#define PM8350B_ADC7_AMUX_THM5_30K_PU (PM8350B_SID << 8 | 0x28) +#define PM8350B_ADC7_AMUX_THM6_30K_PU (PM8350B_SID << 8 | 0x29) +#define PM8350B_ADC7_GPIO1_30K_PU (PM8350B_SID << 8 | 0x2a) +#define PM8350B_ADC7_GPIO2_30K_PU (PM8350B_SID << 8 | 0x2b) +#define PM8350B_ADC7_GPIO3_30K_PU (PM8350B_SID << 8 | 0x2c) +#define PM8350B_ADC7_GPIO4_30K_PU (PM8350B_SID << 8 | 0x2d) +#define PM8350B_ADC7_CC1_ID_30K_PU (PM8350B_SID << 8 | 0x33) + +/* 100k pull-up2 */ +#define PM8350B_ADC7_AMUX_THM1_100K_PU (PM8350B_SID << 8 | 0x44) +#define PM8350B_ADC7_AMUX_THM2_100K_PU (PM8350B_SID << 8 | 0x45) +#define PM8350B_ADC7_AMUX_THM3_100K_PU (PM8350B_SID << 8 | 0x46) +#define PM8350B_ADC7_AMUX_THM4_100K_PU (PM8350B_SID << 8 | 0x47) +#define PM8350B_ADC7_AMUX_THM5_100K_PU (PM8350B_SID << 8 | 0x48) +#define PM8350B_ADC7_AMUX_THM6_100K_PU (PM8350B_SID << 8 | 0x49) +#define PM8350B_ADC7_GPIO1_100K_PU (PM8350B_SID << 8 | 0x4a) +#define PM8350B_ADC7_GPIO2_100K_PU (PM8350B_SID << 8 | 0x4b) +#define PM8350B_ADC7_GPIO3_100K_PU (PM8350B_SID << 8 | 0x4c) +#define 
PM8350B_ADC7_GPIO4_100K_PU (PM8350B_SID << 8 | 0x4d) +#define PM8350B_ADC7_CC1_ID_100K_PU (PM8350B_SID << 8 | 0x53) + +/* 400k pull-up3 */ +#define PM8350B_ADC7_AMUX_THM1_400K_PU (PM8350B_SID << 8 | 0x64) +#define PM8350B_ADC7_AMUX_THM2_400K_PU (PM8350B_SID << 8 | 0x65) +#define PM8350B_ADC7_AMUX_THM3_400K_PU (PM8350B_SID << 8 | 0x66) +#define PM8350B_ADC7_AMUX_THM4_400K_PU (PM8350B_SID << 8 | 0x67) +#define PM8350B_ADC7_AMUX_THM5_400K_PU (PM8350B_SID << 8 | 0x68) +#define PM8350B_ADC7_AMUX_THM6_400K_PU (PM8350B_SID << 8 | 0x69) +#define PM8350B_ADC7_GPIO1_400K_PU (PM8350B_SID << 8 | 0x6a) +#define PM8350B_ADC7_GPIO2_400K_PU (PM8350B_SID << 8 | 0x6b) +#define PM8350B_ADC7_GPIO3_400K_PU (PM8350B_SID << 8 | 0x6c) +#define PM8350B_ADC7_GPIO4_400K_PU (PM8350B_SID << 8 | 0x6d) +#define PM8350B_ADC7_CC1_ID_400K_PU (PM8350B_SID << 8 | 0x73) + +/* 1/3 Divider */ +#define PM8350B_ADC7_GPIO1_DIV3 (PM8350B_SID << 8 | 0x8a) +#define PM8350B_ADC7_GPIO2_DIV3 (PM8350B_SID << 8 | 0x8b) +#define PM8350B_ADC7_GPIO3_DIV3 (PM8350B_SID << 8 | 0x8c) +#define PM8350B_ADC7_GPIO4_DIV3 (PM8350B_SID << 8 | 0x8d) + +#define PM8350B_ADC7_VPH_PWR (PM8350B_SID << 8 | 0x8e) +#define PM8350B_ADC7_VBAT_SNS (PM8350B_SID << 8 | 0x8f) + +#define PM8350B_ADC7_SBUx (PM8350B_SID << 8 | 0x94) +#define PM8350B_ADC7_VBAT_2S_MID (PM8350B_SID << 8 | 0x96) + +#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PM8350B_H */ diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h b/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h new file mode 100644 index 000000000000..6c296870e95b --- /dev/null +++ b/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PMK8350_H +#define _DT_BINDINGS_QCOM_SPMI_VADC_PMK8350_H + +#ifndef PMK8350_SID +#define PMK8350_SID 0 +#endif + +/* ADC channels for PMK8350_ADC for PMIC7 */ +#define PMK8350_ADC7_REF_GND (PMK8350_SID << 8 | 0x0) +#define PMK8350_ADC7_1P25VREF (PMK8350_SID << 8 | 0x01) +#define PMK8350_ADC7_VREF_VADC (PMK8350_SID << 8 | 0x02) +#define PMK8350_ADC7_DIE_TEMP (PMK8350_SID << 8 | 0x03) + +#define PMK8350_ADC7_AMUX_THM1 (PMK8350_SID << 8 | 0x04) +#define PMK8350_ADC7_AMUX_THM2 (PMK8350_SID << 8 | 0x05) +#define PMK8350_ADC7_AMUX_THM3 (PMK8350_SID << 8 | 0x06) +#define PMK8350_ADC7_AMUX_THM4 (PMK8350_SID << 8 | 0x07) +#define PMK8350_ADC7_AMUX_THM5 (PMK8350_SID << 8 | 0x08) + +/* 30k pull-up1 */ +#define PMK8350_ADC7_AMUX_THM1_30K_PU (PMK8350_SID << 8 | 0x24) +#define PMK8350_ADC7_AMUX_THM2_30K_PU (PMK8350_SID << 8 | 0x25) +#define PMK8350_ADC7_AMUX_THM3_30K_PU (PMK8350_SID << 8 | 0x26) +#define PMK8350_ADC7_AMUX_THM4_30K_PU (PMK8350_SID << 8 | 0x27) +#define PMK8350_ADC7_AMUX_THM5_30K_PU (PMK8350_SID << 8 | 0x28) + +/* 100k pull-up2 */ +#define PMK8350_ADC7_AMUX_THM1_100K_PU (PMK8350_SID << 8 | 0x44) +#define PMK8350_ADC7_AMUX_THM2_100K_PU (PMK8350_SID << 8 | 0x45) +#define PMK8350_ADC7_AMUX_THM3_100K_PU (PMK8350_SID << 8 | 0x46) +#define PMK8350_ADC7_AMUX_THM4_100K_PU (PMK8350_SID << 8 | 0x47) +#define PMK8350_ADC7_AMUX_THM5_100K_PU (PMK8350_SID << 8 | 0x48) + +/* 400k pull-up3 */ +#define PMK8350_ADC7_AMUX_THM1_400K_PU (PMK8350_SID << 8 | 0x64) +#define PMK8350_ADC7_AMUX_THM2_400K_PU (PMK8350_SID << 8 | 0x65) +#define PMK8350_ADC7_AMUX_THM3_400K_PU (PMK8350_SID << 8 | 0x66) +#define PMK8350_ADC7_AMUX_THM4_400K_PU (PMK8350_SID << 8 | 0x67) +#define PMK8350_ADC7_AMUX_THM5_400K_PU (PMK8350_SID << 8 | 0x68) + +#endif /* 
_DT_BINDINGS_QCOM_SPMI_VADC_PMK8350_H */ diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h new file mode 100644 index 000000000000..d6df1b19e5ff --- /dev/null +++ b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PMR735A_H +#define _DT_BINDINGS_QCOM_SPMI_VADC_PMR735A_H + +#ifndef PMR735A_SID +#define PMR735A_SID 4 +#endif + +/* ADC channels for PMR735A_ADC for PMIC7 */ +#define PMR735A_ADC7_REF_GND (PMR735A_SID << 8 | 0x0) +#define PMR735A_ADC7_1P25VREF (PMR735A_SID << 8 | 0x01) +#define PMR735A_ADC7_VREF_VADC (PMR735A_SID << 8 | 0x02) +#define PMR735A_ADC7_DIE_TEMP (PMR735A_SID << 8 | 0x03) + +#define PMR735A_ADC7_GPIO1 (PMR735A_SID << 8 | 0x0a) +#define PMR735A_ADC7_GPIO2 (PMR735A_SID << 8 | 0x0b) +#define PMR735A_ADC7_GPIO3 (PMR735A_SID << 8 | 0x0c) + +/* 100k pull-up2 */ +#define PMR735A_ADC7_GPIO1_100K_PU (PMR735A_SID << 8 | 0x4a) +#define PMR735A_ADC7_GPIO2_100K_PU (PMR735A_SID << 8 | 0x4b) +#define PMR735A_ADC7_GPIO3_100K_PU (PMR735A_SID << 8 | 0x4c) + +#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PMR735A_H */ diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h new file mode 100644 index 000000000000..8da0e7dab315 --- /dev/null +++ b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PMR735B_H +#define _DT_BINDINGS_QCOM_SPMI_VADC_PMR735B_H + +#ifndef PMR735B_SID +#define PMR735B_SID 5 +#endif + +/* ADC channels for PMR735B_ADC for PMIC7 */ +#define PMR735B_ADC7_REF_GND (PMR735B_SID << 8 | 0x0) +#define PMR735B_ADC7_1P25VREF (PMR735B_SID << 8 | 0x01) +#define PMR735B_ADC7_VREF_VADC (PMR735B_SID << 8 | 0x02) +#define PMR735B_ADC7_DIE_TEMP (PMR735B_SID << 8 | 0x03) + +#define PMR735B_ADC7_GPIO1 (PMR735B_SID << 8 | 0x0a) +#define PMR735B_ADC7_GPIO2 (PMR735B_SID << 8 | 0x0b) +#define PMR735B_ADC7_GPIO3 (PMR735B_SID << 8 | 0x0c) + +/* 100k pull-up2 */ +#define PMR735B_ADC7_GPIO1_100K_PU (PMR735B_SID << 8 | 0x4a) +#define PMR735B_ADC7_GPIO2_100K_PU (PMR735B_SID << 8 | 0x4b) +#define PMR735B_ADC7_GPIO3_100K_PU (PMR735B_SID << 8 | 0x4c) + +#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PMR735B_H */ diff --git a/include/dt-bindings/iio/qcom,spmi-vadc.h b/include/dt-bindings/iio/qcom,spmi-vadc.h index 61d556db1542..08adfe25964c 100644 --- a/include/dt-bindings/iio/qcom,spmi-vadc.h +++ b/include/dt-bindings/iio/qcom,spmi-vadc.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (c) 2012-2014,2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2014,2018,2020 The Linux Foundation. All rights reserved. 
*/ #ifndef _DT_BINDINGS_QCOM_SPMI_VADC_H @@ -221,4 +221,80 @@ #define ADC5_MAX_CHANNEL 0xc0 +/* ADC channels for ADC for PMIC7 */ + +#define ADC7_REF_GND 0x00 +#define ADC7_1P25VREF 0x01 +#define ADC7_VREF_VADC 0x02 +#define ADC7_DIE_TEMP 0x03 + +#define ADC7_AMUX_THM1 0x04 +#define ADC7_AMUX_THM2 0x05 +#define ADC7_AMUX_THM3 0x06 +#define ADC7_AMUX_THM4 0x07 +#define ADC7_AMUX_THM5 0x08 +#define ADC7_AMUX_THM6 0x09 +#define ADC7_GPIO1 0x0a +#define ADC7_GPIO2 0x0b +#define ADC7_GPIO3 0x0c +#define ADC7_GPIO4 0x0d + +#define ADC7_CHG_TEMP 0x10 +#define ADC7_USB_IN_V_16 0x11 +#define ADC7_VDC_16 0x12 +#define ADC7_CC1_ID 0x13 +#define ADC7_VREF_BAT_THERM 0x15 +#define ADC7_IIN_FB 0x17 + +/* 30k pull-up1 */ +#define ADC7_AMUX_THM1_30K_PU 0x24 +#define ADC7_AMUX_THM2_30K_PU 0x25 +#define ADC7_AMUX_THM3_30K_PU 0x26 +#define ADC7_AMUX_THM4_30K_PU 0x27 +#define ADC7_AMUX_THM5_30K_PU 0x28 +#define ADC7_AMUX_THM6_30K_PU 0x29 +#define ADC7_GPIO1_30K_PU 0x2a +#define ADC7_GPIO2_30K_PU 0x2b +#define ADC7_GPIO3_30K_PU 0x2c +#define ADC7_GPIO4_30K_PU 0x2d +#define ADC7_CC1_ID_30K_PU 0x33 + +/* 100k pull-up2 */ +#define ADC7_AMUX_THM1_100K_PU 0x44 +#define ADC7_AMUX_THM2_100K_PU 0x45 +#define ADC7_AMUX_THM3_100K_PU 0x46 +#define ADC7_AMUX_THM4_100K_PU 0x47 +#define ADC7_AMUX_THM5_100K_PU 0x48 +#define ADC7_AMUX_THM6_100K_PU 0x49 +#define ADC7_GPIO1_100K_PU 0x4a +#define ADC7_GPIO2_100K_PU 0x4b +#define ADC7_GPIO3_100K_PU 0x4c +#define ADC7_GPIO4_100K_PU 0x4d +#define ADC7_CC1_ID_100K_PU 0x53 + +/* 400k pull-up3 */ +#define ADC7_AMUX_THM1_400K_PU 0x64 +#define ADC7_AMUX_THM2_400K_PU 0x65 +#define ADC7_AMUX_THM3_400K_PU 0x66 +#define ADC7_AMUX_THM4_400K_PU 0x67 +#define ADC7_AMUX_THM5_400K_PU 0x68 +#define ADC7_AMUX_THM6_400K_PU 0x69 +#define ADC7_GPIO1_400K_PU 0x6a +#define ADC7_GPIO2_400K_PU 0x6b +#define ADC7_GPIO3_400K_PU 0x6c +#define ADC7_GPIO4_400K_PU 0x6d +#define ADC7_CC1_ID_400K_PU 0x73 + +/* 1/3 Divider */ +#define ADC7_GPIO1_DIV3 0x8a +#define ADC7_GPIO2_DIV3 0x8b +#define ADC7_GPIO3_DIV3 0x8c +#define ADC7_GPIO4_DIV3 0x8d + +#define ADC7_VPH_PWR 0x8e +#define ADC7_VBAT_SNS 0x8f + +#define ADC7_SBUx 0x94 +#define ADC7_VBAT_2S_MID 0x96 + #endif /* _DT_BINDINGS_QCOM_SPMI_VADC_H */ -- cgit v1.2.3 From e7e3b9d23f3bc6774cce585ef4fcb02462e04065 Mon Sep 17 00:00:00 2001 From: Gwendal Grignou Date: Tue, 26 May 2020 21:35:17 -0700 Subject: iio: cros_ec: Reapply range at resume EC does not currently preserve range across sensor reinit. If sensor is powered down at suspend, it will default to the EC default range at resume, not the range set by the host. Save range if modified, and apply at resume. 
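Condensed into one hypothetical helper (not itself part of the patch), the write-side half of the pattern applied across the drivers below is: remember the last range the host successfully set, so that the new PM resume hook in cros_ec_sensors_core.c can replay it.

  /* Sketch; every name is taken from the hunks below except this helper. */
  static int cros_ec_sensors_set_range(struct cros_ec_sensors_core_state *st,
                                       int val)
  {
          int ret;

          mutex_lock(&st->cmd_lock);
          st->param.cmd = MOTIONSENSE_CMD_SENSOR_RANGE;
          st->param.sensor_range.data = val;
          st->param.sensor_range.roundup = 1;
          ret = cros_ec_motion_send_host_cmd(st, 0);
          if (ret == 0) {
                  /* Remembered so cros_ec_sensors_resume() can reapply it. */
                  st->range_updated = true;
                  st->curr_range = val;
          }
          mutex_unlock(&st->cmd_lock);
          return ret;
  }
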
Signed-off-by: Gwendal Grignou Signed-off-by: Jonathan Cameron --- .../iio/common/cros_ec_sensors/cros_ec_sensors.c | 5 +++++ .../common/cros_ec_sensors/cros_ec_sensors_core.c | 21 +++++++++++++++++++++ drivers/iio/light/cros_ec_light_prox.c | 6 +++++- drivers/iio/pressure/cros_ec_baro.c | 8 ++++++-- include/linux/iio/common/cros_ec_sensors_core.h | 11 ++++++++++- 5 files changed, 47 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c index a66941fdb385..130ab8ce0269 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c @@ -200,6 +200,10 @@ static int cros_ec_sensors_write(struct iio_dev *indio_dev, st->core.param.sensor_range.roundup = 1; ret = cros_ec_motion_send_host_cmd(&st->core, 0); + if (ret == 0) { + st->core.range_updated = true; + st->core.curr_range = val; + } break; default: ret = cros_ec_sensors_core_write( @@ -315,6 +319,7 @@ MODULE_DEVICE_TABLE(platform, cros_ec_sensors_ids); static struct platform_driver cros_ec_sensors_platform_driver = { .driver = { .name = "cros-ec-sensors", + .pm = &cros_ec_sensors_pm_ops, }, .probe = cros_ec_sensors_probe, .id_table = cros_ec_sensors_ids, diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c index c831915ca7e5..cda459b61206 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c @@ -824,5 +824,26 @@ int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st, } EXPORT_SYMBOL_GPL(cros_ec_sensors_core_write); +static int __maybe_unused cros_ec_sensors_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct iio_dev *indio_dev = platform_get_drvdata(pdev); + struct cros_ec_sensors_core_state *st = iio_priv(indio_dev); + int ret = 0; + + if (st->range_updated) { + mutex_lock(&st->cmd_lock); + st->param.cmd = MOTIONSENSE_CMD_SENSOR_RANGE; + st->param.sensor_range.data = st->curr_range; + st->param.sensor_range.roundup = 1; + ret = cros_ec_motion_send_host_cmd(st, 0); + mutex_unlock(&st->cmd_lock); + } + return ret; +} + +SIMPLE_DEV_PM_OPS(cros_ec_sensors_pm_ops, NULL, cros_ec_sensors_resume); +EXPORT_SYMBOL_GPL(cros_ec_sensors_pm_ops); + MODULE_DESCRIPTION("ChromeOS EC sensor hub core functions"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/light/cros_ec_light_prox.c b/drivers/iio/light/cros_ec_light_prox.c index 2198b50909ed..fed79ba27fda 100644 --- a/drivers/iio/light/cros_ec_light_prox.c +++ b/drivers/iio/light/cros_ec_light_prox.c @@ -145,8 +145,11 @@ static int cros_ec_light_prox_write(struct iio_dev *indio_dev, break; case IIO_CHAN_INFO_CALIBSCALE: st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_RANGE; - st->core.param.sensor_range.data = (val << 16) | (val2 / 100); + st->core.curr_range = (val << 16) | (val2 / 100); + st->core.param.sensor_range.data = st->core.curr_range; ret = cros_ec_motion_send_host_cmd(&st->core, 0); + if (ret == 0) + st->core.range_updated = true; break; default: ret = cros_ec_sensors_core_write(&st->core, chan, val, val2, @@ -256,6 +259,7 @@ MODULE_DEVICE_TABLE(platform, cros_ec_light_prox_ids); static struct platform_driver cros_ec_light_prox_platform_driver = { .driver = { .name = "cros-ec-light-prox", + .pm = &cros_ec_sensors_pm_ops, }, .probe = cros_ec_light_prox_probe, .id_table = cros_ec_light_prox_ids, diff 
--git a/drivers/iio/pressure/cros_ec_baro.c b/drivers/iio/pressure/cros_ec_baro.c index c079b8960082..f0938b6fbba0 100644 --- a/drivers/iio/pressure/cros_ec_baro.c +++ b/drivers/iio/pressure/cros_ec_baro.c @@ -96,8 +96,11 @@ static int cros_ec_baro_write(struct iio_dev *indio_dev, /* Always roundup, so caller gets at least what it asks for. */ st->core.param.sensor_range.roundup = 1; - if (cros_ec_motion_send_host_cmd(&st->core, 0)) - ret = -EIO; + ret = cros_ec_motion_send_host_cmd(&st->core, 0); + if (ret == 0) { + st->core.range_updated = true; + st->core.curr_range = val; + } break; default: ret = cros_ec_sensors_core_write(&st->core, chan, val, val2, @@ -199,6 +202,7 @@ MODULE_DEVICE_TABLE(platform, cros_ec_baro_ids); static struct platform_driver cros_ec_baro_platform_driver = { .driver = { .name = "cros-ec-baro", + .pm = &cros_ec_sensors_pm_ops, }, .probe = cros_ec_baro_probe, .id_table = cros_ec_baro_ids, diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h index 7bc961defa87..caa8bb279a34 100644 --- a/include/linux/iio/common/cros_ec_sensors_core.h +++ b/include/linux/iio/common/cros_ec_sensors_core.h @@ -42,6 +42,10 @@ typedef irqreturn_t (*cros_ec_sensors_capture_t)(int irq, void *p); * @resp: motion sensor response structure * @type: type of motion sensor * @loc: location where the motion sensor is placed + * @range_updated: True if the range of the sensor has been + * updated. + * @curr_range: If updated, the current range value. + * It will be reapplied at every resume. * @calib: calibration parameters. Note that trigger * captured data will always provide the calibrated * data @@ -65,6 +69,9 @@ struct cros_ec_sensors_core_state { enum motionsensor_type type; enum motionsensor_location loc; + bool range_updated; + int curr_range; + struct calib_data { s16 offset; u16 scale; @@ -114,7 +121,9 @@ int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st, struct iio_chan_spec const *chan, int val, int val2, long mask); -/* List of extended channel specification for all sensors */ +extern const struct dev_pm_ops cros_ec_sensors_pm_ops; + +/* List of extended channel specification for all sensors. */ extern const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[]; extern const struct attribute *cros_ec_sensor_fifo_attributes[]; -- cgit v1.2.3 From 18d563858d97dbefc9a16c8210ef2f97dc264202 Mon Sep 17 00:00:00 2001 From: Jonathan Cameron Date: Thu, 21 May 2020 18:53:22 +0100 Subject: iio: make iio_device_get_drvdata take a const struct iio_dev *. As this just calls dev_get_drvdata underneath which is happy with a const struct device * we should change and avoid potentially casting away a const in order to then put it back again. 
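A small illustration of what the relaxed prototype buys (the driver helper and its state struct are made up): a function that only reads driver state can take a const pointer and still reach its drvdata without a cast.

  /* Sketch; assumes the usual driver boilerplate and a private
   * struct my_drv_state registered with iio_device_set_drvdata(). */
  static bool my_drv_is_ready(const struct iio_dev *indio_dev)
  {
          struct my_drv_state *st = iio_device_get_drvdata(indio_dev);

          /* Before this change the const would have had to be cast away here. */
          return st && st->initialized;
  }
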
Signed-off-by: Jonathan Cameron Reviewed-by: Jean-Baptiste Maneyrol Signed-off-by: Jonathan Cameron --- include/linux/iio/iio.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index a1be82e74c93..e846a0a7001e 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -669,7 +669,7 @@ static inline void iio_device_set_drvdata(struct iio_dev *indio_dev, void *data) * * Returns the data previously set with iio_device_set_drvdata() */ -static inline void *iio_device_get_drvdata(struct iio_dev *indio_dev) +static inline void *iio_device_get_drvdata(const struct iio_dev *indio_dev) { return dev_get_drvdata(&indio_dev->dev); } -- cgit v1.2.3 From 78289b4a58b58e9a8a76ef43ffbaf04a097e33c6 Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Wed, 3 Jun 2020 14:40:18 +0300 Subject: iio: core: pass parent device as parameter during allocation The change passes the parent device to the iio_device_alloc() call. This also updates the devm_iio_device_alloc() call to consider the device object as the parent device by default. Having it passed like this, should ensure that any IIO device object already has a device object as parent, allowing for neater control, like passing the 'indio_dev' object for other stuff [like buffers/triggers/etc], and potentially creating iiom_xxx(indio_dev) functions. With this patch, only the 'drivers/platform/x86/toshiba_acpi.c' needs an update to pass the parent object as a parameter. In the next patch all devm_iio_device_alloc() calls will be handled. Acked-by: Andy Shevchenko Signed-off-by: Alexandru Ardelean Signed-off-by: Jonathan Cameron --- drivers/iio/dummy/iio_simple_dummy.c | 14 ++++++++------ drivers/iio/industrialio-core.c | 11 ++++++----- drivers/platform/x86/toshiba_acpi.c | 3 +-- drivers/staging/iio/Documentation/device.txt | 4 +--- include/linux/iio/iio.h | 4 ++-- 5 files changed, 18 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/drivers/iio/dummy/iio_simple_dummy.c b/drivers/iio/dummy/iio_simple_dummy.c index 6cb02299a215..b35ae7c039f7 100644 --- a/drivers/iio/dummy/iio_simple_dummy.c +++ b/drivers/iio/dummy/iio_simple_dummy.c @@ -566,6 +566,13 @@ static struct iio_sw_device *iio_dummy_probe(const char *name) struct iio_dev *indio_dev; struct iio_dummy_state *st; struct iio_sw_device *swd; + struct device *parent = NULL; + + /* + * With hardware: Set the parent device. + * parent = &spi->dev; + * parent = &client->dev; + */ swd = kzalloc(sizeof(*swd), GFP_KERNEL); if (!swd) { @@ -580,7 +587,7 @@ static struct iio_sw_device *iio_dummy_probe(const char *name) * It also has a region (accessed by iio_priv() * for chip specific state information. */ - indio_dev = iio_device_alloc(sizeof(*st)); + indio_dev = iio_device_alloc(parent, sizeof(*st)); if (!indio_dev) { ret = -ENOMEM; goto error_ret; @@ -590,11 +597,6 @@ static struct iio_sw_device *iio_dummy_probe(const char *name) mutex_init(&st->lock); iio_dummy_init_device(indio_dev); - /* - * With hardware: Set the parent device. - * indio_dev->dev.parent = &spi->dev; - * indio_dev->dev.parent = &client->dev; - */ /* * Make the iio_dev struct available to remove function. 
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 1527f01a44f1..75661661aaba 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -1493,7 +1493,7 @@ struct device_type iio_device_type = { * iio_device_alloc() - allocate an iio_dev from a driver * @sizeof_priv: Space to allocate for private structure. **/ -struct iio_dev *iio_device_alloc(int sizeof_priv) +struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv) { struct iio_dev *dev; size_t alloc_size; @@ -1510,6 +1510,7 @@ struct iio_dev *iio_device_alloc(int sizeof_priv) if (!dev) return NULL; + dev->dev.parent = parent; dev->dev.groups = dev->groups; dev->dev.type = &iio_device_type; dev->dev.bus = &iio_bus_type; @@ -1551,7 +1552,7 @@ static void devm_iio_device_release(struct device *dev, void *res) /** * devm_iio_device_alloc - Resource-managed iio_device_alloc() - * @dev: Device to allocate iio_dev for + * @parent: Device to allocate iio_dev for, and parent for this IIO device * @sizeof_priv: Space to allocate for private structure. * * Managed iio_device_alloc. iio_dev allocated with this function is @@ -1560,7 +1561,7 @@ static void devm_iio_device_release(struct device *dev, void *res) * RETURNS: * Pointer to allocated iio_dev on success, NULL on failure. */ -struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv) +struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv) { struct iio_dev **ptr, *iio_dev; @@ -1569,10 +1570,10 @@ struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv) if (!ptr) return NULL; - iio_dev = iio_device_alloc(sizeof_priv); + iio_dev = iio_device_alloc(parent, sizeof_priv); if (iio_dev) { *ptr = iio_dev; - devres_add(dev, ptr); + devres_add(parent, ptr); } else { devres_free(ptr); } diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 1ddab5a6dead..36fff00af9eb 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -3114,7 +3114,7 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev) toshiba_accelerometer_available(dev); if (dev->accelerometer_supported) { - dev->indio_dev = iio_device_alloc(sizeof(*dev)); + dev->indio_dev = iio_device_alloc(&acpi_dev->dev, sizeof(*dev)); if (!dev->indio_dev) { pr_err("Unable to allocate iio device\n"); goto iio_error; @@ -3124,7 +3124,6 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev) dev->indio_dev->info = &toshiba_iio_accel_info; dev->indio_dev->name = "Toshiba accelerometer"; - dev->indio_dev->dev.parent = &acpi_dev->dev; dev->indio_dev->modes = INDIO_DIRECT_MODE; dev->indio_dev->channels = toshiba_iio_accel_channels; dev->indio_dev->num_channels = diff --git a/drivers/staging/iio/Documentation/device.txt b/drivers/staging/iio/Documentation/device.txt index ec42544a46aa..0d1275b1eb3f 100644 --- a/drivers/staging/iio/Documentation/device.txt +++ b/drivers/staging/iio/Documentation/device.txt @@ -8,7 +8,7 @@ The crucial structure for device drivers in iio is iio_dev. First allocate one using: -struct iio_dev *indio_dev = iio_device_alloc(sizeof(struct chip_state)); +struct iio_dev *indio_dev = iio_device_alloc(parent, sizeof(struct chip_state)); where chip_state is a structure of local state data for this instance of the chip. @@ -16,8 +16,6 @@ That data can be accessed using iio_priv(struct iio_dev *). Then fill in the following: -- indio_dev->dev.parent - Struct device associated with the underlying hardware. 
- indio_dev->name Name of the device being driven - made available as the name attribute in sysfs. diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index e846a0a7001e..655e34a08d94 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -676,7 +676,7 @@ static inline void *iio_device_get_drvdata(const struct iio_dev *indio_dev) /* Can we make this smaller? */ #define IIO_ALIGN L1_CACHE_BYTES -struct iio_dev *iio_device_alloc(int sizeof_priv); +struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv); static inline void *iio_priv(const struct iio_dev *indio_dev) { @@ -690,7 +690,7 @@ static inline struct iio_dev *iio_priv_to_dev(void *priv) } void iio_device_free(struct iio_dev *indio_dev); -struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv); +struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv); struct iio_trigger *devm_iio_trigger_alloc(struct device *dev, const char *fmt, ...); /** -- cgit v1.2.3 From f5d017938e7a6517b85f7fd215213a28e11291fb Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Wed, 3 Jun 2020 14:40:19 +0300 Subject: iio: core: add iio_device_set_parent() helper By default, the device allocation will also assign a parent device to the IIO device object. In cases where devm_iio_device_alloc() is used, sometimes the parent device must be different than the device used to manage the allocation. In that case, this helper should be used to change the parent, hence the requirement to call this between allocation & registration. This pattern/requirement is not very common in the IIO space, and it may be cleaned up later. But until then, assigning the parent manually between allocation & registration is slightly easier. Signed-off-by: Alexandru Ardelean Signed-off-by: Jonathan Cameron --- include/linux/iio/iio.h | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'include') diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 655e34a08d94..1c1d02107722 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -649,6 +649,26 @@ static inline struct iio_dev *iio_device_get(struct iio_dev *indio_dev) return indio_dev ? dev_to_iio_dev(get_device(&indio_dev->dev)) : NULL; } +/** + * iio_device_set_parent() - assign parent device to the IIO device object + * @indio_dev: IIO device structure + * @parent: reference to parent device object + * + * This utility must be called between IIO device allocation + * (via devm_iio_device_alloc()) & IIO device registration + * (via {devm_}iio_device_register()). + * By default, the device allocation will also assign a parent device to + * the IIO device object. In cases where devm_iio_device_alloc() is used, + * sometimes the parent device must be different than the device used to + * manage the allocation. + * In that case, this helper should be used to change the parent, hence the + * requirement to call this between allocation & registration. + **/ +static inline void iio_device_set_parent(struct iio_dev *indio_dev, + struct device *parent) +{ + indio_dev->dev.parent = parent; +} /** * iio_device_set_drvdata() - Set device driver data -- cgit v1.2.3 From 5afced3bf28100d81fb2fe7e98918632a08feaf5 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Fri, 29 May 2020 15:05:22 +0200 Subject: writeback: Avoid skipping inode writeback Inode's i_io_list list head is used to attach inode to several different lists - wb->{b_dirty, b_dirty_time, b_io, b_more_io}. 
When flush worker prepares a list of inodes to writeback e.g. for sync(2), it moves inodes to b_io list. Thus it is critical for sync(2) data integrity guarantees that inode is not requeued to any other writeback list when inode is queued for processing by flush worker. That's the reason why writeback_single_inode() does not touch i_io_list (unless the inode is completely clean) and why __mark_inode_dirty() does not touch i_io_list if I_SYNC flag is set. However there are two flaws in the current logic: 1) When inode has only I_DIRTY_TIME set but it is already queued in b_io list due to sync(2), concurrent __mark_inode_dirty(inode, I_DIRTY_SYNC) can still move inode back to b_dirty list resulting in skipping writeback of inode time stamps during sync(2). 2) When inode is on b_dirty_time list and writeback_single_inode() races with __mark_inode_dirty() like: writeback_single_inode() __mark_inode_dirty(inode, I_DIRTY_PAGES) inode->i_state |= I_SYNC __writeback_single_inode() inode->i_state |= I_DIRTY_PAGES; if (inode->i_state & I_SYNC) bail if (!(inode->i_state & I_DIRTY_ALL)) - not true so nothing done We end up with I_DIRTY_PAGES inode on b_dirty_time list and thus standard background writeback will not writeback this inode leading to possible dirty throttling stalls etc. (thanks to Martijn Coenen for this analysis). Fix these problems by tracking whether inode is queued in b_io or b_more_io lists in a new I_SYNC_QUEUED flag. When this flag is set, we know flush worker has queued inode and we should not touch i_io_list. On the other hand we also know that once flush worker is done with the inode it will requeue the inode to appropriate dirty list. When I_SYNC_QUEUED is not set, __mark_inode_dirty() can (and must) move inode to appropriate dirty list. 
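For reference, the life cycle of the new flag as implemented in the hunks below, in summary form:

  /*
   * I_SYNC_QUEUED is set in move_expired_inodes() when the flush worker moves
   * an inode to b_io, and cleared again in redirty_tail_locked(),
   * requeue_inode() and inode_io_list_del_locked() once the inode leaves
   * b_io/b_more_io.  While it is set, __mark_inode_dirty() only updates
   * i_state and leaves i_io_list alone:
   */
  if (inode->i_state & I_SYNC_QUEUED)
          goto out_unlock_inode;
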
Reported-by: Martijn Coenen Reviewed-by: Martijn Coenen Tested-by: Martijn Coenen Reviewed-by: Christoph Hellwig Fixes: 0ae45f63d4ef ("vfs: add support for a lazytime mount option") CC: stable@vger.kernel.org Signed-off-by: Jan Kara --- fs/fs-writeback.c | 17 ++++++++++++----- include/linux/fs.h | 8 ++++++-- 2 files changed, 18 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index ff0b18331590..f470c10641c5 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -146,6 +146,7 @@ static void inode_io_list_del_locked(struct inode *inode, assert_spin_locked(&wb->list_lock); assert_spin_locked(&inode->i_lock); + inode->i_state &= ~I_SYNC_QUEUED; list_del_init(&inode->i_io_list); wb_io_lists_depopulated(wb); } @@ -1187,6 +1188,7 @@ static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb) inode->dirtied_when = jiffies; } inode_io_list_move_locked(inode, wb, &wb->b_dirty); + inode->i_state &= ~I_SYNC_QUEUED; } static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) @@ -1262,8 +1264,11 @@ static int move_expired_inodes(struct list_head *delaying_queue, break; list_move(&inode->i_io_list, &tmp); moved++; + spin_lock(&inode->i_lock); if (flags & EXPIRE_DIRTY_ATIME) - set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state); + inode->i_state |= I_DIRTY_TIME_EXPIRED; + inode->i_state |= I_SYNC_QUEUED; + spin_unlock(&inode->i_lock); if (sb_is_blkdev_sb(inode->i_sb)) continue; if (sb && sb != inode->i_sb) @@ -1438,6 +1443,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, } else if (inode->i_state & I_DIRTY_TIME) { inode->dirtied_when = jiffies; inode_io_list_move_locked(inode, wb, &wb->b_dirty_time); + inode->i_state &= ~I_SYNC_QUEUED; } else { /* The inode is clean. Remove from writeback lists. */ inode_io_list_del_locked(inode, wb); @@ -2301,11 +2307,12 @@ void __mark_inode_dirty(struct inode *inode, int flags) inode->i_state |= flags; /* - * If the inode is being synced, just update its dirty state. - * The unlocker will place the inode on the appropriate - * superblock list, based upon its state. + * If the inode is queued for writeback by flush worker, just + * update its dirty state. Once the flush worker is done with + * the inode it will place it on the appropriate superblock + * list, based upon its state. */ - if (inode->i_state & I_SYNC) + if (inode->i_state & I_SYNC_QUEUED) goto out_unlock_inode; /* diff --git a/include/linux/fs.h b/include/linux/fs.h index 19ef6c88c152..48556efcdcf0 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2157,6 +2157,10 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src, * * I_DONTCACHE Evict inode as soon as it is not used anymore. * + * I_SYNC_QUEUED Inode is queued in b_io or b_more_io writeback lists. + * Used to detect that mark_inode_dirty() should not move + * inode between dirty lists. + * * Q: What is the difference between I_WILL_FREE and I_FREEING? 
*/ #define I_DIRTY_SYNC (1 << 0) @@ -2174,12 +2178,12 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src, #define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP) #define I_LINKABLE (1 << 10) #define I_DIRTY_TIME (1 << 11) -#define __I_DIRTY_TIME_EXPIRED 12 -#define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED) +#define I_DIRTY_TIME_EXPIRED (1 << 12) #define I_WB_SWITCH (1 << 13) #define I_OVL_INUSE (1 << 14) #define I_CREATING (1 << 15) #define I_DONTCACHE (1 << 16) +#define I_SYNC_QUEUED (1 << 17) #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC) #define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES) -- cgit v1.2.3 From f9cae926f35e8230330f28c7b743ad088611a8de Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Fri, 29 May 2020 16:08:58 +0200 Subject: writeback: Fix sync livelock due to b_dirty_time processing When we are processing writeback for sync(2), move_expired_inodes() didn't set any inode expiry value (older_than_this). This can result in writeback never completing if there's steady stream of inodes added to b_dirty_time list as writeback rechecks dirty lists after each writeback round whether there's more work to be done. Fix the problem by using sync(2) start time is inode expiry value when processing b_dirty_time list similarly as for ordinarily dirtied inodes. This requires some refactoring of older_than_this handling which simplifies the code noticeably as a bonus. Fixes: 0ae45f63d4ef ("vfs: add support for a lazytime mount option") CC: stable@vger.kernel.org Reviewed-by: Christoph Hellwig Signed-off-by: Jan Kara --- fs/fs-writeback.c | 44 ++++++++++++++++------------------------ include/trace/events/writeback.h | 13 ++++++------ 2 files changed, 23 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index f470c10641c5..ae17d64a3e18 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -42,7 +42,6 @@ struct wb_writeback_work { long nr_pages; struct super_block *sb; - unsigned long *older_than_this; enum writeback_sync_modes sync_mode; unsigned int tagged_writepages:1; unsigned int for_kupdate:1; @@ -1234,16 +1233,13 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t) #define EXPIRE_DIRTY_ATIME 0x0001 /* - * Move expired (dirtied before work->older_than_this) dirty inodes from + * Move expired (dirtied before dirtied_before) dirty inodes from * @delaying_queue to @dispatch_queue. 
*/ static int move_expired_inodes(struct list_head *delaying_queue, struct list_head *dispatch_queue, - int flags, - struct wb_writeback_work *work) + int flags, unsigned long dirtied_before) { - unsigned long *older_than_this = NULL; - unsigned long expire_time; LIST_HEAD(tmp); struct list_head *pos, *node; struct super_block *sb = NULL; @@ -1251,16 +1247,9 @@ static int move_expired_inodes(struct list_head *delaying_queue, int do_sb_sort = 0; int moved = 0; - if ((flags & EXPIRE_DIRTY_ATIME) == 0) - older_than_this = work->older_than_this; - else if (!work->for_sync) { - expire_time = jiffies - (dirtytime_expire_interval * HZ); - older_than_this = &expire_time; - } while (!list_empty(delaying_queue)) { inode = wb_inode(delaying_queue->prev); - if (older_than_this && - inode_dirtied_after(inode, *older_than_this)) + if (inode_dirtied_after(inode, dirtied_before)) break; list_move(&inode->i_io_list, &tmp); moved++; @@ -1306,18 +1295,22 @@ out: * | * +--> dequeue for IO */ -static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work) +static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work, + unsigned long dirtied_before) { int moved; + unsigned long time_expire_jif = dirtied_before; assert_spin_locked(&wb->list_lock); list_splice_init(&wb->b_more_io, &wb->b_io); - moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work); + moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, dirtied_before); + if (!work->for_sync) + time_expire_jif = jiffies - dirtytime_expire_interval * HZ; moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io, - EXPIRE_DIRTY_ATIME, work); + EXPIRE_DIRTY_ATIME, time_expire_jif); if (moved) wb_io_lists_populated(wb); - trace_writeback_queue_io(wb, work, moved); + trace_writeback_queue_io(wb, work, dirtied_before, moved); } static int write_inode(struct inode *inode, struct writeback_control *wbc) @@ -1829,7 +1822,7 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, blk_start_plug(&plug); spin_lock(&wb->list_lock); if (list_empty(&wb->b_io)) - queue_io(wb, &work); + queue_io(wb, &work, jiffies); __writeback_inodes_wb(wb, &work); spin_unlock(&wb->list_lock); blk_finish_plug(&plug); @@ -1849,7 +1842,7 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, * takes longer than a dirty_writeback_interval interval, then leave a * one-second gap. * - * older_than_this takes precedence over nr_to_write. So we'll only write back + * dirtied_before takes precedence over nr_to_write. So we'll only write back * all dirty pages if they are all attached to "old" mappings. */ static long wb_writeback(struct bdi_writeback *wb, @@ -1857,14 +1850,11 @@ static long wb_writeback(struct bdi_writeback *wb, { unsigned long wb_start = jiffies; long nr_pages = work->nr_pages; - unsigned long oldest_jif; + unsigned long dirtied_before = jiffies; struct inode *inode; long progress; struct blk_plug plug; - oldest_jif = jiffies; - work->older_than_this = &oldest_jif; - blk_start_plug(&plug); spin_lock(&wb->list_lock); for (;;) { @@ -1898,14 +1888,14 @@ static long wb_writeback(struct bdi_writeback *wb, * safe. 
*/ if (work->for_kupdate) { - oldest_jif = jiffies - + dirtied_before = jiffies - msecs_to_jiffies(dirty_expire_interval * 10); } else if (work->for_background) - oldest_jif = jiffies; + dirtied_before = jiffies; trace_writeback_start(wb, work); if (list_empty(&wb->b_io)) - queue_io(wb, work); + queue_io(wb, work, dirtied_before); if (work->sb) progress = writeback_sb_inodes(work->sb, wb, work); else diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 10f5d1fa7347..7565dcd59697 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -498,8 +498,9 @@ DEFINE_WBC_EVENT(wbc_writepage); TRACE_EVENT(writeback_queue_io, TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work, + unsigned long dirtied_before, int moved), - TP_ARGS(wb, work, moved), + TP_ARGS(wb, work, dirtied_before, moved), TP_STRUCT__entry( __array(char, name, 32) __field(unsigned long, older) @@ -509,19 +510,17 @@ TRACE_EVENT(writeback_queue_io, __field(ino_t, cgroup_ino) ), TP_fast_assign( - unsigned long *older_than_this = work->older_than_this; strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32); - __entry->older = older_than_this ? *older_than_this : 0; - __entry->age = older_than_this ? - (jiffies - *older_than_this) * 1000 / HZ : -1; + __entry->older = dirtied_before; + __entry->age = (jiffies - dirtied_before) * 1000 / HZ; __entry->moved = moved; __entry->reason = work->reason; __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); ), TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%lu", __entry->name, - __entry->older, /* older_than_this in jiffies */ - __entry->age, /* older_than_this in relative milliseconds */ + __entry->older, /* dirtied_before in jiffies */ + __entry->age, /* dirtied_before in relative milliseconds */ __entry->moved, __print_symbolic(__entry->reason, WB_WORK_REASON), (unsigned long)__entry->cgroup_ino -- cgit v1.2.3 From 5fcd57505c002efc5823a7355e21f48dd02d5a51 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Fri, 29 May 2020 16:24:43 +0200 Subject: writeback: Drop I_DIRTY_TIME_EXPIRE The only use of I_DIRTY_TIME_EXPIRE is to detect in __writeback_single_inode() that inode got there because flush worker decided it's time to writeback the dirty inode time stamps (either because we are syncing or because of age). However we can detect this directly in __writeback_single_inode() and there's no need for the strange propagation with I_DIRTY_TIME_EXPIRE flag. 
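For illustration, the replacement check in __writeback_single_inode() boils down to roughly the following (a condensed sketch of the hunk below, with the tracepoint omitted):

	/* Also write timestamps if we are syncing or they have expired? */
	if ((inode->i_state & I_DIRTY_TIME) &&
	    ((dirty & I_DIRTY_INODE) ||
	     wbc->sync_mode == WB_SYNC_ALL || wbc->for_sync ||
	     time_after(jiffies, inode->dirtied_time_when +
				 dirtytime_expire_interval * HZ)))
		dirty |= I_DIRTY_TIME;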
Reviewed-by: Christoph Hellwig Signed-off-by: Jan Kara --- fs/ext4/inode.c | 2 +- fs/fs-writeback.c | 28 +++++++++++----------------- fs/xfs/libxfs/xfs_trans_inode.c | 4 ++-- include/linux/fs.h | 1 - include/trace/events/writeback.h | 1 - 5 files changed, 14 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 40ec5c7ef0d3..4db497f02ffb 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4887,7 +4887,7 @@ static void __ext4_update_other_inode_time(struct super_block *sb, (inode->i_state & I_DIRTY_TIME)) { struct ext4_inode_info *ei = EXT4_I(inode); - inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED); + inode->i_state &= ~I_DIRTY_TIME; spin_unlock(&inode->i_lock); spin_lock(&ei->i_raw_lock); diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index ae17d64a3e18..149227160ff0 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -1238,7 +1238,7 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t) */ static int move_expired_inodes(struct list_head *delaying_queue, struct list_head *dispatch_queue, - int flags, unsigned long dirtied_before) + unsigned long dirtied_before) { LIST_HEAD(tmp); struct list_head *pos, *node; @@ -1254,8 +1254,6 @@ static int move_expired_inodes(struct list_head *delaying_queue, list_move(&inode->i_io_list, &tmp); moved++; spin_lock(&inode->i_lock); - if (flags & EXPIRE_DIRTY_ATIME) - inode->i_state |= I_DIRTY_TIME_EXPIRED; inode->i_state |= I_SYNC_QUEUED; spin_unlock(&inode->i_lock); if (sb_is_blkdev_sb(inode->i_sb)) @@ -1303,11 +1301,11 @@ static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work, assert_spin_locked(&wb->list_lock); list_splice_init(&wb->b_more_io, &wb->b_io); - moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, dirtied_before); + moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, dirtied_before); if (!work->for_sync) time_expire_jif = jiffies - dirtytime_expire_interval * HZ; moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io, - EXPIRE_DIRTY_ATIME, time_expire_jif); + time_expire_jif); if (moved) wb_io_lists_populated(wb); trace_writeback_queue_io(wb, work, dirtied_before, moved); @@ -1483,18 +1481,14 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc) spin_lock(&inode->i_lock); dirty = inode->i_state & I_DIRTY; - if (inode->i_state & I_DIRTY_TIME) { - if ((dirty & I_DIRTY_INODE) || - wbc->sync_mode == WB_SYNC_ALL || - unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) || - unlikely(time_after(jiffies, - (inode->dirtied_time_when + - dirtytime_expire_interval * HZ)))) { - dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED; - trace_writeback_lazytime(inode); - } - } else - inode->i_state &= ~I_DIRTY_TIME_EXPIRED; + if ((inode->i_state & I_DIRTY_TIME) && + ((dirty & I_DIRTY_INODE) || + wbc->sync_mode == WB_SYNC_ALL || wbc->for_sync || + time_after(jiffies, inode->dirtied_time_when + + dirtytime_expire_interval * HZ))) { + dirty |= I_DIRTY_TIME; + trace_writeback_lazytime(inode); + } inode->i_state &= ~dirty; /* diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c index b5dfb6654842..1b4df6636944 100644 --- a/fs/xfs/libxfs/xfs_trans_inode.c +++ b/fs/xfs/libxfs/xfs_trans_inode.c @@ -96,9 +96,9 @@ xfs_trans_log_inode( * to log the timestamps, or will clear already cleared fields in the * worst case. 
*/ - if (inode->i_state & (I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED)) { + if (inode->i_state & I_DIRTY_TIME) { spin_lock(&inode->i_lock); - inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED); + inode->i_state &= ~I_DIRTY_TIME; spin_unlock(&inode->i_lock); } diff --git a/include/linux/fs.h b/include/linux/fs.h index 48556efcdcf0..45eadf5bea5d 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2178,7 +2178,6 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src, #define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP) #define I_LINKABLE (1 << 10) #define I_DIRTY_TIME (1 << 11) -#define I_DIRTY_TIME_EXPIRED (1 << 12) #define I_WB_SWITCH (1 << 13) #define I_OVL_INUSE (1 << 14) #define I_CREATING (1 << 15) diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 7565dcd59697..e7cbccc7c14c 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -20,7 +20,6 @@ {I_CLEAR, "I_CLEAR"}, \ {I_SYNC, "I_SYNC"}, \ {I_DIRTY_TIME, "I_DIRTY_TIME"}, \ - {I_DIRTY_TIME_EXPIRED, "I_DIRTY_TIME_EXPIRED"}, \ {I_REFERENCED, "I_REFERENCED"} \ ) -- cgit v1.2.3 From e012d15a238f24795081ef1e43ffe2859b6538ed Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Mon, 15 Jun 2020 08:46:47 +0200 Subject: gpio: driver.h: fix kernel-doc markup There is one parameter with a wrong name at kernel-doc macro: ./include/linux/gpio/driver.h:499: warning: Function parameter or member 'gc' not described in 'gpiochip_add_data' ./include/linux/gpio/driver.h:499: warning: Excess function parameter 'chip' description in 'gpiochip_add_data' Fix it. Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Bartosz Golaszewski --- include/linux/gpio/driver.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index c4f272af7af5..c11261f3c724 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -481,7 +481,7 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, /** * gpiochip_add_data() - register a gpio_chip - * @chip: the chip to register, with chip->base initialized + * @gc: the chip to register, with chip->base initialized * @data: driver-private data associated with this chip * * Context: potentially before irqs will work -- cgit v1.2.3 From e17d43b93e544f5016c0251d2074c15568d5d963 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 12 May 2020 15:19:08 +0300 Subject: perf: Add perf text poke event Record (single instruction) changes to the kernel text (i.e. self-modifying code) in order to support tracers like Intel PT and ARM CoreSight. A copy of the running kernel code is needed as a reference point (e.g. from /proc/kcore). The text poke event records the old bytes and the new bytes so that the event can be processed forwards or backwards. The basic problem is recording the modified instruction in an unambiguous manner given SMP instruction cache (in)coherence. That is, when modifying an instruction concurrently any solution with one or multiple timestamps is not sufficient: CPU0 CPU1 0 1 write insn A 2 execute insn A 3 sync-I$ 4 Due to I$, CPU1 might execute either the old or new A. No matter where we record tracepoints on CPU0, one simply cannot tell what CPU1 will have observed, except that at 0 it must be the old one and at 4 it must be the new one. To solve this, take inspiration from x86 text poking, which has to solve this exact problem due to variable length instruction encoding and I-fetch windows. 
1) overwrite the instruction with a breakpoint and sync I$ This guarantees that that code flow will never hit the target instruction anymore, on any CPU (or rather, it will cause an exception). 2) issue the TEXT_POKE event 3) overwrite the breakpoint with the new instruction and sync I$ Now we know that any execution after the TEXT_POKE event will either observe the breakpoint (and hit the exception) or the new instruction. So by guarding the TEXT_POKE event with an exception on either side; we can now tell, without doubt, which instruction another CPU will have observed. Signed-off-by: Adrian Hunter Signed-off-by: Peter Zijlstra (Intel) Acked-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200512121922.8997-2-adrian.hunter@intel.com --- include/linux/perf_event.h | 8 ++++ include/uapi/linux/perf_event.h | 21 +++++++++- kernel/events/core.c | 90 ++++++++++++++++++++++++++++++++++++++++- 3 files changed, 117 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index b4bb32082342..46fe5cfb5163 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1232,6 +1232,9 @@ extern void perf_event_exec(void); extern void perf_event_comm(struct task_struct *tsk, bool exec); extern void perf_event_namespaces(struct task_struct *tsk); extern void perf_event_fork(struct task_struct *tsk); +extern void perf_event_text_poke(const void *addr, + const void *old_bytes, size_t old_len, + const void *new_bytes, size_t new_len); /* Callchains */ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry); @@ -1479,6 +1482,11 @@ static inline void perf_event_exec(void) { } static inline void perf_event_comm(struct task_struct *tsk, bool exec) { } static inline void perf_event_namespaces(struct task_struct *tsk) { } static inline void perf_event_fork(struct task_struct *tsk) { } +static inline void perf_event_text_poke(const void *addr, + const void *old_bytes, + size_t old_len, + const void *new_bytes, + size_t new_len) { } static inline void perf_event_init(void) { } static inline int perf_swevent_get_recursion_context(void) { return -1; } static inline void perf_swevent_put_recursion_context(int rctx) { } diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 7b2d6fc9e6ed..e5bee6c17b86 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -383,7 +383,8 @@ struct perf_event_attr { bpf_event : 1, /* include bpf events */ aux_output : 1, /* generate AUX records instead of events */ cgroup : 1, /* include cgroup events */ - __reserved_1 : 31; + text_poke : 1, /* include text poke events */ + __reserved_1 : 30; union { __u32 wakeup_events; /* wakeup every n events */ @@ -1024,6 +1025,24 @@ enum perf_event_type { */ PERF_RECORD_CGROUP = 19, + /* + * Records changes to kernel text i.e. self-modified code. 'old_len' is + * the number of old bytes, 'new_len' is the number of new bytes. Either + * 'old_len' or 'new_len' may be zero to indicate, for example, the + * addition or removal of a trampoline. 'bytes' contains the old bytes + * followed immediately by the new bytes. 
+ * + * struct { + * struct perf_event_header header; + * u64 addr; + * u16 old_len; + * u16 new_len; + * u8 bytes[]; + * struct sample_id sample_id; + * }; + */ + PERF_RECORD_TEXT_POKE = 20, + PERF_RECORD_MAX, /* non-ABI */ }; diff --git a/kernel/events/core.c b/kernel/events/core.c index 856d98c36f56..9b8f92500833 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -394,6 +394,7 @@ static atomic_t nr_switch_events __read_mostly; static atomic_t nr_ksymbol_events __read_mostly; static atomic_t nr_bpf_events __read_mostly; static atomic_t nr_cgroup_events __read_mostly; +static atomic_t nr_text_poke_events __read_mostly; static LIST_HEAD(pmus); static DEFINE_MUTEX(pmus_lock); @@ -4575,7 +4576,7 @@ static bool is_sb_event(struct perf_event *event) if (attr->mmap || attr->mmap_data || attr->mmap2 || attr->comm || attr->comm_exec || attr->task || attr->ksymbol || - attr->context_switch || + attr->context_switch || attr->text_poke || attr->bpf_event) return true; return false; @@ -4651,6 +4652,8 @@ static void unaccount_event(struct perf_event *event) atomic_dec(&nr_ksymbol_events); if (event->attr.bpf_event) atomic_dec(&nr_bpf_events); + if (event->attr.text_poke) + atomic_dec(&nr_text_poke_events); if (dec) { if (!atomic_add_unless(&perf_sched_count, -1, 1)) @@ -8628,6 +8631,89 @@ void perf_event_bpf_event(struct bpf_prog *prog, perf_iterate_sb(perf_event_bpf_output, &bpf_event, NULL); } +struct perf_text_poke_event { + const void *old_bytes; + const void *new_bytes; + size_t pad; + u16 old_len; + u16 new_len; + + struct { + struct perf_event_header header; + + u64 addr; + } event_id; +}; + +static int perf_event_text_poke_match(struct perf_event *event) +{ + return event->attr.text_poke; +} + +static void perf_event_text_poke_output(struct perf_event *event, void *data) +{ + struct perf_text_poke_event *text_poke_event = data; + struct perf_output_handle handle; + struct perf_sample_data sample; + u64 padding = 0; + int ret; + + if (!perf_event_text_poke_match(event)) + return; + + perf_event_header__init_id(&text_poke_event->event_id.header, &sample, event); + + ret = perf_output_begin(&handle, event, text_poke_event->event_id.header.size); + if (ret) + return; + + perf_output_put(&handle, text_poke_event->event_id); + perf_output_put(&handle, text_poke_event->old_len); + perf_output_put(&handle, text_poke_event->new_len); + + __output_copy(&handle, text_poke_event->old_bytes, text_poke_event->old_len); + __output_copy(&handle, text_poke_event->new_bytes, text_poke_event->new_len); + + if (text_poke_event->pad) + __output_copy(&handle, &padding, text_poke_event->pad); + + perf_event__output_id_sample(event, &handle, &sample); + + perf_output_end(&handle); +} + +void perf_event_text_poke(const void *addr, const void *old_bytes, + size_t old_len, const void *new_bytes, size_t new_len) +{ + struct perf_text_poke_event text_poke_event; + size_t tot, pad; + + if (!atomic_read(&nr_text_poke_events)) + return; + + tot = sizeof(text_poke_event.old_len) + old_len; + tot += sizeof(text_poke_event.new_len) + new_len; + pad = ALIGN(tot, sizeof(u64)) - tot; + + text_poke_event = (struct perf_text_poke_event){ + .old_bytes = old_bytes, + .new_bytes = new_bytes, + .pad = pad, + .old_len = old_len, + .new_len = new_len, + .event_id = { + .header = { + .type = PERF_RECORD_TEXT_POKE, + .misc = PERF_RECORD_MISC_KERNEL, + .size = sizeof(text_poke_event.event_id) + tot + pad, + }, + .addr = (unsigned long)addr, + }, + }; + + perf_iterate_sb(perf_event_text_poke_output, &text_poke_event, NULL); +} + 
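/*
 * For illustration only (not part of this patch): an architecture's text
 * poking code is expected to emit the event between installing the
 * breakpoint and writing the final instruction, along the lines of:
 *
 *	memcpy(old, addr, len);               // 0) save the old bytes
 *	// 1) replace insn at addr with a breakpoint, sync I$
 *	perf_event_text_poke(addr, old, len, new, len);
 *	// 3) write the new insn at addr, sync I$
 *
 * so that any instruction another CPU can observe is covered by either
 * the breakpoint exception or this record.
 */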
void perf_event_itrace_started(struct perf_event *event) { event->attach_state |= PERF_ATTACH_ITRACE; @@ -10945,6 +11031,8 @@ static void account_event(struct perf_event *event) atomic_inc(&nr_ksymbol_events); if (event->attr.bpf_event) atomic_inc(&nr_bpf_events); + if (event->attr.text_poke) + atomic_inc(&nr_text_poke_events); if (inc) { /* -- cgit v1.2.3 From d002b8bc6dbc20e9043e279196cff8795dba05fe Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 28 May 2020 11:00:58 +0300 Subject: kprobes: Add symbols for kprobe insn pages Symbols are needed for tools to describe instruction addresses. Pages allocated for kprobe's purposes need symbols to be created for them. Add such symbols to be visible via /proc/kallsyms. Note: kprobe insn pages are not used if ftrace is configured. To see the effect of this patch, the kernel must be configured with: # CONFIG_FUNCTION_TRACER is not set CONFIG_KPROBES=y and for optimised kprobes: CONFIG_OPTPROBES=y Example on x86: # perf probe __schedule Added new event: probe:__schedule (on __schedule) # cat /proc/kallsyms | grep '\[__builtin__kprobes\]' ffffffffc00d4000 t kprobe_insn_page [__builtin__kprobes] ffffffffc00d6000 t kprobe_optinsn_page [__builtin__kprobes] Note: This patch adds "__builtin__kprobes" as a module name in /proc/kallsyms for symbols for pages allocated for kprobes' purposes, even though "__builtin__kprobes" is not a module. Signed-off-by: Adrian Hunter Signed-off-by: Peter Zijlstra (Intel) Acked-by: Masami Hiramatsu Link: https://lkml.kernel.org/r/20200528080058.20230-1-adrian.hunter@intel.com --- include/linux/kprobes.h | 15 +++++++++++++++ kernel/kallsyms.c | 37 +++++++++++++++++++++++++++++++++---- kernel/kprobes.c | 45 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 93 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 594265bfd390..13fc58a74c04 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -242,6 +242,7 @@ struct kprobe_insn_cache { struct mutex mutex; void *(*alloc)(void); /* allocate insn page */ void (*free)(void *); /* free insn page */ + const char *sym; /* symbol for insn pages */ struct list_head pages; /* list of kprobe_insn_page */ size_t insn_size; /* size of instruction slot */ int nr_garbage; @@ -272,6 +273,10 @@ static inline bool is_kprobe_##__name##_slot(unsigned long addr) \ { \ return __is_insn_slot_addr(&kprobe_##__name##_slots, addr); \ } +#define KPROBE_INSN_PAGE_SYM "kprobe_insn_page" +#define KPROBE_OPTINSN_PAGE_SYM "kprobe_optinsn_page" +int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum, + unsigned long *value, char *type, char *sym); #else /* __ARCH_WANT_KPROBES_INSN_SLOT */ #define DEFINE_INSN_CACHE_OPS(__name) \ static inline bool is_kprobe_##__name##_slot(unsigned long addr) \ @@ -373,6 +378,11 @@ void dump_kprobe(struct kprobe *kp); void *alloc_insn_page(void); void free_insn_page(void *page); +int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + char *sym); + +int arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value, + char *type, char *sym); #else /* !CONFIG_KPROBES: */ static inline int kprobes_built_in(void) @@ -435,6 +445,11 @@ static inline bool within_kprobe_blacklist(unsigned long addr) { return true; } +static inline int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, + char *type, char *sym) +{ + return -ERANGE; +} #endif /* CONFIG_KPROBES */ static inline int disable_kretprobe(struct kretprobe *rp) { 
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index 16c8c605f4b0..c6cc293c0e67 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -24,6 +24,7 @@ #include #include #include +#include #include /* @@ -437,6 +438,7 @@ struct kallsym_iter { loff_t pos_arch_end; loff_t pos_mod_end; loff_t pos_ftrace_mod_end; + loff_t pos_bpf_end; unsigned long value; unsigned int nameoff; /* If iterating in core kernel symbols. */ char type; @@ -496,11 +498,33 @@ static int get_ksymbol_ftrace_mod(struct kallsym_iter *iter) static int get_ksymbol_bpf(struct kallsym_iter *iter) { + int ret; + strlcpy(iter->module_name, "bpf", MODULE_NAME_LEN); iter->exported = 0; - return bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end, - &iter->value, &iter->type, - iter->name) < 0 ? 0 : 1; + ret = bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end, + &iter->value, &iter->type, + iter->name); + if (ret < 0) { + iter->pos_bpf_end = iter->pos; + return 0; + } + + return 1; +} + +/* + * This uses "__builtin__kprobes" as a module name for symbols for pages + * allocated for kprobes' purposes, even though "__builtin__kprobes" is not a + * module. + */ +static int get_ksymbol_kprobe(struct kallsym_iter *iter) +{ + strlcpy(iter->module_name, "__builtin__kprobes", MODULE_NAME_LEN); + iter->exported = 0; + return kprobe_get_kallsym(iter->pos - iter->pos_bpf_end, + &iter->value, &iter->type, + iter->name) < 0 ? 0 : 1; } /* Returns space to next name. */ @@ -527,6 +551,7 @@ static void reset_iter(struct kallsym_iter *iter, loff_t new_pos) iter->pos_arch_end = 0; iter->pos_mod_end = 0; iter->pos_ftrace_mod_end = 0; + iter->pos_bpf_end = 0; } } @@ -551,7 +576,11 @@ static int update_iter_mod(struct kallsym_iter *iter, loff_t pos) get_ksymbol_ftrace_mod(iter)) return 1; - return get_ksymbol_bpf(iter); + if ((!iter->pos_bpf_end || iter->pos_bpf_end > pos) && + get_ksymbol_bpf(iter)) + return 1; + + return get_ksymbol_kprobe(iter); } /* Returns false if pos at or past end of file. 
*/ diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 50cd84f53df0..058c0be3464b 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -118,6 +118,7 @@ struct kprobe_insn_cache kprobe_insn_slots = { .mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex), .alloc = alloc_insn_page, .free = free_insn_page, + .sym = KPROBE_INSN_PAGE_SYM, .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages), .insn_size = MAX_INSN_SIZE, .nr_garbage = 0, @@ -290,12 +291,34 @@ bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr) return ret; } +int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum, + unsigned long *value, char *type, char *sym) +{ + struct kprobe_insn_page *kip; + int ret = -ERANGE; + + rcu_read_lock(); + list_for_each_entry_rcu(kip, &c->pages, list) { + if ((*symnum)--) + continue; + strlcpy(sym, c->sym, KSYM_NAME_LEN); + *type = 't'; + *value = (unsigned long)kip->insns; + ret = 0; + break; + } + rcu_read_unlock(); + + return ret; +} + #ifdef CONFIG_OPTPROBES /* For optimized_kprobe buffer */ struct kprobe_insn_cache kprobe_optinsn_slots = { .mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex), .alloc = alloc_insn_page, .free = free_insn_page, + .sym = KPROBE_OPTINSN_PAGE_SYM, .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages), /* .insn_size is initialized later */ .nr_garbage = 0, @@ -2197,6 +2220,28 @@ static void kprobe_remove_ksym_blacklist(unsigned long entry) kprobe_remove_area_blacklist(entry, entry + 1); } +int __weak arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value, + char *type, char *sym) +{ + return -ERANGE; +} + +int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + char *sym) +{ +#ifdef __ARCH_WANT_KPROBES_INSN_SLOT + if (!kprobe_cache_get_kallsym(&kprobe_insn_slots, &symnum, value, type, sym)) + return 0; +#ifdef CONFIG_OPTPROBES + if (!kprobe_cache_get_kallsym(&kprobe_optinsn_slots, &symnum, value, type, sym)) + return 0; +#endif +#endif + if (!arch_kprobe_get_kallsym(&symnum, value, type, sym)) + return 0; + return -ERANGE; +} + int __init __weak arch_populate_kprobe_blacklist(void) { return 0; -- cgit v1.2.3 From 69e49088692899d25dedfa22f00dfb9761e86ed7 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 12 May 2020 15:19:11 +0300 Subject: kprobes: Add perf ksymbol events for kprobe insn pages Symbols are needed for tools to describe instruction addresses. Pages allocated for kprobe's purposes need symbols to be created for them. Add such symbols to be visible via perf ksymbol events. Signed-off-by: Adrian Hunter Signed-off-by: Peter Zijlstra (Intel) Acked-by: Peter Zijlstra (Intel) Acked-by: Masami Hiramatsu Link: https://lkml.kernel.org/r/20200512121922.8997-5-adrian.hunter@intel.com --- include/uapi/linux/perf_event.h | 5 +++++ kernel/kprobes.c | 12 ++++++++++++ 2 files changed, 17 insertions(+) (limited to 'include') diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index e5bee6c17b86..e1a4179144a1 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -1049,6 +1049,11 @@ enum perf_event_type { enum perf_record_ksymbol_type { PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0, PERF_RECORD_KSYMBOL_TYPE_BPF = 1, + /* + * Out of line code such as kprobe-replaced instructions or optimized + * kprobes. 
+ */ + PERF_RECORD_KSYMBOL_TYPE_OOL = 2, PERF_RECORD_KSYMBOL_TYPE_MAX /* non-ABI */ }; diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 058c0be3464b..2b58740ca0f3 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -184,6 +185,10 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c) kip->cache = c; list_add_rcu(&kip->list, &c->pages); slot = kip->insns; + + /* Record the perf ksymbol register event after adding the page */ + perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns, + PAGE_SIZE, false, c->sym); out: mutex_unlock(&c->mutex); return slot; @@ -202,6 +207,13 @@ static int collect_one_slot(struct kprobe_insn_page *kip, int idx) * next time somebody inserts a probe. */ if (!list_is_singular(&kip->list)) { + /* + * Record perf ksymbol unregister event before removing + * the page. + */ + perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, + (unsigned long)kip->insns, PAGE_SIZE, true, + kip->cache->sym); list_del_rcu(&kip->list); synchronize_rcu(); kip->cache->free(kip->insns); -- cgit v1.2.3 From fc0ea795f53c8d7040fa42471f74fe51d78d0834 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 12 May 2020 15:19:13 +0300 Subject: ftrace: Add symbols for ftrace trampolines Symbols are needed for tools to describe instruction addresses. Pages allocated for ftrace's purposes need symbols to be created for them. Add such symbols to be visible via /proc/kallsyms. Example on x86 with CONFIG_DYNAMIC_FTRACE=y # echo function > /sys/kernel/debug/tracing/current_tracer # cat /proc/kallsyms | grep '\[__builtin__ftrace\]' ffffffffc0238000 t ftrace_trampoline [__builtin__ftrace] Note: This patch adds "__builtin__ftrace" as a module name in /proc/kallsyms for symbols for pages allocated for ftrace's purposes, even though "__builtin__ftrace" is not a module. 
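A tool scanning /proc/kallsyms can treat the bracketed name like any other module field. A minimal sketch of such a consumer (hypothetical tool-side helper, not kernel code):

	#include <stdio.h>
	#include <string.h>

	/*
	 * Matches lines like:
	 * "ffffffffc0238000 t ftrace_trampoline [__builtin__ftrace]"
	 */
	static int is_builtin_trampoline_sym(const char *line, unsigned long *addr)
	{
		char type, name[128], mod[64];

		if (sscanf(line, "%lx %c %127s [%63[^]]]",
			   addr, &type, name, mod) != 4)
			return 0;

		return strcmp(mod, "__builtin__ftrace") == 0 ||
		       strcmp(mod, "__builtin__kprobes") == 0;
	}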
Signed-off-by: Adrian Hunter Signed-off-by: Peter Zijlstra (Intel) Acked-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200512121922.8997-7-adrian.hunter@intel.com --- include/linux/ftrace.h | 12 +++++--- kernel/kallsyms.c | 5 ++++ kernel/trace/ftrace.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 88 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index e339dac91ee6..ce2c06f72e86 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -58,9 +58,6 @@ struct ftrace_direct_func; const char * ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, unsigned long *off, char **modname, char *sym); -int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, - char *type, char *name, - char *module_name, int *exported); #else static inline const char * ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, @@ -68,6 +65,13 @@ ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, { return NULL; } +#endif + +#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE) +int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, + char *type, char *name, + char *module_name, int *exported); +#else static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, char *type, char *name, char *module_name, int *exported) @@ -76,7 +80,6 @@ static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *val } #endif - #ifdef CONFIG_FUNCTION_TRACER extern int ftrace_enabled; @@ -207,6 +210,7 @@ struct ftrace_ops { struct ftrace_ops_hash old_hash; unsigned long trampoline; unsigned long trampoline_size; + struct list_head list; #endif }; diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index c6cc293c0e67..834bfdc43235 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -482,6 +482,11 @@ static int get_ksymbol_mod(struct kallsym_iter *iter) return 1; } +/* + * ftrace_mod_get_kallsym() may also get symbols for pages allocated for ftrace + * purposes. In that case "__builtin__ftrace" is used as a module name, even + * though "__builtin__ftrace" is not a module. + */ static int get_ksymbol_ftrace_mod(struct kallsym_iter *iter) { int ret = ftrace_mod_get_kallsym(iter->pos - iter->pos_mod_end, diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index c163c3531faf..31675b209db2 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -2764,6 +2764,38 @@ void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops) { } +/* List of trace_ops that have allocated trampolines */ +static LIST_HEAD(ftrace_ops_trampoline_list); + +static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops) +{ + lockdep_assert_held(&ftrace_lock); + list_add_rcu(&ops->list, &ftrace_ops_trampoline_list); +} + +static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops) +{ + lockdep_assert_held(&ftrace_lock); + list_del_rcu(&ops->list); +} + +/* + * "__builtin__ftrace" is used as a module name in /proc/kallsyms for symbols + * for pages allocated for ftrace purposes, even though "__builtin__ftrace" is + * not a module. 
+ */ +#define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace" +#define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline" + +static void ftrace_trampoline_free(struct ftrace_ops *ops) +{ + if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) && + ops->trampoline) + ftrace_remove_trampoline_from_kallsyms(ops); + + arch_ftrace_trampoline_free(ops); +} + static void ftrace_startup_enable(int command) { if (saved_ftrace_func != ftrace_trace_function) { @@ -2934,7 +2966,7 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command) synchronize_rcu_tasks(); free_ops: - arch_ftrace_trampoline_free(ops); + ftrace_trampoline_free(ops); } return 0; @@ -6178,6 +6210,27 @@ struct ftrace_mod_map { unsigned int num_funcs; }; +static int ftrace_get_trampoline_kallsym(unsigned int symnum, + unsigned long *value, char *type, + char *name, char *module_name, + int *exported) +{ + struct ftrace_ops *op; + + list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) { + if (!op->trampoline || symnum--) + continue; + *value = op->trampoline; + *type = 't'; + strlcpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN); + strlcpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN); + *exported = 0; + return 0; + } + + return -ERANGE; +} + #ifdef CONFIG_MODULES #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next) @@ -6514,6 +6567,7 @@ int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, { struct ftrace_mod_map *mod_map; struct ftrace_mod_func *mod_func; + int ret; preempt_disable(); list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { @@ -6540,8 +6594,10 @@ int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, WARN_ON(1); break; } + ret = ftrace_get_trampoline_kallsym(symnum, value, type, name, + module_name, exported); preempt_enable(); - return -ERANGE; + return ret; } #else @@ -6553,6 +6609,18 @@ allocate_ftrace_mod_map(struct module *mod, { return NULL; } +int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, + char *type, char *name, char *module_name, + int *exported) +{ + int ret; + + preempt_disable(); + ret = ftrace_get_trampoline_kallsym(symnum, value, type, name, + module_name, exported); + preempt_enable(); + return ret; +} #endif /* CONFIG_MODULES */ struct ftrace_init_func { @@ -6733,7 +6801,12 @@ void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops) static void ftrace_update_trampoline(struct ftrace_ops *ops) { + unsigned long trampoline = ops->trampoline; + arch_ftrace_update_trampoline(ops); + if (ops->trampoline && ops->trampoline != trampoline && + (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) + ftrace_add_trampoline_to_kallsyms(ops); } void ftrace_init_trace_array(struct trace_array *tr) -- cgit v1.2.3 From dd9ddf466ad7a5d2e247925d81ebb0b878bf3b76 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 12 May 2020 15:19:14 +0300 Subject: ftrace: Add perf ksymbol events for ftrace trampolines Symbols are needed for tools to describe instruction addresses. Pages allocated for ftrace's purposes need symbols to be created for them. Add such symbols to be visible via perf ksymbol events. 
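On the tool side these show up as ordinary ksymbol records. A hedged sketch of a handler (field layout as documented for PERF_RECORD_KSYMBOL in perf_event.h; insert_kernel_symbol()/remove_kernel_symbol() are hypothetical tool helpers):

	struct ksymbol_event {
		struct perf_event_header header;
		__u64 addr;
		__u32 len;
		__u16 ksym_type;	/* PERF_RECORD_KSYMBOL_TYPE_* */
		__u16 flags;		/* PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER on removal */
		char name[];		/* e.g. "ftrace_trampoline", "kprobe_insn_page" */
	};

	static void handle_ksymbol(const struct ksymbol_event *ev)
	{
		if (ev->ksym_type != PERF_RECORD_KSYMBOL_TYPE_OOL)
			return;

		if (ev->flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
			remove_kernel_symbol(ev->addr, ev->len);
		else
			insert_kernel_symbol(ev->addr, ev->len, ev->name);
	}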
Signed-off-by: Adrian Hunter Signed-off-by: Peter Zijlstra (Intel) Acked-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200512121922.8997-8-adrian.hunter@intel.com --- include/uapi/linux/perf_event.h | 2 +- kernel/trace/ftrace.c | 14 ++++++++++++-- 2 files changed, 13 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index e1a4179144a1..52ca2093831c 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -1051,7 +1051,7 @@ enum perf_record_ksymbol_type { PERF_RECORD_KSYMBOL_TYPE_BPF = 1, /* * Out of line code such as kprobe-replaced instructions or optimized - * kprobes. + * kprobes or ftrace trampolines. */ PERF_RECORD_KSYMBOL_TYPE_OOL = 2, PERF_RECORD_KSYMBOL_TYPE_MAX /* non-ABI */ diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 31675b209db2..2baaf7716537 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -2790,8 +2790,13 @@ static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops) static void ftrace_trampoline_free(struct ftrace_ops *ops) { if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) && - ops->trampoline) + ops->trampoline) { + perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, + ops->trampoline, ops->trampoline_size, + true, FTRACE_TRAMPOLINE_SYM); + /* Remove from kallsyms after the perf events */ ftrace_remove_trampoline_from_kallsyms(ops); + } arch_ftrace_trampoline_free(ops); } @@ -6805,8 +6810,13 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops) arch_ftrace_update_trampoline(ops); if (ops->trampoline && ops->trampoline != trampoline && - (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) + (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) { + /* Add to kallsyms before the perf events */ ftrace_add_trampoline_to_kallsyms(ops); + perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, + ops->trampoline, ops->trampoline_size, false, + FTRACE_TRAMPOLINE_SYM); + } } void ftrace_init_trace_array(struct trace_array *tr) -- cgit v1.2.3 From 3dc167ba5729ddd2d8e3fa1841653792c295d3f1 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 19 May 2020 19:25:06 +0200 Subject: sched/cputime: Improve cputime_adjust() People report that utime and stime from /proc//stat become very wrong when the numbers are big enough, especially if you watch these counters incrementally. Specifically, the current implementation of: stime*rtime/total, results in a saw-tooth function on top of the desired line, where the teeth grow in size the larger the values become. IOW, it has a relative error. The result is that, when watching incrementally as time progresses (for large values), we'll see periods of pure stime or utime increase, irrespective of the actual ratio we're striving for. Replace scale_stime() with a math64.h helper: mul_u64_u64_div_u64() that is far more accurate. This also allows architectures to override the implementation -- for instance they can opt for the old algorithm if this new one turns out to be too expensive for them. 
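To see the scale of the problem: with utime and stime each around 100 days of CPU time, the values are roughly 8.6e15 ns, so the naive stime * rtime intermediate is about 7e31, far beyond the ~1.8e19 that fits in a u64 (which is why scale_stime() dropped precision). With the new helper the adjustment is simply:

	/* scale stime so that stime + utime approximates rtime */
	stime = mul_u64_u64_div_u64(stime, rtime, stime + utime);

which on x86 uses a full 128-bit intermediate (mulq/divq) and elsewhere falls back to the approximation added in lib/math/div64.c.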
Signed-off-by: Oleg Nesterov Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200519172506.GA317395@hirez.programming.kicks-ass.net --- arch/x86/include/asm/div64.h | 14 ++++++++++++-- include/linux/math64.h | 2 ++ kernel/sched/cputime.c | 46 +------------------------------------------- lib/math/div64.c | 41 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 56 insertions(+), 47 deletions(-) (limited to 'include') diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h index 9b8cb50768c2..b8f1dc0761e4 100644 --- a/arch/x86/include/asm/div64.h +++ b/arch/x86/include/asm/div64.h @@ -74,16 +74,26 @@ static inline u64 mul_u32_u32(u32 a, u32 b) #else # include -static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 div) +/* + * Will generate an #DE when the result doesn't fit u64, could fix with an + * __ex_table[] entry when it becomes an issue. + */ +static inline u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div) { u64 q; asm ("mulq %2; divq %3" : "=a" (q) - : "a" (a), "rm" ((u64)mul), "rm" ((u64)div) + : "a" (a), "rm" (mul), "rm" (div) : "rdx"); return q; } +#define mul_u64_u64_div_u64 mul_u64_u64_div_u64 + +static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 div) +{ + return mul_u64_u64_div_u64(a, mul, div); +} #define mul_u64_u32_div mul_u64_u32_div #endif /* CONFIG_X86_32 */ diff --git a/include/linux/math64.h b/include/linux/math64.h index 11a267413e8e..d097119419e6 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h @@ -263,6 +263,8 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor) } #endif /* mul_u64_u32_div */ +u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div); + #define DIV64_U64_ROUND_UP(ll, d) \ ({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); }) diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index ff9435dee1df..5a55d2300452 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -519,50 +519,6 @@ void account_idle_ticks(unsigned long ticks) account_idle_time(cputime); } -/* - * Perform (stime * rtime) / total, but avoid multiplication overflow by - * losing precision when the numbers are big. - */ -static u64 scale_stime(u64 stime, u64 rtime, u64 total) -{ - u64 scaled; - - for (;;) { - /* Make sure "rtime" is the bigger of stime/rtime */ - if (stime > rtime) - swap(rtime, stime); - - /* Make sure 'total' fits in 32 bits */ - if (total >> 32) - goto drop_precision; - - /* Does rtime (and thus stime) fit in 32 bits? */ - if (!(rtime >> 32)) - break; - - /* Can we just balance rtime/stime rather than dropping bits? */ - if (stime >> 31) - goto drop_precision; - - /* We can grow stime and shrink rtime and try to make them both fit */ - stime <<= 1; - rtime >>= 1; - continue; - -drop_precision: - /* We drop from rtime, it has more bits than stime */ - rtime >>= 1; - total >>= 1; - } - - /* - * Make sure gcc understands that this is a 32x32->64 multiply, - * followed by a 64/32->64 divide. - */ - scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total); - return scaled; -} - /* * Adjust tick based cputime random precision against scheduler runtime * accounting. 
@@ -622,7 +578,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev, goto update; } - stime = scale_stime(stime, rtime, stime + utime); + stime = mul_u64_u64_div_u64(stime, rtime, stime + utime); update: /* diff --git a/lib/math/div64.c b/lib/math/div64.c index 368ca7fd0d82..3952a07130d8 100644 --- a/lib/math/div64.c +++ b/lib/math/div64.c @@ -190,3 +190,44 @@ u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) return __iter_div_u64_rem(dividend, divisor, remainder); } EXPORT_SYMBOL(iter_div_u64_rem); + +#ifndef mul_u64_u64_div_u64 +u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c) +{ + u64 res = 0, div, rem; + int shift; + + /* can a * b overflow ? */ + if (ilog2(a) + ilog2(b) > 62) { + /* + * (b * a) / c is equal to + * + * (b / c) * a + + * (b % c) * a / c + * + * if nothing overflows. Can the 1st multiplication + * overflow? Yes, but we do not care: this can only + * happen if the end result can't fit in u64 anyway. + * + * So the code below does + * + * res = (b / c) * a; + * b = b % c; + */ + div = div64_u64_rem(b, c, &rem); + res = div * a; + b = rem; + + shift = ilog2(a) + ilog2(b) - 62; + if (shift > 0) { + /* drop precision */ + b >>= shift; + c >>= shift; + if (!c) + return res; + } + } + + return res + div64_u64(a * b, c); +} +#endif -- cgit v1.2.3 From 4581bea8b4ec4de353369775dfef921191e393b3 Mon Sep 17 00:00:00 2001 From: Vincent Donnefort Date: Wed, 27 May 2020 17:39:14 +0100 Subject: sched/debug: Add new tracepoints to track util_est The util_est signals are key elements for EAS task placement and frequency selection. Having tracepoints to track these signals enables load-tracking and schedutil testing and/or debugging by a toolkit. Signed-off-by: Vincent Donnefort Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Valentin Schneider Link: https://lkml.kernel.org/r/1590597554-370150-1-git-send-email-vincent.donnefort@arm.com --- include/trace/events/sched.h | 8 ++++++++ kernel/sched/core.c | 2 ++ kernel/sched/fair.c | 6 ++++++ 3 files changed, 16 insertions(+) (limited to 'include') diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index ed168b0e2c53..04f9a4c7b0d9 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -634,6 +634,14 @@ DECLARE_TRACE(sched_overutilized_tp, TP_PROTO(struct root_domain *rd, bool overutilized), TP_ARGS(rd, overutilized)); +DECLARE_TRACE(sched_util_est_cfs_tp, + TP_PROTO(struct cfs_rq *cfs_rq), + TP_ARGS(cfs_rq)); + +DECLARE_TRACE(sched_util_est_se_tp, + TP_PROTO(struct sched_entity *se), + TP_ARGS(se)); + #endif /* _TRACE_SCHED_H */ /* This part must be outside protection */ diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 9c89b0eaf796..0208b71bef80 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -36,6 +36,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp); EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp); EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp); EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp); +EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp); +EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp); DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 69da576f7f48..a785a9b262dd 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3922,6 +3922,8 @@ static inline void util_est_enqueue(struct cfs_rq *cfs_rq, enqueued = cfs_rq->avg.util_est.enqueued; enqueued += _task_util_est(p); WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued); + + trace_sched_util_est_cfs_tp(cfs_rq); } /* @@ 
-3952,6 +3954,8 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) ue.enqueued -= min_t(unsigned int, ue.enqueued, _task_util_est(p)); WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued); + trace_sched_util_est_cfs_tp(cfs_rq); + /* * Skip update of task's estimated utilization when the task has not * yet completed an activation, e.g. being migrated. @@ -4017,6 +4021,8 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) ue.ewma >>= UTIL_EST_WEIGHT_SHIFT; done: WRITE_ONCE(p->se.avg.util_est, ue); + + trace_sched_util_est_se_tp(&p->se); } static inline int task_fits_capacity(struct task_struct *p, long capacity) -- cgit v1.2.3 From 461daba06bdcb9c7a3f92b9bbd110e1f7d093ffc Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 28 May 2020 12:54:42 -0700 Subject: psi: eliminate kthread_worker from psi trigger scheduling mechanism Each psi group requires a dedicated kthread_delayed_work and kthread_worker. Since no other work can be performed using psi_group's kthread_worker, the same result can be obtained using a task_struct and a timer directly. This makes psi triggering simpler by removing lists and locks involved with kthread_worker usage and eliminates the need for poll_scheduled atomic use in the hot path. Signed-off-by: Suren Baghdasaryan Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200528195442.190116-1-surenb@google.com --- include/linux/psi_types.h | 7 +-- kernel/sched/psi.c | 113 ++++++++++++++++++++++++++-------------------- 2 files changed, 68 insertions(+), 52 deletions(-) (limited to 'include') diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h index 4b7258495a04..b95f3211566a 100644 --- a/include/linux/psi_types.h +++ b/include/linux/psi_types.h @@ -153,9 +153,10 @@ struct psi_group { unsigned long avg[NR_PSI_STATES - 1][3]; /* Monitor work control */ - atomic_t poll_scheduled; - struct kthread_worker __rcu *poll_kworker; - struct kthread_delayed_work poll_work; + struct task_struct __rcu *poll_task; + struct timer_list poll_timer; + wait_queue_head_t poll_wait; + atomic_t poll_wakeup; /* Protects data used by the monitor */ struct mutex trigger_lock; diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c index 8f45cdb6463b..e53b711bd643 100644 --- a/kernel/sched/psi.c +++ b/kernel/sched/psi.c @@ -190,7 +190,6 @@ static void group_init(struct psi_group *group) INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work); mutex_init(&group->avgs_lock); /* Init trigger-related members */ - atomic_set(&group->poll_scheduled, 0); mutex_init(&group->trigger_lock); INIT_LIST_HEAD(&group->triggers); memset(group->nr_triggers, 0, sizeof(group->nr_triggers)); @@ -199,7 +198,7 @@ static void group_init(struct psi_group *group) memset(group->polling_total, 0, sizeof(group->polling_total)); group->polling_next_update = ULLONG_MAX; group->polling_until = 0; - rcu_assign_pointer(group->poll_kworker, NULL); + rcu_assign_pointer(group->poll_task, NULL); } void __init psi_init(void) @@ -547,47 +546,38 @@ static u64 update_triggers(struct psi_group *group, u64 now) return now + group->poll_min_period; } -/* - * Schedule polling if it's not already scheduled. It's safe to call even from - * hotpath because even though kthread_queue_delayed_work takes worker->lock - * spinlock that spinlock is never contended due to poll_scheduled atomic - * preventing such competition. - */ +/* Schedule polling if it's not already scheduled. 
*/ static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay) { - struct kthread_worker *kworker; + struct task_struct *task; - /* Do not reschedule if already scheduled */ - if (atomic_cmpxchg(&group->poll_scheduled, 0, 1) != 0) + /* + * Do not reschedule if already scheduled. + * Possible race with a timer scheduled after this check but before + * mod_timer below can be tolerated because group->polling_next_update + * will keep updates on schedule. + */ + if (timer_pending(&group->poll_timer)) return; rcu_read_lock(); - kworker = rcu_dereference(group->poll_kworker); + task = rcu_dereference(group->poll_task); /* * kworker might be NULL in case psi_trigger_destroy races with * psi_task_change (hotpath) which can't use locks */ - if (likely(kworker)) - kthread_queue_delayed_work(kworker, &group->poll_work, delay); - else - atomic_set(&group->poll_scheduled, 0); + if (likely(task)) + mod_timer(&group->poll_timer, jiffies + delay); rcu_read_unlock(); } -static void psi_poll_work(struct kthread_work *work) +static void psi_poll_work(struct psi_group *group) { - struct kthread_delayed_work *dwork; - struct psi_group *group; u32 changed_states; u64 now; - dwork = container_of(work, struct kthread_delayed_work, work); - group = container_of(dwork, struct psi_group, poll_work); - - atomic_set(&group->poll_scheduled, 0); - mutex_lock(&group->trigger_lock); now = sched_clock(); @@ -623,6 +613,35 @@ out: mutex_unlock(&group->trigger_lock); } +static int psi_poll_worker(void *data) +{ + struct psi_group *group = (struct psi_group *)data; + struct sched_param param = { + .sched_priority = 1, + }; + + sched_setscheduler_nocheck(current, SCHED_FIFO, ¶m); + + while (true) { + wait_event_interruptible(group->poll_wait, + atomic_cmpxchg(&group->poll_wakeup, 1, 0) || + kthread_should_stop()); + if (kthread_should_stop()) + break; + + psi_poll_work(group); + } + return 0; +} + +static void poll_timer_fn(struct timer_list *t) +{ + struct psi_group *group = from_timer(group, t, poll_timer); + + atomic_set(&group->poll_wakeup, 1); + wake_up_interruptible(&group->poll_wait); +} + static void record_times(struct psi_group_cpu *groupc, int cpu, bool memstall_tick) { @@ -1099,22 +1118,20 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group, mutex_lock(&group->trigger_lock); - if (!rcu_access_pointer(group->poll_kworker)) { - struct sched_param param = { - .sched_priority = 1, - }; - struct kthread_worker *kworker; + if (!rcu_access_pointer(group->poll_task)) { + struct task_struct *task; - kworker = kthread_create_worker(0, "psimon"); - if (IS_ERR(kworker)) { + task = kthread_create(psi_poll_worker, group, "psimon"); + if (IS_ERR(task)) { kfree(t); mutex_unlock(&group->trigger_lock); - return ERR_CAST(kworker); + return ERR_CAST(task); } - sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, ¶m); - kthread_init_delayed_work(&group->poll_work, - psi_poll_work); - rcu_assign_pointer(group->poll_kworker, kworker); + atomic_set(&group->poll_wakeup, 0); + init_waitqueue_head(&group->poll_wait); + wake_up_process(task); + timer_setup(&group->poll_timer, poll_timer_fn, 0); + rcu_assign_pointer(group->poll_task, task); } list_add(&t->node, &group->triggers); @@ -1132,7 +1149,7 @@ static void psi_trigger_destroy(struct kref *ref) { struct psi_trigger *t = container_of(ref, struct psi_trigger, refcount); struct psi_group *group = t->group; - struct kthread_worker *kworker_to_destroy = NULL; + struct task_struct *task_to_destroy = NULL; if (static_branch_likely(&psi_disabled)) return; @@ 
-1158,13 +1175,13 @@ static void psi_trigger_destroy(struct kref *ref) period = min(period, div_u64(tmp->win.size, UPDATES_PER_WINDOW)); group->poll_min_period = period; - /* Destroy poll_kworker when the last trigger is destroyed */ + /* Destroy poll_task when the last trigger is destroyed */ if (group->poll_states == 0) { group->polling_until = 0; - kworker_to_destroy = rcu_dereference_protected( - group->poll_kworker, + task_to_destroy = rcu_dereference_protected( + group->poll_task, lockdep_is_held(&group->trigger_lock)); - rcu_assign_pointer(group->poll_kworker, NULL); + rcu_assign_pointer(group->poll_task, NULL); } } @@ -1172,25 +1189,23 @@ static void psi_trigger_destroy(struct kref *ref) /* * Wait for both *trigger_ptr from psi_trigger_replace and - * poll_kworker RCUs to complete their read-side critical sections - * before destroying the trigger and optionally the poll_kworker + * poll_task RCUs to complete their read-side critical sections + * before destroying the trigger and optionally the poll_task */ synchronize_rcu(); /* * Destroy the kworker after releasing trigger_lock to prevent a * deadlock while waiting for psi_poll_work to acquire trigger_lock */ - if (kworker_to_destroy) { + if (task_to_destroy) { /* * After the RCU grace period has expired, the worker - * can no longer be found through group->poll_kworker. + * can no longer be found through group->poll_task. * But it might have been already scheduled before * that - deschedule it cleanly before destroying it. */ - kthread_cancel_delayed_work_sync(&group->poll_work); - atomic_set(&group->poll_scheduled, 0); - - kthread_destroy_worker(kworker_to_destroy); + del_timer_sync(&group->poll_timer); + kthread_stop(task_to_destroy); } kfree(t); } -- cgit v1.2.3 From 9cc5b8656892a72438ee7deb5e80f5be47643b8b Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Wed, 27 May 2020 16:29:09 +0200 Subject: isolcpus: Affine unbound kernel threads to housekeeping cpus This is a kernel enhancement that configures the cpu affinity of kernel threads via kernel boot option nohz_full=. When this option is specified, the cpumask is immediately applied upon kthread launch. This does not affect kernel threads that specify cpu and node. This allows CPU isolation (that is not allowing certain threads to execute on certain CPUs) without using the isolcpus=domain parameter, making it possible to enable load balancing on such CPUs during runtime (see kernel-parameters.txt). Note-1: this is based off on Wind River's patch at https://github.com/starlingx-staging/stx-integ/blob/master/kernel/kernel-std/centos/patches/affine-compute-kernel-threads.patch Difference being that this patch is limited to modifying kernel thread cpumask. Behaviour of other threads can be controlled via cgroups or sched_setaffinity. Note-2: Wind River's patch was based off Christoph Lameter's patch at https://lwn.net/Articles/565932/ with the only difference being the kernel parameter changed from kthread to kthread_cpus. 
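For example, booting an 8-CPU system with:

	nohz_full=2-7

leaves CPUs 0-1 as housekeeping CPUs, so kthreadd and any kthread created without an explicit CPU/node binding now starts affined to CPUs 0-1 instead of cpu_possible_mask. This can be spot-checked with something like:

	taskset -cp "$(pgrep -o kthreadd)"
	pid 2's current affinity list: 0,1

(the exact pid and output format depend on the system).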
Signed-off-by: Frederic Weisbecker Signed-off-by: Marcelo Tosatti Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200527142909.23372-3-frederic@kernel.org --- include/linux/sched/isolation.h | 1 + kernel/kthread.c | 6 ++++-- kernel/sched/isolation.c | 3 ++- 3 files changed, 7 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h index 0fbcbacd1b29..cc9f393e2a70 100644 --- a/include/linux/sched/isolation.h +++ b/include/linux/sched/isolation.h @@ -14,6 +14,7 @@ enum hk_flags { HK_FLAG_DOMAIN = (1 << 5), HK_FLAG_WQ = (1 << 6), HK_FLAG_MANAGED_IRQ = (1 << 7), + HK_FLAG_KTHREAD = (1 << 8), }; #ifdef CONFIG_CPU_ISOLATION diff --git a/kernel/kthread.c b/kernel/kthread.c index b86d37cda109..032b610912b0 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -27,6 +27,7 @@ #include #include #include +#include #include @@ -383,7 +384,8 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data), * The kernel thread should not inherit these properties. */ sched_setscheduler_nocheck(task, SCHED_NORMAL, ¶m); - set_cpus_allowed_ptr(task, cpu_possible_mask); + set_cpus_allowed_ptr(task, + housekeeping_cpumask(HK_FLAG_KTHREAD)); } kfree(create); return task; @@ -608,7 +610,7 @@ int kthreadd(void *unused) /* Setup a clean context for our children to inherit. */ set_task_comm(tsk, "kthreadd"); ignore_signals(tsk); - set_cpus_allowed_ptr(tsk, cpu_possible_mask); + set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_FLAG_KTHREAD)); set_mems_allowed(node_states[N_MEMORY]); current->flags |= PF_NOFREEZE; diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c index 808244f3ddd9..5a6ea03f9882 100644 --- a/kernel/sched/isolation.c +++ b/kernel/sched/isolation.c @@ -140,7 +140,8 @@ static int __init housekeeping_nohz_full_setup(char *str) { unsigned int flags; - flags = HK_FLAG_TICK | HK_FLAG_WQ | HK_FLAG_TIMER | HK_FLAG_RCU | HK_FLAG_MISC; + flags = HK_FLAG_TICK | HK_FLAG_WQ | HK_FLAG_TIMER | HK_FLAG_RCU | + HK_FLAG_MISC | HK_FLAG_KTHREAD; return housekeeping_setup(str, flags); } -- cgit v1.2.3 From b4098bfc5efb1fd7ecf40165132a1283aeea3500 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 26 Jul 2019 16:54:10 +0200 Subject: sched/deadline: Impose global limits on sched_attr::sched_period Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20190726161357.397880775@infradead.org --- include/linux/sched/sysctl.h | 3 +++ kernel/sched/deadline.c | 23 +++++++++++++++++++++-- kernel/sysctl.c | 14 ++++++++++++++ 3 files changed, 38 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 660ac49f2b53..24be30a40814 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -61,6 +61,9 @@ int sched_proc_update_handler(struct ctl_table *table, int write, extern unsigned int sysctl_sched_rt_period; extern int sysctl_sched_rt_runtime; +extern unsigned int sysctl_sched_dl_period_max; +extern unsigned int sysctl_sched_dl_period_min; + #ifdef CONFIG_UCLAMP_TASK extern unsigned int sysctl_sched_uclamp_util_min; extern unsigned int sysctl_sched_uclamp_util_max; diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 504d2f51b0d6..f31964ad9c2e 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -2634,6 +2634,14 @@ void __getparam_dl(struct task_struct *p, struct sched_attr *attr) attr->sched_flags = dl_se->flags; } +/* + * Default limits for DL period; on 
the top end we guard against small util + * tasks still getting rediculous long effective runtimes, on the bottom end we + * guard against timer DoS. + */ +unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */ +unsigned int sysctl_sched_dl_period_min = 100; /* 100 us */ + /* * This function validates the new parameters of a -deadline task. * We ask for the deadline not being zero, and greater or equal @@ -2646,6 +2654,8 @@ void __getparam_dl(struct task_struct *p, struct sched_attr *attr) */ bool __checkparam_dl(const struct sched_attr *attr) { + u64 period, max, min; + /* special dl tasks don't actually use any parameter */ if (attr->sched_flags & SCHED_FLAG_SUGOV) return true; @@ -2669,12 +2679,21 @@ bool __checkparam_dl(const struct sched_attr *attr) attr->sched_period & (1ULL << 63)) return false; + period = attr->sched_period; + if (!period) + period = attr->sched_deadline; + /* runtime <= deadline <= period (if period != 0) */ - if ((attr->sched_period != 0 && - attr->sched_period < attr->sched_deadline) || + if (period < attr->sched_deadline || attr->sched_deadline < attr->sched_runtime) return false; + max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC; + min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC; + + if (period < min || period > max) + return false; + return true; } diff --git a/kernel/sysctl.c b/kernel/sysctl.c index db1ce7af2563..4aea67d3d552 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -1779,6 +1779,20 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = sched_rt_handler, }, + { + .procname = "sched_deadline_period_max_us", + .data = &sysctl_sched_dl_period_max, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "sched_deadline_period_min_us", + .data = &sysctl_sched_dl_period_min, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, { .procname = "sched_rr_timeslice_ms", .data = &sysctl_sched_rr_timeslice, -- cgit v1.2.3 From 7318d4cc14c8c8a5dde2b0b72ea50fd2545f0b7a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 21 Apr 2020 12:09:13 +0200 Subject: sched: Provide sched_set_fifo() SCHED_FIFO (or any static priority scheduler) is a broken scheduler model; it is fundamentally incapable of resource management, the one thing an OS is actually supposed to do. It is impossible to compose static priority workloads. One cannot take two well designed and functional static priority workloads and mash them together and still expect them to work. Therefore it doesn't make sense to expose the priority field; the kernel is fundamentally incapable of setting a sensible value, it needs systems knowledge that it doesn't have. Take away sched_setschedule() / sched_setattr() from modules and replace them with: - sched_set_fifo(p); create a FIFO task (at prio 50) - sched_set_fifo_low(p); create a task higher than NORMAL, which ends up being a FIFO task at prio 1. - sched_set_normal(p, nice); (re)set the task to normal This stops the proliferation of randomly chosen, and irrelevant, FIFO priorities that dont't really mean anything anyway. The system administrator/integrator, whoever has insight into the actual system design and requirements (userspace) can set-up appropriate priorities if and when needed. 
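A typical conversion at a call site then looks roughly like this (a sketch, not one of the sites touched by this series):

	-	struct sched_param sp = { .sched_priority = 42 };
	-
	-	sched_setscheduler(task, SCHED_FIFO, &sp);
	+	sched_set_fifo(task);

i.e. the module only states the policy and the kernel supplies the single default priority.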
Cc: airlied@redhat.com Cc: alexander.deucher@amd.com Cc: awalls@md.metrocast.net Cc: axboe@kernel.dk Cc: broonie@kernel.org Cc: daniel.lezcano@linaro.org Cc: gregkh@linuxfoundation.org Cc: hannes@cmpxchg.org Cc: herbert@gondor.apana.org.au Cc: hverkuil@xs4all.nl Cc: john.stultz@linaro.org Cc: nico@fluxnic.net Cc: paulmck@kernel.org Cc: rafael.j.wysocki@intel.com Cc: rmk+kernel@arm.linux.org.uk Cc: sudeep.holla@arm.com Cc: tglx@linutronix.de Cc: ulf.hansson@linaro.org Cc: wim@linux-watchdog.org Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Ingo Molnar Tested-by: Paul E. McKenney --- include/linux/sched.h | 3 +++ kernel/sched/core.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index b62e6aaf28f0..b792b8f0f4cf 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1653,6 +1653,9 @@ extern int idle_cpu(int cpu); extern int available_idle_cpu(int cpu); extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *); extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); +extern int sched_set_fifo(struct task_struct *p); +extern int sched_set_fifo_low(struct task_struct *p); +extern int sched_set_normal(struct task_struct *p, int nice); extern int sched_setattr(struct task_struct *, const struct sched_attr *); extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *); extern struct task_struct *idle_task(int cpu); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 0208b71bef80..40d3939b0520 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5124,6 +5124,8 @@ static int _sched_setscheduler(struct task_struct *p, int policy, * @policy: new policy. * @param: structure containing the new RT priority. * + * Use sched_set_fifo(), read its comment. + * * Return: 0 on success. An error code otherwise. * * NOTE that the task may be already dead. @@ -5166,6 +5168,51 @@ int sched_setscheduler_nocheck(struct task_struct *p, int policy, } EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck); +/* + * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally + * incapable of resource management, which is the one thing an OS really should + * be doing. + * + * This is of course the reason it is limited to privileged users only. + * + * Worse still; it is fundamentally impossible to compose static priority + * workloads. You cannot take two correctly working static prio workloads + * and smash them together and still expect them to work. + * + * For this reason 'all' FIFO tasks the kernel creates are basically at: + * + * MAX_RT_PRIO / 2 + * + * The administrator _MUST_ configure the system, the kernel simply doesn't + * know enough information to make a sensible choice. + */ +int sched_set_fifo(struct task_struct *p) +{ + struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 }; + return sched_setscheduler_nocheck(p, SCHED_FIFO, &sp); +} +EXPORT_SYMBOL_GPL(sched_set_fifo); + +/* + * For when you don't much care about FIFO, but want to be above SCHED_NORMAL. 
+ */ +int sched_set_fifo_low(struct task_struct *p) +{ + struct sched_param sp = { .sched_priority = 1 }; + return sched_setscheduler_nocheck(p, SCHED_FIFO, &sp); +} +EXPORT_SYMBOL_GPL(sched_set_fifo_low); + +int sched_set_normal(struct task_struct *p, int nice) +{ + struct sched_attr attr = { + .sched_policy = SCHED_NORMAL, + .sched_nice = nice, + }; + return sched_setattr_nocheck(p, &attr); +} +EXPORT_SYMBOL_GPL(sched_set_normal); + static int do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) { -- cgit v1.2.3 From 8b700983de82f79e05b2c1136d6513ea4c9b22c4 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 22 Apr 2020 13:10:04 +0200 Subject: sched: Remove sched_set_*() return value Ingo suggested that since the new sched_set_*() functions are implemented using the 'nocheck' variants, they really shouldn't ever fail, so remove the return value. Cc: axboe@kernel.dk Cc: daniel.lezcano@linaro.org Cc: sudeep.holla@arm.com Cc: airlied@redhat.com Cc: broonie@kernel.org Cc: paulmck@kernel.org Suggested-by: Ingo Molnar Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Ingo Molnar --- drivers/block/drbd/drbd_receiver.c | 4 +--- drivers/firmware/psci/psci_checker.c | 3 +-- drivers/gpu/drm/msm/msm_drv.c | 5 +---- drivers/platform/chrome/cros_ec_spi.c | 7 +++---- include/linux/sched.h | 6 +++--- kernel/rcu/rcutorture.c | 5 +---- kernel/sched/core.c | 12 ++++++------ 7 files changed, 16 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 140fd98274b1..280615efef74 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -6020,9 +6020,7 @@ int drbd_ack_receiver(struct drbd_thread *thi) int expect = header_size; bool ping_timeout_active = false; - rv = sched_set_fifo_low(current); - if (rv < 0) - drbd_err(connection, "drbd_ack_receiver: ERROR set priority, ret=%d\n", rv); + sched_set_fifo_low(current); while (get_t_state(thi) == RUNNING) { drbd_thread_current_set_cpu(thi); diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c index a5279a430274..6fff482847e7 100644 --- a/drivers/firmware/psci/psci_checker.c +++ b/drivers/firmware/psci/psci_checker.c @@ -281,8 +281,7 @@ static int suspend_test_thread(void *arg) wait_for_completion(&suspend_threads_started); /* Set maximum priority to preempt all other threads on this CPU. 
*/ - if (sched_set_fifo(current)) - pr_warn("Failed to set suspend thread scheduler on CPU %d\n", cpu); + sched_set_fifo(current); dev = this_cpu_read(cpuidle_devices); drv = cpuidle_get_cpu_driver(dev); diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 89a8b9c7e044..556cca38487c 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -509,10 +509,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) goto err_msm_uninit; } - ret = sched_set_fifo(priv->event_thread[i].thread); - if (ret) - dev_warn(dev, "event_thread set priority failed:%d\n", - ret); + sched_set_fifo(priv->event_thread[i].thread); } ret = drm_vblank_init(ddev, priv->num_crtcs); diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c index c20a43a97040..d09260382550 100644 --- a/drivers/platform/chrome/cros_ec_spi.c +++ b/drivers/platform/chrome/cros_ec_spi.c @@ -725,10 +725,9 @@ static int cros_ec_spi_devm_high_pri_alloc(struct device *dev, if (err) return err; - err = sched_set_fifo(ec_spi->high_pri_worker->task); - if (err) - dev_err(dev, "Can't set cros_ec high pri priority: %d\n", err); - return err; + sched_set_fifo(ec_spi->high_pri_worker->task); + + return 0; } static int cros_ec_spi_probe(struct spi_device *spi) diff --git a/include/linux/sched.h b/include/linux/sched.h index b792b8f0f4cf..ae7664492af2 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1653,9 +1653,9 @@ extern int idle_cpu(int cpu); extern int available_idle_cpu(int cpu); extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *); extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); -extern int sched_set_fifo(struct task_struct *p); -extern int sched_set_fifo_low(struct task_struct *p); -extern int sched_set_normal(struct task_struct *p, int nice); +extern void sched_set_fifo(struct task_struct *p); +extern void sched_set_fifo_low(struct task_struct *p); +extern void sched_set_normal(struct task_struct *p, int nice); extern int sched_setattr(struct task_struct *, const struct sched_attr *); extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *); extern struct task_struct *idle_task(int cpu); diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index bbc3c8a9e7b9..b4c1146de414 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -893,10 +893,7 @@ static int rcu_torture_boost(void *arg) VERBOSE_TOROUT_STRING("rcu_torture_boost started"); /* Set real-time priority. */ - if (sched_set_fifo_low(current) < 0) { - VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!"); - n_rcu_torture_boost_rterror++; - } + sched_set_fifo_low(current); init_rcu_head_on_stack(&rbi.rcu); /* Each pass through the following loop does one boost-test cycle. */ diff --git a/kernel/sched/core.c b/kernel/sched/core.c index f882d3d74dad..41d3778ea80e 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5183,30 +5183,30 @@ int sched_setscheduler_nocheck(struct task_struct *p, int policy, * The administrator _MUST_ configure the system, the kernel simply doesn't * know enough information to make a sensible choice. 
*/ -int sched_set_fifo(struct task_struct *p) +void sched_set_fifo(struct task_struct *p) { struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 }; - return sched_setscheduler_nocheck(p, SCHED_FIFO, &sp); + WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); } EXPORT_SYMBOL_GPL(sched_set_fifo); /* * For when you don't much care about FIFO, but want to be above SCHED_NORMAL. */ -int sched_set_fifo_low(struct task_struct *p) +void sched_set_fifo_low(struct task_struct *p) { struct sched_param sp = { .sched_priority = 1 }; - return sched_setscheduler_nocheck(p, SCHED_FIFO, &sp); + WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); } EXPORT_SYMBOL_GPL(sched_set_fifo_low); -int sched_set_normal(struct task_struct *p, int nice) +void sched_set_normal(struct task_struct *p, int nice) { struct sched_attr attr = { .sched_policy = SCHED_NORMAL, .sched_nice = nice, }; - return sched_setattr_nocheck(p, &attr); + WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0); } EXPORT_SYMBOL_GPL(sched_set_normal); -- cgit v1.2.3 From 2a1f3368bff609504cdc984cdb7cef467bb0b2b0 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Mon, 15 Jun 2020 18:00:44 +0200 Subject: ALSA: memalloc: Make SG-buffer helper usable for continuous buffer, too We have a few helper functions for making the access to the buffer address easier on SG-buffer. Those are specific to the buffer that is allocated with SG-buffer type, and it makes hard to use both SG and non-SG buffers in the same code. This patch adds a few simple checks and lets the helpers to deal with both SG- and continuous buffers gracefully. It's a preliminary step for the upcoming patch that mimics the buffer type on the fly. Link: https://lore.kernel.org/r/20200615160045.2703-4-tiwai@suse.de Signed-off-by: Takashi Iwai --- include/sound/memalloc.h | 9 ++++++++- sound/core/sgbuf.c | 3 +++ 2 files changed, 11 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h index 3b47832b1c1f..5daa937684a4 100644 --- a/include/sound/memalloc.h +++ b/include/sound/memalloc.h @@ -94,7 +94,11 @@ static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset) { struct snd_sg_buf *sgbuf = dmab->private_data; - dma_addr_t addr = sgbuf->table[offset >> PAGE_SHIFT].addr; + dma_addr_t addr; + + if (!sgbuf) + return dmab->addr + offset; + addr = sgbuf->table[offset >> PAGE_SHIFT].addr; addr &= ~((dma_addr_t)PAGE_SIZE - 1); return addr + offset % PAGE_SIZE; } @@ -106,6 +110,9 @@ static inline void *snd_sgbuf_get_ptr(struct snd_dma_buffer *dmab, size_t offset) { struct snd_sg_buf *sgbuf = dmab->private_data; + + if (!sgbuf) + return dmab->area + offset; return sgbuf->table[offset >> PAGE_SHIFT].buf + offset % PAGE_SIZE; } diff --git a/sound/core/sgbuf.c b/sound/core/sgbuf.c index c42217e2dd19..29ddb76187e5 100644 --- a/sound/core/sgbuf.c +++ b/sound/core/sgbuf.c @@ -142,6 +142,9 @@ unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab, struct snd_sg_buf *sg = dmab->private_data; unsigned int start, end, pg; + if (!sg) + return size; + start = ofs >> PAGE_SHIFT; end = (ofs + size - 1) >> PAGE_SHIFT; /* check page continuity */ -- cgit v1.2.3 From c7d75b5938e38a48e5fdac44f88fc5882f1f7bed Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 4 Jun 2020 17:06:22 +0900 Subject: ASoC: soc-component: move snd_soc_component_xxx_regmap() to soc-component soc-component is handling snd_soc_component_xxx(). Move snd_soc_component_xxx_regmap() to it. 
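For context, the moved snd_soc_component_init_regmap() helper is meant for the deferred-regmap case; a minimal hypothetical sketch of a component probe using it (the parent-MFD regmap lookup is an assumption, not taken from this patch):

    #include <linux/regmap.h>
    #include <sound/soc.h>

    /* Hypothetical component probe: attach a regmap owned by the parent
     * device before the first register access. */
    static int example_component_probe(struct snd_soc_component *component)
    {
            struct regmap *map = dev_get_regmap(component->dev->parent, NULL);

            if (!map)
                    return -ENODEV;

            snd_soc_component_init_regmap(component, map);
            return 0;
    }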
Signed-off-by: Kuninori Morimoto Reviewed-by: Ranjani Sridharan Link: https://lore.kernel.org/r/87sgfbw8zl.wl-kuninori.morimoto.gx@renesas.com Signed-off-by: Mark Brown --- include/sound/soc-component.h | 1 + sound/soc/soc-component.c | 50 +++++++++++++++++++++++++++++++++++++++++++ sound/soc/soc-core.c | 50 ------------------------------------------- 3 files changed, 51 insertions(+), 50 deletions(-) (limited to 'include') diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h index 5663891148e3..481132141dc2 100644 --- a/include/sound/soc-component.h +++ b/include/sound/soc-component.h @@ -359,6 +359,7 @@ int snd_soc_component_stream_event(struct snd_soc_component *component, int snd_soc_component_set_bias_level(struct snd_soc_component *component, enum snd_soc_bias_level level); +void snd_soc_component_setup_regmap(struct snd_soc_component *component); #ifdef CONFIG_REGMAP void snd_soc_component_init_regmap(struct snd_soc_component *component, struct regmap *regmap); diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c index 76f4b953563c..3c96a1adaa8b 100644 --- a/sound/soc/soc-component.c +++ b/sound/soc/soc-component.c @@ -302,6 +302,56 @@ int snd_soc_component_of_xlate_dai_name(struct snd_soc_component *component, return -ENOTSUPP; } +void snd_soc_component_setup_regmap(struct snd_soc_component *component) +{ + int val_bytes = regmap_get_val_bytes(component->regmap); + + /* Errors are legitimate for non-integer byte multiples */ + if (val_bytes > 0) + component->val_bytes = val_bytes; +} + +#ifdef CONFIG_REGMAP + +/** + * snd_soc_component_init_regmap() - Initialize regmap instance for the + * component + * @component: The component for which to initialize the regmap instance + * @regmap: The regmap instance that should be used by the component + * + * This function allows deferred assignment of the regmap instance that is + * associated with the component. Only use this if the regmap instance is not + * yet ready when the component is registered. The function must also be called + * before the first IO attempt of the component. + */ +void snd_soc_component_init_regmap(struct snd_soc_component *component, + struct regmap *regmap) +{ + component->regmap = regmap; + snd_soc_component_setup_regmap(component); +} +EXPORT_SYMBOL_GPL(snd_soc_component_init_regmap); + +/** + * snd_soc_component_exit_regmap() - De-initialize regmap instance for the + * component + * @component: The component for which to de-initialize the regmap instance + * + * Calls regmap_exit() on the regmap instance associated to the component and + * removes the regmap instance from the component. + * + * This function should only be used if snd_soc_component_init_regmap() was used + * to initialize the regmap instance. 
+ */ +void snd_soc_component_exit_regmap(struct snd_soc_component *component) +{ + regmap_exit(component->regmap); + component->regmap = NULL; +} +EXPORT_SYMBOL_GPL(snd_soc_component_exit_regmap); + +#endif + int snd_soc_pcm_component_pointer(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 0f30f5aabaa8..13a59736b2fc 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -2398,56 +2398,6 @@ static int snd_soc_component_initialize(struct snd_soc_component *component, return 0; } -static void snd_soc_component_setup_regmap(struct snd_soc_component *component) -{ - int val_bytes = regmap_get_val_bytes(component->regmap); - - /* Errors are legitimate for non-integer byte multiples */ - if (val_bytes > 0) - component->val_bytes = val_bytes; -} - -#ifdef CONFIG_REGMAP - -/** - * snd_soc_component_init_regmap() - Initialize regmap instance for the - * component - * @component: The component for which to initialize the regmap instance - * @regmap: The regmap instance that should be used by the component - * - * This function allows deferred assignment of the regmap instance that is - * associated with the component. Only use this if the regmap instance is not - * yet ready when the component is registered. The function must also be called - * before the first IO attempt of the component. - */ -void snd_soc_component_init_regmap(struct snd_soc_component *component, - struct regmap *regmap) -{ - component->regmap = regmap; - snd_soc_component_setup_regmap(component); -} -EXPORT_SYMBOL_GPL(snd_soc_component_init_regmap); - -/** - * snd_soc_component_exit_regmap() - De-initialize regmap instance for the - * component - * @component: The component for which to de-initialize the regmap instance - * - * Calls regmap_exit() on the regmap instance associated to the component and - * removes the regmap instance from the component. - * - * This function should only be used if snd_soc_component_init_regmap() was used - * to initialize the regmap instance. 
- */ -void snd_soc_component_exit_regmap(struct snd_soc_component *component) -{ - regmap_exit(component->regmap); - component->regmap = NULL; -} -EXPORT_SYMBOL_GPL(snd_soc_component_exit_regmap); - -#endif - #define ENDIANNESS_MAP(name) \ (SNDRV_PCM_FMTBIT_##name##LE | SNDRV_PCM_FMTBIT_##name##BE) static u64 endianness_format_map[] = { -- cgit v1.2.3 From 536aba1dd4939bf647f5d182d4f101ae548e6505 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 4 Jun 2020 17:06:32 +0900 Subject: ASoC: soc-component: move snd_soc_component_initialize() to soc-component.c snd_soc_component_xxx() should be implemented at soc-component.c Signed-off-by: Kuninori Morimoto Reviewed-by: Ranjani Sridharan Link: https://lore.kernel.org/r/87r1uvw8zb.wl-kuninori.morimoto.gx@renesas.com Signed-off-by: Mark Brown --- include/sound/soc-component.h | 4 ++++ sound/soc/soc-component.c | 16 ++++++++++++++++ sound/soc/soc-core.c | 29 ++++++++--------------------- 3 files changed, 28 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h index 481132141dc2..cb0d34fa77c6 100644 --- a/include/sound/soc-component.h +++ b/include/sound/soc-component.h @@ -324,6 +324,10 @@ static inline int snd_soc_component_cache_sync( return regcache_sync(component->regmap); } +int snd_soc_component_initialize(struct snd_soc_component *component, + const struct snd_soc_component_driver *driver, + struct device *dev, const char *name); + /* component IO */ int snd_soc_component_read(struct snd_soc_component *component, unsigned int reg, unsigned int *val); diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c index 3c96a1adaa8b..5bf2e71d3d83 100644 --- a/sound/soc/soc-component.c +++ b/sound/soc/soc-component.c @@ -8,6 +8,22 @@ #include #include +int snd_soc_component_initialize(struct snd_soc_component *component, + const struct snd_soc_component_driver *driver, + struct device *dev, const char *name) +{ + INIT_LIST_HEAD(&component->dai_list); + INIT_LIST_HEAD(&component->dobj_list); + INIT_LIST_HEAD(&component->card_list); + mutex_init(&component->io_mutex); + + component->name = name; + component->dev = dev; + component->driver = driver; + + return 0; +} + /** * snd_soc_component_set_sysclk - configure COMPONENT system or master clock. 
* @component: COMPONENT diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 13a59736b2fc..e596e5a765da 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -2378,26 +2378,6 @@ err: return ret; } -static int snd_soc_component_initialize(struct snd_soc_component *component, - const struct snd_soc_component_driver *driver, struct device *dev) -{ - INIT_LIST_HEAD(&component->dai_list); - INIT_LIST_HEAD(&component->dobj_list); - INIT_LIST_HEAD(&component->card_list); - mutex_init(&component->io_mutex); - - component->name = fmt_single_name(dev, &component->id); - if (!component->name) { - dev_err(dev, "ASoC: Failed to allocate name\n"); - return -ENOMEM; - } - - component->dev = dev; - component->driver = driver; - - return 0; -} - #define ENDIANNESS_MAP(name) \ (SNDRV_PCM_FMTBIT_##name##LE | SNDRV_PCM_FMTBIT_##name##BE) static u64 endianness_format_map[] = { @@ -2460,12 +2440,19 @@ int snd_soc_add_component(struct device *dev, struct snd_soc_dai_driver *dai_drv, int num_dai) { + const char *name = fmt_single_name(dev, &component->id); int ret; int i; + if (!name) { + dev_err(dev, "ASoC: Failed to allocate name\n"); + return -ENOMEM; + } + mutex_lock(&client_mutex); - ret = snd_soc_component_initialize(component, component_driver, dev); + ret = snd_soc_component_initialize(component, component_driver, + dev, name); if (ret) goto err_free; -- cgit v1.2.3 From 4f39514f36980a4b20a754a5d51486a5999c8380 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 4 Jun 2020 17:06:58 +0900 Subject: ASoC: soc-component: add snd_soc_pcm_component_prepare() We have 2 type of component functions snd_soc_component_xxx() is focusing to component itself, snd_soc_pcm_component_xxx() is focusing to rtd related component. Now we can update snd_soc_component_prepare() to snd_soc_pcm_component_prepare(). This patch do it. 
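Schematically, every snd_soc_pcm_component_*() helper added in this series has the same shape; in the sketch below "foo" is a placeholder for the respective snd_soc_component_driver callback, not a real op:

    int snd_soc_pcm_component_foo(struct snd_pcm_substream *substream)
    {
            struct snd_soc_pcm_runtime *rtd = substream->private_data;
            struct snd_soc_component *component;
            int i, ret;

            for_each_rtd_components(rtd, i, component) {
                    if (component->driver->foo) {
                            ret = component->driver->foo(component, substream);
                            if (ret < 0)
                                    return ret;
                    }
            }

            return 0;
    }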
Signed-off-by: Kuninori Morimoto Reviewed-by: Ranjani Sridharan Link: https://lore.kernel.org/r/87o8pzw8yl.wl-kuninori.morimoto.gx@renesas.com Signed-off-by: Mark Brown --- include/sound/soc-component.h | 3 +-- sound/soc/soc-component.c | 28 +++++++++++++++++----------- sound/soc/soc-pcm.c | 12 +++--------- 3 files changed, 21 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h index cb0d34fa77c6..fc287e910240 100644 --- a/include/sound/soc-component.h +++ b/include/sound/soc-component.h @@ -426,8 +426,6 @@ int snd_soc_component_open(struct snd_soc_component *component, struct snd_pcm_substream *substream); int snd_soc_component_close(struct snd_soc_component *component, struct snd_pcm_substream *substream); -int snd_soc_component_prepare(struct snd_soc_component *component, - struct snd_pcm_substream *substream); int snd_soc_component_hw_params(struct snd_soc_component *component, struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params); @@ -460,5 +458,6 @@ int snd_soc_pcm_component_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *vma); int snd_soc_pcm_component_new(struct snd_soc_pcm_runtime *rtd); void snd_soc_pcm_component_free(struct snd_soc_pcm_runtime *rtd); +int snd_soc_pcm_component_prepare(struct snd_pcm_substream *substream); #endif /* __SOC_COMPONENT_H */ diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c index 6d29c2de3b24..1bc155bc8e5e 100644 --- a/sound/soc/soc-component.c +++ b/sound/soc/soc-component.c @@ -275,17 +275,6 @@ int snd_soc_component_close(struct snd_soc_component *component, return soc_component_ret(component, ret); } -int snd_soc_component_prepare(struct snd_soc_component *component, - struct snd_pcm_substream *substream) -{ - int ret = 0; - - if (component->driver->prepare) - ret = component->driver->prepare(component, substream); - - return soc_component_ret(component, ret); -} - int snd_soc_component_hw_params(struct snd_soc_component *component, struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) @@ -569,3 +558,20 @@ void snd_soc_pcm_component_free(struct snd_soc_pcm_runtime *rtd) if (component->driver->pcm_destruct) component->driver->pcm_destruct(component, rtd->pcm); } + +int snd_soc_pcm_component_prepare(struct snd_pcm_substream *substream) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_component *component; + int i, ret; + + for_each_rtd_components(rtd, i, component) { + if (component->driver->prepare) { + ret = component->driver->prepare(component, substream); + if (ret < 0) + return soc_component_ret(component, ret); + } + } + + return 0; +} diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index c517064f5391..8ba0f14a2f2f 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -850,7 +850,6 @@ static void codec2codec_close_delayed_work(struct snd_soc_pcm_runtime *rtd) static int soc_pcm_prepare(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; - struct snd_soc_component *component; struct snd_soc_dai *dai; int i, ret = 0; @@ -860,14 +859,9 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream) if (ret < 0) goto out; - for_each_rtd_components(rtd, i, component) { - ret = snd_soc_component_prepare(component, substream); - if (ret < 0) { - dev_err(component->dev, - "ASoC: platform prepare error: %d\n", ret); - goto out; - } - } + ret = snd_soc_pcm_component_prepare(substream); + if (ret < 0) + goto out; ret = 
snd_soc_pcm_dai_prepare(substream); if (ret < 0) { -- cgit v1.2.3 From e1bafa828e3a0622ac24d238e00937f3059ed585 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 4 Jun 2020 17:07:11 +0900 Subject: ASoC: soc-component: add snd_soc_pcm_component_hw_params() We have 2 type of component functions snd_soc_component_xxx() is focusing to component itself, snd_soc_pcm_component_xxx() is focusing to rtd related component. Now we can update snd_soc_component_hw_params() to snd_soc_pcm_component_hw_params(). This patch do it. Signed-off-by: Kuninori Morimoto Reviewed-by: Ranjani Sridharan Link: https://lore.kernel.org/r/87mu5jw8y8.wl-kuninori.morimoto.gx@renesas.com Signed-off-by: Mark Brown --- include/sound/soc-component.h | 6 +++--- sound/soc/soc-component.c | 36 +++++++++++++++++++++++------------- sound/soc/soc-pcm.c | 13 +++---------- 3 files changed, 29 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h index fc287e910240..a2898bdd0a3c 100644 --- a/include/sound/soc-component.h +++ b/include/sound/soc-component.h @@ -426,9 +426,6 @@ int snd_soc_component_open(struct snd_soc_component *component, struct snd_pcm_substream *substream); int snd_soc_component_close(struct snd_soc_component *component, struct snd_pcm_substream *substream); -int snd_soc_component_hw_params(struct snd_soc_component *component, - struct snd_pcm_substream *substream, - struct snd_pcm_hw_params *params); int snd_soc_component_hw_free(struct snd_soc_component *component, struct snd_pcm_substream *substream); int snd_soc_component_trigger(struct snd_soc_component *component, @@ -459,5 +456,8 @@ int snd_soc_pcm_component_mmap(struct snd_pcm_substream *substream, int snd_soc_pcm_component_new(struct snd_soc_pcm_runtime *rtd); void snd_soc_pcm_component_free(struct snd_soc_pcm_runtime *rtd); int snd_soc_pcm_component_prepare(struct snd_pcm_substream *substream); +int snd_soc_pcm_component_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_component **last); #endif /* __SOC_COMPONENT_H */ diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c index 1bc155bc8e5e..56341968fe6d 100644 --- a/sound/soc/soc-component.c +++ b/sound/soc/soc-component.c @@ -275,19 +275,6 @@ int snd_soc_component_close(struct snd_soc_component *component, return soc_component_ret(component, ret); } -int snd_soc_component_hw_params(struct snd_soc_component *component, - struct snd_pcm_substream *substream, - struct snd_pcm_hw_params *params) -{ - int ret = 0; - - if (component->driver->hw_params) - ret = component->driver->hw_params(component, - substream, params); - - return soc_component_ret(component, ret); -} - int snd_soc_component_hw_free(struct snd_soc_component *component, struct snd_pcm_substream *substream) { @@ -575,3 +562,26 @@ int snd_soc_pcm_component_prepare(struct snd_pcm_substream *substream) return 0; } + +int snd_soc_pcm_component_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_component **last) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_component *component; + int i, ret; + + for_each_rtd_components(rtd, i, component) { + if (component->driver->hw_params) { + ret = component->driver->hw_params(component, + substream, params); + if (ret < 0) { + *last = component; + return soc_component_ret(component, ret); + } + } + } + + *last = NULL; + return 0; +} diff --git a/sound/soc/soc-pcm.c 
b/sound/soc/soc-pcm.c index 8ba0f14a2f2f..e5eef48af167 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -1009,16 +1009,9 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream, snd_soc_dapm_update_dai(substream, params, cpu_dai); } - for_each_rtd_components(rtd, i, component) { - ret = snd_soc_component_hw_params(component, substream, params); - if (ret < 0) { - dev_err(component->dev, - "ASoC: %s hw params failed: %d\n", - component->name, ret); - goto component_err; - } - } - component = NULL; + ret = snd_soc_pcm_component_hw_params(substream, params, &component); + if (ret < 0) + goto component_err; out: mutex_unlock(&rtd->card->pcm_mutex); -- cgit v1.2.3 From 047511198639649bdaacb1a34d9691429ccc5698 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 4 Jun 2020 17:07:24 +0900 Subject: ASoC: soc-component: add snd_soc_pcm_component_hw_free() We have 2 type of component functions snd_soc_component_xxx() is focusing to component itself, snd_soc_pcm_component_xxx() is focusing to rtd related component. Now we can update snd_soc_component_hw_free() to snd_soc_pcm_component_hw_free(). This patch do it. Signed-off-by: Kuninori Morimoto Reviewed-by: Ranjani Sridharan Link: https://lore.kernel.org/r/87lfl3w8xv.wl-kuninori.morimoto.gx@renesas.com Signed-off-by: Mark Brown --- include/sound/soc-component.h | 4 ++-- sound/soc/soc-component.c | 30 +++++++++++++++++++----------- sound/soc/soc-pcm.c | 23 ++--------------------- 3 files changed, 23 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h index a2898bdd0a3c..d2f62d529559 100644 --- a/include/sound/soc-component.h +++ b/include/sound/soc-component.h @@ -426,8 +426,6 @@ int snd_soc_component_open(struct snd_soc_component *component, struct snd_pcm_substream *substream); int snd_soc_component_close(struct snd_soc_component *component, struct snd_pcm_substream *substream); -int snd_soc_component_hw_free(struct snd_soc_component *component, - struct snd_pcm_substream *substream); int snd_soc_component_trigger(struct snd_soc_component *component, struct snd_pcm_substream *substream, int cmd); @@ -459,5 +457,7 @@ int snd_soc_pcm_component_prepare(struct snd_pcm_substream *substream); int snd_soc_pcm_component_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_component **last); +void snd_soc_pcm_component_hw_free(struct snd_pcm_substream *substream, + struct snd_soc_component *last); #endif /* __SOC_COMPONENT_H */ diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c index 56341968fe6d..380f6459b5cb 100644 --- a/sound/soc/soc-component.c +++ b/sound/soc/soc-component.c @@ -275,17 +275,6 @@ int snd_soc_component_close(struct snd_soc_component *component, return soc_component_ret(component, ret); } -int snd_soc_component_hw_free(struct snd_soc_component *component, - struct snd_pcm_substream *substream) -{ - int ret = 0; - - if (component->driver->hw_free) - ret = component->driver->hw_free(component, substream); - - return soc_component_ret(component, ret); -} - int snd_soc_component_trigger(struct snd_soc_component *component, struct snd_pcm_substream *substream, int cmd) @@ -585,3 +574,22 @@ int snd_soc_pcm_component_hw_params(struct snd_pcm_substream *substream, *last = NULL; return 0; } + +void snd_soc_pcm_component_hw_free(struct snd_pcm_substream *substream, + struct snd_soc_component *last) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct 
snd_soc_component *component; + int i, ret; + + for_each_rtd_components(rtd, i, component) { + if (component == last) + break; + + if (component->driver->hw_free) { + ret = component->driver->hw_free(component, substream); + if (ret < 0) + soc_component_ret(component, ret); + } + } +} diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index e5eef48af167..cbce15c5721e 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -898,25 +898,6 @@ static void soc_pcm_codec_params_fixup(struct snd_pcm_hw_params *params, interval->max = channels; } -static int soc_pcm_components_hw_free(struct snd_pcm_substream *substream, - struct snd_soc_component *last) -{ - struct snd_soc_pcm_runtime *rtd = substream->private_data; - struct snd_soc_component *component; - int i, r, ret = 0; - - for_each_rtd_components(rtd, i, component) { - if (component == last) - break; - - r = snd_soc_component_hw_free(component, substream); - if (r < 0) - ret = r; /* use last ret */ - } - - return ret; -} - /* * Called by ALSA when the hardware params are set by application. This * function can also be called multiple times and can allocate buffers @@ -1018,7 +999,7 @@ out: return ret; component_err: - soc_pcm_components_hw_free(substream, component); + snd_soc_pcm_component_hw_free(substream, component); i = rtd->num_cpus; @@ -1077,7 +1058,7 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream) snd_soc_link_hw_free(substream); /* free any component resources */ - soc_pcm_components_hw_free(substream, NULL); + snd_soc_pcm_component_hw_free(substream, NULL); /* now free hw params for the DAIs */ for_each_rtd_dais(rtd, i, dai) { -- cgit v1.2.3 From 32fd120475c1b8a83d28bfedc2b95ec981fbb809 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 4 Jun 2020 17:07:40 +0900 Subject: ASoC: soc-component: add snd_soc_pcm_component_trigger() We have 2 type of component functions snd_soc_component_xxx() is focusing to component itself, snd_soc_pcm_component_xxx() is focusing to rtd related component. Now we can update snd_soc_component_trigger() to snd_soc_pcm_component_trigger(). This patch do it. 
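For reference, the trigger ordering is unchanged by the conversion; only the per-component step now goes through the new helper. A sketch of the two paths (error handling omitted, mirroring soc_pcm_trigger_start()/soc_pcm_trigger_stop() in the diff below):

    /* start path: link first, then components, then DAIs */
    snd_soc_link_trigger(substream, cmd);
    snd_soc_pcm_component_trigger(substream, cmd);
    snd_soc_pcm_dai_trigger(substream, cmd);

    /* stop path: the reverse — DAIs, then components, then link */
    snd_soc_pcm_dai_trigger(substream, cmd);
    snd_soc_pcm_component_trigger(substream, cmd);
    snd_soc_link_trigger(substream, cmd);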
Signed-off-by: Kuninori Morimoto Reviewed-by: Ranjani Sridharan Link: https://lore.kernel.org/r/87k10nw8xf.wl-kuninori.morimoto.gx@renesas.com Signed-off-by: Mark Brown --- include/sound/soc-component.h | 5 ++--- sound/soc/soc-component.c | 30 ++++++++++++++++++------------ sound/soc/soc-pcm.c | 24 ++++++++---------------- 3 files changed, 28 insertions(+), 31 deletions(-) (limited to 'include') diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h index d2f62d529559..bb26d55a9289 100644 --- a/include/sound/soc-component.h +++ b/include/sound/soc-component.h @@ -426,9 +426,6 @@ int snd_soc_component_open(struct snd_soc_component *component, struct snd_pcm_substream *substream); int snd_soc_component_close(struct snd_soc_component *component, struct snd_pcm_substream *substream); -int snd_soc_component_trigger(struct snd_soc_component *component, - struct snd_pcm_substream *substream, - int cmd); void snd_soc_component_suspend(struct snd_soc_component *component); void snd_soc_component_resume(struct snd_soc_component *component); int snd_soc_component_is_suspended(struct snd_soc_component *component); @@ -459,5 +456,7 @@ int snd_soc_pcm_component_hw_params(struct snd_pcm_substream *substream, struct snd_soc_component **last); void snd_soc_pcm_component_hw_free(struct snd_pcm_substream *substream, struct snd_soc_component *last); +int snd_soc_pcm_component_trigger(struct snd_pcm_substream *substream, + int cmd); #endif /* __SOC_COMPONENT_H */ diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c index 380f6459b5cb..150b02be0219 100644 --- a/sound/soc/soc-component.c +++ b/sound/soc/soc-component.c @@ -275,18 +275,6 @@ int snd_soc_component_close(struct snd_soc_component *component, return soc_component_ret(component, ret); } -int snd_soc_component_trigger(struct snd_soc_component *component, - struct snd_pcm_substream *substream, - int cmd) -{ - int ret = 0; - - if (component->driver->trigger) - ret = component->driver->trigger(component, substream, cmd); - - return soc_component_ret(component, ret); -} - void snd_soc_component_suspend(struct snd_soc_component *component) { if (component->driver->suspend) @@ -593,3 +581,21 @@ void snd_soc_pcm_component_hw_free(struct snd_pcm_substream *substream, } } } + +int snd_soc_pcm_component_trigger(struct snd_pcm_substream *substream, + int cmd) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_component *component; + int i, ret; + + for_each_rtd_components(rtd, i, component) { + if (component->driver->trigger) { + ret = component->driver->trigger(component, substream, cmd); + if (ret < 0) + return soc_component_ret(component, ret); + } + } + + return 0; +} diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index cbce15c5721e..be5c83f1ab0c 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -1074,38 +1074,30 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream) static int soc_pcm_trigger_start(struct snd_pcm_substream *substream, int cmd) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; - struct snd_soc_component *component; - int i, ret; + int ret; ret = snd_soc_link_trigger(substream, cmd); if (ret < 0) return ret; - for_each_rtd_components(rtd, i, component) { - ret = snd_soc_component_trigger(component, substream, cmd); - if (ret < 0) - return ret; - } + ret = snd_soc_pcm_component_trigger(substream, cmd); + if (ret < 0) + return ret; return snd_soc_pcm_dai_trigger(substream, cmd); } static int soc_pcm_trigger_stop(struct snd_pcm_substream 
*substream, int cmd) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; - struct snd_soc_component *component; - int i, ret; + int ret; ret = snd_soc_pcm_dai_trigger(substream, cmd); if (ret < 0) return ret; - for_each_rtd_components(rtd, i, component) { - ret = snd_soc_component_trigger(component, substream, cmd); - if (ret < 0) - return ret; - } + ret = snd_soc_pcm_component_trigger(substream, cmd); + if (ret < 0) + return ret; ret = snd_soc_link_trigger(substream, cmd); if (ret < 0) -- cgit v1.2.3 From 257c4dac8b7877c865e734533b5f62769c64afb6 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 4 Jun 2020 17:07:54 +0900 Subject: ASoC: soc-component: add snd_soc_component_init() we wantn't to directly access to component related parameter as much as possible to keep encapsulation. This patch adds snd_soc_component_init() for it. Signed-off-by: Kuninori Morimoto Reviewed-by: Ranjani Sridharan Link: https://lore.kernel.org/r/87img7w8x2.wl-kuninori.morimoto.gx@renesas.com Signed-off-by: Mark Brown --- include/sound/soc-component.h | 3 +++ sound/soc/soc-component.c | 16 ++++++++++++++++ sound/soc/soc-core.c | 23 ++++++++++++----------- 3 files changed, 31 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h index bb26d55a9289..aea0eb0c3fcc 100644 --- a/include/sound/soc-component.h +++ b/include/sound/soc-component.h @@ -327,6 +327,9 @@ static inline int snd_soc_component_cache_sync( int snd_soc_component_initialize(struct snd_soc_component *component, const struct snd_soc_component_driver *driver, struct device *dev, const char *name); +void snd_soc_component_set_aux(struct snd_soc_component *component, + struct snd_soc_aux_dev *aux); +int snd_soc_component_init(struct snd_soc_component *component); /* component IO */ int snd_soc_component_read(struct snd_soc_component *component, diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c index 150b02be0219..7624ff5b67d3 100644 --- a/sound/soc/soc-component.c +++ b/sound/soc/soc-component.c @@ -46,6 +46,22 @@ int snd_soc_component_initialize(struct snd_soc_component *component, return 0; } +void snd_soc_component_set_aux(struct snd_soc_component *component, + struct snd_soc_aux_dev *aux) +{ + component->init = (aux) ? aux->init : NULL; +} + +int snd_soc_component_init(struct snd_soc_component *component) +{ + int ret = 0; + + if (component->init) + ret = component->init(component); + + return soc_component_ret(component, ret); +} + /** * snd_soc_component_set_sysclk - configure COMPONENT system or master clock. 
* @component: COMPONENT diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index e596e5a765da..c38bb423e695 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -1208,15 +1208,14 @@ static int soc_probe_component(struct snd_soc_card *card, component->name); probed = 1; - /* machine specific init */ - if (component->init) { - ret = component->init(component); - if (ret < 0) { - dev_err(component->dev, - "Failed to do machine specific init %d\n", ret); - goto err_probe; - } - } + /* + * machine specific init + * see + * snd_soc_component_set_aux() + */ + ret = snd_soc_component_init(component); + if (ret < 0) + goto err_probe; ret = snd_soc_add_component_controls(component, component->driver->controls, @@ -1330,7 +1329,8 @@ static void soc_unbind_aux_dev(struct snd_soc_card *card) struct snd_soc_component *component, *_component; for_each_card_auxs_safe(card, component, _component) { - component->init = NULL; + /* for snd_soc_component_init() */ + snd_soc_component_set_aux(component, NULL); list_del(&component->card_aux_list); } } @@ -1347,7 +1347,8 @@ static int soc_bind_aux_dev(struct snd_soc_card *card) if (!component) return -EPROBE_DEFER; - component->init = aux->init; + /* for snd_soc_component_init() */ + snd_soc_component_set_aux(component, aux); /* see for_each_card_auxs */ list_add(&component->card_aux_list, &card->aux_comp_list); } -- cgit v1.2.3 From 45108214dbfdba4a07061d2a4db6dc12475049f2 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 4 Jun 2020 17:08:24 +0900 Subject: ASoC: soc-component: tidyup Copyright This patch add missing company copyright Signed-off-by: Kuninori Morimoto Reviewed-by: Ranjani Sridharan Link: https://lore.kernel.org/r/87eeqvw8w8.wl-kuninori.morimoto.gx@renesas.com Signed-off-by: Mark Brown --- include/sound/soc-component.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h index aea0eb0c3fcc..4a4bb723ca9f 100644 --- a/include/sound/soc-component.h +++ b/include/sound/soc-component.h @@ -2,7 +2,8 @@ * * soc-component.h * - * Copyright (c) 2019 Kuninori Morimoto + * Copyright (C) 2019 Renesas Electronics Corp. + * Kuninori Morimoto */ #ifndef __SOC_COMPONENT_H #define __SOC_COMPONENT_H -- cgit v1.2.3 From c9015a1723373f2c8f8ac994f59470f4fb852623 Mon Sep 17 00:00:00 2001 From: Shengjiu Wang Date: Wed, 3 Jun 2020 18:26:53 +0800 Subject: ASoC: wm8960: Support headphone jack detection function Add two platform variables for headphone jack detection. "hp_cfg" is for configuration of heaphone jack detection. "gpio_cfg" is for configuration of gpio, the gpio is used for plug & unplug interrupt on SoC. Signed-off-by: Shengjiu Wang Link: https://lore.kernel.org/r/1591180013-12416-2-git-send-email-shengjiu.wang@nxp.com Signed-off-by: Mark Brown --- include/sound/wm8960.h | 17 +++++++++++++++++ sound/soc/codecs/wm8960.c | 20 ++++++++++++++++++++ 2 files changed, 37 insertions(+) (limited to 'include') diff --git a/include/sound/wm8960.h b/include/sound/wm8960.h index d22e84805025..275fd5b201ce 100644 --- a/include/sound/wm8960.h +++ b/include/sound/wm8960.h @@ -16,6 +16,23 @@ struct wm8960_data { bool capless; /* Headphone outputs configured in capless mode */ bool shared_lrclk; /* DAC and ADC LRCLKs are wired together */ + + /* + * Setup for headphone detection + * + * hp_cfg[0]: HPSEL[1:0] of R48 (Additional Control 4) + * hp_cfg[1]: {HPSWEN:HPSWPOL} of R24 (Additional Control 2). 
+ * hp_cfg[2]: {TOCLKSEL:TOEN} of R23 (Additional Control 1). + */ + u32 hp_cfg[3]; + + /* + * Setup for gpio configuration + * + * gpio_cfg[0]: ALRCGPIO of R9 (Audio interface) + * gpio_cfg[1]: {GPIOPOL:GPIOSEL[2:0]} of R48 (Additional Control 4). + */ + u32 gpio_cfg[2]; }; #endif diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c index 6cf0f6612bda..2f7f0493144a 100644 --- a/sound/soc/codecs/wm8960.c +++ b/sound/soc/codecs/wm8960.c @@ -1389,6 +1389,12 @@ static void wm8960_set_pdata_from_of(struct i2c_client *i2c, if (of_property_read_bool(np, "wlf,shared-lrclk")) pdata->shared_lrclk = true; + + of_property_read_u32_array(np, "wlf,gpio-cfg", pdata->gpio_cfg, + ARRAY_SIZE(pdata->gpio_cfg)); + + of_property_read_u32_array(np, "wlf,hp-cfg", pdata->hp_cfg, + ARRAY_SIZE(pdata->hp_cfg)); } static int wm8960_i2c_probe(struct i2c_client *i2c, @@ -1446,6 +1452,20 @@ static int wm8960_i2c_probe(struct i2c_client *i2c, regmap_update_bits(wm8960->regmap, WM8960_LOUT2, 0x100, 0x100); regmap_update_bits(wm8960->regmap, WM8960_ROUT2, 0x100, 0x100); + /* ADCLRC pin configured as GPIO. */ + regmap_update_bits(wm8960->regmap, WM8960_IFACE2, 1 << 6, + wm8960->pdata.gpio_cfg[0] << 6); + regmap_update_bits(wm8960->regmap, WM8960_ADDCTL4, 0xF << 4, + wm8960->pdata.gpio_cfg[1] << 4); + + /* Enable headphone jack detect */ + regmap_update_bits(wm8960->regmap, WM8960_ADDCTL4, 3 << 2, + wm8960->pdata.hp_cfg[0] << 2); + regmap_update_bits(wm8960->regmap, WM8960_ADDCTL2, 3 << 5, + wm8960->pdata.hp_cfg[1] << 5); + regmap_update_bits(wm8960->regmap, WM8960_ADDCTL1, 3, + wm8960->pdata.hp_cfg[2]); + i2c_set_clientdata(i2c, wm8960); ret = devm_snd_soc_register_component(&i2c->dev, -- cgit v1.2.3 From 4b9e7edb5afc4e3c27d6623f5008bf53ae96cf1a Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Mon, 15 Jun 2020 09:23:13 +0200 Subject: regmap: convert all regmap_update_bits() and co. macros to static inlines There's no reason to have these as macros. Let's convert them all to static inlines for better readability and stronger typing. 
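Since the helpers keep their names and argument order, callers do not change. A minimal sketch of typical usage after the conversion (register address and bit chosen arbitrarily for illustration):

    #include <linux/bits.h>
    #include <linux/regmap.h>

    /* Set bit 0 of register 0x10; both values are made up. */
    static int example_enable(struct regmap *map)
    {
            return regmap_update_bits(map, 0x10, BIT(0), BIT(0));
    }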
Suggested-by: Mark Brown Signed-off-by: Bartosz Golaszewski Link: https://lore.kernel.org/r/20200615072313.11106-1-brgl@bgdev.pl Signed-off-by: Mark Brown --- include/linux/regmap.h | 222 ++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 192 insertions(+), 30 deletions(-) (limited to 'include') diff --git a/include/linux/regmap.h b/include/linux/regmap.h index cb666b9c6b6a..f4917efed5c3 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -80,36 +80,6 @@ struct reg_sequence { } #define REG_SEQ0(_reg, _def) REG_SEQ(_reg, _def, 0) -#define regmap_update_bits(map, reg, mask, val) \ - regmap_update_bits_base(map, reg, mask, val, NULL, false, false) -#define regmap_update_bits_async(map, reg, mask, val)\ - regmap_update_bits_base(map, reg, mask, val, NULL, true, false) -#define regmap_update_bits_check(map, reg, mask, val, change)\ - regmap_update_bits_base(map, reg, mask, val, change, false, false) -#define regmap_update_bits_check_async(map, reg, mask, val, change)\ - regmap_update_bits_base(map, reg, mask, val, change, true, false) - -#define regmap_write_bits(map, reg, mask, val) \ - regmap_update_bits_base(map, reg, mask, val, NULL, false, true) - -#define regmap_field_write(field, val) \ - regmap_field_update_bits_base(field, ~0, val, NULL, false, false) -#define regmap_field_force_write(field, val) \ - regmap_field_update_bits_base(field, ~0, val, NULL, false, true) -#define regmap_field_update_bits(field, mask, val)\ - regmap_field_update_bits_base(field, mask, val, NULL, false, false) -#define regmap_field_force_update_bits(field, mask, val) \ - regmap_field_update_bits_base(field, mask, val, NULL, false, true) - -#define regmap_fields_write(field, id, val) \ - regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, false) -#define regmap_fields_force_write(field, id, val) \ - regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, true) -#define regmap_fields_update_bits(field, id, mask, val)\ - regmap_fields_update_bits_base(field, id, mask, val, NULL, false, false) -#define regmap_fields_force_update_bits(field, id, mask, val) \ - regmap_fields_update_bits_base(field, id, mask, val, NULL, false, true) - /** * regmap_read_poll_timeout - Poll until a condition is met or a timeout occurs * @@ -1054,6 +1024,42 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, int regmap_update_bits_base(struct regmap *map, unsigned int reg, unsigned int mask, unsigned int val, bool *change, bool async, bool force); + +static inline int regmap_update_bits(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val) +{ + return regmap_update_bits_base(map, reg, mask, val, NULL, false, false); +} + +static inline int regmap_update_bits_async(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val) +{ + return regmap_update_bits_base(map, reg, mask, val, NULL, true, false); +} + +static inline int regmap_update_bits_check(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val, + bool *change) +{ + return regmap_update_bits_base(map, reg, mask, val, + change, false, false); +} + +static inline int +regmap_update_bits_check_async(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val, + bool *change) +{ + return regmap_update_bits_base(map, reg, mask, val, + change, true, false); +} + +static inline int regmap_write_bits(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val) +{ + return regmap_update_bits_base(map, reg, mask, val, NULL, 
false, true); +} + int regmap_get_val_bytes(struct regmap *map); int regmap_get_max_register(struct regmap *map); int regmap_get_reg_stride(struct regmap *map); @@ -1152,6 +1158,65 @@ int regmap_fields_read(struct regmap_field *field, unsigned int id, int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id, unsigned int mask, unsigned int val, bool *change, bool async, bool force); + +static inline int regmap_field_write(struct regmap_field *field, + unsigned int val) +{ + return regmap_field_update_bits_base(field, ~0, val, + NULL, false, false); +} + +static inline int regmap_field_force_write(struct regmap_field *field, + unsigned int val) +{ + return regmap_field_update_bits_base(field, ~0, val, NULL, false, true); +} + +static inline int regmap_field_update_bits(struct regmap_field *field, + unsigned int mask, unsigned int val) +{ + return regmap_field_update_bits_base(field, mask, val, + NULL, false, false); +} + +static inline int +regmap_field_force_update_bits(struct regmap_field *field, + unsigned int mask, unsigned int val) +{ + return regmap_field_update_bits_base(field, mask, val, + NULL, false, true); +} + +static inline int regmap_fields_write(struct regmap_field *field, + unsigned int id, unsigned int val) +{ + return regmap_fields_update_bits_base(field, id, ~0, val, + NULL, false, false); +} + +static inline int regmap_fields_force_write(struct regmap_field *field, + unsigned int id, unsigned int val) +{ + return regmap_fields_update_bits_base(field, id, ~0, val, + NULL, false, true); +} + +static inline int +regmap_fields_update_bits(struct regmap_field *field, unsigned int id, + unsigned int mask, unsigned int val) +{ + return regmap_fields_update_bits_base(field, id, mask, val, + NULL, false, false); +} + +static inline int +regmap_fields_force_update_bits(struct regmap_field *field, unsigned int id, + unsigned int mask, unsigned int val) +{ + return regmap_fields_update_bits_base(field, id, mask, val, + NULL, false, true); +} + /** * struct regmap_irq_type - IRQ type definitions. 
* @@ -1458,6 +1523,103 @@ static inline int regmap_fields_update_bits_base(struct regmap_field *field, return -EINVAL; } +static inline int regmap_update_bits(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_update_bits_async(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_update_bits_check(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val, + bool *change) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int +regmap_update_bits_check_async(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val, + bool *change) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_write_bits(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_field_write(struct regmap_field *field, + unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_field_force_write(struct regmap_field *field, + unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_field_update_bits(struct regmap_field *field, + unsigned int mask, unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int +regmap_field_force_update_bits(struct regmap_field *field, + unsigned int mask, unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_fields_write(struct regmap_field *field, + unsigned int id, unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_fields_force_write(struct regmap_field *field, + unsigned int id, unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int +regmap_fields_update_bits(struct regmap_field *field, unsigned int id, + unsigned int mask, unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int +regmap_fields_force_update_bits(struct regmap_field *field, unsigned int id, + unsigned int mask, unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + static inline int regmap_get_val_bytes(struct regmap *map) { WARN_ONCE(1, "regmap API is disabled"); -- cgit v1.2.3 From dff08caf35ecef4f7647f8b1e40877a254852a2b Mon Sep 17 00:00:00 2001 From: Pi-Hsun Shih Date: Fri, 12 Jun 2020 12:05:19 +0800 Subject: platform/chrome: cros_ec: Add command for regulator control. Add host commands for voltage regulator control through ChromeOS EC. 
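A minimal sketch of how the AP side might issue one of the new commands, assuming the usual cros_ec_cmd_xfer_status() transfer helper from cros_ec_proto; the regulator index and voltage are made up and error handling is kept minimal:

    #include <linux/slab.h>
    #include <linux/platform_data/cros_ec_commands.h>
    #include <linux/platform_data/cros_ec_proto.h>

    /* Ask the EC to put regulator 0 at exactly 1.8 V. */
    static int example_set_1v8(struct cros_ec_device *ec_dev)
    {
            struct ec_params_regulator_set_voltage *p;
            struct cros_ec_command *msg;
            int ret;

            msg = kzalloc(sizeof(*msg) + sizeof(*p), GFP_KERNEL);
            if (!msg)
                    return -ENOMEM;

            msg->command = EC_CMD_REGULATOR_SET_VOLTAGE;
            msg->outsize = sizeof(*p);

            p = (struct ec_params_regulator_set_voltage *)msg->data;
            p->index = 0;
            p->min_mv = 1800;
            p->max_mv = 1800;

            ret = cros_ec_cmd_xfer_status(ec_dev, msg);
            kfree(msg);
            return ret;
    }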
Signed-off-by: Pi-Hsun Shih Reviewed-by: Enric Balletbo i Serra Link: https://lore.kernel.org/r/20200612040526.192878-3-pihsun@chromium.org Signed-off-by: Mark Brown --- drivers/platform/chrome/cros_ec_trace.c | 5 ++ include/linux/platform_data/cros_ec_commands.h | 82 ++++++++++++++++++++++++++ 2 files changed, 87 insertions(+) (limited to 'include') diff --git a/drivers/platform/chrome/cros_ec_trace.c b/drivers/platform/chrome/cros_ec_trace.c index 523a39bd0ff6..425e9441b7ca 100644 --- a/drivers/platform/chrome/cros_ec_trace.c +++ b/drivers/platform/chrome/cros_ec_trace.c @@ -161,6 +161,11 @@ TRACE_SYMBOL(EC_CMD_ADC_READ), \ TRACE_SYMBOL(EC_CMD_ROLLBACK_INFO), \ TRACE_SYMBOL(EC_CMD_AP_RESET), \ + TRACE_SYMBOL(EC_CMD_REGULATOR_GET_INFO), \ + TRACE_SYMBOL(EC_CMD_REGULATOR_ENABLE), \ + TRACE_SYMBOL(EC_CMD_REGULATOR_IS_ENABLED), \ + TRACE_SYMBOL(EC_CMD_REGULATOR_SET_VOLTAGE), \ + TRACE_SYMBOL(EC_CMD_REGULATOR_GET_VOLTAGE), \ TRACE_SYMBOL(EC_CMD_CR51_BASE), \ TRACE_SYMBOL(EC_CMD_CR51_LAST), \ TRACE_SYMBOL(EC_CMD_FP_PASSTHRU), \ diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index 69210881ebac..a417b51b5764 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -5430,6 +5430,88 @@ struct ec_response_rollback_info { /* Issue AP reset */ #define EC_CMD_AP_RESET 0x0125 +/*****************************************************************************/ +/* Voltage regulator controls */ + +/* + * Get basic info of voltage regulator for given index. + * + * Returns the regulator name and supported voltage list in mV. + */ +#define EC_CMD_REGULATOR_GET_INFO 0x012B + +/* Maximum length of regulator name */ +#define EC_REGULATOR_NAME_MAX_LEN 16 + +/* Maximum length of the supported voltage list. */ +#define EC_REGULATOR_VOLTAGE_MAX_COUNT 16 + +struct ec_params_regulator_get_info { + uint32_t index; +} __ec_align4; + +struct ec_response_regulator_get_info { + char name[EC_REGULATOR_NAME_MAX_LEN]; + uint16_t num_voltages; + uint16_t voltages_mv[EC_REGULATOR_VOLTAGE_MAX_COUNT]; +} __ec_align1; + +/* + * Configure the regulator as enabled / disabled. + */ +#define EC_CMD_REGULATOR_ENABLE 0x012C + +struct ec_params_regulator_enable { + uint32_t index; + uint8_t enable; +} __ec_align4; + +/* + * Query if the regulator is enabled. + * + * Returns 1 if the regulator is enabled, 0 if not. + */ +#define EC_CMD_REGULATOR_IS_ENABLED 0x012D + +struct ec_params_regulator_is_enabled { + uint32_t index; +} __ec_align4; + +struct ec_response_regulator_is_enabled { + uint8_t enabled; +} __ec_align1; + +/* + * Set voltage for the voltage regulator within the range specified. + * + * The driver should select the voltage in range closest to min_mv. + * + * Also note that this might be called before the regulator is enabled, and the + * setting should be in effect after the regulator is enabled. + */ +#define EC_CMD_REGULATOR_SET_VOLTAGE 0x012E + +struct ec_params_regulator_set_voltage { + uint32_t index; + uint32_t min_mv; + uint32_t max_mv; +} __ec_align4; + +/* + * Get the currently configured voltage for the voltage regulator. + * + * Note that this might be called before the regulator is enabled. 
+ */ +#define EC_CMD_REGULATOR_GET_VOLTAGE 0x012F + +struct ec_params_regulator_get_voltage { + uint32_t index; +} __ec_align4; + +struct ec_response_regulator_get_voltage { + uint32_t voltage_mv; +} __ec_align4; + /*****************************************************************************/ /* The command range 0x200-0x2FF is reserved for Rotor. */ -- cgit v1.2.3 From 8e04187c1bc7953f6dfad3400c58b1b0b0ad767b Mon Sep 17 00:00:00 2001 From: Xu Yilun Date: Thu, 11 Jun 2020 11:25:07 +0800 Subject: spi: altera: add SPI core parameters support via platform data. This patch introduced SPI core parameters in platform data, it allows passing these SPI core parameters via platform data. Signed-off-by: Wu Hao Signed-off-by: Xu Yilun Signed-off-by: Matthew Gerlach Signed-off-by: Russ Weight Reviewed-by: Tom Rix Link: https://lore.kernel.org/r/1591845911-10197-3-git-send-email-yilun.xu@intel.com Signed-off-by: Mark Brown --- drivers/spi/spi-altera.c | 25 ++++++++++++++++++++++--- include/linux/spi/altera.h | 24 ++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 3 deletions(-) create mode 100644 include/linux/spi/altera.h (limited to 'include') diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c index d5fa0c5706b0..e6e6708c7c47 100644 --- a/drivers/spi/spi-altera.c +++ b/drivers/spi/spi-altera.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -40,6 +41,8 @@ #define ALTERA_SPI_CONTROL_IE_MSK 0x100 #define ALTERA_SPI_CONTROL_SSO_MSK 0x400 +#define ALTERA_SPI_MAX_CS 32 + struct altera_spi { void __iomem *base; int irq; @@ -182,6 +185,7 @@ static irqreturn_t altera_spi_irq(int irq, void *dev) static int altera_spi_probe(struct platform_device *pdev) { + struct altera_spi_platform_data *pdata = dev_get_platdata(&pdev->dev); struct altera_spi *hw; struct spi_master *master; int err = -ENODEV; @@ -192,9 +196,24 @@ static int altera_spi_probe(struct platform_device *pdev) /* setup the master state. */ master->bus_num = pdev->id; - master->num_chipselect = 16; - master->mode_bits = SPI_CS_HIGH; - master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16); + + if (pdata) { + if (pdata->num_chipselect > ALTERA_SPI_MAX_CS) { + dev_err(&pdev->dev, + "Invalid number of chipselect: %hu\n", + pdata->num_chipselect); + return -EINVAL; + } + + master->num_chipselect = pdata->num_chipselect; + master->mode_bits = pdata->mode_bits; + master->bits_per_word_mask = pdata->bits_per_word_mask; + } else { + master->num_chipselect = 16; + master->mode_bits = SPI_CS_HIGH; + master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16); + } + master->dev.of_node = pdev->dev.of_node; master->transfer_one = altera_spi_txrx; master->set_cs = altera_spi_set_cs; diff --git a/include/linux/spi/altera.h b/include/linux/spi/altera.h new file mode 100644 index 000000000000..344a3fce56a4 --- /dev/null +++ b/include/linux/spi/altera.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Header File for Altera SPI Driver. + */ +#ifndef __LINUX_SPI_ALTERA_H +#define __LINUX_SPI_ALTERA_H + +#include +#include +#include + +/** + * struct altera_spi_platform_data - Platform data of the Altera SPI driver + * @mode_bits: Mode bits of SPI master. + * @num_chipselect: Number of chipselects. + * @bits_per_word_mask: bitmask of supported bits_per_word for transfers. 
+ */ +struct altera_spi_platform_data { + u16 mode_bits; + u16 num_chipselect; + u32 bits_per_word_mask; +}; + +#endif /* __LINUX_SPI_ALTERA_H */ -- cgit v1.2.3 From 1fccd182a4694a848f2d6f3b1820d6fc71d9c99d Mon Sep 17 00:00:00 2001 From: Xu Yilun Date: Thu, 11 Jun 2020 11:25:08 +0800 Subject: spi: altera: add platform data for slave information. This patch introduces platform data for slave information, it allows spi-altera to add new spi devices once master registration is done. Signed-off-by: Wu Hao Signed-off-by: Xu Yilun Signed-off-by: Matthew Gerlach Signed-off-by: Russ Weight Reviewed-by: Tom Rix Link: https://lore.kernel.org/r/1591845911-10197-4-git-send-email-yilun.xu@intel.com Signed-off-by: Mark Brown --- drivers/spi/spi-altera.c | 11 +++++++++++ include/linux/spi/altera.h | 5 +++++ 2 files changed, 16 insertions(+) (limited to 'include') diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c index e6e6708c7c47..aa9d1a257433 100644 --- a/drivers/spi/spi-altera.c +++ b/drivers/spi/spi-altera.c @@ -189,6 +189,7 @@ static int altera_spi_probe(struct platform_device *pdev) struct altera_spi *hw; struct spi_master *master; int err = -ENODEV; + u16 i; master = spi_alloc_master(&pdev->dev, sizeof(struct altera_spi)); if (!master) @@ -244,6 +245,16 @@ static int altera_spi_probe(struct platform_device *pdev) err = devm_spi_register_master(&pdev->dev, master); if (err) goto exit; + + if (pdata) { + for (i = 0; i < pdata->num_devices; i++) { + if (!spi_new_device(master, pdata->devices + i)) + dev_warn(&pdev->dev, + "unable to create SPI device: %s\n", + pdata->devices[i].modalias); + } + } + dev_info(&pdev->dev, "base %p, irq %d\n", hw->base, hw->irq); return 0; diff --git a/include/linux/spi/altera.h b/include/linux/spi/altera.h index 344a3fce56a4..2d42641499a6 100644 --- a/include/linux/spi/altera.h +++ b/include/linux/spi/altera.h @@ -14,11 +14,16 @@ * @mode_bits: Mode bits of SPI master. * @num_chipselect: Number of chipselects. * @bits_per_word_mask: bitmask of supported bits_per_word for transfers. + * @num_devices: Number of devices that shall be added when the driver + * is probed. + * @devices: The devices to add. */ struct altera_spi_platform_data { u16 mode_bits; u16 num_chipselect; u32 bits_per_word_mask; + u16 num_devices; + struct spi_board_info *devices; }; #endif /* __LINUX_SPI_ALTERA_H */ -- cgit v1.2.3 From aa5c697988b4c7e1077de1e2eb2298114d531736 Mon Sep 17 00:00:00 2001 From: Stanley Chu Date: Mon, 15 Jun 2020 15:22:35 +0800 Subject: scsi: ufs: Add trace event for UIC commands Use the ftrace infrastructure to conditionally trace UFS UIC command events. New trace event "ufshcd_uic_command" is created, which samples the following UFS UIC command data: - Device name - Optional identification string - UIC command opcode - UIC command argument1 - UIC command argument2 - UIC command argement3 Usage: echo 1 > /sys/kernel/debug/tracing/events/ufs/enable cat /sys/kernel/debug/tracing/trace_pipe Link: https://lore.kernel.org/r/20200615072235.23042-3-stanley.chu@mediatek.com Acked-by: Avri Altman Signed-off-by: Stanley Chu Signed-off-by: Martin K. 
Petersen --- drivers/scsi/ufs/ufshcd.c | 26 ++++++++++++++++++++++++++ include/trace/events/ufs.h | 31 +++++++++++++++++++++++++++++++ 2 files changed, 57 insertions(+) (limited to 'include') diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 6cedf0758b3f..19221de37a4d 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -311,6 +311,26 @@ static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag, &descp->input_param1); } +static void ufshcd_add_uic_command_trace(struct ufs_hba *hba, + struct uic_command *ucmd, + const char *str) +{ + u32 cmd; + + if (!trace_ufshcd_uic_command_enabled()) + return; + + if (!strcmp(str, "send")) + cmd = ucmd->command; + else + cmd = ufshcd_readl(hba, REG_UIC_COMMAND); + + trace_ufshcd_uic_command(dev_name(hba->dev), str, cmd, + ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1), + ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2), + ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3)); +} + static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag, const char *str) { @@ -2030,6 +2050,8 @@ ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2); ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3); + ufshcd_add_uic_command_trace(hba, uic_cmd, "send"); + /* Write UIC Cmd */ ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK, REG_UIC_COMMAND); @@ -4835,6 +4857,10 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) complete(hba->uic_async_done); retval = IRQ_HANDLED; } + + if (retval == IRQ_HANDLED) + ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd, + "complete"); return retval; } diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h index 5f300739240d..84841b3a7ffd 100644 --- a/include/trace/events/ufs.h +++ b/include/trace/events/ufs.h @@ -249,6 +249,37 @@ TRACE_EVENT(ufshcd_command, ) ); +TRACE_EVENT(ufshcd_uic_command, + TP_PROTO(const char *dev_name, const char *str, u32 cmd, + u32 arg1, u32 arg2, u32 arg3), + + TP_ARGS(dev_name, str, cmd, arg1, arg2, arg3), + + TP_STRUCT__entry( + __string(dev_name, dev_name) + __string(str, str) + __field(u32, cmd) + __field(u32, arg1) + __field(u32, arg2) + __field(u32, arg3) + ), + + TP_fast_assign( + __assign_str(dev_name, dev_name); + __assign_str(str, str); + __entry->cmd = cmd; + __entry->arg1 = arg1; + __entry->arg2 = arg2; + __entry->arg3 = arg3; + ), + + TP_printk( + "%s: %s: cmd: 0x%x, arg1: 0x%x, arg2: 0x%x, arg3: 0x%x", + __get_str(str), __get_str(dev_name), __entry->cmd, + __entry->arg1, __entry->arg2, __entry->arg3 + ) +); + TRACE_EVENT(ufshcd_upiu, TP_PROTO(const char *dev_name, const char *str, void *hdr, void *tsf), -- cgit v1.2.3 From 9357b04624013294e4204b1a837d0a611b9048c3 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Wed, 27 May 2020 17:47:31 +0200 Subject: reset: Move reset-simple header out of drivers/reset The reset-simple code can be useful for drivers outside of drivers/reset that have a few reset controls as part of their features. Let's move it to include/linux/reset. 
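For illustration (not part of this patch), a driver outside drivers/reset could then embed the simple reset data and register it directly; the register block passed in and the number of reset lines are assumptions for the sketch:

  #include <linux/device.h>
  #include <linux/module.h>
  #include <linux/reset-controller.h>
  #include <linux/reset/reset-simple.h>

  /* Hypothetical example: expose a bank of reset bits living at @regs. */
  static int example_register_resets(struct device *dev, void __iomem *regs)
  {
          struct reset_simple_data *data;

          data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
          if (!data)
                  return -ENOMEM;

          spin_lock_init(&data->lock);
          data->membase = regs;                   /* reset bits live here */
          data->rcdev.owner = THIS_MODULE;
          data->rcdev.nr_resets = 16;             /* assumed line count */
          data->rcdev.ops = &reset_simple_ops;
          data->rcdev.of_node = dev->of_node;

          return devm_reset_controller_register(dev, &data->rcdev);
  }
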
Reviewed-by: Philipp Zabel Signed-off-by: Maxime Ripard Signed-off-by: Philipp Zabel --- drivers/reset/reset-simple.c | 3 +-- drivers/reset/reset-simple.h | 41 ------------------------------------- drivers/reset/reset-socfpga.c | 3 +-- drivers/reset/reset-sunxi.c | 3 +-- drivers/reset/reset-uniphier-glue.c | 3 +-- include/linux/reset/reset-simple.h | 41 +++++++++++++++++++++++++++++++++++++ 6 files changed, 45 insertions(+), 49 deletions(-) delete mode 100644 drivers/reset/reset-simple.h create mode 100644 include/linux/reset/reset-simple.h (limited to 'include') diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c index 067e7e7b34f1..c854aa351640 100644 --- a/drivers/reset/reset-simple.c +++ b/drivers/reset/reset-simple.c @@ -18,10 +18,9 @@ #include #include #include +#include #include -#include "reset-simple.h" - static inline struct reset_simple_data * to_reset_simple_data(struct reset_controller_dev *rcdev) { diff --git a/drivers/reset/reset-simple.h b/drivers/reset/reset-simple.h deleted file mode 100644 index 08ccb25a55e6..000000000000 --- a/drivers/reset/reset-simple.h +++ /dev/null @@ -1,41 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Simple Reset Controller ops - * - * Based on Allwinner SoCs Reset Controller driver - * - * Copyright 2013 Maxime Ripard - * - * Maxime Ripard - */ - -#ifndef __RESET_SIMPLE_H__ -#define __RESET_SIMPLE_H__ - -#include -#include -#include - -/** - * struct reset_simple_data - driver data for simple reset controllers - * @lock: spinlock to protect registers during read-modify-write cycles - * @membase: memory mapped I/O register range - * @rcdev: reset controller device base structure - * @active_low: if true, bits are cleared to assert the reset. Otherwise, bits - * are set to assert the reset. Note that this says nothing about - * the voltage level of the actual reset line. - * @status_active_low: if true, bits read back as cleared while the reset is - * asserted. Otherwise, bits read back as set while the - * reset is asserted. 
- */ -struct reset_simple_data { - spinlock_t lock; - void __iomem *membase; - struct reset_controller_dev rcdev; - bool active_low; - bool status_active_low; -}; - -extern const struct reset_control_ops reset_simple_ops; - -#endif /* __RESET_SIMPLE_H__ */ diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c index 96953992c2bb..bdd984296196 100644 --- a/drivers/reset/reset-socfpga.c +++ b/drivers/reset/reset-socfpga.c @@ -11,13 +11,12 @@ #include #include #include +#include #include #include #include #include -#include "reset-simple.h" - #define SOCFPGA_NR_BANKS 8 static int a10_reset_init(struct device_node *np) diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c index e7f169e57bcf..e752594b6971 100644 --- a/drivers/reset/reset-sunxi.c +++ b/drivers/reset/reset-sunxi.c @@ -14,13 +14,12 @@ #include #include #include +#include #include #include #include #include -#include "reset-simple.h" - static int sunxi_reset_init(struct device_node *np) { struct reset_simple_data *data; diff --git a/drivers/reset/reset-uniphier-glue.c b/drivers/reset/reset-uniphier-glue.c index 2b188b3bb69a..027990b79f61 100644 --- a/drivers/reset/reset-uniphier-glue.c +++ b/drivers/reset/reset-uniphier-glue.c @@ -9,8 +9,7 @@ #include #include #include - -#include "reset-simple.h" +#include #define MAX_CLKS 2 #define MAX_RSTS 2 diff --git a/include/linux/reset/reset-simple.h b/include/linux/reset/reset-simple.h new file mode 100644 index 000000000000..08ccb25a55e6 --- /dev/null +++ b/include/linux/reset/reset-simple.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Simple Reset Controller ops + * + * Based on Allwinner SoCs Reset Controller driver + * + * Copyright 2013 Maxime Ripard + * + * Maxime Ripard + */ + +#ifndef __RESET_SIMPLE_H__ +#define __RESET_SIMPLE_H__ + +#include +#include +#include + +/** + * struct reset_simple_data - driver data for simple reset controllers + * @lock: spinlock to protect registers during read-modify-write cycles + * @membase: memory mapped I/O register range + * @rcdev: reset controller device base structure + * @active_low: if true, bits are cleared to assert the reset. Otherwise, bits + * are set to assert the reset. Note that this says nothing about + * the voltage level of the actual reset line. + * @status_active_low: if true, bits read back as cleared while the reset is + * asserted. Otherwise, bits read back as set while the + * reset is asserted. + */ +struct reset_simple_data { + spinlock_t lock; + void __iomem *membase; + struct reset_controller_dev rcdev; + bool active_low; + bool status_active_low; +}; + +extern const struct reset_control_ops reset_simple_ops; + +#endif /* __RESET_SIMPLE_H__ */ -- cgit v1.2.3 From a9701376ed0fb61a5be4bb438daf26bd9cfa24b5 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Wed, 27 May 2020 17:47:32 +0200 Subject: reset: simple: Add reset callback The reset-simple code lacks a reset callback that is still pretty easy to implement. The only real thing to consider is the delay needed for a device to be reset, so let's expose that as part of the reset-simple driver data. 
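Sketch of the expected usage (illustrative only): the provider fills in the new field before registering, for example data->reset_us = 10 for a device that needs at least 10 microseconds in reset (the figure is an assumption), and consumers then get a working reset through the generic API:

  #include <linux/reset.h>

  /* Consumer side: assert, wait at least reset_us, then deassert. */
  static int example_consumer_reset(struct device *dev)
  {
          struct reset_control *rst;

          rst = devm_reset_control_get_exclusive(dev, NULL);
          if (IS_ERR(rst))
                  return PTR_ERR(rst);

          return reset_control_reset(rst);
  }
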
Reviewed-by: Philipp Zabel Signed-off-by: Maxime Ripard Signed-off-by: Philipp Zabel --- drivers/reset/reset-simple.c | 20 ++++++++++++++++++++ include/linux/reset/reset-simple.h | 7 +++++++ 2 files changed, 27 insertions(+) (limited to 'include') diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c index c854aa351640..e066614818a3 100644 --- a/drivers/reset/reset-simple.c +++ b/drivers/reset/reset-simple.c @@ -11,6 +11,7 @@ * Maxime Ripard */ +#include #include #include #include @@ -63,6 +64,24 @@ static int reset_simple_deassert(struct reset_controller_dev *rcdev, return reset_simple_update(rcdev, id, false); } +static int reset_simple_reset(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct reset_simple_data *data = to_reset_simple_data(rcdev); + int ret; + + if (!data->reset_us) + return -ENOTSUPP; + + ret = reset_simple_assert(rcdev, id); + if (ret) + return ret; + + usleep_range(data->reset_us, data->reset_us * 2); + + return reset_simple_deassert(rcdev, id); +} + static int reset_simple_status(struct reset_controller_dev *rcdev, unsigned long id) { @@ -80,6 +99,7 @@ static int reset_simple_status(struct reset_controller_dev *rcdev, const struct reset_control_ops reset_simple_ops = { .assert = reset_simple_assert, .deassert = reset_simple_deassert, + .reset = reset_simple_reset, .status = reset_simple_status, }; EXPORT_SYMBOL_GPL(reset_simple_ops); diff --git a/include/linux/reset/reset-simple.h b/include/linux/reset/reset-simple.h index 08ccb25a55e6..c3e44f45b0f7 100644 --- a/include/linux/reset/reset-simple.h +++ b/include/linux/reset/reset-simple.h @@ -27,6 +27,12 @@ * @status_active_low: if true, bits read back as cleared while the reset is * asserted. Otherwise, bits read back as set while the * reset is asserted. + * @reset_us: Minimum delay in microseconds needed that needs to be + * waited for between an assert and a deassert to reset the + * device. If multiple consumers with different delay + * requirements are connected to this controller, it must + * be the largest minimum delay. 0 means that such a delay is + * unknown and the reset operation is unsupported. */ struct reset_simple_data { spinlock_t lock; @@ -34,6 +40,7 @@ struct reset_simple_data { struct reset_controller_dev rcdev; bool active_low; bool status_active_low; + unsigned int reset_us; }; extern const struct reset_control_ops reset_simple_ops; -- cgit v1.2.3 From 8a307d3601bcca99723b1a45e785adc3c9d3a476 Mon Sep 17 00:00:00 2001 From: Artur Świgoń Date: Thu, 21 May 2020 14:28:39 +0200 Subject: interconnect: Export of_icc_get_from_provider() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch makes the above function public (for use in exynos-bus devfreq driver). Signed-off-by: Artur Świgoń Reviewed-by: Krzysztof Kozlowski Reviewed-by: Chanwoo Choi Signed-off-by: Sylwester Nawrocki Link: https://lore.kernel.org/r/20200521122841.8867-2-s.nawrocki@samsung.com Signed-off-by: Georgi Djakov --- drivers/interconnect/core.c | 3 ++- include/linux/interconnect-provider.h | 6 ++++++ 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c index e5f998744501..9e2d55d94fb4 100644 --- a/drivers/interconnect/core.c +++ b/drivers/interconnect/core.c @@ -334,7 +334,7 @@ EXPORT_SYMBOL_GPL(of_icc_xlate_onecell); * Returns a valid pointer to struct icc_node on success or ERR_PTR() * on failure. 
*/ -static struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec) +struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec) { struct icc_node *node = ERR_PTR(-EPROBE_DEFER); struct icc_provider *provider; @@ -353,6 +353,7 @@ static struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec) return node; } +EXPORT_SYMBOL_GPL(of_icc_get_from_provider); static void devm_icc_release(struct device *dev, void *res) { diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h index 0c494534b4d3..c92be2a90fa0 100644 --- a/include/linux/interconnect-provider.h +++ b/include/linux/interconnect-provider.h @@ -103,6 +103,7 @@ void icc_node_del(struct icc_node *node); int icc_nodes_remove(struct icc_provider *provider); int icc_provider_add(struct icc_provider *provider); int icc_provider_del(struct icc_provider *provider); +struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec); #else @@ -154,6 +155,11 @@ static inline int icc_provider_del(struct icc_provider *provider) return -ENOTSUPP; } +static inline struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec) +{ + return ERR_PTR(-ENOTSUPP); +} + #endif /* CONFIG_INTERCONNECT */ #endif /* __LINUX_INTERCONNECT_PROVIDER_H */ -- cgit v1.2.3 From 65461e26b1fe73bde4326367ee23cc1a24e6c33e Mon Sep 17 00:00:00 2001 From: Artur Świgoń Date: Thu, 21 May 2020 14:28:41 +0200 Subject: interconnect: Allow inter-provider pairs to be configured MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds support for a new boolean 'inter_set' field in struct icc_provider. Setting it to 'true' enables calling '->set' for inter-provider node pairs. All existing users of the interconnect framework allocate this structure with kzalloc, and are therefore unaffected by this change. This makes it easier for hierarchies like exynos-bus, where every bus is probed separately and registers a separate interconnect provider, to model constraints between buses. Signed-off-by: Artur Świgoń Signed-off-by: Sylwester Nawrocki Acked-by: Krzysztof Kozlowski Link: https://lore.kernel.org/r/20200521122841.8867-4-s.nawrocki@samsung.com Signed-off-by: Georgi Djakov --- drivers/interconnect/core.c | 11 +++++------ include/linux/interconnect-provider.h | 2 ++ 2 files changed, 7 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c index 8b4d50d59e16..609e206bf598 100644 --- a/drivers/interconnect/core.c +++ b/drivers/interconnect/core.c @@ -263,23 +263,22 @@ static int aggregate_requests(struct icc_node *node) static int apply_constraints(struct icc_path *path) { struct icc_node *next, *prev = NULL; + struct icc_provider *p; int ret = -EINVAL; int i; for (i = 0; i < path->num_nodes; i++) { next = path->reqs[i].node; + p = next->provider; - /* - * Both endpoints should be valid master-slave pairs of the - * same interconnect provider that will be configured. 
- */ - if (!prev || next->provider != prev->provider) { + /* both endpoints should be valid master-slave pairs */ + if (!prev || (p != prev->provider && !p->inter_set)) { prev = next; continue; } /* set the constraints */ - ret = next->provider->set(prev, next); + ret = p->set(prev, next); if (ret) goto out; diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h index c92be2a90fa0..38701925ab91 100644 --- a/include/linux/interconnect-provider.h +++ b/include/linux/interconnect-provider.h @@ -41,6 +41,7 @@ struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec, * @xlate: provider-specific callback for mapping nodes from phandle arguments * @dev: the device this interconnect provider belongs to * @users: count of active users + * @inter_set: whether inter-provider pairs will be configured with @set * @data: pointer to private data */ struct icc_provider { @@ -53,6 +54,7 @@ struct icc_provider { struct icc_node* (*xlate)(struct of_phandle_args *spec, void *data); struct device *dev; int users; + bool inter_set; void *data; }; -- cgit v1.2.3 From 12a400b016ab955be8e4c569346fa18aaceed9d7 Mon Sep 17 00:00:00 2001 From: Georgi Djakov Date: Tue, 16 Jun 2020 16:43:23 +0300 Subject: interconnect: Mark all dummy functions as static inline There are a few dummy stub functions that are not marked as static inline yet. Currently this header file is not included in any other file outside of drivers/interconnect/, but that might not be the case in the future. If this file gets included and the framework is disabled, we will be see warnings. Let's fix this in advance. Link: https://lore.kernel.org/r/20200228145945.13579-1-georgi.djakov@linaro.org Signed-off-by: Georgi Djakov --- include/linux/interconnect-provider.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h index 38701925ab91..4735518de515 100644 --- a/include/linux/interconnect-provider.h +++ b/include/linux/interconnect-provider.h @@ -120,7 +120,7 @@ static inline struct icc_node *icc_node_create(int id) return ERR_PTR(-ENOTSUPP); } -void icc_node_destroy(int id) +static inline void icc_node_destroy(int id) { } @@ -129,16 +129,16 @@ static inline int icc_link_create(struct icc_node *node, const int dst_id) return -ENOTSUPP; } -int icc_link_destroy(struct icc_node *src, struct icc_node *dst) +static inline int icc_link_destroy(struct icc_node *src, struct icc_node *dst) { return -ENOTSUPP; } -void icc_node_add(struct icc_node *node, struct icc_provider *provider) +static inline void icc_node_add(struct icc_node *node, struct icc_provider *provider) { } -void icc_node_del(struct icc_node *node) +static inline void icc_node_del(struct icc_node *node) { } -- cgit v1.2.3 From e370f886fefc23b9ca3011760d9376f1947eb321 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Tue, 16 Jun 2020 15:39:25 +0200 Subject: EDAC: Remove edac_get_dimm_by_index() It is unused now. Signed-off-by: Borislav Petkov --- include/linux/edac.h | 29 +++++++---------------------- 1 file changed, 7 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/include/linux/edac.h b/include/linux/edac.h index 6eb7d55d7c3d..15e8f3d8a895 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -594,27 +594,6 @@ struct mem_ctl_info { ? 
(mci)->dimms[(dimm)->idx + 1] \ : NULL) -/** - * edac_get_dimm_by_index - Get DIMM info at @index from a memory - * controller - * - * @mci: MC descriptor struct mem_ctl_info - * @index: index in the memory controller's DIMM array - * - * Returns a struct dimm_info * or NULL on failure. - */ -static inline struct dimm_info * -edac_get_dimm_by_index(struct mem_ctl_info *mci, int index) -{ - if (index < 0 || index >= mci->tot_dimms) - return NULL; - - if (WARN_ON_ONCE(mci->dimms[index]->idx != index)) - return NULL; - - return mci->dimms[index]; -} - /** * edac_get_dimm - Get DIMM info from a memory controller given by * [layer0,layer1,layer2] position @@ -650,6 +629,12 @@ static inline struct dimm_info *edac_get_dimm(struct mem_ctl_info *mci, if (mci->n_layers > 2) index = index * mci->layers[2].size + layer2; - return edac_get_dimm_by_index(mci, index); + if (index < 0 || index >= mci->tot_dimms) + return NULL; + + if (WARN_ON_ONCE(mci->dimms[index]->idx != index)) + return NULL; + + return mci->dimms[index]; } #endif /* _LINUX_EDAC_H_ */ -- cgit v1.2.3 From 278a5fbaed89dacd04e9d052f4594ffd0e0585de Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Fri, 24 May 2019 11:30:34 +0200 Subject: open: add close_range() This adds the close_range() syscall. It allows to efficiently close a range of file descriptors up to all file descriptors of a calling task. I was contacted by FreeBSD as they wanted to have the same close_range() syscall as we proposed here. We've coordinated this and in the meantime, Kyle was fast enough to merge close_range() into FreeBSD already in April: https://reviews.freebsd.org/D21627 https://svnweb.freebsd.org/base?view=revision&revision=359836 and the current plan is to backport close_range() to FreeBSD 12.2 (cf. [2]) once its merged in Linux too. Python is in the process of switching to close_range() on FreeBSD and they are waiting on us to merge this to switch on Linux as well: https://bugs.python.org/issue38061 The syscall came up in a recent discussion around the new mount API and making new file descriptor types cloexec by default. During this discussion, Al suggested the close_range() syscall (cf. [1]). Note, a syscall in this manner has been requested by various people over time. First, it helps to close all file descriptors of an exec()ing task. This can be done safely via (quoting Al's example from [1] verbatim): /* that exec is sensitive */ unshare(CLONE_FILES); /* we don't want anything past stderr here */ close_range(3, ~0U); execve(....); The code snippet above is one way of working around the problem that file descriptors are not cloexec by default. This is aggravated by the fact that we can't just switch them over without massively regressing userspace. For a whole class of programs having an in-kernel method of closing all file descriptors is very helpful (e.g. demons, service managers, programming language standard libraries, container managers etc.). (Please note, unshare(CLONE_FILES) should only be needed if the calling task is multi-threaded and shares the file descriptor table with another thread in which case two threads could race with one thread allocating file descriptors and the other one closing them via close_range(). For the general case close_range() before the execve() is sufficient.) Second, it allows userspace to avoid implementing closing all file descriptors by parsing through /proc//fd/* and calling close() on each file descriptor. 
From looking at various large(ish) userspace code bases this or similar patterns are very common in: - service managers (cf. [4]) - libcs (cf. [6]) - container runtimes (cf. [5]) - programming language runtimes/standard libraries - Python (cf. [2]) - Rust (cf. [7], [8]) As Dmitry pointed out there's even a long-standing glibc bug about missing kernel support for this task (cf. [3]). In addition, the syscall will also work for tasks that do not have procfs mounted and on kernels that do not have procfs support compiled in. In such situations the only way to make sure that all file descriptors are closed is to call close() on each file descriptor up to UINT_MAX or RLIMIT_NOFILE, OPEN_MAX trickery (cf. comment [8] on Rust). The performance is striking. For good measure, comparing the following simple close_all_fds() userspace implementation that is essentially just glibc's version in [6]: static int close_all_fds(void) { int dir_fd; DIR *dir; struct dirent *direntp; dir = opendir("/proc/self/fd"); if (!dir) return -1; dir_fd = dirfd(dir); while ((direntp = readdir(dir))) { int fd; if (strcmp(direntp->d_name, ".") == 0) continue; if (strcmp(direntp->d_name, "..") == 0) continue; fd = atoi(direntp->d_name); if (fd == dir_fd || fd == 0 || fd == 1 || fd == 2) continue; close(fd); } closedir(dir); return 0; } to close_range() yields: 1. closing 4 open files: - close_all_fds(): ~280 us - close_range(): ~24 us 2. closing 1000 open files: - close_all_fds(): ~5000 us - close_range(): ~800 us close_range() is designed to allow for some flexibility. Specifically, it does not simply always close all open file descriptors of a task. Instead, callers can specify an upper bound. This is e.g. useful for scenarios where specific file descriptors are created with well-known numbers that are supposed to be excluded from getting closed. For extra paranoia close_range() comes with a flags argument. This can e.g. be used to implement extension. Once can imagine userspace wanting to stop at the first error instead of ignoring errors under certain circumstances. There might be other valid ideas in the future. In any case, a flag argument doesn't hurt and keeps us on the safe side. From an implementation side this is kept rather dumb. It saw some input from David and Jann but all nonsense is obviously my own! - Errors to close file descriptors are currently ignored. (Could be changed by setting a flag in the future if needed.) - __close_range() is a rather simplistic wrapper around __close_fd(). My reasoning behind this is based on the nature of how __close_fd() needs to release an fd. But maybe I misunderstood specifics: We take the files_lock and rcu-dereference the fdtable of the calling task, we find the entry in the fdtable, get the file and need to release files_lock before calling filp_close(). In the meantime the fdtable might have been altered so we can't just retake the spinlock and keep the old rcu-reference of the fdtable around. Instead we need to grab a fresh reference to the fdtable. If my reasoning is correct then there's really no point in fancyfying __close_range(): We just need to rcu-dereference the fdtable of the calling task once to cap the max_fd value correctly and then go on calling __close_fd() in a loop. 
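Until a libc wrapper exists, userspace can invoke the new syscall directly; a minimal sketch, assuming the __NR_close_range number that a follow-up patch wires into the syscall tables:

  #include <unistd.h>
  #include <sys/syscall.h>

  static inline long close_range(unsigned int fd, unsigned int max_fd,
                                 unsigned int flags)
  {
          return syscall(__NR_close_range, fd, max_fd, flags);
  }

  /* Example: drop everything above stderr before exec'ing something sensitive. */
  /*      close_range(3, ~0U, 0);     */
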
/* References */ [1]: https://lore.kernel.org/lkml/20190516165021.GD17978@ZenIV.linux.org.uk/ [2]: https://github.com/python/cpython/blob/9e4f2f3a6b8ee995c365e86d976937c141d867f8/Modules/_posixsubprocess.c#L220 [3]: https://sourceware.org/bugzilla/show_bug.cgi?id=10353#c7 [4]: https://github.com/systemd/systemd/blob/5238e9575906297608ff802a27e2ff9effa3b338/src/basic/fd-util.c#L217 [5]: https://github.com/lxc/lxc/blob/ddf4b77e11a4d08f09b7b9cd13e593f8c047edc5/src/lxc/start.c#L236 [6]: https://sourceware.org/git/?p=glibc.git;a=blob;f=sysdeps/unix/sysv/linux/grantpt.c;h=2030e07fa6e652aac32c775b8c6e005844c3c4eb;hb=HEAD#l17 Note that this is an internal implementation that is not exported. Currently, libc seems to not provide an exported version of this because of missing kernel support to do this. Note, in a recent patch series Florian made grantpt() a nop thereby removing the code referenced here. [7]: https://github.com/rust-lang/rust/issues/12148 [8]: https://github.com/rust-lang/rust/blob/5f47c0613ed4eb46fca3633c1297364c09e5e451/src/libstd/sys/unix/process2.rs#L303-L308 Rust's solution is slightly different but is equally unperformant. Rust calls getdtablesize() which is a glibc library function that simply returns the current RLIMIT_NOFILE or OPEN_MAX values. Rust then goes on to call close() on each fd. That's obviously overkill for most tasks. Rarely, tasks - especially non-demons - hit RLIMIT_NOFILE or OPEN_MAX. Let's be nice and assume an unprivileged user with RLIMIT_NOFILE set to 1024. Even in this case, there's a very high chance that in the common case Rust is calling the close() syscall 1021 times pointlessly if the task just has 0, 1, and 2 open. Suggested-by: Al Viro Signed-off-by: Christian Brauner Cc: Arnd Bergmann Cc: Kyle Evans Cc: Jann Horn Cc: David Howells Cc: Dmitry V. Levin Cc: Oleg Nesterov Cc: Linus Torvalds Cc: Florian Weimer Cc: linux-api@vger.kernel.org --- fs/file.c | 64 ++++++++++++++++++++++++++++++++++++++++++------ fs/open.c | 20 +++++++++++++++ include/linux/fdtable.h | 2 ++ include/linux/syscalls.h | 2 ++ 4 files changed, 80 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/fs/file.c b/fs/file.c index abb8b7081d7a..1b8ff05e8311 100644 --- a/fs/file.c +++ b/fs/file.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -620,12 +621,9 @@ void fd_install(unsigned int fd, struct file *file) EXPORT_SYMBOL(fd_install); -/* - * The same warnings as for __alloc_fd()/__fd_install() apply here... - */ -int __close_fd(struct files_struct *files, unsigned fd) +static struct file *pick_file(struct files_struct *files, unsigned fd) { - struct file *file; + struct file *file = NULL; struct fdtable *fdt; spin_lock(&files->file_lock); @@ -637,15 +635,65 @@ int __close_fd(struct files_struct *files, unsigned fd) goto out_unlock; rcu_assign_pointer(fdt->fd[fd], NULL); __put_unused_fd(files, fd); - spin_unlock(&files->file_lock); - return filp_close(file, files); out_unlock: spin_unlock(&files->file_lock); - return -EBADF; + return file; +} + +/* + * The same warnings as for __alloc_fd()/__fd_install() apply here... + */ +int __close_fd(struct files_struct *files, unsigned fd) +{ + struct file *file; + + file = pick_file(files, fd); + if (!file) + return -EBADF; + + return filp_close(file, files); } EXPORT_SYMBOL(__close_fd); /* for ksys_close() */ +/** + * __close_range() - Close all file descriptors in a given range. 
+ * + * @fd: starting file descriptor to close + * @max_fd: last file descriptor to close + * + * This closes a range of file descriptors. All file descriptors + * from @fd up to and including @max_fd are closed. + */ +int __close_range(struct files_struct *files, unsigned fd, unsigned max_fd) +{ + unsigned int cur_max; + + if (fd > max_fd) + return -EINVAL; + + rcu_read_lock(); + cur_max = files_fdtable(files)->max_fds; + rcu_read_unlock(); + + /* cap to last valid index into fdtable */ + cur_max--; + + max_fd = min(max_fd, cur_max); + while (fd <= max_fd) { + struct file *file; + + file = pick_file(files, fd++); + if (!file) + continue; + + filp_close(file, files); + cond_resched(); + } + + return 0; +} + /* * variant of __close_fd that gets a ref on the file for later fput. * The caller must ensure that filp_close() called on the file, and then diff --git a/fs/open.c b/fs/open.c index 6cd48a61cda3..073ea3c45347 100644 --- a/fs/open.c +++ b/fs/open.c @@ -1310,6 +1310,26 @@ SYSCALL_DEFINE1(close, unsigned int, fd) return retval; } +/** + * close_range() - Close all file descriptors in a given range. + * + * @fd: starting file descriptor to close + * @max_fd: last file descriptor to close + * @flags: reserved for future extensions + * + * This closes a range of file descriptors. All file descriptors + * from @fd up to and including @max_fd are closed. + * Currently, errors to close a given file descriptor are ignored. + */ +SYSCALL_DEFINE3(close_range, unsigned int, fd, unsigned int, max_fd, + unsigned int, flags) +{ + if (flags) + return -EINVAL; + + return __close_range(current->files, fd, max_fd); +} + /* * This routine simulates a hangup on the tty, to arrange that users * are given clean terminals at login time. diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index f07c55ea0c22..fcd07181a365 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h @@ -121,6 +121,8 @@ extern void __fd_install(struct files_struct *files, unsigned int fd, struct file *file); extern int __close_fd(struct files_struct *files, unsigned int fd); +extern int __close_range(struct files_struct *files, unsigned int fd, + unsigned int max_fd); extern int __close_fd_get_file(unsigned int fd, struct file **res); extern struct kmem_cache *files_cachep; diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 7c354c2955f5..b22382db89cf 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -444,6 +444,8 @@ asmlinkage long sys_openat(int dfd, const char __user *filename, int flags, asmlinkage long sys_openat2(int dfd, const char __user *filename, struct open_how *how, size_t size); asmlinkage long sys_close(unsigned int fd); +asmlinkage long sys_close_range(unsigned int fd, unsigned int max_fd, + unsigned int flags); asmlinkage long sys_vhangup(void); /* fs/pipe.c */ -- cgit v1.2.3 From 9b4feb630e8e9801603f3cab3a36369e3c1cf88d Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Fri, 24 May 2019 11:31:44 +0200 Subject: arch: wire-up close_range() This wires up the close_range() syscall into all arches at once. Suggested-by: Arnd Bergmann Signed-off-by: Christian Brauner Reviewed-by: Oleg Nesterov Acked-by: Arnd Bergmann Acked-by: Michael Ellerman (powerpc) Cc: Jann Horn Cc: David Howells Cc: Dmitry V. 
Levin Cc: Linus Torvalds Cc: Al Viro Cc: Florian Weimer Cc: linux-api@vger.kernel.org Cc: linux-alpha@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-ia64@vger.kernel.org Cc: linux-m68k@lists.linux-m68k.org Cc: linux-mips@vger.kernel.org Cc: linux-parisc@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Cc: linux-s390@vger.kernel.org Cc: linux-sh@vger.kernel.org Cc: sparclinux@vger.kernel.org Cc: linux-xtensa@linux-xtensa.org Cc: linux-arch@vger.kernel.org Cc: x86@kernel.org --- arch/alpha/kernel/syscalls/syscall.tbl | 1 + arch/arm/tools/syscall.tbl | 1 + arch/arm64/include/asm/unistd32.h | 2 ++ arch/ia64/kernel/syscalls/syscall.tbl | 1 + arch/m68k/kernel/syscalls/syscall.tbl | 1 + arch/microblaze/kernel/syscalls/syscall.tbl | 1 + arch/mips/kernel/syscalls/syscall_n32.tbl | 1 + arch/mips/kernel/syscalls/syscall_n64.tbl | 1 + arch/mips/kernel/syscalls/syscall_o32.tbl | 1 + arch/parisc/kernel/syscalls/syscall.tbl | 1 + arch/powerpc/kernel/syscalls/syscall.tbl | 1 + arch/s390/kernel/syscalls/syscall.tbl | 1 + arch/sh/kernel/syscalls/syscall.tbl | 1 + arch/sparc/kernel/syscalls/syscall.tbl | 1 + arch/x86/entry/syscalls/syscall_32.tbl | 1 + arch/x86/entry/syscalls/syscall_64.tbl | 1 + arch/xtensa/kernel/syscalls/syscall.tbl | 1 + include/uapi/asm-generic/unistd.h | 2 ++ 18 files changed, 20 insertions(+) (limited to 'include') diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl index 5ddd128d4b7a..a28fb211881d 100644 --- a/arch/alpha/kernel/syscalls/syscall.tbl +++ b/arch/alpha/kernel/syscalls/syscall.tbl @@ -475,6 +475,7 @@ 543 common fspick sys_fspick 544 common pidfd_open sys_pidfd_open # 545 reserved for clone3 +546 common close_range sys_close_range 547 common openat2 sys_openat2 548 common pidfd_getfd sys_pidfd_getfd 549 common faccessat2 sys_faccessat2 diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl index d5cae5ffede0..7e8ee4adf269 100644 --- a/arch/arm/tools/syscall.tbl +++ b/arch/arm/tools/syscall.tbl @@ -449,6 +449,7 @@ 433 common fspick sys_fspick 434 common pidfd_open sys_pidfd_open 435 common clone3 sys_clone3 +436 common close_range sys_close_range 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 6d95d0c8bf2f..c760b9e159f5 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -879,6 +879,8 @@ __SYSCALL(__NR_fspick, sys_fspick) __SYSCALL(__NR_pidfd_open, sys_pidfd_open) #define __NR_clone3 435 __SYSCALL(__NR_clone3, sys_clone3) +#define __NR_close_range 436 +__SYSCALL(__NR_close_range, sys_close_range) #define __NR_openat2 437 __SYSCALL(__NR_openat2, sys_openat2) #define __NR_pidfd_getfd 438 diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl index 49e325b604b3..ced9c83e47c9 100644 --- a/arch/ia64/kernel/syscalls/syscall.tbl +++ b/arch/ia64/kernel/syscalls/syscall.tbl @@ -356,6 +356,7 @@ 433 common fspick sys_fspick 434 common pidfd_open sys_pidfd_open # 435 reserved for clone3 +436 common close_range sys_close_range 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl index f71b1bbcc198..1a4822de7292 100644 --- a/arch/m68k/kernel/syscalls/syscall.tbl +++ b/arch/m68k/kernel/syscalls/syscall.tbl @@ -435,6 +435,7 @@ 433 common fspick sys_fspick 
434 common pidfd_open sys_pidfd_open 435 common clone3 __sys_clone3 +436 common close_range sys_close_range 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl index edacc4561f2b..a3f4be8e7238 100644 --- a/arch/microblaze/kernel/syscalls/syscall.tbl +++ b/arch/microblaze/kernel/syscalls/syscall.tbl @@ -441,6 +441,7 @@ 433 common fspick sys_fspick 434 common pidfd_open sys_pidfd_open 435 common clone3 sys_clone3 +436 common close_range sys_close_range 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl index f777141f5256..501bc09643bd 100644 --- a/arch/mips/kernel/syscalls/syscall_n32.tbl +++ b/arch/mips/kernel/syscalls/syscall_n32.tbl @@ -374,6 +374,7 @@ 433 n32 fspick sys_fspick 434 n32 pidfd_open sys_pidfd_open 435 n32 clone3 __sys_clone3 +436 n32 close_range sys_close_range 437 n32 openat2 sys_openat2 438 n32 pidfd_getfd sys_pidfd_getfd 439 n32 faccessat2 sys_faccessat2 diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl index da8c76394e17..391acbf425a0 100644 --- a/arch/mips/kernel/syscalls/syscall_n64.tbl +++ b/arch/mips/kernel/syscalls/syscall_n64.tbl @@ -350,6 +350,7 @@ 433 n64 fspick sys_fspick 434 n64 pidfd_open sys_pidfd_open 435 n64 clone3 __sys_clone3 +436 n64 close_range sys_close_range 437 n64 openat2 sys_openat2 438 n64 pidfd_getfd sys_pidfd_getfd 439 n64 faccessat2 sys_faccessat2 diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl index 13280625d312..d28f12f641d3 100644 --- a/arch/mips/kernel/syscalls/syscall_o32.tbl +++ b/arch/mips/kernel/syscalls/syscall_o32.tbl @@ -423,6 +423,7 @@ 433 o32 fspick sys_fspick 434 o32 pidfd_open sys_pidfd_open 435 o32 clone3 __sys_clone3 +436 o32 close_range sys_close_range 437 o32 openat2 sys_openat2 438 o32 pidfd_getfd sys_pidfd_getfd 439 o32 faccessat2 sys_faccessat2 diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl index 5a758fa6ec52..5d76b8f15197 100644 --- a/arch/parisc/kernel/syscalls/syscall.tbl +++ b/arch/parisc/kernel/syscalls/syscall.tbl @@ -433,6 +433,7 @@ 433 common fspick sys_fspick 434 common pidfd_open sys_pidfd_open 435 common clone3 sys_clone3_wrapper +436 common close_range sys_close_range 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl index f833a3190822..dd87a782d80e 100644 --- a/arch/powerpc/kernel/syscalls/syscall.tbl +++ b/arch/powerpc/kernel/syscalls/syscall.tbl @@ -525,6 +525,7 @@ 435 32 clone3 ppc_clone3 sys_clone3 435 64 clone3 sys_clone3 435 spu clone3 sys_ni_syscall +436 common close_range sys_close_range 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl index bfdcb7633957..effb5195608c 100644 --- a/arch/s390/kernel/syscalls/syscall.tbl +++ b/arch/s390/kernel/syscalls/syscall.tbl @@ -438,6 +438,7 @@ 433 common fspick sys_fspick sys_fspick 434 common pidfd_open sys_pidfd_open sys_pidfd_open 435 common clone3 sys_clone3 sys_clone3 +436 common close_range sys_close_range 
sys_close_range 437 common openat2 sys_openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 sys_faccessat2 diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl index acc35daa1b79..96848db9659e 100644 --- a/arch/sh/kernel/syscalls/syscall.tbl +++ b/arch/sh/kernel/syscalls/syscall.tbl @@ -438,6 +438,7 @@ 433 common fspick sys_fspick 434 common pidfd_open sys_pidfd_open # 435 reserved for clone3 +436 common close_range sys_close_range 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl index 8004a276cb74..d6447d08c9a1 100644 --- a/arch/sparc/kernel/syscalls/syscall.tbl +++ b/arch/sparc/kernel/syscalls/syscall.tbl @@ -481,6 +481,7 @@ 433 common fspick sys_fspick 434 common pidfd_open sys_pidfd_open # 435 reserved for clone3 +436 common close_range sys_close_range 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index d8f8a1a69ed1..3f0c6546b47c 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl @@ -440,6 +440,7 @@ 433 i386 fspick sys_fspick 434 i386 pidfd_open sys_pidfd_open 435 i386 clone3 sys_clone3 +436 i386 close_range sys_close_range 437 i386 openat2 sys_openat2 438 i386 pidfd_getfd sys_pidfd_getfd 439 i386 faccessat2 sys_faccessat2 diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl index 78847b32e137..f8637c2c863d 100644 --- a/arch/x86/entry/syscalls/syscall_64.tbl +++ b/arch/x86/entry/syscalls/syscall_64.tbl @@ -357,6 +357,7 @@ 433 common fspick sys_fspick 434 common pidfd_open sys_pidfd_open 435 common clone3 sys_clone3 +436 common close_range sys_close_range 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl index 69d0d73876b3..d216ccba42f7 100644 --- a/arch/xtensa/kernel/syscalls/syscall.tbl +++ b/arch/xtensa/kernel/syscalls/syscall.tbl @@ -406,6 +406,7 @@ 433 common fspick sys_fspick 434 common pidfd_open sys_pidfd_open 435 common clone3 sys_clone3 +436 common close_range sys_close_range 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index f4a01305d9a6..31001e3323bc 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -850,6 +850,8 @@ __SYSCALL(__NR_pidfd_open, sys_pidfd_open) #define __NR_clone3 435 __SYSCALL(__NR_clone3, sys_clone3) #endif +#define __NR_close_range 436 +__SYSCALL(__NR_close_range, sys_close_range) #define __NR_openat2 437 __SYSCALL(__NR_openat2, sys_openat2) -- cgit v1.2.3 From 60997c3d45d9a67daf01c56d805ae4fec37e0bd8 Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Wed, 3 Jun 2020 21:48:55 +0200 Subject: close_range: add CLOSE_RANGE_UNSHARE One of the use-cases of close_range() is to drop file descriptors just before execve(). This would usually be expressed in the sequence: unshare(CLONE_FILES); close_range(3, ~0U); as pointed out by Linus it might be desirable to have this be a part of close_range() itself under a new flag CLOSE_RANGE_UNSHARE. 
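In userspace the earlier two-step sequence then collapses into a single call; a minimal sketch, reusing the raw-syscall wrapper shown in the previous patch:

  /* that exec is sensitive: detach from a shared fd table and close
   * everything past stderr in one call
   */
  close_range(3, ~0U, CLOSE_RANGE_UNSHARE);
  execve(....);
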
This expands {dup,unshare)_fd() to take a max_fds argument that indicates the maximum number of file descriptors to copy from the old struct files. When the user requests that all file descriptors are supposed to be closed via close_range(min, max) then we can cap via unshare_fd(min) and hence don't need to do any of the heavy fput() work for everything above min. The patch makes it so that if CLOSE_RANGE_UNSHARE is requested and we do in fact currently share our file descriptor table we create a new private copy. We then close all fds in the requested range and finally after we're done we install the new fd table. Suggested-by: Linus Torvalds Signed-off-by: Christian Brauner --- fs/file.c | 65 +++++++++++++++++++++++++++++++++++----- fs/open.c | 5 +--- include/linux/fdtable.h | 8 +++-- include/uapi/linux/close_range.h | 9 ++++++ kernel/fork.c | 11 +++---- 5 files changed, 79 insertions(+), 19 deletions(-) create mode 100644 include/uapi/linux/close_range.h (limited to 'include') diff --git a/fs/file.c b/fs/file.c index 1b8ff05e8311..340bc9569f9d 100644 --- a/fs/file.c +++ b/fs/file.c @@ -19,6 +19,7 @@ #include #include #include +#include unsigned int sysctl_nr_open __read_mostly = 1024*1024; unsigned int sysctl_nr_open_min = BITS_PER_LONG; @@ -265,12 +266,22 @@ static unsigned int count_open_files(struct fdtable *fdt) return i; } +static unsigned int sane_fdtable_size(struct fdtable *fdt, unsigned int max_fds) +{ + unsigned int count; + + count = count_open_files(fdt); + if (max_fds < NR_OPEN_DEFAULT) + max_fds = NR_OPEN_DEFAULT; + return min(count, max_fds); +} + /* * Allocate a new files structure and copy contents from the * passed in files structure. * errorp will be valid only when the returned files_struct is NULL. */ -struct files_struct *dup_fd(struct files_struct *oldf, int *errorp) +struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int *errorp) { struct files_struct *newf; struct file **old_fds, **new_fds; @@ -297,7 +308,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp) spin_lock(&oldf->file_lock); old_fdt = files_fdtable(oldf); - open_files = count_open_files(old_fdt); + open_files = sane_fdtable_size(old_fdt, max_fds); /* * Check whether we need to allocate a larger fd array and fd set. @@ -328,7 +339,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp) */ spin_lock(&oldf->file_lock); old_fdt = files_fdtable(oldf); - open_files = count_open_files(old_fdt); + open_files = sane_fdtable_size(old_fdt, max_fds); } copy_fd_bitmaps(new_fdt, old_fdt, open_files); @@ -665,32 +676,72 @@ EXPORT_SYMBOL(__close_fd); /* for ksys_close() */ * This closes a range of file descriptors. All file descriptors * from @fd up to and including @max_fd are closed. 
*/ -int __close_range(struct files_struct *files, unsigned fd, unsigned max_fd) +int __close_range(unsigned fd, unsigned max_fd, unsigned int flags) { unsigned int cur_max; + struct task_struct *me = current; + struct files_struct *cur_fds = me->files, *fds = NULL; + + if (flags & ~CLOSE_RANGE_UNSHARE) + return -EINVAL; if (fd > max_fd) return -EINVAL; rcu_read_lock(); - cur_max = files_fdtable(files)->max_fds; + cur_max = files_fdtable(cur_fds)->max_fds; rcu_read_unlock(); /* cap to last valid index into fdtable */ cur_max--; + if (flags & CLOSE_RANGE_UNSHARE) { + int ret; + unsigned int max_unshare_fds = NR_OPEN_MAX; + + /* + * If the requested range is greater than the current maximum, + * we're closing everything so only copy all file descriptors + * beneath the lowest file descriptor. + */ + if (max_fd >= cur_max) + max_unshare_fds = fd; + + ret = unshare_fd(CLONE_FILES, max_unshare_fds, &fds); + if (ret) + return ret; + + /* + * We used to share our file descriptor table, and have now + * created a private one, make sure we're using it below. + */ + if (fds) + swap(cur_fds, fds); + } + max_fd = min(max_fd, cur_max); while (fd <= max_fd) { struct file *file; - file = pick_file(files, fd++); + file = pick_file(cur_fds, fd++); if (!file) continue; - filp_close(file, files); + filp_close(file, cur_fds); cond_resched(); } + if (fds) { + /* + * We're done closing the files we were supposed to. Time to install + * the new file descriptor table and drop the old one. + */ + task_lock(me); + me->files = cur_fds; + task_unlock(me); + put_files_struct(fds); + } + return 0; } diff --git a/fs/open.c b/fs/open.c index 073ea3c45347..5e62f18adc5b 100644 --- a/fs/open.c +++ b/fs/open.c @@ -1324,10 +1324,7 @@ SYSCALL_DEFINE1(close, unsigned int, fd) SYSCALL_DEFINE3(close_range, unsigned int, fd, unsigned int, max_fd, unsigned int, flags) { - if (flags) - return -EINVAL; - - return __close_range(current->files, fd, max_fd); + return __close_range(fd, max_fd, flags); } /* diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index fcd07181a365..a32bf47c593e 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h @@ -22,6 +22,7 @@ * as this is the granularity returned by copy_fdset(). 
*/ #define NR_OPEN_DEFAULT BITS_PER_LONG +#define NR_OPEN_MAX ~0U struct fdtable { unsigned int max_fds; @@ -109,7 +110,7 @@ struct files_struct *get_files_struct(struct task_struct *); void put_files_struct(struct files_struct *fs); void reset_files_struct(struct files_struct *); int unshare_files(struct files_struct **); -struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy; +struct files_struct *dup_fd(struct files_struct *, unsigned, int *) __latent_entropy; void do_close_on_exec(struct files_struct *); int iterate_fd(struct files_struct *, unsigned, int (*)(const void *, struct file *, unsigned), @@ -121,9 +122,10 @@ extern void __fd_install(struct files_struct *files, unsigned int fd, struct file *file); extern int __close_fd(struct files_struct *files, unsigned int fd); -extern int __close_range(struct files_struct *files, unsigned int fd, - unsigned int max_fd); +extern int __close_range(unsigned int fd, unsigned int max_fd, unsigned int flags); extern int __close_fd_get_file(unsigned int fd, struct file **res); +extern int unshare_fd(unsigned long unshare_flags, unsigned int max_fds, + struct files_struct **new_fdp); extern struct kmem_cache *files_cachep; diff --git a/include/uapi/linux/close_range.h b/include/uapi/linux/close_range.h new file mode 100644 index 000000000000..6928a9fdee3c --- /dev/null +++ b/include/uapi/linux/close_range.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_LINUX_CLOSE_RANGE_H +#define _UAPI_LINUX_CLOSE_RANGE_H + +/* Unshare the file descriptor table before closing file descriptors. */ +#define CLOSE_RANGE_UNSHARE (1U << 1) + +#endif /* _UAPI_LINUX_CLOSE_RANGE_H */ + diff --git a/kernel/fork.c b/kernel/fork.c index 142b23645d82..8948121c8454 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1474,7 +1474,7 @@ static int copy_files(unsigned long clone_flags, struct task_struct *tsk) goto out; } - newf = dup_fd(oldf, &error); + newf = dup_fd(oldf, NR_OPEN_MAX, &error); if (!newf) goto out; @@ -2907,14 +2907,15 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp) /* * Unshare file descriptor table if it is being shared */ -static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp) +int unshare_fd(unsigned long unshare_flags, unsigned int max_fds, + struct files_struct **new_fdp) { struct files_struct *fd = current->files; int error = 0; if ((unshare_flags & CLONE_FILES) && (fd && atomic_read(&fd->count) > 1)) { - *new_fdp = dup_fd(fd, &error); + *new_fdp = dup_fd(fd, max_fds, &error); if (!*new_fdp) return error; } @@ -2974,7 +2975,7 @@ int ksys_unshare(unsigned long unshare_flags) err = unshare_fs(unshare_flags, &new_fs); if (err) goto bad_unshare_out; - err = unshare_fd(unshare_flags, &new_fd); + err = unshare_fd(unshare_flags, NR_OPEN_MAX, &new_fd); if (err) goto bad_unshare_cleanup_fs; err = unshare_userns(unshare_flags, &new_cred); @@ -3063,7 +3064,7 @@ int unshare_files(struct files_struct **displaced) struct files_struct *copy = NULL; int error; - error = unshare_fd(CLONE_FILES, ©); + error = unshare_fd(CLONE_FILES, NR_OPEN_MAX, ©); if (error || !copy) { *displaced = NULL; return error; -- cgit v1.2.3 From 47ec7f09bc107720905c96bc37771e4ed1ff0aed Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 13 May 2020 11:47:49 -0700 Subject: dmaengine: cookie bypass for out of order completion The cookie tracking in dmaengine expects all submissions completed in order. 
Some DMA devices like Intel DSA can complete submissions out of order, especially if configured with a work queue sharing multiple DMA engines. Add a status DMA_OUT_OF_ORDER that tx_status can be returned for those DMA devices. The user should use callbacks to track the completion rather than the DMA cookie. This would address the issue of dmatest complaining that descriptors are "busy" when the cookie count goes backwards due to out of order completion. Add DMA_COMPLETION_NO_ORDER DMA capability to allow the driver to flag the device's ability to complete operations out of order. Reported-by: Swathi Kovvuri Signed-off-by: Dave Jiang Tested-by: Swathi Kovvuri Link: https://lore.kernel.org/r/158939557151.20335.12404113976045569870.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- Documentation/driver-api/dmaengine/provider.rst | 19 +++++++++++++++++++ drivers/dma/dmatest.c | 11 ++++++++++- drivers/dma/idxd/dma.c | 3 ++- include/linux/dmaengine.h | 2 ++ 4 files changed, 33 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/Documentation/driver-api/dmaengine/provider.rst b/Documentation/driver-api/dmaengine/provider.rst index 56e5833e8a07..ce68315482b1 100644 --- a/Documentation/driver-api/dmaengine/provider.rst +++ b/Documentation/driver-api/dmaengine/provider.rst @@ -239,6 +239,22 @@ Currently, the types available are: want to transfer a portion of uncompressed data directly to the display to print it +- DMA_COMPLETION_NO_ORDER + + - The device does not support in order completion. + + - The driver should return DMA_OUT_OF_ORDER for device_tx_status if + the device is setting this capability. + + - All cookie tracking and checking API should be treated as invalid if + the device exports this capability. + + - At this point, this is incompatible with polling option for dmatest. + + - If this cap is set, the user is recommended to provide an unique + identifier for each descriptor sent to the DMA device in order to + properly track the completion. + These various types will also affect how the source and destination addresses change over time. @@ -399,6 +415,9 @@ supported. - In the case of a cyclic transfer, it should only take into account the current period. + - Should return DMA_OUT_OF_ORDER if the device does not support in order + completion and is completing the operation out of order. + - This function can be called in an interrupt context. - device_config diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index b175229a4b01..18f10154ba19 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -829,7 +829,10 @@ static int dmatest_func(void *data) result("test timed out", total_tests, src->off, dst->off, len, 0); goto error_unmap_continue; - } else if (status != DMA_COMPLETE) { + } else if (status != DMA_COMPLETE && + !(dma_has_cap(DMA_COMPLETION_NO_ORDER, + dev->cap_mask) && + status == DMA_OUT_OF_ORDER)) { result(status == DMA_ERROR ? 
"completion error status" : "completion busy status", total_tests, src->off, @@ -1007,6 +1010,12 @@ static int dmatest_add_channel(struct dmatest_info *info, dtc->chan = chan; INIT_LIST_HEAD(&dtc->threads); + if (dma_has_cap(DMA_COMPLETION_NO_ORDER, dma_dev->cap_mask) && + info->params.polled) { + info->params.polled = false; + pr_warn("DMA_COMPLETION_NO_ORDER, polled disabled\n"); + } + if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { if (dmatest == 0) { cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY); diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c index c64c1429d160..0c892cbd72e0 100644 --- a/drivers/dma/idxd/dma.c +++ b/drivers/dma/idxd/dma.c @@ -133,7 +133,7 @@ static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { - return dma_cookie_status(dma_chan, cookie, txstate); + return DMA_OUT_OF_ORDER; } /* @@ -174,6 +174,7 @@ int idxd_register_dma_device(struct idxd_device *idxd) INIT_LIST_HEAD(&dma->channels); dma->dev = &idxd->pdev->dev; + dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask); dma->device_release = idxd_dma_release; if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) { diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index e1c03339918f..9f9a13a2c01f 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -39,6 +39,7 @@ enum dma_status { DMA_IN_PROGRESS, DMA_PAUSED, DMA_ERROR, + DMA_OUT_OF_ORDER, }; /** @@ -61,6 +62,7 @@ enum dma_transaction_type { DMA_SLAVE, DMA_CYCLIC, DMA_INTERLEAVE, + DMA_COMPLETION_NO_ORDER, /* last transaction type for creation of the capabilities mask */ DMA_TX_TYPE_END, }; -- cgit v1.2.3 From 2accfa69050c2a0d6fc6106f609208b3e9622b26 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Wed, 17 Jun 2020 07:14:10 -0700 Subject: cpu/speculation: Add prototype for cpu_show_srbds() 0-day is not happy that there is no prototype for cpu_show_srbds(): drivers/base/cpu.c:565:16: error: no previous prototype for 'cpu_show_srbds' Fixes: 7e5b3c267d25 ("x86/speculation: Add Special Register Buffer Data Sampling (SRBDS) mitigation") Reported-by: kernel test robot Signed-off-by: Guenter Roeck Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200617141410.93338-1-linux@roeck-us.net --- include/linux/cpu.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 52692587f7fe..8aa84c052fdf 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -64,6 +64,7 @@ extern ssize_t cpu_show_tsx_async_abort(struct device *dev, char *buf); extern ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf); extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, -- cgit v1.2.3 From c935cd62d3fe985d7f0ebea185d2759e8992e96f Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 17 Jun 2020 17:17:19 +1000 Subject: lockdep: Split header file into lockdep and lockdep_types There is a header file inclusion loop between asm-generic/bug.h and linux/kernel.h. This causes potential compile failurs depending on the which file is included first. One way of breaking this loop is to stop spinlock_types.h from including lockdep.h. This patch splits lockdep.h into two files for this purpose. 
Signed-off-by: Herbert Xu Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Sergey Senozhatsky Reviewed-by: Andy Shevchenko Acked-by: Petr Mladek Acked-by: Steven Rostedt (VMware) Link: https://lkml.kernel.org/r/E1jlSJz-0003hE-8g@fornost.hmeau.com --- include/linux/lockdep.h | 178 +------------------------------------ include/linux/lockdep_types.h | 196 +++++++++++++++++++++++++++++++++++++++++ include/linux/spinlock.h | 1 + include/linux/spinlock_types.h | 2 +- 4 files changed, 200 insertions(+), 177 deletions(-) create mode 100644 include/linux/lockdep_types.h (limited to 'include') diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 8fce5c98a4b0..3b73cf84f77d 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -10,181 +10,20 @@ #ifndef __LINUX_LOCKDEP_H #define __LINUX_LOCKDEP_H +#include + struct task_struct; -struct lockdep_map; /* for sysctl */ extern int prove_locking; extern int lock_stat; -#define MAX_LOCKDEP_SUBCLASSES 8UL - -#include - -enum lockdep_wait_type { - LD_WAIT_INV = 0, /* not checked, catch all */ - - LD_WAIT_FREE, /* wait free, rcu etc.. */ - LD_WAIT_SPIN, /* spin loops, raw_spinlock_t etc.. */ - -#ifdef CONFIG_PROVE_RAW_LOCK_NESTING - LD_WAIT_CONFIG, /* CONFIG_PREEMPT_LOCK, spinlock_t etc.. */ -#else - LD_WAIT_CONFIG = LD_WAIT_SPIN, -#endif - LD_WAIT_SLEEP, /* sleeping locks, mutex_t etc.. */ - - LD_WAIT_MAX, /* must be last */ -}; - #ifdef CONFIG_LOCKDEP #include -#include #include #include -/* - * We'd rather not expose kernel/lockdep_states.h this wide, but we do need - * the total number of states... :-( - */ -#define XXX_LOCK_USAGE_STATES (1+2*4) - -/* - * NR_LOCKDEP_CACHING_CLASSES ... Number of classes - * cached in the instance of lockdep_map - * - * Currently main class (subclass == 0) and signle depth subclass - * are cached in lockdep_map. This optimization is mainly targeting - * on rq->lock. double_rq_lock() acquires this highly competitive with - * single depth. - */ -#define NR_LOCKDEP_CACHING_CLASSES 2 - -/* - * A lockdep key is associated with each lock object. For static locks we use - * the lock address itself as the key. Dynamically allocated lock objects can - * have a statically or dynamically allocated key. Dynamically allocated lock - * keys must be registered before being used and must be unregistered before - * the key memory is freed. - */ -struct lockdep_subclass_key { - char __one_byte; -} __attribute__ ((__packed__)); - -/* hash_entry is used to keep track of dynamically allocated keys. */ -struct lock_class_key { - union { - struct hlist_node hash_entry; - struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; - }; -}; - -extern struct lock_class_key __lockdep_no_validate__; - -struct lock_trace; - -#define LOCKSTAT_POINTS 4 - -/* - * The lock-class itself. The order of the structure members matters. - * reinit_class() zeroes the key member and all subsequent members. - */ -struct lock_class { - /* - * class-hash: - */ - struct hlist_node hash_entry; - - /* - * Entry in all_lock_classes when in use. Entry in free_lock_classes - * when not in use. Instances that are being freed are on one of the - * zapped_classes lists. - */ - struct list_head lock_entry; - - /* - * These fields represent a directed graph of lock dependencies, - * to every node we attach a list of "forward" and a list of - * "backward" graph nodes. 
- */ - struct list_head locks_after, locks_before; - - const struct lockdep_subclass_key *key; - unsigned int subclass; - unsigned int dep_gen_id; - - /* - * IRQ/softirq usage tracking bits: - */ - unsigned long usage_mask; - const struct lock_trace *usage_traces[XXX_LOCK_USAGE_STATES]; - - /* - * Generation counter, when doing certain classes of graph walking, - * to ensure that we check one node only once: - */ - int name_version; - const char *name; - - short wait_type_inner; - short wait_type_outer; - -#ifdef CONFIG_LOCK_STAT - unsigned long contention_point[LOCKSTAT_POINTS]; - unsigned long contending_point[LOCKSTAT_POINTS]; -#endif -} __no_randomize_layout; - -#ifdef CONFIG_LOCK_STAT -struct lock_time { - s64 min; - s64 max; - s64 total; - unsigned long nr; -}; - -enum bounce_type { - bounce_acquired_write, - bounce_acquired_read, - bounce_contended_write, - bounce_contended_read, - nr_bounce_types, - - bounce_acquired = bounce_acquired_write, - bounce_contended = bounce_contended_write, -}; - -struct lock_class_stats { - unsigned long contention_point[LOCKSTAT_POINTS]; - unsigned long contending_point[LOCKSTAT_POINTS]; - struct lock_time read_waittime; - struct lock_time write_waittime; - struct lock_time read_holdtime; - struct lock_time write_holdtime; - unsigned long bounces[nr_bounce_types]; -}; - -struct lock_class_stats lock_stats(struct lock_class *class); -void clear_lock_stats(struct lock_class *class); -#endif - -/* - * Map the lock object (the lock instance) to the lock-class object. - * This is embedded into specific lock instances: - */ -struct lockdep_map { - struct lock_class_key *key; - struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES]; - const char *name; - short wait_type_outer; /* can be taken in this context */ - short wait_type_inner; /* presents this context */ -#ifdef CONFIG_LOCK_STAT - int cpu; - unsigned long ip; -#endif -}; - static inline void lockdep_copy_map(struct lockdep_map *to, struct lockdep_map *from) { @@ -440,8 +279,6 @@ static inline void lock_set_subclass(struct lockdep_map *lock, extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip); -struct pin_cookie { unsigned int val; }; - #define NIL_COOKIE (struct pin_cookie){ .val = 0U, } extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock); @@ -520,10 +357,6 @@ static inline void lockdep_set_selftest_task(struct task_struct *task) # define lockdep_reset() do { debug_locks = 1; } while (0) # define lockdep_free_key_range(start, size) do { } while (0) # define lockdep_sys_exit() do { } while (0) -/* - * The class key takes no space if lockdep is disabled: - */ -struct lock_class_key { }; static inline void lockdep_register_key(struct lock_class_key *key) { @@ -533,11 +366,6 @@ static inline void lockdep_unregister_key(struct lock_class_key *key) { } -/* - * The lockdep_map takes no space if lockdep is disabled: - */ -struct lockdep_map { }; - #define lockdep_depth(tsk) (0) #define lockdep_is_held_type(l, r) (1) @@ -549,8 +377,6 @@ struct lockdep_map { }; #define lockdep_recursing(tsk) (0) -struct pin_cookie { }; - #define NIL_COOKIE (struct pin_cookie){ } #define lockdep_pin_lock(l) ({ struct pin_cookie cookie = { }; cookie; }) diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h new file mode 100644 index 000000000000..7b9350624577 --- /dev/null +++ b/include/linux/lockdep_types.h @@ -0,0 +1,196 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Runtime locking correctness validator + * + * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar + * 
Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra + * + * see Documentation/locking/lockdep-design.rst for more details. + */ +#ifndef __LINUX_LOCKDEP_TYPES_H +#define __LINUX_LOCKDEP_TYPES_H + +#include + +#define MAX_LOCKDEP_SUBCLASSES 8UL + +enum lockdep_wait_type { + LD_WAIT_INV = 0, /* not checked, catch all */ + + LD_WAIT_FREE, /* wait free, rcu etc.. */ + LD_WAIT_SPIN, /* spin loops, raw_spinlock_t etc.. */ + +#ifdef CONFIG_PROVE_RAW_LOCK_NESTING + LD_WAIT_CONFIG, /* CONFIG_PREEMPT_LOCK, spinlock_t etc.. */ +#else + LD_WAIT_CONFIG = LD_WAIT_SPIN, +#endif + LD_WAIT_SLEEP, /* sleeping locks, mutex_t etc.. */ + + LD_WAIT_MAX, /* must be last */ +}; + +#ifdef CONFIG_LOCKDEP + +#include + +/* + * We'd rather not expose kernel/lockdep_states.h this wide, but we do need + * the total number of states... :-( + */ +#define XXX_LOCK_USAGE_STATES (1+2*4) + +/* + * NR_LOCKDEP_CACHING_CLASSES ... Number of classes + * cached in the instance of lockdep_map + * + * Currently main class (subclass == 0) and signle depth subclass + * are cached in lockdep_map. This optimization is mainly targeting + * on rq->lock. double_rq_lock() acquires this highly competitive with + * single depth. + */ +#define NR_LOCKDEP_CACHING_CLASSES 2 + +/* + * A lockdep key is associated with each lock object. For static locks we use + * the lock address itself as the key. Dynamically allocated lock objects can + * have a statically or dynamically allocated key. Dynamically allocated lock + * keys must be registered before being used and must be unregistered before + * the key memory is freed. + */ +struct lockdep_subclass_key { + char __one_byte; +} __attribute__ ((__packed__)); + +/* hash_entry is used to keep track of dynamically allocated keys. */ +struct lock_class_key { + union { + struct hlist_node hash_entry; + struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; + }; +}; + +extern struct lock_class_key __lockdep_no_validate__; + +struct lock_trace; + +#define LOCKSTAT_POINTS 4 + +/* + * The lock-class itself. The order of the structure members matters. + * reinit_class() zeroes the key member and all subsequent members. + */ +struct lock_class { + /* + * class-hash: + */ + struct hlist_node hash_entry; + + /* + * Entry in all_lock_classes when in use. Entry in free_lock_classes + * when not in use. Instances that are being freed are on one of the + * zapped_classes lists. + */ + struct list_head lock_entry; + + /* + * These fields represent a directed graph of lock dependencies, + * to every node we attach a list of "forward" and a list of + * "backward" graph nodes. 
+ */ + struct list_head locks_after, locks_before; + + const struct lockdep_subclass_key *key; + unsigned int subclass; + unsigned int dep_gen_id; + + /* + * IRQ/softirq usage tracking bits: + */ + unsigned long usage_mask; + const struct lock_trace *usage_traces[XXX_LOCK_USAGE_STATES]; + + /* + * Generation counter, when doing certain classes of graph walking, + * to ensure that we check one node only once: + */ + int name_version; + const char *name; + + short wait_type_inner; + short wait_type_outer; + +#ifdef CONFIG_LOCK_STAT + unsigned long contention_point[LOCKSTAT_POINTS]; + unsigned long contending_point[LOCKSTAT_POINTS]; +#endif +} __no_randomize_layout; + +#ifdef CONFIG_LOCK_STAT +struct lock_time { + s64 min; + s64 max; + s64 total; + unsigned long nr; +}; + +enum bounce_type { + bounce_acquired_write, + bounce_acquired_read, + bounce_contended_write, + bounce_contended_read, + nr_bounce_types, + + bounce_acquired = bounce_acquired_write, + bounce_contended = bounce_contended_write, +}; + +struct lock_class_stats { + unsigned long contention_point[LOCKSTAT_POINTS]; + unsigned long contending_point[LOCKSTAT_POINTS]; + struct lock_time read_waittime; + struct lock_time write_waittime; + struct lock_time read_holdtime; + struct lock_time write_holdtime; + unsigned long bounces[nr_bounce_types]; +}; + +struct lock_class_stats lock_stats(struct lock_class *class); +void clear_lock_stats(struct lock_class *class); +#endif + +/* + * Map the lock object (the lock instance) to the lock-class object. + * This is embedded into specific lock instances: + */ +struct lockdep_map { + struct lock_class_key *key; + struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES]; + const char *name; + short wait_type_outer; /* can be taken in this context */ + short wait_type_inner; /* presents this context */ +#ifdef CONFIG_LOCK_STAT + int cpu; + unsigned long ip; +#endif +}; + +struct pin_cookie { unsigned int val; }; + +#else /* !CONFIG_LOCKDEP */ + +/* + * The class key takes no space if lockdep is disabled: + */ +struct lock_class_key { }; + +/* + * The lockdep_map takes no space if lockdep is disabled: + */ +struct lockdep_map { }; + +struct pin_cookie { }; + +#endif /* !LOCKDEP */ + +#endif /* __LINUX_LOCKDEP_TYPES_H */ diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index d3770b3f9d9a..f2f12d746dbd 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -56,6 +56,7 @@ #include #include #include +#include #include #include diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index 6102e6bff3ae..b981caafe8bf 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -15,7 +15,7 @@ # include #endif -#include +#include typedef struct raw_spinlock { arch_spinlock_t raw_lock; -- cgit v1.2.3 From a9d887dc1c60ed67f2271d66560cdcf864c4a578 Mon Sep 17 00:00:00 2001 From: Guru Das Srinagesh Date: Tue, 2 Jun 2020 15:31:16 -0700 Subject: pwm: Convert period and duty cycle to u64 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Because period and duty cycle are defined as ints with units of nanoseconds, the maximum time duration that can be set is limited to ~2.147 seconds. Change their definitions to u64 in the structs of the PWM framework so that higher durations may be set. Also use the right format specifiers in debug prints in both core.c, pwm-stm32-lp.c as well as video/fbdev/ssd1307fb.c. 
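The ~2.147 second figure comes from counting nanoseconds in a signed 32-bit integer. A small sketch of the arithmetic and of the %llu specifier the converted fields now require (illustrative only, not part of the patch; the function name is made up):

    #include <linux/printk.h>
    #include <linux/types.h>

    static void pwm_period_limit_demo(void)
    {
            /* 2^31 - 1 ns = 2147483647 ns, roughly 2.147 s: the old ceiling */
            int old_max_ns = 2147483647;
            /* a 5 second period, representable once the fields are u64 */
            u64 period_ns = 5ULL * 1000000000ULL;

            pr_info("old ceiling: %d ns\n", old_max_ns);
            pr_info("requested period: %llu ns\n", period_ns);
    }

Printing the widened fields with the old %u/%d specifiers would now be wrong, which is why the debug prints are updated together with the type change.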
Reported-by: kbuild test robot Signed-off-by: Guru Das Srinagesh Acked-by: Uwe Kleine-König Signed-off-by: Thierry Reding --- drivers/pwm/core.c | 14 +++++++------- drivers/pwm/pwm-stm32-lp.c | 2 +- drivers/pwm/sysfs.c | 8 ++++---- drivers/video/fbdev/ssd1307fb.c | 2 +- include/linux/pwm.h | 12 ++++++------ 5 files changed, 19 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 004b2ea9b5fd..276e939a5684 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -510,12 +510,12 @@ static void pwm_apply_state_debug(struct pwm_device *pwm, last->period > s2.period && last->period <= state->period) dev_warn(chip->dev, - ".apply didn't pick the best available period (requested: %u, applied: %u, possible: %u)\n", + ".apply didn't pick the best available period (requested: %llu, applied: %llu, possible: %llu)\n", state->period, s2.period, last->period); if (state->enabled && state->period < s2.period) dev_warn(chip->dev, - ".apply is supposed to round down period (requested: %u, applied: %u)\n", + ".apply is supposed to round down period (requested: %llu, applied: %llu)\n", state->period, s2.period); if (state->enabled && @@ -524,14 +524,14 @@ static void pwm_apply_state_debug(struct pwm_device *pwm, last->duty_cycle > s2.duty_cycle && last->duty_cycle <= state->duty_cycle) dev_warn(chip->dev, - ".apply didn't pick the best available duty cycle (requested: %u/%u, applied: %u/%u, possible: %u/%u)\n", + ".apply didn't pick the best available duty cycle (requested: %llu/%llu, applied: %llu/%llu, possible: %llu/%llu)\n", state->duty_cycle, state->period, s2.duty_cycle, s2.period, last->duty_cycle, last->period); if (state->enabled && state->duty_cycle < s2.duty_cycle) dev_warn(chip->dev, - ".apply is supposed to round down duty_cycle (requested: %u/%u, applied: %u/%u)\n", + ".apply is supposed to round down duty_cycle (requested: %llu/%llu, applied: %llu/%llu)\n", state->duty_cycle, state->period, s2.duty_cycle, s2.period); @@ -558,7 +558,7 @@ static void pwm_apply_state_debug(struct pwm_device *pwm, (s1.enabled && s1.period != last->period) || (s1.enabled && s1.duty_cycle != last->duty_cycle)) { dev_err(chip->dev, - ".apply is not idempotent (ena=%d pol=%d %u/%u) -> (ena=%d pol=%d %u/%u)\n", + ".apply is not idempotent (ena=%d pol=%d %llu/%llu) -> (ena=%d pol=%d %llu/%llu)\n", s1.enabled, s1.polarity, s1.duty_cycle, s1.period, last->enabled, last->polarity, last->duty_cycle, last->period); @@ -1284,8 +1284,8 @@ static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s) if (state.enabled) seq_puts(s, " enabled"); - seq_printf(s, " period: %u ns", state.period); - seq_printf(s, " duty: %u ns", state.duty_cycle); + seq_printf(s, " period: %llu ns", state.period); + seq_printf(s, " duty: %llu ns", state.duty_cycle); seq_printf(s, " polarity: %s", state.polarity ? "inverse" : "normal"); diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c index 67fca62524dc..134c14621ee0 100644 --- a/drivers/pwm/pwm-stm32-lp.c +++ b/drivers/pwm/pwm-stm32-lp.c @@ -61,7 +61,7 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm, do_div(div, NSEC_PER_SEC); if (!div) { /* Clock is too slow to achieve requested period. 
*/ - dev_dbg(priv->chip.dev, "Can't reach %u ns\n", state->period); + dev_dbg(priv->chip.dev, "Can't reach %llu ns\n", state->period); return -EINVAL; } diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c index 2389b8669846..449dbc0f49ed 100644 --- a/drivers/pwm/sysfs.c +++ b/drivers/pwm/sysfs.c @@ -42,7 +42,7 @@ static ssize_t period_show(struct device *child, pwm_get_state(pwm, &state); - return sprintf(buf, "%u\n", state.period); + return sprintf(buf, "%llu\n", state.period); } static ssize_t period_store(struct device *child, @@ -52,10 +52,10 @@ static ssize_t period_store(struct device *child, struct pwm_export *export = child_to_pwm_export(child); struct pwm_device *pwm = export->pwm; struct pwm_state state; - unsigned int val; + u64 val; int ret; - ret = kstrtouint(buf, 0, &val); + ret = kstrtou64(buf, 0, &val); if (ret) return ret; @@ -77,7 +77,7 @@ static ssize_t duty_cycle_show(struct device *child, pwm_get_state(pwm, &state); - return sprintf(buf, "%u\n", state.duty_cycle); + return sprintf(buf, "%llu\n", state.duty_cycle); } static ssize_t duty_cycle_store(struct device *child, diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c index 8e06ba912d60..09425ec317ba 100644 --- a/drivers/video/fbdev/ssd1307fb.c +++ b/drivers/video/fbdev/ssd1307fb.c @@ -312,7 +312,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par) /* Enable the PWM */ pwm_enable(par->pwm); - dev_dbg(&par->client->dev, "Using PWM%d with a %dns period.\n", + dev_dbg(&par->client->dev, "Using PWM%d with a %lluns period.\n", par->pwm->pwm, pwm_get_period(par->pwm)); } diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 2635b2a55090..a13ff383fa1d 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -39,7 +39,7 @@ enum pwm_polarity { * current PWM hardware state. */ struct pwm_args { - unsigned int period; + u64 period; enum pwm_polarity polarity; }; @@ -56,8 +56,8 @@ enum { * @enabled: PWM enabled status */ struct pwm_state { - unsigned int period; - unsigned int duty_cycle; + u64 period; + u64 duty_cycle; enum pwm_polarity polarity; bool enabled; }; @@ -107,13 +107,13 @@ static inline bool pwm_is_enabled(const struct pwm_device *pwm) return state.enabled; } -static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period) +static inline void pwm_set_period(struct pwm_device *pwm, u64 period) { if (pwm) pwm->state.period = period; } -static inline unsigned int pwm_get_period(const struct pwm_device *pwm) +static inline u64 pwm_get_period(const struct pwm_device *pwm) { struct pwm_state state; @@ -128,7 +128,7 @@ static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty) pwm->state.duty_cycle = duty; } -static inline unsigned int pwm_get_duty_cycle(const struct pwm_device *pwm) +static inline u64 pwm_get_duty_cycle(const struct pwm_device *pwm) { struct pwm_state state; -- cgit v1.2.3 From 7d34ca3854845398cb36866d14bbdc43dcec1ad0 Mon Sep 17 00:00:00 2001 From: Saravana Kannan Date: Tue, 9 Jun 2020 18:19:33 -0700 Subject: driver core: Add device_is_dependent() to linux/device.h DT implementation of fw_devlink needs this function to detect cycles. So make it available. 
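A hypothetical caller sketch (not part of this patch) of how the exported helper can flag that a new dependency would close a cycle; per its kerneldoc, device_is_dependent(dev, target) returns 1 when @target already depends on @dev:

    #include <linux/device.h>

    static bool link_would_create_cycle(struct device *consumer,
                                        struct device *supplier)
    {
            /* if the supplier already depends on the consumer, recording
             * "consumer depends on supplier" would form a loop */
            return device_is_dependent(consumer, supplier);
    }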
Signed-off-by: Saravana Kannan Tested-by: John Stultz Signed-off-by: Rob Herring --- drivers/base/core.c | 2 +- include/linux/device.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/base/core.c b/drivers/base/core.c index 67d39a90b45c..320a0e1d628a 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -121,7 +121,7 @@ int device_links_read_lock_held(void) * Check if @target depends on @dev or any device dependent on it (its child or * its consumer etc). Return 1 if that is the case or 0 otherwise. */ -static int device_is_dependent(struct device *dev, void *target) +int device_is_dependent(struct device *dev, void *target) { struct device_link *link; int ret; diff --git a/include/linux/device.h b/include/linux/device.h index 15460a5ac024..33cece5d9a4c 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -829,6 +829,7 @@ extern int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid); extern const char *device_get_devnode(struct device *dev, umode_t *mode, kuid_t *uid, kgid_t *gid, const char **tmp); +extern int device_is_dependent(struct device *dev, void *target); static inline bool device_supports_offline(struct device *dev) { -- cgit v1.2.3 From f3c802a1f30013f8f723b62d7fa49eb9e991da23 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 30 May 2020 00:23:49 +1000 Subject: crypto: algif_aead - Only wake up when ctx->more is zero AEAD does not support partial requests so we must not wake up while ctx->more is set. In order to distinguish between the case of no data sent yet and a zero-length request, a new init flag has been added to ctx. SKCIPHER has also been modified to ensure that at least a block of data is available if there is more data to come. Fixes: 2d97591ef43d ("crypto: af_alg - consolidation of...") Signed-off-by: Herbert Xu --- crypto/af_alg.c | 11 ++++++++--- crypto/algif_aead.c | 4 ++-- crypto/algif_skcipher.c | 4 ++-- include/crypto/if_alg.h | 4 +++- 4 files changed, 15 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 28fc323e3fe3..9fcb91ea10c4 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -635,6 +635,7 @@ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, if (!ctx->used) ctx->merge = 0; + ctx->init = ctx->more; } EXPORT_SYMBOL_GPL(af_alg_pull_tsgl); @@ -734,9 +735,10 @@ EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup); * * @sk socket of connection to user space * @flags If MSG_DONTWAIT is set, then only report if function would sleep + * @min Set to minimum request size if partial requests are allowed. 
* @return 0 when writable memory is available, < 0 upon error */ -int af_alg_wait_for_data(struct sock *sk, unsigned flags) +int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct alg_sock *ask = alg_sk(sk); @@ -754,7 +756,9 @@ int af_alg_wait_for_data(struct sock *sk, unsigned flags) if (signal_pending(current)) break; timeout = MAX_SCHEDULE_TIMEOUT; - if (sk_wait_event(sk, &timeout, (ctx->used || !ctx->more), + if (sk_wait_event(sk, &timeout, + ctx->init && (!ctx->more || + (min && ctx->used >= min)), &wait)) { err = 0; break; @@ -843,7 +847,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, } lock_sock(sk); - if (!ctx->more && ctx->used) { + if (ctx->init && (init || !ctx->more)) { err = -EINVAL; goto unlock; } @@ -854,6 +858,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, memcpy(ctx->iv, con.iv->iv, ivsize); ctx->aead_assoclen = con.aead_assoclen; + ctx->init = true; } while (size) { diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 0ae000a61c7f..d48d2156e621 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -106,8 +106,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t usedpages = 0; /* [in] RX bufs to be used from user */ size_t processed = 0; /* [in] TX bufs to be consumed */ - if (!ctx->used) { - err = af_alg_wait_for_data(sk, flags); + if (!ctx->init || ctx->more) { + err = af_alg_wait_for_data(sk, flags, 0); if (err) return err; } diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index ec5567c87a6d..a51ba22fef58 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -61,8 +61,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, int err = 0; size_t len = 0; - if (!ctx->used) { - err = af_alg_wait_for_data(sk, flags); + if (!ctx->init || (ctx->more && ctx->used < bs)) { + err = af_alg_wait_for_data(sk, flags, bs); if (err) return err; } diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index 088c1ded2714..ee6412314f8f 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -135,6 +135,7 @@ struct af_alg_async_req { * SG? * @enc: Cryptographic operation to be performed when * recvmsg is invoked. + * @init: True if metadata has been sent. * @len: Length of memory allocated for this data structure. */ struct af_alg_ctx { @@ -151,6 +152,7 @@ struct af_alg_ctx { bool more; bool merge; bool enc; + bool init; unsigned int len; }; @@ -226,7 +228,7 @@ unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset); void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, size_t dst_offset); void af_alg_wmem_wakeup(struct sock *sk); -int af_alg_wait_for_data(struct sock *sk, unsigned flags); +int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min); int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, unsigned int ivsize); ssize_t af_alg_sendpage(struct socket *sock, struct page *page, -- cgit v1.2.3 From 03cc8353c2244c8b790c3c81a0f1532d34a9d738 Mon Sep 17 00:00:00 2001 From: Rob Gill Date: Mon, 1 Jun 2020 21:17:56 +0000 Subject: USB: core: additional Device Classes to debug/usb/devices Several newer USB Device classes are not presently reported individually at /sys/kernel/debug/usb/devices, (reported as "unk."). 
This patch adds the following classes: 0fh (Personal Healthcare devices), 10h (USB Type-C combined Audio/Video devices) 11h (USB billboard), 12h (USB Type-C Bridge). As defined at [https://www.usb.org/defined-class-codes] Corresponding classes defined in include/linux/usb/ch9.h. Signed-off-by: Rob Gill Link: https://lore.kernel.org/r/20200601211749.6878-1-rrobgill@protonmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/devices.c | 4 ++++ include/uapi/linux/usb/ch9.h | 4 ++++ 2 files changed, 8 insertions(+) (limited to 'include') diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c index 94b6fa6e585e..696b2b692b83 100644 --- a/drivers/usb/core/devices.c +++ b/drivers/usb/core/devices.c @@ -133,6 +133,10 @@ static const struct class_info clas_info[] = { {USB_CLASS_CSCID, "scard"}, {USB_CLASS_CONTENT_SEC, "c-sec"}, {USB_CLASS_VIDEO, "video"}, + {USB_CLASS_PERSONAL_HEALTHCARE, "perhc"}, + {USB_CLASS_AUDIO_VIDEO, "av"}, + {USB_CLASS_BILLBOARD, "blbrd"}, + {USB_CLASS_USB_TYPE_C_BRIDGE, "bridg"}, {USB_CLASS_WIRELESS_CONTROLLER, "wlcon"}, {USB_CLASS_MISC, "misc"}, {USB_CLASS_APP_SPEC, "app."}, diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index 2b623f36af6b..456ab0c2b586 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -326,6 +326,10 @@ struct usb_device_descriptor { #define USB_CLASS_CONTENT_SEC 0x0d /* content security */ #define USB_CLASS_VIDEO 0x0e #define USB_CLASS_WIRELESS_CONTROLLER 0xe0 +#define USB_CLASS_PERSONAL_HEALTHCARE 0x0f +#define USB_CLASS_AUDIO_VIDEO 0x10 +#define USB_CLASS_BILLBOARD 0x11 +#define USB_CLASS_USB_TYPE_C_BRIDGE 0x12 #define USB_CLASS_MISC 0xef #define USB_CLASS_APP_SPEC 0xfe #define USB_CLASS_VENDOR_SPEC 0xff -- cgit v1.2.3 From 8baaa4038edbff67f318574e233e9e7e43808230 Mon Sep 17 00:00:00 2001 From: Abhishek Pandit-Subedi Date: Wed, 17 Jun 2020 16:39:08 +0200 Subject: Bluetooth: Add bdaddr_list_with_flags for classic whitelist In order to more easily add device flags to classic devices, create a new type of bdaddr_list that supports setting flags. 
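A usage sketch built on the helpers added below (not part of this patch; locking around the hdev lists is elided and the flag value is only a placeholder bit):

    #include <net/bluetooth/hci_core.h>

    static int track_bredr_device(struct hci_dev *hdev, bdaddr_t *bdaddr)
    {
            struct bdaddr_list_with_flags *entry;
            int err;

            /* add the device with no flags set initially */
            err = hci_bdaddr_list_add_with_flags(&hdev->whitelist, bdaddr,
                                                 BDADDR_BREDR, 0);
            if (err)
                    return err;

            /* per-device flags can be updated in place via a lookup */
            entry = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist, bdaddr,
                                                      BDADDR_BREDR);
            if (entry)
                    entry->current_flags |= 1U << 0;

            return 0;
    }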
Signed-off-by: Abhishek Pandit-Subedi Reviewed-by: Alain Michaud Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/hci_core.h | 18 +++++++++++-- net/bluetooth/hci_core.c | 58 ++++++++++++++++++++++++++++++++++++++++ net/bluetooth/hci_event.c | 8 +++--- net/bluetooth/mgmt.c | 5 ++-- 4 files changed, 81 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 0d5dbb6cb5a0..95a3935325bb 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -136,6 +136,13 @@ struct bdaddr_list_with_irk { u8 local_irk[16]; }; +struct bdaddr_list_with_flags { + struct list_head list; + bdaddr_t bdaddr; + u8 bdaddr_type; + u32 current_flags; +}; + struct bt_uuid { struct list_head list; u8 uuid[16]; @@ -1169,12 +1176,19 @@ struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *list, struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk( struct list_head *list, bdaddr_t *bdaddr, u8 type); +struct bdaddr_list_with_flags * +hci_bdaddr_list_lookup_with_flags(struct list_head *list, bdaddr_t *bdaddr, + u8 type); int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type); int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr, - u8 type, u8 *peer_irk, u8 *local_irk); + u8 type, u8 *peer_irk, u8 *local_irk); +int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr, + u8 type, u32 flags); int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type); int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr, - u8 type); + u8 type); +int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr, + u8 type); void hci_bdaddr_list_clear(struct list_head *list); struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 4f1052a7c488..8a471bec2731 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3023,6 +3023,20 @@ struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk( return NULL; } +struct bdaddr_list_with_flags * +hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list, + bdaddr_t *bdaddr, u8 type) +{ + struct bdaddr_list_with_flags *b; + + list_for_each_entry(b, bdaddr_list, list) { + if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) + return b; + } + + return NULL; +} + void hci_bdaddr_list_clear(struct list_head *bdaddr_list) { struct bdaddr_list *b, *n; @@ -3084,6 +3098,30 @@ int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr, return 0; } +int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr, + u8 type, u32 flags) +{ + struct bdaddr_list_with_flags *entry; + + if (!bacmp(bdaddr, BDADDR_ANY)) + return -EBADF; + + if (hci_bdaddr_list_lookup(list, bdaddr, type)) + return -EEXIST; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + bacpy(&entry->bdaddr, bdaddr); + entry->bdaddr_type = type; + entry->current_flags = flags; + + list_add(&entry->list, list); + + return 0; +} + int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type) { struct bdaddr_list *entry; @@ -3123,6 +3161,26 @@ int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr, return 0; } +int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr, + u8 type) +{ + struct bdaddr_list_with_flags *entry; + + if (!bacmp(bdaddr, BDADDR_ANY)) { + 
hci_bdaddr_list_clear(list); + return 0; + } + + entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type); + if (!entry) + return -ENOENT; + + list_del(&entry->list); + kfree(entry); + + return 0; +} + /* This function requires the caller holds hdev->lock */ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index cfeaee347db3..8981954ff4c4 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -2697,10 +2697,10 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) */ if (hci_dev_test_flag(hdev, HCI_MGMT) && !hci_dev_test_flag(hdev, HCI_CONNECTABLE) && - !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr, - BDADDR_BREDR)) { - hci_reject_conn(hdev, &ev->bdaddr); - return; + !hci_bdaddr_list_lookup_with_flags(&hdev->whitelist, &ev->bdaddr, + BDADDR_BREDR)) { + hci_reject_conn(hdev, &ev->bdaddr); + return; } /* Connection accepted */ diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index ecfdfc4df486..d0d0fa832c8a 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -6000,8 +6000,9 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, goto unlock; } - err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr, - cp->addr.type); + err = hci_bdaddr_list_add_with_flags(&hdev->whitelist, + &cp->addr.bdaddr, + cp->addr.type, 0); if (err) goto unlock; -- cgit v1.2.3 From 7a92906f841db46a91df0179459ad8b2052f2e54 Mon Sep 17 00:00:00 2001 From: Abhishek Pandit-Subedi Date: Wed, 17 Jun 2020 16:39:09 +0200 Subject: Bluetooth: Replace wakeable list with flag Since the classic device list now supports flags, convert the wakeable list into a flag on the existing device list. 
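With the separate list gone, "can this device wake the host?" becomes a per-entry bit test. A minimal sketch (not part of the patch; the function name is made up) using the hci_conn_test_flag() helper added here:

    #include <net/bluetooth/hci_core.h>

    static bool entry_can_wake_host(const struct bdaddr_list_with_flags *b)
    {
            return hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
                                      b->current_flags);
    }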
Signed-off-by: Abhishek Pandit-Subedi Reviewed-by: Alain Michaud Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/hci_core.h | 11 ++++++++++- net/bluetooth/hci_core.c | 1 - net/bluetooth/hci_request.c | 12 ++++++++---- 3 files changed, 18 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 95a3935325bb..0643c737ba85 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -143,6 +143,16 @@ struct bdaddr_list_with_flags { u32 current_flags; }; +enum hci_conn_flags { + HCI_CONN_FLAG_REMOTE_WAKEUP, + HCI_CONN_FLAG_MAX +}; + +#define hci_conn_test_flag(nr, flags) ((flags) & (1U << nr)) + +/* Make sure number of flags doesn't exceed sizeof(current_flags) */ +static_assert(HCI_CONN_FLAG_MAX < 32); + struct bt_uuid { struct list_head list; u8 uuid[16]; @@ -463,7 +473,6 @@ struct hci_dev { struct list_head mgmt_pending; struct list_head blacklist; struct list_head whitelist; - struct list_head wakeable; struct list_head uuids; struct list_head link_keys; struct list_head long_term_keys; diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 8a471bec2731..8e01afb2ee8c 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3499,7 +3499,6 @@ struct hci_dev *hci_alloc_dev(void) INIT_LIST_HEAD(&hdev->mgmt_pending); INIT_LIST_HEAD(&hdev->blacklist); INIT_LIST_HEAD(&hdev->whitelist); - INIT_LIST_HEAD(&hdev->wakeable); INIT_LIST_HEAD(&hdev->uuids); INIT_LIST_HEAD(&hdev->link_keys); INIT_LIST_HEAD(&hdev->long_term_keys); diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index a7f572ad38ef..a5b53d3ea508 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -968,15 +968,19 @@ static void hci_req_clear_event_filter(struct hci_request *req) static void hci_req_set_event_filter(struct hci_request *req) { - struct bdaddr_list *b; + struct bdaddr_list_with_flags *b; struct hci_cp_set_event_filter f; struct hci_dev *hdev = req->hdev; - u8 scan; + u8 scan = SCAN_DISABLED; /* Always clear event filter when starting */ hci_req_clear_event_filter(req); - list_for_each_entry(b, &hdev->wakeable, list) { + list_for_each_entry(b, &hdev->whitelist, list) { + if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP, + b->current_flags)) + continue; + memset(&f, 0, sizeof(f)); bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr); f.flt_type = HCI_FLT_CONN_SETUP; @@ -985,9 +989,9 @@ static void hci_req_set_event_filter(struct hci_request *req) bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr); hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f); + scan = SCAN_PAGE; } - scan = !list_empty(&hdev->wakeable) ? SCAN_PAGE : SCAN_DISABLED; hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); } -- cgit v1.2.3 From a1fc7535ec34a5904abe93dd42a6ed7e31c36717 Mon Sep 17 00:00:00 2001 From: Abhishek Pandit-Subedi Date: Wed, 17 Jun 2020 16:39:10 +0200 Subject: Bluetooth: Replace wakeable in hci_conn_params Replace the wakeable boolean with flags in hci_conn_params and all users of this boolean. This will be used by the get/set device flags mgmt op. 
Signed-off-by: Abhishek Pandit-Subedi Reviewed-by: Alain Michaud Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/hci_core.h | 2 +- net/bluetooth/hci_request.c | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 0643c737ba85..6f88e5d81bd2 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -660,7 +660,7 @@ struct hci_conn_params { struct hci_conn *conn; bool explicit_connect; - bool wakeable; + u32 current_flags; }; extern struct list_head hci_dev_list; diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index a5b53d3ea508..eee9c007a5fb 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -710,7 +710,8 @@ static int add_to_white_list(struct hci_request *req, } /* During suspend, only wakeable devices can be in whitelist */ - if (hdev->suspended && !params->wakeable) + if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP, + params->current_flags)) return 0; *num_entries += 1; -- cgit v1.2.3 From 4c54bf2b093bb2ae95e756342646d868e8101cb4 Mon Sep 17 00:00:00 2001 From: Abhishek Pandit-Subedi Date: Wed, 17 Jun 2020 16:39:11 +0200 Subject: Bluetooth: Add get/set device flags mgmt op Add the get device flags and set device flags mgmt ops and the device flags changed event. Their behavior is described in detail in mgmt-api.txt in bluez. Sample btmon trace when a HID device is added (trimmed to 75 chars): @ MGMT Command: Unknown (0x0050) plen 11 {0x0001} [hci0] 18:06:14.98 90 c5 13 cd f3 cd 02 01 00 00 00 ........... @ MGMT Event: Unknown (0x002a) plen 15 {0x0004} [hci0] 18:06:14.98 90 c5 13 cd f3 cd 02 01 00 00 00 01 00 00 00 ............... @ MGMT Event: Unknown (0x002a) plen 15 {0x0003} [hci0] 18:06:14.98 90 c5 13 cd f3 cd 02 01 00 00 00 01 00 00 00 ............... @ MGMT Event: Unknown (0x002a) plen 15 {0x0002} [hci0] 18:06:14.98 90 c5 13 cd f3 cd 02 01 00 00 00 01 00 00 00 ............... @ MGMT Event: Command Compl.. (0x0001) plen 10 {0x0001} [hci0] 18:06:14.98 Unknown (0x0050) plen 7 Status: Success (0x00) 90 c5 13 cd f3 cd 02 ....... @ MGMT Command: Add Device (0x0033) plen 8 {0x0001} [hci0] 18:06:14.98 LE Address: CD:F3:CD:13:C5:90 (Static) Action: Auto-connect remote device (0x02) @ MGMT Event: Device Added (0x001a) plen 8 {0x0004} [hci0] 18:06:14.98 LE Address: CD:F3:CD:13:C5:90 (Static) Action: Auto-connect remote device (0x02) @ MGMT Event: Device Added (0x001a) plen 8 {0x0003} [hci0] 18:06:14.98 LE Address: CD:F3:CD:13:C5:90 (Static) Action: Auto-connect remote device (0x02) @ MGMT Event: Device Added (0x001a) plen 8 {0x0002} [hci0] 18:06:14.98 LE Address: CD:F3:CD:13:C5:90 (Static) Action: Auto-connect remote device (0x02) @ MGMT Event: Unknown (0x002a) plen 15 {0x0004} [hci0] 18:06:14.98 90 c5 13 cd f3 cd 02 01 00 00 00 01 00 00 00 ............... @ MGMT Event: Unknown (0x002a) plen 15 {0x0003} [hci0] 18:06:14.98 90 c5 13 cd f3 cd 02 01 00 00 00 01 00 00 00 ............... @ MGMT Event: Unknown (0x002a) plen 15 {0x0002} [hci0] 18:06:14.98 90 c5 13 cd f3 cd 02 01 00 00 00 01 00 00 00 ............... @ MGMT Event: Unknown (0x002a) plen 15 {0x0001} [hci0] 18:06:14.98 90 c5 13 cd f3 cd 02 01 00 00 00 01 00 00 00 ............... 
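For reference, one of the 15-byte Device Flags Changed payloads above decoded against the mgmt_ev_device_flags_changed layout added by this patch (multi-byte fields are little endian):

    90 c5 13 cd f3 cd   addr.bdaddr      CD:F3:CD:13:C5:90
    02                  addr.type        LE random (static) address
    01 00 00 00         supported_flags  0x00000001
    01 00 00 00         current_flags    0x00000001

With only HCI_CONN_FLAG_REMOTE_WAKEUP defined so far, supported_flags is 0x1, and the Set Device Flags command at the top of the trace sets that single bit.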
Signed-off-by: Abhishek Pandit-Subedi Reviewed-by: Alain Michaud Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/mgmt.h | 28 ++++++++++ net/bluetooth/mgmt.c | 128 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 156 insertions(+) (limited to 'include') diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index e515288f328f..8e47b0c5fe52 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -720,6 +720,27 @@ struct mgmt_rp_set_exp_feature { #define MGMT_OP_SET_DEF_RUNTIME_CONFIG 0x004e #define MGMT_SET_DEF_RUNTIME_CONFIG_SIZE 0 +#define MGMT_OP_GET_DEVICE_FLAGS 0x004F +#define MGMT_GET_DEVICE_FLAGS_SIZE 7 +struct mgmt_cp_get_device_flags { + struct mgmt_addr_info addr; +} __packed; +struct mgmt_rp_get_device_flags { + struct mgmt_addr_info addr; + __le32 supported_flags; + __le32 current_flags; +} __packed; + +#define MGMT_OP_SET_DEVICE_FLAGS 0x0050 +#define MGMT_SET_DEVICE_FLAGS_SIZE 11 +struct mgmt_cp_set_device_flags { + struct mgmt_addr_info addr; + __le32 current_flags; +} __packed; +struct mgmt_rp_set_device_flags { + struct mgmt_addr_info addr; +} __packed; + #define MGMT_EV_CMD_COMPLETE 0x0001 struct mgmt_ev_cmd_complete { __le16 opcode; @@ -951,3 +972,10 @@ struct mgmt_ev_exp_feature_changed { __u8 uuid[16]; __le32 flags; } __packed; + +#define MGMT_EV_DEVICE_FLAGS_CHANGED 0x002a +struct mgmt_ev_device_flags_changed { + struct mgmt_addr_info addr; + __le32 supported_flags; + __le32 current_flags; +} __packed; diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index d0d0fa832c8a..e409ff48e8e6 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -116,6 +116,8 @@ static const u16 mgmt_commands[] = { MGMT_OP_SET_DEF_SYSTEM_CONFIG, MGMT_OP_READ_DEF_RUNTIME_CONFIG, MGMT_OP_SET_DEF_RUNTIME_CONFIG, + MGMT_OP_GET_DEVICE_FLAGS, + MGMT_OP_SET_DEVICE_FLAGS, }; static const u16 mgmt_events[] = { @@ -156,6 +158,7 @@ static const u16 mgmt_events[] = { MGMT_EV_EXT_INFO_CHANGED, MGMT_EV_PHY_CONFIGURATION_CHANGED, MGMT_EV_EXP_FEATURE_CHANGED, + MGMT_EV_DEVICE_FLAGS_CHANGED, }; static const u16 mgmt_untrusted_commands[] = { @@ -3856,6 +3859,120 @@ static int set_exp_feature(struct sock *sk, struct hci_dev *hdev, MGMT_STATUS_NOT_SUPPORTED); } +#define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1) + +static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, + u16 data_len) +{ + struct mgmt_cp_get_device_flags *cp = data; + struct mgmt_rp_get_device_flags rp; + struct bdaddr_list_with_flags *br_params; + struct hci_conn_params *params; + u32 supported_flags = SUPPORTED_DEVICE_FLAGS(); + u32 current_flags = 0; + u8 status = MGMT_STATUS_INVALID_PARAMS; + + bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n", + &cp->addr.bdaddr, cp->addr.type); + + if (cp->addr.type == BDADDR_BREDR) { + br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist, + &cp->addr.bdaddr, + cp->addr.type); + if (!br_params) + goto done; + + current_flags = br_params->current_flags; + } else { + params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, + le_addr_type(cp->addr.type)); + + if (!params) + goto done; + + current_flags = params->current_flags; + } + + bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); + rp.addr.type = cp->addr.type; + rp.supported_flags = cpu_to_le32(supported_flags); + rp.current_flags = cpu_to_le32(current_flags); + + status = MGMT_STATUS_SUCCESS; + +done: + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status, + &rp, sizeof(rp)); +} + +static 
void device_flags_changed(struct sock *sk, struct hci_dev *hdev, + bdaddr_t *bdaddr, u8 bdaddr_type, + u32 supported_flags, u32 current_flags) +{ + struct mgmt_ev_device_flags_changed ev; + + bacpy(&ev.addr.bdaddr, bdaddr); + ev.addr.type = bdaddr_type; + ev.supported_flags = cpu_to_le32(supported_flags); + ev.current_flags = cpu_to_le32(current_flags); + + mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk); +} + +static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct mgmt_cp_set_device_flags *cp = data; + struct bdaddr_list_with_flags *br_params; + struct hci_conn_params *params; + u8 status = MGMT_STATUS_INVALID_PARAMS; + u32 supported_flags = SUPPORTED_DEVICE_FLAGS(); + u32 current_flags = __le32_to_cpu(cp->current_flags); + + bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x", + &cp->addr.bdaddr, cp->addr.type, + __le32_to_cpu(current_flags)); + + if ((supported_flags | current_flags) != supported_flags) { + bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)", + current_flags, supported_flags); + goto done; + } + + if (cp->addr.type == BDADDR_BREDR) { + br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist, + &cp->addr.bdaddr, + cp->addr.type); + + if (br_params) { + br_params->current_flags = current_flags; + status = MGMT_STATUS_SUCCESS; + } else { + bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)", + &cp->addr.bdaddr, cp->addr.type); + } + } else { + params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, + le_addr_type(cp->addr.type)); + if (params) { + params->current_flags = current_flags; + status = MGMT_STATUS_SUCCESS; + } else { + bt_dev_warn(hdev, "No such LE device %pMR (0x%x)", + &cp->addr.bdaddr, + le_addr_type(cp->addr.type)); + } + } + +done: + if (status == MGMT_STATUS_SUCCESS) + device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type, + supported_flags, current_flags); + + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status, + &cp->addr, sizeof(cp->addr)); +} + static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status, u16 opcode, struct sk_buff *skb) { @@ -5973,7 +6090,9 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, { struct mgmt_cp_add_device *cp = data; u8 auto_conn, addr_type; + struct hci_conn_params *params; int err; + u32 current_flags = 0; bt_dev_dbg(hdev, "sock %p", sk); @@ -6041,12 +6160,19 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, MGMT_STATUS_FAILED, &cp->addr, sizeof(cp->addr)); goto unlock; + } else { + params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, + addr_type); + if (params) + current_flags = params->current_flags; } hci_update_background_scan(hdev); added: device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action); + device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type, + SUPPORTED_DEVICE_FLAGS(), current_flags); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, MGMT_STATUS_SUCCESS, &cp->addr, @@ -7313,6 +7439,8 @@ static const struct hci_mgmt_handler mgmt_handlers[] = { HCI_MGMT_UNTRUSTED }, { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE, HCI_MGMT_VAR_LEN }, + { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE }, + { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE }, }; void mgmt_index_added(struct hci_dev *hdev) -- cgit v1.2.3 From 7fceb17c6b480e0f2bd0e566a8231039fb8a809e Mon Sep 17 00:00:00 2001 From: Miao-chen Chou Date: Wed, 17 Jun 2020 16:39:12 +0200 Subject: Bluetooth: Add definitions for advertisement monitor features This 
adds support for Advertisement Monitor API. Here are the commands and events added. - Read Advertisement Monitor Feature command - Add Advertisement Pattern Monitor command - Remove Advertisement Monitor command - Advertisement Monitor Added event - Advertisement Monitor Removed event Signed-off-by: Miao-chen Chou Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/mgmt.h | 49 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) (limited to 'include') diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index 8e47b0c5fe52..beae5c3980f0 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -741,6 +741,45 @@ struct mgmt_rp_set_device_flags { struct mgmt_addr_info addr; } __packed; +#define MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS BIT(0) + +#define MGMT_OP_READ_ADV_MONITOR_FEATURES 0x0051 +#define MGMT_READ_ADV_MONITOR_FEATURES_SIZE 0 +struct mgmt_rp_read_adv_monitor_features { + __le32 supported_features; + __le32 enabled_features; + __le16 max_num_handles; + __u8 max_num_patterns; + __le16 num_handles; + __le16 handles[]; +} __packed; + +struct mgmt_adv_pattern { + __u8 ad_type; + __u8 offset; + __u8 length; + __u8 value[31]; +} __packed; + +#define MGMT_OP_ADD_ADV_PATTERNS_MONITOR 0x0052 +struct mgmt_cp_add_adv_patterns_monitor { + __u8 pattern_count; + struct mgmt_adv_pattern patterns[]; +} __packed; +#define MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE 1 +struct mgmt_rp_add_adv_patterns_monitor { + __le16 monitor_handle; +} __packed; + +#define MGMT_OP_REMOVE_ADV_MONITOR 0x0053 +struct mgmt_cp_remove_adv_monitor { + __le16 monitor_handle; +} __packed; +#define MGMT_REMOVE_ADV_MONITOR_SIZE 2 +struct mgmt_rp_remove_adv_monitor { + __le16 monitor_handle; +} __packed; + #define MGMT_EV_CMD_COMPLETE 0x0001 struct mgmt_ev_cmd_complete { __le16 opcode; @@ -979,3 +1018,13 @@ struct mgmt_ev_device_flags_changed { __le32 supported_flags; __le32 current_flags; } __packed; + +#define MGMT_EV_ADV_MONITOR_ADDED 0x002b +struct mgmt_ev_adv_monitor_added { + __le16 monitor_handle; +} __packed; + +#define MGMT_EV_ADV_MONITOR_REMOVED 0x002c +struct mgmt_ev_adv_monitor_removed { + __le16 monitor_handle; +} __packed; -- cgit v1.2.3 From e5e1e7fd470ccf2eb38ab7fb5a3ab0fc4792fe53 Mon Sep 17 00:00:00 2001 From: Miao-chen Chou Date: Wed, 17 Jun 2020 16:39:13 +0200 Subject: Bluetooth: Add handler of MGMT_OP_READ_ADV_MONITOR_FEATURES This adds the request handler of MGMT_OP_READ_ADV_MONITOR_FEATURES command. Since the controller-based monitoring is not yet in place, this report only the supported features but not the enabled features. The following test was performed. - Issuing btmgmt advmon-features. 
Signed-off-by: Miao-chen Chou Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/hci_core.h | 24 ++++++++++++++++++++ net/bluetooth/hci_core.c | 10 ++++++++- net/bluetooth/mgmt.c | 48 ++++++++++++++++++++++++++++++++++++++++ net/bluetooth/msft.c | 7 ++++++ net/bluetooth/msft.h | 9 ++++++++ 5 files changed, 97 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 6f88e5d81bd2..4e9d51087674 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -25,6 +25,7 @@ #ifndef __HCI_CORE_H #define __HCI_CORE_H +#include #include #include @@ -237,6 +238,24 @@ struct adv_info { #define HCI_MAX_ADV_INSTANCES 5 #define HCI_DEFAULT_ADV_DURATION 2 +struct adv_pattern { + struct list_head list; + __u8 ad_type; + __u8 offset; + __u8 length; + __u8 value[HCI_MAX_AD_LENGTH]; +}; + +struct adv_monitor { + struct list_head patterns; + bool active; + __u16 handle; +}; + +#define HCI_MIN_ADV_MONITOR_HANDLE 1 +#define HCI_MAX_ADV_MONITOR_NUM_HANDLES 32 +#define HCI_MAX_ADV_MONITOR_NUM_PATTERNS 16 + #define HCI_MAX_SHORT_NAME_LENGTH 10 /* Min encryption key size to match with SMP */ @@ -511,6 +530,9 @@ struct hci_dev { __u16 adv_instance_timeout; struct delayed_work adv_instance_expire; + struct idr adv_monitors_idr; + unsigned int adv_monitors_cnt; + __u8 irk[16]; __u32 rpa_timeout; struct delayed_work rpa_expired; @@ -1258,6 +1280,8 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance); void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired); +void hci_adv_monitors_clear(struct hci_dev *hdev); + void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); void hci_init_sysfs(struct hci_dev *hdev); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 8e01afb2ee8c..53aec32a5850 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -26,7 +26,6 @@ /* Bluetooth HCI core. 
*/ #include -#include #include #include #include @@ -2996,6 +2995,12 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, return 0; } +/* This function requires the caller holds hdev->lock */ +void hci_adv_monitors_clear(struct hci_dev *hdev) +{ + idr_destroy(&hdev->adv_monitors_idr); +} + struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list, bdaddr_t *bdaddr, u8 type) { @@ -3646,6 +3651,8 @@ int hci_register_dev(struct hci_dev *hdev) queue_work(hdev->req_workqueue, &hdev->power_on); + idr_init(&hdev->adv_monitors_idr); + return id; err_wqueue: @@ -3716,6 +3723,7 @@ void hci_unregister_dev(struct hci_dev *hdev) hci_smp_irks_clear(hdev); hci_remote_oob_data_clear(hdev); hci_adv_instances_clear(hdev); + hci_adv_monitors_clear(hdev); hci_bdaddr_list_clear(&hdev->le_white_list); hci_bdaddr_list_clear(&hdev->le_resolv_list); hci_conn_params_clear_all(hdev); diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index e409ff48e8e6..8aec7fbe9a38 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -37,6 +37,7 @@ #include "smp.h" #include "mgmt_util.h" #include "mgmt_config.h" +#include "msft.h" #define MGMT_VERSION 1 #define MGMT_REVISION 17 @@ -118,6 +119,7 @@ static const u16 mgmt_commands[] = { MGMT_OP_SET_DEF_RUNTIME_CONFIG, MGMT_OP_GET_DEVICE_FLAGS, MGMT_OP_SET_DEVICE_FLAGS, + MGMT_OP_READ_ADV_MONITOR_FEATURES, }; static const u16 mgmt_events[] = { @@ -3973,6 +3975,51 @@ done: &cp->addr, sizeof(cp->addr)); } +static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct adv_monitor *monitor = NULL; + struct mgmt_rp_read_adv_monitor_features *rp = NULL; + int handle; + size_t rp_size = 0; + __u32 supported = 0; + __u16 num_handles = 0; + __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES]; + + BT_DBG("request for %s", hdev->name); + + hci_dev_lock(hdev); + + if (msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR) + supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS; + + idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) { + handles[num_handles++] = monitor->handle; + } + + hci_dev_unlock(hdev); + + rp_size = sizeof(*rp) + (num_handles * sizeof(u16)); + rp = kmalloc(rp_size, GFP_KERNEL); + if (!rp) + return -ENOMEM; + + /* Once controller-based monitoring is in place, the enabled_features + * should reflect the use. 
+ */ + rp->supported_features = cpu_to_le32(supported); + rp->enabled_features = 0; + rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES); + rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS; + rp->num_handles = cpu_to_le16(num_handles); + if (num_handles) + memcpy(&rp->handles, &handles, (num_handles * sizeof(u16))); + + return mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_READ_ADV_MONITOR_FEATURES, + MGMT_STATUS_SUCCESS, rp, rp_size); +} + static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status, u16 opcode, struct sk_buff *skb) { @@ -7441,6 +7488,7 @@ static const struct hci_mgmt_handler mgmt_handlers[] = { HCI_MGMT_VAR_LEN }, { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE }, { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE }, + { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE }, }; void mgmt_index_added(struct hci_dev *hdev) diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c index d6c4e6b5ae77..8579bfeb2836 100644 --- a/net/bluetooth/msft.c +++ b/net/bluetooth/msft.c @@ -139,3 +139,10 @@ void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb) bt_dev_dbg(hdev, "MSFT vendor event %u", event); } + +__u64 msft_get_features(struct hci_dev *hdev) +{ + struct msft_data *msft = hdev->msft_data; + + return msft ? msft->features : 0; +} diff --git a/net/bluetooth/msft.h b/net/bluetooth/msft.h index 5aa9130e1f8a..e9c478e890b8 100644 --- a/net/bluetooth/msft.h +++ b/net/bluetooth/msft.h @@ -3,16 +3,25 @@ * Copyright (C) 2020 Google Corporation */ +#define MSFT_FEATURE_MASK_BREDR_RSSI_MONITOR BIT(0) +#define MSFT_FEATURE_MASK_LE_CONN_RSSI_MONITOR BIT(1) +#define MSFT_FEATURE_MASK_LE_ADV_RSSI_MONITOR BIT(2) +#define MSFT_FEATURE_MASK_LE_ADV_MONITOR BIT(3) +#define MSFT_FEATURE_MASK_CURVE_VALIDITY BIT(4) +#define MSFT_FEATURE_MASK_CONCURRENT_ADV_MONITOR BIT(5) + #if IS_ENABLED(CONFIG_BT_MSFTEXT) void msft_do_open(struct hci_dev *hdev); void msft_do_close(struct hci_dev *hdev); void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb); +__u64 msft_get_features(struct hci_dev *hdev); #else static inline void msft_do_open(struct hci_dev *hdev) {} static inline void msft_do_close(struct hci_dev *hdev) {} static inline void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb) {} +static inline __u64 msft_get_features(struct hci_dev *hdev) { return 0; } #endif -- cgit v1.2.3 From b139553db5cd940d66095fb97de1727e9a19369f Mon Sep 17 00:00:00 2001 From: Miao-chen Chou Date: Wed, 17 Jun 2020 16:39:14 +0200 Subject: Bluetooth: Add handler of MGMT_OP_ADD_ADV_PATTERNS_MONITOR This adds the request handler of MGMT_OP_ADD_ADV_PATTERNS_MONITOR command. Note that the controller-based monitoring is not yet in place. This tracks the content of the monitor without sending HCI traffic, so the request returns immediately. The following manual test was performed. - Issue btmgmt advmon-add with valid and invalid inputs. - Issue btmgmt advmon-add more the allowed number of monitors. 
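A minimal userspace sketch (not part of the patch) of packing the variable-length command payload this handler parses: one pattern_count byte followed by fixed 34-byte struct mgmt_adv_pattern entries. The AD type and UUID bytes below are example values only.

#include <stdint.h>
#include <string.h>
#include <stddef.h>

#define ADV_PATTERN_VALUE_MAX	31	/* value[31] in struct mgmt_adv_pattern */
#define ADV_PATTERN_WIRE_SIZE	(3 + ADV_PATTERN_VALUE_MAX)

/* Append one fixed-size pattern entry (ad_type, offset, length, value[31])
 * at byte position pos; returns the new payload length, or 0 if the value
 * does not fit.
 */
static size_t pack_adv_pattern(uint8_t *buf, size_t pos, uint8_t ad_type,
			       uint8_t offset, const uint8_t *value, uint8_t len)
{
	if (len > ADV_PATTERN_VALUE_MAX)
		return 0;

	memset(buf + pos, 0, ADV_PATTERN_WIRE_SIZE);
	buf[pos] = ad_type;
	buf[pos + 1] = offset;
	buf[pos + 2] = len;
	memcpy(buf + pos + 3, value, len);

	return pos + ADV_PATTERN_WIRE_SIZE;
}

/* Build a one-pattern payload: match 16-bit Service Data (AD type 0x16)
 * whose value starts with UUID 0xfe2c at offset 0 (example values only).
 * buf must be at least 1 + 34 bytes; returns the payload length.
 */
static size_t build_add_monitor_payload(uint8_t *buf)
{
	static const uint8_t uuid_le[] = { 0x2c, 0xfe };

	buf[0] = 1;	/* pattern_count */
	return pack_adv_pattern(buf, 1, 0x16, 0, uuid_le, sizeof(uuid_le));
}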
Signed-off-by: Miao-chen Chou Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/hci_core.h | 2 + net/bluetooth/hci_core.c | 40 ++++++++++++++++ net/bluetooth/mgmt.c | 100 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 142 insertions(+) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 4e9d51087674..13fad419ae7d 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1281,6 +1281,8 @@ int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance); void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired); void hci_adv_monitors_clear(struct hci_dev *hdev); +void hci_free_adv_monitor(struct adv_monitor *monitor); +int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 53aec32a5850..ce481fab349d 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -2998,9 +2998,49 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, /* This function requires the caller holds hdev->lock */ void hci_adv_monitors_clear(struct hci_dev *hdev) { + struct adv_monitor *monitor; + int handle; + + idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) + hci_free_adv_monitor(monitor); + idr_destroy(&hdev->adv_monitors_idr); } +void hci_free_adv_monitor(struct adv_monitor *monitor) +{ + struct adv_pattern *pattern; + struct adv_pattern *tmp; + + if (!monitor) + return; + + list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) + kfree(pattern); + + kfree(monitor); +} + +/* This function requires the caller holds hdev->lock */ +int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) +{ + int min, max, handle; + + if (!monitor) + return -EINVAL; + + min = HCI_MIN_ADV_MONITOR_HANDLE; + max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES; + handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max, + GFP_KERNEL); + if (handle < 0) + return handle; + + hdev->adv_monitors_cnt++; + monitor->handle = handle; + return 0; +} + struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list, bdaddr_t *bdaddr, u8 type) { diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 8aec7fbe9a38..1eca36e51706 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -120,6 +120,7 @@ static const u16 mgmt_commands[] = { MGMT_OP_GET_DEVICE_FLAGS, MGMT_OP_SET_DEVICE_FLAGS, MGMT_OP_READ_ADV_MONITOR_FEATURES, + MGMT_OP_ADD_ADV_PATTERNS_MONITOR, }; static const u16 mgmt_events[] = { @@ -4020,6 +4021,103 @@ static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev, MGMT_STATUS_SUCCESS, rp, rp_size); } +static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_cp_add_adv_patterns_monitor *cp = data; + struct mgmt_rp_add_adv_patterns_monitor rp; + struct adv_monitor *m = NULL; + struct adv_pattern *p = NULL; + __u8 cp_ofst = 0, cp_len = 0; + unsigned int mp_cnt = 0; + int err, i; + + BT_DBG("request for %s", hdev->name); + + if (len <= sizeof(*cp) || cp->pattern_count == 0) { + err = mgmt_cmd_status(sk, hdev->id, + MGMT_OP_ADD_ADV_PATTERNS_MONITOR, + MGMT_STATUS_INVALID_PARAMS); + goto failed; + } + + m = kmalloc(sizeof(*m), GFP_KERNEL); + if (!m) { + err = -ENOMEM; + goto failed; + } + + INIT_LIST_HEAD(&m->patterns); + m->active = false; + + for (i = 0; 
i < cp->pattern_count; i++) { + if (++mp_cnt > HCI_MAX_ADV_MONITOR_NUM_PATTERNS) { + err = mgmt_cmd_status(sk, hdev->id, + MGMT_OP_ADD_ADV_PATTERNS_MONITOR, + MGMT_STATUS_INVALID_PARAMS); + goto failed; + } + + cp_ofst = cp->patterns[i].offset; + cp_len = cp->patterns[i].length; + if (cp_ofst >= HCI_MAX_AD_LENGTH || + cp_len > HCI_MAX_AD_LENGTH || + (cp_ofst + cp_len) > HCI_MAX_AD_LENGTH) { + err = mgmt_cmd_status(sk, hdev->id, + MGMT_OP_ADD_ADV_PATTERNS_MONITOR, + MGMT_STATUS_INVALID_PARAMS); + goto failed; + } + + p = kmalloc(sizeof(*p), GFP_KERNEL); + if (!p) { + err = -ENOMEM; + goto failed; + } + + p->ad_type = cp->patterns[i].ad_type; + p->offset = cp->patterns[i].offset; + p->length = cp->patterns[i].length; + memcpy(p->value, cp->patterns[i].value, p->length); + + INIT_LIST_HEAD(&p->list); + list_add(&p->list, &m->patterns); + } + + if (mp_cnt != cp->pattern_count) { + err = mgmt_cmd_status(sk, hdev->id, + MGMT_OP_ADD_ADV_PATTERNS_MONITOR, + MGMT_STATUS_INVALID_PARAMS); + goto failed; + } + + hci_dev_lock(hdev); + + err = hci_add_adv_monitor(hdev, m); + if (err) { + if (err == -ENOSPC) { + mgmt_cmd_status(sk, hdev->id, + MGMT_OP_ADD_ADV_PATTERNS_MONITOR, + MGMT_STATUS_NO_RESOURCES); + } + goto unlock; + } + + hci_dev_unlock(hdev); + + rp.monitor_handle = cpu_to_le16(m->handle); + + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADV_PATTERNS_MONITOR, + MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); + +unlock: + hci_dev_unlock(hdev); + +failed: + hci_free_adv_monitor(m); + return err; +} + static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status, u16 opcode, struct sk_buff *skb) { @@ -7489,6 +7587,8 @@ static const struct hci_mgmt_handler mgmt_handlers[] = { { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE }, { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE }, { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE }, + { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE, + HCI_MGMT_VAR_LEN }, }; void mgmt_index_added(struct hci_dev *hdev) -- cgit v1.2.3 From bd2fbc6cb815b5171facb42526f6db206d920e13 Mon Sep 17 00:00:00 2001 From: Miao-chen Chou Date: Wed, 17 Jun 2020 16:39:15 +0200 Subject: Bluetooth: Add handler of MGMT_OP_REMOVE_ADV_MONITOR This adds the request handler of MGMT_OP_REMOVE_ADV_MONITOR command. Note that the controller-based monitoring is not yet in place. This removes the internal monitor(s) without sending HCI traffic, so the request returns immediately. The following test was performed. - Issue btmgmt advmon-remove with valid and invalid handles. 
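For completeness, a tiny sketch (not part of the patch) of the 2-byte command payload this handler expects.

#include <stdint.h>

/* Pack the MGMT_OP_REMOVE_ADV_MONITOR payload
 * (struct mgmt_cp_remove_adv_monitor). The handle is little endian on the
 * wire; per the handler below, a handle of 0 asks for removal of all
 * monitors, and the reply echoes the handle back on success.
 */
static void pack_remove_adv_monitor(uint8_t buf[2], uint16_t handle)
{
	buf[0] = (uint8_t)(handle & 0xff);
	buf[1] = (uint8_t)(handle >> 8);
}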
Signed-off-by: Miao-chen Chou Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/hci_core.h | 1 + net/bluetooth/hci_core.c | 31 +++++++++++++++++++++++++++++++ net/bluetooth/mgmt.c | 35 +++++++++++++++++++++++++++++++++++ 3 files changed, 67 insertions(+) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 13fad419ae7d..c54f9295892e 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1283,6 +1283,7 @@ void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired); void hci_adv_monitors_clear(struct hci_dev *hdev); void hci_free_adv_monitor(struct adv_monitor *monitor); int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); +int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle); void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index ce481fab349d..59132b3e2cde 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3041,6 +3041,37 @@ int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) return 0; } +static int free_adv_monitor(int id, void *ptr, void *data) +{ + struct hci_dev *hdev = data; + struct adv_monitor *monitor = ptr; + + idr_remove(&hdev->adv_monitors_idr, monitor->handle); + hci_free_adv_monitor(monitor); + + return 0; +} + +/* This function requires the caller holds hdev->lock */ +int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle) +{ + struct adv_monitor *monitor; + + if (handle) { + monitor = idr_find(&hdev->adv_monitors_idr, handle); + if (!monitor) + return -ENOENT; + + idr_remove(&hdev->adv_monitors_idr, monitor->handle); + hci_free_adv_monitor(monitor); + } else { + /* Remove all monitors if handle is 0. 
*/ + idr_for_each(&hdev->adv_monitors_idr, &free_adv_monitor, hdev); + } + + return 0; +} + struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list, bdaddr_t *bdaddr, u8 type) { diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 1eca36e51706..cff24fde72d2 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -121,6 +121,7 @@ static const u16 mgmt_commands[] = { MGMT_OP_SET_DEVICE_FLAGS, MGMT_OP_READ_ADV_MONITOR_FEATURES, MGMT_OP_ADD_ADV_PATTERNS_MONITOR, + MGMT_OP_REMOVE_ADV_MONITOR, }; static const u16 mgmt_events[] = { @@ -4118,6 +4119,39 @@ failed: return err; } +static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_cp_remove_adv_monitor *cp = data; + struct mgmt_rp_remove_adv_monitor rp; + u16 handle; + int err; + + BT_DBG("request for %s", hdev->name); + + hci_dev_lock(hdev); + + handle = __le16_to_cpu(cp->monitor_handle); + + err = hci_remove_adv_monitor(hdev, handle); + if (err == -ENOENT) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR, + MGMT_STATUS_INVALID_INDEX); + goto unlock; + } + + hci_dev_unlock(hdev); + + rp.monitor_handle = cp->monitor_handle; + + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR, + MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); + +unlock: + hci_dev_unlock(hdev); + return err; +} + static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status, u16 opcode, struct sk_buff *skb) { @@ -7589,6 +7623,7 @@ static const struct hci_mgmt_handler mgmt_handlers[] = { { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE }, { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE, HCI_MGMT_VAR_LEN }, + { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE }, }; void mgmt_index_added(struct hci_dev *hdev) -- cgit v1.2.3 From 8208f5a9d435e58ee7f53a24d9ccbe7787944537 Mon Sep 17 00:00:00 2001 From: Miao-chen Chou Date: Wed, 17 Jun 2020 16:39:18 +0200 Subject: Bluetooth: Update background scan and report device based on advertisement monitors This calls hci_update_background_scan() when there is any update on the advertisement monitors. If there is at least one advertisement monitor, the filtering policy of scan parameters should be 0x00. This also reports device found mgmt events if there is at least one monitor. The following cases were tested with btmgmt advmon-* commands. (1) add a ADV monitor and observe that the passive scanning is triggered. (2) remove the last ADV monitor and observe that the passive scanning is terminated. (3) with a LE peripheral paired, repeat (1) and observe the passive scanning continues. (4) with a LE peripheral paired, repeat (2) and observe the passive scanning continues. (5) with a ADV monitor, suspend/resume the host and observe the passive scanning continues. 
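A small sketch of the policy described above. This is a toy model covering only the monitor-related conditions; the full __hci_update_background_scan() and update_white_list() logic has more inputs.

#include <stdbool.h>
#include <stdint.h>

struct scan_inputs {
	bool pend_le_conns;
	bool pend_le_reports;
	bool adv_monitoring;	/* hci_is_adv_monitoring() */
};

/* Background scanning is needed if there are pending LE connections,
 * pending reports, or at least one registered advertisement monitor.
 */
static bool background_scan_needed(const struct scan_inputs *s)
{
	return s->pend_le_conns || s->pend_le_reports || s->adv_monitoring;
}

/* The white list (filter policy 0x01) cannot be used while a monitor is
 * registered, since the peers a monitor should match are not on the list.
 */
static uint8_t scan_filter_policy(const struct scan_inputs *s)
{
	return s->adv_monitoring ? 0x00 : 0x01;
}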
Signed-off-by: Miao-chen Chou Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/hci_core.h | 1 + net/bluetooth/hci_core.c | 13 +++++++++++++ net/bluetooth/hci_event.c | 5 +++-- net/bluetooth/hci_request.c | 17 ++++++++++++++--- net/bluetooth/mgmt.c | 5 ++++- 5 files changed, 35 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index c54f9295892e..524057598ffd 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1284,6 +1284,7 @@ void hci_adv_monitors_clear(struct hci_dev *hdev); void hci_free_adv_monitor(struct adv_monitor *monitor); int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle); +bool hci_is_adv_monitoring(struct hci_dev *hdev); void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 59132b3e2cde..7959b851cc63 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3005,6 +3005,8 @@ void hci_adv_monitors_clear(struct hci_dev *hdev) hci_free_adv_monitor(monitor); idr_destroy(&hdev->adv_monitors_idr); + + hci_update_background_scan(hdev); } void hci_free_adv_monitor(struct adv_monitor *monitor) @@ -3038,6 +3040,9 @@ int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) hdev->adv_monitors_cnt++; monitor->handle = handle; + + hci_update_background_scan(hdev); + return 0; } @@ -3069,9 +3074,17 @@ int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle) idr_for_each(&hdev->adv_monitors_idr, &free_adv_monitor, hdev); } + hci_update_background_scan(hdev); + return 0; } +/* This function requires the caller holds hdev->lock */ +bool hci_is_adv_monitoring(struct hci_dev *hdev) +{ + return !idr_is_empty(&hdev->adv_monitors_idr); +} + struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list, bdaddr_t *bdaddr, u8 type) { diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 8981954ff4c4..e08d4dd9a24e 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -5447,14 +5447,15 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, /* Passive scanning shouldn't trigger any device found events, * except for devices marked as CONN_REPORT for which we do send - * device found events. + * device found events, or advertisement monitoring requested. */ if (hdev->le_scan_type == LE_SCAN_PASSIVE) { if (type == LE_ADV_DIRECT_IND) return; if (!hci_pend_le_action_lookup(&hdev->pend_le_reports, - bdaddr, bdaddr_type)) + bdaddr, bdaddr_type) && + idr_is_empty(&hdev->adv_monitors_idr)) return; if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND) diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index eee9c007a5fb..29decd7e8051 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -413,11 +413,15 @@ static void __hci_update_background_scan(struct hci_request *req) */ hci_discovery_filter_clear(hdev); + BT_DBG("%s ADV monitoring is %s", hdev->name, + hci_is_adv_monitoring(hdev) ? "on" : "off"); + if (list_empty(&hdev->pend_le_conns) && - list_empty(&hdev->pend_le_reports)) { + list_empty(&hdev->pend_le_reports) && + !hci_is_adv_monitoring(hdev)) { /* If there is no pending LE connections or devices - * to be scanned for, we should stop the background - * scanning. 
+ * to be scanned for or no ADV monitors, we should stop the + * background scanning. */ /* If controller is not scanning we are done. */ @@ -794,6 +798,13 @@ static u8 update_white_list(struct hci_request *req) return 0x00; } + /* Once the controller offloading of advertisement monitor is in place, + * the if condition should include the support of MSFT extension + * support. + */ + if (!idr_is_empty(&hdev->adv_monitors_idr)) + return 0x00; + /* Select filter policy to use white list */ return 0x01; } diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index b194da4de2d7..ec66160a673c 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -8575,8 +8575,11 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, if (!hci_discovery_active(hdev)) { if (link_type == ACL_LINK) return; - if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports)) + if (link_type == LE_LINK && + list_empty(&hdev->pend_le_reports) && + !hci_is_adv_monitoring(hdev)) { return; + } } if (hdev->discovery.result_filtering) { -- cgit v1.2.3 From 76b139965575e51224d33ea721d9d00a542b6b39 Mon Sep 17 00:00:00 2001 From: Manish Mandlik Date: Wed, 17 Jun 2020 16:39:19 +0200 Subject: Bluetooth: Terminate the link if pairing is cancelled If user decides to cancel the ongoing pairing process (e.g. by clicking the cancel button on pairing/passkey window), abort any ongoing pairing and then terminate the link if it was created because of the pair device action. Signed-off-by: Manish Mandlik Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/hci_core.h | 14 ++++++++++++-- net/bluetooth/hci_conn.c | 11 ++++++++--- net/bluetooth/l2cap_core.c | 6 ++++-- net/bluetooth/mgmt.c | 22 ++++++++++++++++++---- 4 files changed, 42 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 524057598ffd..77d29341b064 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -564,6 +564,12 @@ struct hci_dev { #define HCI_PHY_HANDLE(handle) (handle & 0xff) +enum conn_reasons { + CONN_REASON_PAIR_DEVICE, + CONN_REASON_L2CAP_CHAN, + CONN_REASON_SCO_CONNECT, +}; + struct hci_conn { struct list_head list; @@ -615,6 +621,8 @@ struct hci_conn { __s8 max_tx_power; unsigned long flags; + enum conn_reasons conn_reason; + __u32 clock; __u16 clock_accuracy; @@ -1040,12 +1048,14 @@ struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle); struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, u8 dst_type, u8 sec_level, - u16 conn_timeout); + u16 conn_timeout, + enum conn_reasons conn_reason); struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, u8 dst_type, u8 sec_level, u16 conn_timeout, u8 role, bdaddr_t *direct_rpa); struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, - u8 sec_level, u8 auth_type); + u8 sec_level, u8 auth_type, + enum conn_reasons conn_reason); struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, __u16 setting); int hci_conn_check_link_mode(struct hci_conn *conn); diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 9bdffc4e79b0..47f3a45d7dcb 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -1174,7 +1174,8 @@ static int hci_explicit_conn_params_set(struct hci_dev *hdev, /* This function requires the caller holds hdev->lock */ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, u8 dst_type, u8 
sec_level, - u16 conn_timeout) + u16 conn_timeout, + enum conn_reasons conn_reason) { struct hci_conn *conn; @@ -1219,6 +1220,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, conn->sec_level = BT_SECURITY_LOW; conn->pending_sec_level = sec_level; conn->conn_timeout = conn_timeout; + conn->conn_reason = conn_reason; hci_update_background_scan(hdev); @@ -1228,7 +1230,8 @@ done: } struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, - u8 sec_level, u8 auth_type) + u8 sec_level, u8 auth_type, + enum conn_reasons conn_reason) { struct hci_conn *acl; @@ -1248,6 +1251,7 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, hci_conn_hold(acl); + acl->conn_reason = conn_reason; if (acl->state == BT_OPEN || acl->state == BT_CLOSED) { acl->sec_level = BT_SECURITY_LOW; acl->pending_sec_level = sec_level; @@ -1264,7 +1268,8 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, struct hci_conn *acl; struct hci_conn *sco; - acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING); + acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING, + CONN_REASON_SCO_CONNECT); if (IS_ERR(acl)) return acl; diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index fe913a5c754a..35d2bc569a2d 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -7893,11 +7893,13 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, else hcon = hci_connect_le_scan(hdev, dst, dst_type, chan->sec_level, - HCI_LE_CONN_TIMEOUT); + HCI_LE_CONN_TIMEOUT, + CONN_REASON_L2CAP_CHAN); } else { u8 auth_type = l2cap_get_auth_type(chan); - hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type); + hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type, + CONN_REASON_L2CAP_CHAN); } if (IS_ERR(hcon)) { diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index ec66160a673c..2a732cab1dc9 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -2931,7 +2931,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, if (cp->addr.type == BDADDR_BREDR) { conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level, - auth_type); + auth_type, CONN_REASON_PAIR_DEVICE); } else { u8 addr_type = le_addr_type(cp->addr.type); struct hci_conn_params *p; @@ -2950,9 +2950,9 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT) p->auto_connect = HCI_AUTO_CONN_DISABLED; - conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, - addr_type, sec_level, - HCI_LE_CONN_TIMEOUT); + conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type, + sec_level, HCI_LE_CONN_TIMEOUT, + CONN_REASON_PAIR_DEVICE); } if (IS_ERR(conn)) { @@ -3053,6 +3053,20 @@ static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data, err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0, addr, sizeof(*addr)); + + /* Since user doesn't want to proceed with the connection, abort any + * ongoing pairing and then terminate the link if it was created + * because of the pair device action. 
+ */ + if (addr->type == BDADDR_BREDR) + hci_remove_link_key(hdev, &addr->bdaddr); + else + smp_cancel_and_remove_pairing(hdev, &addr->bdaddr, + le_addr_type(addr->type)); + + if (conn->conn_reason == CONN_REASON_PAIR_DEVICE) + hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM); + unlock: hci_dev_unlock(hdev); return err; -- cgit v1.2.3 From e0bcc58d876c6ece9720310509a908b7637e37cf Mon Sep 17 00:00:00 2001 From: Benjamin Gaignard Date: Wed, 3 Jun 2020 14:54:36 +0200 Subject: mfd: stm32: Add defines to be used for clkevent purpose Add defines to be able to enable/clear irq and configure one shot mode. Signed-off-by: Benjamin Gaignard Signed-off-by: Lee Jones --- include/linux/mfd/stm32-lptimer.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/linux/mfd/stm32-lptimer.h b/include/linux/mfd/stm32-lptimer.h index 605f62264825..90b20550c1c8 100644 --- a/include/linux/mfd/stm32-lptimer.h +++ b/include/linux/mfd/stm32-lptimer.h @@ -27,10 +27,15 @@ #define STM32_LPTIM_CMPOK BIT(3) /* STM32_LPTIM_ICR - bit fields */ +#define STM32_LPTIM_ARRMCF BIT(1) #define STM32_LPTIM_CMPOKCF_ARROKCF GENMASK(4, 3) +/* STM32_LPTIM_IER - bit flieds */ +#define STM32_LPTIM_ARRMIE BIT(1) + /* STM32_LPTIM_CR - bit fields */ #define STM32_LPTIM_CNTSTRT BIT(2) +#define STM32_LPTIM_SNGSTRT BIT(1) #define STM32_LPTIM_ENABLE BIT(0) /* STM32_LPTIM_CFGR - bit fields */ -- cgit v1.2.3 From 7f8a137f736f7366820c485c5a0d34d65b9d0125 Mon Sep 17 00:00:00 2001 From: Charles Keepax Date: Mon, 15 Jun 2020 14:53:22 +0100 Subject: mfd: madera: Remove unused forward declaration of madera_codec_pdata This forward declaration is redundant since the header including the full data structure is included. Signed-off-by: Charles Keepax Signed-off-by: Lee Jones --- include/linux/mfd/madera/pdata.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/linux/mfd/madera/pdata.h b/include/linux/mfd/madera/pdata.h index fa9595dd42ba..601cbbc10370 100644 --- a/include/linux/mfd/madera/pdata.h +++ b/include/linux/mfd/madera/pdata.h @@ -21,7 +21,6 @@ struct gpio_desc; struct pinctrl_map; -struct madera_codec_pdata; /** * struct madera_pdata - Configuration data for Madera devices -- cgit v1.2.3 From 6c27219e34911601955b72c754adfc11c527ba7b Mon Sep 17 00:00:00 2001 From: Neil Armstrong Date: Mon, 8 Jun 2020 11:17:36 +0200 Subject: mfd: Add support for the Khadas System control Microcontroller This Microcontroller is present on the Khadas VIM1, VIM2, VIM3 and Edge boards. It has multiple boot control features like password check, power-on options, power-off control and system FAN control on recent boards. This implements a very basic MFD driver with the fan control and User NVMEM cells. Signed-off-by: Neil Armstrong Signed-off-by: Lee Jones --- drivers/mfd/Kconfig | 21 ++++++ drivers/mfd/Makefile | 1 + drivers/mfd/khadas-mcu.c | 142 +++++++++++++++++++++++++++++++++++++++++ include/linux/mfd/khadas-mcu.h | 91 ++++++++++++++++++++++++++ 4 files changed, 255 insertions(+) create mode 100644 drivers/mfd/khadas-mcu.c create mode 100644 include/linux/mfd/khadas-mcu.h (limited to 'include') diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index a37d7d171382..d13bb0abfd6f 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -2053,6 +2053,27 @@ config MFD_WCD934X This driver provides common support WCD934x audio codec and its associated Pin Controller, Soundwire Controller and Audio codec. 
+config MFD_KHADAS_MCU + tristate "Support for Khadas System control Microcontroller" + depends on I2C + depends on ARCH_MESON || ARCH_ROCKCHIP || COMPILE_TEST + select MFD_CORE + select REGMAP_I2C + help + Support for the Khadas System control Microcontroller interface + present on their VIM and Edge boards. + + This Microcontroller is present on the Khadas VIM1, VIM2, VIM3 and + Edge boards. + + It provides multiple boot control features like password check, + power-on options, power-off control and system FAN control on recent + boards. + + This driver provides common support for accessing the device, + additional drivers must be enabled in order to use the functionality + of the device. + menu "Multimedia Capabilities Port drivers" depends on ARCH_SA1100 diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 9367a92f795a..1c8d6be3347d 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -262,5 +262,6 @@ obj-$(CONFIG_MFD_ROHM_BD70528) += rohm-bd70528.o obj-$(CONFIG_MFD_ROHM_BD71828) += rohm-bd71828.o obj-$(CONFIG_MFD_ROHM_BD718XX) += rohm-bd718x7.o obj-$(CONFIG_MFD_STMFX) += stmfx.o +obj-$(CONFIG_MFD_KHADAS_MCU) += khadas-mcu.o obj-$(CONFIG_SGI_MFD_IOC3) += ioc3.o diff --git a/drivers/mfd/khadas-mcu.c b/drivers/mfd/khadas-mcu.c new file mode 100644 index 000000000000..44d5bb462dab --- /dev/null +++ b/drivers/mfd/khadas-mcu.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Khadas System control Microcontroller + * + * Copyright (C) 2020 BayLibre SAS + * + * Author(s): Neil Armstrong + */ +#include +#include +#include +#include +#include +#include + +static bool khadas_mcu_reg_volatile(struct device *dev, unsigned int reg) +{ + if (reg >= KHADAS_MCU_USER_DATA_0_REG && + reg < KHADAS_MCU_PWR_OFF_CMD_REG) + return true; + + switch (reg) { + case KHADAS_MCU_PWR_OFF_CMD_REG: + case KHADAS_MCU_PASSWD_START_REG: + case KHADAS_MCU_CHECK_VEN_PASSWD_REG: + case KHADAS_MCU_CHECK_USER_PASSWD_REG: + case KHADAS_MCU_WOL_INIT_START_REG: + case KHADAS_MCU_CMD_FAN_STATUS_CTRL_REG: + return true; + default: + return false; + } +} + +static bool khadas_mcu_reg_writeable(struct device *dev, unsigned int reg) +{ + switch (reg) { + case KHADAS_MCU_PASSWD_VEN_0_REG: + case KHADAS_MCU_PASSWD_VEN_1_REG: + case KHADAS_MCU_PASSWD_VEN_2_REG: + case KHADAS_MCU_PASSWD_VEN_3_REG: + case KHADAS_MCU_PASSWD_VEN_4_REG: + case KHADAS_MCU_PASSWD_VEN_5_REG: + case KHADAS_MCU_MAC_0_REG: + case KHADAS_MCU_MAC_1_REG: + case KHADAS_MCU_MAC_2_REG: + case KHADAS_MCU_MAC_3_REG: + case KHADAS_MCU_MAC_4_REG: + case KHADAS_MCU_MAC_5_REG: + case KHADAS_MCU_USID_0_REG: + case KHADAS_MCU_USID_1_REG: + case KHADAS_MCU_USID_2_REG: + case KHADAS_MCU_USID_3_REG: + case KHADAS_MCU_USID_4_REG: + case KHADAS_MCU_USID_5_REG: + case KHADAS_MCU_VERSION_0_REG: + case KHADAS_MCU_VERSION_1_REG: + case KHADAS_MCU_DEVICE_NO_0_REG: + case KHADAS_MCU_DEVICE_NO_1_REG: + case KHADAS_MCU_FACTORY_TEST_REG: + case KHADAS_MCU_SHUTDOWN_NORMAL_STATUS_REG: + return false; + default: + return true; + } +} + +static const struct regmap_config khadas_mcu_regmap_config = { + .reg_bits = 8, + .reg_stride = 1, + .val_bits = 8, + .max_register = KHADAS_MCU_CMD_FAN_STATUS_CTRL_REG, + .volatile_reg = khadas_mcu_reg_volatile, + .writeable_reg = khadas_mcu_reg_writeable, + .cache_type = REGCACHE_RBTREE, +}; + +static struct mfd_cell khadas_mcu_fan_cells[] = { + /* VIM1/2 Rev13+ and VIM3 only */ + { .name = "khadas-mcu-fan-ctrl", }, +}; + +static struct mfd_cell khadas_mcu_cells[] = { + { .name = "khadas-mcu-user-mem", }, +}; + 
+static int khadas_mcu_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct khadas_mcu *ddata; + int ret; + + ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL); + if (!ddata) + return -ENOMEM; + + i2c_set_clientdata(client, ddata); + + ddata->dev = dev; + + ddata->regmap = devm_regmap_init_i2c(client, &khadas_mcu_regmap_config); + if (IS_ERR(ddata->regmap)) { + ret = PTR_ERR(ddata->regmap); + dev_err(dev, "Failed to allocate register map: %d\n", ret); + return ret; + } + + ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, + khadas_mcu_cells, + ARRAY_SIZE(khadas_mcu_cells), + NULL, 0, NULL); + if (ret) + return ret; + + if (of_find_property(dev->of_node, "#cooling-cells", NULL)) + return devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, + khadas_mcu_fan_cells, + ARRAY_SIZE(khadas_mcu_fan_cells), + NULL, 0, NULL); + + return 0; +} + +static const struct of_device_id khadas_mcu_of_match[] = { + { .compatible = "khadas,mcu", }, + {}, +}; +MODULE_DEVICE_TABLE(of, khadas_mcu_of_match); + +static struct i2c_driver khadas_mcu_driver = { + .driver = { + .name = "khadas-mcu-core", + .of_match_table = of_match_ptr(khadas_mcu_of_match), + }, + .probe = khadas_mcu_probe, +}; +module_i2c_driver(khadas_mcu_driver); + +MODULE_DESCRIPTION("Khadas MCU core driver"); +MODULE_AUTHOR("Neil Armstrong "); +MODULE_LICENSE("GPL v2"); diff --git a/include/linux/mfd/khadas-mcu.h b/include/linux/mfd/khadas-mcu.h new file mode 100644 index 000000000000..a99ba2ed0e4e --- /dev/null +++ b/include/linux/mfd/khadas-mcu.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Khadas System control Microcontroller Register map + * + * Copyright (C) 2020 BayLibre SAS + * + * Author(s): Neil Armstrong + */ + +#ifndef MFD_KHADAS_MCU_H +#define MFD_KHADAS_MCU_H + +#define KHADAS_MCU_PASSWD_VEN_0_REG 0x00 /* RO */ +#define KHADAS_MCU_PASSWD_VEN_1_REG 0x01 /* RO */ +#define KHADAS_MCU_PASSWD_VEN_2_REG 0x02 /* RO */ +#define KHADAS_MCU_PASSWD_VEN_3_REG 0x03 /* RO */ +#define KHADAS_MCU_PASSWD_VEN_4_REG 0x04 /* RO */ +#define KHADAS_MCU_PASSWD_VEN_5_REG 0x05 /* RO */ +#define KHADAS_MCU_MAC_0_REG 0x06 /* RO */ +#define KHADAS_MCU_MAC_1_REG 0x07 /* RO */ +#define KHADAS_MCU_MAC_2_REG 0x08 /* RO */ +#define KHADAS_MCU_MAC_3_REG 0x09 /* RO */ +#define KHADAS_MCU_MAC_4_REG 0x0a /* RO */ +#define KHADAS_MCU_MAC_5_REG 0x0b /* RO */ +#define KHADAS_MCU_USID_0_REG 0x0c /* RO */ +#define KHADAS_MCU_USID_1_REG 0x0d /* RO */ +#define KHADAS_MCU_USID_2_REG 0x0e /* RO */ +#define KHADAS_MCU_USID_3_REG 0x0f /* RO */ +#define KHADAS_MCU_USID_4_REG 0x10 /* RO */ +#define KHADAS_MCU_USID_5_REG 0x11 /* RO */ +#define KHADAS_MCU_VERSION_0_REG 0x12 /* RO */ +#define KHADAS_MCU_VERSION_1_REG 0x13 /* RO */ +#define KHADAS_MCU_DEVICE_NO_0_REG 0x14 /* RO */ +#define KHADAS_MCU_DEVICE_NO_1_REG 0x15 /* RO */ +#define KHADAS_MCU_FACTORY_TEST_REG 0x16 /* R */ +#define KHADAS_MCU_BOOT_MODE_REG 0x20 /* RW */ +#define KHADAS_MCU_BOOT_EN_WOL_REG 0x21 /* RW */ +#define KHADAS_MCU_BOOT_EN_RTC_REG 0x22 /* RW */ +#define KHADAS_MCU_BOOT_EN_EXP_REG 0x23 /* RW */ +#define KHADAS_MCU_BOOT_EN_IR_REG 0x24 /* RW */ +#define KHADAS_MCU_BOOT_EN_DCIN_REG 0x25 /* RW */ +#define KHADAS_MCU_BOOT_EN_KEY_REG 0x26 /* RW */ +#define KHADAS_MCU_KEY_MODE_REG 0x27 /* RW */ +#define KHADAS_MCU_LED_MODE_ON_REG 0x28 /* RW */ +#define KHADAS_MCU_LED_MODE_OFF_REG 0x29 /* RW */ +#define KHADAS_MCU_SHUTDOWN_NORMAL_REG 0x2c /* RW */ +#define KHADAS_MCU_MAC_SWITCH_REG 0x2d /* RW */ +#define KHADAS_MCU_MCU_SLEEP_MODE_REG 
0x2e /* RW */ +#define KHADAS_MCU_IR_CODE1_0_REG 0x2f /* RW */ +#define KHADAS_MCU_IR_CODE1_1_REG 0x30 /* RW */ +#define KHADAS_MCU_IR_CODE1_2_REG 0x31 /* RW */ +#define KHADAS_MCU_IR_CODE1_3_REG 0x32 /* RW */ +#define KHADAS_MCU_USB_PCIE_SWITCH_REG 0x33 /* RW */ +#define KHADAS_MCU_IR_CODE2_0_REG 0x34 /* RW */ +#define KHADAS_MCU_IR_CODE2_1_REG 0x35 /* RW */ +#define KHADAS_MCU_IR_CODE2_2_REG 0x36 /* RW */ +#define KHADAS_MCU_IR_CODE2_3_REG 0x37 /* RW */ +#define KHADAS_MCU_PASSWD_USER_0_REG 0x40 /* RW */ +#define KHADAS_MCU_PASSWD_USER_1_REG 0x41 /* RW */ +#define KHADAS_MCU_PASSWD_USER_2_REG 0x42 /* RW */ +#define KHADAS_MCU_PASSWD_USER_3_REG 0x43 /* RW */ +#define KHADAS_MCU_PASSWD_USER_4_REG 0x44 /* RW */ +#define KHADAS_MCU_PASSWD_USER_5_REG 0x45 /* RW */ +#define KHADAS_MCU_USER_DATA_0_REG 0x46 /* RW 56 bytes */ +#define KHADAS_MCU_PWR_OFF_CMD_REG 0x80 /* WO */ +#define KHADAS_MCU_PASSWD_START_REG 0x81 /* WO */ +#define KHADAS_MCU_CHECK_VEN_PASSWD_REG 0x82 /* WO */ +#define KHADAS_MCU_CHECK_USER_PASSWD_REG 0x83 /* WO */ +#define KHADAS_MCU_SHUTDOWN_NORMAL_STATUS_REG 0x86 /* RO */ +#define KHADAS_MCU_WOL_INIT_START_REG 0x87 /* WO */ +#define KHADAS_MCU_CMD_FAN_STATUS_CTRL_REG 0x88 /* WO */ + +enum { + KHADAS_BOARD_VIM1 = 0x1, + KHADAS_BOARD_VIM2, + KHADAS_BOARD_VIM3, + KHADAS_BOARD_EDGE = 0x11, + KHADAS_BOARD_EDGE_V, +}; + +/** + * struct khadas_mcu - Khadas MCU structure + * @device: device reference used for logs + * @regmap: register map + */ +struct khadas_mcu { + struct device *dev; + struct regmap *regmap; +}; + +#endif /* MFD_KHADAS_MCU_H */ -- cgit v1.2.3 From 81c7462883b0cc0a4eeef0687f80ad5b5baee5f6 Mon Sep 17 00:00:00 2001 From: Macpaul Lin Date: Thu, 18 Jun 2020 17:13:38 +0800 Subject: USB: replace hardcode maximum usb string length by definition Replace hardcoded maximum USB string length (126 bytes) by definition "USB_MAX_STRING_LEN". 
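For context, a short standalone check (plain C11, not kernel code) of why the limit is 126 characters.

#include <assert.h>

/* Where 126 comes from: bLength is a single byte (max 255), the descriptor
 * header (bLength + bDescriptorType) uses 2 bytes, and each character is a
 * 2-byte UTF-16LE code unit, so at most (255 - 2) / 2 = 126 characters fit.
 */
#define USB_MAX_STRING_LEN	126

static_assert((255 - 2) / 2 == USB_MAX_STRING_LEN,
	      "string descriptor capacity");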
Signed-off-by: Macpaul Lin Acked-by: Alan Stern Link: https://lore.kernel.org/r/1592471618-29428-1-git-send-email-macpaul.lin@mediatek.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/composite.c | 4 ++-- drivers/usb/gadget/configfs.c | 2 +- drivers/usb/gadget/usbstring.c | 4 ++-- include/uapi/linux/usb/ch9.h | 3 +++ 4 files changed, 8 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 5c1eb96a5c57..8fbf73467fef 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -1085,7 +1085,7 @@ static void collect_langs(struct usb_gadget_strings **sp, __le16 *buf) while (*sp) { s = *sp; language = cpu_to_le16(s->language); - for (tmp = buf; *tmp && tmp < &buf[126]; tmp++) { + for (tmp = buf; *tmp && tmp < &buf[USB_MAX_STRING_LEN]; tmp++) { if (*tmp == language) goto repeat; } @@ -1160,7 +1160,7 @@ static int get_string(struct usb_composite_dev *cdev, collect_langs(sp, s->wData); } - for (len = 0; len <= 126 && s->wData[len]; len++) + for (len = 0; len <= USB_MAX_STRING_LEN && s->wData[len]; len++) continue; if (!len) return -EINVAL; diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 9dc06a4e1b30..56051bb97349 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -103,7 +103,7 @@ static int usb_string_copy(const char *s, char **s_copy) char *str; char *copy = *s_copy; ret = strlen(s); - if (ret > 126) + if (ret > USB_MAX_STRING_LEN) return -EOVERFLOW; str = kstrdup(s, GFP_KERNEL); diff --git a/drivers/usb/gadget/usbstring.c b/drivers/usb/gadget/usbstring.c index 58a4d3325090..30f889ad3ad2 100644 --- a/drivers/usb/gadget/usbstring.c +++ b/drivers/usb/gadget/usbstring.c @@ -55,9 +55,9 @@ usb_gadget_get_string (const struct usb_gadget_strings *table, int id, u8 *buf) return -EINVAL; /* string descriptors have length, tag, then UTF16-LE text */ - len = min ((size_t) 126, strlen (s->s)); + len = min((size_t)USB_MAX_STRING_LEN, strlen(s->s)); len = utf8s_to_utf16s(s->s, len, UTF16_LITTLE_ENDIAN, - (wchar_t *) &buf[2], 126); + (wchar_t *) &buf[2], USB_MAX_STRING_LEN); if (len < 0) return -EINVAL; buf [0] = (len + 1) * 2; diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index 456ab0c2b586..b1ed2ccfe9cf 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -368,6 +368,9 @@ struct usb_config_descriptor { /*-------------------------------------------------------------------------*/ +/* USB String descriptors can contain at most 126 characters. */ +#define USB_MAX_STRING_LEN 126 + /* USB_DT_STRING: String descriptor */ struct usb_string_descriptor { __u8 bLength; -- cgit v1.2.3 From 9c77b803f263573b6019e4828825709845c37d45 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 17 Jun 2020 20:53:22 -0700 Subject: net: tso: double TSO_HEADER_SIZE value Transport header size could be 60 bytes, and network header size can also be 60 bytes. Add the Ethernet header and we are above 128 bytes. Since drivers using net/core/tso.c usually allocates one DMA coherent piece of memory per TX queue, this patch might cause issues if a driver was using too many slots. For 1024 slots, we would need 256 KB of physically contiguous memory instead of 128 KB. Alternative fix would be to add checks in the fast path, but this involves more work in all drivers using net/core/tso.c. 
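A quick standalone sketch (not driver code) of the per-queue sizing impact described above.

#include <stdio.h>

#define TSO_HEADER_SIZE	256	/* 128 before this change */

/* Drivers using net/core/tso.c typically reserve one TSO_HEADER_SIZE slot of
 * DMA-coherent memory per TX descriptor, so the per-queue cost scales with
 * the ring size.
 */
static unsigned long tso_header_area_bytes(unsigned int tx_slots)
{
	return (unsigned long)tx_slots * TSO_HEADER_SIZE;
}

int main(void)
{
	/* The example from the commit message: 1024 slots now need 256 KB. */
	printf("%lu KB\n", tso_header_area_bytes(1024) / 1024);
	return 0;
}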
Fixes: f9cbe9a556af ("net: define the TSO header size in net/tso.h") Signed-off-by: Eric Dumazet Cc: Antoine Tenart Signed-off-by: David S. Miller --- include/net/tso.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/tso.h b/include/net/tso.h index 7e166a570349..c33dd00c161f 100644 --- a/include/net/tso.h +++ b/include/net/tso.h @@ -4,7 +4,7 @@ #include -#define TSO_HEADER_SIZE 128 +#define TSO_HEADER_SIZE 256 struct tso_t { int next_frag_idx; -- cgit v1.2.3 From 185c3e5860227065dcb6ee884b45e0debe4762dd Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 17 Jun 2020 20:53:23 -0700 Subject: net: tso: shrink struct tso_t size field can be an int, no need for size_t Removes a 32bit hole on 64bit kernels. And align fields for better readability. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/tso.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/net/tso.h b/include/net/tso.h index c33dd00c161f..d9b0a14b2a57 100644 --- a/include/net/tso.h +++ b/include/net/tso.h @@ -7,12 +7,12 @@ #define TSO_HEADER_SIZE 256 struct tso_t { - int next_frag_idx; - void *data; - size_t size; - u16 ip_id; - bool ipv6; - u32 tcp_seq; + int next_frag_idx; + int size; + void *data; + u16 ip_id; + bool ipv6; + u32 tcp_seq; }; int tso_count_descs(struct sk_buff *skb); -- cgit v1.2.3 From 504b912150983a8b2499bbf9e4501336677404c9 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 17 Jun 2020 20:53:24 -0700 Subject: net: tso: constify tso_count_descs() and friends skb argument of tso_count_descs(), tso_build_hdr() and tso_build_data() can be const. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/tso.h | 6 +++--- net/core/tso.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/net/tso.h b/include/net/tso.h index d9b0a14b2a57..32d9272ade6a 100644 --- a/include/net/tso.h +++ b/include/net/tso.h @@ -15,10 +15,10 @@ struct tso_t { u32 tcp_seq; }; -int tso_count_descs(struct sk_buff *skb); -void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso, +int tso_count_descs(const struct sk_buff *skb); +void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso, int size, bool is_last); -void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size); +void tso_build_data(const struct sk_buff *skb, struct tso_t *tso, int size); void tso_start(struct sk_buff *skb, struct tso_t *tso); #endif /* _TSO_H */ diff --git a/net/core/tso.c b/net/core/tso.c index d4d5c077ad72..56487e3bb26d 100644 --- a/net/core/tso.c +++ b/net/core/tso.c @@ -6,14 +6,14 @@ #include /* Calculate expected number of TX descriptors */ -int tso_count_descs(struct sk_buff *skb) +int tso_count_descs(const struct sk_buff *skb) { /* The Marvell Way */ return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags; } EXPORT_SYMBOL(tso_count_descs); -void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso, +void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso, int size, bool is_last) { struct tcphdr *tcph; @@ -44,7 +44,7 @@ void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso, } EXPORT_SYMBOL(tso_build_hdr); -void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size) +void tso_build_data(const struct sk_buff *skb, struct tso_t *tso, int size) { tso->tcp_seq += size; tso->size -= size; -- cgit v1.2.3 From 761b331cb6902dc0a08f786e9fa0dbd572059027 Mon Sep 
17 00:00:00 2001 From: Eric Dumazet Date: Wed, 17 Jun 2020 20:53:25 -0700 Subject: net: tso: cache transport header length Add tlen field into struct tso_t, and change tso_start() to return skb_transport_offset(skb) + tso->tlen This removes from callers the need to use tcp_hdrlen(skb) and will ease UDP segmentation offload addition. v2: calls tso_start() earlier in otx2_sq_append_tso() [Jakub] Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 5 +++-- drivers/net/ethernet/freescale/fec_main.c | 5 ++--- drivers/net/ethernet/marvell/mv643xx_eth.c | 5 ++--- drivers/net/ethernet/marvell/mvneta.c | 5 ++--- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 6 +++--- drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 6 +++--- include/net/tso.h | 3 ++- net/core/tso.c | 11 +++++++---- 8 files changed, 24 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 069e7413f1ef..a45223f0cca5 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -1489,9 +1489,10 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq, int seg_subdescs = 0, desc_cnt = 0; int seg_len, total_len, data_left; int hdr_qentry = qentry; - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + int hdr_len; + + hdr_len = tso_start(skb, &tso); - tso_start(skb, &tso); total_len = skb->len - hdr_len; while (total_len > 0) { char *hdr; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 2d0d313ee7c5..9f80a33c5b16 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -710,8 +710,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - int total_len, data_left; + int hdr_len, total_len, data_left; struct bufdesc *bdp = txq->bd.cur; struct tso_t tso; unsigned int index = 0; @@ -731,7 +730,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, } /* Initialize the TSO handler, and prepare the first payload */ - tso_start(skb, &tso); + hdr_len = tso_start(skb, &tso); total_len = skb->len - hdr_len; while (total_len > 0) { diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 4d4b6243318a..90e6111ce534 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -816,10 +816,9 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb, struct net_device *dev) { struct mv643xx_eth_private *mp = txq_to_mp(txq); - int total_len, data_left, ret; + int hdr_len, total_len, data_left, ret; int desc_count = 0; struct tso_t tso; - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); struct tx_desc *first_tx_desc; u32 first_cmd_sts = 0; @@ -832,7 +831,7 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb, first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc]; /* Initialize the TSO handler, and prepare the first payload */ - tso_start(skb, &tso); + hdr_len = tso_start(skb, &tso); total_len = skb->len - hdr_len; while (total_len > 0) { diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 946925bbcb2d..95b447c14411 100644 --- 
a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2604,11 +2604,10 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq, static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev, struct mvneta_tx_queue *txq) { - int total_len, data_left; + int hdr_len, total_len, data_left; int desc_count = 0; struct mvneta_port *pp = netdev_priv(dev); struct tso_t tso; - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); int i; /* Count needed descriptors */ @@ -2621,7 +2620,7 @@ static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev, } /* Initialize the TSO handler, and prepare the first payload */ - tso_start(skb, &tso); + hdr_len = tso_start(skb, &tso); total_len = skb->len - hdr_len; while (total_len > 0) { diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 24f4d8e0da98..e9f287568026 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -3160,9 +3160,8 @@ static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev, struct mvpp2_txq_pcpu *txq_pcpu) { struct mvpp2_port *port = netdev_priv(dev); + int hdr_sz, i, len, descs = 0; struct tso_t tso; - int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb); - int i, len, descs = 0; /* Check number of available descriptors */ if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) || @@ -3170,7 +3169,8 @@ static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev, tso_count_descs(skb))) return 0; - tso_start(skb, &tso); + hdr_sz = tso_start(skb, &tso); + len = skb->len - hdr_sz; while (len > 0) { int left = min_t(int, skb_shinfo(skb)->gso_size, len); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index b04f5429d72d..3a5b34a2a7a6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -619,13 +619,14 @@ static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, struct sk_buff *skb, u16 qidx) { struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx); - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - int tcp_data, seg_len, pkt_len, offset; + int hdr_len, tcp_data, seg_len, pkt_len, offset; struct nix_sqe_hdr_s *sqe_hdr; int first_sqe = sq->head; struct sg_list list; struct tso_t tso; + hdr_len = tso_start(skb, &tso); + /* Map SKB's fragments to DMA. * It's done here to avoid mapping for every TSO segment's packet. 
*/ @@ -636,7 +637,6 @@ static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, netdev_tx_sent_queue(txq, skb->len); - tso_start(skb, &tso); tcp_data = skb->len - hdr_len; while (tcp_data > 0) { char *hdr; diff --git a/include/net/tso.h b/include/net/tso.h index 32d9272ade6a..62c98a9c60f1 100644 --- a/include/net/tso.h +++ b/include/net/tso.h @@ -11,6 +11,7 @@ struct tso_t { int size; void *data; u16 ip_id; + u8 tlen; /* transport header len */ bool ipv6; u32 tcp_seq; }; @@ -19,6 +20,6 @@ int tso_count_descs(const struct sk_buff *skb); void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso, int size, bool is_last); void tso_build_data(const struct sk_buff *skb, struct tso_t *tso, int size); -void tso_start(struct sk_buff *skb, struct tso_t *tso); +int tso_start(struct sk_buff *skb, struct tso_t *tso); #endif /* _TSO_H */ diff --git a/net/core/tso.c b/net/core/tso.c index 56487e3bb26d..9f35518815bd 100644 --- a/net/core/tso.c +++ b/net/core/tso.c @@ -17,7 +17,7 @@ void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso, int size, bool is_last) { struct tcphdr *tcph; - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + int hdr_len = skb_transport_offset(skb) + tso->tlen; int mac_hdr_len = skb_network_offset(skb); memcpy(hdr, skb->data, hdr_len); @@ -30,7 +30,7 @@ void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso, } else { struct ipv6hdr *iph = (void *)(hdr + mac_hdr_len); - iph->payload_len = htons(size + tcp_hdrlen(skb)); + iph->payload_len = htons(size + tso->tlen); } tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb)); put_unaligned_be32(tso->tcp_seq, &tcph->seq); @@ -62,10 +62,12 @@ void tso_build_data(const struct sk_buff *skb, struct tso_t *tso, int size) } EXPORT_SYMBOL(tso_build_data); -void tso_start(struct sk_buff *skb, struct tso_t *tso) +int tso_start(struct sk_buff *skb, struct tso_t *tso) { - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + int tlen = tcp_hdrlen(skb); + int hdr_len = skb_transport_offset(skb) + tlen; + tso->tlen = tlen; tso->ip_id = ntohs(ip_hdr(skb)->id); tso->tcp_seq = ntohl(tcp_hdr(skb)->seq); tso->next_frag_idx = 0; @@ -83,5 +85,6 @@ void tso_start(struct sk_buff *skb, struct tso_t *tso) tso->data = skb_frag_address(frag); tso->next_frag_idx++; } + return hdr_len; } EXPORT_SYMBOL(tso_start); -- cgit v1.2.3 From 91c7eaa686c3b7ae2d5b2aed22a45a02c8baa30e Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Thu, 18 Jun 2020 11:42:53 +0200 Subject: USB: rename USB quirk to USB_QUIRK_ENDPOINT_IGNORE MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The USB core has a quirk flag to ignore specific endpoints, so rename it to be more obvious what this quirk does. 
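A compact sketch of how the renamed quirk is consumed: the device-level flag gates a lookup of specific endpoint addresses to drop. This is a toy model, not the actual drivers/usb/core code; the single table entry mirrors the Sound Devices USBPre2 case handled by this quirk.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct ignored_ep {
	uint16_t vid;
	uint16_t pid;
	uint8_t  ep_address;
};

static const struct ignored_ep ignore_table[] = {
	{ 0x0926, 0x0202, 0x85 },	/* Sound Devices USBPre2 */
};

/* Called (in this model) only for devices flagged with
 * USB_QUIRK_ENDPOINT_IGNORE; returns true if the endpoint should be
 * skipped while parsing the configuration descriptors.
 */
static bool endpoint_is_ignored(uint16_t vid, uint16_t pid, uint8_t ep_address)
{
	size_t i;

	for (i = 0; i < sizeof(ignore_table) / sizeof(ignore_table[0]); i++) {
		if (ignore_table[i].vid == vid && ignore_table[i].pid == pid &&
		    ignore_table[i].ep_address == ep_address)
			return true;
	}

	return false;
}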
Cc: Johan Hovold Cc: Alan Stern Cc: Richard Dodd Cc: Hans de Goede Cc: Jonathan Cox Cc: Bastien Nocera Cc: "Thiébaud Weksteen" Cc: Nishad Kamdar Link: https://lore.kernel.org/r/20200618094300.1887727-2-gregkh@linuxfoundation.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/config.c | 8 ++++---- drivers/usb/core/quirks.c | 18 +++++++++--------- drivers/usb/core/usb.h | 2 +- include/linux/usb/quirks.h | 4 ++-- 4 files changed, 16 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index b7918f695434..37442f423a41 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -298,10 +298,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, goto skip_to_next_endpoint_or_interface_descriptor; } - /* Ignore blacklisted endpoints */ - if (udev->quirks & USB_QUIRK_ENDPOINT_BLACKLIST) { - if (usb_endpoint_is_blacklisted(udev, ifp, d)) { - dev_warn(ddev, "config %d interface %d altsetting %d has a blacklisted endpoint with address 0x%X, skipping\n", + /* Ignore some endpoints */ + if (udev->quirks & USB_QUIRK_ENDPOINT_IGNORE) { + if (usb_endpoint_is_ignored(udev, ifp, d)) { + dev_warn(ddev, "config %d interface %d altsetting %d has an ignored endpoint with address 0x%X, skipping\n", cfgno, inum, asnum, d->bEndpointAddress); goto skip_to_next_endpoint_or_interface_descriptor; diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 3e8efe759c3e..20dccf34182d 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -359,7 +359,7 @@ static const struct usb_device_id usb_quirk_list[] = { /* Sound Devices USBPre2 */ { USB_DEVICE(0x0926, 0x0202), .driver_info = - USB_QUIRK_ENDPOINT_BLACKLIST }, + USB_QUIRK_ENDPOINT_IGNORE }, /* Keytouch QWERTY Panel keyboard */ { USB_DEVICE(0x0926, 0x3333), .driver_info = @@ -493,24 +493,24 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = { }; /* - * Entries for blacklisted endpoints that should be ignored when parsing - * configuration descriptors. + * Entries for endpoints that should be ignored when parsing configuration + * descriptors. * - * Matched for devices with USB_QUIRK_ENDPOINT_BLACKLIST. + * Matched for devices with USB_QUIRK_ENDPOINT_IGNORE. 
*/ -static const struct usb_device_id usb_endpoint_blacklist[] = { +static const struct usb_device_id usb_endpoint_ignore[] = { { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 }, { } }; -bool usb_endpoint_is_blacklisted(struct usb_device *udev, - struct usb_host_interface *intf, - struct usb_endpoint_descriptor *epd) +bool usb_endpoint_is_ignored(struct usb_device *udev, + struct usb_host_interface *intf, + struct usb_endpoint_descriptor *epd) { const struct usb_device_id *id; unsigned int address; - for (id = usb_endpoint_blacklist; id->match_flags; ++id) { + for (id = usb_endpoint_ignore; id->match_flags; ++id) { if (!usb_match_device(udev, id)) continue; diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h index 19e4c550bc73..98e7d1ee63dc 100644 --- a/drivers/usb/core/usb.h +++ b/drivers/usb/core/usb.h @@ -37,7 +37,7 @@ extern void usb_authorize_interface(struct usb_interface *); extern void usb_detect_quirks(struct usb_device *udev); extern void usb_detect_interface_quirks(struct usb_device *udev); extern void usb_release_quirk_list(void); -extern bool usb_endpoint_is_blacklisted(struct usb_device *udev, +extern bool usb_endpoint_is_ignored(struct usb_device *udev, struct usb_host_interface *intf, struct usb_endpoint_descriptor *epd); extern int usb_remove_device(struct usb_device *udev); diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h index 22c1f579afe3..5e4c497f54d6 100644 --- a/include/linux/usb/quirks.h +++ b/include/linux/usb/quirks.h @@ -69,7 +69,7 @@ /* Hub needs extra delay after resetting its port. */ #define USB_QUIRK_HUB_SLOW_RESET BIT(14) -/* device has blacklisted endpoints */ -#define USB_QUIRK_ENDPOINT_BLACKLIST BIT(15) +/* device has endpoints that should be ignored */ +#define USB_QUIRK_ENDPOINT_IGNORE BIT(15) #endif /* __LINUX_USB_QUIRKS_H */ -- cgit v1.2.3 From df06230106e95347ec613f1707a704b04737c59b Mon Sep 17 00:00:00 2001 From: Dmitry Shmidt Date: Wed, 10 Jun 2020 10:30:11 +0200 Subject: dt-bindings: clk: g12a-clkc: Add NNA CLK Source clock IDs This adds the Neural Network Accelerator IP source clocks. Signed-off-by: Dmitry Shmidt Signed-off-by: Neil Armstrong Signed-off-by: Jerome Brunet Acked-by: Rob Herring Link: https://lore.kernel.org/r/20200610083012.5024-2-narmstrong@baylibre.com --- include/dt-bindings/clock/g12a-clkc.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/dt-bindings/clock/g12a-clkc.h b/include/dt-bindings/clock/g12a-clkc.h index b0d65d73db96..40d49940d8a8 100644 --- a/include/dt-bindings/clock/g12a-clkc.h +++ b/include/dt-bindings/clock/g12a-clkc.h @@ -145,5 +145,7 @@ #define CLKID_CPU3_CLK 255 #define CLKID_SPICC0_SCLK 258 #define CLKID_SPICC1_SCLK 261 +#define CLKID_NNA_AXI_CLK 264 +#define CLKID_NNA_CORE_CLK 267 #endif /* __G12A_CLKC_H */ -- cgit v1.2.3 From 5bf74682118b3003c8f9b0b0ec596e473fc6eb82 Mon Sep 17 00:00:00 2001 From: "Andrea Parri (Microsoft)" Date: Wed, 17 Jun 2020 18:46:35 +0200 Subject: Drivers: hv: vmbus: Remove the target_vp field from the vmbus_channel struct The field is read only in __vmbus_open() and it is already stored twice (after a call to hv_cpu_number_to_vp_number()) in target_cpu_store() and init_vp_index(); there is no need to "cache" its value in the channel data structure. 
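Put differently, the single consumer (__vmbus_open()) now derives the VP number from the channel's authoritative target_cpu at the point of use; the sketch below only condenses the hunk further down and adds no new code:

    /* before: target_vp cached in struct vmbus_channel and kept in sync */
    open_msg->target_vp = newchannel->target_vp;

    /* after: compute it on demand from target_cpu */
    open_msg->target_vp = hv_cpu_number_to_vp_number(newchannel->target_cpu);

target_cpu_store() and init_vp_index() consequently stop writing the duplicated field, as the remaining hunks show.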
Suggested-by: Michael Kelley Signed-off-by: Andrea Parri (Microsoft) Link: https://lore.kernel.org/r/20200617164642.37393-2-parri.andrea@gmail.com Reviewed-by: Michael Kelley Signed-off-by: Wei Liu --- drivers/hv/channel.c | 3 ++- drivers/hv/channel_mgmt.c | 3 --- drivers/hv/vmbus_drv.c | 2 -- include/linux/hyperv.h | 15 +++++++-------- 4 files changed, 9 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 90070b337c10..8848d1548b3f 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -18,6 +18,7 @@ #include #include #include +#include #include "hyperv_vmbus.h" @@ -176,7 +177,7 @@ static int __vmbus_open(struct vmbus_channel *newchannel, open_msg->child_relid = newchannel->offermsg.child_relid; open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle; open_msg->downstream_ringbuffer_pageoffset = newchannel->ringbuffer_send_offset; - open_msg->target_vp = newchannel->target_vp; + open_msg->target_vp = hv_cpu_number_to_vp_number(newchannel->target_cpu); if (userdatalen) memcpy(open_msg->userdata, userdata, userdatalen); diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 417a95e5094d..278e39221807 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -704,8 +704,6 @@ static void init_vp_index(struct vmbus_channel *channel) */ channel->numa_node = cpu_to_node(VMBUS_CONNECT_CPU); channel->target_cpu = VMBUS_CONNECT_CPU; - channel->target_vp = - hv_cpu_number_to_vp_number(VMBUS_CONNECT_CPU); if (perf_chn) hv_set_alloced_cpu(VMBUS_CONNECT_CPU); return; @@ -739,7 +737,6 @@ static void init_vp_index(struct vmbus_channel *channel) cpumask_set_cpu(target_cpu, alloced_mask); channel->target_cpu = target_cpu; - channel->target_vp = hv_cpu_number_to_vp_number(target_cpu); free_cpumask_var(available_mask); } diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 9147ee9d5f7d..d2ddb46f1359 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -23,7 +23,6 @@ #include #include -#include #include #include #include @@ -1779,7 +1778,6 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel, */ channel->target_cpu = target_cpu; - channel->target_vp = hv_cpu_number_to_vp_number(target_cpu); channel->numa_node = cpu_to_node(target_cpu); /* See init_vp_index(). */ diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 40df3103e890..738efdb194b0 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -803,15 +803,14 @@ struct vmbus_channel { u64 sig_event; /* - * Starting with win8, this field will be used to specify - * the target virtual processor on which to deliver the interrupt for - * the host to guest communication. - * Prior to win8, incoming channel interrupts would only - * be delivered on cpu 0. Setting this value to 0 would - * preserve the earlier behavior. + * Starting with win8, this field will be used to specify the + * target CPU on which to deliver the interrupt for the host + * to guest communication. + * + * Prior to win8, incoming channel interrupts would only be + * delivered on CPU 0. Setting this value to 0 would preserve + * the earlier behavior. 
*/ - u32 target_vp; - /* The corresponding CPUID in the guest */ u32 target_cpu; int numa_node; /* -- cgit v1.2.3 From 458d090fbad59d1f849ee15e78d0471784d428b6 Mon Sep 17 00:00:00 2001 From: "Andrea Parri (Microsoft)" Date: Wed, 17 Jun 2020 18:46:36 +0200 Subject: Drivers: hv: vmbus: Remove the numa_node field from the vmbus_channel struct The field is read only in numa_node_show() and it is already stored twice (after a call to cpu_to_node()) in target_cpu_store() and init_vp_index(); there is no need to "cache" its value in the channel data structure. Signed-off-by: Andrea Parri (Microsoft) Link: https://lore.kernel.org/r/20200617164642.37393-3-parri.andrea@gmail.com Reviewed-by: Michael Kelley Signed-off-by: Wei Liu --- drivers/hv/channel_mgmt.c | 2 -- drivers/hv/vmbus_drv.c | 3 +-- include/linux/hyperv.h | 1 - 3 files changed, 1 insertion(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 278e39221807..36dd8b6c544a 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -702,7 +702,6 @@ static void init_vp_index(struct vmbus_channel *channel) * In case alloc_cpumask_var() fails, bind it to * VMBUS_CONNECT_CPU. */ - channel->numa_node = cpu_to_node(VMBUS_CONNECT_CPU); channel->target_cpu = VMBUS_CONNECT_CPU; if (perf_chn) hv_set_alloced_cpu(VMBUS_CONNECT_CPU); @@ -719,7 +718,6 @@ static void init_vp_index(struct vmbus_channel *channel) continue; break; } - channel->numa_node = numa_node; alloced_mask = &hv_context.hv_numa_map[numa_node]; if (cpumask_weight(alloced_mask) == diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index d2ddb46f1359..de44d76c8ace 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -226,7 +226,7 @@ static ssize_t numa_node_show(struct device *dev, if (!hv_dev->channel) return -ENODEV; - return sprintf(buf, "%d\n", hv_dev->channel->numa_node); + return sprintf(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu)); } static DEVICE_ATTR_RO(numa_node); #endif @@ -1778,7 +1778,6 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel, */ channel->target_cpu = target_cpu; - channel->numa_node = cpu_to_node(target_cpu); /* See init_vp_index(). */ if (hv_is_perf_channel(channel)) diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 738efdb194b0..690394b79d72 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -812,7 +812,6 @@ struct vmbus_channel { * the earlier behavior. */ u32 target_cpu; - int numa_node; /* * Support for sub-channels. For high performance devices, * it will be useful to have multiple sub-channels to support -- cgit v1.2.3 From e32b16c31339e37d3cdc814f2773b74c1dbf660c Mon Sep 17 00:00:00 2001 From: Prashant Malani Date: Thu, 28 May 2020 04:36:03 -0700 Subject: platform/chrome: cros_ec: Update mux state bits Sync the EC_CMD_USB_PD_MUX_INFO mux state bit fields with the Chrome EC code base. The newly added bit fields will be used for cros-ec-typec mux control. 
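For orientation, a minimal sketch of how a consumer might decode the widened flag set (the helper name report_mux_state() is hypothetical and not part of this patch; resp is assumed to be a filled-in struct ec_response_usb_pd_mux_info returned for EC_CMD_USB_PD_MUX_INFO):

    /* assumes <linux/printk.h> and <linux/platform_data/cros_ec_commands.h> */
    static void report_mux_state(const struct ec_response_usb_pd_mux_info *resp)
    {
            u8 flags = resp->flags; /* USB_PD_MUX_*-encoded state */

            if (flags == USB_PD_MUX_NONE)
                    pr_info("mux open\n");
            if (flags & USB_PD_MUX_SAFE_MODE)
                    pr_info("DP in safe mode\n");
            if (flags & USB_PD_MUX_TBT_COMPAT_ENABLED)
                    pr_info("Thunderbolt-compat mode enabled\n");
            if (flags & USB_PD_MUX_USB4_ENABLED)
                    pr_info("USB4 mode enabled\n");
    }

The cros-ec-typec mux control mentioned above would key off these same bits.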
Signed-off-by: Prashant Malani Signed-off-by: Enric Balletbo i Serra --- include/linux/platform_data/cros_ec_commands.h | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index 69210881ebac..a7b0fc440c35 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -5207,11 +5207,15 @@ struct ec_params_usb_pd_mux_info { } __ec_align1; /* Flags representing mux state */ -#define USB_PD_MUX_USB_ENABLED BIT(0) /* USB connected */ -#define USB_PD_MUX_DP_ENABLED BIT(1) /* DP connected */ -#define USB_PD_MUX_POLARITY_INVERTED BIT(2) /* CC line Polarity inverted */ -#define USB_PD_MUX_HPD_IRQ BIT(3) /* HPD IRQ is asserted */ -#define USB_PD_MUX_HPD_LVL BIT(4) /* HPD level is asserted */ +#define USB_PD_MUX_NONE 0 /* Open switch */ +#define USB_PD_MUX_USB_ENABLED BIT(0) /* USB connected */ +#define USB_PD_MUX_DP_ENABLED BIT(1) /* DP connected */ +#define USB_PD_MUX_POLARITY_INVERTED BIT(2) /* CC line Polarity inverted */ +#define USB_PD_MUX_HPD_IRQ BIT(3) /* HPD IRQ is asserted */ +#define USB_PD_MUX_HPD_LVL BIT(4) /* HPD level is asserted */ +#define USB_PD_MUX_SAFE_MODE BIT(5) /* DP is in safe mode */ +#define USB_PD_MUX_TBT_COMPAT_ENABLED BIT(6) /* TBT compat enabled */ +#define USB_PD_MUX_USB4_ENABLED BIT(7) /* USB4 enabled */ struct ec_response_usb_pd_mux_info { uint8_t flags; /* USB_PD_MUX_*-encoded USB mux state */ -- cgit v1.2.3 From 4b61d3e8d3daebbde7ec02d593f84248fdf8bec2 Mon Sep 17 00:00:00 2001 From: Po Liu Date: Fri, 19 Jun 2020 14:01:07 +0800 Subject: net: qos offload add flow status with dropped count This patch adds a drop frames counter to tc flower offloading. Reporting h/w dropped frames is necessary for some actions. Some actions like police action and the coming introduced stream gate action would produce dropped frames which is necessary for user. Status update shows how many filtered packets increasing and how many dropped in those packets. v2: Changes - Update commit comments suggest by Jiri Pirko. Signed-off-by: Po Liu Reviewed-by: Simon Horman Reviewed-by: Vlad Buslov Signed-off-by: David S. 
Miller --- drivers/net/dsa/sja1105/sja1105_vl.c | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 2 +- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 2 +- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c | 2 +- drivers/net/ethernet/freescale/enetc/enetc_qos.c | 7 +++++-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 4 ++-- drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 2 +- drivers/net/ethernet/mscc/ocelot_flower.c | 2 +- drivers/net/ethernet/netronome/nfp/flower/offload.c | 2 +- drivers/net/ethernet/netronome/nfp/flower/qos_conf.c | 2 +- include/net/act_api.h | 11 ++++++----- include/net/flow_offload.h | 5 ++++- include/net/pkt_cls.h | 5 +++-- net/sched/act_api.c | 10 ++++------ net/sched/act_ct.c | 6 +++--- net/sched/act_gact.c | 7 ++++--- net/sched/act_gate.c | 6 +++--- net/sched/act_mirred.c | 6 +++--- net/sched/act_pedit.c | 6 +++--- net/sched/act_police.c | 4 ++-- net/sched/act_skbedit.c | 5 +++-- net/sched/act_vlan.c | 6 +++--- net/sched/cls_flower.c | 1 + net/sched/cls_matchall.c | 3 ++- 25 files changed, 60 insertions(+), 50 deletions(-) (limited to 'include') diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c index bdfd6c4e190d..9ddc49b7eb8f 100644 --- a/drivers/net/dsa/sja1105/sja1105_vl.c +++ b/drivers/net/dsa/sja1105/sja1105_vl.c @@ -771,7 +771,7 @@ int sja1105_vl_stats(struct sja1105_private *priv, int port, pkts = timingerr + unreleased + lengtherr; - flow_stats_update(stats, 0, pkts - rule->vl.stats.pkts, + flow_stats_update(stats, 0, pkts - rule->vl.stats.pkts, 0, jiffies - rule->vl.stats.lastused, FLOW_ACTION_HW_STATS_IMMEDIATE); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 0eef4f5e4a46..4d482d75a20b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -1638,7 +1638,7 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp, lastused = flow->lastused; spin_unlock(&flow->stats_lock); - flow_stats_update(&tc_flow_cmd->stats, stats.bytes, stats.packets, + flow_stats_update(&tc_flow_cmd->stats, stats.bytes, stats.packets, 0, lastused, FLOW_ACTION_HW_STATS_DELAYED); return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 4a5fa9eba0b6..030de20a5d27 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -902,7 +902,7 @@ int cxgb4_tc_flower_stats(struct net_device *dev, if (ofld_stats->prev_packet_count != packets) ofld_stats->last_used = jiffies; flow_stats_update(&cls->stats, bytes - ofld_stats->byte_count, - packets - ofld_stats->packet_count, + packets - ofld_stats->packet_count, 0, ofld_stats->last_used, FLOW_ACTION_HW_STATS_IMMEDIATE); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c index c88c47a14fbb..c439b5bce9c9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c @@ -346,7 +346,7 @@ int cxgb4_tc_matchall_stats(struct net_device *dev, flow_stats_update(&cls_matchall->stats, bytes - tc_port_matchall->ingress.bytes, packets - tc_port_matchall->ingress.packets, - tc_port_matchall->ingress.last_used, + 0, tc_port_matchall->ingress.last_used, FLOW_ACTION_HW_STATS_IMMEDIATE); tc_port_matchall->ingress.packets = packets; diff --git 
a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index fd3df19eaa32..fb76903eca90 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -1291,12 +1291,15 @@ static int enetc_psfp_get_stats(struct enetc_ndev_priv *priv, spin_lock(&epsfp.psfp_lock); stats.pkts = counters.matching_frames_count - filter->stats.pkts; + stats.drops = counters.not_passing_frames_count - + filter->stats.drops; stats.lastused = filter->stats.lastused; filter->stats.pkts += stats.pkts; + filter->stats.drops += stats.drops; spin_unlock(&epsfp.psfp_lock); - flow_stats_update(&f->stats, 0x0, stats.pkts, stats.lastused, - FLOW_ACTION_HW_STATS_DELAYED); + flow_stats_update(&f->stats, 0x0, stats.pkts, stats.drops, + stats.lastused, FLOW_ACTION_HW_STATS_DELAYED); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 430025550fad..c7107da03212 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -672,7 +672,7 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft, return -ENOENT; mlx5_fc_query_cached(entry->counter, &bytes, &packets, &lastuse); - flow_stats_update(&f->stats, bytes, packets, lastuse, + flow_stats_update(&f->stats, bytes, packets, 0, lastuse, FLOW_ACTION_HW_STATS_DELAYED); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 7fc84f58e28a..bc9c0ac15f99 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -4828,7 +4828,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, no_peer_counter: mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); out: - flow_stats_update(&f->stats, bytes, packets, lastuse, + flow_stats_update(&f->stats, bytes, packets, 0, lastuse, FLOW_ACTION_HW_STATS_DELAYED); trace_mlx5e_stats_flower(f); errout: @@ -4946,7 +4946,7 @@ void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv, dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets; dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes; rpriv->prev_vf_vport_stats = cur_stats; - flow_stats_update(&ma->stats, dbytes, dpkts, jiffies, + flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies, FLOW_ACTION_HW_STATS_DELAYED); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 51e1b3930c56..61d21043d83a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -633,7 +633,7 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp, if (err) goto err_rule_get_stats; - flow_stats_update(&f->stats, bytes, packets, lastuse, used_hw_stats); + flow_stats_update(&f->stats, bytes, packets, 0, lastuse, used_hw_stats); mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); return 0; diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index 5ce172e22b43..c90bafbd651f 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -244,7 +244,7 @@ int ocelot_cls_flower_stats(struct ocelot *ocelot, int port, if (ret) return ret; - flow_stats_update(&f->stats, 0x0, ace.stats.pkts, 0x0, + flow_stats_update(&f->stats, 0x0, ace.stats.pkts, 0, 0x0, 
FLOW_ACTION_HW_STATS_IMMEDIATE); return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 695d24b9dd92..234c652700e1 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1491,7 +1491,7 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev, nfp_flower_update_merge_stats(app, nfp_flow); flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes, - priv->stats[ctx_id].pkts, priv->stats[ctx_id].used, + priv->stats[ctx_id].pkts, 0, priv->stats[ctx_id].used, FLOW_ACTION_HW_STATS_DELAYED); priv->stats[ctx_id].pkts = 0; diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c index d18a830e4264..bb327d48d1ab 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c @@ -319,7 +319,7 @@ nfp_flower_stats_rate_limiter(struct nfp_app *app, struct net_device *netdev, prev_stats->bytes = curr_stats->bytes; spin_unlock_bh(&fl_priv->qos_stats_lock); - flow_stats_update(&flow->stats, diff_bytes, diff_pkts, + flow_stats_update(&flow->stats, diff_bytes, diff_pkts, 0, repr_priv->qos_table.last_update, FLOW_ACTION_HW_STATS_DELAYED); return 0; diff --git a/include/net/act_api.h b/include/net/act_api.h index 8c3934880670..cb382a89ea58 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -106,7 +106,7 @@ struct tc_action_ops { struct netlink_callback *, int, const struct tc_action_ops *, struct netlink_ext_ack *); - void (*stats_update)(struct tc_action *, u64, u32, u64, bool); + void (*stats_update)(struct tc_action *, u64, u64, u64, u64, bool); size_t (*get_fill_size)(const struct tc_action *act); struct net_device *(*get_dev)(const struct tc_action *a, tc_action_priv_destructor *destructor); @@ -232,8 +232,8 @@ static inline void tcf_action_inc_overlimit_qstats(struct tc_action *a) spin_unlock(&a->tcfa_lock); } -void tcf_action_update_stats(struct tc_action *a, u64 bytes, u32 packets, - bool drop, bool hw); +void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets, + u64 drops, bool hw); int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int); int tcf_action_check_ctrlact(int action, struct tcf_proto *tp, @@ -244,13 +244,14 @@ struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action, #endif /* CONFIG_NET_CLS_ACT */ static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes, - u64 packets, u64 lastuse, bool hw) + u64 packets, u64 drops, + u64 lastuse, bool hw) { #ifdef CONFIG_NET_CLS_ACT if (!a->ops->stats_update) return; - a->ops->stats_update(a, bytes, packets, lastuse, hw); + a->ops->stats_update(a, bytes, packets, drops, lastuse, hw); #endif } diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index f2c8311a0433..00c15f14c434 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -389,17 +389,20 @@ static inline bool flow_rule_match_key(const struct flow_rule *rule, struct flow_stats { u64 pkts; u64 bytes; + u64 drops; u64 lastused; enum flow_action_hw_stats used_hw_stats; bool used_hw_stats_valid; }; static inline void flow_stats_update(struct flow_stats *flow_stats, - u64 bytes, u64 pkts, u64 lastused, + u64 bytes, u64 pkts, + u64 drops, u64 lastused, enum flow_action_hw_stats used_hw_stats) { flow_stats->pkts += pkts; flow_stats->bytes += bytes; + flow_stats->drops += drops; flow_stats->lastused = 
max_t(u64, flow_stats->lastused, lastused); /* The driver should pass value with a maximum of one bit set. diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index ed65619cbc47..ff017e5b3ea2 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -262,7 +262,7 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts) static inline void tcf_exts_stats_update(const struct tcf_exts *exts, - u64 bytes, u64 packets, u64 lastuse, + u64 bytes, u64 packets, u64 drops, u64 lastuse, u8 used_hw_stats, bool used_hw_stats_valid) { #ifdef CONFIG_NET_CLS_ACT @@ -273,7 +273,8 @@ tcf_exts_stats_update(const struct tcf_exts *exts, for (i = 0; i < exts->nr_actions; i++) { struct tc_action *a = exts->actions[i]; - tcf_action_stats_update(a, bytes, packets, lastuse, true); + tcf_action_stats_update(a, bytes, packets, drops, + lastuse, true); a->used_hw_stats = used_hw_stats; a->used_hw_stats_valid = used_hw_stats_valid; } diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 8ac7eb0a8309..4c4466f18801 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -1059,14 +1059,13 @@ err: return err; } -void tcf_action_update_stats(struct tc_action *a, u64 bytes, u32 packets, - bool drop, bool hw) +void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets, + u64 drops, bool hw) { if (a->cpu_bstats) { _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets); - if (drop) - this_cpu_ptr(a->cpu_qstats)->drops += packets; + this_cpu_ptr(a->cpu_qstats)->drops += drops; if (hw) _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw), @@ -1075,8 +1074,7 @@ void tcf_action_update_stats(struct tc_action *a, u64 bytes, u32 packets, } _bstats_update(&a->tcfa_bstats, bytes, packets); - if (drop) - a->tcfa_qstats.drops += packets; + a->tcfa_qstats.drops += drops; if (hw) _bstats_update(&a->tcfa_bstats_hw, bytes, packets); } diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index e9f3576cbf71..1b9c6d4a1b6b 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -1450,12 +1450,12 @@ static int tcf_ct_search(struct net *net, struct tc_action **a, u32 index) return tcf_idr_search(tn, a, index); } -static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets, - u64 lastuse, bool hw) +static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets, + u64 drops, u64 lastuse, bool hw) { struct tcf_ct *c = to_ct(a); - tcf_action_update_stats(a, bytes, packets, false, hw); + tcf_action_update_stats(a, bytes, packets, drops, hw); c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse); } diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index 416065772719..410e3bbfb9ca 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -171,14 +171,15 @@ static int tcf_gact_act(struct sk_buff *skb, const struct tc_action *a, return action; } -static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u32 packets, - u64 lastuse, bool hw) +static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u64 packets, + u64 drops, u64 lastuse, bool hw) { struct tcf_gact *gact = to_gact(a); int action = READ_ONCE(gact->tcf_action); struct tcf_t *tm = &gact->tcf_tm; - tcf_action_update_stats(a, bytes, packets, action == TC_ACT_SHOT, hw); + tcf_action_update_stats(a, bytes, packets, + action == TC_ACT_SHOT ? 
packets : drops, hw); tm->lastuse = max_t(u64, tm->lastuse, lastuse); } diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c index 9c628591f452..c818844846b1 100644 --- a/net/sched/act_gate.c +++ b/net/sched/act_gate.c @@ -568,13 +568,13 @@ static int tcf_gate_walker(struct net *net, struct sk_buff *skb, return tcf_generic_walker(tn, skb, cb, type, ops, extack); } -static void tcf_gate_stats_update(struct tc_action *a, u64 bytes, u32 packets, - u64 lastuse, bool hw) +static void tcf_gate_stats_update(struct tc_action *a, u64 bytes, u64 packets, + u64 drops, u64 lastuse, bool hw) { struct tcf_gate *gact = to_gate(a); struct tcf_t *tm = &gact->tcf_tm; - tcf_action_update_stats(a, bytes, packets, false, hw); + tcf_action_update_stats(a, bytes, packets, drops, hw); tm->lastuse = max_t(u64, tm->lastuse, lastuse); } diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 83dd82fc9f40..b2705318993b 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -312,13 +312,13 @@ out: return retval; } -static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets, - u64 lastuse, bool hw) +static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets, + u64 drops, u64 lastuse, bool hw) { struct tcf_mirred *m = to_mirred(a); struct tcf_t *tm = &m->tcf_tm; - tcf_action_update_stats(a, bytes, packets, false, hw); + tcf_action_update_stats(a, bytes, packets, drops, hw); tm->lastuse = max_t(u64, tm->lastuse, lastuse); } diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index d41d6200d9de..66986db062ed 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -409,13 +409,13 @@ done: return p->tcf_action; } -static void tcf_pedit_stats_update(struct tc_action *a, u64 bytes, u32 packets, - u64 lastuse, bool hw) +static void tcf_pedit_stats_update(struct tc_action *a, u64 bytes, u64 packets, + u64 drops, u64 lastuse, bool hw) { struct tcf_pedit *d = to_pedit(a); struct tcf_t *tm = &d->tcf_tm; - tcf_action_update_stats(a, bytes, packets, false, hw); + tcf_action_update_stats(a, bytes, packets, drops, hw); tm->lastuse = max_t(u64, tm->lastuse, lastuse); } diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 8b7a0ac96c51..0b431d493768 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -288,13 +288,13 @@ static void tcf_police_cleanup(struct tc_action *a) } static void tcf_police_stats_update(struct tc_action *a, - u64 bytes, u32 packets, + u64 bytes, u64 packets, u64 drops, u64 lastuse, bool hw) { struct tcf_police *police = to_police(a); struct tcf_t *tm = &police->tcf_tm; - tcf_action_update_stats(a, bytes, packets, false, hw); + tcf_action_update_stats(a, bytes, packets, drops, hw); tm->lastuse = max_t(u64, tm->lastuse, lastuse); } diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index b125b2be4467..361b863e0634 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -74,12 +74,13 @@ err: } static void tcf_skbedit_stats_update(struct tc_action *a, u64 bytes, - u32 packets, u64 lastuse, bool hw) + u64 packets, u64 drops, + u64 lastuse, bool hw) { struct tcf_skbedit *d = to_skbedit(a); struct tcf_t *tm = &d->tcf_tm; - tcf_action_update_stats(a, bytes, packets, false, hw); + tcf_action_update_stats(a, bytes, packets, drops, hw); tm->lastuse = max_t(u64, tm->lastuse, lastuse); } diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index c91d3958fcbb..a5ff9f68ab02 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c @@ -302,13 +302,13 @@ static int tcf_vlan_walker(struct net *net, 
struct sk_buff *skb, return tcf_generic_walker(tn, skb, cb, type, ops, extack); } -static void tcf_vlan_stats_update(struct tc_action *a, u64 bytes, u32 packets, - u64 lastuse, bool hw) +static void tcf_vlan_stats_update(struct tc_action *a, u64 bytes, u64 packets, + u64 drops, u64 lastuse, bool hw) { struct tcf_vlan *v = to_vlan(a); struct tcf_t *tm = &v->tcf_tm; - tcf_action_update_stats(a, bytes, packets, false, hw); + tcf_action_update_stats(a, bytes, packets, drops, hw); tm->lastuse = max_t(u64, tm->lastuse, lastuse); } diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index b2da37286082..391971672d54 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -491,6 +491,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f, tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes, cls_flower.stats.pkts, + cls_flower.stats.drops, cls_flower.stats.lastused, cls_flower.stats.used_hw_stats, cls_flower.stats.used_hw_stats_valid); diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index 8d39dbcf1746..cafb84480bab 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c @@ -338,7 +338,8 @@ static void mall_stats_hw_filter(struct tcf_proto *tp, tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false, true); tcf_exts_stats_update(&head->exts, cls_mall.stats.bytes, - cls_mall.stats.pkts, cls_mall.stats.lastused, + cls_mall.stats.pkts, cls_mall.stats.drops, + cls_mall.stats.lastused, cls_mall.stats.used_hw_stats, cls_mall.stats.used_hw_stats_valid); } -- cgit v1.2.3 From 0efaaa86581c596f9426482c731f262d843807b6 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Mon, 15 Jun 2020 08:50:08 +0200 Subject: docs: crypto: convert asymmetric-keys.txt to ReST This file is almost compatible with ReST. Just minor changes were needed: - Adjust document and titles markups; - Adjust numbered list markups; - Add a comments markup for the Contents section; - Add markups for literal blocks. Acked-by: Jarkko Sakkinen Signed-off-by: Mauro Carvalho Chehab Link: https://lore.kernel.org/r/c2275ea94e0507a01b020ab66dfa824d8b1c2545.1592203650.git.mchehab+huawei@kernel.org Signed-off-by: Jonathan Corbet --- Documentation/crypto/asymmetric-keys.rst | 424 ++++++++++++++++++++++++++++++ Documentation/crypto/asymmetric-keys.txt | 429 ------------------------------- Documentation/crypto/index.rst | 1 + Documentation/security/keys/core.rst | 2 +- MAINTAINERS | 2 +- crypto/asymmetric_keys/asymmetric_type.c | 2 +- crypto/asymmetric_keys/public_key.c | 2 +- crypto/asymmetric_keys/signature.c | 2 +- include/crypto/public_key.h | 2 +- include/keys/asymmetric-parser.h | 2 +- include/keys/asymmetric-subtype.h | 2 +- include/keys/asymmetric-type.h | 2 +- 12 files changed, 434 insertions(+), 438 deletions(-) create mode 100644 Documentation/crypto/asymmetric-keys.rst delete mode 100644 Documentation/crypto/asymmetric-keys.txt (limited to 'include') diff --git a/Documentation/crypto/asymmetric-keys.rst b/Documentation/crypto/asymmetric-keys.rst new file mode 100644 index 000000000000..349f44a29392 --- /dev/null +++ b/Documentation/crypto/asymmetric-keys.rst @@ -0,0 +1,424 @@ +.. SPDX-License-Identifier: GPL-2.0 + +============================================= +Asymmetric / Public-key Cryptography Key Type +============================================= + +.. Contents: + + - Overview. + - Key identification. + - Accessing asymmetric keys. + - Signature verification. + - Asymmetric key subtypes. + - Instantiation data parsers. 
+ - Keyring link restrictions. + + +Overview +======== + +The "asymmetric" key type is designed to be a container for the keys used in +public-key cryptography, without imposing any particular restrictions on the +form or mechanism of the cryptography or form of the key. + +The asymmetric key is given a subtype that defines what sort of data is +associated with the key and provides operations to describe and destroy it. +However, no requirement is made that the key data actually be stored in the +key. + +A completely in-kernel key retention and operation subtype can be defined, but +it would also be possible to provide access to cryptographic hardware (such as +a TPM) that might be used to both retain the relevant key and perform +operations using that key. In such a case, the asymmetric key would then +merely be an interface to the TPM driver. + +Also provided is the concept of a data parser. Data parsers are responsible +for extracting information from the blobs of data passed to the instantiation +function. The first data parser that recognises the blob gets to set the +subtype of the key and define the operations that can be done on that key. + +A data parser may interpret the data blob as containing the bits representing a +key, or it may interpret it as a reference to a key held somewhere else in the +system (for example, a TPM). + + +Key Identification +================== + +If a key is added with an empty name, the instantiation data parsers are given +the opportunity to pre-parse a key and to determine the description the key +should be given from the content of the key. + +This can then be used to refer to the key, either by complete match or by +partial match. The key type may also use other criteria to refer to a key. + +The asymmetric key type's match function can then perform a wider range of +comparisons than just the straightforward comparison of the description with +the criterion string: + + 1) If the criterion string is of the form "id:" then the match + function will examine a key's fingerprint to see if the hex digits given + after the "id:" match the tail. For instance:: + + keyctl search @s asymmetric id:5acc2142 + + will match a key with fingerprint:: + + 1A00 2040 7601 7889 DE11 882C 3823 04AD 5ACC 2142 + + 2) If the criterion string is of the form ":" then the + match will match the ID as in (1), but with the added restriction that + only keys of the specified subtype (e.g. tpm) will be matched. For + instance:: + + keyctl search @s asymmetric tpm:5acc2142 + +Looking in /proc/keys, the last 8 hex digits of the key fingerprint are +displayed, along with the subtype:: + + 1a39e171 I----- 1 perm 3f010000 0 0 asymmetric modsign.0: DSA 5acc2142 [] + + +Accessing Asymmetric Keys +========================= + +For general access to asymmetric keys from within the kernel, the following +inclusion is required:: + + #include + +This gives access to functions for dealing with asymmetric / public keys. +Three enums are defined there for representing public-key cryptography +algorithms:: + + enum pkey_algo + +digest algorithms used by those:: + + enum pkey_hash_algo + +and key identifier representations:: + + enum pkey_id_type + +Note that the key type representation types are required because key +identifiers from different standards aren't necessarily compatible. For +instance, PGP generates key identifiers by hashing the key data plus some +PGP-specific metadata, whereas X.509 has arbitrary certificate identifiers. 
+ +The operations defined upon a key are: + + 1) Signature verification. + +Other operations are possible (such as encryption) with the same key data +required for verification, but not currently supported, and others +(eg. decryption and signature generation) require extra key data. + + +Signature Verification +---------------------- + +An operation is provided to perform cryptographic signature verification, using +an asymmetric key to provide or to provide access to the public key:: + + int verify_signature(const struct key *key, + const struct public_key_signature *sig); + +The caller must have already obtained the key from some source and can then use +it to check the signature. The caller must have parsed the signature and +transferred the relevant bits to the structure pointed to by sig:: + + struct public_key_signature { + u8 *digest; + u8 digest_size; + enum pkey_hash_algo pkey_hash_algo : 8; + u8 nr_mpi; + union { + MPI mpi[2]; + ... + }; + }; + +The algorithm used must be noted in sig->pkey_hash_algo, and all the MPIs that +make up the actual signature must be stored in sig->mpi[] and the count of MPIs +placed in sig->nr_mpi. + +In addition, the data must have been digested by the caller and the resulting +hash must be pointed to by sig->digest and the size of the hash be placed in +sig->digest_size. + +The function will return 0 upon success or -EKEYREJECTED if the signature +doesn't match. + +The function may also return -ENOTSUPP if an unsupported public-key algorithm +or public-key/hash algorithm combination is specified or the key doesn't +support the operation; -EBADMSG or -ERANGE if some of the parameters have weird +data; or -ENOMEM if an allocation can't be performed. -EINVAL can be returned +if the key argument is the wrong type or is incompletely set up. + + +Asymmetric Key Subtypes +======================= + +Asymmetric keys have a subtype that defines the set of operations that can be +performed on that key and that determines what data is attached as the key +payload. The payload format is entirely at the whim of the subtype. + +The subtype is selected by the key data parser and the parser must initialise +the data required for it. The asymmetric key retains a reference on the +subtype module. + +The subtype definition structure can be found in:: + + #include + +and looks like the following:: + + struct asymmetric_key_subtype { + struct module *owner; + const char *name; + + void (*describe)(const struct key *key, struct seq_file *m); + void (*destroy)(void *payload); + int (*query)(const struct kernel_pkey_params *params, + struct kernel_pkey_query *info); + int (*eds_op)(struct kernel_pkey_params *params, + const void *in, void *out); + int (*verify_signature)(const struct key *key, + const struct public_key_signature *sig); + }; + +Asymmetric keys point to this with their payload[asym_subtype] member. + +The owner and name fields should be set to the owning module and the name of +the subtype. Currently, the name is only used for print statements. + +There are a number of operations defined by the subtype: + + 1) describe(). + + Mandatory. This allows the subtype to display something in /proc/keys + against the key. For instance the name of the public key algorithm type + could be displayed. The key type will display the tail of the key + identity string after this. + + 2) destroy(). + + Mandatory. This should free the memory associated with the key. The + asymmetric key will look after freeing the fingerprint and releasing the + reference on the subtype module. 
+ + 3) query(). + + Mandatory. This is a function for querying the capabilities of a key. + + 4) eds_op(). + + Optional. This is the entry point for the encryption, decryption and + signature creation operations (which are distinguished by the operation ID + in the parameter struct). The subtype may do anything it likes to + implement an operation, including offloading to hardware. + + 5) verify_signature(). + + Optional. This is the entry point for signature verification. The + subtype may do anything it likes to implement an operation, including + offloading to hardware. + +Instantiation Data Parsers +========================== + +The asymmetric key type doesn't generally want to store or to deal with a raw +blob of data that holds the key data. It would have to parse it and error +check it each time it wanted to use it. Further, the contents of the blob may +have various checks that can be performed on it (eg. self-signatures, validity +dates) and may contain useful data about the key (identifiers, capabilities). + +Also, the blob may represent a pointer to some hardware containing the key +rather than the key itself. + +Examples of blob formats for which parsers could be implemented include: + + - OpenPGP packet stream [RFC 4880]. + - X.509 ASN.1 stream. + - Pointer to TPM key. + - Pointer to UEFI key. + - PKCS#8 private key [RFC 5208]. + - PKCS#5 encrypted private key [RFC 2898]. + +During key instantiation each parser in the list is tried until one doesn't +return -EBADMSG. + +The parser definition structure can be found in:: + + #include + +and looks like the following:: + + struct asymmetric_key_parser { + struct module *owner; + const char *name; + + int (*parse)(struct key_preparsed_payload *prep); + }; + +The owner and name fields should be set to the owning module and the name of +the parser. + +There is currently only a single operation defined by the parser, and it is +mandatory: + + 1) parse(). + + This is called to preparse the key from the key creation and update paths. + In particular, it is called during the key creation _before_ a key is + allocated, and as such, is permitted to provide the key's description in + the case that the caller declines to do so. + + The caller passes a pointer to the following struct with all of the fields + cleared, except for data, datalen and quotalen [see + Documentation/security/keys/core.rst]:: + + struct key_preparsed_payload { + char *description; + void *payload[4]; + const void *data; + size_t datalen; + size_t quotalen; + }; + + The instantiation data is in a blob pointed to by data and is datalen in + size. The parse() function is not permitted to change these two values at + all, and shouldn't change any of the other values _unless_ they are + recognise the blob format and will not return -EBADMSG to indicate it is + not theirs. + + If the parser is happy with the blob, it should propose a description for + the key and attach it to ->description, ->payload[asym_subtype] should be + set to point to the subtype to be used, ->payload[asym_crypto] should be + set to point to the initialised data for that subtype, + ->payload[asym_key_ids] should point to one or more hex fingerprints and + quotalen should be updated to indicate how much quota this key should + account for. + + When clearing up, the data attached to ->payload[asym_key_ids] and + ->description will be kfree()'d and the data attached to + ->payload[asm_crypto] will be passed to the subtype's ->destroy() method + to be disposed of. 
A module reference for the subtype pointed to by + ->payload[asym_subtype] will be put. + + + If the data format is not recognised, -EBADMSG should be returned. If it + is recognised, but the key cannot for some reason be set up, some other + negative error code should be returned. On success, 0 should be returned. + + The key's fingerprint string may be partially matched upon. For a + public-key algorithm such as RSA and DSA this will likely be a printable + hex version of the key's fingerprint. + +Functions are provided to register and unregister parsers:: + + int register_asymmetric_key_parser(struct asymmetric_key_parser *parser); + void unregister_asymmetric_key_parser(struct asymmetric_key_parser *subtype); + +Parsers may not have the same name. The names are otherwise only used for +displaying in debugging messages. + + +Keyring Link Restrictions +========================= + +Keyrings created from userspace using add_key can be configured to check the +signature of the key being linked. Keys without a valid signature are not +allowed to link. + +Several restriction methods are available: + + 1) Restrict using the kernel builtin trusted keyring + + - Option string used with KEYCTL_RESTRICT_KEYRING: + - "builtin_trusted" + + The kernel builtin trusted keyring will be searched for the signing key. + If the builtin trusted keyring is not configured, all links will be + rejected. The ca_keys kernel parameter also affects which keys are used + for signature verification. + + 2) Restrict using the kernel builtin and secondary trusted keyrings + + - Option string used with KEYCTL_RESTRICT_KEYRING: + - "builtin_and_secondary_trusted" + + The kernel builtin and secondary trusted keyrings will be searched for the + signing key. If the secondary trusted keyring is not configured, this + restriction will behave like the "builtin_trusted" option. The ca_keys + kernel parameter also affects which keys are used for signature + verification. + + 3) Restrict using a separate key or keyring + + - Option string used with KEYCTL_RESTRICT_KEYRING: + - "key_or_keyring:[:chain]" + + Whenever a key link is requested, the link will only succeed if the key + being linked is signed by one of the designated keys. This key may be + specified directly by providing a serial number for one asymmetric key, or + a group of keys may be searched for the signing key by providing the + serial number for a keyring. + + When the "chain" option is provided at the end of the string, the keys + within the destination keyring will also be searched for signing keys. + This allows for verification of certificate chains by adding each + certificate in order (starting closest to the root) to a keyring. For + instance, one keyring can be populated with links to a set of root + certificates, with a separate, restricted keyring set up for each + certificate chain to be validated:: + + # Create and populate a keyring for root certificates + root_id=`keyctl add keyring root-certs "" @s` + keyctl padd asymmetric "" $root_id < root1.cert + keyctl padd asymmetric "" $root_id < root2.cert + + # Create and restrict a keyring for the certificate chain + chain_id=`keyctl add keyring chain "" @s` + keyctl restrict_keyring $chain_id asymmetric key_or_keyring:$root_id:chain + + # Attempt to add each certificate in the chain, starting with the + # certificate closest to the root. 
+ keyctl padd asymmetric "" $chain_id < intermediateA.cert + keyctl padd asymmetric "" $chain_id < intermediateB.cert + keyctl padd asymmetric "" $chain_id < end-entity.cert + + If the final end-entity certificate is successfully added to the "chain" + keyring, we can be certain that it has a valid signing chain going back to + one of the root certificates. + + A single keyring can be used to verify a chain of signatures by + restricting the keyring after linking the root certificate:: + + # Create a keyring for the certificate chain and add the root + chain2_id=`keyctl add keyring chain2 "" @s` + keyctl padd asymmetric "" $chain2_id < root1.cert + + # Restrict the keyring that already has root1.cert linked. The cert + # will remain linked by the keyring. + keyctl restrict_keyring $chain2_id asymmetric key_or_keyring:0:chain + + # Attempt to add each certificate in the chain, starting with the + # certificate closest to the root. + keyctl padd asymmetric "" $chain2_id < intermediateA.cert + keyctl padd asymmetric "" $chain2_id < intermediateB.cert + keyctl padd asymmetric "" $chain2_id < end-entity.cert + + If the final end-entity certificate is successfully added to the "chain2" + keyring, we can be certain that there is a valid signing chain going back + to the root certificate that was added before the keyring was restricted. + + +In all of these cases, if the signing key is found the signature of the key to +be linked will be verified using the signing key. The requested key is added +to the keyring only if the signature is successfully verified. -ENOKEY is +returned if the parent certificate could not be found, or -EKEYREJECTED is +returned if the signature check fails or the key is blacklisted. Other errors +may be returned if the signature check could not be performed. diff --git a/Documentation/crypto/asymmetric-keys.txt b/Documentation/crypto/asymmetric-keys.txt deleted file mode 100644 index 8763866b11cf..000000000000 --- a/Documentation/crypto/asymmetric-keys.txt +++ /dev/null @@ -1,429 +0,0 @@ - ============================================= - ASYMMETRIC / PUBLIC-KEY CRYPTOGRAPHY KEY TYPE - ============================================= - -Contents: - - - Overview. - - Key identification. - - Accessing asymmetric keys. - - Signature verification. - - Asymmetric key subtypes. - - Instantiation data parsers. - - Keyring link restrictions. - - -======== -OVERVIEW -======== - -The "asymmetric" key type is designed to be a container for the keys used in -public-key cryptography, without imposing any particular restrictions on the -form or mechanism of the cryptography or form of the key. - -The asymmetric key is given a subtype that defines what sort of data is -associated with the key and provides operations to describe and destroy it. -However, no requirement is made that the key data actually be stored in the -key. - -A completely in-kernel key retention and operation subtype can be defined, but -it would also be possible to provide access to cryptographic hardware (such as -a TPM) that might be used to both retain the relevant key and perform -operations using that key. In such a case, the asymmetric key would then -merely be an interface to the TPM driver. - -Also provided is the concept of a data parser. Data parsers are responsible -for extracting information from the blobs of data passed to the instantiation -function. The first data parser that recognises the blob gets to set the -subtype of the key and define the operations that can be done on that key. 
- -A data parser may interpret the data blob as containing the bits representing a -key, or it may interpret it as a reference to a key held somewhere else in the -system (for example, a TPM). - - -================== -KEY IDENTIFICATION -================== - -If a key is added with an empty name, the instantiation data parsers are given -the opportunity to pre-parse a key and to determine the description the key -should be given from the content of the key. - -This can then be used to refer to the key, either by complete match or by -partial match. The key type may also use other criteria to refer to a key. - -The asymmetric key type's match function can then perform a wider range of -comparisons than just the straightforward comparison of the description with -the criterion string: - - (1) If the criterion string is of the form "id:" then the match - function will examine a key's fingerprint to see if the hex digits given - after the "id:" match the tail. For instance: - - keyctl search @s asymmetric id:5acc2142 - - will match a key with fingerprint: - - 1A00 2040 7601 7889 DE11 882C 3823 04AD 5ACC 2142 - - (2) If the criterion string is of the form ":" then the - match will match the ID as in (1), but with the added restriction that - only keys of the specified subtype (e.g. tpm) will be matched. For - instance: - - keyctl search @s asymmetric tpm:5acc2142 - -Looking in /proc/keys, the last 8 hex digits of the key fingerprint are -displayed, along with the subtype: - - 1a39e171 I----- 1 perm 3f010000 0 0 asymmetric modsign.0: DSA 5acc2142 [] - - -========================= -ACCESSING ASYMMETRIC KEYS -========================= - -For general access to asymmetric keys from within the kernel, the following -inclusion is required: - - #include - -This gives access to functions for dealing with asymmetric / public keys. -Three enums are defined there for representing public-key cryptography -algorithms: - - enum pkey_algo - -digest algorithms used by those: - - enum pkey_hash_algo - -and key identifier representations: - - enum pkey_id_type - -Note that the key type representation types are required because key -identifiers from different standards aren't necessarily compatible. For -instance, PGP generates key identifiers by hashing the key data plus some -PGP-specific metadata, whereas X.509 has arbitrary certificate identifiers. - -The operations defined upon a key are: - - (1) Signature verification. - -Other operations are possible (such as encryption) with the same key data -required for verification, but not currently supported, and others -(eg. decryption and signature generation) require extra key data. - - -SIGNATURE VERIFICATION ----------------------- - -An operation is provided to perform cryptographic signature verification, using -an asymmetric key to provide or to provide access to the public key. - - int verify_signature(const struct key *key, - const struct public_key_signature *sig); - -The caller must have already obtained the key from some source and can then use -it to check the signature. The caller must have parsed the signature and -transferred the relevant bits to the structure pointed to by sig. - - struct public_key_signature { - u8 *digest; - u8 digest_size; - enum pkey_hash_algo pkey_hash_algo : 8; - u8 nr_mpi; - union { - MPI mpi[2]; - ... - }; - }; - -The algorithm used must be noted in sig->pkey_hash_algo, and all the MPIs that -make up the actual signature must be stored in sig->mpi[] and the count of MPIs -placed in sig->nr_mpi. 
- -In addition, the data must have been digested by the caller and the resulting -hash must be pointed to by sig->digest and the size of the hash be placed in -sig->digest_size. - -The function will return 0 upon success or -EKEYREJECTED if the signature -doesn't match. - -The function may also return -ENOTSUPP if an unsupported public-key algorithm -or public-key/hash algorithm combination is specified or the key doesn't -support the operation; -EBADMSG or -ERANGE if some of the parameters have weird -data; or -ENOMEM if an allocation can't be performed. -EINVAL can be returned -if the key argument is the wrong type or is incompletely set up. - - -======================= -ASYMMETRIC KEY SUBTYPES -======================= - -Asymmetric keys have a subtype that defines the set of operations that can be -performed on that key and that determines what data is attached as the key -payload. The payload format is entirely at the whim of the subtype. - -The subtype is selected by the key data parser and the parser must initialise -the data required for it. The asymmetric key retains a reference on the -subtype module. - -The subtype definition structure can be found in: - - #include - -and looks like the following: - - struct asymmetric_key_subtype { - struct module *owner; - const char *name; - - void (*describe)(const struct key *key, struct seq_file *m); - void (*destroy)(void *payload); - int (*query)(const struct kernel_pkey_params *params, - struct kernel_pkey_query *info); - int (*eds_op)(struct kernel_pkey_params *params, - const void *in, void *out); - int (*verify_signature)(const struct key *key, - const struct public_key_signature *sig); - }; - -Asymmetric keys point to this with their payload[asym_subtype] member. - -The owner and name fields should be set to the owning module and the name of -the subtype. Currently, the name is only used for print statements. - -There are a number of operations defined by the subtype: - - (1) describe(). - - Mandatory. This allows the subtype to display something in /proc/keys - against the key. For instance the name of the public key algorithm type - could be displayed. The key type will display the tail of the key - identity string after this. - - (2) destroy(). - - Mandatory. This should free the memory associated with the key. The - asymmetric key will look after freeing the fingerprint and releasing the - reference on the subtype module. - - (3) query(). - - Mandatory. This is a function for querying the capabilities of a key. - - (4) eds_op(). - - Optional. This is the entry point for the encryption, decryption and - signature creation operations (which are distinguished by the operation ID - in the parameter struct). The subtype may do anything it likes to - implement an operation, including offloading to hardware. - - (5) verify_signature(). - - Optional. This is the entry point for signature verification. The - subtype may do anything it likes to implement an operation, including - offloading to hardware. - - -========================== -INSTANTIATION DATA PARSERS -========================== - -The asymmetric key type doesn't generally want to store or to deal with a raw -blob of data that holds the key data. It would have to parse it and error -check it each time it wanted to use it. Further, the contents of the blob may -have various checks that can be performed on it (eg. self-signatures, validity -dates) and may contain useful data about the key (identifiers, capabilities). 
- -Also, the blob may represent a pointer to some hardware containing the key -rather than the key itself. - -Examples of blob formats for which parsers could be implemented include: - - - OpenPGP packet stream [RFC 4880]. - - X.509 ASN.1 stream. - - Pointer to TPM key. - - Pointer to UEFI key. - - PKCS#8 private key [RFC 5208]. - - PKCS#5 encrypted private key [RFC 2898]. - -During key instantiation each parser in the list is tried until one doesn't -return -EBADMSG. - -The parser definition structure can be found in: - - #include - -and looks like the following: - - struct asymmetric_key_parser { - struct module *owner; - const char *name; - - int (*parse)(struct key_preparsed_payload *prep); - }; - -The owner and name fields should be set to the owning module and the name of -the parser. - -There is currently only a single operation defined by the parser, and it is -mandatory: - - (1) parse(). - - This is called to preparse the key from the key creation and update paths. - In particular, it is called during the key creation _before_ a key is - allocated, and as such, is permitted to provide the key's description in - the case that the caller declines to do so. - - The caller passes a pointer to the following struct with all of the fields - cleared, except for data, datalen and quotalen [see - Documentation/security/keys/core.rst]. - - struct key_preparsed_payload { - char *description; - void *payload[4]; - const void *data; - size_t datalen; - size_t quotalen; - }; - - The instantiation data is in a blob pointed to by data and is datalen in - size. The parse() function is not permitted to change these two values at - all, and shouldn't change any of the other values _unless_ they are - recognise the blob format and will not return -EBADMSG to indicate it is - not theirs. - - If the parser is happy with the blob, it should propose a description for - the key and attach it to ->description, ->payload[asym_subtype] should be - set to point to the subtype to be used, ->payload[asym_crypto] should be - set to point to the initialised data for that subtype, - ->payload[asym_key_ids] should point to one or more hex fingerprints and - quotalen should be updated to indicate how much quota this key should - account for. - - When clearing up, the data attached to ->payload[asym_key_ids] and - ->description will be kfree()'d and the data attached to - ->payload[asm_crypto] will be passed to the subtype's ->destroy() method - to be disposed of. A module reference for the subtype pointed to by - ->payload[asym_subtype] will be put. - - - If the data format is not recognised, -EBADMSG should be returned. If it - is recognised, but the key cannot for some reason be set up, some other - negative error code should be returned. On success, 0 should be returned. - - The key's fingerprint string may be partially matched upon. For a - public-key algorithm such as RSA and DSA this will likely be a printable - hex version of the key's fingerprint. - -Functions are provided to register and unregister parsers: - - int register_asymmetric_key_parser(struct asymmetric_key_parser *parser); - void unregister_asymmetric_key_parser(struct asymmetric_key_parser *subtype); - -Parsers may not have the same name. The names are otherwise only used for -displaying in debugging messages. - - -========================= -KEYRING LINK RESTRICTIONS -========================= - -Keyrings created from userspace using add_key can be configured to check the -signature of the key being linked. 
Keys without a valid signature are not -allowed to link. - -Several restriction methods are available: - - (1) Restrict using the kernel builtin trusted keyring - - - Option string used with KEYCTL_RESTRICT_KEYRING: - - "builtin_trusted" - - The kernel builtin trusted keyring will be searched for the signing key. - If the builtin trusted keyring is not configured, all links will be - rejected. The ca_keys kernel parameter also affects which keys are used - for signature verification. - - (2) Restrict using the kernel builtin and secondary trusted keyrings - - - Option string used with KEYCTL_RESTRICT_KEYRING: - - "builtin_and_secondary_trusted" - - The kernel builtin and secondary trusted keyrings will be searched for the - signing key. If the secondary trusted keyring is not configured, this - restriction will behave like the "builtin_trusted" option. The ca_keys - kernel parameter also affects which keys are used for signature - verification. - - (3) Restrict using a separate key or keyring - - - Option string used with KEYCTL_RESTRICT_KEYRING: - - "key_or_keyring:[:chain]" - - Whenever a key link is requested, the link will only succeed if the key - being linked is signed by one of the designated keys. This key may be - specified directly by providing a serial number for one asymmetric key, or - a group of keys may be searched for the signing key by providing the - serial number for a keyring. - - When the "chain" option is provided at the end of the string, the keys - within the destination keyring will also be searched for signing keys. - This allows for verification of certificate chains by adding each - certificate in order (starting closest to the root) to a keyring. For - instance, one keyring can be populated with links to a set of root - certificates, with a separate, restricted keyring set up for each - certificate chain to be validated: - - # Create and populate a keyring for root certificates - root_id=`keyctl add keyring root-certs "" @s` - keyctl padd asymmetric "" $root_id < root1.cert - keyctl padd asymmetric "" $root_id < root2.cert - - # Create and restrict a keyring for the certificate chain - chain_id=`keyctl add keyring chain "" @s` - keyctl restrict_keyring $chain_id asymmetric key_or_keyring:$root_id:chain - - # Attempt to add each certificate in the chain, starting with the - # certificate closest to the root. - keyctl padd asymmetric "" $chain_id < intermediateA.cert - keyctl padd asymmetric "" $chain_id < intermediateB.cert - keyctl padd asymmetric "" $chain_id < end-entity.cert - - If the final end-entity certificate is successfully added to the "chain" - keyring, we can be certain that it has a valid signing chain going back to - one of the root certificates. - - A single keyring can be used to verify a chain of signatures by - restricting the keyring after linking the root certificate: - - # Create a keyring for the certificate chain and add the root - chain2_id=`keyctl add keyring chain2 "" @s` - keyctl padd asymmetric "" $chain2_id < root1.cert - - # Restrict the keyring that already has root1.cert linked. The cert - # will remain linked by the keyring. - keyctl restrict_keyring $chain2_id asymmetric key_or_keyring:0:chain - - # Attempt to add each certificate in the chain, starting with the - # certificate closest to the root. 
- keyctl padd asymmetric "" $chain2_id < intermediateA.cert - keyctl padd asymmetric "" $chain2_id < intermediateB.cert - keyctl padd asymmetric "" $chain2_id < end-entity.cert - - If the final end-entity certificate is successfully added to the "chain2" - keyring, we can be certain that there is a valid signing chain going back - to the root certificate that was added before the keyring was restricted. - - -In all of these cases, if the signing key is found the signature of the key to -be linked will be verified using the signing key. The requested key is added -to the keyring only if the signature is successfully verified. -ENOKEY is -returned if the parent certificate could not be found, or -EKEYREJECTED is -returned if the signature check fails or the key is blacklisted. Other errors -may be returned if the signature check could not be performed. diff --git a/Documentation/crypto/index.rst b/Documentation/crypto/index.rst index c4ff5d791233..2bcaf422731e 100644 --- a/Documentation/crypto/index.rst +++ b/Documentation/crypto/index.rst @@ -18,6 +18,7 @@ for cryptographic use cases, as well as programming examples. intro architecture + asymmetric-keys devel-algos userspace-if crypto_engine diff --git a/Documentation/security/keys/core.rst b/Documentation/security/keys/core.rst index cdc42ccc12e4..aa0081685ee1 100644 --- a/Documentation/security/keys/core.rst +++ b/Documentation/security/keys/core.rst @@ -912,7 +912,7 @@ The keyctl syscall functions are: One application of restricted keyrings is to verify X.509 certificate chains or individual certificate signatures using the asymmetric key type. - See Documentation/crypto/asymmetric-keys.txt for specific restrictions + See Documentation/crypto/asymmetric-keys.rst for specific restrictions applicable to the asymmetric key type. diff --git a/MAINTAINERS b/MAINTAINERS index 68f21d46614c..156bd19d10eb 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2827,7 +2827,7 @@ ASYMMETRIC KEYS M: David Howells L: keyrings@vger.kernel.org S: Maintained -F: Documentation/crypto/asymmetric-keys.txt +F: Documentation/crypto/asymmetric-keys.rst F: crypto/asymmetric_keys/ F: include/crypto/pkcs7.h F: include/crypto/public_key.h diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c index 6e5fc8e31f01..33e77d846caa 100644 --- a/crypto/asymmetric_keys/asymmetric_type.c +++ b/crypto/asymmetric_keys/asymmetric_type.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* Asymmetric public-key cryptography key type * - * See Documentation/crypto/asymmetric-keys.txt + * See Documentation/crypto/asymmetric-keys.rst * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index d7f43d4ea925..da4d0b82d018 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* In-software asymmetric public-key crypto subtype * - * See Documentation/crypto/asymmetric-keys.txt + * See Documentation/crypto/asymmetric-keys.rst * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. 
* Written by David Howells (dhowells@redhat.com) diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c index e24a031db1e4..4aff3eebec17 100644 --- a/crypto/asymmetric_keys/signature.c +++ b/crypto/asymmetric_keys/signature.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* Signature verification with an asymmetric key * - * See Documentation/crypto/asymmetric-keys.txt + * See Documentation/crypto/asymmetric-keys.rst * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h index 0588ef3bc6ff..11f535cfb810 100644 --- a/include/crypto/public_key.h +++ b/include/crypto/public_key.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ /* Asymmetric public-key algorithm definitions * - * See Documentation/crypto/asymmetric-keys.txt + * See Documentation/crypto/asymmetric-keys.rst * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) diff --git a/include/keys/asymmetric-parser.h b/include/keys/asymmetric-parser.h index 8a21d6a613ab..c47dc5405f79 100644 --- a/include/keys/asymmetric-parser.h +++ b/include/keys/asymmetric-parser.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ /* Asymmetric public-key cryptography data parser * - * See Documentation/crypto/asymmetric-keys.txt + * See Documentation/crypto/asymmetric-keys.rst * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h index 21407815d9c3..d55171f640a0 100644 --- a/include/keys/asymmetric-subtype.h +++ b/include/keys/asymmetric-subtype.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ /* Asymmetric public-key cryptography key subtype * - * See Documentation/crypto/asymmetric-keys.txt + * See Documentation/crypto/asymmetric-keys.rst * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h index 91cfd9bd9385..a29d3ff2e7e8 100644 --- a/include/keys/asymmetric-type.h +++ b/include/keys/asymmetric-type.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ /* Asymmetric Public-key cryptography key type interface * - * See Documentation/crypto/asymmetric-keys.txt + * See Documentation/crypto/asymmetric-keys.rst * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) -- cgit v1.2.3 From 8e2a46a40fa76570e535e5baa2d351510b6e61fa Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Mon, 15 Jun 2020 08:50:25 +0200 Subject: docs: move remaining stuff under Documentation/*.txt to Documentation/staging There are several files that I was unable to find a proper place for them, and 3 ones that are still in plain old text format. Let's place those stuff behind the carpet, as we'd like to keep the root directory clean. We can later discuss and move those into better places. 
Signed-off-by: Mauro Carvalho Chehab Link: https://lore.kernel.org/r/11bd0d75e65a874f7c276a0aeab0fe13f3376f5f.1592203650.git.mchehab+huawei@kernel.org Signed-off-by: Jonathan Corbet --- Documentation/crc32.txt | 189 -------- Documentation/index.rst | 13 + Documentation/kprobes.txt | 801 ---------------------------------- Documentation/lzo.txt | 202 --------- Documentation/remoteproc.txt | 359 --------------- Documentation/rpmsg.txt | 341 --------------- Documentation/speculation.txt | 90 ---- Documentation/staging/crc32.rst | 189 ++++++++ Documentation/staging/index.rst | 32 ++ Documentation/staging/kprobes.rst | 801 ++++++++++++++++++++++++++++++++++ Documentation/staging/lzo.rst | 202 +++++++++ Documentation/staging/remoteproc.rst | 359 +++++++++++++++ Documentation/staging/rpmsg.rst | 341 +++++++++++++++ Documentation/staging/speculation.rst | 92 ++++ Documentation/staging/static-keys.rst | 331 ++++++++++++++ Documentation/staging/tee.rst | 277 ++++++++++++ Documentation/staging/xz.rst | 127 ++++++ Documentation/static-keys.txt | 331 -------------- Documentation/tee.txt | 276 ------------ Documentation/trace/kprobetrace.rst | 2 +- Documentation/xz.txt | 127 ------ MAINTAINERS | 8 +- include/linux/jump_label.h | 2 +- lib/crc32.c | 2 +- lib/lzo/lzo1x_decompress_safe.c | 2 +- lib/xz/Kconfig | 2 +- samples/kprobes/kprobe_example.c | 2 +- samples/kprobes/kretprobe_example.c | 2 +- 28 files changed, 2775 insertions(+), 2727 deletions(-) delete mode 100644 Documentation/crc32.txt delete mode 100644 Documentation/kprobes.txt delete mode 100644 Documentation/lzo.txt delete mode 100644 Documentation/remoteproc.txt delete mode 100644 Documentation/rpmsg.txt delete mode 100644 Documentation/speculation.txt create mode 100644 Documentation/staging/crc32.rst create mode 100644 Documentation/staging/index.rst create mode 100644 Documentation/staging/kprobes.rst create mode 100644 Documentation/staging/lzo.rst create mode 100644 Documentation/staging/remoteproc.rst create mode 100644 Documentation/staging/rpmsg.rst create mode 100644 Documentation/staging/speculation.rst create mode 100644 Documentation/staging/static-keys.rst create mode 100644 Documentation/staging/tee.rst create mode 100644 Documentation/staging/xz.rst delete mode 100644 Documentation/static-keys.txt delete mode 100644 Documentation/tee.txt delete mode 100644 Documentation/xz.txt (limited to 'include') diff --git a/Documentation/crc32.txt b/Documentation/crc32.txt deleted file mode 100644 index 8a6860f33b4e..000000000000 --- a/Documentation/crc32.txt +++ /dev/null @@ -1,189 +0,0 @@ -================================= -brief tutorial on CRC computation -================================= - -A CRC is a long-division remainder. You add the CRC to the message, -and the whole thing (message+CRC) is a multiple of the given -CRC polynomial. To check the CRC, you can either check that the -CRC matches the recomputed value, *or* you can check that the -remainder computed on the message+CRC is 0. This latter approach -is used by a lot of hardware implementations, and is why so many -protocols put the end-of-frame flag after the CRC. - -It's actually the same long division you learned in school, except that: - -- We're working in binary, so the digits are only 0 and 1, and -- When dividing polynomials, there are no carries. Rather than add and - subtract, we just xor. Thus, we tend to get a bit sloppy about - the difference between adding and subtracting. - -Like all division, the remainder is always smaller than the divisor. 
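To make the check-the-remainder idea concrete, here is a small self-contained
sketch (illustrative only, not lib/crc32.c; it uses the standard CRC-32
polynomial with none of the bit-reflection or inversion refinements discussed
later, and the loop is the same byte-at-a-time form derived step by step
below). Running the same remainder routine over the message with its CRC
appended comes back as zero::

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define CRCPOLY 0x04c11db7      /* CRC-32 polynomial, x^32 term implied */

    /* Remainder of data(x) * x^32 modulo CRCPOLY, most significant bit first. */
    static uint32_t crc32_remainder(const uint8_t *data, size_t len)
    {
            uint32_t rem = 0, multiple;
            size_t i;
            int j;

            for (i = 0; i < len; i++) {
                    rem ^= (uint32_t)data[i] << 24;
                    for (j = 0; j < 8; j++) {
                            multiple = (rem & 0x80000000) ? CRCPOLY : 0;
                            rem = (rem << 1) ^ multiple;
                    }
            }
            return rem;
    }

    int main(void)
    {
            const char *msg = "123456789";
            size_t len = strlen(msg);
            uint8_t frame[13];
            uint32_t crc = crc32_remainder((const uint8_t *)msg, len);

            /* Append the CRC most significant byte first, matching the bit order. */
            memcpy(frame, msg, len);
            frame[len]     = crc >> 24;
            frame[len + 1] = crc >> 16;
            frame[len + 2] = crc >> 8;
            frame[len + 3] = (uint8_t)crc;

            /* The remainder over message+CRC is zero when nothing was corrupted. */
            printf("crc = %08x, check = %08x\n",
                   (unsigned)crc, (unsigned)crc32_remainder(frame, len + 4));
            return 0;
    }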
-To produce a 32-bit CRC, the divisor is actually a 33-bit CRC polynomial. -Since it's 33 bits long, bit 32 is always going to be set, so usually the -CRC is written in hex with the most significant bit omitted. (If you're -familiar with the IEEE 754 floating-point format, it's the same idea.) - -Note that a CRC is computed over a string of *bits*, so you have -to decide on the endianness of the bits within each byte. To get -the best error-detecting properties, this should correspond to the -order they're actually sent. For example, standard RS-232 serial is -little-endian; the most significant bit (sometimes used for parity) -is sent last. And when appending a CRC word to a message, you should -do it in the right order, matching the endianness. - -Just like with ordinary division, you proceed one digit (bit) at a time. -Each step of the division you take one more digit (bit) of the dividend -and append it to the current remainder. Then you figure out the -appropriate multiple of the divisor to subtract to being the remainder -back into range. In binary, this is easy - it has to be either 0 or 1, -and to make the XOR cancel, it's just a copy of bit 32 of the remainder. - -When computing a CRC, we don't care about the quotient, so we can -throw the quotient bit away, but subtract the appropriate multiple of -the polynomial from the remainder and we're back to where we started, -ready to process the next bit. - -A big-endian CRC written this way would be coded like:: - - for (i = 0; i < input_bits; i++) { - multiple = remainder & 0x80000000 ? CRCPOLY : 0; - remainder = (remainder << 1 | next_input_bit()) ^ multiple; - } - -Notice how, to get at bit 32 of the shifted remainder, we look -at bit 31 of the remainder *before* shifting it. - -But also notice how the next_input_bit() bits we're shifting into -the remainder don't actually affect any decision-making until -32 bits later. Thus, the first 32 cycles of this are pretty boring. -Also, to add the CRC to a message, we need a 32-bit-long hole for it at -the end, so we have to add 32 extra cycles shifting in zeros at the -end of every message. - -These details lead to a standard trick: rearrange merging in the -next_input_bit() until the moment it's needed. Then the first 32 cycles -can be precomputed, and merging in the final 32 zero bits to make room -for the CRC can be skipped entirely. This changes the code to:: - - for (i = 0; i < input_bits; i++) { - remainder ^= next_input_bit() << 31; - multiple = (remainder & 0x80000000) ? CRCPOLY : 0; - remainder = (remainder << 1) ^ multiple; - } - -With this optimization, the little-endian code is particularly simple:: - - for (i = 0; i < input_bits; i++) { - remainder ^= next_input_bit(); - multiple = (remainder & 1) ? CRCPOLY : 0; - remainder = (remainder >> 1) ^ multiple; - } - -The most significant coefficient of the remainder polynomial is stored -in the least significant bit of the binary "remainder" variable. -The other details of endianness have been hidden in CRCPOLY (which must -be bit-reversed) and next_input_bit(). - -As long as next_input_bit is returning the bits in a sensible order, we don't -*have* to wait until the last possible moment to merge in additional bits. -We can do it 8 bits at a time rather than 1 bit at a time:: - - for (i = 0; i < input_bytes; i++) { - remainder ^= next_input_byte() << 24; - for (j = 0; j < 8; j++) { - multiple = (remainder & 0x80000000) ? 
CRCPOLY : 0; - remainder = (remainder << 1) ^ multiple; - } - } - -Or in little-endian:: - - for (i = 0; i < input_bytes; i++) { - remainder ^= next_input_byte(); - for (j = 0; j < 8; j++) { - multiple = (remainder & 1) ? CRCPOLY : 0; - remainder = (remainder >> 1) ^ multiple; - } - } - -If the input is a multiple of 32 bits, you can even XOR in a 32-bit -word at a time and increase the inner loop count to 32. - -You can also mix and match the two loop styles, for example doing the -bulk of a message byte-at-a-time and adding bit-at-a-time processing -for any fractional bytes at the end. - -To reduce the number of conditional branches, software commonly uses -the byte-at-a-time table method, popularized by Dilip V. Sarwate, -"Computation of Cyclic Redundancy Checks via Table Look-Up", Comm. ACM -v.31 no.8 (August 1998) p. 1008-1013. - -Here, rather than just shifting one bit of the remainder to decide -in the correct multiple to subtract, we can shift a byte at a time. -This produces a 40-bit (rather than a 33-bit) intermediate remainder, -and the correct multiple of the polynomial to subtract is found using -a 256-entry lookup table indexed by the high 8 bits. - -(The table entries are simply the CRC-32 of the given one-byte messages.) - -When space is more constrained, smaller tables can be used, e.g. two -4-bit shifts followed by a lookup in a 16-entry table. - -It is not practical to process much more than 8 bits at a time using this -technique, because tables larger than 256 entries use too much memory and, -more importantly, too much of the L1 cache. - -To get higher software performance, a "slicing" technique can be used. -See "High Octane CRC Generation with the Intel Slicing-by-8 Algorithm", -ftp://download.intel.com/technology/comms/perfnet/download/slicing-by-8.pdf - -This does not change the number of table lookups, but does increase -the parallelism. With the classic Sarwate algorithm, each table lookup -must be completed before the index of the next can be computed. - -A "slicing by 2" technique would shift the remainder 16 bits at a time, -producing a 48-bit intermediate remainder. Rather than doing a single -lookup in a 65536-entry table, the two high bytes are looked up in -two different 256-entry tables. Each contains the remainder required -to cancel out the corresponding byte. The tables are different because the -polynomials to cancel are different. One has non-zero coefficients from -x^32 to x^39, while the other goes from x^40 to x^47. - -Since modern processors can handle many parallel memory operations, this -takes barely longer than a single table look-up and thus performs almost -twice as fast as the basic Sarwate algorithm. - -This can be extended to "slicing by 4" using 4 256-entry tables. -Each step, 32 bits of data is fetched, XORed with the CRC, and the result -broken into bytes and looked up in the tables. Because the 32-bit shift -leaves the low-order bits of the intermediate remainder zero, the -final CRC is simply the XOR of the 4 table look-ups. - -But this still enforces sequential execution: a second group of table -look-ups cannot begin until the previous groups 4 table look-ups have all -been completed. Thus, the processor's load/store unit is sometimes idle. - -To make maximum use of the processor, "slicing by 8" performs 8 look-ups -in parallel. Each step, the 32-bit CRC is shifted 64 bits and XORed -with 64 bits of input data. 
What is important to note is that 4 of -those 8 bytes are simply copies of the input data; they do not depend -on the previous CRC at all. Thus, those 4 table look-ups may commence -immediately, without waiting for the previous loop iteration. - -By always having 4 loads in flight, a modern superscalar processor can -be kept busy and make full use of its L1 cache. - -Two more details about CRC implementation in the real world: - -Normally, appending zero bits to a message which is already a multiple -of a polynomial produces a larger multiple of that polynomial. Thus, -a basic CRC will not detect appended zero bits (or bytes). To enable -a CRC to detect this condition, it's common to invert the CRC before -appending it. This makes the remainder of the message+crc come out not -as zero, but some fixed non-zero value. (The CRC of the inversion -pattern, 0xffffffff.) - -The same problem applies to zero bits prepended to the message, and a -similar solution is used. Instead of starting the CRC computation with -a remainder of 0, an initial remainder of all ones is used. As long as -you start the same way on decoding, it doesn't make a difference. diff --git a/Documentation/index.rst b/Documentation/index.rst index 71eca3171574..3b491af0122d 100644 --- a/Documentation/index.rst +++ b/Documentation/index.rst @@ -182,6 +182,19 @@ subprojects. filesystems/ext4/index +Other documentation +------------------- + +There are several unsorted documents that don't seem to fit on other parts +of the documentation body, or may require some adjustments and/or conversion +to ReStructured Text format, or are simply too old. + +.. toctree:: + :maxdepth: 2 + + staging/index + + Translations ------------ diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt deleted file mode 100644 index 8baab8832c5b..000000000000 --- a/Documentation/kprobes.txt +++ /dev/null @@ -1,801 +0,0 @@ -======================= -Kernel Probes (Kprobes) -======================= - -:Author: Jim Keniston -:Author: Prasanna S Panchamukhi -:Author: Masami Hiramatsu - -.. CONTENTS - - 1. Concepts: Kprobes, and Return Probes - 2. Architectures Supported - 3. Configuring Kprobes - 4. API Reference - 5. Kprobes Features and Limitations - 6. Probe Overhead - 7. TODO - 8. Kprobes Example - 9. Kretprobes Example - 10. Deprecated Features - Appendix A: The kprobes debugfs interface - Appendix B: The kprobes sysctl interface - -Concepts: Kprobes and Return Probes -========================================= - -Kprobes enables you to dynamically break into any kernel routine and -collect debugging and performance information non-disruptively. You -can trap at almost any kernel code address [1]_, specifying a handler -routine to be invoked when the breakpoint is hit. - -.. [1] some parts of the kernel code can not be trapped, see - :ref:`kprobes_blacklist`) - -There are currently two types of probes: kprobes, and kretprobes -(also called return probes). A kprobe can be inserted on virtually -any instruction in the kernel. A return probe fires when a specified -function returns. - -In the typical case, Kprobes-based instrumentation is packaged as -a kernel module. The module's init function installs ("registers") -one or more probes, and the exit function unregisters them. A -registration function such as register_kprobe() specifies where -the probe is to be inserted and what handler is to be called when -the probe is hit. 
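As a stripped-down sketch of that packaging (in the spirit of
samples/kprobes/kprobe_example.c, which is referenced at the end of this
document; the probed symbol vfs_read is only an example), such a module might
look like::

    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/kprobes.h>

    static struct kprobe kp = {
            .symbol_name    = "vfs_read",   /* example target only */
    };

    /* Runs just before the probed instruction is single-stepped. */
    static int handler_pre(struct kprobe *p, struct pt_regs *regs)
    {
            pr_info("%s hit at %p\n", p->symbol_name, p->addr);
            return 0;
    }

    static int __init kprobe_example_init(void)
    {
            kp.pre_handler = handler_pre;
            return register_kprobe(&kp);    /* 0 on success, negative errno otherwise */
    }

    static void __exit kprobe_example_exit(void)
    {
            unregister_kprobe(&kp);
    }

    module_init(kprobe_example_init);
    module_exit(kprobe_example_exit);
    MODULE_LICENSE("GPL");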
- -There are also ``register_/unregister_*probes()`` functions for batch -registration/unregistration of a group of ``*probes``. These functions -can speed up unregistration process when you have to unregister -a lot of probes at once. - -The next four subsections explain how the different types of -probes work and how jump optimization works. They explain certain -things that you'll need to know in order to make the best use of -Kprobes -- e.g., the difference between a pre_handler and -a post_handler, and how to use the maxactive and nmissed fields of -a kretprobe. But if you're in a hurry to start using Kprobes, you -can skip ahead to :ref:`kprobes_archs_supported`. - -How Does a Kprobe Work? ------------------------ - -When a kprobe is registered, Kprobes makes a copy of the probed -instruction and replaces the first byte(s) of the probed instruction -with a breakpoint instruction (e.g., int3 on i386 and x86_64). - -When a CPU hits the breakpoint instruction, a trap occurs, the CPU's -registers are saved, and control passes to Kprobes via the -notifier_call_chain mechanism. Kprobes executes the "pre_handler" -associated with the kprobe, passing the handler the addresses of the -kprobe struct and the saved registers. - -Next, Kprobes single-steps its copy of the probed instruction. -(It would be simpler to single-step the actual instruction in place, -but then Kprobes would have to temporarily remove the breakpoint -instruction. This would open a small time window when another CPU -could sail right past the probepoint.) - -After the instruction is single-stepped, Kprobes executes the -"post_handler," if any, that is associated with the kprobe. -Execution then continues with the instruction following the probepoint. - -Changing Execution Path ------------------------ - -Since kprobes can probe into a running kernel code, it can change the -register set, including instruction pointer. This operation requires -maximum care, such as keeping the stack frame, recovering the execution -path etc. Since it operates on a running kernel and needs deep knowledge -of computer architecture and concurrent computing, you can easily shoot -your foot. - -If you change the instruction pointer (and set up other related -registers) in pre_handler, you must return !0 so that kprobes stops -single stepping and just returns to the given address. -This also means post_handler should not be called anymore. - -Note that this operation may be harder on some architectures which use -TOC (Table of Contents) for function call, since you have to setup a new -TOC for your function in your module, and recover the old one after -returning from it. - -Return Probes -------------- - -How Does a Return Probe Work? -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -When you call register_kretprobe(), Kprobes establishes a kprobe at -the entry to the function. When the probed function is called and this -probe is hit, Kprobes saves a copy of the return address, and replaces -the return address with the address of a "trampoline." The trampoline -is an arbitrary piece of code -- typically just a nop instruction. -At boot time, Kprobes registers a kprobe at the trampoline. - -When the probed function executes its return instruction, control -passes to the trampoline and that probe is hit. Kprobes' trampoline -handler calls the user-specified return handler associated with the -kretprobe, then sets the saved instruction pointer to the saved return -address, and that's where execution resumes upon return from the trap. 
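A minimal sketch of the corresponding registration is shown below (an outline
only: the probed symbol is arbitrary, maxactive is explained in the next
paragraph, and regs_return_value() is documented later in the API reference)::

    #include <linux/kernel.h>
    #include <linux/kprobes.h>
    #include <linux/ptrace.h>

    /* Runs when the probed function returns, via the trampoline described above. */
    static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
    {
            unsigned long retval = regs_return_value(regs);

            pr_info("vfs_read returned %lu\n", retval);
            return 0;
    }

    static struct kretprobe my_kretprobe = {
            .handler        = ret_handler,
            .maxactive      = 20,           /* see the discussion of maxactive below */
            .kp.symbol_name = "vfs_read",
    };

    /*
     * Call register_kretprobe(&my_kretprobe) from module init and
     * unregister_kretprobe(&my_kretprobe) from module exit, exactly as in
     * the kprobe sketch earlier in this document.
     */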
- -While the probed function is executing, its return address is -stored in an object of type kretprobe_instance. Before calling -register_kretprobe(), the user sets the maxactive field of the -kretprobe struct to specify how many instances of the specified -function can be probed simultaneously. register_kretprobe() -pre-allocates the indicated number of kretprobe_instance objects. - -For example, if the function is non-recursive and is called with a -spinlock held, maxactive = 1 should be enough. If the function is -non-recursive and can never relinquish the CPU (e.g., via a semaphore -or preemption), NR_CPUS should be enough. If maxactive <= 0, it is -set to a default value. If CONFIG_PREEMPT is enabled, the default -is max(10, 2*NR_CPUS). Otherwise, the default is NR_CPUS. - -It's not a disaster if you set maxactive too low; you'll just miss -some probes. In the kretprobe struct, the nmissed field is set to -zero when the return probe is registered, and is incremented every -time the probed function is entered but there is no kretprobe_instance -object available for establishing the return probe. - -Kretprobe entry-handler -^^^^^^^^^^^^^^^^^^^^^^^ - -Kretprobes also provides an optional user-specified handler which runs -on function entry. This handler is specified by setting the entry_handler -field of the kretprobe struct. Whenever the kprobe placed by kretprobe at the -function entry is hit, the user-defined entry_handler, if any, is invoked. -If the entry_handler returns 0 (success) then a corresponding return handler -is guaranteed to be called upon function return. If the entry_handler -returns a non-zero error then Kprobes leaves the return address as is, and -the kretprobe has no further effect for that particular function instance. - -Multiple entry and return handler invocations are matched using the unique -kretprobe_instance object associated with them. Additionally, a user -may also specify per return-instance private data to be part of each -kretprobe_instance object. This is especially useful when sharing private -data between corresponding user entry and return handlers. The size of each -private data object can be specified at kretprobe registration time by -setting the data_size field of the kretprobe struct. This data can be -accessed through the data field of each kretprobe_instance object. - -In case probed function is entered but there is no kretprobe_instance -object available, then in addition to incrementing the nmissed count, -the user entry_handler invocation is also skipped. - -.. _kprobes_jump_optimization: - -How Does Jump Optimization Work? --------------------------------- - -If your kernel is built with CONFIG_OPTPROBES=y (currently this flag -is automatically set 'y' on x86/x86-64, non-preemptive kernel) and -the "debug.kprobes_optimization" kernel parameter is set to 1 (see -sysctl(8)), Kprobes tries to reduce probe-hit overhead by using a jump -instruction instead of a breakpoint instruction at each probepoint. - -Init a Kprobe -^^^^^^^^^^^^^ - -When a probe is registered, before attempting this optimization, -Kprobes inserts an ordinary, breakpoint-based kprobe at the specified -address. So, even if it's not possible to optimize this particular -probepoint, there'll be a probe there. - -Safety Check -^^^^^^^^^^^^ - -Before optimizing a probe, Kprobes performs the following safety checks: - -- Kprobes verifies that the region that will be replaced by the jump - instruction (the "optimized region") lies entirely within one function. 
- (A jump instruction is multiple bytes, and so may overlay multiple - instructions.) - -- Kprobes analyzes the entire function and verifies that there is no - jump into the optimized region. Specifically: - - - the function contains no indirect jump; - - the function contains no instruction that causes an exception (since - the fixup code triggered by the exception could jump back into the - optimized region -- Kprobes checks the exception tables to verify this); - - there is no near jump to the optimized region (other than to the first - byte). - -- For each instruction in the optimized region, Kprobes verifies that - the instruction can be executed out of line. - -Preparing Detour Buffer -^^^^^^^^^^^^^^^^^^^^^^^ - -Next, Kprobes prepares a "detour" buffer, which contains the following -instruction sequence: - -- code to push the CPU's registers (emulating a breakpoint trap) -- a call to the trampoline code which calls user's probe handlers. -- code to restore registers -- the instructions from the optimized region -- a jump back to the original execution path. - -Pre-optimization -^^^^^^^^^^^^^^^^ - -After preparing the detour buffer, Kprobes verifies that none of the -following situations exist: - -- The probe has a post_handler. -- Other instructions in the optimized region are probed. -- The probe is disabled. - -In any of the above cases, Kprobes won't start optimizing the probe. -Since these are temporary situations, Kprobes tries to start -optimizing it again if the situation is changed. - -If the kprobe can be optimized, Kprobes enqueues the kprobe to an -optimizing list, and kicks the kprobe-optimizer workqueue to optimize -it. If the to-be-optimized probepoint is hit before being optimized, -Kprobes returns control to the original instruction path by setting -the CPU's instruction pointer to the copied code in the detour buffer --- thus at least avoiding the single-step. - -Optimization -^^^^^^^^^^^^ - -The Kprobe-optimizer doesn't insert the jump instruction immediately; -rather, it calls synchronize_rcu() for safety first, because it's -possible for a CPU to be interrupted in the middle of executing the -optimized region [3]_. As you know, synchronize_rcu() can ensure -that all interruptions that were active when synchronize_rcu() -was called are done, but only if CONFIG_PREEMPT=n. So, this version -of kprobe optimization supports only kernels with CONFIG_PREEMPT=n [4]_. - -After that, the Kprobe-optimizer calls stop_machine() to replace -the optimized region with a jump instruction to the detour buffer, -using text_poke_smp(). - -Unoptimization -^^^^^^^^^^^^^^ - -When an optimized kprobe is unregistered, disabled, or blocked by -another kprobe, it will be unoptimized. If this happens before -the optimization is complete, the kprobe is just dequeued from the -optimized list. If the optimization has been done, the jump is -replaced with the original code (except for an int3 breakpoint in -the first byte) by using text_poke_smp(). - -.. [3] Please imagine that the 2nd instruction is interrupted and then - the optimizer replaces the 2nd instruction with the jump *address* - while the interrupt handler is running. When the interrupt - returns to original address, there is no valid instruction, - and it causes an unexpected result. - -.. [4] This optimization-safety checking may be replaced with the - stop-machine method that ksplice uses for supporting a CONFIG_PREEMPT=y - kernel. - -NOTE for geeks: -The jump optimization changes the kprobe's pre_handler behavior. 
-Without optimization, the pre_handler can change the kernel's execution -path by changing regs->ip and returning 1. However, when the probe -is optimized, that modification is ignored. Thus, if you want to -tweak the kernel's execution path, you need to suppress optimization, -using one of the following techniques: - -- Specify an empty function for the kprobe's post_handler. - -or - -- Execute 'sysctl -w debug.kprobes_optimization=n' - -.. _kprobes_blacklist: - -Blacklist ---------- - -Kprobes can probe most of the kernel except itself. This means -that there are some functions where kprobes cannot probe. Probing -(trapping) such functions can cause a recursive trap (e.g. double -fault) or the nested probe handler may never be called. -Kprobes manages such functions as a blacklist. -If you want to add a function into the blacklist, you just need -to (1) include linux/kprobes.h and (2) use NOKPROBE_SYMBOL() macro -to specify a blacklisted function. -Kprobes checks the given probe address against the blacklist and -rejects registering it, if the given address is in the blacklist. - -.. _kprobes_archs_supported: - -Architectures Supported -======================= - -Kprobes and return probes are implemented on the following -architectures: - -- i386 (Supports jump optimization) -- x86_64 (AMD-64, EM64T) (Supports jump optimization) -- ppc64 -- ia64 (Does not support probes on instruction slot1.) -- sparc64 (Return probes not yet implemented.) -- arm -- ppc -- mips -- s390 -- parisc - -Configuring Kprobes -=================== - -When configuring the kernel using make menuconfig/xconfig/oldconfig, -ensure that CONFIG_KPROBES is set to "y". Under "General setup", look -for "Kprobes". - -So that you can load and unload Kprobes-based instrumentation modules, -make sure "Loadable module support" (CONFIG_MODULES) and "Module -unloading" (CONFIG_MODULE_UNLOAD) are set to "y". - -Also make sure that CONFIG_KALLSYMS and perhaps even CONFIG_KALLSYMS_ALL -are set to "y", since kallsyms_lookup_name() is used by the in-kernel -kprobe address resolution code. - -If you need to insert a probe in the middle of a function, you may find -it useful to "Compile the kernel with debug info" (CONFIG_DEBUG_INFO), -so you can use "objdump -d -l vmlinux" to see the source-to-object -code mapping. - -API Reference -============= - -The Kprobes API includes a "register" function and an "unregister" -function for each type of probe. The API also includes "register_*probes" -and "unregister_*probes" functions for (un)registering arrays of probes. -Here are terse, mini-man-page specifications for these functions and -the associated probe handlers that you'll write. See the files in the -samples/kprobes/ sub-directory for examples. - -register_kprobe ---------------- - -:: - - #include - int register_kprobe(struct kprobe *kp); - -Sets a breakpoint at the address kp->addr. When the breakpoint is -hit, Kprobes calls kp->pre_handler. After the probed instruction -is single-stepped, Kprobe calls kp->post_handler. If a fault -occurs during execution of kp->pre_handler or kp->post_handler, -or during single-stepping of the probed instruction, Kprobes calls -kp->fault_handler. Any or all handlers can be NULL. If kp->flags -is set KPROBE_FLAG_DISABLED, that kp will be registered but disabled, -so, its handlers aren't hit until calling enable_kprobe(kp). - -.. note:: - - 1. With the introduction of the "symbol_name" field to struct kprobe, - the probepoint address resolution will now be taken care of by the kernel. 
- The following will now work:: - - kp.symbol_name = "symbol_name"; - - (64-bit powerpc intricacies such as function descriptors are handled - transparently) - - 2. Use the "offset" field of struct kprobe if the offset into the symbol - to install a probepoint is known. This field is used to calculate the - probepoint. - - 3. Specify either the kprobe "symbol_name" OR the "addr". If both are - specified, kprobe registration will fail with -EINVAL. - - 4. With CISC architectures (such as i386 and x86_64), the kprobes code - does not validate if the kprobe.addr is at an instruction boundary. - Use "offset" with caution. - -register_kprobe() returns 0 on success, or a negative errno otherwise. - -User's pre-handler (kp->pre_handler):: - - #include - #include - int pre_handler(struct kprobe *p, struct pt_regs *regs); - -Called with p pointing to the kprobe associated with the breakpoint, -and regs pointing to the struct containing the registers saved when -the breakpoint was hit. Return 0 here unless you're a Kprobes geek. - -User's post-handler (kp->post_handler):: - - #include - #include - void post_handler(struct kprobe *p, struct pt_regs *regs, - unsigned long flags); - -p and regs are as described for the pre_handler. flags always seems -to be zero. - -User's fault-handler (kp->fault_handler):: - - #include - #include - int fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr); - -p and regs are as described for the pre_handler. trapnr is the -architecture-specific trap number associated with the fault (e.g., -on i386, 13 for a general protection fault or 14 for a page fault). -Returns 1 if it successfully handled the exception. - -register_kretprobe ------------------- - -:: - - #include - int register_kretprobe(struct kretprobe *rp); - -Establishes a return probe for the function whose address is -rp->kp.addr. When that function returns, Kprobes calls rp->handler. -You must set rp->maxactive appropriately before you call -register_kretprobe(); see "How Does a Return Probe Work?" for details. - -register_kretprobe() returns 0 on success, or a negative errno -otherwise. - -User's return-probe handler (rp->handler):: - - #include - #include - int kretprobe_handler(struct kretprobe_instance *ri, - struct pt_regs *regs); - -regs is as described for kprobe.pre_handler. ri points to the -kretprobe_instance object, of which the following fields may be -of interest: - -- ret_addr: the return address -- rp: points to the corresponding kretprobe object -- task: points to the corresponding task struct -- data: points to per return-instance private data; see "Kretprobe - entry-handler" for details. - -The regs_return_value(regs) macro provides a simple abstraction to -extract the return value from the appropriate register as defined by -the architecture's ABI. - -The handler's return value is currently ignored. - -unregister_*probe ------------------- - -:: - - #include - void unregister_kprobe(struct kprobe *kp); - void unregister_kretprobe(struct kretprobe *rp); - -Removes the specified probe. The unregister function can be called -at any time after the probe has been registered. - -.. note:: - - If the functions find an incorrect probe (ex. an unregistered probe), - they clear the addr field of the probe. - -register_*probes ----------------- - -:: - - #include - int register_kprobes(struct kprobe **kps, int num); - int register_kretprobes(struct kretprobe **rps, int num); - -Registers each of the num probes in the specified array. 
If any -error occurs during registration, all probes in the array, up to -the bad probe, are safely unregistered before the register_*probes -function returns. - -- kps/rps: an array of pointers to ``*probe`` data structures -- num: the number of the array entries. - -.. note:: - - You have to allocate(or define) an array of pointers and set all - of the array entries before using these functions. - -unregister_*probes ------------------- - -:: - - #include - void unregister_kprobes(struct kprobe **kps, int num); - void unregister_kretprobes(struct kretprobe **rps, int num); - -Removes each of the num probes in the specified array at once. - -.. note:: - - If the functions find some incorrect probes (ex. unregistered - probes) in the specified array, they clear the addr field of those - incorrect probes. However, other probes in the array are - unregistered correctly. - -disable_*probe --------------- - -:: - - #include - int disable_kprobe(struct kprobe *kp); - int disable_kretprobe(struct kretprobe *rp); - -Temporarily disables the specified ``*probe``. You can enable it again by using -enable_*probe(). You must specify the probe which has been registered. - -enable_*probe -------------- - -:: - - #include - int enable_kprobe(struct kprobe *kp); - int enable_kretprobe(struct kretprobe *rp); - -Enables ``*probe`` which has been disabled by disable_*probe(). You must specify -the probe which has been registered. - -Kprobes Features and Limitations -================================ - -Kprobes allows multiple probes at the same address. Also, -a probepoint for which there is a post_handler cannot be optimized. -So if you install a kprobe with a post_handler, at an optimized -probepoint, the probepoint will be unoptimized automatically. - -In general, you can install a probe anywhere in the kernel. -In particular, you can probe interrupt handlers. Known exceptions -are discussed in this section. - -The register_*probe functions will return -EINVAL if you attempt -to install a probe in the code that implements Kprobes (mostly -kernel/kprobes.c and ``arch/*/kernel/kprobes.c``, but also functions such -as do_page_fault and notifier_call_chain). - -If you install a probe in an inline-able function, Kprobes makes -no attempt to chase down all inline instances of the function and -install probes there. gcc may inline a function without being asked, -so keep this in mind if you're not seeing the probe hits you expect. - -A probe handler can modify the environment of the probed function --- e.g., by modifying kernel data structures, or by modifying the -contents of the pt_regs struct (which are restored to the registers -upon return from the breakpoint). So Kprobes can be used, for example, -to install a bug fix or to inject faults for testing. Kprobes, of -course, has no way to distinguish the deliberately injected faults -from the accidental ones. Don't drink and probe. - -Kprobes makes no attempt to prevent probe handlers from stepping on -each other -- e.g., probing printk() and then calling printk() from a -probe handler. If a probe handler hits a probe, that second probe's -handlers won't be run in that instance, and the kprobe.nmissed member -of the second probe will be incremented. - -As of Linux v2.6.15-rc1, multiple handlers (or multiple instances of -the same handler) may run concurrently on different CPUs. - -Kprobes does not use mutexes or allocate memory except during -registration and unregistration. 
- -Probe handlers are run with preemption disabled or interrupt disabled, -which depends on the architecture and optimization state. (e.g., -kretprobe handlers and optimized kprobe handlers run without interrupt -disabled on x86/x86-64). In any case, your handler should not yield -the CPU (e.g., by attempting to acquire a semaphore, or waiting I/O). - -Since a return probe is implemented by replacing the return -address with the trampoline's address, stack backtraces and calls -to __builtin_return_address() will typically yield the trampoline's -address instead of the real return address for kretprobed functions. -(As far as we can tell, __builtin_return_address() is used only -for instrumentation and error reporting.) - -If the number of times a function is called does not match the number -of times it returns, registering a return probe on that function may -produce undesirable results. In such a case, a line: -kretprobe BUG!: Processing kretprobe d000000000041aa8 @ c00000000004f48c -gets printed. With this information, one will be able to correlate the -exact instance of the kretprobe that caused the problem. We have the -do_exit() case covered. do_execve() and do_fork() are not an issue. -We're unaware of other specific cases where this could be a problem. - -If, upon entry to or exit from a function, the CPU is running on -a stack other than that of the current task, registering a return -probe on that function may produce undesirable results. For this -reason, Kprobes doesn't support return probes (or kprobes) -on the x86_64 version of __switch_to(); the registration functions -return -EINVAL. - -On x86/x86-64, since the Jump Optimization of Kprobes modifies -instructions widely, there are some limitations to optimization. To -explain it, we introduce some terminology. Imagine a 3-instruction -sequence consisting of a two 2-byte instructions and one 3-byte -instruction. - -:: - - IA - | - [-2][-1][0][1][2][3][4][5][6][7] - [ins1][ins2][ ins3 ] - [<- DCR ->] - [<- JTPR ->] - - ins1: 1st Instruction - ins2: 2nd Instruction - ins3: 3rd Instruction - IA: Insertion Address - JTPR: Jump Target Prohibition Region - DCR: Detoured Code Region - -The instructions in DCR are copied to the out-of-line buffer -of the kprobe, because the bytes in DCR are replaced by -a 5-byte jump instruction. So there are several limitations. - -a) The instructions in DCR must be relocatable. -b) The instructions in DCR must not include a call instruction. -c) JTPR must not be targeted by any jump or call instruction. -d) DCR must not straddle the border between functions. - -Anyway, these limitations are checked by the in-kernel instruction -decoder, so you don't need to worry about that. - -Probe Overhead -============== - -On a typical CPU in use in 2005, a kprobe hit takes 0.5 to 1.0 -microseconds to process. Specifically, a benchmark that hits the same -probepoint repeatedly, firing a simple handler each time, reports 1-2 -million hits per second, depending on the architecture. A return-probe -hit typically takes 50-75% longer than a kprobe hit. -When you have a return probe set on a function, adding a kprobe at -the entry to that function adds essentially no overhead. 
- -Here are sample overhead figures (in usec) for different architectures:: - - k = kprobe; r = return probe; kr = kprobe + return probe - on same function - - i386: Intel Pentium M, 1495 MHz, 2957.31 bogomips - k = 0.57 usec; r = 0.92; kr = 0.99 - - x86_64: AMD Opteron 246, 1994 MHz, 3971.48 bogomips - k = 0.49 usec; r = 0.80; kr = 0.82 - - ppc64: POWER5 (gr), 1656 MHz (SMT disabled, 1 virtual CPU per physical CPU) - k = 0.77 usec; r = 1.26; kr = 1.45 - -Optimized Probe Overhead ------------------------- - -Typically, an optimized kprobe hit takes 0.07 to 0.1 microseconds to -process. Here are sample overhead figures (in usec) for x86 architectures:: - - k = unoptimized kprobe, b = boosted (single-step skipped), o = optimized kprobe, - r = unoptimized kretprobe, rb = boosted kretprobe, ro = optimized kretprobe. - - i386: Intel(R) Xeon(R) E5410, 2.33GHz, 4656.90 bogomips - k = 0.80 usec; b = 0.33; o = 0.05; r = 1.10; rb = 0.61; ro = 0.33 - - x86-64: Intel(R) Xeon(R) E5410, 2.33GHz, 4656.90 bogomips - k = 0.99 usec; b = 0.43; o = 0.06; r = 1.24; rb = 0.68; ro = 0.30 - -TODO -==== - -a. SystemTap (http://sourceware.org/systemtap): Provides a simplified - programming interface for probe-based instrumentation. Try it out. -b. Kernel return probes for sparc64. -c. Support for other architectures. -d. User-space probes. -e. Watchpoint probes (which fire on data references). - -Kprobes Example -=============== - -See samples/kprobes/kprobe_example.c - -Kretprobes Example -================== - -See samples/kprobes/kretprobe_example.c - -For additional information on Kprobes, refer to the following URLs: - -- http://www-106.ibm.com/developerworks/library/l-kprobes.html?ca=dgr-lnxw42Kprobe -- http://www.redhat.com/magazine/005mar05/features/kprobes/ -- http://www-users.cs.umn.edu/~boutcher/kprobes/ -- http://www.linuxsymposium.org/2006/linuxsymposium_procv2.pdf (pages 101-115) - -Deprecated Features -=================== - -Jprobes is now a deprecated feature. People who are depending on it should -migrate to other tracing features or use older kernels. Please consider to -migrate your tool to one of the following options: - -- Use trace-event to trace target function with arguments. - - trace-event is a low-overhead (and almost no visible overhead if it - is off) statically defined event interface. You can define new events - and trace it via ftrace or any other tracing tools. - - See the following urls: - - - https://lwn.net/Articles/379903/ - - https://lwn.net/Articles/381064/ - - https://lwn.net/Articles/383362/ - -- Use ftrace dynamic events (kprobe event) with perf-probe. - - If you build your kernel with debug info (CONFIG_DEBUG_INFO=y), you can - find which register/stack is assigned to which local variable or arguments - by using perf-probe and set up new event to trace it. - - See following documents: - - - Documentation/trace/kprobetrace.rst - - Documentation/trace/events.rst - - tools/perf/Documentation/perf-probe.txt - - -The kprobes debugfs interface -============================= - - -With recent kernels (> 2.6.20) the list of registered kprobes is visible -under the /sys/kernel/debug/kprobes/ directory (assuming debugfs is mounted at //sys/kernel/debug). - -/sys/kernel/debug/kprobes/list: Lists all registered probes on the system:: - - c015d71a k vfs_read+0x0 - c03dedc5 r tcp_v4_rcv+0x0 - -The first column provides the kernel address where the probe is inserted. 
-The second column identifies the type of probe (k - kprobe and r - kretprobe) -while the third column specifies the symbol+offset of the probe. -If the probed function belongs to a module, the module name is also -specified. Following columns show probe status. If the probe is on -a virtual address that is no longer valid (module init sections, module -virtual addresses that correspond to modules that've been unloaded), -such probes are marked with [GONE]. If the probe is temporarily disabled, -such probes are marked with [DISABLED]. If the probe is optimized, it is -marked with [OPTIMIZED]. If the probe is ftrace-based, it is marked with -[FTRACE]. - -/sys/kernel/debug/kprobes/enabled: Turn kprobes ON/OFF forcibly. - -Provides a knob to globally and forcibly turn registered kprobes ON or OFF. -By default, all kprobes are enabled. By echoing "0" to this file, all -registered probes will be disarmed, till such time a "1" is echoed to this -file. Note that this knob just disarms and arms all kprobes and doesn't -change each probe's disabling state. This means that disabled kprobes (marked -[DISABLED]) will be not enabled if you turn ON all kprobes by this knob. - - -The kprobes sysctl interface -============================ - -/proc/sys/debug/kprobes-optimization: Turn kprobes optimization ON/OFF. - -When CONFIG_OPTPROBES=y, this sysctl interface appears and it provides -a knob to globally and forcibly turn jump optimization (see section -:ref:`kprobes_jump_optimization`) ON or OFF. By default, jump optimization -is allowed (ON). If you echo "0" to this file or set -"debug.kprobes_optimization" to 0 via sysctl, all optimized probes will be -unoptimized, and any new probes registered after that will not be optimized. - -Note that this knob *changes* the optimized state. This means that optimized -probes (marked [OPTIMIZED]) will be unoptimized ([OPTIMIZED] tag will be -removed). If the knob is turned on, they will be optimized again. - diff --git a/Documentation/lzo.txt b/Documentation/lzo.txt deleted file mode 100644 index f65b51523014..000000000000 --- a/Documentation/lzo.txt +++ /dev/null @@ -1,202 +0,0 @@ -=========================================================== -LZO stream format as understood by Linux's LZO decompressor -=========================================================== - -Introduction -============ - - This is not a specification. No specification seems to be publicly available - for the LZO stream format. This document describes what input format the LZO - decompressor as implemented in the Linux kernel understands. The file subject - of this analysis is lib/lzo/lzo1x_decompress_safe.c. No analysis was made on - the compressor nor on any other implementations though it seems likely that - the format matches the standard one. The purpose of this document is to - better understand what the code does in order to propose more efficient fixes - for future bug reports. - -Description -=========== - - The stream is composed of a series of instructions, operands, and data. The - instructions consist in a few bits representing an opcode, and bits forming - the operands for the instruction, whose size and position depend on the - opcode and on the number of literals copied by previous instruction. 
The - operands are used to indicate: - - - a distance when copying data from the dictionary (past output buffer) - - a length (number of bytes to copy from dictionary) - - the number of literals to copy, which is retained in variable "state" - as a piece of information for next instructions. - - Optionally depending on the opcode and operands, extra data may follow. These - extra data can be a complement for the operand (eg: a length or a distance - encoded on larger values), or a literal to be copied to the output buffer. - - The first byte of the block follows a different encoding from other bytes, it - seems to be optimized for literal use only, since there is no dictionary yet - prior to that byte. - - Lengths are always encoded on a variable size starting with a small number - of bits in the operand. If the number of bits isn't enough to represent the - length, up to 255 may be added in increments by consuming more bytes with a - rate of at most 255 per extra byte (thus the compression ratio cannot exceed - around 255:1). The variable length encoding using #bits is always the same:: - - length = byte & ((1 << #bits) - 1) - if (!length) { - length = ((1 << #bits) - 1) - length += 255*(number of zero bytes) - length += first-non-zero-byte - } - length += constant (generally 2 or 3) - - For references to the dictionary, distances are relative to the output - pointer. Distances are encoded using very few bits belonging to certain - ranges, resulting in multiple copy instructions using different encodings. - Certain encodings involve one extra byte, others involve two extra bytes - forming a little-endian 16-bit quantity (marked LE16 below). - - After any instruction except the large literal copy, 0, 1, 2 or 3 literals - are copied before starting the next instruction. The number of literals that - were copied may change the meaning and behaviour of the next instruction. In - practice, only one instruction needs to know whether 0, less than 4, or more - literals were copied. This is the information stored in the variable - in this implementation. This number of immediate literals to be copied is - generally encoded in the last two bits of the instruction but may also be - taken from the last two bits of an extra operand (eg: distance). - - End of stream is declared when a block copy of distance 0 is seen. Only one - instruction may encode this distance (0001HLLL), it takes one LE16 operand - for the distance, thus requiring 3 bytes. - - .. important:: - - In the code some length checks are missing because certain instructions - are called under the assumption that a certain number of bytes follow - because it has already been guaranteed before parsing the instructions. - They just have to "refill" this credit if they consume extra bytes. This - is an implementation design choice independent on the algorithm or - encoding. - -Versions - -0: Original version -1: LZO-RLE - -Version 1 of LZO implements an extension to encode runs of zeros using run -length encoding. This improves speed for data with many zeros, which is a -common case for zram. This modifies the bitstream in a backwards compatible way -(v1 can correctly decompress v0 compressed data, but v0 cannot read v1 data). - -For maximum compatibility, both versions are available under different names -(lzo and lzo-rle). Differences in the encoding are noted in this document with -e.g.: version 1 only. - -Byte sequences -============== - - First byte encoding:: - - 0..16 : follow regular instruction encoding, see below. 
It is worth - noting that code 16 will represent a block copy from the - dictionary which is empty, and that it will always be - invalid at this place. - - 17 : bitstream version. If the first byte is 17, and compressed - stream length is at least 5 bytes (length of shortest possible - versioned bitstream), the next byte gives the bitstream version - (version 1 only). - Otherwise, the bitstream version is 0. - - 18..21 : copy 0..3 literals - state = (byte - 17) = 0..3 [ copy literals ] - skip byte - - 22..255 : copy literal string - length = (byte - 17) = 4..238 - state = 4 [ don't copy extra literals ] - skip byte - - Instruction encoding:: - - 0 0 0 0 X X X X (0..15) - Depends on the number of literals copied by the last instruction. - If last instruction did not copy any literal (state == 0), this - encoding will be a copy of 4 or more literal, and must be interpreted - like this : - - 0 0 0 0 L L L L (0..15) : copy long literal string - length = 3 + (L ?: 15 + (zero_bytes * 255) + non_zero_byte) - state = 4 (no extra literals are copied) - - If last instruction used to copy between 1 to 3 literals (encoded in - the instruction's opcode or distance), the instruction is a copy of a - 2-byte block from the dictionary within a 1kB distance. It is worth - noting that this instruction provides little savings since it uses 2 - bytes to encode a copy of 2 other bytes but it encodes the number of - following literals for free. It must be interpreted like this : - - 0 0 0 0 D D S S (0..15) : copy 2 bytes from <= 1kB distance - length = 2 - state = S (copy S literals after this block) - Always followed by exactly one byte : H H H H H H H H - distance = (H << 2) + D + 1 - - If last instruction used to copy 4 or more literals (as detected by - state == 4), the instruction becomes a copy of a 3-byte block from the - dictionary from a 2..3kB distance, and must be interpreted like this : - - 0 0 0 0 D D S S (0..15) : copy 3 bytes from 2..3 kB distance - length = 3 - state = S (copy S literals after this block) - Always followed by exactly one byte : H H H H H H H H - distance = (H << 2) + D + 2049 - - 0 0 0 1 H L L L (16..31) - Copy of a block within 16..48kB distance (preferably less than 10B) - length = 2 + (L ?: 7 + (zero_bytes * 255) + non_zero_byte) - Always followed by exactly one LE16 : D D D D D D D D : D D D D D D S S - distance = 16384 + (H << 14) + D - state = S (copy S literals after this block) - End of stream is reached if distance == 16384 - In version 1 only, to prevent ambiguity with the RLE case when - ((distance & 0x803f) == 0x803f) && (261 <= length <= 264), the - compressor must not emit block copies where distance and length - meet these conditions. - - In version 1 only, this instruction is also used to encode a run of - zeros if distance = 0xbfff, i.e. H = 1 and the D bits are all 1. - In this case, it is followed by a fourth byte, X. 
- run length = ((X << 3) | (0 0 0 0 0 L L L)) + 4 - - 0 0 1 L L L L L (32..63) - Copy of small block within 16kB distance (preferably less than 34B) - length = 2 + (L ?: 31 + (zero_bytes * 255) + non_zero_byte) - Always followed by exactly one LE16 : D D D D D D D D : D D D D D D S S - distance = D + 1 - state = S (copy S literals after this block) - - 0 1 L D D D S S (64..127) - Copy 3-4 bytes from block within 2kB distance - state = S (copy S literals after this block) - length = 3 + L - Always followed by exactly one byte : H H H H H H H H - distance = (H << 3) + D + 1 - - 1 L L D D D S S (128..255) - Copy 5-8 bytes from block within 2kB distance - state = S (copy S literals after this block) - length = 5 + L - Always followed by exactly one byte : H H H H H H H H - distance = (H << 3) + D + 1 - -Authors -======= - - This document was written by Willy Tarreau on 2014/07/19 during an - analysis of the decompression code available in Linux 3.16-rc5, and updated - by Dave Rodgman on 2018/10/30 to introduce run-length - encoding. The code is tricky, it is possible that this document contains - mistakes or that a few corner cases were overlooked. In any case, please - report any doubt, fix, or proposed updates to the author(s) so that the - document can be updated. diff --git a/Documentation/remoteproc.txt b/Documentation/remoteproc.txt deleted file mode 100644 index 2be1147256e0..000000000000 --- a/Documentation/remoteproc.txt +++ /dev/null @@ -1,359 +0,0 @@ -========================== -Remote Processor Framework -========================== - -Introduction -============ - -Modern SoCs typically have heterogeneous remote processor devices in asymmetric -multiprocessing (AMP) configurations, which may be running different instances -of operating system, whether it's Linux or any other flavor of real-time OS. - -OMAP4, for example, has dual Cortex-A9, dual Cortex-M3 and a C64x+ DSP. -In a typical configuration, the dual cortex-A9 is running Linux in a SMP -configuration, and each of the other three cores (two M3 cores and a DSP) -is running its own instance of RTOS in an AMP configuration. - -The remoteproc framework allows different platforms/architectures to -control (power on, load firmware, power off) those remote processors while -abstracting the hardware differences, so the entire driver doesn't need to be -duplicated. In addition, this framework also adds rpmsg virtio devices -for remote processors that supports this kind of communication. This way, -platform-specific remoteproc drivers only need to provide a few low-level -handlers, and then all rpmsg drivers will then just work -(for more information about the virtio-based rpmsg bus and its drivers, -please read Documentation/rpmsg.txt). -Registration of other types of virtio devices is now also possible. Firmwares -just need to publish what kind of virtio devices do they support, and then -remoteproc will add those devices. This makes it possible to reuse the -existing virtio drivers with remote processor backends at a minimal development -cost. - -User API -======== - -:: - - int rproc_boot(struct rproc *rproc) - -Boot a remote processor (i.e. load its firmware, power it on, ...). - -If the remote processor is already powered on, this function immediately -returns (successfully). - -Returns 0 on success, and an appropriate error value otherwise. -Note: to use this function you should already have a valid rproc -handle. 
There are several ways to achieve that cleanly (devres, pdata, -the way remoteproc_rpmsg.c does this, or, if this becomes prevalent, we -might also consider using dev_archdata for this). - -:: - - void rproc_shutdown(struct rproc *rproc) - -Power off a remote processor (previously booted with rproc_boot()). -In case @rproc is still being used by an additional user(s), then -this function will just decrement the power refcount and exit, -without really powering off the device. - -Every call to rproc_boot() must (eventually) be accompanied by a call -to rproc_shutdown(). Calling rproc_shutdown() redundantly is a bug. - -.. note:: - - we're not decrementing the rproc's refcount, only the power refcount. - which means that the @rproc handle stays valid even after - rproc_shutdown() returns, and users can still use it with a subsequent - rproc_boot(), if needed. - -:: - - struct rproc *rproc_get_by_phandle(phandle phandle) - -Find an rproc handle using a device tree phandle. Returns the rproc -handle on success, and NULL on failure. This function increments -the remote processor's refcount, so always use rproc_put() to -decrement it back once rproc isn't needed anymore. - -Typical usage -============= - -:: - - #include - - /* in case we were given a valid 'rproc' handle */ - int dummy_rproc_example(struct rproc *my_rproc) - { - int ret; - - /* let's power on and boot our remote processor */ - ret = rproc_boot(my_rproc); - if (ret) { - /* - * something went wrong. handle it and leave. - */ - } - - /* - * our remote processor is now powered on... give it some work - */ - - /* let's shut it down now */ - rproc_shutdown(my_rproc); - } - -API for implementors -==================== - -:: - - struct rproc *rproc_alloc(struct device *dev, const char *name, - const struct rproc_ops *ops, - const char *firmware, int len) - -Allocate a new remote processor handle, but don't register -it yet. Required parameters are the underlying device, the -name of this remote processor, platform-specific ops handlers, -the name of the firmware to boot this rproc with, and the -length of private data needed by the allocating rproc driver (in bytes). - -This function should be used by rproc implementations during -initialization of the remote processor. - -After creating an rproc handle using this function, and when ready, -implementations should then call rproc_add() to complete -the registration of the remote processor. - -On success, the new rproc is returned, and on failure, NULL. - -.. note:: - - **never** directly deallocate @rproc, even if it was not registered - yet. Instead, when you need to unroll rproc_alloc(), use rproc_free(). - -:: - - void rproc_free(struct rproc *rproc) - -Free an rproc handle that was allocated by rproc_alloc. - -This function essentially unrolls rproc_alloc(), by decrementing the -rproc's refcount. It doesn't directly free rproc; that would happen -only if there are no other references to rproc and its refcount now -dropped to zero. - -:: - - int rproc_add(struct rproc *rproc) - -Register @rproc with the remoteproc framework, after it has been -allocated with rproc_alloc(). - -This is called by the platform-specific rproc implementation, whenever -a new remote processor device is probed. - -Returns 0 on success and an appropriate error code otherwise. -Note: this function initiates an asynchronous firmware loading -context, which will look for virtio devices supported by the rproc's -firmware. 
- -If found, those virtio devices will be created and added, so as a result -of registering this remote processor, additional virtio drivers might get -probed. - -:: - - int rproc_del(struct rproc *rproc) - -Unroll rproc_add(). - -This function should be called when the platform specific rproc -implementation decides to remove the rproc device. it should -_only_ be called if a previous invocation of rproc_add() -has completed successfully. - -After rproc_del() returns, @rproc is still valid, and its -last refcount should be decremented by calling rproc_free(). - -Returns 0 on success and -EINVAL if @rproc isn't valid. - -:: - - void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type) - -Report a crash in a remoteproc - -This function must be called every time a crash is detected by the -platform specific rproc implementation. This should not be called from a -non-remoteproc driver. This function can be called from atomic/interrupt -context. - -Implementation callbacks -======================== - -These callbacks should be provided by platform-specific remoteproc -drivers:: - - /** - * struct rproc_ops - platform-specific device handlers - * @start: power on the device and boot it - * @stop: power off the device - * @kick: kick a virtqueue (virtqueue id given as a parameter) - */ - struct rproc_ops { - int (*start)(struct rproc *rproc); - int (*stop)(struct rproc *rproc); - void (*kick)(struct rproc *rproc, int vqid); - }; - -Every remoteproc implementation should at least provide the ->start and ->stop -handlers. If rpmsg/virtio functionality is also desired, then the ->kick handler -should be provided as well. - -The ->start() handler takes an rproc handle and should then power on the -device and boot it (use rproc->priv to access platform-specific private data). -The boot address, in case needed, can be found in rproc->bootaddr (remoteproc -core puts there the ELF entry point). -On success, 0 should be returned, and on failure, an appropriate error code. - -The ->stop() handler takes an rproc handle and powers the device down. -On success, 0 is returned, and on failure, an appropriate error code. - -The ->kick() handler takes an rproc handle, and an index of a virtqueue -where new message was placed in. Implementations should interrupt the remote -processor and let it know it has pending messages. Notifying remote processors -the exact virtqueue index to look in is optional: it is easy (and not -too expensive) to go through the existing virtqueues and look for new buffers -in the used rings. - -Binary Firmware Structure -========================= - -At this point remoteproc supports ELF32 and ELF64 firmware binaries. However, -it is quite expected that other platforms/devices which we'd want to -support with this framework will be based on different binary formats. - -When those use cases show up, we will have to decouple the binary format -from the framework core, so we can support several binary formats without -duplicating common code. - -When the firmware is parsed, its various segments are loaded to memory -according to the specified device address (might be a physical address -if the remote processor is accessing memory directly). - -In addition to the standard ELF segments, most remote processors would -also include a special section which we call "the resource table". 
- -The resource table contains system resources that the remote processor -requires before it should be powered on, such as allocation of physically -contiguous memory, or iommu mapping of certain on-chip peripherals. -Remotecore will only power up the device after all the resource table's -requirement are met. - -In addition to system resources, the resource table may also contain -resource entries that publish the existence of supported features -or configurations by the remote processor, such as trace buffers and -supported virtio devices (and their configurations). - -The resource table begins with this header:: - - /** - * struct resource_table - firmware resource table header - * @ver: version number - * @num: number of resource entries - * @reserved: reserved (must be zero) - * @offset: array of offsets pointing at the various resource entries - * - * The header of the resource table, as expressed by this structure, - * contains a version number (should we need to change this format in the - * future), the number of available resource entries, and their offsets - * in the table. - */ - struct resource_table { - u32 ver; - u32 num; - u32 reserved[2]; - u32 offset[0]; - } __packed; - -Immediately following this header are the resource entries themselves, -each of which begins with the following resource entry header:: - - /** - * struct fw_rsc_hdr - firmware resource entry header - * @type: resource type - * @data: resource data - * - * Every resource entry begins with a 'struct fw_rsc_hdr' header providing - * its @type. The content of the entry itself will immediately follow - * this header, and it should be parsed according to the resource type. - */ - struct fw_rsc_hdr { - u32 type; - u8 data[0]; - } __packed; - -Some resources entries are mere announcements, where the host is informed -of specific remoteproc configuration. Other entries require the host to -do something (e.g. allocate a system resource). Sometimes a negotiation -is expected, where the firmware requests a resource, and once allocated, -the host should provide back its details (e.g. address of an allocated -memory region). - -Here are the various resource types that are currently supported:: - - /** - * enum fw_resource_type - types of resource entries - * - * @RSC_CARVEOUT: request for allocation of a physically contiguous - * memory region. - * @RSC_DEVMEM: request to iommu_map a memory-based peripheral. - * @RSC_TRACE: announces the availability of a trace buffer into which - * the remote processor will be writing logs. - * @RSC_VDEV: declare support for a virtio device, and serve as its - * virtio header. - * @RSC_LAST: just keep this one at the end - * @RSC_VENDOR_START: start of the vendor specific resource types range - * @RSC_VENDOR_END: end of the vendor specific resource types range - * - * Please note that these values are used as indices to the rproc_handle_rsc - * lookup table, so please keep them sane. Moreover, @RSC_LAST is used to - * check the validity of an index before the lookup table is accessed, so - * please update it as needed. - */ - enum fw_resource_type { - RSC_CARVEOUT = 0, - RSC_DEVMEM = 1, - RSC_TRACE = 2, - RSC_VDEV = 3, - RSC_LAST = 4, - RSC_VENDOR_START = 128, - RSC_VENDOR_END = 512, - }; - -For more details regarding a specific resource type, please see its -dedicated structure in include/linux/remoteproc.h. - -We also expect that platform-specific resource entries will show up -at some point. 
When that happens, we could easily add a new RSC_PLATFORM -type, and hand those resources to the platform-specific rproc driver to handle. - -Virtio and remoteproc -===================== - -The firmware should provide remoteproc information about virtio devices -that it supports, and their configurations: a RSC_VDEV resource entry -should specify the virtio device id (as in virtio_ids.h), virtio features, -virtio config space, vrings information, etc. - -When a new remote processor is registered, the remoteproc framework -will look for its resource table and will register the virtio devices -it supports. A firmware may support any number of virtio devices, and -of any type (a single remote processor can also easily support several -rpmsg virtio devices this way, if desired). - -Of course, RSC_VDEV resource entries are only good enough for static -allocation of virtio devices. Dynamic allocations will also be made possible -using the rpmsg bus (similar to how we already do dynamic allocations of -rpmsg channels; read more about it in rpmsg.txt). diff --git a/Documentation/rpmsg.txt b/Documentation/rpmsg.txt deleted file mode 100644 index 24b7a9e1a5f9..000000000000 --- a/Documentation/rpmsg.txt +++ /dev/null @@ -1,341 +0,0 @@ -============================================ -Remote Processor Messaging (rpmsg) Framework -============================================ - -.. note:: - - This document describes the rpmsg bus and how to write rpmsg drivers. - To learn how to add rpmsg support for new platforms, check out remoteproc.txt - (also a resident of Documentation/). - -Introduction -============ - -Modern SoCs typically employ heterogeneous remote processor devices in -asymmetric multiprocessing (AMP) configurations, which may be running -different instances of operating system, whether it's Linux or any other -flavor of real-time OS. - -OMAP4, for example, has dual Cortex-A9, dual Cortex-M3 and a C64x+ DSP. -Typically, the dual cortex-A9 is running Linux in a SMP configuration, -and each of the other three cores (two M3 cores and a DSP) is running -its own instance of RTOS in an AMP configuration. - -Typically AMP remote processors employ dedicated DSP codecs and multimedia -hardware accelerators, and therefore are often used to offload CPU-intensive -multimedia tasks from the main application processor. - -These remote processors could also be used to control latency-sensitive -sensors, drive random hardware blocks, or just perform background tasks -while the main CPU is idling. - -Users of those remote processors can either be userland apps (e.g. multimedia -frameworks talking with remote OMX components) or kernel drivers (controlling -hardware accessible only by the remote processor, reserving kernel-controlled -resources on behalf of the remote processor, etc..). - -Rpmsg is a virtio-based messaging bus that allows kernel drivers to communicate -with remote processors available on the system. In turn, drivers could then -expose appropriate user space interfaces, if needed. - -When writing a driver that exposes rpmsg communication to userland, please -keep in mind that remote processors might have direct access to the -system's physical memory and other sensitive hardware resources (e.g. on -OMAP4, remote cores and hardware accelerators may have direct access to the -physical memory, gpio banks, dma controllers, i2c bus, gptimers, mailbox -devices, hwspinlocks, etc..). 
Moreover, those remote processors might be -running RTOS where every task can access the entire memory/devices exposed -to the processor. To minimize the risks of rogue (or buggy) userland code -exploiting remote bugs, and by that taking over the system, it is often -desired to limit userland to specific rpmsg channels (see definition below) -it can send messages on, and if possible, minimize how much control -it has over the content of the messages. - -Every rpmsg device is a communication channel with a remote processor (thus -rpmsg devices are called channels). Channels are identified by a textual name -and have a local ("source") rpmsg address, and remote ("destination") rpmsg -address. - -When a driver starts listening on a channel, its rx callback is bound with -a unique rpmsg local address (a 32-bit integer). This way when inbound messages -arrive, the rpmsg core dispatches them to the appropriate driver according -to their destination address (this is done by invoking the driver's rx handler -with the payload of the inbound message). - - -User API -======== - -:: - - int rpmsg_send(struct rpmsg_channel *rpdev, void *data, int len); - -sends a message across to the remote processor on a given channel. -The caller should specify the channel, the data it wants to send, -and its length (in bytes). The message will be sent on the specified -channel, i.e. its source and destination address fields will be -set to the channel's src and dst addresses. - -In case there are no TX buffers available, the function will block until -one becomes available (i.e. until the remote processor consumes -a tx buffer and puts it back on virtio's used descriptor ring), -or a timeout of 15 seconds elapses. When the latter happens, --ERESTARTSYS is returned. - -The function can only be called from a process context (for now). -Returns 0 on success and an appropriate error value on failure. - -:: - - int rpmsg_sendto(struct rpmsg_channel *rpdev, void *data, int len, u32 dst); - -sends a message across to the remote processor on a given channel, -to a destination address provided by the caller. - -The caller should specify the channel, the data it wants to send, -its length (in bytes), and an explicit destination address. - -The message will then be sent to the remote processor to which the -channel belongs, using the channel's src address, and the user-provided -dst address (thus the channel's dst address will be ignored). - -In case there are no TX buffers available, the function will block until -one becomes available (i.e. until the remote processor consumes -a tx buffer and puts it back on virtio's used descriptor ring), -or a timeout of 15 seconds elapses. When the latter happens, --ERESTARTSYS is returned. - -The function can only be called from a process context (for now). -Returns 0 on success and an appropriate error value on failure. - -:: - - int rpmsg_send_offchannel(struct rpmsg_channel *rpdev, u32 src, u32 dst, - void *data, int len); - - -sends a message across to the remote processor, using the src and dst -addresses provided by the user. - -The caller should specify the channel, the data it wants to send, -its length (in bytes), and explicit source and destination addresses. -The message will then be sent to the remote processor to which the -channel belongs, but the channel's src and dst addresses will be -ignored (and the user-provided addresses will be used instead). - -In case there are no TX buffers available, the function will block until -one becomes available (i.e. 
until the remote processor consumes -a tx buffer and puts it back on virtio's used descriptor ring), -or a timeout of 15 seconds elapses. When the latter happens, --ERESTARTSYS is returned. - -The function can only be called from a process context (for now). -Returns 0 on success and an appropriate error value on failure. - -:: - - int rpmsg_trysend(struct rpmsg_channel *rpdev, void *data, int len); - -sends a message across to the remote processor on a given channel. -The caller should specify the channel, the data it wants to send, -and its length (in bytes). The message will be sent on the specified -channel, i.e. its source and destination address fields will be -set to the channel's src and dst addresses. - -In case there are no TX buffers available, the function will immediately -return -ENOMEM without waiting until one becomes available. - -The function can only be called from a process context (for now). -Returns 0 on success and an appropriate error value on failure. - -:: - - int rpmsg_trysendto(struct rpmsg_channel *rpdev, void *data, int len, u32 dst) - - -sends a message across to the remote processor on a given channel, -to a destination address provided by the user. - -The user should specify the channel, the data it wants to send, -its length (in bytes), and an explicit destination address. - -The message will then be sent to the remote processor to which the -channel belongs, using the channel's src address, and the user-provided -dst address (thus the channel's dst address will be ignored). - -In case there are no TX buffers available, the function will immediately -return -ENOMEM without waiting until one becomes available. - -The function can only be called from a process context (for now). -Returns 0 on success and an appropriate error value on failure. - -:: - - int rpmsg_trysend_offchannel(struct rpmsg_channel *rpdev, u32 src, u32 dst, - void *data, int len); - - -sends a message across to the remote processor, using source and -destination addresses provided by the user. - -The user should specify the channel, the data it wants to send, -its length (in bytes), and explicit source and destination addresses. -The message will then be sent to the remote processor to which the -channel belongs, but the channel's src and dst addresses will be -ignored (and the user-provided addresses will be used instead). - -In case there are no TX buffers available, the function will immediately -return -ENOMEM without waiting until one becomes available. - -The function can only be called from a process context (for now). -Returns 0 on success and an appropriate error value on failure. - -:: - - struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *rpdev, - void (*cb)(struct rpmsg_channel *, void *, int, void *, u32), - void *priv, u32 addr); - -every rpmsg address in the system is bound to an rx callback (so when -inbound messages arrive, they are dispatched by the rpmsg bus using the -appropriate callback handler) by means of an rpmsg_endpoint struct. - -This function allows drivers to create such an endpoint, and by that, -bind a callback, and possibly some private data too, to an rpmsg address -(either one that is known in advance, or one that will be dynamically -assigned for them). - -Simple rpmsg drivers need not call rpmsg_create_ept, because an endpoint -is already created for them when they are probed by the rpmsg bus -(using the rx callback they provide when they registered to the rpmsg bus). 
- -So things should just work for simple drivers: they already have an -endpoint, their rx callback is bound to their rpmsg address, and when -relevant inbound messages arrive (i.e. messages which their dst address -equals to the src address of their rpmsg channel), the driver's handler -is invoked to process it. - -That said, more complicated drivers might do need to allocate -additional rpmsg addresses, and bind them to different rx callbacks. -To accomplish that, those drivers need to call this function. -Drivers should provide their channel (so the new endpoint would bind -to the same remote processor their channel belongs to), an rx callback -function, an optional private data (which is provided back when the -rx callback is invoked), and an address they want to bind with the -callback. If addr is RPMSG_ADDR_ANY, then rpmsg_create_ept will -dynamically assign them an available rpmsg address (drivers should have -a very good reason why not to always use RPMSG_ADDR_ANY here). - -Returns a pointer to the endpoint on success, or NULL on error. - -:: - - void rpmsg_destroy_ept(struct rpmsg_endpoint *ept); - - -destroys an existing rpmsg endpoint. user should provide a pointer -to an rpmsg endpoint that was previously created with rpmsg_create_ept(). - -:: - - int register_rpmsg_driver(struct rpmsg_driver *rpdrv); - - -registers an rpmsg driver with the rpmsg bus. user should provide -a pointer to an rpmsg_driver struct, which contains the driver's -->probe() and ->remove() functions, an rx callback, and an id_table -specifying the names of the channels this driver is interested to -be probed with. - -:: - - void unregister_rpmsg_driver(struct rpmsg_driver *rpdrv); - - -unregisters an rpmsg driver from the rpmsg bus. user should provide -a pointer to a previously-registered rpmsg_driver struct. -Returns 0 on success, and an appropriate error value on failure. - - -Typical usage -============= - -The following is a simple rpmsg driver, that sends an "hello!" message -on probe(), and whenever it receives an incoming message, it dumps its -content to the console. - -:: - - #include - #include - #include - - static void rpmsg_sample_cb(struct rpmsg_channel *rpdev, void *data, int len, - void *priv, u32 src) - { - print_hex_dump(KERN_INFO, "incoming message:", DUMP_PREFIX_NONE, - 16, 1, data, len, true); - } - - static int rpmsg_sample_probe(struct rpmsg_channel *rpdev) - { - int err; - - dev_info(&rpdev->dev, "chnl: 0x%x -> 0x%x\n", rpdev->src, rpdev->dst); - - /* send a message on our channel */ - err = rpmsg_send(rpdev, "hello!", 6); - if (err) { - pr_err("rpmsg_send failed: %d\n", err); - return err; - } - - return 0; - } - - static void rpmsg_sample_remove(struct rpmsg_channel *rpdev) - { - dev_info(&rpdev->dev, "rpmsg sample client driver is removed\n"); - } - - static struct rpmsg_device_id rpmsg_driver_sample_id_table[] = { - { .name = "rpmsg-client-sample" }, - { }, - }; - MODULE_DEVICE_TABLE(rpmsg, rpmsg_driver_sample_id_table); - - static struct rpmsg_driver rpmsg_sample_client = { - .drv.name = KBUILD_MODNAME, - .id_table = rpmsg_driver_sample_id_table, - .probe = rpmsg_sample_probe, - .callback = rpmsg_sample_cb, - .remove = rpmsg_sample_remove, - }; - module_rpmsg_driver(rpmsg_sample_client); - -.. note:: - - a similar sample which can be built and loaded can be found - in samples/rpmsg/. - -Allocations of rpmsg channels -============================= - -At this point we only support dynamic allocations of rpmsg channels. 
- -This is possible only with remote processors that have the VIRTIO_RPMSG_F_NS -virtio device feature set. This feature bit means that the remote -processor supports dynamic name service announcement messages. - -When this feature is enabled, creation of rpmsg devices (i.e. channels) -is completely dynamic: the remote processor announces the existence of a -remote rpmsg service by sending a name service message (which contains -the name and rpmsg addr of the remote service, see struct rpmsg_ns_msg). - -This message is then handled by the rpmsg bus, which in turn dynamically -creates and registers an rpmsg channel (which represents the remote service). -If/when a relevant rpmsg driver is registered, it will be immediately probed -by the bus, and can then start sending messages to the remote service. - -The plan is also to add static creation of rpmsg channels via the virtio -config space, but it's not implemented yet. diff --git a/Documentation/speculation.txt b/Documentation/speculation.txt deleted file mode 100644 index 50d7ea857cff..000000000000 --- a/Documentation/speculation.txt +++ /dev/null @@ -1,90 +0,0 @@ -This document explains potential effects of speculation, and how undesirable -effects can be mitigated portably using common APIs. - -=========== -Speculation -=========== - -To improve performance and minimize average latencies, many contemporary CPUs -employ speculative execution techniques such as branch prediction, performing -work which may be discarded at a later stage. - -Typically speculative execution cannot be observed from architectural state, -such as the contents of registers. However, in some cases it is possible to -observe its impact on microarchitectural state, such as the presence or -absence of data in caches. Such state may form side-channels which can be -observed to extract secret information. - -For example, in the presence of branch prediction, it is possible for bounds -checks to be ignored by code which is speculatively executed. Consider the -following code:: - - int load_array(int *array, unsigned int index) - { - if (index >= MAX_ARRAY_ELEMS) - return 0; - else - return array[index]; - } - -Which, on arm64, may be compiled to an assembly sequence such as:: - - CMP , #MAX_ARRAY_ELEMS - B.LT less - MOV , #0 - RET - less: - LDR , [, ] - RET - -It is possible that a CPU mis-predicts the conditional branch, and -speculatively loads array[index], even if index >= MAX_ARRAY_ELEMS. This -value will subsequently be discarded, but the speculated load may affect -microarchitectural state which can be subsequently measured. - -More complex sequences involving multiple dependent memory accesses may -result in sensitive information being leaked. Consider the following -code, building on the prior example:: - - int load_dependent_arrays(int *arr1, int *arr2, int index) - { - int val1, val2, - - val1 = load_array(arr1, index); - val2 = load_array(arr2, val1); - - return val2; - } - -Under speculation, the first call to load_array() may return the value -of an out-of-bounds address, while the second call will influence -microarchitectural state dependent on this value. This may provide an -arbitrary read primitive. - -==================================== -Mitigating speculation side-channels -==================================== - -The kernel provides a generic API to ensure that bounds checks are -respected even under speculation. Architectures which are affected by -speculation-based side-channels are expected to implement these -primitives. 
- -The array_index_nospec() helper in can be used to -prevent information from being leaked via side-channels. - -A call to array_index_nospec(index, size) returns a sanitized index -value that is bounded to [0, size) even under cpu speculation -conditions. - -This can be used to protect the earlier load_array() example:: - - int load_array(int *array, unsigned int index) - { - if (index >= MAX_ARRAY_ELEMS) - return 0; - else { - index = array_index_nospec(index, MAX_ARRAY_ELEMS); - return array[index]; - } - } diff --git a/Documentation/staging/crc32.rst b/Documentation/staging/crc32.rst new file mode 100644 index 000000000000..8a6860f33b4e --- /dev/null +++ b/Documentation/staging/crc32.rst @@ -0,0 +1,189 @@ +================================= +brief tutorial on CRC computation +================================= + +A CRC is a long-division remainder. You add the CRC to the message, +and the whole thing (message+CRC) is a multiple of the given +CRC polynomial. To check the CRC, you can either check that the +CRC matches the recomputed value, *or* you can check that the +remainder computed on the message+CRC is 0. This latter approach +is used by a lot of hardware implementations, and is why so many +protocols put the end-of-frame flag after the CRC. + +It's actually the same long division you learned in school, except that: + +- We're working in binary, so the digits are only 0 and 1, and +- When dividing polynomials, there are no carries. Rather than add and + subtract, we just xor. Thus, we tend to get a bit sloppy about + the difference between adding and subtracting. + +Like all division, the remainder is always smaller than the divisor. +To produce a 32-bit CRC, the divisor is actually a 33-bit CRC polynomial. +Since it's 33 bits long, bit 32 is always going to be set, so usually the +CRC is written in hex with the most significant bit omitted. (If you're +familiar with the IEEE 754 floating-point format, it's the same idea.) + +Note that a CRC is computed over a string of *bits*, so you have +to decide on the endianness of the bits within each byte. To get +the best error-detecting properties, this should correspond to the +order they're actually sent. For example, standard RS-232 serial is +little-endian; the most significant bit (sometimes used for parity) +is sent last. And when appending a CRC word to a message, you should +do it in the right order, matching the endianness. + +Just like with ordinary division, you proceed one digit (bit) at a time. +Each step of the division you take one more digit (bit) of the dividend +and append it to the current remainder. Then you figure out the +appropriate multiple of the divisor to subtract to being the remainder +back into range. In binary, this is easy - it has to be either 0 or 1, +and to make the XOR cancel, it's just a copy of bit 32 of the remainder. + +When computing a CRC, we don't care about the quotient, so we can +throw the quotient bit away, but subtract the appropriate multiple of +the polynomial from the remainder and we're back to where we started, +ready to process the next bit. + +A big-endian CRC written this way would be coded like:: + + for (i = 0; i < input_bits; i++) { + multiple = remainder & 0x80000000 ? CRCPOLY : 0; + remainder = (remainder << 1 | next_input_bit()) ^ multiple; + } + +Notice how, to get at bit 32 of the shifted remainder, we look +at bit 31 of the remainder *before* shifting it. 
+ +But also notice how the next_input_bit() bits we're shifting into +the remainder don't actually affect any decision-making until +32 bits later. Thus, the first 32 cycles of this are pretty boring. +Also, to add the CRC to a message, we need a 32-bit-long hole for it at +the end, so we have to add 32 extra cycles shifting in zeros at the +end of every message. + +These details lead to a standard trick: rearrange merging in the +next_input_bit() until the moment it's needed. Then the first 32 cycles +can be precomputed, and merging in the final 32 zero bits to make room +for the CRC can be skipped entirely. This changes the code to:: + + for (i = 0; i < input_bits; i++) { + remainder ^= next_input_bit() << 31; + multiple = (remainder & 0x80000000) ? CRCPOLY : 0; + remainder = (remainder << 1) ^ multiple; + } + +With this optimization, the little-endian code is particularly simple:: + + for (i = 0; i < input_bits; i++) { + remainder ^= next_input_bit(); + multiple = (remainder & 1) ? CRCPOLY : 0; + remainder = (remainder >> 1) ^ multiple; + } + +The most significant coefficient of the remainder polynomial is stored +in the least significant bit of the binary "remainder" variable. +The other details of endianness have been hidden in CRCPOLY (which must +be bit-reversed) and next_input_bit(). + +As long as next_input_bit is returning the bits in a sensible order, we don't +*have* to wait until the last possible moment to merge in additional bits. +We can do it 8 bits at a time rather than 1 bit at a time:: + + for (i = 0; i < input_bytes; i++) { + remainder ^= next_input_byte() << 24; + for (j = 0; j < 8; j++) { + multiple = (remainder & 0x80000000) ? CRCPOLY : 0; + remainder = (remainder << 1) ^ multiple; + } + } + +Or in little-endian:: + + for (i = 0; i < input_bytes; i++) { + remainder ^= next_input_byte(); + for (j = 0; j < 8; j++) { + multiple = (remainder & 1) ? CRCPOLY : 0; + remainder = (remainder >> 1) ^ multiple; + } + } + +If the input is a multiple of 32 bits, you can even XOR in a 32-bit +word at a time and increase the inner loop count to 32. + +You can also mix and match the two loop styles, for example doing the +bulk of a message byte-at-a-time and adding bit-at-a-time processing +for any fractional bytes at the end. + +To reduce the number of conditional branches, software commonly uses +the byte-at-a-time table method, popularized by Dilip V. Sarwate, +"Computation of Cyclic Redundancy Checks via Table Look-Up", Comm. ACM +v.31 no.8 (August 1998) p. 1008-1013. + +Here, rather than just shifting one bit of the remainder to decide +in the correct multiple to subtract, we can shift a byte at a time. +This produces a 40-bit (rather than a 33-bit) intermediate remainder, +and the correct multiple of the polynomial to subtract is found using +a 256-entry lookup table indexed by the high 8 bits. + +(The table entries are simply the CRC-32 of the given one-byte messages.) + +When space is more constrained, smaller tables can be used, e.g. two +4-bit shifts followed by a lookup in a 16-entry table. + +It is not practical to process much more than 8 bits at a time using this +technique, because tables larger than 256 entries use too much memory and, +more importantly, too much of the L1 cache. + +To get higher software performance, a "slicing" technique can be used. 
+See "High Octane CRC Generation with the Intel Slicing-by-8 Algorithm", +ftp://download.intel.com/technology/comms/perfnet/download/slicing-by-8.pdf + +This does not change the number of table lookups, but does increase +the parallelism. With the classic Sarwate algorithm, each table lookup +must be completed before the index of the next can be computed. + +A "slicing by 2" technique would shift the remainder 16 bits at a time, +producing a 48-bit intermediate remainder. Rather than doing a single +lookup in a 65536-entry table, the two high bytes are looked up in +two different 256-entry tables. Each contains the remainder required +to cancel out the corresponding byte. The tables are different because the +polynomials to cancel are different. One has non-zero coefficients from +x^32 to x^39, while the other goes from x^40 to x^47. + +Since modern processors can handle many parallel memory operations, this +takes barely longer than a single table look-up and thus performs almost +twice as fast as the basic Sarwate algorithm. + +This can be extended to "slicing by 4" using 4 256-entry tables. +Each step, 32 bits of data is fetched, XORed with the CRC, and the result +broken into bytes and looked up in the tables. Because the 32-bit shift +leaves the low-order bits of the intermediate remainder zero, the +final CRC is simply the XOR of the 4 table look-ups. + +But this still enforces sequential execution: a second group of table +look-ups cannot begin until the previous groups 4 table look-ups have all +been completed. Thus, the processor's load/store unit is sometimes idle. + +To make maximum use of the processor, "slicing by 8" performs 8 look-ups +in parallel. Each step, the 32-bit CRC is shifted 64 bits and XORed +with 64 bits of input data. What is important to note is that 4 of +those 8 bytes are simply copies of the input data; they do not depend +on the previous CRC at all. Thus, those 4 table look-ups may commence +immediately, without waiting for the previous loop iteration. + +By always having 4 loads in flight, a modern superscalar processor can +be kept busy and make full use of its L1 cache. + +Two more details about CRC implementation in the real world: + +Normally, appending zero bits to a message which is already a multiple +of a polynomial produces a larger multiple of that polynomial. Thus, +a basic CRC will not detect appended zero bits (or bytes). To enable +a CRC to detect this condition, it's common to invert the CRC before +appending it. This makes the remainder of the message+crc come out not +as zero, but some fixed non-zero value. (The CRC of the inversion +pattern, 0xffffffff.) + +The same problem applies to zero bits prepended to the message, and a +similar solution is used. Instead of starting the CRC computation with +a remainder of 0, an initial remainder of all ones is used. As long as +you start the same way on decoding, it doesn't make a difference. diff --git a/Documentation/staging/index.rst b/Documentation/staging/index.rst new file mode 100644 index 000000000000..8e98517675ca --- /dev/null +++ b/Documentation/staging/index.rst @@ -0,0 +1,32 @@ +.. SPDX-License-Identifier: GPL-2.0 + +Unsorted Documentation +====================== + +.. toctree:: + :maxdepth: 2 + + crc32 + kprobes + lzo + remoteproc + rpmsg + speculation + static-keys + tee + xz + +Atomic Types +============ + +.. literalinclude:: ../atomic_t.txt + +Atomic bitops +============= + +.. literalinclude:: ../atomic_bitops.txt + +Memory Barriers +=============== + +.. 
literalinclude:: ../memory-barriers.txt diff --git a/Documentation/staging/kprobes.rst b/Documentation/staging/kprobes.rst new file mode 100644 index 000000000000..8baab8832c5b --- /dev/null +++ b/Documentation/staging/kprobes.rst @@ -0,0 +1,801 @@ +======================= +Kernel Probes (Kprobes) +======================= + +:Author: Jim Keniston +:Author: Prasanna S Panchamukhi +:Author: Masami Hiramatsu + +.. CONTENTS + + 1. Concepts: Kprobes, and Return Probes + 2. Architectures Supported + 3. Configuring Kprobes + 4. API Reference + 5. Kprobes Features and Limitations + 6. Probe Overhead + 7. TODO + 8. Kprobes Example + 9. Kretprobes Example + 10. Deprecated Features + Appendix A: The kprobes debugfs interface + Appendix B: The kprobes sysctl interface + +Concepts: Kprobes and Return Probes +========================================= + +Kprobes enables you to dynamically break into any kernel routine and +collect debugging and performance information non-disruptively. You +can trap at almost any kernel code address [1]_, specifying a handler +routine to be invoked when the breakpoint is hit. + +.. [1] some parts of the kernel code can not be trapped, see + :ref:`kprobes_blacklist`) + +There are currently two types of probes: kprobes, and kretprobes +(also called return probes). A kprobe can be inserted on virtually +any instruction in the kernel. A return probe fires when a specified +function returns. + +In the typical case, Kprobes-based instrumentation is packaged as +a kernel module. The module's init function installs ("registers") +one or more probes, and the exit function unregisters them. A +registration function such as register_kprobe() specifies where +the probe is to be inserted and what handler is to be called when +the probe is hit. + +There are also ``register_/unregister_*probes()`` functions for batch +registration/unregistration of a group of ``*probes``. These functions +can speed up unregistration process when you have to unregister +a lot of probes at once. + +The next four subsections explain how the different types of +probes work and how jump optimization works. They explain certain +things that you'll need to know in order to make the best use of +Kprobes -- e.g., the difference between a pre_handler and +a post_handler, and how to use the maxactive and nmissed fields of +a kretprobe. But if you're in a hurry to start using Kprobes, you +can skip ahead to :ref:`kprobes_archs_supported`. + +How Does a Kprobe Work? +----------------------- + +When a kprobe is registered, Kprobes makes a copy of the probed +instruction and replaces the first byte(s) of the probed instruction +with a breakpoint instruction (e.g., int3 on i386 and x86_64). + +When a CPU hits the breakpoint instruction, a trap occurs, the CPU's +registers are saved, and control passes to Kprobes via the +notifier_call_chain mechanism. Kprobes executes the "pre_handler" +associated with the kprobe, passing the handler the addresses of the +kprobe struct and the saved registers. + +Next, Kprobes single-steps its copy of the probed instruction. +(It would be simpler to single-step the actual instruction in place, +but then Kprobes would have to temporarily remove the breakpoint +instruction. This would open a small time window when another CPU +could sail right past the probepoint.) + +After the instruction is single-stepped, Kprobes executes the +"post_handler," if any, that is associated with the kprobe. +Execution then continues with the instruction following the probepoint. 
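+
+As an illustrative sketch of the sequence just described (this is not a
+substitute for samples/kprobes/kprobe_example.c, and the choice of
+vfs_read as the probed symbol is arbitrary), a minimal module that
+registers a single kprobe with both handlers could look like::
+
+    #include <linux/kernel.h>
+    #include <linux/module.h>
+    #include <linux/kprobes.h>
+
+    static struct kprobe kp = {
+        .symbol_name = "vfs_read",    /* arbitrary example target */
+    };
+
+    /* Runs from the breakpoint trap, before the copied instruction
+     * is single-stepped. */
+    static int handler_pre(struct kprobe *p, struct pt_regs *regs)
+    {
+        pr_info("pre: hit probe at %s\n", p->symbol_name);
+        return 0;    /* 0: do not change the execution path */
+    }
+
+    /* Runs after the copied instruction has been single-stepped. */
+    static void handler_post(struct kprobe *p, struct pt_regs *regs,
+                             unsigned long flags)
+    {
+        pr_info("post: resuming after probe at %s\n", p->symbol_name);
+    }
+
+    static int __init kprobe_sketch_init(void)
+    {
+        kp.pre_handler = handler_pre;
+        kp.post_handler = handler_post;
+        return register_kprobe(&kp);
+    }
+
+    static void __exit kprobe_sketch_exit(void)
+    {
+        unregister_kprobe(&kp);
+    }
+
+    module_init(kprobe_sketch_init);
+    module_exit(kprobe_sketch_exit);
+    MODULE_LICENSE("GPL");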
+ +Changing Execution Path +----------------------- + +Since kprobes can probe into a running kernel code, it can change the +register set, including instruction pointer. This operation requires +maximum care, such as keeping the stack frame, recovering the execution +path etc. Since it operates on a running kernel and needs deep knowledge +of computer architecture and concurrent computing, you can easily shoot +your foot. + +If you change the instruction pointer (and set up other related +registers) in pre_handler, you must return !0 so that kprobes stops +single stepping and just returns to the given address. +This also means post_handler should not be called anymore. + +Note that this operation may be harder on some architectures which use +TOC (Table of Contents) for function call, since you have to setup a new +TOC for your function in your module, and recover the old one after +returning from it. + +Return Probes +------------- + +How Does a Return Probe Work? +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +When you call register_kretprobe(), Kprobes establishes a kprobe at +the entry to the function. When the probed function is called and this +probe is hit, Kprobes saves a copy of the return address, and replaces +the return address with the address of a "trampoline." The trampoline +is an arbitrary piece of code -- typically just a nop instruction. +At boot time, Kprobes registers a kprobe at the trampoline. + +When the probed function executes its return instruction, control +passes to the trampoline and that probe is hit. Kprobes' trampoline +handler calls the user-specified return handler associated with the +kretprobe, then sets the saved instruction pointer to the saved return +address, and that's where execution resumes upon return from the trap. + +While the probed function is executing, its return address is +stored in an object of type kretprobe_instance. Before calling +register_kretprobe(), the user sets the maxactive field of the +kretprobe struct to specify how many instances of the specified +function can be probed simultaneously. register_kretprobe() +pre-allocates the indicated number of kretprobe_instance objects. + +For example, if the function is non-recursive and is called with a +spinlock held, maxactive = 1 should be enough. If the function is +non-recursive and can never relinquish the CPU (e.g., via a semaphore +or preemption), NR_CPUS should be enough. If maxactive <= 0, it is +set to a default value. If CONFIG_PREEMPT is enabled, the default +is max(10, 2*NR_CPUS). Otherwise, the default is NR_CPUS. + +It's not a disaster if you set maxactive too low; you'll just miss +some probes. In the kretprobe struct, the nmissed field is set to +zero when the return probe is registered, and is incremented every +time the probed function is entered but there is no kretprobe_instance +object available for establishing the return probe. + +Kretprobe entry-handler +^^^^^^^^^^^^^^^^^^^^^^^ + +Kretprobes also provides an optional user-specified handler which runs +on function entry. This handler is specified by setting the entry_handler +field of the kretprobe struct. Whenever the kprobe placed by kretprobe at the +function entry is hit, the user-defined entry_handler, if any, is invoked. +If the entry_handler returns 0 (success) then a corresponding return handler +is guaranteed to be called upon function return. If the entry_handler +returns a non-zero error then Kprobes leaves the return address as is, and +the kretprobe has no further effect for that particular function instance. 
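+
+As a rough sketch of this contract (vfs_read is again only an example
+target, and the handlers merely log), an entry handler can be used to
+filter out instances the return handler should ignore::
+
+    #include <linux/kernel.h>
+    #include <linux/module.h>
+    #include <linux/kprobes.h>
+    #include <linux/ptrace.h>
+    #include <linux/sched.h>
+
+    /* Returning non-zero here means "skip this instance": the return
+     * handler below will then not run for it. */
+    static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+    {
+        return current->mm ? 0 : 1;    /* ignore kernel threads */
+    }
+
+    /* Runs when the probed function returns, for accepted instances only. */
+    static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+    {
+        pr_info("%s returned %ld\n", ri->rp->kp.symbol_name,
+                (long)regs_return_value(regs));
+        return 0;
+    }
+
+    static struct kretprobe my_kretprobe = {
+        .kp.symbol_name = "vfs_read",    /* arbitrary example target */
+        .entry_handler  = entry_handler,
+        .handler        = ret_handler,
+        .maxactive      = 20,            /* concurrent instances to track */
+    };
+
+    /* register_kretprobe(&my_kretprobe) in module init,
+     * unregister_kretprobe(&my_kretprobe) in module exit. */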
+ +Multiple entry and return handler invocations are matched using the unique +kretprobe_instance object associated with them. Additionally, a user +may also specify per return-instance private data to be part of each +kretprobe_instance object. This is especially useful when sharing private +data between corresponding user entry and return handlers. The size of each +private data object can be specified at kretprobe registration time by +setting the data_size field of the kretprobe struct. This data can be +accessed through the data field of each kretprobe_instance object. + +In case probed function is entered but there is no kretprobe_instance +object available, then in addition to incrementing the nmissed count, +the user entry_handler invocation is also skipped. + +.. _kprobes_jump_optimization: + +How Does Jump Optimization Work? +-------------------------------- + +If your kernel is built with CONFIG_OPTPROBES=y (currently this flag +is automatically set 'y' on x86/x86-64, non-preemptive kernel) and +the "debug.kprobes_optimization" kernel parameter is set to 1 (see +sysctl(8)), Kprobes tries to reduce probe-hit overhead by using a jump +instruction instead of a breakpoint instruction at each probepoint. + +Init a Kprobe +^^^^^^^^^^^^^ + +When a probe is registered, before attempting this optimization, +Kprobes inserts an ordinary, breakpoint-based kprobe at the specified +address. So, even if it's not possible to optimize this particular +probepoint, there'll be a probe there. + +Safety Check +^^^^^^^^^^^^ + +Before optimizing a probe, Kprobes performs the following safety checks: + +- Kprobes verifies that the region that will be replaced by the jump + instruction (the "optimized region") lies entirely within one function. + (A jump instruction is multiple bytes, and so may overlay multiple + instructions.) + +- Kprobes analyzes the entire function and verifies that there is no + jump into the optimized region. Specifically: + + - the function contains no indirect jump; + - the function contains no instruction that causes an exception (since + the fixup code triggered by the exception could jump back into the + optimized region -- Kprobes checks the exception tables to verify this); + - there is no near jump to the optimized region (other than to the first + byte). + +- For each instruction in the optimized region, Kprobes verifies that + the instruction can be executed out of line. + +Preparing Detour Buffer +^^^^^^^^^^^^^^^^^^^^^^^ + +Next, Kprobes prepares a "detour" buffer, which contains the following +instruction sequence: + +- code to push the CPU's registers (emulating a breakpoint trap) +- a call to the trampoline code which calls user's probe handlers. +- code to restore registers +- the instructions from the optimized region +- a jump back to the original execution path. + +Pre-optimization +^^^^^^^^^^^^^^^^ + +After preparing the detour buffer, Kprobes verifies that none of the +following situations exist: + +- The probe has a post_handler. +- Other instructions in the optimized region are probed. +- The probe is disabled. + +In any of the above cases, Kprobes won't start optimizing the probe. +Since these are temporary situations, Kprobes tries to start +optimizing it again if the situation is changed. + +If the kprobe can be optimized, Kprobes enqueues the kprobe to an +optimizing list, and kicks the kprobe-optimizer workqueue to optimize +it. 
If the to-be-optimized probepoint is hit before being optimized, +Kprobes returns control to the original instruction path by setting +the CPU's instruction pointer to the copied code in the detour buffer +-- thus at least avoiding the single-step. + +Optimization +^^^^^^^^^^^^ + +The Kprobe-optimizer doesn't insert the jump instruction immediately; +rather, it calls synchronize_rcu() for safety first, because it's +possible for a CPU to be interrupted in the middle of executing the +optimized region [3]_. As you know, synchronize_rcu() can ensure +that all interruptions that were active when synchronize_rcu() +was called are done, but only if CONFIG_PREEMPT=n. So, this version +of kprobe optimization supports only kernels with CONFIG_PREEMPT=n [4]_. + +After that, the Kprobe-optimizer calls stop_machine() to replace +the optimized region with a jump instruction to the detour buffer, +using text_poke_smp(). + +Unoptimization +^^^^^^^^^^^^^^ + +When an optimized kprobe is unregistered, disabled, or blocked by +another kprobe, it will be unoptimized. If this happens before +the optimization is complete, the kprobe is just dequeued from the +optimized list. If the optimization has been done, the jump is +replaced with the original code (except for an int3 breakpoint in +the first byte) by using text_poke_smp(). + +.. [3] Please imagine that the 2nd instruction is interrupted and then + the optimizer replaces the 2nd instruction with the jump *address* + while the interrupt handler is running. When the interrupt + returns to original address, there is no valid instruction, + and it causes an unexpected result. + +.. [4] This optimization-safety checking may be replaced with the + stop-machine method that ksplice uses for supporting a CONFIG_PREEMPT=y + kernel. + +NOTE for geeks: +The jump optimization changes the kprobe's pre_handler behavior. +Without optimization, the pre_handler can change the kernel's execution +path by changing regs->ip and returning 1. However, when the probe +is optimized, that modification is ignored. Thus, if you want to +tweak the kernel's execution path, you need to suppress optimization, +using one of the following techniques: + +- Specify an empty function for the kprobe's post_handler. + +or + +- Execute 'sysctl -w debug.kprobes_optimization=n' + +.. _kprobes_blacklist: + +Blacklist +--------- + +Kprobes can probe most of the kernel except itself. This means +that there are some functions where kprobes cannot probe. Probing +(trapping) such functions can cause a recursive trap (e.g. double +fault) or the nested probe handler may never be called. +Kprobes manages such functions as a blacklist. +If you want to add a function into the blacklist, you just need +to (1) include linux/kprobes.h and (2) use NOKPROBE_SYMBOL() macro +to specify a blacklisted function. +Kprobes checks the given probe address against the blacklist and +rejects registering it, if the given address is in the blacklist. + +.. _kprobes_archs_supported: + +Architectures Supported +======================= + +Kprobes and return probes are implemented on the following +architectures: + +- i386 (Supports jump optimization) +- x86_64 (AMD-64, EM64T) (Supports jump optimization) +- ppc64 +- ia64 (Does not support probes on instruction slot1.) +- sparc64 (Return probes not yet implemented.) +- arm +- ppc +- mips +- s390 +- parisc + +Configuring Kprobes +=================== + +When configuring the kernel using make menuconfig/xconfig/oldconfig, +ensure that CONFIG_KPROBES is set to "y". 
Under "General setup", look +for "Kprobes". + +So that you can load and unload Kprobes-based instrumentation modules, +make sure "Loadable module support" (CONFIG_MODULES) and "Module +unloading" (CONFIG_MODULE_UNLOAD) are set to "y". + +Also make sure that CONFIG_KALLSYMS and perhaps even CONFIG_KALLSYMS_ALL +are set to "y", since kallsyms_lookup_name() is used by the in-kernel +kprobe address resolution code. + +If you need to insert a probe in the middle of a function, you may find +it useful to "Compile the kernel with debug info" (CONFIG_DEBUG_INFO), +so you can use "objdump -d -l vmlinux" to see the source-to-object +code mapping. + +API Reference +============= + +The Kprobes API includes a "register" function and an "unregister" +function for each type of probe. The API also includes "register_*probes" +and "unregister_*probes" functions for (un)registering arrays of probes. +Here are terse, mini-man-page specifications for these functions and +the associated probe handlers that you'll write. See the files in the +samples/kprobes/ sub-directory for examples. + +register_kprobe +--------------- + +:: + + #include + int register_kprobe(struct kprobe *kp); + +Sets a breakpoint at the address kp->addr. When the breakpoint is +hit, Kprobes calls kp->pre_handler. After the probed instruction +is single-stepped, Kprobe calls kp->post_handler. If a fault +occurs during execution of kp->pre_handler or kp->post_handler, +or during single-stepping of the probed instruction, Kprobes calls +kp->fault_handler. Any or all handlers can be NULL. If kp->flags +is set KPROBE_FLAG_DISABLED, that kp will be registered but disabled, +so, its handlers aren't hit until calling enable_kprobe(kp). + +.. note:: + + 1. With the introduction of the "symbol_name" field to struct kprobe, + the probepoint address resolution will now be taken care of by the kernel. + The following will now work:: + + kp.symbol_name = "symbol_name"; + + (64-bit powerpc intricacies such as function descriptors are handled + transparently) + + 2. Use the "offset" field of struct kprobe if the offset into the symbol + to install a probepoint is known. This field is used to calculate the + probepoint. + + 3. Specify either the kprobe "symbol_name" OR the "addr". If both are + specified, kprobe registration will fail with -EINVAL. + + 4. With CISC architectures (such as i386 and x86_64), the kprobes code + does not validate if the kprobe.addr is at an instruction boundary. + Use "offset" with caution. + +register_kprobe() returns 0 on success, or a negative errno otherwise. + +User's pre-handler (kp->pre_handler):: + + #include + #include + int pre_handler(struct kprobe *p, struct pt_regs *regs); + +Called with p pointing to the kprobe associated with the breakpoint, +and regs pointing to the struct containing the registers saved when +the breakpoint was hit. Return 0 here unless you're a Kprobes geek. + +User's post-handler (kp->post_handler):: + + #include + #include + void post_handler(struct kprobe *p, struct pt_regs *regs, + unsigned long flags); + +p and regs are as described for the pre_handler. flags always seems +to be zero. + +User's fault-handler (kp->fault_handler):: + + #include + #include + int fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr); + +p and regs are as described for the pre_handler. trapnr is the +architecture-specific trap number associated with the fault (e.g., +on i386, 13 for a general protection fault or 14 for a page fault). +Returns 1 if it successfully handled the exception. 
+
+register_kretprobe
+------------------
+
+::
+
+    #include <linux/kprobes.h>
+    int register_kretprobe(struct kretprobe *rp);
+
+Establishes a return probe for the function whose address is
+rp->kp.addr. When that function returns, Kprobes calls rp->handler.
+You must set rp->maxactive appropriately before you call
+register_kretprobe(); see "How Does a Return Probe Work?" for details.
+
+register_kretprobe() returns 0 on success, or a negative errno
+otherwise.
+
+User's return-probe handler (rp->handler)::
+
+    #include <linux/kprobes.h>
+    #include <linux/ptrace.h>
+    int kretprobe_handler(struct kretprobe_instance *ri,
+                          struct pt_regs *regs);
+
+regs is as described for kprobe.pre_handler. ri points to the
+kretprobe_instance object, of which the following fields may be
+of interest:
+
+- ret_addr: the return address
+- rp: points to the corresponding kretprobe object
+- task: points to the corresponding task struct
+- data: points to per return-instance private data; see "Kretprobe
+  entry-handler" for details.
+
+The regs_return_value(regs) macro provides a simple abstraction to
+extract the return value from the appropriate register as defined by
+the architecture's ABI.
+
+The handler's return value is currently ignored.
+
+unregister_*probe
+------------------
+
+::
+
+    #include <linux/kprobes.h>
+    void unregister_kprobe(struct kprobe *kp);
+    void unregister_kretprobe(struct kretprobe *rp);
+
+Removes the specified probe. The unregister function can be called
+at any time after the probe has been registered.
+
+.. note::
+
+   If the functions find an incorrect probe (e.g., an unregistered probe),
+   they clear the addr field of the probe.
+
+register_*probes
+----------------
+
+::
+
+    #include <linux/kprobes.h>
+    int register_kprobes(struct kprobe **kps, int num);
+    int register_kretprobes(struct kretprobe **rps, int num);
+
+Registers each of the num probes in the specified array. If any
+error occurs during registration, all probes in the array, up to
+the bad probe, are safely unregistered before the register_*probes
+function returns.
+
+- kps/rps: an array of pointers to ``*probe`` data structures
+- num: the number of array entries.
+
+.. note::
+
+   You have to allocate (or define) an array of pointers and set all
+   of the array entries before using these functions.
+
+unregister_*probes
+------------------
+
+::
+
+    #include <linux/kprobes.h>
+    void unregister_kprobes(struct kprobe **kps, int num);
+    void unregister_kretprobes(struct kretprobe **rps, int num);
+
+Removes each of the num probes in the specified array at once.
+
+.. note::
+
+   If the functions find some incorrect probes (e.g., unregistered
+   probes) in the specified array, they clear the addr field of those
+   incorrect probes. However, other probes in the array are
+   unregistered correctly.
+
+disable_*probe
+--------------
+
+::
+
+    #include <linux/kprobes.h>
+    int disable_kprobe(struct kprobe *kp);
+    int disable_kretprobe(struct kretprobe *rp);
+
+Temporarily disables the specified ``*probe``. You can enable it again by
+using enable_*probe(). You must specify a probe that has already been
+registered.
+
+enable_*probe
+-------------
+
+::
+
+    #include <linux/kprobes.h>
+    int enable_kprobe(struct kprobe *kp);
+    int enable_kretprobe(struct kretprobe *rp);
+
+Enables a ``*probe`` which has been disabled by disable_*probe(). You must
+specify a probe that has already been registered.
+
+Kprobes Features and Limitations
+================================
+
+Kprobes allows multiple probes at the same address. Also,
+a probepoint for which there is a post_handler cannot be optimized.
+So if you install a kprobe with a post_handler, at an optimized +probepoint, the probepoint will be unoptimized automatically. + +In general, you can install a probe anywhere in the kernel. +In particular, you can probe interrupt handlers. Known exceptions +are discussed in this section. + +The register_*probe functions will return -EINVAL if you attempt +to install a probe in the code that implements Kprobes (mostly +kernel/kprobes.c and ``arch/*/kernel/kprobes.c``, but also functions such +as do_page_fault and notifier_call_chain). + +If you install a probe in an inline-able function, Kprobes makes +no attempt to chase down all inline instances of the function and +install probes there. gcc may inline a function without being asked, +so keep this in mind if you're not seeing the probe hits you expect. + +A probe handler can modify the environment of the probed function +-- e.g., by modifying kernel data structures, or by modifying the +contents of the pt_regs struct (which are restored to the registers +upon return from the breakpoint). So Kprobes can be used, for example, +to install a bug fix or to inject faults for testing. Kprobes, of +course, has no way to distinguish the deliberately injected faults +from the accidental ones. Don't drink and probe. + +Kprobes makes no attempt to prevent probe handlers from stepping on +each other -- e.g., probing printk() and then calling printk() from a +probe handler. If a probe handler hits a probe, that second probe's +handlers won't be run in that instance, and the kprobe.nmissed member +of the second probe will be incremented. + +As of Linux v2.6.15-rc1, multiple handlers (or multiple instances of +the same handler) may run concurrently on different CPUs. + +Kprobes does not use mutexes or allocate memory except during +registration and unregistration. + +Probe handlers are run with preemption disabled or interrupt disabled, +which depends on the architecture and optimization state. (e.g., +kretprobe handlers and optimized kprobe handlers run without interrupt +disabled on x86/x86-64). In any case, your handler should not yield +the CPU (e.g., by attempting to acquire a semaphore, or waiting I/O). + +Since a return probe is implemented by replacing the return +address with the trampoline's address, stack backtraces and calls +to __builtin_return_address() will typically yield the trampoline's +address instead of the real return address for kretprobed functions. +(As far as we can tell, __builtin_return_address() is used only +for instrumentation and error reporting.) + +If the number of times a function is called does not match the number +of times it returns, registering a return probe on that function may +produce undesirable results. In such a case, a line: +kretprobe BUG!: Processing kretprobe d000000000041aa8 @ c00000000004f48c +gets printed. With this information, one will be able to correlate the +exact instance of the kretprobe that caused the problem. We have the +do_exit() case covered. do_execve() and do_fork() are not an issue. +We're unaware of other specific cases where this could be a problem. + +If, upon entry to or exit from a function, the CPU is running on +a stack other than that of the current task, registering a return +probe on that function may produce undesirable results. For this +reason, Kprobes doesn't support return probes (or kprobes) +on the x86_64 version of __switch_to(); the registration functions +return -EINVAL. 
+ +On x86/x86-64, since the Jump Optimization of Kprobes modifies +instructions widely, there are some limitations to optimization. To +explain it, we introduce some terminology. Imagine a 3-instruction +sequence consisting of a two 2-byte instructions and one 3-byte +instruction. + +:: + + IA + | + [-2][-1][0][1][2][3][4][5][6][7] + [ins1][ins2][ ins3 ] + [<- DCR ->] + [<- JTPR ->] + + ins1: 1st Instruction + ins2: 2nd Instruction + ins3: 3rd Instruction + IA: Insertion Address + JTPR: Jump Target Prohibition Region + DCR: Detoured Code Region + +The instructions in DCR are copied to the out-of-line buffer +of the kprobe, because the bytes in DCR are replaced by +a 5-byte jump instruction. So there are several limitations. + +a) The instructions in DCR must be relocatable. +b) The instructions in DCR must not include a call instruction. +c) JTPR must not be targeted by any jump or call instruction. +d) DCR must not straddle the border between functions. + +Anyway, these limitations are checked by the in-kernel instruction +decoder, so you don't need to worry about that. + +Probe Overhead +============== + +On a typical CPU in use in 2005, a kprobe hit takes 0.5 to 1.0 +microseconds to process. Specifically, a benchmark that hits the same +probepoint repeatedly, firing a simple handler each time, reports 1-2 +million hits per second, depending on the architecture. A return-probe +hit typically takes 50-75% longer than a kprobe hit. +When you have a return probe set on a function, adding a kprobe at +the entry to that function adds essentially no overhead. + +Here are sample overhead figures (in usec) for different architectures:: + + k = kprobe; r = return probe; kr = kprobe + return probe + on same function + + i386: Intel Pentium M, 1495 MHz, 2957.31 bogomips + k = 0.57 usec; r = 0.92; kr = 0.99 + + x86_64: AMD Opteron 246, 1994 MHz, 3971.48 bogomips + k = 0.49 usec; r = 0.80; kr = 0.82 + + ppc64: POWER5 (gr), 1656 MHz (SMT disabled, 1 virtual CPU per physical CPU) + k = 0.77 usec; r = 1.26; kr = 1.45 + +Optimized Probe Overhead +------------------------ + +Typically, an optimized kprobe hit takes 0.07 to 0.1 microseconds to +process. Here are sample overhead figures (in usec) for x86 architectures:: + + k = unoptimized kprobe, b = boosted (single-step skipped), o = optimized kprobe, + r = unoptimized kretprobe, rb = boosted kretprobe, ro = optimized kretprobe. + + i386: Intel(R) Xeon(R) E5410, 2.33GHz, 4656.90 bogomips + k = 0.80 usec; b = 0.33; o = 0.05; r = 1.10; rb = 0.61; ro = 0.33 + + x86-64: Intel(R) Xeon(R) E5410, 2.33GHz, 4656.90 bogomips + k = 0.99 usec; b = 0.43; o = 0.06; r = 1.24; rb = 0.68; ro = 0.30 + +TODO +==== + +a. SystemTap (http://sourceware.org/systemtap): Provides a simplified + programming interface for probe-based instrumentation. Try it out. +b. Kernel return probes for sparc64. +c. Support for other architectures. +d. User-space probes. +e. Watchpoint probes (which fire on data references). 
+ +Kprobes Example +=============== + +See samples/kprobes/kprobe_example.c + +Kretprobes Example +================== + +See samples/kprobes/kretprobe_example.c + +For additional information on Kprobes, refer to the following URLs: + +- http://www-106.ibm.com/developerworks/library/l-kprobes.html?ca=dgr-lnxw42Kprobe +- http://www.redhat.com/magazine/005mar05/features/kprobes/ +- http://www-users.cs.umn.edu/~boutcher/kprobes/ +- http://www.linuxsymposium.org/2006/linuxsymposium_procv2.pdf (pages 101-115) + +Deprecated Features +=================== + +Jprobes is now a deprecated feature. People who are depending on it should +migrate to other tracing features or use older kernels. Please consider to +migrate your tool to one of the following options: + +- Use trace-event to trace target function with arguments. + + trace-event is a low-overhead (and almost no visible overhead if it + is off) statically defined event interface. You can define new events + and trace it via ftrace or any other tracing tools. + + See the following urls: + + - https://lwn.net/Articles/379903/ + - https://lwn.net/Articles/381064/ + - https://lwn.net/Articles/383362/ + +- Use ftrace dynamic events (kprobe event) with perf-probe. + + If you build your kernel with debug info (CONFIG_DEBUG_INFO=y), you can + find which register/stack is assigned to which local variable or arguments + by using perf-probe and set up new event to trace it. + + See following documents: + + - Documentation/trace/kprobetrace.rst + - Documentation/trace/events.rst + - tools/perf/Documentation/perf-probe.txt + + +The kprobes debugfs interface +============================= + + +With recent kernels (> 2.6.20) the list of registered kprobes is visible +under the /sys/kernel/debug/kprobes/ directory (assuming debugfs is mounted at //sys/kernel/debug). + +/sys/kernel/debug/kprobes/list: Lists all registered probes on the system:: + + c015d71a k vfs_read+0x0 + c03dedc5 r tcp_v4_rcv+0x0 + +The first column provides the kernel address where the probe is inserted. +The second column identifies the type of probe (k - kprobe and r - kretprobe) +while the third column specifies the symbol+offset of the probe. +If the probed function belongs to a module, the module name is also +specified. Following columns show probe status. If the probe is on +a virtual address that is no longer valid (module init sections, module +virtual addresses that correspond to modules that've been unloaded), +such probes are marked with [GONE]. If the probe is temporarily disabled, +such probes are marked with [DISABLED]. If the probe is optimized, it is +marked with [OPTIMIZED]. If the probe is ftrace-based, it is marked with +[FTRACE]. + +/sys/kernel/debug/kprobes/enabled: Turn kprobes ON/OFF forcibly. + +Provides a knob to globally and forcibly turn registered kprobes ON or OFF. +By default, all kprobes are enabled. By echoing "0" to this file, all +registered probes will be disarmed, till such time a "1" is echoed to this +file. Note that this knob just disarms and arms all kprobes and doesn't +change each probe's disabling state. This means that disabled kprobes (marked +[DISABLED]) will be not enabled if you turn ON all kprobes by this knob. + + +The kprobes sysctl interface +============================ + +/proc/sys/debug/kprobes-optimization: Turn kprobes optimization ON/OFF. + +When CONFIG_OPTPROBES=y, this sysctl interface appears and it provides +a knob to globally and forcibly turn jump optimization (see section +:ref:`kprobes_jump_optimization`) ON or OFF. 
By default, jump optimization +is allowed (ON). If you echo "0" to this file or set +"debug.kprobes_optimization" to 0 via sysctl, all optimized probes will be +unoptimized, and any new probes registered after that will not be optimized. + +Note that this knob *changes* the optimized state. This means that optimized +probes (marked [OPTIMIZED]) will be unoptimized ([OPTIMIZED] tag will be +removed). If the knob is turned on, they will be optimized again. + diff --git a/Documentation/staging/lzo.rst b/Documentation/staging/lzo.rst new file mode 100644 index 000000000000..f65b51523014 --- /dev/null +++ b/Documentation/staging/lzo.rst @@ -0,0 +1,202 @@ +=========================================================== +LZO stream format as understood by Linux's LZO decompressor +=========================================================== + +Introduction +============ + + This is not a specification. No specification seems to be publicly available + for the LZO stream format. This document describes what input format the LZO + decompressor as implemented in the Linux kernel understands. The file subject + of this analysis is lib/lzo/lzo1x_decompress_safe.c. No analysis was made on + the compressor nor on any other implementations though it seems likely that + the format matches the standard one. The purpose of this document is to + better understand what the code does in order to propose more efficient fixes + for future bug reports. + +Description +=========== + + The stream is composed of a series of instructions, operands, and data. The + instructions consist in a few bits representing an opcode, and bits forming + the operands for the instruction, whose size and position depend on the + opcode and on the number of literals copied by previous instruction. The + operands are used to indicate: + + - a distance when copying data from the dictionary (past output buffer) + - a length (number of bytes to copy from dictionary) + - the number of literals to copy, which is retained in variable "state" + as a piece of information for next instructions. + + Optionally depending on the opcode and operands, extra data may follow. These + extra data can be a complement for the operand (eg: a length or a distance + encoded on larger values), or a literal to be copied to the output buffer. + + The first byte of the block follows a different encoding from other bytes, it + seems to be optimized for literal use only, since there is no dictionary yet + prior to that byte. + + Lengths are always encoded on a variable size starting with a small number + of bits in the operand. If the number of bits isn't enough to represent the + length, up to 255 may be added in increments by consuming more bytes with a + rate of at most 255 per extra byte (thus the compression ratio cannot exceed + around 255:1). The variable length encoding using #bits is always the same:: + + length = byte & ((1 << #bits) - 1) + if (!length) { + length = ((1 << #bits) - 1) + length += 255*(number of zero bytes) + length += first-non-zero-byte + } + length += constant (generally 2 or 3) + + For references to the dictionary, distances are relative to the output + pointer. Distances are encoded using very few bits belonging to certain + ranges, resulting in multiple copy instructions using different encodings. + Certain encodings involve one extra byte, others involve two extra bytes + forming a little-endian 16-bit quantity (marked LE16 below). 
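+
+ As an illustration only, the length decoding described above could be written
+ as the following C helper; the function name and the way the input pointer is
+ advanced are made up for this sketch and do not reflect the kernel's actual
+ implementation::
+
+     #include <stddef.h>
+
+     /* Decode an LZO variable-length field.  'bits' is the number of length
+      * bits present in the opcode byte, 'base' is the constant added at the
+      * end (generally 2 or 3).  No bounds checking is done on the input. */
+     static size_t decode_varlen(const unsigned char **in, unsigned char opcode,
+                                 unsigned int bits, size_t base)
+     {
+             size_t len = opcode & ((1u << bits) - 1);
+
+             if (!len) {
+                     len = (1u << bits) - 1;
+                     while (**in == 0) {     /* each zero byte adds 255 */
+                             len += 255;
+                             (*in)++;
+                     }
+                     len += *(*in)++;        /* first non-zero byte */
+             }
+             return len + base;
+     }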
+ + After any instruction except the large literal copy, 0, 1, 2 or 3 literals + are copied before starting the next instruction. The number of literals that + were copied may change the meaning and behaviour of the next instruction. In + practice, only one instruction needs to know whether 0, less than 4, or more + literals were copied. This is the information stored in the variable + in this implementation. This number of immediate literals to be copied is + generally encoded in the last two bits of the instruction but may also be + taken from the last two bits of an extra operand (eg: distance). + + End of stream is declared when a block copy of distance 0 is seen. Only one + instruction may encode this distance (0001HLLL), it takes one LE16 operand + for the distance, thus requiring 3 bytes. + + .. important:: + + In the code some length checks are missing because certain instructions + are called under the assumption that a certain number of bytes follow + because it has already been guaranteed before parsing the instructions. + They just have to "refill" this credit if they consume extra bytes. This + is an implementation design choice independent on the algorithm or + encoding. + +Versions + +0: Original version +1: LZO-RLE + +Version 1 of LZO implements an extension to encode runs of zeros using run +length encoding. This improves speed for data with many zeros, which is a +common case for zram. This modifies the bitstream in a backwards compatible way +(v1 can correctly decompress v0 compressed data, but v0 cannot read v1 data). + +For maximum compatibility, both versions are available under different names +(lzo and lzo-rle). Differences in the encoding are noted in this document with +e.g.: version 1 only. + +Byte sequences +============== + + First byte encoding:: + + 0..16 : follow regular instruction encoding, see below. It is worth + noting that code 16 will represent a block copy from the + dictionary which is empty, and that it will always be + invalid at this place. + + 17 : bitstream version. If the first byte is 17, and compressed + stream length is at least 5 bytes (length of shortest possible + versioned bitstream), the next byte gives the bitstream version + (version 1 only). + Otherwise, the bitstream version is 0. + + 18..21 : copy 0..3 literals + state = (byte - 17) = 0..3 [ copy literals ] + skip byte + + 22..255 : copy literal string + length = (byte - 17) = 4..238 + state = 4 [ don't copy extra literals ] + skip byte + + Instruction encoding:: + + 0 0 0 0 X X X X (0..15) + Depends on the number of literals copied by the last instruction. + If last instruction did not copy any literal (state == 0), this + encoding will be a copy of 4 or more literal, and must be interpreted + like this : + + 0 0 0 0 L L L L (0..15) : copy long literal string + length = 3 + (L ?: 15 + (zero_bytes * 255) + non_zero_byte) + state = 4 (no extra literals are copied) + + If last instruction used to copy between 1 to 3 literals (encoded in + the instruction's opcode or distance), the instruction is a copy of a + 2-byte block from the dictionary within a 1kB distance. It is worth + noting that this instruction provides little savings since it uses 2 + bytes to encode a copy of 2 other bytes but it encodes the number of + following literals for free. 
It must be interpreted like this : + + 0 0 0 0 D D S S (0..15) : copy 2 bytes from <= 1kB distance + length = 2 + state = S (copy S literals after this block) + Always followed by exactly one byte : H H H H H H H H + distance = (H << 2) + D + 1 + + If last instruction used to copy 4 or more literals (as detected by + state == 4), the instruction becomes a copy of a 3-byte block from the + dictionary from a 2..3kB distance, and must be interpreted like this : + + 0 0 0 0 D D S S (0..15) : copy 3 bytes from 2..3 kB distance + length = 3 + state = S (copy S literals after this block) + Always followed by exactly one byte : H H H H H H H H + distance = (H << 2) + D + 2049 + + 0 0 0 1 H L L L (16..31) + Copy of a block within 16..48kB distance (preferably less than 10B) + length = 2 + (L ?: 7 + (zero_bytes * 255) + non_zero_byte) + Always followed by exactly one LE16 : D D D D D D D D : D D D D D D S S + distance = 16384 + (H << 14) + D + state = S (copy S literals after this block) + End of stream is reached if distance == 16384 + In version 1 only, to prevent ambiguity with the RLE case when + ((distance & 0x803f) == 0x803f) && (261 <= length <= 264), the + compressor must not emit block copies where distance and length + meet these conditions. + + In version 1 only, this instruction is also used to encode a run of + zeros if distance = 0xbfff, i.e. H = 1 and the D bits are all 1. + In this case, it is followed by a fourth byte, X. + run length = ((X << 3) | (0 0 0 0 0 L L L)) + 4 + + 0 0 1 L L L L L (32..63) + Copy of small block within 16kB distance (preferably less than 34B) + length = 2 + (L ?: 31 + (zero_bytes * 255) + non_zero_byte) + Always followed by exactly one LE16 : D D D D D D D D : D D D D D D S S + distance = D + 1 + state = S (copy S literals after this block) + + 0 1 L D D D S S (64..127) + Copy 3-4 bytes from block within 2kB distance + state = S (copy S literals after this block) + length = 3 + L + Always followed by exactly one byte : H H H H H H H H + distance = (H << 3) + D + 1 + + 1 L L D D D S S (128..255) + Copy 5-8 bytes from block within 2kB distance + state = S (copy S literals after this block) + length = 5 + L + Always followed by exactly one byte : H H H H H H H H + distance = (H << 3) + D + 1 + +Authors +======= + + This document was written by Willy Tarreau on 2014/07/19 during an + analysis of the decompression code available in Linux 3.16-rc5, and updated + by Dave Rodgman on 2018/10/30 to introduce run-length + encoding. The code is tricky, it is possible that this document contains + mistakes or that a few corner cases were overlooked. In any case, please + report any doubt, fix, or proposed updates to the author(s) so that the + document can be updated. diff --git a/Documentation/staging/remoteproc.rst b/Documentation/staging/remoteproc.rst new file mode 100644 index 000000000000..9cccd3dd6a4b --- /dev/null +++ b/Documentation/staging/remoteproc.rst @@ -0,0 +1,359 @@ +========================== +Remote Processor Framework +========================== + +Introduction +============ + +Modern SoCs typically have heterogeneous remote processor devices in asymmetric +multiprocessing (AMP) configurations, which may be running different instances +of operating system, whether it's Linux or any other flavor of real-time OS. + +OMAP4, for example, has dual Cortex-A9, dual Cortex-M3 and a C64x+ DSP. 
+In a typical configuration, the dual cortex-A9 is running Linux in a SMP +configuration, and each of the other three cores (two M3 cores and a DSP) +is running its own instance of RTOS in an AMP configuration. + +The remoteproc framework allows different platforms/architectures to +control (power on, load firmware, power off) those remote processors while +abstracting the hardware differences, so the entire driver doesn't need to be +duplicated. In addition, this framework also adds rpmsg virtio devices +for remote processors that supports this kind of communication. This way, +platform-specific remoteproc drivers only need to provide a few low-level +handlers, and then all rpmsg drivers will then just work +(for more information about the virtio-based rpmsg bus and its drivers, +please read Documentation/staging/rpmsg.rst). +Registration of other types of virtio devices is now also possible. Firmwares +just need to publish what kind of virtio devices do they support, and then +remoteproc will add those devices. This makes it possible to reuse the +existing virtio drivers with remote processor backends at a minimal development +cost. + +User API +======== + +:: + + int rproc_boot(struct rproc *rproc) + +Boot a remote processor (i.e. load its firmware, power it on, ...). + +If the remote processor is already powered on, this function immediately +returns (successfully). + +Returns 0 on success, and an appropriate error value otherwise. +Note: to use this function you should already have a valid rproc +handle. There are several ways to achieve that cleanly (devres, pdata, +the way remoteproc_rpmsg.c does this, or, if this becomes prevalent, we +might also consider using dev_archdata for this). + +:: + + void rproc_shutdown(struct rproc *rproc) + +Power off a remote processor (previously booted with rproc_boot()). +In case @rproc is still being used by an additional user(s), then +this function will just decrement the power refcount and exit, +without really powering off the device. + +Every call to rproc_boot() must (eventually) be accompanied by a call +to rproc_shutdown(). Calling rproc_shutdown() redundantly is a bug. + +.. note:: + + we're not decrementing the rproc's refcount, only the power refcount. + which means that the @rproc handle stays valid even after + rproc_shutdown() returns, and users can still use it with a subsequent + rproc_boot(), if needed. + +:: + + struct rproc *rproc_get_by_phandle(phandle phandle) + +Find an rproc handle using a device tree phandle. Returns the rproc +handle on success, and NULL on failure. This function increments +the remote processor's refcount, so always use rproc_put() to +decrement it back once rproc isn't needed anymore. + +Typical usage +============= + +:: + + #include + + /* in case we were given a valid 'rproc' handle */ + int dummy_rproc_example(struct rproc *my_rproc) + { + int ret; + + /* let's power on and boot our remote processor */ + ret = rproc_boot(my_rproc); + if (ret) { + /* + * something went wrong. handle it and leave. + */ + } + + /* + * our remote processor is now powered on... give it some work + */ + + /* let's shut it down now */ + rproc_shutdown(my_rproc); + } + +API for implementors +==================== + +:: + + struct rproc *rproc_alloc(struct device *dev, const char *name, + const struct rproc_ops *ops, + const char *firmware, int len) + +Allocate a new remote processor handle, but don't register +it yet. 
Required parameters are the underlying device, the +name of this remote processor, platform-specific ops handlers, +the name of the firmware to boot this rproc with, and the +length of private data needed by the allocating rproc driver (in bytes). + +This function should be used by rproc implementations during +initialization of the remote processor. + +After creating an rproc handle using this function, and when ready, +implementations should then call rproc_add() to complete +the registration of the remote processor. + +On success, the new rproc is returned, and on failure, NULL. + +.. note:: + + **never** directly deallocate @rproc, even if it was not registered + yet. Instead, when you need to unroll rproc_alloc(), use rproc_free(). + +:: + + void rproc_free(struct rproc *rproc) + +Free an rproc handle that was allocated by rproc_alloc. + +This function essentially unrolls rproc_alloc(), by decrementing the +rproc's refcount. It doesn't directly free rproc; that would happen +only if there are no other references to rproc and its refcount now +dropped to zero. + +:: + + int rproc_add(struct rproc *rproc) + +Register @rproc with the remoteproc framework, after it has been +allocated with rproc_alloc(). + +This is called by the platform-specific rproc implementation, whenever +a new remote processor device is probed. + +Returns 0 on success and an appropriate error code otherwise. +Note: this function initiates an asynchronous firmware loading +context, which will look for virtio devices supported by the rproc's +firmware. + +If found, those virtio devices will be created and added, so as a result +of registering this remote processor, additional virtio drivers might get +probed. + +:: + + int rproc_del(struct rproc *rproc) + +Unroll rproc_add(). + +This function should be called when the platform specific rproc +implementation decides to remove the rproc device. it should +_only_ be called if a previous invocation of rproc_add() +has completed successfully. + +After rproc_del() returns, @rproc is still valid, and its +last refcount should be decremented by calling rproc_free(). + +Returns 0 on success and -EINVAL if @rproc isn't valid. + +:: + + void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type) + +Report a crash in a remoteproc + +This function must be called every time a crash is detected by the +platform specific rproc implementation. This should not be called from a +non-remoteproc driver. This function can be called from atomic/interrupt +context. + +Implementation callbacks +======================== + +These callbacks should be provided by platform-specific remoteproc +drivers:: + + /** + * struct rproc_ops - platform-specific device handlers + * @start: power on the device and boot it + * @stop: power off the device + * @kick: kick a virtqueue (virtqueue id given as a parameter) + */ + struct rproc_ops { + int (*start)(struct rproc *rproc); + int (*stop)(struct rproc *rproc); + void (*kick)(struct rproc *rproc, int vqid); + }; + +Every remoteproc implementation should at least provide the ->start and ->stop +handlers. If rpmsg/virtio functionality is also desired, then the ->kick handler +should be provided as well. + +The ->start() handler takes an rproc handle and should then power on the +device and boot it (use rproc->priv to access platform-specific private data). +The boot address, in case needed, can be found in rproc->bootaddr (remoteproc +core puts there the ELF entry point). 
+On success, 0 should be returned, and on failure, an appropriate error code. + +The ->stop() handler takes an rproc handle and powers the device down. +On success, 0 is returned, and on failure, an appropriate error code. + +The ->kick() handler takes an rproc handle, and an index of a virtqueue +where new message was placed in. Implementations should interrupt the remote +processor and let it know it has pending messages. Notifying remote processors +the exact virtqueue index to look in is optional: it is easy (and not +too expensive) to go through the existing virtqueues and look for new buffers +in the used rings. + +Binary Firmware Structure +========================= + +At this point remoteproc supports ELF32 and ELF64 firmware binaries. However, +it is quite expected that other platforms/devices which we'd want to +support with this framework will be based on different binary formats. + +When those use cases show up, we will have to decouple the binary format +from the framework core, so we can support several binary formats without +duplicating common code. + +When the firmware is parsed, its various segments are loaded to memory +according to the specified device address (might be a physical address +if the remote processor is accessing memory directly). + +In addition to the standard ELF segments, most remote processors would +also include a special section which we call "the resource table". + +The resource table contains system resources that the remote processor +requires before it should be powered on, such as allocation of physically +contiguous memory, or iommu mapping of certain on-chip peripherals. +Remotecore will only power up the device after all the resource table's +requirement are met. + +In addition to system resources, the resource table may also contain +resource entries that publish the existence of supported features +or configurations by the remote processor, such as trace buffers and +supported virtio devices (and their configurations). + +The resource table begins with this header:: + + /** + * struct resource_table - firmware resource table header + * @ver: version number + * @num: number of resource entries + * @reserved: reserved (must be zero) + * @offset: array of offsets pointing at the various resource entries + * + * The header of the resource table, as expressed by this structure, + * contains a version number (should we need to change this format in the + * future), the number of available resource entries, and their offsets + * in the table. + */ + struct resource_table { + u32 ver; + u32 num; + u32 reserved[2]; + u32 offset[0]; + } __packed; + +Immediately following this header are the resource entries themselves, +each of which begins with the following resource entry header:: + + /** + * struct fw_rsc_hdr - firmware resource entry header + * @type: resource type + * @data: resource data + * + * Every resource entry begins with a 'struct fw_rsc_hdr' header providing + * its @type. The content of the entry itself will immediately follow + * this header, and it should be parsed according to the resource type. + */ + struct fw_rsc_hdr { + u32 type; + u8 data[0]; + } __packed; + +Some resources entries are mere announcements, where the host is informed +of specific remoteproc configuration. Other entries require the host to +do something (e.g. allocate a system resource). Sometimes a negotiation +is expected, where the firmware requests a resource, and once allocated, +the host should provide back its details (e.g. 
address of an allocated +memory region). + +Here are the various resource types that are currently supported:: + + /** + * enum fw_resource_type - types of resource entries + * + * @RSC_CARVEOUT: request for allocation of a physically contiguous + * memory region. + * @RSC_DEVMEM: request to iommu_map a memory-based peripheral. + * @RSC_TRACE: announces the availability of a trace buffer into which + * the remote processor will be writing logs. + * @RSC_VDEV: declare support for a virtio device, and serve as its + * virtio header. + * @RSC_LAST: just keep this one at the end + * @RSC_VENDOR_START: start of the vendor specific resource types range + * @RSC_VENDOR_END: end of the vendor specific resource types range + * + * Please note that these values are used as indices to the rproc_handle_rsc + * lookup table, so please keep them sane. Moreover, @RSC_LAST is used to + * check the validity of an index before the lookup table is accessed, so + * please update it as needed. + */ + enum fw_resource_type { + RSC_CARVEOUT = 0, + RSC_DEVMEM = 1, + RSC_TRACE = 2, + RSC_VDEV = 3, + RSC_LAST = 4, + RSC_VENDOR_START = 128, + RSC_VENDOR_END = 512, + }; + +For more details regarding a specific resource type, please see its +dedicated structure in include/linux/remoteproc.h. + +We also expect that platform-specific resource entries will show up +at some point. When that happens, we could easily add a new RSC_PLATFORM +type, and hand those resources to the platform-specific rproc driver to handle. + +Virtio and remoteproc +===================== + +The firmware should provide remoteproc information about virtio devices +that it supports, and their configurations: a RSC_VDEV resource entry +should specify the virtio device id (as in virtio_ids.h), virtio features, +virtio config space, vrings information, etc. + +When a new remote processor is registered, the remoteproc framework +will look for its resource table and will register the virtio devices +it supports. A firmware may support any number of virtio devices, and +of any type (a single remote processor can also easily support several +rpmsg virtio devices this way, if desired). + +Of course, RSC_VDEV resource entries are only good enough for static +allocation of virtio devices. Dynamic allocations will also be made possible +using the rpmsg bus (similar to how we already do dynamic allocations of +rpmsg channels; read more about it in rpmsg.txt). diff --git a/Documentation/staging/rpmsg.rst b/Documentation/staging/rpmsg.rst new file mode 100644 index 000000000000..24b7a9e1a5f9 --- /dev/null +++ b/Documentation/staging/rpmsg.rst @@ -0,0 +1,341 @@ +============================================ +Remote Processor Messaging (rpmsg) Framework +============================================ + +.. note:: + + This document describes the rpmsg bus and how to write rpmsg drivers. + To learn how to add rpmsg support for new platforms, check out remoteproc.txt + (also a resident of Documentation/). + +Introduction +============ + +Modern SoCs typically employ heterogeneous remote processor devices in +asymmetric multiprocessing (AMP) configurations, which may be running +different instances of operating system, whether it's Linux or any other +flavor of real-time OS. + +OMAP4, for example, has dual Cortex-A9, dual Cortex-M3 and a C64x+ DSP. +Typically, the dual cortex-A9 is running Linux in a SMP configuration, +and each of the other three cores (two M3 cores and a DSP) is running +its own instance of RTOS in an AMP configuration. 
+ +Typically AMP remote processors employ dedicated DSP codecs and multimedia +hardware accelerators, and therefore are often used to offload CPU-intensive +multimedia tasks from the main application processor. + +These remote processors could also be used to control latency-sensitive +sensors, drive random hardware blocks, or just perform background tasks +while the main CPU is idling. + +Users of those remote processors can either be userland apps (e.g. multimedia +frameworks talking with remote OMX components) or kernel drivers (controlling +hardware accessible only by the remote processor, reserving kernel-controlled +resources on behalf of the remote processor, etc..). + +Rpmsg is a virtio-based messaging bus that allows kernel drivers to communicate +with remote processors available on the system. In turn, drivers could then +expose appropriate user space interfaces, if needed. + +When writing a driver that exposes rpmsg communication to userland, please +keep in mind that remote processors might have direct access to the +system's physical memory and other sensitive hardware resources (e.g. on +OMAP4, remote cores and hardware accelerators may have direct access to the +physical memory, gpio banks, dma controllers, i2c bus, gptimers, mailbox +devices, hwspinlocks, etc..). Moreover, those remote processors might be +running RTOS where every task can access the entire memory/devices exposed +to the processor. To minimize the risks of rogue (or buggy) userland code +exploiting remote bugs, and by that taking over the system, it is often +desired to limit userland to specific rpmsg channels (see definition below) +it can send messages on, and if possible, minimize how much control +it has over the content of the messages. + +Every rpmsg device is a communication channel with a remote processor (thus +rpmsg devices are called channels). Channels are identified by a textual name +and have a local ("source") rpmsg address, and remote ("destination") rpmsg +address. + +When a driver starts listening on a channel, its rx callback is bound with +a unique rpmsg local address (a 32-bit integer). This way when inbound messages +arrive, the rpmsg core dispatches them to the appropriate driver according +to their destination address (this is done by invoking the driver's rx handler +with the payload of the inbound message). + + +User API +======== + +:: + + int rpmsg_send(struct rpmsg_channel *rpdev, void *data, int len); + +sends a message across to the remote processor on a given channel. +The caller should specify the channel, the data it wants to send, +and its length (in bytes). The message will be sent on the specified +channel, i.e. its source and destination address fields will be +set to the channel's src and dst addresses. + +In case there are no TX buffers available, the function will block until +one becomes available (i.e. until the remote processor consumes +a tx buffer and puts it back on virtio's used descriptor ring), +or a timeout of 15 seconds elapses. When the latter happens, +-ERESTARTSYS is returned. + +The function can only be called from a process context (for now). +Returns 0 on success and an appropriate error value on failure. + +:: + + int rpmsg_sendto(struct rpmsg_channel *rpdev, void *data, int len, u32 dst); + +sends a message across to the remote processor on a given channel, +to a destination address provided by the caller. + +The caller should specify the channel, the data it wants to send, +its length (in bytes), and an explicit destination address. 
+ +The message will then be sent to the remote processor to which the +channel belongs, using the channel's src address, and the user-provided +dst address (thus the channel's dst address will be ignored). + +In case there are no TX buffers available, the function will block until +one becomes available (i.e. until the remote processor consumes +a tx buffer and puts it back on virtio's used descriptor ring), +or a timeout of 15 seconds elapses. When the latter happens, +-ERESTARTSYS is returned. + +The function can only be called from a process context (for now). +Returns 0 on success and an appropriate error value on failure. + +:: + + int rpmsg_send_offchannel(struct rpmsg_channel *rpdev, u32 src, u32 dst, + void *data, int len); + + +sends a message across to the remote processor, using the src and dst +addresses provided by the user. + +The caller should specify the channel, the data it wants to send, +its length (in bytes), and explicit source and destination addresses. +The message will then be sent to the remote processor to which the +channel belongs, but the channel's src and dst addresses will be +ignored (and the user-provided addresses will be used instead). + +In case there are no TX buffers available, the function will block until +one becomes available (i.e. until the remote processor consumes +a tx buffer and puts it back on virtio's used descriptor ring), +or a timeout of 15 seconds elapses. When the latter happens, +-ERESTARTSYS is returned. + +The function can only be called from a process context (for now). +Returns 0 on success and an appropriate error value on failure. + +:: + + int rpmsg_trysend(struct rpmsg_channel *rpdev, void *data, int len); + +sends a message across to the remote processor on a given channel. +The caller should specify the channel, the data it wants to send, +and its length (in bytes). The message will be sent on the specified +channel, i.e. its source and destination address fields will be +set to the channel's src and dst addresses. + +In case there are no TX buffers available, the function will immediately +return -ENOMEM without waiting until one becomes available. + +The function can only be called from a process context (for now). +Returns 0 on success and an appropriate error value on failure. + +:: + + int rpmsg_trysendto(struct rpmsg_channel *rpdev, void *data, int len, u32 dst) + + +sends a message across to the remote processor on a given channel, +to a destination address provided by the user. + +The user should specify the channel, the data it wants to send, +its length (in bytes), and an explicit destination address. + +The message will then be sent to the remote processor to which the +channel belongs, using the channel's src address, and the user-provided +dst address (thus the channel's dst address will be ignored). + +In case there are no TX buffers available, the function will immediately +return -ENOMEM without waiting until one becomes available. + +The function can only be called from a process context (for now). +Returns 0 on success and an appropriate error value on failure. + +:: + + int rpmsg_trysend_offchannel(struct rpmsg_channel *rpdev, u32 src, u32 dst, + void *data, int len); + + +sends a message across to the remote processor, using source and +destination addresses provided by the user. + +The user should specify the channel, the data it wants to send, +its length (in bytes), and explicit source and destination addresses. 
+The message will then be sent to the remote processor to which the +channel belongs, but the channel's src and dst addresses will be +ignored (and the user-provided addresses will be used instead). + +In case there are no TX buffers available, the function will immediately +return -ENOMEM without waiting until one becomes available. + +The function can only be called from a process context (for now). +Returns 0 on success and an appropriate error value on failure. + +:: + + struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *rpdev, + void (*cb)(struct rpmsg_channel *, void *, int, void *, u32), + void *priv, u32 addr); + +every rpmsg address in the system is bound to an rx callback (so when +inbound messages arrive, they are dispatched by the rpmsg bus using the +appropriate callback handler) by means of an rpmsg_endpoint struct. + +This function allows drivers to create such an endpoint, and by that, +bind a callback, and possibly some private data too, to an rpmsg address +(either one that is known in advance, or one that will be dynamically +assigned for them). + +Simple rpmsg drivers need not call rpmsg_create_ept, because an endpoint +is already created for them when they are probed by the rpmsg bus +(using the rx callback they provide when they registered to the rpmsg bus). + +So things should just work for simple drivers: they already have an +endpoint, their rx callback is bound to their rpmsg address, and when +relevant inbound messages arrive (i.e. messages which their dst address +equals to the src address of their rpmsg channel), the driver's handler +is invoked to process it. + +That said, more complicated drivers might do need to allocate +additional rpmsg addresses, and bind them to different rx callbacks. +To accomplish that, those drivers need to call this function. +Drivers should provide their channel (so the new endpoint would bind +to the same remote processor their channel belongs to), an rx callback +function, an optional private data (which is provided back when the +rx callback is invoked), and an address they want to bind with the +callback. If addr is RPMSG_ADDR_ANY, then rpmsg_create_ept will +dynamically assign them an available rpmsg address (drivers should have +a very good reason why not to always use RPMSG_ADDR_ANY here). + +Returns a pointer to the endpoint on success, or NULL on error. + +:: + + void rpmsg_destroy_ept(struct rpmsg_endpoint *ept); + + +destroys an existing rpmsg endpoint. user should provide a pointer +to an rpmsg endpoint that was previously created with rpmsg_create_ept(). + +:: + + int register_rpmsg_driver(struct rpmsg_driver *rpdrv); + + +registers an rpmsg driver with the rpmsg bus. user should provide +a pointer to an rpmsg_driver struct, which contains the driver's +->probe() and ->remove() functions, an rx callback, and an id_table +specifying the names of the channels this driver is interested to +be probed with. + +:: + + void unregister_rpmsg_driver(struct rpmsg_driver *rpdrv); + + +unregisters an rpmsg driver from the rpmsg bus. user should provide +a pointer to a previously-registered rpmsg_driver struct. +Returns 0 on success, and an appropriate error value on failure. + + +Typical usage +============= + +The following is a simple rpmsg driver, that sends an "hello!" message +on probe(), and whenever it receives an incoming message, it dumps its +content to the console. 
+ +:: + + #include + #include + #include + + static void rpmsg_sample_cb(struct rpmsg_channel *rpdev, void *data, int len, + void *priv, u32 src) + { + print_hex_dump(KERN_INFO, "incoming message:", DUMP_PREFIX_NONE, + 16, 1, data, len, true); + } + + static int rpmsg_sample_probe(struct rpmsg_channel *rpdev) + { + int err; + + dev_info(&rpdev->dev, "chnl: 0x%x -> 0x%x\n", rpdev->src, rpdev->dst); + + /* send a message on our channel */ + err = rpmsg_send(rpdev, "hello!", 6); + if (err) { + pr_err("rpmsg_send failed: %d\n", err); + return err; + } + + return 0; + } + + static void rpmsg_sample_remove(struct rpmsg_channel *rpdev) + { + dev_info(&rpdev->dev, "rpmsg sample client driver is removed\n"); + } + + static struct rpmsg_device_id rpmsg_driver_sample_id_table[] = { + { .name = "rpmsg-client-sample" }, + { }, + }; + MODULE_DEVICE_TABLE(rpmsg, rpmsg_driver_sample_id_table); + + static struct rpmsg_driver rpmsg_sample_client = { + .drv.name = KBUILD_MODNAME, + .id_table = rpmsg_driver_sample_id_table, + .probe = rpmsg_sample_probe, + .callback = rpmsg_sample_cb, + .remove = rpmsg_sample_remove, + }; + module_rpmsg_driver(rpmsg_sample_client); + +.. note:: + + a similar sample which can be built and loaded can be found + in samples/rpmsg/. + +Allocations of rpmsg channels +============================= + +At this point we only support dynamic allocations of rpmsg channels. + +This is possible only with remote processors that have the VIRTIO_RPMSG_F_NS +virtio device feature set. This feature bit means that the remote +processor supports dynamic name service announcement messages. + +When this feature is enabled, creation of rpmsg devices (i.e. channels) +is completely dynamic: the remote processor announces the existence of a +remote rpmsg service by sending a name service message (which contains +the name and rpmsg addr of the remote service, see struct rpmsg_ns_msg). + +This message is then handled by the rpmsg bus, which in turn dynamically +creates and registers an rpmsg channel (which represents the remote service). +If/when a relevant rpmsg driver is registered, it will be immediately probed +by the bus, and can then start sending messages to the remote service. + +The plan is also to add static creation of rpmsg channels via the virtio +config space, but it's not implemented yet. diff --git a/Documentation/staging/speculation.rst b/Documentation/staging/speculation.rst new file mode 100644 index 000000000000..8045d99bcf12 --- /dev/null +++ b/Documentation/staging/speculation.rst @@ -0,0 +1,92 @@ +=========== +Speculation +=========== + +This document explains potential effects of speculation, and how undesirable +effects can be mitigated portably using common APIs. + +------------------------------------------------------------------------------ + +To improve performance and minimize average latencies, many contemporary CPUs +employ speculative execution techniques such as branch prediction, performing +work which may be discarded at a later stage. + +Typically speculative execution cannot be observed from architectural state, +such as the contents of registers. However, in some cases it is possible to +observe its impact on microarchitectural state, such as the presence or +absence of data in caches. Such state may form side-channels which can be +observed to extract secret information. + +For example, in the presence of branch prediction, it is possible for bounds +checks to be ignored by code which is speculatively executed. 
+Consider the following code::
+
+	int load_array(int *array, unsigned int index)
+	{
+		if (index >= MAX_ARRAY_ELEMS)
+			return 0;
+		else
+			return array[index];
+	}
+
+Which, on arm64, may be compiled to an assembly sequence such as::
+
+	CMP	<index>, #MAX_ARRAY_ELEMS
+	B.LT	less
+	MOV	<returnval>, #0
+	RET
+  less:
+	LDR	<returnval>, [<array>, <index>]
+	RET
+
+It is possible that a CPU mis-predicts the conditional branch, and
+speculatively loads array[index], even if index >= MAX_ARRAY_ELEMS. This
+value will subsequently be discarded, but the speculated load may affect
+microarchitectural state which can be subsequently measured.
+
+More complex sequences involving multiple dependent memory accesses may
+result in sensitive information being leaked. Consider the following
+code, building on the prior example::
+
+	int load_dependent_arrays(int *arr1, int *arr2, int index)
+	{
+		int val1, val2;
+
+		val1 = load_array(arr1, index);
+		val2 = load_array(arr2, val1);
+
+		return val2;
+	}
+
+Under speculation, the first call to load_array() may return the value
+of an out-of-bounds address, while the second call will influence
+microarchitectural state dependent on this value. This may provide an
+arbitrary read primitive.
+
+====================================
+Mitigating speculation side-channels
+====================================
+
+The kernel provides a generic API to ensure that bounds checks are
+respected even under speculation. Architectures which are affected by
+speculation-based side-channels are expected to implement these
+primitives.
+
+The array_index_nospec() helper in <linux/nospec.h> can be used to
+prevent information from being leaked via side-channels.
+
+A call to array_index_nospec(index, size) returns a sanitized index
+value that is bounded to [0, size) even under CPU speculation
+conditions.
+
+This can be used to protect the earlier load_array() example::
+
+	int load_array(int *array, unsigned int index)
+	{
+		if (index >= MAX_ARRAY_ELEMS)
+			return 0;
+		else {
+			index = array_index_nospec(index, MAX_ARRAY_ELEMS);
+			return array[index];
+		}
+	}
diff --git a/Documentation/staging/static-keys.rst b/Documentation/staging/static-keys.rst
new file mode 100644
index 000000000000..38290b9f25eb
--- /dev/null
+++ b/Documentation/staging/static-keys.rst
@@ -0,0 +1,331 @@
+===========
+Static Keys
+===========
+
+.. warning::
+
+   DEPRECATED API:
+
+   The use of 'struct static_key' directly is now DEPRECATED. In addition
+   static_key_{true,false}() is also DEPRECATED. i.e. DO NOT use the following::
+
+	struct static_key false = STATIC_KEY_INIT_FALSE;
+	struct static_key true = STATIC_KEY_INIT_TRUE;
+	static_key_true()
+	static_key_false()
+
+   The updated API replacements are::
+
+	DEFINE_STATIC_KEY_TRUE(key);
+	DEFINE_STATIC_KEY_FALSE(key);
+	DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count);
+	DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count);
+	static_branch_likely()
+	static_branch_unlikely()
+
+Abstract
+========
+
+Static keys allow the inclusion of seldom used features in
+performance-sensitive fast-path kernel code, via a GCC feature and a code
+patching technique. A quick example::
+
+	DEFINE_STATIC_KEY_FALSE(key);
+
+	...
+
+	if (static_branch_unlikely(&key))
+		do unlikely code
+	else
+		do likely code
+
+	...
+	static_branch_enable(&key);
+	...
+	static_branch_disable(&key);
+	...
+
+The static_branch_unlikely() branch will be generated into the code with as
+little impact on the likely code path as possible.
+
+
+Motivation
+==========
+
+
+Currently, tracepoints are implemented using a conditional branch.
The +conditional check requires checking a global variable for each tracepoint. +Although the overhead of this check is small, it increases when the memory +cache comes under pressure (memory cache lines for these global variables may +be shared with other memory accesses). As we increase the number of tracepoints +in the kernel this overhead may become more of an issue. In addition, +tracepoints are often dormant (disabled) and provide no direct kernel +functionality. Thus, it is highly desirable to reduce their impact as much as +possible. Although tracepoints are the original motivation for this work, other +kernel code paths should be able to make use of the static keys facility. + + +Solution +======== + + +gcc (v4.5) adds a new 'asm goto' statement that allows branching to a label: + +https://gcc.gnu.org/ml/gcc-patches/2009-07/msg01556.html + +Using the 'asm goto', we can create branches that are either taken or not taken +by default, without the need to check memory. Then, at run-time, we can patch +the branch site to change the branch direction. + +For example, if we have a simple branch that is disabled by default:: + + if (static_branch_unlikely(&key)) + printk("I am the true branch\n"); + +Thus, by default the 'printk' will not be emitted. And the code generated will +consist of a single atomic 'no-op' instruction (5 bytes on x86), in the +straight-line code path. When the branch is 'flipped', we will patch the +'no-op' in the straight-line codepath with a 'jump' instruction to the +out-of-line true branch. Thus, changing branch direction is expensive but +branch selection is basically 'free'. That is the basic tradeoff of this +optimization. + +This lowlevel patching mechanism is called 'jump label patching', and it gives +the basis for the static keys facility. + +Static key label API, usage and examples +======================================== + + +In order to make use of this optimization you must first define a key:: + + DEFINE_STATIC_KEY_TRUE(key); + +or:: + + DEFINE_STATIC_KEY_FALSE(key); + + +The key must be global, that is, it can't be allocated on the stack or dynamically +allocated at run-time. + +The key is then used in code as:: + + if (static_branch_unlikely(&key)) + do unlikely code + else + do likely code + +Or:: + + if (static_branch_likely(&key)) + do likely code + else + do unlikely code + +Keys defined via DEFINE_STATIC_KEY_TRUE(), or DEFINE_STATIC_KEY_FALSE, may +be used in either static_branch_likely() or static_branch_unlikely() +statements. + +Branch(es) can be set true via:: + + static_branch_enable(&key); + +or false via:: + + static_branch_disable(&key); + +The branch(es) can then be switched via reference counts:: + + static_branch_inc(&key); + ... + static_branch_dec(&key); + +Thus, 'static_branch_inc()' means 'make the branch true', and +'static_branch_dec()' means 'make the branch false' with appropriate +reference counting. For example, if the key is initialized true, a +static_branch_dec(), will switch the branch to false. And a subsequent +static_branch_inc(), will change the branch back to true. Likewise, if the +key is initialized false, a 'static_branch_inc()', will change the branch to +true. And then a 'static_branch_dec()', will again make the branch false. + +The state and the reference count can be retrieved with 'static_key_enabled()' +and 'static_key_count()'. In general, if you use these functions, they +should be protected with the same mutex used around the enable/disable +or increment/decrement function. 
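+
+As an illustrative sketch only (the key, mutex and function names below are
+made up and not taken from any in-tree user), a driver that wants a slow-path
+quirk active while at least one affected device is present could combine the
+reference-counting API with a mutex like this::
+
+	DEFINE_STATIC_KEY_FALSE(quirk_key);
+	static DEFINE_MUTEX(quirk_mutex);
+
+	static void quirk_device_add(void)
+	{
+		mutex_lock(&quirk_mutex);
+		/* the first user flips the branch to true */
+		static_branch_inc(&quirk_key);
+		pr_debug("quirk refcount: %d\n", static_key_count(&quirk_key.key));
+		mutex_unlock(&quirk_mutex);
+	}
+
+	static void quirk_device_del(void)
+	{
+		mutex_lock(&quirk_mutex);
+		/* the last user flips the branch back to false */
+		static_branch_dec(&quirk_key);
+		mutex_unlock(&quirk_mutex);
+	}
+
+	static void hot_path(void)
+	{
+		if (static_branch_unlikely(&quirk_key))
+			pr_info("taking the quirk slow path\n");
+	}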
+ +Note that switching branches results in some locks being taken, +particularly the CPU hotplug lock (in order to avoid races against +CPUs being brought in the kernel while the kernel is getting +patched). Calling the static key API from within a hotplug notifier is +thus a sure deadlock recipe. In order to still allow use of the +functionality, the following functions are provided: + + static_key_enable_cpuslocked() + static_key_disable_cpuslocked() + static_branch_enable_cpuslocked() + static_branch_disable_cpuslocked() + +These functions are *not* general purpose, and must only be used when +you really know that you're in the above context, and no other. + +Where an array of keys is required, it can be defined as:: + + DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count); + +or:: + + DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count); + +4) Architecture level code patching interface, 'jump labels' + + +There are a few functions and macros that architectures must implement in order +to take advantage of this optimization. If there is no architecture support, we +simply fall back to a traditional, load, test, and jump sequence. Also, the +struct jump_entry table must be at least 4-byte aligned because the +static_key->entry field makes use of the two least significant bits. + +* ``select HAVE_ARCH_JUMP_LABEL``, + see: arch/x86/Kconfig + +* ``#define JUMP_LABEL_NOP_SIZE``, + see: arch/x86/include/asm/jump_label.h + +* ``__always_inline bool arch_static_branch(struct static_key *key, bool branch)``, + see: arch/x86/include/asm/jump_label.h + +* ``__always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)``, + see: arch/x86/include/asm/jump_label.h + +* ``void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type)``, + see: arch/x86/kernel/jump_label.c + +* ``__init_or_module void arch_jump_label_transform_static(struct jump_entry *entry, enum jump_label_type type)``, + see: arch/x86/kernel/jump_label.c + +* ``struct jump_entry``, + see: arch/x86/include/asm/jump_label.h + + +5) Static keys / jump label analysis, results (x86_64): + + +As an example, let's add the following branch to 'getppid()', such that the +system call now looks like:: + + SYSCALL_DEFINE0(getppid) + { + int pid; + + + if (static_branch_unlikely(&key)) + + printk("I am the true branch\n"); + + rcu_read_lock(); + pid = task_tgid_vnr(rcu_dereference(current->real_parent)); + rcu_read_unlock(); + + return pid; + } + +The resulting instructions with jump labels generated by GCC is:: + + ffffffff81044290 : + ffffffff81044290: 55 push %rbp + ffffffff81044291: 48 89 e5 mov %rsp,%rbp + ffffffff81044294: e9 00 00 00 00 jmpq ffffffff81044299 + ffffffff81044299: 65 48 8b 04 25 c0 b6 mov %gs:0xb6c0,%rax + ffffffff810442a0: 00 00 + ffffffff810442a2: 48 8b 80 80 02 00 00 mov 0x280(%rax),%rax + ffffffff810442a9: 48 8b 80 b0 02 00 00 mov 0x2b0(%rax),%rax + ffffffff810442b0: 48 8b b8 e8 02 00 00 mov 0x2e8(%rax),%rdi + ffffffff810442b7: e8 f4 d9 00 00 callq ffffffff81051cb0 + ffffffff810442bc: 5d pop %rbp + ffffffff810442bd: 48 98 cltq + ffffffff810442bf: c3 retq + ffffffff810442c0: 48 c7 c7 e3 54 98 81 mov $0xffffffff819854e3,%rdi + ffffffff810442c7: 31 c0 xor %eax,%eax + ffffffff810442c9: e8 71 13 6d 00 callq ffffffff8171563f + ffffffff810442ce: eb c9 jmp ffffffff81044299 + +Without the jump label optimization it looks like:: + + ffffffff810441f0 : + ffffffff810441f0: 8b 05 8a 52 d8 00 mov 0xd8528a(%rip),%eax # ffffffff81dc9480 + ffffffff810441f6: 55 push %rbp + ffffffff810441f7: 48 89 e5 mov %rsp,%rbp + 
ffffffff810441fa: 85 c0 test %eax,%eax + ffffffff810441fc: 75 27 jne ffffffff81044225 + ffffffff810441fe: 65 48 8b 04 25 c0 b6 mov %gs:0xb6c0,%rax + ffffffff81044205: 00 00 + ffffffff81044207: 48 8b 80 80 02 00 00 mov 0x280(%rax),%rax + ffffffff8104420e: 48 8b 80 b0 02 00 00 mov 0x2b0(%rax),%rax + ffffffff81044215: 48 8b b8 e8 02 00 00 mov 0x2e8(%rax),%rdi + ffffffff8104421c: e8 2f da 00 00 callq ffffffff81051c50 + ffffffff81044221: 5d pop %rbp + ffffffff81044222: 48 98 cltq + ffffffff81044224: c3 retq + ffffffff81044225: 48 c7 c7 13 53 98 81 mov $0xffffffff81985313,%rdi + ffffffff8104422c: 31 c0 xor %eax,%eax + ffffffff8104422e: e8 60 0f 6d 00 callq ffffffff81715193 + ffffffff81044233: eb c9 jmp ffffffff810441fe + ffffffff81044235: 66 66 2e 0f 1f 84 00 data32 nopw %cs:0x0(%rax,%rax,1) + ffffffff8104423c: 00 00 00 00 + +Thus, the disable jump label case adds a 'mov', 'test' and 'jne' instruction +vs. the jump label case just has a 'no-op' or 'jmp 0'. (The jmp 0, is patched +to a 5 byte atomic no-op instruction at boot-time.) Thus, the disabled jump +label case adds:: + + 6 (mov) + 2 (test) + 2 (jne) = 10 - 5 (5 byte jump 0) = 5 addition bytes. + +If we then include the padding bytes, the jump label code saves, 16 total bytes +of instruction memory for this small function. In this case the non-jump label +function is 80 bytes long. Thus, we have saved 20% of the instruction +footprint. We can in fact improve this even further, since the 5-byte no-op +really can be a 2-byte no-op since we can reach the branch with a 2-byte jmp. +However, we have not yet implemented optimal no-op sizes (they are currently +hard-coded). + +Since there are a number of static key API uses in the scheduler paths, +'pipe-test' (also known as 'perf bench sched pipe') can be used to show the +performance improvement. Testing done on 3.3.0-rc2: + +jump label disabled:: + + Performance counter stats for 'bash -c /tmp/pipe-test' (50 runs): + + 855.700314 task-clock # 0.534 CPUs utilized ( +- 0.11% ) + 200,003 context-switches # 0.234 M/sec ( +- 0.00% ) + 0 CPU-migrations # 0.000 M/sec ( +- 39.58% ) + 487 page-faults # 0.001 M/sec ( +- 0.02% ) + 1,474,374,262 cycles # 1.723 GHz ( +- 0.17% ) + stalled-cycles-frontend + stalled-cycles-backend + 1,178,049,567 instructions # 0.80 insns per cycle ( +- 0.06% ) + 208,368,926 branches # 243.507 M/sec ( +- 0.06% ) + 5,569,188 branch-misses # 2.67% of all branches ( +- 0.54% ) + + 1.601607384 seconds time elapsed ( +- 0.07% ) + +jump label enabled:: + + Performance counter stats for 'bash -c /tmp/pipe-test' (50 runs): + + 841.043185 task-clock # 0.533 CPUs utilized ( +- 0.12% ) + 200,004 context-switches # 0.238 M/sec ( +- 0.00% ) + 0 CPU-migrations # 0.000 M/sec ( +- 40.87% ) + 487 page-faults # 0.001 M/sec ( +- 0.05% ) + 1,432,559,428 cycles # 1.703 GHz ( +- 0.18% ) + stalled-cycles-frontend + stalled-cycles-backend + 1,175,363,994 instructions # 0.82 insns per cycle ( +- 0.04% ) + 206,859,359 branches # 245.956 M/sec ( +- 0.04% ) + 4,884,119 branch-misses # 2.36% of all branches ( +- 0.85% ) + + 1.579384366 seconds time elapsed + +The percentage of saved branches is .7%, and we've saved 12% on +'branch-misses'. This is where we would expect to get the most savings, since +this optimization is about reducing the number of branches. In addition, we've +saved .2% on instructions, and 2.8% on cycles and 1.4% on elapsed time. 
diff --git a/Documentation/staging/tee.rst b/Documentation/staging/tee.rst new file mode 100644 index 000000000000..62e8ba64d04f --- /dev/null +++ b/Documentation/staging/tee.rst @@ -0,0 +1,277 @@ +============= +TEE subsystem +============= + +This document describes the TEE subsystem in Linux. + +A TEE (Trusted Execution Environment) is a trusted OS running in some +secure environment, for example, TrustZone on ARM CPUs, or a separate +secure co-processor etc. A TEE driver handles the details needed to +communicate with the TEE. + +This subsystem deals with: + +- Registration of TEE drivers + +- Managing shared memory between Linux and the TEE + +- Providing a generic API to the TEE + +The TEE interface +================= + +include/uapi/linux/tee.h defines the generic interface to a TEE. + +User space (the client) connects to the driver by opening /dev/tee[0-9]* or +/dev/teepriv[0-9]*. + +- TEE_IOC_SHM_ALLOC allocates shared memory and returns a file descriptor + which user space can mmap. When user space doesn't need the file + descriptor any more, it should be closed. When shared memory isn't needed + any longer it should be unmapped with munmap() to allow the reuse of + memory. + +- TEE_IOC_VERSION lets user space know which TEE this driver handles and + its capabilities. + +- TEE_IOC_OPEN_SESSION opens a new session to a Trusted Application. + +- TEE_IOC_INVOKE invokes a function in a Trusted Application. + +- TEE_IOC_CANCEL may cancel an ongoing TEE_IOC_OPEN_SESSION or TEE_IOC_INVOKE. + +- TEE_IOC_CLOSE_SESSION closes a session to a Trusted Application. + +There are two classes of clients, normal clients and supplicants. The latter is +a helper process for the TEE to access resources in Linux, for example file +system access. A normal client opens /dev/tee[0-9]* and a supplicant opens +/dev/teepriv[0-9]. + +Much of the communication between clients and the TEE is opaque to the +driver. The main job for the driver is to receive requests from the +clients, forward them to the TEE and send back the results. In the case of +supplicants the communication goes in the other direction, the TEE sends +requests to the supplicant which then sends back the result. + +The TEE kernel interface +======================== + +Kernel provides a TEE bus infrastructure where a Trusted Application is +represented as a device identified via Universally Unique Identifier (UUID) and +client drivers register a table of supported device UUIDs. + +TEE bus infrastructure registers following APIs: +- match(): iterates over the client driver UUID table to find a corresponding + match for device UUID. If a match is found, then this particular device is + probed via corresponding probe API registered by the client driver. This + process happens whenever a device or a client driver is registered with TEE + bus. +- uevent(): notifies user-space (udev) whenever a new device is registered on + TEE bus for auto-loading of modularized client drivers. + +TEE bus device enumeration is specific to underlying TEE implementation, so it +is left open for TEE drivers to provide corresponding implementation. + +Then TEE client driver can talk to a matched Trusted Application using APIs +listed in include/linux/tee_drv.h. 
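+
+As a rough sketch (not taken from an in-tree driver; the command id, the
+session handling and the parameter values are purely illustrative), a matched
+client driver could invoke a command in its Trusted Application with the
+tee_client_* helpers from include/linux/tee_drv.h roughly as follows::
+
+	static int client_invoke(struct tee_context *ctx, u32 session)
+	{
+		struct tee_ioctl_invoke_arg inv_arg = { };
+		struct tee_param param[4] = { };
+		int ret;
+
+		inv_arg.func = 1;		/* hypothetical TA command id */
+		inv_arg.session = session;
+		inv_arg.num_params = 4;
+
+		/* one input value parameter; the remaining slots stay unused */
+		param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+		param[0].u.value.a = 42;
+
+		ret = tee_client_invoke_func(ctx, &inv_arg, param);
+		if (ret < 0 || inv_arg.ret != 0)
+			return ret < 0 ? ret : -EINVAL;
+
+		return 0;
+	}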
+ +TEE client driver example +------------------------- + +Suppose a TEE client driver needs to communicate with a Trusted Application +having UUID: ``ac6a4085-0e82-4c33-bf98-8eb8e118b6c2``, so driver registration +snippet would look like:: + + static const struct tee_client_device_id client_id_table[] = { + {UUID_INIT(0xac6a4085, 0x0e82, 0x4c33, + 0xbf, 0x98, 0x8e, 0xb8, 0xe1, 0x18, 0xb6, 0xc2)}, + {} + }; + + MODULE_DEVICE_TABLE(tee, client_id_table); + + static struct tee_client_driver client_driver = { + .id_table = client_id_table, + .driver = { + .name = DRIVER_NAME, + .bus = &tee_bus_type, + .probe = client_probe, + .remove = client_remove, + }, + }; + + static int __init client_init(void) + { + return driver_register(&client_driver.driver); + } + + static void __exit client_exit(void) + { + driver_unregister(&client_driver.driver); + } + + module_init(client_init); + module_exit(client_exit); + +OP-TEE driver +============= + +The OP-TEE driver handles OP-TEE [1] based TEEs. Currently it is only the ARM +TrustZone based OP-TEE solution that is supported. + +Lowest level of communication with OP-TEE builds on ARM SMC Calling +Convention (SMCCC) [2], which is the foundation for OP-TEE's SMC interface +[3] used internally by the driver. Stacked on top of that is OP-TEE Message +Protocol [4]. + +OP-TEE SMC interface provides the basic functions required by SMCCC and some +additional functions specific for OP-TEE. The most interesting functions are: + +- OPTEE_SMC_FUNCID_CALLS_UID (part of SMCCC) returns the version information + which is then returned by TEE_IOC_VERSION + +- OPTEE_SMC_CALL_GET_OS_UUID returns the particular OP-TEE implementation, used + to tell, for instance, a TrustZone OP-TEE apart from an OP-TEE running on a + separate secure co-processor. + +- OPTEE_SMC_CALL_WITH_ARG drives the OP-TEE message protocol + +- OPTEE_SMC_GET_SHM_CONFIG lets the driver and OP-TEE agree on which memory + range to used for shared memory between Linux and OP-TEE. + +The GlobalPlatform TEE Client API [5] is implemented on top of the generic +TEE API. + +Picture of the relationship between the different components in the +OP-TEE architecture:: + + User space Kernel Secure world + ~~~~~~~~~~ ~~~~~~ ~~~~~~~~~~~~ + +--------+ +-------------+ + | Client | | Trusted | + +--------+ | Application | + /\ +-------------+ + || +----------+ /\ + || |tee- | || + || |supplicant| \/ + || +----------+ +-------------+ + \/ /\ | TEE Internal| + +-------+ || | API | + + TEE | || +--------+--------+ +-------------+ + | Client| || | TEE | OP-TEE | | OP-TEE | + | API | \/ | subsys | driver | | Trusted OS | + +-------+----------------+----+-------+----+-----------+-------------+ + | Generic TEE API | | OP-TEE MSG | + | IOCTL (TEE_IOC_*) | | SMCCC (OPTEE_SMC_CALL_*) | + +-----------------------------+ +------------------------------+ + +RPC (Remote Procedure Call) are requests from secure world to kernel driver +or tee-supplicant. An RPC is identified by a special range of SMCCC return +values from OPTEE_SMC_CALL_WITH_ARG. RPC messages which are intended for the +kernel are handled by the kernel driver. Other RPC messages will be forwarded to +tee-supplicant without further involvement of the driver, except switching +shared memory buffer representation. + +OP-TEE device enumeration +------------------------- + +OP-TEE provides a pseudo Trusted Application: drivers/tee/optee/device.c in +order to support device enumeration. 
In other words, OP-TEE driver invokes this +application to retrieve a list of Trusted Applications which can be registered +as devices on the TEE bus. + +AMD-TEE driver +============== + +The AMD-TEE driver handles the communication with AMD's TEE environment. The +TEE environment is provided by AMD Secure Processor. + +The AMD Secure Processor (formerly called Platform Security Processor or PSP) +is a dedicated processor that features ARM TrustZone technology, along with a +software-based Trusted Execution Environment (TEE) designed to enable +third-party Trusted Applications. This feature is currently enabled only for +APUs. + +The following picture shows a high level overview of AMD-TEE:: + + | + x86 | + | + User space (Kernel space) | AMD Secure Processor (PSP) + ~~~~~~~~~~ ~~~~~~~~~~~~~~ | ~~~~~~~~~~~~~~~~~~~~~~~~~~ + | + +--------+ | +-------------+ + | Client | | | Trusted | + +--------+ | | Application | + /\ | +-------------+ + || | /\ + || | || + || | \/ + || | +----------+ + || | | TEE | + || | | Internal | + \/ | | API | + +---------+ +-----------+---------+ +----------+ + | TEE | | TEE | AMD-TEE | | AMD-TEE | + | Client | | subsystem | driver | | Trusted | + | API | | | | | OS | + +---------+-----------+----+------+---------+---------+----------+ + | Generic TEE API | | ASP | Mailbox | + | IOCTL (TEE_IOC_*) | | driver | Register Protocol | + +--------------------------+ +---------+--------------------+ + +At the lowest level (in x86), the AMD Secure Processor (ASP) driver uses the +CPU to PSP mailbox regsister to submit commands to the PSP. The format of the +command buffer is opaque to the ASP driver. It's role is to submit commands to +the secure processor and return results to AMD-TEE driver. The interface +between AMD-TEE driver and AMD Secure Processor driver can be found in [6]. + +The AMD-TEE driver packages the command buffer payload for processing in TEE. +The command buffer format for the different TEE commands can be found in [7]. + +The TEE commands supported by AMD-TEE Trusted OS are: + +* TEE_CMD_ID_LOAD_TA - loads a Trusted Application (TA) binary into + TEE environment. +* TEE_CMD_ID_UNLOAD_TA - unloads TA binary from TEE environment. +* TEE_CMD_ID_OPEN_SESSION - opens a session with a loaded TA. +* TEE_CMD_ID_CLOSE_SESSION - closes session with loaded TA +* TEE_CMD_ID_INVOKE_CMD - invokes a command with loaded TA +* TEE_CMD_ID_MAP_SHARED_MEM - maps shared memory +* TEE_CMD_ID_UNMAP_SHARED_MEM - unmaps shared memory + +AMD-TEE Trusted OS is the firmware running on AMD Secure Processor. + +The AMD-TEE driver registers itself with TEE subsystem and implements the +following driver function callbacks: + +* get_version - returns the driver implementation id and capability. +* open - sets up the driver context data structure. +* release - frees up driver resources. +* open_session - loads the TA binary and opens session with loaded TA. +* close_session - closes session with loaded TA and unloads it. +* invoke_func - invokes a command with loaded TA. + +cancel_req driver callback is not supported by AMD-TEE. + +The GlobalPlatform TEE Client API [5] can be used by the user space (client) to +talk to AMD's TEE. AMD's TEE provides a secure environment for loading, opening +a session, invoking commands and clossing session with TA. 
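+
+Schematically (a sketch only, not code from the AMD-TEE driver; the amd_*
+handler names are made up and registration details are omitted), a TEE driver
+exposes such callbacks to the subsystem through a struct tee_driver_ops table
+from include/linux/tee_drv.h::
+
+	static const struct tee_driver_ops amd_tee_ops = {
+		.get_version	= amd_get_version,
+		.open		= amd_open,
+		.release	= amd_release,
+		.open_session	= amd_open_session,
+		.close_session	= amd_close_session,
+		.invoke_func	= amd_invoke_func,
+		/* .cancel_req intentionally left unimplemented */
+	};
+
+	static const struct tee_desc amd_tee_desc = {
+		.name	= "amdtee-clnt",
+		.ops	= &amd_tee_ops,
+		.owner	= THIS_MODULE,
+	};
+
+	/*
+	 * The device itself is then created with tee_device_alloc() and
+	 * made visible to user space with tee_device_register().
+	 */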
+ +References +========== + +[1] https://github.com/OP-TEE/optee_os + +[2] http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html + +[3] drivers/tee/optee/optee_smc.h + +[4] drivers/tee/optee/optee_msg.h + +[5] http://www.globalplatform.org/specificationsdevice.asp look for + "TEE Client API Specification v1.0" and click download. + +[6] include/linux/psp-tee.h + +[7] drivers/tee/amdtee/amdtee_if.h diff --git a/Documentation/staging/xz.rst b/Documentation/staging/xz.rst new file mode 100644 index 000000000000..b2f5ff12a161 --- /dev/null +++ b/Documentation/staging/xz.rst @@ -0,0 +1,127 @@ +============================ +XZ data compression in Linux +============================ + +Introduction +============ + +XZ is a general purpose data compression format with high compression +ratio and relatively fast decompression. The primary compression +algorithm (filter) is LZMA2. Additional filters can be used to improve +compression ratio even further. E.g. Branch/Call/Jump (BCJ) filters +improve compression ratio of executable data. + +The XZ decompressor in Linux is called XZ Embedded. It supports +the LZMA2 filter and optionally also BCJ filters. CRC32 is supported +for integrity checking. The home page of XZ Embedded is at +, where you can find the +latest version and also information about using the code outside +the Linux kernel. + +For userspace, XZ Utils provide a zlib-like compression library +and a gzip-like command line tool. XZ Utils can be downloaded from +. + +XZ related components in the kernel +=================================== + +The xz_dec module provides XZ decompressor with single-call (buffer +to buffer) and multi-call (stateful) APIs. The usage of the xz_dec +module is documented in include/linux/xz.h. + +The xz_dec_test module is for testing xz_dec. xz_dec_test is not +useful unless you are hacking the XZ decompressor. xz_dec_test +allocates a char device major dynamically to which one can write +.xz files from userspace. The decompressed output is thrown away. +Keep an eye on dmesg to see diagnostics printed by xz_dec_test. +See the xz_dec_test source code for the details. + +For decompressing the kernel image, initramfs, and initrd, there +is a wrapper function in lib/decompress_unxz.c. Its API is the +same as in other decompress_*.c files, which is defined in +include/linux/decompress/generic.h. + +scripts/xz_wrap.sh is a wrapper for the xz command line tool found +from XZ Utils. The wrapper sets compression options to values suitable +for compressing the kernel image. + +For kernel makefiles, two commands are provided for use with +$(call if_needed). The kernel image should be compressed with +$(call if_needed,xzkern) which will use a BCJ filter and a big LZMA2 +dictionary. It will also append a four-byte trailer containing the +uncompressed size of the file, which is needed by the boot code. +Other things should be compressed with $(call if_needed,xzmisc) +which will use no BCJ filter and 1 MiB LZMA2 dictionary. + +Notes on compression options +============================ + +Since the XZ Embedded supports only streams with no integrity check or +CRC32, make sure that you don't use some other integrity check type +when encoding files that are supposed to be decoded by the kernel. With +liblzma, you need to use either LZMA_CHECK_NONE or LZMA_CHECK_CRC32 +when encoding. With the xz command line tool, use --check=none or +--check=crc32. 
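+
+For instance, a minimal userspace encoder that stays within this constraint
+could use liblzma's easy encoder with an explicit CRC32 check (a sketch which
+assumes liblzma is available; error handling is reduced to the bare minimum)::
+
+	#include <stdint.h>
+	#include <stdio.h>
+	#include <lzma.h>
+
+	/* Compress stdin to stdout as .xz with CRC32, decodable by XZ Embedded. */
+	int main(void)
+	{
+		uint8_t inbuf[BUFSIZ], outbuf[BUFSIZ];
+		lzma_stream strm = LZMA_STREAM_INIT;
+		lzma_action action = LZMA_RUN;
+		lzma_ret ret;
+
+		/* Preset 6 is just an example; see the notes below about
+		 * picking a small enough LZMA2 dictionary. */
+		if (lzma_easy_encoder(&strm, 6, LZMA_CHECK_CRC32) != LZMA_OK)
+			return 1;
+
+		strm.next_out = outbuf;
+		strm.avail_out = sizeof(outbuf);
+
+		do {
+			if (strm.avail_in == 0 && action == LZMA_RUN) {
+				strm.next_in = inbuf;
+				strm.avail_in = fread(inbuf, 1, sizeof(inbuf), stdin);
+				if (strm.avail_in == 0)
+					action = LZMA_FINISH;
+			}
+
+			ret = lzma_code(&strm, action);
+
+			if (strm.avail_out == 0 || ret == LZMA_STREAM_END) {
+				fwrite(outbuf, 1, sizeof(outbuf) - strm.avail_out, stdout);
+				strm.next_out = outbuf;
+				strm.avail_out = sizeof(outbuf);
+			}
+		} while (ret == LZMA_OK);
+
+		lzma_end(&strm);
+		return ret == LZMA_STREAM_END ? 0 : 1;
+	}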
+ +Using CRC32 is strongly recommended unless there is some other layer +which will verify the integrity of the uncompressed data anyway. +Double checking the integrity would probably be waste of CPU cycles. +Note that the headers will always have a CRC32 which will be validated +by the decoder; you can only change the integrity check type (or +disable it) for the actual uncompressed data. + +In userspace, LZMA2 is typically used with dictionary sizes of several +megabytes. The decoder needs to have the dictionary in RAM, thus big +dictionaries cannot be used for files that are intended to be decoded +by the kernel. 1 MiB is probably the maximum reasonable dictionary +size for in-kernel use (maybe more is OK for initramfs). The presets +in XZ Utils may not be optimal when creating files for the kernel, +so don't hesitate to use custom settings. Example:: + + xz --check=crc32 --lzma2=dict=512KiB inputfile + +An exception to above dictionary size limitation is when the decoder +is used in single-call mode. Decompressing the kernel itself is an +example of this situation. In single-call mode, the memory usage +doesn't depend on the dictionary size, and it is perfectly fine to +use a big dictionary: for maximum compression, the dictionary should +be at least as big as the uncompressed data itself. + +Future plans +============ + +Creating a limited XZ encoder may be considered if people think it is +useful. LZMA2 is slower to compress than e.g. Deflate or LZO even at +the fastest settings, so it isn't clear if LZMA2 encoder is wanted +into the kernel. + +Support for limited random-access reading is planned for the +decompression code. I don't know if it could have any use in the +kernel, but I know that it would be useful in some embedded projects +outside the Linux kernel. + +Conformance to the .xz file format specification +================================================ + +There are a couple of corner cases where things have been simplified +at expense of detecting errors as early as possible. These should not +matter in practice all, since they don't cause security issues. But +it is good to know this if testing the code e.g. with the test files +from XZ Utils. + +Reporting bugs +============== + +Before reporting a bug, please check that it's not fixed already +at upstream. See to get the +latest code. + +Report bugs to or visit #tukaani on +Freenode and talk to Larhzu. I don't actively read LKML or other +kernel-related mailing lists, so if there's something I should know, +you should email to me personally or use IRC. + +Don't bother Igor Pavlov with questions about the XZ implementation +in the kernel or about XZ Utils. While these two implementations +include essential code that is directly based on Igor Pavlov's code, +these implementations aren't maintained nor supported by him. diff --git a/Documentation/static-keys.txt b/Documentation/static-keys.txt deleted file mode 100644 index 38290b9f25eb..000000000000 --- a/Documentation/static-keys.txt +++ /dev/null @@ -1,331 +0,0 @@ -=========== -Static Keys -=========== - -.. warning:: - - DEPRECATED API: - - The use of 'struct static_key' directly, is now DEPRECATED. In addition - static_key_{true,false}() is also DEPRECATED. 
IE DO NOT use the following:: - - struct static_key false = STATIC_KEY_INIT_FALSE; - struct static_key true = STATIC_KEY_INIT_TRUE; - static_key_true() - static_key_false() - - The updated API replacements are:: - - DEFINE_STATIC_KEY_TRUE(key); - DEFINE_STATIC_KEY_FALSE(key); - DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count); - DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count); - static_branch_likely() - static_branch_unlikely() - -Abstract -======== - -Static keys allows the inclusion of seldom used features in -performance-sensitive fast-path kernel code, via a GCC feature and a code -patching technique. A quick example:: - - DEFINE_STATIC_KEY_FALSE(key); - - ... - - if (static_branch_unlikely(&key)) - do unlikely code - else - do likely code - - ... - static_branch_enable(&key); - ... - static_branch_disable(&key); - ... - -The static_branch_unlikely() branch will be generated into the code with as little -impact to the likely code path as possible. - - -Motivation -========== - - -Currently, tracepoints are implemented using a conditional branch. The -conditional check requires checking a global variable for each tracepoint. -Although the overhead of this check is small, it increases when the memory -cache comes under pressure (memory cache lines for these global variables may -be shared with other memory accesses). As we increase the number of tracepoints -in the kernel this overhead may become more of an issue. In addition, -tracepoints are often dormant (disabled) and provide no direct kernel -functionality. Thus, it is highly desirable to reduce their impact as much as -possible. Although tracepoints are the original motivation for this work, other -kernel code paths should be able to make use of the static keys facility. - - -Solution -======== - - -gcc (v4.5) adds a new 'asm goto' statement that allows branching to a label: - -https://gcc.gnu.org/ml/gcc-patches/2009-07/msg01556.html - -Using the 'asm goto', we can create branches that are either taken or not taken -by default, without the need to check memory. Then, at run-time, we can patch -the branch site to change the branch direction. - -For example, if we have a simple branch that is disabled by default:: - - if (static_branch_unlikely(&key)) - printk("I am the true branch\n"); - -Thus, by default the 'printk' will not be emitted. And the code generated will -consist of a single atomic 'no-op' instruction (5 bytes on x86), in the -straight-line code path. When the branch is 'flipped', we will patch the -'no-op' in the straight-line codepath with a 'jump' instruction to the -out-of-line true branch. Thus, changing branch direction is expensive but -branch selection is basically 'free'. That is the basic tradeoff of this -optimization. - -This lowlevel patching mechanism is called 'jump label patching', and it gives -the basis for the static keys facility. - -Static key label API, usage and examples -======================================== - - -In order to make use of this optimization you must first define a key:: - - DEFINE_STATIC_KEY_TRUE(key); - -or:: - - DEFINE_STATIC_KEY_FALSE(key); - - -The key must be global, that is, it can't be allocated on the stack or dynamically -allocated at run-time. 
- -The key is then used in code as:: - - if (static_branch_unlikely(&key)) - do unlikely code - else - do likely code - -Or:: - - if (static_branch_likely(&key)) - do likely code - else - do unlikely code - -Keys defined via DEFINE_STATIC_KEY_TRUE(), or DEFINE_STATIC_KEY_FALSE, may -be used in either static_branch_likely() or static_branch_unlikely() -statements. - -Branch(es) can be set true via:: - - static_branch_enable(&key); - -or false via:: - - static_branch_disable(&key); - -The branch(es) can then be switched via reference counts:: - - static_branch_inc(&key); - ... - static_branch_dec(&key); - -Thus, 'static_branch_inc()' means 'make the branch true', and -'static_branch_dec()' means 'make the branch false' with appropriate -reference counting. For example, if the key is initialized true, a -static_branch_dec(), will switch the branch to false. And a subsequent -static_branch_inc(), will change the branch back to true. Likewise, if the -key is initialized false, a 'static_branch_inc()', will change the branch to -true. And then a 'static_branch_dec()', will again make the branch false. - -The state and the reference count can be retrieved with 'static_key_enabled()' -and 'static_key_count()'. In general, if you use these functions, they -should be protected with the same mutex used around the enable/disable -or increment/decrement function. - -Note that switching branches results in some locks being taken, -particularly the CPU hotplug lock (in order to avoid races against -CPUs being brought in the kernel while the kernel is getting -patched). Calling the static key API from within a hotplug notifier is -thus a sure deadlock recipe. In order to still allow use of the -functionality, the following functions are provided: - - static_key_enable_cpuslocked() - static_key_disable_cpuslocked() - static_branch_enable_cpuslocked() - static_branch_disable_cpuslocked() - -These functions are *not* general purpose, and must only be used when -you really know that you're in the above context, and no other. - -Where an array of keys is required, it can be defined as:: - - DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count); - -or:: - - DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count); - -4) Architecture level code patching interface, 'jump labels' - - -There are a few functions and macros that architectures must implement in order -to take advantage of this optimization. If there is no architecture support, we -simply fall back to a traditional, load, test, and jump sequence. Also, the -struct jump_entry table must be at least 4-byte aligned because the -static_key->entry field makes use of the two least significant bits. 
- -* ``select HAVE_ARCH_JUMP_LABEL``, - see: arch/x86/Kconfig - -* ``#define JUMP_LABEL_NOP_SIZE``, - see: arch/x86/include/asm/jump_label.h - -* ``__always_inline bool arch_static_branch(struct static_key *key, bool branch)``, - see: arch/x86/include/asm/jump_label.h - -* ``__always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)``, - see: arch/x86/include/asm/jump_label.h - -* ``void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type)``, - see: arch/x86/kernel/jump_label.c - -* ``__init_or_module void arch_jump_label_transform_static(struct jump_entry *entry, enum jump_label_type type)``, - see: arch/x86/kernel/jump_label.c - -* ``struct jump_entry``, - see: arch/x86/include/asm/jump_label.h - - -5) Static keys / jump label analysis, results (x86_64): - - -As an example, let's add the following branch to 'getppid()', such that the -system call now looks like:: - - SYSCALL_DEFINE0(getppid) - { - int pid; - - + if (static_branch_unlikely(&key)) - + printk("I am the true branch\n"); - - rcu_read_lock(); - pid = task_tgid_vnr(rcu_dereference(current->real_parent)); - rcu_read_unlock(); - - return pid; - } - -The resulting instructions with jump labels generated by GCC is:: - - ffffffff81044290 : - ffffffff81044290: 55 push %rbp - ffffffff81044291: 48 89 e5 mov %rsp,%rbp - ffffffff81044294: e9 00 00 00 00 jmpq ffffffff81044299 - ffffffff81044299: 65 48 8b 04 25 c0 b6 mov %gs:0xb6c0,%rax - ffffffff810442a0: 00 00 - ffffffff810442a2: 48 8b 80 80 02 00 00 mov 0x280(%rax),%rax - ffffffff810442a9: 48 8b 80 b0 02 00 00 mov 0x2b0(%rax),%rax - ffffffff810442b0: 48 8b b8 e8 02 00 00 mov 0x2e8(%rax),%rdi - ffffffff810442b7: e8 f4 d9 00 00 callq ffffffff81051cb0 - ffffffff810442bc: 5d pop %rbp - ffffffff810442bd: 48 98 cltq - ffffffff810442bf: c3 retq - ffffffff810442c0: 48 c7 c7 e3 54 98 81 mov $0xffffffff819854e3,%rdi - ffffffff810442c7: 31 c0 xor %eax,%eax - ffffffff810442c9: e8 71 13 6d 00 callq ffffffff8171563f - ffffffff810442ce: eb c9 jmp ffffffff81044299 - -Without the jump label optimization it looks like:: - - ffffffff810441f0 : - ffffffff810441f0: 8b 05 8a 52 d8 00 mov 0xd8528a(%rip),%eax # ffffffff81dc9480 - ffffffff810441f6: 55 push %rbp - ffffffff810441f7: 48 89 e5 mov %rsp,%rbp - ffffffff810441fa: 85 c0 test %eax,%eax - ffffffff810441fc: 75 27 jne ffffffff81044225 - ffffffff810441fe: 65 48 8b 04 25 c0 b6 mov %gs:0xb6c0,%rax - ffffffff81044205: 00 00 - ffffffff81044207: 48 8b 80 80 02 00 00 mov 0x280(%rax),%rax - ffffffff8104420e: 48 8b 80 b0 02 00 00 mov 0x2b0(%rax),%rax - ffffffff81044215: 48 8b b8 e8 02 00 00 mov 0x2e8(%rax),%rdi - ffffffff8104421c: e8 2f da 00 00 callq ffffffff81051c50 - ffffffff81044221: 5d pop %rbp - ffffffff81044222: 48 98 cltq - ffffffff81044224: c3 retq - ffffffff81044225: 48 c7 c7 13 53 98 81 mov $0xffffffff81985313,%rdi - ffffffff8104422c: 31 c0 xor %eax,%eax - ffffffff8104422e: e8 60 0f 6d 00 callq ffffffff81715193 - ffffffff81044233: eb c9 jmp ffffffff810441fe - ffffffff81044235: 66 66 2e 0f 1f 84 00 data32 nopw %cs:0x0(%rax,%rax,1) - ffffffff8104423c: 00 00 00 00 - -Thus, the disable jump label case adds a 'mov', 'test' and 'jne' instruction -vs. the jump label case just has a 'no-op' or 'jmp 0'. (The jmp 0, is patched -to a 5 byte atomic no-op instruction at boot-time.) Thus, the disabled jump -label case adds:: - - 6 (mov) + 2 (test) + 2 (jne) = 10 - 5 (5 byte jump 0) = 5 addition bytes. 
- -If we then include the padding bytes, the jump label code saves, 16 total bytes -of instruction memory for this small function. In this case the non-jump label -function is 80 bytes long. Thus, we have saved 20% of the instruction -footprint. We can in fact improve this even further, since the 5-byte no-op -really can be a 2-byte no-op since we can reach the branch with a 2-byte jmp. -However, we have not yet implemented optimal no-op sizes (they are currently -hard-coded). - -Since there are a number of static key API uses in the scheduler paths, -'pipe-test' (also known as 'perf bench sched pipe') can be used to show the -performance improvement. Testing done on 3.3.0-rc2: - -jump label disabled:: - - Performance counter stats for 'bash -c /tmp/pipe-test' (50 runs): - - 855.700314 task-clock # 0.534 CPUs utilized ( +- 0.11% ) - 200,003 context-switches # 0.234 M/sec ( +- 0.00% ) - 0 CPU-migrations # 0.000 M/sec ( +- 39.58% ) - 487 page-faults # 0.001 M/sec ( +- 0.02% ) - 1,474,374,262 cycles # 1.723 GHz ( +- 0.17% ) - stalled-cycles-frontend - stalled-cycles-backend - 1,178,049,567 instructions # 0.80 insns per cycle ( +- 0.06% ) - 208,368,926 branches # 243.507 M/sec ( +- 0.06% ) - 5,569,188 branch-misses # 2.67% of all branches ( +- 0.54% ) - - 1.601607384 seconds time elapsed ( +- 0.07% ) - -jump label enabled:: - - Performance counter stats for 'bash -c /tmp/pipe-test' (50 runs): - - 841.043185 task-clock # 0.533 CPUs utilized ( +- 0.12% ) - 200,004 context-switches # 0.238 M/sec ( +- 0.00% ) - 0 CPU-migrations # 0.000 M/sec ( +- 40.87% ) - 487 page-faults # 0.001 M/sec ( +- 0.05% ) - 1,432,559,428 cycles # 1.703 GHz ( +- 0.18% ) - stalled-cycles-frontend - stalled-cycles-backend - 1,175,363,994 instructions # 0.82 insns per cycle ( +- 0.04% ) - 206,859,359 branches # 245.956 M/sec ( +- 0.04% ) - 4,884,119 branch-misses # 2.36% of all branches ( +- 0.85% ) - - 1.579384366 seconds time elapsed - -The percentage of saved branches is .7%, and we've saved 12% on -'branch-misses'. This is where we would expect to get the most savings, since -this optimization is about reducing the number of branches. In addition, we've -saved .2% on instructions, and 2.8% on cycles and 1.4% on elapsed time. diff --git a/Documentation/tee.txt b/Documentation/tee.txt deleted file mode 100644 index 350dd40cba45..000000000000 --- a/Documentation/tee.txt +++ /dev/null @@ -1,276 +0,0 @@ -============= -TEE subsystem -============= - -This document describes the TEE subsystem in Linux. - -A TEE (Trusted Execution Environment) is a trusted OS running in some -secure environment, for example, TrustZone on ARM CPUs, or a separate -secure co-processor etc. A TEE driver handles the details needed to -communicate with the TEE. - -This subsystem deals with: - -- Registration of TEE drivers - -- Managing shared memory between Linux and the TEE - -- Providing a generic API to the TEE - -The TEE interface -================= - -include/uapi/linux/tee.h defines the generic interface to a TEE. - -User space (the client) connects to the driver by opening /dev/tee[0-9]* or -/dev/teepriv[0-9]*. - -- TEE_IOC_SHM_ALLOC allocates shared memory and returns a file descriptor - which user space can mmap. When user space doesn't need the file - descriptor any more, it should be closed. When shared memory isn't needed - any longer it should be unmapped with munmap() to allow the reuse of - memory. - -- TEE_IOC_VERSION lets user space know which TEE this driver handles and - its capabilities. 
- -- TEE_IOC_OPEN_SESSION opens a new session to a Trusted Application. - -- TEE_IOC_INVOKE invokes a function in a Trusted Application. - -- TEE_IOC_CANCEL may cancel an ongoing TEE_IOC_OPEN_SESSION or TEE_IOC_INVOKE. - -- TEE_IOC_CLOSE_SESSION closes a session to a Trusted Application. - -There are two classes of clients, normal clients and supplicants. The latter is -a helper process for the TEE to access resources in Linux, for example file -system access. A normal client opens /dev/tee[0-9]* and a supplicant opens -/dev/teepriv[0-9]. - -Much of the communication between clients and the TEE is opaque to the -driver. The main job for the driver is to receive requests from the -clients, forward them to the TEE and send back the results. In the case of -supplicants the communication goes in the other direction, the TEE sends -requests to the supplicant which then sends back the result. - -The TEE kernel interface -======================== - -Kernel provides a TEE bus infrastructure where a Trusted Application is -represented as a device identified via Universally Unique Identifier (UUID) and -client drivers register a table of supported device UUIDs. - -TEE bus infrastructure registers following APIs: -- match(): iterates over the client driver UUID table to find a corresponding - match for device UUID. If a match is found, then this particular device is - probed via corresponding probe API registered by the client driver. This - process happens whenever a device or a client driver is registered with TEE - bus. -- uevent(): notifies user-space (udev) whenever a new device is registered on - TEE bus for auto-loading of modularized client drivers. - -TEE bus device enumeration is specific to underlying TEE implementation, so it -is left open for TEE drivers to provide corresponding implementation. - -Then TEE client driver can talk to a matched Trusted Application using APIs -listed in include/linux/tee_drv.h. - -TEE client driver example -------------------------- - -Suppose a TEE client driver needs to communicate with a Trusted Application -having UUID: ``ac6a4085-0e82-4c33-bf98-8eb8e118b6c2``, so driver registration -snippet would look like:: - - static const struct tee_client_device_id client_id_table[] = { - {UUID_INIT(0xac6a4085, 0x0e82, 0x4c33, - 0xbf, 0x98, 0x8e, 0xb8, 0xe1, 0x18, 0xb6, 0xc2)}, - {} - }; - - MODULE_DEVICE_TABLE(tee, client_id_table); - - static struct tee_client_driver client_driver = { - .id_table = client_id_table, - .driver = { - .name = DRIVER_NAME, - .bus = &tee_bus_type, - .probe = client_probe, - .remove = client_remove, - }, - }; - - static int __init client_init(void) - { - return driver_register(&client_driver.driver); - } - - static void __exit client_exit(void) - { - driver_unregister(&client_driver.driver); - } - - module_init(client_init); - module_exit(client_exit); - -OP-TEE driver -============= - -The OP-TEE driver handles OP-TEE [1] based TEEs. Currently it is only the ARM -TrustZone based OP-TEE solution that is supported. - -Lowest level of communication with OP-TEE builds on ARM SMC Calling -Convention (SMCCC) [2], which is the foundation for OP-TEE's SMC interface -[3] used internally by the driver. Stacked on top of that is OP-TEE Message -Protocol [4]. - -OP-TEE SMC interface provides the basic functions required by SMCCC and some -additional functions specific for OP-TEE. 
The most interesting functions are: - -- OPTEE_SMC_FUNCID_CALLS_UID (part of SMCCC) returns the version information - which is then returned by TEE_IOC_VERSION - -- OPTEE_SMC_CALL_GET_OS_UUID returns the particular OP-TEE implementation, used - to tell, for instance, a TrustZone OP-TEE apart from an OP-TEE running on a - separate secure co-processor. - -- OPTEE_SMC_CALL_WITH_ARG drives the OP-TEE message protocol - -- OPTEE_SMC_GET_SHM_CONFIG lets the driver and OP-TEE agree on which memory - range to used for shared memory between Linux and OP-TEE. - -The GlobalPlatform TEE Client API [5] is implemented on top of the generic -TEE API. - -Picture of the relationship between the different components in the -OP-TEE architecture:: - - User space Kernel Secure world - ~~~~~~~~~~ ~~~~~~ ~~~~~~~~~~~~ - +--------+ +-------------+ - | Client | | Trusted | - +--------+ | Application | - /\ +-------------+ - || +----------+ /\ - || |tee- | || - || |supplicant| \/ - || +----------+ +-------------+ - \/ /\ | TEE Internal| - +-------+ || | API | - + TEE | || +--------+--------+ +-------------+ - | Client| || | TEE | OP-TEE | | OP-TEE | - | API | \/ | subsys | driver | | Trusted OS | - +-------+----------------+----+-------+----+-----------+-------------+ - | Generic TEE API | | OP-TEE MSG | - | IOCTL (TEE_IOC_*) | | SMCCC (OPTEE_SMC_CALL_*) | - +-----------------------------+ +------------------------------+ - -RPC (Remote Procedure Call) are requests from secure world to kernel driver -or tee-supplicant. An RPC is identified by a special range of SMCCC return -values from OPTEE_SMC_CALL_WITH_ARG. RPC messages which are intended for the -kernel are handled by the kernel driver. Other RPC messages will be forwarded to -tee-supplicant without further involvement of the driver, except switching -shared memory buffer representation. - -OP-TEE device enumeration -------------------------- - -OP-TEE provides a pseudo Trusted Application: drivers/tee/optee/device.c in -order to support device enumeration. In other words, OP-TEE driver invokes this -application to retrieve a list of Trusted Applications which can be registered -as devices on the TEE bus. - -AMD-TEE driver -============== - -The AMD-TEE driver handles the communication with AMD's TEE environment. The -TEE environment is provided by AMD Secure Processor. - -The AMD Secure Processor (formerly called Platform Security Processor or PSP) -is a dedicated processor that features ARM TrustZone technology, along with a -software-based Trusted Execution Environment (TEE) designed to enable -third-party Trusted Applications. This feature is currently enabled only for -APUs. 
- -The following picture shows a high level overview of AMD-TEE:: - - | - x86 | - | - User space (Kernel space) | AMD Secure Processor (PSP) - ~~~~~~~~~~ ~~~~~~~~~~~~~~ | ~~~~~~~~~~~~~~~~~~~~~~~~~~ - | - +--------+ | +-------------+ - | Client | | | Trusted | - +--------+ | | Application | - /\ | +-------------+ - || | /\ - || | || - || | \/ - || | +----------+ - || | | TEE | - || | | Internal | - \/ | | API | - +---------+ +-----------+---------+ +----------+ - | TEE | | TEE | AMD-TEE | | AMD-TEE | - | Client | | subsystem | driver | | Trusted | - | API | | | | | OS | - +---------+-----------+----+------+---------+---------+----------+ - | Generic TEE API | | ASP | Mailbox | - | IOCTL (TEE_IOC_*) | | driver | Register Protocol | - +--------------------------+ +---------+--------------------+ - -At the lowest level (in x86), the AMD Secure Processor (ASP) driver uses the -CPU to PSP mailbox regsister to submit commands to the PSP. The format of the -command buffer is opaque to the ASP driver. It's role is to submit commands to -the secure processor and return results to AMD-TEE driver. The interface -between AMD-TEE driver and AMD Secure Processor driver can be found in [6]. - -The AMD-TEE driver packages the command buffer payload for processing in TEE. -The command buffer format for the different TEE commands can be found in [7]. - -The TEE commands supported by AMD-TEE Trusted OS are: -* TEE_CMD_ID_LOAD_TA - loads a Trusted Application (TA) binary into - TEE environment. -* TEE_CMD_ID_UNLOAD_TA - unloads TA binary from TEE environment. -* TEE_CMD_ID_OPEN_SESSION - opens a session with a loaded TA. -* TEE_CMD_ID_CLOSE_SESSION - closes session with loaded TA -* TEE_CMD_ID_INVOKE_CMD - invokes a command with loaded TA -* TEE_CMD_ID_MAP_SHARED_MEM - maps shared memory -* TEE_CMD_ID_UNMAP_SHARED_MEM - unmaps shared memory - -AMD-TEE Trusted OS is the firmware running on AMD Secure Processor. - -The AMD-TEE driver registers itself with TEE subsystem and implements the -following driver function callbacks: - -* get_version - returns the driver implementation id and capability. -* open - sets up the driver context data structure. -* release - frees up driver resources. -* open_session - loads the TA binary and opens session with loaded TA. -* close_session - closes session with loaded TA and unloads it. -* invoke_func - invokes a command with loaded TA. - -cancel_req driver callback is not supported by AMD-TEE. - -The GlobalPlatform TEE Client API [5] can be used by the user space (client) to -talk to AMD's TEE. AMD's TEE provides a secure environment for loading, opening -a session, invoking commands and clossing session with TA. - -References -========== - -[1] https://github.com/OP-TEE/optee_os - -[2] http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html - -[3] drivers/tee/optee/optee_smc.h - -[4] drivers/tee/optee/optee_msg.h - -[5] http://www.globalplatform.org/specificationsdevice.asp look for - "TEE Client API Specification v1.0" and click download. - -[6] include/linux/psp-tee.h - -[7] drivers/tee/amdtee/amdtee_if.h diff --git a/Documentation/trace/kprobetrace.rst b/Documentation/trace/kprobetrace.rst index cc4c5fc313df..c1709165c553 100644 --- a/Documentation/trace/kprobetrace.rst +++ b/Documentation/trace/kprobetrace.rst @@ -40,7 +40,7 @@ Synopsis of kprobe_events MEMADDR : Address where the probe is inserted. 
MAXACTIVE : Maximum number of instances of the specified function that can be probed simultaneously, or 0 for the default value - as defined in Documentation/kprobes.txt section 1.3.1. + as defined in Documentation/staging/kprobes.rst section 1.3.1. FETCHARGS : Arguments. Each probe can have up to 128 args. %REG : Fetch register REG diff --git a/Documentation/xz.txt b/Documentation/xz.txt deleted file mode 100644 index b2f5ff12a161..000000000000 --- a/Documentation/xz.txt +++ /dev/null @@ -1,127 +0,0 @@ -============================ -XZ data compression in Linux -============================ - -Introduction -============ - -XZ is a general purpose data compression format with high compression -ratio and relatively fast decompression. The primary compression -algorithm (filter) is LZMA2. Additional filters can be used to improve -compression ratio even further. E.g. Branch/Call/Jump (BCJ) filters -improve compression ratio of executable data. - -The XZ decompressor in Linux is called XZ Embedded. It supports -the LZMA2 filter and optionally also BCJ filters. CRC32 is supported -for integrity checking. The home page of XZ Embedded is at -, where you can find the -latest version and also information about using the code outside -the Linux kernel. - -For userspace, XZ Utils provide a zlib-like compression library -and a gzip-like command line tool. XZ Utils can be downloaded from -. - -XZ related components in the kernel -=================================== - -The xz_dec module provides XZ decompressor with single-call (buffer -to buffer) and multi-call (stateful) APIs. The usage of the xz_dec -module is documented in include/linux/xz.h. - -The xz_dec_test module is for testing xz_dec. xz_dec_test is not -useful unless you are hacking the XZ decompressor. xz_dec_test -allocates a char device major dynamically to which one can write -.xz files from userspace. The decompressed output is thrown away. -Keep an eye on dmesg to see diagnostics printed by xz_dec_test. -See the xz_dec_test source code for the details. - -For decompressing the kernel image, initramfs, and initrd, there -is a wrapper function in lib/decompress_unxz.c. Its API is the -same as in other decompress_*.c files, which is defined in -include/linux/decompress/generic.h. - -scripts/xz_wrap.sh is a wrapper for the xz command line tool found -from XZ Utils. The wrapper sets compression options to values suitable -for compressing the kernel image. - -For kernel makefiles, two commands are provided for use with -$(call if_needed). The kernel image should be compressed with -$(call if_needed,xzkern) which will use a BCJ filter and a big LZMA2 -dictionary. It will also append a four-byte trailer containing the -uncompressed size of the file, which is needed by the boot code. -Other things should be compressed with $(call if_needed,xzmisc) -which will use no BCJ filter and 1 MiB LZMA2 dictionary. - -Notes on compression options -============================ - -Since the XZ Embedded supports only streams with no integrity check or -CRC32, make sure that you don't use some other integrity check type -when encoding files that are supposed to be decoded by the kernel. With -liblzma, you need to use either LZMA_CHECK_NONE or LZMA_CHECK_CRC32 -when encoding. With the xz command line tool, use --check=none or ---check=crc32. - -Using CRC32 is strongly recommended unless there is some other layer -which will verify the integrity of the uncompressed data anyway. -Double checking the integrity would probably be waste of CPU cycles. 
-Note that the headers will always have a CRC32 which will be validated -by the decoder; you can only change the integrity check type (or -disable it) for the actual uncompressed data. - -In userspace, LZMA2 is typically used with dictionary sizes of several -megabytes. The decoder needs to have the dictionary in RAM, thus big -dictionaries cannot be used for files that are intended to be decoded -by the kernel. 1 MiB is probably the maximum reasonable dictionary -size for in-kernel use (maybe more is OK for initramfs). The presets -in XZ Utils may not be optimal when creating files for the kernel, -so don't hesitate to use custom settings. Example:: - - xz --check=crc32 --lzma2=dict=512KiB inputfile - -An exception to above dictionary size limitation is when the decoder -is used in single-call mode. Decompressing the kernel itself is an -example of this situation. In single-call mode, the memory usage -doesn't depend on the dictionary size, and it is perfectly fine to -use a big dictionary: for maximum compression, the dictionary should -be at least as big as the uncompressed data itself. - -Future plans -============ - -Creating a limited XZ encoder may be considered if people think it is -useful. LZMA2 is slower to compress than e.g. Deflate or LZO even at -the fastest settings, so it isn't clear if LZMA2 encoder is wanted -into the kernel. - -Support for limited random-access reading is planned for the -decompression code. I don't know if it could have any use in the -kernel, but I know that it would be useful in some embedded projects -outside the Linux kernel. - -Conformance to the .xz file format specification -================================================ - -There are a couple of corner cases where things have been simplified -at expense of detecting errors as early as possible. These should not -matter in practice all, since they don't cause security issues. But -it is good to know this if testing the code e.g. with the test files -from XZ Utils. - -Reporting bugs -============== - -Before reporting a bug, please check that it's not fixed already -at upstream. See to get the -latest code. - -Report bugs to or visit #tukaani on -Freenode and talk to Larhzu. I don't actively read LKML or other -kernel-related mailing lists, so if there's something I should know, -you should email to me personally or use IRC. - -Don't bother Igor Pavlov with questions about the XZ implementation -in the kernel or about XZ Utils. While these two implementations -include essential code that is directly based on Igor Pavlov's code, -these implementations aren't maintained nor supported by him. diff --git a/MAINTAINERS b/MAINTAINERS index f66fc236a325..ad90b3992887 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9597,7 +9597,7 @@ M: Anil S Keshavamurthy M: "David S. 
Miller" M: Masami Hiramatsu S: Maintained -F: Documentation/kprobes.txt +F: Documentation/staging/kprobes.rst F: include/asm-generic/kprobes.h F: include/linux/kprobes.h F: kernel/kprobes.c @@ -14500,7 +14500,7 @@ S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/andersson/remoteproc.git rproc-next F: Documentation/ABI/testing/sysfs-class-remoteproc F: Documentation/devicetree/bindings/remoteproc/ -F: Documentation/remoteproc.txt +F: Documentation/staging/remoteproc.rst F: drivers/remoteproc/ F: include/linux/remoteproc.h F: include/linux/remoteproc/ @@ -14512,7 +14512,7 @@ L: linux-remoteproc@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/andersson/remoteproc.git rpmsg-next F: Documentation/ABI/testing/sysfs-bus-rpmsg -F: Documentation/rpmsg.txt +F: Documentation/staging/rpmsg.rst F: drivers/rpmsg/ F: include/linux/rpmsg.h F: include/linux/rpmsg/ @@ -16761,7 +16761,7 @@ TEE SUBSYSTEM M: Jens Wiklander L: tee-dev@lists.linaro.org S: Maintained -F: Documentation/tee.txt +F: Documentation/staging/tee.rst F: drivers/tee/ F: include/linux/tee_drv.h F: include/uapi/linux/tee.h diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 3526c0aee954..32809624d422 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -68,7 +68,7 @@ * Lacking toolchain and or architecture support, static keys fall back to a * simple conditional branch. * - * Additional babbling in: Documentation/static-keys.txt + * Additional babbling in: Documentation/staging/static-keys.rst */ #ifndef __ASSEMBLY__ diff --git a/lib/crc32.c b/lib/crc32.c index 4a20455d1f61..35a03d03f973 100644 --- a/lib/crc32.c +++ b/lib/crc32.c @@ -24,7 +24,7 @@ * Version 2. See the file COPYING for more details. */ -/* see: Documentation/crc32.txt for a description of algorithms */ +/* see: Documentation/staging/crc32.rst for a description of algorithms */ #include #include diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c index 2717c7963acd..7892a40cf765 100644 --- a/lib/lzo/lzo1x_decompress_safe.c +++ b/lib/lzo/lzo1x_decompress_safe.c @@ -32,7 +32,7 @@ * depending on the base count. Since the base count is taken from a u8 * and a few bits, it is safe to assume that it will always be lower than * or equal to 2*255, thus we can always prevent any overflow by accepting - * two less 255 steps. See Documentation/lzo.txt for more information. + * two less 255 steps. See Documentation/staging/lzo.rst for more information. */ #define MAX_255_COUNT ((((size_t)~0) / 255) - 2) diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig index 22528743d4ce..5cb50245a878 100644 --- a/lib/xz/Kconfig +++ b/lib/xz/Kconfig @@ -5,7 +5,7 @@ config XZ_DEC help LZMA2 compression algorithm and BCJ filters are supported using the .xz file format as the container. For integrity checking, - CRC32 is supported. See Documentation/xz.txt for more information. + CRC32 is supported. See Documentation/staging/xz.rst for more information. if XZ_DEC diff --git a/samples/kprobes/kprobe_example.c b/samples/kprobes/kprobe_example.c index 501911d1b327..240f2435ce6f 100644 --- a/samples/kprobes/kprobe_example.c +++ b/samples/kprobes/kprobe_example.c @@ -5,7 +5,7 @@ * stack trace and selected registers when _do_fork() is called. 
* * For more information on theory of operation of kprobes, see - * Documentation/kprobes.txt + * Documentation/staging/kprobes.rst * * You will see the trace data in /var/log/messages and on the console * whenever _do_fork() is invoked to create a new process. diff --git a/samples/kprobes/kretprobe_example.c b/samples/kprobes/kretprobe_example.c index 013e8e6ebae9..78a2da6fb3cd 100644 --- a/samples/kprobes/kretprobe_example.c +++ b/samples/kprobes/kretprobe_example.c @@ -11,7 +11,7 @@ * If no func_name is specified, _do_fork is instrumented * * For more information on theory of operation of kretprobes, see - * Documentation/kprobes.txt + * Documentation/staging/kprobes.rst * * Build and insert the kernel module as done in the kprobe example. * You will see the trace data in /var/log/messages and on the console -- cgit v1.2.3 From cc7a21b6fbd945f8d8f61422ccd27203c1fafeb7 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 19 Jun 2020 12:02:59 -0700 Subject: ipv6: icmp6: avoid indirect call for icmpv6_send() If IPv6 is builtin, we do not need an expensive indirect call to reach icmp6_send(). v2: put inline keyword before the type to avoid sparse warnings. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/icmpv6.h | 22 +++++++++++++++++++++- net/ipv6/icmp.c | 5 +++-- net/ipv6/ip6_icmp.c | 10 +++++----- 3 files changed, 29 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h index 33d379602314..1b3371ae8193 100644 --- a/include/linux/icmpv6.h +++ b/include/linux/icmpv6.h @@ -13,12 +13,32 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb) #include #if IS_ENABLED(CONFIG_IPV6) -extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info); typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info, const struct in6_addr *force_saddr); +#if IS_BUILTIN(CONFIG_IPV6) +void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct in6_addr *force_saddr); +static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) +{ + icmp6_send(skb, type, code, info, NULL); +} +static inline int inet6_register_icmp_sender(ip6_icmp_send_t *fn) +{ + BUILD_BUG_ON(fn != icmp6_send); + return 0; +} +static inline int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn) +{ + BUILD_BUG_ON(fn != icmp6_send); + return 0; +} +#else +extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info); extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn); extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn); +#endif + int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type, unsigned int data_len); diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index fc5000370030..91e0f2fd2523 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -439,8 +439,8 @@ static int icmp6_iif(const struct sk_buff *skb) /* * Send an ICMP message in response to a packet in error */ -static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, - const struct in6_addr *force_saddr) +void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct in6_addr *force_saddr) { struct inet6_dev *idev = NULL; struct ipv6hdr *hdr = ipv6_hdr(skb); @@ -625,6 +625,7 @@ out: out_bh_enable: local_bh_enable(); } +EXPORT_SYMBOL(icmp6_send); /* Slightly more convenient version of icmp6_send. 
*/ diff --git a/net/ipv6/ip6_icmp.c b/net/ipv6/ip6_icmp.c index e0086758b6ee..70c8c2f36c98 100644 --- a/net/ipv6/ip6_icmp.c +++ b/net/ipv6/ip6_icmp.c @@ -9,6 +9,8 @@ #if IS_ENABLED(CONFIG_IPV6) +#if !IS_BUILTIN(CONFIG_IPV6) + static ip6_icmp_send_t __rcu *ip6_icmp_send; int inet6_register_icmp_sender(ip6_icmp_send_t *fn) @@ -37,14 +39,12 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) rcu_read_lock(); send = rcu_dereference(ip6_icmp_send); - - if (!send) - goto out; - send(skb, type, code, info, NULL); -out: + if (send) + send(skb, type, code, info, NULL); rcu_read_unlock(); } EXPORT_SYMBOL(icmpv6_send); +#endif #if IS_ENABLED(CONFIG_NF_NAT) #include -- cgit v1.2.3 From c7f03eea07682639ef320aab348b706c330941dd Mon Sep 17 00:00:00 2001 From: Álvaro Fernández Rojas Date: Mon, 15 Jun 2020 11:02:24 +0200 Subject: mips: bmips: add BCM3368 clock definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add header with BCM3368 definitions in order to be able to include it from device tree files. Signed-off-by: Álvaro Fernández Rojas Link: https://lore.kernel.org/r/20200615090231.2932696-2-noltari@gmail.com Acked-by: Florian Fainelli Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/bcm3368-clock.h | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 include/dt-bindings/clock/bcm3368-clock.h (limited to 'include') diff --git a/include/dt-bindings/clock/bcm3368-clock.h b/include/dt-bindings/clock/bcm3368-clock.h new file mode 100644 index 000000000000..74a7382f77b8 --- /dev/null +++ b/include/dt-bindings/clock/bcm3368-clock.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef __DT_BINDINGS_CLOCK_BCM3368_H +#define __DT_BINDINGS_CLOCK_BCM3368_H + +#define BCM3368_CLK_MAC 3 +#define BCM3368_CLK_TC 5 +#define BCM3368_CLK_US_TOP 6 +#define BCM3368_CLK_DS_TOP 7 +#define BCM3368_CLK_ACM 8 +#define BCM3368_CLK_SPI 9 +#define BCM3368_CLK_USBS 10 +#define BCM3368_CLK_BMU 11 +#define BCM3368_CLK_PCM 12 +#define BCM3368_CLK_NTP 13 +#define BCM3368_CLK_ACP_B 14 +#define BCM3368_CLK_ACP_A 15 +#define BCM3368_CLK_EMUSB 17 +#define BCM3368_CLK_ENET0 18 +#define BCM3368_CLK_ENET1 19 +#define BCM3368_CLK_USBSU 20 +#define BCM3368_CLK_EPHY 21 + +#endif /* __DT_BINDINGS_CLOCK_BCM3368_H */ -- cgit v1.2.3 From 020c89c5a981cb6d0424aadab8ae067a3b6bd8e6 Mon Sep 17 00:00:00 2001 From: Álvaro Fernández Rojas Date: Mon, 15 Jun 2020 11:02:25 +0200 Subject: mips: bmips: add BCM6318 clock definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add header with BCM6318 definitions in order to be able to include it from device tree files. 
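As a rough illustration of how such a shared binding header keeps the device tree and the clock driver in agreement, here is a minimal, hypothetical provider-side sketch. It is not part of this series: the function, register layout, parent clock name, and the assumption that the binding constant doubles as the gate bit index are all invented for the example; only the BCM6318_CLK_* constants and the common clk framework helpers are real.

#include <dt-bindings/clock/bcm6318-clock.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(bcm6318_gate_lock);

/* Hypothetical sketch: register gates under the binding constants so that a
 * consumer node using <&periph_clk BCM6318_CLK_HSSPI> resolves to the clock
 * registered under the same index here.
 */
static int bcm6318_register_clocks(struct device *dev, struct device_node *np,
				   void __iomem *gate_reg)
{
	struct clk_hw_onecell_data *onecell;
	struct clk_hw *hw;

	onecell = devm_kzalloc(dev,
			       struct_size(onecell, hws, BCM6318_CLK_QPROC + 1),
			       GFP_KERNEL);
	if (!onecell)
		return -ENOMEM;
	onecell->num = BCM6318_CLK_QPROC + 1;

	/* Only one gate shown; reusing the binding ID as the enable-bit index
	 * is an assumption made purely for the example.
	 */
	hw = clk_hw_register_gate(dev, "hsspi", "periph", 0, gate_reg,
				  BCM6318_CLK_HSSPI, 0, &bcm6318_gate_lock);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	onecell->hws[BCM6318_CLK_HSSPI] = hw;

	/* ...remaining gates would be registered the same way... */

	return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, onecell);
}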
Signed-off-by: Álvaro Fernández Rojas Link: https://lore.kernel.org/r/20200615090231.2932696-3-noltari@gmail.com Acked-by: Florian Fainelli Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/bcm6318-clock.h | 42 +++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 include/dt-bindings/clock/bcm6318-clock.h (limited to 'include') diff --git a/include/dt-bindings/clock/bcm6318-clock.h b/include/dt-bindings/clock/bcm6318-clock.h new file mode 100644 index 000000000000..c4417f8983ab --- /dev/null +++ b/include/dt-bindings/clock/bcm6318-clock.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef __DT_BINDINGS_CLOCK_BCM6318_H +#define __DT_BINDINGS_CLOCK_BCM6318_H + +#define BCM6318_CLK_ADSL_ASB 0 +#define BCM6318_CLK_USB_ASB 1 +#define BCM6318_CLK_MIPS_ASB 2 +#define BCM6318_CLK_PCIE_ASB 3 +#define BCM6318_CLK_PHYMIPS_ASB 4 +#define BCM6318_CLK_ROBOSW_ASB 5 +#define BCM6318_CLK_SAR_ASB 6 +#define BCM6318_CLK_SDR_ASB 7 +#define BCM6318_CLK_SWREG_ASB 8 +#define BCM6318_CLK_PERIPH_ASB 9 +#define BCM6318_CLK_CPUBUS160 10 +#define BCM6318_CLK_ADSL 11 +#define BCM6318_CLK_SAR125 12 +#define BCM6318_CLK_MIPS 13 +#define BCM6318_CLK_PCIE 14 +#define BCM6318_CLK_ROBOSW250 16 +#define BCM6318_CLK_ROBOSW025 17 +#define BCM6318_CLK_SDR 19 +#define BCM6318_CLK_USBD 20 +#define BCM6318_CLK_HSSPI 25 +#define BCM6318_CLK_PCIE25 27 +#define BCM6318_CLK_PHYMIPS 28 +#define BCM6318_CLK_AFE 29 +#define BCM6318_CLK_QPROC 30 + +#define BCM6318_UCLK_ADSL 0 +#define BCM6318_UCLK_ARB 1 +#define BCM6318_UCLK_MIPS 2 +#define BCM6318_UCLK_PCIE 3 +#define BCM6318_UCLK_PERIPH 4 +#define BCM6318_UCLK_PHYMIPS 5 +#define BCM6318_UCLK_ROBOSW 6 +#define BCM6318_UCLK_SAR 7 +#define BCM6318_UCLK_SDR 8 +#define BCM6318_UCLK_USB 9 + +#endif /* __DT_BINDINGS_CLOCK_BCM6318_H */ -- cgit v1.2.3 From 92cd8bb27a692d93ba7442ec123e96528f5e992c Mon Sep 17 00:00:00 2001 From: Álvaro Fernández Rojas Date: Mon, 15 Jun 2020 11:02:26 +0200 Subject: mips: bmips: add BCM6328 clock definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add header with BCM6328 definitions in order to be able to include it from device tree files. 
Signed-off-by: Álvaro Fernández Rojas Link: https://lore.kernel.org/r/20200615090231.2932696-4-noltari@gmail.com Acked-by: Florian Fainelli Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/bcm6328-clock.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 include/dt-bindings/clock/bcm6328-clock.h (limited to 'include') diff --git a/include/dt-bindings/clock/bcm6328-clock.h b/include/dt-bindings/clock/bcm6328-clock.h new file mode 100644 index 000000000000..1f6a3103f3dc --- /dev/null +++ b/include/dt-bindings/clock/bcm6328-clock.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef __DT_BINDINGS_CLOCK_BCM6328_H +#define __DT_BINDINGS_CLOCK_BCM6328_H + +#define BCM6328_CLK_PHYMIPS 0 +#define BCM6328_CLK_ADSL_QPROC 1 +#define BCM6328_CLK_ADSL_AFE 2 +#define BCM6328_CLK_ADSL 3 +#define BCM6328_CLK_MIPS 4 +#define BCM6328_CLK_SAR 5 +#define BCM6328_CLK_PCM 6 +#define BCM6328_CLK_USBD 7 +#define BCM6328_CLK_USBH 8 +#define BCM6328_CLK_HSSPI 9 +#define BCM6328_CLK_PCIE 10 +#define BCM6328_CLK_ROBOSW 11 + +#endif /* __DT_BINDINGS_CLOCK_BCM6328_H */ -- cgit v1.2.3 From d3499bda4e176de6853c24e5243f3906d9390d54 Mon Sep 17 00:00:00 2001 From: Álvaro Fernández Rojas Date: Mon, 15 Jun 2020 11:02:27 +0200 Subject: mips: bmips: add BCM6358 clock definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add header with BCM6358 definitions in order to be able to include it from device tree files. Signed-off-by: Álvaro Fernández Rojas Link: https://lore.kernel.org/r/20200615090231.2932696-5-noltari@gmail.com Acked-by: Florian Fainelli Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/bcm6358-clock.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 include/dt-bindings/clock/bcm6358-clock.h (limited to 'include') diff --git a/include/dt-bindings/clock/bcm6358-clock.h b/include/dt-bindings/clock/bcm6358-clock.h new file mode 100644 index 000000000000..980c9cac4765 --- /dev/null +++ b/include/dt-bindings/clock/bcm6358-clock.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef __DT_BINDINGS_CLOCK_BCM6358_H +#define __DT_BINDINGS_CLOCK_BCM6358_H + +#define BCM6358_CLK_ENET 4 +#define BCM6358_CLK_ADSLPHY 5 +#define BCM6358_CLK_PCM 8 +#define BCM6358_CLK_SPI 9 +#define BCM6358_CLK_USBS 10 +#define BCM6358_CLK_SAR 11 +#define BCM6358_CLK_EMUSB 17 +#define BCM6358_CLK_ENET0 18 +#define BCM6358_CLK_ENET1 19 +#define BCM6358_CLK_USBSU 20 +#define BCM6358_CLK_EPHY 21 + +#endif /* __DT_BINDINGS_CLOCK_BCM6358_H */ -- cgit v1.2.3 From fb8fb3f13f86fda0af72c02691333fdba5164c20 Mon Sep 17 00:00:00 2001 From: Álvaro Fernández Rojas Date: Mon, 15 Jun 2020 11:02:28 +0200 Subject: mips: bmips: add BCM6362 clock definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add header with BCM6362 definitions in order to be able to include it from device tree files. 
Signed-off-by: Álvaro Fernández Rojas Link: https://lore.kernel.org/r/20200615090231.2932696-6-noltari@gmail.com Acked-by: Florian Fainelli Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/bcm6362-clock.h | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 include/dt-bindings/clock/bcm6362-clock.h (limited to 'include') diff --git a/include/dt-bindings/clock/bcm6362-clock.h b/include/dt-bindings/clock/bcm6362-clock.h new file mode 100644 index 000000000000..17655cd5bf25 --- /dev/null +++ b/include/dt-bindings/clock/bcm6362-clock.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef __DT_BINDINGS_CLOCK_BCM6362_H +#define __DT_BINDINGS_CLOCK_BCM6362_H + +#define BCM6362_CLK_ADSL_QPROC 1 +#define BCM6362_CLK_ADSL_AFE 2 +#define BCM6362_CLK_ADSL 3 +#define BCM6362_CLK_MIPS 4 +#define BCM6362_CLK_WLAN_OCP 5 +#define BCM6362_CLK_SWPKT_USB 7 +#define BCM6362_CLK_SWPKT_SAR 8 +#define BCM6362_CLK_SAR 9 +#define BCM6362_CLK_ROBOSW 10 +#define BCM6362_CLK_PCM 11 +#define BCM6362_CLK_USBD 12 +#define BCM6362_CLK_USBH 13 +#define BCM6362_CLK_IPSEC 14 +#define BCM6362_CLK_SPI 15 +#define BCM6362_CLK_HSSPI 16 +#define BCM6362_CLK_PCIE 17 +#define BCM6362_CLK_FAP 18 +#define BCM6362_CLK_PHYMIPS 19 +#define BCM6362_CLK_NAND 20 + +#endif /* __DT_BINDINGS_CLOCK_BCM6362_H */ -- cgit v1.2.3 From ad31e793f246d5276bc24829cb3d1ca95c3c92ff Mon Sep 17 00:00:00 2001 From: Álvaro Fernández Rojas Date: Mon, 15 Jun 2020 11:02:29 +0200 Subject: mips: bmips: add BCM6368 clock definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add header with BCM6368 definitions in order to be able to include it from device tree files. Signed-off-by: Álvaro Fernández Rojas Link: https://lore.kernel.org/r/20200615090231.2932696-7-noltari@gmail.com Acked-by: Florian Fainelli Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/bcm6368-clock.h | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 include/dt-bindings/clock/bcm6368-clock.h (limited to 'include') diff --git a/include/dt-bindings/clock/bcm6368-clock.h b/include/dt-bindings/clock/bcm6368-clock.h new file mode 100644 index 000000000000..f161d5333883 --- /dev/null +++ b/include/dt-bindings/clock/bcm6368-clock.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef __DT_BINDINGS_CLOCK_BCM6368_H +#define __DT_BINDINGS_CLOCK_BCM6368_H + +#define BCM6368_CLK_VDSL_QPROC 2 +#define BCM6368_CLK_VDSL_AFE 3 +#define BCM6368_CLK_VDSL_BONDING 4 +#define BCM6368_CLK_VDSL 5 +#define BCM6368_CLK_PHYMIPS 6 +#define BCM6368_CLK_SWPKT_USB 7 +#define BCM6368_CLK_SWPKT_SAR 8 +#define BCM6368_CLK_SPI 9 +#define BCM6368_CLK_USBD 10 +#define BCM6368_CLK_SAR 11 +#define BCM6368_CLK_ROBOSW 12 +#define BCM6368_CLK_UTOPIA 13 +#define BCM6368_CLK_PCM 14 +#define BCM6368_CLK_USBH 15 +#define BCM6368_CLK_DIS_GLESS 16 +#define BCM6368_CLK_NAND 17 +#define BCM6368_CLK_IPSEC 18 + +#endif /* __DT_BINDINGS_CLOCK_BCM6368_H */ -- cgit v1.2.3 From f3cd8c96a97ca970a116af092555778f792d0abf Mon Sep 17 00:00:00 2001 From: Álvaro Fernández Rojas Date: Mon, 15 Jun 2020 11:02:30 +0200 Subject: mips: bmips: add BCM63268 clock definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add header with BCM63268 definitions in order to be able to include it from device tree files. 
Signed-off-by: Álvaro Fernández Rojas Link: https://lore.kernel.org/r/20200615090231.2932696-8-noltari@gmail.com Acked-by: Florian Fainelli Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/bcm63268-clock.h | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 include/dt-bindings/clock/bcm63268-clock.h (limited to 'include') diff --git a/include/dt-bindings/clock/bcm63268-clock.h b/include/dt-bindings/clock/bcm63268-clock.h new file mode 100644 index 000000000000..da23e691d359 --- /dev/null +++ b/include/dt-bindings/clock/bcm63268-clock.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef __DT_BINDINGS_CLOCK_BCM63268_H +#define __DT_BINDINGS_CLOCK_BCM63268_H + +#define BCM63268_CLK_DIS_GLESS 0 +#define BCM63268_CLK_VDSL_QPROC 1 +#define BCM63268_CLK_VDSL_AFE 2 +#define BCM63268_CLK_VDSL 3 +#define BCM63268_CLK_MIPS 4 +#define BCM63268_CLK_WLAN_OCP 5 +#define BCM63268_CLK_DECT 6 +#define BCM63268_CLK_FAP0 7 +#define BCM63268_CLK_FAP1 8 +#define BCM63268_CLK_SAR 9 +#define BCM63268_CLK_ROBOSW 10 +#define BCM63268_CLK_PCM 11 +#define BCM63268_CLK_USBD 12 +#define BCM63268_CLK_USBH 13 +#define BCM63268_CLK_IPSEC 14 +#define BCM63268_CLK_SPI 15 +#define BCM63268_CLK_HSSPI 16 +#define BCM63268_CLK_PCIE 17 +#define BCM63268_CLK_PHYMIPS 18 +#define BCM63268_CLK_GMAC 19 +#define BCM63268_CLK_NAND 20 +#define BCM63268_CLK_TBUS 27 +#define BCM63268_CLK_ROBOSW250 31 + +#endif /* __DT_BINDINGS_CLOCK_BCM63268_H */ -- cgit v1.2.3 From c2710fdf935bb1286e8eb6a6b44991bab1fe87af Mon Sep 17 00:00:00 2001 From: Dinh Nguyen Date: Tue, 16 Jun 2020 15:24:15 -0500 Subject: dt-bindings: agilex: add NAND_X_CLK and NAND_ECC_CLK Add the NAND_X_CLK and NAND_ECC_CLK clocks. Signed-off-by: Dinh Nguyen Link: https://lore.kernel.org/r/20200616202417.14376-1-dinguyen@kernel.org Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/agilex-clock.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/dt-bindings/clock/agilex-clock.h b/include/dt-bindings/clock/agilex-clock.h index f19cf8ccbdd2..06feca07e08e 100644 --- a/include/dt-bindings/clock/agilex-clock.h +++ b/include/dt-bindings/clock/agilex-clock.h @@ -65,6 +65,8 @@ #define AGILEX_SDMMC_CLK 50 #define AGILEX_SPI_M_CLK 51 #define AGILEX_USB_CLK 52 -#define AGILEX_NUM_CLKS 53 +#define AGILEX_NAND_X_CLK 53 +#define AGILEX_NAND_ECC_CLK 54 +#define AGILEX_NUM_CLKS 55 #endif /* __AGILEX_CLOCK_H */ -- cgit v1.2.3 From c746053d275c8b6ff1d713addabf049c9c9a58fc Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 18 Jun 2020 14:45:58 +0100 Subject: net: phy: add support for probing MMDs >= 8 for devices-in-package Add support for probing MMDs above 7 for a valid devices-in-package specifier, but only probe the vendor MMDs for this if they also report that there the device is present in status register 2. This avoids issues where the MMD is implemented, but does not provide IEEE 802.3 compliant registers (such as the MV88X3310 PHY.) Reviewed-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. 
Miller --- drivers/net/phy/phy_device.c | 40 ++++++++++++++++++++++++++++++++++++++-- include/linux/phy.h | 2 ++ 2 files changed, 40 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 09096c3ceb86..8d9af2772853 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -661,6 +661,28 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id, } EXPORT_SYMBOL(phy_device_create); +/* phy_c45_probe_present - checks to see if a MMD is present in the package + * @bus: the target MII bus + * @prtad: PHY package address on the MII bus + * @devad: PHY device (MMD) address + * + * Read the MDIO_STAT2 register, and check whether a device is responding + * at this address. + * + * Returns: negative error number on bus access error, zero if no device + * is responding, or positive if a device is present. + */ +static int phy_c45_probe_present(struct mii_bus *bus, int prtad, int devad) +{ + int stat2; + + stat2 = mdiobus_c45_read(bus, prtad, devad, MDIO_STAT2); + if (stat2 < 0) + return stat2; + + return (stat2 & MDIO_STAT2_DEVPRST) == MDIO_STAT2_DEVPRST_VAL; +} + /* get_phy_c45_devs_in_pkg - reads a MMD's devices in package registers. * @bus: the target MII bus * @addr: PHY address on the MII bus @@ -711,12 +733,26 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, { const int num_ids = ARRAY_SIZE(c45_ids->device_ids); u32 *devs = &c45_ids->devices_in_package; - int i, phy_reg; + int i, ret, phy_reg; /* Find first non-zero Devices In package. Device zero is reserved * for 802.3 c45 complied PHYs, so don't probe it at first. */ - for (i = 1; i < num_ids && *devs == 0; i++) { + for (i = 1; i < MDIO_MMD_NUM && *devs == 0; i++) { + if (i == MDIO_MMD_VEND1 || i == MDIO_MMD_VEND2) { + /* Check that there is a device present at this + * address before reading the devices-in-package + * register to avoid reading garbage from the PHY. + * Some PHYs (88x3310) vendor space is not IEEE802.3 + * compliant. + */ + ret = phy_c45_probe_present(bus, addr, i); + if (ret < 0) + return -EIO; + + if (!ret) + continue; + } phy_reg = get_phy_c45_devs_in_pkg(bus, addr, i, devs); if (phy_reg < 0) return -EIO; diff --git a/include/linux/phy.h b/include/linux/phy.h index 8c05d0fb5c00..abe318387331 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -388,6 +388,8 @@ enum phy_state { PHY_CABLETEST, }; +#define MDIO_MMD_NUM 32 + /** * struct phy_c45_device_ids - 802.3-c45 Device Identifiers * @devices_in_package: Bit vector of devices present. -- cgit v1.2.3 From 320ed3bf900075614c43499dc01db8d25717b986 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 18 Jun 2020 14:46:08 +0100 Subject: net: phy: split devices_in_package We have two competing requirements for the devices_in_package field. We want to use it as a bit array indicating which MMDs are present, but we also want to know if the Clause 22 registers are present. Since "devices in package" is a term used in the 802.3 specification, keep this as the as-specified values read from the PHY, and introduce a new member "mmds_present" to indicate which MMDs are actually present in the PHY, derived from the "devices in package" value. Reviewed-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. 
Miller --- drivers/net/phy/phy-c45.c | 4 ++-- drivers/net/phy/phy_device.c | 6 +++--- drivers/net/phy/phylink.c | 8 ++++---- include/linux/phy.h | 4 +++- 4 files changed, 12 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index defe09d94422..bd11e62bfdfe 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -219,7 +219,7 @@ int genphy_c45_read_link(struct phy_device *phydev) int val, devad; bool link = true; - if (phydev->c45_ids.devices_in_package & MDIO_DEVS_AN) { + if (phydev->c45_ids.mmds_present & MDIO_DEVS_AN) { val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1); if (val < 0) return val; @@ -409,7 +409,7 @@ int genphy_c45_pma_read_abilities(struct phy_device *phydev) int val; linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported); - if (phydev->c45_ids.devices_in_package & MDIO_DEVS_AN) { + if (phydev->c45_ids.mmds_present & MDIO_DEVS_AN) { val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); if (val < 0) return val; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 8e11e3d3a801..c1f81c4d0bb3 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -709,9 +709,6 @@ static int get_phy_c45_devs_in_pkg(struct mii_bus *bus, int addr, int dev_addr, return -EIO; *devices_in_package |= phy_reg; - /* Bit 0 doesn't represent a device, it indicates c22 regs presence */ - *devices_in_package &= ~BIT(0); - return 0; } @@ -789,6 +786,8 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, } c45_ids->devices_in_package = devs_in_pkg; + /* Bit 0 doesn't represent a device, it indicates c22 regs presence */ + c45_ids->mmds_present = devs_in_pkg & ~BIT(0); return 0; } @@ -857,6 +856,7 @@ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45) int r; c45_ids.devices_in_package = 0; + c45_ids.mmds_present = 0; memset(c45_ids.device_ids, 0xff, sizeof(c45_ids.device_ids)); if (is_c45) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 0ab65fb75258..7ce787c227b3 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1638,11 +1638,11 @@ static int phylink_phy_read(struct phylink *pl, unsigned int phy_id, case MII_BMSR: case MII_PHYSID1: case MII_PHYSID2: - devad = __ffs(phydev->c45_ids.devices_in_package); + devad = __ffs(phydev->c45_ids.mmds_present); break; case MII_ADVERTISE: case MII_LPA: - if (!(phydev->c45_ids.devices_in_package & MDIO_DEVS_AN)) + if (!(phydev->c45_ids.mmds_present & MDIO_DEVS_AN)) return -EINVAL; devad = MDIO_MMD_AN; if (reg == MII_ADVERTISE) @@ -1678,11 +1678,11 @@ static int phylink_phy_write(struct phylink *pl, unsigned int phy_id, case MII_BMSR: case MII_PHYSID1: case MII_PHYSID2: - devad = __ffs(phydev->c45_ids.devices_in_package); + devad = __ffs(phydev->c45_ids.mmds_present); break; case MII_ADVERTISE: case MII_LPA: - if (!(phydev->c45_ids.devices_in_package & MDIO_DEVS_AN)) + if (!(phydev->c45_ids.mmds_present & MDIO_DEVS_AN)) return -EINVAL; devad = MDIO_MMD_AN; if (reg == MII_ADVERTISE) diff --git a/include/linux/phy.h b/include/linux/phy.h index abe318387331..19d9e040ad84 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -392,11 +392,13 @@ enum phy_state { /** * struct phy_c45_device_ids - 802.3-c45 Device Identifiers - * @devices_in_package: Bit vector of devices present. + * @devices_in_package: IEEE 802.3 devices in package register value. + * @mmds_present: bit vector of MMDs present. 
* @device_ids: The device identifer for each present device. */ struct phy_c45_device_ids { u32 devices_in_package; + u32 mmds_present; u32 device_ids[8]; }; -- cgit v1.2.3 From 389a338999875b6e7b111096e8b6484434556449 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 18 Jun 2020 14:46:13 +0100 Subject: net: phy: read MMD ID from all present MMDs Expand the device_ids[] array to allow all MMD IDs to be read rather than just the first 8 MMDs, but only read the ID if the MDIO_STAT2 register reports that a device really is present here for these new devices to maintain compatibility with our current behaviour. Note that only a limited number of devices have MDIO_STAT2. 88X3310 PHY vendor MMDs do are marked as present in the devices_in_package, but do not contain IEE 802.3 compatible register sets in their lower space. This avoids reading incorrect values as MMD identifiers. Reviewed-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/phy/phy_device.c | 13 +++++++++++++ include/linux/phy.h | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index c1f81c4d0bb3..29ef4456ac25 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -774,6 +774,19 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, if (!(devs_in_pkg & (1 << i))) continue; + if (i == MDIO_MMD_VEND1 || i == MDIO_MMD_VEND2) { + /* Probe the "Device Present" bits for the vendor MMDs + * to ignore these if they do not contain IEEE 802.3 + * registers. + */ + ret = phy_c45_probe_present(bus, addr, i); + if (ret < 0) + return ret; + + if (!ret) + continue; + } + phy_reg = mdiobus_c45_read(bus, addr, i, MII_PHYSID1); if (phy_reg < 0) return -EIO; diff --git a/include/linux/phy.h b/include/linux/phy.h index 19d9e040ad84..9248dd2ce4ca 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -399,7 +399,7 @@ enum phy_state { struct phy_c45_device_ids { u32 devices_in_package; u32 mmds_present; - u32 device_ids[8]; + u32 device_ids[MDIO_MMD_NUM]; }; struct macsec_context; -- cgit v1.2.3 From 775f43facfe89af605d77a9669aa311b5b95cd07 Mon Sep 17 00:00:00 2001 From: "Andrea Parri (Microsoft)" Date: Wed, 17 Jun 2020 18:46:42 +0200 Subject: Drivers: hv: vmbus: Remove the lock field from the vmbus_channel struct The spinlock is (now) *not used to protect test-and-set accesses to attributes of the structure or sc_list operations. There is, AFAICT, a distinct lack of {WRITE,READ}_ONCE()s in the handling of channel->state, but the changes below do not seem to make things "worse". 
;-) Signed-off-by: Andrea Parri (Microsoft) Link: https://lore.kernel.org/r/20200617164642.37393-9-parri.andrea@gmail.com Reviewed-by: Michael Kelley Signed-off-by: Wei Liu --- drivers/hv/channel.c | 6 +----- drivers/hv/channel_mgmt.c | 1 - include/linux/hyperv.h | 6 ------ 3 files changed, 1 insertion(+), 12 deletions(-) (limited to 'include') diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 8848d1548b3f..3ebda7707e46 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -129,12 +129,8 @@ static int __vmbus_open(struct vmbus_channel *newchannel, send_pages = newchannel->ringbuffer_send_offset; recv_pages = newchannel->ringbuffer_pagecount - send_pages; - spin_lock_irqsave(&newchannel->lock, flags); - if (newchannel->state != CHANNEL_OPEN_STATE) { - spin_unlock_irqrestore(&newchannel->lock, flags); + if (newchannel->state != CHANNEL_OPEN_STATE) return -EINVAL; - } - spin_unlock_irqrestore(&newchannel->lock, flags); newchannel->state = CHANNEL_OPENING_STATE; newchannel->onchannel_callback = onchannelcallback; diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 92f8bb2077a9..591106cf58fc 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -317,7 +317,6 @@ static struct vmbus_channel *alloc_channel(void) return NULL; spin_lock_init(&channel->sched_lock); - spin_lock_init(&channel->lock); init_completion(&channel->rescind_event); INIT_LIST_HEAD(&channel->sc_list); diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 690394b79d72..38100e80360a 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -840,12 +840,6 @@ struct vmbus_channel { */ void (*chn_rescind_callback)(struct vmbus_channel *channel); - /* - * The spinlock to protect the structure. It is being used to protect - * test-and-set access to various attributes of the structure as well - * as all sc_list operations. - */ - spinlock_t lock; /* * All Sub-channels of a primary channel are linked here. */ -- cgit v1.2.3 From f11d59d87b8622d4cf9f856c0b8029fb030d8612 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Mon, 25 May 2020 14:38:53 +0300 Subject: iio: Move attach/detach of the poll func to the core All devices using a triggered buffer need to attach and detach the trigger to the device in order to properly work. Instead of doing this in each and every driver by hand move this into the core. At this point in time, all drivers should have been resolved to attach/detach the poll-function in the same order. This patch removes all explicit calls of iio_triggered_buffer_postenable() & iio_triggered_buffer_predisable() in all drivers, since the core handles now the pollfunc attach/detach. The more peculiar change is for the 'at91-sama5d2_adc' driver, since it's not immediately obvious that removing the hooks doesn't break anything. Eugen was able to test on at91-sama5d2-adc driver, sama5d2-xplained board. All seems to be fine. 
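To make the driver-side effect concrete, here is a minimal hypothetical sketch of what a triggered-buffer driver's setup ops look like once the core attaches and detaches the poll function: only device-specific state handling remains. The foo_* names, the register, and the regmap usage are invented for illustration and are not taken from any driver touched by this patch.

#include <linux/bits.h>
#include <linux/regmap.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>

#define FOO_REG_CTRL		0x00
#define FOO_CTRL_MEASURE_EN	BIT(0)

/* Hypothetical driver state, for illustration only. */
struct foo_state {
	struct regmap *map;
};

static int foo_buffer_postenable(struct iio_dev *indio_dev)
{
	struct foo_state *st = iio_priv(indio_dev);

	/* Only the device-specific enable is left; the core now attaches the
	 * poll function, so iio_triggered_buffer_postenable() is no longer
	 * called from here.
	 */
	return regmap_update_bits(st->map, FOO_REG_CTRL,
				  FOO_CTRL_MEASURE_EN, FOO_CTRL_MEASURE_EN);
}

static int foo_buffer_predisable(struct iio_dev *indio_dev)
{
	struct foo_state *st = iio_priv(indio_dev);

	/* Likewise, no iio_triggered_buffer_predisable() call is needed. */
	return regmap_update_bits(st->map, FOO_REG_CTRL,
				  FOO_CTRL_MEASURE_EN, 0);
}

static const struct iio_buffer_setup_ops foo_buffer_setup_ops = {
	.postenable = foo_buffer_postenable,
	.predisable = foo_buffer_predisable,
};

The per-driver hunks that follow apply exactly this kind of simplification across the tree.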
Signed-off-by: Lars-Peter Clausen Signed-off-by: Alexandru Ardelean Tested-by: Eugen Hristev #for at91-sama5d2-adc Signed-off-by: Jonathan Cameron --- drivers/iio/accel/adxl372.c | 20 +++-------- drivers/iio/accel/bmc150-accel-core.c | 4 +-- drivers/iio/accel/kxcjk-1013.c | 2 -- drivers/iio/accel/kxsd9.c | 2 -- drivers/iio/accel/st_accel_buffer.c | 22 +++--------- drivers/iio/accel/stk8312.c | 2 -- drivers/iio/accel/stk8ba50.c | 2 -- drivers/iio/adc/ad7266.c | 2 -- drivers/iio/adc/ad7606.c | 3 +- drivers/iio/adc/ad7766.c | 2 -- drivers/iio/adc/ad7768-1.c | 8 +---- drivers/iio/adc/ad7887.c | 2 -- drivers/iio/adc/ad_sigma_delta.c | 5 --- drivers/iio/adc/at91-sama5d2_adc.c | 18 ---------- drivers/iio/adc/dln2-adc.c | 12 +------ drivers/iio/adc/mxs-lradc-adc.c | 2 -- drivers/iio/adc/stm32-adc.c | 36 +++----------------- drivers/iio/adc/stm32-dfsdm-adc.c | 39 +++------------------- drivers/iio/adc/ti-adc084s021.c | 2 -- drivers/iio/adc/ti-ads1015.c | 2 -- drivers/iio/adc/vf610_adc.c | 7 +--- drivers/iio/adc/xilinx-xadc-core.c | 2 -- drivers/iio/buffer/industrialio-triggered-buffer.c | 10 +----- drivers/iio/chemical/atlas-sensor.c | 6 +--- drivers/iio/dummy/iio_simple_dummy_buffer.c | 14 -------- drivers/iio/gyro/bmg160_core.c | 2 -- drivers/iio/gyro/mpu3050-core.c | 2 -- drivers/iio/gyro/st_gyro_buffer.c | 21 +++--------- drivers/iio/humidity/hdc100x.c | 12 +------ drivers/iio/humidity/hts221_buffer.c | 2 -- drivers/iio/iio_core_trigger.h | 17 ++++++++++ drivers/iio/industrialio-buffer.c | 13 ++++++++ drivers/iio/industrialio-trigger.c | 22 +++--------- drivers/iio/light/gp2ap020a00f.c | 10 ------ drivers/iio/light/isl29125.c | 20 ++--------- drivers/iio/light/rpr0521.c | 2 -- drivers/iio/light/si1145.c | 2 -- drivers/iio/light/st_uvis25_core.c | 2 -- drivers/iio/light/tcs3414.c | 20 ++--------- drivers/iio/light/vcnl4000.c | 35 ++++--------------- drivers/iio/magnetometer/bmc150_magn.c | 2 -- drivers/iio/magnetometer/rm3100-core.c | 2 -- drivers/iio/magnetometer/st_magn_buffer.c | 26 ++------------- drivers/iio/potentiostat/lmp91000.c | 13 ++------ drivers/iio/pressure/st_pressure_buffer.c | 26 ++------------- drivers/iio/pressure/zpa2326.c | 27 ++++++--------- drivers/iio/proximity/sx9310.c | 2 -- drivers/iio/proximity/sx9500.c | 9 ----- include/linux/iio/trigger_consumer.h | 7 ---- 49 files changed, 93 insertions(+), 429 deletions(-) (limited to 'include') diff --git a/drivers/iio/accel/adxl372.c b/drivers/iio/accel/adxl372.c index 26ca45073a56..e7e316b75e87 100644 --- a/drivers/iio/accel/adxl372.c +++ b/drivers/iio/accel/adxl372.c @@ -795,13 +795,9 @@ static int adxl372_buffer_postenable(struct iio_dev *indio_dev) unsigned int mask; int i, ret; - ret = iio_triggered_buffer_postenable(indio_dev); - if (ret < 0) - return ret; - ret = adxl372_set_interrupts(st, ADXL372_INT1_MAP_FIFO_FULL_MSK, 0); if (ret < 0) - goto err; + return ret; mask = *indio_dev->active_scan_mask; @@ -810,10 +806,8 @@ static int adxl372_buffer_postenable(struct iio_dev *indio_dev) break; } - if (i == ARRAY_SIZE(adxl372_axis_lookup_table)) { - ret = -EINVAL; - goto err; - } + if (i == ARRAY_SIZE(adxl372_axis_lookup_table)) + return -EINVAL; st->fifo_format = adxl372_axis_lookup_table[i].fifo_format; st->fifo_set_size = bitmap_weight(indio_dev->active_scan_mask, @@ -833,14 +827,10 @@ static int adxl372_buffer_postenable(struct iio_dev *indio_dev) if (ret < 0) { st->fifo_mode = ADXL372_FIFO_BYPASSED; adxl372_set_interrupts(st, 0, 0); - goto err; + return ret; } return 0; - -err: - 
iio_triggered_buffer_predisable(indio_dev); - return ret; } static int adxl372_buffer_predisable(struct iio_dev *indio_dev) @@ -851,7 +841,7 @@ static int adxl372_buffer_predisable(struct iio_dev *indio_dev) st->fifo_mode = ADXL372_FIFO_BYPASSED; adxl372_configure_fifo(st); - return iio_triggered_buffer_predisable(indio_dev); + return 0; } static const struct iio_buffer_setup_ops adxl372_buffer_ops = { diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c index 8f60d0727ee8..24864d9dfab5 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c @@ -1411,7 +1411,7 @@ static int bmc150_accel_buffer_postenable(struct iio_dev *indio_dev) int ret = 0; if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) - return iio_triggered_buffer_postenable(indio_dev); + return 0; mutex_lock(&data->mutex); @@ -1443,7 +1443,7 @@ static int bmc150_accel_buffer_predisable(struct iio_dev *indio_dev) struct bmc150_accel_data *data = iio_priv(indio_dev); if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) - return iio_triggered_buffer_predisable(indio_dev); + return 0; mutex_lock(&data->mutex); diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c index 6b93521c0e17..beb38d9d607d 100644 --- a/drivers/iio/accel/kxcjk-1013.c +++ b/drivers/iio/accel/kxcjk-1013.c @@ -1027,9 +1027,7 @@ static const struct iio_chan_spec kxcjk1013_channels[] = { static const struct iio_buffer_setup_ops kxcjk1013_buffer_setup_ops = { .preenable = kxcjk1013_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, .postdisable = kxcjk1013_buffer_postdisable, - .predisable = iio_triggered_buffer_predisable, }; static const struct iio_info kxcjk1013_info = { diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c index 63b1d8ee6c6f..66b2e4cf24cf 100644 --- a/drivers/iio/accel/kxsd9.c +++ b/drivers/iio/accel/kxsd9.c @@ -252,8 +252,6 @@ static int kxsd9_buffer_postdisable(struct iio_dev *indio_dev) static const struct iio_buffer_setup_ops kxsd9_buffer_setup_ops = { .preenable = kxsd9_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, - .predisable = iio_triggered_buffer_predisable, .postdisable = kxsd9_buffer_postdisable, }; diff --git a/drivers/iio/accel/st_accel_buffer.c b/drivers/iio/accel/st_accel_buffer.c index b5c814ef1637..492263589e04 100644 --- a/drivers/iio/accel/st_accel_buffer.c +++ b/drivers/iio/accel/st_accel_buffer.c @@ -33,13 +33,9 @@ static int st_accel_buffer_postenable(struct iio_dev *indio_dev) { int err; - err = iio_triggered_buffer_postenable(indio_dev); - if (err < 0) - return err; - err = st_sensors_set_axis_enable(indio_dev, indio_dev->active_scan_mask[0]); if (err < 0) - goto st_accel_buffer_predisable; + return err; err = st_sensors_set_enable(indio_dev, true); if (err < 0) @@ -49,27 +45,19 @@ static int st_accel_buffer_postenable(struct iio_dev *indio_dev) st_accel_buffer_enable_all_axis: st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS); -st_accel_buffer_predisable: - iio_triggered_buffer_predisable(indio_dev); return err; } static int st_accel_buffer_predisable(struct iio_dev *indio_dev) { - int err, err2; + int err; err = st_sensors_set_enable(indio_dev, false); if (err < 0) - goto st_accel_buffer_predisable; - - err = st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS); - -st_accel_buffer_predisable: - err2 = iio_triggered_buffer_predisable(indio_dev); - if (!err) - err = err2; + return err; - return err; + return 
st_sensors_set_axis_enable(indio_dev, + ST_SENSORS_ENABLE_ALL_AXIS); } static const struct iio_buffer_setup_ops st_accel_buffer_setup_ops = { diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c index 616d0b90dd92..3b59887a8581 100644 --- a/drivers/iio/accel/stk8312.c +++ b/drivers/iio/accel/stk8312.c @@ -492,8 +492,6 @@ static int stk8312_buffer_postdisable(struct iio_dev *indio_dev) static const struct iio_buffer_setup_ops stk8312_buffer_setup_ops = { .preenable = stk8312_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, - .predisable = iio_triggered_buffer_predisable, .postdisable = stk8312_buffer_postdisable, }; diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c index 98fad8cd6fe7..3ead378b02c9 100644 --- a/drivers/iio/accel/stk8ba50.c +++ b/drivers/iio/accel/stk8ba50.c @@ -376,8 +376,6 @@ static int stk8ba50_buffer_postdisable(struct iio_dev *indio_dev) static const struct iio_buffer_setup_ops stk8ba50_buffer_setup_ops = { .preenable = stk8ba50_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, - .predisable = iio_triggered_buffer_predisable, .postdisable = stk8ba50_buffer_postdisable, }; diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c index 3dc15ec04f66..a8ec3efd659e 100644 --- a/drivers/iio/adc/ad7266.c +++ b/drivers/iio/adc/ad7266.c @@ -74,8 +74,6 @@ static int ad7266_postdisable(struct iio_dev *indio_dev) static const struct iio_buffer_setup_ops iio_triggered_buffer_setup_ops = { .preenable = &ad7266_preenable, - .postenable = &iio_triggered_buffer_postenable, - .predisable = &iio_triggered_buffer_predisable, .postdisable = &ad7266_postdisable, }; diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c index 23d8277086ee..ee7b108688b3 100644 --- a/drivers/iio/adc/ad7606.c +++ b/drivers/iio/adc/ad7606.c @@ -499,7 +499,6 @@ static int ad7606_buffer_postenable(struct iio_dev *indio_dev) { struct ad7606_state *st = iio_priv(indio_dev); - iio_triggered_buffer_postenable(indio_dev); gpiod_set_value(st->gpio_convst, 1); return 0; @@ -511,7 +510,7 @@ static int ad7606_buffer_predisable(struct iio_dev *indio_dev) gpiod_set_value(st->gpio_convst, 0); - return iio_triggered_buffer_predisable(indio_dev); + return 0; } static const struct iio_buffer_setup_ops ad7606_buffer_ops = { diff --git a/drivers/iio/adc/ad7766.c b/drivers/iio/adc/ad7766.c index 4bcf8a34bc46..b6b6765be7b4 100644 --- a/drivers/iio/adc/ad7766.c +++ b/drivers/iio/adc/ad7766.c @@ -178,8 +178,6 @@ static const struct ad7766_chip_info ad7766_chip_info[] = { static const struct iio_buffer_setup_ops ad7766_buffer_setup_ops = { .preenable = &ad7766_preenable, - .postenable = &iio_triggered_buffer_postenable, - .predisable = &iio_triggered_buffer_predisable, .postdisable = &ad7766_postdisable, }; diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c index be2aed551324..0e93b0766eb4 100644 --- a/drivers/iio/adc/ad7768-1.c +++ b/drivers/iio/adc/ad7768-1.c @@ -490,7 +490,6 @@ static int ad7768_buffer_postenable(struct iio_dev *indio_dev) { struct ad7768_state *st = iio_priv(indio_dev); - iio_triggered_buffer_postenable(indio_dev); /* * Write a 1 to the LSB of the INTERFACE_FORMAT register to enter * continuous read mode. 
Subsequent data reads do not require an @@ -502,17 +501,12 @@ static int ad7768_buffer_postenable(struct iio_dev *indio_dev) static int ad7768_buffer_predisable(struct iio_dev *indio_dev) { struct ad7768_state *st = iio_priv(indio_dev); - int ret; /* * To exit continuous read mode, perform a single read of the ADC_DATA * reg (0x2C), which allows further configuration of the device. */ - ret = ad7768_spi_reg_read(st, AD7768_REG_ADC_DATA, 3); - if (ret < 0) - return ret; - - return iio_triggered_buffer_predisable(indio_dev); + return ad7768_spi_reg_read(st, AD7768_REG_ADC_DATA, 3); } static const struct iio_buffer_setup_ops ad7768_buffer_ops = { diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c index d1d43fb700ba..f8e6243457bb 100644 --- a/drivers/iio/adc/ad7887.c +++ b/drivers/iio/adc/ad7887.c @@ -136,8 +136,6 @@ done: static const struct iio_buffer_setup_ops ad7887_ring_setup_ops = { .preenable = &ad7887_ring_preenable, - .postenable = &iio_triggered_buffer_postenable, - .predisable = &iio_triggered_buffer_predisable, .postdisable = &ad7887_ring_postdisable, }; diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c index dd3d54b3bc8b..3554ee6ee099 100644 --- a/drivers/iio/adc/ad_sigma_delta.c +++ b/drivers/iio/adc/ad_sigma_delta.c @@ -345,10 +345,6 @@ static int ad_sd_buffer_postenable(struct iio_dev *indio_dev) unsigned int channel; int ret; - ret = iio_triggered_buffer_postenable(indio_dev); - if (ret < 0) - return ret; - channel = find_first_bit(indio_dev->active_scan_mask, indio_dev->masklength); ret = ad_sigma_delta_set_channel(sigma_delta, @@ -441,7 +437,6 @@ static irqreturn_t ad_sd_trigger_handler(int irq, void *p) static const struct iio_buffer_setup_ops ad_sd_buffer_setup_ops = { .postenable = &ad_sd_buffer_postenable, - .predisable = &iio_triggered_buffer_predisable, .postdisable = &ad_sd_buffer_postdisable, .validate_scan_mask = &iio_validate_scan_mask_onehot, }; diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c index 1b4340a6c6bb..6cc06f1566eb 100644 --- a/drivers/iio/adc/at91-sama5d2_adc.c +++ b/drivers/iio/adc/at91-sama5d2_adc.c @@ -937,14 +937,6 @@ static int at91_adc_buffer_preenable(struct iio_dev *indio_dev) return 0; } -static int at91_adc_buffer_postenable(struct iio_dev *indio_dev) -{ - if (at91_adc_current_chan_is_touch(indio_dev)) - return 0; - - return iio_triggered_buffer_postenable(indio_dev); -} - static int at91_adc_buffer_postdisable(struct iio_dev *indio_dev) { struct at91_adc_state *st = iio_priv(indio_dev); @@ -995,19 +987,9 @@ static int at91_adc_buffer_postdisable(struct iio_dev *indio_dev) return 0; } -static int at91_adc_buffer_predisable(struct iio_dev *indio_dev) -{ - if (at91_adc_current_chan_is_touch(indio_dev)) - return 0; - - return iio_triggered_buffer_predisable(indio_dev); -} - static const struct iio_buffer_setup_ops at91_buffer_setup_ops = { .preenable = &at91_adc_buffer_preenable, .postdisable = &at91_adc_buffer_postdisable, - .postenable = &at91_adc_buffer_postenable, - .predisable = &at91_adc_buffer_predisable, }; static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *indio, diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c index 6689cb93c6a2..0d53ef18e045 100644 --- a/drivers/iio/adc/dln2-adc.c +++ b/drivers/iio/adc/dln2-adc.c @@ -524,10 +524,6 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev) u16 conflict; unsigned int trigger_chan; - ret = iio_triggered_buffer_postenable(indio_dev); - if (ret) - return 
ret; - mutex_lock(&dln2->mutex); /* Enable ADC */ @@ -541,7 +537,6 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev) (int)conflict); ret = -EBUSY; } - iio_triggered_buffer_predisable(indio_dev); return ret; } @@ -555,7 +550,6 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev) mutex_unlock(&dln2->mutex); if (ret < 0) { dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__); - iio_triggered_buffer_predisable(indio_dev); return ret; } } else { @@ -568,7 +562,7 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev) static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev) { - int ret, ret2; + int ret; struct dln2_adc *dln2 = iio_priv(indio_dev); mutex_lock(&dln2->mutex); @@ -586,10 +580,6 @@ static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev) if (ret < 0) dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__); - ret2 = iio_triggered_buffer_predisable(indio_dev); - if (ret == 0) - ret = ret2; - return ret; } diff --git a/drivers/iio/adc/mxs-lradc-adc.c b/drivers/iio/adc/mxs-lradc-adc.c index c371565374d1..30e29f44ebd2 100644 --- a/drivers/iio/adc/mxs-lradc-adc.c +++ b/drivers/iio/adc/mxs-lradc-adc.c @@ -568,8 +568,6 @@ static bool mxs_lradc_adc_validate_scan_mask(struct iio_dev *iio, static const struct iio_buffer_setup_ops mxs_lradc_adc_buffer_ops = { .preenable = &mxs_lradc_adc_buffer_preenable, - .postenable = &iio_triggered_buffer_postenable, - .predisable = &iio_triggered_buffer_predisable, .postdisable = &mxs_lradc_adc_buffer_postdisable, .validate_scan_mask = &mxs_lradc_adc_validate_scan_mask, }; diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index e5a2bfcb5681..3eb9ebe8372f 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c @@ -1492,7 +1492,7 @@ static int stm32_adc_dma_start(struct iio_dev *indio_dev) return 0; } -static int __stm32_adc_buffer_postenable(struct iio_dev *indio_dev) +static int stm32_adc_buffer_postenable(struct iio_dev *indio_dev) { struct stm32_adc *adc = iio_priv(indio_dev); struct device *dev = indio_dev->dev.parent; @@ -1537,22 +1537,7 @@ err_pm_put: return ret; } -static int stm32_adc_buffer_postenable(struct iio_dev *indio_dev) -{ - int ret; - - ret = iio_triggered_buffer_postenable(indio_dev); - if (ret < 0) - return ret; - - ret = __stm32_adc_buffer_postenable(indio_dev); - if (ret < 0) - iio_triggered_buffer_predisable(indio_dev); - - return ret; -} - -static void __stm32_adc_buffer_predisable(struct iio_dev *indio_dev) +static int stm32_adc_buffer_predisable(struct iio_dev *indio_dev) { struct stm32_adc *adc = iio_priv(indio_dev); struct device *dev = indio_dev->dev.parent; @@ -1571,19 +1556,8 @@ static void __stm32_adc_buffer_predisable(struct iio_dev *indio_dev) pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); -} - -static int stm32_adc_buffer_predisable(struct iio_dev *indio_dev) -{ - int ret; - - __stm32_adc_buffer_predisable(indio_dev); - - ret = iio_triggered_buffer_predisable(indio_dev); - if (ret < 0) - dev_err(&indio_dev->dev, "predisable failed\n"); - return ret; + return 0; } static const struct iio_buffer_setup_ops stm32_adc_buffer_setup_ops = { @@ -2024,7 +1998,7 @@ static int stm32_adc_suspend(struct device *dev) struct iio_dev *indio_dev = dev_get_drvdata(dev); if (iio_buffer_enabled(indio_dev)) - __stm32_adc_buffer_predisable(indio_dev); + stm32_adc_buffer_predisable(indio_dev); return pm_runtime_force_suspend(dev); } @@ -2046,7 +2020,7 @@ static int 
stm32_adc_resume(struct device *dev) if (ret < 0) return ret; - return __stm32_adc_buffer_postenable(indio_dev); + return stm32_adc_buffer_postenable(indio_dev); } #endif diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c index 017d1ba4c14e..5e10fb4f3704 100644 --- a/drivers/iio/adc/stm32-dfsdm-adc.c +++ b/drivers/iio/adc/stm32-dfsdm-adc.c @@ -995,7 +995,7 @@ static int stm32_dfsdm_update_scan_mode(struct iio_dev *indio_dev, return 0; } -static int __stm32_dfsdm_postenable(struct iio_dev *indio_dev) +static int stm32_dfsdm_postenable(struct iio_dev *indio_dev) { struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); int ret; @@ -1038,30 +1038,7 @@ err_stop_hwc: return ret; } -static int stm32_dfsdm_postenable(struct iio_dev *indio_dev) -{ - int ret; - - if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) { - ret = iio_triggered_buffer_postenable(indio_dev); - if (ret < 0) - return ret; - } - - ret = __stm32_dfsdm_postenable(indio_dev); - if (ret < 0) - goto err_predisable; - - return 0; - -err_predisable: - if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) - iio_triggered_buffer_predisable(indio_dev); - - return ret; -} - -static void __stm32_dfsdm_predisable(struct iio_dev *indio_dev) +static int stm32_dfsdm_predisable(struct iio_dev *indio_dev) { struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); @@ -1073,14 +1050,6 @@ static void __stm32_dfsdm_predisable(struct iio_dev *indio_dev) if (adc->hwc) iio_hw_consumer_disable(adc->hwc); -} - -static int stm32_dfsdm_predisable(struct iio_dev *indio_dev) -{ - __stm32_dfsdm_predisable(indio_dev); - - if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) - iio_triggered_buffer_predisable(indio_dev); return 0; } @@ -1668,7 +1637,7 @@ static int __maybe_unused stm32_dfsdm_adc_suspend(struct device *dev) struct iio_dev *indio_dev = dev_get_drvdata(dev); if (iio_buffer_enabled(indio_dev)) - __stm32_dfsdm_predisable(indio_dev); + stm32_dfsdm_predisable(indio_dev); return 0; } @@ -1691,7 +1660,7 @@ static int __maybe_unused stm32_dfsdm_adc_resume(struct device *dev) } if (iio_buffer_enabled(indio_dev)) - __stm32_dfsdm_postenable(indio_dev); + stm32_dfsdm_postenable(indio_dev); return 0; } diff --git a/drivers/iio/adc/ti-adc084s021.c b/drivers/iio/adc/ti-adc084s021.c index f22f004c1eb6..c2db2435f419 100644 --- a/drivers/iio/adc/ti-adc084s021.c +++ b/drivers/iio/adc/ti-adc084s021.c @@ -187,8 +187,6 @@ static const struct iio_info adc084s021_info = { static const struct iio_buffer_setup_ops adc084s021_buffer_setup_ops = { .preenable = adc084s021_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, - .predisable = iio_triggered_buffer_predisable, .postdisable = adc084s021_buffer_postdisable, }; diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c index 1a5f520080d2..f42ab112986e 100644 --- a/drivers/iio/adc/ti-ads1015.c +++ b/drivers/iio/adc/ti-ads1015.c @@ -788,8 +788,6 @@ static int ads1015_buffer_postdisable(struct iio_dev *indio_dev) static const struct iio_buffer_setup_ops ads1015_buffer_setup_ops = { .preenable = ads1015_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, - .predisable = iio_triggered_buffer_predisable, .postdisable = ads1015_buffer_postdisable, .validate_scan_mask = &iio_validate_scan_mask_onehot, }; diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c index f5637bf38d37..1d794cf3e3f1 100644 --- a/drivers/iio/adc/vf610_adc.c +++ b/drivers/iio/adc/vf610_adc.c @@ -724,13 +724,8 @@ static int vf610_adc_buffer_postenable(struct iio_dev 
*indio_dev) { struct vf610_adc *info = iio_priv(indio_dev); unsigned int channel; - int ret; int val; - ret = iio_triggered_buffer_postenable(indio_dev); - if (ret) - return ret; - val = readl(info->regs + VF610_REG_ADC_GC); val |= VF610_ADC_ADCON; writel(val, info->regs + VF610_REG_ADC_GC); @@ -761,7 +756,7 @@ static int vf610_adc_buffer_predisable(struct iio_dev *indio_dev) writel(hc_cfg, info->regs + VF610_REG_ADC_HC0); - return iio_triggered_buffer_predisable(indio_dev); + return 0; } static const struct iio_buffer_setup_ops iio_triggered_buffer_setup_ops = { diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c index 69be9e82fe3e..d0b7ef296afb 100644 --- a/drivers/iio/adc/xilinx-xadc-core.c +++ b/drivers/iio/adc/xilinx-xadc-core.c @@ -839,8 +839,6 @@ err: static const struct iio_buffer_setup_ops xadc_buffer_ops = { .preenable = &xadc_preenable, - .postenable = &iio_triggered_buffer_postenable, - .predisable = &iio_triggered_buffer_predisable, .postdisable = &xadc_postdisable, }; diff --git a/drivers/iio/buffer/industrialio-triggered-buffer.c b/drivers/iio/buffer/industrialio-triggered-buffer.c index e8046c1ecd6b..6c20a83f887e 100644 --- a/drivers/iio/buffer/industrialio-triggered-buffer.c +++ b/drivers/iio/buffer/industrialio-triggered-buffer.c @@ -13,11 +13,6 @@ #include #include -static const struct iio_buffer_setup_ops iio_triggered_buffer_setup_ops = { - .postenable = &iio_triggered_buffer_postenable, - .predisable = &iio_triggered_buffer_predisable, -}; - /** * iio_triggered_buffer_setup() - Setup triggered buffer and pollfunc * @indio_dev: IIO device structure @@ -67,10 +62,7 @@ int iio_triggered_buffer_setup(struct iio_dev *indio_dev, } /* Ring buffer functions - here trigger setup related */ - if (setup_ops) - indio_dev->setup_ops = setup_ops; - else - indio_dev->setup_ops = &iio_triggered_buffer_setup_ops; + indio_dev->setup_ops = setup_ops; /* Flag that polled ring buffering is possible */ indio_dev->modes |= INDIO_BUFFER_TRIGGERED; diff --git a/drivers/iio/chemical/atlas-sensor.c b/drivers/iio/chemical/atlas-sensor.c index 047f4b3f5a0e..43069636fcd5 100644 --- a/drivers/iio/chemical/atlas-sensor.c +++ b/drivers/iio/chemical/atlas-sensor.c @@ -410,10 +410,6 @@ static int atlas_buffer_postenable(struct iio_dev *indio_dev) struct atlas_data *data = iio_priv(indio_dev); int ret; - ret = iio_triggered_buffer_postenable(indio_dev); - if (ret) - return ret; - ret = pm_runtime_get_sync(&data->client->dev); if (ret < 0) { pm_runtime_put_noidle(&data->client->dev); @@ -437,7 +433,7 @@ static int atlas_buffer_predisable(struct iio_dev *indio_dev) if (ret) return ret; - return iio_triggered_buffer_predisable(indio_dev); + return 0; } static const struct iio_trigger_ops atlas_interrupt_trigger_ops = { diff --git a/drivers/iio/dummy/iio_simple_dummy_buffer.c b/drivers/iio/dummy/iio_simple_dummy_buffer.c index 17606eca42b4..8e13c53d4360 100644 --- a/drivers/iio/dummy/iio_simple_dummy_buffer.c +++ b/drivers/iio/dummy/iio_simple_dummy_buffer.c @@ -99,20 +99,6 @@ done: } static const struct iio_buffer_setup_ops iio_simple_dummy_buffer_setup_ops = { - /* - * iio_triggered_buffer_postenable: - * Generic function that simply attaches the pollfunc to the trigger. - * Replace this to mess with hardware state before we attach the - * trigger. - */ - .postenable = &iio_triggered_buffer_postenable, - /* - * iio_triggered_buffer_predisable: - * Generic function that simple detaches the pollfunc from the trigger. 
- * Replace this to put hardware state back again after the trigger is - * detached but before userspace knows we have disabled the ring. - */ - .predisable = &iio_triggered_buffer_predisable, }; int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev) diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c index 933492b33189..8ddda96455fc 100644 --- a/drivers/iio/gyro/bmg160_core.c +++ b/drivers/iio/gyro/bmg160_core.c @@ -1051,8 +1051,6 @@ static int bmg160_buffer_postdisable(struct iio_dev *indio_dev) static const struct iio_buffer_setup_ops bmg160_buffer_setup_ops = { .preenable = bmg160_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, - .predisable = iio_triggered_buffer_predisable, .postdisable = bmg160_buffer_postdisable, }; diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c index 157330451e0a..00e58060968c 100644 --- a/drivers/iio/gyro/mpu3050-core.c +++ b/drivers/iio/gyro/mpu3050-core.c @@ -662,8 +662,6 @@ static int mpu3050_buffer_postdisable(struct iio_dev *indio_dev) static const struct iio_buffer_setup_ops mpu3050_buffer_setup_ops = { .preenable = mpu3050_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, - .predisable = iio_triggered_buffer_predisable, .postdisable = mpu3050_buffer_postdisable, }; diff --git a/drivers/iio/gyro/st_gyro_buffer.c b/drivers/iio/gyro/st_gyro_buffer.c index 9c92ff7a82be..4feb7ada7195 100644 --- a/drivers/iio/gyro/st_gyro_buffer.c +++ b/drivers/iio/gyro/st_gyro_buffer.c @@ -33,13 +33,9 @@ static int st_gyro_buffer_postenable(struct iio_dev *indio_dev) { int err; - err = iio_triggered_buffer_postenable(indio_dev); - if (err < 0) - return err; - err = st_sensors_set_axis_enable(indio_dev, indio_dev->active_scan_mask[0]); if (err < 0) - goto st_gyro_buffer_predisable; + return err; err = st_sensors_set_enable(indio_dev, true); if (err < 0) @@ -49,27 +45,18 @@ static int st_gyro_buffer_postenable(struct iio_dev *indio_dev) st_gyro_buffer_enable_all_axis: st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS); -st_gyro_buffer_predisable: - iio_triggered_buffer_predisable(indio_dev); return err; } static int st_gyro_buffer_predisable(struct iio_dev *indio_dev) { - int err, err2; + int err; err = st_sensors_set_enable(indio_dev, false); if (err < 0) - goto st_gyro_buffer_predisable; - - err = st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS); - -st_gyro_buffer_predisable: - err2 = iio_triggered_buffer_predisable(indio_dev); - if (!err) - err = err2; + return err; - return err; + return st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS); } static const struct iio_buffer_setup_ops st_gyro_buffer_setup_ops = { diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c index 3331141734c8..413204cd9bbd 100644 --- a/drivers/iio/humidity/hdc100x.c +++ b/drivers/iio/humidity/hdc100x.c @@ -278,17 +278,11 @@ static int hdc100x_buffer_postenable(struct iio_dev *indio_dev) struct hdc100x_data *data = iio_priv(indio_dev); int ret; - ret = iio_triggered_buffer_postenable(indio_dev); - if (ret) - return ret; - /* Buffer is enabled. 
First set ACQ Mode, then attach poll func */ mutex_lock(&data->lock); ret = hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE, HDC100X_REG_CONFIG_ACQ_MODE); mutex_unlock(&data->lock); - if (ret) - iio_triggered_buffer_predisable(indio_dev); return ret; } @@ -296,16 +290,12 @@ static int hdc100x_buffer_postenable(struct iio_dev *indio_dev) static int hdc100x_buffer_predisable(struct iio_dev *indio_dev) { struct hdc100x_data *data = iio_priv(indio_dev); - int ret, ret2; + int ret; mutex_lock(&data->lock); ret = hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE, 0); mutex_unlock(&data->lock); - ret2 = iio_triggered_buffer_predisable(indio_dev); - if (ret == 0) - ret = ret2; - return ret; } diff --git a/drivers/iio/humidity/hts221_buffer.c b/drivers/iio/humidity/hts221_buffer.c index 21c6c160462d..f75f0b218fea 100644 --- a/drivers/iio/humidity/hts221_buffer.c +++ b/drivers/iio/humidity/hts221_buffer.c @@ -153,8 +153,6 @@ static int hts221_buffer_postdisable(struct iio_dev *iio_dev) static const struct iio_buffer_setup_ops hts221_buffer_ops = { .preenable = hts221_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, - .predisable = iio_triggered_buffer_predisable, .postdisable = hts221_buffer_postdisable, }; diff --git a/drivers/iio/iio_core_trigger.h b/drivers/iio/iio_core_trigger.h index e59fe2f36bbb..9d1a92cc6480 100644 --- a/drivers/iio/iio_core_trigger.h +++ b/drivers/iio/iio_core_trigger.h @@ -18,6 +18,12 @@ void iio_device_register_trigger_consumer(struct iio_dev *indio_dev); **/ void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev); + +int iio_trigger_attach_poll_func(struct iio_trigger *trig, + struct iio_poll_func *pf); +int iio_trigger_detach_poll_func(struct iio_trigger *trig, + struct iio_poll_func *pf); + #else /** @@ -37,4 +43,15 @@ static void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev) { } +static inline int iio_trigger_attach_poll_func(struct iio_trigger *trig, + struct iio_poll_func *pf) +{ + return 0; +} +static inline int iio_trigger_detach_poll_func(struct iio_trigger *trig, + struct iio_poll_func *pf) +{ + return 0; +} + #endif /* CONFIG_TRIGGER_CONSUMER */ diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index 9fa238c0a7d4..329dd4d6757a 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -20,6 +20,7 @@ #include #include "iio_core.h" +#include "iio_core_trigger.h" #include #include #include @@ -972,6 +973,13 @@ static int iio_enable_buffers(struct iio_dev *indio_dev, } } + if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) { + ret = iio_trigger_attach_poll_func(indio_dev->trig, + indio_dev->pollfunc); + if (ret) + goto err_disable_buffers; + } + return 0; err_disable_buffers: @@ -998,6 +1006,11 @@ static int iio_disable_buffers(struct iio_dev *indio_dev) if (list_empty(&indio_dev->buffer_list)) return 0; + if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) { + iio_trigger_detach_poll_func(indio_dev->trig, + indio_dev->pollfunc); + } + /* * If things go wrong at some step in disable we still need to continue * to perform the other steps, otherwise we leave the device in a diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c index 53d1931f6be8..6f16357fd732 100644 --- a/drivers/iio/industrialio-trigger.c +++ b/drivers/iio/industrialio-trigger.c @@ -239,8 +239,8 @@ static void iio_trigger_put_irq(struct iio_trigger *trig, int irq) * the relevant function is in there may be the best option. 
*/ /* Worth protecting against double additions? */ -static int iio_trigger_attach_poll_func(struct iio_trigger *trig, - struct iio_poll_func *pf) +int iio_trigger_attach_poll_func(struct iio_trigger *trig, + struct iio_poll_func *pf) { int ret = 0; bool notinuse @@ -290,8 +290,8 @@ out_put_module: return ret; } -static int iio_trigger_detach_poll_func(struct iio_trigger *trig, - struct iio_poll_func *pf) +int iio_trigger_detach_poll_func(struct iio_trigger *trig, + struct iio_poll_func *pf) { int ret = 0; bool no_other_users @@ -705,17 +705,3 @@ void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev) if (indio_dev->trig) iio_trigger_put(indio_dev->trig); } - -int iio_triggered_buffer_postenable(struct iio_dev *indio_dev) -{ - return iio_trigger_attach_poll_func(indio_dev->trig, - indio_dev->pollfunc); -} -EXPORT_SYMBOL(iio_triggered_buffer_postenable); - -int iio_triggered_buffer_predisable(struct iio_dev *indio_dev) -{ - return iio_trigger_detach_poll_func(indio_dev->trig, - indio_dev->pollfunc); -} -EXPORT_SYMBOL(iio_triggered_buffer_predisable); diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c index dd9ad880deca..e2850c1a7353 100644 --- a/drivers/iio/light/gp2ap020a00f.c +++ b/drivers/iio/light/gp2ap020a00f.c @@ -1390,12 +1390,6 @@ static int gp2ap020a00f_buffer_postenable(struct iio_dev *indio_dev) mutex_lock(&data->lock); - err = iio_triggered_buffer_postenable(indio_dev); - if (err < 0) { - mutex_unlock(&data->lock); - return err; - } - /* * Enable triggers according to the scan_mask. Enabling either * LIGHT_CLEAR or LIGHT_IR scan mode results in enabling ALS @@ -1430,8 +1424,6 @@ static int gp2ap020a00f_buffer_postenable(struct iio_dev *indio_dev) err = -ENOMEM; error_unlock: - if (err < 0) - iio_triggered_buffer_predisable(indio_dev); mutex_unlock(&data->lock); return err; @@ -1465,8 +1457,6 @@ static int gp2ap020a00f_buffer_predisable(struct iio_dev *indio_dev) if (err == 0) kfree(data->buffer); - iio_triggered_buffer_predisable(indio_dev); - mutex_unlock(&data->lock); return err; diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c index 663c72610655..b93b85dbc3a6 100644 --- a/drivers/iio/light/isl29125.c +++ b/drivers/iio/light/isl29125.c @@ -216,36 +216,20 @@ static const struct iio_info isl29125_info = { static int isl29125_buffer_postenable(struct iio_dev *indio_dev) { struct isl29125_data *data = iio_priv(indio_dev); - int err; - - err = iio_triggered_buffer_postenable(indio_dev); - if (err) - return err; data->conf1 |= ISL29125_MODE_RGB; - err = i2c_smbus_write_byte_data(data->client, ISL29125_CONF1, + return i2c_smbus_write_byte_data(data->client, ISL29125_CONF1, data->conf1); - if (err) { - iio_triggered_buffer_predisable(indio_dev); - return err; - } - - return 0; } static int isl29125_buffer_predisable(struct iio_dev *indio_dev) { struct isl29125_data *data = iio_priv(indio_dev); - int ret; data->conf1 &= ~ISL29125_MODE_MASK; data->conf1 |= ISL29125_MODE_PD; - ret = i2c_smbus_write_byte_data(data->client, ISL29125_CONF1, + return i2c_smbus_write_byte_data(data->client, ISL29125_CONF1, data->conf1); - - iio_triggered_buffer_predisable(indio_dev); - - return ret; } static const struct iio_buffer_setup_ops isl29125_buffer_setup_ops = { diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c index c20fbc730d65..aa2972b04833 100644 --- a/drivers/iio/light/rpr0521.c +++ b/drivers/iio/light/rpr0521.c @@ -570,8 +570,6 @@ static int rpr0521_buffer_postdisable(struct iio_dev *indio_dev) static 
const struct iio_buffer_setup_ops rpr0521_buffer_setup_ops = { .preenable = rpr0521_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, - .predisable = iio_triggered_buffer_predisable, .postdisable = rpr0521_buffer_postdisable, }; diff --git a/drivers/iio/light/si1145.c b/drivers/iio/light/si1145.c index e1f989dd3a3d..521e8adb93a7 100644 --- a/drivers/iio/light/si1145.c +++ b/drivers/iio/light/si1145.c @@ -1171,8 +1171,6 @@ static bool si1145_validate_scan_mask(struct iio_dev *indio_dev, static const struct iio_buffer_setup_ops si1145_buffer_setup_ops = { .preenable = si1145_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, - .predisable = iio_triggered_buffer_predisable, .validate_scan_mask = si1145_validate_scan_mask, }; diff --git a/drivers/iio/light/st_uvis25_core.c b/drivers/iio/light/st_uvis25_core.c index 4d001d50e775..a18a82e6bbf5 100644 --- a/drivers/iio/light/st_uvis25_core.c +++ b/drivers/iio/light/st_uvis25_core.c @@ -227,8 +227,6 @@ static int st_uvis25_buffer_postdisable(struct iio_dev *iio_dev) static const struct iio_buffer_setup_ops st_uvis25_buffer_ops = { .preenable = st_uvis25_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, - .predisable = iio_triggered_buffer_predisable, .postdisable = st_uvis25_buffer_postdisable, }; diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c index bd6d3e4a0c4d..6fe5d46f80d4 100644 --- a/drivers/iio/light/tcs3414.c +++ b/drivers/iio/light/tcs3414.c @@ -243,35 +243,19 @@ static const struct iio_info tcs3414_info = { static int tcs3414_buffer_postenable(struct iio_dev *indio_dev) { struct tcs3414_data *data = iio_priv(indio_dev); - int ret; - - ret = iio_triggered_buffer_postenable(indio_dev); - if (ret) - return ret; data->control |= TCS3414_CONTROL_ADC_EN; - ret = i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL, + return i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL, data->control); - if (ret) - iio_triggered_buffer_predisable(indio_dev); - - return ret; } static int tcs3414_buffer_predisable(struct iio_dev *indio_dev) { struct tcs3414_data *data = iio_priv(indio_dev); - int ret, ret2; data->control &= ~TCS3414_CONTROL_ADC_EN; - ret = i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL, + return i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL, data->control); - - ret2 = iio_triggered_buffer_predisable(indio_dev); - if (!ret) - ret = ret2; - - return ret; } static const struct iio_buffer_setup_ops tcs3414_buffer_setup_ops = { diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c index ac1ab715d4dd..fff4b36b8b58 100644 --- a/drivers/iio/light/vcnl4000.c +++ b/drivers/iio/light/vcnl4000.c @@ -957,50 +957,29 @@ static int vcnl4010_buffer_postenable(struct iio_dev *indio_dev) int ret; int cmd; - ret = iio_triggered_buffer_postenable(indio_dev); - if (ret) - return ret; - /* Do not enable the buffer if we are already capturing events. 
*/ - if (vcnl4010_is_in_periodic_mode(data)) { - ret = -EBUSY; - goto end; - } + if (vcnl4010_is_in_periodic_mode(data)) + return -EBUSY; ret = i2c_smbus_write_byte_data(data->client, VCNL4010_INT_CTRL, VCNL4010_INT_PROX_EN); if (ret < 0) - goto end; + return ret; cmd = VCNL4000_SELF_TIMED_EN | VCNL4000_PROX_EN; - ret = i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND, cmd); - if (ret < 0) - goto end; - - return 0; -end: - iio_triggered_buffer_predisable(indio_dev); - - return ret; + return i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND, cmd); } static int vcnl4010_buffer_predisable(struct iio_dev *indio_dev) { struct vcnl4000_data *data = iio_priv(indio_dev); - int ret, ret_disable; + int ret; ret = i2c_smbus_write_byte_data(data->client, VCNL4010_INT_CTRL, 0); if (ret < 0) - goto end; - - ret = i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND, 0); - -end: - ret_disable = iio_triggered_buffer_predisable(indio_dev); - if (ret == 0) - ret = ret_disable; + return ret; - return ret; + return i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND, 0); } static const struct iio_buffer_setup_ops vcnl4010_buffer_ops = { diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c index 8fc52057837d..fc6840f9c1fa 100644 --- a/drivers/iio/magnetometer/bmc150_magn.c +++ b/drivers/iio/magnetometer/bmc150_magn.c @@ -836,8 +836,6 @@ static int bmc150_magn_buffer_postdisable(struct iio_dev *indio_dev) static const struct iio_buffer_setup_ops bmc150_magn_buffer_setup_ops = { .preenable = bmc150_magn_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, - .predisable = iio_triggered_buffer_predisable, .postdisable = bmc150_magn_buffer_postdisable, }; diff --git a/drivers/iio/magnetometer/rm3100-core.c b/drivers/iio/magnetometer/rm3100-core.c index a3e754943456..7242897a05e9 100644 --- a/drivers/iio/magnetometer/rm3100-core.c +++ b/drivers/iio/magnetometer/rm3100-core.c @@ -463,8 +463,6 @@ static int rm3100_buffer_postdisable(struct iio_dev *indio_dev) static const struct iio_buffer_setup_ops rm3100_buffer_ops = { .preenable = rm3100_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, - .predisable = iio_triggered_buffer_predisable, .postdisable = rm3100_buffer_postdisable, }; diff --git a/drivers/iio/magnetometer/st_magn_buffer.c b/drivers/iio/magnetometer/st_magn_buffer.c index bb425c167a96..4917721fa2e5 100644 --- a/drivers/iio/magnetometer/st_magn_buffer.c +++ b/drivers/iio/magnetometer/st_magn_buffer.c @@ -31,34 +31,12 @@ int st_magn_trig_set_state(struct iio_trigger *trig, bool state) static int st_magn_buffer_postenable(struct iio_dev *indio_dev) { - int err; - - err = iio_triggered_buffer_postenable(indio_dev); - if (err < 0) - return err; - - err = st_sensors_set_enable(indio_dev, true); - if (err < 0) - goto st_magn_buffer_predisable; - - return 0; - -st_magn_buffer_predisable: - iio_triggered_buffer_predisable(indio_dev); - return err; + return st_sensors_set_enable(indio_dev, true); } static int st_magn_buffer_predisable(struct iio_dev *indio_dev) { - int err, err2; - - err = st_sensors_set_enable(indio_dev, false); - - err2 = iio_triggered_buffer_predisable(indio_dev); - if (!err) - err = err2; - - return err; + return st_sensors_set_enable(indio_dev, false); } static const struct iio_buffer_setup_ops st_magn_buffer_setup_ops = { diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c index 2d601889c8c0..67ae635a05f3 100644 --- a/drivers/iio/potentiostat/lmp91000.c +++ 
b/drivers/iio/potentiostat/lmp91000.c @@ -278,17 +278,8 @@ static const struct iio_trigger_ops lmp91000_trigger_ops = { static int lmp91000_buffer_postenable(struct iio_dev *indio_dev) { struct lmp91000_data *data = iio_priv(indio_dev); - int err; - err = iio_triggered_buffer_postenable(indio_dev); - if (err) - return err; - - err = iio_channel_start_all_cb(data->cb_buffer); - if (err) - iio_triggered_buffer_predisable(indio_dev); - - return err; + return iio_channel_start_all_cb(data->cb_buffer); } static int lmp91000_buffer_predisable(struct iio_dev *indio_dev) @@ -297,7 +288,7 @@ static int lmp91000_buffer_predisable(struct iio_dev *indio_dev) iio_channel_stop_all_cb(data->cb_buffer); - return iio_triggered_buffer_predisable(indio_dev); + return 0; } static const struct iio_buffer_setup_ops lmp91000_buffer_setup_ops = { diff --git a/drivers/iio/pressure/st_pressure_buffer.c b/drivers/iio/pressure/st_pressure_buffer.c index 418dbf9e6e1e..7cf6f06797e1 100644 --- a/drivers/iio/pressure/st_pressure_buffer.c +++ b/drivers/iio/pressure/st_pressure_buffer.c @@ -31,34 +31,12 @@ int st_press_trig_set_state(struct iio_trigger *trig, bool state) static int st_press_buffer_postenable(struct iio_dev *indio_dev) { - int err; - - err = iio_triggered_buffer_postenable(indio_dev); - if (err < 0) - return err; - - err = st_sensors_set_enable(indio_dev, true); - if (err < 0) - goto st_press_buffer_predisable; - - return 0; - -st_press_buffer_predisable: - iio_triggered_buffer_predisable(indio_dev); - return err; + return st_sensors_set_enable(indio_dev, true); } static int st_press_buffer_predisable(struct iio_dev *indio_dev) { - int err, err2; - - err = st_sensors_set_enable(indio_dev, false); - - err2 = iio_triggered_buffer_predisable(indio_dev); - if (!err) - err = err2; - - return err; + return st_sensors_set_enable(indio_dev, false); } static const struct iio_buffer_setup_ops st_press_buffer_setup_ops = { diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c index ef818f2aebd6..2b8796ef91ef 100644 --- a/drivers/iio/pressure/zpa2326.c +++ b/drivers/iio/pressure/zpa2326.c @@ -1242,19 +1242,17 @@ static int zpa2326_postenable_buffer(struct iio_dev *indio_dev) const struct zpa2326_private *priv = iio_priv(indio_dev); int err; - /* Plug our own trigger event handler. */ - err = iio_triggered_buffer_postenable(indio_dev); - if (err) - goto err; - if (!priv->waken) { /* * We were already power supplied. Just clear hardware FIFO to * get rid of samples acquired during previous rounds (if any). */ err = zpa2326_clear_fifo(indio_dev, 0); - if (err) - goto err_buffer_predisable; + if (err) { + zpa2326_err(indio_dev, + "failed to enable buffering (%d)", err); + return err; + } } if (!iio_trigger_using_own(indio_dev) && priv->waken) { @@ -1263,18 +1261,14 @@ static int zpa2326_postenable_buffer(struct iio_dev *indio_dev) * powered up: reconfigure one-shot mode. 
*/ err = zpa2326_config_oneshot(indio_dev, priv->irq); - if (err) - goto err_buffer_predisable; + if (err) { + zpa2326_err(indio_dev, + "failed to enable buffering (%d)", err); + return err; + } } return 0; - -err_buffer_predisable: - iio_triggered_buffer_predisable(indio_dev); -err: - zpa2326_err(indio_dev, "failed to enable buffering (%d)", err); - - return err; } static int zpa2326_postdisable_buffer(struct iio_dev *indio_dev) @@ -1287,7 +1281,6 @@ static int zpa2326_postdisable_buffer(struct iio_dev *indio_dev) static const struct iio_buffer_setup_ops zpa2326_buffer_setup_ops = { .preenable = zpa2326_preenable_buffer, .postenable = zpa2326_postenable_buffer, - .predisable = iio_triggered_buffer_predisable, .postdisable = zpa2326_postdisable_buffer }; diff --git a/drivers/iio/proximity/sx9310.c b/drivers/iio/proximity/sx9310.c index 08c6100e2c94..dc2e11b43431 100644 --- a/drivers/iio/proximity/sx9310.c +++ b/drivers/iio/proximity/sx9310.c @@ -736,8 +736,6 @@ static int sx9310_buffer_postdisable(struct iio_dev *indio_dev) static const struct iio_buffer_setup_ops sx9310_buffer_setup_ops = { .preenable = sx9310_buffer_preenable, - .postenable = iio_triggered_buffer_postenable, - .predisable = iio_triggered_buffer_predisable, .postdisable = sx9310_buffer_postdisable, }; diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c index f6eec54faef4..acb821cbad46 100644 --- a/drivers/iio/proximity/sx9500.c +++ b/drivers/iio/proximity/sx9500.c @@ -680,10 +680,6 @@ static int sx9500_buffer_postenable(struct iio_dev *indio_dev) struct sx9500_data *data = iio_priv(indio_dev); int ret = 0, i; - ret = iio_triggered_buffer_postenable(indio_dev); - if (ret) - return ret; - mutex_lock(&data->mutex); for (i = 0; i < SX9500_NUM_CHANNELS; i++) @@ -700,9 +696,6 @@ static int sx9500_buffer_postenable(struct iio_dev *indio_dev) mutex_unlock(&data->mutex); - if (ret) - iio_triggered_buffer_predisable(indio_dev); - return ret; } @@ -727,8 +720,6 @@ static int sx9500_buffer_predisable(struct iio_dev *indio_dev) mutex_unlock(&data->mutex); - iio_triggered_buffer_predisable(indio_dev); - return ret; } diff --git a/include/linux/iio/trigger_consumer.h b/include/linux/iio/trigger_consumer.h index c3c6ba5ec423..3aa2f132dd67 100644 --- a/include/linux/iio/trigger_consumer.h +++ b/include/linux/iio/trigger_consumer.h @@ -50,11 +50,4 @@ irqreturn_t iio_pollfunc_store_time(int irq, void *p); void iio_trigger_notify_done(struct iio_trigger *trig); -/* - * Two functions for common case where all that happens is a pollfunc - * is attached and detached from a trigger - */ -int iio_triggered_buffer_postenable(struct iio_dev *indio_dev); -int iio_triggered_buffer_predisable(struct iio_dev *indio_dev); - #endif -- cgit v1.2.3 From b3337eb24831db058231ea87838f316d9eb86253 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 15 Jun 2020 18:05:41 +0300 Subject: gpiolib: Introduce for_each_requested_gpio_in_range() macro Introduce for_each_requested_gpio_in_range() macro which helps to iterate over requested GPIO in a range. There are already potential users of it, which are going to be converted by the following patches. For most of them for_each_requested_gpio() shortcut has been added. 
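For illustration only (this snippet is not part of the patch and the dump helper name is hypothetical), a consumer of the new macro could walk every requested line on a chip roughly as follows, using only gpiochip_is_requested() indirectly through the shortcut:

	#include <linux/gpio/driver.h>
	#include <linux/printk.h>

	/* Hypothetical debug helper: print every requested line of @chip. */
	static void dump_requested_gpios(struct gpio_chip *chip)
	{
		const char *label;
		unsigned int i;

		/* Iterates offsets 0..ngpio-1, skipping lines that are not requested. */
		for_each_requested_gpio(chip, i, label)
			pr_info("%s: offset %u requested as \"%s\"\n",
				chip->label, i, label);
	}
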
Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20200615150545.87964-2-andriy.shevchenko@linux.intel.com Signed-off-by: Linus Walleij --- include/linux/gpio/driver.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'include') diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index c4f272af7af5..11cdcb195635 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -474,6 +474,22 @@ struct gpio_chip { extern const char *gpiochip_is_requested(struct gpio_chip *gc, unsigned int offset); +/** + * for_each_requested_gpio_in_range - iterates over requested GPIOs in a given range + * @chip: the chip to query + * @i: loop variable + * @base: first GPIO in the range + * @size: amount of GPIOs to check starting from @base + * @label: label of current GPIO + */ +#define for_each_requested_gpio_in_range(chip, i, base, size, label) \ + for (i = 0; i < size; i++) \ + if ((label = gpiochip_is_requested(chip, base + i)) == NULL) {} else + +/* Iterates over all requested GPIO of the given @chip */ +#define for_each_requested_gpio(chip, i, label) \ + for_each_requested_gpio_in_range(chip, i, 0, chip->ngpio, label) + /* add/remove chips */ extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, struct lock_class_key *lock_key, -- cgit v1.2.3 From 49042c220b3a31e25902b36df71b23dc10efa0b8 Mon Sep 17 00:00:00 2001 From: Andrea Mayer Date: Sat, 20 Jun 2020 00:54:43 +0200 Subject: l3mdev: add infrastructure for table to VRF mapping Add infrastructure to l3mdev (the core code for Layer 3 master devices) in order to find out the corresponding VRF device for a given table id. Therefore, the l3mdev implementations: - can register a callback that returns the device index of the l3mdev associated with a given table id; - can offer the lookup function (table to VRF device). Signed-off-by: Andrea Mayer Signed-off-by: David S. 
Miller --- include/net/l3mdev.h | 39 ++++++++++++++++++++++ net/l3mdev/l3mdev.c | 93 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 132 insertions(+) (limited to 'include') diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h index e942372b077b..031c661aa14d 100644 --- a/include/net/l3mdev.h +++ b/include/net/l3mdev.h @@ -10,6 +10,16 @@ #include #include +enum l3mdev_type { + L3MDEV_TYPE_UNSPEC, + L3MDEV_TYPE_VRF, + __L3MDEV_TYPE_MAX +}; + +#define L3MDEV_TYPE_MAX (__L3MDEV_TYPE_MAX - 1) + +typedef int (*lookup_by_table_id_t)(struct net *net, u32 table_d); + /** * struct l3mdev_ops - l3mdev operations * @@ -37,6 +47,15 @@ struct l3mdev_ops { #ifdef CONFIG_NET_L3_MASTER_DEV +int l3mdev_table_lookup_register(enum l3mdev_type l3type, + lookup_by_table_id_t fn); + +void l3mdev_table_lookup_unregister(enum l3mdev_type l3type, + lookup_by_table_id_t fn); + +int l3mdev_ifindex_lookup_by_table_id(enum l3mdev_type l3type, struct net *net, + u32 table_id); + int l3mdev_fib_rule_match(struct net *net, struct flowi *fl, struct fib_lookup_arg *arg); @@ -280,6 +299,26 @@ struct sk_buff *l3mdev_ip6_out(struct sock *sk, struct sk_buff *skb) return skb; } +static inline +int l3mdev_table_lookup_register(enum l3mdev_type l3type, + lookup_by_table_id_t fn) +{ + return -EOPNOTSUPP; +} + +static inline +void l3mdev_table_lookup_unregister(enum l3mdev_type l3type, + lookup_by_table_id_t fn) +{ +} + +static inline +int l3mdev_ifindex_lookup_by_table_id(enum l3mdev_type l3type, struct net *net, + u32 table_id) +{ + return -ENODEV; +} + static inline int l3mdev_fib_rule_match(struct net *net, struct flowi *fl, struct fib_lookup_arg *arg) diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c index f35899d45a9a..e71ca5aec684 100644 --- a/net/l3mdev/l3mdev.c +++ b/net/l3mdev/l3mdev.c @@ -9,6 +9,99 @@ #include #include +static DEFINE_SPINLOCK(l3mdev_lock); + +struct l3mdev_handler { + lookup_by_table_id_t dev_lookup; +}; + +static struct l3mdev_handler l3mdev_handlers[L3MDEV_TYPE_MAX + 1]; + +static int l3mdev_check_type(enum l3mdev_type l3type) +{ + if (l3type <= L3MDEV_TYPE_UNSPEC || l3type > L3MDEV_TYPE_MAX) + return -EINVAL; + + return 0; +} + +int l3mdev_table_lookup_register(enum l3mdev_type l3type, + lookup_by_table_id_t fn) +{ + struct l3mdev_handler *hdlr; + int res; + + res = l3mdev_check_type(l3type); + if (res) + return res; + + hdlr = &l3mdev_handlers[l3type]; + + spin_lock(&l3mdev_lock); + + if (hdlr->dev_lookup) { + res = -EBUSY; + goto unlock; + } + + hdlr->dev_lookup = fn; + res = 0; + +unlock: + spin_unlock(&l3mdev_lock); + + return res; +} +EXPORT_SYMBOL_GPL(l3mdev_table_lookup_register); + +void l3mdev_table_lookup_unregister(enum l3mdev_type l3type, + lookup_by_table_id_t fn) +{ + struct l3mdev_handler *hdlr; + + if (l3mdev_check_type(l3type)) + return; + + hdlr = &l3mdev_handlers[l3type]; + + spin_lock(&l3mdev_lock); + + if (hdlr->dev_lookup == fn) + hdlr->dev_lookup = NULL; + + spin_unlock(&l3mdev_lock); +} +EXPORT_SYMBOL_GPL(l3mdev_table_lookup_unregister); + +int l3mdev_ifindex_lookup_by_table_id(enum l3mdev_type l3type, + struct net *net, u32 table_id) +{ + lookup_by_table_id_t lookup; + struct l3mdev_handler *hdlr; + int ifindex = -EINVAL; + int res; + + res = l3mdev_check_type(l3type); + if (res) + return res; + + hdlr = &l3mdev_handlers[l3type]; + + spin_lock(&l3mdev_lock); + + lookup = hdlr->dev_lookup; + if (!lookup) + goto unlock; + + ifindex = lookup(net, table_id); + +unlock: + spin_unlock(&l3mdev_lock); + + return ifindex; +} 
+EXPORT_SYMBOL_GPL(l3mdev_ifindex_lookup_by_table_id); + /** * l3mdev_master_ifindex - get index of L3 master device * @dev: targeted interface -- cgit v1.2.3 From aae4e500e106d2ce48d5bdb21210e36efc7460cb Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sat, 20 Jun 2020 18:43:46 +0300 Subject: net: mscc: ocelot: generalize the "ACE/ACL" names Access Control Lists (and their respective Access Control Entries) are specifically entries in the VCAP IS2, the security enforcement block, according to the documentation. Let's rename the structures and functions to something more generic, so that VCAP IS1 structures (which would otherwise have to be called Ingress Classification Entries) can reuse the same code without confusion. Some renaming that was done: struct ocelot_ace_rule -> struct ocelot_vcap_filter struct ocelot_acl_block -> struct ocelot_vcap_block enum ocelot_ace_type -> enum ocelot_vcap_key_type struct ocelot_ace_vlan -> struct ocelot_vcap_key_vlan enum ocelot_ace_action -> enum ocelot_vcap_action struct ocelot_ace_stats -> struct ocelot_vcap_stats enum ocelot_ace_type -> enum ocelot_vcap_key_type struct ocelot_ace_frame_* -> struct ocelot_vcap_key_* No functional change is intended. Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/ethernet/mscc/ocelot.c | 2 +- drivers/net/ethernet/mscc/ocelot_flower.c | 122 ++++++------- drivers/net/ethernet/mscc/ocelot_police.h | 6 +- drivers/net/ethernet/mscc/ocelot_vcap.c | 286 +++++++++++++++--------------- drivers/net/ethernet/mscc/ocelot_vcap.h | 88 ++++----- include/soc/mscc/ocelot.h | 4 +- 6 files changed, 257 insertions(+), 251 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index d4ad7ffe6f6e..52b180280d2f 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -1392,7 +1392,7 @@ int ocelot_init(struct ocelot *ocelot) INIT_LIST_HEAD(&ocelot->multicast); ocelot_mact_init(ocelot); ocelot_vlan_init(ocelot); - ocelot_ace_init(ocelot); + ocelot_vcap_init(ocelot); for (port = 0; port < ocelot->num_phys_ports; port++) { /* Clear all counters (5 groups) */ diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index d57d6948ebf2..f2a85b06a6e7 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -9,7 +9,7 @@ #include "ocelot_vcap.h" static int ocelot_flower_parse_action(struct flow_cls_offload *f, - struct ocelot_ace_rule *ace) + struct ocelot_vcap_filter *filter) { const struct flow_action_entry *a; s64 burst; @@ -26,17 +26,17 @@ static int ocelot_flower_parse_action(struct flow_cls_offload *f, flow_action_for_each(i, a, &f->rule->action) { switch (a->id) { case FLOW_ACTION_DROP: - ace->action = OCELOT_ACL_ACTION_DROP; + filter->action = OCELOT_VCAP_ACTION_DROP; break; case FLOW_ACTION_TRAP: - ace->action = OCELOT_ACL_ACTION_TRAP; + filter->action = OCELOT_VCAP_ACTION_TRAP; break; case FLOW_ACTION_POLICE: - ace->action = OCELOT_ACL_ACTION_POLICE; + filter->action = OCELOT_VCAP_ACTION_POLICE; rate = a->police.rate_bytes_ps; - ace->pol.rate = div_u64(rate, 1000) * 8; + filter->pol.rate = div_u64(rate, 1000) * 8; burst = rate * PSCHED_NS2TICKS(a->police.burst); - ace->pol.burst = div_u64(burst, PSCHED_TICKS_PER_SEC); + filter->pol.burst = div_u64(burst, PSCHED_TICKS_PER_SEC); break; default: return -EOPNOTSUPP; @@ -47,7 +47,7 @@ static int ocelot_flower_parse_action(struct flow_cls_offload *f, } static int 
ocelot_flower_parse(struct flow_cls_offload *f, - struct ocelot_ace_rule *ace) + struct ocelot_vcap_filter *filter) { struct flow_rule *rule = flow_cls_offload_flow_rule(f); struct flow_dissector *dissector = rule->match.dissector; @@ -88,14 +88,14 @@ static int ocelot_flower_parse(struct flow_cls_offload *f, return -EOPNOTSUPP; flow_rule_match_eth_addrs(rule, &match); - ace->type = OCELOT_ACE_TYPE_ETYPE; - ether_addr_copy(ace->frame.etype.dmac.value, + filter->key_type = OCELOT_VCAP_KEY_ETYPE; + ether_addr_copy(filter->key.etype.dmac.value, match.key->dst); - ether_addr_copy(ace->frame.etype.smac.value, + ether_addr_copy(filter->key.etype.smac.value, match.key->src); - ether_addr_copy(ace->frame.etype.dmac.mask, + ether_addr_copy(filter->key.etype.dmac.mask, match.mask->dst); - ether_addr_copy(ace->frame.etype.smac.mask, + ether_addr_copy(filter->key.etype.smac.mask, match.mask->src); goto finished_key_parsing; } @@ -105,18 +105,18 @@ static int ocelot_flower_parse(struct flow_cls_offload *f, flow_rule_match_basic(rule, &match); if (ntohs(match.key->n_proto) == ETH_P_IP) { - ace->type = OCELOT_ACE_TYPE_IPV4; - ace->frame.ipv4.proto.value[0] = + filter->key_type = OCELOT_VCAP_KEY_IPV4; + filter->key.ipv4.proto.value[0] = match.key->ip_proto; - ace->frame.ipv4.proto.mask[0] = + filter->key.ipv4.proto.mask[0] = match.mask->ip_proto; match_protocol = false; } if (ntohs(match.key->n_proto) == ETH_P_IPV6) { - ace->type = OCELOT_ACE_TYPE_IPV6; - ace->frame.ipv6.proto.value[0] = + filter->key_type = OCELOT_VCAP_KEY_IPV6; + filter->key.ipv6.proto.value[0] = match.key->ip_proto; - ace->frame.ipv6.proto.mask[0] = + filter->key.ipv6.proto.mask[0] = match.mask->ip_proto; match_protocol = false; } @@ -128,16 +128,16 @@ static int ocelot_flower_parse(struct flow_cls_offload *f, u8 *tmp; flow_rule_match_ipv4_addrs(rule, &match); - tmp = &ace->frame.ipv4.sip.value.addr[0]; + tmp = &filter->key.ipv4.sip.value.addr[0]; memcpy(tmp, &match.key->src, 4); - tmp = &ace->frame.ipv4.sip.mask.addr[0]; + tmp = &filter->key.ipv4.sip.mask.addr[0]; memcpy(tmp, &match.mask->src, 4); - tmp = &ace->frame.ipv4.dip.value.addr[0]; + tmp = &filter->key.ipv4.dip.value.addr[0]; memcpy(tmp, &match.key->dst, 4); - tmp = &ace->frame.ipv4.dip.mask.addr[0]; + tmp = &filter->key.ipv4.dip.mask.addr[0]; memcpy(tmp, &match.mask->dst, 4); match_protocol = false; } @@ -151,10 +151,10 @@ static int ocelot_flower_parse(struct flow_cls_offload *f, struct flow_match_ports match; flow_rule_match_ports(rule, &match); - ace->frame.ipv4.sport.value = ntohs(match.key->src); - ace->frame.ipv4.sport.mask = ntohs(match.mask->src); - ace->frame.ipv4.dport.value = ntohs(match.key->dst); - ace->frame.ipv4.dport.mask = ntohs(match.mask->dst); + filter->key.ipv4.sport.value = ntohs(match.key->src); + filter->key.ipv4.sport.mask = ntohs(match.mask->src); + filter->key.ipv4.dport.value = ntohs(match.key->dst); + filter->key.ipv4.dport.mask = ntohs(match.mask->dst); match_protocol = false; } @@ -162,11 +162,11 @@ static int ocelot_flower_parse(struct flow_cls_offload *f, struct flow_match_vlan match; flow_rule_match_vlan(rule, &match); - ace->type = OCELOT_ACE_TYPE_ANY; - ace->vlan.vid.value = match.key->vlan_id; - ace->vlan.vid.mask = match.mask->vlan_id; - ace->vlan.pcp.value[0] = match.key->vlan_priority; - ace->vlan.pcp.mask[0] = match.mask->vlan_priority; + filter->key_type = OCELOT_VCAP_KEY_ANY; + filter->vlan.vid.value = match.key->vlan_id; + filter->vlan.vid.mask = match.mask->vlan_id; + filter->vlan.pcp.value[0] = match.key->vlan_priority; + 
filter->vlan.pcp.mask[0] = match.mask->vlan_priority; match_protocol = false; } @@ -175,76 +175,76 @@ finished_key_parsing: /* TODO: support SNAP, LLC etc */ if (proto < ETH_P_802_3_MIN) return -EOPNOTSUPP; - ace->type = OCELOT_ACE_TYPE_ETYPE; - *(__be16 *)ace->frame.etype.etype.value = htons(proto); - *(__be16 *)ace->frame.etype.etype.mask = htons(0xffff); + filter->key_type = OCELOT_VCAP_KEY_ETYPE; + *(__be16 *)filter->key.etype.etype.value = htons(proto); + *(__be16 *)filter->key.etype.etype.mask = htons(0xffff); } - /* else, a rule of type OCELOT_ACE_TYPE_ANY is implicitly added */ + /* else, a filter of type OCELOT_VCAP_KEY_ANY is implicitly added */ - ace->prio = f->common.prio; - ace->id = f->cookie; - return ocelot_flower_parse_action(f, ace); + filter->prio = f->common.prio; + filter->id = f->cookie; + return ocelot_flower_parse_action(f, filter); } -static -struct ocelot_ace_rule *ocelot_ace_rule_create(struct ocelot *ocelot, int port, - struct flow_cls_offload *f) +static struct ocelot_vcap_filter +*ocelot_vcap_filter_create(struct ocelot *ocelot, int port, + struct flow_cls_offload *f) { - struct ocelot_ace_rule *ace; + struct ocelot_vcap_filter *filter; - ace = kzalloc(sizeof(*ace), GFP_KERNEL); - if (!ace) + filter = kzalloc(sizeof(*filter), GFP_KERNEL); + if (!filter) return NULL; - ace->ingress_port_mask = BIT(port); - return ace; + filter->ingress_port_mask = BIT(port); + return filter; } int ocelot_cls_flower_replace(struct ocelot *ocelot, int port, struct flow_cls_offload *f, bool ingress) { - struct ocelot_ace_rule *ace; + struct ocelot_vcap_filter *filter; int ret; - ace = ocelot_ace_rule_create(ocelot, port, f); - if (!ace) + filter = ocelot_vcap_filter_create(ocelot, port, f); + if (!filter) return -ENOMEM; - ret = ocelot_flower_parse(f, ace); + ret = ocelot_flower_parse(f, filter); if (ret) { - kfree(ace); + kfree(filter); return ret; } - return ocelot_ace_rule_offload_add(ocelot, ace, f->common.extack); + return ocelot_vcap_filter_add(ocelot, filter, f->common.extack); } EXPORT_SYMBOL_GPL(ocelot_cls_flower_replace); int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port, struct flow_cls_offload *f, bool ingress) { - struct ocelot_ace_rule ace; + struct ocelot_vcap_filter filter; - ace.prio = f->common.prio; - ace.id = f->cookie; + filter.prio = f->common.prio; + filter.id = f->cookie; - return ocelot_ace_rule_offload_del(ocelot, &ace); + return ocelot_vcap_filter_del(ocelot, &filter); } EXPORT_SYMBOL_GPL(ocelot_cls_flower_destroy); int ocelot_cls_flower_stats(struct ocelot *ocelot, int port, struct flow_cls_offload *f, bool ingress) { - struct ocelot_ace_rule ace; + struct ocelot_vcap_filter filter; int ret; - ace.prio = f->common.prio; - ace.id = f->cookie; - ret = ocelot_ace_rule_stats_update(ocelot, &ace); + filter.prio = f->common.prio; + filter.id = f->cookie; + ret = ocelot_vcap_filter_stats_update(ocelot, &filter); if (ret) return ret; - flow_stats_update(&f->stats, 0x0, ace.stats.pkts, 0, 0x0, + flow_stats_update(&f->stats, 0x0, filter.stats.pkts, 0, 0x0, FLOW_ACTION_HW_STATS_IMMEDIATE); return 0; } diff --git a/drivers/net/ethernet/mscc/ocelot_police.h b/drivers/net/ethernet/mscc/ocelot_police.h index 79d18442aa9b..be6f2286a5cd 100644 --- a/drivers/net/ethernet/mscc/ocelot_police.h +++ b/drivers/net/ethernet/mscc/ocelot_police.h @@ -33,9 +33,9 @@ struct qos_policer_conf { int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix, struct qos_policer_conf *conf); -int ocelot_ace_policer_add(struct ocelot *ocelot, u32 pol_ix, - struct 
ocelot_policer *pol); +int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix, + struct ocelot_policer *pol); -int ocelot_ace_policer_del(struct ocelot *ocelot, u32 pol_ix); +int ocelot_vcap_policer_del(struct ocelot *ocelot, u32 pol_ix); #endif /* _MSCC_OCELOT_POLICE_H_ */ diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c index 33b5b015e8a7..8597034fd3b7 100644 --- a/drivers/net/ethernet/mscc/ocelot_vcap.c +++ b/drivers/net/ethernet/mscc/ocelot_vcap.c @@ -302,10 +302,10 @@ static void vcap_action_set(struct ocelot *ocelot, struct vcap_data *data, } static void is2_action_set(struct ocelot *ocelot, struct vcap_data *data, - struct ocelot_ace_rule *ace) + struct ocelot_vcap_filter *filter) { - switch (ace->action) { - case OCELOT_ACL_ACTION_DROP: + switch (filter->action) { + case OCELOT_VCAP_ACTION_DROP: vcap_action_set(ocelot, data, VCAP_IS2_ACT_PORT_MASK, 0); vcap_action_set(ocelot, data, VCAP_IS2_ACT_MASK_MODE, 1); vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_ENA, 1); @@ -314,7 +314,7 @@ static void is2_action_set(struct ocelot *ocelot, struct vcap_data *data, vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_QU_NUM, 0); vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_COPY_ENA, 0); break; - case OCELOT_ACL_ACTION_TRAP: + case OCELOT_VCAP_ACTION_TRAP: vcap_action_set(ocelot, data, VCAP_IS2_ACT_PORT_MASK, 0); vcap_action_set(ocelot, data, VCAP_IS2_ACT_MASK_MODE, 1); vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_ENA, 0); @@ -322,12 +322,12 @@ static void is2_action_set(struct ocelot *ocelot, struct vcap_data *data, vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_QU_NUM, 0); vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_COPY_ENA, 1); break; - case OCELOT_ACL_ACTION_POLICE: + case OCELOT_VCAP_ACTION_POLICE: vcap_action_set(ocelot, data, VCAP_IS2_ACT_PORT_MASK, 0); vcap_action_set(ocelot, data, VCAP_IS2_ACT_MASK_MODE, 0); vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_ENA, 1); vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_IDX, - ace->pol_ix); + filter->pol_ix); vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_QU_NUM, 0); vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_COPY_ENA, 0); break; @@ -335,11 +335,11 @@ static void is2_action_set(struct ocelot *ocelot, struct vcap_data *data, } static void is2_entry_set(struct ocelot *ocelot, int ix, - struct ocelot_ace_rule *ace) + struct ocelot_vcap_filter *filter) { const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2]; + struct ocelot_vcap_key_vlan *tag = &filter->vlan; u32 val, msk, type, type_mask = 0xf, i, count; - struct ocelot_ace_vlan *tag = &ace->vlan; struct ocelot_vcap_u64 payload; struct vcap_data data; int row = (ix / 2); @@ -355,19 +355,19 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, data.tg_sw = VCAP_TG_HALF; is2_data_get(ocelot, &data, ix); data.tg = (data.tg & ~data.tg_mask); - if (ace->prio != 0) + if (filter->prio != 0) data.tg |= data.tg_value; data.type = IS2_ACTION_TYPE_NORMAL; vcap_key_set(ocelot, &data, VCAP_IS2_HK_PAG, 0, 0); vcap_key_set(ocelot, &data, VCAP_IS2_HK_IGR_PORT_MASK, 0, - ~ace->ingress_port_mask); + ~filter->ingress_port_mask); vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_FIRST, OCELOT_VCAP_BIT_1); vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_HOST_MATCH, OCELOT_VCAP_BIT_ANY); - vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L2_MC, ace->dmac_mc); - vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L2_BC, ace->dmac_bc); + vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L2_MC, filter->dmac_mc); + vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L2_BC, 
filter->dmac_bc); vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_VLAN_TAGGED, tag->tagged); vcap_key_set(ocelot, &data, VCAP_IS2_HK_VID, tag->vid.value, tag->vid.mask); @@ -375,9 +375,9 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, tag->pcp.value[0], tag->pcp.mask[0]); vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_DEI, tag->dei); - switch (ace->type) { - case OCELOT_ACE_TYPE_ETYPE: { - struct ocelot_ace_frame_etype *etype = &ace->frame.etype; + switch (filter->key_type) { + case OCELOT_VCAP_KEY_ETYPE: { + struct ocelot_vcap_key_etype *etype = &filter->key.etype; type = IS2_TYPE_ETYPE; vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_DMAC, @@ -398,8 +398,8 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, etype->data.value, etype->data.mask); break; } - case OCELOT_ACE_TYPE_LLC: { - struct ocelot_ace_frame_llc *llc = &ace->frame.llc; + case OCELOT_VCAP_KEY_LLC: { + struct ocelot_vcap_key_llc *llc = &filter->key.llc; type = IS2_TYPE_LLC; vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_DMAC, @@ -414,8 +414,8 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, payload.value, payload.mask); break; } - case OCELOT_ACE_TYPE_SNAP: { - struct ocelot_ace_frame_snap *snap = &ace->frame.snap; + case OCELOT_VCAP_KEY_SNAP: { + struct ocelot_vcap_key_snap *snap = &filter->key.snap; type = IS2_TYPE_SNAP; vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_DMAC, @@ -423,12 +423,12 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_SMAC, snap->smac.value, snap->smac.mask); vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_MAC_SNAP_L2_SNAP, - ace->frame.snap.snap.value, - ace->frame.snap.snap.mask); + filter->key.snap.snap.value, + filter->key.snap.snap.mask); break; } - case OCELOT_ACE_TYPE_ARP: { - struct ocelot_ace_frame_arp *arp = &ace->frame.arp; + case OCELOT_VCAP_KEY_ARP: { + struct ocelot_vcap_key_arp *arp = &filter->key.arp; type = IS2_TYPE_ARP; vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_MAC_ARP_SMAC, @@ -469,20 +469,20 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, 0, 0); break; } - case OCELOT_ACE_TYPE_IPV4: - case OCELOT_ACE_TYPE_IPV6: { + case OCELOT_VCAP_KEY_IPV4: + case OCELOT_VCAP_KEY_IPV6: { enum ocelot_vcap_bit sip_eq_dip, sport_eq_dport, seq_zero, tcp; enum ocelot_vcap_bit ttl, fragment, options, tcp_ack, tcp_urg; enum ocelot_vcap_bit tcp_fin, tcp_syn, tcp_rst, tcp_psh; - struct ocelot_ace_frame_ipv4 *ipv4 = NULL; - struct ocelot_ace_frame_ipv6 *ipv6 = NULL; + struct ocelot_vcap_key_ipv4 *ipv4 = NULL; + struct ocelot_vcap_key_ipv6 *ipv6 = NULL; struct ocelot_vcap_udp_tcp *sport, *dport; struct ocelot_vcap_ipv4 sip, dip; struct ocelot_vcap_u8 proto, ds; struct ocelot_vcap_u48 *ip_data; - if (ace->type == OCELOT_ACE_TYPE_IPV4) { - ipv4 = &ace->frame.ipv4; + if (filter->key_type == OCELOT_VCAP_KEY_IPV4) { + ipv4 = &filter->key.ipv4; ttl = ipv4->ttl; fragment = ipv4->fragment; options = ipv4->options; @@ -503,7 +503,7 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, sport_eq_dport = ipv4->sport_eq_dport; seq_zero = ipv4->seq_zero; } else { - ipv6 = &ace->frame.ipv6; + ipv6 = &filter->key.ipv6; ttl = ipv6->ttl; fragment = OCELOT_VCAP_BIT_ANY; options = OCELOT_VCAP_BIT_ANY; @@ -607,7 +607,7 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, } break; } - case OCELOT_ACE_TYPE_ANY: + case OCELOT_VCAP_KEY_ANY: default: type = 0; type_mask = 0; @@ -623,9 +623,9 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, } vcap_key_set(ocelot, &data, VCAP_IS2_TYPE, type, type_mask); - 
is2_action_set(ocelot, &data, ace); + is2_action_set(ocelot, &data, filter); vcap_data_set(data.counter, data.counter_offset, - vcap_is2->counter_width, ace->stats.pkts); + vcap_is2->counter_width, filter->stats.pkts); /* Write row */ vcap_entry2cache(ocelot, &data); @@ -633,7 +633,7 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, vcap_row_cmd(ocelot, row, VCAP_CMD_WRITE, VCAP_SEL_ALL); } -static void is2_entry_get(struct ocelot *ocelot, struct ocelot_ace_rule *rule, +static void is2_entry_get(struct ocelot *ocelot, struct ocelot_vcap_filter *filter, int ix) { const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2]; @@ -648,55 +648,56 @@ static void is2_entry_get(struct ocelot *ocelot, struct ocelot_ace_rule *rule, cnt = vcap_data_get(data.counter, data.counter_offset, vcap_is2->counter_width); - rule->stats.pkts = cnt; + filter->stats.pkts = cnt; } -static void ocelot_ace_rule_add(struct ocelot *ocelot, - struct ocelot_acl_block *block, - struct ocelot_ace_rule *rule) +static void ocelot_vcap_filter_add_to_block(struct ocelot *ocelot, + struct ocelot_vcap_block *block, + struct ocelot_vcap_filter *filter) { - struct ocelot_ace_rule *tmp; + struct ocelot_vcap_filter *tmp; struct list_head *pos, *n; - if (rule->action == OCELOT_ACL_ACTION_POLICE) { + if (filter->action == OCELOT_VCAP_ACTION_POLICE) { block->pol_lpr--; - rule->pol_ix = block->pol_lpr; - ocelot_ace_policer_add(ocelot, rule->pol_ix, &rule->pol); + filter->pol_ix = block->pol_lpr; + ocelot_vcap_policer_add(ocelot, filter->pol_ix, &filter->pol); } block->count++; if (list_empty(&block->rules)) { - list_add(&rule->list, &block->rules); + list_add(&filter->list, &block->rules); return; } list_for_each_safe(pos, n, &block->rules) { - tmp = list_entry(pos, struct ocelot_ace_rule, list); - if (rule->prio < tmp->prio) + tmp = list_entry(pos, struct ocelot_vcap_filter, list); + if (filter->prio < tmp->prio) break; } - list_add(&rule->list, pos->prev); + list_add(&filter->list, pos->prev); } -static int ocelot_ace_rule_get_index_id(struct ocelot_acl_block *block, - struct ocelot_ace_rule *rule) +static int ocelot_vcap_block_get_filter_index(struct ocelot_vcap_block *block, + struct ocelot_vcap_filter *filter) { - struct ocelot_ace_rule *tmp; + struct ocelot_vcap_filter *tmp; int index = -1; list_for_each_entry(tmp, &block->rules, list) { ++index; - if (rule->id == tmp->id) + if (filter->id == tmp->id) break; } return index; } -static struct ocelot_ace_rule* -ocelot_ace_rule_get_rule_index(struct ocelot_acl_block *block, int index) +static struct ocelot_vcap_filter* +ocelot_vcap_block_find_filter(struct ocelot_vcap_block *block, + int index) { - struct ocelot_ace_rule *tmp; + struct ocelot_vcap_filter *tmp; int i = 0; list_for_each_entry(tmp, &block->rules, list) { @@ -739,15 +740,16 @@ static void ocelot_match_all_as_mac_etype(struct ocelot *ocelot, int port, ANA_PORT_VCAP_S2_CFG, port); } -static bool ocelot_ace_is_problematic_mac_etype(struct ocelot_ace_rule *ace) +static bool +ocelot_vcap_is_problematic_mac_etype(struct ocelot_vcap_filter *filter) { u16 proto, mask; - if (ace->type != OCELOT_ACE_TYPE_ETYPE) + if (filter->key_type != OCELOT_VCAP_KEY_ETYPE) return false; - proto = ntohs(*(__be16 *)ace->frame.etype.etype.value); - mask = ntohs(*(__be16 *)ace->frame.etype.etype.mask); + proto = ntohs(*(__be16 *)filter->key.etype.etype.value); + mask = ntohs(*(__be16 *)filter->key.etype.etype.mask); /* ETH_P_ALL match, so all protocols below are included */ if (mask == 0) @@ -762,49 +764,51 @@ static bool 
ocelot_ace_is_problematic_mac_etype(struct ocelot_ace_rule *ace) return false; } -static bool ocelot_ace_is_problematic_non_mac_etype(struct ocelot_ace_rule *ace) +static bool +ocelot_vcap_is_problematic_non_mac_etype(struct ocelot_vcap_filter *filter) { - if (ace->type == OCELOT_ACE_TYPE_SNAP) + if (filter->key_type == OCELOT_VCAP_KEY_SNAP) return true; - if (ace->type == OCELOT_ACE_TYPE_ARP) + if (filter->key_type == OCELOT_VCAP_KEY_ARP) return true; - if (ace->type == OCELOT_ACE_TYPE_IPV4) + if (filter->key_type == OCELOT_VCAP_KEY_IPV4) return true; - if (ace->type == OCELOT_ACE_TYPE_IPV6) + if (filter->key_type == OCELOT_VCAP_KEY_IPV6) return true; return false; } -static bool ocelot_exclusive_mac_etype_ace_rules(struct ocelot *ocelot, - struct ocelot_ace_rule *ace) +static bool +ocelot_exclusive_mac_etype_filter_rules(struct ocelot *ocelot, + struct ocelot_vcap_filter *filter) { - struct ocelot_acl_block *block = &ocelot->acl_block; - struct ocelot_ace_rule *tmp; + struct ocelot_vcap_block *block = &ocelot->block; + struct ocelot_vcap_filter *tmp; unsigned long port; int i; - if (ocelot_ace_is_problematic_mac_etype(ace)) { + if (ocelot_vcap_is_problematic_mac_etype(filter)) { /* Search for any non-MAC_ETYPE rules on the port */ for (i = 0; i < block->count; i++) { - tmp = ocelot_ace_rule_get_rule_index(block, i); - if (tmp->ingress_port_mask & ace->ingress_port_mask && - ocelot_ace_is_problematic_non_mac_etype(tmp)) + tmp = ocelot_vcap_block_find_filter(block, i); + if (tmp->ingress_port_mask & filter->ingress_port_mask && + ocelot_vcap_is_problematic_non_mac_etype(tmp)) return false; } - for_each_set_bit(port, &ace->ingress_port_mask, + for_each_set_bit(port, &filter->ingress_port_mask, ocelot->num_phys_ports) ocelot_match_all_as_mac_etype(ocelot, port, true); - } else if (ocelot_ace_is_problematic_non_mac_etype(ace)) { + } else if (ocelot_vcap_is_problematic_non_mac_etype(filter)) { /* Search for any MAC_ETYPE rules on the port */ for (i = 0; i < block->count; i++) { - tmp = ocelot_ace_rule_get_rule_index(block, i); - if (tmp->ingress_port_mask & ace->ingress_port_mask && - ocelot_ace_is_problematic_mac_etype(tmp)) + tmp = ocelot_vcap_block_find_filter(block, i); + if (tmp->ingress_port_mask & filter->ingress_port_mask && + ocelot_vcap_is_problematic_mac_etype(tmp)) return false; } - for_each_set_bit(port, &ace->ingress_port_mask, + for_each_set_bit(port, &filter->ingress_port_mask, ocelot->num_phys_ports) ocelot_match_all_as_mac_etype(ocelot, port, false); } @@ -812,39 +816,40 @@ static bool ocelot_exclusive_mac_etype_ace_rules(struct ocelot *ocelot, return true; } -int ocelot_ace_rule_offload_add(struct ocelot *ocelot, - struct ocelot_ace_rule *rule, - struct netlink_ext_ack *extack) +int ocelot_vcap_filter_add(struct ocelot *ocelot, + struct ocelot_vcap_filter *filter, + struct netlink_ext_ack *extack) { - struct ocelot_acl_block *block = &ocelot->acl_block; - struct ocelot_ace_rule *ace; + struct ocelot_vcap_block *block = &ocelot->block; int i, index; - if (!ocelot_exclusive_mac_etype_ace_rules(ocelot, rule)) { + if (!ocelot_exclusive_mac_etype_filter_rules(ocelot, filter)) { NL_SET_ERR_MSG_MOD(extack, "Cannot mix MAC_ETYPE with non-MAC_ETYPE rules"); return -EBUSY; } - /* Add rule to the linked list */ - ocelot_ace_rule_add(ocelot, block, rule); + /* Add filter to the linked list */ + ocelot_vcap_filter_add_to_block(ocelot, block, filter); - /* Get the index of the inserted rule */ - index = ocelot_ace_rule_get_index_id(block, rule); + /* Get the index of the inserted filter 
*/ + index = ocelot_vcap_block_get_filter_index(block, filter); - /* Move down the rules to make place for the new rule */ + /* Move down the rules to make place for the new filter */ for (i = block->count - 1; i > index; i--) { - ace = ocelot_ace_rule_get_rule_index(block, i); - is2_entry_set(ocelot, i, ace); + struct ocelot_vcap_filter *tmp; + + tmp = ocelot_vcap_block_find_filter(block, i); + is2_entry_set(ocelot, i, tmp); } - /* Now insert the new rule */ - is2_entry_set(ocelot, index, rule); + /* Now insert the new filter */ + is2_entry_set(ocelot, index, filter); return 0; } -int ocelot_ace_policer_add(struct ocelot *ocelot, u32 pol_ix, - struct ocelot_policer *pol) +int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix, + struct ocelot_policer *pol) { struct qos_policer_conf pp = { 0 }; @@ -858,7 +863,7 @@ int ocelot_ace_policer_add(struct ocelot *ocelot, u32 pol_ix, return qos_policer_conf_set(ocelot, 0, pol_ix, &pp); } -int ocelot_ace_policer_del(struct ocelot *ocelot, u32 pol_ix) +int ocelot_vcap_policer_del(struct ocelot *ocelot, u32 pol_ix) { struct qos_policer_conf pp = { 0 }; @@ -867,44 +872,44 @@ int ocelot_ace_policer_del(struct ocelot *ocelot, u32 pol_ix) return qos_policer_conf_set(ocelot, 0, pol_ix, &pp); } -static void ocelot_ace_police_del(struct ocelot *ocelot, - struct ocelot_acl_block *block, - u32 ix) +static void ocelot_vcap_police_del(struct ocelot *ocelot, + struct ocelot_vcap_block *block, + u32 ix) { - struct ocelot_ace_rule *ace; + struct ocelot_vcap_filter *filter; int index = -1; if (ix < block->pol_lpr) return; - list_for_each_entry(ace, &block->rules, list) { + list_for_each_entry(filter, &block->rules, list) { index++; - if (ace->action == OCELOT_ACL_ACTION_POLICE && - ace->pol_ix < ix) { - ace->pol_ix += 1; - ocelot_ace_policer_add(ocelot, ace->pol_ix, - &ace->pol); - is2_entry_set(ocelot, index, ace); + if (filter->action == OCELOT_VCAP_ACTION_POLICE && + filter->pol_ix < ix) { + filter->pol_ix += 1; + ocelot_vcap_policer_add(ocelot, filter->pol_ix, + &filter->pol); + is2_entry_set(ocelot, index, filter); } } - ocelot_ace_policer_del(ocelot, block->pol_lpr); + ocelot_vcap_policer_del(ocelot, block->pol_lpr); block->pol_lpr++; } -static void ocelot_ace_rule_del(struct ocelot *ocelot, - struct ocelot_acl_block *block, - struct ocelot_ace_rule *rule) +static void ocelot_vcap_block_remove_filter(struct ocelot *ocelot, + struct ocelot_vcap_block *block, + struct ocelot_vcap_filter *filter) { - struct ocelot_ace_rule *tmp; + struct ocelot_vcap_filter *tmp; struct list_head *pos, *q; list_for_each_safe(pos, q, &block->rules) { - tmp = list_entry(pos, struct ocelot_ace_rule, list); - if (tmp->id == rule->id) { - if (tmp->action == OCELOT_ACL_ACTION_POLICE) - ocelot_ace_police_del(ocelot, block, - tmp->pol_ix); + tmp = list_entry(pos, struct ocelot_vcap_filter, list); + if (tmp->id == filter->id) { + if (tmp->action == OCELOT_VCAP_ACTION_POLICE) + ocelot_vcap_police_del(ocelot, block, + tmp->pol_ix); list_del(pos); kfree(tmp); @@ -914,56 +919,57 @@ static void ocelot_ace_rule_del(struct ocelot *ocelot, block->count--; } -int ocelot_ace_rule_offload_del(struct ocelot *ocelot, - struct ocelot_ace_rule *rule) +int ocelot_vcap_filter_del(struct ocelot *ocelot, + struct ocelot_vcap_filter *filter) { - struct ocelot_acl_block *block = &ocelot->acl_block; - struct ocelot_ace_rule del_ace; - struct ocelot_ace_rule *ace; + struct ocelot_vcap_block *block = &ocelot->block; + struct ocelot_vcap_filter del_filter; int i, index; - memset(&del_ace, 0, 
sizeof(del_ace)); + memset(&del_filter, 0, sizeof(del_filter)); - /* Gets index of the rule */ - index = ocelot_ace_rule_get_index_id(block, rule); + /* Gets index of the filter */ + index = ocelot_vcap_block_get_filter_index(block, filter); - /* Delete rule */ - ocelot_ace_rule_del(ocelot, block, rule); + /* Delete filter */ + ocelot_vcap_block_remove_filter(ocelot, block, filter); - /* Move up all the blocks over the deleted rule */ + /* Move up all the blocks over the deleted filter */ for (i = index; i < block->count; i++) { - ace = ocelot_ace_rule_get_rule_index(block, i); - is2_entry_set(ocelot, i, ace); + struct ocelot_vcap_filter *tmp; + + tmp = ocelot_vcap_block_find_filter(block, i); + is2_entry_set(ocelot, i, tmp); } - /* Now delete the last rule, because it is duplicated */ - is2_entry_set(ocelot, block->count, &del_ace); + /* Now delete the last filter, because it is duplicated */ + is2_entry_set(ocelot, block->count, &del_filter); return 0; } -int ocelot_ace_rule_stats_update(struct ocelot *ocelot, - struct ocelot_ace_rule *rule) +int ocelot_vcap_filter_stats_update(struct ocelot *ocelot, + struct ocelot_vcap_filter *filter) { - struct ocelot_acl_block *block = &ocelot->acl_block; - struct ocelot_ace_rule *tmp; + struct ocelot_vcap_block *block = &ocelot->block; + struct ocelot_vcap_filter *tmp; int index; - index = ocelot_ace_rule_get_index_id(block, rule); - is2_entry_get(ocelot, rule, index); + index = ocelot_vcap_block_get_filter_index(block, filter); + is2_entry_get(ocelot, filter, index); /* After we get the result we need to clear the counters */ - tmp = ocelot_ace_rule_get_rule_index(block, index); + tmp = ocelot_vcap_block_find_filter(block, index); tmp->stats.pkts = 0; is2_entry_set(ocelot, index, tmp); return 0; } -int ocelot_ace_init(struct ocelot *ocelot) +int ocelot_vcap_init(struct ocelot *ocelot) { const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2]; - struct ocelot_acl_block *block = &ocelot->acl_block; + struct ocelot_vcap_block *block = &ocelot->block; struct vcap_data data; memset(&data, 0, sizeof(data)); @@ -994,7 +1000,7 @@ int ocelot_ace_init(struct ocelot *ocelot) block->pol_lpr = OCELOT_POLICER_DISCARD - 1; - INIT_LIST_HEAD(&ocelot->acl_block.rules); + INIT_LIST_HEAD(&ocelot->block.rules); return 0; } diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.h b/drivers/net/ethernet/mscc/ocelot_vcap.h index 099e177f2617..0dfbfc011b2e 100644 --- a/drivers/net/ethernet/mscc/ocelot_vcap.h +++ b/drivers/net/ethernet/mscc/ocelot_vcap.h @@ -3,8 +3,8 @@ * Copyright (c) 2019 Microsemi Corporation */ -#ifndef _MSCC_OCELOT_ACE_H_ -#define _MSCC_OCELOT_ACE_H_ +#ifndef _MSCC_OCELOT_VCAP_H_ +#define _MSCC_OCELOT_VCAP_H_ #include "ocelot.h" #include "ocelot_police.h" @@ -76,31 +76,31 @@ struct ocelot_vcap_udp_tcp { u16 mask; }; -enum ocelot_ace_type { - OCELOT_ACE_TYPE_ANY, - OCELOT_ACE_TYPE_ETYPE, - OCELOT_ACE_TYPE_LLC, - OCELOT_ACE_TYPE_SNAP, - OCELOT_ACE_TYPE_ARP, - OCELOT_ACE_TYPE_IPV4, - OCELOT_ACE_TYPE_IPV6 +enum ocelot_vcap_key_type { + OCELOT_VCAP_KEY_ANY, + OCELOT_VCAP_KEY_ETYPE, + OCELOT_VCAP_KEY_LLC, + OCELOT_VCAP_KEY_SNAP, + OCELOT_VCAP_KEY_ARP, + OCELOT_VCAP_KEY_IPV4, + OCELOT_VCAP_KEY_IPV6 }; -struct ocelot_ace_vlan { +struct ocelot_vcap_key_vlan { struct ocelot_vcap_vid vid; /* VLAN ID (12 bit) */ struct ocelot_vcap_u8 pcp; /* PCP (3 bit) */ enum ocelot_vcap_bit dei; /* DEI */ enum ocelot_vcap_bit tagged; /* Tagged/untagged frame */ }; -struct ocelot_ace_frame_etype { +struct ocelot_vcap_key_etype { struct ocelot_vcap_u48 dmac; struct 
ocelot_vcap_u48 smac; struct ocelot_vcap_u16 etype; struct ocelot_vcap_u16 data; /* MAC data */ }; -struct ocelot_ace_frame_llc { +struct ocelot_vcap_key_llc { struct ocelot_vcap_u48 dmac; struct ocelot_vcap_u48 smac; @@ -108,7 +108,7 @@ struct ocelot_ace_frame_llc { struct ocelot_vcap_u32 llc; }; -struct ocelot_ace_frame_snap { +struct ocelot_vcap_key_snap { struct ocelot_vcap_u48 dmac; struct ocelot_vcap_u48 smac; @@ -116,7 +116,7 @@ struct ocelot_ace_frame_snap { struct ocelot_vcap_u40 snap; }; -struct ocelot_ace_frame_arp { +struct ocelot_vcap_key_arp { struct ocelot_vcap_u48 smac; enum ocelot_vcap_bit arp; /* Opcode ARP/RARP */ enum ocelot_vcap_bit req; /* Opcode request/reply */ @@ -133,7 +133,7 @@ struct ocelot_ace_frame_arp { struct ocelot_vcap_ipv4 dip; /* Target IP address */ }; -struct ocelot_ace_frame_ipv4 { +struct ocelot_vcap_key_ipv4 { enum ocelot_vcap_bit ttl; /* TTL zero */ enum ocelot_vcap_bit fragment; /* Fragment */ enum ocelot_vcap_bit options; /* Header options */ @@ -155,7 +155,7 @@ struct ocelot_ace_frame_ipv4 { enum ocelot_vcap_bit seq_zero; /* TCP sequence number is zero */ }; -struct ocelot_ace_frame_ipv6 { +struct ocelot_vcap_key_ipv6 { struct ocelot_vcap_u8 proto; /* IPv6 protocol */ struct ocelot_vcap_u128 sip; /* IPv6 source (byte 0-7 ignored) */ enum ocelot_vcap_bit ttl; /* TTL zero */ @@ -174,58 +174,58 @@ struct ocelot_ace_frame_ipv6 { enum ocelot_vcap_bit seq_zero; /* TCP sequence number is zero */ }; -enum ocelot_ace_action { - OCELOT_ACL_ACTION_DROP, - OCELOT_ACL_ACTION_TRAP, - OCELOT_ACL_ACTION_POLICE, +enum ocelot_vcap_action { + OCELOT_VCAP_ACTION_DROP, + OCELOT_VCAP_ACTION_TRAP, + OCELOT_VCAP_ACTION_POLICE, }; -struct ocelot_ace_stats { +struct ocelot_vcap_stats { u64 bytes; u64 pkts; u64 used; }; -struct ocelot_ace_rule { +struct ocelot_vcap_filter { struct list_head list; u16 prio; u32 id; - enum ocelot_ace_action action; - struct ocelot_ace_stats stats; + enum ocelot_vcap_action action; + struct ocelot_vcap_stats stats; unsigned long ingress_port_mask; enum ocelot_vcap_bit dmac_mc; enum ocelot_vcap_bit dmac_bc; - struct ocelot_ace_vlan vlan; + struct ocelot_vcap_key_vlan vlan; - enum ocelot_ace_type type; + enum ocelot_vcap_key_type key_type; union { - /* ocelot_ACE_TYPE_ANY: No specific fields */ - struct ocelot_ace_frame_etype etype; - struct ocelot_ace_frame_llc llc; - struct ocelot_ace_frame_snap snap; - struct ocelot_ace_frame_arp arp; - struct ocelot_ace_frame_ipv4 ipv4; - struct ocelot_ace_frame_ipv6 ipv6; - } frame; + /* OCELOT_VCAP_KEY_ANY: No specific fields */ + struct ocelot_vcap_key_etype etype; + struct ocelot_vcap_key_llc llc; + struct ocelot_vcap_key_snap snap; + struct ocelot_vcap_key_arp arp; + struct ocelot_vcap_key_ipv4 ipv4; + struct ocelot_vcap_key_ipv6 ipv6; + } key; struct ocelot_policer pol; u32 pol_ix; }; -int ocelot_ace_rule_offload_add(struct ocelot *ocelot, - struct ocelot_ace_rule *rule, - struct netlink_ext_ack *extack); -int ocelot_ace_rule_offload_del(struct ocelot *ocelot, - struct ocelot_ace_rule *rule); -int ocelot_ace_rule_stats_update(struct ocelot *ocelot, - struct ocelot_ace_rule *rule); +int ocelot_vcap_filter_add(struct ocelot *ocelot, + struct ocelot_vcap_filter *rule, + struct netlink_ext_ack *extack); +int ocelot_vcap_filter_del(struct ocelot *ocelot, + struct ocelot_vcap_filter *rule); +int ocelot_vcap_filter_stats_update(struct ocelot *ocelot, + struct ocelot_vcap_filter *rule); -int ocelot_ace_init(struct ocelot *ocelot); +int ocelot_vcap_init(struct ocelot *ocelot); int 
ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv, struct flow_cls_offload *f, bool ingress); -#endif /* _MSCC_OCELOT_ACE_H_ */ +#endif /* _MSCC_OCELOT_VCAP_H_ */ diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 4953e9994df3..fa2c3904049e 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -470,7 +470,7 @@ struct ocelot_ops { int (*reset)(struct ocelot *ocelot); }; -struct ocelot_acl_block { +struct ocelot_vcap_block { struct list_head rules; int count; int pol_lpr; @@ -535,7 +535,7 @@ struct ocelot { struct list_head multicast; - struct ocelot_acl_block acl_block; + struct ocelot_vcap_block block; const struct vcap_field *vcap_is2_keys; const struct vcap_field *vcap_is2_actions; -- cgit v1.2.3 From 78e57f152c001eed0321ba4413a07c9e33e753e6 Mon Sep 17 00:00:00 2001 From: Amritha Nambiar Date: Thu, 18 Jun 2020 14:22:15 -0700 Subject: net: Avoid overwriting valid skb->napi_id This will be useful to allow busy poll for tunneled traffic. In case of busy poll for sessions over tunnels, the underlying physical device's queues need to be polled. Tunnels schedule NAPI either via netif_rx() for backlog queue or schedule the gro_cell_poll(). netif_rx() propagates the valid skb->napi_id to the socket. OTOH, gro_cell_poll() stamps the skb->napi_id again by calling skb_mark_napi_id() with the tunnel NAPI which is not a busy poll candidate. This was preventing tunneled traffic to use busy poll. A valid NAPI ID in the skb indicates it was already marked for busy poll by a NAPI driver and hence needs to be copied into the socket. Signed-off-by: Amritha Nambiar Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/busy_poll.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index 86e028388bad..b001fa91c14e 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -114,7 +114,11 @@ static inline void skb_mark_napi_id(struct sk_buff *skb, struct napi_struct *napi) { #ifdef CONFIG_NET_RX_BUSY_POLL - skb->napi_id = napi->napi_id; + /* If the skb was already marked with a valid NAPI ID, avoid overwriting + * it. + */ + if (skb->napi_id < MIN_NAPI_ID) + skb->napi_id = napi->napi_id; #endif } -- cgit v1.2.3 From 05e22e8395058745bd0312bc488b522197852aff Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 19 Jun 2020 12:12:34 -0700 Subject: tcp: remove indirect calls for icsk->icsk_af_ops->queue_xmit Mitigate RETPOLINE costs in __tcp_transmit_skb() by using INDIRECT_CALL_INET() wrapper. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/ip.h | 6 +----- include/net/tcp.h | 1 + net/ipv4/ip_output.c | 6 ++++++ net/ipv4/tcp_output.c | 7 ++++++- 4 files changed, 14 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/net/ip.h b/include/net/ip.h index 04ebe7bf54c6..862c9545833a 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -231,11 +231,7 @@ struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4, struct ipcm_cookie *ipc, struct rtable **rtp, struct inet_cork *cork, unsigned int flags); -static inline int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, - struct flowi *fl) -{ - return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos); -} +int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl); static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4) { diff --git a/include/net/tcp.h b/include/net/tcp.h index 4de9485f73d9..e5d7e0b09924 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -25,6 +25,7 @@ #include #include #include +#include #include #include diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 090d3097ee15..d946356187ed 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -539,6 +539,12 @@ no_route: } EXPORT_SYMBOL(__ip_queue_xmit); +int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl) +{ + return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos); +} +EXPORT_SYMBOL(ip_queue_xmit); + static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) { to->pkt_type = from->pkt_type; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index a50e1990a845..be1bd37185d8 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1064,6 +1064,9 @@ static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb, list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); } +INDIRECT_CALLABLE_DECLARE(int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)); +INDIRECT_CALLABLE_DECLARE(int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)); + /* This routine actually transmits TCP packets queued in by * tcp_do_sendmsg(). This is used by both the initial * transmission and possible later retransmissions. @@ -1235,7 +1238,9 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, tcp_add_tx_delay(skb, tp); - err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl); + err = INDIRECT_CALL_INET(icsk->icsk_af_ops->queue_xmit, + inet6_csk_xmit, ip_queue_xmit, + sk, skb, &inet->cork.fl); if (unlikely(err > 0)) { tcp_enter_cwr(sk); -- cgit v1.2.3 From dd2e0b86fc4ee146ac8f3275833d0187efeb950a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 19 Jun 2020 12:12:35 -0700 Subject: tcp: remove indirect calls for icsk->icsk_af_ops->send_check Mitigate RETPOLINE costs in __tcp_transmit_skb() by using INDIRECT_CALL_INET() wrapper. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/ip6_checksum.h | 9 --------- include/net/tcp.h | 3 +++ net/ipv4/tcp_output.c | 5 ++++- net/ipv6/tcp_ipv6.c | 7 +++++++ 4 files changed, 14 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h index 27ec612cd4a4..b3f4eaa88672 100644 --- a/include/net/ip6_checksum.h +++ b/include/net/ip6_checksum.h @@ -85,15 +85,6 @@ static inline void tcp_v6_gso_csum_prep(struct sk_buff *skb) th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0); } -#if IS_ENABLED(CONFIG_IPV6) -static inline void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb) -{ - struct ipv6_pinfo *np = inet6_sk(sk); - - __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr); -} -#endif - static inline __sum16 udp_v6_check(int len, const struct in6_addr *saddr, const struct in6_addr *daddr, diff --git a/include/net/tcp.h b/include/net/tcp.h index e5d7e0b09924..cd9cc348dbf9 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -932,6 +932,9 @@ static inline int tcp_v6_sdif(const struct sk_buff *skb) #endif return 0; } + +INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)); + #endif static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb) diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index be1bd37185d8..04b70fe31fa2 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1066,6 +1066,7 @@ static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb, INDIRECT_CALLABLE_DECLARE(int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)); INDIRECT_CALLABLE_DECLARE(int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)); +INDIRECT_CALLABLE_DECLARE(void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)); /* This routine actually transmits TCP packets queued in by * tcp_do_sendmsg(). This is used by both the initial @@ -1210,7 +1211,9 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, } #endif - icsk->icsk_af_ops->send_check(sk, skb); + INDIRECT_CALL_INET(icsk->icsk_af_ops->send_check, + tcp_v6_send_check, tcp_v4_send_check, + sk, skb); if (likely(tcb->tcp_flags & TCPHDR_ACK)) tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index f67d45ff00b4..4502db706f75 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1811,6 +1811,13 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = { .twsk_destructor = tcp_twsk_destructor, }; +INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb) +{ + struct ipv6_pinfo *np = inet6_sk(sk); + + __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr); +} + const struct inet_connection_sock_af_ops ipv6_specific = { .queue_xmit = inet6_csk_xmit, .send_check = tcp_v6_send_check, -- cgit v1.2.3 From 5769a351b89cd4d82016f18fa5f6c4077403564d Mon Sep 17 00:00:00 2001 From: Jiufei Xue Date: Wed, 17 Jun 2020 17:53:55 +0800 Subject: io_uring: change the poll type to be 32-bits poll events should be 32-bits to cover EPOLLEXCLUSIVE. Explicit word-swap the poll32_events for big endian to make sure the ABI is not changed. We call this feature IORING_FEAT_POLL_32BITS, applications who want to use EPOLLEXCLUSIVE should check the feature bit first. 
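A minimal userspace sketch of that probe, assuming uapi headers new enough to carry IORING_FEAT_POLL_32BITS (the helper name and the bare-bones error handling are illustrative, not part of this patch):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int poll32_supported(void)
{
	struct io_uring_params p;
	int fd;

	memset(&p, 0, sizeof(p));
	fd = syscall(__NR_io_uring_setup, 8, &p);
	if (fd < 0)
		return 0;
	close(fd);

	/* only then may poll SQEs rely on the 32-bit poll32_events field */
	return !!(p.features & IORING_FEAT_POLL_32BITS);
}

Applications built on liburing would typically read the same features word back from io_uring_queue_init_params() instead of issuing the raw syscall.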
Signed-off-by: Jiufei Xue Signed-off-by: Jens Axboe --- fs/io_uring.c | 13 +++++++++---- include/uapi/linux/io_uring.h | 4 +++- tools/io_uring/liburing.h | 6 +++++- 3 files changed, 17 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/fs/io_uring.c b/fs/io_uring.c index a78201b96179..0eb063daa9b5 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -4589,7 +4589,7 @@ static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head, static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_poll_iocb *poll = &req->poll; - u16 events; + u32 events; if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; @@ -4598,7 +4598,10 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe if (!poll->file) return -EBADF; - events = READ_ONCE(sqe->poll_events); + events = READ_ONCE(sqe->poll32_events); +#ifdef __BIG_ENDIAN + events = swahw32(events); +#endif poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP; io_get_req_task(req); @@ -7928,7 +7931,8 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP | IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS | - IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL; + IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL | + IORING_FEAT_POLL_32BITS; if (copy_to_user(params, p, sizeof(*p))) { ret = -EFAULT; @@ -8217,7 +8221,8 @@ static int __init io_uring_init(void) BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags); BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags); BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags); - BUILD_BUG_SQE_ELEM(28, __u16, poll_events); + BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events); + BUILD_BUG_SQE_ELEM(28, __u32, poll32_events); BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags); BUILD_BUG_SQE_ELEM(28, __u32, msg_flags); BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags); diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 92c22699a5a7..8d033961cb78 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -31,7 +31,8 @@ struct io_uring_sqe { union { __kernel_rwf_t rw_flags; __u32 fsync_flags; - __u16 poll_events; + __u16 poll_events; /* compatibility */ + __u32 poll32_events; /* word-reversed for BE */ __u32 sync_range_flags; __u32 msg_flags; __u32 timeout_flags; @@ -248,6 +249,7 @@ struct io_uring_params { #define IORING_FEAT_RW_CUR_POS (1U << 3) #define IORING_FEAT_CUR_PERSONALITY (1U << 4) #define IORING_FEAT_FAST_POLL (1U << 5) +#define IORING_FEAT_POLL_32BITS (1U << 6) /* * io_uring_register(2) opcodes and arguments diff --git a/tools/io_uring/liburing.h b/tools/io_uring/liburing.h index 5f305c86b892..28a837b6069d 100644 --- a/tools/io_uring/liburing.h +++ b/tools/io_uring/liburing.h @@ -10,6 +10,7 @@ extern "C" { #include #include "../../include/uapi/linux/io_uring.h" #include +#include #include "barrier.h" /* @@ -145,11 +146,14 @@ static inline void io_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd, } static inline void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd, - short poll_mask) + unsigned poll_mask) { memset(sqe, 0, sizeof(*sqe)); sqe->opcode = IORING_OP_POLL_ADD; sqe->fd = fd; +#if __BYTE_ORDER == __BIG_ENDIAN + poll_mask = __swahw32(poll_mask); +#endif sqe->poll_events = poll_mask; } -- cgit v1.2.3 From 5a473e8311b582a40c10409a0f4bb39f42aa8123 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 4 Jun 2020 11:23:39 -0600 Subject: block: provide plug based way of 
signaling forced no-wait semantics Provide a way for the caller to specify that IO should be marked with REQ_NOWAIT to avoid blocking on allocation. Signed-off-by: Jens Axboe --- block/blk-core.c | 6 ++++++ include/linux/blkdev.h | 1 + 2 files changed, 7 insertions(+) (limited to 'include') diff --git a/block/blk-core.c b/block/blk-core.c index 03252af8c82c..62a4904db921 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -958,6 +958,7 @@ generic_make_request_checks(struct bio *bio) struct request_queue *q; int nr_sectors = bio_sectors(bio); blk_status_t status = BLK_STS_IOERR; + struct blk_plug *plug; char b[BDEVNAME_SIZE]; might_sleep(); @@ -971,6 +972,10 @@ generic_make_request_checks(struct bio *bio) goto end_io; } + plug = blk_mq_plug(q, bio); + if (plug && plug->nowait) + bio->bi_opf |= REQ_NOWAIT; + /* * For a REQ_NOWAIT based request, return -EOPNOTSUPP * if queue is not a request based queue. @@ -1800,6 +1805,7 @@ void blk_start_plug(struct blk_plug *plug) INIT_LIST_HEAD(&plug->cb_list); plug->rq_count = 0; plug->multiple_queues = false; + plug->nowait = false; /* * Store ordering should not be needed here, since a potential diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 8fd900998b4e..6e067dca94cf 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1189,6 +1189,7 @@ struct blk_plug { struct list_head cb_list; /* md requires an unplug callback */ unsigned short rq_count; bool multiple_queues; + bool nowait; }; #define BLK_MAX_REQUEST_COUNT 16 #define BLK_PLUG_FLUSH_SIZE (128 * 1024) -- cgit v1.2.3 From c7510ab2cf5ccd997fe7f194edfe09cc511abf99 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sat, 23 May 2020 08:22:14 -0600 Subject: mm: abstract out wake_page_match() from wake_page_function() No functional changes in this patch, just in preparation for allowing more callers. Acked-by: Johannes Weiner Signed-off-by: Jens Axboe --- include/linux/pagemap.h | 37 +++++++++++++++++++++++++++++++++++++ mm/filemap.c | 35 ++++------------------------------- 2 files changed, 41 insertions(+), 31 deletions(-) (limited to 'include') diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index cf2468da68e9..2f18221bb5c8 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -496,6 +496,43 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma, return pgoff; } +/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */ +struct wait_page_key { + struct page *page; + int bit_nr; + int page_match; +}; + +struct wait_page_queue { + struct page *page; + int bit_nr; + wait_queue_entry_t wait; +}; + +static inline int wake_page_match(struct wait_page_queue *wait_page, + struct wait_page_key *key) +{ + if (wait_page->page != key->page) + return 0; + key->page_match = 1; + + if (wait_page->bit_nr != key->bit_nr) + return 0; + + /* + * Stop walking if it's locked. + * Is this safe if put_and_wait_on_page_locked() is in use? + * Yes: the waker must hold a reference to this page, and if PG_locked + * has now already been set by another task, that task must also hold + * a reference to the *same usage* of this page; so there is no need + * to walk on to wake even the put_and_wait_on_page_locked() callers. 
+ */ + if (test_bit(key->bit_nr, &key->page->flags)) + return -1; + + return 1; +} + extern void __lock_page(struct page *page); extern int __lock_page_killable(struct page *page); extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm, diff --git a/mm/filemap.c b/mm/filemap.c index 3378d4fca883..c3175dbd8fba 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -987,43 +987,16 @@ void __init pagecache_init(void) page_writeback_init(); } -/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */ -struct wait_page_key { - struct page *page; - int bit_nr; - int page_match; -}; - -struct wait_page_queue { - struct page *page; - int bit_nr; - wait_queue_entry_t wait; -}; - static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg) { struct wait_page_key *key = arg; struct wait_page_queue *wait_page = container_of(wait, struct wait_page_queue, wait); + int ret; - if (wait_page->page != key->page) - return 0; - key->page_match = 1; - - if (wait_page->bit_nr != key->bit_nr) - return 0; - - /* - * Stop walking if it's locked. - * Is this safe if put_and_wait_on_page_locked() is in use? - * Yes: the waker must hold a reference to this page, and if PG_locked - * has now already been set by another task, that task must also hold - * a reference to the *same usage* of this page; so there is no need - * to walk on to wake even the put_and_wait_on_page_locked() callers. - */ - if (test_bit(key->bit_nr, &key->page->flags)) - return -1; - + ret = wake_page_match(wait_page, key); + if (ret != 1) + return ret; return autoremove_wake_function(wait, mode, sync, key); } -- cgit v1.2.3 From dd3e6d5039de1cbff4e20e2b34390ff44cdb182f Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 22 May 2020 09:12:09 -0600 Subject: mm: add support for async page locking Normally waiting for a page to become unlocked, or locking the page, requires waiting for IO to complete. Add support for lock_page_async() and wait_on_page_locked_async(), which are callback based instead. This allows a caller to get notified when a page becomes unlocked, rather than wait for it. We add a new iocb field, ki_waitq, to pass in the necessary data for this to happen. We can unionize this with ki_cookie, since that is only used for polled IO. Polled IO can never co-exist with async callbacks, as it is (by definition) polled completions. struct wait_page_key is made public, and we define struct wait_page_async as the interface between the caller and the core. 
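The calling convention, as a hedged sketch of what the buffered read path in the filemap.c hunk below does when IOCB_WAITQ is set: lock_page_async() either takes the lock immediately and returns 0, or queues the callback carried in the wait_page_queue and returns -EIOCBQUEUED so the caller can unwind instead of sleeping.

	error = lock_page_async(page, iocb->ki_waitq);
	if (error == -EIOCBQUEUED)
		return error;	/* callback queued; the read is retried once the page unlocks */
	/* error == 0: page lock taken immediately, continue as before */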
Acked-by: Johannes Weiner Signed-off-by: Jens Axboe --- include/linux/fs.h | 7 ++++++- include/linux/pagemap.h | 17 +++++++++++++++++ mm/filemap.c | 45 ++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 67 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/fs.h b/include/linux/fs.h index 3f881a892ea7..2a5cf6080e68 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -315,6 +315,8 @@ enum rw_hint { #define IOCB_SYNC (1 << 5) #define IOCB_WRITE (1 << 6) #define IOCB_NOWAIT (1 << 7) +/* iocb->ki_waitq is valid */ +#define IOCB_WAITQ (1 << 8) struct kiocb { struct file *ki_filp; @@ -328,7 +330,10 @@ struct kiocb { int ki_flags; u16 ki_hint; u16 ki_ioprio; /* See linux/ioprio.h */ - unsigned int ki_cookie; /* for ->iopoll */ + union { + unsigned int ki_cookie; /* for ->iopoll */ + struct wait_page_queue *ki_waitq; /* for async buffered IO */ + }; randomized_struct_fields_end }; diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 2f18221bb5c8..e053e1d9a4d7 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -535,6 +535,7 @@ static inline int wake_page_match(struct wait_page_queue *wait_page, extern void __lock_page(struct page *page); extern int __lock_page_killable(struct page *page); +extern int __lock_page_async(struct page *page, struct wait_page_queue *wait); extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm, unsigned int flags); extern void unlock_page(struct page *page); @@ -571,6 +572,22 @@ static inline int lock_page_killable(struct page *page) return 0; } +/* + * lock_page_async - Lock the page, unless this would block. If the page + * is already locked, then queue a callback when the page becomes unlocked. + * This callback can then retry the operation. + * + * Returns 0 if the page is locked successfully, or -EIOCBQUEUED if the page + * was already locked and the callback defined in 'wait' was queued. + */ +static inline int lock_page_async(struct page *page, + struct wait_page_queue *wait) +{ + if (!trylock_page(page)) + return __lock_page_async(page, wait); + return 0; +} + /* * lock_page_or_retry - Lock the page, unless this would block and the * caller indicated that it can handle a retry. diff --git a/mm/filemap.c b/mm/filemap.c index c3175dbd8fba..e8aaf43bee9f 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1180,6 +1180,36 @@ int wait_on_page_bit_killable(struct page *page, int bit_nr) } EXPORT_SYMBOL(wait_on_page_bit_killable); +static int __wait_on_page_locked_async(struct page *page, + struct wait_page_queue *wait, bool set) +{ + struct wait_queue_head *q = page_waitqueue(page); + int ret = 0; + + wait->page = page; + wait->bit_nr = PG_locked; + + spin_lock_irq(&q->lock); + __add_wait_queue_entry_tail(q, &wait->wait); + SetPageWaiters(page); + if (set) + ret = !trylock_page(page); + else + ret = PageLocked(page); + /* + * If we were succesful now, we know we're still on the + * waitqueue as we're still under the lock. This means it's + * safe to remove and return success, we know the callback + * isn't going to trigger. + */ + if (!ret) + __remove_wait_queue(q, &wait->wait); + else + ret = -EIOCBQUEUED; + spin_unlock_irq(&q->lock); + return ret; +} + /** * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked * @page: The page to wait for. 
@@ -1342,6 +1372,11 @@ int __lock_page_killable(struct page *__page) } EXPORT_SYMBOL_GPL(__lock_page_killable); +int __lock_page_async(struct page *page, struct wait_page_queue *wait) +{ + return __wait_on_page_locked_async(page, wait, true); +} + /* * Return values: * 1 - page is locked; mmap_lock is still held. @@ -2131,6 +2166,11 @@ page_not_up_to_date_locked: } readpage: + if (iocb->ki_flags & IOCB_NOWAIT) { + unlock_page(page); + put_page(page); + goto would_block; + } /* * A previous I/O error may have been due to temporary * failures, eg. multipath errors. @@ -2150,7 +2190,10 @@ readpage: } if (!PageUptodate(page)) { - error = lock_page_killable(page); + if (iocb->ki_flags & IOCB_WAITQ) + error = lock_page_async(page, iocb->ki_waitq); + else + error = lock_page_killable(page); if (unlikely(error)) goto readpage_error; if (!PageUptodate(page)) { -- cgit v1.2.3 From c2a25ec0f1005dde004cd671484f578a9c8ca7de Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 22 May 2020 09:12:51 -0600 Subject: fs: add FMODE_BUF_RASYNC If set, this indicates that the file system supports IOCB_WAITQ for buffered reads. Signed-off-by: Jens Axboe --- include/linux/fs.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/linux/fs.h b/include/linux/fs.h index 2a5cf6080e68..4090320360f4 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -175,6 +175,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, /* File does not contribute to nr_files count */ #define FMODE_NOACCOUNT ((__force fmode_t)0x20000000) +/* File supports async buffered reads */ +#define FMODE_BUF_RASYNC ((__force fmode_t)0x40000000) + /* * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector * that indicates that they should check the contents of the iovec are -- cgit v1.2.3 From d1932dc3dc268f8dd5201c64971324d06ba977cc Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 22 May 2020 10:18:23 -0600 Subject: mm: add kiocb_wait_page_queue_init() helper Checks if the file supports it, and initializes the values that we need. Caller passes in 'data' pointer, if any, and the callback function to be used. 
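A hedged sketch of a caller arming the helper before issuing a buffered read; my_request, its kiocb/wpq/work members and my_page_unlocked() are illustrative names, not part of this patch:

static int my_page_unlocked(struct wait_queue_entry *wait, unsigned mode,
			    int sync, void *key)
{
	struct wait_page_queue *wpq = container_of(wait, struct wait_page_queue,
						   wait);
	struct my_request *req = wait->private;
	int ret;

	ret = wake_page_match(wpq, key);
	if (ret != 1)
		return ret;

	list_del_init(&wait->entry);
	queue_work(system_wq, &req->work);	/* retry the read from task context */
	return 1;
}

	/* before issuing the read */
	ret = kiocb_wait_page_queue_init(&req->kiocb, &req->wpq,
					 my_page_unlocked, req);
	if (ret)
		return ret;	/* e.g. -EOPNOTSUPP: no FMODE_BUF_RASYNC, fall back to blocking */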
Acked-by: Johannes Weiner Signed-off-by: Jens Axboe --- include/linux/pagemap.h | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) (limited to 'include') diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index e053e1d9a4d7..7386bc67cc5a 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -533,6 +533,27 @@ static inline int wake_page_match(struct wait_page_queue *wait_page, return 1; } +static inline int kiocb_wait_page_queue_init(struct kiocb *kiocb, + struct wait_page_queue *wait, + wait_queue_func_t func, + void *data) +{ + /* Can't support async wakeup with polled IO */ + if (kiocb->ki_flags & IOCB_HIPRI) + return -EINVAL; + if (kiocb->ki_filp->f_mode & FMODE_BUF_RASYNC) { + wait->wait.func = func; + wait->wait.private = data; + wait->wait.flags = 0; + INIT_LIST_HEAD(&wait->wait.entry); + kiocb->ki_flags |= IOCB_WAITQ; + kiocb->ki_waitq = wait; + return 0; + } + + return -EOPNOTSUPP; +} + extern void __lock_page(struct page *page); extern int __lock_page_killable(struct page *page); extern int __lock_page_async(struct page *page, struct wait_page_queue *wait); -- cgit v1.2.3 From 49bcaef86eba1a8097980f341e243ba01177a685 Mon Sep 17 00:00:00 2001 From: Sivaprakash Murugesan Date: Mon, 22 Jun 2020 09:58:11 +0530 Subject: clk: qcom: Add DT bindings for ipq6018 apss clock controller Add dt-binding for ipq6018 apss clock controller Acked-by: Rob Herring Signed-off-by: Sivaprakash Murugesan Link: https://lore.kernel.org/r/1592800092-20533-4-git-send-email-sivaprak@codeaurora.org Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/qcom,apss-ipq.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 include/dt-bindings/clock/qcom,apss-ipq.h (limited to 'include') diff --git a/include/dt-bindings/clock/qcom,apss-ipq.h b/include/dt-bindings/clock/qcom,apss-ipq.h new file mode 100644 index 000000000000..77b6e05492e2 --- /dev/null +++ b/include/dt-bindings/clock/qcom,apss-ipq.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLOCK_QCA_APSS_IPQ6018_H +#define _DT_BINDINGS_CLOCK_QCA_APSS_IPQ6018_H + +#define APCS_ALIAS0_CLK_SRC 0 +#define APCS_ALIAS0_CORE_CLK 1 + +#endif -- cgit v1.2.3 From b608013ac5b55a2e42d8734f29f9757b75d26165 Mon Sep 17 00:00:00 2001 From: Konrad Dybcio Date: Mon, 22 Jun 2020 11:02:52 +0200 Subject: clk: qcom: smd: Add support for SDM660 rpm clocks Add rpm smd clocks, PMIC and bus clocks which are required on SDM630/660 (and APQ variants) for clients to vote on. 
Signed-off-by: Konrad Dybcio Link: https://lore.kernel.org/r/20200622090252.36568-1-konradybcio@gmail.com Signed-off-by: Stephen Boyd --- .../devicetree/bindings/clock/qcom,rpmcc.txt | 1 + drivers/clk/qcom/clk-smd-rpm.c | 76 ++++++++++++++++++++++ include/dt-bindings/clock/qcom,rpmcc.h | 10 +++ 3 files changed, 87 insertions(+) (limited to 'include') diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt index 90a1349bc713..86190acc71bc 100644 --- a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt +++ b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt @@ -20,6 +20,7 @@ Required properties : "qcom,rpmcc-msm8996", "qcom,rpmcc" "qcom,rpmcc-msm8998", "qcom,rpmcc" "qcom,rpmcc-qcs404", "qcom,rpmcc" + "qcom,rpmcc-sdm660", "qcom,rpmcc" - #clock-cells : shall contain 1 diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c index 52f63ad787ba..643bc355df5c 100644 --- a/drivers/clk/qcom/clk-smd-rpm.c +++ b/drivers/clk/qcom/clk-smd-rpm.c @@ -766,6 +766,81 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8998 = { .num_clks = ARRAY_SIZE(msm8998_clks), }; +/* sdm660 */ +DEFINE_CLK_SMD_RPM_BRANCH(sdm660, bi_tcxo, bi_tcxo_a, QCOM_SMD_RPM_MISC_CLK, 0, + 19200000); +DEFINE_CLK_SMD_RPM(sdm660, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1); +DEFINE_CLK_SMD_RPM(sdm660, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2); +DEFINE_CLK_SMD_RPM(sdm660, cnoc_periph_clk, cnoc_periph_a_clk, + QCOM_SMD_RPM_BUS_CLK, 0); +DEFINE_CLK_SMD_RPM(sdm660, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0); +DEFINE_CLK_SMD_RPM(sdm660, mmssnoc_axi_clk, mmssnoc_axi_a_clk, + QCOM_SMD_RPM_MMAXI_CLK, 0); +DEFINE_CLK_SMD_RPM(sdm660, ipa_clk, ipa_a_clk, QCOM_SMD_RPM_IPA_CLK, 0); +DEFINE_CLK_SMD_RPM(sdm660, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0); +DEFINE_CLK_SMD_RPM(sdm660, aggre2_noc_clk, aggre2_noc_a_clk, + QCOM_SMD_RPM_AGGR_CLK, 2); +DEFINE_CLK_SMD_RPM_QDSS(sdm660, qdss_clk, qdss_a_clk, + QCOM_SMD_RPM_MISC_CLK, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(sdm660, rf_clk1, rf_clk1_a, 4); +DEFINE_CLK_SMD_RPM_XO_BUFFER(sdm660, div_clk1, div_clk1_a, 11); +DEFINE_CLK_SMD_RPM_XO_BUFFER(sdm660, ln_bb_clk1, ln_bb_clk1_a, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(sdm660, ln_bb_clk2, ln_bb_clk2_a, 2); +DEFINE_CLK_SMD_RPM_XO_BUFFER(sdm660, ln_bb_clk3, ln_bb_clk3_a, 3); + +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(sdm660, rf_clk1_pin, rf_clk1_a_pin, 4); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(sdm660, ln_bb_clk1_pin, + ln_bb_clk1_pin_a, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(sdm660, ln_bb_clk2_pin, + ln_bb_clk2_pin_a, 2); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(sdm660, ln_bb_clk3_pin, + ln_bb_clk3_pin_a, 3); +static struct clk_smd_rpm *sdm660_clks[] = { + [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo, + [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a, + [RPM_SMD_SNOC_CLK] = &sdm660_snoc_clk, + [RPM_SMD_SNOC_A_CLK] = &sdm660_snoc_a_clk, + [RPM_SMD_CNOC_CLK] = &sdm660_cnoc_clk, + [RPM_SMD_CNOC_A_CLK] = &sdm660_cnoc_a_clk, + [RPM_SMD_CNOC_PERIPH_CLK] = &sdm660_cnoc_periph_clk, + [RPM_SMD_CNOC_PERIPH_A_CLK] = &sdm660_cnoc_periph_a_clk, + [RPM_SMD_BIMC_CLK] = &sdm660_bimc_clk, + [RPM_SMD_BIMC_A_CLK] = &sdm660_bimc_a_clk, + [RPM_SMD_MMSSNOC_AXI_CLK] = &sdm660_mmssnoc_axi_clk, + [RPM_SMD_MMSSNOC_AXI_CLK_A] = &sdm660_mmssnoc_axi_a_clk, + [RPM_SMD_IPA_CLK] = &sdm660_ipa_clk, + [RPM_SMD_IPA_A_CLK] = &sdm660_ipa_a_clk, + [RPM_SMD_CE1_CLK] = &sdm660_ce1_clk, + [RPM_SMD_CE1_A_CLK] = &sdm660_ce1_a_clk, + [RPM_SMD_AGGR2_NOC_CLK] = &sdm660_aggre2_noc_clk, + [RPM_SMD_AGGR2_NOC_A_CLK] = 
&sdm660_aggre2_noc_a_clk, + [RPM_SMD_QDSS_CLK] = &sdm660_qdss_clk, + [RPM_SMD_QDSS_A_CLK] = &sdm660_qdss_a_clk, + [RPM_SMD_RF_CLK1] = &sdm660_rf_clk1, + [RPM_SMD_RF_CLK1_A] = &sdm660_rf_clk1_a, + [RPM_SMD_DIV_CLK1] = &sdm660_div_clk1, + [RPM_SMD_DIV_A_CLK1] = &sdm660_div_clk1_a, + [RPM_SMD_LN_BB_CLK] = &sdm660_ln_bb_clk1, + [RPM_SMD_LN_BB_A_CLK] = &sdm660_ln_bb_clk1_a, + [RPM_SMD_LN_BB_CLK2] = &sdm660_ln_bb_clk2, + [RPM_SMD_LN_BB_CLK2_A] = &sdm660_ln_bb_clk2_a, + [RPM_SMD_LN_BB_CLK3] = &sdm660_ln_bb_clk3, + [RPM_SMD_LN_BB_CLK3_A] = &sdm660_ln_bb_clk3_a, + [RPM_SMD_RF_CLK1_PIN] = &sdm660_rf_clk1_pin, + [RPM_SMD_RF_CLK1_A_PIN] = &sdm660_rf_clk1_a_pin, + [RPM_SMD_LN_BB_CLK1_PIN] = &sdm660_ln_bb_clk1_pin, + [RPM_SMD_LN_BB_CLK1_A_PIN] = &sdm660_ln_bb_clk1_pin_a, + [RPM_SMD_LN_BB_CLK2_PIN] = &sdm660_ln_bb_clk2_pin, + [RPM_SMD_LN_BB_CLK2_A_PIN] = &sdm660_ln_bb_clk2_pin_a, + [RPM_SMD_LN_BB_CLK3_PIN] = &sdm660_ln_bb_clk3_pin, + [RPM_SMD_LN_BB_CLK3_A_PIN] = &sdm660_ln_bb_clk3_pin_a, +}; + +static const struct rpm_smd_clk_desc rpm_clk_sdm660 = { + .clks = sdm660_clks, + .num_clks = ARRAY_SIZE(sdm660_clks), +}; + static const struct of_device_id rpm_smd_clk_match_table[] = { { .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 }, { .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 }, @@ -773,6 +848,7 @@ static const struct of_device_id rpm_smd_clk_match_table[] = { { .compatible = "qcom,rpmcc-msm8996", .data = &rpm_clk_msm8996 }, { .compatible = "qcom,rpmcc-msm8998", .data = &rpm_clk_msm8998 }, { .compatible = "qcom,rpmcc-qcs404", .data = &rpm_clk_qcs404 }, + { .compatible = "qcom,rpmcc-sdm660", .data = &rpm_clk_sdm660 }, { } }; MODULE_DEVICE_TABLE(of, rpm_smd_clk_match_table); diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h index ae74c43c485d..d1afa634b58d 100644 --- a/include/dt-bindings/clock/qcom,rpmcc.h +++ b/include/dt-bindings/clock/qcom,rpmcc.h @@ -133,5 +133,15 @@ #define RPM_SMD_RF_CLK3_A 87 #define RPM_SMD_RF_CLK3_PIN 88 #define RPM_SMD_RF_CLK3_A_PIN 89 +#define RPM_SMD_MMSSNOC_AXI_CLK 90 +#define RPM_SMD_MMSSNOC_AXI_CLK_A 91 +#define RPM_SMD_CNOC_PERIPH_CLK 92 +#define RPM_SMD_CNOC_PERIPH_A_CLK 93 +#define RPM_SMD_LN_BB_CLK3 94 +#define RPM_SMD_LN_BB_CLK3_A 95 +#define RPM_SMD_LN_BB_CLK1_PIN 96 +#define RPM_SMD_LN_BB_CLK1_A_PIN 97 +#define RPM_SMD_LN_BB_CLK2_PIN 98 +#define RPM_SMD_LN_BB_CLK2_A_PIN 99 #endif -- cgit v1.2.3 From 613c2e2c7e69e574411e2a3609459e18e91ae3fb Mon Sep 17 00:00:00 2001 From: Dennis YC Hsieh Date: Sun, 21 Jun 2020 22:18:26 +0800 Subject: soc: mediatek: cmdq: add assign function Add assign function in cmdq helper which assign constant value into internal register by index. 
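A rough usage sketch on an already-created packet; the internal register index and the constant are placeholders:

	/* queue "load 0x12345678 into GCE internal register 0"; a later
	 * instruction can then use that register as its value, mask or address
	 */
	err = cmdq_pkt_assign(pkt, 0, 0x12345678);
	if (err)
		return err;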
Signed-off-by: Dennis YC Hsieh Link: https://lore.kernel.org/r/1592749115-24158-3-git-send-email-dennis-yc.hsieh@mediatek.com Signed-off-by: Matthias Brugger --- drivers/soc/mediatek/mtk-cmdq-helper.c | 24 +++++++++++++++++++++++- include/linux/mailbox/mtk-cmdq-mailbox.h | 1 + include/linux/soc/mediatek/mtk-cmdq.h | 14 ++++++++++++++ 3 files changed, 38 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c index 87ee9f767b7a..b9e5e4eea876 100644 --- a/drivers/soc/mediatek/mtk-cmdq-helper.c +++ b/drivers/soc/mediatek/mtk-cmdq-helper.c @@ -12,6 +12,7 @@ #define CMDQ_WRITE_ENABLE_MASK BIT(0) #define CMDQ_POLL_ENABLE_MASK BIT(0) #define CMDQ_EOC_IRQ_EN BIT(0) +#define CMDQ_REG_TYPE 1 struct cmdq_instruction { union { @@ -21,8 +22,17 @@ struct cmdq_instruction { union { u16 offset; u16 event; + u16 reg_dst; + }; + union { + u8 subsys; + struct { + u8 sop:5; + u8 arg_c_t:1; + u8 src_t:1; + u8 dst_t:1; + }; }; - u8 subsys; u8 op; }; @@ -278,6 +288,18 @@ int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys, } EXPORT_SYMBOL(cmdq_pkt_poll_mask); +int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value) +{ + struct cmdq_instruction inst = {}; + + inst.op = CMDQ_CODE_LOGIC; + inst.dst_t = CMDQ_REG_TYPE; + inst.reg_dst = reg_idx; + inst.value = value; + return cmdq_pkt_append_command(pkt, inst); +} +EXPORT_SYMBOL(cmdq_pkt_assign); + static int cmdq_pkt_finalize(struct cmdq_pkt *pkt) { struct cmdq_instruction inst = { {0} }; diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h index a4dc45fbec0a..70b740a552c2 100644 --- a/include/linux/mailbox/mtk-cmdq-mailbox.h +++ b/include/linux/mailbox/mtk-cmdq-mailbox.h @@ -59,6 +59,7 @@ enum cmdq_code { CMDQ_CODE_JUMP = 0x10, CMDQ_CODE_WFE = 0x20, CMDQ_CODE_EOC = 0x40, + CMDQ_CODE_LOGIC = 0xa0, }; enum cmdq_cb_status { diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h index a74c1d5acdf3..83340211e1d3 100644 --- a/include/linux/soc/mediatek/mtk-cmdq.h +++ b/include/linux/soc/mediatek/mtk-cmdq.h @@ -152,6 +152,20 @@ int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys, */ int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value, u32 mask); + +/** + * cmdq_pkt_assign() - Append logic assign command to the CMDQ packet, ask GCE + * to execute an instruction that set a constant value into + * internal register and use as value, mask or address in + * read/write instruction. + * @pkt: the CMDQ packet + * @reg_idx: the CMDQ internal register ID + * @value: the specified value + * + * Return: 0 for success; else the error code is returned + */ +int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value); + /** * cmdq_pkt_flush_async() - trigger CMDQ to asynchronously execute the CMDQ * packet and call back at the end of done packet -- cgit v1.2.3 From 995818588b6deedbed6b2b9d4c67e59b9afd6c60 Mon Sep 17 00:00:00 2001 From: Dennis YC Hsieh Date: Sun, 21 Jun 2020 22:18:32 +0800 Subject: soc: mediatek: cmdq: export finalize function Export finalize function to client which helps append eoc and jump command to pkt. Let client decide call finalize or not. 
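With the export, a mailbox client is expected to close its own packet before flushing, roughly as follows (mirroring the mtk_drm_crtc change below; event and done_cb are placeholders):

	cmdq_pkt_wfe(pkt, event);
	/* ... append the real commands ... */
	cmdq_pkt_finalize(pkt);			/* appends EOC + jump */
	cmdq_pkt_flush_async(pkt, done_cb, pkt);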
Signed-off-by: Dennis YC Hsieh Reviewed-by: CK Hu Acked-by: Chun-Kuang Hu Link: https://lore.kernel.org/r/1592749115-24158-9-git-send-email-dennis-yc.hsieh@mediatek.com Signed-off-by: Matthias Brugger --- drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 1 + drivers/soc/mediatek/mtk-cmdq-helper.c | 7 ++----- include/linux/soc/mediatek/mtk-cmdq.h | 8 ++++++++ 3 files changed, 11 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index fe46c4bac64d..ec6c9ffbf35e 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -492,6 +492,7 @@ static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc) cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event); cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event); mtk_crtc_ddp_config(crtc, cmdq_handle); + cmdq_pkt_finalize(cmdq_handle); cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle); } #endif diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c index b9e5e4eea876..2eda03fc7662 100644 --- a/drivers/soc/mediatek/mtk-cmdq-helper.c +++ b/drivers/soc/mediatek/mtk-cmdq-helper.c @@ -300,7 +300,7 @@ int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value) } EXPORT_SYMBOL(cmdq_pkt_assign); -static int cmdq_pkt_finalize(struct cmdq_pkt *pkt) +int cmdq_pkt_finalize(struct cmdq_pkt *pkt) { struct cmdq_instruction inst = { {0} }; int err; @@ -319,6 +319,7 @@ static int cmdq_pkt_finalize(struct cmdq_pkt *pkt) return err; } +EXPORT_SYMBOL(cmdq_pkt_finalize); static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data) { @@ -353,10 +354,6 @@ int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb, unsigned long flags = 0; struct cmdq_client *client = (struct cmdq_client *)pkt->cl; - err = cmdq_pkt_finalize(pkt); - if (err < 0) - return err; - pkt->cb.cb = cb; pkt->cb.data = data; pkt->async_cb.cb = cmdq_pkt_flush_async_cb; diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h index 83340211e1d3..c331255eacd1 100644 --- a/include/linux/soc/mediatek/mtk-cmdq.h +++ b/include/linux/soc/mediatek/mtk-cmdq.h @@ -166,6 +166,14 @@ int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys, */ int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value); +/** + * cmdq_pkt_finalize() - Append EOC and jump command to pkt. + * @pkt: the CMDQ packet + * + * Return: 0 for success; else the error code is returned + */ +int cmdq_pkt_finalize(struct cmdq_pkt *pkt); + /** * cmdq_pkt_flush_async() - trigger CMDQ to asynchronously execute the CMDQ * packet and call back at the end of done packet -- cgit v1.2.3 From 7de796cac48c0f55974cfe9cff683dfb3b7c71b6 Mon Sep 17 00:00:00 2001 From: Dennis YC Hsieh Date: Sun, 21 Jun 2020 22:18:35 +0800 Subject: soc: mediatek: cmdq: add set event function Add set event function in cmdq helper functions to set specific event. 
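A hedged sketch of pairing it with the existing wait-for-event helper; MY_GCE_SW_EVENT stands in for whatever hardware- or software-defined event ID the platform uses:

	/* packet A raises the event once its work is done */
	cmdq_pkt_set_event(pkt_a, MY_GCE_SW_EVENT);

	/* packet B waits for (and consumes) the event before continuing */
	cmdq_pkt_wfe(pkt_b, MY_GCE_SW_EVENT);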
Signed-off-by: Dennis YC Hsieh Link: https://lore.kernel.org/r/1592749115-24158-12-git-send-email-dennis-yc.hsieh@mediatek.com Signed-off-by: Matthias Brugger --- drivers/soc/mediatek/mtk-cmdq-helper.c | 15 +++++++++++++++ include/linux/mailbox/mtk-cmdq-mailbox.h | 1 + include/linux/soc/mediatek/mtk-cmdq.h | 9 +++++++++ 3 files changed, 25 insertions(+) (limited to 'include') diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c index 2eda03fc7662..dc644cfb6419 100644 --- a/drivers/soc/mediatek/mtk-cmdq-helper.c +++ b/drivers/soc/mediatek/mtk-cmdq-helper.c @@ -253,6 +253,21 @@ int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event) } EXPORT_SYMBOL(cmdq_pkt_clear_event); +int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event) +{ + struct cmdq_instruction inst = {}; + + if (event >= CMDQ_MAX_EVENT) + return -EINVAL; + + inst.op = CMDQ_CODE_WFE; + inst.value = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE; + inst.event = event; + + return cmdq_pkt_append_command(pkt, inst); +} +EXPORT_SYMBOL(cmdq_pkt_set_event); + int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value) { diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h index 70b740a552c2..a96e8c252bac 100644 --- a/include/linux/mailbox/mtk-cmdq-mailbox.h +++ b/include/linux/mailbox/mtk-cmdq-mailbox.h @@ -17,6 +17,7 @@ #define CMDQ_JUMP_PASS CMDQ_INST_SIZE #define CMDQ_WFE_UPDATE BIT(31) +#define CMDQ_WFE_UPDATE_VALUE BIT(16) #define CMDQ_WFE_WAIT BIT(15) #define CMDQ_WFE_WAIT_VALUE 0x1 diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h index c331255eacd1..2249ecaf77e4 100644 --- a/include/linux/soc/mediatek/mtk-cmdq.h +++ b/include/linux/soc/mediatek/mtk-cmdq.h @@ -120,6 +120,15 @@ int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event); */ int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event); +/** + * cmdq_pkt_set_event() - append set event command to the CMDQ packet + * @pkt: the CMDQ packet + * @event: the desired event to be set + * + * Return: 0 for success; else the error code is returned + */ +int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event); + /** * cmdq_pkt_poll() - Append polling command to the CMDQ packet, ask GCE to * execute an instruction that wait for a specified -- cgit v1.2.3 From 3af8588c77186bf08e55e7281da83d88373481d7 Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Mon, 8 Jun 2020 17:28:50 +0200 Subject: fork: fold legacy_clone_args_valid() into _do_fork() This separate helper only existed to guarantee the mutual exclusivity of CLONE_PIDFD and CLONE_PARENT_SETTID for legacy clone since CLONE_PIDFD abuses the parent_tid field to return the pidfd. But we can actually handle this uniformely thus removing the helper. For legacy clone we can detect that CLONE_PIDFD is specified in conjunction with CLONE_PARENT_SETTID because they will share the same memory which is invalid and for clone3() setting the separate pidfd and parent_tid fields to the same memory is bogus as well. So fold that helper directly into _do_fork() by detecting this case. 
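From userspace, the now-centralized check rejects roughly the following (headers and error handling elided; the raw clone() argument order shown is x86-64's; clone3() hits the same -EINVAL when its pidfd and parent_tid fields point at the same memory):

	int pidfd, tid;

	/* fine: with CLONE_PIDFD alone, the pidfd comes back through the
	 * parent_tid slot
	 */
	syscall(__NR_clone, CLONE_PIDFD | SIGCHLD, NULL, &pidfd, NULL, 0);

	/* rejected: both flags would write through the same pointer */
	syscall(__NR_clone, CLONE_PIDFD | CLONE_PARENT_SETTID | SIGCHLD,
		NULL, &tid, NULL, 0);			/* -> -EINVAL */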
Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Al Viro Cc: Geert Uytterhoeven Cc: "Matthew Wilcox (Oracle)" Cc: "Peter Zijlstra (Intel)" Cc: linux-m68k@lists.linux-m68k.org Cc: x86@kernel.org Signed-off-by: Christian Brauner --- arch/m68k/kernel/process.c | 3 --- arch/x86/kernel/sys_ia32.c | 3 --- include/linux/sched/task.h | 1 - kernel/fork.c | 30 ++++++++++++++---------------- 4 files changed, 14 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c index 90ae376b7ab1..0608439ba452 100644 --- a/arch/m68k/kernel/process.c +++ b/arch/m68k/kernel/process.c @@ -125,9 +125,6 @@ asmlinkage int m68k_clone(struct pt_regs *regs) .tls = regs->d5, }; - if (!legacy_clone_args_valid(&args)) - return -EINVAL; - return _do_fork(&args); } diff --git a/arch/x86/kernel/sys_ia32.c b/arch/x86/kernel/sys_ia32.c index f8d65c99feb8..720cde885042 100644 --- a/arch/x86/kernel/sys_ia32.c +++ b/arch/x86/kernel/sys_ia32.c @@ -251,9 +251,6 @@ COMPAT_SYSCALL_DEFINE5(ia32_clone, unsigned long, clone_flags, .tls = tls_val, }; - if (!legacy_clone_args_valid(&args)) - return -EINVAL; - return _do_fork(&args); } #endif /* CONFIG_IA32_EMULATION */ diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 38359071236a..ddce0ea515d1 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -96,7 +96,6 @@ extern void exit_files(struct task_struct *); extern void exit_itimers(struct signal_struct *); extern long _do_fork(struct kernel_clone_args *kargs); -extern bool legacy_clone_args_valid(const struct kernel_clone_args *kargs); extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); struct task_struct *fork_idle(int); struct mm_struct *copy_init_mm(void); diff --git a/kernel/fork.c b/kernel/fork.c index 142b23645d82..9875aeb2ba41 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2422,6 +2422,20 @@ long _do_fork(struct kernel_clone_args *args) int trace = 0; long nr; + /* + * For legacy clone() calls, CLONE_PIDFD uses the parent_tid argument + * to return the pidfd. Hence, CLONE_PIDFD and CLONE_PARENT_SETTID are + * mutually exclusive. With clone3() CLONE_PIDFD has grown a separate + * field in struct clone_args and it still doesn't make sense to have + * them both point at the same memory location. Performing this check + * here has the advantage that we don't need to have a separate helper + * to check for legacy clone(). + */ + if ((args->flags & CLONE_PIDFD) && + (args->flags & CLONE_PARENT_SETTID) && + (args->pidfd == args->parent_tid)) + return -EINVAL; + /* * Determine whether and which event to report to ptracer. When * called from kernel_thread or CLONE_UNTRACED is explicitly @@ -2479,16 +2493,6 @@ long _do_fork(struct kernel_clone_args *args) return nr; } -bool legacy_clone_args_valid(const struct kernel_clone_args *kargs) -{ - /* clone(CLONE_PIDFD) uses parent_tidptr to return a pidfd */ - if ((kargs->flags & CLONE_PIDFD) && - (kargs->flags & CLONE_PARENT_SETTID)) - return false; - - return true; -} - #ifndef CONFIG_HAVE_COPY_THREAD_TLS /* For compatibility with architectures that call do_fork directly rather than * using the syscall entry points below. 
*/ @@ -2508,9 +2512,6 @@ long do_fork(unsigned long clone_flags, .stack_size = stack_size, }; - if (!legacy_clone_args_valid(&args)) - return -EINVAL; - return _do_fork(&args); } #endif @@ -2593,9 +2594,6 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, .tls = tls, }; - if (!legacy_clone_args_valid(&args)) - return -EINVAL; - return _do_fork(&args); } #endif -- cgit v1.2.3 From 5cbd3ebde859bd43bd0584c146060638b1a3abb4 Mon Sep 17 00:00:00 2001 From: Alain Michaud Date: Mon, 22 Jun 2020 13:30:28 +0000 Subject: Bluetooth: use configured params for ext adv MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When the extended advertisement feature is enabled, a hardcoded min and max interval of 0x8000 is used. This patch fixes this issue by using the configured min/max value. This was validated by setting min/max in main.conf and making sure the right setting is applied: < HCI Command: LE Set Extended Advertising Parameters (0x08|0x0036) plen 25 #93 [hci0] 10.953011 … Min advertising interval: 181.250 msec (0x0122) Max advertising interval: 181.250 msec (0x0122) … Signed-off-by: Alain Michaud Reviewed-by: Abhishek Pandit-Subedi Reviewed-by: Daniel Winkler Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci.h | 8 ++++++++ net/bluetooth/hci_request.c | 7 +++---- 2 files changed, 11 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 16ab6ce87883..1f18f71363e9 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -2516,4 +2516,12 @@ static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb) #define hci_iso_data_len(h) ((h) & 0x3fff) #define hci_iso_data_flags(h) ((h) >> 14) +/* le24 support */ +static inline void hci_cpu_to_le24(__u32 val, __u8 dst[3]) +{ + dst[0] = val & 0xff; + dst[1] = (val & 0xff00) >> 8; + dst[2] = (val & 0xff0000) >> 16; +} + #endif /* __HCI_H */ diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 29decd7e8051..86ae4b953a01 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -1799,8 +1799,6 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) int err; struct adv_info *adv_instance; bool secondary_adv; - /* In ext adv set param interval is 3 octets */ - const u8 adv_interval[3] = { 0x00, 0x08, 0x00 }; if (instance > 0) { adv_instance = hci_find_adv_instance(hdev, instance); @@ -1833,8 +1831,9 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) memset(&cp, 0, sizeof(cp)); - memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval)); - memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval)); + /* In ext adv set param interval is 3 octets */ + hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval); + hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval); secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK); -- cgit v1.2.3 From cf6e26c71bfdff823fd40945b07666d75f1e1412 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Tue, 16 Jun 2020 14:19:41 +0900 Subject: ASoC: soc-component: merge snd_soc_component_read() and snd_soc_component_read32() We had read/write function for Codec, Platform, etc, but these has been merged into snd_soc_component_read/write(). Internally, it is using regmap or driver function. 
In read case, each styles are like below regmap ret = regmap_read(..., reg, &val); driver function val = xxx->read(..., reg); Because of this kind of different style, to keep same read style, when we merged each read function into snd_soc_component_read(), we created snd_soc_component_read32(), like below. commit 738b49efe6c6 ("ASoC: add snd_soc_component_read32") (1) val = snd_soc_component_read32(component, reg); (2) ret = snd_soc_component_read(component, reg, &val); Many drivers are using snd_soc_component_read32(), and some drivers are using snd_soc_component_read() today. In generally, we don't check read function successes, because, we will have many other issues at initial timing if read function didn't work. Now we can use soc_component_err() when error case. This means, it is easy to notice if error occurred. This patch aggressively merge snd_soc_component_read() and _read32(), and makes snd_soc_component_read/write() as generally style. This patch do 1) merge snd_soc_component_read() and snd_soc_component_read32() 2) it uses soc_component_err() when error case (easy to notice) 3) keeps read32 for now by #define 4) update snd_soc_component_read() for all drivers Because _read() user drivers are not too many, this patch changes all user drivers. Signed-off-by: Kuninori Morimoto Reviewed-by: Kai Vehmanen Link: https://lore.kernel.org/r/87sgev4mfl.wl-kuninori.morimoto.gx@renesas.com Signed-off-by: Mark Brown --- include/sound/soc-component.h | 5 ++-- sound/soc/codecs/ak4613.c | 4 ++-- sound/soc/codecs/cs47l35.c | 10 +------- sound/soc/codecs/cs47l85.c | 10 +------- sound/soc/codecs/inno_rk3036.c | 6 ++--- sound/soc/codecs/madera.c | 49 ++++++++------------------------------- sound/soc/codecs/nau8822.c | 2 +- sound/soc/codecs/rt1305.c | 2 +- sound/soc/codecs/rt5682.c | 3 +-- sound/soc/codecs/tas5720.c | 4 ++-- sound/soc/codecs/tda7419.c | 9 ++----- sound/soc/codecs/tscs454.c | 24 ++++--------------- sound/soc/fsl/fsl_audmix.c | 10 ++------ sound/soc/fsl/fsl_easrc.c | 5 +--- sound/soc/meson/aiu-encoder-i2s.c | 3 +-- sound/soc/meson/aiu-fifo-i2s.c | 3 +-- sound/soc/meson/aiu-fifo.c | 3 +-- sound/soc/soc-ac97.c | 7 +++--- sound/soc/soc-component.c | 40 ++++++++++---------------------- sound/soc/soc-dapm.c | 31 +++++++++---------------- sound/soc/soc-ops.c | 43 +++++++--------------------------- 21 files changed, 69 insertions(+), 204 deletions(-) (limited to 'include') diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h index 4a4bb723ca9f..f64cffa12967 100644 --- a/include/sound/soc-component.h +++ b/include/sound/soc-component.h @@ -333,9 +333,8 @@ void snd_soc_component_set_aux(struct snd_soc_component *component, int snd_soc_component_init(struct snd_soc_component *component); /* component IO */ -int snd_soc_component_read(struct snd_soc_component *component, - unsigned int reg, unsigned int *val); -unsigned int snd_soc_component_read32(struct snd_soc_component *component, +#define snd_soc_component_read32 snd_soc_component_read +unsigned int snd_soc_component_read(struct snd_soc_component *component, unsigned int reg); int snd_soc_component_write(struct snd_soc_component *component, unsigned int reg, unsigned int val); diff --git a/sound/soc/codecs/ak4613.c b/sound/soc/codecs/ak4613.c index c1181a20714d..d4d2f0d9231a 100644 --- a/sound/soc/codecs/ak4613.c +++ b/sound/soc/codecs/ak4613.c @@ -490,8 +490,8 @@ static void ak4613_dummy_write(struct work_struct *work) */ udelay(5000000 / priv->rate); - snd_soc_component_read(component, PW_MGMT1, &mgmt1); - 
snd_soc_component_read(component, PW_MGMT3, &mgmt3); + mgmt1 = snd_soc_component_read(component, PW_MGMT1); + mgmt3 = snd_soc_component_read(component, PW_MGMT3); snd_soc_component_write(component, PW_MGMT1, mgmt1); snd_soc_component_write(component, PW_MGMT3, mgmt3); diff --git a/sound/soc/codecs/cs47l35.c b/sound/soc/codecs/cs47l35.c index d7538d50bbd3..e9b1fc4c7580 100644 --- a/sound/soc/codecs/cs47l35.c +++ b/sound/soc/codecs/cs47l35.c @@ -129,19 +129,11 @@ static void cs47l35_hp_post_enable(struct snd_soc_dapm_widget *w) struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); unsigned int val; - int ret; switch (w->shift) { case MADERA_OUT1L_ENA_SHIFT: case MADERA_OUT1R_ENA_SHIFT: - ret = snd_soc_component_read(component, MADERA_OUTPUT_ENABLES_1, - &val); - if (ret) { - dev_err(component->dev, - "Failed to check output enables: %d\n", ret); - return; - } - + val = snd_soc_component_read(component, MADERA_OUTPUT_ENABLES_1); val &= (MADERA_OUT1L_ENA | MADERA_OUT1R_ENA); if (val != (MADERA_OUT1L_ENA | MADERA_OUT1R_ENA)) diff --git a/sound/soc/codecs/cs47l85.c b/sound/soc/codecs/cs47l85.c index 9de991adad74..64db07a99408 100644 --- a/sound/soc/codecs/cs47l85.c +++ b/sound/soc/codecs/cs47l85.c @@ -191,19 +191,11 @@ static void cs47l85_hp_post_enable(struct snd_soc_dapm_widget *w) struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); unsigned int val; - int ret; switch (w->shift) { case MADERA_OUT1L_ENA_SHIFT: case MADERA_OUT1R_ENA_SHIFT: - ret = snd_soc_component_read(component, MADERA_OUTPUT_ENABLES_1, - &val); - if (ret) { - dev_err(component->dev, - "Failed to check output enables: %d\n", ret); - return; - } - + val = snd_soc_component_read(component, MADERA_OUTPUT_ENABLES_1); val &= (MADERA_OUT1L_ENA | MADERA_OUT1R_ENA); if (val != (MADERA_OUT1L_ENA | MADERA_OUT1R_ENA)) diff --git a/sound/soc/codecs/inno_rk3036.c b/sound/soc/codecs/inno_rk3036.c index 14d8fe1c28a4..d0e8f0d2fbc1 100644 --- a/sound/soc/codecs/inno_rk3036.c +++ b/sound/soc/codecs/inno_rk3036.c @@ -48,11 +48,9 @@ static int rk3036_codec_antipop_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - int val, ret, regval; + int val, regval; - ret = snd_soc_component_read(component, INNO_R09, ®val); - if (ret) - return ret; + regval = snd_soc_component_read(component, INNO_R09); val = ((regval >> INNO_R09_HPL_ANITPOP_SHIFT) & INNO_R09_HP_ANTIPOP_MSK) == INNO_R09_HP_ANTIPOP_ON; ucontrol->value.integer.value[0] = val; diff --git a/sound/soc/codecs/madera.c b/sound/soc/codecs/madera.c index ec380b0b2d4e..680f31a6493a 100644 --- a/sound/soc/codecs/madera.c +++ b/sound/soc/codecs/madera.c @@ -628,12 +628,8 @@ int madera_out1_demux_get(struct snd_kcontrol *kcontrol, struct snd_soc_component *component = snd_soc_dapm_kcontrol_component(kcontrol); unsigned int val; - int ret; - - ret = snd_soc_component_read(component, MADERA_OUTPUT_ENABLES_1, &val); - if (ret) - return ret; + val = snd_soc_component_read(component, MADERA_OUTPUT_ENABLES_1); val &= MADERA_EP_SEL_MASK; val >>= MADERA_EP_SEL_SHIFT; ucontrol->value.enumerated.item[0] = val; @@ -1068,12 +1064,7 @@ int madera_rate_put(struct snd_kcontrol *kcontrol, */ mutex_lock(&priv->rate_lock); - ret = snd_soc_component_read(component, e->reg, &val); - if (ret < 0) { - dev_warn(priv->madera->dev, "Failed to read 0x%x (%d)\n", - e->reg, ret); - goto out; - } + val = snd_soc_component_read(component, e->reg); val >>= e->shift_l; val &= e->mask; if 
(snd_soc_enum_item_to_val(e, item) == val) { @@ -2178,10 +2169,7 @@ int madera_dfc_put(struct snd_kcontrol *kcontrol, snd_soc_dapm_mutex_lock(dapm); - ret = snd_soc_component_read(component, reg, &val); - if (ret) - goto exit; - + val = snd_soc_component_read(component, reg); if (val & MADERA_DFC1_ENA) { ret = -EBUSY; dev_err(component->dev, "Can't change mode on an active DFC\n"); @@ -2211,9 +2199,7 @@ int madera_lp_mode_put(struct snd_kcontrol *kcontrol, snd_soc_dapm_mutex_lock(dapm); /* Cannot change lp mode on an active input */ - ret = snd_soc_component_read(component, MADERA_INPUT_ENABLES, &val); - if (ret) - goto exit; + val = snd_soc_component_read(component, MADERA_INPUT_ENABLES); mask = (mc->reg - MADERA_ADC_DIGITAL_VOLUME_1L) / 4; mask ^= 0x1; /* Flip bottom bit for channel order */ @@ -2276,7 +2262,6 @@ int madera_in_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); struct madera_priv *priv = snd_soc_component_get_drvdata(component); unsigned int reg, val; - int ret; if (w->shift % 2) reg = MADERA_ADC_DIGITAL_VOLUME_1L + ((w->shift / 2) * 8); @@ -2305,9 +2290,8 @@ int madera_in_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, break; case SND_SOC_DAPM_POST_PMD: /* Disable volume updates if no inputs are enabled */ - ret = snd_soc_component_read(component, MADERA_INPUT_ENABLES, - &val); - if (!ret && !val) + val = snd_soc_component_read(component, MADERA_INPUT_ENABLES); + if (!val) madera_in_set_vu(priv, false); break; default: @@ -3087,26 +3071,16 @@ static int madera_aif_cfg_changed(struct snd_soc_component *component, int base, int bclk, int lrclk, int frame) { unsigned int val; - int ret; - ret = snd_soc_component_read(component, base + MADERA_AIF_BCLK_CTRL, - &val); - if (ret) - return ret; + val = snd_soc_component_read(component, base + MADERA_AIF_BCLK_CTRL); if (bclk != (val & MADERA_AIF1_BCLK_FREQ_MASK)) return 1; - ret = snd_soc_component_read(component, base + MADERA_AIF_RX_BCLK_RATE, - &val); - if (ret) - return ret; + val = snd_soc_component_read(component, base + MADERA_AIF_RX_BCLK_RATE); if (lrclk != (val & MADERA_AIF1RX_BCPF_MASK)) return 1; - ret = snd_soc_component_read(component, base + MADERA_AIF_FRAME_CTRL_1, - &val); - if (ret) - return ret; + val = snd_soc_component_read(component, base + MADERA_AIF_FRAME_CTRL_1); if (frame != (val & (MADERA_AIF1TX_WL_MASK | MADERA_AIF1TX_SLOT_LEN_MASK))) return 1; @@ -3162,10 +3136,7 @@ static int madera_hw_params(struct snd_pcm_substream *substream, } /* Force multiple of 2 channels for I2S mode */ - ret = snd_soc_component_read(component, base + MADERA_AIF_FORMAT, &val); - if (ret) - return ret; - + val = snd_soc_component_read(component, base + MADERA_AIF_FORMAT); val &= MADERA_AIF1_FMT_MASK; if ((channels & 1) && val == MADERA_FMT_I2S_MODE) { madera_aif_dbg(dai, "Forcing stereo mode\n"); diff --git a/sound/soc/codecs/nau8822.c b/sound/soc/codecs/nau8822.c index 78db3bd0b3bc..a4f661335c57 100644 --- a/sound/soc/codecs/nau8822.c +++ b/sound/soc/codecs/nau8822.c @@ -831,7 +831,7 @@ static int nau8822_hw_params(struct snd_pcm_substream *substream, unsigned int ctrl_val, bclk_fs, bclk_div; /* make BCLK and LRC divide configuration if the codec as master. 
*/ - snd_soc_component_read(component, NAU8822_REG_CLOCKING, &ctrl_val); + ctrl_val = snd_soc_component_read(component, NAU8822_REG_CLOCKING); if (ctrl_val & NAU8822_CLK_MASTER) { /* get the bclk and fs ratio */ bclk_fs = snd_soc_params_to_bclk(params) / params_rate(params); diff --git a/sound/soc/codecs/rt1305.c b/sound/soc/codecs/rt1305.c index e27742abfa76..4e9dfd235e59 100644 --- a/sound/soc/codecs/rt1305.c +++ b/sound/soc/codecs/rt1305.c @@ -411,7 +411,7 @@ static int rt1305_is_rc_clk_from_pll(struct snd_soc_dapm_widget *source, struct rt1305_priv *rt1305 = snd_soc_component_get_drvdata(component); unsigned int val; - snd_soc_component_read(component, RT1305_CLK_1, &val); + val = snd_soc_component_read(component, RT1305_CLK_1); if (rt1305->sysclk_src == RT1305_FS_SYS_PRE_S_PLL1 && (val & RT1305_SEL_PLL_SRC_2_RCCLK)) diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c index 36cfd10f8b04..8b592069a7e2 100644 --- a/sound/soc/codecs/rt5682.c +++ b/sound/soc/codecs/rt5682.c @@ -2640,8 +2640,7 @@ static unsigned long rt5682_bclk_recalc_rate(struct clk_hw *hw, struct snd_soc_component *component = rt5682->component; unsigned int bclks_per_wclk; - snd_soc_component_read(component, RT5682_TDM_TCON_CTRL, - &bclks_per_wclk); + bclks_per_wclk = snd_soc_component_read(component, RT5682_TDM_TCON_CTRL); switch (bclks_per_wclk & RT5682_TDM_BCLK_MS1_MASK) { case RT5682_TDM_BCLK_MS1_256: diff --git a/sound/soc/codecs/tas5720.c b/sound/soc/codecs/tas5720.c index 37fab8f22800..e159f839d928 100644 --- a/sound/soc/codecs/tas5720.c +++ b/sound/soc/codecs/tas5720.c @@ -508,10 +508,10 @@ static int tas5722_volume_get(struct snd_kcontrol *kcontrol, struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); unsigned int val; - snd_soc_component_read(component, TAS5720_VOLUME_CTRL_REG, &val); + val = snd_soc_component_read(component, TAS5720_VOLUME_CTRL_REG); ucontrol->value.integer.value[0] = val << 1; - snd_soc_component_read(component, TAS5722_DIGITAL_CTRL2_REG, &val); + val = snd_soc_component_read(component, TAS5722_DIGITAL_CTRL2_REG); ucontrol->value.integer.value[0] |= val & TAS5722_VOL_CONTROL_LSB; return 0; diff --git a/sound/soc/codecs/tda7419.c b/sound/soc/codecs/tda7419.c index 2bf4f5e8af27..83d220054c96 100644 --- a/sound/soc/codecs/tda7419.c +++ b/sound/soc/codecs/tda7419.c @@ -187,18 +187,13 @@ static int tda7419_vol_get(struct snd_kcontrol *kcontrol, int thresh = tvc->thresh; unsigned int invert = tvc->invert; int val; - int ret; - ret = snd_soc_component_read(component, reg, &val); - if (ret < 0) - return ret; + val = snd_soc_component_read(component, reg); ucontrol->value.integer.value[0] = tda7419_vol_get_value(val, mask, min, thresh, invert); if (tda7419_vol_is_stereo(tvc)) { - ret = snd_soc_component_read(component, rreg, &val); - if (ret < 0) - return ret; + val = snd_soc_component_read(component, rreg); ucontrol->value.integer.value[1] = tda7419_vol_get_value(val, mask, min, thresh, invert); } diff --git a/sound/soc/codecs/tscs454.c b/sound/soc/codecs/tscs454.c index c3587af9985c..d0af16b4db2f 100644 --- a/sound/soc/codecs/tscs454.c +++ b/sound/soc/codecs/tscs454.c @@ -353,12 +353,7 @@ static int write_coeff_ram(struct snd_soc_component *component, u8 *coeff_ram, for (cnt = 0; cnt < coeff_cnt; cnt++, coeff_addr++) { for (trys = 0; trys < DACCRSTAT_MAX_TRYS; trys++) { - ret = snd_soc_component_read(component, r_stat, &val); - if (ret < 0) { - dev_err(component->dev, - "Failed to read stat (%d)\n", ret); - return ret; - } + val = 
snd_soc_component_read(component, r_stat); if (!val) break; } @@ -444,12 +439,7 @@ static int coeff_ram_put(struct snd_kcontrol *kcontrol, mutex_lock(&tscs454->pll1.lock); mutex_lock(&tscs454->pll2.lock); - ret = snd_soc_component_read(component, R_PLLSTAT, &val); - if (ret < 0) { - dev_err(component->dev, "Failed to read PLL status (%d)\n", - ret); - goto exit; - } + val = snd_soc_component_read(component, R_PLLSTAT); if (val) { /* PLLs locked */ ret = write_coeff_ram(component, coeff_ram, r_stat, r_addr, r_wr, @@ -2642,13 +2632,10 @@ static int tscs454_set_sysclk(struct snd_soc_dai *dai, struct tscs454 *tscs454 = snd_soc_component_get_drvdata(component); unsigned int val; int bclk_dai; - int ret; dev_dbg(component->dev, "%s(): freq = %u\n", __func__, freq); - ret = snd_soc_component_read(component, R_PLLCTL, &val); - if (ret < 0) - return ret; + val = snd_soc_component_read(component, R_PLLCTL); bclk_dai = (val & FM_PLLCTL_BCLKSEL) >> FB_PLLCTL_BCLKSEL; if (bclk_dai != dai->id) @@ -3204,10 +3191,7 @@ static int tscs454_hw_params(struct snd_pcm_substream *substream, } if (!aifs_active(&tscs454->aifs_status)) { /* First active aif */ - ret = snd_soc_component_read(component, R_ISRC, &val); - if (ret < 0) - goto exit; - + val = snd_soc_component_read(component, R_ISRC); if ((val & FM_ISRC_IBR) == FV_IBR_48) tscs454->internal_rate.pll = &tscs454->pll1; else diff --git a/sound/soc/fsl/fsl_audmix.c b/sound/soc/fsl/fsl_audmix.c index 8b9027f76d8a..a447bafa00d2 100644 --- a/sound/soc/fsl/fsl_audmix.c +++ b/sound/soc/fsl/fsl_audmix.c @@ -116,13 +116,9 @@ static int fsl_audmix_put_mix_clk_src(struct snd_kcontrol *kcontrol, struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; unsigned int *item = ucontrol->value.enumerated.item; unsigned int reg_val, val, mix_clk; - int ret; /* Get current state */ - ret = snd_soc_component_read(comp, FSL_AUDMIX_CTR, ®_val); - if (ret) - return ret; - + reg_val = snd_soc_component_read(comp, FSL_AUDMIX_CTR); mix_clk = ((reg_val & FSL_AUDMIX_CTR_MIXCLK_MASK) >> FSL_AUDMIX_CTR_MIXCLK_SHIFT); val = snd_soc_enum_item_to_val(e, item[0]); @@ -162,9 +158,7 @@ static int fsl_audmix_put_out_src(struct snd_kcontrol *kcontrol, int ret; /* Get current state */ - ret = snd_soc_component_read(comp, FSL_AUDMIX_CTR, ®_val); - if (ret) - return ret; + reg_val = snd_soc_component_read(comp, FSL_AUDMIX_CTR); /* "From" state */ out_src = ((reg_val & FSL_AUDMIX_CTR_OUTSRC_MASK) diff --git a/sound/soc/fsl/fsl_easrc.c b/sound/soc/fsl/fsl_easrc.c index 2f6b3d8bfcfc..58cc093ad741 100644 --- a/sound/soc/fsl/fsl_easrc.c +++ b/sound/soc/fsl/fsl_easrc.c @@ -79,11 +79,8 @@ static int fsl_easrc_get_reg(struct snd_kcontrol *kcontrol, struct soc_mreg_control *mc = (struct soc_mreg_control *)kcontrol->private_value; unsigned int regval; - int ret; - ret = snd_soc_component_read(component, mc->regbase, ®val); - if (ret < 0) - return ret; + regval = snd_soc_component_read(component, mc->regbase); ucontrol->value.integer.value[0] = regval; diff --git a/sound/soc/meson/aiu-encoder-i2s.c b/sound/soc/meson/aiu-encoder-i2s.c index 832e22d275fe..932224552146 100644 --- a/sound/soc/meson/aiu-encoder-i2s.c +++ b/sound/soc/meson/aiu-encoder-i2s.c @@ -72,11 +72,10 @@ static int aiu_encoder_i2s_setup_desc(struct snd_soc_component *component, { /* Always operate in split (classic interleaved) mode */ unsigned int desc = AIU_I2S_SOURCE_DESC_MODE_SPLIT; - unsigned int val; /* Reset required to update the pipeline */ snd_soc_component_write(component, AIU_RST_SOFT, AIU_RST_SOFT_I2S_FAST); - 
snd_soc_component_read(component, AIU_I2S_SYNC, &val); + snd_soc_component_read(component, AIU_I2S_SYNC); switch (params_physical_width(params)) { case 16: /* Nothing to do */ diff --git a/sound/soc/meson/aiu-fifo-i2s.c b/sound/soc/meson/aiu-fifo-i2s.c index 9a5271ce80fe..d91b0d874342 100644 --- a/sound/soc/meson/aiu-fifo-i2s.c +++ b/sound/soc/meson/aiu-fifo-i2s.c @@ -46,7 +46,6 @@ static int aiu_fifo_i2s_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct snd_soc_component *component = dai->component; - unsigned int val; switch (cmd) { case SNDRV_PCM_TRIGGER_START: @@ -54,7 +53,7 @@ static int aiu_fifo_i2s_trigger(struct snd_pcm_substream *substream, int cmd, case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: snd_soc_component_write(component, AIU_RST_SOFT, AIU_RST_SOFT_I2S_FAST); - snd_soc_component_read(component, AIU_I2S_SYNC, &val); + snd_soc_component_read(component, AIU_I2S_SYNC); break; } diff --git a/sound/soc/meson/aiu-fifo.c b/sound/soc/meson/aiu-fifo.c index d9cede4c33ff..aa88aae8e517 100644 --- a/sound/soc/meson/aiu-fifo.c +++ b/sound/soc/meson/aiu-fifo.c @@ -37,8 +37,7 @@ snd_pcm_uframes_t aiu_fifo_pointer(struct snd_soc_component *component, struct snd_pcm_runtime *runtime = substream->runtime; unsigned int addr; - snd_soc_component_read(component, fifo->mem_offset + AIU_MEM_RD, - &addr); + addr = snd_soc_component_read(component, fifo->mem_offset + AIU_MEM_RD); return bytes_to_frames(runtime, addr - (unsigned int)runtime->dma_addr); } diff --git a/sound/soc/soc-ac97.c b/sound/soc/soc-ac97.c index c086786e4471..906106ed8ca1 100644 --- a/sound/soc/soc-ac97.c +++ b/sound/soc/soc-ac97.c @@ -82,13 +82,12 @@ static int snd_soc_ac97_gpio_get(struct gpio_chip *chip, unsigned offset) struct snd_soc_component *component = gpio_to_component(chip); int ret; - if (snd_soc_component_read(component, AC97_GPIO_STATUS, &ret) < 0) - ret = -1; + ret = snd_soc_component_read(component, AC97_GPIO_STATUS); dev_dbg(component->dev, "get gpio %d : %d\n", offset, - ret < 0 ? ret : ret & (1 << offset)); + ret & (1 << offset)); - return ret < 0 ? ret : !!(ret & (1 << offset)); + return !!(ret & (1 << offset)); } static void snd_soc_ac97_gpio_set(struct gpio_chip *chip, unsigned offset, diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c index d121f5f7633c..428f88decfdb 100644 --- a/sound/soc/soc-component.c +++ b/sound/soc/soc-component.c @@ -407,41 +407,30 @@ EXPORT_SYMBOL_GPL(snd_soc_component_exit_regmap); * snd_soc_component_read() - Read register value * @component: Component to read from * @reg: Register to read - * @val: Pointer to where the read value is stored * - * Return: 0 on success, a negative error code otherwise. 
+ * Return: read value */ -int snd_soc_component_read(struct snd_soc_component *component, - unsigned int reg, unsigned int *val) +unsigned int snd_soc_component_read(struct snd_soc_component *component, + unsigned int reg) { int ret; + unsigned int val = 0; if (component->regmap) - ret = regmap_read(component->regmap, reg, val); + ret = regmap_read(component->regmap, reg, &val); else if (component->driver->read) { - *val = component->driver->read(component, reg); ret = 0; + val = component->driver->read(component, reg); } else ret = -EIO; - return soc_component_ret(component, ret); -} -EXPORT_SYMBOL_GPL(snd_soc_component_read); - -unsigned int snd_soc_component_read32(struct snd_soc_component *component, - unsigned int reg) -{ - unsigned int val; - int ret; - - ret = snd_soc_component_read(component, reg, &val); if (ret < 0) - return soc_component_ret(component, -1); + soc_component_ret(component, ret); return val; } -EXPORT_SYMBOL_GPL(snd_soc_component_read32); +EXPORT_SYMBOL_GPL(snd_soc_component_read); /** * snd_soc_component_write() - Write register value @@ -470,19 +459,17 @@ static int snd_soc_component_update_bits_legacy( unsigned int mask, unsigned int val, bool *change) { unsigned int old, new; - int ret; + int ret = 0; mutex_lock(&component->io_mutex); - ret = snd_soc_component_read(component, reg, &old); - if (ret < 0) - goto out_unlock; + old = snd_soc_component_read(component, reg); new = (old & ~mask) | (val & mask); *change = old != new; if (*change) ret = snd_soc_component_write(component, reg, new); -out_unlock: + mutex_unlock(&component->io_mutex); return soc_component_ret(component, ret); @@ -584,11 +571,8 @@ int snd_soc_component_test_bits(struct snd_soc_component *component, unsigned int reg, unsigned int mask, unsigned int value) { unsigned int old, new; - int ret; - ret = snd_soc_component_read(component, reg, &old); - if (ret < 0) - return soc_component_ret(component, ret); + old = snd_soc_component_read(component, reg); new = (old & ~mask) | value; return old != new; } diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 2491e1ce16d3..e51aa2efc65c 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -616,12 +616,11 @@ static const char *soc_dapm_prefix(struct snd_soc_dapm_context *dapm) return dapm->component->name_prefix; } -static int soc_dapm_read(struct snd_soc_dapm_context *dapm, int reg, - unsigned int *value) +static unsigned int soc_dapm_read(struct snd_soc_dapm_context *dapm, int reg) { if (!dapm->component) return -EIO; - return snd_soc_component_read(dapm->component, reg, value); + return snd_soc_component_read(dapm->component, reg); } static int soc_dapm_update_bits(struct snd_soc_dapm_context *dapm, @@ -753,7 +752,7 @@ static int dapm_connect_mux(struct snd_soc_dapm_context *dapm, int i; if (e->reg != SND_SOC_NOPM) { - soc_dapm_read(dapm, e->reg, &val); + val = soc_dapm_read(dapm, e->reg); val = (val >> e->shift_l) & e->mask; item = snd_soc_enum_val_to_item(e, val); } else { @@ -790,7 +789,7 @@ static void dapm_set_mixer_path_status(struct snd_soc_dapm_path *p, int i, unsigned int val; if (reg != SND_SOC_NOPM) { - soc_dapm_read(p->sink->dapm, reg, &val); + val = soc_dapm_read(p->sink->dapm, reg); /* * The nth_path argument allows this function to know * which path of a kcontrol it is setting the initial @@ -805,7 +804,7 @@ static void dapm_set_mixer_path_status(struct snd_soc_dapm_path *p, int i, */ if (snd_soc_volsw_is_stereo(mc) && nth_path > 0) { if (reg != mc->rreg) - soc_dapm_read(p->sink->dapm, mc->rreg, &val); + val = 
soc_dapm_read(p->sink->dapm, mc->rreg); val = (val >> mc->rshift) & mask; } else { val = (val >> shift) & mask; @@ -3246,7 +3245,7 @@ int snd_soc_dapm_new_widgets(struct snd_soc_card *card) /* Read the initial power state from the device */ if (w->reg >= 0) { - soc_dapm_read(w->dapm, w->reg, &val); + val = soc_dapm_read(w->dapm, w->reg); val = val >> w->shift; val &= w->mask; if (val == w->on_val) @@ -3288,15 +3287,14 @@ int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol, unsigned int mask = (1 << fls(max)) - 1; unsigned int invert = mc->invert; unsigned int reg_val, val, rval = 0; - int ret = 0; mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); if (dapm_kcontrol_is_powered(kcontrol) && reg != SND_SOC_NOPM) { - ret = soc_dapm_read(dapm, reg, ®_val); + reg_val = soc_dapm_read(dapm, reg); val = (reg_val >> shift) & mask; - if (ret == 0 && reg != mc->rreg) - ret = soc_dapm_read(dapm, mc->rreg, ®_val); + if (reg != mc->rreg) + reg_val = soc_dapm_read(dapm, mc->rreg); if (snd_soc_volsw_is_stereo(mc)) rval = (reg_val >> mc->rshift) & mask; @@ -3309,9 +3307,6 @@ int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol, } mutex_unlock(&card->dapm_mutex); - if (ret) - return ret; - if (invert) ucontrol->value.integer.value[0] = max - val; else @@ -3324,7 +3319,7 @@ int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol, ucontrol->value.integer.value[1] = rval; } - return ret; + return 0; } EXPORT_SYMBOL_GPL(snd_soc_dapm_get_volsw); @@ -3439,11 +3434,7 @@ int snd_soc_dapm_get_enum_double(struct snd_kcontrol *kcontrol, mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); if (e->reg != SND_SOC_NOPM && dapm_kcontrol_is_powered(kcontrol)) { - int ret = soc_dapm_read(dapm, e->reg, ®_val); - if (ret) { - mutex_unlock(&card->dapm_mutex); - return ret; - } + reg_val = soc_dapm_read(dapm, e->reg); } else { reg_val = dapm_kcontrol_get_value(kcontrol); } diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c index 55ffb34be95e..10f48827bb0e 100644 --- a/sound/soc/soc-ops.c +++ b/sound/soc/soc-ops.c @@ -63,11 +63,8 @@ int snd_soc_get_enum_double(struct snd_kcontrol *kcontrol, struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; unsigned int val, item; unsigned int reg_val; - int ret; - ret = snd_soc_component_read(component, e->reg, ®_val); - if (ret) - return ret; + reg_val = snd_soc_component_read(component, e->reg); val = (reg_val >> e->shift_l) & e->mask; item = snd_soc_enum_val_to_item(e, val); ucontrol->value.enumerated.item[0] = item; @@ -136,10 +133,7 @@ static int snd_soc_read_signed(struct snd_soc_component *component, int ret; unsigned int val; - ret = snd_soc_component_read(component, reg, &val); - if (ret < 0) - return ret; - + val = snd_soc_component_read(component, reg); val = (val >> shift) & mask; if (!sign_bit) { @@ -375,19 +369,12 @@ int snd_soc_get_volsw_sx(struct snd_kcontrol *kcontrol, int min = mc->min; unsigned int mask = (1U << (fls(min + max) - 1)) - 1; unsigned int val; - int ret; - - ret = snd_soc_component_read(component, reg, &val); - if (ret < 0) - return ret; + val = snd_soc_component_read(component, reg); ucontrol->value.integer.value[0] = ((val >> shift) - min) & mask; if (snd_soc_volsw_is_stereo(mc)) { - ret = snd_soc_component_read(component, reg2, &val); - if (ret < 0) - return ret; - + val = snd_soc_component_read(component, reg2); val = ((val >> rshift) - min) & mask; ucontrol->value.integer.value[1] = val; } @@ -548,12 +535,8 @@ int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol, unsigned int mask = (1 << fls(max)) - 
1; unsigned int invert = mc->invert; unsigned int val; - int ret; - - ret = snd_soc_component_read(component, reg, &val); - if (ret) - return ret; + val = snd_soc_component_read(component, reg); ucontrol->value.integer.value[0] = (val >> shift) & mask; if (invert) ucontrol->value.integer.value[0] = @@ -563,10 +546,7 @@ int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol, ucontrol->value.integer.value[0] - min; if (snd_soc_volsw_is_stereo(mc)) { - ret = snd_soc_component_read(component, rreg, &val); - if (ret) - return ret; - + val = snd_soc_component_read(component, rreg); ucontrol->value.integer.value[1] = (val >> shift) & mask; if (invert) ucontrol->value.integer.value[1] = @@ -833,12 +813,9 @@ int snd_soc_get_xr_sx(struct snd_kcontrol *kcontrol, long val = 0; unsigned int regval; unsigned int i; - int ret; for (i = 0; i < regcount; i++) { - ret = snd_soc_component_read(component, regbase+i, ®val); - if (ret) - return ret; + regval = snd_soc_component_read(component, regbase+i); val |= (regval & regwmask) << (regwshift*(regcount-i-1)); } val &= mask; @@ -918,12 +895,8 @@ int snd_soc_get_strobe(struct snd_kcontrol *kcontrol, unsigned int mask = 1 << shift; unsigned int invert = mc->invert != 0; unsigned int val; - int ret; - - ret = snd_soc_component_read(component, reg, &val); - if (ret) - return ret; + val = snd_soc_component_read(component, reg); val &= mask; if (shift != 0 && val != 0) -- cgit v1.2.3 From 5b554b0a29ce9610e3c237c77a1f76db87454b72 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Tue, 16 Jun 2020 14:21:55 +0900 Subject: ASoC: remove snd_soc_component_read32() No driver is using snd_soc_component_read32() anymore. This patch removes it. Signed-off-by: Kuninori Morimoto Link: https://lore.kernel.org/r/877dw74mbv.wl-kuninori.morimoto.gx@renesas.com Signed-off-by: Mark Brown --- include/sound/soc-component.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h index f64cffa12967..8917b15eccae 100644 --- a/include/sound/soc-component.h +++ b/include/sound/soc-component.h @@ -333,7 +333,6 @@ void snd_soc_component_set_aux(struct snd_soc_component *component, int snd_soc_component_init(struct snd_soc_component *component); /* component IO */ -#define snd_soc_component_read32 snd_soc_component_read unsigned int snd_soc_component_read(struct snd_soc_component *component, unsigned int reg); int snd_soc_component_write(struct snd_soc_component *component, -- cgit v1.2.3 From 8746f135bb01872ff412d408ea1aa9ebd328c1f5 Mon Sep 17 00:00:00 2001 From: Luiz Augusto von Dentz Date: Wed, 20 May 2020 14:20:14 -0700 Subject: Bluetooth: Disconnect if E0 is used for Level 4 E0 is not allowed with Level 4: BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C page 1319: '128-bit equivalent strength for link and encryption keys required using FIPS approved algorithms (E0 not allowed, SAFER+ not allowed, and P-192 not allowed; encryption key not shortened' SC enabled: > HCI Event: Read Remote Extended Features (0x23) plen 13 Status: Success (0x00) Handle: 256 Page: 1/2 Features: 0x0b 0x00 0x00 0x00 0x00 0x00 0x00 0x00 Secure Simple Pairing (Host Support) LE Supported (Host) Secure Connections (Host Support) > HCI Event: Encryption Change (0x08) plen 4 Status: Success (0x00) Handle: 256 Encryption: Enabled with AES-CCM (0x02) SC disabled: > HCI Event: Read Remote Extended Features (0x23) plen 13 Status: Success (0x00) Handle: 256 Page: 1/2 Features: 0x03 0x00 0x00 0x00 0x00 0x00 0x00 0x00 Secure Simple 
Pairing (Host Support) LE Supported (Host) > HCI Event: Encryption Change (0x08) plen 4 Status: Success (0x00) Handle: 256 Encryption: Enabled with E0 (0x01) [May 8 20:23] Bluetooth: hci0: Invalid security: expect AES but E0 was used < HCI Command: Disconnect (0x01|0x0006) plen 3 Handle: 256 Reason: Authentication Failure (0x05) Signed-off-by: Luiz Augusto von Dentz Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci_core.h | 10 ++++++---- net/bluetooth/hci_conn.c | 17 +++++++++++++++++ net/bluetooth/hci_event.c | 20 ++++++++------------ 3 files changed, 31 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 77d29341b064..836dc997ff94 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1481,11 +1481,13 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status) else encrypt = 0x01; - if (conn->sec_level == BT_SECURITY_SDP) - conn->sec_level = BT_SECURITY_LOW; + if (!status) { + if (conn->sec_level == BT_SECURITY_SDP) + conn->sec_level = BT_SECURITY_LOW; - if (conn->pending_sec_level > conn->sec_level) - conn->sec_level = conn->pending_sec_level; + if (conn->pending_sec_level > conn->sec_level) + conn->sec_level = conn->pending_sec_level; + } mutex_lock(&hci_cb_list_lock); list_for_each_entry(cb, &hci_cb_list, list) { diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 47f3a45d7dcb..8805d68e65f2 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -1322,6 +1322,23 @@ int hci_conn_check_link_mode(struct hci_conn *conn) return 0; } + /* AES encryption is required for Level 4: + * + * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C + * page 1319: + * + * 128-bit equivalent strength for link and encryption keys + * required using FIPS approved algorithms (E0 not allowed, + * SAFER+ not allowed, and P-192 not allowed; encryption key + * not shortened) + */ + if (conn->sec_level == BT_SECURITY_FIPS && + !test_bit(HCI_CONN_AES_CCM, &conn->flags)) { + bt_dev_err(conn->hdev, + "Invalid security: Missing AES-CCM usage"); + return 0; + } + if (hci_conn_ssp_enabled(conn) && !test_bit(HCI_CONN_ENCRYPT, &conn->flags)) return 0; diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index e08d4dd9a24e..e060fc9ebb18 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -3065,27 +3065,23 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); + /* Check link security requirements are met */ + if (!hci_conn_check_link_mode(conn)) + ev->status = HCI_ERROR_AUTH_FAILURE; + if (ev->status && conn->state == BT_CONNECTED) { if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); + /* Notify upper layers so they can cleanup before + * disconnecting. + */ + hci_encrypt_cfm(conn, ev->status); hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); hci_conn_drop(conn); goto unlock; } - /* In Secure Connections Only mode, do not allow any connections - * that are not encrypted with AES-CCM using a P-256 authenticated - * combination key. 
- */ - if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && - (!test_bit(HCI_CONN_AES_CCM, &conn->flags) || - conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) { - hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE); - hci_conn_drop(conn); - goto unlock; - } - /* Try reading the encryption key size for encrypted ACL links */ if (!ev->status && ev->encrypt && conn->type == ACL_LINK) { struct hci_cp_read_enc_key_size cp; -- cgit v1.2.3 From 53f13319d13197dd1f4c8ce5fc1ef4c32509b4e2 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Tue, 9 Jun 2020 18:10:39 +0300 Subject: thunderbolt: Get rid of E2E workaround The end-to-end (E2E) workaround is needed for Falcon Ridge (TBT 2) controller when E2E is enabled for both ends of the host-to-host connection. However, we never supported full E2E in the first place so this code is not necessary at the moment. Further this allows us to use all available rings for data except ring 0 which is reserved for the control path. The complete E2E flow control is explained in the USB4 spec so we may add it back later if needed but at least the networking driver seems to work fine without, and the higher level stack, like TCP will retransmit lost packets anyway. Signed-off-by: Mika Westerberg --- drivers/net/thunderbolt.c | 4 ++-- drivers/thunderbolt/nhi.c | 26 ++------------------------ include/linux/thunderbolt.h | 2 -- 3 files changed, 4 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c index dacb4f680fd4..a812726703a4 100644 --- a/drivers/net/thunderbolt.c +++ b/drivers/net/thunderbolt.c @@ -866,8 +866,8 @@ static int tbnet_open(struct net_device *dev) eof_mask = BIT(TBIP_PDF_FRAME_END); ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE, - RING_FLAG_FRAME | RING_FLAG_E2E, sof_mask, - eof_mask, tbnet_start_poll, net); + RING_FLAG_FRAME, sof_mask, eof_mask, + tbnet_start_poll, net); if (!ring) { netdev_err(dev, "failed to allocate Rx ring\n"); tb_ring_free(net->tx_ring.ring); diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c index b617922b5b0a..5f7489fa1327 100644 --- a/drivers/thunderbolt/nhi.c +++ b/drivers/thunderbolt/nhi.c @@ -24,12 +24,7 @@ #define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring") -/* - * Used to enable end-to-end workaround for missing RX packets. Do not - * use this ring for anything else. - */ -#define RING_E2E_UNUSED_HOPID 2 -#define RING_FIRST_USABLE_HOPID TB_PATH_MIN_HOPID +#define RING_FIRST_USABLE_HOPID 1 /* * Minimal number of vectors when we use MSI-X. Two for control channel @@ -440,7 +435,7 @@ static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring) /* * Automatically allocate HopID from the non-reserved - * range 8 .. hop_count - 1. + * range 1 .. hop_count - 1. */ for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) { if (ring->is_tx) { @@ -496,10 +491,6 @@ static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size, dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n", transmit ? "TX" : "RX", hop, size); - /* Tx Ring 2 is reserved for E2E workaround */ - if (transmit && hop == RING_E2E_UNUSED_HOPID) - return NULL; - ring = kzalloc(sizeof(*ring), GFP_KERNEL); if (!ring) return NULL; @@ -614,19 +605,6 @@ void tb_ring_start(struct tb_ring *ring) flags = RING_FLAG_ENABLE | RING_FLAG_RAW; } - if (ring->flags & RING_FLAG_E2E && !ring->is_tx) { - u32 hop; - - /* - * In order not to lose Rx packets we enable end-to-end - * workaround which transfers Rx credits to an unused Tx - * HopID. 
- */ - hop = RING_E2E_UNUSED_HOPID << REG_RX_OPTIONS_E2E_HOP_SHIFT; - hop &= REG_RX_OPTIONS_E2E_HOP_MASK; - flags |= hop | RING_FLAG_E2E_FLOW_CONTROL; - } - ring_iowrite64desc(ring, ring->descriptors_dma, 0); if (ring->is_tx) { ring_iowrite32desc(ring, ring->size, 12); diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h index ff397c0d5c07..5db2b11ab085 100644 --- a/include/linux/thunderbolt.h +++ b/include/linux/thunderbolt.h @@ -504,8 +504,6 @@ struct tb_ring { #define RING_FLAG_NO_SUSPEND BIT(0) /* Configure the ring to be in frame mode */ #define RING_FLAG_FRAME BIT(1) -/* Enable end-to-end flow control */ -#define RING_FLAG_E2E BIT(2) struct ring_frame; typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled); -- cgit v1.2.3 From 41c48f3a98231738c5ce79f6f2aa6e40ba924d18 Mon Sep 17 00:00:00 2001 From: Andrey Ignatov Date: Fri, 19 Jun 2020 14:11:43 -0700 Subject: bpf: Support access to bpf map fields There are multiple use-cases when it's convenient to have access to bpf map fields, both `struct bpf_map` and map type specific struct-s such as `struct bpf_array`, `struct bpf_htab`, etc. For example while working with sock arrays it can be necessary to calculate the key based on map->max_entries (some_hash % max_entries). Currently this is solved by communicating max_entries via "out-of-band" channel, e.g. via additional map with known key to get info about target map. That works, but is not very convenient and error-prone while working with many maps. In other cases necessary data is dynamic (i.e. unknown at loading time) and it's impossible to get it at all. For example while working with a hash table it can be convenient to know how much capacity is already used (bpf_htab.count.counter for BPF_F_NO_PREALLOC case). At the same time kernel knows this info and can provide it to bpf program. Fill this gap by adding support to access bpf map fields from bpf program for both `struct bpf_map` and map type specific fields. Support is implemented via btf_struct_access() so that a user can define their own `struct bpf_map` or map type specific struct in their program with only necessary fields and preserve_access_index attribute, cast a map to this struct and use a field. For example: struct bpf_map { __u32 max_entries; } __attribute__((preserve_access_index)); struct bpf_array { struct bpf_map map; __u32 elem_size; } __attribute__((preserve_access_index)); struct { __uint(type, BPF_MAP_TYPE_ARRAY); __uint(max_entries, 4); __type(key, __u32); __type(value, __u32); } m_array SEC(".maps"); SEC("cgroup_skb/egress") int cg_skb(void *ctx) { struct bpf_array *array = (struct bpf_array *)&m_array; struct bpf_map *map = (struct bpf_map *)&m_array; /* .. use map->max_entries or array->map.max_entries .. */ } Similarly to other btf_struct_access() use-cases (e.g. struct tcp_sock in net/ipv4/bpf_tcp_ca.c) the patch allows access to any fields of corresponding struct. Only reading from map fields is supported. For btf_struct_access() to work there should be a way to know btf id of a struct that corresponds to a map type. To get btf id there should be a way to get a stringified name of map-specific struct, such as "bpf_array", "bpf_htab", etc for a map type. Two new fields are added to `struct bpf_map_ops` to handle it: * .map_btf_name keeps a btf name of a struct returned by map_alloc(); * .map_btf_id is used to cache btf id of that struct. 
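Expanded into a self-contained program, the array example sketched earlier in this message looks roughly as follows. This is an illustrative sketch only, not the selftest code: the struct mirrors, the m_array map and the cgroup_skb/egress program come from the example above, while the key derivation restates the "some_hash % max_entries" idea with bpf_get_prandom_u32() standing in for the hash. Loading it needs CAP_PERFMON or CAP_SYS_ADMIN and CONFIG_DEBUG_INFO_BTF=y, as described above.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Partial mirrors of the kernel-internal structs; preserve_access_index
 * lets libbpf relocate the field offsets against vmlinux BTF at load time.
 */
struct bpf_map {
	__u32 max_entries;
} __attribute__((preserve_access_index));

struct bpf_array {
	struct bpf_map map;
	__u32 elem_size;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 4);
	__type(key, __u32);
	__type(value, __u32);
} m_array SEC(".maps");

SEC("cgroup_skb/egress")
int cg_skb(void *ctx)
{
	struct bpf_array *array = (struct bpf_array *)&m_array;
	__u32 key, *val;

	/* Derive the key from the map's own metadata instead of passing
	 * max_entries through an out-of-band map.
	 */
	key = bpf_get_prandom_u32() % array->map.max_entries;

	val = bpf_map_lookup_elem(&m_array, &key);
	if (val)
		*val = array->elem_size;	/* map fields are read-only; the map value is writable */

	return 1;	/* allow the packet */
}

char LICENSE[] SEC("license") = "GPL";
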
To make btf ids calculation cheaper they're calculated once while preparing btf_vmlinux and cached same way as it's done for btf_id field of `struct bpf_func_proto` While calculating btf ids, struct names are NOT checked for collision. Collisions will be checked as a part of the work to prepare btf ids used in verifier in compile time that should land soon. The only known collision for `struct bpf_htab` (kernel/bpf/hashtab.c vs net/core/sock_map.c) was fixed earlier. Both new fields .map_btf_name and .map_btf_id must be set for a map type for the feature to work. If neither is set for a map type, verifier will return ENOTSUPP on a try to access map_ptr of corresponding type. If just one of them set, it's verifier misconfiguration. Only `struct bpf_array` for BPF_MAP_TYPE_ARRAY and `struct bpf_htab` for BPF_MAP_TYPE_HASH are supported by this patch. Other map types will be supported separately. The feature is available only for CONFIG_DEBUG_INFO_BTF=y and gated by perfmon_capable() so that unpriv programs won't have access to bpf map fields. Signed-off-by: Andrey Ignatov Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/6479686a0cd1e9067993df57b4c3eef0e276fec9.1592600985.git.rdna@fb.com --- include/linux/bpf.h | 9 +++ include/linux/bpf_verifier.h | 1 + kernel/bpf/arraymap.c | 3 + kernel/bpf/btf.c | 40 +++++++++++ kernel/bpf/hashtab.c | 3 + kernel/bpf/verifier.c | 82 +++++++++++++++++++--- .../selftests/bpf/verifier/map_ptr_mixing.c | 2 +- 7 files changed, 131 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 07052d44bca1..1e1501ee53ce 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -92,6 +92,10 @@ struct bpf_map_ops { int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma); __poll_t (*map_poll)(struct bpf_map *map, struct file *filp, struct poll_table_struct *pts); + + /* BTF name and id of struct allocated by map_alloc */ + const char * const map_btf_name; + int *map_btf_id; }; struct bpf_map_memory { @@ -1109,6 +1113,11 @@ static inline bool bpf_allow_ptr_leaks(void) return perfmon_capable(); } +static inline bool bpf_allow_ptr_to_map_access(void) +{ + return perfmon_capable(); +} + static inline bool bpf_bypass_spec_v1(void) { return perfmon_capable(); diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index ca08db4ffb5f..53c7bd568c5d 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -379,6 +379,7 @@ struct bpf_verifier_env { u32 used_map_cnt; /* number of used maps */ u32 id_gen; /* used to generate unique reg IDs */ bool allow_ptr_leaks; + bool allow_ptr_to_map_access; bool bpf_capable; bool bypass_spec_v1; bool bypass_spec_v4; diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 11584618e861..e7caa48812fb 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -494,6 +494,7 @@ static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma) vma->vm_pgoff + pgoff); } +static int array_map_btf_id; const struct bpf_map_ops array_map_ops = { .map_alloc_check = array_map_alloc_check, .map_alloc = array_map_alloc, @@ -510,6 +511,8 @@ const struct bpf_map_ops array_map_ops = { .map_check_btf = array_map_check_btf, .map_lookup_batch = generic_map_lookup_batch, .map_update_batch = generic_map_update_batch, + .map_btf_name = "bpf_array", + .map_btf_id = &array_map_btf_id, }; const struct bpf_map_ops percpu_array_map_ops = { diff --git a/kernel/bpf/btf.c 
b/kernel/bpf/btf.c index 3eb804618a53..e377d1981730 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -3571,6 +3571,41 @@ btf_get_prog_ctx_type(struct bpf_verifier_log *log, struct btf *btf, return ctx_type; } +static const struct bpf_map_ops * const btf_vmlinux_map_ops[] = { +#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) +#define BPF_LINK_TYPE(_id, _name) +#define BPF_MAP_TYPE(_id, _ops) \ + [_id] = &_ops, +#include +#undef BPF_PROG_TYPE +#undef BPF_LINK_TYPE +#undef BPF_MAP_TYPE +}; + +static int btf_vmlinux_map_ids_init(const struct btf *btf, + struct bpf_verifier_log *log) +{ + const struct bpf_map_ops *ops; + int i, btf_id; + + for (i = 0; i < ARRAY_SIZE(btf_vmlinux_map_ops); ++i) { + ops = btf_vmlinux_map_ops[i]; + if (!ops || (!ops->map_btf_name && !ops->map_btf_id)) + continue; + if (!ops->map_btf_name || !ops->map_btf_id) { + bpf_log(log, "map type %d is misconfigured\n", i); + return -EINVAL; + } + btf_id = btf_find_by_name_kind(btf, ops->map_btf_name, + BTF_KIND_STRUCT); + if (btf_id < 0) + return btf_id; + *ops->map_btf_id = btf_id; + } + + return 0; +} + static int btf_translate_to_vmlinux(struct bpf_verifier_log *log, struct btf *btf, const struct btf_type *t, @@ -3633,6 +3668,11 @@ struct btf *btf_parse_vmlinux(void) /* btf_parse_vmlinux() runs under bpf_verifier_lock */ bpf_ctx_convert.t = btf_type_by_id(btf, btf_id); + /* find bpf map structs for map_ptr access checking */ + err = btf_vmlinux_map_ids_init(btf, log); + if (err < 0) + goto errout; + bpf_struct_ops_init(btf, log); btf_verifier_env_free(env); diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index b4b288a3c3c9..2c5999e02060 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -1614,6 +1614,7 @@ htab_lru_map_lookup_and_delete_batch(struct bpf_map *map, true, false); } +static int htab_map_btf_id; const struct bpf_map_ops htab_map_ops = { .map_alloc_check = htab_map_alloc_check, .map_alloc = htab_map_alloc, @@ -1625,6 +1626,8 @@ const struct bpf_map_ops htab_map_ops = { .map_gen_lookup = htab_map_gen_lookup, .map_seq_show_elem = htab_map_seq_show_elem, BATCH_OPS(htab), + .map_btf_name = "bpf_htab", + .map_btf_id = &htab_map_btf_id, }; const struct bpf_map_ops htab_lru_map_ops = { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index a1857c4ffaaf..7460f967cb75 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1351,6 +1351,19 @@ static void mark_reg_not_init(struct bpf_verifier_env *env, __mark_reg_not_init(env, regs + regno); } +static void mark_btf_ld_reg(struct bpf_verifier_env *env, + struct bpf_reg_state *regs, u32 regno, + enum bpf_reg_type reg_type, u32 btf_id) +{ + if (reg_type == SCALAR_VALUE) { + mark_reg_unknown(env, regs, regno); + return; + } + mark_reg_known_zero(env, regs, regno); + regs[regno].type = PTR_TO_BTF_ID; + regs[regno].btf_id = btf_id; +} + #define DEF_NOT_SUBREG (0) static void init_reg_state(struct bpf_verifier_env *env, struct bpf_func_state *state) @@ -3182,19 +3195,68 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env, if (ret < 0) return ret; - if (atype == BPF_READ && value_regno >= 0) { - if (ret == SCALAR_VALUE) { - mark_reg_unknown(env, regs, value_regno); - return 0; - } - mark_reg_known_zero(env, regs, value_regno); - regs[value_regno].type = PTR_TO_BTF_ID; - regs[value_regno].btf_id = btf_id; + if (atype == BPF_READ && value_regno >= 0) + mark_btf_ld_reg(env, regs, value_regno, ret, btf_id); + + return 0; +} + +static int check_ptr_to_map_access(struct bpf_verifier_env *env, + struct 
bpf_reg_state *regs, + int regno, int off, int size, + enum bpf_access_type atype, + int value_regno) +{ + struct bpf_reg_state *reg = regs + regno; + struct bpf_map *map = reg->map_ptr; + const struct btf_type *t; + const char *tname; + u32 btf_id; + int ret; + + if (!btf_vmlinux) { + verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n"); + return -ENOTSUPP; + } + + if (!map->ops->map_btf_id || !*map->ops->map_btf_id) { + verbose(env, "map_ptr access not supported for map type %d\n", + map->map_type); + return -ENOTSUPP; + } + + t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id); + tname = btf_name_by_offset(btf_vmlinux, t->name_off); + + if (!env->allow_ptr_to_map_access) { + verbose(env, + "%s access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n", + tname); + return -EPERM; } + if (off < 0) { + verbose(env, "R%d is %s invalid negative access: off=%d\n", + regno, tname, off); + return -EACCES; + } + + if (atype != BPF_READ) { + verbose(env, "only read from %s is supported\n", tname); + return -EACCES; + } + + ret = btf_struct_access(&env->log, t, off, size, atype, &btf_id); + if (ret < 0) + return ret; + + if (value_regno >= 0) + mark_btf_ld_reg(env, regs, value_regno, ret, btf_id); + return 0; } + /* check whether memory at (regno + off) is accessible for t = (read | write) * if t==write, value_regno is a register which value is stored into memory * if t==read, value_regno is a register which will receive the value from memory @@ -3363,6 +3425,9 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn } else if (reg->type == PTR_TO_BTF_ID) { err = check_ptr_to_btf_access(env, regs, regno, off, size, t, value_regno); + } else if (reg->type == CONST_PTR_TO_MAP) { + err = check_ptr_to_map_access(env, regs, regno, off, size, t, + value_regno); } else { verbose(env, "R%d invalid mem access '%s'\n", regno, reg_type_str[reg->type]); @@ -10951,6 +11016,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, env->strict_alignment = false; env->allow_ptr_leaks = bpf_allow_ptr_leaks(); + env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access(); env->bypass_spec_v1 = bpf_bypass_spec_v1(); env->bypass_spec_v4 = bpf_bypass_spec_v4(); env->bpf_capable = bpf_capable(); diff --git a/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c b/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c index cd26ee6b7b1d..1f2b8c4cb26d 100644 --- a/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c +++ b/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c @@ -56,7 +56,7 @@ .fixup_map_in_map = { 16 }, .fixup_map_array_48b = { 13 }, .result = REJECT, - .errstr = "R0 invalid mem access 'map_ptr'", + .errstr = "only read from bpf_array is supported", }, { "cond: two branches returning different map pointers for lookup (tail, tail)", -- cgit v1.2.3 From 73edcd38d7720bb6a761966ea14c0bc64e95dc26 Mon Sep 17 00:00:00 2001 From: Maulik Shah Date: Thu, 18 Jun 2020 18:35:53 +0530 Subject: soc: qcom: rpmh: Update rpmh_invalidate function to return void Currently rpmh_invalidate() always returns success. Update its return type to void. 
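For callers this removes a dead error path; a hypothetical consumer (function and variable names invented for illustration, modelled on the bcm-voter update below) now reads:

#include <soc/qcom/rpmh.h>

/* Sketch only: rpmh_invalidate() can no longer fail, while rebuilding
 * the sleep/wake votes via rpmh_write_batch() still reports errors.
 */
static int example_resync_votes(const struct device *dev,
				const struct tcs_cmd *cmds, u32 *commit_idx)
{
	/* Previously:
	 *	ret = rpmh_invalidate(dev);
	 *	if (ret)
	 *		return ret;
	 */
	rpmh_invalidate(dev);

	return rpmh_write_batch(dev, RPMH_ACTIVE_ONLY_STATE, cmds, commit_idx);
}
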
Reviewed-by: Lina Iyer Reviewed-by: Stephen Boyd Suggested-by: Stephen Boyd Signed-off-by: Maulik Shah Link: https://lore.kernel.org/r/1592485553-29163-1-git-send-email-mkshah@codeaurora.org Signed-off-by: Bjorn Andersson --- drivers/interconnect/qcom/bcm-voter.c | 6 +----- drivers/soc/qcom/rpmh.c | 4 +--- include/soc/qcom/rpmh.h | 7 ++++--- 3 files changed, 6 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/drivers/interconnect/qcom/bcm-voter.c b/drivers/interconnect/qcom/bcm-voter.c index 2a11a63e7217..a3d2ef1d9903 100644 --- a/drivers/interconnect/qcom/bcm-voter.c +++ b/drivers/interconnect/qcom/bcm-voter.c @@ -266,11 +266,7 @@ int qcom_icc_bcm_voter_commit(struct bcm_voter *voter) if (!commit_idx[0]) goto out; - ret = rpmh_invalidate(voter->dev); - if (ret) { - pr_err("Error invalidating RPMH client (%d)\n", ret); - goto out; - } + rpmh_invalidate(voter->dev); ret = rpmh_write_batch(voter->dev, RPMH_ACTIVE_ONLY_STATE, cmds, commit_idx); diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c index f2b5b46ccd1f..b61e183ede69 100644 --- a/drivers/soc/qcom/rpmh.c +++ b/drivers/soc/qcom/rpmh.c @@ -497,7 +497,7 @@ exit: * * Invalidate the sleep and wake values in batch_cache. */ -int rpmh_invalidate(const struct device *dev) +void rpmh_invalidate(const struct device *dev) { struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev); struct batch_cache_req *req, *tmp; @@ -509,7 +509,5 @@ int rpmh_invalidate(const struct device *dev) INIT_LIST_HEAD(&ctrlr->batch_cache); ctrlr->dirty = true; spin_unlock_irqrestore(&ctrlr->cache_lock, flags); - - return 0; } EXPORT_SYMBOL(rpmh_invalidate); diff --git a/include/soc/qcom/rpmh.h b/include/soc/qcom/rpmh.h index f9ec353d24a5..bdbee1a97d36 100644 --- a/include/soc/qcom/rpmh.h +++ b/include/soc/qcom/rpmh.h @@ -20,7 +20,7 @@ int rpmh_write_async(const struct device *dev, enum rpmh_state state, int rpmh_write_batch(const struct device *dev, enum rpmh_state state, const struct tcs_cmd *cmd, u32 *n); -int rpmh_invalidate(const struct device *dev); +void rpmh_invalidate(const struct device *dev); #else @@ -38,8 +38,9 @@ static inline int rpmh_write_batch(const struct device *dev, const struct tcs_cmd *cmd, u32 *n) { return -ENODEV; } -static inline int rpmh_invalidate(const struct device *dev) -{ return -ENODEV; } +static inline void rpmh_invalidate(const struct device *dev) +{ +} #endif /* CONFIG_QCOM_RPMH */ -- cgit v1.2.3 From 2a916ecc405686c1d86f632281bc06aa75ebae4e Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Fri, 19 Jun 2020 03:32:48 +0000 Subject: net/devlink: Support querying hardware address of port function PCI PF and VF devlink port can manage the function represented by a devlink port. Enable users to query port function's hardware address. Example of a PCI VF port which supports a port function: $ devlink port show pci/0000:06:00.0/2 pci/0000:06:00.0/2: type eth netdev enp6s0pf0vf1 flavour pcivf pfnum 0 vfnum 1 function: hw_addr 00:11:22:33:44:66 $ devlink port show pci/0000:06:00.0/2 -jp { "port": { "pci/0000:06:00.0/2": { "type": "eth", "netdev": "enp6s0pf0vf1", "flavour": "pcivf", "pfnum": 0, "vfnum": 1, "function": { "hw_addr": "00:11:22:33:44:66" } } } } Signed-off-by: Parav Pandit Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/net/devlink.h | 12 ++++++++++++ include/uapi/linux/devlink.h | 10 ++++++++++ net/core/devlink.c | 45 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 67 insertions(+) (limited to 'include') diff --git a/include/net/devlink.h b/include/net/devlink.h index 1df6dfec26c2..56fc9cdb189d 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1107,6 +1107,18 @@ struct devlink_ops { int (*trap_policer_counter_get)(struct devlink *devlink, const struct devlink_trap_policer *policer, u64 *p_drops); + /** + * @port_function_hw_addr_get: Port function's hardware address get function. + * + * Should be used by device drivers to report the hardware address of a function managed + * by the devlink port. Driver should return -EOPNOTSUPP if it doesn't support port + * function handling for a particular port. + * + * Note: @extack can be NULL when port notifier queries the port function. + */ + int (*port_function_hw_addr_get)(struct devlink *devlink, struct devlink_port *port, + u8 *hw_addr, int *hw_addr_len, + struct netlink_ext_ack *extack); }; static inline void *devlink_priv(struct devlink *devlink) diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 08563e6a424d..07d0af8f5923 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -451,6 +451,8 @@ enum devlink_attr { DEVLINK_ATTR_TRAP_POLICER_RATE, /* u64 */ DEVLINK_ATTR_TRAP_POLICER_BURST, /* u64 */ + DEVLINK_ATTR_PORT_FUNCTION, /* nested */ + /* add new attributes above here, update the policy in devlink.c */ __DEVLINK_ATTR_MAX, @@ -497,4 +499,12 @@ enum devlink_resource_unit { DEVLINK_RESOURCE_UNIT_ENTRY, }; +enum devlink_port_function_attr { + DEVLINK_PORT_FUNCTION_ATTR_UNSPEC, + DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR, /* binary */ + + __DEVLINK_PORT_FUNCTION_ATTR_MAX, + DEVLINK_PORT_FUNCTION_ATTR_MAX = __DEVLINK_PORT_FUNCTION_ATTR_MAX - 1 +}; + #endif /* _UAPI_LINUX_DEVLINK_H_ */ diff --git a/net/core/devlink.c b/net/core/devlink.c index 05197631d52a..b6848b607e9c 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -563,6 +563,49 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg, return 0; } +static int +devlink_nl_port_function_attrs_put(struct sk_buff *msg, struct devlink_port *port, + struct netlink_ext_ack *extack) +{ + struct devlink *devlink = port->devlink; + const struct devlink_ops *ops; + struct nlattr *function_attr; + bool empty_nest = true; + int err = 0; + + function_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_PORT_FUNCTION); + if (!function_attr) + return -EMSGSIZE; + + ops = devlink->ops; + if (ops->port_function_hw_addr_get) { + int uninitialized_var(hw_addr_len); + u8 hw_addr[MAX_ADDR_LEN]; + + err = ops->port_function_hw_addr_get(devlink, port, hw_addr, &hw_addr_len, extack); + if (err == -EOPNOTSUPP) { + /* Port function attributes are optional for a port. If port doesn't + * support function attribute, returning -EOPNOTSUPP is not an error. 
+ */ + err = 0; + goto out; + } else if (err) { + goto out; + } + err = nla_put(msg, DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR, hw_addr_len, hw_addr); + if (err) + goto out; + empty_nest = false; + } + +out: + if (err || empty_nest) + nla_nest_cancel(msg, function_attr); + else + nla_nest_end(msg, function_attr); + return err; +} + static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink, struct devlink_port *devlink_port, enum devlink_command cmd, u32 portid, @@ -608,6 +651,8 @@ static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink, spin_unlock_bh(&devlink_port->type_lock); if (devlink_nl_port_attrs_put(msg, devlink_port)) goto nla_put_failure; + if (devlink_nl_port_function_attrs_put(msg, devlink_port, extack)) + goto nla_put_failure; genlmsg_end(msg, hdr); return 0; -- cgit v1.2.3 From a1e8ae907c8d67f57432190bb742802a76516b00 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Fri, 19 Jun 2020 03:32:49 +0000 Subject: net/devlink: Support setting hardware address of port function PCI PF and VF devlink port can manage the function represented by a devlink port. Allow users to set port function's hardware address. Example of a PCI VF port which supports a port function: $ devlink port show pci/0000:06:00.0/2 pci/0000:06:00.0/2: type eth netdev enp6s0pf0vf1 flavour pcivf pfnum 0 vfnum 1 function: hw_addr 00:00:00:00:00:00 $ devlink port function set pci/0000:06:00.0/2 hw_addr 00:11:22:33:44:55 $ devlink port show pci/0000:06:00.0/2 pci/0000:06:00.0/2: type eth netdev enp6s0pf0vf1 flavour pcivf pfnum 0 vfnum 1 function: hw_addr 00:11:22:33:44:55 Signed-off-by: Parav Pandit Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/devlink.h | 10 +++++++ net/core/devlink.c | 76 +++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 86 insertions(+) (limited to 'include') diff --git a/include/net/devlink.h b/include/net/devlink.h index 56fc9cdb189d..7007f93585a5 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1119,6 +1119,16 @@ struct devlink_ops { int (*port_function_hw_addr_get)(struct devlink *devlink, struct devlink_port *port, u8 *hw_addr, int *hw_addr_len, struct netlink_ext_ack *extack); + /** + * @port_function_hw_addr_set: Port function's hardware address set function. + * + * Should be used by device drivers to set the hardware address of a function managed + * by the devlink port. Driver should return -EOPNOTSUPP if it doesn't support port + * function handling for a particular port. 
+ */ + int (*port_function_hw_addr_set)(struct devlink *devlink, struct devlink_port *port, + const u8 *hw_addr, int hw_addr_len, + struct netlink_ext_ack *extack); }; static inline void *devlink_priv(struct devlink *devlink) diff --git a/net/core/devlink.c b/net/core/devlink.c index b6848b607e9c..baa45eca6b5a 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -85,6 +85,10 @@ EXPORT_SYMBOL(devlink_dpipe_header_ipv6); EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwmsg); EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwerr); +static const struct nla_policy devlink_function_nl_policy[DEVLINK_PORT_FUNCTION_ATTR_MAX + 1] = { + [DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR] = { .type = NLA_BINARY }, +}; + static LIST_HEAD(devlink_list); /* devlink_mutex @@ -827,6 +831,67 @@ static int devlink_port_type_set(struct devlink *devlink, return -EOPNOTSUPP; } +static int +devlink_port_function_hw_addr_set(struct devlink *devlink, struct devlink_port *port, + const struct nlattr *attr, struct netlink_ext_ack *extack) +{ + const struct devlink_ops *ops; + const u8 *hw_addr; + int hw_addr_len; + int err; + + hw_addr = nla_data(attr); + hw_addr_len = nla_len(attr); + if (hw_addr_len > MAX_ADDR_LEN) { + NL_SET_ERR_MSG_MOD(extack, "Port function hardware address too long"); + return -EINVAL; + } + if (port->type == DEVLINK_PORT_TYPE_ETH) { + if (hw_addr_len != ETH_ALEN) { + NL_SET_ERR_MSG_MOD(extack, "Address must be 6 bytes for Ethernet device"); + return -EINVAL; + } + if (!is_unicast_ether_addr(hw_addr)) { + NL_SET_ERR_MSG_MOD(extack, "Non-unicast hardware address unsupported"); + return -EINVAL; + } + } + + ops = devlink->ops; + if (!ops->port_function_hw_addr_set) { + NL_SET_ERR_MSG_MOD(extack, "Port doesn't support function attributes"); + return -EOPNOTSUPP; + } + + err = ops->port_function_hw_addr_set(devlink, port, hw_addr, hw_addr_len, extack); + if (err) + return err; + + devlink_port_notify(port, DEVLINK_CMD_PORT_NEW); + return 0; +} + +static int +devlink_port_function_set(struct devlink *devlink, struct devlink_port *port, + const struct nlattr *attr, struct netlink_ext_ack *extack) +{ + struct nlattr *tb[DEVLINK_PORT_FUNCTION_ATTR_MAX + 1]; + int err; + + err = nla_parse_nested(tb, DEVLINK_PORT_FUNCTION_ATTR_MAX, attr, + devlink_function_nl_policy, extack); + if (err < 0) { + NL_SET_ERR_MSG_MOD(extack, "Fail to parse port function attributes"); + return err; + } + + attr = tb[DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR]; + if (attr) + err = devlink_port_function_hw_addr_set(devlink, port, attr, extack); + + return err; +} + static int devlink_nl_cmd_port_set_doit(struct sk_buff *skb, struct genl_info *info) { @@ -842,6 +907,16 @@ static int devlink_nl_cmd_port_set_doit(struct sk_buff *skb, if (err) return err; } + + if (info->attrs[DEVLINK_ATTR_PORT_FUNCTION]) { + struct nlattr *attr = info->attrs[DEVLINK_ATTR_PORT_FUNCTION]; + struct netlink_ext_ack *extack = info->extack; + + err = devlink_port_function_set(devlink, devlink_port, attr, extack); + if (err) + return err; + } + return 0; } @@ -6758,6 +6833,7 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = { [DEVLINK_ATTR_TRAP_POLICER_ID] = { .type = NLA_U32 }, [DEVLINK_ATTR_TRAP_POLICER_RATE] = { .type = NLA_U64 }, [DEVLINK_ATTR_TRAP_POLICER_BURST] = { .type = NLA_U64 }, + [DEVLINK_ATTR_PORT_FUNCTION] = { .type = NLA_NESTED }, }; static const struct genl_ops devlink_nl_ops[] = { -- cgit v1.2.3 From fa997825ebeca820f4001a9e6d285345d3a535ba Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Fri, 19 Jun 2020 03:32:50 +0000 Subject: net/mlx5: 
Constify mac address pointer Since none of the functions need to modify the input mac address, constify them. Signed-off-by: Parav Pandit Reviewed-by: Roi Dayan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 2 +- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 2 +- include/linux/mlx5/vport.h | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 1116ab9bea6c..d6a585a143dc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1127,7 +1127,7 @@ int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW); } -static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN]) +static void node_guid_gen_from_mac(u64 *node_guid, const u8 *mac) { ((u8 *)node_guid)[7] = mac[0]; ((u8 *)node_guid)[6] = mac[1]; @@ -1779,7 +1779,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) /* Vport Administration */ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, - u16 vport, u8 mac[ETH_ALEN]) + u16 vport, const u8 *mac) { struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); u64 node_guid; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index a5175e98c0b3..165a23efc608 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -311,7 +311,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs); void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf); void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf); int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, - u16 vport, u8 mac[ETH_ALEN]); + u16 vport, const u8 *mac); int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state); int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index c107d92dc118..88cdb9bb4c4a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -173,7 +173,7 @@ int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr) EXPORT_SYMBOL_GPL(mlx5_query_mac_address); int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev, - u16 vport, u8 *addr) + u16 vport, const u8 *addr) { void *in; int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index 8170da1e9f70..4db87bcfce7b 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -75,7 +75,7 @@ void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline); int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev, u16 vport, u8 min_inline); int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, - u16 vport, u8 *addr); + u16 vport, const u8 *addr); int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu); int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu); int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, -- cgit v1.2.3 From 272c2330adc9c68284cb0066719160c24bfe605f Mon Sep 17 00:00:00 2001 From: Jarod Wilson Date: Fri, 19 Jun 2020 10:31:52 -0400 
Subject: xfrm: bail early on slave pass over skb This is prep work for initial support of bonding hardware encryption pass-through support. The bonding driver will fill in the slave_dev pointer, and we use that to know not to skb_push() again on a given skb that was already processed on the bond device. CC: Jay Vosburgh CC: Veaceslav Falico CC: Andy Gospodarek CC: "David S. Miller" CC: Jeff Kirsher CC: Jakub Kicinski CC: Steffen Klassert CC: Herbert Xu CC: netdev@vger.kernel.org CC: intel-wired-lan@lists.osuosl.org Signed-off-by: Jarod Wilson Signed-off-by: David S. Miller --- include/net/xfrm.h | 1 + net/xfrm/xfrm_device.c | 34 +++++++++++++++++----------------- 2 files changed, 18 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 094fe682f5d7..e20b2b27ec48 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -127,6 +127,7 @@ struct xfrm_state_walk { struct xfrm_state_offload { struct net_device *dev; + struct net_device *slave_dev; unsigned long offload_handle; unsigned int num_exthdrs; u8 flags; diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index f50d1f97cf8e..b8918fc5248b 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -106,6 +106,7 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur struct sk_buff *skb2, *nskb, *pskb = NULL; netdev_features_t esp_features = features; struct xfrm_offload *xo = xfrm_offload(skb); + struct net_device *dev = skb->dev; struct sec_path *sp; if (!xo) @@ -119,6 +120,10 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND) return skb; + /* This skb was already validated on the master dev */ + if ((x->xso.dev != dev) && (x->xso.slave_dev == dev)) + return skb; + local_irq_save(flags); sd = this_cpu_ptr(&softnet_data); err = !skb_queue_empty(&sd->xfrm_backlog); @@ -129,25 +134,20 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur return skb; } - if (skb_is_gso(skb)) { - struct net_device *dev = skb->dev; - - if (unlikely(x->xso.dev != dev)) { - struct sk_buff *segs; + if (skb_is_gso(skb) && unlikely(x->xso.dev != dev)) { + struct sk_buff *segs; - /* Packet got rerouted, fixup features and segment it. */ - esp_features = esp_features & ~(NETIF_F_HW_ESP - | NETIF_F_GSO_ESP); + /* Packet got rerouted, fixup features and segment it. */ + esp_features = esp_features & ~(NETIF_F_HW_ESP | NETIF_F_GSO_ESP); - segs = skb_gso_segment(skb, esp_features); - if (IS_ERR(segs)) { - kfree_skb(skb); - atomic_long_inc(&dev->tx_dropped); - return NULL; - } else { - consume_skb(skb); - skb = segs; - } + segs = skb_gso_segment(skb, esp_features); + if (IS_ERR(segs)) { + kfree_skb(skb); + atomic_long_inc(&dev->tx_dropped); + return NULL; + } else { + consume_skb(skb); + skb = segs; } } -- cgit v1.2.3 From 18cb261afd7bf50134e5ccacc5ec91ea16efadd4 Mon Sep 17 00:00:00 2001 From: Jarod Wilson Date: Fri, 19 Jun 2020 10:31:55 -0400 Subject: bonding: support hardware encryption offload to slaves Currently, this support is limited to active-backup mode, as I'm not sure about the feasilibity of mapping an xfrm_state's offload handle to multiple hardware devices simultaneously, and we rely on being able to pass some hints to both the xfrm and NIC driver about whether or not they're operating on a slave device. 
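To make the second of those hints concrete, a slave NIC driver could resolve which device to program roughly as sketched below. This is an illustration only, not code from this series; nic_xdo_dev_state_add() is an invented name and real drivers will differ in the details.

#include <linux/netdevice.h>
#include <net/xfrm.h>

static int nic_xdo_dev_state_add(struct xfrm_state *xs)
{
        /* Prefer the slave the bond selected; fall back to xso.dev. */
        struct net_device *dev = xs->xso.slave_dev ? xs->xso.slave_dev
                                                   : xs->xso.dev;

        netdev_dbg(dev, "programming SA for hardware offload\n");
        /* ... write the SA into this device's hardware table ... */
        return 0;
}
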
I've tested this atop an Intel x520 device (ixgbe) using libreswan in transport mode, succesfully achieving ~4.3Gbps throughput with netperf (more or less identical to throughput on a bare NIC in this system), as well as successful failover and recovery mid-netperf. v2: just use CONFIG_XFRM_OFFLOAD for wrapping, isolate more code with it CC: Jay Vosburgh CC: Veaceslav Falico CC: Andy Gospodarek CC: "David S. Miller" CC: Jeff Kirsher CC: Jakub Kicinski CC: Steffen Klassert CC: Herbert Xu CC: netdev@vger.kernel.org CC: intel-wired-lan@lists.osuosl.org Signed-off-by: Jarod Wilson Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 127 +++++++++++++++++++++++++++++++++++++++- include/net/bonding.h | 3 + 2 files changed, 128 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 004919aea5fb..90939ccf2a94 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -79,6 +79,7 @@ #include #include #include +#include #include #include #include @@ -278,8 +279,6 @@ const char *bond_mode_name(int mode) return names[mode]; } -/*---------------------------------- VLAN -----------------------------------*/ - /** * bond_dev_queue_xmit - Prepare skb for xmit. * @@ -302,6 +301,8 @@ netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, return dev_queue_xmit(skb); } +/*---------------------------------- VLAN -----------------------------------*/ + /* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid, * We don't protect the slave list iteration with a lock because: * a. This operation is performed in IOCTL context, @@ -372,6 +373,84 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, return 0; } +/*---------------------------------- XFRM -----------------------------------*/ + +#ifdef CONFIG_XFRM_OFFLOAD +/** + * bond_ipsec_add_sa - program device with a security association + * @xs: pointer to transformer state struct + **/ +static int bond_ipsec_add_sa(struct xfrm_state *xs) +{ + struct net_device *bond_dev = xs->xso.dev; + struct bonding *bond = netdev_priv(bond_dev); + struct slave *slave = rtnl_dereference(bond->curr_active_slave); + + xs->xso.slave_dev = slave->dev; + bond->xs = xs; + + if (!(slave->dev->xfrmdev_ops + && slave->dev->xfrmdev_ops->xdo_dev_state_add)) { + slave_warn(bond_dev, slave->dev, "Slave does not support ipsec offload\n"); + return -EINVAL; + } + + return slave->dev->xfrmdev_ops->xdo_dev_state_add(xs); +} + +/** + * bond_ipsec_del_sa - clear out this specific SA + * @xs: pointer to transformer state struct + **/ +static void bond_ipsec_del_sa(struct xfrm_state *xs) +{ + struct net_device *bond_dev = xs->xso.dev; + struct bonding *bond = netdev_priv(bond_dev); + struct slave *slave = rtnl_dereference(bond->curr_active_slave); + + if (!slave) + return; + + xs->xso.slave_dev = slave->dev; + + if (!(slave->dev->xfrmdev_ops + && slave->dev->xfrmdev_ops->xdo_dev_state_delete)) { + slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__); + return; + } + + slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs); +} + +/** + * bond_ipsec_offload_ok - can this packet use the xfrm hw offload + * @skb: current data packet + * @xs: pointer to transformer state struct + **/ +static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) +{ + struct net_device *bond_dev = xs->xso.dev; + struct bonding *bond = netdev_priv(bond_dev); + struct slave *curr_active = 
rtnl_dereference(bond->curr_active_slave); + struct net_device *slave_dev = curr_active->dev; + + if (!(slave_dev->xfrmdev_ops + && slave_dev->xfrmdev_ops->xdo_dev_offload_ok)) { + slave_warn(bond_dev, slave_dev, "%s: no slave xdo_dev_offload_ok\n", __func__); + return false; + } + + xs->xso.slave_dev = slave_dev; + return slave_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs); +} + +static const struct xfrmdev_ops bond_xfrmdev_ops = { + .xdo_dev_state_add = bond_ipsec_add_sa, + .xdo_dev_state_delete = bond_ipsec_del_sa, + .xdo_dev_offload_ok = bond_ipsec_offload_ok, +}; +#endif /* CONFIG_XFRM_OFFLOAD */ + /*------------------------------- Link status -------------------------------*/ /* Set the carrier state for the master according to the state of its @@ -879,6 +958,11 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) return; if (new_active) { +#ifdef CONFIG_XFRM_OFFLOAD + if ((BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) && bond->xs) + bond_ipsec_del_sa(bond->xs); +#endif /* CONFIG_XFRM_OFFLOAD */ + new_active->last_link_up = jiffies; if (new_active->link == BOND_LINK_BACK) { @@ -941,6 +1025,13 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) bond_should_notify_peers(bond); } +#ifdef CONFIG_XFRM_OFFLOAD + if (old_active && bond->xs) { + xfrm_dev_state_flush(dev_net(bond->dev), bond->dev, true); + bond_ipsec_add_sa(bond->xs); + } +#endif /* CONFIG_XFRM_OFFLOAD */ + call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev); if (should_notify_peers) { bond->send_peer_notif--; @@ -1127,15 +1218,24 @@ static netdev_features_t bond_fix_features(struct net_device *dev, #define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ NETIF_F_RXCSUM | NETIF_F_ALL_TSO) +#ifdef CONFIG_XFRM_OFFLOAD +#define BOND_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \ + NETIF_F_GSO_ESP) +#endif /* CONFIG_XFRM_OFFLOAD */ + #define BOND_MPLS_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ NETIF_F_ALL_TSO) + static void bond_compute_features(struct bonding *bond) { unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; netdev_features_t vlan_features = BOND_VLAN_FEATURES; netdev_features_t enc_features = BOND_ENC_FEATURES; +#ifdef CONFIG_XFRM_OFFLOAD + netdev_features_t xfrm_features = BOND_XFRM_FEATURES; +#endif /* CONFIG_XFRM_OFFLOAD */ netdev_features_t mpls_features = BOND_MPLS_FEATURES; struct net_device *bond_dev = bond->dev; struct list_head *iter; @@ -1157,6 +1257,12 @@ static void bond_compute_features(struct bonding *bond) slave->dev->hw_enc_features, BOND_ENC_FEATURES); +#ifdef CONFIG_XFRM_OFFLOAD + xfrm_features = netdev_increment_features(xfrm_features, + slave->dev->hw_enc_features, + BOND_XFRM_FEATURES); +#endif /* CONFIG_XFRM_OFFLOAD */ + mpls_features = netdev_increment_features(mpls_features, slave->dev->mpls_features, BOND_MPLS_FEATURES); @@ -1176,6 +1282,9 @@ done: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX | NETIF_F_GSO_UDP_L4; +#ifdef CONFIG_XFRM_OFFLOAD + bond_dev->hw_enc_features |= xfrm_features; +#endif /* CONFIG_XFRM_OFFLOAD */ bond_dev->mpls_features = mpls_features; bond_dev->gso_max_segs = gso_max_segs; netif_set_gso_max_size(bond_dev, gso_max_size); @@ -1464,6 +1573,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, slave_dbg(bond_dev, slave_dev, "is !NETIF_F_VLAN_CHALLENGED\n"); } + if (slave_dev->features & NETIF_F_HW_ESP) + slave_dbg(bond_dev, slave_dev, "is esp-hw-offload capable\n"); + /* Old ifenslave binaries are no longer supported. 
These can * be identified with moderate accuracy by the state of the slave: * the current ifenslave will set the interface down prior to @@ -4540,6 +4652,13 @@ void bond_setup(struct net_device *bond_dev) bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE; bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); +#ifdef CONFIG_XFRM_OFFLOAD + /* set up xfrm device ops (only supported in active-backup right now) */ + if ((BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)) + bond_dev->xfrmdev_ops = &bond_xfrmdev_ops; + bond->xs = NULL; +#endif /* CONFIG_XFRM_OFFLOAD */ + /* don't acquire bond device's netif_tx_lock when transmitting */ bond_dev->features |= NETIF_F_LLTX; @@ -4558,6 +4677,10 @@ void bond_setup(struct net_device *bond_dev) NETIF_F_HW_VLAN_CTAG_FILTER; bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4; +#ifdef CONFIG_XFRM_OFFLOAD + if ((BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)) + bond_dev->hw_features |= BOND_XFRM_FEATURES; +#endif /* CONFIG_XFRM_OFFLOAD */ bond_dev->features |= bond_dev->hw_features; bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; } diff --git a/include/net/bonding.h b/include/net/bonding.h index aa854a9c01e2..a00e1764e9b1 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -238,6 +238,9 @@ struct bonding { struct dentry *debug_dir; #endif /* CONFIG_DEBUG_FS */ struct rtnl_link_stats64 bond_stats; +#ifdef CONFIG_XFRM_OFFLOAD + struct xfrm_state *xs; +#endif /* CONFIG_XFRM_OFFLOAD */ }; #define bond_slave_get_rcu(dev) \ -- cgit v1.2.3 From b5872cd0e823e4cb50b3a75cd9522167eeb676a2 Mon Sep 17 00:00:00 2001 From: Vasundhara Volam Date: Sat, 20 Jun 2020 22:01:56 +0530 Subject: devlink: Add support for board.serial_number to info_get cb. Board serial number is a serial number, often available in PCI *Vital Product Data*. Also, update devlink-info.rst documentation file. Cc: Jiri Pirko Cc: Jakub Kicinski Signed-off-by: Vasundhara Volam Reviewed-by: Michael Chan Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- Documentation/networking/devlink/devlink-info.rst | 12 +++++------- include/net/devlink.h | 2 ++ include/uapi/linux/devlink.h | 2 ++ net/core/devlink.c | 8 ++++++++ 4 files changed, 17 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/Documentation/networking/devlink/devlink-info.rst b/Documentation/networking/devlink/devlink-info.rst index 3fe11401b838..7572bf6de5c1 100644 --- a/Documentation/networking/devlink/devlink-info.rst +++ b/Documentation/networking/devlink/devlink-info.rst @@ -44,9 +44,11 @@ versions is generally discouraged - here, and via any other Linux API. reported for two ports of the same device or on two hosts of a multi-host device should be identical. - .. note:: ``devlink-info`` API should be extended with a new field - if devices want to report board/product serial number (often - reported in PCI *Vital Product Data* capability). + * - ``board.serial_number`` + - Board serial number of the device. + + This is usually the serial number of the board, often available in + PCI *Vital Product Data*. 
* - ``fixed`` - Group for hardware identifiers, and versions of components @@ -201,10 +203,6 @@ Future work The following extensions could be useful: - - product serial number - NIC boards often get labeled with a board serial - number rather than ASIC serial number; it'd be useful to add board serial - numbers to the API if they can be retrieved from the device; - - on-disk firmware file names - drivers list the file names of firmware they may need to load onto devices via the ``MODULE_FIRMWARE()`` macro. These, however, are per module, rather than per device. It'd be useful to list diff --git a/include/net/devlink.h b/include/net/devlink.h index 7007f93585a5..428f55f8197c 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1284,6 +1284,8 @@ int devlink_info_serial_number_put(struct devlink_info_req *req, const char *sn); int devlink_info_driver_name_put(struct devlink_info_req *req, const char *name); +int devlink_info_board_serial_number_put(struct devlink_info_req *req, + const char *bsn); int devlink_info_version_fixed_put(struct devlink_info_req *req, const char *version_name, const char *version_value); diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 07d0af8f5923..87c83a82991b 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -453,6 +453,8 @@ enum devlink_attr { DEVLINK_ATTR_PORT_FUNCTION, /* nested */ + DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER, /* string */ + /* add new attributes above here, update the policy in devlink.c */ __DEVLINK_ATTR_MAX, diff --git a/net/core/devlink.c b/net/core/devlink.c index baa45eca6b5a..455998a57671 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -4502,6 +4502,14 @@ int devlink_info_serial_number_put(struct devlink_info_req *req, const char *sn) } EXPORT_SYMBOL_GPL(devlink_info_serial_number_put); +int devlink_info_board_serial_number_put(struct devlink_info_req *req, + const char *bsn) +{ + return nla_put_string(req->msg, DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER, + bsn); +} +EXPORT_SYMBOL_GPL(devlink_info_board_serial_number_put); + static int devlink_info_version_put(struct devlink_info_req *req, int attr, const char *version_name, const char *version_value) -- cgit v1.2.3 From a602ea86e9f0d82f5c7ba1d3f7487d4097380b96 Mon Sep 17 00:00:00 2001 From: Maxim Kochetkov Date: Sun, 21 Jun 2020 10:59:51 +0300 Subject: net: phy: marvell: Add Marvell 88E1340S support Add support for this new phy ID. Signed-off-by: Maxim Kochetkov Signed-off-by: David S. 
Miller --- drivers/net/phy/marvell.c | 23 +++++++++++++++++++++++ include/linux/marvell_phy.h | 1 + 2 files changed, 24 insertions(+) (limited to 'include') diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index ee9c352f67ab..0842deb33085 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -2943,6 +2943,28 @@ static struct phy_driver marvell_drivers[] = { .cable_test_tdr_start = marvell_vct5_cable_test_tdr_start, .cable_test_get_status = marvell_vct7_cable_test_get_status, }, + { + .phy_id = MARVELL_PHY_ID_88E1340S, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E1340S", + .probe = m88e1510_probe, + /* PHY_GBIT_FEATURES */ + .config_init = marvell_config_init, + .config_aneg = m88e1510_config_aneg, + .read_status = marvell_read_status, + .ack_interrupt = marvell_ack_interrupt, + .config_intr = marvell_config_intr, + .did_interrupt = m88e1121_did_interrupt, + .resume = genphy_resume, + .suspend = genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, + .get_sset_count = marvell_get_sset_count, + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, + .get_tunable = m88e1540_get_tunable, + .set_tunable = m88e1540_set_tunable, + }, }; module_phy_driver(marvell_drivers); @@ -2963,6 +2985,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = { { MARVELL_PHY_ID_88E1545, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E6390, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88E1340S, MARVELL_PHY_ID_MASK }, { } }; diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index af6b11d4d673..c4390e9cbf15 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -15,6 +15,7 @@ #define MARVELL_PHY_ID_88E1149R 0x01410e50 #define MARVELL_PHY_ID_88E1240 0x01410e30 #define MARVELL_PHY_ID_88E1318S 0x01410e90 +#define MARVELL_PHY_ID_88E1340S 0x01410dc0 #define MARVELL_PHY_ID_88E1116R 0x01410e40 #define MARVELL_PHY_ID_88E1510 0x01410dd0 #define MARVELL_PHY_ID_88E1540 0x01410eb0 -- cgit v1.2.3 From f59babf95ef969a18744082ee16e4dfd17743c0b Mon Sep 17 00:00:00 2001 From: Maxim Kochetkov Date: Sun, 21 Jun 2020 10:59:52 +0300 Subject: net: phy: marvell: Add Marvell 88E1548P support Add support for this new phy ID. Signed-off-by: Maxim Kochetkov Signed-off-by: David S. 
Miller --- drivers/net/phy/marvell.c | 23 +++++++++++++++++++++++ include/linux/marvell_phy.h | 1 + 2 files changed, 24 insertions(+) (limited to 'include') diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 0842deb33085..bb86ac0bd092 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -2965,6 +2965,28 @@ static struct phy_driver marvell_drivers[] = { .get_tunable = m88e1540_get_tunable, .set_tunable = m88e1540_set_tunable, }, + { + .phy_id = MARVELL_PHY_ID_88E1548P, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E1548P", + .probe = m88e1510_probe, + .features = PHY_GBIT_FIBRE_FEATURES, + .config_init = marvell_config_init, + .config_aneg = m88e1510_config_aneg, + .read_status = marvell_read_status, + .ack_interrupt = marvell_ack_interrupt, + .config_intr = marvell_config_intr, + .did_interrupt = m88e1121_did_interrupt, + .resume = genphy_resume, + .suspend = genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, + .get_sset_count = marvell_get_sset_count, + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, + .get_tunable = m88e1540_get_tunable, + .set_tunable = m88e1540_set_tunable, + }, }; module_phy_driver(marvell_drivers); @@ -2986,6 +3008,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = { { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E6390, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1340S, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88E1548P, MARVELL_PHY_ID_MASK }, { } }; diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index c4390e9cbf15..ff7b7607c8cf 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -20,6 +20,7 @@ #define MARVELL_PHY_ID_88E1510 0x01410dd0 #define MARVELL_PHY_ID_88E1540 0x01410eb0 #define MARVELL_PHY_ID_88E1545 0x01410ea0 +#define MARVELL_PHY_ID_88E1548P 0x01410ec0 #define MARVELL_PHY_ID_88E3016 0x01410e60 #define MARVELL_PHY_ID_88X3310 0x002b09a0 #define MARVELL_PHY_ID_88E2110 0x002b09b0 -- cgit v1.2.3 From 59390282b7542c6050c1deaca0b2949180903175 Mon Sep 17 00:00:00 2001 From: Vincent Knecht Date: Sat, 13 Jun 2020 09:27:42 +0200 Subject: clk: qcom: smd: Add support for MSM8936 rpm clocks Add missing definition of rpm clk for msm8936 soc (also used by msm8939) Signed-off-by: Vincent Knecht Link: https://lore.kernel.org/r/20200613072745.1249003-2-vincent.knecht@mailoo.org Signed-off-by: Stephen Boyd --- drivers/clk/qcom/clk-smd-rpm.c | 50 ++++++++++++++++++++++++++++++++++ include/dt-bindings/clock/qcom,rpmcc.h | 2 ++ 2 files changed, 52 insertions(+) (limited to 'include') diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c index 643bc355df5c..083399affc8e 100644 --- a/drivers/clk/qcom/clk-smd-rpm.c +++ b/drivers/clk/qcom/clk-smd-rpm.c @@ -452,6 +452,55 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8916 = { .num_clks = ARRAY_SIZE(msm8916_clks), }; +/* msm8936 */ +DEFINE_CLK_SMD_RPM(msm8936, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8936, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1); +DEFINE_CLK_SMD_RPM(msm8936, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8936, sysmmnoc_clk, sysmmnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2); +DEFINE_CLK_SMD_RPM_QDSS(msm8936, qdss_clk, qdss_a_clk, QCOM_SMD_RPM_MISC_CLK, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8936, bb_clk1, bb_clk1_a, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8936, bb_clk2, bb_clk2_a, 2); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8936, rf_clk1, rf_clk1_a, 4); 
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8936, rf_clk2, rf_clk2_a, 5); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8936, bb_clk1_pin, bb_clk1_a_pin, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8936, bb_clk2_pin, bb_clk2_a_pin, 2); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8936, rf_clk1_pin, rf_clk1_a_pin, 4); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8936, rf_clk2_pin, rf_clk2_a_pin, 5); + +static struct clk_smd_rpm *msm8936_clks[] = { + [RPM_SMD_PCNOC_CLK] = &msm8936_pcnoc_clk, + [RPM_SMD_PCNOC_A_CLK] = &msm8936_pcnoc_a_clk, + [RPM_SMD_SNOC_CLK] = &msm8936_snoc_clk, + [RPM_SMD_SNOC_A_CLK] = &msm8936_snoc_a_clk, + [RPM_SMD_BIMC_CLK] = &msm8936_bimc_clk, + [RPM_SMD_BIMC_A_CLK] = &msm8936_bimc_a_clk, + [RPM_SMD_SYSMMNOC_CLK] = &msm8936_sysmmnoc_clk, + [RPM_SMD_SYSMMNOC_A_CLK] = &msm8936_sysmmnoc_a_clk, + [RPM_SMD_QDSS_CLK] = &msm8936_qdss_clk, + [RPM_SMD_QDSS_A_CLK] = &msm8936_qdss_a_clk, + [RPM_SMD_BB_CLK1] = &msm8936_bb_clk1, + [RPM_SMD_BB_CLK1_A] = &msm8936_bb_clk1_a, + [RPM_SMD_BB_CLK2] = &msm8936_bb_clk2, + [RPM_SMD_BB_CLK2_A] = &msm8936_bb_clk2_a, + [RPM_SMD_RF_CLK1] = &msm8936_rf_clk1, + [RPM_SMD_RF_CLK1_A] = &msm8936_rf_clk1_a, + [RPM_SMD_RF_CLK2] = &msm8936_rf_clk2, + [RPM_SMD_RF_CLK2_A] = &msm8936_rf_clk2_a, + [RPM_SMD_BB_CLK1_PIN] = &msm8936_bb_clk1_pin, + [RPM_SMD_BB_CLK1_A_PIN] = &msm8936_bb_clk1_a_pin, + [RPM_SMD_BB_CLK2_PIN] = &msm8936_bb_clk2_pin, + [RPM_SMD_BB_CLK2_A_PIN] = &msm8936_bb_clk2_a_pin, + [RPM_SMD_RF_CLK1_PIN] = &msm8936_rf_clk1_pin, + [RPM_SMD_RF_CLK1_A_PIN] = &msm8936_rf_clk1_a_pin, + [RPM_SMD_RF_CLK2_PIN] = &msm8936_rf_clk2_pin, + [RPM_SMD_RF_CLK2_A_PIN] = &msm8936_rf_clk2_a_pin, +}; + +static const struct rpm_smd_clk_desc rpm_clk_msm8936 = { + .clks = msm8936_clks, + .num_clks = ARRAY_SIZE(msm8936_clks), +}; + /* msm8974 */ DEFINE_CLK_SMD_RPM(msm8974, pnoc_clk, pnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0); DEFINE_CLK_SMD_RPM(msm8974, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1); @@ -843,6 +892,7 @@ static const struct rpm_smd_clk_desc rpm_clk_sdm660 = { static const struct of_device_id rpm_smd_clk_match_table[] = { { .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 }, + { .compatible = "qcom,rpmcc-msm8936", .data = &rpm_clk_msm8936 }, { .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 }, { .compatible = "qcom,rpmcc-msm8976", .data = &rpm_clk_msm8976 }, { .compatible = "qcom,rpmcc-msm8996", .data = &rpm_clk_msm8996 }, diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h index d1afa634b58d..e98ed70d91b3 100644 --- a/include/dt-bindings/clock/qcom,rpmcc.h +++ b/include/dt-bindings/clock/qcom,rpmcc.h @@ -143,5 +143,7 @@ #define RPM_SMD_LN_BB_CLK1_A_PIN 97 #define RPM_SMD_LN_BB_CLK2_PIN 98 #define RPM_SMD_LN_BB_CLK2_A_PIN 99 +#define RPM_SMD_SYSMMNOC_CLK 100 +#define RPM_SMD_SYSMMNOC_A_CLK 101 #endif -- cgit v1.2.3 From 34662f6e30846ae0f82bbc9605deff67781f6616 Mon Sep 17 00:00:00 2001 From: Adam Ford Date: Wed, 3 Jun 2020 10:43:28 -0500 Subject: dt: Add additional option bindings for IDT VersaClock The VersaClock driver now supports some additional bindings to support child nodes which can configure optional settings like mode, voltage and slew. This patch updates the binding document to describe what is available in the driver. 
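As a rough illustration of how a driver might consume the new per-output child nodes, the sketch below reads the voltage and slew properties with the standard OF helpers; the mode property would be handled the same way. vc5_out_cfg and vc5_parse_output_node are invented names, not the actual clk-versaclock5 code.

#include <linux/of.h>

/* Hypothetical per-output settings container. */
struct vc5_out_cfg {
        u32 microvolts;
        u32 slew_percent;
};

static void vc5_parse_output_node(struct device_node *child,
                                  struct vc5_out_cfg *cfg)
{
        /* Both properties are optional; keep defaults on read failure. */
        of_property_read_u32(child, "idt,voltage-microvolts", &cfg->microvolts);
        of_property_read_u32(child, "idt,slew-percent", &cfg->slew_percent);
}
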
Signed-off-by: Adam Ford Reviewed-by: Rob Herring Link: https://lore.kernel.org/r/20200603154329.31579-2-aford173@gmail.com Signed-off-by: Stephen Boyd --- .../devicetree/bindings/clock/idt,versaclock5.txt | 33 ++++++++++++++++++++++ include/dt-bindings/clk/versaclock.h | 13 +++++++++ 2 files changed, 46 insertions(+) create mode 100644 include/dt-bindings/clk/versaclock.h (limited to 'include') diff --git a/Documentation/devicetree/bindings/clock/idt,versaclock5.txt b/Documentation/devicetree/bindings/clock/idt,versaclock5.txt index bcff681a4bd0..6165b6ddb1a9 100644 --- a/Documentation/devicetree/bindings/clock/idt,versaclock5.txt +++ b/Documentation/devicetree/bindings/clock/idt,versaclock5.txt @@ -31,6 +31,29 @@ Required properties: - 5p49v5933 and - 5p49v5935: (optional) property not present or "clkin". +For all output ports, a corresponding, optional child node named OUT1, +OUT2, etc. can represent a each output, and the node can be used to +specify the following: + +- itd,mode: can be one of the following: + - VC5_LVPECL + - VC5_CMOS + - VC5_HCSL33 + - VC5_LVDS + - VC5_CMOS2 + - VC5_CMOSD + - VC5_HCSL25 + +- idt,voltage-microvolts: can be one of the following + - 1800000 + - 2500000 + - 3300000 +- idt,slew-percent: Percent of normal, can be one of + - 80 + - 85 + - 90 + - 100 + ==Mapping between clock specifier and physical pins== When referencing the provided clock in the DT using phandle and @@ -81,6 +104,16 @@ i2c-master-node { /* Connect XIN input to 25MHz reference */ clocks = <&ref25m>; clock-names = "xin"; + + OUT1 { + itd,mode = ; + idt,voltage-microvolts = <1800000>; + idt,slew-percent = <80>; + }; + OUT2 { + ... + }; + ... }; }; diff --git a/include/dt-bindings/clk/versaclock.h b/include/dt-bindings/clk/versaclock.h new file mode 100644 index 000000000000..c6a6a0946564 --- /dev/null +++ b/include/dt-bindings/clk/versaclock.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* This file defines field values used by the versaclock 6 family + * for defining output type + */ + +#define VC5_LVPECL 0 +#define VC5_CMOS 1 +#define VC5_HCSL33 2 +#define VC5_LVDS 3 +#define VC5_CMOS2 4 +#define VC5_CMOSD 5 +#define VC5_HCSL25 6 -- cgit v1.2.3 From 209edf95da63a0ad19750769f473f4ea1553d21d Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sun, 21 Jun 2020 14:46:01 +0300 Subject: net: dsa: felix: call port mdb operations from ocelot This adds the mdb hooks in felix and exports the mdb functions from ocelot. Signed-off-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- drivers/net/dsa/ocelot/felix.c | 26 ++++++++++++++++++++++++++ drivers/net/ethernet/mscc/ocelot.c | 23 ++++++++--------------- drivers/net/ethernet/mscc/ocelot.h | 5 ----- drivers/net/ethernet/mscc/ocelot_net.c | 26 ++++++++++++++++++++++++++ include/soc/mscc/ocelot.h | 4 ++++ 5 files changed, 64 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 66648986e6e3..25046777c993 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -59,6 +59,29 @@ static int felix_fdb_del(struct dsa_switch *ds, int port, return ocelot_fdb_del(ocelot, port, addr, vid); } +/* This callback needs to be present */ +static int felix_mdb_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb) +{ + return 0; +} + +static void felix_mdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_mdb_add(ocelot, port, mdb); +} + +static int felix_mdb_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_port_mdb_del(ocelot, port, mdb); +} + static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port, u8 state) { @@ -771,6 +794,9 @@ static const struct dsa_switch_ops felix_switch_ops = { .port_fdb_dump = felix_fdb_dump, .port_fdb_add = felix_fdb_add, .port_fdb_del = felix_fdb_del, + .port_mdb_prepare = felix_mdb_prepare, + .port_mdb_add = felix_mdb_add, + .port_mdb_del = felix_mdb_del, .port_bridge_join = felix_bridge_join, .port_bridge_leave = felix_bridge_leave, .port_stp_state_set = felix_bridge_stp_state_set, diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 4aadb65a6af8..468eaf5916e5 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -944,16 +944,12 @@ static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot, return NULL; } -int ocelot_port_obj_add_mdb(struct net_device *dev, - const struct switchdev_obj_port_mdb *mdb, - struct switchdev_trans *trans) +int ocelot_port_mdb_add(struct ocelot *ocelot, int port, + const struct switchdev_obj_port_mdb *mdb) { - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot_port *ocelot_port = &priv->port; - struct ocelot *ocelot = ocelot_port->ocelot; + struct ocelot_port *ocelot_port = ocelot->ports[port]; unsigned char addr[ETH_ALEN]; struct ocelot_multicast *mc; - int port = priv->chip_port; u16 vid = mdb->vid; bool new = false; @@ -991,17 +987,14 @@ int ocelot_port_obj_add_mdb(struct net_device *dev, return ocelot_mact_learn(ocelot, 0, addr, vid, ENTRYTYPE_MACv4); } -EXPORT_SYMBOL(ocelot_port_obj_add_mdb); +EXPORT_SYMBOL(ocelot_port_mdb_add); -int ocelot_port_obj_del_mdb(struct net_device *dev, - const struct switchdev_obj_port_mdb *mdb) +int ocelot_port_mdb_del(struct ocelot *ocelot, int port, + const struct switchdev_obj_port_mdb *mdb) { - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot_port *ocelot_port = &priv->port; - struct ocelot *ocelot = ocelot_port->ocelot; + struct ocelot_port *ocelot_port = ocelot->ports[port]; unsigned char addr[ETH_ALEN]; struct ocelot_multicast *mc; - int port = priv->chip_port; u16 vid = mdb->vid; if (port == ocelot->npi) @@ -1032,7 +1025,7 @@ int ocelot_port_obj_del_mdb(struct net_device *dev, return ocelot_mact_learn(ocelot, 0, addr, vid, ENTRYTYPE_MACv4); } 
-EXPORT_SYMBOL(ocelot_port_obj_del_mdb); +EXPORT_SYMBOL(ocelot_port_mdb_del); int ocelot_port_bridge_join(struct ocelot *ocelot, int port, struct net_device *bridge) diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index 0c23734a87be..be4a41646e5e 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -97,11 +97,6 @@ int ocelot_port_lag_join(struct ocelot *ocelot, int port, struct net_device *bond); void ocelot_port_lag_leave(struct ocelot *ocelot, int port, struct net_device *bond); -int ocelot_port_obj_del_mdb(struct net_device *dev, - const struct switchdev_obj_port_mdb *mdb); -int ocelot_port_obj_add_mdb(struct net_device *dev, - const struct switchdev_obj_port_mdb *mdb, - struct switchdev_trans *trans); u32 ocelot_port_readl(struct ocelot_port *port, u32 reg); void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg); diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 80cb1873e9d9..1bad146a0105 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -795,6 +795,32 @@ static int ocelot_port_vlan_del_vlan(struct net_device *dev, return 0; } +static int ocelot_port_obj_add_mdb(struct net_device *dev, + const struct switchdev_obj_port_mdb *mdb, + struct switchdev_trans *trans) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->chip_port; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + return ocelot_port_mdb_add(ocelot, port, mdb); +} + +static int ocelot_port_obj_del_mdb(struct net_device *dev, + const struct switchdev_obj_port_mdb *mdb) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->chip_port; + + return ocelot_port_mdb_del(ocelot, port, mdb); +} + static int ocelot_port_obj_add(struct net_device *dev, const struct switchdev_obj *obj, struct switchdev_trans *trans, diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index fa2c3904049e..80415b63ccfa 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -641,5 +641,9 @@ int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port, struct flow_cls_offload *f, bool ingress); int ocelot_cls_flower_stats(struct ocelot *ocelot, int port, struct flow_cls_offload *f, bool ingress); +int ocelot_port_mdb_add(struct ocelot *ocelot, int port, + const struct switchdev_obj_port_mdb *mdb); +int ocelot_port_mdb_del(struct ocelot *ocelot, int port, + const struct switchdev_obj_port_mdb *mdb); #endif -- cgit v1.2.3 From 96b029b004942ecdb50e40d3e45cdc8a3aec9135 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sun, 21 Jun 2020 14:46:02 +0300 Subject: net: mscc: ocelot: introduce macros for iterating over PGIDs The current iterators are impossible to understand at first glance without switching back and forth between the definitions and their actual use in the for loops. So introduce some convenience names to help readability. Signed-off-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mscc/ocelot.c | 8 ++++---- drivers/net/ethernet/mscc/ocelot_net.c | 2 +- include/soc/mscc/ocelot.h | 15 +++++++++++++++ 3 files changed, 20 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 468eaf5916e5..b6254c20f2f0 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -1064,10 +1064,10 @@ static void ocelot_set_aggr_pgids(struct ocelot *ocelot) int i, port, lag; /* Reset destination and aggregation PGIDS */ - for (port = 0; port < ocelot->num_phys_ports; port++) + for_each_unicast_dest_pgid(ocelot, port) ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port); - for (i = PGID_AGGR; i < PGID_SRC; i++) + for_each_aggr_pgid(ocelot, i) ocelot_write_rix(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0), ANA_PGID_PGID, i); @@ -1089,7 +1089,7 @@ static void ocelot_set_aggr_pgids(struct ocelot *ocelot) aggr_count++; } - for (i = PGID_AGGR; i < PGID_SRC; i++) { + for_each_aggr_pgid(ocelot, i) { u32 ac; ac = ocelot_read_rix(ocelot, ANA_PGID_PGID, i); @@ -1451,7 +1451,7 @@ int ocelot_init(struct ocelot *ocelot) } /* Allow broadcast MAC frames. */ - for (i = ocelot->num_phys_ports + 1; i < PGID_CPU; i++) { + for_each_nonreserved_multicast_dest_pgid(ocelot, i) { u32 val = ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports - 1, 0)); ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i); diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 1bad146a0105..702b42543fb7 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -422,7 +422,7 @@ static void ocelot_set_rx_mode(struct net_device *dev) * forwarded to the CPU port. */ val = GENMASK(ocelot->num_phys_ports - 1, 0); - for (i = ocelot->num_phys_ports + 1; i < PGID_CPU; i++) + for_each_nonreserved_multicast_dest_pgid(ocelot, i) ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i); __dev_mc_sync(dev, ocelot_mc_sync, ocelot_mc_unsync); diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 80415b63ccfa..e050f8121ba2 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -65,6 +65,21 @@ #define PGID_MCIPV4 62 #define PGID_MCIPV6 63 +#define for_each_unicast_dest_pgid(ocelot, pgid) \ + for ((pgid) = 0; \ + (pgid) < (ocelot)->num_phys_ports; \ + (pgid)++) + +#define for_each_nonreserved_multicast_dest_pgid(ocelot, pgid) \ + for ((pgid) = (ocelot)->num_phys_ports + 1; \ + (pgid) < PGID_CPU; \ + (pgid)++) + +#define for_each_aggr_pgid(ocelot, pgid) \ + for ((pgid) = PGID_AGGR; \ + (pgid) < PGID_SRC; \ + (pgid)++) + /* Aggregation PGIDs, one per Link Aggregation Code */ #define PGID_AGGR 64 -- cgit v1.2.3 From 018e4308349dbf32f4d971cbe72f4f3d6b1c217a Mon Sep 17 00:00:00 2001 From: Andrey Smirnov Date: Mon, 1 Jun 2020 16:06:07 -0700 Subject: clk: imx: vf610: add CAAM clock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit According to Vybrid Security RM, CCM_CCGR11[CG176] can be used to gate CAAM ipg clock. 
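For context, a consumer could pick up the newly exposed gate in the usual clk framework way. The sketch below is hypothetical: the clock name "ipg" is an assumption and caam_like_probe() is not taken from the CAAM driver.

#include <linux/clk.h>
#include <linux/device.h>

static int caam_like_probe(struct device *dev)
{
        /* "ipg" is an assumed clock-names entry, not taken from the driver. */
        struct clk *ipg = devm_clk_get(dev, "ipg");

        if (IS_ERR(ipg))
                return PTR_ERR(ipg);

        /* Ungate the CAAM ipg clock before touching the block. */
        return clk_prepare_enable(ipg);
}
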
Signed-off-by: Horia Geantă Signed-off-by: Andrey Smirnov Cc: Chris Healy Cc: Shawn Guo Cc: Fabio Estevam Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Cc: linux-imx@nxp.com Tested-by: Chris Healy Signed-off-by: Shawn Guo --- drivers/clk/imx/clk-vf610.c | 1 + include/dt-bindings/clock/vf610-clock.h | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/clk/imx/clk-vf610.c b/drivers/clk/imx/clk-vf610.c index cd04e7dc1878..5129ef8e1d6e 100644 --- a/drivers/clk/imx/clk-vf610.c +++ b/drivers/clk/imx/clk-vf610.c @@ -438,6 +438,7 @@ static void __init vf610_clocks_init(struct device_node *ccm_node) clk[VF610_CLK_SNVS] = imx_clk_gate2("snvs-rtc", "ipg_bus", CCM_CCGR6, CCM_CCGRx_CGn(7)); clk[VF610_CLK_DAP] = imx_clk_gate("dap", "platform_bus", CCM_CCSR, 24); clk[VF610_CLK_OCOTP] = imx_clk_gate("ocotp", "ipg_bus", CCM_CCGR6, CCM_CCGRx_CGn(5)); + clk[VF610_CLK_CAAM] = imx_clk_gate2("caam", "ipg_bus", CCM_CCGR11, CCM_CCGRx_CGn(0)); imx_check_clocks(clk, ARRAY_SIZE(clk)); diff --git a/include/dt-bindings/clock/vf610-clock.h b/include/dt-bindings/clock/vf610-clock.h index 95394f35a74a..0f2d60e884dc 100644 --- a/include/dt-bindings/clock/vf610-clock.h +++ b/include/dt-bindings/clock/vf610-clock.h @@ -195,6 +195,7 @@ #define VF610_CLK_WKPU 186 #define VF610_CLK_TCON0 187 #define VF610_CLK_TCON1 188 -#define VF610_CLK_END 189 +#define VF610_CLK_CAAM 189 +#define VF610_CLK_END 190 #endif /* __DT_BINDINGS_CLOCK_VF610_H */ -- cgit v1.2.3 From 169caf692567897da35382503a5caeb64ab4b8c7 Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Fri, 5 Jun 2020 09:59:31 +0800 Subject: firmware: imx: add resource management api Add resource management API, when we have multiple partition running together, resources not owned to current partition should not be used. Reviewed-by: Leonard Crestez Reviewed-by: Dong Aisheng Signed-off-by: Peng Fan Signed-off-by: Shawn Guo --- drivers/firmware/imx/Makefile | 2 +- drivers/firmware/imx/rm.c | 45 ++++++++++++++++++++++++ include/linux/firmware/imx/sci.h | 1 + include/linux/firmware/imx/svc/rm.h | 69 +++++++++++++++++++++++++++++++++++++ 4 files changed, 116 insertions(+), 1 deletion(-) create mode 100644 drivers/firmware/imx/rm.c create mode 100644 include/linux/firmware/imx/svc/rm.h (limited to 'include') diff --git a/drivers/firmware/imx/Makefile b/drivers/firmware/imx/Makefile index 08bc9ddfbdfb..17ea3613e142 100644 --- a/drivers/firmware/imx/Makefile +++ b/drivers/firmware/imx/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_IMX_DSP) += imx-dsp.o -obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o imx-scu-irq.o +obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o imx-scu-irq.o rm.o obj-$(CONFIG_IMX_SCU_PD) += scu-pd.o diff --git a/drivers/firmware/imx/rm.c b/drivers/firmware/imx/rm.c new file mode 100644 index 000000000000..a12db6ff323b --- /dev/null +++ b/drivers/firmware/imx/rm.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2020 NXP + * + * File containing client-side RPC functions for the RM service. These + * function are ported to clients that communicate to the SC. + */ + +#include + +struct imx_sc_msg_rm_rsrc_owned { + struct imx_sc_rpc_msg hdr; + u16 resource; +} __packed __aligned(4); + +/* + * This function check @resource is owned by current partition or not + * + * @param[in] ipc IPC handle + * @param[in] resource resource the control is associated with + * + * @return Returns 0 for not owned and 1 for owned. 
+ */ +bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource) +{ + struct imx_sc_msg_rm_rsrc_owned msg; + struct imx_sc_rpc_msg *hdr = &msg.hdr; + + hdr->ver = IMX_SC_RPC_VERSION; + hdr->svc = IMX_SC_RPC_SVC_RM; + hdr->func = IMX_SC_RM_FUNC_IS_RESOURCE_OWNED; + hdr->size = 2; + + msg.resource = resource; + + /* + * SCU firmware only returns value 0 or 1 + * for resource owned check which means not owned or owned. + * So it is always successful. + */ + imx_scu_call_rpc(ipc, &msg, true); + + return hdr->func; +} +EXPORT_SYMBOL(imx_sc_rm_is_resource_owned); diff --git a/include/linux/firmware/imx/sci.h b/include/linux/firmware/imx/sci.h index 3fa418a4ca67..3c459f54a88f 100644 --- a/include/linux/firmware/imx/sci.h +++ b/include/linux/firmware/imx/sci.h @@ -14,6 +14,7 @@ #include #include +#include int imx_scu_enable_general_irq_channel(struct device *dev); int imx_scu_irq_register_notifier(struct notifier_block *nb); diff --git a/include/linux/firmware/imx/svc/rm.h b/include/linux/firmware/imx/svc/rm.h new file mode 100644 index 000000000000..456b6a59d29b --- /dev/null +++ b/include/linux/firmware/imx/svc/rm.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017-2020 NXP + * + * Header file containing the public API for the System Controller (SC) + * Resource Management (RM) function. This includes functions for + * partitioning resources, pads, and memory regions. + * + * RM_SVC (SVC) Resource Management Service + * + * Module for the Resource Management (RM) service. + */ + +#ifndef _SC_RM_API_H +#define _SC_RM_API_H + +#include + +/* + * This type is used to indicate RPC RM function calls. + */ +enum imx_sc_rm_func { + IMX_SC_RM_FUNC_UNKNOWN = 0, + IMX_SC_RM_FUNC_PARTITION_ALLOC = 1, + IMX_SC_RM_FUNC_SET_CONFIDENTIAL = 31, + IMX_SC_RM_FUNC_PARTITION_FREE = 2, + IMX_SC_RM_FUNC_GET_DID = 26, + IMX_SC_RM_FUNC_PARTITION_STATIC = 3, + IMX_SC_RM_FUNC_PARTITION_LOCK = 4, + IMX_SC_RM_FUNC_GET_PARTITION = 5, + IMX_SC_RM_FUNC_SET_PARENT = 6, + IMX_SC_RM_FUNC_MOVE_ALL = 7, + IMX_SC_RM_FUNC_ASSIGN_RESOURCE = 8, + IMX_SC_RM_FUNC_SET_RESOURCE_MOVABLE = 9, + IMX_SC_RM_FUNC_SET_SUBSYS_RSRC_MOVABLE = 28, + IMX_SC_RM_FUNC_SET_MASTER_ATTRIBUTES = 10, + IMX_SC_RM_FUNC_SET_MASTER_SID = 11, + IMX_SC_RM_FUNC_SET_PERIPHERAL_PERMISSIONS = 12, + IMX_SC_RM_FUNC_IS_RESOURCE_OWNED = 13, + IMX_SC_RM_FUNC_GET_RESOURCE_OWNER = 33, + IMX_SC_RM_FUNC_IS_RESOURCE_MASTER = 14, + IMX_SC_RM_FUNC_IS_RESOURCE_PERIPHERAL = 15, + IMX_SC_RM_FUNC_GET_RESOURCE_INFO = 16, + IMX_SC_RM_FUNC_MEMREG_ALLOC = 17, + IMX_SC_RM_FUNC_MEMREG_SPLIT = 29, + IMX_SC_RM_FUNC_MEMREG_FRAG = 32, + IMX_SC_RM_FUNC_MEMREG_FREE = 18, + IMX_SC_RM_FUNC_FIND_MEMREG = 30, + IMX_SC_RM_FUNC_ASSIGN_MEMREG = 19, + IMX_SC_RM_FUNC_SET_MEMREG_PERMISSIONS = 20, + IMX_SC_RM_FUNC_IS_MEMREG_OWNED = 21, + IMX_SC_RM_FUNC_GET_MEMREG_INFO = 22, + IMX_SC_RM_FUNC_ASSIGN_PAD = 23, + IMX_SC_RM_FUNC_SET_PAD_MOVABLE = 24, + IMX_SC_RM_FUNC_IS_PAD_OWNED = 25, + IMX_SC_RM_FUNC_DUMP = 27, +}; + +#if IS_ENABLED(CONFIG_IMX_SCU) +bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource); +#else +static inline bool +imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource) +{ + return true; +} +#endif +#endif -- cgit v1.2.3 From 23a60f834406c8e3805328b630d09d5546b460c1 Mon Sep 17 00:00:00 2001 From: Collin Walling Date: Mon, 22 Jun 2020 11:46:36 -0400 Subject: s390/kvm: diagnose 0x318 sync and reset DIAGNOSE 0x318 (diag318) sets information regarding the environment the VM is 
running in (Linux, z/VM, etc) and is observed via firmware/service events. This is a privileged s390x instruction that must be intercepted by SIE. Userspace handles the instruction as well as migration. Data is communicated via VCPU register synchronization. The Control Program Name Code (CPNC) is stored in the SIE block. The CPNC along with the Control Program Version Code (CPVC) are stored in the kvm_vcpu_arch struct. This data is reset on load normal and clear resets. Signed-off-by: Collin Walling Reviewed-by: Janosch Frank Acked-by: Cornelia Huck Reviewed-by: David Hildenbrand Link: https://lore.kernel.org/r/20200622154636.5499-3-walling@linux.ibm.com [borntraeger@de.ibm.com: fix sync_reg position] Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 4 +++- arch/s390/include/uapi/asm/kvm.h | 7 +++++-- arch/s390/kvm/kvm-s390.c | 11 ++++++++++- arch/s390/kvm/vsie.c | 1 + include/uapi/linux/kvm.h | 1 + 5 files changed, 20 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index cee3cb6455a2..371ec6beb618 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -260,7 +260,8 @@ struct kvm_s390_sie_block { __u32 scaol; /* 0x0064 */ __u8 sdf; /* 0x0068 */ __u8 epdx; /* 0x0069 */ - __u8 reserved6a[2]; /* 0x006a */ + __u8 cpnc; /* 0x006a */ + __u8 reserved6b; /* 0x006b */ __u32 todpr; /* 0x006c */ #define GISA_FORMAT1 0x00000001 __u32 gd; /* 0x0070 */ @@ -745,6 +746,7 @@ struct kvm_vcpu_arch { bool gs_enabled; bool skey_enabled; struct kvm_s390_pv_vcpu pv; + union diag318_info diag318_info; }; struct kvm_vm_stat { diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index 436ec7636927..7a6b14874d65 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -231,11 +231,13 @@ struct kvm_guest_debug_arch { #define KVM_SYNC_GSCB (1UL << 9) #define KVM_SYNC_BPBC (1UL << 10) #define KVM_SYNC_ETOKEN (1UL << 11) +#define KVM_SYNC_DIAG318 (1UL << 12) #define KVM_SYNC_S390_VALID_FIELDS \ (KVM_SYNC_PREFIX | KVM_SYNC_GPRS | KVM_SYNC_ACRS | KVM_SYNC_CRS | \ KVM_SYNC_ARCH0 | KVM_SYNC_PFAULT | KVM_SYNC_VRS | KVM_SYNC_RICCB | \ - KVM_SYNC_FPRS | KVM_SYNC_GSCB | KVM_SYNC_BPBC | KVM_SYNC_ETOKEN) + KVM_SYNC_FPRS | KVM_SYNC_GSCB | KVM_SYNC_BPBC | KVM_SYNC_ETOKEN | \ + KVM_SYNC_DIAG318) /* length and alignment of the sdnx as a power of two */ #define SDNXC 8 @@ -264,7 +266,8 @@ struct kvm_sync_regs { __u8 reserved2 : 7; __u8 padding1[51]; /* riccb needs to be 64byte aligned */ __u8 riccb[64]; /* runtime instrumentation controls block */ - __u8 padding2[192]; /* sdnx needs to be 256byte aligned */ + __u64 diag318; /* diagnose 0x318 info */ + __u8 padding2[184]; /* sdnx needs to be 256byte aligned */ union { __u8 sdnx[SDNXL]; /* state description annex */ struct { diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index d47c19718615..08e6cf6cb454 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -545,6 +545,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_S390_AIS_MIGRATION: case KVM_CAP_S390_VCPU_RESETS: case KVM_CAP_SET_GUEST_DEBUG: + case KVM_CAP_S390_DIAG318: r = 1; break; case KVM_CAP_S390_HPAGE_1M: @@ -3267,7 +3268,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) KVM_SYNC_ACRS | KVM_SYNC_CRS | KVM_SYNC_ARCH0 | - KVM_SYNC_PFAULT; + KVM_SYNC_PFAULT | + KVM_SYNC_DIAG318; kvm_s390_set_prefix(vcpu, 0); if (test_kvm_facility(vcpu->kvm, 64)) 
vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB; @@ -3562,6 +3564,7 @@ static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu) vcpu->arch.sie_block->pp = 0; vcpu->arch.sie_block->fpf &= ~FPF_BPBC; vcpu->arch.sie_block->todpr = 0; + vcpu->arch.sie_block->cpnc = 0; } } @@ -3579,6 +3582,7 @@ static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu) regs->etoken = 0; regs->etoken_extension = 0; + regs->diag318 = 0; } int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) @@ -4196,6 +4200,10 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) kvm_clear_async_pf_completion_queue(vcpu); } + if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) { + vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318; + vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc; + } /* * If userspace sets the riccb (e.g. after migration) to a valid state, * we should enable RI here instead of doing the lazy enablement. @@ -4297,6 +4305,7 @@ static void store_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) kvm_run->s.regs.pp = vcpu->arch.sie_block->pp; kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea; kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC; + kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val; if (MACHINE_HAS_GS) { __ctl_set_bit(2, 4); if (vcpu->arch.gs_enabled) diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index 9e9056cebfcf..4f3cbf6003a9 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -548,6 +548,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) scb_s->ecd |= scb_o->ecd & ECD_ETOKENF; scb_s->hpid = HPID_VSIE; + scb_s->cpnc = scb_o->cpnc; prepare_ibc(vcpu, vsie_page); rc = shadow_crycb(vcpu, vsie_page); diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 4fdf30316582..35cdb4307904 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1031,6 +1031,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_PPC_SECURE_GUEST 181 #define KVM_CAP_HALT_POLL 182 #define KVM_CAP_ASYNC_PF_INT 183 +#define KVM_CAP_S390_DIAG318 184 #ifdef KVM_CAP_IRQ_ROUTING -- cgit v1.2.3 From 830e87ed15f85765bb7e57f310f95dfd87f11dfa Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Thu, 14 May 2020 18:01:40 +0200 Subject: media: videobuf2: use explicit unsigned int in vb2_queue Switch from 'unsigned' to 'unsigned int' so that checkpatch doesn't complain. 
Signed-off-by: Sergey Senozhatsky Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/media/videobuf2-core.h | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index f11b96514cf7..9e522bd2acc7 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -558,15 +558,15 @@ struct vb2_queue { unsigned int io_modes; struct device *dev; unsigned long dma_attrs; - unsigned bidirectional:1; - unsigned fileio_read_once:1; - unsigned fileio_write_immediately:1; - unsigned allow_zero_bytesused:1; - unsigned quirk_poll_must_check_waiting_for_buffers:1; - unsigned supports_requests:1; - unsigned requires_requests:1; - unsigned uses_qbuf:1; - unsigned uses_requests:1; + unsigned int bidirectional:1; + unsigned int fileio_read_once:1; + unsigned int fileio_write_immediately:1; + unsigned int allow_zero_bytesused:1; + unsigned int quirk_poll_must_check_waiting_for_buffers:1; + unsigned int supports_requests:1; + unsigned int requires_requests:1; + unsigned int uses_qbuf:1; + unsigned int uses_requests:1; struct mutex *lock; void *owner; -- cgit v1.2.3 From 6d2199868a9aede70a4ee5fa32e6ae2800b8b25a Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Thu, 14 May 2020 18:01:41 +0200 Subject: media: videobuf2: add cache management members Extend vb2_buffer and vb2_queue structs with cache management members. V4L2 UAPI already contains two buffer flags which user-space, supposedly, can use to control buffer cache sync: - V4L2_BUF_FLAG_NO_CACHE_INVALIDATE - V4L2_BUF_FLAG_NO_CACHE_CLEAN None of these, however, do anything at the moment. This patch set is intended to change it. Since user-space cache management hints are supposed to be implemented on a per-buffer basis we need to extend vb2_buffer struct with two new members ->need_cache_sync_on_prepare and ->need_cache_sync_on_finish, which will store corresponding user-space hints. In order to preserve the existing behaviour, user-space cache managements flags will be handled only by those drivers that permit user-space cache hints. That's the purpose of vb2_queue ->allow_cache_hints member. Driver must set ->allow_cache_hints during queue initialisation to enable cache management hints mechanism. Signed-off-by: Sergey Senozhatsky Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/media/videobuf2-core.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include') diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index 9e522bd2acc7..7f39d9fffc8c 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -263,6 +263,10 @@ struct vb2_buffer { * after the 'buf_finish' op is called. * copied_timestamp: the timestamp of this capture buffer was copied * from an output buffer. + * need_cache_sync_on_prepare: when set buffer's ->prepare() function + * performs cache sync/invalidation. + * need_cache_sync_on_finish: when set buffer's ->finish() function + * performs cache sync/invalidation. 
* queued_entry: entry on the queued buffers list, which holds * all buffers queued from userspace * done_entry: entry on the list that stores all buffers ready @@ -273,6 +277,8 @@ struct vb2_buffer { unsigned int synced:1; unsigned int prepared:1; unsigned int copied_timestamp:1; + unsigned int need_cache_sync_on_prepare:1; + unsigned int need_cache_sync_on_finish:1; struct vb2_plane planes[VB2_MAX_PLANES]; struct list_head queued_entry; @@ -491,6 +497,9 @@ struct vb2_buf_ops { * @uses_requests: requests are used for this queue. Set to 1 the first time * a request is queued. Set to 0 when the queue is canceled. * If this is 1, then you cannot queue buffers directly. + * @allow_cache_hints: when set user-space can pass cache management hints in + * order to skip cache flush/invalidation on ->prepare() or/and + * ->finish(). * @lock: pointer to a mutex that protects the &struct vb2_queue. The * driver can set this to a mutex to let the v4l2 core serialize * the queuing ioctls. If the driver wants to handle locking @@ -567,6 +576,7 @@ struct vb2_queue { unsigned int requires_requests:1; unsigned int uses_qbuf:1; unsigned int uses_requests:1; + unsigned int allow_cache_hints:1; struct mutex *lock; void *owner; -- cgit v1.2.3 From f5f5fa73fbfb9f346f1b5f37ebf343bae6ef6361 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Thu, 14 May 2020 18:01:42 +0200 Subject: media: videobuf2: handle V4L2 buffer cache flags Set video buffer cache management flags corresponding to V4L2 cache flags. Both ->prepare() and ->finish() cache management hints should be passed during this stage (buffer preparation), because there is no other way for user-space to tell V4L2 to avoid ->finish() cache flush. Signed-off-by: Sergey Senozhatsky Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/common/videobuf2/videobuf2-v4l2.c | 48 +++++++++++++++++++++++++ include/media/videobuf2-core.h | 11 ++++++ 2 files changed, 59 insertions(+) (limited to 'include') diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c index eb5d5db96552..f13851212cc8 100644 --- a/drivers/media/common/videobuf2/videobuf2-v4l2.c +++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c @@ -337,6 +337,53 @@ static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b return 0; } +static void set_buffer_cache_hints(struct vb2_queue *q, + struct vb2_buffer *vb, + struct v4l2_buffer *b) +{ + /* + * DMA exporter should take care of cache syncs, so we can avoid + * explicit ->prepare()/->finish() syncs. For other ->memory types + * we always need ->prepare() or/and ->finish() cache sync. + */ + if (q->memory == VB2_MEMORY_DMABUF) { + vb->need_cache_sync_on_finish = 0; + vb->need_cache_sync_on_prepare = 0; + return; + } + + /* + * Cache sync/invalidation flags are set by default in order to + * preserve existing behaviour for old apps/drivers. + */ + vb->need_cache_sync_on_prepare = 1; + vb->need_cache_sync_on_finish = 1; + + if (!vb2_queue_allows_cache_hints(q)) { + /* + * Clear buffer cache flags if queue does not support user + * space hints. That's to indicate to userspace that these + * flags won't work. + */ + b->flags &= ~V4L2_BUF_FLAG_NO_CACHE_INVALIDATE; + b->flags &= ~V4L2_BUF_FLAG_NO_CACHE_CLEAN; + return; + } + + /* + * ->finish() cache sync can be avoided when queue direction is + * TO_DEVICE. 
+ */ + if (q->dma_dir == DMA_TO_DEVICE) + vb->need_cache_sync_on_finish = 0; + + if (b->flags & V4L2_BUF_FLAG_NO_CACHE_INVALIDATE) + vb->need_cache_sync_on_finish = 0; + + if (b->flags & V4L2_BUF_FLAG_NO_CACHE_CLEAN) + vb->need_cache_sync_on_prepare = 0; +} + static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev, struct v4l2_buffer *b, bool is_prepare, struct media_request **p_req) @@ -381,6 +428,7 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *md } if (!vb->prepared) { + set_buffer_cache_hints(q, vb, b); /* Copy relevant information provided by the userspace */ memset(vbuf->planes, 0, sizeof(vbuf->planes[0]) * vb->num_planes); diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index 7f39d9fffc8c..ccc5c498d3e3 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -635,6 +635,17 @@ struct vb2_queue { #endif }; +/** + * vb2_queue_allows_cache_hints() - Return true if the queue allows cache + * and memory consistency hints. + * + * @q: pointer to &struct vb2_queue with videobuf2 queue + */ +static inline bool vb2_queue_allows_cache_hints(struct vb2_queue *q) +{ + return q->allow_cache_hints && q->memory == VB2_MEMORY_MMAP; +} + /** * vb2_plane_vaddr() - Return a kernel virtual address of a given plane. * @vb: pointer to &struct vb2_buffer to which the plane in -- cgit v1.2.3 From 21a00fb33790f828a34b9ce50ab9f9130bc1ffb4 Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Mon, 22 Jun 2020 10:42:37 -0500 Subject: ASoC: soc-link: introduce exit() callback Some machine drivers allocate or request resources with snd_soc_link_init() phase of the card probe. These resources need to be properly released when removing a card, and this patch suggests a dual exit() callback. The exit() is invoked in soc_remove_pcm_runtime(), which is not completely symmetric with the init() invoked in soc_init_pcm_runtime(). Alternate solutions were considered, e.g. adding a .remove() callback for the platform driver, but that's not symmetrical at all and would be difficult to handle if there are more than one dailink implementing an .init(). We looked also into using .remove_dai_link() callback, but that would also be imbalanced. Note that because of the error handling in snd_soc_bind_card(), which jumps to probe_end, there is no way to guarantee the exit() is invoked with resources allocated in the init(). Prior to releasing those resources, implementations of the exit() callback shall check the resources are valid. 
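As a rough illustration (not part of this patch set), a machine driver pairing an existing dai_link .init() with the new .exit() could look like the sketch below. The GPIO resource and every name in it are hypothetical; the point is that exit() validates the resource before releasing it, since it may run even when init() never completed.

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <sound/soc.h>

/*
 * Hypothetical machine-driver sketch (not taken from this series): the
 * dai_link .init() requests a resource and the new .exit() releases it.
 * Because snd_soc_bind_card() can jump to probe_end on error, .exit()
 * may run without .init() ever having acquired anything, so the
 * callback checks the resource before freeing it.
 */
static struct gpio_desc *pa_enable;	/* illustrative resource */

static int my_link_init(struct snd_soc_pcm_runtime *rtd)
{
	pa_enable = gpiod_get(rtd->card->dev, "pa-enable", GPIOD_OUT_LOW);
	if (IS_ERR(pa_enable)) {
		int err = PTR_ERR(pa_enable);

		pa_enable = NULL;
		return err;
	}
	return 0;
}

static void my_link_exit(struct snd_soc_pcm_runtime *rtd)
{
	/* Check validity first, as the commit message recommends. */
	if (pa_enable)
		gpiod_put(pa_enable);
	pa_enable = NULL;
}

static struct snd_soc_dai_link my_dai_link = {
	.name = "hypothetical-link",
	.init = my_link_init,
	.exit = my_link_exit,	/* new callback added by this patch */
	/* cpus/codecs/platforms omitted for brevity */
};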
Suggested-by: Andy Shevchenko Signed-off-by: Pierre-Louis Bossart Reviewed-by: Guennadi Liakhovetski Reviewed-by: Curtis Malainey Link: https://lore.kernel.org/r/20200622154241.29053-2-pierre-louis.bossart@linux.intel.com Signed-off-by: Mark Brown --- include/sound/soc-link.h | 1 + include/sound/soc.h | 3 +++ sound/soc/soc-core.c | 3 +++ sound/soc/soc-link.c | 6 ++++++ 4 files changed, 13 insertions(+) (limited to 'include') diff --git a/include/sound/soc-link.h b/include/sound/soc-link.h index 3dd6e33e94ec..337ac5666757 100644 --- a/include/sound/soc-link.h +++ b/include/sound/soc-link.h @@ -9,6 +9,7 @@ #define __SOC_LINK_H int snd_soc_link_init(struct snd_soc_pcm_runtime *rtd); +void snd_soc_link_exit(struct snd_soc_pcm_runtime *rtd); int snd_soc_link_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hw_params *params); diff --git a/include/sound/soc.h b/include/sound/soc.h index 2756f9bcac3e..33aceadebd03 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -799,6 +799,9 @@ struct snd_soc_dai_link { /* codec/machine specific init - e.g. add machine controls */ int (*init)(struct snd_soc_pcm_runtime *rtd); + /* codec/machine specific exit - dual of init() */ + void (*exit)(struct snd_soc_pcm_runtime *rtd); + /* optional hw_params re-writing for BE and FE sync */ int (*be_hw_params_fixup)(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hw_params *params); diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 62c0c9482018..adedadcb0efb 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -945,6 +945,9 @@ void snd_soc_remove_pcm_runtime(struct snd_soc_card *card, { lockdep_assert_held(&client_mutex); + /* release machine specific resources */ + snd_soc_link_exit(rtd); + /* * Notify the machine driver for extra destruction */ diff --git a/sound/soc/soc-link.c b/sound/soc/soc-link.c index f849278beba0..1c3bf2118718 100644 --- a/sound/soc/soc-link.c +++ b/sound/soc/soc-link.c @@ -40,6 +40,12 @@ int snd_soc_link_init(struct snd_soc_pcm_runtime *rtd) return soc_link_ret(rtd, ret); } +void snd_soc_link_exit(struct snd_soc_pcm_runtime *rtd) +{ + if (rtd->dai_link->exit) + rtd->dai_link->exit(rtd); +} + int snd_soc_link_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hw_params *params) { -- cgit v1.2.3 From ac53503ee38a1ffbc47c7cca6cbfc48ba9c65c5e Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Thu, 14 May 2020 18:01:43 +0200 Subject: media: videobuf2: add V4L2_FLAG_MEMORY_NON_CONSISTENT flag By setting or clearing V4L2_FLAG_MEMORY_NON_CONSISTENT flag user-space should be able to set or clear queue's NON_CONSISTENT ->dma_attrs. Queue's ->dma_attrs are passed to the underlying allocator in __vb2_buf_mem_alloc(), so thus user-space is able to request vb2 buffer's memory to be either consistent (coherent) or non-consistent. The patch set also adds a corresponding capability flag: fill_buf_caps() reports V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS when queue supports user-space cache management hints. Note, however, that MMAP_CACHE_HINTS capability only valid when the queue is used for memory MMAP-ed streaming I/O. 
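A hedged user-space sketch of how the new capability is meant to be consumed (illustrative only, not from this series): the application checks V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS reported by REQBUFS and only then passes a cache hint at QBUF time, since the hints are otherwise ignored.

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/*
 * User-space sketch: rely on the cache-management hints only when the
 * MMAP queue reports V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS.
 */
static int qbuf_skip_invalidate(int fd, unsigned int index)
{
	struct v4l2_requestbuffers req;
	struct v4l2_buffer buf;

	memset(&req, 0, sizeof(req));
	req.count = 4;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
		return -errno;

	memset(&buf, 0, sizeof(buf));
	buf.index = index;
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;

	/* CPU will not touch the captured data: ask to skip invalidation. */
	if (req.capabilities & V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS)
		buf.flags |= V4L2_BUF_FLAG_NO_CACHE_INVALIDATE;

	if (ioctl(fd, VIDIOC_QBUF, &buf) < 0)
		return -errno;
	return 0;
}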
Signed-off-by: Sergey Senozhatsky Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- Documentation/userspace-api/media/v4l/buffer.rst | 40 ++++++++++++++++++++-- .../userspace-api/media/v4l/vidioc-reqbufs.rst | 10 ++++++ drivers/media/common/videobuf2/videobuf2-v4l2.c | 2 ++ include/uapi/linux/videodev2.h | 3 ++ 4 files changed, 53 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/Documentation/userspace-api/media/v4l/buffer.rst b/Documentation/userspace-api/media/v4l/buffer.rst index 951ae1ed485f..5088393b5a5c 100644 --- a/Documentation/userspace-api/media/v4l/buffer.rst +++ b/Documentation/userspace-api/media/v4l/buffer.rst @@ -577,7 +577,10 @@ Buffer Flags applications shall use this flag if the data captured in the buffer is not going to be touched by the CPU, instead the buffer will, probably, be passed on to a DMA-capable hardware unit for - further processing or output. + further processing or output. This flag is ignored unless the + queue is used for :ref:`memory mapping ` streaming I/O and + reports :ref:`V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS + ` capability. * .. _`V4L2-BUF-FLAG-NO-CACHE-CLEAN`: - ``V4L2_BUF_FLAG_NO_CACHE_CLEAN`` @@ -585,7 +588,10 @@ Buffer Flags - Caches do not have to be cleaned for this buffer. Typically applications shall use this flag for output buffers if the data in this buffer has not been created by the CPU but by some - DMA-capable unit, in which case caches have not been used. + DMA-capable unit, in which case caches have not been used. This flag + is ignored unless the queue is used for :ref:`memory mapping ` + streaming I/O and reports :ref:`V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS + ` capability. * .. _`V4L2-BUF-FLAG-M2M-HOLD-CAPTURE-BUF`: - ``V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF`` @@ -681,6 +687,36 @@ Buffer Flags \normalsize +.. _memory-flags: + +Memory Consistency Flags +======================== + +.. tabularcolumns:: |p{7.0cm}|p{2.2cm}|p{8.3cm}| + +.. cssclass:: longtable + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + :widths: 3 1 4 + + * .. _`V4L2-FLAG-MEMORY-NON-CONSISTENT`: + + - ``V4L2_FLAG_MEMORY_NON_CONSISTENT`` + - 0x00000001 + - A buffer is allocated either in consistent (it will be automatically + coherent between the CPU and the bus) or non-consistent memory. The + latter can provide performance gains, for instance the CPU cache + sync/flush operations can be avoided if the buffer is accessed by the + corresponding device only and the CPU does not read/write to/from that + buffer. However, this requires extra care from the driver -- it must + guarantee memory consistency by issuing a cache flush/sync when + consistency is needed. If this flag is set V4L2 will attempt to + allocate the buffer in non-consistent memory. The flag takes effect + only if the buffer is used for :ref:`memory mapping ` I/O and the + queue reports the :ref:`V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS + ` capability. .. c:type:: v4l2_memory diff --git a/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst b/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst index b6d52083707b..96a59793d857 100644 --- a/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst +++ b/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst @@ -126,6 +126,7 @@ aborting or finishing any DMA in progress, an implicit .. _V4L2-BUF-CAP-SUPPORTS-REQUESTS: .. _V4L2-BUF-CAP-SUPPORTS-ORPHANED-BUFS: .. _V4L2-BUF-CAP-SUPPORTS-M2M-HOLD-CAPTURE-BUF: +.. _V4L2-BUF-CAP-SUPPORTS-MMAP-CACHE-HINTS: .. 
cssclass:: longtable @@ -156,6 +157,15 @@ aborting or finishing any DMA in progress, an implicit - Only valid for stateless decoders. If set, then userspace can set the ``V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF`` flag to hold off on returning the capture buffer until the OUTPUT timestamp changes. + * - ``V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS`` + - 0x00000040 + - This capability is set by the driver to indicate that the queue supports + cache and memory management hints. However, it's only valid when the + queue is used for :ref:`memory mapping ` streaming I/O. See + :ref:`V4L2_FLAG_MEMORY_NON_CONSISTENT `, + :ref:`V4L2_BUF_FLAG_NO_CACHE_INVALIDATE ` and + :ref:`V4L2_BUF_FLAG_NO_CACHE_CLEAN `. + Return Value ============ diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c index f13851212cc8..e4b4354b42b8 100644 --- a/drivers/media/common/videobuf2/videobuf2-v4l2.c +++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c @@ -710,6 +710,8 @@ static void fill_buf_caps(struct vb2_queue *q, u32 *caps) *caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF; if (q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF) *caps |= V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF; + if (q->allow_cache_hints && q->io_modes & VB2_MMAP) + *caps |= V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS; #ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API if (q->supports_requests) *caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS; diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index c3a1cf1c507f..34ba1017b89b 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -189,6 +189,8 @@ enum v4l2_memory { V4L2_MEMORY_DMABUF = 4, }; +#define V4L2_FLAG_MEMORY_NON_CONSISTENT (1 << 0) + /* see also http://vektor.theorem.ca/graphics/ycbcr/ */ enum v4l2_colorspace { /* @@ -954,6 +956,7 @@ struct v4l2_requestbuffers { #define V4L2_BUF_CAP_SUPPORTS_REQUESTS (1 << 3) #define V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS (1 << 4) #define V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF (1 << 5) +#define V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS (1 << 6) /** * struct v4l2_plane - plane info for multi-planar buffers -- cgit v1.2.3 From 7b4b45555c79db03dad8192e6ef85cb30236827b Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Thu, 14 May 2020 18:01:44 +0200 Subject: media: videobuf2: add queue memory consistency parameter Preparations for future V4L2_FLAG_MEMORY_NON_CONSISTENT support. Extend vb2_core_reqbufs() parameters list to accept requests' ->flags, which will be used for memory consistency configuration. An attempt to allocate a buffer with consistency requirements which don't match queue's consistency model will fail. 
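The consistency request (like the cache hints) only takes effect when the driver has opted its MMAP queue in via ->allow_cache_hints, which vb2_queue_allows_cache_hints() checks. A hypothetical driver-side sketch of that opt-in follows; apart from allow_cache_hints it is ordinary vb2 queue setup, and the ops/mem_ops/lock pointers are placeholders.

#include <media/videobuf2-v4l2.h>

/* Hypothetical sketch: enable user-space cache/consistency hints on a queue. */
static int my_init_vb2_queue(struct vb2_queue *q, void *drv_priv,
			     struct device *dma_dev,
			     const struct vb2_ops *ops,
			     const struct vb2_mem_ops *mem_ops,
			     struct mutex *lock)
{
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_DMABUF;
	q->drv_priv = drv_priv;
	q->dev = dma_dev;
	q->ops = ops;
	q->mem_ops = mem_ops;
	q->lock = lock;
	q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	/* New in this series: without this, hints and flags are cleared. */
	q->allow_cache_hints = 1;

	return vb2_queue_init(q);
}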
Signed-off-by: Sergey Senozhatsky Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/common/videobuf2/videobuf2-core.c | 51 ++++++++++++++++++++----- drivers/media/common/videobuf2/videobuf2-v4l2.c | 6 +-- drivers/media/dvb-core/dvb_vb2.c | 2 +- include/media/videobuf2-core.h | 8 +++- 4 files changed, 51 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c index 44d65f5be845..0fdcf90330df 100644 --- a/drivers/media/common/videobuf2/videobuf2-core.c +++ b/drivers/media/common/videobuf2/videobuf2-core.c @@ -664,11 +664,33 @@ int vb2_verify_memory_type(struct vb2_queue *q, } EXPORT_SYMBOL(vb2_verify_memory_type); +static void set_queue_consistency(struct vb2_queue *q, bool consistent_mem) +{ + q->dma_attrs &= ~DMA_ATTR_NON_CONSISTENT; + + if (!vb2_queue_allows_cache_hints(q)) + return; + if (!consistent_mem) + q->dma_attrs |= DMA_ATTR_NON_CONSISTENT; +} + +static bool verify_consistency_attr(struct vb2_queue *q, bool consistent_mem) +{ + bool queue_is_consistent = !(q->dma_attrs & DMA_ATTR_NON_CONSISTENT); + + if (consistent_mem != queue_is_consistent) { + dprintk(1, "memory consistency model mismatch\n"); + return false; + } + return true; +} + int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, - unsigned int *count) + unsigned int flags, unsigned int *count) { unsigned int num_buffers, allocated_buffers, num_planes = 0; unsigned plane_sizes[VB2_MAX_PLANES] = { }; + bool consistent_mem = true; unsigned int i; int ret; @@ -683,7 +705,8 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, } if (*count == 0 || q->num_buffers != 0 || - (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory)) { + (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory) || + !verify_consistency_attr(q, consistent_mem)) { /* * We already have buffers allocated, so first check if they * are not in use and can be freed. @@ -720,6 +743,7 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, num_buffers = min_t(unsigned int, num_buffers, VB2_MAX_FRAME); memset(q->alloc_devs, 0, sizeof(q->alloc_devs)); q->memory = memory; + set_queue_consistency(q, consistent_mem); /* * Ask the driver how many buffers and planes per buffer it requires. 
@@ -804,11 +828,13 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, EXPORT_SYMBOL_GPL(vb2_core_reqbufs); int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, - unsigned int *count, unsigned requested_planes, - const unsigned requested_sizes[]) + unsigned int flags, unsigned int *count, + unsigned int requested_planes, + const unsigned int requested_sizes[]) { unsigned int num_planes = 0, num_buffers, allocated_buffers; unsigned plane_sizes[VB2_MAX_PLANES] = { }; + bool consistent_mem = true; int ret; if (q->num_buffers == VB2_MAX_FRAME) { @@ -823,10 +849,15 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, } memset(q->alloc_devs, 0, sizeof(q->alloc_devs)); q->memory = memory; + set_queue_consistency(q, consistent_mem); q->waiting_for_buffers = !q->is_output; - } else if (q->memory != memory) { - dprintk(1, "memory model mismatch\n"); - return -EINVAL; + } else { + if (q->memory != memory) { + dprintk(1, "memory model mismatch\n"); + return -EINVAL; + } + if (!verify_consistency_attr(q, consistent_mem)) + return -EINVAL; } num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers); @@ -2498,7 +2529,7 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read) fileio->memory = VB2_MEMORY_MMAP; fileio->type = q->type; q->fileio = fileio; - ret = vb2_core_reqbufs(q, fileio->memory, &fileio->count); + ret = vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count); if (ret) goto err_kfree; @@ -2555,7 +2586,7 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read) err_reqbufs: fileio->count = 0; - vb2_core_reqbufs(q, fileio->memory, &fileio->count); + vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count); err_kfree: q->fileio = NULL; @@ -2575,7 +2606,7 @@ static int __vb2_cleanup_fileio(struct vb2_queue *q) vb2_core_streamoff(q, q->type); q->fileio = NULL; fileio->count = 0; - vb2_core_reqbufs(q, fileio->memory, &fileio->count); + vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count); kfree(fileio); dprintk(3, "file io emulator closed\n"); } diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c index e4b4354b42b8..26a3ec333bb7 100644 --- a/drivers/media/common/videobuf2/videobuf2-v4l2.c +++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c @@ -723,7 +723,7 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) int ret = vb2_verify_memory_type(q, req->memory, req->type); fill_buf_caps(q, &req->capabilities); - return ret ? ret : vb2_core_reqbufs(q, req->memory, &req->count); + return ret ? ret : vb2_core_reqbufs(q, req->memory, 0, &req->count); } EXPORT_SYMBOL_GPL(vb2_reqbufs); @@ -797,7 +797,7 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create) for (i = 0; i < requested_planes; i++) if (requested_sizes[i] == 0) return -EINVAL; - return ret ? ret : vb2_core_create_bufs(q, create->memory, + return ret ? ret : vb2_core_create_bufs(q, create->memory, 0, &create->count, requested_planes, requested_sizes); } EXPORT_SYMBOL_GPL(vb2_create_bufs); @@ -973,7 +973,7 @@ int vb2_ioctl_reqbufs(struct file *file, void *priv, return res; if (vb2_queue_is_busy(vdev, file)) return -EBUSY; - res = vb2_core_reqbufs(vdev->queue, p->memory, &p->count); + res = vb2_core_reqbufs(vdev->queue, p->memory, 0, &p->count); /* If count == 0, then the owner has released all buffers and he is no longer owner of the queue. Otherwise we have a new owner. 
*/ if (res == 0) diff --git a/drivers/media/dvb-core/dvb_vb2.c b/drivers/media/dvb-core/dvb_vb2.c index 6974f1731529..959d110407a4 100644 --- a/drivers/media/dvb-core/dvb_vb2.c +++ b/drivers/media/dvb-core/dvb_vb2.c @@ -342,7 +342,7 @@ int dvb_vb2_reqbufs(struct dvb_vb2_ctx *ctx, struct dmx_requestbuffers *req) ctx->buf_siz = req->size; ctx->buf_cnt = req->count; - ret = vb2_core_reqbufs(&ctx->vb_q, VB2_MEMORY_MMAP, &req->count); + ret = vb2_core_reqbufs(&ctx->vb_q, VB2_MEMORY_MMAP, 0, &req->count); if (ret) { ctx->state = DVB_VB2_STATE_NONE; dprintk(1, "[%s] count=%d size=%d errno=%d\n", ctx->name, diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index ccc5c498d3e3..9e68fe043a6c 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -740,6 +740,8 @@ void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb); * vb2_core_reqbufs() - Initiate streaming. * @q: pointer to &struct vb2_queue with videobuf2 queue. * @memory: memory type, as defined by &enum vb2_memory. + * @flags: auxiliary queue/buffer management flags. Currently, the only + * used flag is %V4L2_FLAG_MEMORY_NON_CONSISTENT. * @count: requested buffer count. * * Videobuf2 core helper to implement VIDIOC_REQBUF() operation. It is called @@ -764,12 +766,13 @@ void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb); * Return: returns zero on success; an error code otherwise. */ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, - unsigned int *count); + unsigned int flags, unsigned int *count); /** * vb2_core_create_bufs() - Allocate buffers and any required auxiliary structs * @q: pointer to &struct vb2_queue with videobuf2 queue. * @memory: memory type, as defined by &enum vb2_memory. + * @flags: auxiliary queue/buffer management flags. * @count: requested buffer count. * @requested_planes: number of planes requested. * @requested_sizes: array with the size of the planes. @@ -787,7 +790,8 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, * Return: returns zero on success; an error code otherwise. */ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, - unsigned int *count, unsigned int requested_planes, + unsigned int flags, unsigned int *count, + unsigned int requested_planes, const unsigned int requested_sizes[]); /** -- cgit v1.2.3 From 1e0b2318fa75d186ee0d2be31843ce867385fcc4 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Thu, 14 May 2020 18:01:45 +0200 Subject: media: videobuf2: handle V4L2_FLAG_MEMORY_NON_CONSISTENT flag This patch lets user-space to request a non-consistent memory allocation during CREATE_BUFS and REQBUFS ioctl calls. = CREATE_BUFS struct v4l2_create_buffers has seven 4-byte reserved areas, so reserved[0] is renamed to ->flags. The struct, thus, now has six reserved 4-byte regions. = CREATE_BUFS32 struct v4l2_create_buffers32 has seven 4-byte reserved areas, so reserved[0] is renamed to ->flags. The struct, thus, now has six reserved 4-byte regions. = REQBUFS We use one bit of a ->reserved[1] member of struct v4l2_requestbuffers, which is now renamed to ->flags. Unlike v4l2_create_buffers, struct v4l2_requestbuffers does not have enough reserved room. Therefore for backward compatibility ->reserved and ->flags were put into anonymous union. 
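A hedged user-space sketch of the resulting REQBUFS usage (illustrative, not part of the series): the application sets the new ->flags member to request non-consistent memory, and since clear_consistency_attr() strips the flag when the queue does not support MMAP cache hints, re-reading the flag on return tells the caller whether the hint was honoured.

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Request MMAP buffers backed by non-consistent memory via ->flags. */
static int reqbufs_non_consistent(int fd, unsigned int count)
{
	struct v4l2_requestbuffers req;

	memset(&req, 0, sizeof(req));
	req.count = count;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	req.flags = V4L2_FLAG_MEMORY_NON_CONSISTENT;

	if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
		return -errno;

	/* Non-zero: the queue accepted the non-consistent allocation hint. */
	return !!(req.flags & V4L2_FLAG_MEMORY_NON_CONSISTENT);
}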
Signed-off-by: Sergey Senozhatsky Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- .../userspace-api/media/v4l/vidioc-create-bufs.rst | 7 ++++++- .../userspace-api/media/v4l/vidioc-reqbufs.rst | 11 ++++++++-- drivers/media/common/videobuf2/videobuf2-core.c | 6 ++++++ drivers/media/common/videobuf2/videobuf2-v4l2.c | 24 ++++++++++++++++++---- drivers/media/v4l2-core/v4l2-compat-ioctl32.c | 10 +++++++-- drivers/media/v4l2-core/v4l2-ioctl.c | 5 +---- include/uapi/linux/videodev2.h | 11 ++++++++-- 7 files changed, 59 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/Documentation/userspace-api/media/v4l/vidioc-create-bufs.rst b/Documentation/userspace-api/media/v4l/vidioc-create-bufs.rst index e1afc5b504c2..f2a702870fad 100644 --- a/Documentation/userspace-api/media/v4l/vidioc-create-bufs.rst +++ b/Documentation/userspace-api/media/v4l/vidioc-create-bufs.rst @@ -121,7 +121,12 @@ than the number requested. other changes, then set ``count`` to 0, ``memory`` to ``V4L2_MEMORY_MMAP`` and ``format.type`` to the buffer type. * - __u32 - - ``reserved``\ [7] + - ``flags`` + - Specifies additional buffer management attributes. + See :ref:`memory-flags`. + + * - __u32 + - ``reserved``\ [6] - A place holder for future extensions. Drivers and applications must set the array to zero. diff --git a/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst b/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst index 96a59793d857..75d894d9c36c 100644 --- a/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst +++ b/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst @@ -112,10 +112,17 @@ aborting or finishing any DMA in progress, an implicit ``V4L2_MEMORY_MMAP`` and ``type`` set to the buffer type. This will free any previously allocated buffers, so this is typically something that will be done at the start of the application. + * - union { + - (anonymous) + * - __u32 + - ``flags`` + - Specifies additional buffer management attributes. + See :ref:`memory-flags`. * - __u32 - ``reserved``\ [1] - - A place holder for future extensions. Drivers and applications - must set the array to zero. + - Kept for backwards compatibility. Use ``flags`` instead. + * - } + - .. 
tabularcolumns:: |p{6.1cm}|p{2.2cm}|p{8.7cm}| diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c index 0fdcf90330df..626c4db5134c 100644 --- a/drivers/media/common/videobuf2/videobuf2-core.c +++ b/drivers/media/common/videobuf2/videobuf2-core.c @@ -694,6 +694,9 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, unsigned int i; int ret; + if (flags & V4L2_FLAG_MEMORY_NON_CONSISTENT) + consistent_mem = false; + if (q->streaming) { dprintk(1, "streaming active\n"); return -EBUSY; @@ -837,6 +840,9 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, bool consistent_mem = true; int ret; + if (flags & V4L2_FLAG_MEMORY_NON_CONSISTENT) + consistent_mem = false; + if (q->num_buffers == VB2_MAX_FRAME) { dprintk(1, "maximum number of buffers already allocated\n"); return -ENOBUFS; diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c index 26a3ec333bb7..559a229cac41 100644 --- a/drivers/media/common/videobuf2/videobuf2-v4l2.c +++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c @@ -718,12 +718,22 @@ static void fill_buf_caps(struct vb2_queue *q, u32 *caps) #endif } +static void clear_consistency_attr(struct vb2_queue *q, + int memory, + unsigned int *flags) +{ + if (!q->allow_cache_hints || memory != V4L2_MEMORY_MMAP) + *flags &= ~V4L2_FLAG_MEMORY_NON_CONSISTENT; +} + int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) { int ret = vb2_verify_memory_type(q, req->memory, req->type); fill_buf_caps(q, &req->capabilities); - return ret ? ret : vb2_core_reqbufs(q, req->memory, 0, &req->count); + clear_consistency_attr(q, req->memory, &req->flags); + return ret ? ret : vb2_core_reqbufs(q, req->memory, + req->flags, &req->count); } EXPORT_SYMBOL_GPL(vb2_reqbufs); @@ -755,6 +765,7 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create) unsigned i; fill_buf_caps(q, &create->capabilities); + clear_consistency_attr(q, create->memory, &create->flags); create->index = q->num_buffers; if (create->count == 0) return ret != -EBUSY ? ret : 0; @@ -797,8 +808,11 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create) for (i = 0; i < requested_planes; i++) if (requested_sizes[i] == 0) return -EINVAL; - return ret ? ret : vb2_core_create_bufs(q, create->memory, 0, - &create->count, requested_planes, requested_sizes); + return ret ? ret : vb2_core_create_bufs(q, create->memory, + create->flags, + &create->count, + requested_planes, + requested_sizes); } EXPORT_SYMBOL_GPL(vb2_create_bufs); @@ -969,11 +983,12 @@ int vb2_ioctl_reqbufs(struct file *file, void *priv, int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type); fill_buf_caps(vdev->queue, &p->capabilities); + clear_consistency_attr(vdev->queue, p->memory, &p->flags); if (res) return res; if (vb2_queue_is_busy(vdev, file)) return -EBUSY; - res = vb2_core_reqbufs(vdev->queue, p->memory, 0, &p->count); + res = vb2_core_reqbufs(vdev->queue, p->memory, p->flags, &p->count); /* If count == 0, then the owner has released all buffers and he is no longer owner of the queue. Otherwise we have a new owner. */ if (res == 0) @@ -991,6 +1006,7 @@ int vb2_ioctl_create_bufs(struct file *file, void *priv, p->index = vdev->queue->num_buffers; fill_buf_caps(vdev->queue, &p->capabilities); + clear_consistency_attr(vdev->queue, p->memory, &p->flags); /* * If count == 0, then just check if memory and type are valid. 
* Any -EBUSY result from vb2_verify_memory_type can be mapped to 0. diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index a99e82ec9ab6..593bcf6c3735 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -246,6 +246,9 @@ struct v4l2_format32 { * @memory: buffer memory type * @format: frame format, for which buffers are requested * @capabilities: capabilities of this buffer type. + * @flags: additional buffer management attributes (ignored unless the + * queue has V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS capability and + * configured for MMAP streaming I/O). * @reserved: future extensions */ struct v4l2_create_buffers32 { @@ -254,7 +257,8 @@ struct v4l2_create_buffers32 { __u32 memory; /* enum v4l2_memory */ struct v4l2_format32 format; __u32 capabilities; - __u32 reserved[7]; + __u32 flags; + __u32 reserved[6]; }; static int __bufsize_v4l2_format(struct v4l2_format32 __user *p32, u32 *size) @@ -355,7 +359,8 @@ static int get_v4l2_create32(struct v4l2_create_buffers __user *p64, { if (!access_ok(p32, sizeof(*p32)) || copy_in_user(p64, p32, - offsetof(struct v4l2_create_buffers32, format))) + offsetof(struct v4l2_create_buffers32, format)) || + assign_in_user(&p64->flags, &p32->flags)) return -EFAULT; return __get_v4l2_format32(&p64->format, &p32->format, aux_buf, aux_space); @@ -417,6 +422,7 @@ static int put_v4l2_create32(struct v4l2_create_buffers __user *p64, copy_in_user(p32, p64, offsetof(struct v4l2_create_buffers32, format)) || assign_in_user(&p32->capabilities, &p64->capabilities) || + assign_in_user(&p32->flags, &p64->flags) || copy_in_user(p32->reserved, p64->reserved, sizeof(p64->reserved))) return -EFAULT; return __put_v4l2_format32(&p64->format, &p32->format); diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 2322f08a98be..02bfef0da76d 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -2038,9 +2038,6 @@ static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops, if (ret) return ret; - - CLEAR_AFTER_FIELD(p, capabilities); - return ops->vidioc_reqbufs(file, fh, p); } @@ -2080,7 +2077,7 @@ static int v4l_create_bufs(const struct v4l2_ioctl_ops *ops, if (ret) return ret; - CLEAR_AFTER_FIELD(create, capabilities); + CLEAR_AFTER_FIELD(create, flags); v4l_sanitize_format(&create->format); diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 34ba1017b89b..fec2607a07e3 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -946,7 +946,10 @@ struct v4l2_requestbuffers { __u32 type; /* enum v4l2_buf_type */ __u32 memory; /* enum v4l2_memory */ __u32 capabilities; - __u32 reserved[1]; + union { + __u32 flags; + __u32 reserved[1]; + }; }; /* capabilities for struct v4l2_requestbuffers and v4l2_create_buffers */ @@ -2450,6 +2453,9 @@ struct v4l2_dbg_chip_info { * @memory: enum v4l2_memory; buffer memory type * @format: frame format, for which buffers are requested * @capabilities: capabilities of this buffer type. + * @flags: additional buffer management attributes (ignored unless the + * queue has V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS capability + * and configured for MMAP streaming I/O). 
* @reserved: future extensions */ struct v4l2_create_buffers { @@ -2458,7 +2464,8 @@ struct v4l2_create_buffers { __u32 memory; struct v4l2_format format; __u32 capabilities; - __u32 reserved[7]; + __u32 flags; + __u32 reserved[6]; }; /* -- cgit v1.2.3 From 286cf7d3a99e1ca8c1d8e674b9a98f2dbe8520dc Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Tue, 26 May 2020 10:59:53 +0200 Subject: media: videodev2.h: add V4L2_FMT_FLAG_ENC_CAP_FRAME_INTERVAL flag Add the V4L2_FMT_FLAG_ENC_CAP_FRAME_INTERVAL flag to signal that the coded frame interval can be set separately from the raw frame interval for stateful encoders. Signed-off-by: Hans Verkuil Reviewed-by: Michael Tretter Acked-by: Tomasz Figa Signed-off-by: Mauro Carvalho Chehab --- .../userspace-api/media/v4l/vidioc-enum-fmt.rst | 30 ++++++++++++++++++---- .../userspace-api/media/videodev2.h.rst.exceptions | 1 + include/uapi/linux/videodev2.h | 1 + 3 files changed, 27 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/Documentation/userspace-api/media/v4l/vidioc-enum-fmt.rst b/Documentation/userspace-api/media/v4l/vidioc-enum-fmt.rst index a53dd3d7f7e2..05835e04c20b 100644 --- a/Documentation/userspace-api/media/v4l/vidioc-enum-fmt.rst +++ b/Documentation/userspace-api/media/v4l/vidioc-enum-fmt.rst @@ -167,17 +167,37 @@ the ``mbus_code`` field is handled differently: - The hardware decoder for this compressed bytestream format (aka coded format) is capable of parsing a continuous bytestream. Applications do not need to parse the bytestream themselves to find the boundaries - between frames/fields. This flag can only be used in combination with - the ``V4L2_FMT_FLAG_COMPRESSED`` flag, since this applies to compressed + between frames/fields. + + This flag can only be used in combination with the + ``V4L2_FMT_FLAG_COMPRESSED`` flag, since this applies to compressed formats only. This flag is valid for stateful decoders only. * - ``V4L2_FMT_FLAG_DYN_RESOLUTION`` - 0x0008 - Dynamic resolution switching is supported by the device for this compressed bytestream format (aka coded format). It will notify the user via the event ``V4L2_EVENT_SOURCE_CHANGE`` when changes in the video - parameters are detected. This flag can only be used in combination - with the ``V4L2_FMT_FLAG_COMPRESSED`` flag, since this applies to - compressed formats only. It is also only applies to stateful codecs. + parameters are detected. + + This flag can only be used in combination with the + ``V4L2_FMT_FLAG_COMPRESSED`` flag, since this applies to + compressed formats only. This flag is valid for stateful codecs only. + * - ``V4L2_FMT_FLAG_ENC_CAP_FRAME_INTERVAL`` + - 0x0010 + - The hardware encoder supports setting the ``CAPTURE`` coded frame + interval separately from the ``OUTPUT`` raw frame interval. + Setting the ``OUTPUT`` raw frame interval with :ref:`VIDIOC_S_PARM ` + also sets the ``CAPTURE`` coded frame interval to the same value. + If this flag is set, then the ``CAPTURE`` coded frame interval can be + set to a different value afterwards. This is typically used for + offline encoding where the ``OUTPUT`` raw frame interval is used as + a hint for reserving hardware encoder resources and the ``CAPTURE`` coded + frame interval is the actual frame rate embedded in the encoded video + stream. + + This flag can only be used in combination with the + ``V4L2_FMT_FLAG_COMPRESSED`` flag, since this applies to + compressed formats only. This flag is valid for stateful encoders only. 
Return Value diff --git a/Documentation/userspace-api/media/videodev2.h.rst.exceptions b/Documentation/userspace-api/media/videodev2.h.rst.exceptions index a625fb90e3a9..ca05e4e126b2 100644 --- a/Documentation/userspace-api/media/videodev2.h.rst.exceptions +++ b/Documentation/userspace-api/media/videodev2.h.rst.exceptions @@ -187,6 +187,7 @@ replace define V4L2_FMT_FLAG_COMPRESSED fmtdesc-flags replace define V4L2_FMT_FLAG_EMULATED fmtdesc-flags replace define V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM fmtdesc-flags replace define V4L2_FMT_FLAG_DYN_RESOLUTION fmtdesc-flags +replace define V4L2_FMT_FLAG_ENC_CAP_FRAME_INTERVAL fmtdesc-flags # V4L2 timecode types replace define V4L2_TC_TYPE_24FPS timecode-type diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index fec2607a07e3..303805438814 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -794,6 +794,7 @@ struct v4l2_fmtdesc { #define V4L2_FMT_FLAG_EMULATED 0x0002 #define V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM 0x0004 #define V4L2_FMT_FLAG_DYN_RESOLUTION 0x0008 +#define V4L2_FMT_FLAG_ENC_CAP_FRAME_INTERVAL 0x0010 /* Frame Size and frame rate enumeration */ /* -- cgit v1.2.3 From 809b1b04df898b6d182069146231a3cbf5f2d9cc Mon Sep 17 00:00:00 2001 From: Robin Gong Date: Wed, 17 Jun 2020 06:42:08 +0800 Subject: spi: introduce fallback to pio Add fallback to pio mode in case dma transfer failed with error status SPI_TRANS_FAIL_NO_START. If spi client driver want to enable this feature please set xfer->error in the proper place such as dmaengine_prep_slave_sg() failure detect(but no any data put into spi bus yet). Besides, add master->fallback checking in its can_dma() so that spi core could switch to pio next time. Please refer to spi-imx.c. Signed-off-by: Robin Gong Link: https://lore.kernel.org/r/1592347329-28363-2-git-send-email-yibin.gong@nxp.com Signed-off-by: Mark Brown --- drivers/spi/spi.c | 12 ++++++++++++ include/linux/spi/spi.h | 7 +++++++ 2 files changed, 19 insertions(+) (limited to 'include') diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 8158e281f354..6fa56590bba2 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -982,6 +982,8 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg) spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); } + ctlr->cur_msg_mapped = false; + return 0; } #else /* !CONFIG_HAS_DMA */ @@ -1234,8 +1236,17 @@ static int spi_transfer_one_message(struct spi_controller *ctlr, if (xfer->tx_buf || xfer->rx_buf) { reinit_completion(&ctlr->xfer_completion); +fallback_pio: ret = ctlr->transfer_one(ctlr, msg->spi, xfer); if (ret < 0) { + if (ctlr->cur_msg_mapped && + (xfer->error & SPI_TRANS_FAIL_NO_START)) { + __spi_unmap_msg(ctlr, msg); + ctlr->fallback = true; + xfer->error &= ~SPI_TRANS_FAIL_NO_START; + goto fallback_pio; + } + SPI_STATISTICS_INCREMENT_FIELD(statm, errors); SPI_STATISTICS_INCREMENT_FIELD(stats, @@ -1693,6 +1704,7 @@ void spi_finalize_current_message(struct spi_controller *ctlr) spin_lock_irqsave(&ctlr->queue_lock, flags); ctlr->cur_msg = NULL; ctlr->cur_msg_prepared = false; + ctlr->fallback = false; kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); spin_unlock_irqrestore(&ctlr->queue_lock, flags); diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index aac57b5b7c21..b4917df79637 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -447,6 +447,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * If the driver does not set this, the SPI core 
takes the snapshot as * close to the driver hand-over as possible. * @irq_flags: Interrupt enable state during PTP system timestamping + * @fallback: fallback to pio if dma transfer return failure with + * SPI_TRANS_FAIL_NO_START. * * Each SPI controller can communicate with one or more @spi_device * children. These make a small bus, sharing MOSI, MISO and SCK signals @@ -602,6 +604,7 @@ struct spi_controller { bool auto_runtime_pm; bool cur_msg_prepared; bool cur_msg_mapped; + bool fallback; struct completion xfer_completion; size_t max_dma_len; @@ -847,6 +850,7 @@ extern void spi_res_release(struct spi_controller *ctlr, * back unset and they need the better resolution. * @timestamped_post: See above. The reason why both exist is that these * booleans are also used to keep state in the core SPI logic. + * @error: Error status logged by spi controller driver. * * SPI transfers always write the same number of bytes as they read. * Protocol drivers should always provide @rx_buf and/or @tx_buf. @@ -940,6 +944,9 @@ struct spi_transfer { bool timestamped; struct list_head transfer_list; + +#define SPI_TRANS_FAIL_NO_START BIT(0) + u16 error; }; /** -- cgit v1.2.3 From 8dd65ed67e1679830dc8f94169d25ec9452da99d Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Sat, 20 Jun 2020 12:16:40 +0200 Subject: media: cec: remove unused waitq and phys_addrs fields The cec_adapter struct contained a waitq field that isn't used anywhere, so drop this. It also contained a phys_addrs array to store any reported physical addresses. However, this was never actually used, so this field is removed as well. The original idea was to let the core keep track of this information, but nothing was ever done with this. Should this be needed in the future then it is easy enough to resurrect this. 
Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/cec/core/cec-adap.c | 4 ---- drivers/media/cec/core/cec-core.c | 1 - include/media/cec.h | 2 -- 3 files changed, 7 deletions(-) (limited to 'include') diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c index 6a04d19a96b2..4efe8014445e 100644 --- a/drivers/media/cec/core/cec-adap.c +++ b/drivers/media/cec/core/cec-adap.c @@ -1306,7 +1306,6 @@ static int cec_config_log_addr(struct cec_adapter *adap, las->log_addr[idx] = log_addr; las->log_addr_mask |= 1 << log_addr; - adap->phys_addrs[log_addr] = adap->phys_addr; return 1; } @@ -1324,7 +1323,6 @@ static void cec_adap_unconfigure(struct cec_adapter *adap) adap->log_addrs.log_addr_mask = 0; adap->is_configuring = false; adap->is_configured = false; - memset(adap->phys_addrs, 0xff, sizeof(adap->phys_addrs)); cec_flush(adap); wake_up_interruptible(&adap->kthread_waitq); cec_post_state_event(adap); @@ -1974,8 +1972,6 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg, case CEC_MSG_REPORT_PHYSICAL_ADDR: { u16 pa = (msg->msg[2] << 8) | msg->msg[3]; - if (!from_unregistered) - adap->phys_addrs[init_laddr] = pa; dprintk(1, "reported physical address %x.%x.%x.%x for logical address %d\n", cec_phys_addr_exp(pa), init_laddr); break; diff --git a/drivers/media/cec/core/cec-core.c b/drivers/media/cec/core/cec-core.c index 0c52e1bb3910..c599cd94dd62 100644 --- a/drivers/media/cec/core/cec-core.c +++ b/drivers/media/cec/core/cec-core.c @@ -265,7 +265,6 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops, adap->sequence = 0; adap->ops = ops; adap->priv = priv; - memset(adap->phys_addrs, 0xff, sizeof(adap->phys_addrs)); mutex_init(&adap->lock); INIT_LIST_HEAD(&adap->transmit_queue); INIT_LIST_HEAD(&adap->wait_queue); diff --git a/include/media/cec.h b/include/media/cec.h index 972bc8cd4384..1de44a4fc390 100644 --- a/include/media/cec.h +++ b/include/media/cec.h @@ -162,7 +162,6 @@ struct cec_adapter { struct task_struct *kthread; wait_queue_head_t kthread_waitq; - wait_queue_head_t waitq; const struct cec_adap_ops *ops; void *priv; @@ -197,7 +196,6 @@ struct cec_adapter { struct dentry *status_file; struct dentry *error_inj_file; - u16 phys_addrs[15]; u32 sequence; char input_phys[32]; -- cgit v1.2.3 From e233f81cfc906e415e5784526d9a6ec7d7cf3c5c Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Sat, 20 Jun 2020 12:16:41 +0200 Subject: media: media/cec.h: document cec_adapter fields Document this core CEC structure. 
Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/media/cec.h | 49 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) (limited to 'include') diff --git a/include/media/cec.h b/include/media/cec.h index 1de44a4fc390..32f7c695d7b5 100644 --- a/include/media/cec.h +++ b/include/media/cec.h @@ -144,6 +144,55 @@ struct cec_adap_ops { */ #define CEC_MAX_MSG_TX_QUEUE_SZ (18 * 1) +/** + * struct cec_adapter - cec adapter structure + * @owner: module owner + * @name: name of the CEC adapter + * @devnode: device node for the /dev/cecX device + * @lock: mutex controlling access to this structure + * @rc: remote control device + * @transmit_queue: queue of pending transmits + * @transmit_queue_sz: number of pending transmits + * @wait_queue: queue of transmits waiting for a reply + * @transmitting: CEC messages currently being transmitted + * @transmit_in_progress: true if a transmit is in progress + * @kthread_config: kthread used to configure a CEC adapter + * @config_completion: used to signal completion of the config kthread + * @kthread: main CEC processing thread + * @kthread_waitq: main CEC processing wait_queue + * @ops: cec adapter ops + * @priv: cec driver's private data + * @capabilities: cec adapter capabilities + * @available_log_addrs: maximum number of available logical addresses + * @phys_addr: the current physical address + * @needs_hpd: if true, then the HDMI HotPlug Detect pin must be high + * in order to transmit or receive CEC messages. This is usually a HW + * limitation. + * @is_configuring: the CEC adapter is configuring (i.e. claiming LAs) + * @is_configured: the CEC adapter is configured (i.e. has claimed LAs) + * @cec_pin_is_high: if true then the CEC pin is high. Only used with the + * CEC pin framework. + * @last_initiator: the initiator of the last transmitted message. + * @monitor_all_cnt: number of filehandles monitoring all msgs + * @monitor_pin_cnt: number of filehandles monitoring pin changes + * @follower_cnt: number of filehandles in follower mode + * @cec_follower: filehandle of the exclusive follower + * @cec_initiator: filehandle of the exclusive initiator + * @passthrough: if true, then the exclusive follower is in + * passthrough mode. + * @log_addrs: current logical addresses + * @conn_info: current connector info + * @tx_timeouts: number of transmit timeouts + * @notifier: CEC notifier + * @pin: CEC pin status struct + * @cec_dir: debugfs cec directory + * @status_file: debugfs cec status file + * @error_inj_file: debugfs cec error injection file + * @sequence: transmit sequence counter + * @input_phys: remote control input_phys name + * + * This structure represents a cec adapter. + */ struct cec_adapter { struct module *owner; char name[32]; -- cgit v1.2.3 From d63cc24933c774ea464090af1998a7b63f11c166 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Wed, 8 Apr 2020 12:42:09 +0300 Subject: net/mlx5: Export resource dump interface Export some of the resource dump API. mlx5_ib driver will use it in downstream patches. 
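A hedged sketch of what a consumer of the newly exported interface might look like (the downstream mlx5_ib user is not in this series). It assumes mlx5_rsc_dump_cmd_create() returns an ERR_PTR on failure and that a positive return from mlx5_rsc_dump_next() means more data remains; the consume() callback is a placeholder.

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/rsc_dump.h>

/* Walk a resource dump in page-sized chunks until no more data remains. */
static int dump_one_resource(struct mlx5_core_dev *mdev,
			     struct mlx5_rsc_key *key,
			     void (*consume)(void *data, int size))
{
	struct mlx5_rsc_dump_cmd *cmd;
	struct page *page;
	int size, err;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	cmd = mlx5_rsc_dump_cmd_create(mdev, key);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto free_page;
	}

	do {
		err = mlx5_rsc_dump_next(mdev, cmd, page, &size);
		if (err < 0)
			break;
		consume(page_address(page), size);
	} while (err > 0);	/* assumed: > 0 means more segments to fetch */

	mlx5_rsc_dump_cmd_destroy(cmd);
free_page:
	__free_page(page);
	return err < 0 ? err : 0;
}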
Signed-off-by: Maor Gottlieb Signed-off-by: Leon Romanovsky --- .../ethernet/mellanox/mlx5/core/diag/rsc_dump.c | 3 ++ .../ethernet/mellanox/mlx5/core/diag/rsc_dump.h | 33 +-------------- include/linux/mlx5/rsc_dump.h | 48 ++++++++++++++++++++++ 3 files changed, 52 insertions(+), 32 deletions(-) create mode 100644 include/linux/mlx5/rsc_dump.h (limited to 'include') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c index 17ab7efe693d..10218c2324cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c @@ -130,11 +130,13 @@ struct mlx5_rsc_dump_cmd *mlx5_rsc_dump_cmd_create(struct mlx5_core_dev *dev, cmd->mem_size = key->size; return cmd; } +EXPORT_SYMBOL(mlx5_rsc_dump_cmd_create); void mlx5_rsc_dump_cmd_destroy(struct mlx5_rsc_dump_cmd *cmd) { kfree(cmd); } +EXPORT_SYMBOL(mlx5_rsc_dump_cmd_destroy); int mlx5_rsc_dump_next(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd, struct page *page, int *size) @@ -155,6 +157,7 @@ int mlx5_rsc_dump_next(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd, return more_dump; } +EXPORT_SYMBOL(mlx5_rsc_dump_next); #define MLX5_RSC_DUMP_MENU_SEGMENT 0xffff static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h index 148270073e71..64c4956db6d2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h @@ -4,41 +4,10 @@ #ifndef __MLX5_RSC_DUMP_H #define __MLX5_RSC_DUMP_H +#include #include #include "mlx5_core.h" -enum mlx5_sgmt_type { - MLX5_SGMT_TYPE_HW_CQPC, - MLX5_SGMT_TYPE_HW_SQPC, - MLX5_SGMT_TYPE_HW_RQPC, - MLX5_SGMT_TYPE_FULL_SRQC, - MLX5_SGMT_TYPE_FULL_CQC, - MLX5_SGMT_TYPE_FULL_EQC, - MLX5_SGMT_TYPE_FULL_QPC, - MLX5_SGMT_TYPE_SND_BUFF, - MLX5_SGMT_TYPE_RCV_BUFF, - MLX5_SGMT_TYPE_SRQ_BUFF, - MLX5_SGMT_TYPE_CQ_BUFF, - MLX5_SGMT_TYPE_EQ_BUFF, - MLX5_SGMT_TYPE_SX_SLICE, - MLX5_SGMT_TYPE_SX_SLICE_ALL, - MLX5_SGMT_TYPE_RDB, - MLX5_SGMT_TYPE_RX_SLICE_ALL, - MLX5_SGMT_TYPE_MENU, - MLX5_SGMT_TYPE_TERMINATE, - - MLX5_SGMT_TYPE_NUM, /* Keep last */ -}; - -struct mlx5_rsc_key { - enum mlx5_sgmt_type rsc; - int index1; - int index2; - int num_of_obj1; - int num_of_obj2; - int size; -}; - #define MLX5_RSC_DUMP_ALL 0xFFFF struct mlx5_rsc_dump_cmd; struct mlx5_rsc_dump; diff --git a/include/linux/mlx5/rsc_dump.h b/include/linux/mlx5/rsc_dump.h new file mode 100644 index 000000000000..87415fa754fe --- /dev/null +++ b/include/linux/mlx5/rsc_dump.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020 Mellanox Technologies inc. 
*/ + +#include + +#ifndef __MLX5_RSC_DUMP +#define __MLX5_RSC_DUMP + +enum mlx5_sgmt_type { + MLX5_SGMT_TYPE_HW_CQPC, + MLX5_SGMT_TYPE_HW_SQPC, + MLX5_SGMT_TYPE_HW_RQPC, + MLX5_SGMT_TYPE_FULL_SRQC, + MLX5_SGMT_TYPE_FULL_CQC, + MLX5_SGMT_TYPE_FULL_EQC, + MLX5_SGMT_TYPE_FULL_QPC, + MLX5_SGMT_TYPE_SND_BUFF, + MLX5_SGMT_TYPE_RCV_BUFF, + MLX5_SGMT_TYPE_SRQ_BUFF, + MLX5_SGMT_TYPE_CQ_BUFF, + MLX5_SGMT_TYPE_EQ_BUFF, + MLX5_SGMT_TYPE_SX_SLICE, + MLX5_SGMT_TYPE_SX_SLICE_ALL, + MLX5_SGMT_TYPE_RDB, + MLX5_SGMT_TYPE_RX_SLICE_ALL, + MLX5_SGMT_TYPE_MENU, + MLX5_SGMT_TYPE_TERMINATE, + + MLX5_SGMT_TYPE_NUM, /* Keep last */ +}; + +struct mlx5_rsc_key { + enum mlx5_sgmt_type rsc; + int index1; + int index2; + int num_of_obj1; + int num_of_obj2; + int size; +}; + +struct mlx5_rsc_dump_cmd; + +struct mlx5_rsc_dump_cmd *mlx5_rsc_dump_cmd_create(struct mlx5_core_dev *dev, + struct mlx5_rsc_key *key); +void mlx5_rsc_dump_cmd_destroy(struct mlx5_rsc_dump_cmd *cmd); +int mlx5_rsc_dump_next(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd, + struct page *page, int *size); +#endif /* __MLX5_RSC_DUMP */ -- cgit v1.2.3 From 608ca553c9a2008908120e0e45b1cfc4aefcfd49 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Wed, 8 Apr 2020 12:36:20 +0300 Subject: net/mlx5: Add support in query QP, CQ and MKEY segments Introduce new resource dump segments - PRM_QUERY_QP, PRM_QUERY_CQ and PRM_QUERY_MKEY. These segments contains the resource dump in PRM query format. Signed-off-by: Maor Gottlieb Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c | 3 +++ include/linux/mlx5/rsc_dump.h | 3 +++ 2 files changed, 6 insertions(+) (limited to 'include') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c index 10218c2324cc..4924a5658853 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c @@ -23,6 +23,9 @@ static const char *const mlx5_rsc_sgmt_name[] = { MLX5_SGMT_STR_ASSING(SX_SLICE_ALL), MLX5_SGMT_STR_ASSING(RDB), MLX5_SGMT_STR_ASSING(RX_SLICE_ALL), + MLX5_SGMT_STR_ASSING(PRM_QUERY_QP), + MLX5_SGMT_STR_ASSING(PRM_QUERY_CQ), + MLX5_SGMT_STR_ASSING(PRM_QUERY_MKEY), }; struct mlx5_rsc_dump { diff --git a/include/linux/mlx5/rsc_dump.h b/include/linux/mlx5/rsc_dump.h index 87415fa754fe..d11c0b228620 100644 --- a/include/linux/mlx5/rsc_dump.h +++ b/include/linux/mlx5/rsc_dump.h @@ -23,6 +23,9 @@ enum mlx5_sgmt_type { MLX5_SGMT_TYPE_SX_SLICE_ALL, MLX5_SGMT_TYPE_RDB, MLX5_SGMT_TYPE_RX_SLICE_ALL, + MLX5_SGMT_TYPE_PRM_QUERY_QP, + MLX5_SGMT_TYPE_PRM_QUERY_CQ, + MLX5_SGMT_TYPE_PRM_QUERY_MKEY, MLX5_SGMT_TYPE_MENU, MLX5_SGMT_TYPE_TERMINATE, -- cgit v1.2.3 From f4434529003522d72b314d26d65b18c06ea9307c Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Tue, 23 Jun 2020 14:30:36 +0300 Subject: RDMA: Add dedicated MR resource tracker function In order to avoid double multiplexing of the resource when it is a MR, add a dedicated callback function. 
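A hedged sketch of a driver wiring up the new dedicated hook (the my_mr wrapper and attribute names are hypothetical): the callback now receives the ib_mr directly instead of demultiplexing a generic rdma_restrack_entry, and it nests its attributes under RDMA_NLDEV_ATTR_DRIVER as the existing restrack fillers do.

#include <net/netlink.h>
#include <rdma/ib_verbs.h>
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_netlink.h>

struct my_mr {
	struct ib_mr ibmr;
	u32 page_size;
};

static int my_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
{
	struct my_mr *mr = container_of(ibmr, struct my_mr, ibmr);
	struct nlattr *table_attr;

	table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32(msg, "lkey", ibmr->lkey))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "page_size", mr->page_size))
		goto err;

	nla_nest_end(msg, table_attr);
	return 0;

err:
	nla_nest_cancel(msg, table_attr);
	return -EMSGSIZE;
}

static const struct ib_device_ops my_dev_ops = {
	.fill_res_mr_entry = my_fill_res_mr_entry,
};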
Link: https://lore.kernel.org/r/20200623113043.1228482-5-leon@kernel.org Signed-off-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/device.c | 3 ++- drivers/infiniband/core/nldev.c | 18 ++++-------------- drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 1 + drivers/infiniband/hw/cxgb4/provider.c | 1 + drivers/infiniband/hw/cxgb4/restrack.c | 5 +---- drivers/infiniband/hw/mlx5/main.c | 4 ++-- drivers/infiniband/hw/mlx5/mlx5_ib.h | 6 ++---- drivers/infiniband/hw/mlx5/restrack.c | 28 ++++------------------------ include/rdma/ib_verbs.h | 4 ++-- 9 files changed, 19 insertions(+), 51 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 905a2beaf885..ffdf9787e7f6 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -2618,7 +2618,8 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops) SET_DEVICE_OP(dev_ops, drain_sq); SET_DEVICE_OP(dev_ops, enable_driver); SET_DEVICE_OP(dev_ops, fill_res_entry); - SET_DEVICE_OP(dev_ops, fill_stat_entry); + SET_DEVICE_OP(dev_ops, fill_res_mr_entry); + SET_DEVICE_OP(dev_ops, fill_stat_mr_entry); SET_DEVICE_OP(dev_ops, get_dev_fw_str); SET_DEVICE_OP(dev_ops, get_dma_mr); SET_DEVICE_OP(dev_ops, get_hw_stats); diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 8548f09746ab..a4f3f838d6fe 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -454,14 +454,6 @@ static bool fill_res_entry(struct ib_device *dev, struct sk_buff *msg, return dev->ops.fill_res_entry(msg, res); } -static bool fill_stat_entry(struct ib_device *dev, struct sk_buff *msg, - struct rdma_restrack_entry *res) -{ - if (!dev->ops.fill_stat_entry) - return false; - return dev->ops.fill_stat_entry(msg, res); -} - static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { @@ -641,9 +633,8 @@ static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, if (fill_res_name_pid(msg, res)) goto err; - if (fill_res_entry(dev, msg, res)) - goto err; - + if (dev->ops.fill_res_mr_entry) + return dev->ops.fill_res_mr_entry(msg, mr); return 0; err: return -EMSGSIZE; @@ -786,9 +777,8 @@ static int fill_stat_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id)) goto err; - if (fill_stat_entry(dev, msg, res)) - goto err; - + if (dev->ops.fill_stat_mr_entry) + return dev->ops.fill_stat_mr_entry(msg, mr); return 0; err: diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index e8e11bd95e42..5b9884ca2f5e 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -1055,6 +1055,7 @@ struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp); typedef int c4iw_restrack_func(struct sk_buff *msg, struct rdma_restrack_entry *res); +int c4iw_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr); extern c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX]; #endif diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index ba83d942997c..36eeb595d41c 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -486,6 +486,7 @@ static const struct ib_device_ops c4iw_dev_ops = { .destroy_qp = c4iw_destroy_qp, .destroy_srq = c4iw_destroy_srq, .fill_res_entry = fill_res_entry, + .fill_res_mr_entry = 
c4iw_fill_res_mr_entry, .get_dev_fw_str = get_dev_fw_str, .get_dma_mr = c4iw_get_dma_mr, .get_hw_stats = c4iw_get_mib, diff --git a/drivers/infiniband/hw/cxgb4/restrack.c b/drivers/infiniband/hw/cxgb4/restrack.c index f82d46ed969d..9a5ca9192c1c 100644 --- a/drivers/infiniband/hw/cxgb4/restrack.c +++ b/drivers/infiniband/hw/cxgb4/restrack.c @@ -433,10 +433,8 @@ err: return -EMSGSIZE; } -static int fill_res_mr_entry(struct sk_buff *msg, - struct rdma_restrack_entry *res) +int c4iw_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr) { - struct ib_mr *ibmr = container_of(res, struct ib_mr, res); struct c4iw_mr *mhp = to_c4iw_mr(ibmr); struct c4iw_dev *dev = mhp->rhp; u32 stag = mhp->attr.stag; @@ -497,5 +495,4 @@ c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX] = { [RDMA_RESTRACK_QP] = fill_res_qp_entry, [RDMA_RESTRACK_CM_ID] = fill_res_ep_entry, [RDMA_RESTRACK_CQ] = fill_res_cq_entry, - [RDMA_RESTRACK_MR] = fill_res_mr_entry, }; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 343a8b8361e7..fa9237a10ed8 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -6598,8 +6598,8 @@ static const struct ib_device_ops mlx5_ib_dev_ops = { .drain_rq = mlx5_ib_drain_rq, .drain_sq = mlx5_ib_drain_sq, .enable_driver = mlx5_ib_enable_driver, - .fill_res_entry = mlx5_ib_fill_res_entry, - .fill_stat_entry = mlx5_ib_fill_stat_entry, + .fill_res_mr_entry = mlx5_ib_fill_res_mr_entry, + .fill_stat_mr_entry = mlx5_ib_fill_stat_mr_entry, .get_dev_fw_str = get_dev_fw_str, .get_dma_mr = mlx5_ib_get_dma_mr, .get_link_layer = mlx5_ib_port_link_layer, diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 5dbe3eb0d9cb..37ead14eb317 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -1375,10 +1375,8 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev, u8 *native_port_num); void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev, u8 port_num); -int mlx5_ib_fill_res_entry(struct sk_buff *msg, - struct rdma_restrack_entry *res); -int mlx5_ib_fill_stat_entry(struct sk_buff *msg, - struct rdma_restrack_entry *res); +int mlx5_ib_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr); +int mlx5_ib_fill_stat_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr); extern const struct uapi_definition mlx5_ib_devx_defs[]; extern const struct uapi_definition mlx5_ib_flow_defs[]; diff --git a/drivers/infiniband/hw/mlx5/restrack.c b/drivers/infiniband/hw/mlx5/restrack.c index 8f6c04f12531..598a09796d09 100644 --- a/drivers/infiniband/hw/mlx5/restrack.c +++ b/drivers/infiniband/hw/mlx5/restrack.c @@ -8,10 +8,9 @@ #include #include "mlx5_ib.h" -static int fill_stat_mr_entry(struct sk_buff *msg, - struct rdma_restrack_entry *res) +int mlx5_ib_fill_stat_mr_entry(struct sk_buff *msg, + struct ib_mr *ibmr) { - struct ib_mr *ibmr = container_of(res, struct ib_mr, res); struct mlx5_ib_mr *mr = to_mmr(ibmr); struct nlattr *table_attr; @@ -41,10 +40,9 @@ err: return -EMSGSIZE; } -static int fill_res_mr_entry(struct sk_buff *msg, - struct rdma_restrack_entry *res) +int mlx5_ib_fill_res_mr_entry(struct sk_buff *msg, + struct ib_mr *ibmr) { - struct ib_mr *ibmr = container_of(res, struct ib_mr, res); struct mlx5_ib_mr *mr = to_mmr(ibmr); struct nlattr *table_attr; @@ -70,21 +68,3 @@ err: nla_nest_cancel(msg, table_attr); return -EMSGSIZE; } - -int mlx5_ib_fill_res_entry(struct sk_buff *msg, - struct rdma_restrack_entry *res) -{ - if (res->type 
== RDMA_RESTRACK_MR) - return fill_res_mr_entry(msg, res); - - return 0; -} - -int mlx5_ib_fill_stat_entry(struct sk_buff *msg, - struct rdma_restrack_entry *res) -{ - if (res->type == RDMA_RESTRACK_MR) - return fill_stat_mr_entry(msg, res); - - return 0; -} diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index ef2f3986c493..117a0e802aa1 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2583,6 +2583,7 @@ struct ib_device_ops { */ int (*fill_res_entry)(struct sk_buff *msg, struct rdma_restrack_entry *entry); + int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr); /* Device lifecycle callbacks */ /* @@ -2637,8 +2638,7 @@ struct ib_device_ops { * Allows rdma drivers to add their own restrack attributes * dumped via 'rdma stat' iproute2 command. */ - int (*fill_stat_entry)(struct sk_buff *msg, - struct rdma_restrack_entry *entry); + int (*fill_stat_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr); DECLARE_RDMA_OBJ_SIZE(ib_ah); DECLARE_RDMA_OBJ_SIZE(ib_cq); -- cgit v1.2.3 From 9e2a187a93c395f573ed38b888ba4bd731e70622 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Tue, 23 Jun 2020 14:30:37 +0300 Subject: RDMA: Add a dedicated CQ resource tracker function In order to avoid double multiplexing of the resource when it is a CQ, add a dedicated callback function. Link: https://lore.kernel.org/r/20200623113043.1228482-6-leon@kernel.org Signed-off-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/device.c | 1 + drivers/infiniband/core/nldev.c | 5 ++--- drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 1 + drivers/infiniband/hw/cxgb4/provider.c | 1 + drivers/infiniband/hw/cxgb4/restrack.c | 5 +---- drivers/infiniband/hw/hns/hns_roce_device.h | 4 ++-- drivers/infiniband/hw/hns/hns_roce_main.c | 2 +- drivers/infiniband/hw/hns/hns_roce_restrack.c | 14 ++------------ include/rdma/ib_verbs.h | 1 + 9 files changed, 12 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index ffdf9787e7f6..9eeac8cb600e 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -2617,6 +2617,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops) SET_DEVICE_OP(dev_ops, drain_rq); SET_DEVICE_OP(dev_ops, drain_sq); SET_DEVICE_OP(dev_ops, enable_driver); + SET_DEVICE_OP(dev_ops, fill_res_cq_entry); SET_DEVICE_OP(dev_ops, fill_res_entry); SET_DEVICE_OP(dev_ops, fill_res_mr_entry); SET_DEVICE_OP(dev_ops, fill_stat_mr_entry); diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index a4f3f838d6fe..707f724db1dd 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -598,9 +598,8 @@ static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin, if (fill_res_name_pid(msg, res)) goto err; - if (fill_res_entry(dev, msg, res)) - goto err; - + if (dev->ops.fill_res_cq_entry) + return dev->ops.fill_res_cq_entry(msg, cq); return 0; err: return -EMSGSIZE; diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 5b9884ca2f5e..18a2c1a44dcc 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -1056,6 +1056,7 @@ struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp); typedef int c4iw_restrack_func(struct sk_buff *msg, struct rdma_restrack_entry *res); int c4iw_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr); +int c4iw_fill_res_cq_entry(struct 
sk_buff *msg, struct ib_cq *ibcq); extern c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX]; #endif diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 36eeb595d41c..d6b20aa314a0 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -485,6 +485,7 @@ static const struct ib_device_ops c4iw_dev_ops = { .destroy_cq = c4iw_destroy_cq, .destroy_qp = c4iw_destroy_qp, .destroy_srq = c4iw_destroy_srq, + .fill_res_cq_entry = c4iw_fill_res_cq_entry, .fill_res_entry = fill_res_entry, .fill_res_mr_entry = c4iw_fill_res_mr_entry, .get_dev_fw_str = get_dev_fw_str, diff --git a/drivers/infiniband/hw/cxgb4/restrack.c b/drivers/infiniband/hw/cxgb4/restrack.c index 9a5ca9192c1c..ead2cd08793d 100644 --- a/drivers/infiniband/hw/cxgb4/restrack.c +++ b/drivers/infiniband/hw/cxgb4/restrack.c @@ -372,10 +372,8 @@ err: return -EMSGSIZE; } -static int fill_res_cq_entry(struct sk_buff *msg, - struct rdma_restrack_entry *res) +int c4iw_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ibcq) { - struct ib_cq *ibcq = container_of(res, struct ib_cq, res); struct c4iw_cq *chp = to_c4iw_cq(ibcq); struct nlattr *table_attr; struct t4_cqe hwcqes[2]; @@ -494,5 +492,4 @@ err: c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX] = { [RDMA_RESTRACK_QP] = fill_res_qp_entry, [RDMA_RESTRACK_CM_ID] = fill_res_ep_entry, - [RDMA_RESTRACK_CQ] = fill_res_cq_entry, }; diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index a77fa6730b2d..a61f0c4d4dbb 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -1266,6 +1266,6 @@ void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev); int hns_roce_init(struct hns_roce_dev *hr_dev); void hns_roce_exit(struct hns_roce_dev *hr_dev); -int hns_roce_fill_res_entry(struct sk_buff *msg, - struct rdma_restrack_entry *res); +int hns_roce_fill_res_cq_entry(struct sk_buff *msg, + struct ib_cq *ib_cq); #endif /* _HNS_ROCE_DEVICE_H */ diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 50763cf4fa3d..5907cfd878a6 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -428,7 +428,7 @@ static const struct ib_device_ops hns_roce_dev_ops = { .destroy_ah = hns_roce_destroy_ah, .destroy_cq = hns_roce_destroy_cq, .disassociate_ucontext = hns_roce_disassociate_ucontext, - .fill_res_entry = hns_roce_fill_res_entry, + .fill_res_cq_entry = hns_roce_fill_res_cq_entry, .get_dma_mr = hns_roce_get_dma_mr, .get_link_layer = hns_roce_get_link_layer, .get_port_immutable = hns_roce_port_immutable, diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c index 06871731ac43..259444c0a630 100644 --- a/drivers/infiniband/hw/hns/hns_roce_restrack.c +++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c @@ -76,10 +76,9 @@ err: return -EMSGSIZE; } -static int hns_roce_fill_res_cq_entry(struct sk_buff *msg, - struct rdma_restrack_entry *res) +int hns_roce_fill_res_cq_entry(struct sk_buff *msg, + struct ib_cq *ib_cq) { - struct ib_cq *ib_cq = container_of(res, struct ib_cq, res); struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device); struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq); struct hns_roce_v2_cq_context *context; @@ -119,12 +118,3 @@ err: kfree(context); return ret; } - -int hns_roce_fill_res_entry(struct sk_buff *msg, - struct rdma_restrack_entry *res) -{ - if 
(res->type == RDMA_RESTRACK_CQ) - return hns_roce_fill_res_cq_entry(msg, res); - - return 0; -} diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 117a0e802aa1..097b1d497d5f 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2584,6 +2584,7 @@ struct ib_device_ops { int (*fill_res_entry)(struct sk_buff *msg, struct rdma_restrack_entry *entry); int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr); + int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq); /* Device lifecycle callbacks */ /* -- cgit v1.2.3 From 5cc34116ccec60032dbaa92768f41e95ce2d8ec7 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Tue, 23 Jun 2020 14:30:38 +0300 Subject: RDMA: Add dedicated QP resource tracker function In order to avoid double multiplexing of the resource when it is a QP, add a dedicated callback function. Link: https://lore.kernel.org/r/20200623113043.1228482-7-leon@kernel.org Signed-off-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/device.c | 1 + drivers/infiniband/core/nldev.c | 5 ++--- drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 1 + drivers/infiniband/hw/cxgb4/restrack.c | 5 +---- include/rdma/ib_verbs.h | 1 + 5 files changed, 6 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 9eeac8cb600e..f94989274df5 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -2620,6 +2620,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops) SET_DEVICE_OP(dev_ops, fill_res_cq_entry); SET_DEVICE_OP(dev_ops, fill_res_entry); SET_DEVICE_OP(dev_ops, fill_res_mr_entry); + SET_DEVICE_OP(dev_ops, fill_res_qp_entry); SET_DEVICE_OP(dev_ops, fill_stat_mr_entry); SET_DEVICE_OP(dev_ops, get_dev_fw_str); SET_DEVICE_OP(dev_ops, get_dma_mr); diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 707f724db1dd..79d0980a75e0 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -507,9 +507,8 @@ static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, if (fill_res_name_pid(msg, res)) goto err; - if (fill_res_entry(dev, msg, res)) - goto err; - + if (dev->ops.fill_res_qp_entry) + return dev->ops.fill_res_qp_entry(msg, qp); return 0; err: return -EMSGSIZE; diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 18a2c1a44dcc..c84aa7c937f1 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -1057,6 +1057,7 @@ typedef int c4iw_restrack_func(struct sk_buff *msg, struct rdma_restrack_entry *res); int c4iw_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr); int c4iw_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ibcq); +int c4iw_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ibqp); extern c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX]; #endif diff --git a/drivers/infiniband/hw/cxgb4/restrack.c b/drivers/infiniband/hw/cxgb4/restrack.c index ead2cd08793d..5144d3b67293 100644 --- a/drivers/infiniband/hw/cxgb4/restrack.c +++ b/drivers/infiniband/hw/cxgb4/restrack.c @@ -134,10 +134,8 @@ err: return -EMSGSIZE; } -static int fill_res_qp_entry(struct sk_buff *msg, - struct rdma_restrack_entry *res) +int c4iw_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ibqp) { - struct ib_qp *ibqp = container_of(res, struct ib_qp, res); struct t4_swsqe *fsp = NULL, *lsp = NULL; struct c4iw_qp 
*qhp = to_c4iw_qp(ibqp); u16 first_sq_idx = 0, last_sq_idx = 0; @@ -490,6 +488,5 @@ err: } c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX] = { - [RDMA_RESTRACK_QP] = fill_res_qp_entry, [RDMA_RESTRACK_CM_ID] = fill_res_ep_entry, }; diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 097b1d497d5f..4e8519ac7363 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2585,6 +2585,7 @@ struct ib_device_ops { struct rdma_restrack_entry *entry); int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr); int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq); + int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp); /* Device lifecycle callbacks */ /* -- cgit v1.2.3 From 211cd9459fdabe9f556e539966f50825853bf262 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Tue, 23 Jun 2020 14:30:39 +0300 Subject: RDMA: Add dedicated CM_ID resource tracker function In order to avoid double multiplexing of the resource when it is a cm id, add a dedicated callback function. In addition remove fill_res_entry which is not used anymore. Link: https://lore.kernel.org/r/20200623113043.1228482-8-leon@kernel.org Signed-off-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/device.c | 2 +- drivers/infiniband/core/nldev.c | 13 ++----------- drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 4 +--- drivers/infiniband/hw/cxgb4/provider.c | 9 +-------- drivers/infiniband/hw/cxgb4/restrack.c | 9 ++------- include/rdma/ib_verbs.h | 4 ++-- 6 files changed, 9 insertions(+), 32 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index f94989274df5..cbe95e729cf1 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -2617,8 +2617,8 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops) SET_DEVICE_OP(dev_ops, drain_rq); SET_DEVICE_OP(dev_ops, drain_sq); SET_DEVICE_OP(dev_ops, enable_driver); + SET_DEVICE_OP(dev_ops, fill_res_cm_id_entry); SET_DEVICE_OP(dev_ops, fill_res_cq_entry); - SET_DEVICE_OP(dev_ops, fill_res_entry); SET_DEVICE_OP(dev_ops, fill_res_mr_entry); SET_DEVICE_OP(dev_ops, fill_res_qp_entry); SET_DEVICE_OP(dev_ops, fill_stat_mr_entry); diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 79d0980a75e0..394e307c342c 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -446,14 +446,6 @@ static int fill_res_name_pid(struct sk_buff *msg, return err ? 
-EMSGSIZE : 0; } -static bool fill_res_entry(struct ib_device *dev, struct sk_buff *msg, - struct rdma_restrack_entry *res) -{ - if (!dev->ops.fill_res_entry) - return false; - return dev->ops.fill_res_entry(msg, res); -} - static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { @@ -559,9 +551,8 @@ static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin, if (fill_res_name_pid(msg, res)) goto err; - if (fill_res_entry(dev, msg, res)) - goto err; - + if (dev->ops.fill_res_cm_id_entry) + return dev->ops.fill_res_cm_id_entry(msg, cm_id); return 0; err: return -EMSGSIZE; diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index c84aa7c937f1..27da0705c88a 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -1053,11 +1053,9 @@ int c4iw_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr); struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp); -typedef int c4iw_restrack_func(struct sk_buff *msg, - struct rdma_restrack_entry *res); int c4iw_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr); int c4iw_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ibcq); int c4iw_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ibqp); -extern c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX]; +int c4iw_fill_res_cm_id_entry(struct sk_buff *msg, struct rdma_cm_id *cm_id); #endif diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index d6b20aa314a0..1d3ff59e4060 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -458,13 +458,6 @@ static void get_dev_fw_str(struct ib_device *dev, char *str) FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers)); } -static int fill_res_entry(struct sk_buff *msg, struct rdma_restrack_entry *res) -{ - return (res->type < ARRAY_SIZE(c4iw_restrack_funcs) && - c4iw_restrack_funcs[res->type]) ? 
- c4iw_restrack_funcs[res->type](msg, res) : 0; -} - static const struct ib_device_ops c4iw_dev_ops = { .owner = THIS_MODULE, .driver_id = RDMA_DRIVER_CXGB4, @@ -486,7 +479,7 @@ static const struct ib_device_ops c4iw_dev_ops = { .destroy_qp = c4iw_destroy_qp, .destroy_srq = c4iw_destroy_srq, .fill_res_cq_entry = c4iw_fill_res_cq_entry, - .fill_res_entry = fill_res_entry, + .fill_res_cm_id_entry = c4iw_fill_res_cm_id_entry, .fill_res_mr_entry = c4iw_fill_res_mr_entry, .get_dev_fw_str = get_dev_fw_str, .get_dma_mr = c4iw_get_dma_mr, diff --git a/drivers/infiniband/hw/cxgb4/restrack.c b/drivers/infiniband/hw/cxgb4/restrack.c index 5144d3b67293..b32e6516d65f 100644 --- a/drivers/infiniband/hw/cxgb4/restrack.c +++ b/drivers/infiniband/hw/cxgb4/restrack.c @@ -193,10 +193,9 @@ union union_ep { struct c4iw_ep ep; }; -static int fill_res_ep_entry(struct sk_buff *msg, - struct rdma_restrack_entry *res) +int c4iw_fill_res_cm_id_entry(struct sk_buff *msg, + struct rdma_cm_id *cm_id) { - struct rdma_cm_id *cm_id = rdma_res_to_id(res); struct nlattr *table_attr; struct c4iw_ep_common *epcp; struct c4iw_listen_ep *listen_ep = NULL; @@ -486,7 +485,3 @@ err_cancel_table: err: return -EMSGSIZE; } - -c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX] = { - [RDMA_RESTRACK_CM_ID] = fill_res_ep_entry, -}; diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 4e8519ac7363..9127cffafccd 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -75,6 +75,7 @@ struct ib_umem_odp; struct ib_uqp_object; struct ib_usrq_object; struct ib_uwq_object; +struct rdma_cm_id; extern struct workqueue_struct *ib_wq; extern struct workqueue_struct *ib_comp_wq; @@ -2581,11 +2582,10 @@ struct ib_device_ops { /** * Allows rdma drivers to add their own restrack attributes. */ - int (*fill_res_entry)(struct sk_buff *msg, - struct rdma_restrack_entry *entry); int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr); int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq); int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp); + int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id); /* Device lifecycle callbacks */ /* -- cgit v1.2.3 From 79a28ddd18e9c653f13f60dfabee15c024e64b9b Mon Sep 17 00:00:00 2001 From: Alexandre Cassen Date: Tue, 23 Jun 2020 10:33:45 +0200 Subject: rtnetlink: add keepalived rtm_protocol Keepalived can set global static ip routes or virtual ip routes dynamically following VRRP protocol states. Using a dedicated rtm_protocol will help keeping track of it. Changes in v2: - fix tab/space indenting Signed-off-by: Alexandre Cassen Signed-off-by: David S. 
Miller --- include/uapi/linux/rtnetlink.h | 45 +++++++++++++++++++++--------------------- 1 file changed, 23 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 073e71ef6bdd..879e64950a0a 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -257,12 +257,12 @@ enum { /* rtm_protocol */ -#define RTPROT_UNSPEC 0 -#define RTPROT_REDIRECT 1 /* Route installed by ICMP redirects; - not used by current IPv4 */ -#define RTPROT_KERNEL 2 /* Route installed by kernel */ -#define RTPROT_BOOT 3 /* Route installed during boot */ -#define RTPROT_STATIC 4 /* Route installed by administrator */ +#define RTPROT_UNSPEC 0 +#define RTPROT_REDIRECT 1 /* Route installed by ICMP redirects; + not used by current IPv4 */ +#define RTPROT_KERNEL 2 /* Route installed by kernel */ +#define RTPROT_BOOT 3 /* Route installed during boot */ +#define RTPROT_STATIC 4 /* Route installed by administrator */ /* Values of protocol >= RTPROT_STATIC are not interpreted by kernel; they are just passed from user and back as is. @@ -271,22 +271,23 @@ enum { avoid conflicts. */ -#define RTPROT_GATED 8 /* Apparently, GateD */ -#define RTPROT_RA 9 /* RDISC/ND router advertisements */ -#define RTPROT_MRT 10 /* Merit MRT */ -#define RTPROT_ZEBRA 11 /* Zebra */ -#define RTPROT_BIRD 12 /* BIRD */ -#define RTPROT_DNROUTED 13 /* DECnet routing daemon */ -#define RTPROT_XORP 14 /* XORP */ -#define RTPROT_NTK 15 /* Netsukuku */ -#define RTPROT_DHCP 16 /* DHCP client */ -#define RTPROT_MROUTED 17 /* Multicast daemon */ -#define RTPROT_BABEL 42 /* Babel daemon */ -#define RTPROT_BGP 186 /* BGP Routes */ -#define RTPROT_ISIS 187 /* ISIS Routes */ -#define RTPROT_OSPF 188 /* OSPF Routes */ -#define RTPROT_RIP 189 /* RIP Routes */ -#define RTPROT_EIGRP 192 /* EIGRP Routes */ +#define RTPROT_GATED 8 /* Apparently, GateD */ +#define RTPROT_RA 9 /* RDISC/ND router advertisements */ +#define RTPROT_MRT 10 /* Merit MRT */ +#define RTPROT_ZEBRA 11 /* Zebra */ +#define RTPROT_BIRD 12 /* BIRD */ +#define RTPROT_DNROUTED 13 /* DECnet routing daemon */ +#define RTPROT_XORP 14 /* XORP */ +#define RTPROT_NTK 15 /* Netsukuku */ +#define RTPROT_DHCP 16 /* DHCP client */ +#define RTPROT_MROUTED 17 /* Multicast daemon */ +#define RTPROT_KEEPALIVED 18 /* Keepalived daemon */ +#define RTPROT_BABEL 42 /* Babel daemon */ +#define RTPROT_BGP 186 /* BGP Routes */ +#define RTPROT_ISIS 187 /* ISIS Routes */ +#define RTPROT_OSPF 188 /* OSPF Routes */ +#define RTPROT_RIP 189 /* RIP Routes */ +#define RTPROT_EIGRP 192 /* EIGRP Routes */ /* rtm_scope -- cgit v1.2.3 From bdb7b79b4ce864a724250e1d35948c46f135de36 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 22 Jun 2020 20:22:21 -0700 Subject: bpf: Switch most helper return values from 32-bit int to 64-bit long Switch most of BPF helper definitions from returning int to long. These definitions are coming from comments in BPF UAPI header and are used to generate bpf_helper_defs.h (under libbpf) to be later included and used from BPF programs. In actual in-kernel implementation, all the helpers are defined as returning u64, but due to some historical reasons, most of them are actually defined as returning int in UAPI (usually, to return 0 on success, and negative value on error). 
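For illustration, the generated declaration for one helper would change roughly as follows (format approximated from libbpf's generated bpf_helper_defs.h; shown only as a before/after sketch, not taken from the patch itself):

/* generated from the old "int bpf_map_update_elem(...)" UAPI comment */
static int (*bpf_map_update_elem)(void *map, const void *key,
				  const void *value, __u64 flags) = (void *) 2;

/* generated from the new "long bpf_map_update_elem(...)" UAPI comment */
static long (*bpf_map_update_elem)(void *map, const void *key,
				   const void *value, __u64 flags) = (void *) 2;

With the old comments, BPF programs therefore compile against a 32-bit int return type.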
This actually causes Clang to quite often generate sub-optimal code, because compiler believes that return value is 32-bit, and in a lot of cases has to be up-converted (usually with a pair of 32-bit bit shifts) to 64-bit values, before they can be used further in BPF code. Besides just "polluting" the code, these 32-bit shifts quite often cause problems for cases in which return value matters. This is especially the case for the family of bpf_probe_read_str() functions. There are few other similar helpers (e.g., bpf_read_branch_records()), in which return value is used by BPF program logic to record variable-length data and process it. For such cases, BPF program logic carefully manages offsets within some array or map to read variable-length data. For such uses, it's crucial for BPF verifier to track possible range of register values to prove that all the accesses happen within given memory bounds. Those extraneous zero-extending bit shifts, inserted by Clang (and quite often interleaved with other code, which makes the issues even more challenging and sometimes requires employing extra per-variable compiler barriers), throws off verifier logic and makes it mark registers as having unknown variable offset. We'll study this pattern a bit later below. Another common pattern is to check return of BPF helper for non-zero state to detect error conditions and attempt alternative actions in such case. Even in this simple and straightforward case, this 32-bit vs BPF's native 64-bit mode quite often leads to sub-optimal and unnecessary extra code. We'll look at this pattern as well. Clang's BPF target supports two modes of code generation: ALU32, in which it is capable of using lower 32-bit parts of registers, and no-ALU32, in which only full 64-bit registers are being used. ALU32 mode somewhat mitigates the above described problems, but not in all cases. This patch switches all the cases in which BPF helpers return 0 or negative error from returning int to returning long. It is shown below that such change in definition leads to equivalent or better code. No-ALU32 mode benefits more, but ALU32 mode doesn't degrade or still gets improved code generation. Another class of cases switched from int to long are bpf_probe_read_str()-like helpers, which encode successful case as non-negative values, while still returning negative value for errors. In all of such cases, correctness is preserved due to two's complement encoding of negative values and the fact that all helpers return values with 32-bit absolute value. Two's complement ensures that for negative values higher 32 bits are all ones and when truncated, leave valid negative 32-bit value with the same value. Non-negative values have upper 32 bits set to zero and similarly preserve value when high 32 bits are truncated. This means that just casting to int/u32 is correct and efficient (and in ALU32 mode doesn't require any extra shifts). To minimize the chances of regressions, two code patterns were investigated, as mentioned above. For both patterns, BPF assembly was analyzed in ALU32/NO-ALU32 compiler modes, both with current 32-bit int return type and new 64-bit long return type. Case 1. Variable-length data reading and concatenation. This is quite ubiquitous pattern in tracing/monitoring applications, reading data like process's environment variables, file path, etc. 
In such case, many pieces of string-like variable-length data are read into a single big buffer, and at the end of the process, only a part of array containing actual data is sent to user-space for further processing. This case is tested in test_varlen.c selftest (in the next patch). Code flow is roughly as follows: void *payload = &sample->payload; u64 len; len = bpf_probe_read_kernel_str(payload, MAX_SZ1, &source_data1); if (len <= MAX_SZ1) { payload += len; sample->len1 = len; } len = bpf_probe_read_kernel_str(payload, MAX_SZ2, &source_data2); if (len <= MAX_SZ2) { payload += len; sample->len2 = len; } /* and so on */ sample->total_len = payload - &sample->payload; /* send over, e.g., perf buffer */ There could be two variations with slightly different code generated: when len is 64-bit integer and when it is 32-bit integer. Both variations were analysed. BPF assembly instructions between two successive invocations of bpf_probe_read_kernel_str() were used to check code regressions. Results are below, followed by short analysis. Left side is using helpers with int return type, the right one is after the switch to long. ALU32 + INT ALU32 + LONG =========== ============ 64-BIT (13 insns): 64-BIT (10 insns): ------------------------------------ ------------------------------------ 17: call 115 17: call 115 18: if w0 > 256 goto +9 18: if r0 > 256 goto +6 19: w1 = w0 19: r1 = 0 ll 20: r1 <<= 32 21: *(u64 *)(r1 + 0) = r0 21: r1 s>>= 32 22: r6 = 0 ll 22: r2 = 0 ll 24: r6 += r0 24: *(u64 *)(r2 + 0) = r1 00000000000000c8 : 25: r6 = 0 ll 25: r1 = r6 27: r6 += r1 26: w2 = 256 00000000000000e0 : 27: r3 = 0 ll 28: r1 = r6 29: call 115 29: w2 = 256 30: r3 = 0 ll 32: call 115 32-BIT (11 insns): 32-BIT (12 insns): ------------------------------------ ------------------------------------ 17: call 115 17: call 115 18: if w0 > 256 goto +7 18: if w0 > 256 goto +8 19: r1 = 0 ll 19: r1 = 0 ll 21: *(u32 *)(r1 + 0) = r0 21: *(u32 *)(r1 + 0) = r0 22: w1 = w0 22: r0 <<= 32 23: r6 = 0 ll 23: r0 >>= 32 25: r6 += r1 24: r6 = 0 ll 00000000000000d0 : 26: r6 += r0 26: r1 = r6 00000000000000d8 : 27: w2 = 256 27: r1 = r6 28: r3 = 0 ll 28: w2 = 256 30: call 115 29: r3 = 0 ll 31: call 115 In ALU32 mode, the variant using 64-bit length variable clearly wins and avoids unnecessary zero-extension bit shifts. In practice, this is even more important and good, because BPF code won't need to do extra checks to "prove" that payload/len are within good bounds. 32-bit len is one instruction longer. Clang decided to do 64-to-32 casting with two bit shifts, instead of equivalent `w1 = w0` assignment. The former uses extra register. The latter might potentially lose some range information, but not for 32-bit value. So in this case, verifier infers that r0 is [0, 256] after check at 18:, and shifting 32 bits left/right keeps that range intact. We should probably look into Clang's logic and see why it chooses bitshifts over sub-register assignments for this. 
NO-ALU32 + INT NO-ALU32 + LONG ============== =============== 64-BIT (14 insns): 64-BIT (10 insns): ------------------------------------ ------------------------------------ 17: call 115 17: call 115 18: r0 <<= 32 18: if r0 > 256 goto +6 19: r1 = r0 19: r1 = 0 ll 20: r1 >>= 32 21: *(u64 *)(r1 + 0) = r0 21: if r1 > 256 goto +7 22: r6 = 0 ll 22: r0 s>>= 32 24: r6 += r0 23: r1 = 0 ll 00000000000000c8 : 25: *(u64 *)(r1 + 0) = r0 25: r1 = r6 26: r6 = 0 ll 26: r2 = 256 28: r6 += r0 27: r3 = 0 ll 00000000000000e8 : 29: call 115 29: r1 = r6 30: r2 = 256 31: r3 = 0 ll 33: call 115 32-BIT (13 insns): 32-BIT (13 insns): ------------------------------------ ------------------------------------ 17: call 115 17: call 115 18: r1 = r0 18: r1 = r0 19: r1 <<= 32 19: r1 <<= 32 20: r1 >>= 32 20: r1 >>= 32 21: if r1 > 256 goto +6 21: if r1 > 256 goto +6 22: r2 = 0 ll 22: r2 = 0 ll 24: *(u32 *)(r2 + 0) = r0 24: *(u32 *)(r2 + 0) = r0 25: r6 = 0 ll 25: r6 = 0 ll 27: r6 += r1 27: r6 += r1 00000000000000e0 : 00000000000000e0 : 28: r1 = r6 28: r1 = r6 29: r2 = 256 29: r2 = 256 30: r3 = 0 ll 30: r3 = 0 ll 32: call 115 32: call 115 In NO-ALU32 mode, for the case of 64-bit len variable, Clang generates much superior code, as expected, eliminating unnecessary bit shifts. For 32-bit len, code is identical. So overall, only ALU-32 32-bit len case is more-or-less equivalent and the difference stems from internal Clang decision, rather than compiler lacking enough information about types. Case 2. Let's look at the simpler case of checking return result of BPF helper for errors. The code is very simple: long bla; if (bpf_probe_read_kenerl(&bla, sizeof(bla), 0)) return 1; else return 0; ALU32 + CHECK (9 insns) ALU32 + CHECK (9 insns) ==================================== ==================================== 0: r1 = r10 0: r1 = r10 1: r1 += -8 1: r1 += -8 2: w2 = 8 2: w2 = 8 3: r3 = 0 3: r3 = 0 4: call 113 4: call 113 5: w1 = w0 5: r1 = r0 6: w0 = 1 6: w0 = 1 7: if w1 != 0 goto +1 7: if r1 != 0 goto +1 8: w0 = 0 8: w0 = 0 0000000000000048 : 0000000000000048 : 9: exit 9: exit Almost identical code, the only difference is the use of full register assignment (r1 = r0) vs half-registers (w1 = w0) in instruction #5. On 32-bit architectures, new BPF assembly might be slightly less optimal, in theory. But one can argue that's not a big issue, given that use of full registers is still prevalent (e.g., for parameter passing). NO-ALU32 + CHECK (11 insns) NO-ALU32 + CHECK (9 insns) ==================================== ==================================== 0: r1 = r10 0: r1 = r10 1: r1 += -8 1: r1 += -8 2: r2 = 8 2: r2 = 8 3: r3 = 0 3: r3 = 0 4: call 113 4: call 113 5: r1 = r0 5: r1 = r0 6: r1 <<= 32 6: r0 = 1 7: r1 >>= 32 7: if r1 != 0 goto +1 8: r0 = 1 8: r0 = 0 9: if r1 != 0 goto +1 0000000000000048 : 10: r0 = 0 9: exit 0000000000000058 : 11: exit NO-ALU32 is a clear improvement, getting rid of unnecessary zero-extension bit shifts. 
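Putting the recommendation into practice, here is a minimal BPF-side sketch of the Case 1 pattern (hypothetical section name, struct layout and source address; it assumes libbpf's bpf_helpers.h and the post-patch, long-returning helper definitions): keeping the helper result in a 64-bit variable lets the verifier carry the [0, MAX_SZ] bound from the bounds check straight into the later buffer accesses, with no zero-extension shifts in between.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define MAX_SZ 256

struct sample {
	__u64 len;		/* 64-bit length, as recommended above */
	char payload[MAX_SZ];
};

SEC("raw_tp/sys_enter")
int read_one_str(void *ctx)
{
	struct sample s = {};
	const void *src = NULL;	/* placeholder kernel address to read from */
	long len;		/* matches the new (long) helper return type */

	len = bpf_probe_read_kernel_str(s.payload, MAX_SZ, src);
	if (len < 0 || len > MAX_SZ)
		return 0;	/* verifier now knows 0 <= len <= MAX_SZ */
	s.len = len;		/* safe: len is within the proven bound */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";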
Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200623032224.4020118-1-andriin@fb.com --- include/uapi/linux/bpf.h | 192 ++++++++++++++++++++--------------------- tools/include/uapi/linux/bpf.h | 192 ++++++++++++++++++++--------------------- 2 files changed, 192 insertions(+), 192 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 19684813faae..be0efee49093 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -653,7 +653,7 @@ union bpf_attr { * Map value associated to *key*, or **NULL** if no entry was * found. * - * int bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags) + * long bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags) * Description * Add or update the value of the entry associated to *key* in * *map* with *value*. *flags* is one of: @@ -671,13 +671,13 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_map_delete_elem(struct bpf_map *map, const void *key) + * long bpf_map_delete_elem(struct bpf_map *map, const void *key) * Description * Delete entry with *key* from *map*. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr) * Description * For tracing programs, safely attempt to read *size* bytes from * kernel space address *unsafe_ptr* and store the data in *dst*. @@ -695,7 +695,7 @@ union bpf_attr { * Return * Current *ktime*. * - * int bpf_trace_printk(const char *fmt, u32 fmt_size, ...) + * long bpf_trace_printk(const char *fmt, u32 fmt_size, ...) * Description * This helper is a "printk()-like" facility for debugging. It * prints a message defined by format *fmt* (of size *fmt_size*) @@ -775,7 +775,7 @@ union bpf_attr { * Return * The SMP id of the processor running the program. * - * int bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) + * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) * Description * Store *len* bytes from address *from* into the packet * associated to *skb*, at *offset*. *flags* are a combination of @@ -792,7 +792,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size) + * long bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size) * Description * Recompute the layer 3 (e.g. IP) checksum for the packet * associated to *skb*. Computation is incremental, so the helper @@ -817,7 +817,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags) + * long bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags) * Description * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the * packet associated to *skb*. Computation is incremental, so the @@ -849,7 +849,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. 
* - * int bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index) + * long bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index) * Description * This special helper is used to trigger a "tail call", or in * other words, to jump into another eBPF program. The same stack @@ -880,7 +880,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags) + * long bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags) * Description * Clone and redirect the packet associated to *skb* to another * net device of index *ifindex*. Both ingress and egress @@ -916,7 +916,7 @@ union bpf_attr { * A 64-bit integer containing the current GID and UID, and * created as such: *current_gid* **<< 32 \|** *current_uid*. * - * int bpf_get_current_comm(void *buf, u32 size_of_buf) + * long bpf_get_current_comm(void *buf, u32 size_of_buf) * Description * Copy the **comm** attribute of the current task into *buf* of * *size_of_buf*. The **comm** attribute contains the name of @@ -953,7 +953,7 @@ union bpf_attr { * Return * The classid, or 0 for the default unconfigured classid. * - * int bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) + * long bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) * Description * Push a *vlan_tci* (VLAN tag control information) of protocol * *vlan_proto* to the packet associated to *skb*, then update @@ -969,7 +969,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_vlan_pop(struct sk_buff *skb) + * long bpf_skb_vlan_pop(struct sk_buff *skb) * Description * Pop a VLAN header from the packet associated to *skb*. * @@ -981,7 +981,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) + * long bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) * Description * Get tunnel metadata. This helper takes a pointer *key* to an * empty **struct bpf_tunnel_key** of **size**, that will be @@ -1032,7 +1032,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) + * long bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) * Description * Populate tunnel metadata for packet associated to *skb.* The * tunnel metadata is set to the contents of *key*, of *size*. The @@ -1098,7 +1098,7 @@ union bpf_attr { * The value of the perf event counter read from the map, or a * negative error code in case of failure. * - * int bpf_redirect(u32 ifindex, u64 flags) + * long bpf_redirect(u32 ifindex, u64 flags) * Description * Redirect the packet to another net device of index *ifindex*. * This helper is somewhat similar to **bpf_clone_redirect**\ @@ -1145,7 +1145,7 @@ union bpf_attr { * The realm of the route for the packet associated to *skb*, or 0 * if none was found. * - * int bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) + * long bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) * Description * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. 
This perf @@ -1190,7 +1190,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len) + * long bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len) * Description * This helper was provided as an easy way to load data from a * packet. It can be used to load *len* bytes from *offset* from @@ -1207,7 +1207,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags) + * long bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags) * Description * Walk a user or a kernel stack and return its id. To achieve * this, the helper needs *ctx*, which is a pointer to the context @@ -1276,7 +1276,7 @@ union bpf_attr { * The checksum result, or a negative error code in case of * failure. * - * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) + * long bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) * Description * Retrieve tunnel options metadata for the packet associated to * *skb*, and store the raw tunnel option data to the buffer *opt* @@ -1294,7 +1294,7 @@ union bpf_attr { * Return * The size of the option data retrieved. * - * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) + * long bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) * Description * Set tunnel options metadata for the packet associated to *skb* * to the option data contained in the raw buffer *opt* of *size*. @@ -1304,7 +1304,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags) + * long bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags) * Description * Change the protocol of the *skb* to *proto*. Currently * supported are transition from IPv4 to IPv6, and from IPv6 to @@ -1331,7 +1331,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_change_type(struct sk_buff *skb, u32 type) + * long bpf_skb_change_type(struct sk_buff *skb, u32 type) * Description * Change the packet type for the packet associated to *skb*. This * comes down to setting *skb*\ **->pkt_type** to *type*, except @@ -1358,7 +1358,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index) + * long bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index) * Description * Check whether *skb* is a descendant of the cgroup2 held by * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. @@ -1389,7 +1389,7 @@ union bpf_attr { * Return * A pointer to the current task struct. * - * int bpf_probe_write_user(void *dst, const void *src, u32 len) + * long bpf_probe_write_user(void *dst, const void *src, u32 len) * Description * Attempt in a safe way to write *len* bytes from the buffer * *src* to *dst* in memory. It only works for threads that are in @@ -1408,7 +1408,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_current_task_under_cgroup(struct bpf_map *map, u32 index) + * long bpf_current_task_under_cgroup(struct bpf_map *map, u32 index) * Description * Check whether the probe is being run is the context of a given * subset of the cgroup2 hierarchy. 
The cgroup2 to test is held by @@ -1420,7 +1420,7 @@ union bpf_attr { * * 1, if the *skb* task does not belong to the cgroup2. * * A negative error code, if an error occurred. * - * int bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) + * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) * Description * Resize (trim or grow) the packet associated to *skb* to the * new *len*. The *flags* are reserved for future usage, and must @@ -1444,7 +1444,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_pull_data(struct sk_buff *skb, u32 len) + * long bpf_skb_pull_data(struct sk_buff *skb, u32 len) * Description * Pull in non-linear data in case the *skb* is non-linear and not * all of *len* are part of the linear section. Make *len* bytes @@ -1500,7 +1500,7 @@ union bpf_attr { * recalculation the next time the kernel tries to access this * hash or when the **bpf_get_hash_recalc**\ () helper is called. * - * int bpf_get_numa_node_id(void) + * long bpf_get_numa_node_id(void) * Description * Return the id of the current NUMA node. The primary use case * for this helper is the selection of sockets for the local NUMA @@ -1511,7 +1511,7 @@ union bpf_attr { * Return * The id of current NUMA node. * - * int bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags) + * long bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags) * Description * Grows headroom of packet associated to *skb* and adjusts the * offset of the MAC header accordingly, adding *len* bytes of @@ -1532,7 +1532,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta) + * long bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta) * Description * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that * it is possible to use a negative value for *delta*. This helper @@ -1547,7 +1547,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr) * Description * Copy a NUL terminated string from an unsafe kernel address * *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for @@ -1595,14 +1595,14 @@ union bpf_attr { * is returned (note that **overflowuid** might also be the actual * UID value for the socket). * - * u32 bpf_set_hash(struct sk_buff *skb, u32 hash) + * long bpf_set_hash(struct sk_buff *skb, u32 hash) * Description * Set the full hash for *skb* (set the field *skb*\ **->hash**) * to value *hash*. * Return * 0 * - * int bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) + * long bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) * Description * Emulate a call to **setsockopt()** on the socket associated to * *bpf_socket*, which must be a full socket. The *level* at @@ -1630,7 +1630,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags) + * long bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags) * Description * Grow or shrink the room for data in the packet associated to * *skb* by *len_diff*, and according to the selected *mode*. @@ -1676,7 +1676,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. 
* - * int bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags) + * long bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags) * Description * Redirect the packet to the endpoint referenced by *map* at * index *key*. Depending on its type, this *map* can contain @@ -1697,7 +1697,7 @@ union bpf_attr { * **XDP_REDIRECT** on success, or the value of the two lower bits * of the *flags* argument on error. * - * int bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags) + * long bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags) * Description * Redirect the packet to the socket referenced by *map* (of type * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and @@ -1708,7 +1708,7 @@ union bpf_attr { * Return * **SK_PASS** on success, or **SK_DROP** on error. * - * int bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) + * long bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) * Description * Add an entry to, or update a *map* referencing sockets. The * *skops* is used as a new value for the entry associated to @@ -1727,7 +1727,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta) + * long bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta) * Description * Adjust the address pointed by *xdp_md*\ **->data_meta** by * *delta* (which can be positive or negative). Note that this @@ -1756,7 +1756,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size) + * long bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size) * Description * Read the value of a perf event counter, and store it into *buf* * of size *buf_size*. This helper relies on a *map* of type @@ -1806,7 +1806,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size) + * long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size) * Description * For en eBPF program attached to a perf event, retrieve the * value of the event counter associated to *ctx* and store it in @@ -1817,7 +1817,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) + * long bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) * Description * Emulate a call to **getsockopt()** on the socket associated to * *bpf_socket*, which must be a full socket. The *level* at @@ -1842,7 +1842,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_override_return(struct pt_regs *regs, u64 rc) + * long bpf_override_return(struct pt_regs *regs, u64 rc) * Description * Used for error injection, this helper uses kprobes to override * the return value of the probed function, and to set it to *rc*. 
@@ -1867,7 +1867,7 @@ union bpf_attr { * Return * 0 * - * int bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval) + * long bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval) * Description * Attempt to set the value of the **bpf_sock_ops_cb_flags** field * for the full TCP socket associated to *bpf_sock_ops* to @@ -1911,7 +1911,7 @@ union bpf_attr { * be set is returned (which comes down to 0 if all bits were set * as required). * - * int bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags) + * long bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags) * Description * This helper is used in programs implementing policies at the * socket level. If the message *msg* is allowed to pass (i.e. if @@ -1925,7 +1925,7 @@ union bpf_attr { * Return * **SK_PASS** on success, or **SK_DROP** on error. * - * int bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes) + * long bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes) * Description * For socket policies, apply the verdict of the eBPF program to * the next *bytes* (number of bytes) of message *msg*. @@ -1959,7 +1959,7 @@ union bpf_attr { * Return * 0 * - * int bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes) + * long bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes) * Description * For socket policies, prevent the execution of the verdict eBPF * program for message *msg* until *bytes* (byte number) have been @@ -1977,7 +1977,7 @@ union bpf_attr { * Return * 0 * - * int bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags) + * long bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags) * Description * For socket policies, pull in non-linear data from user space * for *msg* and set pointers *msg*\ **->data** and *msg*\ @@ -2008,7 +2008,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) + * long bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) * Description * Bind the socket associated to *ctx* to the address pointed by * *addr*, of length *addr_len*. This allows for making outgoing @@ -2026,7 +2026,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta) + * long bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta) * Description * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is * possible to both shrink and grow the packet tail. @@ -2040,7 +2040,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags) + * long bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags) * Description * Retrieve the XFRM state (IP transform framework, see also * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*. @@ -2056,7 +2056,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags) + * long bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags) * Description * Return a user or a kernel stack in bpf program provided buffer. 
* To achieve this, the helper needs *ctx*, which is a pointer @@ -2089,7 +2089,7 @@ union bpf_attr { * A non-negative value equal to or less than *size* on success, * or a negative error in case of failure. * - * int bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header) + * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header) * Description * This helper is similar to **bpf_skb_load_bytes**\ () in that * it provides an easy way to load *len* bytes from *offset* @@ -2111,7 +2111,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags) + * long bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags) * Description * Do FIB lookup in kernel tables using parameters in *params*. * If lookup is successful and result shows packet is to be @@ -2142,7 +2142,7 @@ union bpf_attr { * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the * packet is not forwarded or needs assist from full stack * - * int bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) + * long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) * Description * Add an entry to, or update a sockhash *map* referencing sockets. * The *skops* is used as a new value for the entry associated to @@ -2161,7 +2161,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags) + * long bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags) * Description * This helper is used in programs implementing policies at the * socket level. If the message *msg* is allowed to pass (i.e. if @@ -2175,7 +2175,7 @@ union bpf_attr { * Return * **SK_PASS** on success, or **SK_DROP** on error. * - * int bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags) + * long bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags) * Description * This helper is used in programs implementing policies at the * skb socket level. If the sk_buff *skb* is allowed to pass (i.e. @@ -2189,7 +2189,7 @@ union bpf_attr { * Return * **SK_PASS** on success, or **SK_DROP** on error. * - * int bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) + * long bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) * Description * Encapsulate the packet associated to *skb* within a Layer 3 * protocol header. This header is provided in the buffer at @@ -2226,7 +2226,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len) + * long bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len) * Description * Store *len* bytes from address *from* into the packet * associated to *skb*, at *offset*. Only the flags, tag and TLVs @@ -2241,7 +2241,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. 
* - * int bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta) + * long bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta) * Description * Adjust the size allocated to TLVs in the outermost IPv6 * Segment Routing Header contained in the packet associated to @@ -2257,7 +2257,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len) + * long bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len) * Description * Apply an IPv6 Segment Routing action of type *action* to the * packet associated to *skb*. Each action takes a parameter @@ -2286,7 +2286,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_rc_repeat(void *ctx) + * long bpf_rc_repeat(void *ctx) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded repeat key message. This delays @@ -2305,7 +2305,7 @@ union bpf_attr { * Return * 0 * - * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) + * long bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded key press with *scancode*, @@ -2370,7 +2370,7 @@ union bpf_attr { * Return * A pointer to the local storage area. * - * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) + * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) * Description * Select a **SO_REUSEPORT** socket from a * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*. @@ -2471,7 +2471,7 @@ union bpf_attr { * result is from *reuse*\ **->socks**\ [] using the hash of the * tuple. * - * int bpf_sk_release(struct bpf_sock *sock) + * long bpf_sk_release(struct bpf_sock *sock) * Description * Release the reference held by *sock*. *sock* must be a * non-**NULL** pointer that was returned from @@ -2479,7 +2479,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) + * long bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) * Description * Push an element *value* in *map*. *flags* is one of: * @@ -2489,19 +2489,19 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_map_pop_elem(struct bpf_map *map, void *value) + * long bpf_map_pop_elem(struct bpf_map *map, void *value) * Description * Pop an element from *map*. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_map_peek_elem(struct bpf_map *map, void *value) + * long bpf_map_peek_elem(struct bpf_map *map, void *value) * Description * Get an element from *map* without removing it. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) + * long bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) * Description * For socket policies, insert *len* bytes into *msg* at offset * *start*. @@ -2517,7 +2517,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. 
* - * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) + * long bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) * Description * Will remove *len* bytes from a *msg* starting at byte *start*. * This may result in **ENOMEM** errors under certain situations if @@ -2529,7 +2529,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y) + * long bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded pointer movement. @@ -2543,7 +2543,7 @@ union bpf_attr { * Return * 0 * - * int bpf_spin_lock(struct bpf_spin_lock *lock) + * long bpf_spin_lock(struct bpf_spin_lock *lock) * Description * Acquire a spinlock represented by the pointer *lock*, which is * stored as part of a value of a map. Taking the lock allows to @@ -2591,7 +2591,7 @@ union bpf_attr { * Return * 0 * - * int bpf_spin_unlock(struct bpf_spin_lock *lock) + * long bpf_spin_unlock(struct bpf_spin_lock *lock) * Description * Release the *lock* previously locked by a call to * **bpf_spin_lock**\ (\ *lock*\ ). @@ -2614,7 +2614,7 @@ union bpf_attr { * A **struct bpf_tcp_sock** pointer on success, or **NULL** in * case of failure. * - * int bpf_skb_ecn_set_ce(struct sk_buff *skb) + * long bpf_skb_ecn_set_ce(struct sk_buff *skb) * Description * Set ECN (Explicit Congestion Notification) field of IP header * to **CE** (Congestion Encountered) if current value is **ECT** @@ -2651,7 +2651,7 @@ union bpf_attr { * result is from *reuse*\ **->socks**\ [] using the hash of the * tuple. * - * int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) + * long bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) * Description * Check whether *iph* and *th* contain a valid SYN cookie ACK for * the listening socket in *sk*. @@ -2666,7 +2666,7 @@ union bpf_attr { * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative * error otherwise. * - * int bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) + * long bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) * Description * Get name of sysctl in /proc/sys/ and copy it into provided by * program buffer *buf* of size *buf_len*. @@ -2682,7 +2682,7 @@ union bpf_attr { * **-E2BIG** if the buffer wasn't big enough (*buf* will contain * truncated name in this case). * - * int bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) + * long bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) * Description * Get current value of sysctl as it is presented in /proc/sys * (incl. newline, etc), and copy it as a string into provided @@ -2701,7 +2701,7 @@ union bpf_attr { * **-EINVAL** if current value was unavailable, e.g. because * sysctl is uninitialized and read returns -EIO for it. * - * int bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) + * long bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) * Description * Get new value being written by user space to sysctl (before * the actual write happens) and copy it as a string into @@ -2718,7 +2718,7 @@ union bpf_attr { * * **-EINVAL** if sysctl is being read. 
* - * int bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len) + * long bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len) * Description * Override new value being written by user space to sysctl with * value provided by program in buffer *buf* of size *buf_len*. @@ -2735,7 +2735,7 @@ union bpf_attr { * * **-EINVAL** if sysctl is being read. * - * int bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res) + * long bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res) * Description * Convert the initial part of the string from buffer *buf* of * size *buf_len* to a long integer according to the given base @@ -2759,7 +2759,7 @@ union bpf_attr { * * **-ERANGE** if resulting value was out of range. * - * int bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res) + * long bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res) * Description * Convert the initial part of the string from buffer *buf* of * size *buf_len* to an unsigned long integer according to the @@ -2810,7 +2810,7 @@ union bpf_attr { * **NULL** if not found or there was an error in adding * a new bpf-local-storage. * - * int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk) + * long bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk) * Description * Delete a bpf-local-storage from a *sk*. * Return @@ -2818,7 +2818,7 @@ union bpf_attr { * * **-ENOENT** if the bpf-local-storage cannot be found. * - * int bpf_send_signal(u32 sig) + * long bpf_send_signal(u32 sig) * Description * Send signal *sig* to the process of the current task. * The signal may be delivered to any of this process's threads. @@ -2859,7 +2859,7 @@ union bpf_attr { * * **-EPROTONOSUPPORT** IP packet version is not 4 or 6 * - * int bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) + * long bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) * Description * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf @@ -2883,21 +2883,21 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr) * Description * Safely attempt to read *size* bytes from user space address * *unsafe_ptr* and store the data in *dst*. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) * Description * Safely attempt to read *size* bytes from kernel space address * *unsafe_ptr* and store the data in *dst*. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr) * Description * Copy a NUL terminated string from an unsafe user address * *unsafe_ptr* to *dst*. The *size* should include the @@ -2941,7 +2941,7 @@ union bpf_attr { * including the trailing NUL character. On error, a negative * value. 
* - * int bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr) * Description * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr* * to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply. @@ -2949,14 +2949,14 @@ union bpf_attr { * On success, the strictly positive length of the string, including * the trailing NUL character. On error, a negative value. * - * int bpf_tcp_send_ack(void *tp, u32 rcv_nxt) + * long bpf_tcp_send_ack(void *tp, u32 rcv_nxt) * Description * Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**. * *rcv_nxt* is the ack_seq to be sent out. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_send_signal_thread(u32 sig) + * long bpf_send_signal_thread(u32 sig) * Description * Send signal *sig* to the thread corresponding to the current task. * Return @@ -2976,7 +2976,7 @@ union bpf_attr { * Return * The 64 bit jiffies * - * int bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags) + * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags) * Description * For an eBPF program attached to a perf event, retrieve the * branch records (**struct perf_branch_entry**) associated to *ctx* @@ -2995,7 +2995,7 @@ union bpf_attr { * * **-ENOENT** if architecture does not support branch records. * - * int bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size) + * long bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size) * Description * Returns 0 on success, values for *pid* and *tgid* as seen from the current * *namespace* will be returned in *nsdata*. @@ -3007,7 +3007,7 @@ union bpf_attr { * * **-ENOENT** if pidns does not exists for the current task. * - * int bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) + * long bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) * Description * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf @@ -3062,7 +3062,7 @@ union bpf_attr { * Return * The id is returned or 0 in case the id could not be retrieved. * - * int bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags) + * long bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags) * Description * Assign the *sk* to the *skb*. When combined with appropriate * routing configuration to receive the packet towards the socket, @@ -3097,7 +3097,7 @@ union bpf_attr { * Return * Current *ktime*. * - * int bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len) + * long bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len) * Description * **bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print * out the format string. @@ -3126,7 +3126,7 @@ union bpf_attr { * * **-EOVERFLOW** if an overflow happened: The same object will be tried again. * - * int bpf_seq_write(struct seq_file *m, const void *data, u32 len) + * long bpf_seq_write(struct seq_file *m, const void *data, u32 len) * Description * **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data. * The *m* represents the seq_file. The *data* and *len* represent the @@ -3221,7 +3221,7 @@ union bpf_attr { * Return * Requested value, or 0, if flags are not recognized. 
* - * int bpf_csum_level(struct sk_buff *skb, u64 level) + * long bpf_csum_level(struct sk_buff *skb, u64 level) * Description * Change the skbs checksum level by one layer up or down, or * reset it entirely to none in order to have the stack perform diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 19684813faae..be0efee49093 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -653,7 +653,7 @@ union bpf_attr { * Map value associated to *key*, or **NULL** if no entry was * found. * - * int bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags) + * long bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags) * Description * Add or update the value of the entry associated to *key* in * *map* with *value*. *flags* is one of: @@ -671,13 +671,13 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_map_delete_elem(struct bpf_map *map, const void *key) + * long bpf_map_delete_elem(struct bpf_map *map, const void *key) * Description * Delete entry with *key* from *map*. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr) * Description * For tracing programs, safely attempt to read *size* bytes from * kernel space address *unsafe_ptr* and store the data in *dst*. @@ -695,7 +695,7 @@ union bpf_attr { * Return * Current *ktime*. * - * int bpf_trace_printk(const char *fmt, u32 fmt_size, ...) + * long bpf_trace_printk(const char *fmt, u32 fmt_size, ...) * Description * This helper is a "printk()-like" facility for debugging. It * prints a message defined by format *fmt* (of size *fmt_size*) @@ -775,7 +775,7 @@ union bpf_attr { * Return * The SMP id of the processor running the program. * - * int bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) + * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) * Description * Store *len* bytes from address *from* into the packet * associated to *skb*, at *offset*. *flags* are a combination of @@ -792,7 +792,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size) + * long bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size) * Description * Recompute the layer 3 (e.g. IP) checksum for the packet * associated to *skb*. Computation is incremental, so the helper @@ -817,7 +817,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags) + * long bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags) * Description * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the * packet associated to *skb*. Computation is incremental, so the @@ -849,7 +849,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index) + * long bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index) * Description * This special helper is used to trigger a "tail call", or in * other words, to jump into another eBPF program. 
The same stack @@ -880,7 +880,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags) + * long bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags) * Description * Clone and redirect the packet associated to *skb* to another * net device of index *ifindex*. Both ingress and egress @@ -916,7 +916,7 @@ union bpf_attr { * A 64-bit integer containing the current GID and UID, and * created as such: *current_gid* **<< 32 \|** *current_uid*. * - * int bpf_get_current_comm(void *buf, u32 size_of_buf) + * long bpf_get_current_comm(void *buf, u32 size_of_buf) * Description * Copy the **comm** attribute of the current task into *buf* of * *size_of_buf*. The **comm** attribute contains the name of @@ -953,7 +953,7 @@ union bpf_attr { * Return * The classid, or 0 for the default unconfigured classid. * - * int bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) + * long bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) * Description * Push a *vlan_tci* (VLAN tag control information) of protocol * *vlan_proto* to the packet associated to *skb*, then update @@ -969,7 +969,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_vlan_pop(struct sk_buff *skb) + * long bpf_skb_vlan_pop(struct sk_buff *skb) * Description * Pop a VLAN header from the packet associated to *skb*. * @@ -981,7 +981,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) + * long bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) * Description * Get tunnel metadata. This helper takes a pointer *key* to an * empty **struct bpf_tunnel_key** of **size**, that will be @@ -1032,7 +1032,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) + * long bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) * Description * Populate tunnel metadata for packet associated to *skb.* The * tunnel metadata is set to the contents of *key*, of *size*. The @@ -1098,7 +1098,7 @@ union bpf_attr { * The value of the perf event counter read from the map, or a * negative error code in case of failure. * - * int bpf_redirect(u32 ifindex, u64 flags) + * long bpf_redirect(u32 ifindex, u64 flags) * Description * Redirect the packet to another net device of index *ifindex*. * This helper is somewhat similar to **bpf_clone_redirect**\ @@ -1145,7 +1145,7 @@ union bpf_attr { * The realm of the route for the packet associated to *skb*, or 0 * if none was found. * - * int bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) + * long bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) * Description * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf @@ -1190,7 +1190,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. 
* - * int bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len) + * long bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len) * Description * This helper was provided as an easy way to load data from a * packet. It can be used to load *len* bytes from *offset* from @@ -1207,7 +1207,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags) + * long bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags) * Description * Walk a user or a kernel stack and return its id. To achieve * this, the helper needs *ctx*, which is a pointer to the context @@ -1276,7 +1276,7 @@ union bpf_attr { * The checksum result, or a negative error code in case of * failure. * - * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) + * long bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) * Description * Retrieve tunnel options metadata for the packet associated to * *skb*, and store the raw tunnel option data to the buffer *opt* @@ -1294,7 +1294,7 @@ union bpf_attr { * Return * The size of the option data retrieved. * - * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) + * long bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) * Description * Set tunnel options metadata for the packet associated to *skb* * to the option data contained in the raw buffer *opt* of *size*. @@ -1304,7 +1304,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags) + * long bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags) * Description * Change the protocol of the *skb* to *proto*. Currently * supported are transition from IPv4 to IPv6, and from IPv6 to @@ -1331,7 +1331,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_change_type(struct sk_buff *skb, u32 type) + * long bpf_skb_change_type(struct sk_buff *skb, u32 type) * Description * Change the packet type for the packet associated to *skb*. This * comes down to setting *skb*\ **->pkt_type** to *type*, except @@ -1358,7 +1358,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index) + * long bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index) * Description * Check whether *skb* is a descendant of the cgroup2 held by * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. @@ -1389,7 +1389,7 @@ union bpf_attr { * Return * A pointer to the current task struct. * - * int bpf_probe_write_user(void *dst, const void *src, u32 len) + * long bpf_probe_write_user(void *dst, const void *src, u32 len) * Description * Attempt in a safe way to write *len* bytes from the buffer * *src* to *dst* in memory. It only works for threads that are in @@ -1408,7 +1408,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_current_task_under_cgroup(struct bpf_map *map, u32 index) + * long bpf_current_task_under_cgroup(struct bpf_map *map, u32 index) * Description * Check whether the probe is being run is the context of a given * subset of the cgroup2 hierarchy. The cgroup2 to test is held by @@ -1420,7 +1420,7 @@ union bpf_attr { * * 1, if the *skb* task does not belong to the cgroup2. * * A negative error code, if an error occurred. 
* - * int bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) + * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) * Description * Resize (trim or grow) the packet associated to *skb* to the * new *len*. The *flags* are reserved for future usage, and must @@ -1444,7 +1444,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_pull_data(struct sk_buff *skb, u32 len) + * long bpf_skb_pull_data(struct sk_buff *skb, u32 len) * Description * Pull in non-linear data in case the *skb* is non-linear and not * all of *len* are part of the linear section. Make *len* bytes @@ -1500,7 +1500,7 @@ union bpf_attr { * recalculation the next time the kernel tries to access this * hash or when the **bpf_get_hash_recalc**\ () helper is called. * - * int bpf_get_numa_node_id(void) + * long bpf_get_numa_node_id(void) * Description * Return the id of the current NUMA node. The primary use case * for this helper is the selection of sockets for the local NUMA @@ -1511,7 +1511,7 @@ union bpf_attr { * Return * The id of current NUMA node. * - * int bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags) + * long bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags) * Description * Grows headroom of packet associated to *skb* and adjusts the * offset of the MAC header accordingly, adding *len* bytes of @@ -1532,7 +1532,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta) + * long bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta) * Description * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that * it is possible to use a negative value for *delta*. This helper @@ -1547,7 +1547,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr) * Description * Copy a NUL terminated string from an unsafe kernel address * *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for @@ -1595,14 +1595,14 @@ union bpf_attr { * is returned (note that **overflowuid** might also be the actual * UID value for the socket). * - * u32 bpf_set_hash(struct sk_buff *skb, u32 hash) + * long bpf_set_hash(struct sk_buff *skb, u32 hash) * Description * Set the full hash for *skb* (set the field *skb*\ **->hash**) * to value *hash*. * Return * 0 * - * int bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) + * long bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) * Description * Emulate a call to **setsockopt()** on the socket associated to * *bpf_socket*, which must be a full socket. The *level* at @@ -1630,7 +1630,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags) + * long bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags) * Description * Grow or shrink the room for data in the packet associated to * *skb* by *len_diff*, and according to the selected *mode*. @@ -1676,7 +1676,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. 
* - * int bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags) + * long bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags) * Description * Redirect the packet to the endpoint referenced by *map* at * index *key*. Depending on its type, this *map* can contain @@ -1697,7 +1697,7 @@ union bpf_attr { * **XDP_REDIRECT** on success, or the value of the two lower bits * of the *flags* argument on error. * - * int bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags) + * long bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags) * Description * Redirect the packet to the socket referenced by *map* (of type * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and @@ -1708,7 +1708,7 @@ union bpf_attr { * Return * **SK_PASS** on success, or **SK_DROP** on error. * - * int bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) + * long bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) * Description * Add an entry to, or update a *map* referencing sockets. The * *skops* is used as a new value for the entry associated to @@ -1727,7 +1727,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta) + * long bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta) * Description * Adjust the address pointed by *xdp_md*\ **->data_meta** by * *delta* (which can be positive or negative). Note that this @@ -1756,7 +1756,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size) + * long bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size) * Description * Read the value of a perf event counter, and store it into *buf* * of size *buf_size*. This helper relies on a *map* of type @@ -1806,7 +1806,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size) + * long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size) * Description * For en eBPF program attached to a perf event, retrieve the * value of the event counter associated to *ctx* and store it in @@ -1817,7 +1817,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) + * long bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) * Description * Emulate a call to **getsockopt()** on the socket associated to * *bpf_socket*, which must be a full socket. The *level* at @@ -1842,7 +1842,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_override_return(struct pt_regs *regs, u64 rc) + * long bpf_override_return(struct pt_regs *regs, u64 rc) * Description * Used for error injection, this helper uses kprobes to override * the return value of the probed function, and to set it to *rc*. 
@@ -1867,7 +1867,7 @@ union bpf_attr { * Return * 0 * - * int bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval) + * long bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval) * Description * Attempt to set the value of the **bpf_sock_ops_cb_flags** field * for the full TCP socket associated to *bpf_sock_ops* to @@ -1911,7 +1911,7 @@ union bpf_attr { * be set is returned (which comes down to 0 if all bits were set * as required). * - * int bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags) + * long bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags) * Description * This helper is used in programs implementing policies at the * socket level. If the message *msg* is allowed to pass (i.e. if @@ -1925,7 +1925,7 @@ union bpf_attr { * Return * **SK_PASS** on success, or **SK_DROP** on error. * - * int bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes) + * long bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes) * Description * For socket policies, apply the verdict of the eBPF program to * the next *bytes* (number of bytes) of message *msg*. @@ -1959,7 +1959,7 @@ union bpf_attr { * Return * 0 * - * int bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes) + * long bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes) * Description * For socket policies, prevent the execution of the verdict eBPF * program for message *msg* until *bytes* (byte number) have been @@ -1977,7 +1977,7 @@ union bpf_attr { * Return * 0 * - * int bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags) + * long bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags) * Description * For socket policies, pull in non-linear data from user space * for *msg* and set pointers *msg*\ **->data** and *msg*\ @@ -2008,7 +2008,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) + * long bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) * Description * Bind the socket associated to *ctx* to the address pointed by * *addr*, of length *addr_len*. This allows for making outgoing @@ -2026,7 +2026,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta) + * long bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta) * Description * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is * possible to both shrink and grow the packet tail. @@ -2040,7 +2040,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags) + * long bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags) * Description * Retrieve the XFRM state (IP transform framework, see also * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*. @@ -2056,7 +2056,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags) + * long bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags) * Description * Return a user or a kernel stack in bpf program provided buffer. 
* To achieve this, the helper needs *ctx*, which is a pointer @@ -2089,7 +2089,7 @@ union bpf_attr { * A non-negative value equal to or less than *size* on success, * or a negative error in case of failure. * - * int bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header) + * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header) * Description * This helper is similar to **bpf_skb_load_bytes**\ () in that * it provides an easy way to load *len* bytes from *offset* @@ -2111,7 +2111,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags) + * long bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags) * Description * Do FIB lookup in kernel tables using parameters in *params*. * If lookup is successful and result shows packet is to be @@ -2142,7 +2142,7 @@ union bpf_attr { * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the * packet is not forwarded or needs assist from full stack * - * int bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) + * long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) * Description * Add an entry to, or update a sockhash *map* referencing sockets. * The *skops* is used as a new value for the entry associated to @@ -2161,7 +2161,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags) + * long bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags) * Description * This helper is used in programs implementing policies at the * socket level. If the message *msg* is allowed to pass (i.e. if @@ -2175,7 +2175,7 @@ union bpf_attr { * Return * **SK_PASS** on success, or **SK_DROP** on error. * - * int bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags) + * long bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags) * Description * This helper is used in programs implementing policies at the * skb socket level. If the sk_buff *skb* is allowed to pass (i.e. @@ -2189,7 +2189,7 @@ union bpf_attr { * Return * **SK_PASS** on success, or **SK_DROP** on error. * - * int bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) + * long bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) * Description * Encapsulate the packet associated to *skb* within a Layer 3 * protocol header. This header is provided in the buffer at @@ -2226,7 +2226,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len) + * long bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len) * Description * Store *len* bytes from address *from* into the packet * associated to *skb*, at *offset*. Only the flags, tag and TLVs @@ -2241,7 +2241,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. 
* - * int bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta) + * long bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta) * Description * Adjust the size allocated to TLVs in the outermost IPv6 * Segment Routing Header contained in the packet associated to @@ -2257,7 +2257,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len) + * long bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len) * Description * Apply an IPv6 Segment Routing action of type *action* to the * packet associated to *skb*. Each action takes a parameter @@ -2286,7 +2286,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_rc_repeat(void *ctx) + * long bpf_rc_repeat(void *ctx) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded repeat key message. This delays @@ -2305,7 +2305,7 @@ union bpf_attr { * Return * 0 * - * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) + * long bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded key press with *scancode*, @@ -2370,7 +2370,7 @@ union bpf_attr { * Return * A pointer to the local storage area. * - * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) + * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) * Description * Select a **SO_REUSEPORT** socket from a * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*. @@ -2471,7 +2471,7 @@ union bpf_attr { * result is from *reuse*\ **->socks**\ [] using the hash of the * tuple. * - * int bpf_sk_release(struct bpf_sock *sock) + * long bpf_sk_release(struct bpf_sock *sock) * Description * Release the reference held by *sock*. *sock* must be a * non-**NULL** pointer that was returned from @@ -2479,7 +2479,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) + * long bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) * Description * Push an element *value* in *map*. *flags* is one of: * @@ -2489,19 +2489,19 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_map_pop_elem(struct bpf_map *map, void *value) + * long bpf_map_pop_elem(struct bpf_map *map, void *value) * Description * Pop an element from *map*. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_map_peek_elem(struct bpf_map *map, void *value) + * long bpf_map_peek_elem(struct bpf_map *map, void *value) * Description * Get an element from *map* without removing it. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) + * long bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) * Description * For socket policies, insert *len* bytes into *msg* at offset * *start*. @@ -2517,7 +2517,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. 
* - * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) + * long bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) * Description * Will remove *len* bytes from a *msg* starting at byte *start*. * This may result in **ENOMEM** errors under certain situations if @@ -2529,7 +2529,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y) + * long bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded pointer movement. @@ -2543,7 +2543,7 @@ union bpf_attr { * Return * 0 * - * int bpf_spin_lock(struct bpf_spin_lock *lock) + * long bpf_spin_lock(struct bpf_spin_lock *lock) * Description * Acquire a spinlock represented by the pointer *lock*, which is * stored as part of a value of a map. Taking the lock allows to @@ -2591,7 +2591,7 @@ union bpf_attr { * Return * 0 * - * int bpf_spin_unlock(struct bpf_spin_lock *lock) + * long bpf_spin_unlock(struct bpf_spin_lock *lock) * Description * Release the *lock* previously locked by a call to * **bpf_spin_lock**\ (\ *lock*\ ). @@ -2614,7 +2614,7 @@ union bpf_attr { * A **struct bpf_tcp_sock** pointer on success, or **NULL** in * case of failure. * - * int bpf_skb_ecn_set_ce(struct sk_buff *skb) + * long bpf_skb_ecn_set_ce(struct sk_buff *skb) * Description * Set ECN (Explicit Congestion Notification) field of IP header * to **CE** (Congestion Encountered) if current value is **ECT** @@ -2651,7 +2651,7 @@ union bpf_attr { * result is from *reuse*\ **->socks**\ [] using the hash of the * tuple. * - * int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) + * long bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) * Description * Check whether *iph* and *th* contain a valid SYN cookie ACK for * the listening socket in *sk*. @@ -2666,7 +2666,7 @@ union bpf_attr { * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative * error otherwise. * - * int bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) + * long bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) * Description * Get name of sysctl in /proc/sys/ and copy it into provided by * program buffer *buf* of size *buf_len*. @@ -2682,7 +2682,7 @@ union bpf_attr { * **-E2BIG** if the buffer wasn't big enough (*buf* will contain * truncated name in this case). * - * int bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) + * long bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) * Description * Get current value of sysctl as it is presented in /proc/sys * (incl. newline, etc), and copy it as a string into provided @@ -2701,7 +2701,7 @@ union bpf_attr { * **-EINVAL** if current value was unavailable, e.g. because * sysctl is uninitialized and read returns -EIO for it. * - * int bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) + * long bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) * Description * Get new value being written by user space to sysctl (before * the actual write happens) and copy it as a string into @@ -2718,7 +2718,7 @@ union bpf_attr { * * **-EINVAL** if sysctl is being read. 
* - * int bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len) + * long bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len) * Description * Override new value being written by user space to sysctl with * value provided by program in buffer *buf* of size *buf_len*. @@ -2735,7 +2735,7 @@ union bpf_attr { * * **-EINVAL** if sysctl is being read. * - * int bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res) + * long bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res) * Description * Convert the initial part of the string from buffer *buf* of * size *buf_len* to a long integer according to the given base @@ -2759,7 +2759,7 @@ union bpf_attr { * * **-ERANGE** if resulting value was out of range. * - * int bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res) + * long bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res) * Description * Convert the initial part of the string from buffer *buf* of * size *buf_len* to an unsigned long integer according to the @@ -2810,7 +2810,7 @@ union bpf_attr { * **NULL** if not found or there was an error in adding * a new bpf-local-storage. * - * int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk) + * long bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk) * Description * Delete a bpf-local-storage from a *sk*. * Return @@ -2818,7 +2818,7 @@ union bpf_attr { * * **-ENOENT** if the bpf-local-storage cannot be found. * - * int bpf_send_signal(u32 sig) + * long bpf_send_signal(u32 sig) * Description * Send signal *sig* to the process of the current task. * The signal may be delivered to any of this process's threads. @@ -2859,7 +2859,7 @@ union bpf_attr { * * **-EPROTONOSUPPORT** IP packet version is not 4 or 6 * - * int bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) + * long bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) * Description * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf @@ -2883,21 +2883,21 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr) * Description * Safely attempt to read *size* bytes from user space address * *unsafe_ptr* and store the data in *dst*. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) * Description * Safely attempt to read *size* bytes from kernel space address * *unsafe_ptr* and store the data in *dst*. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr) * Description * Copy a NUL terminated string from an unsafe user address * *unsafe_ptr* to *dst*. The *size* should include the @@ -2941,7 +2941,7 @@ union bpf_attr { * including the trailing NUL character. On error, a negative * value. 
* - * int bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr) * Description * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr* * to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply. @@ -2949,14 +2949,14 @@ union bpf_attr { * On success, the strictly positive length of the string, including * the trailing NUL character. On error, a negative value. * - * int bpf_tcp_send_ack(void *tp, u32 rcv_nxt) + * long bpf_tcp_send_ack(void *tp, u32 rcv_nxt) * Description * Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**. * *rcv_nxt* is the ack_seq to be sent out. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_send_signal_thread(u32 sig) + * long bpf_send_signal_thread(u32 sig) * Description * Send signal *sig* to the thread corresponding to the current task. * Return @@ -2976,7 +2976,7 @@ union bpf_attr { * Return * The 64 bit jiffies * - * int bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags) + * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags) * Description * For an eBPF program attached to a perf event, retrieve the * branch records (**struct perf_branch_entry**) associated to *ctx* @@ -2995,7 +2995,7 @@ union bpf_attr { * * **-ENOENT** if architecture does not support branch records. * - * int bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size) + * long bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size) * Description * Returns 0 on success, values for *pid* and *tgid* as seen from the current * *namespace* will be returned in *nsdata*. @@ -3007,7 +3007,7 @@ union bpf_attr { * * **-ENOENT** if pidns does not exists for the current task. * - * int bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) + * long bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) * Description * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf @@ -3062,7 +3062,7 @@ union bpf_attr { * Return * The id is returned or 0 in case the id could not be retrieved. * - * int bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags) + * long bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags) * Description * Assign the *sk* to the *skb*. When combined with appropriate * routing configuration to receive the packet towards the socket, @@ -3097,7 +3097,7 @@ union bpf_attr { * Return * Current *ktime*. * - * int bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len) + * long bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len) * Description * **bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print * out the format string. @@ -3126,7 +3126,7 @@ union bpf_attr { * * **-EOVERFLOW** if an overflow happened: The same object will be tried again. * - * int bpf_seq_write(struct seq_file *m, const void *data, u32 len) + * long bpf_seq_write(struct seq_file *m, const void *data, u32 len) * Description * **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data. * The *m* represents the seq_file. The *data* and *len* represent the @@ -3221,7 +3221,7 @@ union bpf_attr { * Return * Requested value, or 0, if flags are not recognized. 
* - * int bpf_csum_level(struct sk_buff *skb, u64 level) + * long bpf_csum_level(struct sk_buff *skb, u64 level) * Description * Change the skbs checksum level by one layer up or down, or * reset it entirely to none in order to have the stack perform -- cgit v1.2.3 From e678e9ddea96590888b034e2a7ad1128bf1ea1ea Mon Sep 17 00:00:00 2001 From: Brian Vazquez Date: Tue, 23 Jun 2020 09:42:31 -0700 Subject: indirect_call_wrapper: extend indirect wrapper to support up to 4 calls There are many places where 2 annotations are not enough. This patch adds INDIRECT_CALL_3 and INDIRECT_CALL_4 to cover such cases. Signed-off-by: Brian Vazquez Acked-by: Paolo Abeni Signed-off-by: David S. Miller --- include/linux/indirect_call_wrapper.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'include') diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h index 00d7e8e919c6..54c02c84906a 100644 --- a/include/linux/indirect_call_wrapper.h +++ b/include/linux/indirect_call_wrapper.h @@ -23,6 +23,16 @@ likely(f == f2) ? f2(__VA_ARGS__) : \ INDIRECT_CALL_1(f, f1, __VA_ARGS__); \ }) +#define INDIRECT_CALL_3(f, f3, f2, f1, ...) \ + ({ \ + likely(f == f3) ? f3(__VA_ARGS__) : \ + INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__); \ + }) +#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) \ + ({ \ + likely(f == f4) ? f4(__VA_ARGS__) : \ + INDIRECT_CALL_3(f, f3, f2, f1, __VA_ARGS__); \ + }) #define INDIRECT_CALLABLE_DECLARE(f) f #define INDIRECT_CALLABLE_SCOPE @@ -30,6 +40,8 @@ #else #define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__) #define INDIRECT_CALL_2(f, f2, f1, ...) f(__VA_ARGS__) +#define INDIRECT_CALL_3(f, f3, f2, f1, ...) f(__VA_ARGS__) +#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) f(__VA_ARGS__) #define INDIRECT_CALLABLE_DECLARE(f) #define INDIRECT_CALLABLE_SCOPE static #endif -- cgit v1.2.3 From 55cced4f813bece01f96256d96f283b9210d19ee Mon Sep 17 00:00:00 2001 From: Brian Vazquez Date: Tue, 23 Jun 2020 09:42:32 -0700 Subject: ipv6: fib6: avoid indirect calls from fib6_rule_lookup It was reported that a considerable number of cycles was spent on the expensive indirect calls in fib6_rule_lookup. This patch introduces an inline helper called pol_lookup_func that uses the indirect_call_wrappers to avoid the indirect calls. This patch saves around 50ns per call. Performance was measured on the receiver by checking the number of syncookies the server was able to generate under a synflood load. Traffic was generated using trafgen[1], which was pushing around 1Mpps on a single queue. The receiver was using only one rx queue, which helps to create a bottleneck and makes the experiment rx-bounded.
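For reference, here is a minimal sketch of what the pol_lookup_func() helper added below in ip6_fib.h effectively expands to when retpolines are enabled (CONFIG_RETPOLINE). The function name pol_lookup_sketch is made up purely for illustration and is not part of the patch; the types and lookup functions are the ones declared in the ip6_fib.h hunk that follows.

/* Hedged illustration only: an open-coded equivalent of the
 * INDIRECT_CALL_4() chain used by pol_lookup_func(). Each expected
 * callback is compared against the function pointer and called
 * directly; only the final fallback still goes through the
 * (retpoline-protected) indirect call.
 */
static inline struct rt6_info *pol_lookup_sketch(pol_lookup_t lookup,
						 struct net *net,
						 struct fib6_table *table,
						 struct flowi6 *fl6,
						 const struct sk_buff *skb,
						 int flags)
{
	if (likely(lookup == ip6_pol_route_output))
		return ip6_pol_route_output(net, table, fl6, skb, flags);
	if (likely(lookup == ip6_pol_route_input))
		return ip6_pol_route_input(net, table, fl6, skb, flags);
	if (likely(lookup == ip6_pol_route_lookup))
		return ip6_pol_route_lookup(net, table, fl6, skb, flags);
	if (likely(lookup == __ip6_route_redirect))
		return __ip6_route_redirect(net, table, fl6, skb, flags);

	/* unexpected callback: fall back to the plain indirect call */
	return lookup(net, table, fl6, skb, flags);
}

The comparison order mirrors the INDIRECT_CALL_4() invocation in the patch (see the changelog note below about ordering).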
These are the syncookies generated over 10s from the different runs: Without the patch: TcpExtSyncookiesSent 3553749 0.0 TcpExtSyncookiesSent 3550895 0.0 TcpExtSyncookiesSent 3553845 0.0 TcpExtSyncookiesSent 3541050 0.0 TcpExtSyncookiesSent 3539921 0.0 TcpExtSyncookiesSent 3557659 0.0 TcpExtSyncookiesSent 3526812 0.0 TcpExtSyncookiesSent 3536121 0.0 TcpExtSyncookiesSent 3529963 0.0 TcpExtSyncookiesSent 3536319 0.0 With the patch: TcpExtSyncookiesSent 3611786 0.0 TcpExtSyncookiesSent 3596682 0.0 TcpExtSyncookiesSent 3606878 0.0 TcpExtSyncookiesSent 3599564 0.0 TcpExtSyncookiesSent 3601304 0.0 TcpExtSyncookiesSent 3609249 0.0 TcpExtSyncookiesSent 3617437 0.0 TcpExtSyncookiesSent 3608765 0.0 TcpExtSyncookiesSent 3620205 0.0 TcpExtSyncookiesSent 3601895 0.0 Without the patch the average is 354263 pkt/s or 2822 ns/pkt and with the patch the average is 360738 pkt/s or 2772 ns/pkt, which gives an estimate of 50 ns per packet. [1] http://netsniff-ng.org/ Changelog since v1: - Change ordering in the ICW (Paolo Abeni) Cc: Luigi Rizzo Cc: Paolo Abeni Reported-by: Eric Dumazet Signed-off-by: Brian Vazquez Acked-by: Paolo Abeni Signed-off-by: David S. Miller --- include/net/ip6_fib.h | 36 ++++++++++++++++++++++++++++++++++++ net/ipv6/fib6_rules.c | 9 ++++++--- net/ipv6/ip6_fib.c | 3 ++- net/ipv6/route.c | 8 ++++---- 4 files changed, 48 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 3f615a29766e..cc8356fd927f 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -19,6 +19,7 @@ #include #include #include +#include #ifdef CONFIG_IPV6_MULTIPLE_TABLES #define FIB6_TABLE_HASHSZ 256 @@ -552,6 +553,41 @@ struct bpf_iter__ipv6_route { }; #endif +INDIRECT_CALLABLE_DECLARE(struct rt6_info *ip6_pol_route_output(struct net *net, + struct fib6_table *table, + struct flowi6 *fl6, + const struct sk_buff *skb, + int flags)); +INDIRECT_CALLABLE_DECLARE(struct rt6_info *ip6_pol_route_input(struct net *net, + struct fib6_table *table, + struct flowi6 *fl6, + const struct sk_buff *skb, + int flags)); +INDIRECT_CALLABLE_DECLARE(struct rt6_info *__ip6_route_redirect(struct net *net, + struct fib6_table *table, + struct flowi6 *fl6, + const struct sk_buff *skb, + int flags)); +INDIRECT_CALLABLE_DECLARE(struct rt6_info *ip6_pol_route_lookup(struct net *net, + struct fib6_table *table, + struct flowi6 *fl6, + const struct sk_buff *skb, + int flags)); +static inline struct rt6_info *pol_lookup_func(pol_lookup_t lookup, + struct net *net, + struct fib6_table *table, + struct flowi6 *fl6, + const struct sk_buff *skb, + int flags) +{ + return INDIRECT_CALL_4(lookup, + ip6_pol_route_output, + ip6_pol_route_input, + ip6_pol_route_lookup, + __ip6_route_redirect, + net, table, fl6, skb, flags); +} + #ifdef CONFIG_IPV6_MULTIPLE_TABLES static inline bool fib6_has_custom_rules(const struct net *net) { diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index fafe556d21e0..6053ef851555 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -111,11 +111,13 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, } else { struct rt6_info *rt; - rt = lookup(net, net->ipv6.fib6_local_tbl, fl6, skb, flags); + rt = pol_lookup_func(lookup, + net, net->ipv6.fib6_local_tbl, fl6, skb, flags); if (rt != net->ipv6.ip6_null_entry && rt->dst.error != -EAGAIN) return &rt->dst; ip6_rt_put_flags(rt, flags); - rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, skb, flags); + rt = pol_lookup_func(lookup, + net, net->ipv6.fib6_main_tbl, fl6, skb,
flags); if (rt->dst.error != -EAGAIN) return &rt->dst; ip6_rt_put_flags(rt, flags); @@ -226,7 +228,8 @@ static int __fib6_rule_action(struct fib_rule *rule, struct flowi *flp, goto out; } - rt = lookup(net, table, flp6, arg->lookup_data, flags); + rt = pol_lookup_func(lookup, + net, table, flp6, arg->lookup_data, flags); if (rt != net->ipv6.ip6_null_entry) { err = fib6_rule_saddr(net, rule, flags, flp6, ip6_dst_idev(&rt->dst)->dev); diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 49ee89bbcba0..25a90f3f705c 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -314,7 +314,8 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, { struct rt6_info *rt; - rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, skb, flags); + rt = pol_lookup_func(lookup, + net, net->ipv6.fib6_main_tbl, fl6, skb, flags); if (rt->dst.error == -EAGAIN) { ip6_rt_put_flags(rt, flags); rt = net->ipv6.ip6_null_entry; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 82cbb46a2a4f..5852039ca9cf 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1207,7 +1207,7 @@ fallback: return nrt; } -static struct rt6_info *ip6_pol_route_lookup(struct net *net, +INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_lookup(struct net *net, struct fib6_table *table, struct flowi6 *fl6, const struct sk_buff *skb, @@ -2274,7 +2274,7 @@ out: } EXPORT_SYMBOL_GPL(ip6_pol_route); -static struct rt6_info *ip6_pol_route_input(struct net *net, +INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table, struct flowi6 *fl6, const struct sk_buff *skb, @@ -2465,7 +2465,7 @@ void ip6_route_input(struct sk_buff *skb) &fl6, skb, flags)); } -static struct rt6_info *ip6_pol_route_output(struct net *net, +INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table, struct flowi6 *fl6, const struct sk_buff *skb, @@ -2912,7 +2912,7 @@ struct ip6rd_flowi { struct in6_addr gateway; }; -static struct rt6_info *__ip6_route_redirect(struct net *net, +INDIRECT_CALLABLE_SCOPE struct rt6_info *__ip6_route_redirect(struct net *net, struct fib6_table *table, struct flowi6 *fl6, const struct sk_buff *skb, -- cgit v1.2.3 From bdfd2d1fa79acd03e18d1683419572f3682b39fd Mon Sep 17 00:00:00 2001 From: Jarod Wilson Date: Tue, 23 Jun 2020 16:40:01 -0400 Subject: bonding/xfrm: use real_dev instead of slave_dev Rather than requiring every hw crypto capable NIC driver to do a check for slave_dev being set, set real_dev in the xfrm layer and xso init time, and then override it in the bonding driver as needed. Then NIC drivers can always use real_dev, and at the same time, we eliminate the use of a variable name that probably shouldn't have been used in the first place, particularly given recent current events. CC: Boris Pismenny CC: Saeed Mahameed CC: Leon Romanovsky CC: Jay Vosburgh CC: Veaceslav Falico CC: Andy Gospodarek CC: "David S. Miller" CC: Jeff Kirsher CC: Jakub Kicinski CC: Steffen Klassert CC: Herbert Xu CC: netdev@vger.kernel.org Suggested-by: Saeed Mahameed Signed-off-by: Jarod Wilson Signed-off-by: David S. 
Miller --- drivers/net/bonding/bond_main.c | 6 +-- drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c | 47 ++++++---------------- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 10 +---- include/net/xfrm.h | 2 +- net/xfrm/xfrm_device.c | 5 ++- 5 files changed, 21 insertions(+), 49 deletions(-) (limited to 'include') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 90939ccf2a94..4ef99efc37f6 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -386,7 +386,7 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs) struct bonding *bond = netdev_priv(bond_dev); struct slave *slave = rtnl_dereference(bond->curr_active_slave); - xs->xso.slave_dev = slave->dev; + xs->xso.real_dev = slave->dev; bond->xs = xs; if (!(slave->dev->xfrmdev_ops @@ -411,7 +411,7 @@ static void bond_ipsec_del_sa(struct xfrm_state *xs) if (!slave) return; - xs->xso.slave_dev = slave->dev; + xs->xso.real_dev = slave->dev; if (!(slave->dev->xfrmdev_ops && slave->dev->xfrmdev_ops->xdo_dev_state_delete)) { @@ -440,7 +440,7 @@ static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) return false; } - xs->xso.slave_dev = slave_dev; + xs->xso.real_dev = slave_dev; return slave_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index 26b0a58a064d..6516980965a2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -427,14 +427,11 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec, static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs, u32 *mykey, u32 *mysalt) { - struct net_device *dev = xs->xso.dev; + struct net_device *dev = xs->xso.real_dev; unsigned char *key_data; char *alg_name = NULL; int key_len; - if (xs->xso.slave_dev) - dev = xs->xso.slave_dev; - if (!xs->aead) { netdev_err(dev, "Unsupported IPsec algorithm\n"); return -EINVAL; @@ -480,9 +477,9 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs, **/ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs) { - struct net_device *dev = xs->xso.dev; - struct ixgbe_adapter *adapter; - struct ixgbe_hw *hw; + struct net_device *dev = xs->xso.real_dev; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; u32 mfval, manc, reg; int num_filters = 4; bool manc_ipv4; @@ -500,12 +497,6 @@ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs) #define BMCIP_V6 0x3 #define BMCIP_MASK 0x3 - if (xs->xso.slave_dev) - dev = xs->xso.slave_dev; - - adapter = netdev_priv(dev); - hw = &adapter->hw; - manc = IXGBE_READ_REG(hw, IXGBE_MANC); manc_ipv4 = !!(manc & MANC_EN_IPV4_FILTER); mfval = IXGBE_READ_REG(hw, IXGBE_MFVAL); @@ -569,22 +560,15 @@ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs) **/ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) { - struct net_device *dev = xs->xso.dev; - struct ixgbe_adapter *adapter; - struct ixgbe_ipsec *ipsec; - struct ixgbe_hw *hw; + struct net_device *dev = xs->xso.real_dev; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_ipsec *ipsec = adapter->ipsec; + struct ixgbe_hw *hw = &adapter->hw; int checked, match, first; u16 sa_idx; int ret; int i; - if (xs->xso.slave_dev) - dev = xs->xso.slave_dev; - - adapter = netdev_priv(dev); - ipsec = adapter->ipsec; - hw = &adapter->hw; - if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) { netdev_err(dev, "Unsupported protocol 
0x%04x for ipsec offload\n", xs->id.proto); @@ -761,20 +745,13 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) **/ static void ixgbe_ipsec_del_sa(struct xfrm_state *xs) { - struct net_device *dev = xs->xso.dev; - struct ixgbe_adapter *adapter; - struct ixgbe_ipsec *ipsec; - struct ixgbe_hw *hw; + struct net_device *dev = xs->xso.real_dev; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_ipsec *ipsec = adapter->ipsec; + struct ixgbe_hw *hw = &adapter->hw; u32 zerobuf[4] = {0, 0, 0, 0}; u16 sa_idx; - if (xs->xso.slave_dev) - dev = xs->xso.slave_dev; - - adapter = netdev_priv(dev); - ipsec = adapter->ipsec; - hw = &adapter->hw; - if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { struct rx_sa *rsa; u8 ipi; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 72ad6664bd73..bc55c82b55ba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -207,12 +207,9 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) { - struct net_device *netdev = x->xso.dev; + struct net_device *netdev = x->xso.real_dev; struct mlx5e_priv *priv; - if (x->xso.slave_dev) - netdev = x->xso.slave_dev; - priv = netdev_priv(netdev); if (x->props.aalgo != SADB_AALG_NONE) { @@ -288,15 +285,12 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) static int mlx5e_xfrm_add_state(struct xfrm_state *x) { struct mlx5e_ipsec_sa_entry *sa_entry = NULL; - struct net_device *netdev = x->xso.dev; + struct net_device *netdev = x->xso.real_dev; struct mlx5_accel_esp_xfrm_attrs attrs; struct mlx5e_priv *priv; unsigned int sa_handle; int err; - if (x->xso.slave_dev) - netdev = x->xso.slave_dev; - priv = netdev_priv(netdev); err = mlx5e_xfrm_validate_state(x); diff --git a/include/net/xfrm.h b/include/net/xfrm.h index e20b2b27ec48..e648c9e6c919 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -127,7 +127,7 @@ struct xfrm_state_walk { struct xfrm_state_offload { struct net_device *dev; - struct net_device *slave_dev; + struct net_device *real_dev; unsigned long offload_handle; unsigned int num_exthdrs; u8 flags; diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index b8918fc5248b..7b64bb64c822 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -120,8 +120,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND) return skb; - /* This skb was already validated on the master dev */ - if ((x->xso.dev != dev) && (x->xso.slave_dev == dev)) + /* This skb was already validated on the upper/virtual dev */ + if ((x->xso.dev != dev) && (x->xso.real_dev == dev)) return skb; local_irq_save(flags); @@ -259,6 +259,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, } xso->dev = dev; + xso->real_dev = dev; xso->num_exthdrs = 1; xso->flags = xuo->flags; -- cgit v1.2.3 From 8e6cf365e1d5c70e275a77a3c5ad7e3dc685474c Mon Sep 17 00:00:00 2001 From: Richard Guy Briggs Date: Thu, 4 Jun 2020 09:20:49 -0400 Subject: audit: log nftables configuration change events iptables, ip6tables, arptables and ebtables table registration, replacement and unregistration configuration events are logged for the native (legacy) iptables setsockopt api, but not for the nftables netlink api which is used by the nft-variant of iptables in addition to nftables 
itself. Add calls to log the configuration actions in the nftables netlink api. This uses the same NETFILTER_CFG record format but overloads the table field. type=NETFILTER_CFG msg=audit(2020-05-28 17:46:41.878:162) : table=?:0;?:0 family=unspecified entries=2 op=nft_register_gen pid=396 subj=system_u:system_r:firewalld_t:s0 comm=firewalld ... type=NETFILTER_CFG msg=audit(2020-05-28 17:46:41.878:162) : table=firewalld:1;?:0 family=inet entries=0 op=nft_register_table pid=396 subj=system_u:system_r:firewalld_t:s0 comm=firewalld ... type=NETFILTER_CFG msg=audit(2020-05-28 17:46:41.911:163) : table=firewalld:1;filter_FORWARD:85 family=inet entries=8 op=nft_register_chain pid=396 subj=system_u:system_r:firewalld_t:s0 comm=firewalld ... type=NETFILTER_CFG msg=audit(2020-05-28 17:46:41.911:163) : table=firewalld:1;filter_FORWARD:85 family=inet entries=101 op=nft_register_rule pid=396 subj=system_u:system_r:firewalld_t:s0 comm=firewalld ... type=NETFILTER_CFG msg=audit(2020-05-28 17:46:41.911:163) : table=firewalld:1;__set0:87 family=inet entries=87 op=nft_register_setelem pid=396 subj=system_u:system_r:firewalld_t:s0 comm=firewalld ... type=NETFILTER_CFG msg=audit(2020-05-28 17:46:41.911:163) : table=firewalld:1;__set0:87 family=inet entries=0 op=nft_register_set pid=396 subj=system_u:system_r:firewalld_t:s0 comm=firewalld For further information please see issue https://github.com/linux-audit/audit-kernel/issues/124 Signed-off-by: Richard Guy Briggs Signed-off-by: Paul Moore --- include/linux/audit.h | 18 ++++++++ kernel/auditsc.c | 24 ++++++++-- net/netfilter/nf_tables_api.c | 103 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 142 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/audit.h b/include/linux/audit.h index 3fcd9ee49734..604ede630580 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -12,6 +12,7 @@ #include #include #include +#include #define AUDIT_INO_UNSET ((unsigned long)-1) #define AUDIT_DEV_UNSET ((dev_t)-1) @@ -98,6 +99,23 @@ enum audit_nfcfgop { AUDIT_XT_OP_REGISTER, AUDIT_XT_OP_REPLACE, AUDIT_XT_OP_UNREGISTER, + AUDIT_NFT_OP_TABLE_REGISTER, + AUDIT_NFT_OP_TABLE_UNREGISTER, + AUDIT_NFT_OP_CHAIN_REGISTER, + AUDIT_NFT_OP_CHAIN_UNREGISTER, + AUDIT_NFT_OP_RULE_REGISTER, + AUDIT_NFT_OP_RULE_UNREGISTER, + AUDIT_NFT_OP_SET_REGISTER, + AUDIT_NFT_OP_SET_UNREGISTER, + AUDIT_NFT_OP_SETELEM_REGISTER, + AUDIT_NFT_OP_SETELEM_UNREGISTER, + AUDIT_NFT_OP_GEN_REGISTER, + AUDIT_NFT_OP_OBJ_REGISTER, + AUDIT_NFT_OP_OBJ_UNREGISTER, + AUDIT_NFT_OP_OBJ_RESET, + AUDIT_NFT_OP_FLOWTABLE_REGISTER, + AUDIT_NFT_OP_FLOWTABLE_UNREGISTER, + AUDIT_NFT_OP_INVALID, }; extern int is_audit_feature_set(int which); diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 468a23390457..3a9100e95fda 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -75,6 +75,7 @@ #include #include #include +#include #include "audit.h" @@ -136,9 +137,26 @@ struct audit_nfcfgop_tab { }; static const struct audit_nfcfgop_tab audit_nfcfgs[] = { - { AUDIT_XT_OP_REGISTER, "register" }, - { AUDIT_XT_OP_REPLACE, "replace" }, - { AUDIT_XT_OP_UNREGISTER, "unregister" }, + { AUDIT_XT_OP_REGISTER, "xt_register" }, + { AUDIT_XT_OP_REPLACE, "xt_replace" }, + { AUDIT_XT_OP_UNREGISTER, "xt_unregister" }, + { AUDIT_NFT_OP_TABLE_REGISTER, "nft_register_table" }, + { AUDIT_NFT_OP_TABLE_UNREGISTER, "nft_unregister_table" }, + { AUDIT_NFT_OP_CHAIN_REGISTER, "nft_register_chain" }, + { AUDIT_NFT_OP_CHAIN_UNREGISTER, "nft_unregister_chain" }, + { AUDIT_NFT_OP_RULE_REGISTER, "nft_register_rule" }, 
+ { AUDIT_NFT_OP_RULE_UNREGISTER, "nft_unregister_rule" }, + { AUDIT_NFT_OP_SET_REGISTER, "nft_register_set" }, + { AUDIT_NFT_OP_SET_UNREGISTER, "nft_unregister_set" }, + { AUDIT_NFT_OP_SETELEM_REGISTER, "nft_register_setelem" }, + { AUDIT_NFT_OP_SETELEM_UNREGISTER, "nft_unregister_setelem" }, + { AUDIT_NFT_OP_GEN_REGISTER, "nft_register_gen" }, + { AUDIT_NFT_OP_OBJ_REGISTER, "nft_register_obj" }, + { AUDIT_NFT_OP_OBJ_UNREGISTER, "nft_unregister_obj" }, + { AUDIT_NFT_OP_OBJ_RESET, "nft_reset_obj" }, + { AUDIT_NFT_OP_FLOWTABLE_REGISTER, "nft_register_flowtable" }, + { AUDIT_NFT_OP_FLOWTABLE_UNREGISTER, "nft_unregister_flowtable" }, + { AUDIT_NFT_OP_INVALID, "nft_invalid" }, }; static int audit_match_perm(struct audit_context *ctx, int mask) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 073aa1051d43..164700273947 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -693,6 +694,16 @@ static void nf_tables_table_notify(const struct nft_ctx *ctx, int event) { struct sk_buff *skb; int err; + char *buf = kasprintf(GFP_KERNEL, "%s:%llu;?:0", + ctx->table->name, ctx->table->handle); + + audit_log_nfcfg(buf, + ctx->family, + ctx->table->use, + event == NFT_MSG_NEWTABLE ? + AUDIT_NFT_OP_TABLE_REGISTER : + AUDIT_NFT_OP_TABLE_UNREGISTER); + kfree(buf); if (!ctx->report && !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES)) @@ -1428,6 +1439,17 @@ static void nf_tables_chain_notify(const struct nft_ctx *ctx, int event) { struct sk_buff *skb; int err; + char *buf = kasprintf(GFP_KERNEL, "%s:%llu;%s:%llu", + ctx->table->name, ctx->table->handle, + ctx->chain->name, ctx->chain->handle); + + audit_log_nfcfg(buf, + ctx->family, + ctx->chain->use, + event == NFT_MSG_NEWCHAIN ? + AUDIT_NFT_OP_CHAIN_REGISTER : + AUDIT_NFT_OP_CHAIN_UNREGISTER); + kfree(buf); if (!ctx->report && !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES)) @@ -2693,6 +2715,17 @@ static void nf_tables_rule_notify(const struct nft_ctx *ctx, { struct sk_buff *skb; int err; + char *buf = kasprintf(GFP_KERNEL, "%s:%llu;%s:%llu", + ctx->table->name, ctx->table->handle, + ctx->chain->name, ctx->chain->handle); + + audit_log_nfcfg(buf, + ctx->family, + rule->handle, + event == NFT_MSG_NEWRULE ? + AUDIT_NFT_OP_RULE_REGISTER : + AUDIT_NFT_OP_RULE_UNREGISTER); + kfree(buf); if (!ctx->report && !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES)) @@ -3695,6 +3728,17 @@ static void nf_tables_set_notify(const struct nft_ctx *ctx, struct sk_buff *skb; u32 portid = ctx->portid; int err; + char *buf = kasprintf(gfp_flags, "%s:%llu;%s:%llu", + ctx->table->name, ctx->table->handle, + set->name, set->handle); + + audit_log_nfcfg(buf, + ctx->family, + set->field_count, + event == NFT_MSG_NEWSET ? + AUDIT_NFT_OP_SET_REGISTER : + AUDIT_NFT_OP_SET_UNREGISTER); + kfree(buf); if (!ctx->report && !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES)) @@ -4811,6 +4855,17 @@ static void nf_tables_setelem_notify(const struct nft_ctx *ctx, u32 portid = ctx->portid; struct sk_buff *skb; int err; + char *buf = kasprintf(GFP_KERNEL, "%s:%llu;%s:%llu", + ctx->table->name, ctx->table->handle, + set->name, set->handle); + + audit_log_nfcfg(buf, + ctx->family, + set->handle, + event == NFT_MSG_NEWSETELEM ? 
+ AUDIT_NFT_OP_SETELEM_REGISTER : + AUDIT_NFT_OP_SETELEM_UNREGISTER); + kfree(buf); if (!ctx->report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES)) return; @@ -5892,6 +5947,19 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb) obj->ops->type->type != filter->type) goto cont; + if (reset) { + char *buf = kasprintf(GFP_KERNEL, + "%s:%llu;?:0", + table->name, + table->handle); + + audit_log_nfcfg(buf, + family, + obj->handle, + AUDIT_NFT_OP_OBJ_RESET); + kfree(buf); + } + if (nf_tables_fill_obj_info(skb, net, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NFT_MSG_NEWOBJ, @@ -6002,6 +6070,17 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk, if (NFNL_MSG_TYPE(nlh->nlmsg_type) == NFT_MSG_GETOBJ_RESET) reset = true; + if (reset) { + char *buf = kasprintf(GFP_KERNEL, "%s:%llu;?:0", + table->name, table->handle); + + audit_log_nfcfg(buf, + family, + obj->handle, + AUDIT_NFT_OP_OBJ_RESET); + kfree(buf); + } + err = nf_tables_fill_obj_info(skb2, net, NETLINK_CB(skb).portid, nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0, family, table, obj, reset); @@ -6077,6 +6156,16 @@ void nft_obj_notify(struct net *net, const struct nft_table *table, { struct sk_buff *skb; int err; + char *buf = kasprintf(GFP_KERNEL, "%s:%llu;?:0", + table->name, table->handle); + + audit_log_nfcfg(buf, + family, + obj->handle, + event == NFT_MSG_NEWOBJ ? + AUDIT_NFT_OP_OBJ_REGISTER : + AUDIT_NFT_OP_OBJ_UNREGISTER); + kfree(buf); if (!report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES)) @@ -6856,6 +6945,17 @@ static void nf_tables_flowtable_notify(struct nft_ctx *ctx, { struct sk_buff *skb; int err; + char *buf = kasprintf(GFP_KERNEL, "%s:%llu;%s:%llu", + flowtable->table->name, flowtable->table->handle, + flowtable->name, flowtable->handle); + + audit_log_nfcfg(buf, + ctx->family, + flowtable->hooknum, + event == NFT_MSG_NEWFLOWTABLE ? + AUDIT_NFT_OP_FLOWTABLE_REGISTER : + AUDIT_NFT_OP_FLOWTABLE_UNREGISTER); + kfree(buf); if (ctx->report && !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES)) @@ -6977,6 +7077,9 @@ static void nf_tables_gen_notify(struct net *net, struct sk_buff *skb, struct sk_buff *skb2; int err; + audit_log_nfcfg("?:0;?:0", 0, net->nft.base_seq, + AUDIT_NFT_OP_GEN_REGISTER); + if (nlmsg_report(nlh) && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES)) return; -- cgit v1.2.3 From 243600ee660531d2b5b5ef3faab90c5f8ff4c2b6 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 23 Jun 2020 15:31:11 -0700 Subject: tcp: add declarations to avoid warnings Remove these errors: net/ipv6/tcp_ipv6.c:1550:29: warning: symbol 'tcp_v6_rcv' was not declared. Should it be static? net/ipv6/tcp_ipv6.c:1770:30: warning: symbol 'tcp_v6_early_demux' was not declared. Should it be static? net/ipv6/tcp_ipv6.c:1550:29: warning: no previous prototype for 'tcp_v6_rcv' [-Wmissing-prototypes] 1550 | INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb) | ^~~~~~~~~~ net/ipv6/tcp_ipv6.c:1770:30: warning: no previous prototype for 'tcp_v6_early_demux' [-Wmissing-prototypes] 1770 | INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb) | ^~~~~~~~~~~~~~~~~~ Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/tcp.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/net/tcp.h b/include/net/tcp.h index cd9cc348dbf9..a8c36fa886a4 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -934,6 +934,8 @@ static inline int tcp_v6_sdif(const struct sk_buff *skb) } INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)); +INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb)); +INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *skb)); #endif -- cgit v1.2.3 From b03d2142bea8cf7407a0a668ce8f5f115bd226c4 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 23 Jun 2020 15:31:12 -0700 Subject: tcp: move ipv6_specific declaration to remove a warning ipv6_specific should be declared in tcp include files, not mptcp. This removes the following warning : CHECK net/ipv6/tcp_ipv6.c net/ipv6/tcp_ipv6.c:78:42: warning: symbol 'ipv6_specific' was not declared. Should it be static? Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/tcp.h | 2 ++ net/mptcp/protocol.h | 3 --- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/net/tcp.h b/include/net/tcp.h index a8c36fa886a4..e6920ae0765c 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -933,6 +933,8 @@ static inline int tcp_v6_sdif(const struct sk_buff *skb) return 0; } +extern const struct inet_connection_sock_af_ops ipv6_specific; + INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)); INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb)); INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *skb)); diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index db56535dfc29..d4294b6d23e4 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -355,9 +355,6 @@ static inline void mptcp_subflow_tcp_fallback(struct sock *sk, } extern const struct inet_connection_sock_af_ops ipv4_specific; -#if IS_ENABLED(CONFIG_MPTCP_IPV6) -extern const struct inet_connection_sock_af_ops ipv6_specific; -#endif void mptcp_proto_init(void); #if IS_ENABLED(CONFIG_MPTCP_IPV6) -- cgit v1.2.3 From 9b9e2f250e3e6f59ad07e6d03838c27a100e0042 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 23 Jun 2020 15:31:13 -0700 Subject: tcp: move ipv4_specific to tcp include file Declare ipv4_specific once, in tcp.h where it belongs. Signed-off-by: Eric Dumazet Signed-off-by: David S.
Miller --- include/net/tcp.h | 2 ++ include/net/transp_v6.h | 3 --- net/mptcp/protocol.h | 2 -- 3 files changed, 2 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/net/tcp.h b/include/net/tcp.h index e6920ae0765c..b0f0f93c681c 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -907,6 +907,8 @@ static inline void tcp_skb_bpf_redirect_clear(struct sk_buff *skb) TCP_SKB_CB(skb)->bpf.sk_redir = NULL; } +extern const struct inet_connection_sock_af_ops ipv4_specific; + #if IS_ENABLED(CONFIG_IPV6) /* This is the variant of inet6_iif() that must be used by TCP, * as TCP moves IP6CB into a different location in skb->cb[] diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h index a8f6020f1196..da06613c9603 100644 --- a/include/net/transp_v6.h +++ b/include/net/transp_v6.h @@ -56,9 +56,6 @@ ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, __u16 srcp, #define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006) -/* address family specific functions */ -extern const struct inet_connection_sock_af_ops ipv4_specific; - void inet6_destroy_sock(struct sock *sk); #define IPV6_SEQ_DGRAM_HEADER \ diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index d4294b6d23e4..06661781c9af 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -354,8 +354,6 @@ static inline void mptcp_subflow_tcp_fallback(struct sock *sk, inet_csk(sk)->icsk_af_ops = ctx->icsk_af_ops; } -extern const struct inet_connection_sock_af_ops ipv4_specific; - void mptcp_proto_init(void); #if IS_ENABLED(CONFIG_MPTCP_IPV6) int mptcp_proto_v6_init(void); -- cgit v1.2.3 From 5521d95e076238f1615cf1cdb135f318ef798b49 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 23 Jun 2020 15:31:14 -0700 Subject: net: move tcp gro declarations to net/tcp.h This patch removes following (C=1 W=1) warnings for CONFIG_RETPOLINE=y : net/ipv4/tcp_offload.c:306:16: warning: symbol 'tcp4_gro_receive' was not declared. Should it be static? net/ipv4/tcp_offload.c:306:17: warning: no previous prototype for 'tcp4_gro_receive' [-Wmissing-prototypes] net/ipv4/tcp_offload.c:319:29: warning: symbol 'tcp4_gro_complete' was not declared. Should it be static? net/ipv4/tcp_offload.c:319:29: warning: no previous prototype for 'tcp4_gro_complete' [-Wmissing-prototypes] CHECK net/ipv6/tcpv6_offload.c net/ipv6/tcpv6_offload.c:16:16: warning: symbol 'tcp6_gro_receive' was not declared. Should it be static? net/ipv6/tcpv6_offload.c:29:29: warning: symbol 'tcp6_gro_complete' was not declared. Should it be static? CC net/ipv6/tcpv6_offload.o net/ipv6/tcpv6_offload.c:16:17: warning: no previous prototype for 'tcp6_gro_receive' [-Wmissing-prototypes] 16 | struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb) | ^~~~~~~~~~~~~~~~ net/ipv6/tcpv6_offload.c:29:29: warning: no previous prototype for 'tcp6_gro_complete' [-Wmissing-prototypes] 29 | INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff) | ^~~~~~~~~~~~~~~~~ Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/tcp.h | 4 ++++ net/ipv4/af_inet.c | 3 --- net/ipv6/ip6_offload.c | 4 +--- 3 files changed, 5 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/net/tcp.h b/include/net/tcp.h index b0f0f93c681c..27f848ab3995 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1957,6 +1957,10 @@ void tcp_v4_destroy_sock(struct sock *sk); struct sk_buff *tcp_gso_segment(struct sk_buff *skb, netdev_features_t features); struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb); +INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff)); +INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)); +INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff)); +INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)); int tcp_gro_complete(struct sk_buff *skb); void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr); diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 02aa5cb3a4fd..d8dbff1dd1fa 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1432,8 +1432,6 @@ static struct sk_buff *ipip_gso_segment(struct sk_buff *skb, return inet_gso_segment(skb, features); } -INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *, - struct sk_buff *)); INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *, struct sk_buff *)); struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb) @@ -1608,7 +1606,6 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) return -EINVAL; } -INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *, int)); INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int)); int inet_gro_complete(struct sk_buff *skb, int nhoff) { diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 7fbb44736a34..78eec5b42385 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "ip6_offload.h" @@ -177,8 +178,6 @@ static int ipv6_exthdrs_len(struct ipv6hdr *iph, return len; } -INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *, - struct sk_buff *)); INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *, struct sk_buff *)); INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head, @@ -319,7 +318,6 @@ static struct sk_buff *ip4ip6_gro_receive(struct list_head *head, return inet_gro_receive(head, skb); } -INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *, int)); INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int)); INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff) { -- cgit v1.2.3 From 6db693285cd109e566c553694002dea769612b24 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 23 Jun 2020 15:31:15 -0700 Subject: udp: move gro declarations to net/udp.h This removes following warnings : CC net/ipv4/udp_offload.o net/ipv4/udp_offload.c:504:17: warning: no previous prototype for 'udp4_gro_receive' [-Wmissing-prototypes] 504 | struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb) | ^~~~~~~~~~~~~~~~ net/ipv4/udp_offload.c:584:29: warning: no previous prototype for 'udp4_gro_complete' [-Wmissing-prototypes] 584 | INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff) | ^~~~~~~~~~~~~~~~~ CHECK 
net/ipv6/udp_offload.c net/ipv6/udp_offload.c:115:16: warning: symbol 'udp6_gro_receive' was not declared. Should it be static? net/ipv6/udp_offload.c:148:29: warning: symbol 'udp6_gro_complete' was not declared. Should it be static? CC net/ipv6/udp_offload.o net/ipv6/udp_offload.c:115:17: warning: no previous prototype for 'udp6_gro_receive' [-Wmissing-prototypes] 115 | struct sk_buff *udp6_gro_receive(struct list_head *head, struct sk_buff *skb) | ^~~~~~~~~~~~~~~~ net/ipv6/udp_offload.c:148:29: warning: no previous prototype for 'udp6_gro_complete' [-Wmissing-prototypes] 148 | INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff) | ^~~~~~~~~~~~~~~~~ Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/udp.h | 7 +++++++ net/ipv4/af_inet.c | 3 --- net/ipv6/ip6_offload.c | 4 +--- 3 files changed, 8 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/net/udp.h b/include/net/udp.h index a8fa6c0c6ded..5a2d677432f0 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -27,6 +27,7 @@ #include #include #include +#include /** * struct udp_skb_cb - UDP(-Lite) private variables @@ -166,6 +167,12 @@ static inline void udp_csum_pull_header(struct sk_buff *skb) typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport, __be16 dport); +INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *, + struct sk_buff *)); +INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int)); +INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *, + struct sk_buff *)); +INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int)); struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb, struct udphdr *uh, struct sock *sk); int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup); diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index d8dbff1dd1fa..ea6ed6d487ed 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1432,8 +1432,6 @@ static struct sk_buff *ipip_gso_segment(struct sk_buff *skb, return inet_gso_segment(skb, features); } -INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *, - struct sk_buff *)); struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb) { const struct net_offload *ops; @@ -1606,7 +1604,6 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) return -EINVAL; } -INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int)); int inet_gro_complete(struct sk_buff *skb, int nhoff) { __be16 newlen = htons(skb->len - nhoff); diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 78eec5b42385..a80f90bf3ae7 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -14,6 +14,7 @@ #include #include #include +#include #include "ip6_offload.h" @@ -178,8 +179,6 @@ static int ipv6_exthdrs_len(struct ipv6hdr *iph, return len; } -INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *, - struct sk_buff *)); INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head, struct sk_buff *skb) { @@ -318,7 +317,6 @@ static struct sk_buff *ip4ip6_gro_receive(struct list_head *head, return inet_gro_receive(head, skb); } -INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int)); INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff) { const struct net_offload *ops; -- cgit v1.2.3 From 6f3934576853a4fa60dea74ac8822f0f016ef9e8 Mon Sep 17 
00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 22 Jun 2020 18:07:41 -0500 Subject: net: ipv6: Use struct_size() helper and kcalloc() Make use of the struct_size() helper instead of an open-coded version in order to avoid any potential type mistakes. Also, remove unnecessary function ipv6_rpl_srh_alloc_size() and replace kzalloc() with kcalloc(), which has a 2-factor argument form for multiplication. This code was detected with the help of Coccinelle, and audited and fixed manually. Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- include/net/rpl.h | 6 ------ net/ipv6/exthdrs.c | 2 +- net/ipv6/rpl_iptunnel.c | 3 +-- 3 files changed, 2 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/net/rpl.h b/include/net/rpl.h index dceff60e8baf..308ef0a05cae 100644 --- a/include/net/rpl.h +++ b/include/net/rpl.h @@ -26,12 +26,6 @@ static inline void rpl_exit(void) {} /* Worst decompression memory usage ipv6 address (16) + pad 7 */ #define IPV6_RPL_SRH_WORST_SWAP_SIZE (sizeof(struct in6_addr) + 7) -static inline size_t ipv6_rpl_srh_alloc_size(unsigned char n) -{ - return sizeof(struct ipv6_rpl_sr_hdr) + - ((n + 1) * sizeof(struct in6_addr)); -} - size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri, unsigned char cmpre); diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 5a8bbcdcaf2b..e9b366994475 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -580,7 +580,7 @@ looped_back: hdr->segments_left--; i = n - hdr->segments_left; - buf = kzalloc(ipv6_rpl_srh_alloc_size(n + 1) * 2, GFP_ATOMIC); + buf = kcalloc(struct_size(hdr, segments.addr, n + 2), 2, GFP_ATOMIC); if (unlikely(!buf)) { kfree_skb(skb); return -1; diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c index c3ececd7cfc1..5fdf3ebb953f 100644 --- a/net/ipv6/rpl_iptunnel.c +++ b/net/ipv6/rpl_iptunnel.c @@ -136,8 +136,7 @@ static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt, oldhdr = ipv6_hdr(skb); - buf = kzalloc(ipv6_rpl_srh_alloc_size(srh->segments_left - 1) * 2, - GFP_ATOMIC); + buf = kcalloc(struct_size(srh, segments.addr, srh->segments_left), 2, GFP_ATOMIC); if (!buf) return -ENOMEM; -- cgit v1.2.3 From 0cc8fecf041d3e5285380da62cc6662bdc942d8c Mon Sep 17 00:00:00 2001 From: Jeremy Linton Date: Mon, 22 Jun 2020 20:35:32 +0530 Subject: net: phy: Allow mdio buses to auto-probe c45 devices The mdiobus_scan logic is currently hardcoded to only work with c22 devices. This works fairly well in most cases, but it's possible that a c45 device doesn't respond despite being a standard phy. If the parent hardware is capable, it makes sense to scan for c22 devices before falling back to c45. As we want this to reflect the capabilities of the STA, let's add a field to the mii_bus structure to represent the capability. That way devices can opt into the extended scanning. Existing users should continue to default to c22-only scanning as long as they are zeroing the structure before use. Signed-off-by: Jeremy Linton Signed-off-by: Calvin Johnson Signed-off-by: David S.
Miller --- drivers/net/phy/mdio_bus.c | 18 ++++++++++++++++-- include/linux/phy.h | 8 ++++++++ 2 files changed, 24 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 6ceee82b2839..ab9233c558d8 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -739,10 +739,24 @@ EXPORT_SYMBOL(mdiobus_free); */ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr) { - struct phy_device *phydev; + struct phy_device *phydev = ERR_PTR(-ENODEV); int err; - phydev = get_phy_device(bus, addr, false); + switch (bus->probe_capabilities) { + case MDIOBUS_NO_CAP: + case MDIOBUS_C22: + phydev = get_phy_device(bus, addr, false); + break; + case MDIOBUS_C45: + phydev = get_phy_device(bus, addr, true); + break; + case MDIOBUS_C22_C45: + phydev = get_phy_device(bus, addr, false); + if (IS_ERR(phydev)) + phydev = get_phy_device(bus, addr, true); + break; + } + if (IS_ERR(phydev)) return phydev; diff --git a/include/linux/phy.h b/include/linux/phy.h index 9248dd2ce4ca..7860d56c6bf5 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -298,6 +298,14 @@ struct mii_bus { /* RESET GPIO descriptor pointer */ struct gpio_desc *reset_gpiod; + /* bus capabilities, used for probing */ + enum { + MDIOBUS_NO_CAP = 0, + MDIOBUS_C22, + MDIOBUS_C45, + MDIOBUS_C22_C45, + } probe_capabilities; + /* protect access to the shared element */ struct mutex shared_lock; -- cgit v1.2.3 From 428d2459cceb77357b81c242ca22462a6a904817 Mon Sep 17 00:00:00 2001 From: Petr Vaněk Date: Sat, 30 May 2020 14:39:12 +0200 Subject: xfrm: introduce oseq-may-wrap flag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit RFC 4303 in section 3.3.3 suggests to disable anti-replay for manually distributed ICVs in which case the sender does not need to monitor or reset the counter. However, the sender still increments the counter and when it reaches the maximum value, the counter rolls over back to zero. This patch introduces new extra_flag XFRM_SA_XFLAG_OSEQ_MAY_WRAP which allows sequence number to cycle in outbound packets if set. This flag is used only in legacy and bmp code, because esn should not be negotiated if anti-replay is disabled (see note in 3.3.3 section). 
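As a reading aid only (this is illustrative, stand-alone user-space code, not part of the patch; the flag name and its value come from the uapi hunk below), the effect on the sender-side counter can be modelled as:

    #include <stdio.h>
    #include <stdint.h>

    #define XFRM_SA_XFLAG_OSEQ_MAY_WRAP 2   /* value added by this patch */

    static int next_oseq(uint32_t *oseq, uint32_t extra_flags)
    {
            (*oseq)++;                      /* the sender always increments   */
            if (*oseq == 0 &&               /* 32-bit counter rolled over     */
                !(extra_flags & XFRM_SA_XFLAG_OSEQ_MAY_WRAP)) {
                    (*oseq)--;              /* kernel rolls back and audits   */
                    return -1;              /* -EOVERFLOW                     */
            }
            return 0;                       /* wrap tolerated, keep sending   */
    }

    int main(void)
    {
            uint32_t oseq = UINT32_MAX;     /* one packet away from rollover  */
            int ret = next_oseq(&oseq, 0);
            printf("strict:   ret=%d oseq=%u\n", ret, oseq);
            oseq = UINT32_MAX;
            ret = next_oseq(&oseq, XFRM_SA_XFLAG_OSEQ_MAY_WRAP);
            printf("may-wrap: ret=%d oseq=%u\n", ret, oseq);
            return 0;
    }

With the flag set the counter simply rolls over to zero and transmission continues, which is the behaviour RFC 4303 permits for manually keyed SAs.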
Signed-off-by: Petr Vaněk Acked-by: Christophe Gouault Signed-off-by: Steffen Klassert --- include/uapi/linux/xfrm.h | 1 + net/xfrm/xfrm_replay.c | 12 ++++++++---- 2 files changed, 9 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h index ff7cfdc6cb44..ffc6a5391bb7 100644 --- a/include/uapi/linux/xfrm.h +++ b/include/uapi/linux/xfrm.h @@ -387,6 +387,7 @@ struct xfrm_usersa_info { }; #define XFRM_SA_XFLAG_DONT_ENCAP_DSCP 1 +#define XFRM_SA_XFLAG_OSEQ_MAY_WRAP 2 struct xfrm_usersa_id { xfrm_address_t daddr; diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index 98943f8d01aa..c6a4338a0d08 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c @@ -89,7 +89,8 @@ static int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb) if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { XFRM_SKB_CB(skb)->seq.output.low = ++x->replay.oseq; XFRM_SKB_CB(skb)->seq.output.hi = 0; - if (unlikely(x->replay.oseq == 0)) { + if (unlikely(x->replay.oseq == 0) && + !(x->props.extra_flags & XFRM_SA_XFLAG_OSEQ_MAY_WRAP)) { x->replay.oseq--; xfrm_audit_state_replay_overflow(x, skb); err = -EOVERFLOW; @@ -168,7 +169,8 @@ static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb) if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq; XFRM_SKB_CB(skb)->seq.output.hi = 0; - if (unlikely(replay_esn->oseq == 0)) { + if (unlikely(replay_esn->oseq == 0) && + !(x->props.extra_flags & XFRM_SA_XFLAG_OSEQ_MAY_WRAP)) { replay_esn->oseq--; xfrm_audit_state_replay_overflow(x, skb); err = -EOVERFLOW; @@ -572,7 +574,8 @@ static int xfrm_replay_overflow_offload(struct xfrm_state *x, struct sk_buff *sk XFRM_SKB_CB(skb)->seq.output.hi = 0; xo->seq.hi = 0; - if (unlikely(oseq < x->replay.oseq)) { + if (unlikely(oseq < x->replay.oseq) && + !(x->props.extra_flags & XFRM_SA_XFLAG_OSEQ_MAY_WRAP)) { xfrm_audit_state_replay_overflow(x, skb); err = -EOVERFLOW; @@ -611,7 +614,8 @@ static int xfrm_replay_overflow_offload_bmp(struct xfrm_state *x, struct sk_buff XFRM_SKB_CB(skb)->seq.output.hi = 0; xo->seq.hi = 0; - if (unlikely(oseq < replay_esn->oseq)) { + if (unlikely(oseq < replay_esn->oseq) && + !(x->props.extra_flags & XFRM_SA_XFLAG_OSEQ_MAY_WRAP)) { xfrm_audit_state_replay_overflow(x, skb); err = -EOVERFLOW; -- cgit v1.2.3 From 65959522f8060659e308977f09f3eb7b7af5e43f Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Tue, 23 Jun 2020 14:30:40 +0300 Subject: RDMA: Add support to dump resource tracker in RAW format Add support to get resource dump in raw format. It enables drivers to return the entire device-specific QP/CQ/MR context without a need from the driver to set each field separately. The raw query returns only the device-specific data; general data is still returned by using the existing queries.
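Drivers opt in by filling the new fill_res_*_entry_raw callbacks in their ib_device_ops. As a rough, hypothetical sketch (the "foo" driver, struct foo_qp, to_foo_qp() and its hw_ctx field are invented for illustration; only the callback signature and RDMA_NLDEV_ATTR_RES_RAW come from this patch), the QP variant could look like:

    static int foo_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ibqp)
    {
            struct foo_qp *qp = to_foo_qp(ibqp);    /* hypothetical container_of() wrapper */

            /* The whole device-specific context goes out as one opaque binary blob;
             * the generic fields are still emitted by the nldev core as before. */
            return nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW,
                           sizeof(qp->hw_ctx), &qp->hw_ctx);
    }

The raw (-r) query shown in the example below then prints that blob as a byte array.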
Example: $ rdma res show mr dev mlx5_1 mrn 2 -r -j [{"ifindex":7,"ifname":"mlx5_1", "data":[0,4,255,254,0,0,0,0,0,0,0,0,16,28,0,216,...]}] Link: https://lore.kernel.org/r/20200623113043.1228482-9-leon@kernel.org Signed-off-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/device.c | 3 + drivers/infiniband/core/nldev.c | 180 +++++++++++++++++++++++++-------------- include/rdma/ib_verbs.h | 3 + include/uapi/rdma/rdma_netlink.h | 8 ++ 4 files changed, 132 insertions(+), 62 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index cbe95e729cf1..1335ed1f1e4a 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -2619,8 +2619,11 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops) SET_DEVICE_OP(dev_ops, enable_driver); SET_DEVICE_OP(dev_ops, fill_res_cm_id_entry); SET_DEVICE_OP(dev_ops, fill_res_cq_entry); + SET_DEVICE_OP(dev_ops, fill_res_cq_entry_raw); SET_DEVICE_OP(dev_ops, fill_res_mr_entry); + SET_DEVICE_OP(dev_ops, fill_res_mr_entry_raw); SET_DEVICE_OP(dev_ops, fill_res_qp_entry); + SET_DEVICE_OP(dev_ops, fill_res_qp_entry_raw); SET_DEVICE_OP(dev_ops, fill_stat_mr_entry); SET_DEVICE_OP(dev_ops, get_dev_fw_str); SET_DEVICE_OP(dev_ops, get_dma_mr); diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 394e307c342c..1051b5622b62 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -114,6 +114,7 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = { [RDMA_NLDEV_ATTR_RES_PS] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_QP] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_QP_ENTRY] = { .type = NLA_NESTED }, + [RDMA_NLDEV_ATTR_RES_RAW] = { .type = NLA_BINARY }, [RDMA_NLDEV_ATTR_RES_RKEY] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_RQPN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_RQ_PSN] = { .type = NLA_U32 }, @@ -446,11 +447,11 @@ static int fill_res_name_pid(struct sk_buff *msg, return err ? 
-EMSGSIZE : 0; } -static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, - struct rdma_restrack_entry *res, uint32_t port) +static int fill_res_qp_entry_query(struct sk_buff *msg, + struct rdma_restrack_entry *res, + struct ib_device *dev, + struct ib_qp *qp) { - struct ib_qp *qp = container_of(res, struct ib_qp, res); - struct ib_device *dev = qp->device; struct ib_qp_init_attr qp_init_attr; struct ib_qp_attr qp_attr; int ret; @@ -459,16 +460,6 @@ static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, if (ret) return ret; - if (port && port != qp_attr.port_num) - return -EAGAIN; - - /* In create_qp() port is not set yet */ - if (qp_attr.port_num && - nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp_attr.port_num)) - goto err; - - if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num)) - goto err; if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN, qp_attr.dest_qp_num)) @@ -492,13 +483,6 @@ static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state)) goto err; - if (!rdma_is_kernel_res(res) && - nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id)) - goto err; - - if (fill_res_name_pid(msg, res)) - goto err; - if (dev->ops.fill_res_qp_entry) return dev->ops.fill_res_qp_entry(msg, qp); return 0; @@ -506,6 +490,48 @@ static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, err: return -EMSGSIZE; } +static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, + struct rdma_restrack_entry *res, uint32_t port) +{ + struct ib_qp *qp = container_of(res, struct ib_qp, res); + struct ib_device *dev = qp->device; + int ret; + + if (port && port != qp->port) + return -EAGAIN; + + /* In create_qp() port is not set yet */ + if (qp->port && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp->port)) + return -EINVAL; + + ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num); + if (ret) + return -EMSGSIZE; + + if (!rdma_is_kernel_res(res) && + nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id)) + return -EMSGSIZE; + + ret = fill_res_name_pid(msg, res); + if (ret) + return -EMSGSIZE; + + return fill_res_qp_entry_query(msg, res, dev, qp); +} + +static int fill_res_qp_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, + struct rdma_restrack_entry *res, uint32_t port) +{ + struct ib_qp *qp = container_of(res, struct ib_qp, res); + struct ib_device *dev = qp->device; + + if (port && port != qp->port) + return -EAGAIN; + if (!dev->ops.fill_res_qp_entry_raw) + return -EINVAL; + return dev->ops.fill_res_qp_entry_raw(msg, qp); +} + static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { @@ -565,34 +591,42 @@ static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin, struct ib_device *dev = cq->device; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe)) - goto err; + return -EMSGSIZE; if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD)) - goto err; + return -EMSGSIZE; /* Poll context is only valid for kernel CQs */ if (rdma_is_kernel_res(res) && nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx)) - goto err; + return -EMSGSIZE; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, (cq->dim != NULL))) - goto err; + return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, res->id)) - goto err; + return -EMSGSIZE; if (!rdma_is_kernel_res(res) && 
nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, cq->uobject->uevent.uobject.context->res.id)) - goto err; + return -EMSGSIZE; if (fill_res_name_pid(msg, res)) - goto err; + return -EMSGSIZE; - if (dev->ops.fill_res_cq_entry) - return dev->ops.fill_res_cq_entry(msg, cq); - return 0; + return (dev->ops.fill_res_cq_entry) ? + dev->ops.fill_res_cq_entry(msg, cq) : 0; +} -err: return -EMSGSIZE; +static int fill_res_cq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, + struct rdma_restrack_entry *res, uint32_t port) +{ + struct ib_cq *cq = container_of(res, struct ib_cq, res); + struct ib_device *dev = cq->device; + + if (!dev->ops.fill_res_cq_entry_raw) + return -EINVAL; + return dev->ops.fill_res_cq_entry_raw(msg, cq); } static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, @@ -603,30 +637,39 @@ static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, if (has_cap_net_admin) { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey)) - goto err; + return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey)) - goto err; + return -EMSGSIZE; } if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length, RDMA_NLDEV_ATTR_PAD)) - goto err; + return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id)) - goto err; + return -EMSGSIZE; if (!rdma_is_kernel_res(res) && nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, mr->pd->res.id)) - goto err; + return -EMSGSIZE; if (fill_res_name_pid(msg, res)) - goto err; + return -EMSGSIZE; - if (dev->ops.fill_res_mr_entry) - return dev->ops.fill_res_mr_entry(msg, mr); - return 0; + return (dev->ops.fill_res_mr_entry) ? + dev->ops.fill_res_mr_entry(msg, mr) : + 0; +} -err: return -EMSGSIZE; +static int fill_res_mr_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, + struct rdma_restrack_entry *res, uint32_t port) +{ + struct ib_mr *mr = container_of(res, struct ib_mr, res); + struct ib_device *dev = mr->pd->device; + + if (!dev->ops.fill_res_mr_entry_raw) + return -EINVAL; + return dev->ops.fill_res_mr_entry_raw(msg, mr); } static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin, @@ -1149,7 +1192,6 @@ static int nldev_res_get_dumpit(struct sk_buff *skb, struct nldev_fill_res_entry { enum rdma_nldev_attr nldev_attr; - enum rdma_nldev_command nldev_cmd; u8 flags; u32 entry; u32 id; @@ -1161,40 +1203,34 @@ enum nldev_res_flags { static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = { [RDMA_RESTRACK_QP] = { - .nldev_cmd = RDMA_NLDEV_CMD_RES_QP_GET, .nldev_attr = RDMA_NLDEV_ATTR_RES_QP, .entry = RDMA_NLDEV_ATTR_RES_QP_ENTRY, .id = RDMA_NLDEV_ATTR_RES_LQPN, }, [RDMA_RESTRACK_CM_ID] = { - .nldev_cmd = RDMA_NLDEV_CMD_RES_CM_ID_GET, .nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID, .entry = RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY, .id = RDMA_NLDEV_ATTR_RES_CM_IDN, }, [RDMA_RESTRACK_CQ] = { - .nldev_cmd = RDMA_NLDEV_CMD_RES_CQ_GET, .nldev_attr = RDMA_NLDEV_ATTR_RES_CQ, .flags = NLDEV_PER_DEV, .entry = RDMA_NLDEV_ATTR_RES_CQ_ENTRY, .id = RDMA_NLDEV_ATTR_RES_CQN, }, [RDMA_RESTRACK_MR] = { - .nldev_cmd = RDMA_NLDEV_CMD_RES_MR_GET, .nldev_attr = RDMA_NLDEV_ATTR_RES_MR, .flags = NLDEV_PER_DEV, .entry = RDMA_NLDEV_ATTR_RES_MR_ENTRY, .id = RDMA_NLDEV_ATTR_RES_MRN, }, [RDMA_RESTRACK_PD] = { - .nldev_cmd = RDMA_NLDEV_CMD_RES_PD_GET, .nldev_attr = RDMA_NLDEV_ATTR_RES_PD, .flags = NLDEV_PER_DEV, .entry = RDMA_NLDEV_ATTR_RES_PD_ENTRY, .id = RDMA_NLDEV_ATTR_RES_PDN, }, [RDMA_RESTRACK_COUNTER] = { - .nldev_cmd = RDMA_NLDEV_CMD_STAT_GET, .nldev_attr = RDMA_NLDEV_ATTR_STAT_COUNTER, .entry = 
RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY, .id = RDMA_NLDEV_ATTR_STAT_COUNTER_ID, @@ -1253,7 +1289,8 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh, } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, - RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd), + RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, + RDMA_NL_GET_OP(nlh->nlmsg_type)), 0, 0); if (fill_nldev_handle(msg, device)) { @@ -1331,7 +1368,8 @@ static int res_get_common_dumpit(struct sk_buff *skb, } nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, - RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd), + RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, + RDMA_NL_GET_OP(cb->nlh->nlmsg_type)), 0, NLM_F_MULTI); if (fill_nldev_handle(skb, device)) { @@ -1413,26 +1451,29 @@ err_index: return ret; } -#define RES_GET_FUNCS(name, type) \ - static int nldev_res_get_##name##_dumpit(struct sk_buff *skb, \ +#define RES_GET_FUNCS(name, type) \ + static int nldev_res_get_##name##_dumpit(struct sk_buff *skb, \ struct netlink_callback *cb) \ - { \ - return res_get_common_dumpit(skb, cb, type, \ - fill_res_##name##_entry); \ - } \ - static int nldev_res_get_##name##_doit(struct sk_buff *skb, \ - struct nlmsghdr *nlh, \ + { \ + return res_get_common_dumpit(skb, cb, type, \ + fill_res_##name##_entry); \ + } \ + static int nldev_res_get_##name##_doit(struct sk_buff *skb, \ + struct nlmsghdr *nlh, \ struct netlink_ext_ack *extack) \ - { \ - return res_get_common_doit(skb, nlh, extack, type, \ - fill_res_##name##_entry); \ + { \ + return res_get_common_doit(skb, nlh, extack, type, \ + fill_res_##name##_entry); \ } RES_GET_FUNCS(qp, RDMA_RESTRACK_QP); +RES_GET_FUNCS(qp_raw, RDMA_RESTRACK_QP); RES_GET_FUNCS(cm_id, RDMA_RESTRACK_CM_ID); RES_GET_FUNCS(cq, RDMA_RESTRACK_CQ); +RES_GET_FUNCS(cq_raw, RDMA_RESTRACK_CQ); RES_GET_FUNCS(pd, RDMA_RESTRACK_PD); RES_GET_FUNCS(mr, RDMA_RESTRACK_MR); +RES_GET_FUNCS(mr_raw, RDMA_RESTRACK_MR); RES_GET_FUNCS(counter, RDMA_RESTRACK_COUNTER); static LIST_HEAD(link_ops); @@ -2117,6 +2158,21 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = { .doit = nldev_stat_del_doit, .flags = RDMA_NL_ADMIN_PERM, }, + [RDMA_NLDEV_CMD_RES_QP_GET_RAW] = { + .doit = nldev_res_get_qp_raw_doit, + .dump = nldev_res_get_qp_raw_dumpit, + .flags = RDMA_NL_ADMIN_PERM, + }, + [RDMA_NLDEV_CMD_RES_CQ_GET_RAW] = { + .doit = nldev_res_get_cq_raw_doit, + .dump = nldev_res_get_cq_raw_dumpit, + .flags = RDMA_NL_ADMIN_PERM, + }, + [RDMA_NLDEV_CMD_RES_MR_GET_RAW] = { + .doit = nldev_res_get_mr_raw_doit, + .dump = nldev_res_get_mr_raw_dumpit, + .flags = RDMA_NL_ADMIN_PERM, + }, }; void __init nldev_init(void) diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 9127cffafccd..77106ff3cd26 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2583,8 +2583,11 @@ struct ib_device_ops { * Allows rdma drivers to add their own restrack attributes. 
*/ int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr); + int (*fill_res_mr_entry_raw)(struct sk_buff *msg, struct ib_mr *ibmr); int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq); + int (*fill_res_cq_entry_raw)(struct sk_buff *msg, struct ib_cq *ibcq); int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp); + int (*fill_res_qp_entry_raw)(struct sk_buff *msg, struct ib_qp *ibqp); int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id); /* Device lifecycle callbacks */ diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h index 8e277783fa96..3826143d420d 100644 --- a/include/uapi/rdma/rdma_netlink.h +++ b/include/uapi/rdma/rdma_netlink.h @@ -287,6 +287,12 @@ enum rdma_nldev_command { RDMA_NLDEV_CMD_STAT_DEL, + RDMA_NLDEV_CMD_RES_QP_GET_RAW, + + RDMA_NLDEV_CMD_RES_CQ_GET_RAW, + + RDMA_NLDEV_CMD_RES_MR_GET_RAW, + RDMA_NLDEV_NUM_OPS }; @@ -525,6 +531,8 @@ enum rdma_nldev_attr { */ RDMA_NLDEV_ATTR_DEV_DIM, /* u8 */ + RDMA_NLDEV_ATTR_RES_RAW, /* binary */ + /* * Always the end */ -- cgit v1.2.3 From 62fb45d317c5fa08e4db093441835bb6f33acbd7 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Thu, 18 Jun 2020 16:42:06 +0200 Subject: USB: ch9: add "USB_" prefix in front of TEST defines For some reason, the TEST_ defines in the usb/ch9.h files did not have the USB_ prefix on it, making it a bit confusing when reading the file, as well as not the nicest thing to do in a uapi file. So fix that up and add the USB_ prefix on to them, and fix up all in-kernel usages. This included deleting the duplicate copy in the net2272.h file. Cc: Felipe Balbi Cc: Michal Simek Cc: Mathias Nyman Cc: Pawel Laszczak Cc: YueHaibing Cc: Nathan Chancellor Cc: Jason Yan Cc: Jia-Ju Bai Cc: Stephen Boyd Cc: Christophe JAILLET Cc: Arnd Bergmann Cc: Jules Irenge Cc: Alan Stern Cc: Thinh Nguyen Cc: Rob Gill Cc: Macpaul Lin Acked-by: Minas Harutyunyan Acked-by: Bin Liu Acked-by: Chunfeng Yun Acked-by: Peter Chen Link: https://lore.kernel.org/r/20200618144206.2655890-1-gregkh@linuxfoundation.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/cdns3/ep0.c | 8 ++++---- drivers/usb/chipidea/udc.c | 10 +++++----- drivers/usb/common/debug.c | 10 +++++----- drivers/usb/dwc2/debugfs.c | 20 ++++++++++---------- drivers/usb/dwc2/gadget.c | 10 +++++----- drivers/usb/dwc3/debugfs.c | 20 ++++++++++---------- drivers/usb/dwc3/ep0.c | 10 +++++----- drivers/usb/dwc3/gadget.c | 10 +++++----- drivers/usb/gadget/udc/bdc/bdc_ep.c | 10 +++++----- drivers/usb/gadget/udc/gr_udc.c | 4 ++-- drivers/usb/gadget/udc/mv_udc_core.c | 2 +- drivers/usb/gadget/udc/net2272.c | 2 +- drivers/usb/gadget/udc/net2272.h | 5 ----- drivers/usb/gadget/udc/udc-xilinx.c | 4 ++-- drivers/usb/host/xhci-hub.c | 7 ++++--- drivers/usb/misc/ehset.c | 8 ++++---- drivers/usb/mtu3/mtu3_gadget_ep0.c | 16 ++++++++-------- drivers/usb/musb/musb_gadget_ep0.c | 20 ++++++++------------ drivers/usb/musb/musb_virthub.c | 20 ++++++++++---------- include/uapi/linux/usb/ch9.h | 10 +++++----- 20 files changed, 99 insertions(+), 107 deletions(-) (limited to 'include') diff --git a/drivers/usb/cdns3/ep0.c b/drivers/usb/cdns3/ep0.c index 82645a2a0f52..04a522f5ae58 100644 --- a/drivers/usb/cdns3/ep0.c +++ b/drivers/usb/cdns3/ep0.c @@ -328,10 +328,10 @@ static int cdns3_ep0_feature_handle_device(struct cdns3_device *priv_dev, return -EINVAL; switch (tmode >> 8) { - case TEST_J: - case TEST_K: - case TEST_SE0_NAK: - case TEST_PACKET: + case USB_TEST_J: + case USB_TEST_K: + case USB_TEST_SE0_NAK: + case 
USB_TEST_PACKET: cdns3_set_register_bit(&priv_dev->regs->usb_cmd, USB_CMD_STMODE | USB_STS_TMODE_SEL(tmode - 1)); diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index db0cfde0cc3c..4beb25888917 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -1215,11 +1215,11 @@ __acquires(ci->lock) case USB_DEVICE_TEST_MODE: tmode = le16_to_cpu(req.wIndex) >> 8; switch (tmode) { - case TEST_J: - case TEST_K: - case TEST_SE0_NAK: - case TEST_PACKET: - case TEST_FORCE_EN: + case USB_TEST_J: + case USB_TEST_K: + case USB_TEST_SE0_NAK: + case USB_TEST_PACKET: + case USB_TEST_FORCE_ENABLE: ci->test_mode = tmode; err = isr_setup_status_phase( ci); diff --git a/drivers/usb/common/debug.c b/drivers/usb/common/debug.c index 92a986aeaa5d..410acd670ca7 100644 --- a/drivers/usb/common/debug.c +++ b/drivers/usb/common/debug.c @@ -53,15 +53,15 @@ static const char *usb_decode_device_feature(u16 wValue) static const char *usb_decode_test_mode(u16 wIndex) { switch (wIndex) { - case TEST_J: + case USB_TEST_J: return ": TEST_J"; - case TEST_K: + case USB_TEST_K: return ": TEST_K"; - case TEST_SE0_NAK: + case USB_TEST_SE0_NAK: return ": TEST_SE0_NAK"; - case TEST_PACKET: + case USB_TEST_PACKET: return ": TEST_PACKET"; - case TEST_FORCE_EN: + case USB_TEST_FORCE_ENABLE: return ": TEST_FORCE_EN"; default: return ": UNKNOWN"; diff --git a/drivers/usb/dwc2/debugfs.c b/drivers/usb/dwc2/debugfs.c index 3a0dcbfbc827..aaafd463d72a 100644 --- a/drivers/usb/dwc2/debugfs.c +++ b/drivers/usb/dwc2/debugfs.c @@ -37,15 +37,15 @@ static ssize_t testmode_write(struct file *file, const char __user *ubuf, size_t return -EFAULT; if (!strncmp(buf, "test_j", 6)) - testmode = TEST_J; + testmode = USB_TEST_J; else if (!strncmp(buf, "test_k", 6)) - testmode = TEST_K; + testmode = USB_TEST_K; else if (!strncmp(buf, "test_se0_nak", 12)) - testmode = TEST_SE0_NAK; + testmode = USB_TEST_SE0_NAK; else if (!strncmp(buf, "test_packet", 11)) - testmode = TEST_PACKET; + testmode = USB_TEST_PACKET; else if (!strncmp(buf, "test_force_enable", 17)) - testmode = TEST_FORCE_EN; + testmode = USB_TEST_FORCE_ENABLE; else testmode = 0; @@ -78,19 +78,19 @@ static int testmode_show(struct seq_file *s, void *unused) case 0: seq_puts(s, "no test\n"); break; - case TEST_J: + case USB_TEST_J: seq_puts(s, "test_j\n"); break; - case TEST_K: + case USB_TEST_K: seq_puts(s, "test_k\n"); break; - case TEST_SE0_NAK: + case USB_TEST_SE0_NAK: seq_puts(s, "test_se0_nak\n"); break; - case TEST_PACKET: + case USB_TEST_PACKET: seq_puts(s, "test_packet\n"); break; - case TEST_FORCE_EN: + case USB_TEST_FORCE_ENABLE: seq_puts(s, "test_force_enable\n"); break; default: diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 12b98b466287..38fc46b0c026 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -1561,11 +1561,11 @@ int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode) dctl &= ~DCTL_TSTCTL_MASK; switch (testmode) { - case TEST_J: - case TEST_K: - case TEST_SE0_NAK: - case TEST_PACKET: - case TEST_FORCE_EN: + case USB_TEST_J: + case USB_TEST_K: + case USB_TEST_SE0_NAK: + case USB_TEST_PACKET: + case USB_TEST_FORCE_ENABLE: dctl |= testmode << DCTL_TSTCTL_SHIFT; break; default: diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c index 6d9de334e46a..14dc6a37305d 100644 --- a/drivers/usb/dwc3/debugfs.c +++ b/drivers/usb/dwc3/debugfs.c @@ -466,19 +466,19 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused) case 0: seq_printf(s, "no test\n"); break; - case 
TEST_J: + case USB_TEST_J: seq_printf(s, "test_j\n"); break; - case TEST_K: + case USB_TEST_K: seq_printf(s, "test_k\n"); break; - case TEST_SE0_NAK: + case USB_TEST_SE0_NAK: seq_printf(s, "test_se0_nak\n"); break; - case TEST_PACKET: + case USB_TEST_PACKET: seq_printf(s, "test_packet\n"); break; - case TEST_FORCE_EN: + case USB_TEST_FORCE_ENABLE: seq_printf(s, "test_force_enable\n"); break; default: @@ -506,15 +506,15 @@ static ssize_t dwc3_testmode_write(struct file *file, return -EFAULT; if (!strncmp(buf, "test_j", 6)) - testmode = TEST_J; + testmode = USB_TEST_J; else if (!strncmp(buf, "test_k", 6)) - testmode = TEST_K; + testmode = USB_TEST_K; else if (!strncmp(buf, "test_se0_nak", 12)) - testmode = TEST_SE0_NAK; + testmode = USB_TEST_SE0_NAK; else if (!strncmp(buf, "test_packet", 11)) - testmode = TEST_PACKET; + testmode = USB_TEST_PACKET; else if (!strncmp(buf, "test_force_enable", 17)) - testmode = TEST_FORCE_EN; + testmode = USB_TEST_FORCE_ENABLE; else testmode = 0; diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 6dee4dabc0a4..8dd69728add3 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -425,11 +425,11 @@ static int dwc3_ep0_handle_test(struct dwc3 *dwc, enum usb_device_state state, return -EINVAL; switch (wIndex >> 8) { - case TEST_J: - case TEST_K: - case TEST_SE0_NAK: - case TEST_PACKET: - case TEST_FORCE_EN: + case USB_TEST_J: + case USB_TEST_K: + case USB_TEST_SE0_NAK: + case USB_TEST_PACKET: + case USB_TEST_FORCE_ENABLE: dwc->test_mode_nr = wIndex >> 8; dwc->test_mode = true; break; diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 80c3ef134e41..0b59b2f1cf26 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -46,11 +46,11 @@ int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode) reg &= ~DWC3_DCTL_TSTCTRL_MASK; switch (mode) { - case TEST_J: - case TEST_K: - case TEST_SE0_NAK: - case TEST_PACKET: - case TEST_FORCE_EN: + case USB_TEST_J: + case USB_TEST_K: + case USB_TEST_SE0_NAK: + case USB_TEST_PACKET: + case USB_TEST_FORCE_ENABLE: reg |= mode << 1; break; default: diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c index d49c6dc1082d..ba250cf75bef 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_ep.c +++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c @@ -927,11 +927,11 @@ static int bdc_set_test_mode(struct bdc *bdc) usb2_pm &= ~BDC_PTC_MASK; dev_dbg(bdc->dev, "%s\n", __func__); switch (bdc->test_mode) { - case TEST_J: - case TEST_K: - case TEST_SE0_NAK: - case TEST_PACKET: - case TEST_FORCE_EN: + case USB_TEST_J: + case USB_TEST_K: + case USB_TEST_SE0_NAK: + case USB_TEST_PACKET: + case USB_TEST_FORCE_ENABLE: usb2_pm |= bdc->test_mode << 28; break; default: diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c index 7164ad9800f1..345e28d76709 100644 --- a/drivers/usb/gadget/udc/gr_udc.c +++ b/drivers/usb/gadget/udc/gr_udc.c @@ -912,9 +912,9 @@ static int gr_device_request(struct gr_udc *dev, u8 type, u8 request, return gr_ep0_respond_empty(dev); case USB_DEVICE_TEST_MODE: - /* The hardware does not support TEST_FORCE_EN */ + /* The hardware does not support USB_TEST_FORCE_ENABLE */ test = index >> 8; - if (test >= TEST_J && test <= TEST_PACKET) { + if (test >= USB_TEST_J && test <= USB_TEST_PACKET) { dev->test_mode = test; return gr_ep0_respond(dev, NULL, 0, gr_ep0_testmode_complete); diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c index cafde053788b..69289717d856 100644 --- 
a/drivers/usb/gadget/udc/mv_udc_core.c +++ b/drivers/usb/gadget/udc/mv_udc_core.c @@ -1502,7 +1502,7 @@ out: static void mv_udc_testmode(struct mv_udc *udc, u16 index) { - if (index <= TEST_FORCE_EN) { + if (index <= USB_TEST_FORCE_ENABLE) { udc->test_mode = index; if (udc_prime_status(udc, EP_DIR_IN, 0, true)) ep0_stall(udc); diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c index 928057b206f1..fbbe62513545 100644 --- a/drivers/usb/gadget/udc/net2272.c +++ b/drivers/usb/gadget/udc/net2272.c @@ -1688,7 +1688,7 @@ net2272_set_test_mode(struct net2272 *dev, int mode) net2272_write(dev, USBTEST, mode); /* load test packet */ - if (mode == TEST_PACKET) { + if (mode == USB_TEST_PACKET) { /* switch to 8 bit mode */ net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) & ~(1 << DATA_WIDTH)); diff --git a/drivers/usb/gadget/udc/net2272.h b/drivers/usb/gadget/udc/net2272.h index 8e644627992d..87d0ab9ffeeb 100644 --- a/drivers/usb/gadget/udc/net2272.h +++ b/drivers/usb/gadget/udc/net2272.h @@ -105,11 +105,6 @@ #define USBTEST 0x32 #define TEST_MODE_SELECT 0 #define NORMAL_OPERATION 0 -#define TEST_J 1 -#define TEST_K 2 -#define TEST_SE0_NAK 3 -#define TEST_PACKET 4 -#define TEST_FORCE_ENABLE 5 #define XCVRDIAG 0x33 #define FORCE_FULL_SPEED 2 #define FORCE_HIGH_SPEED 3 diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c index 709553bdb233..d5e9d20c097d 100644 --- a/drivers/usb/gadget/udc/udc-xilinx.c +++ b/drivers/usb/gadget/udc/udc-xilinx.c @@ -2097,9 +2097,9 @@ static int xudc_probe(struct platform_device *pdev) /* Check for IP endianness */ udc->write_fn = xudc_write32_be; udc->read_fn = xudc_read32_be; - udc->write_fn(udc->addr, XUSB_TESTMODE_OFFSET, TEST_J); + udc->write_fn(udc->addr, XUSB_TESTMODE_OFFSET, USB_TEST_J); if ((udc->read_fn(udc->addr + XUSB_TESTMODE_OFFSET)) - != TEST_J) { + != USB_TEST_J) { udc->write_fn = xudc_write32; udc->read_fn = xudc_read32; } diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index f37316d2c8fa..073c54e42223 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -612,7 +612,7 @@ static void xhci_port_set_test_mode(struct xhci_hcd *xhci, temp |= test_mode << PORT_TEST_MODE_SHIFT; writel(temp, port->addr + PORTPMSC); xhci->test_mode = test_mode; - if (test_mode == TEST_FORCE_EN) + if (test_mode == USB_TEST_FORCE_ENABLE) xhci_start(xhci); } @@ -666,7 +666,7 @@ static int xhci_exit_test_mode(struct xhci_hcd *xhci) xhci_err(xhci, "Not in test mode, do nothing.\n"); return 0; } - if (xhci->test_mode == TEST_FORCE_EN && + if (xhci->test_mode == USB_TEST_FORCE_ENABLE && !(xhci->xhc_state & XHCI_STATE_HALTED)) { retval = xhci_halt(xhci); if (retval) @@ -1421,7 +1421,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, /* 4.19.6 Port Test Modes (USB2 Test Mode) */ if (hcd->speed != HCD_USB2) goto error; - if (test_mode > TEST_FORCE_EN || test_mode < TEST_J) + if (test_mode > USB_TEST_FORCE_ENABLE || + test_mode < USB_TEST_J) goto error; retval = xhci_enter_test_mode(xhci, test_mode, wIndex, &flags); diff --git a/drivers/usb/misc/ehset.c b/drivers/usb/misc/ehset.c index 7895d61e733b..2752e1f4f4d0 100644 --- a/drivers/usb/misc/ehset.c +++ b/drivers/usb/misc/ehset.c @@ -33,28 +33,28 @@ static int ehset_probe(struct usb_interface *intf, ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0), USB_REQ_SET_FEATURE, USB_RT_PORT, USB_PORT_FEAT_TEST, - (TEST_SE0_NAK << 8) | portnum, + (USB_TEST_SE0_NAK << 8) | portnum, NULL, 0, 1000); break; case 
TEST_J_PID: ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0), USB_REQ_SET_FEATURE, USB_RT_PORT, USB_PORT_FEAT_TEST, - (TEST_J << 8) | portnum, + (USB_TEST_J << 8) | portnum, NULL, 0, 1000); break; case TEST_K_PID: ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0), USB_REQ_SET_FEATURE, USB_RT_PORT, USB_PORT_FEAT_TEST, - (TEST_K << 8) | portnum, + (USB_TEST_K << 8) | portnum, NULL, 0, 1000); break; case TEST_PACKET_PID: ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0), USB_REQ_SET_FEATURE, USB_RT_PORT, USB_PORT_FEAT_TEST, - (TEST_PACKET << 8) | portnum, + (USB_TEST_PACKET << 8) | portnum, NULL, 0, 1000); break; case TEST_HS_HOST_PORT_SUSPEND_RESUME: diff --git a/drivers/usb/mtu3/mtu3_gadget_ep0.c b/drivers/usb/mtu3/mtu3_gadget_ep0.c index 2be182bd793a..563a0a2e970d 100644 --- a/drivers/usb/mtu3/mtu3_gadget_ep0.c +++ b/drivers/usb/mtu3/mtu3_gadget_ep0.c @@ -278,20 +278,20 @@ static int handle_test_mode(struct mtu3 *mtu, struct usb_ctrlrequest *setup) u32 value; switch (le16_to_cpu(setup->wIndex) >> 8) { - case TEST_J: - dev_dbg(mtu->dev, "TEST_J\n"); + case USB_TEST_J: + dev_dbg(mtu->dev, "USB_TEST_J\n"); mtu->test_mode_nr = TEST_J_MODE; break; - case TEST_K: - dev_dbg(mtu->dev, "TEST_K\n"); + case USB_TEST_K: + dev_dbg(mtu->dev, "USB_TEST_K\n"); mtu->test_mode_nr = TEST_K_MODE; break; - case TEST_SE0_NAK: - dev_dbg(mtu->dev, "TEST_SE0_NAK\n"); + case USB_TEST_SE0_NAK: + dev_dbg(mtu->dev, "USB_TEST_SE0_NAK\n"); mtu->test_mode_nr = TEST_SE0_NAK_MODE; break; - case TEST_PACKET: - dev_dbg(mtu->dev, "TEST_PACKET\n"); + case USB_TEST_PACKET: + dev_dbg(mtu->dev, "USB_TEST_PACKET\n"); mtu->test_mode_nr = TEST_PACKET_MODE; break; default: diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c index 91a5027b5c1f..0ae3e0be043e 100644 --- a/drivers/usb/musb/musb_gadget_ep0.c +++ b/drivers/usb/musb/musb_gadget_ep0.c @@ -311,27 +311,23 @@ __acquires(musb->lock) goto stall; switch (ctrlrequest->wIndex >> 8) { - case 1: - pr_debug("TEST_J\n"); - /* TEST_J */ + case USB_TEST_J: + pr_debug("USB_TEST_J\n"); musb->test_mode_nr = MUSB_TEST_J; break; - case 2: - /* TEST_K */ - pr_debug("TEST_K\n"); + case USB_TEST_K: + pr_debug("USB_TEST_K\n"); musb->test_mode_nr = MUSB_TEST_K; break; - case 3: - /* TEST_SE0_NAK */ - pr_debug("TEST_SE0_NAK\n"); + case USB_TEST_SE0_NAK: + pr_debug("USB_TEST_SE0_NAK\n"); musb->test_mode_nr = MUSB_TEST_SE0_NAK; break; - case 4: - /* TEST_PACKET */ - pr_debug("TEST_PACKET\n"); + case USB_TEST_PACKET: + pr_debug("USB_TEST_PACKET\n"); musb->test_mode_nr = MUSB_TEST_PACKET; break; diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c index a84ec27c4c12..cb7ae297a3af 100644 --- a/drivers/usb/musb/musb_virthub.c +++ b/drivers/usb/musb/musb_virthub.c @@ -385,25 +385,25 @@ int musb_hub_control( wIndex >>= 8; switch (wIndex) { - case 1: - pr_debug("TEST_J\n"); + case USB_TEST_J: + pr_debug("USB_TEST_J\n"); temp = MUSB_TEST_J; break; - case 2: - pr_debug("TEST_K\n"); + case USB_TEST_K: + pr_debug("USB_TEST_K\n"); temp = MUSB_TEST_K; break; - case 3: - pr_debug("TEST_SE0_NAK\n"); + case USB_TEST_SE0_NAK: + pr_debug("USB_TEST_SE0_NAK\n"); temp = MUSB_TEST_SE0_NAK; break; - case 4: - pr_debug("TEST_PACKET\n"); + case USB_TEST_PACKET: + pr_debug("USB_TEST_PACKET\n"); temp = MUSB_TEST_PACKET; musb_load_testpacket(musb); break; - case 5: - pr_debug("TEST_FORCE_ENABLE\n"); + case USB_TEST_FORCE_ENABLE: + pr_debug("USB_TEST_FORCE_ENABLE\n"); temp = MUSB_TEST_FORCE_HOST | MUSB_TEST_FORCE_HS; diff --git 
a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index b1ed2ccfe9cf..48766fdf6580 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -138,11 +138,11 @@ * Test Mode Selectors * See USB 2.0 spec Table 9-7 */ -#define TEST_J 1 -#define TEST_K 2 -#define TEST_SE0_NAK 3 -#define TEST_PACKET 4 -#define TEST_FORCE_EN 5 +#define USB_TEST_J 1 +#define USB_TEST_K 2 +#define USB_TEST_SE0_NAK 3 +#define USB_TEST_PACKET 4 +#define USB_TEST_FORCE_ENABLE 5 /* Status Type */ #define USB_STATUS_TYPE_STANDARD 0 -- cgit v1.2.3 From feee1b8c490821f29aae416a5422795f5a29263d Mon Sep 17 00:00:00 2001 From: Alexander Popov Date: Wed, 24 Jun 2020 15:33:29 +0300 Subject: gcc-plugins/stackleak: Use asm instrumentation to avoid useless register saving The kernel code instrumentation in stackleak gcc plugin works in two stages. At first, stack tracking is added to GIMPLE representation of every function (except some special cases). And later, when stack frame size info is available, stack tracking is removed from the RTL representation of the functions with small stack frame. There is an unwanted side-effect for these functions: some of them do useless work with caller-saved registers. As an example of such case, proc_sys_write without() instrumentation: 55 push %rbp 41 b8 01 00 00 00 mov $0x1,%r8d 48 89 e5 mov %rsp,%rbp e8 11 ff ff ff callq ffffffff81284610 5d pop %rbp c3 retq 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1) 66 2e 0f 1f 84 00 00 nopw %cs:0x0(%rax,%rax,1) 00 00 00 proc_sys_write() with instrumentation: 55 push %rbp 48 89 e5 mov %rsp,%rbp 41 56 push %r14 41 55 push %r13 41 54 push %r12 53 push %rbx 49 89 f4 mov %rsi,%r12 48 89 fb mov %rdi,%rbx 49 89 d5 mov %rdx,%r13 49 89 ce mov %rcx,%r14 4c 89 f1 mov %r14,%rcx 4c 89 ea mov %r13,%rdx 4c 89 e6 mov %r12,%rsi 48 89 df mov %rbx,%rdi 41 b8 01 00 00 00 mov $0x1,%r8d e8 f2 fe ff ff callq ffffffff81298e80 5b pop %rbx 41 5c pop %r12 41 5d pop %r13 41 5e pop %r14 5d pop %rbp c3 retq 66 0f 1f 84 00 00 00 nopw 0x0(%rax,%rax,1) 00 00 Let's improve the instrumentation to avoid this: 1. Make stackleak_track_stack() save all register that it works with. Use no_caller_saved_registers attribute for that function. This attribute is available for x86_64 and i386 starting from gcc-7. 2. Insert calling stackleak_track_stack() in asm: asm volatile("call stackleak_track_stack" :: "r" (current_stack_pointer)) Here we use ASM_CALL_CONSTRAINT trick from arch/x86/include/asm/asm.h. The input constraint is taken into account during gcc shrink-wrapping optimization. It is needed to be sure that stackleak_track_stack() call is inserted after the prologue of the containing function, when the stack frame is prepared. 
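To see how those two pieces fit together outside the GIMPLE/RTL terminology, here is a minimal user-space sketch of the pattern the plugin now emits (an illustration written for this note, not code taken from the patch; it assumes x86-64 and gcc >= 7 for the no_caller_saved_registers attribute, and the example_tracked_function()/main() names are made up):

	register unsigned long current_stack_pointer asm("rsp");

	/* Stand-in for the real tracker; it preserves every register it touches. */
	__attribute__((used, no_caller_saved_registers))
	static void stackleak_track_stack(void)
	{
	}

	static long example_tracked_function(long arg)
	{
		/*
		 * The "r" input pins this statement to the stack pointer, so gcc's
		 * shrink-wrapping keeps it after the prologue; because the callee
		 * carries no_caller_saved_registers, the caller does not have to
		 * spill its live caller-saved registers around the call.
		 */
		asm volatile("call stackleak_track_stack"
			     :: "r" (current_stack_pointer));

		return arg + 1;
	}

	int main(void)
	{
		return (int)example_tracked_function(41) - 42;	/* exits 0 */
	}

Build the sketch with something like gcc -O2 -mno-red-zone to mirror the kernel's flags; without -mno-red-zone the bare call could in principle clobber the caller's red zone, a concern the kernel itself does not have.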
This work is a deep reengineering of the idea described on grsecurity blog https://grsecurity.net/resolving_an_unfortunate_stackleak_interaction Signed-off-by: Alexander Popov Acked-by: Miguel Ojeda Link: https://lore.kernel.org/r/20200624123330.83226-5-alex.popov@linux.com Signed-off-by: Kees Cook --- include/linux/compiler_attributes.h | 13 +++ kernel/stackleak.c | 16 +-- scripts/Makefile.gcc-plugins | 2 + scripts/gcc-plugins/stackleak_plugin.c | 205 ++++++++++++++++++++++++++++----- 4 files changed, 196 insertions(+), 40 deletions(-) (limited to 'include') diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index cdf016596659..551ea8cb70b1 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -37,6 +37,7 @@ # define __GCC4_has_attribute___copy__ 0 # define __GCC4_has_attribute___designated_init__ 0 # define __GCC4_has_attribute___externally_visible__ 1 +# define __GCC4_has_attribute___no_caller_saved_registers__ 0 # define __GCC4_has_attribute___noclone__ 1 # define __GCC4_has_attribute___nonstring__ 0 # define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8) @@ -175,6 +176,18 @@ */ #define __mode(x) __attribute__((__mode__(x))) +/* + * Optional: only supported since gcc >= 7 + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/x86-Function-Attributes.html#index-no_005fcaller_005fsaved_005fregisters-function-attribute_002c-x86 + * clang: https://clang.llvm.org/docs/AttributeReference.html#no-caller-saved-registers + */ +#if __has_attribute(__no_caller_saved_registers__) +# define __no_caller_saved_registers __attribute__((__no_caller_saved_registers__)) +#else +# define __no_caller_saved_registers +#endif + /* * Optional: not supported by clang * diff --git a/kernel/stackleak.c b/kernel/stackleak.c index b193a59fc05b..a8fc9ae1d03d 100644 --- a/kernel/stackleak.c +++ b/kernel/stackleak.c @@ -104,19 +104,9 @@ asmlinkage void notrace stackleak_erase(void) } NOKPROBE_SYMBOL(stackleak_erase); -void __used notrace stackleak_track_stack(void) +void __used __no_caller_saved_registers notrace stackleak_track_stack(void) { - /* - * N.B. stackleak_erase() fills the kernel stack with the poison value, - * which has the register width. That code assumes that the value - * of 'lowest_stack' is aligned on the register width boundary. - * - * That is true for x86 and x86_64 because of the kernel stack - * alignment on these platforms (for details, see 'cc_stack_align' in - * arch/x86/Makefile). Take care of that when you port STACKLEAK to - * new platforms. 
- */ - unsigned long sp = (unsigned long)&sp; + unsigned long sp = current_stack_pointer; /* * Having CONFIG_STACKLEAK_TRACK_MIN_SIZE larger than @@ -125,6 +115,8 @@ void __used notrace stackleak_track_stack(void) */ BUILD_BUG_ON(CONFIG_STACKLEAK_TRACK_MIN_SIZE > STACKLEAK_SEARCH_DEPTH); + /* 'lowest_stack' should be aligned on the register width boundary */ + sp = ALIGN(sp, sizeof(unsigned long)); if (sp < current->lowest_stack && sp >= (unsigned long)task_stack_page(current) + sizeof(unsigned long)) { diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins index 5f7df50cfe7a..952e46876329 100644 --- a/scripts/Makefile.gcc-plugins +++ b/scripts/Makefile.gcc-plugins @@ -33,6 +33,8 @@ gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK) \ += -DSTACKLEAK_PLUGIN gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK) \ += -fplugin-arg-stackleak_plugin-track-min-size=$(CONFIG_STACKLEAK_TRACK_MIN_SIZE) +gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK) \ + += -fplugin-arg-stackleak_plugin-arch=$(SRCARCH) ifdef CONFIG_GCC_PLUGIN_STACKLEAK DISABLE_STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-disable endif diff --git a/scripts/gcc-plugins/stackleak_plugin.c b/scripts/gcc-plugins/stackleak_plugin.c index cc75eeba0be1..a18b0d4af456 100644 --- a/scripts/gcc-plugins/stackleak_plugin.c +++ b/scripts/gcc-plugins/stackleak_plugin.c @@ -20,7 +20,7 @@ * * Debugging: * - use fprintf() to stderr, debug_generic_expr(), debug_gimple_stmt(), - * print_rtl() and print_simple_rtl(); + * print_rtl_single() and debug_rtx(); * - add "-fdump-tree-all -fdump-rtl-all" to the plugin CFLAGS in * Makefile.gcc-plugins to see the verbose dumps of the gcc passes; * - use gcc -E to understand the preprocessing shenanigans; @@ -32,6 +32,7 @@ __visible int plugin_is_GPL_compatible; static int track_frame_size = -1; +static bool build_for_x86 = false; static const char track_function[] = "stackleak_track_stack"; /* @@ -43,32 +44,31 @@ static GTY(()) tree track_function_decl; static struct plugin_info stackleak_plugin_info = { .version = "201707101337", .help = "track-min-size=nn\ttrack stack for functions with a stack frame size >= nn bytes\n" + "arch=target_arch\tspecify target build arch\n" "disable\t\tdo not activate the plugin\n" }; -static void stackleak_add_track_stack(gimple_stmt_iterator *gsi, bool after) +static void add_stack_tracking_gcall(gimple_stmt_iterator *gsi, bool after) { gimple stmt; - gcall *stackleak_track_stack; + gcall *gimple_call; cgraph_node_ptr node; basic_block bb; - /* Insert call to void stackleak_track_stack(void) */ + /* Insert calling stackleak_track_stack() */ stmt = gimple_build_call(track_function_decl, 0); - stackleak_track_stack = as_a_gcall(stmt); - if (after) { - gsi_insert_after(gsi, stackleak_track_stack, - GSI_CONTINUE_LINKING); - } else { - gsi_insert_before(gsi, stackleak_track_stack, GSI_SAME_STMT); - } + gimple_call = as_a_gcall(stmt); + if (after) + gsi_insert_after(gsi, gimple_call, GSI_CONTINUE_LINKING); + else + gsi_insert_before(gsi, gimple_call, GSI_SAME_STMT); /* Update the cgraph */ - bb = gimple_bb(stackleak_track_stack); + bb = gimple_bb(gimple_call); node = cgraph_get_create_node(track_function_decl); gcc_assert(node); cgraph_create_edge(cgraph_get_node(current_function_decl), node, - stackleak_track_stack, bb->count, + gimple_call, bb->count, compute_call_stmt_bb_frequency(current_function_decl, bb)); } @@ -85,6 +85,79 @@ static bool is_alloca(gimple stmt) return false; } +static tree get_current_stack_pointer_decl(void) +{ + varpool_node_ptr node; + + 
FOR_EACH_VARIABLE(node) { + tree var = NODE_DECL(node); + tree name = DECL_NAME(var); + + if (DECL_NAME_LENGTH(var) != sizeof("current_stack_pointer") - 1) + continue; + + if (strcmp(IDENTIFIER_POINTER(name), "current_stack_pointer")) + continue; + + return var; + } + + return NULL_TREE; +} + +static void add_stack_tracking_gasm(gimple_stmt_iterator *gsi, bool after) +{ + gasm *asm_call = NULL; + tree sp_decl, input; + vec *inputs = NULL; + + /* 'no_caller_saved_registers' is currently supported only for x86 */ + gcc_assert(build_for_x86); + + /* + * Insert calling stackleak_track_stack() in asm: + * asm volatile("call stackleak_track_stack" + * :: "r" (current_stack_pointer)) + * Use ASM_CALL_CONSTRAINT trick from arch/x86/include/asm/asm.h. + * This constraint is taken into account during gcc shrink-wrapping + * optimization. It is needed to be sure that stackleak_track_stack() + * call is inserted after the prologue of the containing function, + * when the stack frame is prepared. + */ + sp_decl = get_current_stack_pointer_decl(); + if (sp_decl == NULL_TREE) { + add_stack_tracking_gcall(gsi, after); + return; + } + input = build_tree_list(NULL_TREE, build_const_char_string(2, "r")); + input = chainon(NULL_TREE, build_tree_list(input, sp_decl)); + vec_safe_push(inputs, input); + asm_call = gimple_build_asm_vec("call stackleak_track_stack", + inputs, NULL, NULL, NULL); + gimple_asm_set_volatile(asm_call, true); + if (after) + gsi_insert_after(gsi, asm_call, GSI_CONTINUE_LINKING); + else + gsi_insert_before(gsi, asm_call, GSI_SAME_STMT); + update_stmt(asm_call); +} + +static void add_stack_tracking(gimple_stmt_iterator *gsi, bool after) +{ + /* + * The 'no_caller_saved_registers' attribute is used for + * stackleak_track_stack(). If the compiler supports this attribute for + * the target arch, we can add calling stackleak_track_stack() in asm. + * That improves performance: we avoid useless operations with the + * caller-saved registers in the functions from which we will remove + * stackleak_track_stack() call during the stackleak_cleanup pass. + */ + if (lookup_attribute_spec(get_identifier("no_caller_saved_registers"))) + add_stack_tracking_gasm(gsi, after); + else + add_stack_tracking_gcall(gsi, after); +} + /* * Work with the GIMPLE representation of the code. Insert the * stackleak_track_stack() call after alloca() and into the beginning @@ -94,7 +167,7 @@ static unsigned int stackleak_instrument_execute(void) { basic_block bb, entry_bb; bool prologue_instrumented = false, is_leaf = true; - gimple_stmt_iterator gsi; + gimple_stmt_iterator gsi = { 0 }; /* * ENTRY_BLOCK_PTR is a basic block which represents possible entry @@ -123,7 +196,7 @@ static unsigned int stackleak_instrument_execute(void) continue; /* Insert stackleak_track_stack() call after alloca() */ - stackleak_add_track_stack(&gsi, true); + add_stack_tracking(&gsi, true); if (bb == entry_bb) prologue_instrumented = true; } @@ -168,7 +241,7 @@ static unsigned int stackleak_instrument_execute(void) bb = single_succ(ENTRY_BLOCK_PTR_FOR_FN(cfun)); } gsi = gsi_after_labels(bb); - stackleak_add_track_stack(&gsi, false); + add_stack_tracking(&gsi, false); return 0; } @@ -182,21 +255,10 @@ static bool large_stack_frame(void) #endif } -/* - * Work with the RTL representation of the code. - * Remove the unneeded stackleak_track_stack() calls from the functions - * which don't call alloca() and don't have a large enough stack frame size. 
- */ -static unsigned int stackleak_cleanup_execute(void) +static void remove_stack_tracking_gcall(void) { rtx_insn *insn, *next; - if (cfun->calls_alloca) - return 0; - - if (large_stack_frame()) - return 0; - /* * Find stackleak_track_stack() calls. Loop through the chain of insns, * which is an RTL representation of the code for a function. @@ -257,6 +319,84 @@ static unsigned int stackleak_cleanup_execute(void) } #endif } +} + +static bool remove_stack_tracking_gasm(void) +{ + bool removed = false; + rtx_insn *insn, *next; + + /* 'no_caller_saved_registers' is currently supported only for x86 */ + gcc_assert(build_for_x86); + + /* + * Find stackleak_track_stack() asm calls. Loop through the chain of + * insns, which is an RTL representation of the code for a function. + * + * The example of a matching insn: + * (insn 11 5 12 2 (parallel [ (asm_operands/v + * ("call stackleak_track_stack") ("") 0 + * [ (reg/v:DI 7 sp [ current_stack_pointer ]) ] + * [ (asm_input:DI ("r")) ] []) + * (clobber (reg:CC 17 flags)) ]) -1 (nil)) + */ + for (insn = get_insns(); insn; insn = next) { + rtx body; + + next = NEXT_INSN(insn); + + /* Check the expression code of the insn */ + if (!NONJUMP_INSN_P(insn)) + continue; + + /* + * Check the expression code of the insn body, which is an RTL + * Expression (RTX) describing the side effect performed by + * that insn. + */ + body = PATTERN(insn); + + if (GET_CODE(body) != PARALLEL) + continue; + + body = XVECEXP(body, 0, 0); + + if (GET_CODE(body) != ASM_OPERANDS) + continue; + + if (strcmp(ASM_OPERANDS_TEMPLATE(body), + "call stackleak_track_stack")) { + continue; + } + + delete_insn_and_edges(insn); + gcc_assert(!removed); + removed = true; + } + + return removed; +} + +/* + * Work with the RTL representation of the code. + * Remove the unneeded stackleak_track_stack() calls from the functions + * which don't call alloca() and don't have a large enough stack frame size. + */ +static unsigned int stackleak_cleanup_execute(void) +{ + bool removed = false; + + if (cfun->calls_alloca) + return 0; + + if (large_stack_frame()) + return 0; + + if (lookup_attribute_spec(get_identifier("no_caller_saved_registers"))) + removed = remove_stack_tracking_gasm(); + + if (!removed) + remove_stack_tracking_gcall(); return 0; } @@ -392,6 +532,15 @@ __visible int plugin_init(struct plugin_name_args *plugin_info, plugin_name, argv[i].key, argv[i].value); return 1; } + } else if (!strcmp(argv[i].key, "arch")) { + if (!argv[i].value) { + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), + plugin_name, argv[i].key); + return 1; + } + + if (!strcmp(argv[i].value, "x86")) + build_for_x86 = true; } else { error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); -- cgit v1.2.3 From 28bc24fc46f9c9f39ddefb424d6072041805b563 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 15 Jun 2020 09:48:33 +0200 Subject: vc: separate state There are two copies of some members of struct vc_data. This is because we need to save them and restore later. Move these memebers to a separate structure called vc_state. So now instead of members like: vc_x, vc_y and vc_saved_x, vc_saved_y we have state and saved_state (of type: struct vc_state) containing state.x, state.y and saved_state.x, saved_state.y This change: * makes clear what is saved & restored * eases save & restore by using memcpy (see save_cur and restore_cur) Finally, we document the newly added struct vc_state using kernel-doc. 
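For readers skimming the diff below, a small stand-alone sketch of the resulting shape may help (the vc_data_sketch name and the exact field list are illustrative only; the real struct vc_state in console_struct.h carries more members):

	#include <stdio.h>
	#include <string.h>

	struct vc_state {
		unsigned int	x, y;		/* cursor position */
		unsigned char	color;
		unsigned int	intensity:2;
		unsigned int	italic:1;
		unsigned int	underline:1;
		unsigned int	blink:1;
		unsigned int	reverse:1;
		unsigned int	charset:1;
	};

	struct vc_data_sketch {
		struct vc_state	state;		/* live state */
		struct vc_state	saved_state;	/* snapshot for save_cur()/restore_cur() */
		/* ...the many other vc_* members are not touched by this change... */
	};

	/* Grouping the members lets each helper collapse to a single memcpy. */
	static void save_cur(struct vc_data_sketch *vc)
	{
		memcpy(&vc->saved_state, &vc->state, sizeof(vc->state));
	}

	static void restore_cur(struct vc_data_sketch *vc)
	{
		memcpy(&vc->state, &vc->saved_state, sizeof(vc->state));
		/* the real restore_cur() then re-derives vc_translate and the attrs */
	}

	int main(void)
	{
		struct vc_data_sketch vc = { .state = { .x = 3, .y = 5 } };

		save_cur(&vc);
		vc.state.x = vc.state.y = 0;	/* clobber the live state */
		restore_cur(&vc);

		printf("restored cursor: %u,%u\n", vc.state.x, vc.state.y);
		return 0;
	}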
Signed-off-by: Jiri Slaby Link: https://lore.kernel.org/r/20200615074910.19267-1-jslaby@suse.cz Signed-off-by: Greg Kroah-Hartman --- drivers/accessibility/braille/braille_console.c | 10 +- drivers/staging/speakup/main.c | 28 +- drivers/tty/vt/vt.c | 327 ++++++++++++------------ drivers/video/console/mdacon.c | 2 +- drivers/video/console/sticon.c | 6 +- drivers/video/console/vgacon.c | 22 +- drivers/video/fbdev/core/bitblit.c | 6 +- drivers/video/fbdev/core/fbcon.c | 8 +- drivers/video/fbdev/core/fbcon_ccw.c | 4 +- drivers/video/fbdev/core/fbcon_cw.c | 4 +- drivers/video/fbdev/core/fbcon_ud.c | 4 +- drivers/video/fbdev/core/tileblit.c | 4 +- include/linux/console_struct.h | 54 ++-- 13 files changed, 239 insertions(+), 240 deletions(-) (limited to 'include') diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c index a8f7c278b691..c2b452af6806 100644 --- a/drivers/accessibility/braille/braille_console.c +++ b/drivers/accessibility/braille/braille_console.c @@ -109,16 +109,16 @@ static void braille_write(u16 *buf) /* Follow the VC cursor*/ static void vc_follow_cursor(struct vc_data *vc) { - vc_x = vc->vc_x - (vc->vc_x % WIDTH); - vc_y = vc->vc_y; - lastvc_x = vc->vc_x; - lastvc_y = vc->vc_y; + vc_x = vc->state.x - (vc->state.x % WIDTH); + vc_y = vc->state.y; + lastvc_x = vc->state.x; + lastvc_y = vc->state.y; } /* Maybe the VC cursor moved, if so follow it */ static void vc_maybe_cursor_moved(struct vc_data *vc) { - if (vc->vc_x != lastvc_x || vc->vc_y != lastvc_y) + if (vc->state.x != lastvc_x || vc->state.y != lastvc_y) vc_follow_cursor(vc); } diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c index 02471d932d71..ddfd12afe3b9 100644 --- a/drivers/staging/speakup/main.c +++ b/drivers/staging/speakup/main.c @@ -263,8 +263,8 @@ static unsigned char get_attributes(struct vc_data *vc, u16 *pos) static void speakup_date(struct vc_data *vc) { - spk_x = spk_cx = vc->vc_x; - spk_y = spk_cy = vc->vc_y; + spk_x = spk_cx = vc->state.x; + spk_y = spk_cy = vc->state.y; spk_pos = spk_cp = vc->vc_pos; spk_old_attr = spk_attr; spk_attr = get_attributes(vc, (u_short *)spk_pos); @@ -1551,9 +1551,9 @@ static void do_handle_cursor(struct vc_data *vc, u_char value, char up_flag) */ is_cursor = value + 1; old_cursor_pos = vc->vc_pos; - old_cursor_x = vc->vc_x; - old_cursor_y = vc->vc_y; - speakup_console[vc->vc_num]->ht.cy = vc->vc_y; + old_cursor_x = vc->state.x; + old_cursor_y = vc->state.y; + speakup_console[vc->vc_num]->ht.cy = vc->state.y; cursor_con = vc->vc_num; if (cursor_track == CT_Highlight) reset_highlight_buffers(vc); @@ -1574,8 +1574,8 @@ static void update_color_buffer(struct vc_data *vc, const u16 *ic, int len) i = 0; if (speakup_console[vc_num]->ht.highsize[bi] == 0) { speakup_console[vc_num]->ht.rpos[bi] = vc->vc_pos; - speakup_console[vc_num]->ht.rx[bi] = vc->vc_x; - speakup_console[vc_num]->ht.ry[bi] = vc->vc_y; + speakup_console[vc_num]->ht.rx[bi] = vc->state.x; + speakup_console[vc_num]->ht.ry[bi] = vc->state.y; } while ((hi < COLOR_BUFFER_SIZE) && (i < len)) { if (ic[i] > 32) { @@ -1664,9 +1664,9 @@ static int speak_highlight(struct vc_data *vc) return 0; hc = get_highlight_color(vc); if (hc != -1) { - d = vc->vc_y - speakup_console[vc_num]->ht.cy; + d = vc->state.y - speakup_console[vc_num]->ht.cy; if ((d == 1) || (d == -1)) - if (speakup_console[vc_num]->ht.ry[hc] != vc->vc_y) + if (speakup_console[vc_num]->ht.ry[hc] != vc->state.y) return 0; spk_parked |= 0x01; spk_do_flush(); @@ -1693,8 +1693,8 @@ 
static void cursor_done(struct timer_list *unused) } speakup_date(vc); if (win_enabled) { - if (vc->vc_x >= win_left && vc->vc_x <= win_right && - vc->vc_y >= win_top && vc->vc_y <= win_bottom) { + if (vc->state.x >= win_left && vc->state.x <= win_right && + vc->state.y >= win_top && vc->state.y <= win_bottom) { spk_keydown = 0; is_cursor = 0; goto out; @@ -1757,7 +1757,7 @@ static void speakup_con_write(struct vc_data *vc, u16 *str, int len) if (!spin_trylock_irqsave(&speakup_info.spinlock, flags)) /* Speakup output, discard */ return; - if (spk_bell_pos && spk_keydown && (vc->vc_x == spk_bell_pos - 1)) + if (spk_bell_pos && spk_keydown && (vc->state.x == spk_bell_pos - 1)) bleep(3); if ((is_cursor) || (cursor_track == read_all_mode)) { if (cursor_track == CT_Highlight) @@ -1766,8 +1766,8 @@ static void speakup_con_write(struct vc_data *vc, u16 *str, int len) return; } if (win_enabled) { - if (vc->vc_x >= win_left && vc->vc_x <= win_right && - vc->vc_y >= win_top && vc->vc_y <= win_bottom) { + if (vc->state.x >= win_left && vc->state.x <= win_right && + vc->state.y >= win_top && vc->state.y <= win_bottom) { spin_unlock_irqrestore(&speakup_info.spinlock, flags); return; } diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 48a8199f7845..76f52935e0c8 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -381,7 +381,7 @@ static void vc_uniscr_putc(struct vc_data *vc, char32_t uc) struct uni_screen *uniscr = get_vc_uniscr(vc); if (uniscr) - uniscr->lines[vc->vc_y][vc->vc_x] = uc; + uniscr->lines[vc->state.y][vc->state.x] = uc; } static void vc_uniscr_insert(struct vc_data *vc, unsigned int nr) @@ -389,8 +389,8 @@ static void vc_uniscr_insert(struct vc_data *vc, unsigned int nr) struct uni_screen *uniscr = get_vc_uniscr(vc); if (uniscr) { - char32_t *ln = uniscr->lines[vc->vc_y]; - unsigned int x = vc->vc_x, cols = vc->vc_cols; + char32_t *ln = uniscr->lines[vc->state.y]; + unsigned int x = vc->state.x, cols = vc->vc_cols; memmove(&ln[x + nr], &ln[x], (cols - x - nr) * sizeof(*ln)); memset32(&ln[x], ' ', nr); @@ -402,8 +402,8 @@ static void vc_uniscr_delete(struct vc_data *vc, unsigned int nr) struct uni_screen *uniscr = get_vc_uniscr(vc); if (uniscr) { - char32_t *ln = uniscr->lines[vc->vc_y]; - unsigned int x = vc->vc_x, cols = vc->vc_cols; + char32_t *ln = uniscr->lines[vc->state.y]; + unsigned int x = vc->state.x, cols = vc->vc_cols; memcpy(&ln[x], &ln[x + nr], (cols - x - nr) * sizeof(*ln)); memset32(&ln[cols - nr], ' ', nr); @@ -416,7 +416,7 @@ static void vc_uniscr_clear_line(struct vc_data *vc, unsigned int x, struct uni_screen *uniscr = get_vc_uniscr(vc); if (uniscr) { - char32_t *ln = uniscr->lines[vc->vc_y]; + char32_t *ln = uniscr->lines[vc->state.y]; memset32(&ln[x], ' ', nr); } @@ -750,10 +750,11 @@ static u8 build_attr(struct vc_data *vc, u8 _color, u8 _intensity, u8 _blink, static void update_attr(struct vc_data *vc) { - vc->vc_attr = build_attr(vc, vc->vc_color, vc->vc_intensity, - vc->vc_blink, vc->vc_underline, - vc->vc_reverse ^ vc->vc_decscnm, vc->vc_italic); - vc->vc_video_erase_char = (build_attr(vc, vc->vc_color, 1, vc->vc_blink, 0, vc->vc_decscnm, 0) << 8) | ' '; + vc->vc_attr = build_attr(vc, vc->state.color, vc->state.intensity, + vc->state.blink, vc->state.underline, + vc->state.reverse ^ vc->vc_decscnm, vc->state.italic); + vc->vc_video_erase_char = ' ' | (build_attr(vc, vc->state.color, 1, + vc->state.blink, 0, vc->vc_decscnm, 0) << 8); } /* Note: inverting the screen twice should revert to the original state */ @@ -842,12 +843,12 @@ static void 
insert_char(struct vc_data *vc, unsigned int nr) unsigned short *p = (unsigned short *) vc->vc_pos; vc_uniscr_insert(vc, nr); - scr_memmovew(p + nr, p, (vc->vc_cols - vc->vc_x - nr) * 2); + scr_memmovew(p + nr, p, (vc->vc_cols - vc->state.x - nr) * 2); scr_memsetw(p, vc->vc_video_erase_char, nr * 2); vc->vc_need_wrap = 0; if (con_should_update(vc)) do_update_region(vc, (unsigned long) p, - vc->vc_cols - vc->vc_x); + vc->vc_cols - vc->state.x); } static void delete_char(struct vc_data *vc, unsigned int nr) @@ -855,13 +856,13 @@ static void delete_char(struct vc_data *vc, unsigned int nr) unsigned short *p = (unsigned short *) vc->vc_pos; vc_uniscr_delete(vc, nr); - scr_memcpyw(p, p + nr, (vc->vc_cols - vc->vc_x - nr) * 2); - scr_memsetw(p + vc->vc_cols - vc->vc_x - nr, vc->vc_video_erase_char, + scr_memcpyw(p, p + nr, (vc->vc_cols - vc->state.x - nr) * 2); + scr_memsetw(p + vc->vc_cols - vc->state.x - nr, vc->vc_video_erase_char, nr * 2); vc->vc_need_wrap = 0; if (con_should_update(vc)) do_update_region(vc, (unsigned long) p, - vc->vc_cols - vc->vc_x); + vc->vc_cols - vc->state.x); } static int softcursor_original = -1; @@ -880,7 +881,7 @@ static void add_softcursor(struct vc_data *vc) if ((type & 0x40) && ((i & 0x700) == ((i & 0x7000) >> 4))) i ^= 0x0700; scr_writew(i, (u16 *) vc->vc_pos); if (con_should_update(vc)) - vc->vc_sw->con_putc(vc, i, vc->vc_y, vc->vc_x); + vc->vc_sw->con_putc(vc, i, vc->state.y, vc->state.x); } static void hide_softcursor(struct vc_data *vc) @@ -889,7 +890,7 @@ static void hide_softcursor(struct vc_data *vc) scr_writew(softcursor_original, (u16 *)vc->vc_pos); if (con_should_update(vc)) vc->vc_sw->con_putc(vc, softcursor_original, - vc->vc_y, vc->vc_x); + vc->state.y, vc->state.x); softcursor_original = -1; } } @@ -927,7 +928,8 @@ static void set_origin(struct vc_data *vc) vc->vc_origin = (unsigned long)vc->vc_screenbuf; vc->vc_visible_origin = vc->vc_origin; vc->vc_scr_end = vc->vc_origin + vc->vc_screenbuf_size; - vc->vc_pos = vc->vc_origin + vc->vc_size_row * vc->vc_y + 2 * vc->vc_x; + vc->vc_pos = vc->vc_origin + vc->vc_size_row * vc->state.y + + 2 * vc->state.x; } static void save_screen(struct vc_data *vc) @@ -1250,8 +1252,8 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, new_origin = (long) newscreen; new_scr_end = new_origin + new_screen_size; - if (vc->vc_y > new_rows) { - if (old_rows - vc->vc_y < new_rows) { + if (vc->state.y > new_rows) { + if (old_rows - vc->state.y < new_rows) { /* * Cursor near the bottom, copy contents from the * bottom of buffer @@ -1262,7 +1264,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, * Cursor is in no man's land, copy 1/2 screenful * from the top and bottom of cursor position */ - first_copied_row = (vc->vc_y - new_rows/2); + first_copied_row = (vc->state.y - new_rows/2); } old_origin += first_copied_row * old_row_size; } else @@ -1296,7 +1298,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, /* do part of a reset_terminal() */ vc->vc_top = 0; vc->vc_bottom = vc->vc_rows; - gotoxy(vc, vc->vc_x, vc->vc_y); + gotoxy(vc, vc->state.x, vc->state.y); save_cur(vc); if (tty) { @@ -1431,12 +1433,12 @@ static void gotoxy(struct vc_data *vc, int new_x, int new_y) int min_y, max_y; if (new_x < 0) - vc->vc_x = 0; + vc->state.x = 0; else { if (new_x >= vc->vc_cols) - vc->vc_x = vc->vc_cols - 1; + vc->state.x = vc->vc_cols - 1; else - vc->vc_x = new_x; + vc->state.x = new_x; } if (vc->vc_decom) { @@ -1447,12 +1449,13 @@ static void gotoxy(struct vc_data *vc, int 
new_x, int new_y) max_y = vc->vc_rows; } if (new_y < min_y) - vc->vc_y = min_y; + vc->state.y = min_y; else if (new_y >= max_y) - vc->vc_y = max_y - 1; + vc->state.y = max_y - 1; else - vc->vc_y = new_y; - vc->vc_pos = vc->vc_origin + vc->vc_y * vc->vc_size_row + (vc->vc_x<<1); + vc->state.y = new_y; + vc->vc_pos = vc->vc_origin + vc->state.y * vc->vc_size_row + + (vc->state.x << 1); vc->vc_need_wrap = 0; } @@ -1479,10 +1482,10 @@ static void lf(struct vc_data *vc) /* don't scroll if above bottom of scrolling region, or * if below scrolling region */ - if (vc->vc_y + 1 == vc->vc_bottom) + if (vc->state.y + 1 == vc->vc_bottom) con_scroll(vc, vc->vc_top, vc->vc_bottom, SM_UP, 1); - else if (vc->vc_y < vc->vc_rows - 1) { - vc->vc_y++; + else if (vc->state.y < vc->vc_rows - 1) { + vc->state.y++; vc->vc_pos += vc->vc_size_row; } vc->vc_need_wrap = 0; @@ -1494,10 +1497,10 @@ static void ri(struct vc_data *vc) /* don't scroll if below top of scrolling region, or * if above scrolling region */ - if (vc->vc_y == vc->vc_top) + if (vc->state.y == vc->vc_top) con_scroll(vc, vc->vc_top, vc->vc_bottom, SM_DOWN, 1); - else if (vc->vc_y > 0) { - vc->vc_y--; + else if (vc->state.y > 0) { + vc->state.y--; vc->vc_pos -= vc->vc_size_row; } vc->vc_need_wrap = 0; @@ -1505,16 +1508,16 @@ static void ri(struct vc_data *vc) static inline void cr(struct vc_data *vc) { - vc->vc_pos -= vc->vc_x << 1; - vc->vc_need_wrap = vc->vc_x = 0; + vc->vc_pos -= vc->state.x << 1; + vc->vc_need_wrap = vc->state.x = 0; notify_write(vc, '\r'); } static inline void bs(struct vc_data *vc) { - if (vc->vc_x) { + if (vc->state.x) { vc->vc_pos -= 2; - vc->vc_x--; + vc->state.x--; vc->vc_need_wrap = 0; notify_write(vc, '\b'); } @@ -1532,16 +1535,16 @@ static void csi_J(struct vc_data *vc, int vpar) switch (vpar) { case 0: /* erase from cursor to end of display */ - vc_uniscr_clear_line(vc, vc->vc_x, - vc->vc_cols - vc->vc_x); - vc_uniscr_clear_lines(vc, vc->vc_y + 1, - vc->vc_rows - vc->vc_y - 1); + vc_uniscr_clear_line(vc, vc->state.x, + vc->vc_cols - vc->state.x); + vc_uniscr_clear_lines(vc, vc->state.y + 1, + vc->vc_rows - vc->state.y - 1); count = (vc->vc_scr_end - vc->vc_pos) >> 1; start = (unsigned short *)vc->vc_pos; break; case 1: /* erase from start to cursor */ - vc_uniscr_clear_line(vc, 0, vc->vc_x + 1); - vc_uniscr_clear_lines(vc, 0, vc->vc_y); + vc_uniscr_clear_line(vc, 0, vc->state.x + 1); + vc_uniscr_clear_lines(vc, 0, vc->state.y); count = ((vc->vc_pos - vc->vc_origin) >> 1) + 1; start = (unsigned short *)vc->vc_origin; break; @@ -1571,20 +1574,20 @@ static void csi_K(struct vc_data *vc, int vpar) switch (vpar) { case 0: /* erase from cursor to end of line */ offset = 0; - count = vc->vc_cols - vc->vc_x; + count = vc->vc_cols - vc->state.x; break; case 1: /* erase from start of line to cursor */ - offset = -vc->vc_x; - count = vc->vc_x + 1; + offset = -vc->state.x; + count = vc->state.x + 1; break; case 2: /* erase whole line */ - offset = -vc->vc_x; + offset = -vc->state.x; count = vc->vc_cols; break; default: return; } - vc_uniscr_clear_line(vc, vc->vc_x + offset, count); + vc_uniscr_clear_line(vc, vc->state.x + offset, count); scr_memsetw(start + offset, vc->vc_video_erase_char, 2 * count); vc->vc_need_wrap = 0; if (con_should_update(vc)) @@ -1597,23 +1600,23 @@ static void csi_X(struct vc_data *vc, int vpar) /* erase the following vpar posi if (!vpar) vpar++; - count = (vpar > vc->vc_cols - vc->vc_x) ? (vc->vc_cols - vc->vc_x) : vpar; + count = (vpar > vc->vc_cols - vc->state.x) ? 
(vc->vc_cols - vc->state.x) : vpar; - vc_uniscr_clear_line(vc, vc->vc_x, count); + vc_uniscr_clear_line(vc, vc->state.x, count); scr_memsetw((unsigned short *)vc->vc_pos, vc->vc_video_erase_char, 2 * count); if (con_should_update(vc)) - vc->vc_sw->con_clear(vc, vc->vc_y, vc->vc_x, 1, count); + vc->vc_sw->con_clear(vc, vc->state.y, vc->state.x, 1, count); vc->vc_need_wrap = 0; } static void default_attr(struct vc_data *vc) { - vc->vc_intensity = 1; - vc->vc_italic = 0; - vc->vc_underline = 0; - vc->vc_reverse = 0; - vc->vc_blink = 0; - vc->vc_color = vc->vc_def_color; + vc->state.intensity = 1; + vc->state.italic = 0; + vc->state.underline = 0; + vc->state.reverse = 0; + vc->state.blink = 0; + vc->state.color = vc->vc_def_color; } struct rgb { u8 r; u8 g; u8 b; }; @@ -1649,19 +1652,19 @@ static void rgb_foreground(struct vc_data *vc, const struct rgb *c) if (hue == 7 && max <= 0x55) { hue = 0; - vc->vc_intensity = 2; + vc->state.intensity = 2; } else if (max > 0xaa) - vc->vc_intensity = 2; + vc->state.intensity = 2; else - vc->vc_intensity = 1; + vc->state.intensity = 1; - vc->vc_color = (vc->vc_color & 0xf0) | hue; + vc->state.color = (vc->state.color & 0xf0) | hue; } static void rgb_background(struct vc_data *vc, const struct rgb *c) { /* For backgrounds, err on the dark side. */ - vc->vc_color = (vc->vc_color & 0x0f) + vc->state.color = (vc->state.color & 0x0f) | (c->r&0x80) >> 1 | (c->g&0x80) >> 2 | (c->b&0x80) >> 3; } @@ -1712,13 +1715,13 @@ static void csi_m(struct vc_data *vc) default_attr(vc); break; case 1: - vc->vc_intensity = 2; + vc->state.intensity = 2; break; case 2: - vc->vc_intensity = 0; + vc->state.intensity = 0; break; case 3: - vc->vc_italic = 1; + vc->state.italic = 1; break; case 21: /* @@ -1726,21 +1729,21 @@ static void csi_m(struct vc_data *vc) * convert it to a single underline. */ case 4: - vc->vc_underline = 1; + vc->state.underline = 1; break; case 5: - vc->vc_blink = 1; + vc->state.blink = 1; break; case 7: - vc->vc_reverse = 1; + vc->state.reverse = 1; break; case 10: /* ANSI X3.64-1979 (SCO-ish?) * Select primary font, don't display control chars if * defined, don't set bit 8 on output. */ - vc->vc_translate = set_translate(vc->vc_charset == 0 - ? vc->vc_G0_charset - : vc->vc_G1_charset, vc); + vc->vc_translate = set_translate(vc->state.charset == 0 + ? 
vc->state.G0_charset + : vc->state.G1_charset, vc); vc->vc_disp_ctrl = 0; vc->vc_toggle_meta = 0; break; @@ -1761,19 +1764,19 @@ static void csi_m(struct vc_data *vc) vc->vc_toggle_meta = 1; break; case 22: - vc->vc_intensity = 1; + vc->state.intensity = 1; break; case 23: - vc->vc_italic = 0; + vc->state.italic = 0; break; case 24: - vc->vc_underline = 0; + vc->state.underline = 0; break; case 25: - vc->vc_blink = 0; + vc->state.blink = 0; break; case 27: - vc->vc_reverse = 0; + vc->state.reverse = 0; break; case 38: i = vc_t416_color(vc, i, rgb_foreground); @@ -1782,25 +1785,25 @@ static void csi_m(struct vc_data *vc) i = vc_t416_color(vc, i, rgb_background); break; case 39: - vc->vc_color = (vc->vc_def_color & 0x0f) | - (vc->vc_color & 0xf0); + vc->state.color = (vc->vc_def_color & 0x0f) | + (vc->state.color & 0xf0); break; case 49: - vc->vc_color = (vc->vc_def_color & 0xf0) | - (vc->vc_color & 0x0f); + vc->state.color = (vc->vc_def_color & 0xf0) | + (vc->state.color & 0x0f); break; default: if (vc->vc_par[i] >= 90 && vc->vc_par[i] <= 107) { if (vc->vc_par[i] < 100) - vc->vc_intensity = 2; + vc->state.intensity = 2; vc->vc_par[i] -= 60; } if (vc->vc_par[i] >= 30 && vc->vc_par[i] <= 37) - vc->vc_color = color_table[vc->vc_par[i] - 30] - | (vc->vc_color & 0xf0); + vc->state.color = color_table[vc->vc_par[i] - 30] + | (vc->state.color & 0xf0); else if (vc->vc_par[i] >= 40 && vc->vc_par[i] <= 47) - vc->vc_color = (color_table[vc->vc_par[i] - 40] << 4) - | (vc->vc_color & 0x0f); + vc->state.color = (color_table[vc->vc_par[i] - 40] << 4) + | (vc->state.color & 0x0f); break; } update_attr(vc); @@ -1819,7 +1822,7 @@ static void cursor_report(struct vc_data *vc, struct tty_struct *tty) { char buf[40]; - sprintf(buf, "\033[%d;%dR", vc->vc_y + (vc->vc_decom ? vc->vc_top + 1 : 1), vc->vc_x + 1); + sprintf(buf, "\033[%d;%dR", vc->state.y + (vc->vc_decom ? 
vc->vc_top + 1 : 1), vc->state.x + 1); respond_string(buf, tty->port); } @@ -1924,14 +1927,14 @@ static void setterm_command(struct vc_data *vc) case 1: /* set color for underline mode */ if (vc->vc_can_do_color && vc->vc_par[1] < 16) { vc->vc_ulcolor = color_table[vc->vc_par[1]]; - if (vc->vc_underline) + if (vc->state.underline) update_attr(vc); } break; case 2: /* set color for half intensity mode */ if (vc->vc_can_do_color && vc->vc_par[1] < 16) { vc->vc_halfcolor = color_table[vc->vc_par[1]]; - if (vc->vc_intensity == 0) + if (vc->state.intensity == 0) update_attr(vc); } break; @@ -1985,8 +1988,8 @@ static void setterm_command(struct vc_data *vc) /* console_lock is held */ static void csi_at(struct vc_data *vc, unsigned int nr) { - if (nr > vc->vc_cols - vc->vc_x) - nr = vc->vc_cols - vc->vc_x; + if (nr > vc->vc_cols - vc->state.x) + nr = vc->vc_cols - vc->state.x; else if (!nr) nr = 1; insert_char(vc, nr); @@ -1995,19 +1998,19 @@ static void csi_at(struct vc_data *vc, unsigned int nr) /* console_lock is held */ static void csi_L(struct vc_data *vc, unsigned int nr) { - if (nr > vc->vc_rows - vc->vc_y) - nr = vc->vc_rows - vc->vc_y; + if (nr > vc->vc_rows - vc->state.y) + nr = vc->vc_rows - vc->state.y; else if (!nr) nr = 1; - con_scroll(vc, vc->vc_y, vc->vc_bottom, SM_DOWN, nr); + con_scroll(vc, vc->state.y, vc->vc_bottom, SM_DOWN, nr); vc->vc_need_wrap = 0; } /* console_lock is held */ static void csi_P(struct vc_data *vc, unsigned int nr) { - if (nr > vc->vc_cols - vc->vc_x) - nr = vc->vc_cols - vc->vc_x; + if (nr > vc->vc_cols - vc->state.x) + nr = vc->vc_cols - vc->state.x; else if (!nr) nr = 1; delete_char(vc, nr); @@ -2016,44 +2019,28 @@ static void csi_P(struct vc_data *vc, unsigned int nr) /* console_lock is held */ static void csi_M(struct vc_data *vc, unsigned int nr) { - if (nr > vc->vc_rows - vc->vc_y) - nr = vc->vc_rows - vc->vc_y; + if (nr > vc->vc_rows - vc->state.y) + nr = vc->vc_rows - vc->state.y; else if (!nr) nr=1; - con_scroll(vc, vc->vc_y, vc->vc_bottom, SM_UP, nr); + con_scroll(vc, vc->state.y, vc->vc_bottom, SM_UP, nr); vc->vc_need_wrap = 0; } /* console_lock is held (except via vc_init->reset_terminal */ static void save_cur(struct vc_data *vc) { - vc->vc_saved_x = vc->vc_x; - vc->vc_saved_y = vc->vc_y; - vc->vc_s_intensity = vc->vc_intensity; - vc->vc_s_italic = vc->vc_italic; - vc->vc_s_underline = vc->vc_underline; - vc->vc_s_blink = vc->vc_blink; - vc->vc_s_reverse = vc->vc_reverse; - vc->vc_s_charset = vc->vc_charset; - vc->vc_s_color = vc->vc_color; - vc->vc_saved_G0 = vc->vc_G0_charset; - vc->vc_saved_G1 = vc->vc_G1_charset; + memcpy(&vc->saved_state, &vc->state, sizeof(vc->state)); } /* console_lock is held */ static void restore_cur(struct vc_data *vc) { - gotoxy(vc, vc->vc_saved_x, vc->vc_saved_y); - vc->vc_intensity = vc->vc_s_intensity; - vc->vc_italic = vc->vc_s_italic; - vc->vc_underline = vc->vc_s_underline; - vc->vc_blink = vc->vc_s_blink; - vc->vc_reverse = vc->vc_s_reverse; - vc->vc_charset = vc->vc_s_charset; - vc->vc_color = vc->vc_s_color; - vc->vc_G0_charset = vc->vc_saved_G0; - vc->vc_G1_charset = vc->vc_saved_G1; - vc->vc_translate = set_translate(vc->vc_charset ? vc->vc_G1_charset : vc->vc_G0_charset, vc); + memcpy(&vc->state, &vc->saved_state, sizeof(vc->state)); + + gotoxy(vc, vc->state.x, vc->state.y); + vc->vc_translate = set_translate(vc->state.charset ? 
vc->state.G1_charset : + vc->state.G0_charset, vc); update_attr(vc); vc->vc_need_wrap = 0; } @@ -2070,9 +2057,9 @@ static void reset_terminal(struct vc_data *vc, int do_clear) vc->vc_state = ESnormal; vc->vc_priv = EPecma; vc->vc_translate = set_translate(LAT1_MAP, vc); - vc->vc_G0_charset = LAT1_MAP; - vc->vc_G1_charset = GRAF_MAP; - vc->vc_charset = 0; + vc->state.G0_charset = LAT1_MAP; + vc->state.G1_charset = GRAF_MAP; + vc->state.charset = 0; vc->vc_need_wrap = 0; vc->vc_report_mouse = 0; vc->vc_utf = default_utf8; @@ -2136,13 +2123,13 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) bs(vc); return; case 9: - vc->vc_pos -= (vc->vc_x << 1); - while (vc->vc_x < vc->vc_cols - 1) { - vc->vc_x++; - if (vc->vc_tab_stop[7 & (vc->vc_x >> 5)] & (1 << (vc->vc_x & 31))) + vc->vc_pos -= (vc->state.x << 1); + while (vc->state.x < vc->vc_cols - 1) { + vc->state.x++; + if (vc->vc_tab_stop[7 & (vc->state.x >> 5)] & (1 << (vc->state.x & 31))) break; } - vc->vc_pos += (vc->vc_x << 1); + vc->vc_pos += (vc->state.x << 1); notify_write(vc, '\t'); return; case 10: case 11: case 12: @@ -2154,13 +2141,13 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) cr(vc); return; case 14: - vc->vc_charset = 1; - vc->vc_translate = set_translate(vc->vc_G1_charset, vc); + vc->state.charset = 1; + vc->vc_translate = set_translate(vc->state.G1_charset, vc); vc->vc_disp_ctrl = 1; return; case 15: - vc->vc_charset = 0; - vc->vc_translate = set_translate(vc->vc_G0_charset, vc); + vc->state.charset = 0; + vc->vc_translate = set_translate(vc->state.G0_charset, vc); vc->vc_disp_ctrl = 0; return; case 24: case 26: @@ -2200,7 +2187,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) lf(vc); return; case 'H': - vc->vc_tab_stop[7 & (vc->vc_x >> 5)] |= (1 << (vc->vc_x & 31)); + vc->vc_tab_stop[7 & (vc->state.x >> 5)] |= (1 << (vc->state.x & 31)); return; case 'Z': respond_ID(tty); @@ -2347,42 +2334,42 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) case 'G': case '`': if (vc->vc_par[0]) vc->vc_par[0]--; - gotoxy(vc, vc->vc_par[0], vc->vc_y); + gotoxy(vc, vc->vc_par[0], vc->state.y); return; case 'A': if (!vc->vc_par[0]) vc->vc_par[0]++; - gotoxy(vc, vc->vc_x, vc->vc_y - vc->vc_par[0]); + gotoxy(vc, vc->state.x, vc->state.y - vc->vc_par[0]); return; case 'B': case 'e': if (!vc->vc_par[0]) vc->vc_par[0]++; - gotoxy(vc, vc->vc_x, vc->vc_y + vc->vc_par[0]); + gotoxy(vc, vc->state.x, vc->state.y + vc->vc_par[0]); return; case 'C': case 'a': if (!vc->vc_par[0]) vc->vc_par[0]++; - gotoxy(vc, vc->vc_x + vc->vc_par[0], vc->vc_y); + gotoxy(vc, vc->state.x + vc->vc_par[0], vc->state.y); return; case 'D': if (!vc->vc_par[0]) vc->vc_par[0]++; - gotoxy(vc, vc->vc_x - vc->vc_par[0], vc->vc_y); + gotoxy(vc, vc->state.x - vc->vc_par[0], vc->state.y); return; case 'E': if (!vc->vc_par[0]) vc->vc_par[0]++; - gotoxy(vc, 0, vc->vc_y + vc->vc_par[0]); + gotoxy(vc, 0, vc->state.y + vc->vc_par[0]); return; case 'F': if (!vc->vc_par[0]) vc->vc_par[0]++; - gotoxy(vc, 0, vc->vc_y - vc->vc_par[0]); + gotoxy(vc, 0, vc->state.y - vc->vc_par[0]); return; case 'd': if (vc->vc_par[0]) vc->vc_par[0]--; - gotoxay(vc, vc->vc_x ,vc->vc_par[0]); + gotoxay(vc, vc->state.x ,vc->vc_par[0]); return; case 'H': case 'f': if (vc->vc_par[0]) @@ -2412,7 +2399,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) return; case 'g': if (!vc->vc_par[0]) - vc->vc_tab_stop[7 & (vc->vc_x >> 5)] &= ~(1 << (vc->vc_x & 31)); + vc->vc_tab_stop[7 & 
(vc->state.x >> 5)] &= ~(1 << (vc->state.x & 31)); else if (vc->vc_par[0] == 3) { vc->vc_tab_stop[0] = vc->vc_tab_stop[1] = @@ -2497,28 +2484,28 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) return; case ESsetG0: if (c == '0') - vc->vc_G0_charset = GRAF_MAP; + vc->state.G0_charset = GRAF_MAP; else if (c == 'B') - vc->vc_G0_charset = LAT1_MAP; + vc->state.G0_charset = LAT1_MAP; else if (c == 'U') - vc->vc_G0_charset = IBMPC_MAP; + vc->state.G0_charset = IBMPC_MAP; else if (c == 'K') - vc->vc_G0_charset = USER_MAP; - if (vc->vc_charset == 0) - vc->vc_translate = set_translate(vc->vc_G0_charset, vc); + vc->state.G0_charset = USER_MAP; + if (vc->state.charset == 0) + vc->vc_translate = set_translate(vc->state.G0_charset, vc); vc->vc_state = ESnormal; return; case ESsetG1: if (c == '0') - vc->vc_G1_charset = GRAF_MAP; + vc->state.G1_charset = GRAF_MAP; else if (c == 'B') - vc->vc_G1_charset = LAT1_MAP; + vc->state.G1_charset = LAT1_MAP; else if (c == 'U') - vc->vc_G1_charset = IBMPC_MAP; + vc->state.G1_charset = IBMPC_MAP; else if (c == 'K') - vc->vc_G1_charset = USER_MAP; - if (vc->vc_charset == 1) - vc->vc_translate = set_translate(vc->vc_G1_charset, vc); + vc->state.G1_charset = USER_MAP; + if (vc->state.charset == 1) + vc->vc_translate = set_translate(vc->state.G1_charset, vc); vc->vc_state = ESnormal; return; case ESosc: @@ -2572,7 +2559,7 @@ static void con_flush(struct vc_data *vc, unsigned long draw_from, return; vc->vc_sw->con_putcs(vc, (u16 *)draw_from, - (u16 *)draw_to - (u16 *)draw_from, vc->vc_y, *draw_x); + (u16 *)draw_to - (u16 *)draw_from, vc->state.y, *draw_x); *draw_x = -1; } @@ -2788,14 +2775,14 @@ rescan_last_byte: (vc_attr << 8) + tc, (u16 *) vc->vc_pos); if (con_should_update(vc) && draw_x < 0) { - draw_x = vc->vc_x; + draw_x = vc->state.x; draw_from = vc->vc_pos; } - if (vc->vc_x == vc->vc_cols - 1) { + if (vc->state.x == vc->vc_cols - 1) { vc->vc_need_wrap = vc->vc_decawm; draw_to = vc->vc_pos + 2; } else { - vc->vc_x++; + vc->state.x++; draw_to = (vc->vc_pos += 2); } @@ -2972,25 +2959,25 @@ static void vt_console_print(struct console *co, const char *b, unsigned count) hide_cursor(vc); start = (ushort *)vc->vc_pos; - start_x = vc->vc_x; + start_x = vc->state.x; cnt = 0; while (count--) { c = *b++; if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) { if (cnt && con_is_visible(vc)) - vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x); + vc->vc_sw->con_putcs(vc, start, cnt, vc->state.y, start_x); cnt = 0; if (c == 8) { /* backspace */ bs(vc); start = (ushort *)vc->vc_pos; - start_x = vc->vc_x; + start_x = vc->state.x; continue; } if (c != 13) lf(vc); cr(vc); start = (ushort *)vc->vc_pos; - start_x = vc->vc_x; + start_x = vc->state.x; if (c == 10 || c == 13) continue; } @@ -2998,15 +2985,15 @@ static void vt_console_print(struct console *co, const char *b, unsigned count) scr_writew((vc->vc_attr << 8) + c, (unsigned short *)vc->vc_pos); notify_write(vc, c); cnt++; - if (vc->vc_x == vc->vc_cols - 1) { + if (vc->state.x == vc->vc_cols - 1) { vc->vc_need_wrap = 1; } else { vc->vc_pos += 2; - vc->vc_x++; + vc->state.x++; } } if (cnt && con_is_visible(vc)) - vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x); + vc->vc_sw->con_putcs(vc, start, cnt, vc->state.y, start_x); set_cursor(vc); notify_update(vc); @@ -3401,7 +3388,7 @@ static int __init con_init(void) master_display_fg = vc = vc_cons[currcons].d; set_origin(vc); save_screen(vc); - gotoxy(vc, vc->vc_x, vc->vc_y); + gotoxy(vc, vc->state.x, vc->state.y); csi_J(vc, 0); 
update_screen(vc); pr_info("Console: %s %s %dx%d\n", @@ -4680,8 +4667,8 @@ EXPORT_SYMBOL_GPL(screen_pos); void getconsxy(struct vc_data *vc, unsigned char *p) { /* clamp values if they don't fit */ - p[0] = min(vc->vc_x, 0xFFu); - p[1] = min(vc->vc_y, 0xFFu); + p[0] = min(vc->state.x, 0xFFu); + p[1] = min(vc->state.y, 0xFFu); } void putconsxy(struct vc_data *vc, unsigned char *p) diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c index d0d427a2f1a3..d64c5ce84125 100644 --- a/drivers/video/console/mdacon.c +++ b/drivers/video/console/mdacon.c @@ -488,7 +488,7 @@ static void mdacon_cursor(struct vc_data *c, int mode) return; } - mda_set_cursor(c->vc_y*mda_num_columns*2 + c->vc_x*2); + mda_set_cursor(c->state.y * mda_num_columns * 2 + c->state.x * 2); switch (c->vc_cursor_type & 0x0f) { diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c index 79c9bd8d3025..90083eb80515 100644 --- a/drivers/video/console/sticon.c +++ b/drivers/video/console/sticon.c @@ -132,10 +132,10 @@ static void sticon_cursor(struct vc_data *conp, int mode) { unsigned short car1; - car1 = conp->vc_screenbuf[conp->vc_x + conp->vc_y * conp->vc_cols]; + car1 = conp->vc_screenbuf[conp->state.x + conp->state.y * conp->vc_cols]; switch (mode) { case CM_ERASE: - sti_putc(sticon_sti, car1, conp->vc_y, conp->vc_x); + sti_putc(sticon_sti, car1, conp->state.y, conp->state.x); break; case CM_MOVE: case CM_DRAW: @@ -146,7 +146,7 @@ static void sticon_cursor(struct vc_data *conp, int mode) case CUR_TWO_THIRDS: case CUR_BLOCK: sti_putc(sticon_sti, (car1 & 255) + (0 << 8) + (7 << 11), - conp->vc_y, conp->vc_x); + conp->state.y, conp->state.x); break; } break; diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 998b0de1812f..d073fa167e87 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -718,9 +718,9 @@ static void vgacon_cursor(struct vc_data *c, int mode) case CM_ERASE: write_vga(14, (c->vc_pos - vga_vram_base) / 2); if (vga_video_type >= VIDEO_TYPE_VGAC) - vgacon_set_cursor_size(c->vc_x, 31, 30); + vgacon_set_cursor_size(c->state.x, 31, 30); else - vgacon_set_cursor_size(c->vc_x, 31, 31); + vgacon_set_cursor_size(c->state.x, 31, 31); break; case CM_MOVE: @@ -728,7 +728,7 @@ static void vgacon_cursor(struct vc_data *c, int mode) write_vga(14, (c->vc_pos - vga_vram_base) / 2); switch (c->vc_cursor_type & 0x0f) { case CUR_UNDERLINE: - vgacon_set_cursor_size(c->vc_x, + vgacon_set_cursor_size(c->state.x, c->vc_font.height - (c->vc_font.height < 10 ? 2 : 3), @@ -737,21 +737,21 @@ static void vgacon_cursor(struct vc_data *c, int mode) 10 ? 1 : 2)); break; case CUR_TWO_THIRDS: - vgacon_set_cursor_size(c->vc_x, + vgacon_set_cursor_size(c->state.x, c->vc_font.height / 3, c->vc_font.height - (c->vc_font.height < 10 ? 1 : 2)); break; case CUR_LOWER_THIRD: - vgacon_set_cursor_size(c->vc_x, + vgacon_set_cursor_size(c->state.x, (c->vc_font.height * 2) / 3, c->vc_font.height - (c->vc_font.height < 10 ? 
1 : 2)); break; case CUR_LOWER_HALF: - vgacon_set_cursor_size(c->vc_x, + vgacon_set_cursor_size(c->state.x, c->vc_font.height / 2, c->vc_font.height - (c->vc_font.height < @@ -759,12 +759,12 @@ static void vgacon_cursor(struct vc_data *c, int mode) break; case CUR_NONE: if (vga_video_type >= VIDEO_TYPE_VGAC) - vgacon_set_cursor_size(c->vc_x, 31, 30); + vgacon_set_cursor_size(c->state.x, 31, 30); else - vgacon_set_cursor_size(c->vc_x, 31, 31); + vgacon_set_cursor_size(c->state.x, 31, 31); break; default: - vgacon_set_cursor_size(c->vc_x, 1, + vgacon_set_cursor_size(c->state.x, 1, c->vc_font.height); break; } @@ -1352,8 +1352,8 @@ static void vgacon_save_screen(struct vc_data *c) * console initialization routines. */ vga_bootup_console = 1; - c->vc_x = screen_info.orig_x; - c->vc_y = screen_info.orig_y; + c->state.x = screen_info.orig_x; + c->state.y = screen_info.orig_y; } /* We can't copy in more than the size of the video buffer, diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c index ca935c09a261..c750470a31ec 100644 --- a/drivers/video/fbdev/core/bitblit.c +++ b/drivers/video/fbdev/core/bitblit.c @@ -240,7 +240,7 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode, struct fbcon_ops *ops = info->fbcon_par; unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; int w = DIV_ROUND_UP(vc->vc_font.width, 8), c; - int y = real_y(ops->p, vc->vc_y); + int y = real_y(ops->p, vc->state.y); int attribute, use_sw = (vc->vc_cursor_type & 0x10); int err = 1; char *src; @@ -286,10 +286,10 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode, cursor.set |= FB_CUR_SETCMAP; } - if ((ops->cursor_state.image.dx != (vc->vc_font.width * vc->vc_x)) || + if ((ops->cursor_state.image.dx != (vc->vc_font.width * vc->state.x)) || (ops->cursor_state.image.dy != (vc->vc_font.height * y)) || ops->cursor_reset) { - ops->cursor_state.image.dx = vc->vc_font.width * vc->vc_x; + ops->cursor_state.image.dx = vc->vc_font.width * vc->state.x; ops->cursor_state.image.dy = vc->vc_font.height * y; cursor.set |= FB_CUR_SETPOS; } diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 9d28a8e3328f..38d2a00b0ccf 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -655,11 +655,11 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info, } if (!save) { int lines; - if (vc->vc_y + logo_lines >= rows) - lines = rows - vc->vc_y - 1; + if (vc->state.y + logo_lines >= rows) + lines = rows - vc->state.y - 1; else lines = logo_lines; - vc->vc_y += lines; + vc->state.y += lines; vc->vc_pos += lines * vc->vc_size_row; } } @@ -677,7 +677,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info, vc->vc_size_row * rows); scr_memcpyw(q, save, logo_lines * new_cols * 2); - vc->vc_y += logo_lines; + vc->state.y += logo_lines; vc->vc_pos += logo_lines * vc->vc_size_row; kfree(save); } diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c index dfa9a8aa4509..9d06446a1a3b 100644 --- a/drivers/video/fbdev/core/fbcon_ccw.c +++ b/drivers/video/fbdev/core/fbcon_ccw.c @@ -225,7 +225,7 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode, struct fbcon_ops *ops = info->fbcon_par; unsigned short charmask = vc->vc_hi_font_mask ? 
0x1ff : 0xff; int w = (vc->vc_font.height + 7) >> 3, c; - int y = real_y(ops->p, vc->vc_y); + int y = real_y(ops->p, vc->state.y); int attribute, use_sw = (vc->vc_cursor_type & 0x10); int err = 1, dx, dy; char *src; @@ -284,7 +284,7 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode, } dx = y * vc->vc_font.height; - dy = vyres - ((vc->vc_x + 1) * vc->vc_font.width); + dy = vyres - ((vc->state.x + 1) * vc->vc_font.width); if (ops->cursor_state.image.dx != dx || ops->cursor_state.image.dy != dy || diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c index ce08251bfd38..4b5f76bb01e5 100644 --- a/drivers/video/fbdev/core/fbcon_cw.c +++ b/drivers/video/fbdev/core/fbcon_cw.c @@ -208,7 +208,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, struct fbcon_ops *ops = info->fbcon_par; unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; int w = (vc->vc_font.height + 7) >> 3, c; - int y = real_y(ops->p, vc->vc_y); + int y = real_y(ops->p, vc->state.y); int attribute, use_sw = (vc->vc_cursor_type & 0x10); int err = 1, dx, dy; char *src; @@ -267,7 +267,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, } dx = vxres - ((y * vc->vc_font.height) + vc->vc_font.height); - dy = vc->vc_x * vc->vc_font.width; + dy = vc->state.x * vc->vc_font.width; if (ops->cursor_state.image.dx != dx || ops->cursor_state.image.dy != dy || diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c index 1936afc78fec..7e0ae3549dc7 100644 --- a/drivers/video/fbdev/core/fbcon_ud.c +++ b/drivers/video/fbdev/core/fbcon_ud.c @@ -255,7 +255,7 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode, struct fbcon_ops *ops = info->fbcon_par; unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; int w = (vc->vc_font.width + 7) >> 3, c; - int y = real_y(ops->p, vc->vc_y); + int y = real_y(ops->p, vc->state.y); int attribute, use_sw = (vc->vc_cursor_type & 0x10); int err = 1, dx, dy; char *src; @@ -315,7 +315,7 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode, } dy = vyres - ((y * vc->vc_font.height) + vc->vc_font.height); - dx = vxres - ((vc->vc_x * vc->vc_font.width) + vc->vc_font.width); + dx = vxres - ((vc->state.x * vc->vc_font.width) + vc->vc_font.width); if (ops->cursor_state.image.dx != dx || ops->cursor_state.image.dy != dy || diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c index 93390312957f..ac51425687e4 100644 --- a/drivers/video/fbdev/core/tileblit.c +++ b/drivers/video/fbdev/core/tileblit.c @@ -85,8 +85,8 @@ static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode, struct fb_tilecursor cursor; int use_sw = (vc->vc_cursor_type & 0x10); - cursor.sx = vc->vc_x; - cursor.sy = vc->vc_y; + cursor.sx = vc->state.x; + cursor.sy = vc->state.y; cursor.mode = (mode == CM_ERASE || use_sw) ? 
0 : 1; cursor.fg = fg; cursor.bg = bg; diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index 24d4c16e3ae0..162f4337c767 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -22,6 +22,37 @@ struct uni_screen; #define NPAR 16 +/** + * struct vc_state -- state of a VC + * @x: cursor's x-position + * @y: cursor's y-position + * @color: foreground & background colors + * @G0_charset: what's G0 slot set to (like GRAF_MAP, LAT1_MAP) + * @G1_charset: what's G1 slot set to (like GRAF_MAP, LAT1_MAP) + * @charset: what character set to use (0=G0 or 1=G1) + * @intensity: 0=half-bright, 1=normal, 2=bold + * @reverse: reversed foreground/background colors + * + * These members are defined separately from struct vc_data as we save & + * restore them at times. + */ +struct vc_state { + unsigned int x, y; + + unsigned char color; + + unsigned char G0_charset; + unsigned char G1_charset; + unsigned int charset : 1; + + /* attribute flags */ + unsigned int intensity : 2; + unsigned int italic : 1; + unsigned int underline : 1; + unsigned int blink : 1; + unsigned int reverse : 1; +}; + /* * Example: vc_data of a console that was scrolled 3 lines down. * @@ -57,6 +88,8 @@ struct uni_screen; struct vc_data { struct tty_port port; /* Upper level data */ + struct vc_state state, saved_state; + unsigned short vc_num; /* Console number */ unsigned int vc_cols; /* [#] Console size */ unsigned int vc_rows; @@ -73,8 +106,6 @@ struct vc_data { /* attributes for all characters on screen */ unsigned char vc_attr; /* Current attributes */ unsigned char vc_def_color; /* Default colors */ - unsigned char vc_color; /* Foreground & background */ - unsigned char vc_s_color; /* Saved foreground & background */ unsigned char vc_ulcolor; /* Color for underline mode */ unsigned char vc_itcolor; unsigned char vc_halfcolor; /* Color for half intensity mode */ @@ -82,8 +113,6 @@ struct vc_data { unsigned int vc_cursor_type; unsigned short vc_complement_mask; /* [#] Xor mask for mouse pointer */ unsigned short vc_s_complement_mask; /* Saved mouse pointer mask */ - unsigned int vc_x, vc_y; /* Cursor position */ - unsigned int vc_saved_x, vc_saved_y; unsigned long vc_pos; /* Cursor address */ /* fonts */ unsigned short vc_hi_font_mask; /* [#] Attribute set for upper 256 chars of font or 0 if not supported */ @@ -98,8 +127,6 @@ struct vc_data { int vt_newvt; wait_queue_head_t paste_wait; /* mode flags */ - unsigned int vc_charset : 1; /* Character set G0 / G1 */ - unsigned int vc_s_charset : 1; /* Saved character set */ unsigned int vc_disp_ctrl : 1; /* Display chars < 32? */ unsigned int vc_toggle_meta : 1; /* Toggle high bit? */ unsigned int vc_decscnm : 1; /* Screen Mode */ @@ -107,17 +134,6 @@ struct vc_data { unsigned int vc_decawm : 1; /* Autowrap Mode */ unsigned int vc_deccm : 1; /* Cursor Visible */ unsigned int vc_decim : 1; /* Insert Mode */ - /* attribute flags */ - unsigned int vc_intensity : 2; /* 0=half-bright, 1=normal, 2=bold */ - unsigned int vc_italic:1; - unsigned int vc_underline : 1; - unsigned int vc_blink : 1; - unsigned int vc_reverse : 1; - unsigned int vc_s_intensity : 2; /* saved rendition */ - unsigned int vc_s_italic:1; - unsigned int vc_s_underline : 1; - unsigned int vc_s_blink : 1; - unsigned int vc_s_reverse : 1; /* misc */ unsigned int vc_priv : 3; unsigned int vc_need_wrap : 1; @@ -129,10 +145,6 @@ struct vc_data { unsigned int vc_tab_stop[8]; /* Tab stops. 256 columns. 
*/ unsigned char vc_palette[16*3]; /* Colour palette for VGA+ */ unsigned short * vc_translate; - unsigned char vc_G0_charset; - unsigned char vc_G1_charset; - unsigned char vc_saved_G0; - unsigned char vc_saved_G1; unsigned int vc_resize_user; /* resize request from user */ unsigned int vc_bell_pitch; /* Console bell pitch */ unsigned int vc_bell_duration; /* Console bell duration */ -- cgit v1.2.3 From b84ae3dc70fedf4bdee2dbfa487fd23b606fbb82 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 15 Jun 2020 09:48:34 +0200 Subject: vt: introduce enum vc_intensity for intensity Introduce names (en enum) for 0, 1, and 2 constants. We now have VCI_HALF_BRIGHT, VCI_NORMAL, and VCI_BOLD instead. Apart from the cleanup, 1) the enum allows for better type checking, and 2) this saves some code. No more fiddling with bits is needed in assembly now. (OTOH, the structure is larger.) Signed-off-by: Jiri Slaby Link: https://lore.kernel.org/r/20200615074910.19267-2-jslaby@suse.cz Signed-off-by: Greg Kroah-Hartman --- drivers/tty/vt/vt.c | 32 +++++++++++++++++--------------- drivers/usb/misc/sisusbvga/sisusb_con.c | 6 +++--- drivers/video/console/mdacon.c | 5 +++-- drivers/video/console/sticon.c | 3 ++- drivers/video/console/vgacon.c | 9 +++++---- include/linux/console.h | 5 ++++- include/linux/console_struct.h | 11 +++++++++-- 7 files changed, 43 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 76f52935e0c8..71bf483e5640 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -705,8 +705,9 @@ void update_region(struct vc_data *vc, unsigned long start, int count) /* Structure of attributes is hardware-dependent */ -static u8 build_attr(struct vc_data *vc, u8 _color, u8 _intensity, u8 _blink, - u8 _underline, u8 _reverse, u8 _italic) +static u8 build_attr(struct vc_data *vc, u8 _color, + enum vc_intensity _intensity, u8 _blink, u8 _underline, + u8 _reverse, u8 _italic) { if (vc->vc_sw->con_build_attr) return vc->vc_sw->con_build_attr(vc, _color, _intensity, @@ -734,13 +735,13 @@ static u8 build_attr(struct vc_data *vc, u8 _color, u8 _intensity, u8 _blink, a = (a & 0xF0) | vc->vc_itcolor; else if (_underline) a = (a & 0xf0) | vc->vc_ulcolor; - else if (_intensity == 0) + else if (_intensity == VCI_HALF_BRIGHT) a = (a & 0xf0) | vc->vc_halfcolor; if (_reverse) a = ((a) & 0x88) | ((((a) >> 4) | ((a) << 4)) & 0x77); if (_blink) a ^= 0x80; - if (_intensity == 2) + if (_intensity == VCI_BOLD) a ^= 0x08; if (vc->vc_hi_font_mask == 0x100) a <<= 1; @@ -753,8 +754,9 @@ static void update_attr(struct vc_data *vc) vc->vc_attr = build_attr(vc, vc->state.color, vc->state.intensity, vc->state.blink, vc->state.underline, vc->state.reverse ^ vc->vc_decscnm, vc->state.italic); - vc->vc_video_erase_char = ' ' | (build_attr(vc, vc->state.color, 1, - vc->state.blink, 0, vc->vc_decscnm, 0) << 8); + vc->vc_video_erase_char = ' ' | (build_attr(vc, vc->state.color, + VCI_NORMAL, vc->state.blink, 0, vc->vc_decscnm, + 0) << 8); } /* Note: inverting the screen twice should revert to the original state */ @@ -1611,7 +1613,7 @@ static void csi_X(struct vc_data *vc, int vpar) /* erase the following vpar posi static void default_attr(struct vc_data *vc) { - vc->state.intensity = 1; + vc->state.intensity = VCI_NORMAL; vc->state.italic = 0; vc->state.underline = 0; vc->state.reverse = 0; @@ -1652,11 +1654,11 @@ static void rgb_foreground(struct vc_data *vc, const struct rgb *c) if (hue == 7 && max <= 0x55) { hue = 0; - vc->state.intensity = 2; + vc->state.intensity = 
VCI_BOLD; } else if (max > 0xaa) - vc->state.intensity = 2; + vc->state.intensity = VCI_BOLD; else - vc->state.intensity = 1; + vc->state.intensity = VCI_NORMAL; vc->state.color = (vc->state.color & 0xf0) | hue; } @@ -1715,10 +1717,10 @@ static void csi_m(struct vc_data *vc) default_attr(vc); break; case 1: - vc->state.intensity = 2; + vc->state.intensity = VCI_BOLD; break; case 2: - vc->state.intensity = 0; + vc->state.intensity = VCI_HALF_BRIGHT; break; case 3: vc->state.italic = 1; @@ -1764,7 +1766,7 @@ static void csi_m(struct vc_data *vc) vc->vc_toggle_meta = 1; break; case 22: - vc->state.intensity = 1; + vc->state.intensity = VCI_NORMAL; break; case 23: vc->state.italic = 0; @@ -1795,7 +1797,7 @@ static void csi_m(struct vc_data *vc) default: if (vc->vc_par[i] >= 90 && vc->vc_par[i] <= 107) { if (vc->vc_par[i] < 100) - vc->state.intensity = 2; + vc->state.intensity = VCI_BOLD; vc->vc_par[i] -= 60; } if (vc->vc_par[i] >= 30 && vc->vc_par[i] <= 37) @@ -1934,7 +1936,7 @@ static void setterm_command(struct vc_data *vc) case 2: /* set color for half intensity mode */ if (vc->vc_can_do_color && vc->vc_par[1] < 16) { vc->vc_halfcolor = color_table[vc->vc_par[1]]; - if (vc->state.intensity == 0) + if (vc->state.intensity == VCI_HALF_BRIGHT) update_attr(vc); } break; diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c index cd0155310fea..c59fe641b8b5 100644 --- a/drivers/usb/misc/sisusbvga/sisusb_con.c +++ b/drivers/usb/misc/sisusbvga/sisusb_con.c @@ -302,14 +302,14 @@ sisusbcon_deinit(struct vc_data *c) /* interface routine */ static u8 -sisusbcon_build_attr(struct vc_data *c, u8 color, u8 intensity, +sisusbcon_build_attr(struct vc_data *c, u8 color, enum vc_intensity intensity, u8 blink, u8 underline, u8 reverse, u8 unused) { u8 attr = color; if (underline) attr = (attr & 0xf0) | c->vc_ulcolor; - else if (intensity == 0) + else if (intensity == VCI_HALF_BRIGHT) attr = (attr & 0xf0) | c->vc_halfcolor; if (reverse) @@ -320,7 +320,7 @@ sisusbcon_build_attr(struct vc_data *c, u8 color, u8 intensity, if (blink) attr ^= 0x80; - if (intensity == 2) + if (intensity == VCI_BOLD) attr ^= 0x08; return attr; diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c index d64c5ce84125..e3da664df16c 100644 --- a/drivers/video/console/mdacon.c +++ b/drivers/video/console/mdacon.c @@ -394,7 +394,8 @@ static inline u16 mda_convert_attr(u16 ch) (ch & 0x00ff) | attr; } -static u8 mdacon_build_attr(struct vc_data *c, u8 color, u8 intensity, +static u8 mdacon_build_attr(struct vc_data *c, u8 color, + enum vc_intensity intensity, u8 blink, u8 underline, u8 reverse, u8 italic) { /* The attribute is just a bit vector: @@ -405,7 +406,7 @@ static u8 mdacon_build_attr(struct vc_data *c, u8 color, u8 intensity, * Bit 7 : blink */ - return (intensity & 3) | + return (intensity & VCI_MASK) | ((underline & 1) << 2) | ((reverse & 1) << 3) | (!!italic << 4) | diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c index 90083eb80515..a847067abbe5 100644 --- a/drivers/video/console/sticon.c +++ b/drivers/video/console/sticon.c @@ -288,7 +288,8 @@ static unsigned long sticon_getxy(struct vc_data *conp, unsigned long pos, return ret; } -static u8 sticon_build_attr(struct vc_data *conp, u8 color, u8 intens, +static u8 sticon_build_attr(struct vc_data *conp, u8 color, + enum vc_intensity intens, u8 blink, u8 underline, u8 reverse, u8 italic) { u8 attr = ((color & 0x70) >> 1) | ((color & 7)); diff --git a/drivers/video/console/vgacon.c 
b/drivers/video/console/vgacon.c index d073fa167e87..d0b26e2318d3 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -629,7 +629,8 @@ static void vgacon_deinit(struct vc_data *c) con_set_default_unimap(c); } -static u8 vgacon_build_attr(struct vc_data *c, u8 color, u8 intensity, +static u8 vgacon_build_attr(struct vc_data *c, u8 color, + enum vc_intensity intensity, u8 blink, u8 underline, u8 reverse, u8 italic) { u8 attr = color; @@ -639,7 +640,7 @@ static u8 vgacon_build_attr(struct vc_data *c, u8 color, u8 intensity, attr = (attr & 0xF0) | c->vc_itcolor; else if (underline) attr = (attr & 0xf0) | c->vc_ulcolor; - else if (intensity == 0) + else if (intensity == VCI_HALF_BRIGHT) attr = (attr & 0xf0) | c->vc_halfcolor; } if (reverse) @@ -648,14 +649,14 @@ static u8 vgacon_build_attr(struct vc_data *c, u8 color, u8 intensity, 0x77); if (blink) attr ^= 0x80; - if (intensity == 2) + if (intensity == VCI_BOLD) attr ^= 0x08; if (!vga_can_do_color) { if (italic) attr = (attr & 0xF8) | 0x02; else if (underline) attr = (attr & 0xf8) | 0x01; - else if (intensity == 0) + else if (intensity == VCI_HALF_BRIGHT) attr = (attr & 0xf0) | 0x08; } return attr; diff --git a/include/linux/console.h b/include/linux/console.h index 75dd20650fbe..10c04779ae49 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -35,6 +35,8 @@ enum con_scroll { SM_DOWN, }; +enum vc_intensity; + /** * struct consw - callbacks for consoles * @@ -74,7 +76,8 @@ struct consw { void (*con_scrolldelta)(struct vc_data *vc, int lines); int (*con_set_origin)(struct vc_data *vc); void (*con_save_screen)(struct vc_data *vc); - u8 (*con_build_attr)(struct vc_data *vc, u8 color, u8 intensity, + u8 (*con_build_attr)(struct vc_data *vc, u8 color, + enum vc_intensity intensity, u8 blink, u8 underline, u8 reverse, u8 italic); void (*con_invert_region)(struct vc_data *vc, u16 *p, int count); u16 *(*con_screen_pos)(struct vc_data *vc, int offset); diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index 162f4337c767..e901d98790bf 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -22,6 +22,13 @@ struct uni_screen; #define NPAR 16 +enum vc_intensity { + VCI_HALF_BRIGHT, + VCI_NORMAL, + VCI_BOLD, + VCI_MASK = 0x3, +}; + /** * struct vc_state -- state of a VC * @x: cursor's x-position @@ -30,7 +37,7 @@ struct uni_screen; * @G0_charset: what's G0 slot set to (like GRAF_MAP, LAT1_MAP) * @G1_charset: what's G1 slot set to (like GRAF_MAP, LAT1_MAP) * @charset: what character set to use (0=G0 or 1=G1) - * @intensity: 0=half-bright, 1=normal, 2=bold + * @intensity: see enum vc_intensity for values * @reverse: reversed foreground/background colors * * These members are defined separately from struct vc_data as we save & @@ -46,7 +53,7 @@ struct vc_state { unsigned int charset : 1; /* attribute flags */ - unsigned int intensity : 2; + enum vc_intensity intensity; unsigned int italic : 1; unsigned int underline : 1; unsigned int blink : 1; -- cgit v1.2.3 From 77bc14f273c2dfecbf87f41fdc00345d99597e13 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 15 Jun 2020 09:48:35 +0200 Subject: vc: switch state to bool The code currently uses bitfields to store true-false values. Switch all of that to bools. Apart from the cleanup, it saves 20B of code as many shifts, ANDs, and ORs became simple movzb's. 
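A rough userspace sketch of the trade-off described above (this is not kernel code; the struct and field names are made up for illustration): a one-bit bitfield needs a read-modify-write of the containing word, while a bool is a plain byte store and a simple movzb load.

#include <stdbool.h>
#include <stdio.h>

/* hypothetical structs, only to contrast the two layouts */
struct packed_attrs { unsigned int italic:1, underline:1, blink:1, reverse:1; };
struct plain_attrs  { bool italic, underline, blink, reverse; };

int main(void)
{
	struct packed_attrs p = { 0 };
	struct plain_attrs b = { 0 };

	p.italic = 1;     /* load word, OR in the bit, store the word back */
	b.italic = true;  /* single byte store */

	printf("%u %u\n", (unsigned int)p.italic, (unsigned int)b.italic);
	return 0;
}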
Signed-off-by: Jiri Slaby Link: https://lore.kernel.org/r/20200615074910.19267-3-jslaby@suse.cz Signed-off-by: Greg Kroah-Hartman --- drivers/tty/vt/vt.c | 32 ++++++++++++++++---------------- drivers/usb/misc/sisusbvga/sisusb_con.c | 3 ++- drivers/video/console/mdacon.c | 11 ++++++----- drivers/video/console/sticon.c | 3 ++- drivers/video/console/vgacon.c | 3 ++- include/linux/console.h | 2 +- include/linux/console_struct.h | 8 ++++---- 7 files changed, 33 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 71bf483e5640..26cb1fc48b27 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -706,8 +706,8 @@ void update_region(struct vc_data *vc, unsigned long start, int count) /* Structure of attributes is hardware-dependent */ static u8 build_attr(struct vc_data *vc, u8 _color, - enum vc_intensity _intensity, u8 _blink, u8 _underline, - u8 _reverse, u8 _italic) + enum vc_intensity _intensity, bool _blink, bool _underline, + bool _reverse, bool _italic) { if (vc->vc_sw->con_build_attr) return vc->vc_sw->con_build_attr(vc, _color, _intensity, @@ -755,8 +755,8 @@ static void update_attr(struct vc_data *vc) vc->state.blink, vc->state.underline, vc->state.reverse ^ vc->vc_decscnm, vc->state.italic); vc->vc_video_erase_char = ' ' | (build_attr(vc, vc->state.color, - VCI_NORMAL, vc->state.blink, 0, vc->vc_decscnm, - 0) << 8); + VCI_NORMAL, vc->state.blink, false, + vc->vc_decscnm, false) << 8); } /* Note: inverting the screen twice should revert to the original state */ @@ -1614,10 +1614,10 @@ static void csi_X(struct vc_data *vc, int vpar) /* erase the following vpar posi static void default_attr(struct vc_data *vc) { vc->state.intensity = VCI_NORMAL; - vc->state.italic = 0; - vc->state.underline = 0; - vc->state.reverse = 0; - vc->state.blink = 0; + vc->state.italic = false; + vc->state.underline = false; + vc->state.reverse = false; + vc->state.blink = false; vc->state.color = vc->vc_def_color; } @@ -1723,7 +1723,7 @@ static void csi_m(struct vc_data *vc) vc->state.intensity = VCI_HALF_BRIGHT; break; case 3: - vc->state.italic = 1; + vc->state.italic = true; break; case 21: /* @@ -1731,13 +1731,13 @@ static void csi_m(struct vc_data *vc) * convert it to a single underline. */ case 4: - vc->state.underline = 1; + vc->state.underline = true; break; case 5: - vc->state.blink = 1; + vc->state.blink = true; break; case 7: - vc->state.reverse = 1; + vc->state.reverse = true; break; case 10: /* ANSI X3.64-1979 (SCO-ish?) 
* Select primary font, don't display control chars if @@ -1769,16 +1769,16 @@ static void csi_m(struct vc_data *vc) vc->state.intensity = VCI_NORMAL; break; case 23: - vc->state.italic = 0; + vc->state.italic = false; break; case 24: - vc->state.underline = 0; + vc->state.underline = false; break; case 25: - vc->state.blink = 0; + vc->state.blink = false; break; case 27: - vc->state.reverse = 0; + vc->state.reverse = false; break; case 38: i = vc_t416_color(vc, i, rgb_foreground); diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c index c59fe641b8b5..80657c49310a 100644 --- a/drivers/usb/misc/sisusbvga/sisusb_con.c +++ b/drivers/usb/misc/sisusbvga/sisusb_con.c @@ -303,7 +303,8 @@ sisusbcon_deinit(struct vc_data *c) /* interface routine */ static u8 sisusbcon_build_attr(struct vc_data *c, u8 color, enum vc_intensity intensity, - u8 blink, u8 underline, u8 reverse, u8 unused) + bool blink, bool underline, bool reverse, + bool unused) { u8 attr = color; diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c index e3da664df16c..00cb6245fbef 100644 --- a/drivers/video/console/mdacon.c +++ b/drivers/video/console/mdacon.c @@ -396,7 +396,8 @@ static inline u16 mda_convert_attr(u16 ch) static u8 mdacon_build_attr(struct vc_data *c, u8 color, enum vc_intensity intensity, - u8 blink, u8 underline, u8 reverse, u8 italic) + bool blink, bool underline, bool reverse, + bool italic) { /* The attribute is just a bit vector: * @@ -407,10 +408,10 @@ static u8 mdacon_build_attr(struct vc_data *c, u8 color, */ return (intensity & VCI_MASK) | - ((underline & 1) << 2) | - ((reverse & 1) << 3) | - (!!italic << 4) | - ((blink & 1) << 7); + (underline << 2) | + (reverse << 3) | + (italic << 4) | + (blink << 7); } static void mdacon_invert_region(struct vc_data *c, u16 *p, int count) diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c index a847067abbe5..bbcdfd312c36 100644 --- a/drivers/video/console/sticon.c +++ b/drivers/video/console/sticon.c @@ -290,7 +290,8 @@ static unsigned long sticon_getxy(struct vc_data *conp, unsigned long pos, static u8 sticon_build_attr(struct vc_data *conp, u8 color, enum vc_intensity intens, - u8 blink, u8 underline, u8 reverse, u8 italic) + bool blink, bool underline, bool reverse, + bool italic) { u8 attr = ((color & 0x70) >> 1) | ((color & 7)); diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index d0b26e2318d3..c1c4ce28ac5e 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -631,7 +631,8 @@ static void vgacon_deinit(struct vc_data *c) static u8 vgacon_build_attr(struct vc_data *c, u8 color, enum vc_intensity intensity, - u8 blink, u8 underline, u8 reverse, u8 italic) + bool blink, bool underline, bool reverse, + bool italic) { u8 attr = color; diff --git a/include/linux/console.h b/include/linux/console.h index 10c04779ae49..964b67912b04 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -78,7 +78,7 @@ struct consw { void (*con_save_screen)(struct vc_data *vc); u8 (*con_build_attr)(struct vc_data *vc, u8 color, enum vc_intensity intensity, - u8 blink, u8 underline, u8 reverse, u8 italic); + bool blink, bool underline, bool reverse, bool italic); void (*con_invert_region)(struct vc_data *vc, u16 *p, int count); u16 *(*con_screen_pos)(struct vc_data *vc, int offset); unsigned long (*con_getxy)(struct vc_data *vc, unsigned long position, diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h 
index e901d98790bf..fa1abffe64be 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -54,10 +54,10 @@ struct vc_state { /* attribute flags */ enum vc_intensity intensity; - unsigned int italic : 1; - unsigned int underline : 1; - unsigned int blink : 1; - unsigned int reverse : 1; + bool italic; + bool underline; + bool blink; + bool reverse; }; /* -- cgit v1.2.3 From b70ec4d97f4cc4f6f804ca57419d070b80a9874e Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 15 Jun 2020 09:48:37 +0200 Subject: vt: switch G0/1_charset to an array Declare Gx_charset[2] instead of G0_charset and G1_charset. It makes the code simpler (without ternary operators). Signed-off-by: Jiri Slaby Link: https://lore.kernel.org/r/20200615074910.19267-5-jslaby@suse.cz Signed-off-by: Greg Kroah-Hartman --- drivers/tty/vt/vt.c | 19 ++++++++----------- include/linux/console_struct.h | 6 ++---- 2 files changed, 10 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 729c7c8c682b..4e79cda0c2be 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -1743,9 +1743,7 @@ static void csi_m(struct vc_data *vc) * Select primary font, don't display control chars if * defined, don't set bit 8 on output. */ - vc->vc_translate = set_translate(vc->state.charset == 0 - ? vc->state.G0_charset - : vc->state.G1_charset, vc); + vc->vc_translate = set_translate(vc->state.Gx_charset[vc->state.charset], vc); vc->vc_disp_ctrl = 0; vc->vc_toggle_meta = 0; break; @@ -2041,8 +2039,8 @@ static void restore_cur(struct vc_data *vc) memcpy(&vc->state, &vc->saved_state, sizeof(vc->state)); gotoxy(vc, vc->state.x, vc->state.y); - vc->vc_translate = set_translate(vc->state.charset ? vc->state.G1_charset : - vc->state.G0_charset, vc); + vc->vc_translate = set_translate(vc->state.Gx_charset[vc->state.charset], + vc); update_attr(vc); vc->vc_need_wrap = 0; } @@ -2059,8 +2057,8 @@ static void reset_terminal(struct vc_data *vc, int do_clear) vc->vc_state = ESnormal; vc->vc_priv = EPecma; vc->vc_translate = set_translate(LAT1_MAP, vc); - vc->state.G0_charset = LAT1_MAP; - vc->state.G1_charset = GRAF_MAP; + vc->state.Gx_charset[0] = LAT1_MAP; + vc->state.Gx_charset[1] = GRAF_MAP; vc->state.charset = 0; vc->vc_need_wrap = 0; vc->vc_report_mouse = 0; @@ -2105,8 +2103,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear) static void vc_setGx(struct vc_data *vc, unsigned int which, int c) { - unsigned char *charset = which == 0 ? 
&vc->state.G0_charset : - &vc->state.G1_charset; + unsigned char *charset = &vc->state.Gx_charset[which]; switch (c) { case '0': @@ -2168,12 +2165,12 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) return; case 14: vc->state.charset = 1; - vc->vc_translate = set_translate(vc->state.G1_charset, vc); + vc->vc_translate = set_translate(vc->state.Gx_charset[1], vc); vc->vc_disp_ctrl = 1; return; case 15: vc->state.charset = 0; - vc->vc_translate = set_translate(vc->state.G0_charset, vc); + vc->vc_translate = set_translate(vc->state.Gx_charset[0], vc); vc->vc_disp_ctrl = 0; return; case 24: case 26: diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index fa1abffe64be..623e86689c3a 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -34,8 +34,7 @@ enum vc_intensity { * @x: cursor's x-position * @y: cursor's y-position * @color: foreground & background colors - * @G0_charset: what's G0 slot set to (like GRAF_MAP, LAT1_MAP) - * @G1_charset: what's G1 slot set to (like GRAF_MAP, LAT1_MAP) + * @Gx_charset: what's G0/G1 slot set to (like GRAF_MAP, LAT1_MAP) * @charset: what character set to use (0=G0 or 1=G1) * @intensity: see enum vc_intensity for values * @reverse: reversed foreground/background colors @@ -48,8 +47,7 @@ struct vc_state { unsigned char color; - unsigned char G0_charset; - unsigned char G1_charset; + unsigned char Gx_charset[2]; unsigned int charset : 1; /* attribute flags */ -- cgit v1.2.3 From dbee4cffa1bfe23263edde1d17cdbef0de3a6ac0 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 15 Jun 2020 09:48:38 +0200 Subject: vt: convert vc_tab_stop to bitmap vc_tab_stop is used as a bitmap, but defined as an unsigned int array. Switch it to bitmap and convert all users to the bitmap interface. Note the difference in behavior! We no longer mask the top 24 bits away from x, hence we do not wrap tabs at 256th column. Instead, we silently drop attempts to set a tab behind 256 columns. And we will also seek by '\t' to the rightmost column, when behind that boundary. I do not think the original behavior was desired and that someone relies on that. If this turns out to be the case, we can change the added 'if's back to masks here and there instead... (Or we can increase the limit as fb consoles now have 240 chars here. And they could have more with higher than my resolution, of course.) 
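A minimal userspace sketch of the behaviour described above (an approximation, not the kernel code: find_next_bit() is modelled with a plain loop, VC_TABSTOPS_COUNT is mirrored as a local constant, and the column numbers are made up):

#include <stdbool.h>
#include <stdio.h>

#define TABSTOPS 256U                   /* mirrors VC_TABSTOPS_COUNT */

static bool stop[TABSTOPS];

/* seek the next tab stop right of column x, clamping at the last column */
static unsigned int next_tab(unsigned int x, unsigned int cols)
{
	unsigned int limit = cols - 1 < TABSTOPS ? cols - 1 : TABSTOPS;

	for (x = x + 1; x < limit; x++)
		if (stop[x])
			return x;

	return cols - 1;                /* no stop in range: rightmost column */
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < TABSTOPS; i += 8)   /* default stop every 8 columns */
		stop[i] = true;

	printf("%u\n", next_tab(3, 80));    /* 8: next default stop */
	printf("%u\n", next_tab(250, 300)); /* 299: past the 256-column bitmap */
	return 0;
}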
Signed-off-by: Jiri Slaby Link: https://lore.kernel.org/r/20200615074910.19267-6-jslaby@suse.cz Signed-off-by: Greg Kroah-Hartman --- drivers/tty/vt/vt.c | 44 +++++++++++++++++------------------------- include/linux/console_struct.h | 3 ++- 2 files changed, 20 insertions(+), 27 deletions(-) (limited to 'include') diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 4e79cda0c2be..3adb7f409524 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -2052,6 +2052,8 @@ enum { ESnormal, ESesc, ESsquare, ESgetpars, ESfunckey, /* console_lock is held (except via vc_init()) */ static void reset_terminal(struct vc_data *vc, int do_clear) { + unsigned int i; + vc->vc_top = 0; vc->vc_bottom = vc->vc_rows; vc->vc_state = ESnormal; @@ -2082,14 +2084,9 @@ static void reset_terminal(struct vc_data *vc, int do_clear) default_attr(vc); update_attr(vc); - vc->vc_tab_stop[0] = - vc->vc_tab_stop[1] = - vc->vc_tab_stop[2] = - vc->vc_tab_stop[3] = - vc->vc_tab_stop[4] = - vc->vc_tab_stop[5] = - vc->vc_tab_stop[6] = - vc->vc_tab_stop[7] = 0x01010101; + bitmap_zero(vc->vc_tab_stop, VC_TABSTOPS_COUNT); + for (i = 0; i < VC_TABSTOPS_COUNT; i += 8) + set_bit(i, vc->vc_tab_stop); vc->vc_bell_pitch = DEFAULT_BELL_PITCH; vc->vc_bell_duration = DEFAULT_BELL_DURATION; @@ -2147,11 +2144,13 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) return; case 9: vc->vc_pos -= (vc->state.x << 1); - while (vc->state.x < vc->vc_cols - 1) { - vc->state.x++; - if (vc->vc_tab_stop[7 & (vc->state.x >> 5)] & (1 << (vc->state.x & 31))) - break; - } + + vc->state.x = find_next_bit(vc->vc_tab_stop, + min(vc->vc_cols - 1, VC_TABSTOPS_COUNT), + vc->state.x + 1); + if (vc->state.x >= VC_TABSTOPS_COUNT) + vc->state.x = vc->vc_cols - 1; + vc->vc_pos += (vc->state.x << 1); notify_write(vc, '\t'); return; @@ -2210,7 +2209,8 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) lf(vc); return; case 'H': - vc->vc_tab_stop[7 & (vc->state.x >> 5)] |= (1 << (vc->state.x & 31)); + if (vc->state.x < VC_TABSTOPS_COUNT) + set_bit(vc->state.x, vc->vc_tab_stop); return; case 'Z': respond_ID(tty); @@ -2421,18 +2421,10 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) respond_ID(tty); return; case 'g': - if (!vc->vc_par[0]) - vc->vc_tab_stop[7 & (vc->state.x >> 5)] &= ~(1 << (vc->state.x & 31)); - else if (vc->vc_par[0] == 3) { - vc->vc_tab_stop[0] = - vc->vc_tab_stop[1] = - vc->vc_tab_stop[2] = - vc->vc_tab_stop[3] = - vc->vc_tab_stop[4] = - vc->vc_tab_stop[5] = - vc->vc_tab_stop[6] = - vc->vc_tab_stop[7] = 0; - } + if (!vc->vc_par[0] && vc->state.x < VC_TABSTOPS_COUNT) + set_bit(vc->state.x, vc->vc_tab_stop); + else if (vc->vc_par[0] == 3) + bitmap_zero(vc->vc_tab_stop, VC_TABSTOPS_COUNT); return; case 'm': csi_m(vc); diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index 623e86689c3a..81f7afcd061a 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -21,6 +21,7 @@ struct uni_pagedir; struct uni_screen; #define NPAR 16 +#define VC_TABSTOPS_COUNT 256U enum vc_intensity { VCI_HALF_BRIGHT, @@ -147,7 +148,7 @@ struct vc_data { unsigned char vc_utf : 1; /* Unicode UTF-8 encoding */ unsigned char vc_utf_count; int vc_utf_char; - unsigned int vc_tab_stop[8]; /* Tab stops. 256 columns. */ + DECLARE_BITMAP(vc_tab_stop, VC_TABSTOPS_COUNT); /* Tab stops. 256 columns. 
*/ unsigned char vc_palette[16*3]; /* Colour palette for VGA+ */ unsigned short * vc_translate; unsigned int vc_resize_user; /* resize request from user */ -- cgit v1.2.3 From 7d4a3112f07878ba9c6bffbcdb2dea2dcfc5c1f9 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 15 Jun 2020 09:48:39 +0200 Subject: vt: remove 25 years stale comment vc_cons was made global (non-static) in 1.3.38, almost 25 years ago. Remove a comment which says that it would be a disadvantage to do so :P. Signed-off-by: Jiri Slaby Link: https://lore.kernel.org/r/20200615074910.19267-7-jslaby@suse.cz Signed-off-by: Greg Kroah-Hartman --- include/linux/console_struct.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index 81f7afcd061a..40ed52f67bc5 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -167,8 +167,7 @@ struct vc { struct work_struct SAK_work; /* might add scrmem, kbd at some time, - to have everything in one place - the disadvantage - would be that vc_cons etc can no longer be static */ + to have everything in one place */ }; extern struct vc vc_cons [MAX_NR_CONSOLES]; -- cgit v1.2.3 From 9a6f72d9b6c121415bc2867329fe77b0d2a52dc1 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 15 Jun 2020 09:48:41 +0200 Subject: vt: get rid of VT10.ID macros VT100ID is unused, but defined twice. Kill it. VT102ID is used only in respond_ID. Define there a variable with proper type and use that instead. Then drop both defines of VT102ID too. Signed-off-by: Jiri Slaby Link: https://lore.kernel.org/r/20200615074910.19267-9-jslaby@suse.cz Signed-off-by: Greg Kroah-Hartman --- drivers/tty/vt/vt.c | 11 ++++------- include/linux/console.h | 6 ------ 2 files changed, 4 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 49c9d1e4067c..8d9e532f050a 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -1397,12 +1397,6 @@ enum { EPecma = 0, EPdec, EPeq, EPgt, EPlt}; #define kbdapplic VC_APPLIC #define lnm VC_CRLF -/* - * this is what the terminal answers to a ESC-Z or csi0c query. - */ -#define VT100ID "\033[?1;2c" -#define VT102ID "\033[?6c" - const unsigned char color_table[] = { 0, 4, 2, 6, 1, 5, 3, 7, 8,12,10,14, 9,13,11,15 }; @@ -1835,7 +1829,10 @@ static inline void status_report(struct tty_struct *tty) static inline void respond_ID(struct tty_struct *tty) { - respond_string(VT102ID, strlen(VT102ID), tty->port); + /* terminal answer to an ESC-Z or csi0c query. */ + static const char vt102_id[] = "\033[?6c"; + + respond_string(vt102_id, strlen(vt102_id), tty->port); } void mouse_report(struct tty_struct *tty, int butt, int mrx, int mry) diff --git a/include/linux/console.h b/include/linux/console.h index 964b67912b04..0670d3491e0e 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -24,12 +24,6 @@ struct module; struct tty_struct; struct notifier_block; -/* - * this is what the terminal answers to a ESC-Z or csi0c query. - */ -#define VT100ID "\033[?1;2c" -#define VT102ID "\033[?6c" - enum con_scroll { SM_UP, SM_DOWN, -- cgit v1.2.3 From a018180cc3485e71f7912e36bf93caa635e0e4af Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 15 Jun 2020 09:48:42 +0200 Subject: vt: move vc_translate to vt.c and rename it vc_translate is used only in vt.c, so move the definition from a header there. Also, it used to be a macro, so be modern and make a static inline from it. This makes the code actually readable. 
And as a preparation for next patches, rename it to vc_translate_ascii. vc_translate will be a wrapper for both unicode and this one. Signed-off-by: Jiri Slaby Link: https://lore.kernel.org/r/20200615074910.19267-10-jslaby@suse.cz Signed-off-by: Greg Kroah-Hartman --- drivers/tty/vt/vt.c | 14 +++++++++++++- include/linux/vt_kern.h | 3 --- 2 files changed, 13 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 8d9e532f050a..b86639351dd2 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -2560,6 +2560,18 @@ static void con_flush(struct vc_data *vc, unsigned long draw_from, *draw_x = -1; } +static inline int vc_translate_ascii(const struct vc_data *vc, int c) +{ + if (IS_ENABLED(CONFIG_CONSOLE_TRANSLATIONS)) { + if (vc->vc_toggle_meta) + c |= 0x80; + + return vc->vc_translate[c]; + } + + return c; +} + /* acquires console_lock */ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int count) { @@ -2687,7 +2699,7 @@ rescan_last_byte: c = 0xfffd; tc = c; } else { /* no utf or alternate charset mode */ - tc = vc_translate(vc, c); + tc = vc_translate_ascii(vc, c); } param.c = tc; diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index abf5bccf906a..349e39c3ab60 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -74,8 +74,6 @@ int con_set_default_unimap(struct vc_data *vc); void con_free_unimap(struct vc_data *vc); int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc); -#define vc_translate(vc, c) ((vc)->vc_translate[(c) | \ - ((vc)->vc_toggle_meta ? 0x80 : 0)]) #else static inline int con_set_trans_old(unsigned char __user *table) { @@ -124,7 +122,6 @@ int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc) return 0; } -#define vc_translate(vc, c) (c) #endif /* vt.c */ -- cgit v1.2.3 From 4dfa3c54f908d7ec20b88671329d6a3205d37d36 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 15 Jun 2020 09:48:57 +0200 Subject: vt: redefine world of cursor macros The cursor code used to use magic constants, ANDs, ORs, and some macros. Redefine all this to make some sense. In particular: * Drop CUR_DEFAULT, which is CUR_UNDERLINE. CUR_DEFAULT was used only for cur_default variable initialization, so use CUR_UNDERLINE there to make obvious what's the default. * Drop CUR_HWMASK. Instead, define CUR_SIZE() which explains it more. And use it all over the places. * Define few more masks and bits which will be used in next patches instead of magic constants. * Define CUR_MAKE to build up cursor value. 
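The following standalone snippet shows how a cursor_type word decomposes under the new macros. The macro bodies are copied from the console_struct.h hunk below; the example field values are arbitrary.

#include <stdio.h>

#define CUR_MAKE(size, change, set)  ((size) | ((change) << 8) | ((set) << 16))
#define CUR_SIZE(c)                  ((c) & 0x00000f)
#define CUR_BLOCK                    6
#define CUR_CHANGE(c)                ((c) & 0x00ff00)
#define CUR_SET(c)                   (((c) & 0xff0000) >> 8)

int main(void)
{
	/* arbitrary example: block cursor, "change" byte 0x10, "set" byte 0x20 */
	unsigned int cur = CUR_MAKE(CUR_BLOCK, 0x10, 0x20);

	/* note: CUR_CHANGE() and CUR_SET() both return their byte still
	 * shifted left by 8, i.e. aligned with the attribute byte of a
	 * screen word */
	printf("size=%u change=0x%x set=0x%x\n",
	       CUR_SIZE(cur), CUR_CHANGE(cur) >> 8, CUR_SET(cur) >> 8);
	return 0;
}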
Signed-off-by: Jiri Slaby Cc: Bartlomiej Zolnierkiewicz Cc: dri-devel@lists.freedesktop.org Cc: linux-fbdev@vger.kernel.org Link: https://lore.kernel.org/r/20200615074910.19267-25-jslaby@suse.cz Signed-off-by: Greg Kroah-Hartman --- drivers/tty/vt/vt.c | 2 +- drivers/video/fbdev/core/bitblit.c | 2 +- drivers/video/fbdev/core/fbcon_ccw.c | 2 +- drivers/video/fbdev/core/fbcon_cw.c | 2 +- drivers/video/fbdev/core/fbcon_ud.c | 2 +- include/linux/console_struct.h | 28 +++++++++++++++++----------- 6 files changed, 22 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index f7d5a3c3845f..af1ef717f416 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -163,7 +163,7 @@ module_param(default_utf8, int, S_IRUGO | S_IWUSR); int global_cursor_default = -1; module_param(global_cursor_default, int, S_IRUGO | S_IWUSR); -static int cur_default = CUR_DEFAULT; +static int cur_default = CUR_UNDERLINE; module_param(cur_default, int, S_IRUGO | S_IWUSR); /* diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c index c750470a31ec..3b002b365a5a 100644 --- a/drivers/video/fbdev/core/bitblit.c +++ b/drivers/video/fbdev/core/bitblit.c @@ -325,7 +325,7 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode, ops->p->cursor_shape = vc->vc_cursor_type; cursor.set |= FB_CUR_SETSHAPE; - switch (ops->p->cursor_shape & CUR_HWMASK) { + switch (CUR_SIZE(ops->p->cursor_shape)) { case CUR_NONE: cur_height = 0; break; diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c index 9d06446a1a3b..5b67bcebe34c 100644 --- a/drivers/video/fbdev/core/fbcon_ccw.c +++ b/drivers/video/fbdev/core/fbcon_ccw.c @@ -325,7 +325,7 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode, ops->p->cursor_shape = vc->vc_cursor_type; cursor.set |= FB_CUR_SETSHAPE; - switch (ops->p->cursor_shape & CUR_HWMASK) { + switch (CUR_SIZE(ops->p->cursor_shape)) { case CUR_NONE: cur_height = 0; break; diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c index 4b5f76bb01e5..f1aab3ae3bc9 100644 --- a/drivers/video/fbdev/core/fbcon_cw.c +++ b/drivers/video/fbdev/core/fbcon_cw.c @@ -308,7 +308,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, ops->p->cursor_shape = vc->vc_cursor_type; cursor.set |= FB_CUR_SETSHAPE; - switch (ops->p->cursor_shape & CUR_HWMASK) { + switch (CUR_SIZE(ops->p->cursor_shape)) { case CUR_NONE: cur_height = 0; break; diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c index 7e0ae3549dc7..81ed6f6bed67 100644 --- a/drivers/video/fbdev/core/fbcon_ud.c +++ b/drivers/video/fbdev/core/fbcon_ud.c @@ -348,7 +348,7 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode, ops->p->cursor_shape = vc->vc_cursor_type; cursor.set |= FB_CUR_SETSHAPE; - switch (ops->p->cursor_shape & CUR_HWMASK) { + switch (CUR_SIZE(ops->p->cursor_shape)) { case CUR_NONE: cur_height = 0; break; diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index 40ed52f67bc5..153734816b49 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -173,17 +173,23 @@ struct vc { extern struct vc vc_cons [MAX_NR_CONSOLES]; extern void vc_SAK(struct work_struct *work); -#define CUR_DEF 0 -#define CUR_NONE 1 -#define CUR_UNDERLINE 2 -#define CUR_LOWER_THIRD 3 -#define CUR_LOWER_HALF 4 -#define CUR_TWO_THIRDS 5 -#define CUR_BLOCK 6 -#define CUR_HWMASK 0x0f 
-#define CUR_SWMASK 0xfff0 - -#define CUR_DEFAULT CUR_UNDERLINE +#define CUR_MAKE(size, change, set) ((size) | ((change) << 8) | \ + ((set) << 16)) +#define CUR_SIZE(c) ((c) & 0x00000f) +# define CUR_DEF 0 +# define CUR_NONE 1 +# define CUR_UNDERLINE 2 +# define CUR_LOWER_THIRD 3 +# define CUR_LOWER_HALF 4 +# define CUR_TWO_THIRDS 5 +# define CUR_BLOCK 6 +#define CUR_SW 0x000010 +#define CUR_ALWAYS_BG 0x000020 +#define CUR_INVERT_FG_BG 0x000040 +#define CUR_FG 0x000700 +#define CUR_BG 0x007000 +#define CUR_CHANGE(c) ((c) & 0x00ff00) +#define CUR_SET(c) (((c) & 0xff0000) >> 8) bool con_is_visible(const struct vc_data *vc); -- cgit v1.2.3 From 521b512b157a1315ff2bf11c11ab184c79515aea Mon Sep 17 00:00:00 2001 From: Lukasz Luba Date: Wed, 27 May 2020 10:58:47 +0100 Subject: PM / EM: change naming convention from 'capacity' to 'performance' The Energy Model uses concept of performance domain and capacity states in order to calculate power used by CPUs. Change naming convention from capacity to performance state would enable wider usage in future, e.g. upcoming support for other devices other than CPUs. Acked-by: Daniel Lezcano Acked-by: Quentin Perret Signed-off-by: Lukasz Luba Signed-off-by: Rafael J. Wysocki --- drivers/thermal/cpufreq_cooling.c | 12 +++--- include/linux/energy_model.h | 86 +++++++++++++++++++++------------------ kernel/power/energy_model.c | 44 ++++++++++---------- kernel/sched/topology.c | 20 ++++----- 4 files changed, 84 insertions(+), 78 deletions(-) (limited to 'include') diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c index 9e124020519f..641995ebc107 100644 --- a/drivers/thermal/cpufreq_cooling.c +++ b/drivers/thermal/cpufreq_cooling.c @@ -333,18 +333,18 @@ static inline bool em_is_sane(struct cpufreq_cooling_device *cpufreq_cdev, return false; policy = cpufreq_cdev->policy; - if (!cpumask_equal(policy->related_cpus, to_cpumask(em->cpus))) { + if (!cpumask_equal(policy->related_cpus, em_span_cpus(em))) { pr_err("The span of pd %*pbl is misaligned with cpufreq policy %*pbl\n", - cpumask_pr_args(to_cpumask(em->cpus)), + cpumask_pr_args(em_span_cpus(em)), cpumask_pr_args(policy->related_cpus)); return false; } nr_levels = cpufreq_cdev->max_level + 1; - if (em->nr_cap_states != nr_levels) { - pr_err("The number of cap states in pd %*pbl (%u) doesn't match the number of cooling levels (%u)\n", - cpumask_pr_args(to_cpumask(em->cpus)), - em->nr_cap_states, nr_levels); + if (em_pd_nr_perf_states(em) != nr_levels) { + pr_err("The number of performance states in pd %*pbl (%u) doesn't match the number of cooling levels (%u)\n", + cpumask_pr_args(em_span_cpus(em)), + em_pd_nr_perf_states(em), nr_levels); return false; } diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index ade6486a3382..fe336a9eb5d4 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -10,13 +10,13 @@ #include /** - * em_cap_state - Capacity state of a performance domain + * em_perf_state - Performance state of a performance domain * @frequency: The CPU frequency in KHz, for consistency with CPUFreq * @power: The power consumed by 1 CPU at this level, in milli-watts * @cost: The cost coefficient associated with this level, used during * energy calculation. 
Equal to: power * max_frequency / frequency */ -struct em_cap_state { +struct em_perf_state { unsigned long frequency; unsigned long power; unsigned long cost; @@ -24,8 +24,8 @@ struct em_cap_state { /** * em_perf_domain - Performance domain - * @table: List of capacity states, in ascending order - * @nr_cap_states: Number of capacity states + * @table: List of performance states, in ascending order + * @nr_perf_states: Number of performance states * @cpus: Cpumask covering the CPUs of the domain * * A "performance domain" represents a group of CPUs whose performance is @@ -34,22 +34,27 @@ struct em_cap_state { * CPUFreq policies. */ struct em_perf_domain { - struct em_cap_state *table; - int nr_cap_states; + struct em_perf_state *table; + int nr_perf_states; unsigned long cpus[]; }; +#define em_span_cpus(em) (to_cpumask((em)->cpus)) + #ifdef CONFIG_ENERGY_MODEL #define EM_CPU_MAX_POWER 0xFFFF struct em_data_callback { /** - * active_power() - Provide power at the next capacity state of a CPU - * @power : Active power at the capacity state in mW (modified) - * @freq : Frequency at the capacity state in kHz (modified) + * active_power() - Provide power at the next performance state of + * a CPU + * @power : Active power at the performance state in mW + * (modified) + * @freq : Frequency at the performance state in kHz + * (modified) * @cpu : CPU for which we do this operation * - * active_power() must find the lowest capacity state of 'cpu' above + * active_power() must find the lowest performance state of 'cpu' above * 'freq' and update 'power' and 'freq' to the matching active power * and frequency. * @@ -80,46 +85,46 @@ static inline unsigned long em_pd_energy(struct em_perf_domain *pd, unsigned long max_util, unsigned long sum_util) { unsigned long freq, scale_cpu; - struct em_cap_state *cs; + struct em_perf_state *ps; int i, cpu; /* - * In order to predict the capacity state, map the utilization of the - * most utilized CPU of the performance domain to a requested frequency, - * like schedutil. + * In order to predict the performance state, map the utilization of + * the most utilized CPU of the performance domain to a requested + * frequency, like schedutil. */ cpu = cpumask_first(to_cpumask(pd->cpus)); scale_cpu = arch_scale_cpu_capacity(cpu); - cs = &pd->table[pd->nr_cap_states - 1]; - freq = map_util_freq(max_util, cs->frequency, scale_cpu); + ps = &pd->table[pd->nr_perf_states - 1]; + freq = map_util_freq(max_util, ps->frequency, scale_cpu); /* - * Find the lowest capacity state of the Energy Model above the + * Find the lowest performance state of the Energy Model above the * requested frequency. 
*/ - for (i = 0; i < pd->nr_cap_states; i++) { - cs = &pd->table[i]; - if (cs->frequency >= freq) + for (i = 0; i < pd->nr_perf_states; i++) { + ps = &pd->table[i]; + if (ps->frequency >= freq) break; } /* - * The capacity of a CPU in the domain at that capacity state (cs) + * The capacity of a CPU in the domain at the performance state (ps) * can be computed as: * - * cs->freq * scale_cpu - * cs->cap = -------------------- (1) + * ps->freq * scale_cpu + * ps->cap = -------------------- (1) * cpu_max_freq * * So, ignoring the costs of idle states (which are not available in - * the EM), the energy consumed by this CPU at that capacity state is - * estimated as: + * the EM), the energy consumed by this CPU at that performance state + * is estimated as: * - * cs->power * cpu_util + * ps->power * cpu_util * cpu_nrg = -------------------- (2) - * cs->cap + * ps->cap * - * since 'cpu_util / cs->cap' represents its percentage of busy time. + * since 'cpu_util / ps->cap' represents its percentage of busy time. * * NOTE: Although the result of this computation actually is in * units of power, it can be manipulated as an energy value @@ -129,34 +134,35 @@ static inline unsigned long em_pd_energy(struct em_perf_domain *pd, * By injecting (1) in (2), 'cpu_nrg' can be re-expressed as a product * of two terms: * - * cs->power * cpu_max_freq cpu_util + * ps->power * cpu_max_freq cpu_util * cpu_nrg = ------------------------ * --------- (3) - * cs->freq scale_cpu + * ps->freq scale_cpu * - * The first term is static, and is stored in the em_cap_state struct - * as 'cs->cost'. + * The first term is static, and is stored in the em_perf_state struct + * as 'ps->cost'. * * Since all CPUs of the domain have the same micro-architecture, they - * share the same 'cs->cost', and the same CPU capacity. Hence, the + * share the same 'ps->cost', and the same CPU capacity. Hence, the * total energy of the domain (which is the simple sum of the energy of * all of its CPUs) can be factorized as: * - * cs->cost * \Sum cpu_util + * ps->cost * \Sum cpu_util * pd_nrg = ------------------------ (4) * scale_cpu */ - return cs->cost * sum_util / scale_cpu; + return ps->cost * sum_util / scale_cpu; } /** - * em_pd_nr_cap_states() - Get the number of capacity states of a perf. domain + * em_pd_nr_perf_states() - Get the number of performance states of a perf. 
+ * domain * @pd : performance domain for which this must be done * - * Return: the number of capacity states in the performance domain table + * Return: the number of performance states in the performance domain table */ -static inline int em_pd_nr_cap_states(struct em_perf_domain *pd) +static inline int em_pd_nr_perf_states(struct em_perf_domain *pd) { - return pd->nr_cap_states; + return pd->nr_perf_states; } #else @@ -177,7 +183,7 @@ static inline unsigned long em_pd_energy(struct em_perf_domain *pd, { return 0; } -static inline int em_pd_nr_cap_states(struct em_perf_domain *pd) +static inline int em_pd_nr_perf_states(struct em_perf_domain *pd) { return 0; } diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c index 0a9326f5f421..9892d548a0fa 100644 --- a/kernel/power/energy_model.c +++ b/kernel/power/energy_model.c @@ -27,18 +27,18 @@ static DEFINE_MUTEX(em_pd_mutex); #ifdef CONFIG_DEBUG_FS static struct dentry *rootdir; -static void em_debug_create_cs(struct em_cap_state *cs, struct dentry *pd) +static void em_debug_create_ps(struct em_perf_state *ps, struct dentry *pd) { struct dentry *d; char name[24]; - snprintf(name, sizeof(name), "cs:%lu", cs->frequency); + snprintf(name, sizeof(name), "ps:%lu", ps->frequency); - /* Create per-cs directory */ + /* Create per-ps directory */ d = debugfs_create_dir(name, pd); - debugfs_create_ulong("frequency", 0444, d, &cs->frequency); - debugfs_create_ulong("power", 0444, d, &cs->power); - debugfs_create_ulong("cost", 0444, d, &cs->cost); + debugfs_create_ulong("frequency", 0444, d, &ps->frequency); + debugfs_create_ulong("power", 0444, d, &ps->power); + debugfs_create_ulong("cost", 0444, d, &ps->cost); } static int em_debug_cpus_show(struct seq_file *s, void *unused) @@ -62,9 +62,9 @@ static void em_debug_create_pd(struct em_perf_domain *pd, int cpu) debugfs_create_file("cpus", 0444, d, pd->cpus, &em_debug_cpus_fops); - /* Create a sub-directory for each capacity state */ - for (i = 0; i < pd->nr_cap_states; i++) - em_debug_create_cs(&pd->table[i], d); + /* Create a sub-directory for each performance state */ + for (i = 0; i < pd->nr_perf_states; i++) + em_debug_create_ps(&pd->table[i], d); } static int __init em_debug_init(void) @@ -84,7 +84,7 @@ static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states, unsigned long opp_eff, prev_opp_eff = ULONG_MAX; unsigned long power, freq, prev_freq = 0; int i, ret, cpu = cpumask_first(span); - struct em_cap_state *table; + struct em_perf_state *table; struct em_perf_domain *pd; u64 fmax; @@ -99,26 +99,26 @@ static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states, if (!table) goto free_pd; - /* Build the list of capacity states for this performance domain */ + /* Build the list of performance states for this performance domain */ for (i = 0, freq = 0; i < nr_states; i++, freq++) { /* * active_power() is a driver callback which ceils 'freq' to - * lowest capacity state of 'cpu' above 'freq' and updates + * lowest performance state of 'cpu' above 'freq' and updates * 'power' and 'freq' accordingly. */ ret = cb->active_power(&power, &freq, cpu); if (ret) { - pr_err("pd%d: invalid cap. state: %d\n", cpu, ret); - goto free_cs_table; + pr_err("pd%d: invalid perf. state: %d\n", cpu, ret); + goto free_ps_table; } /* * We expect the driver callback to increase the frequency for - * higher capacity states. + * higher performance states. 
*/ if (freq <= prev_freq) { pr_err("pd%d: non-increasing freq: %lu\n", cpu, freq); - goto free_cs_table; + goto free_ps_table; } /* @@ -127,7 +127,7 @@ static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states, */ if (!power || power > EM_CPU_MAX_POWER) { pr_err("pd%d: invalid power: %lu\n", cpu, power); - goto free_cs_table; + goto free_ps_table; } table[i].power = power; @@ -141,12 +141,12 @@ static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states, */ opp_eff = freq / power; if (opp_eff >= prev_opp_eff) - pr_warn("pd%d: hertz/watts ratio non-monotonically decreasing: em_cap_state %d >= em_cap_state%d\n", + pr_warn("pd%d: hertz/watts ratio non-monotonically decreasing: em_perf_state %d >= em_perf_state%d\n", cpu, i, i - 1); prev_opp_eff = opp_eff; } - /* Compute the cost of each capacity_state. */ + /* Compute the cost of each performance state. */ fmax = (u64) table[nr_states - 1].frequency; for (i = 0; i < nr_states; i++) { table[i].cost = div64_u64(fmax * table[i].power, @@ -154,14 +154,14 @@ static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states, } pd->table = table; - pd->nr_cap_states = nr_states; + pd->nr_perf_states = nr_states; cpumask_copy(to_cpumask(pd->cpus), span); em_debug_create_pd(pd, cpu); return pd; -free_cs_table: +free_ps_table: kfree(table); free_pd: kfree(pd); @@ -185,7 +185,7 @@ EXPORT_SYMBOL_GPL(em_cpu_get); /** * em_register_perf_domain() - Register the Energy Model of a performance domain * @span : Mask of CPUs in the performance domain - * @nr_states : Number of capacity states to register + * @nr_states : Number of performance states to register * @cb : Callback functions providing the data of the Energy Model * * Create Energy Model tables for a performance domain using the callbacks diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index ba81187bb7af..2f91d3126365 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -272,10 +272,10 @@ static void perf_domain_debug(const struct cpumask *cpu_map, printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map)); while (pd) { - printk(KERN_CONT " pd%d:{ cpus=%*pbl nr_cstate=%d }", + printk(KERN_CONT " pd%d:{ cpus=%*pbl nr_pstate=%d }", cpumask_first(perf_domain_span(pd)), cpumask_pr_args(perf_domain_span(pd)), - em_pd_nr_cap_states(pd->em_pd)); + em_pd_nr_perf_states(pd->em_pd)); pd = pd->next; } @@ -313,26 +313,26 @@ static void sched_energy_set(bool has_eas) * * The complexity of the Energy Model is defined as: * - * C = nr_pd * (nr_cpus + nr_cs) + * C = nr_pd * (nr_cpus + nr_ps) * * with parameters defined as: * - nr_pd: the number of performance domains * - nr_cpus: the number of CPUs - * - nr_cs: the sum of the number of capacity states of all performance + * - nr_ps: the sum of the number of performance states of all performance * domains (for example, on a system with 2 performance domains, - * with 10 capacity states each, nr_cs = 2 * 10 = 20). + * with 10 performance states each, nr_ps = 2 * 10 = 20). * * It is generally not a good idea to use such a model in the wake-up path on * very complex platforms because of the associated scheduling overheads. The * arbitrary constraint below prevents that. It makes EAS usable up to 16 CPUs - * with per-CPU DVFS and less than 8 capacity states each, for example. + * with per-CPU DVFS and less than 8 performance states each, for example. 
*/ #define EM_MAX_COMPLEXITY 2048 extern struct cpufreq_governor schedutil_gov; static bool build_perf_domains(const struct cpumask *cpu_map) { - int i, nr_pd = 0, nr_cs = 0, nr_cpus = cpumask_weight(cpu_map); + int i, nr_pd = 0, nr_ps = 0, nr_cpus = cpumask_weight(cpu_map); struct perf_domain *pd = NULL, *tmp; int cpu = cpumask_first(cpu_map); struct root_domain *rd = cpu_rq(cpu)->rd; @@ -384,15 +384,15 @@ static bool build_perf_domains(const struct cpumask *cpu_map) pd = tmp; /* - * Count performance domains and capacity states for the + * Count performance domains and performance states for the * complexity check. */ nr_pd++; - nr_cs += em_pd_nr_cap_states(pd->em_pd); + nr_ps += em_pd_nr_perf_states(pd->em_pd); } /* Bail out if the Energy Model complexity is too high. */ - if (nr_pd * (nr_cs + nr_cpus) > EM_MAX_COMPLEXITY) { + if (nr_pd * (nr_ps + nr_cpus) > EM_MAX_COMPLEXITY) { WARN(1, "rd %*pbl: Failed to start EAS, EM complexity is too high\n", cpumask_pr_args(cpu_map)); goto free; -- cgit v1.2.3 From 7d9895c7fbfc9c70afce7029b7de0f3f974adb88 Mon Sep 17 00:00:00 2001 From: Lukasz Luba Date: Wed, 27 May 2020 10:58:48 +0100 Subject: PM / EM: introduce em_dev_register_perf_domain function Add a new function to the Energy Model framework which is going to support new devices. This function will help in the transition and make it smoother. For now it still checks if the cpumask is a valid pointer, which will be removed later once the new structures and infrastructure are ready. Acked-by: Daniel Lezcano Acked-by: Quentin Perret Signed-off-by: Lukasz Luba Signed-off-by: Rafael J. Wysocki --- include/linux/energy_model.h | 13 +++++++++++-- kernel/power/energy_model.c | 40 ++++++++++++++++++++++++++++++++++------ 2 files changed, 45 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index fe336a9eb5d4..7c048df98447 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -2,6 +2,7 @@ #ifndef _LINUX_ENERGY_MODEL_H #define _LINUX_ENERGY_MODEL_H #include +#include #include #include #include @@ -42,7 +43,7 @@ struct em_perf_domain { #define em_span_cpus(em) (to_cpumask((em)->cpus)) #ifdef CONFIG_ENERGY_MODEL -#define EM_CPU_MAX_POWER 0xFFFF +#define EM_MAX_POWER 0xFFFF struct em_data_callback { /** @@ -59,7 +60,7 @@ struct em_data_callback { * and frequency. * * The power is the one of a single CPU in the domain, expressed in - * milli-watts. It is expected to fit in the [0, EM_CPU_MAX_POWER] + * milli-watts. It is expected to fit in the [0, EM_MAX_POWER] * range. * * Return 0 on success. @@ -71,6 +72,8 @@ struct em_data_callback { struct em_perf_domain *em_cpu_get(int cpu); int em_register_perf_domain(cpumask_t *span, unsigned int nr_states, struct em_data_callback *cb); +int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, + struct em_data_callback *cb, cpumask_t *span); /** * em_pd_energy() - Estimates the energy consumed by the CPUs of a perf.
domain @@ -174,6 +177,12 @@ static inline int em_register_perf_domain(cpumask_t *span, { return -EINVAL; } +static inline +int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, + struct em_data_callback *cb, cpumask_t *span) +{ + return -EINVAL; +} static inline struct em_perf_domain *em_cpu_get(int cpu) { return NULL; diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c index 9892d548a0fa..875b163e54ab 100644 --- a/kernel/power/energy_model.c +++ b/kernel/power/energy_model.c @@ -125,7 +125,7 @@ static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states, * The power returned by active_state() is expected to be * positive, in milli-watts and to fit into 16 bits. */ - if (!power || power > EM_CPU_MAX_POWER) { + if (!power || power > EM_MAX_POWER) { pr_err("pd%d: invalid power: %lu\n", cpu, power); goto free_ps_table; } @@ -183,10 +183,13 @@ struct em_perf_domain *em_cpu_get(int cpu) EXPORT_SYMBOL_GPL(em_cpu_get); /** - * em_register_perf_domain() - Register the Energy Model of a performance domain - * @span : Mask of CPUs in the performance domain + * em_dev_register_perf_domain() - Register the Energy Model (EM) for a device + * @dev : Device for which the EM is to register + * @nr_states : Number of performance states to register * @cb : Callback functions providing the data of the Energy Model + * @span : Pointer to cpumask_t, which in case of a CPU device is + * obligatory. It can be taken from i.e. 'policy->cpus'. For other + * type of devices this should be set to NULL. * * Create Energy Model tables for a performance domain using the callbacks * defined in cb. @@ -196,14 +199,14 @@ EXPORT_SYMBOL_GPL(em_cpu_get); * * Return 0 on success */ -int em_register_perf_domain(cpumask_t *span, unsigned int nr_states, - struct em_data_callback *cb) +int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, + struct em_data_callback *cb, cpumask_t *span) { unsigned long cap, prev_cap = 0; struct em_perf_domain *pd; int cpu, ret = 0; - if (!span || !nr_states || !cb) + if (!dev || !span || !nr_states || !cb) return -EINVAL; /* @@ -255,4 +258,29 @@ unlock: return ret; } +EXPORT_SYMBOL_GPL(em_dev_register_perf_domain); + +/** + * em_register_perf_domain() - Register the Energy Model of a performance domain + * @span : Mask of CPUs in the performance domain + * @nr_states : Number of capacity states to register + * @cb : Callback functions providing the data of the Energy Model + * + * Create Energy Model tables for a performance domain using the callbacks + * defined in cb. + * + * If multiple clients register the same performance domain, all but the first + * registration will be ignored. + * + * Return 0 on success + */ +int em_register_perf_domain(cpumask_t *span, unsigned int nr_states, + struct em_data_callback *cb) +{ + struct device *cpu_dev; + + cpu_dev = get_cpu_device(cpumask_first(span)); + + return em_dev_register_perf_domain(cpu_dev, nr_states, cb, span); +} EXPORT_SYMBOL_GPL(em_register_perf_domain); -- cgit v1.2.3 From d0351cc3b0f57214d157e4d589564730af2aedae Mon Sep 17 00:00:00 2001 From: Lukasz Luba Date: Wed, 27 May 2020 10:58:49 +0100 Subject: PM / EM: update callback structure and add device pointer The Energy Model framework is going to support devices other than CPUs. In order to make this happen, change the callback function and add a pointer to a device as an argument. Update the related users to use the new function and the new callback from the Energy Model.
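As a purely illustrative sketch (not part of the patch; the foo_* names are hypothetical), a CPU device driver's Energy Model hookup changes roughly as follows: the callback receives the device pointer instead of a CPU number, so lookups such as get_cpu_device() inside the callback are no longer needed, and registration takes the device in addition to the CPU span:

	/* New-style callback: 'dev' is passed in directly. */
	static int foo_get_power(unsigned long *mW, unsigned long *kHz,
				 struct device *dev)
	{
		/*
		 * Ceil *kHz to the next supported operating point of 'dev' and
		 * report its active power in mW. Previously this callback took
		 * an 'int cpu' and had to call get_cpu_device(cpu) first.
		 * foo_lookup_opp() is a made-up helper.
		 */
		return foo_lookup_opp(dev, mW, kHz);
	}

	static struct em_data_callback em_cb = EM_DATA_CB(foo_get_power);

	/* Registration gains the device argument and keeps the CPU span. */
	em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, policy->cpus);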
Acked-by: Quentin Perret Signed-off-by: Lukasz Luba Acked-by: Daniel Lezcano Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/scmi-cpufreq.c | 11 +++-------- drivers/opp/of.c | 9 ++------- include/linux/energy_model.h | 15 ++++++++------- kernel/power/energy_model.c | 9 +++++---- 4 files changed, 18 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index 61623e2ff149..11ee24e06d12 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -103,17 +103,12 @@ scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) } static int __maybe_unused -scmi_get_cpu_power(unsigned long *power, unsigned long *KHz, int cpu) +scmi_get_cpu_power(unsigned long *power, unsigned long *KHz, + struct device *cpu_dev) { - struct device *cpu_dev = get_cpu_device(cpu); unsigned long Hz; int ret, domain; - if (!cpu_dev) { - pr_err("failed to get cpu%d device\n", cpu); - return -ENODEV; - } - domain = handle->perf_ops->device_domain_id(cpu_dev); if (domain < 0) return domain; @@ -200,7 +195,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) policy->fast_switch_possible = true; - em_register_perf_domain(policy->cpus, nr_opp, &em_cb); + em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, policy->cpus); return 0; diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 9a5873591a40..e273f419a4bf 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -1216,9 +1216,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node); * calculation failed because of missing parameters, 0 otherwise. */ static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz, - int cpu) + struct device *cpu_dev) { - struct device *cpu_dev; struct dev_pm_opp *opp; struct device_node *np; unsigned long mV, Hz; @@ -1226,10 +1225,6 @@ static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz, u64 tmp; int ret; - cpu_dev = get_cpu_device(cpu); - if (!cpu_dev) - return -ENODEV; - np = of_node_get(cpu_dev->of_node); if (!np) return -EINVAL; @@ -1297,6 +1292,6 @@ void dev_pm_opp_of_register_em(struct cpumask *cpus) if (ret || !cap) return; - em_register_perf_domain(cpus, nr_opp, &em_cb); + em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, cpus); } EXPORT_SYMBOL_GPL(dev_pm_opp_of_register_em); diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index 7c048df98447..7076cb22b247 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -48,24 +48,25 @@ struct em_perf_domain { struct em_data_callback { /** * active_power() - Provide power at the next performance state of - * a CPU + * a device * @power : Active power at the performance state in mW * (modified) * @freq : Frequency at the performance state in kHz * (modified) - * @cpu : CPU for which we do this operation + * @dev : Device for which we do this operation (can be a CPU) * - * active_power() must find the lowest performance state of 'cpu' above + * active_power() must find the lowest performance state of 'dev' above * 'freq' and update 'power' and 'freq' to the matching active power * and frequency. * - * The power is the one of a single CPU in the domain, expressed in - * milli-watts. It is expected to fit in the [0, EM_MAX_POWER] - * range. + * In case of CPUs, the power is the one of a single CPU in the domain, + * expressed in milli-watts. It is expected to fit in the + * [0, EM_MAX_POWER] range. * * Return 0 on success. 
*/ - int (*active_power)(unsigned long *power, unsigned long *freq, int cpu); + int (*active_power)(unsigned long *power, unsigned long *freq, + struct device *dev); }; #define EM_DATA_CB(_active_power_cb) { .active_power = &_active_power_cb } diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c index 875b163e54ab..5b8a1566526a 100644 --- a/kernel/power/energy_model.c +++ b/kernel/power/energy_model.c @@ -78,8 +78,9 @@ core_initcall(em_debug_init); #else /* CONFIG_DEBUG_FS */ static void em_debug_create_pd(struct em_perf_domain *pd, int cpu) {} #endif -static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states, - struct em_data_callback *cb) +static struct em_perf_domain * +em_create_pd(struct device *dev, int nr_states, struct em_data_callback *cb, + cpumask_t *span) { unsigned long opp_eff, prev_opp_eff = ULONG_MAX; unsigned long power, freq, prev_freq = 0; @@ -106,7 +107,7 @@ static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states, * lowest performance state of 'cpu' above 'freq' and updates * 'power' and 'freq' accordingly. */ - ret = cb->active_power(&power, &freq, cpu); + ret = cb->active_power(&power, &freq, dev); if (ret) { pr_err("pd%d: invalid perf. state: %d\n", cpu, ret); goto free_ps_table; @@ -237,7 +238,7 @@ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, } /* Create the performance domain and add it to the Energy Model. */ - pd = em_create_pd(span, nr_states, cb); + pd = em_create_pd(dev, nr_states, cb, span); if (!pd) { ret = -EINVAL; goto unlock; -- cgit v1.2.3 From c3077b5d97a39223a2d4b95a21ccff660836170f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 11 Jun 2020 08:44:41 +0200 Subject: blk-mq: merge blk-softirq.c into blk-mq.c __blk_complete_request is only called from the blk-mq code, and duplicates a lot of code from blk-mq.c. Move it there to prepare for better code sharing and simplifications. Signed-off-by: Christoph Hellwig Reviewed-by: Daniel Wagner Signed-off-by: Jens Axboe --- block/Makefile | 2 +- block/blk-mq.c | 135 ++++++++++++++++++++++++++++++++++++++++++ block/blk-softirq.c | 156 ------------------------------------------------- include/linux/blkdev.h | 1 - 4 files changed, 136 insertions(+), 158 deletions(-) delete mode 100644 block/blk-softirq.c (limited to 'include') diff --git a/block/Makefile b/block/Makefile index 78719169fb2a..8d841f5f986f 100644 --- a/block/Makefile +++ b/block/Makefile @@ -5,7 +5,7 @@ obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-sysfs.o \ blk-flush.o blk-settings.o blk-ioc.o blk-map.o \ - blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \ + blk-exec.o blk-merge.o blk-timeout.o \ blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \ blk-mq-sysfs.o blk-mq-cpumap.o blk-mq-sched.o ioctl.o \ genhd.o ioprio.o badblocks.o partitions/ blk-rq-qos.o diff --git a/block/blk-mq.c b/block/blk-mq.c index a9aa6d1e44cf..60febbf6f8d9 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -41,6 +41,8 @@ #include "blk-mq-sched.h" #include "blk-rq-qos.h" +static DEFINE_PER_CPU(struct list_head, blk_cpu_done); + static void blk_mq_poll_stats_start(struct request_queue *q); static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb); @@ -574,6 +576,130 @@ void blk_mq_end_request(struct request *rq, blk_status_t error) } EXPORT_SYMBOL(blk_mq_end_request); +/* + * Softirq action handler - move entries to local list and loop over them + * while passing them to the queue registered handler. 
+ */ +static __latent_entropy void blk_done_softirq(struct softirq_action *h) +{ + struct list_head *cpu_list, local_list; + + local_irq_disable(); + cpu_list = this_cpu_ptr(&blk_cpu_done); + list_replace_init(cpu_list, &local_list); + local_irq_enable(); + + while (!list_empty(&local_list)) { + struct request *rq; + + rq = list_entry(local_list.next, struct request, ipi_list); + list_del_init(&rq->ipi_list); + rq->q->mq_ops->complete(rq); + } +} + +#ifdef CONFIG_SMP +static void trigger_softirq(void *data) +{ + struct request *rq = data; + struct list_head *list; + + list = this_cpu_ptr(&blk_cpu_done); + list_add_tail(&rq->ipi_list, list); + + if (list->next == &rq->ipi_list) + raise_softirq_irqoff(BLOCK_SOFTIRQ); +} + +/* + * Setup and invoke a run of 'trigger_softirq' on the given cpu. + */ +static int raise_blk_irq(int cpu, struct request *rq) +{ + if (cpu_online(cpu)) { + call_single_data_t *data = &rq->csd; + + data->func = trigger_softirq; + data->info = rq; + data->flags = 0; + + smp_call_function_single_async(cpu, data); + return 0; + } + + return 1; +} +#else /* CONFIG_SMP */ +static int raise_blk_irq(int cpu, struct request *rq) +{ + return 1; +} +#endif + +static int blk_softirq_cpu_dead(unsigned int cpu) +{ + /* + * If a CPU goes away, splice its entries to the current CPU + * and trigger a run of the softirq + */ + local_irq_disable(); + list_splice_init(&per_cpu(blk_cpu_done, cpu), + this_cpu_ptr(&blk_cpu_done)); + raise_softirq_irqoff(BLOCK_SOFTIRQ); + local_irq_enable(); + + return 0; +} + +static void __blk_complete_request(struct request *req) +{ + struct request_queue *q = req->q; + int cpu, ccpu = req->mq_ctx->cpu; + unsigned long flags; + bool shared = false; + + BUG_ON(!q->mq_ops->complete); + + local_irq_save(flags); + cpu = smp_processor_id(); + + /* + * Select completion CPU + */ + if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && ccpu != -1) { + if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) + shared = cpus_share_cache(cpu, ccpu); + } else + ccpu = cpu; + + /* + * If current CPU and requested CPU share a cache, run the softirq on + * the current CPU. One might concern this is just like + * QUEUE_FLAG_SAME_FORCE, but actually not. blk_complete_request() is + * running in interrupt handler, and currently I/O controller doesn't + * support multiple interrupts, so current CPU is unique actually. This + * avoids IPI sending from current CPU to the first CPU of a group. + */ + if (ccpu == cpu || shared) { + struct list_head *list; +do_local: + list = this_cpu_ptr(&blk_cpu_done); + list_add_tail(&req->ipi_list, list); + + /* + * if the list only contains our just added request, + * signal a raise of the softirq. If there are already + * entries there, someone already raised the irq but it + * hasn't run yet. 
+ */ + if (list->next == &req->ipi_list) + raise_softirq_irqoff(BLOCK_SOFTIRQ); + } else if (raise_blk_irq(ccpu, req)) + goto do_local; + + local_irq_restore(flags); +} + static void __blk_mq_complete_request_remote(void *data) { struct request *rq = data; @@ -3760,6 +3886,15 @@ EXPORT_SYMBOL(blk_mq_rq_cpu); static int __init blk_mq_init(void) { + int i; + + for_each_possible_cpu(i) + INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i)); + open_softirq(BLOCK_SOFTIRQ, blk_done_softirq); + + cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD, + "block/softirq:dead", NULL, + blk_softirq_cpu_dead); cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL, blk_mq_hctx_notify_dead); cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online", diff --git a/block/blk-softirq.c b/block/blk-softirq.c deleted file mode 100644 index 6e7ec87d49fa..000000000000 --- a/block/blk-softirq.c +++ /dev/null @@ -1,156 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Functions related to softirq rq completions - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "blk.h" - -static DEFINE_PER_CPU(struct list_head, blk_cpu_done); - -/* - * Softirq action handler - move entries to local list and loop over them - * while passing them to the queue registered handler. - */ -static __latent_entropy void blk_done_softirq(struct softirq_action *h) -{ - struct list_head *cpu_list, local_list; - - local_irq_disable(); - cpu_list = this_cpu_ptr(&blk_cpu_done); - list_replace_init(cpu_list, &local_list); - local_irq_enable(); - - while (!list_empty(&local_list)) { - struct request *rq; - - rq = list_entry(local_list.next, struct request, ipi_list); - list_del_init(&rq->ipi_list); - rq->q->mq_ops->complete(rq); - } -} - -#ifdef CONFIG_SMP -static void trigger_softirq(void *data) -{ - struct request *rq = data; - struct list_head *list; - - list = this_cpu_ptr(&blk_cpu_done); - list_add_tail(&rq->ipi_list, list); - - if (list->next == &rq->ipi_list) - raise_softirq_irqoff(BLOCK_SOFTIRQ); -} - -/* - * Setup and invoke a run of 'trigger_softirq' on the given cpu. - */ -static int raise_blk_irq(int cpu, struct request *rq) -{ - if (cpu_online(cpu)) { - call_single_data_t *data = &rq->csd; - - data->func = trigger_softirq; - data->info = rq; - data->flags = 0; - - smp_call_function_single_async(cpu, data); - return 0; - } - - return 1; -} -#else /* CONFIG_SMP */ -static int raise_blk_irq(int cpu, struct request *rq) -{ - return 1; -} -#endif - -static int blk_softirq_cpu_dead(unsigned int cpu) -{ - /* - * If a CPU goes away, splice its entries to the current CPU - * and trigger a run of the softirq - */ - local_irq_disable(); - list_splice_init(&per_cpu(blk_cpu_done, cpu), - this_cpu_ptr(&blk_cpu_done)); - raise_softirq_irqoff(BLOCK_SOFTIRQ); - local_irq_enable(); - - return 0; -} - -void __blk_complete_request(struct request *req) -{ - struct request_queue *q = req->q; - int cpu, ccpu = req->mq_ctx->cpu; - unsigned long flags; - bool shared = false; - - BUG_ON(!q->mq_ops->complete); - - local_irq_save(flags); - cpu = smp_processor_id(); - - /* - * Select completion CPU - */ - if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && ccpu != -1) { - if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) - shared = cpus_share_cache(cpu, ccpu); - } else - ccpu = cpu; - - /* - * If current CPU and requested CPU share a cache, run the softirq on - * the current CPU. One might concern this is just like - * QUEUE_FLAG_SAME_FORCE, but actually not. 
blk_complete_request() is - * running in interrupt handler, and currently I/O controller doesn't - * support multiple interrupts, so current CPU is unique actually. This - * avoids IPI sending from current CPU to the first CPU of a group. - */ - if (ccpu == cpu || shared) { - struct list_head *list; -do_local: - list = this_cpu_ptr(&blk_cpu_done); - list_add_tail(&req->ipi_list, list); - - /* - * if the list only contains our just added request, - * signal a raise of the softirq. If there are already - * entries there, someone already raised the irq but it - * hasn't run yet. - */ - if (list->next == &req->ipi_list) - raise_softirq_irqoff(BLOCK_SOFTIRQ); - } else if (raise_blk_irq(ccpu, req)) - goto do_local; - - local_irq_restore(flags); -} - -static __init int blk_softirq_init(void) -{ - int i; - - for_each_possible_cpu(i) - INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i)); - - open_softirq(BLOCK_SOFTIRQ, blk_done_softirq); - cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD, - "block/softirq:dead", NULL, - blk_softirq_cpu_dead); - return 0; -} -subsys_initcall(blk_softirq_init); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 8fd900998b4e..98712cfc7a34 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1078,7 +1078,6 @@ void blk_steal_bios(struct bio_list *list, struct request *rq); extern bool blk_update_request(struct request *rq, blk_status_t error, unsigned int nr_bytes); -extern void __blk_complete_request(struct request *); extern void blk_abort_request(struct request *); /* -- cgit v1.2.3 From 15f73f5b3e5958f2d169fe13c420eeeeae07bbf2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 11 Jun 2020 08:44:47 +0200 Subject: blk-mq: move failure injection out of blk_mq_complete_request Move the call to blk_should_fake_timeout out of blk_mq_complete_request and into the drivers, skipping call sites that are obvious error handlers, and remove the now superflous blk_mq_force_complete_rq helper. This ensures we don't keep injecting errors into completions that just terminate the Linux request after the hardware has been reset or the command has been aborted. Reviewed-by: Daniel Wagner Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 34 +++++++--------------------------- block/blk-timeout.c | 6 ++---- block/blk.h | 9 --------- block/bsg-lib.c | 5 ++++- drivers/block/loop.c | 6 ++++-- drivers/block/mtip32xx/mtip32xx.c | 3 ++- drivers/block/nbd.c | 5 ++++- drivers/block/null_blk_main.c | 5 +++-- drivers/block/skd_main.c | 9 ++++++--- drivers/block/virtio_blk.c | 3 ++- drivers/block/xen-blkfront.c | 3 ++- drivers/md/dm-rq.c | 3 ++- drivers/mmc/core/block.c | 8 ++++---- drivers/nvme/host/core.c | 2 +- drivers/nvme/host/nvme.h | 3 ++- drivers/s390/block/dasd.c | 2 +- drivers/s390/block/scm_blk.c | 3 ++- drivers/scsi/scsi_lib.c | 12 +++--------- include/linux/blk-mq.h | 12 ++++++++++-- 19 files changed, 61 insertions(+), 72 deletions(-) (limited to 'include') diff --git a/block/blk-mq.c b/block/blk-mq.c index ce772ab19188..3f4f227cf830 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -655,16 +655,13 @@ static void __blk_mq_complete_request_remote(void *data) } /** - * blk_mq_force_complete_rq() - Force complete the request, bypassing any error - * injection that could drop the completion. - * @rq: Request to be force completed + * blk_mq_complete_request - end I/O on a request + * @rq: the request being processed * - * Drivers should use blk_mq_complete_request() to complete requests in their - * normal IO path. 
For timeout error recovery, drivers may call this forced - * completion routine after they've reclaimed timed out requests to bypass - * potentially subsequent fake timeouts. - */ -void blk_mq_force_complete_rq(struct request *rq) + * Description: + * Complete a request by scheduling the ->complete_rq operation. + **/ +void blk_mq_complete_request(struct request *rq) { struct blk_mq_ctx *ctx = rq->mq_ctx; struct request_queue *q = rq->q; @@ -702,7 +699,7 @@ void blk_mq_force_complete_rq(struct request *rq) } put_cpu(); } -EXPORT_SYMBOL_GPL(blk_mq_force_complete_rq); +EXPORT_SYMBOL(blk_mq_complete_request); static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx) __releases(hctx->srcu) @@ -724,23 +721,6 @@ static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx) *srcu_idx = srcu_read_lock(hctx->srcu); } -/** - * blk_mq_complete_request - end I/O on a request - * @rq: the request being processed - * - * Description: - * Ends all I/O on a request. It does not handle partial completions. - * The actual completion happens out-of-order, through a IPI handler. - **/ -bool blk_mq_complete_request(struct request *rq) -{ - if (unlikely(blk_should_fake_timeout(rq->q))) - return false; - blk_mq_force_complete_rq(rq); - return true; -} -EXPORT_SYMBOL(blk_mq_complete_request); - /** * blk_mq_start_request - Start processing a request * @rq: Pointer to request to be started diff --git a/block/blk-timeout.c b/block/blk-timeout.c index 8aa68fae96ad..3a1ac6434758 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c @@ -20,13 +20,11 @@ static int __init setup_fail_io_timeout(char *str) } __setup("fail_io_timeout=", setup_fail_io_timeout); -int blk_should_fake_timeout(struct request_queue *q) +bool __blk_should_fake_timeout(struct request_queue *q) { - if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags)) - return 0; - return should_fail(&fail_io_timeout, 1); } +EXPORT_SYMBOL_GPL(__blk_should_fake_timeout); static int __init fail_io_timeout_debugfs(void) { diff --git a/block/blk.h b/block/blk.h index b5d1f0fc6547..8ba4a5e4fe07 100644 --- a/block/blk.h +++ b/block/blk.h @@ -223,18 +223,9 @@ ssize_t part_fail_show(struct device *dev, struct device_attribute *attr, char *buf); ssize_t part_fail_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); - -#ifdef CONFIG_FAIL_IO_TIMEOUT -int blk_should_fake_timeout(struct request_queue *); ssize_t part_timeout_show(struct device *, struct device_attribute *, char *); ssize_t part_timeout_store(struct device *, struct device_attribute *, const char *, size_t); -#else -static inline int blk_should_fake_timeout(struct request_queue *q) -{ - return 0; -} -#endif void __blk_queue_split(struct request_queue *q, struct bio **bio, unsigned int *nr_segs); diff --git a/block/bsg-lib.c b/block/bsg-lib.c index 6cbb7926534c..fb7b347f8010 100644 --- a/block/bsg-lib.c +++ b/block/bsg-lib.c @@ -181,9 +181,12 @@ EXPORT_SYMBOL_GPL(bsg_job_get); void bsg_job_done(struct bsg_job *job, int result, unsigned int reply_payload_rcv_len) { + struct request *rq = blk_mq_rq_from_pdu(job); + job->result = result; job->reply_payload_rcv_len = reply_payload_rcv_len; - blk_mq_complete_request(blk_mq_rq_from_pdu(job)); + if (likely(!blk_should_fake_timeout(rq->q))) + blk_mq_complete_request(rq); } EXPORT_SYMBOL_GPL(bsg_job_done); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 475e1a738560..4acae248790c 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -509,7 +509,8 @@ static void lo_rw_aio_do_completion(struct 
loop_cmd *cmd) return; kfree(cmd->bvec); cmd->bvec = NULL; - blk_mq_complete_request(rq); + if (likely(!blk_should_fake_timeout(rq->q))) + blk_mq_complete_request(rq); } static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2) @@ -2048,7 +2049,8 @@ static void loop_handle_cmd(struct loop_cmd *cmd) cmd->ret = ret; else cmd->ret = ret ? -EIO : 0; - blk_mq_complete_request(rq); + if (likely(!blk_should_fake_timeout(rq->q))) + blk_mq_complete_request(rq); } } diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index f6bafa9a68b9..153e2cdecb4d 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -492,7 +492,8 @@ static void mtip_complete_command(struct mtip_cmd *cmd, blk_status_t status) struct request *req = blk_mq_rq_from_pdu(cmd); cmd->status = status; - blk_mq_complete_request(req); + if (likely(!blk_should_fake_timeout(req->q))) + blk_mq_complete_request(req); } /* diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 43cff01a5a67..01794cd2b6ca 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -784,6 +784,7 @@ static void recv_work(struct work_struct *work) struct nbd_device *nbd = args->nbd; struct nbd_config *config = nbd->config; struct nbd_cmd *cmd; + struct request *rq; while (1) { cmd = nbd_read_stat(nbd, args->index); @@ -796,7 +797,9 @@ static void recv_work(struct work_struct *work) break; } - blk_mq_complete_request(blk_mq_rq_from_pdu(cmd)); + rq = blk_mq_rq_from_pdu(cmd); + if (likely(!blk_should_fake_timeout(rq->q))) + blk_mq_complete_request(rq); } atomic_dec(&config->recv_threads); wake_up(&config->recv_wq); diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index 87b31f9ca362..82259242b9b5 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c @@ -1283,7 +1283,8 @@ static inline void nullb_complete_cmd(struct nullb_cmd *cmd) case NULL_IRQ_SOFTIRQ: switch (cmd->nq->dev->queue_mode) { case NULL_Q_MQ: - blk_mq_complete_request(cmd->rq); + if (likely(!blk_should_fake_timeout(cmd->rq->q))) + blk_mq_complete_request(cmd->rq); break; case NULL_Q_BIO: /* @@ -1423,7 +1424,7 @@ static bool should_requeue_request(struct request *rq) static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res) { pr_info("rq %p timed out\n", rq); - blk_mq_force_complete_rq(rq); + blk_mq_complete_request(rq); return BLK_EH_DONE; } diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index 51569c199a6c..3a476dc1d14f 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c @@ -1417,7 +1417,8 @@ static void skd_resolve_req_exception(struct skd_device *skdev, case SKD_CHECK_STATUS_REPORT_GOOD: case SKD_CHECK_STATUS_REPORT_SMART_ALERT: skreq->status = BLK_STS_OK; - blk_mq_complete_request(req); + if (likely(!blk_should_fake_timeout(req->q))) + blk_mq_complete_request(req); break; case SKD_CHECK_STATUS_BUSY_IMMINENT: @@ -1440,7 +1441,8 @@ static void skd_resolve_req_exception(struct skd_device *skdev, case SKD_CHECK_STATUS_REPORT_ERROR: default: skreq->status = BLK_STS_IOERR; - blk_mq_complete_request(req); + if (likely(!blk_should_fake_timeout(req->q))) + blk_mq_complete_request(req); break; } } @@ -1560,7 +1562,8 @@ static int skd_isr_completion_posted(struct skd_device *skdev, */ if (likely(cmp_status == SAM_STAT_GOOD)) { skreq->status = BLK_STS_OK; - blk_mq_complete_request(rq); + if (likely(!blk_should_fake_timeout(rq->q))) + blk_mq_complete_request(rq); } else { skd_resolve_req_exception(skdev, skreq, rq); } diff 
--git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 9d21bf0f155e..741804bd8a14 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -171,7 +171,8 @@ static void virtblk_done(struct virtqueue *vq) while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { struct request *req = blk_mq_rq_from_pdu(vbr); - blk_mq_complete_request(req); + if (likely(!blk_should_fake_timeout(req->q))) + blk_mq_complete_request(req); req_done = true; } if (unlikely(virtqueue_is_broken(vq))) diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 3b889ea950c2..3bb3dd8da9b0 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1655,7 +1655,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) BUG(); } - blk_mq_complete_request(req); + if (likely(!blk_should_fake_timeout(req->q))) + blk_mq_complete_request(req); } rinfo->ring.rsp_cons = i; diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index f60c02512121..5aec1cd09348 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -288,7 +288,8 @@ static void dm_complete_request(struct request *rq, blk_status_t error) struct dm_rq_target_io *tio = tio_from_request(rq); tio->error = error; - blk_mq_complete_request(rq); + if (likely(!blk_should_fake_timeout(rq->q))) + blk_mq_complete_request(rq); } /* diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 7896952de1ac..4791c82f8f7c 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1446,7 +1446,7 @@ static void mmc_blk_cqe_req_done(struct mmc_request *mrq) */ if (mq->in_recovery) mmc_blk_cqe_complete_rq(mq, req); - else + else if (likely(!blk_should_fake_timeout(req->q))) blk_mq_complete_request(req); } @@ -1926,7 +1926,7 @@ static void mmc_blk_hsq_req_done(struct mmc_request *mrq) */ if (mq->in_recovery) mmc_blk_cqe_complete_rq(mq, req); - else + else if (likely(!blk_should_fake_timeout(req->q))) blk_mq_complete_request(req); } @@ -1936,7 +1936,7 @@ void mmc_blk_mq_complete(struct request *req) if (mq->use_cqe) mmc_blk_cqe_complete_rq(mq, req); - else + else if (likely(!blk_should_fake_timeout(req->q))) mmc_blk_mq_complete_rq(mq, req); } @@ -1988,7 +1988,7 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req) */ if (mq->in_recovery) mmc_blk_mq_complete_rq(mq, req); - else + else if (likely(!blk_should_fake_timeout(req->q))) blk_mq_complete_request(req); mmc_blk_mq_dec_in_flight(mq, req); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index c2c5bc4fb702..6810c8812aed 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -304,7 +304,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved) return true; nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD; - blk_mq_force_complete_rq(req); + blk_mq_complete_request(req); return true; } EXPORT_SYMBOL_GPL(nvme_cancel_request); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index c0f4226d3299..034613205701 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -481,7 +481,8 @@ static inline void nvme_end_request(struct request *req, __le16 status, rq->result = result; /* inject error when permitted by fault injection framework */ nvme_should_fail(req); - blk_mq_complete_request(req); + if (likely(!blk_should_fake_timeout(req->q))) + blk_mq_complete_request(req); } static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl) diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 
cf87eb27879f..eb17fea8075c 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -2802,7 +2802,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) blk_update_request(req, BLK_STS_OK, blk_rq_bytes(req) - proc_bytes); blk_mq_requeue_request(req, true); - } else { + } else if (likely(!blk_should_fake_timeout(req->q))) { blk_mq_complete_request(req); } } diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index e01889394c84..a4f6f2e62b1d 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -256,7 +256,8 @@ static void scm_request_finish(struct scm_request *scmrq) for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) { error = blk_mq_rq_to_pdu(scmrq->request[i]); *error = scmrq->error; - blk_mq_complete_request(scmrq->request[i]); + if (likely(!blk_should_fake_timeout(scmrq->request[i]->q))) + blk_mq_complete_request(scmrq->request[i]); } atomic_dec(&bdev->queued_reqs); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 0ba7a65e7c8d..6ca91d09eca1 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1589,18 +1589,12 @@ static blk_status_t scsi_mq_prep_fn(struct request *req) static void scsi_mq_done(struct scsi_cmnd *cmd) { + if (unlikely(blk_should_fake_timeout(cmd->request->q))) + return; if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state))) return; trace_scsi_dispatch_cmd_done(cmd); - - /* - * If the block layer didn't complete the request due to a timeout - * injection, scsi must clear its internal completed state so that the - * timeout handler will see it needs to escalate its own error - * recovery. - */ - if (unlikely(!blk_mq_complete_request(cmd->request))) - clear_bit(SCMD_STATE_COMPLETE, &cmd->state); + blk_mq_complete_request(cmd->request); } static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx) diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index d6fcae17da5a..8e6ab766aef7 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -503,8 +503,7 @@ void __blk_mq_end_request(struct request *rq, blk_status_t error); void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list); void blk_mq_kick_requeue_list(struct request_queue *q); void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); -bool blk_mq_complete_request(struct request *rq); -void blk_mq_force_complete_rq(struct request *rq); +void blk_mq_complete_request(struct request *rq); bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list, struct bio *bio, unsigned int nr_segs); bool blk_mq_queue_stopped(struct request_queue *q); @@ -537,6 +536,15 @@ void blk_mq_quiesce_queue_nowait(struct request_queue *q); unsigned int blk_mq_rq_cpu(struct request *rq); +bool __blk_should_fake_timeout(struct request_queue *q); +static inline bool blk_should_fake_timeout(struct request_queue *q) +{ + if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) && + test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags)) + return __blk_should_fake_timeout(q); + return false; +} + /** * blk_mq_rq_from_pdu - cast a PDU to a request * @pdu: the PDU (Protocol Data Unit) to be casted -- cgit v1.2.3 From 40d09b53bfc557af7481b9d80f060a7ac9c7d314 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 11 Jun 2020 08:44:50 +0200 Subject: blk-mq: add a new blk_mq_complete_request_remote API This is a variant of blk_mq_complete_request_remote that only completes the request if it needs to be bounced to another CPU or a softirq. 
If the request can be completed locally the function returns false and lets the driver complete it without requiring an indirect function call. Reviewed-by: Daniel Wagner Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 45 ++++++++++++++++++++++++++------------------- include/linux/blk-mq.h | 1 + 2 files changed, 27 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/block/blk-mq.c b/block/blk-mq.c index 961635b40999..b8738b3c6d06 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -632,8 +632,11 @@ static int blk_softirq_cpu_dead(unsigned int cpu) return 0; } -static void __blk_mq_complete_request(struct request *rq) + +static void __blk_mq_complete_request_remote(void *data) { + struct request *rq = data; + /* * For most of single queue controllers, there is only one irq vector * for handling I/O completion, and the only irq's affinity is set @@ -649,11 +652,6 @@ static void __blk_mq_complete_request(struct request *rq) rq->q->mq_ops->complete(rq); } -static void __blk_mq_complete_request_remote(void *data) -{ - __blk_mq_complete_request(data); -} - static inline bool blk_mq_complete_need_ipi(struct request *rq) { int cpu = raw_smp_processor_id(); @@ -672,14 +670,7 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq) return cpu_online(rq->mq_ctx->cpu); } -/** - * blk_mq_complete_request - end I/O on a request - * @rq: the request being processed - * - * Description: - * Complete a request by scheduling the ->complete_rq operation. - **/ -void blk_mq_complete_request(struct request *rq) +bool blk_mq_complete_request_remote(struct request *rq) { WRITE_ONCE(rq->state, MQ_RQ_COMPLETE); @@ -687,10 +678,8 @@ void blk_mq_complete_request(struct request *rq) * For a polled request, always complete locallly, it's pointless * to redirect the completion. */ - if (rq->cmd_flags & REQ_HIPRI) { - rq->q->mq_ops->complete(rq); - return; - } + if (rq->cmd_flags & REQ_HIPRI) + return false; if (blk_mq_complete_need_ipi(rq)) { rq->csd.func = __blk_mq_complete_request_remote; @@ -698,8 +687,26 @@ void blk_mq_complete_request(struct request *rq) rq->csd.flags = 0; smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd); } else { - __blk_mq_complete_request(rq); + if (rq->q->nr_hw_queues > 1) + return false; + blk_mq_trigger_softirq(rq); } + + return true; +} +EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote); + +/** + * blk_mq_complete_request - end I/O on a request + * @rq: the request being processed + * + * Description: + * Complete a request by scheduling the ->complete_rq operation.
+ **/ +void blk_mq_complete_request(struct request *rq) +{ + if (!blk_mq_complete_request_remote(rq)) + rq->q->mq_ops->complete(rq); } EXPORT_SYMBOL(blk_mq_complete_request); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 8e6ab766aef7..1641ec6cd7e5 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -504,6 +504,7 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list); void blk_mq_kick_requeue_list(struct request_queue *q); void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); void blk_mq_complete_request(struct request *rq); +bool blk_mq_complete_request_remote(struct request *rq); bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list, struct bio *bio, unsigned int nr_segs); bool blk_mq_queue_stopped(struct request_queue *q); -- cgit v1.2.3 From e8c7d14ac6c37c173ec606907d38802b00302988 Mon Sep 17 00:00:00 2001 From: Luis Chamberlain Date: Fri, 19 Jun 2020 20:47:25 +0000 Subject: block: revert back to synchronous request_queue removal Commit dc9edc44de6c ("block: Fix a blk_exit_rl() regression") merged on v4.12 moved the work behind blk_release_queue() into a workqueue after a splat floated around which indicated some work on blk_release_queue() could sleep in blk_exit_rl(). This splat would be possible when a driver called blk_put_queue() or blk_cleanup_queue() (which calls blk_put_queue() as its final call) from an atomic context. blk_put_queue() decrements the refcount for the request_queue kobject, and upon reaching 0 blk_release_queue() is called. Although blk_exit_rl() is now removed through commit db6d99523560 ("block: remove request_list code") on v5.0, we reserve the right to be able to sleep within blk_release_queue() context. The last reference for the request_queue must not be called from atomic context. *When* the last reference to the request_queue reaches 0 varies, and so let's take the opportunity to document when that is expected to happen and also document the context of the related calls as best as possible so we can avoid future issues, and with the hopes that the synchronous request_queue removal sticks. We revert back to synchronous request_queue removal because asynchronous removal creates a regression with expected userspace interaction with several drivers. An example is when removing the loopback driver, one uses ioctls from userspace to do so, but upon return and if successful, one expects the device to be removed. Likewise if one races to add another device the new one may not be added as it is still being removed. This was expected behavior before and it now fails as the device is still present and busy still. Moving to asynchronous request_queue removal could have broken many scripts which relied on the removal to have been completed if there was no error. Document this expectation as well so that this doesn't regress userspace again. Using asynchronous request_queue removal however has helped us find other bugs. In the future we can test what could break with this arrangement by enabling CONFIG_DEBUG_KOBJECT_RELEASE. While at it, update the docs with the context expectations for the request_queue / gendisk refcount decrement, and make these expectations explicit by using might_sleep(). 
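As a purely illustrative sketch of the rule being documented (not part of the patch; the foo_* names are hypothetical), the final reference drops now have to come from a context that may sleep:

	struct foo_device {
		struct gendisk *disk;
		struct request_queue *queue;
		spinlock_t lock;
	};

	/* OK: teardown from process context, e.g. the driver's remove path. */
	static void foo_remove(struct foo_device *fd)
	{
		del_gendisk(fd->disk);
		blk_cleanup_queue(fd->queue);	/* may sleep */
		put_disk(fd->disk);		/* dropping the last ref may sleep too */
	}

	/* Not OK: potentially dropping the last reference under a spinlock.
	 * blk_release_queue() and disk_release() now contain might_sleep(),
	 * so this can trigger a debug splat if it really is the final put.
	 */
	spin_lock(&fd->lock);
	blk_put_queue(fd->queue);
	spin_unlock(&fd->lock);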
Fixes: dc9edc44de6c ("block: Fix a blk_exit_rl() regression") Suggested-by: Nicolai Stange Signed-off-by: Luis Chamberlain Reviewed-by: Christoph Hellwig Reviewed-by: Bart Van Assche Cc: Bart Van Assche Cc: Omar Sandoval Cc: Hannes Reinecke Cc: Nicolai Stange Cc: Greg Kroah-Hartman Cc: Michal Hocko Cc: yu kuai Signed-off-by: Jens Axboe --- block/blk-core.c | 8 ++++++++ block/blk-sysfs.c | 43 ++++++++++++++++++++++--------------------- block/genhd.c | 17 +++++++++++++++++ include/linux/blkdev.h | 2 -- 4 files changed, 47 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/block/blk-core.c b/block/blk-core.c index f68398cb2ef6..a99b22fac38a 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -327,6 +327,9 @@ EXPORT_SYMBOL_GPL(blk_clear_pm_only); * * Decrements the refcount of the request_queue kobject. When this reaches 0 * we'll have blk_release_queue() called. + * + * Context: Any context, but the last reference must not be dropped from + * atomic context. */ void blk_put_queue(struct request_queue *q) { @@ -359,9 +362,14 @@ EXPORT_SYMBOL_GPL(blk_set_queue_dying); * * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and * put it. All future requests will be failed immediately with -ENODEV. + * + * Context: can sleep */ void blk_cleanup_queue(struct request_queue *q) { + /* cannot be called from atomic context */ + might_sleep(); + WARN_ON_ONCE(blk_queue_registered(q)); /* mark @q DYING, no new request or merges will be allowed afterwards */ diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 02643e149d5e..561624d4cc4e 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -873,22 +873,32 @@ static void blk_exit_queue(struct request_queue *q) bdi_put(q->backing_dev_info); } - /** - * __blk_release_queue - release a request queue - * @work: pointer to the release_work member of the request queue to be released + * blk_release_queue - releases all allocated resources of the request_queue + * @kobj: pointer to a kobject, whose container is a request_queue + * + * This function releases all allocated resources of the request queue. + * + * The struct request_queue refcount is incremented with blk_get_queue() and + * decremented with blk_put_queue(). Once the refcount reaches 0 this function + * is called. + * + * For drivers that have a request_queue on a gendisk and added with + * __device_add_disk() the refcount to request_queue will reach 0 with + * the last put_disk() called by the driver. For drivers which don't use + * __device_add_disk() this happens with blk_cleanup_queue(). * - * Description: - * This function is called when a block device is being unregistered. The - * process of releasing a request queue starts with blk_cleanup_queue, which - * set the appropriate flags and then calls blk_put_queue, that decrements - * the reference counter of the request queue. Once the reference counter - * of the request queue reaches zero, blk_release_queue is called to release - * all allocated resources of the request queue. + * Drivers exist which depend on the release of the request_queue to be + * synchronous, it should not be deferred. 
+ * + * Context: can sleep */ -static void __blk_release_queue(struct work_struct *work) +static void blk_release_queue(struct kobject *kobj) { - struct request_queue *q = container_of(work, typeof(*q), release_work); + struct request_queue *q = + container_of(kobj, struct request_queue, kobj); + + might_sleep(); if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags)) blk_stat_remove_callback(q, q->poll_cb); @@ -917,15 +927,6 @@ static void __blk_release_queue(struct work_struct *work) call_rcu(&q->rcu_head, blk_free_queue_rcu); } -static void blk_release_queue(struct kobject *kobj) -{ - struct request_queue *q = - container_of(kobj, struct request_queue, kobj); - - INIT_WORK(&q->release_work, __blk_release_queue); - schedule_work(&q->release_work); -} - static const struct sysfs_ops queue_sysfs_ops = { .show = queue_attr_show, .store = queue_attr_store, diff --git a/block/genhd.c b/block/genhd.c index 1be86b1f43ec..60ae4e1b4d38 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -889,12 +889,19 @@ static void invalidate_partition(struct gendisk *disk, int partno) * The final removal of the struct gendisk happens when its refcount reaches 0 * with put_disk(), which should be called after del_gendisk(), if * __device_add_disk() was used. + * + * Drivers exist which depend on the release of the gendisk to be synchronous, + * it should not be deferred. + * + * Context: can sleep */ void del_gendisk(struct gendisk *disk) { struct disk_part_iter piter; struct hd_struct *part; + might_sleep(); + blk_integrity_del(disk); disk_del_events(disk); @@ -1548,11 +1555,15 @@ int disk_expand_part_tbl(struct gendisk *disk, int partno) * drivers we also call blk_put_queue() for them, and we expect the * request_queue refcount to reach 0 at this point, and so the request_queue * will also be freed prior to the disk. + * + * Context: can sleep */ static void disk_release(struct device *dev) { struct gendisk *disk = dev_to_disk(dev); + might_sleep(); + blk_free_devt(dev->devt); disk_release_events(disk); kfree(disk->random); @@ -1797,6 +1808,9 @@ EXPORT_SYMBOL(get_disk_and_module); * * This decrements the refcount for the struct gendisk. When this reaches 0 * we'll have disk_release() called. + * + * Context: Any context, but the last reference must not be dropped from + * atomic context. */ void put_disk(struct gendisk *disk) { @@ -1811,6 +1825,9 @@ EXPORT_SYMBOL(put_disk); * * This is a counterpart of get_disk_and_module() and thus also of * get_gendisk(). + * + * Context: Any context, but the last reference must not be dropped from + * atomic context. */ void put_disk_and_module(struct gendisk *disk) { diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 98712cfc7a34..e214e0e9f868 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -584,8 +584,6 @@ struct request_queue { size_t cmd_size; - struct work_struct release_work; - #define BLK_MAX_WRITE_HINTS 5 u64 write_hints[BLK_MAX_WRITE_HINTS]; }; -- cgit v1.2.3 From 85e0cbbb8a79537dbc465e9deb449a08b2b092a6 Mon Sep 17 00:00:00 2001 From: Luis Chamberlain Date: Fri, 19 Jun 2020 20:47:30 +0000 Subject: block: create the request_queue debugfs_dir on registration We were only creating the request_queue debugfs_dir only for make_request block drivers (multiqueue), but never for request-based block drivers. We did this as we were only creating non-blktrace additional debugfs files on that directory for make_request drivers. However, since blktrace *always* creates that directory anyway, we special-case the use of that directory on blktrace. 
Other than this being an eye-sore, this exposes request-based block drivers to the same debugfs fragile race that used to exist with make_request block drivers where if we start adding files onto that directory we can later run a race with a double removal of dentries on the directory if we don't deal with this carefully on blktrace. Instead, just simplify things by always creating the request_queue debugfs_dir on request_queue registration. Rename the mutex also to reflect the fact that this is used outside of the blktrace context. Signed-off-by: Luis Chamberlain Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-core.c | 8 +------ block/blk-mq-debugfs.c | 5 ----- block/blk-sysfs.c | 9 ++++++++ block/blk.h | 2 -- include/linux/blkdev.h | 5 +++-- kernel/trace/blktrace.c | 58 ++++++++++++++++++++++--------------------------- 6 files changed, 39 insertions(+), 48 deletions(-) (limited to 'include') diff --git a/block/blk-core.c b/block/blk-core.c index a99b22fac38a..a9769c1a2875 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -51,9 +51,7 @@ #include "blk-pm.h" #include "blk-rq-qos.h" -#ifdef CONFIG_DEBUG_FS struct dentry *blk_debugfs_root; -#endif EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap); EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); @@ -555,9 +553,7 @@ struct request_queue *__blk_alloc_queue(int node_id) kobject_init(&q->kobj, &blk_queue_ktype); -#ifdef CONFIG_BLK_DEV_IO_TRACE - mutex_init(&q->blk_trace_mutex); -#endif + mutex_init(&q->debugfs_mutex); mutex_init(&q->sysfs_lock); mutex_init(&q->sysfs_dir_lock); spin_lock_init(&q->queue_lock); @@ -1931,9 +1927,7 @@ int __init blk_dev_init(void) blk_requestq_cachep = kmem_cache_create("request_queue", sizeof(struct request_queue), 0, SLAB_PANIC, NULL); -#ifdef CONFIG_DEBUG_FS blk_debugfs_root = debugfs_create_dir("block", NULL); -#endif return 0; } diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 15df3a36e9fa..a2800bc56fb4 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -824,9 +824,6 @@ void blk_mq_debugfs_register(struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i; - q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent), - blk_debugfs_root); - debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs); /* @@ -857,9 +854,7 @@ void blk_mq_debugfs_register(struct request_queue *q) void blk_mq_debugfs_unregister(struct request_queue *q) { - debugfs_remove_recursive(q->debugfs_dir); q->sched_debugfs_dir = NULL; - q->debugfs_dir = NULL; } static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx, diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 561624d4cc4e..be67952e7be2 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "blk.h" #include "blk-mq.h" @@ -917,6 +918,9 @@ static void blk_release_queue(struct kobject *kobj) blk_mq_release(q); blk_trace_shutdown(q); + mutex_lock(&q->debugfs_mutex); + debugfs_remove_recursive(q->debugfs_dir); + mutex_unlock(&q->debugfs_mutex); if (queue_is_mq(q)) blk_mq_debugfs_unregister(q); @@ -989,6 +993,11 @@ int blk_register_queue(struct gendisk *disk) goto unlock; } + mutex_lock(&q->debugfs_mutex); + q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent), + blk_debugfs_root); + mutex_unlock(&q->debugfs_mutex); + if (queue_is_mq(q)) { __blk_mq_register_dev(dev, q); blk_mq_debugfs_register(q); diff --git a/block/blk.h b/block/blk.h index 8ba4a5e4fe07..3a120a070dac 100644 --- a/block/blk.h +++ b/block/blk.h @@ -14,9 +14,7 @@ /* 
Max future timer expiry for timeouts */ #define BLK_MAX_TIMEOUT (5 * HZ) -#ifdef CONFIG_DEBUG_FS extern struct dentry *blk_debugfs_root; -#endif struct blk_flush_queue { unsigned int flush_pending_idx:1; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index e214e0e9f868..c0701237116d 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -528,9 +528,9 @@ struct request_queue { unsigned int sg_timeout; unsigned int sg_reserved_size; int node; + struct mutex debugfs_mutex; #ifdef CONFIG_BLK_DEV_IO_TRACE struct blk_trace __rcu *blk_trace; - struct mutex blk_trace_mutex; #endif /* * for flush operations @@ -574,8 +574,9 @@ struct request_queue { struct list_head tag_set_list; struct bio_set bio_split; -#ifdef CONFIG_BLK_DEBUG_FS struct dentry *debugfs_dir; + +#ifdef CONFIG_BLK_DEBUG_FS struct dentry *sched_debugfs_dir; struct dentry *rqos_debugfs_dir; #endif diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 46c273f4dec5..c086c38f4954 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -348,7 +348,7 @@ static int __blk_trace_remove(struct request_queue *q) struct blk_trace *bt; bt = rcu_replace_pointer(q->blk_trace, NULL, - lockdep_is_held(&q->blk_trace_mutex)); + lockdep_is_held(&q->debugfs_mutex)); if (!bt) return -EINVAL; @@ -362,9 +362,9 @@ int blk_trace_remove(struct request_queue *q) { int ret; - mutex_lock(&q->blk_trace_mutex); + mutex_lock(&q->debugfs_mutex); ret = __blk_trace_remove(q); - mutex_unlock(&q->blk_trace_mutex); + mutex_unlock(&q->debugfs_mutex); return ret; } @@ -483,14 +483,11 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, struct dentry *dir = NULL; int ret; - lockdep_assert_held(&q->blk_trace_mutex); + lockdep_assert_held(&q->debugfs_mutex); if (!buts->buf_size || !buts->buf_nr) return -EINVAL; - if (!blk_debugfs_root) - return -ENOENT; - strncpy(buts->name, name, BLKTRACE_BDEV_SIZE); buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0'; @@ -505,7 +502,7 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, * we can be. */ if (rcu_dereference_protected(q->blk_trace, - lockdep_is_held(&q->blk_trace_mutex))) { + lockdep_is_held(&q->debugfs_mutex))) { pr_warn("Concurrent blktraces are not allowed on %s\n", buts->name); return -EBUSY; @@ -524,18 +521,15 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, if (!bt->msg_data) goto err; -#ifdef CONFIG_BLK_DEBUG_FS /* - * When tracing whole make_request drivers (multiqueue) block devices, - * reuse the existing debugfs directory created by the block layer on - * init. For request-based block devices, all partitions block devices, + * When tracing the whole disk reuse the existing debugfs directory + * created by the block layer on init. For partitions block devices, * and scsi-generic block devices we create a temporary new debugfs * directory that will be removed once the trace ends. 
*/ - if (queue_is_mq(q) && bdev && bdev == bdev->bd_contains) + if (bdev && bdev == bdev->bd_contains) dir = q->debugfs_dir; else -#endif bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root); /* @@ -617,9 +611,9 @@ int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, { int ret; - mutex_lock(&q->blk_trace_mutex); + mutex_lock(&q->debugfs_mutex); ret = __blk_trace_setup(q, name, dev, bdev, arg); - mutex_unlock(&q->blk_trace_mutex); + mutex_unlock(&q->debugfs_mutex); return ret; } @@ -665,7 +659,7 @@ static int __blk_trace_startstop(struct request_queue *q, int start) struct blk_trace *bt; bt = rcu_dereference_protected(q->blk_trace, - lockdep_is_held(&q->blk_trace_mutex)); + lockdep_is_held(&q->debugfs_mutex)); if (bt == NULL) return -EINVAL; @@ -705,9 +699,9 @@ int blk_trace_startstop(struct request_queue *q, int start) { int ret; - mutex_lock(&q->blk_trace_mutex); + mutex_lock(&q->debugfs_mutex); ret = __blk_trace_startstop(q, start); - mutex_unlock(&q->blk_trace_mutex); + mutex_unlock(&q->debugfs_mutex); return ret; } @@ -736,7 +730,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) if (!q) return -ENXIO; - mutex_lock(&q->blk_trace_mutex); + mutex_lock(&q->debugfs_mutex); switch (cmd) { case BLKTRACESETUP: @@ -763,7 +757,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) break; } - mutex_unlock(&q->blk_trace_mutex); + mutex_unlock(&q->debugfs_mutex); return ret; } @@ -774,14 +768,14 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) **/ void blk_trace_shutdown(struct request_queue *q) { - mutex_lock(&q->blk_trace_mutex); + mutex_lock(&q->debugfs_mutex); if (rcu_dereference_protected(q->blk_trace, - lockdep_is_held(&q->blk_trace_mutex))) { + lockdep_is_held(&q->debugfs_mutex))) { __blk_trace_startstop(q, 0); __blk_trace_remove(q); } - mutex_unlock(&q->blk_trace_mutex); + mutex_unlock(&q->debugfs_mutex); } #ifdef CONFIG_BLK_CGROUP @@ -1662,7 +1656,7 @@ static int blk_trace_remove_queue(struct request_queue *q) struct blk_trace *bt; bt = rcu_replace_pointer(q->blk_trace, NULL, - lockdep_is_held(&q->blk_trace_mutex)); + lockdep_is_held(&q->debugfs_mutex)); if (bt == NULL) return -EINVAL; @@ -1837,10 +1831,10 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev, if (q == NULL) goto out_bdput; - mutex_lock(&q->blk_trace_mutex); + mutex_lock(&q->debugfs_mutex); bt = rcu_dereference_protected(q->blk_trace, - lockdep_is_held(&q->blk_trace_mutex)); + lockdep_is_held(&q->debugfs_mutex)); if (attr == &dev_attr_enable) { ret = sprintf(buf, "%u\n", !!bt); goto out_unlock_bdev; @@ -1858,7 +1852,7 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev, ret = sprintf(buf, "%llu\n", bt->end_lba); out_unlock_bdev: - mutex_unlock(&q->blk_trace_mutex); + mutex_unlock(&q->debugfs_mutex); out_bdput: bdput(bdev); out: @@ -1901,10 +1895,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, if (q == NULL) goto out_bdput; - mutex_lock(&q->blk_trace_mutex); + mutex_lock(&q->debugfs_mutex); bt = rcu_dereference_protected(q->blk_trace, - lockdep_is_held(&q->blk_trace_mutex)); + lockdep_is_held(&q->debugfs_mutex)); if (attr == &dev_attr_enable) { if (!!value == !!bt) { ret = 0; @@ -1921,7 +1915,7 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, if (bt == NULL) { ret = blk_trace_setup_queue(q, bdev); bt = rcu_dereference_protected(q->blk_trace, - lockdep_is_held(&q->blk_trace_mutex)); + lockdep_is_held(&q->debugfs_mutex)); } if (ret == 0) { @@ -1936,7 
+1930,7 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, } out_unlock_bdev: - mutex_unlock(&q->blk_trace_mutex); + mutex_unlock(&q->debugfs_mutex); out_bdput: bdput(bdev); out: -- cgit v1.2.3 From b818f09e46f9f6a66471f81bf83094ff0a477d0c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 20 Jun 2020 09:16:35 +0200 Subject: tty/sysrq: emergency_thaw_all does not depend on CONFIG_BLOCK We can also thaw non-block file systems. Remove the CONFIG_BLOCK in sysrq.c after making the prototype available unconditionally. Signed-off-by: Christoph Hellwig Reviewed-by: Bart Van Assche Reviewed-by: Greg Kroah-Hartman Signed-off-by: Jens Axboe --- drivers/tty/sysrq.c | 2 -- include/linux/fs.h | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index 7c95afa905a0..a8e39b2cdd55 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -403,7 +403,6 @@ static const struct sysrq_key_op sysrq_moom_op = { .enable_mask = SYSRQ_ENABLE_SIGNAL, }; -#ifdef CONFIG_BLOCK static void sysrq_handle_thaw(int key) { emergency_thaw_all(); @@ -414,7 +413,6 @@ static const struct sysrq_key_op sysrq_thaw_op = { .action_msg = "Emergency Thaw of all frozen filesystems", .enable_mask = SYSRQ_ENABLE_SIGNAL, }; -#endif static void sysrq_handle_kill(int key) { diff --git a/include/linux/fs.h b/include/linux/fs.h index 3f881a892ea7..7f40dbafbf6d 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2593,7 +2593,6 @@ extern void invalidate_bdev(struct block_device *); extern void iterate_bdevs(void (*)(struct block_device *, void *), void *); extern int sync_blockdev(struct block_device *bdev); extern struct super_block *freeze_bdev(struct block_device *); -extern void emergency_thaw_all(void); extern void emergency_thaw_bdev(struct super_block *sb); extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); extern int fsync_bdev(struct block_device *); @@ -2633,6 +2632,7 @@ static inline bool sb_is_blkdev_sb(struct super_block *sb) return false; } #endif +void emergency_thaw_all(void); extern int sync_filesystem(struct super_block *); extern const struct file_operations def_blk_fops; extern const struct file_operations def_chr_fops; -- cgit v1.2.3 From 764b23bd9af8ff8ecc664816e39d4791b6a72bfd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 20 Jun 2020 09:16:36 +0200 Subject: block: mark bd_finish_claiming static Signed-off-by: Christoph Hellwig Reviewed-by: Bart Van Assche Signed-off-by: Jens Axboe --- fs/block_dev.c | 5 ++--- include/linux/fs.h | 2 -- 2 files changed, 2 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/fs/block_dev.c b/fs/block_dev.c index 0ae656e022fd..0e0d43dc27d3 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -1187,8 +1187,8 @@ static void bd_clear_claiming(struct block_device *whole, void *holder) * Finish exclusive open of a block device. Mark the device as exlusively * open by the holder and wake up all waiters for exclusive open to finish. 
*/ -void bd_finish_claiming(struct block_device *bdev, struct block_device *whole, - void *holder) +static void bd_finish_claiming(struct block_device *bdev, + struct block_device *whole, void *holder) { spin_lock(&bdev_lock); BUG_ON(!bd_may_claim(bdev, whole, holder)); @@ -1203,7 +1203,6 @@ void bd_finish_claiming(struct block_device *bdev, struct block_device *whole, bd_clear_claiming(whole, holder); spin_unlock(&bdev_lock); } -EXPORT_SYMBOL(bd_finish_claiming); /** * bd_abort_claiming - abort claiming of a block device diff --git a/include/linux/fs.h b/include/linux/fs.h index 7f40dbafbf6d..b1c960e9b84e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2646,8 +2646,6 @@ extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder); extern struct block_device *bd_start_claiming(struct block_device *bdev, void *holder); -extern void bd_finish_claiming(struct block_device *bdev, - struct block_device *whole, void *holder); extern void bd_abort_claiming(struct block_device *bdev, struct block_device *whole, void *holder); extern void blkdev_put(struct block_device *bdev, fmode_t mode); -- cgit v1.2.3 From 7dbac5baa887facf80373825b8f66c626703621f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 20 Jun 2020 09:16:37 +0200 Subject: fs: remove an unused block_device_operations forward declaration Signed-off-by: Christoph Hellwig Reviewed-by: Bart Van Assche Signed-off-by: Jens Axboe --- include/linux/fs.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include') diff --git a/include/linux/fs.h b/include/linux/fs.h index b1c960e9b84e..0d282c853691 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1774,8 +1774,6 @@ struct dir_context { loff_t pos; }; -struct block_device_operations; - /* These macros are for out of kernel modules to test that * the kernel supports the unlocked_ioctl and compat_ioctl * fields in struct file_operations. */ -- cgit v1.2.3 From 4e24566a134ea167441a1ffa3d439a27cf400880 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 20 Jun 2020 09:16:38 +0200 Subject: fs: remove the HAVE_UNLOCKED_IOCTL and HAVE_COMPAT_IOCTL defines These are not defined anywhere, and contrary to the comments we really do not care about out of tree code at all. Signed-off-by: Christoph Hellwig Reviewed-by: Bart Van Assche Signed-off-by: Jens Axboe --- include/linux/fs.h | 6 ------ 1 file changed, 6 deletions(-) (limited to 'include') diff --git a/include/linux/fs.h b/include/linux/fs.h index 0d282c853691..224edcc5b56e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1774,12 +1774,6 @@ struct dir_context { loff_t pos; }; -/* These macros are for out of kernel modules to test that - * the kernel supports the unlocked_ioctl and compat_ioctl - * fields in struct file_operations. */ -#define HAVE_COMPAT_IOCTL 1 -#define HAVE_UNLOCKED_IOCTL 1 - /* * These flags let !MMU mmap() govern direct device mapping vs immediate * copying more easily for MAP_PRIVATE, especially for ROM filesystems. -- cgit v1.2.3 From 75362a1792d16a61f0277d3610dea2f50a16bf3e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 20 Jun 2020 09:16:39 +0200 Subject: fs: remove the mount_bdev and kill_block_super stubs No one calls these functions without CONFIG_BLOCK, so don't bother stubbing them out. 
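As context for why the stubs had no callers: block-backed filesystems are the only users of these helpers, and such filesystems are built only with CONFIG_BLOCK. A minimal sketch of that kind of registration follows; the "examplefs" name and its fill_super callback are made up for illustration and are not taken from this series.

#include <linux/fs.h>
#include <linux/module.h>

/* Hypothetical fill_super callback; a real one would read the on-disk
 * superblock from the backing device and set up the root inode. */
static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
	return -EINVAL;
}

static struct dentry *examplefs_mount(struct file_system_type *fs_type,
				      int flags, const char *dev_name,
				      void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, examplefs_fill_super);
}

static struct file_system_type examplefs_type = {
	.owner		= THIS_MODULE,
	.name		= "examplefs",
	.mount		= examplefs_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};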
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- include/linux/fs.h | 16 ---------------- 1 file changed, 16 deletions(-) (limited to 'include') diff --git a/include/linux/fs.h b/include/linux/fs.h index 224edcc5b56e..9ee09e2b5a97 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2256,18 +2256,9 @@ struct file_system_type { #define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME) -#ifdef CONFIG_BLOCK extern struct dentry *mount_bdev(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, int (*fill_super)(struct super_block *, void *, int)); -#else -static inline struct dentry *mount_bdev(struct file_system_type *fs_type, - int flags, const char *dev_name, void *data, - int (*fill_super)(struct super_block *, void *, int)) -{ - return ERR_PTR(-ENODEV); -} -#endif extern struct dentry *mount_single(struct file_system_type *fs_type, int flags, void *data, int (*fill_super)(struct super_block *, void *, int)); @@ -2276,14 +2267,7 @@ extern struct dentry *mount_nodev(struct file_system_type *fs_type, int (*fill_super)(struct super_block *, void *, int)); extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path); void generic_shutdown_super(struct super_block *sb); -#ifdef CONFIG_BLOCK void kill_block_super(struct super_block *sb); -#else -static inline void kill_block_super(struct super_block *sb) -{ - BUG(); -} -#endif void kill_anon_super(struct super_block *sb); void kill_litter_super(struct super_block *sb); void deactivate_super(struct super_block *sb); -- cgit v1.2.3 From dd0dca223e091bbacdd3c7ce9cf06b373da59816 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 20 Jun 2020 09:16:40 +0200 Subject: block: simplify sb_is_blkdev_sb Just use IS_ENABLED instead of providing a stub for !CONFIG_BLOCK. 
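A rough sketch of the idiom being applied, using a made-up config symbol and variable (the real change is in the hunk below): IS_ENABLED() expands to a compile-time 0 or 1, so when the option is off the short-circuited comparison is folded away, the unconditional extern causes no link error, and no separate stub is needed.

#include <linux/kconfig.h>	/* IS_ENABLED() */
#include <linux/types.h>

struct super_block;
extern struct super_block *example_superblock;	/* only defined when CONFIG_EXAMPLE=y */

static inline bool sb_is_example_sb(struct super_block *sb)
{
	/*
	 * With CONFIG_EXAMPLE=n this folds to 'false' at compile time, so no
	 * reference to example_superblock lands in the object code and no
	 * !CONFIG_EXAMPLE variant of this helper is required.
	 */
	return IS_ENABLED(CONFIG_EXAMPLE) && sb == example_superblock;
}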
Signed-off-by: Christoph Hellwig Reviewed-by: Bart Van Assche Signed-off-by: Jens Axboe --- include/linux/fs.h | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/linux/fs.h b/include/linux/fs.h index 9ee09e2b5a97..7f3ae38335d4 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2557,6 +2557,12 @@ extern struct kmem_cache *names_cachep; #define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL) #define __putname(name) kmem_cache_free(names_cachep, (void *)(name)) +extern struct super_block *blockdev_superblock; +static inline bool sb_is_blkdev_sb(struct super_block *sb) +{ + return IS_ENABLED(CONFIG_BLOCK) && sb == blockdev_superblock; +} + #ifdef CONFIG_BLOCK extern int register_blkdev(unsigned int, const char *); extern void unregister_blkdev(unsigned int, const char *); @@ -2572,13 +2578,6 @@ extern struct super_block *freeze_bdev(struct block_device *); extern void emergency_thaw_bdev(struct super_block *sb); extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); extern int fsync_bdev(struct block_device *); - -extern struct super_block *blockdev_superblock; - -static inline bool sb_is_blkdev_sb(struct super_block *sb) -{ - return sb == blockdev_superblock; -} #else static inline void bd_forget(struct inode *inode) {} static inline int sync_blockdev(struct block_device *bdev) { return 0; } @@ -2602,11 +2601,6 @@ static inline int emergency_thaw_bdev(struct super_block *sb) static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void *arg) { } - -static inline bool sb_is_blkdev_sb(struct super_block *sb) -{ - return false; -} #endif void emergency_thaw_all(void); extern int sync_filesystem(struct super_block *); -- cgit v1.2.3 From 3f1266f1f82d7b8c72472a8921e80aa3e611fb62 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 20 Jun 2020 09:16:41 +0200 Subject: block: move block-related definitions out of fs.h Move most of the block related definition out of fs.h into more suitable headers. 
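One practical consequence for callers, sketched below with a hypothetical helper: code that uses these interfaces now includes <linux/blkdev.h> (or <linux/genhd.h>) itself rather than picking the declarations up through <linux/fs.h>.

#include <linux/blkdev.h>	/* bdevname() and BDEVNAME_SIZE now live here */
#include <linux/printk.h>

static void example_log_bdev(struct block_device *bdev)
{
	char name[BDEVNAME_SIZE];

	/*
	 * Before this series the declaration was reachable through
	 * <linux/fs.h>; callers now include <linux/blkdev.h> directly.
	 */
	pr_info("using block device %s\n", bdevname(bdev, name));
}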
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- fs/affs/file.c | 1 + fs/hfs/inode.c | 1 + fs/internal.h | 17 ++++++++- fs/ntfs/dir.c | 1 + fs/proc/devices.c | 1 + fs/quota/dquot.c | 1 + fs/reiserfs/procfs.c | 1 + include/linux/blkdev.h | 46 +++++++++++++++++++++++ include/linux/fs.h | 92 ---------------------------------------------- include/linux/genhd.h | 27 ++++++++++++++ include/linux/jbd2.h | 1 + security/loadpin/loadpin.c | 1 + 12 files changed, 96 insertions(+), 94 deletions(-) (limited to 'include') diff --git a/fs/affs/file.c b/fs/affs/file.c index a85817f54483..a26a0f96c119 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -14,6 +14,7 @@ */ #include +#include #include "affs.h" static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext); diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index 2f224b98ee94..f35a37c65e5f 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "hfs_fs.h" #include "btree.h" diff --git a/fs/internal.h b/fs/internal.h index 9b863a7bd708..969988d3d397 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -23,7 +23,9 @@ struct user_namespace; extern void __init bdev_cache_init(void); extern int __sync_blockdev(struct block_device *bdev, int wait); - +void iterate_bdevs(void (*)(struct block_device *, void *), void *); +void emergency_thaw_bdev(struct super_block *sb); +void bd_forget(struct inode *inode); #else static inline void bdev_cache_init(void) { @@ -33,7 +35,18 @@ static inline int __sync_blockdev(struct block_device *bdev, int wait) { return 0; } -#endif +static inline void iterate_bdevs(void (*f)(struct block_device *, void *), + void *arg) +{ +} +static inline int emergency_thaw_bdev(struct super_block *sb) +{ + return 0; +} +static inline void bd_forget(struct inode *inode) +{ +} +#endif /* CONFIG_BLOCK */ /* * buffer.c diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c index 3c4811469ae8..a87d4391e6b5 100644 --- a/fs/ntfs/dir.c +++ b/fs/ntfs/dir.c @@ -8,6 +8,7 @@ #include #include +#include #include "dir.h" #include "aops.h" diff --git a/fs/proc/devices.c b/fs/proc/devices.c index 37d38697eaf8..837971e74109 100644 --- a/fs/proc/devices.c +++ b/fs/proc/devices.c @@ -3,6 +3,7 @@ #include #include #include +#include static int devinfo_show(struct seq_file *f, void *v) { diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 7b4bac91146b..bb02989d92b6 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -78,6 +78,7 @@ #include #include #include +#include #include "../internal.h" /* ugh */ #include diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c index ff336513c254..155b82870333 100644 --- a/fs/reiserfs/procfs.c +++ b/fs/reiserfs/procfs.c @@ -15,6 +15,7 @@ #include "reiserfs.h" #include #include +#include /* * LOCKING: diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index c0701237116d..cf8f692f62a9 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1918,4 +1918,50 @@ static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time) } #endif /* CONFIG_BLOCK */ +int bdev_read_only(struct block_device *bdev); +int set_blocksize(struct block_device *bdev, int size); + +const char *bdevname(struct block_device *bdev, char *buffer); +struct block_device *lookup_bdev(const char *); + +void blkdev_show(struct seq_file *seqf, off_t offset); + +#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */ +#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */ +#ifdef CONFIG_BLOCK +#define BLKDEV_MAJOR_MAX 
512 +#else +#define BLKDEV_MAJOR_MAX 0 +#endif + +int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder); +struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, + void *holder); +struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder); +struct block_device *bd_start_claiming(struct block_device *bdev, void *holder); +void bd_abort_claiming(struct block_device *bdev, struct block_device *whole, + void *holder); +void blkdev_put(struct block_device *bdev, fmode_t mode); + +struct block_device *bdget(dev_t); +struct block_device *bdgrab(struct block_device *bdev); +void bdput(struct block_device *); + +#ifdef CONFIG_BLOCK +void invalidate_bdev(struct block_device *bdev); +int sync_blockdev(struct block_device *bdev); +#else +static inline void invalidate_bdev(struct block_device *bdev) +{ +} +static inline int sync_blockdev(struct block_device *bdev) +{ + return 0; +} #endif +int fsync_bdev(struct block_device *bdev); + +struct super_block *freeze_bdev(struct block_device *bdev); +int thaw_bdev(struct block_device *bdev, struct super_block *sb); + +#endif /* _LINUX_BLKDEV_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 7f3ae38335d4..add30c3bdf9a 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2563,79 +2563,10 @@ static inline bool sb_is_blkdev_sb(struct super_block *sb) return IS_ENABLED(CONFIG_BLOCK) && sb == blockdev_superblock; } -#ifdef CONFIG_BLOCK -extern int register_blkdev(unsigned int, const char *); -extern void unregister_blkdev(unsigned int, const char *); -extern struct block_device *bdget(dev_t); -extern struct block_device *bdgrab(struct block_device *bdev); -extern void bd_set_size(struct block_device *, loff_t size); -extern void bd_forget(struct inode *inode); -extern void bdput(struct block_device *); -extern void invalidate_bdev(struct block_device *); -extern void iterate_bdevs(void (*)(struct block_device *, void *), void *); -extern int sync_blockdev(struct block_device *bdev); -extern struct super_block *freeze_bdev(struct block_device *); -extern void emergency_thaw_bdev(struct super_block *sb); -extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); -extern int fsync_bdev(struct block_device *); -#else -static inline void bd_forget(struct inode *inode) {} -static inline int sync_blockdev(struct block_device *bdev) { return 0; } -static inline void invalidate_bdev(struct block_device *bdev) {} - -static inline struct super_block *freeze_bdev(struct block_device *sb) -{ - return NULL; -} - -static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb) -{ - return 0; -} - -static inline int emergency_thaw_bdev(struct super_block *sb) -{ - return 0; -} - -static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void *arg) -{ -} -#endif void emergency_thaw_all(void); extern int sync_filesystem(struct super_block *); extern const struct file_operations def_blk_fops; extern const struct file_operations def_chr_fops; -#ifdef CONFIG_BLOCK -extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); -extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); -extern int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder); -extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, - void *holder); -extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, - void *holder); -extern struct block_device *bd_start_claiming(struct block_device *bdev, - void *holder); 
-extern void bd_abort_claiming(struct block_device *bdev, - struct block_device *whole, void *holder); -extern void blkdev_put(struct block_device *bdev, fmode_t mode); - -#ifdef CONFIG_SYSFS -extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk); -extern void bd_unlink_disk_holder(struct block_device *bdev, - struct gendisk *disk); -#else -static inline int bd_link_disk_holder(struct block_device *bdev, - struct gendisk *disk) -{ - return 0; -} -static inline void bd_unlink_disk_holder(struct block_device *bdev, - struct gendisk *disk) -{ -} -#endif -#endif /* fs/char_dev.c */ #define CHRDEV_MAJOR_MAX 512 @@ -2666,31 +2597,12 @@ static inline void unregister_chrdev(unsigned int major, const char *name) __unregister_chrdev(major, 0, 256, name); } -/* fs/block_dev.c */ -#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */ -#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */ - -#ifdef CONFIG_BLOCK -#define BLKDEV_MAJOR_MAX 512 -extern const char *bdevname(struct block_device *bdev, char *buffer); -extern struct block_device *lookup_bdev(const char *); -extern void blkdev_show(struct seq_file *,off_t); - -#else -#define BLKDEV_MAJOR_MAX 0 -#endif - extern void init_special_inode(struct inode *, umode_t, dev_t); /* Invalid inode operations -- fs/bad_inode.c */ extern void make_bad_inode(struct inode *); extern bool is_bad_inode(struct inode *); -#ifdef CONFIG_BLOCK -extern int revalidate_disk(struct gendisk *); -extern int check_disk_change(struct block_device *); -extern int __invalidate_device(struct block_device *, bool); -#endif unsigned long invalidate_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t end); @@ -3090,10 +3002,6 @@ static inline void remove_inode_hash(struct inode *inode) extern void inode_sb_list_add(struct inode *inode); -#ifdef CONFIG_BLOCK -extern int bdev_read_only(struct block_device *); -#endif -extern int set_blocksize(struct block_device *, int); extern int sb_set_blocksize(struct super_block *, int); extern int sb_min_blocksize(struct super_block *, int); diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 392aad5e29a2..83f8e0d83228 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -373,6 +373,33 @@ extern void blk_unregister_region(dev_t devt, unsigned long range); #define alloc_disk(minors) alloc_disk_node(minors, NUMA_NO_NODE) +int register_blkdev(unsigned int major, const char *name); +void unregister_blkdev(unsigned int major, const char *name); + +int revalidate_disk(struct gendisk *disk); +int check_disk_change(struct block_device *bdev); +int __invalidate_device(struct block_device *bdev, bool kill_dirty); +void bd_set_size(struct block_device *bdev, loff_t size); + +/* for drivers/char/raw.c: */ +int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); +long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); + +#ifdef CONFIG_SYSFS +int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk); +void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk); +#else +static inline int bd_link_disk_holder(struct block_device *bdev, + struct gendisk *disk) +{ + return 0; +} +static inline void bd_unlink_disk_holder(struct block_device *bdev, + struct gendisk *disk) +{ +} +#endif /* CONFIG_SYSFS */ + #else /* CONFIG_BLOCK */ static inline void printk_all_partitions(void) { } diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index d56128df2aff..4aaa29772bb0 100644 --- a/include/linux/jbd2.h +++ 
b/include/linux/jbd2.h @@ -27,6 +27,7 @@ #include #include #include +#include #include #endif diff --git a/security/loadpin/loadpin.c b/security/loadpin/loadpin.c index ee5cb944f4ad..670a1aebb8a1 100644 --- a/security/loadpin/loadpin.c +++ b/security/loadpin/loadpin.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include /* current */ #include -- cgit v1.2.3 From d2de7ea48d83195ef1310555f1fdd9e8e1bab0d3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 20 Jun 2020 09:16:42 +0200 Subject: fs: move the buffer_heads_over_limit stub to buffer_head.h Move the !CONFIG_BLOCK stub to the same place as the non-stub declaration. Signed-off-by: Christoph Hellwig Reviewed-by: Bart Van Assche Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 1 - include/linux/buffer_head.h | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index cf8f692f62a9..c824c6fee35d 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1838,7 +1838,6 @@ struct block_device; /* * stubs for when the block layer is configured out */ -#define buffer_heads_over_limit 0 static inline long nr_blockdev_pages(void) { diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 22fb11e2d2e0..6b47f94378c5 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -406,6 +406,7 @@ static inline int inode_has_buffers(struct inode *inode) { return 0; } static inline void invalidate_inode_buffers(struct inode *inode) {} static inline int remove_inode_buffers(struct inode *inode) { return 1; } static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; } +#define buffer_heads_over_limit 0 #endif /* CONFIG_BLOCK */ #endif /* _LINUX_BUFFER_HEAD_H */ -- cgit v1.2.3 From 1a4dcfa8bc10d6bf4f94ac20adc2b30a1da72cfd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 20 Jun 2020 09:16:43 +0200 Subject: block: reduce ifdef CONFIG_BLOCK madness in headers Large part of bio.h, blkdev.h and genhd.h are under ifdef CONFIG_BLOCK for no good reason. Only stub out function that are called from code that is not dependent on CONFIG_BLOCK and leave the harmless other declarations around. 
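A condensed sketch of the header shape this produces, reusing two declarations from the hunks below: only the function reachable from !CONFIG_BLOCK code gets a fallback stub, while a prototype that is merely unused in such builds stays visible as-is.

struct request;

/*
 * Called from outside the block layer (e.g. the /proc/meminfo "Buffers"
 * accounting), so it keeps a !CONFIG_BLOCK fallback.
 */
#ifdef CONFIG_BLOCK
long nr_blockdev_pages(void);
#else
static inline long nr_blockdev_pages(void)
{
	return 0;
}
#endif

/*
 * Only called from code that already depends on the block layer; an unused
 * prototype is harmless, so no stub is needed.
 */
void blk_dump_rq_flags(struct request *rq, char *msg);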
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- include/linux/bio.h | 3 -- include/linux/blkdev.h | 92 ++++++++++++++++++++++---------------------------- include/linux/genhd.h | 14 ++++---- 3 files changed, 46 insertions(+), 63 deletions(-) (limited to 'include') diff --git a/include/linux/bio.h b/include/linux/bio.h index 91676d4b2dfe..0282f8aa8593 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -8,8 +8,6 @@ #include #include #include - -#ifdef CONFIG_BLOCK /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */ #include @@ -824,5 +822,4 @@ static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb) bio->bi_opf |= REQ_NOWAIT; } -#endif /* CONFIG_BLOCK */ #endif /* __LINUX_BIO_H */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index c824c6fee35d..f788bddc9219 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -4,9 +4,6 @@ #include #include - -#ifdef CONFIG_BLOCK - #include #include #include @@ -1163,13 +1160,13 @@ static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq, return __blk_rq_map_sg(q, rq, sglist, &last_sg); } extern void blk_dump_rq_flags(struct request *, char *); -extern long nr_blockdev_pages(void); bool __must_check blk_get_queue(struct request_queue *); struct request_queue *blk_alloc_queue(make_request_fn make_request, int node_id); extern void blk_put_queue(struct request_queue *); extern void blk_set_queue_dying(struct request_queue *); +#ifdef CONFIG_BLOCK /* * blk_plug permits building a queue of related requests by holding the I/O * fragments for a short period. This allows merging of sequential requests @@ -1229,9 +1226,47 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk) !list_empty(&plug->cb_list)); } +int blkdev_issue_flush(struct block_device *, gfp_t); +long nr_blockdev_pages(void); +#else /* CONFIG_BLOCK */ +struct blk_plug { +}; + +static inline void blk_start_plug(struct blk_plug *plug) +{ +} + +static inline void blk_finish_plug(struct blk_plug *plug) +{ +} + +static inline void blk_flush_plug(struct task_struct *task) +{ +} + +static inline void blk_schedule_flush_plug(struct task_struct *task) +{ +} + + +static inline bool blk_needs_flush_plug(struct task_struct *tsk) +{ + return false; +} + +static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask) +{ + return 0; +} + +static inline long nr_blockdev_pages(void) +{ + return 0; +} +#endif /* CONFIG_BLOCK */ + extern void blk_io_schedule(void); -int blkdev_issue_flush(struct block_device *, gfp_t); extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct page *page); @@ -1831,51 +1866,6 @@ static inline bool blk_req_can_dispatch_to_zone(struct request *rq) } #endif /* CONFIG_BLK_DEV_ZONED */ -#else /* CONFIG_BLOCK */ - -struct block_device; - -/* - * stubs for when the block layer is configured out - */ - -static inline long nr_blockdev_pages(void) -{ - return 0; -} - -struct blk_plug { -}; - -static inline void blk_start_plug(struct blk_plug *plug) -{ -} - -static inline void blk_finish_plug(struct blk_plug *plug) -{ -} - -static inline void blk_flush_plug(struct task_struct *task) -{ -} - -static inline void blk_schedule_flush_plug(struct task_struct *task) -{ -} - - -static inline bool blk_needs_flush_plug(struct task_struct *tsk) -{ - return false; -} - -static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask) -{ - return 0; -} - -#endif /* CONFIG_BLOCK */ - static inline 
void blk_wake_io_task(struct task_struct *waiter) { /* @@ -1889,7 +1879,6 @@ static inline void blk_wake_io_task(struct task_struct *waiter) wake_up_process(waiter); } -#ifdef CONFIG_BLOCK unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors, unsigned int op); void disk_end_io_acct(struct gendisk *disk, unsigned int op, @@ -1915,7 +1904,6 @@ static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time) { return disk_end_io_acct(bio->bi_disk, bio_op(bio), start_time); } -#endif /* CONFIG_BLOCK */ int bdev_read_only(struct block_device *bdev); int set_blocksize(struct block_device *bdev, int size); diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 83f8e0d83228..31a54072ffd6 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -19,8 +19,6 @@ #include #include -#ifdef CONFIG_BLOCK - #define dev_to_disk(device) container_of((device), struct gendisk, part0.__dev) #define dev_to_part(device) container_of((device), struct hd_struct, __dev) #define disk_to_dev(disk) (&(disk)->part0.__dev) @@ -337,12 +335,9 @@ static inline void set_capacity(struct gendisk *disk, sector_t size) disk->part0.nr_sects = size; } -extern dev_t blk_lookup_devt(const char *name, int partno); - int bdev_disk_changed(struct block_device *bdev, bool invalidate); int blk_add_partitions(struct gendisk *disk, struct block_device *bdev); int blk_drop_partitions(struct block_device *bdev); -extern void printk_all_partitions(void); extern struct gendisk *__alloc_disk_node(int minors, int node_id); extern struct kobject *get_disk_and_module(struct gendisk *disk); @@ -400,10 +395,13 @@ static inline void bd_unlink_disk_holder(struct block_device *bdev, } #endif /* CONFIG_SYSFS */ +#ifdef CONFIG_BLOCK +void printk_all_partitions(void); +dev_t blk_lookup_devt(const char *name, int partno); #else /* CONFIG_BLOCK */ - -static inline void printk_all_partitions(void) { } - +static inline void printk_all_partitions(void) +{ +} static inline dev_t blk_lookup_devt(const char *name, int partno) { dev_t devt = MKDEV(0, 0); -- cgit v1.2.3 From 621c1f42945e76015c3a585e7a9fe6e71665eba0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 20 Jun 2020 09:16:44 +0200 Subject: block: move struct block_device to blk_types.h Move the struct block_device definition together with most of the block layer definitions, as it has nothing to do with the rest of fs.h. 
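For code elsewhere in the tree the effect is mainly choosing the right include: passing a pointer around still only needs a forward declaration, while anything that looks inside the structure picks the definition up from <linux/blk_types.h> (typically via <linux/blkdev.h>). A small sketch with a made-up helper name:

#include <linux/blk_types.h>	/* struct block_device definition */
#include <linux/types.h>

/*
 * Hypothetical helper: it dereferences a member, so it needs the full
 * definition rather than a bare 'struct block_device;' forward declaration.
 */
static inline bool example_bdev_is_partition(const struct block_device *bdev)
{
	return bdev->bd_partno != 0;	/* the whole disk (part0) has bd_partno == 0 */
}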
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- fs/adfs/super.c | 1 + fs/befs/linuxvfs.c | 1 + fs/efs/super.c | 1 + fs/jfs/jfs_mount.c | 1 + fs/jfs/resize.c | 1 + include/linux/blk_types.h | 39 ++++++++++++++++++++++++++++++++++++++- include/linux/blkdev.h | 1 + include/linux/dasd_mod.h | 2 ++ include/linux/fs.h | 41 ----------------------------------------- 9 files changed, 46 insertions(+), 42 deletions(-) (limited to 'include') diff --git a/fs/adfs/super.c b/fs/adfs/super.c index a3cc8ecb50da..d553bb5bc17a 100644 --- a/fs/adfs/super.c +++ b/fs/adfs/super.c @@ -12,6 +12,7 @@ #include #include #include +#include #include "adfs.h" #include "dir_f.h" #include "dir_fplus.h" diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index 64cdf4d8e424..2482032021ca 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -22,6 +22,7 @@ #include #include #include +#include #include "befs.h" #include "btree.h" diff --git a/fs/efs/super.c b/fs/efs/super.c index 4a6ebff2af76..a4a945d0ac6a 100644 --- a/fs/efs/super.c +++ b/fs/efs/super.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "efs.h" #include diff --git a/fs/jfs/jfs_mount.c b/fs/jfs/jfs_mount.c index eb8b9e233d73..2935d4c776ec 100644 --- a/fs/jfs/jfs_mount.c +++ b/fs/jfs/jfs_mount.c @@ -36,6 +36,7 @@ #include #include +#include #include "jfs_incore.h" #include "jfs_filsys.h" diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c index 66acea9d878b..bde787c354fc 100644 --- a/fs/jfs/resize.c +++ b/fs/jfs/resize.c @@ -6,6 +6,7 @@ #include #include #include +#include #include "jfs_incore.h" #include "jfs_filsys.h" #include "jfs_metapage.h" diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index ccb895f911b1..a602132cbe32 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -14,12 +14,49 @@ struct bio_set; struct bio; struct bio_integrity_payload; struct page; -struct block_device; struct io_context; struct cgroup_subsys_state; typedef void (bio_end_io_t) (struct bio *); struct bio_crypt_ctx; +struct block_device { + dev_t bd_dev; /* not a kdev_t - it's a search key */ + int bd_openers; + struct inode * bd_inode; /* will die */ + struct super_block * bd_super; + struct mutex bd_mutex; /* open/close mutex */ + void * bd_claiming; + void * bd_holder; + int bd_holders; + bool bd_write_holder; +#ifdef CONFIG_SYSFS + struct list_head bd_holder_disks; +#endif + struct block_device * bd_contains; + unsigned bd_block_size; + u8 bd_partno; + struct hd_struct * bd_part; + /* number of times partitions within this device have been opened. */ + unsigned bd_part_count; + int bd_invalidated; + struct gendisk * bd_disk; + struct request_queue * bd_queue; + struct backing_dev_info *bd_bdi; + struct list_head bd_list; + /* + * Private data. You must have bd_claim'ed the block_device + * to use this. NOTE: bd_claim allows an owner to claim + * the same device multiple times, the owner must take special + * care to not mess up bd_private for that case. + */ + unsigned long bd_private; + + /* The counter of freeze processes */ + int bd_fsfreeze_count; + /* Mutex for freeze */ + struct mutex bd_fsfreeze_mutex; +} __randomize_layout; + /* * Block error status values. See block/blk-core:blk_errors for the details. * Alpha cannot write a byte atomically, so we need to use 32-bit value. 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index f788bddc9219..15497782c176 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1930,6 +1930,7 @@ void bd_abort_claiming(struct block_device *bdev, struct block_device *whole, void *holder); void blkdev_put(struct block_device *bdev, fmode_t mode); +struct block_device *I_BDEV(struct inode *inode); struct block_device *bdget(dev_t); struct block_device *bdgrab(struct block_device *bdev); void bdput(struct block_device *); diff --git a/include/linux/dasd_mod.h b/include/linux/dasd_mod.h index d39abad2ff6e..14e6cf8c6267 100644 --- a/include/linux/dasd_mod.h +++ b/include/linux/dasd_mod.h @@ -4,6 +4,8 @@ #include +struct gendisk; + extern int dasd_biodasdinfo(struct gendisk *disk, dasd_information2_t *info); #endif diff --git a/include/linux/fs.h b/include/linux/fs.h index add30c3bdf9a..1d7c4f7465d2 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -470,45 +470,6 @@ struct address_space { * must be enforced here for CRIS, to let the least significant bit * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON. */ -struct request_queue; - -struct block_device { - dev_t bd_dev; /* not a kdev_t - it's a search key */ - int bd_openers; - struct inode * bd_inode; /* will die */ - struct super_block * bd_super; - struct mutex bd_mutex; /* open/close mutex */ - void * bd_claiming; - void * bd_holder; - int bd_holders; - bool bd_write_holder; -#ifdef CONFIG_SYSFS - struct list_head bd_holder_disks; -#endif - struct block_device * bd_contains; - unsigned bd_block_size; - u8 bd_partno; - struct hd_struct * bd_part; - /* number of times partitions within this device have been opened. */ - unsigned bd_part_count; - int bd_invalidated; - struct gendisk * bd_disk; - struct request_queue * bd_queue; - struct backing_dev_info *bd_bdi; - struct list_head bd_list; - /* - * Private data. You must have bd_claim'ed the block_device - * to use this. NOTE: bd_claim allows an owner to claim - * the same device multiple times, the owner must take special - * care to not mess up bd_private for that case. - */ - unsigned long bd_private; - - /* The counter of freeze processes */ - int bd_fsfreeze_count; - /* Mutex for freeze */ - struct mutex bd_fsfreeze_mutex; -} __randomize_layout; /* XArray tags, for tagging dirty and writeback pages in the pagecache. */ #define PAGECACHE_TAG_DIRTY XA_MARK_0 @@ -907,8 +868,6 @@ static inline unsigned imajor(const struct inode *inode) return MAJOR(inode->i_rdev); } -extern struct block_device *I_BDEV(struct inode *inode); - struct fown_struct { rwlock_t lock; /* protects pid, uid, euid fields */ struct pid *pid; /* pid or -pgrp where SIGIO should be sent */ -- cgit v1.2.3 From 1bc138c622959979eb547be2d3bbc6442a5c80b0 Mon Sep 17 00:00:00 2001 From: Lukasz Luba Date: Wed, 10 Jun 2020 11:12:23 +0100 Subject: PM / EM: add support for other devices than CPUs in Energy Model Add support for other devices than CPUs. The registration function does not require a valid cpumask pointer and is ready to handle new devices. Some of the internal structures has been reorganized in order to keep consistent view (like removing per_cpu pd pointers). Signed-off-by: Lukasz Luba Signed-off-by: Rafael J. 
Wysocki --- include/linux/device.h | 5 + include/linux/energy_model.h | 29 +++-- kernel/power/energy_model.c | 244 +++++++++++++++++++++++++++++-------------- 3 files changed, 194 insertions(+), 84 deletions(-) (limited to 'include') diff --git a/include/linux/device.h b/include/linux/device.h index 15460a5ac024..b72e6f9ad845 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -13,6 +13,7 @@ #define _DEVICE_H_ #include +#include #include #include #include @@ -559,6 +560,10 @@ struct device { struct dev_pm_info power; struct dev_pm_domain *pm_domain; +#ifdef CONFIG_ENERGY_MODEL + struct em_perf_domain *em_pd; +#endif + #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN struct irq_domain *msi_domain; #endif diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index 7076cb22b247..2d4689964029 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -12,8 +12,10 @@ /** * em_perf_state - Performance state of a performance domain - * @frequency: The CPU frequency in KHz, for consistency with CPUFreq - * @power: The power consumed by 1 CPU at this level, in milli-watts + * @frequency: The frequency in KHz, for consistency with CPUFreq + * @power: The power consumed at this level, in milli-watts (by 1 CPU or + by a registered device). It can be a total power: static and + dynamic. * @cost: The cost coefficient associated with this level, used during * energy calculation. Equal to: power * max_frequency / frequency */ @@ -27,12 +29,16 @@ struct em_perf_state { * em_perf_domain - Performance domain * @table: List of performance states, in ascending order * @nr_perf_states: Number of performance states - * @cpus: Cpumask covering the CPUs of the domain + * @cpus: Cpumask covering the CPUs of the domain. It's here + * for performance reasons to avoid potential cache + * misses during energy calculations in the scheduler + * and simplifies allocating/freeing that memory region. * - * A "performance domain" represents a group of CPUs whose performance is - * scaled together. All CPUs of a performance domain must have the same - * micro-architecture. Performance domains often have a 1-to-1 mapping with - * CPUFreq policies. + * In case of CPU device, a "performance domain" represents a group of CPUs + * whose performance is scaled together. All CPUs of a performance domain + * must have the same micro-architecture. Performance domains often have + * a 1-to-1 mapping with CPUFreq policies. In case of other devices the @cpus + * field is unused. */ struct em_perf_domain { struct em_perf_state *table; @@ -71,10 +77,12 @@ struct em_data_callback { #define EM_DATA_CB(_active_power_cb) { .active_power = &_active_power_cb } struct em_perf_domain *em_cpu_get(int cpu); +struct em_perf_domain *em_pd_get(struct device *dev); int em_register_perf_domain(cpumask_t *span, unsigned int nr_states, struct em_data_callback *cb); int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, struct em_data_callback *cb, cpumask_t *span); +void em_dev_unregister_perf_domain(struct device *dev); /** * em_pd_energy() - Estimates the energy consumed by the CPUs of a perf. 
domain @@ -184,10 +192,17 @@ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, { return -EINVAL; } +static inline void em_dev_unregister_perf_domain(struct device *dev) +{ +} static inline struct em_perf_domain *em_cpu_get(int cpu) { return NULL; } +static inline struct em_perf_domain *em_pd_get(struct device *dev) +{ + return NULL; +} static inline unsigned long em_pd_energy(struct em_perf_domain *pd, unsigned long max_util, unsigned long sum_util) { diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c index 5b8a1566526a..32d76e78f992 100644 --- a/kernel/power/energy_model.c +++ b/kernel/power/energy_model.c @@ -1,9 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Energy Model of CPUs + * Energy Model of devices * - * Copyright (c) 2018, Arm ltd. + * Copyright (c) 2018-2020, Arm ltd. * Written by: Quentin Perret, Arm ltd. + * Improvements provided by: Lukasz Luba, Arm ltd. */ #define pr_fmt(fmt) "energy_model: " fmt @@ -15,15 +16,17 @@ #include #include -/* Mapping of each CPU to the performance domain to which it belongs. */ -static DEFINE_PER_CPU(struct em_perf_domain *, em_data); - /* * Mutex serializing the registrations of performance domains and letting * callbacks defined by drivers sleep. */ static DEFINE_MUTEX(em_pd_mutex); +static bool _is_cpu_device(struct device *dev) +{ + return (dev->bus == &cpu_subsys); +} + #ifdef CONFIG_DEBUG_FS static struct dentry *rootdir; @@ -49,22 +52,30 @@ static int em_debug_cpus_show(struct seq_file *s, void *unused) } DEFINE_SHOW_ATTRIBUTE(em_debug_cpus); -static void em_debug_create_pd(struct em_perf_domain *pd, int cpu) +static void em_debug_create_pd(struct device *dev) { struct dentry *d; - char name[8]; int i; - snprintf(name, sizeof(name), "pd%d", cpu); - /* Create the directory of the performance domain */ - d = debugfs_create_dir(name, rootdir); + d = debugfs_create_dir(dev_name(dev), rootdir); - debugfs_create_file("cpus", 0444, d, pd->cpus, &em_debug_cpus_fops); + if (_is_cpu_device(dev)) + debugfs_create_file("cpus", 0444, d, dev->em_pd->cpus, + &em_debug_cpus_fops); /* Create a sub-directory for each performance state */ - for (i = 0; i < pd->nr_perf_states; i++) - em_debug_create_ps(&pd->table[i], d); + for (i = 0; i < dev->em_pd->nr_perf_states; i++) + em_debug_create_ps(&dev->em_pd->table[i], d); + +} + +static void em_debug_remove_pd(struct device *dev) +{ + struct dentry *debug_dir; + + debug_dir = debugfs_lookup(dev_name(dev), rootdir); + debugfs_remove_recursive(debug_dir); } static int __init em_debug_init(void) @@ -76,40 +87,34 @@ static int __init em_debug_init(void) } core_initcall(em_debug_init); #else /* CONFIG_DEBUG_FS */ -static void em_debug_create_pd(struct em_perf_domain *pd, int cpu) {} +static void em_debug_create_pd(struct device *dev) {} +static void em_debug_remove_pd(struct device *dev) {} #endif -static struct em_perf_domain * -em_create_pd(struct device *dev, int nr_states, struct em_data_callback *cb, - cpumask_t *span) + +static int em_create_perf_table(struct device *dev, struct em_perf_domain *pd, + int nr_states, struct em_data_callback *cb) { unsigned long opp_eff, prev_opp_eff = ULONG_MAX; unsigned long power, freq, prev_freq = 0; - int i, ret, cpu = cpumask_first(span); struct em_perf_state *table; - struct em_perf_domain *pd; + int i, ret; u64 fmax; - if (!cb->active_power) - return NULL; - - pd = kzalloc(sizeof(*pd) + cpumask_size(), GFP_KERNEL); - if (!pd) - return NULL; - table = kcalloc(nr_states, sizeof(*table), GFP_KERNEL); if (!table) - goto 
free_pd; + return -ENOMEM; /* Build the list of performance states for this performance domain */ for (i = 0, freq = 0; i < nr_states; i++, freq++) { /* * active_power() is a driver callback which ceils 'freq' to - * lowest performance state of 'cpu' above 'freq' and updates + * lowest performance state of 'dev' above 'freq' and updates * 'power' and 'freq' accordingly. */ ret = cb->active_power(&power, &freq, dev); if (ret) { - pr_err("pd%d: invalid perf. state: %d\n", cpu, ret); + dev_err(dev, "EM: invalid perf. state: %d\n", + ret); goto free_ps_table; } @@ -118,7 +123,8 @@ em_create_pd(struct device *dev, int nr_states, struct em_data_callback *cb, * higher performance states. */ if (freq <= prev_freq) { - pr_err("pd%d: non-increasing freq: %lu\n", cpu, freq); + dev_err(dev, "EM: non-increasing freq: %lu\n", + freq); goto free_ps_table; } @@ -127,7 +133,8 @@ em_create_pd(struct device *dev, int nr_states, struct em_data_callback *cb, * positive, in milli-watts and to fit into 16 bits. */ if (!power || power > EM_MAX_POWER) { - pr_err("pd%d: invalid power: %lu\n", cpu, power); + dev_err(dev, "EM: invalid power: %lu\n", + power); goto free_ps_table; } @@ -142,8 +149,8 @@ em_create_pd(struct device *dev, int nr_states, struct em_data_callback *cb, */ opp_eff = freq / power; if (opp_eff >= prev_opp_eff) - pr_warn("pd%d: hertz/watts ratio non-monotonically decreasing: em_perf_state %d >= em_perf_state%d\n", - cpu, i, i - 1); + dev_dbg(dev, "EM: hertz/watts ratio non-monotonically decreasing: em_perf_state %d >= em_perf_state%d\n", + i, i - 1); prev_opp_eff = opp_eff; } @@ -156,30 +163,82 @@ em_create_pd(struct device *dev, int nr_states, struct em_data_callback *cb, pd->table = table; pd->nr_perf_states = nr_states; - cpumask_copy(to_cpumask(pd->cpus), span); - em_debug_create_pd(pd, cpu); - - return pd; + return 0; free_ps_table: kfree(table); -free_pd: - kfree(pd); + return -EINVAL; +} + +static int em_create_pd(struct device *dev, int nr_states, + struct em_data_callback *cb, cpumask_t *cpus) +{ + struct em_perf_domain *pd; + struct device *cpu_dev; + int cpu, ret; + + if (_is_cpu_device(dev)) { + pd = kzalloc(sizeof(*pd) + cpumask_size(), GFP_KERNEL); + if (!pd) + return -ENOMEM; + + cpumask_copy(em_span_cpus(pd), cpus); + } else { + pd = kzalloc(sizeof(*pd), GFP_KERNEL); + if (!pd) + return -ENOMEM; + } + + ret = em_create_perf_table(dev, pd, nr_states, cb); + if (ret) { + kfree(pd); + return ret; + } + + if (_is_cpu_device(dev)) + for_each_cpu(cpu, cpus) { + cpu_dev = get_cpu_device(cpu); + cpu_dev->em_pd = pd; + } + + dev->em_pd = pd; + + return 0; +} + +/** + * em_pd_get() - Return the performance domain for a device + * @dev : Device to find the performance domain for + * + * Returns the performance domain to which @dev belongs, or NULL if it doesn't + * exist. + */ +struct em_perf_domain *em_pd_get(struct device *dev) +{ + if (IS_ERR_OR_NULL(dev)) + return NULL; - return NULL; + return dev->em_pd; } +EXPORT_SYMBOL_GPL(em_pd_get); /** * em_cpu_get() - Return the performance domain for a CPU * @cpu : CPU to find the performance domain for * - * Return: the performance domain to which 'cpu' belongs, or NULL if it doesn't + * Returns the performance domain to which @cpu belongs, or NULL if it doesn't * exist. 
*/ struct em_perf_domain *em_cpu_get(int cpu) { - return READ_ONCE(per_cpu(em_data, cpu)); + struct device *cpu_dev; + + cpu_dev = get_cpu_device(cpu); + if (!cpu_dev) + return NULL; + + return em_pd_get(cpu_dev); } EXPORT_SYMBOL_GPL(em_cpu_get); @@ -188,7 +247,7 @@ EXPORT_SYMBOL_GPL(em_cpu_get); * @dev : Device for which the EM is to register * @nr_states : Number of performance states to register * @cb : Callback functions providing the data of the Energy Model - * @span : Pointer to cpumask_t, which in case of a CPU device is + * @cpus : Pointer to cpumask_t, which in case of a CPU device is * obligatory. It can be taken from i.e. 'policy->cpus'. For other * type of devices this should be set to NULL. * @@ -201,13 +260,12 @@ EXPORT_SYMBOL_GPL(em_cpu_get); * Return 0 on success */ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, - struct em_data_callback *cb, cpumask_t *span) + struct em_data_callback *cb, cpumask_t *cpus) { unsigned long cap, prev_cap = 0; - struct em_perf_domain *pd; - int cpu, ret = 0; + int cpu, ret; - if (!dev || !span || !nr_states || !cb) + if (!dev || !nr_states || !cb) return -EINVAL; /* @@ -216,47 +274,50 @@ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, */ mutex_lock(&em_pd_mutex); - for_each_cpu(cpu, span) { - /* Make sure we don't register again an existing domain. */ - if (READ_ONCE(per_cpu(em_data, cpu))) { - ret = -EEXIST; - goto unlock; - } + if (dev->em_pd) { + ret = -EEXIST; + goto unlock; + } - /* - * All CPUs of a domain must have the same micro-architecture - * since they all share the same table. - */ - cap = arch_scale_cpu_capacity(cpu); - if (prev_cap && prev_cap != cap) { - pr_err("CPUs of %*pbl must have the same capacity\n", - cpumask_pr_args(span)); + if (_is_cpu_device(dev)) { + if (!cpus) { + dev_err(dev, "EM: invalid CPU mask\n"); ret = -EINVAL; goto unlock; } - prev_cap = cap; + + for_each_cpu(cpu, cpus) { + if (em_cpu_get(cpu)) { + dev_err(dev, "EM: exists for CPU%d\n", cpu); + ret = -EEXIST; + goto unlock; + } + /* + * All CPUs of a domain must have the same + * micro-architecture since they all share the same + * table. + */ + cap = arch_scale_cpu_capacity(cpu); + if (prev_cap && prev_cap != cap) { + dev_err(dev, "EM: CPUs of %*pbl must have the same capacity\n", + cpumask_pr_args(cpus)); + + ret = -EINVAL; + goto unlock; + } + prev_cap = cap; + } } - /* Create the performance domain and add it to the Energy Model. */ - pd = em_create_pd(dev, nr_states, cb, span); - if (!pd) { - ret = -EINVAL; + ret = em_create_pd(dev, nr_states, cb, cpus); + if (ret) goto unlock; - } - for_each_cpu(cpu, span) { - /* - * The per-cpu array can be read concurrently from em_cpu_get(). - * The barrier enforces the ordering needed to make sure readers - * can only access well formed em_perf_domain structs. 
- */ - smp_store_release(per_cpu_ptr(&em_data, cpu), pd); - } + em_debug_create_pd(dev); + dev_info(dev, "EM: created perf domain\n"); - pr_debug("Created perf domain %*pbl\n", cpumask_pr_args(span)); unlock: mutex_unlock(&em_pd_mutex); - return ret; } EXPORT_SYMBOL_GPL(em_dev_register_perf_domain); @@ -285,3 +346,32 @@ int em_register_perf_domain(cpumask_t *span, unsigned int nr_states, return em_dev_register_perf_domain(cpu_dev, nr_states, cb, span); } EXPORT_SYMBOL_GPL(em_register_perf_domain); + +/** + * em_dev_unregister_perf_domain() - Unregister Energy Model (EM) for a device + * @dev : Device for which the EM is registered + * + * Unregister the EM for the specified @dev (but not a CPU device). + */ +void em_dev_unregister_perf_domain(struct device *dev) +{ + if (IS_ERR_OR_NULL(dev) || !dev->em_pd) + return; + + if (_is_cpu_device(dev)) + return; + + /* + * The mutex separates all register/unregister requests and protects + * from potential clean-up/setup issues in the debugfs directories. + * The debugfs directory name is the same as device's name. + */ + mutex_lock(&em_pd_mutex); + em_debug_remove_pd(dev); + + kfree(dev->em_pd->table); + kfree(dev->em_pd); + dev->em_pd = NULL; + mutex_unlock(&em_pd_mutex); +} +EXPORT_SYMBOL_GPL(em_dev_unregister_perf_domain); -- cgit v1.2.3 From 07891f15d91317b2220a0b610a2d7e324a88105d Mon Sep 17 00:00:00 2001 From: Lukasz Luba Date: Wed, 27 May 2020 10:58:51 +0100 Subject: PM / EM: remove em_register_perf_domain Remove old function em_register_perf_domain which is no longer needed. There is em_dev_register_perf_domain that covers old use cases and new as well. Acked-by: Daniel Lezcano Acked-by: Quentin Perret Signed-off-by: Lukasz Luba Signed-off-by: Rafael J. Wysocki --- include/linux/energy_model.h | 7 ------- kernel/power/energy_model.c | 25 ------------------------- 2 files changed, 32 deletions(-) (limited to 'include') diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index 2d4689964029..0f94e871a202 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -78,8 +78,6 @@ struct em_data_callback { struct em_perf_domain *em_cpu_get(int cpu); struct em_perf_domain *em_pd_get(struct device *dev); -int em_register_perf_domain(cpumask_t *span, unsigned int nr_states, - struct em_data_callback *cb); int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, struct em_data_callback *cb, cpumask_t *span); void em_dev_unregister_perf_domain(struct device *dev); @@ -181,11 +179,6 @@ static inline int em_pd_nr_perf_states(struct em_perf_domain *pd) struct em_data_callback {}; #define EM_DATA_CB(_active_power_cb) { } -static inline int em_register_perf_domain(cpumask_t *span, - unsigned int nr_states, struct em_data_callback *cb) -{ - return -EINVAL; -} static inline int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, struct em_data_callback *cb, cpumask_t *span) diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c index 32d76e78f992..c1ff7fa030ab 100644 --- a/kernel/power/energy_model.c +++ b/kernel/power/energy_model.c @@ -322,31 +322,6 @@ unlock: } EXPORT_SYMBOL_GPL(em_dev_register_perf_domain); -/** - * em_register_perf_domain() - Register the Energy Model of a performance domain - * @span : Mask of CPUs in the performance domain - * @nr_states : Number of capacity states to register - * @cb : Callback functions providing the data of the Energy Model - * - * Create Energy Model tables for a performance domain using the callbacks - * defined in 
cb. - * - * If multiple clients register the same performance domain, all but the first - * registration will be ignored. - * - * Return 0 on success - */ -int em_register_perf_domain(cpumask_t *span, unsigned int nr_states, - struct em_data_callback *cb) -{ - struct device *cpu_dev; - - cpu_dev = get_cpu_device(cpumask_first(span)); - - return em_dev_register_perf_domain(cpu_dev, nr_states, cb, span); -} -EXPORT_SYMBOL_GPL(em_register_perf_domain); - /** * em_dev_unregister_perf_domain() - Unregister Energy Model (EM) for a device * @dev : Device for which the EM is registered -- cgit v1.2.3 From f0b5694791ce70dba16758c3b838d5ddc7731b02 Mon Sep 17 00:00:00 2001 From: Lukasz Luba Date: Wed, 27 May 2020 10:58:52 +0100 Subject: PM / EM: change name of em_pd_energy to em_cpu_energy Energy Model framework now supports other devices than CPUs. Refactor some of the functions in order to prevent wrong usage. The old function em_pd_energy has to generic name. It must not be used without proper cpumask pointer, which is possible only for CPU devices. Thus, rename it and add proper description to warn of potential wrong usage for other devices. Acked-by: Daniel Lezcano Acked-by: Quentin Perret Signed-off-by: Lukasz Luba Signed-off-by: Rafael J. Wysocki --- include/linux/energy_model.h | 11 ++++++++--- kernel/sched/fair.c | 2 +- 2 files changed, 9 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index 0f94e871a202..b67a51c574b9 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -83,15 +83,20 @@ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, void em_dev_unregister_perf_domain(struct device *dev); /** - * em_pd_energy() - Estimates the energy consumed by the CPUs of a perf. domain + * em_cpu_energy() - Estimates the energy consumed by the CPUs of a + performance domain * @pd : performance domain for which energy has to be estimated * @max_util : highest utilization among CPUs of the domain * @sum_util : sum of the utilization of all CPUs in the domain * + * This function must be used only for CPU devices. There is no validation, + * i.e. if the EM is a CPU type and has cpumask allocated. It is called from + * the scheduler code quite frequently and that is why there is not checks. + * * Return: the sum of the energy consumed by the CPUs of the domain assuming * a capacity state satisfying the max utilization of the domain. 
*/ -static inline unsigned long em_pd_energy(struct em_perf_domain *pd, +static inline unsigned long em_cpu_energy(struct em_perf_domain *pd, unsigned long max_util, unsigned long sum_util) { unsigned long freq, scale_cpu; @@ -196,7 +201,7 @@ static inline struct em_perf_domain *em_pd_get(struct device *dev) { return NULL; } -static inline unsigned long em_pd_energy(struct em_perf_domain *pd, +static inline unsigned long em_cpu_energy(struct em_perf_domain *pd, unsigned long max_util, unsigned long sum_util) { return 0; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index cbcb2f71599b..6da601c8d383 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6497,7 +6497,7 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd) max_util = max(max_util, cpu_util); } - return em_pd_energy(pd->em_pd, max_util, sum_util); + return em_cpu_energy(pd->em_pd, max_util, sum_util); } /* -- cgit v1.2.3 From 0e0ffa855d1590e54ec0033404a49e2e57e294fe Mon Sep 17 00:00:00 2001 From: Lukasz Luba Date: Wed, 27 May 2020 10:58:54 +0100 Subject: OPP: refactor dev_pm_opp_of_register_em() and update related drivers The Energy Model framework supports not only CPU devices. Drop the CPU specific interface with cpumask and add struct device. Add also a return value, user might use it. This new interface provides easy way to create a simple Energy Model, which then might be used by e.g. thermal subsystem. Acked-by: Daniel Lezcano Signed-off-by: Lukasz Luba Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cpufreq-dt.c | 2 +- drivers/cpufreq/imx6q-cpufreq.c | 2 +- drivers/cpufreq/mediatek-cpufreq.c | 2 +- drivers/cpufreq/omap-cpufreq.c | 2 +- drivers/cpufreq/qcom-cpufreq-hw.c | 2 +- drivers/cpufreq/scpi-cpufreq.c | 2 +- drivers/cpufreq/vexpress-spc-cpufreq.c | 2 +- drivers/opp/of.c | 71 +++++++++++++++++++++------------- include/linux/pm_opp.h | 15 ++++++- 9 files changed, 65 insertions(+), 35 deletions(-) (limited to 'include') diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index 79742bbd221f..944d7b45afe9 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -279,7 +279,7 @@ static int cpufreq_init(struct cpufreq_policy *policy) policy->cpuinfo.transition_latency = transition_latency; policy->dvfs_possible_from_any_cpu = true; - dev_pm_opp_of_register_em(policy->cpus); + dev_pm_opp_of_register_em(cpu_dev, policy->cpus); return 0; diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index fdb2ffffbd15..ef7b34c1fd2b 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c @@ -193,7 +193,7 @@ static int imx6q_cpufreq_init(struct cpufreq_policy *policy) policy->clk = clks[ARM].clk; cpufreq_generic_init(policy, freq_table, transition_latency); policy->suspend_freq = max_freq; - dev_pm_opp_of_register_em(policy->cpus); + dev_pm_opp_of_register_em(cpu_dev, policy->cpus); return 0; } diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c index 0c98dd08273d..7d1212c9b7c8 100644 --- a/drivers/cpufreq/mediatek-cpufreq.c +++ b/drivers/cpufreq/mediatek-cpufreq.c @@ -448,7 +448,7 @@ static int mtk_cpufreq_init(struct cpufreq_policy *policy) policy->driver_data = info; policy->clk = info->cpu_clk; - dev_pm_opp_of_register_em(policy->cpus); + dev_pm_opp_of_register_em(info->cpu_dev, policy->cpus); return 0; } diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index 8d14b42a8c6f..3694bb030df3 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ 
b/drivers/cpufreq/omap-cpufreq.c @@ -131,7 +131,7 @@ static int omap_cpu_init(struct cpufreq_policy *policy) /* FIXME: what's the actual transition time? */ cpufreq_generic_init(policy, freq_table, 300 * 1000); - dev_pm_opp_of_register_em(policy->cpus); + dev_pm_opp_of_register_em(mpu_dev, policy->cpus); return 0; } diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c index fc92a8842e25..0a04b6f03b9a 100644 --- a/drivers/cpufreq/qcom-cpufreq-hw.c +++ b/drivers/cpufreq/qcom-cpufreq-hw.c @@ -238,7 +238,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) goto error; } - dev_pm_opp_of_register_em(policy->cpus); + dev_pm_opp_of_register_em(cpu_dev, policy->cpus); policy->fast_switch_possible = true; diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index 20d1f85d5f5a..b0f5388b8854 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -167,7 +167,7 @@ static int scpi_cpufreq_init(struct cpufreq_policy *policy) policy->fast_switch_possible = false; - dev_pm_opp_of_register_em(policy->cpus); + dev_pm_opp_of_register_em(cpu_dev, policy->cpus); return 0; diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c index 83c85d3d67e3..4e8b1dee7c9a 100644 --- a/drivers/cpufreq/vexpress-spc-cpufreq.c +++ b/drivers/cpufreq/vexpress-spc-cpufreq.c @@ -450,7 +450,7 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy) policy->freq_table = freq_table[cur_cluster]; policy->cpuinfo.transition_latency = 1000000; /* 1 ms */ - dev_pm_opp_of_register_em(policy->cpus); + dev_pm_opp_of_register_em(cpu_dev, policy->cpus); if (is_bL_switching_enabled()) per_cpu(cpu_last_req_freq, policy->cpu) = diff --git a/drivers/opp/of.c b/drivers/opp/of.c index e273f419a4bf..4aa42739599e 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -1205,18 +1205,18 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node); /* * Callback function provided to the Energy Model framework upon registration. - * This computes the power estimated by @CPU at @kHz if it is the frequency + * This computes the power estimated by @dev at @kHz if it is the frequency * of an existing OPP, or at the frequency of the first OPP above @kHz otherwise * (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled * frequency and @mW to the associated power. The power is estimated as - * P = C * V^2 * f with C being the CPU's capacitance and V and f respectively - * the voltage and frequency of the OPP. + * P = C * V^2 * f with C being the device's capacitance and V and f + * respectively the voltage and frequency of the OPP. * - * Returns -ENODEV if the CPU device cannot be found, -EINVAL if the power - * calculation failed because of missing parameters, 0 otherwise. + * Returns -EINVAL if the power calculation failed because of missing + * parameters, 0 otherwise. 
*/ -static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz, - struct device *cpu_dev) +static int __maybe_unused _get_power(unsigned long *mW, unsigned long *kHz, + struct device *dev) { struct dev_pm_opp *opp; struct device_node *np; @@ -1225,7 +1225,7 @@ static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz, u64 tmp; int ret; - np = of_node_get(cpu_dev->of_node); + np = of_node_get(dev->of_node); if (!np) return -EINVAL; @@ -1235,7 +1235,7 @@ static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz, return -EINVAL; Hz = *kHz * 1000; - opp = dev_pm_opp_find_freq_ceil(cpu_dev, &Hz); + opp = dev_pm_opp_find_freq_ceil(dev, &Hz); if (IS_ERR(opp)) return -EINVAL; @@ -1255,30 +1255,38 @@ static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz, /** * dev_pm_opp_of_register_em() - Attempt to register an Energy Model - * @cpus : CPUs for which an Energy Model has to be registered + * @dev : Device for which an Energy Model has to be registered + * @cpus : CPUs for which an Energy Model has to be registered. For + * other type of devices it should be set to NULL. * * This checks whether the "dynamic-power-coefficient" devicetree property has * been specified, and tries to register an Energy Model with it if it has. + * Having this property means the voltages are known for OPPs and the EM + * might be calculated. */ -void dev_pm_opp_of_register_em(struct cpumask *cpus) +int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus) { - struct em_data_callback em_cb = EM_DATA_CB(_get_cpu_power); - int ret, nr_opp, cpu = cpumask_first(cpus); - struct device *cpu_dev; + struct em_data_callback em_cb = EM_DATA_CB(_get_power); struct device_node *np; + int ret, nr_opp; u32 cap; - cpu_dev = get_cpu_device(cpu); - if (!cpu_dev) - return; + if (IS_ERR_OR_NULL(dev)) { + ret = -EINVAL; + goto failed; + } - nr_opp = dev_pm_opp_get_opp_count(cpu_dev); - if (nr_opp <= 0) - return; + nr_opp = dev_pm_opp_get_opp_count(dev); + if (nr_opp <= 0) { + ret = -EINVAL; + goto failed; + } - np = of_node_get(cpu_dev->of_node); - if (!np) - return; + np = of_node_get(dev->of_node); + if (!np) { + ret = -EINVAL; + goto failed; + } /* * Register an EM only if the 'dynamic-power-coefficient' property is @@ -1289,9 +1297,20 @@ void dev_pm_opp_of_register_em(struct cpumask *cpus) */ ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap); of_node_put(np); - if (ret || !cap) - return; + if (ret || !cap) { + dev_dbg(dev, "Couldn't find proper 'dynamic-power-coefficient' in DT\n"); + ret = -EINVAL; + goto failed; + } - em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, cpus); + ret = em_dev_register_perf_domain(dev, nr_opp, &em_cb, cpus); + if (ret) + goto failed; + + return 0; + +failed: + dev_dbg(dev, "Couldn't register Energy Model %d\n", ret); + return ret; } EXPORT_SYMBOL_GPL(dev_pm_opp_of_register_em); diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index d5c4a329321d..ee34c553f6bf 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -11,6 +11,7 @@ #ifndef __LINUX_OPP_H__ #define __LINUX_OPP_H__ +#include #include #include @@ -373,7 +374,11 @@ struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev); struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp); int of_get_required_opp_performance_state(struct device_node *np, int index); int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table); -void dev_pm_opp_of_register_em(struct cpumask 
*cpus); +int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus); +static inline void dev_pm_opp_of_unregister_em(struct device *dev) +{ + em_dev_unregister_perf_domain(dev); +} #else static inline int dev_pm_opp_of_add_table(struct device *dev) { @@ -413,7 +418,13 @@ static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp) return NULL; } -static inline void dev_pm_opp_of_register_em(struct cpumask *cpus) +static inline int dev_pm_opp_of_register_em(struct device *dev, + struct cpumask *cpus) +{ + return -ENOTSUPP; +} + +static inline void dev_pm_opp_of_unregister_em(struct device *dev) { } -- cgit v1.2.3 From dfde1d7dee9bfd095a4f16c9e0579a10f4092e81 Mon Sep 17 00:00:00 2001 From: Dmitry Yakunin Date: Sat, 20 Jun 2020 18:30:50 +0300 Subject: sock: Move sock_valbool_flag to header This is preparation for usage in bpf_setsockopt. Signed-off-by: Dmitry Yakunin Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200620153052.9439-1-zeil@yandex-team.ru --- include/net/sock.h | 9 +++++++++ net/core/sock.c | 9 --------- 2 files changed, 9 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/net/sock.h b/include/net/sock.h index c53cc42b5ab9..8ba438b671d7 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -879,6 +879,15 @@ static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag) __clear_bit(flag, &sk->sk_flags); } +static inline void sock_valbool_flag(struct sock *sk, enum sock_flags bit, + int valbool) +{ + if (valbool) + sock_set_flag(sk, bit); + else + sock_reset_flag(sk, bit); +} + static inline bool sock_flag(const struct sock *sk, enum sock_flags flag) { return test_bit(flag, &sk->sk_flags); diff --git a/net/core/sock.c b/net/core/sock.c index 6c4acf1f0220..5ba4753bc04d 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -695,15 +695,6 @@ out: return ret; } -static inline void sock_valbool_flag(struct sock *sk, enum sock_flags bit, - int valbool) -{ - if (valbool) - sock_set_flag(sk, bit); - else - sock_reset_flag(sk, bit); -} - bool sk_mc_loop(struct sock *sk) { if (dev_recursion_level()) -- cgit v1.2.3 From aad4a0a9513af962137c4842463d11ed491eec37 Mon Sep 17 00:00:00 2001 From: Dmitry Yakunin Date: Sat, 20 Jun 2020 18:30:51 +0300 Subject: tcp: Expose tcp_sock_set_keepidle_locked This is preparation for usage in bpf_setsockopt. 
v2: - remove redundant EXPORT_SYMBOL (Alexei Starovoitov) Signed-off-by: Dmitry Yakunin Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200620153052.9439-2-zeil@yandex-team.ru --- include/linux/tcp.h | 1 + net/ipv4/tcp.c | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 9aac824c523c..3bdec31ce8f4 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -499,6 +499,7 @@ int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount, void tcp_sock_set_cork(struct sock *sk, bool on); int tcp_sock_set_keepcnt(struct sock *sk, int val); +int tcp_sock_set_keepidle_locked(struct sock *sk, int val); int tcp_sock_set_keepidle(struct sock *sk, int val); int tcp_sock_set_keepintvl(struct sock *sk, int val); void tcp_sock_set_nodelay(struct sock *sk); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 810cc164f795..de36c91d32ea 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2957,7 +2957,7 @@ void tcp_sock_set_user_timeout(struct sock *sk, u32 val) } EXPORT_SYMBOL(tcp_sock_set_user_timeout); -static int __tcp_sock_set_keepidle(struct sock *sk, int val) +int tcp_sock_set_keepidle_locked(struct sock *sk, int val) { struct tcp_sock *tp = tcp_sk(sk); @@ -2984,7 +2984,7 @@ int tcp_sock_set_keepidle(struct sock *sk, int val) int err; lock_sock(sk); - err = __tcp_sock_set_keepidle(sk, val); + err = tcp_sock_set_keepidle_locked(sk, val); release_sock(sk); return err; } @@ -3183,7 +3183,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, break; case TCP_KEEPIDLE: - err = __tcp_sock_set_keepidle(sk, val); + err = tcp_sock_set_keepidle_locked(sk, val); break; case TCP_KEEPINTVL: if (val < 1 || val > MAX_TCP_KEEPINTVL) -- cgit v1.2.3 From f9bcf96837f158db6ea982d15cd2c8161ca6bc23 Mon Sep 17 00:00:00 2001 From: Dmitry Yakunin Date: Sat, 20 Jun 2020 18:30:52 +0300 Subject: bpf: Add SO_KEEPALIVE and related options to bpf_setsockopt This patch adds support of SO_KEEPALIVE flag and TCP related options to bpf_setsockopt() routine. This is helpful if we want to enable or tune TCP keepalive for applications which don't do it in the userspace code. v3: - update kernel-doc in uapi (Nikita Vetoshkin ) v4: - update kernel-doc in tools too (Alexei Starovoitov) - add test to selftests (Alexei Starovoitov) Signed-off-by: Dmitry Yakunin Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200620153052.9439-3-zeil@yandex-team.ru --- include/uapi/linux/bpf.h | 7 +++-- net/core/filter.c | 36 ++++++++++++++++++++++- tools/include/uapi/linux/bpf.h | 7 +++-- tools/testing/selftests/bpf/progs/connect4_prog.c | 27 +++++++++++++++++ 4 files changed, 72 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 9d3923e6b860..d9737d51dd19 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1621,10 +1621,13 @@ union bpf_attr { * * * **SOL_SOCKET**, which supports the following *optname*\ s: * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**, - * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**. + * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**, + * **SO_BINDTODEVICE**, **SO_KEEPALIVE**. * * **IPPROTO_TCP**, which supports the following *optname*\ s: * **TCP_CONGESTION**, **TCP_BPF_IW**, - * **TCP_BPF_SNDCWND_CLAMP**. + * **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**, + * **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**, + * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**. 
* * **IPPROTO_IP**, which supports *optname* **IP_TOS**. * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. * Return diff --git a/net/core/filter.c b/net/core/filter.c index 73395384afe2..c713b6b8938f 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -4289,10 +4289,10 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen, u32 flags) { char devname[IFNAMSIZ]; + int val, valbool; struct net *net; int ifindex; int ret = 0; - int val; if (!sk_fullsock(sk)) return -EINVAL; @@ -4303,6 +4303,7 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname, if (optlen != sizeof(int) && optname != SO_BINDTODEVICE) return -EINVAL; val = *((int *)optval); + valbool = val ? 1 : 0; /* Only some socketops are supported */ switch (optname) { @@ -4361,6 +4362,11 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname, } ret = sock_bindtoindex(sk, ifindex, false); break; + case SO_KEEPALIVE: + if (sk->sk_prot->keepalive) + sk->sk_prot->keepalive(sk, valbool); + sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); + break; default: ret = -EINVAL; } @@ -4421,6 +4427,7 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname, ret = tcp_set_congestion_control(sk, name, false, reinit, true); } else { + struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); if (optlen != sizeof(int)) @@ -4449,6 +4456,33 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname, else tp->save_syn = val; break; + case TCP_KEEPIDLE: + ret = tcp_sock_set_keepidle_locked(sk, val); + break; + case TCP_KEEPINTVL: + if (val < 1 || val > MAX_TCP_KEEPINTVL) + ret = -EINVAL; + else + tp->keepalive_intvl = val * HZ; + break; + case TCP_KEEPCNT: + if (val < 1 || val > MAX_TCP_KEEPCNT) + ret = -EINVAL; + else + tp->keepalive_probes = val; + break; + case TCP_SYNCNT: + if (val < 1 || val > MAX_TCP_SYNCNT) + ret = -EINVAL; + else + icsk->icsk_syn_retries = val; + break; + case TCP_USER_TIMEOUT: + if (val < 0) + ret = -EINVAL; + else + icsk->icsk_user_timeout = val; + break; default: ret = -EINVAL; } diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 9d3923e6b860..d9737d51dd19 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1621,10 +1621,13 @@ union bpf_attr { * * * **SOL_SOCKET**, which supports the following *optname*\ s: * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**, - * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**. + * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**, + * **SO_BINDTODEVICE**, **SO_KEEPALIVE**. * * **IPPROTO_TCP**, which supports the following *optname*\ s: * **TCP_CONGESTION**, **TCP_BPF_IW**, - * **TCP_BPF_SNDCWND_CLAMP**. + * **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**, + * **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**, + * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**. * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. 
* Return diff --git a/tools/testing/selftests/bpf/progs/connect4_prog.c b/tools/testing/selftests/bpf/progs/connect4_prog.c index 1ab2c5eba86c..b1b2773c0b9d 100644 --- a/tools/testing/selftests/bpf/progs/connect4_prog.c +++ b/tools/testing/selftests/bpf/progs/connect4_prog.c @@ -104,6 +104,30 @@ static __inline int bind_to_device(struct bpf_sock_addr *ctx) return 0; } +static __inline int set_keepalive(struct bpf_sock_addr *ctx) +{ + int zero = 0, one = 1; + + if (bpf_setsockopt(ctx, SOL_SOCKET, SO_KEEPALIVE, &one, sizeof(one))) + return 1; + if (ctx->type == SOCK_STREAM) { + if (bpf_setsockopt(ctx, SOL_TCP, TCP_KEEPIDLE, &one, sizeof(one))) + return 1; + if (bpf_setsockopt(ctx, SOL_TCP, TCP_KEEPINTVL, &one, sizeof(one))) + return 1; + if (bpf_setsockopt(ctx, SOL_TCP, TCP_KEEPCNT, &one, sizeof(one))) + return 1; + if (bpf_setsockopt(ctx, SOL_TCP, TCP_SYNCNT, &one, sizeof(one))) + return 1; + if (bpf_setsockopt(ctx, SOL_TCP, TCP_USER_TIMEOUT, &one, sizeof(one))) + return 1; + } + if (bpf_setsockopt(ctx, SOL_SOCKET, SO_KEEPALIVE, &zero, sizeof(zero))) + return 1; + + return 0; +} + SEC("cgroup/connect4") int connect_v4_prog(struct bpf_sock_addr *ctx) { @@ -121,6 +145,9 @@ int connect_v4_prog(struct bpf_sock_addr *ctx) if (bind_to_device(ctx)) return 0; + if (set_keepalive(ctx)) + return 0; + if (ctx->type != SOCK_STREAM && ctx->type != SOCK_DGRAM) return 0; else if (ctx->type == SOCK_STREAM) -- cgit v1.2.3 From 14c2b89634a28577a242cb34f339d981da6d3ef6 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 24 Jun 2020 13:54:21 +0300 Subject: RDMA/core: Delete not-used create RWQ table function The RWQ table is used for RSS uverbs and not in used for the kernel consumers, delete ib_create_rwq_ind_table() routine that is not called at all. Link: https://lore.kernel.org/r/20200624105422.1452290-5-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/verbs.c | 39 --------------------------------------- include/rdma/ib_verbs.h | 3 --- 2 files changed, 42 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index a3761d432a79..7232e6ec2e91 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -2409,45 +2409,6 @@ int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, } EXPORT_SYMBOL(ib_modify_wq); -/* - * ib_create_rwq_ind_table - Creates a RQ Indirection Table. - * @device: The device on which to create the rwq indirection table. - * @ib_rwq_ind_table_init_attr: A list of initial attributes required to - * create the Indirection Table. - * - * Note: The life time of ib_rwq_ind_table_init_attr->ind_tbl is not less - * than the created ib_rwq_ind_table object and the caller is responsible - * for its memory allocation/free. 
- */ -struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device, - struct ib_rwq_ind_table_init_attr *init_attr) -{ - struct ib_rwq_ind_table *rwq_ind_table; - int i; - u32 table_size; - - if (!device->ops.create_rwq_ind_table) - return ERR_PTR(-EOPNOTSUPP); - - table_size = (1 << init_attr->log_ind_tbl_size); - rwq_ind_table = device->ops.create_rwq_ind_table(device, - init_attr, NULL); - if (IS_ERR(rwq_ind_table)) - return rwq_ind_table; - - rwq_ind_table->ind_tbl = init_attr->ind_tbl; - rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size; - rwq_ind_table->device = device; - rwq_ind_table->uobject = NULL; - atomic_set(&rwq_ind_table->usecnt, 0); - - for (i = 0; i < table_size; i++) - atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt); - - return rwq_ind_table; -} -EXPORT_SYMBOL(ib_create_rwq_ind_table); - /* * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table. * @wq_ind_table: The Indirection Table to destroy. diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 77106ff3cd26..1e902a8f1713 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -4422,9 +4422,6 @@ struct ib_wq *ib_create_wq(struct ib_pd *pd, int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata); int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr, u32 wq_attr_mask); -struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device, - struct ib_rwq_ind_table_init_attr* - wq_ind_table_init_attr); int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table); int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, -- cgit v1.2.3 From 0ef44e5cab8dbf0a0327871b48fe7c8425d0d885 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Tue, 23 Jun 2020 16:30:07 +0200 Subject: net: phy: add support for a common probe between shared PHYs Shared PHYs (PHYs in the same hardware package) may have shared registers and their drivers would usually need to share information. There is currently a way to have a shared (part of the) init, by using phy_package_init_once(). This patch extends the logic to share parts of the probe to allow sharing the initialization of locks or resources retrieval. Signed-off-by: Antoine Tenart Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- include/linux/phy.h | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/phy.h b/include/linux/phy.h index 7860d56c6bf5..6fb8f302978d 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -244,7 +244,8 @@ struct phy_package_shared { }; /* used as bit number in atomic bitops */ -#define PHY_SHARED_F_INIT_DONE 0 +#define PHY_SHARED_F_INIT_DONE 0 +#define PHY_SHARED_F_PROBE_DONE 1 /* * The Bus class for PHYs. 
Devices which provide access to @@ -1566,14 +1567,25 @@ static inline int __phy_package_write(struct phy_device *phydev, return __mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val); } -static inline bool phy_package_init_once(struct phy_device *phydev) +static inline bool __phy_package_set_once(struct phy_device *phydev, + unsigned int b) { struct phy_package_shared *shared = phydev->shared; if (!shared) return false; - return !test_and_set_bit(PHY_SHARED_F_INIT_DONE, &shared->flags); + return !test_and_set_bit(b, &shared->flags); +} + +static inline bool phy_package_init_once(struct phy_device *phydev) +{ + return __phy_package_set_once(phydev, PHY_SHARED_F_INIT_DONE); +} + +static inline bool phy_package_probe_once(struct phy_device *phydev) +{ + return __phy_package_set_once(phydev, PHY_SHARED_F_PROBE_DONE); } extern struct bus_type mdio_bus_type; -- cgit v1.2.3 From 899426b3bdd947541ba4af8c767575889c8b842a Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Tue, 23 Jun 2020 23:47:16 +0300 Subject: net: neighbor: add fdb extended attribute Add an attribute to NDA which will contain all future fdb-specific attributes in order to avoid polluting the NDA namespace with e.g. bridge or vxlan specific attributes. The attribute is called NDA_FDB_EXT_ATTRS and the structure would look like: [NDA_FDB_EXT_ATTRS] = { [NFEA_xxx] } Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- include/uapi/linux/neighbour.h | 12 ++++++++++++ net/core/neighbour.c | 1 + 2 files changed, 13 insertions(+) (limited to 'include') diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h index eefcda8ca44e..540ff48402a1 100644 --- a/include/uapi/linux/neighbour.h +++ b/include/uapi/linux/neighbour.h @@ -30,6 +30,7 @@ enum { NDA_SRC_VNI, NDA_PROTOCOL, /* Originator of entry */ NDA_NH_ID, + NDA_FDB_EXT_ATTRS, __NDA_MAX }; @@ -172,4 +173,15 @@ enum { }; #define NDTA_MAX (__NDTA_MAX - 1) +/* embedded into NDA_FDB_EXT_ATTRS: + * [NDA_FDB_EXT_ATTRS] = { + * ... + * } + */ +enum { + NFEA_UNSPEC, + __NFEA_MAX +}; +#define NFEA_MAX (__NFEA_MAX - 1) + #endif diff --git a/net/core/neighbour.c b/net/core/neighbour.c index ef6b5a8f629c..8e39e28b0a8d 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1783,6 +1783,7 @@ const struct nla_policy nda_policy[NDA_MAX+1] = { [NDA_MASTER] = { .type = NLA_U32 }, [NDA_PROTOCOL] = { .type = NLA_U8 }, [NDA_NH_ID] = { .type = NLA_U32 }, + [NDA_FDB_EXT_ATTRS] = { .type = NLA_NESTED }, }; static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, -- cgit v1.2.3 From 31cbc39b6344916c20452e43a9171009214c409c Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Tue, 23 Jun 2020 23:47:17 +0300 Subject: net: bridge: add option to allow activity notifications for any fdb entries This patch adds the ability to notify about activity of any entries (static, permanent or ext_learn). EVPN multihoming peers need it to properly and efficiently handle mac sync (peer active/locally active). We add a new NFEA_ACTIVITY_NOTIFY attribute which is used to dump the current activity state and to control if static entries should be monitored at all. We use 2 bits - one to activate fdb entry tracking (disabled by default) and the second to denote that an entry is inactive. We need the second bit in order to avoid multiple notifications of inactivity. Obviously this makes no difference for dynamic entries since at the time of inactivity they get deleted, while the tracked non-dynamic entries get the inactive bit set and get a notification. 
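As an illustration (not part of this patch), a minimal userspace sketch of how a control-plane daemon could install a static fdb entry with activity tracking enabled and initially marked inactive. It assumes libmnl for message construction; the port ifindex, MAC address and VLAN id are placeholders:

#include <stdint.h>
#include <sys/socket.h>
#include <libmnl/libmnl.h>
#include <linux/rtnetlink.h>
#include <linux/neighbour.h>
#include <linux/if_ether.h>

static struct nlmsghdr *build_fdb_add(char *buf, int brport_ifindex,
				      const unsigned char *mac, uint16_t vid)
{
	struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
	struct nlattr *nest;
	struct ndmsg *ndm;

	nlh->nlmsg_type = RTM_NEWNEIGH;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_ACK;

	ndm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ndm));
	ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_ifindex = brport_ifindex;
	ndm->ndm_state = NUD_NOARP;	/* static entry */
	ndm->ndm_flags = NTF_MASTER;	/* processed by the bridge device */

	mnl_attr_put(nlh, NDA_LLADDR, ETH_ALEN, mac);
	mnl_attr_put_u16(nlh, NDA_VLAN, vid);

	/* fdb-specific attributes are nested under NDA_FDB_EXT_ATTRS */
	nest = mnl_attr_nest_start(nlh, NDA_FDB_EXT_ATTRS);
	mnl_attr_put_u8(nlh, NFEA_ACTIVITY_NOTIFY,
			FDB_NOTIFY_BIT | FDB_NOTIFY_INACTIVE_BIT);
	mnl_attr_nest_end(nlh, nest);

	return nlh;
}

Once such an entry sees traffic again, or stays idle past the ageing delay, the bridge is expected to send an RTM_NEWNEIGH notification whose NFEA_ACTIVITY_NOTIFY bits reflect the new activity state.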
Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- include/uapi/linux/neighbour.h | 11 ++++ net/bridge/br_fdb.c | 117 ++++++++++++++++++++++++++++++++++++----- net/bridge/br_private.h | 4 ++ 3 files changed, 119 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h index 540ff48402a1..21e569297355 100644 --- a/include/uapi/linux/neighbour.h +++ b/include/uapi/linux/neighbour.h @@ -173,13 +173,24 @@ enum { }; #define NDTA_MAX (__NDTA_MAX - 1) + /* FDB activity notification bits used in NFEA_ACTIVITY_NOTIFY: + * - FDB_NOTIFY_BIT - notify on activity/expire for any entry + * - FDB_NOTIFY_INACTIVE_BIT - mark as inactive to avoid multiple notifications + */ +enum { + FDB_NOTIFY_BIT = (1 << 0), + FDB_NOTIFY_INACTIVE_BIT = (1 << 1) +}; + /* embedded into NDA_FDB_EXT_ATTRS: * [NDA_FDB_EXT_ATTRS] = { + * [NFEA_ACTIVITY_NOTIFY] * ... * } */ enum { NFEA_UNSPEC, + NFEA_ACTIVITY_NOTIFY, __NFEA_MAX }; #define NFEA_MAX (__NFEA_MAX - 1) diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index ed80d9ab0fb9..642deb57c064 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -349,12 +349,21 @@ void br_fdb_cleanup(struct work_struct *work) */ rcu_read_lock(); hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) { - unsigned long this_timer; + unsigned long this_timer = f->updated + delay; if (test_bit(BR_FDB_STATIC, &f->flags) || - test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags)) + test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags)) { + if (test_bit(BR_FDB_NOTIFY, &f->flags)) { + if (time_after(this_timer, now)) + work_delay = min(work_delay, + this_timer - now); + else if (!test_and_set_bit(BR_FDB_NOTIFY_INACTIVE, + &f->flags)) + fdb_notify(br, f, RTM_NEWNEIGH, false); + } continue; - this_timer = f->updated + delay; + } + if (time_after(this_timer, now)) { work_delay = min(work_delay, this_timer - now); } else { @@ -556,11 +565,17 @@ int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source, return ret; } +/* returns true if the fdb was modified */ +static bool __fdb_mark_active(struct net_bridge_fdb_entry *fdb) +{ + return !!(test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags) && + test_and_clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags)); +} + void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, const unsigned char *addr, u16 vid, unsigned long flags) { struct net_bridge_fdb_entry *fdb; - bool fdb_modified = false; /* some users want to always flood. 
*/ if (hold_time(br) == 0) @@ -575,6 +590,12 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, source->dev->name, addr, vid); } else { unsigned long now = jiffies; + bool fdb_modified = false; + + if (now != fdb->updated) { + fdb->updated = now; + fdb_modified = __fdb_mark_active(fdb); + } /* fastpath: update of existing entry */ if (unlikely(source != fdb->dst && @@ -587,8 +608,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, clear_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags); } - if (now != fdb->updated) - fdb->updated = now; + if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags))) set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags); if (unlikely(fdb_modified)) { @@ -667,6 +687,23 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br, &fdb->key.vlan_id)) goto nla_put_failure; + if (test_bit(BR_FDB_NOTIFY, &fdb->flags)) { + struct nlattr *nest = nla_nest_start(skb, NDA_FDB_EXT_ATTRS); + u8 notify_bits = FDB_NOTIFY_BIT; + + if (!nest) + goto nla_put_failure; + if (test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags)) + notify_bits |= FDB_NOTIFY_INACTIVE_BIT; + + if (nla_put_u8(skb, NFEA_ACTIVITY_NOTIFY, notify_bits)) { + nla_nest_cancel(skb, nest); + goto nla_put_failure; + } + + nla_nest_end(skb, nest); + } + nlmsg_end(skb, nlh); return 0; @@ -681,7 +718,9 @@ static inline size_t fdb_nlmsg_size(void) + nla_total_size(ETH_ALEN) /* NDA_LLADDR */ + nla_total_size(sizeof(u32)) /* NDA_MASTER */ + nla_total_size(sizeof(u16)) /* NDA_VLAN */ - + nla_total_size(sizeof(struct nda_cacheinfo)); + + nla_total_size(sizeof(struct nda_cacheinfo)) + + nla_total_size(0) /* NDA_FDB_EXT_ATTRS */ + + nla_total_size(sizeof(u8)); /* NFEA_ACTIVITY_NOTIFY */ } static void fdb_notify(struct net_bridge *br, @@ -791,14 +830,40 @@ errout: return err; } +/* returns true if the fdb is modified */ +static bool fdb_handle_notify(struct net_bridge_fdb_entry *fdb, u8 notify) +{ + bool modified = false; + + /* allow to mark an entry as inactive, usually done on creation */ + if ((notify & FDB_NOTIFY_INACTIVE_BIT) && + !test_and_set_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags)) + modified = true; + + if ((notify & FDB_NOTIFY_BIT) && + !test_and_set_bit(BR_FDB_NOTIFY, &fdb->flags)) { + /* enabled activity tracking */ + modified = true; + } else if (!(notify & FDB_NOTIFY_BIT) && + test_and_clear_bit(BR_FDB_NOTIFY, &fdb->flags)) { + /* disabled activity tracking, clear notify state */ + clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags); + modified = true; + } + + return modified; +} + /* Update (create or replace) forwarding database entry */ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, - const u8 *addr, struct ndmsg *ndm, u16 flags, u16 vid) + const u8 *addr, struct ndmsg *ndm, u16 flags, u16 vid, + struct nlattr *nfea_tb[]) { bool is_sticky = !!(ndm->ndm_flags & NTF_STICKY); struct net_bridge_fdb_entry *fdb; u16 state = ndm->ndm_state; bool modified = false; + u8 notify = 0; /* If the port cannot learn allow only local and static entries */ if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) && @@ -815,6 +880,13 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, if (is_sticky && (state & NUD_PERMANENT)) return -EINVAL; + if (nfea_tb[NFEA_ACTIVITY_NOTIFY]) { + notify = nla_get_u8(nfea_tb[NFEA_ACTIVITY_NOTIFY]); + if ((notify & ~BR_FDB_NOTIFY_SETTABLE_BITS) || + (notify & BR_FDB_NOTIFY_SETTABLE_BITS) == FDB_NOTIFY_INACTIVE_BIT) + return -EINVAL; + } + fdb = br_fdb_find(br, addr, vid); if (fdb == 
NULL) { if (!(flags & NLM_F_CREATE)) @@ -858,6 +930,9 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, modified = true; } + if (fdb_handle_notify(fdb, notify)) + modified = true; + set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags); fdb->used = jiffies; @@ -871,7 +946,7 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br, struct net_bridge_port *p, const unsigned char *addr, - u16 nlh_flags, u16 vid) + u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[]) { int err = 0; @@ -893,19 +968,24 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br, err = br_fdb_external_learn_add(br, p, addr, vid, true); } else { spin_lock_bh(&br->hash_lock); - err = fdb_add_entry(br, p, addr, ndm, nlh_flags, vid); + err = fdb_add_entry(br, p, addr, ndm, nlh_flags, vid, nfea_tb); spin_unlock_bh(&br->hash_lock); } return err; } +static const struct nla_policy br_nda_fdb_pol[NFEA_MAX + 1] = { + [NFEA_ACTIVITY_NOTIFY] = { .type = NLA_U8 }, +}; + /* Add new permanent fdb entry with RTM_NEWNEIGH */ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, u16 nlh_flags, struct netlink_ext_ack *extack) { + struct nlattr *nfea_tb[NFEA_MAX + 1], *attr; struct net_bridge_vlan_group *vg; struct net_bridge_port *p = NULL; struct net_bridge_vlan *v; @@ -938,6 +1018,16 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], vg = nbp_vlan_group(p); } + if (tb[NDA_FDB_EXT_ATTRS]) { + attr = tb[NDA_FDB_EXT_ATTRS]; + err = nla_parse_nested(nfea_tb, NFEA_MAX, attr, + br_nda_fdb_pol, extack); + if (err) + return err; + } else { + memset(nfea_tb, 0, sizeof(struct nlattr *) * (NFEA_MAX + 1)); + } + if (vid) { v = br_vlan_find(vg, vid); if (!v || !br_vlan_should_use(v)) { @@ -946,9 +1036,9 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], } /* VID was specified, so use it. */ - err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid); + err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb); } else { - err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0); + err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb); if (err || !vg || !vg->num_vlans) goto out; @@ -959,7 +1049,8 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], list_for_each_entry(v, &vg->vlan_list, vlist) { if (!br_vlan_should_use(v)) continue; - err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid); + err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid, + nfea_tb); if (err) goto out; } diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 7501be4eeba0..c0ae639e1b36 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -48,6 +48,8 @@ enum { /* Path to usermode spanning tree program */ #define BR_STP_PROG "/sbin/bridge-stp" +#define BR_FDB_NOTIFY_SETTABLE_BITS (FDB_NOTIFY_BIT | FDB_NOTIFY_INACTIVE_BIT) + typedef struct bridge_id bridge_id; typedef struct mac_addr mac_addr; typedef __u16 port_id; @@ -184,6 +186,8 @@ enum { BR_FDB_ADDED_BY_USER, BR_FDB_ADDED_BY_EXT_LEARN, BR_FDB_OFFLOADED, + BR_FDB_NOTIFY, + BR_FDB_NOTIFY_INACTIVE }; struct net_bridge_fdb_key { -- cgit v1.2.3 From b5f1d9ec283bd28a452cf61d7e5c2f2b1a9cccda Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Tue, 23 Jun 2020 23:47:18 +0300 Subject: net: bridge: add a flag to avoid refreshing fdb when changing/adding When we modify or create a new fdb entry sometimes we want to avoid refreshing its activity in order to track it properly. 
One example is when a mac is received from EVPN multi-homing peer by FRR, which doesn't want to change local activity accounting. It makes it static and sets a flag to track its activity. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- include/uapi/linux/neighbour.h | 1 + net/bridge/br_fdb.c | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h index 21e569297355..dc8b72201f6c 100644 --- a/include/uapi/linux/neighbour.h +++ b/include/uapi/linux/neighbour.h @@ -191,6 +191,7 @@ enum { enum { NFEA_UNSPEC, NFEA_ACTIVITY_NOTIFY, + NFEA_DONT_REFRESH, __NFEA_MAX }; #define NFEA_MAX (__NFEA_MAX - 1) diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 642deb57c064..9db504baa094 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -860,6 +860,7 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, struct nlattr *nfea_tb[]) { bool is_sticky = !!(ndm->ndm_flags & NTF_STICKY); + bool refresh = !nfea_tb[NFEA_DONT_REFRESH]; struct net_bridge_fdb_entry *fdb; u16 state = ndm->ndm_state; bool modified = false; @@ -937,7 +938,8 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, fdb->used = jiffies; if (modified) { - fdb->updated = jiffies; + if (refresh) + fdb->updated = jiffies; fdb_notify(br, fdb, RTM_NEWNEIGH, true); } @@ -977,6 +979,7 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br, static const struct nla_policy br_nda_fdb_pol[NFEA_MAX + 1] = { [NFEA_ACTIVITY_NOTIFY] = { .type = NLA_U8 }, + [NFEA_DONT_REFRESH] = { .type = NLA_FLAG }, }; /* Add new permanent fdb entry with RTM_NEWNEIGH */ -- cgit v1.2.3 From b08d4d3b6c0460306e8a0608413b201705200d33 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 23 Jun 2020 16:08:04 -0700 Subject: net: bpf: Add bpf_seq_afinfo in tcp_iter_state A new field bpf_seq_afinfo is added to tcp_iter_state to provide bpf tcp iterator afinfo. There are two reasons on why we did this. First, the current way to get afinfo from PDE_DATA does not work for bpf iterator as its seq_file inode does not conform to /proc/net/{tcp,tcp6} inode structures. More specifically, anonymous bpf iterator will use an anonymous inode which is shared in the system and we cannot change inode private data structure at all. Second, bpf iterator for tcp/tcp6 wants to traverse all tcp and tcp6 sockets in one pass and bpf program can control whether they want to skip one sk_family or not. Having a different afinfo with family AF_UNSPEC make it easier to understand in the code. This patch does not change /proc/net/{tcp,tcp6} behavior as the bpf_seq_afinfo will be NULL for these two proc files. 
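For context, a hypothetical sketch of the consumer this enables: a BPF tcp iterator program that does the family filtering itself instead of relying on afinfo. The bpf_iter__tcp context and the "iter/tcp" attach target are introduced by follow-up patches in this series and are assumed here:

/* SPDX-License-Identifier: GPL-2.0 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

#define AF_INET6 10

long tcp6_seen = 0;

SEC("iter/tcp")
int count_tcp6(struct bpf_iter__tcp *ctx)
{
	struct sock_common *skc = ctx->sk_common;

	if (!skc)
		return 0;

	/* the iterator visits every family (afinfo->family == AF_UNSPEC),
	 * so the program itself decides which sockets to skip
	 */
	if (skc->skc_family != AF_INET6)
		return 0;

	__sync_fetch_and_add(&tcp6_seen, 1);
	return 0;
}

char _license[] SEC("license") = "GPL";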
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200623230804.3987829-1-yhs@fb.com --- include/net/tcp.h | 1 + net/ipv4/tcp_ipv4.c | 30 ++++++++++++++++++++++++------ 2 files changed, 25 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/net/tcp.h b/include/net/tcp.h index 4de9485f73d9..eab1c7d0facb 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1935,6 +1935,7 @@ struct tcp_iter_state { struct seq_net_private p; enum tcp_seq_states state; struct sock *syn_wait_sk; + struct tcp_seq_afinfo *bpf_seq_afinfo; int bucket, offset, sbucket, num; loff_t last_pos; }; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index ad6435ba6d72..9cb65ee4ec63 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2211,13 +2211,18 @@ EXPORT_SYMBOL(tcp_v4_destroy_sock); */ static void *listening_get_next(struct seq_file *seq, void *cur) { - struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file)); + struct tcp_seq_afinfo *afinfo; struct tcp_iter_state *st = seq->private; struct net *net = seq_file_net(seq); struct inet_listen_hashbucket *ilb; struct hlist_nulls_node *node; struct sock *sk = cur; + if (st->bpf_seq_afinfo) + afinfo = st->bpf_seq_afinfo; + else + afinfo = PDE_DATA(file_inode(seq->file)); + if (!sk) { get_head: ilb = &tcp_hashinfo.listening_hash[st->bucket]; @@ -2235,7 +2240,8 @@ get_sk: sk_nulls_for_each_from(sk, node) { if (!net_eq(sock_net(sk), net)) continue; - if (sk->sk_family == afinfo->family) + if (afinfo->family == AF_UNSPEC || + sk->sk_family == afinfo->family) return sk; } spin_unlock(&ilb->lock); @@ -2272,11 +2278,16 @@ static inline bool empty_bucket(const struct tcp_iter_state *st) */ static void *established_get_first(struct seq_file *seq) { - struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file)); + struct tcp_seq_afinfo *afinfo; struct tcp_iter_state *st = seq->private; struct net *net = seq_file_net(seq); void *rc = NULL; + if (st->bpf_seq_afinfo) + afinfo = st->bpf_seq_afinfo; + else + afinfo = PDE_DATA(file_inode(seq->file)); + st->offset = 0; for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) { struct sock *sk; @@ -2289,7 +2300,8 @@ static void *established_get_first(struct seq_file *seq) spin_lock_bh(lock); sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { - if (sk->sk_family != afinfo->family || + if ((afinfo->family != AF_UNSPEC && + sk->sk_family != afinfo->family) || !net_eq(sock_net(sk), net)) { continue; } @@ -2304,19 +2316,25 @@ out: static void *established_get_next(struct seq_file *seq, void *cur) { - struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file)); + struct tcp_seq_afinfo *afinfo; struct sock *sk = cur; struct hlist_nulls_node *node; struct tcp_iter_state *st = seq->private; struct net *net = seq_file_net(seq); + if (st->bpf_seq_afinfo) + afinfo = st->bpf_seq_afinfo; + else + afinfo = PDE_DATA(file_inode(seq->file)); + ++st->num; ++st->offset; sk = sk_nulls_next(sk); sk_nulls_for_each_from(sk, node) { - if (sk->sk_family == afinfo->family && + if ((afinfo->family == AF_UNSPEC || + sk->sk_family == afinfo->family) && net_eq(sock_net(sk), net)) return sk; } -- cgit v1.2.3 From af7ec13833619e17f03aa73a785a2f871da6d66b Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 23 Jun 2020 16:08:09 -0700 Subject: bpf: Add bpf_skc_to_tcp6_sock() helper The helper is used in tracing programs to cast a socket pointer to a tcp6_sock pointer. 
The return value could be NULL if the casting is illegal. A new helper return type RET_PTR_TO_BTF_ID_OR_NULL is added so the verifier is able to deduce proper return types for the helper. Different from the previous BTF_ID based helpers, the bpf_skc_to_tcp6_sock() argument can be several possible btf_ids. More specifically, all possible socket data structures with sock_common appearing in the first in the memory layout. This patch only added socket types related to tcp and udp. All possible argument btf_id and return value btf_id for helper bpf_skc_to_tcp6_sock() are pre-calculcated and cached. In the future, it is even possible to precompute these btf_id's at kernel build time. Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200623230809.3988195-1-yhs@fb.com --- include/linux/bpf.h | 12 +++++++ include/uapi/linux/bpf.h | 9 ++++- kernel/bpf/btf.c | 1 + kernel/bpf/verifier.c | 43 ++++++++++++++++------ kernel/trace/bpf_trace.c | 2 ++ net/core/filter.c | 82 ++++++++++++++++++++++++++++++++++++++++++ scripts/bpf_helpers_doc.py | 2 ++ tools/include/uapi/linux/bpf.h | 9 ++++- 8 files changed, 148 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 1e1501ee53ce..c0e38ad07848 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -265,6 +265,7 @@ enum bpf_return_type { RET_PTR_TO_TCP_SOCK_OR_NULL, /* returns a pointer to a tcp_sock or NULL */ RET_PTR_TO_SOCK_COMMON_OR_NULL, /* returns a pointer to a sock_common or NULL */ RET_PTR_TO_ALLOC_MEM_OR_NULL, /* returns a pointer to dynamically allocated memory or NULL */ + RET_PTR_TO_BTF_ID_OR_NULL, /* returns a pointer to a btf_id or NULL */ }; /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs @@ -287,6 +288,12 @@ struct bpf_func_proto { enum bpf_arg_type arg_type[5]; }; int *btf_id; /* BTF ids of arguments */ + bool (*check_btf_id)(u32 btf_id, u32 arg); /* if the argument btf_id is + * valid. Often used if more + * than one btf id is permitted + * for this argument. + */ + int *ret_btf_id; /* return value btf_id */ }; /* bpf_context is intentionally undefined structure. 
Pointer to bpf_context is @@ -1524,6 +1531,7 @@ static inline bool bpf_map_is_dev_bound(struct bpf_map *map) struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr); void bpf_map_offload_map_free(struct bpf_map *map); +void init_btf_sock_ids(struct btf *btf); #else static inline int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr) @@ -1549,6 +1557,9 @@ static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) static inline void bpf_map_offload_map_free(struct bpf_map *map) { } +static inline void init_btf_sock_ids(struct btf *btf) +{ +} #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ #if defined(CONFIG_BPF_STREAM_PARSER) @@ -1638,6 +1649,7 @@ extern const struct bpf_func_proto bpf_ringbuf_reserve_proto; extern const struct bpf_func_proto bpf_ringbuf_submit_proto; extern const struct bpf_func_proto bpf_ringbuf_discard_proto; extern const struct bpf_func_proto bpf_ringbuf_query_proto; +extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto; const struct bpf_func_proto *bpf_tracing_func_proto( enum bpf_func_id func_id, const struct bpf_prog *prog); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index d9737d51dd19..e90ad07b291a 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3255,6 +3255,12 @@ union bpf_attr { * case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level * is returned or the error code -EACCES in case the skb is not * subject to CHECKSUM_UNNECESSARY. + * + * struct tcp6_sock *bpf_skc_to_tcp6_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3392,7 +3398,8 @@ union bpf_attr { FN(ringbuf_submit), \ FN(ringbuf_discard), \ FN(ringbuf_query), \ - FN(csum_level), + FN(csum_level), \ + FN(skc_to_tcp6_sock), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index e377d1981730..4c3007f428b1 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -3674,6 +3674,7 @@ struct btf *btf_parse_vmlinux(void) goto errout; bpf_struct_ops_init(btf, log); + init_btf_sock_ids(btf); btf_verifier_env_free(env); refcount_set(&btf->refcnt, 1); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 7460f967cb75..7de98906ddf4 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3800,12 +3800,14 @@ static int int_ptr_type_to_size(enum bpf_arg_type type) return -EINVAL; } -static int check_func_arg(struct bpf_verifier_env *env, u32 regno, - enum bpf_arg_type arg_type, - struct bpf_call_arg_meta *meta) +static int check_func_arg(struct bpf_verifier_env *env, u32 arg, + struct bpf_call_arg_meta *meta, + const struct bpf_func_proto *fn) { + u32 regno = BPF_REG_1 + arg; struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; enum bpf_reg_type expected_type, type = reg->type; + enum bpf_arg_type arg_type = fn->arg_type[arg]; int err = 0; if (arg_type == ARG_DONTCARE) @@ -3885,9 +3887,16 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, expected_type = PTR_TO_BTF_ID; if (type != expected_type) goto err_type; - if (reg->btf_id != meta->btf_id) { - verbose(env, "Helper has type %s got %s in R%d\n", - kernel_type_name(meta->btf_id), + if (!fn->check_btf_id) { + if (reg->btf_id != meta->btf_id) { + verbose(env, "Helper has type %s got %s in R%d\n", + kernel_type_name(meta->btf_id), + 
kernel_type_name(reg->btf_id), regno); + + return -EACCES; + } + } else if (!fn->check_btf_id(reg->btf_id, arg)) { + verbose(env, "Helper does not support %s in R%d\n", kernel_type_name(reg->btf_id), regno); return -EACCES; @@ -4709,10 +4718,12 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn meta.func_id = func_id; /* check args */ for (i = 0; i < 5; i++) { - err = btf_resolve_helper_id(&env->log, fn, i); - if (err > 0) - meta.btf_id = err; - err = check_func_arg(env, BPF_REG_1 + i, fn->arg_type[i], &meta); + if (!fn->check_btf_id) { + err = btf_resolve_helper_id(&env->log, fn, i); + if (err > 0) + meta.btf_id = err; + } + err = check_func_arg(env, i, &meta, fn); if (err) return err; } @@ -4815,6 +4826,18 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL; regs[BPF_REG_0].id = ++env->id_gen; regs[BPF_REG_0].mem_size = meta.mem_size; + } else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL) { + int ret_btf_id; + + mark_reg_known_zero(env, regs, BPF_REG_0); + regs[BPF_REG_0].type = PTR_TO_BTF_ID_OR_NULL; + ret_btf_id = *fn->ret_btf_id; + if (ret_btf_id == 0) { + verbose(env, "invalid return type %d of func %s#%d\n", + fn->ret_type, func_id_name(func_id), func_id); + return -EINVAL; + } + regs[BPF_REG_0].btf_id = ret_btf_id; } else { verbose(env, "unknown return type %d of func %s#%d\n", fn->ret_type, func_id_name(func_id), func_id); diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 0159f12d2af5..2a97a268f533 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1515,6 +1515,8 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_skb_output_proto; case BPF_FUNC_xdp_output: return &bpf_xdp_output_proto; + case BPF_FUNC_skc_to_tcp6_sock: + return &bpf_skc_to_tcp6_sock_proto; #endif case BPF_FUNC_seq_printf: return prog->expected_attach_type == BPF_TRACE_ITER ? diff --git a/net/core/filter.c b/net/core/filter.c index c713b6b8938f..176e27d75c51 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include #include @@ -9225,3 +9226,84 @@ void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog) { bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev_prog, prog); } + +/* Define a list of socket types which can be the argument for + * skc_to_*_sock() helpers. All these sockets should have + * sock_common as the first argument in its memory layout. 
+ */ +#define BTF_SOCK_TYPE_xxx \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET, "inet_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_CONN, "inet_connection_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_REQ, "inet_request_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_TW, "inet_timewait_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_REQ, "request_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK, "sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK_COMMON, "sock_common") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP, "tcp_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_REQ, "tcp_request_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, "tcp_timewait_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, "tcp6_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, "udp_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, "udp6_sock") + +enum { +#define BTF_SOCK_TYPE(name, str) name, +BTF_SOCK_TYPE_xxx +#undef BTF_SOCK_TYPE +MAX_BTF_SOCK_TYPE, +}; + +static int btf_sock_ids[MAX_BTF_SOCK_TYPE]; + +#ifdef CONFIG_BPF_SYSCALL +static const char *bpf_sock_types[] = { +#define BTF_SOCK_TYPE(name, str) str, +BTF_SOCK_TYPE_xxx +#undef BTF_SOCK_TYPE +}; + +void init_btf_sock_ids(struct btf *btf) +{ + int i, btf_id; + + for (i = 0; i < MAX_BTF_SOCK_TYPE; i++) { + btf_id = btf_find_by_name_kind(btf, bpf_sock_types[i], + BTF_KIND_STRUCT); + if (btf_id > 0) + btf_sock_ids[i] = btf_id; + } +} +#endif + +static bool check_arg_btf_id(u32 btf_id, u32 arg) +{ + int i; + + /* only one argument, no need to check arg */ + for (i = 0; i < MAX_BTF_SOCK_TYPE; i++) + if (btf_sock_ids[i] == btf_id) + return true; + return false; +} + +BPF_CALL_1(bpf_skc_to_tcp6_sock, struct sock *, sk) +{ + /* tcp6_sock type is not generated in dwarf and hence btf, + * trigger an explicit type generation here. + */ + BTF_TYPE_EMIT(struct tcp6_sock); + if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP && + sk->sk_family == AF_INET6) + return (unsigned long)sk; + + return (unsigned long)NULL; +} + +const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto = { + .func = bpf_skc_to_tcp6_sock, + .gpl_only = false, + .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, + .arg1_type = ARG_PTR_TO_BTF_ID, + .check_btf_id = check_arg_btf_id, + .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP6], +}; diff --git a/scripts/bpf_helpers_doc.py b/scripts/bpf_helpers_doc.py index 91fa668fa860..6c2f64118651 100755 --- a/scripts/bpf_helpers_doc.py +++ b/scripts/bpf_helpers_doc.py @@ -421,6 +421,7 @@ class PrinterHelpers(Printer): 'struct sockaddr', 'struct tcphdr', 'struct seq_file', + 'struct tcp6_sock', 'struct __sk_buff', 'struct sk_msg_md', @@ -458,6 +459,7 @@ class PrinterHelpers(Printer): 'struct sockaddr', 'struct tcphdr', 'struct seq_file', + 'struct tcp6_sock', } mapped_types = { 'u8': '__u8', diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index d9737d51dd19..e90ad07b291a 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -3255,6 +3255,12 @@ union bpf_attr { * case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level * is returned or the error code -EACCES in case the skb is not * subject to CHECKSUM_UNNECESSARY. + * + * struct tcp6_sock *bpf_skc_to_tcp6_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. 
*/ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3392,7 +3398,8 @@ union bpf_attr { FN(ringbuf_submit), \ FN(ringbuf_discard), \ FN(ringbuf_query), \ - FN(csum_level), + FN(csum_level), \ + FN(skc_to_tcp6_sock), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call -- cgit v1.2.3 From 478cfbdf5f13dfe09cfd0b1cbac821f5e27f6108 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 23 Jun 2020 16:08:11 -0700 Subject: bpf: Add bpf_skc_to_{tcp, tcp_timewait, tcp_request}_sock() helpers Three more helpers are added to cast a sock_common pointer to an tcp_sock, tcp_timewait_sock or a tcp_request_sock for tracing programs. Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200623230811.3988277-1-yhs@fb.com --- include/linux/bpf.h | 3 ++ include/uapi/linux/bpf.h | 23 +++++++++++++++- kernel/trace/bpf_trace.c | 6 ++++ net/core/filter.c | 62 ++++++++++++++++++++++++++++++++++++++++++ scripts/bpf_helpers_doc.py | 6 ++++ tools/include/uapi/linux/bpf.h | 23 +++++++++++++++- 6 files changed, 121 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index c0e38ad07848..c23998cf6699 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1650,6 +1650,9 @@ extern const struct bpf_func_proto bpf_ringbuf_submit_proto; extern const struct bpf_func_proto bpf_ringbuf_discard_proto; extern const struct bpf_func_proto bpf_ringbuf_query_proto; extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto; +extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto; +extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto; +extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto; const struct bpf_func_proto *bpf_tracing_func_proto( enum bpf_func_id func_id, const struct bpf_prog *prog); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index e90ad07b291a..b9412ab275f3 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3261,6 +3261,24 @@ union bpf_attr { * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer. * Return * *sk* if casting is valid, or NULL otherwise. + * + * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *tcp_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. + * + * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. + * + * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. 
*/ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3399,7 +3417,10 @@ union bpf_attr { FN(ringbuf_discard), \ FN(ringbuf_query), \ FN(csum_level), \ - FN(skc_to_tcp6_sock), + FN(skc_to_tcp6_sock), \ + FN(skc_to_tcp_sock), \ + FN(skc_to_tcp_timewait_sock), \ + FN(skc_to_tcp_request_sock), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 2a97a268f533..48d935b0d87c 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1517,6 +1517,12 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_xdp_output_proto; case BPF_FUNC_skc_to_tcp6_sock: return &bpf_skc_to_tcp6_sock_proto; + case BPF_FUNC_skc_to_tcp_sock: + return &bpf_skc_to_tcp_sock_proto; + case BPF_FUNC_skc_to_tcp_timewait_sock: + return &bpf_skc_to_tcp_timewait_sock_proto; + case BPF_FUNC_skc_to_tcp_request_sock: + return &bpf_skc_to_tcp_request_sock_proto; #endif case BPF_FUNC_seq_printf: return prog->expected_attach_type == BPF_TRACE_ITER ? diff --git a/net/core/filter.c b/net/core/filter.c index 176e27d75c51..0b4e5aed7e20 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -74,6 +74,7 @@ #include #include #include +#include /** * sk_filter_trim_cap - run a packet through a socket filter @@ -9307,3 +9308,64 @@ const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto = { .check_btf_id = check_arg_btf_id, .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP6], }; + +BPF_CALL_1(bpf_skc_to_tcp_sock, struct sock *, sk) +{ + if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP) + return (unsigned long)sk; + + return (unsigned long)NULL; +} + +const struct bpf_func_proto bpf_skc_to_tcp_sock_proto = { + .func = bpf_skc_to_tcp_sock, + .gpl_only = false, + .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, + .arg1_type = ARG_PTR_TO_BTF_ID, + .check_btf_id = check_arg_btf_id, + .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP], +}; + +BPF_CALL_1(bpf_skc_to_tcp_timewait_sock, struct sock *, sk) +{ + if (sk->sk_prot == &tcp_prot && sk->sk_state == TCP_TIME_WAIT) + return (unsigned long)sk; + +#if IS_BUILTIN(CONFIG_IPV6) + if (sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_TIME_WAIT) + return (unsigned long)sk; +#endif + + return (unsigned long)NULL; +} + +const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto = { + .func = bpf_skc_to_tcp_timewait_sock, + .gpl_only = false, + .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, + .arg1_type = ARG_PTR_TO_BTF_ID, + .check_btf_id = check_arg_btf_id, + .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_TW], +}; + +BPF_CALL_1(bpf_skc_to_tcp_request_sock, struct sock *, sk) +{ + if (sk->sk_prot == &tcp_prot && sk->sk_state == TCP_NEW_SYN_RECV) + return (unsigned long)sk; + +#if IS_BUILTIN(CONFIG_IPV6) + if (sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_NEW_SYN_RECV) + return (unsigned long)sk; +#endif + + return (unsigned long)NULL; +} + +const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto = { + .func = bpf_skc_to_tcp_request_sock, + .gpl_only = false, + .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, + .arg1_type = ARG_PTR_TO_BTF_ID, + .check_btf_id = check_arg_btf_id, + .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_REQ], +}; diff --git a/scripts/bpf_helpers_doc.py b/scripts/bpf_helpers_doc.py index 6c2f64118651..d886657c6aaa 100755 --- a/scripts/bpf_helpers_doc.py +++ b/scripts/bpf_helpers_doc.py @@ -422,6 +422,9 @@ class PrinterHelpers(Printer): 'struct tcphdr', 'struct seq_file', 'struct tcp6_sock', + 'struct tcp_sock', + 
'struct tcp_timewait_sock', + 'struct tcp_request_sock', 'struct __sk_buff', 'struct sk_msg_md', @@ -460,6 +463,9 @@ class PrinterHelpers(Printer): 'struct tcphdr', 'struct seq_file', 'struct tcp6_sock', + 'struct tcp_sock', + 'struct tcp_timewait_sock', + 'struct tcp_request_sock', } mapped_types = { 'u8': '__u8', diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index e90ad07b291a..b9412ab275f3 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -3261,6 +3261,24 @@ union bpf_attr { * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer. * Return * *sk* if casting is valid, or NULL otherwise. + * + * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *tcp_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. + * + * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. + * + * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3399,7 +3417,10 @@ union bpf_attr { FN(ringbuf_discard), \ FN(ringbuf_query), \ FN(csum_level), \ - FN(skc_to_tcp6_sock), + FN(skc_to_tcp6_sock), \ + FN(skc_to_tcp_sock), \ + FN(skc_to_tcp_timewait_sock), \ + FN(skc_to_tcp_request_sock), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call -- cgit v1.2.3 From 9e8ca27afab6c92477b459f6a5d2af0cd3197c20 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 23 Jun 2020 16:08:12 -0700 Subject: net: bpf: Add bpf_seq_afinfo in udp_iter_state Similar to tcp_iter_state, a new field bpf_seq_afinfo is added to udp_iter_state to provide bpf udp iterator afinfo. This does not change /proc/net/{udp, udp6} behavior. But it enables bpf iterator to avoid get afinfo from PDE_DATA and iterate through all udp and udp6 sockets in one pass. 
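For illustration only (not part of this patch): a bpf iterator init callback could fill in the new field roughly as below, so that a single pass walks both udp and udp6 sockets. The function name, signature, allocation flags and error handling here are assumptions; the real iterator wiring arrives in a follow-up patch.

static int bpf_iter_init_udp(void *priv_data)
{
	struct udp_iter_state *st = priv_data;
	struct udp_seq_afinfo *afinfo;

	/* AF_UNSPEC makes udp_get_first()/udp_get_next() accept any family */
	afinfo = kmalloc(sizeof(*afinfo), GFP_USER | __GFP_NOWARN);
	if (!afinfo)
		return -ENOMEM;

	afinfo->family = AF_UNSPEC;
	afinfo->udp_table = &udp_table;
	st->bpf_seq_afinfo = afinfo;

	return 0;
}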
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200623230812.3988347-1-yhs@fb.com --- include/net/udp.h | 1 + net/ipv4/udp.c | 28 +++++++++++++++++++++++----- 2 files changed, 24 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/net/udp.h b/include/net/udp.h index a8fa6c0c6ded..67c8b7368845 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -440,6 +440,7 @@ struct udp_seq_afinfo { struct udp_iter_state { struct seq_net_private p; int bucket; + struct udp_seq_afinfo *bpf_seq_afinfo; }; void *udp_seq_start(struct seq_file *seq, loff_t *pos); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 1b7ebbcae497..90355301b266 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2826,10 +2826,15 @@ EXPORT_SYMBOL(udp_prot); static struct sock *udp_get_first(struct seq_file *seq, int start) { struct sock *sk; - struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file)); + struct udp_seq_afinfo *afinfo; struct udp_iter_state *state = seq->private; struct net *net = seq_file_net(seq); + if (state->bpf_seq_afinfo) + afinfo = state->bpf_seq_afinfo; + else + afinfo = PDE_DATA(file_inode(seq->file)); + for (state->bucket = start; state->bucket <= afinfo->udp_table->mask; ++state->bucket) { struct udp_hslot *hslot = &afinfo->udp_table->hash[state->bucket]; @@ -2841,7 +2846,8 @@ static struct sock *udp_get_first(struct seq_file *seq, int start) sk_for_each(sk, &hslot->head) { if (!net_eq(sock_net(sk), net)) continue; - if (sk->sk_family == afinfo->family) + if (afinfo->family == AF_UNSPEC || + sk->sk_family == afinfo->family) goto found; } spin_unlock_bh(&hslot->lock); @@ -2853,13 +2859,20 @@ found: static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk) { - struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file)); + struct udp_seq_afinfo *afinfo; struct udp_iter_state *state = seq->private; struct net *net = seq_file_net(seq); + if (state->bpf_seq_afinfo) + afinfo = state->bpf_seq_afinfo; + else + afinfo = PDE_DATA(file_inode(seq->file)); + do { sk = sk_next(sk); - } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != afinfo->family)); + } while (sk && (!net_eq(sock_net(sk), net) || + (afinfo->family != AF_UNSPEC && + sk->sk_family != afinfo->family))); if (!sk) { if (state->bucket <= afinfo->udp_table->mask) @@ -2904,9 +2917,14 @@ EXPORT_SYMBOL(udp_seq_next); void udp_seq_stop(struct seq_file *seq, void *v) { - struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file)); + struct udp_seq_afinfo *afinfo; struct udp_iter_state *state = seq->private; + if (state->bpf_seq_afinfo) + afinfo = state->bpf_seq_afinfo; + else + afinfo = PDE_DATA(file_inode(seq->file)); + if (state->bucket <= afinfo->udp_table->mask) spin_unlock_bh(&afinfo->udp_table->hash[state->bucket].lock); } -- cgit v1.2.3 From 0d4fad3e57df2bf61e8ffc8d12a34b1caf9b8835 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 23 Jun 2020 16:08:15 -0700 Subject: bpf: Add bpf_skc_to_udp6_sock() helper The helper is used in tracing programs to cast a socket pointer to a udp6_sock pointer. The return value could be NULL if the casting is illegal. 
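As a usage illustration only (not part of this patch), a BTF-enabled tracing program might use the new cast helpers roughly as follows; the attach point and the fields read are assumptions, and the program is assumed to be built against vmlinux.h with the usual libbpf helper headers.

SEC("fentry/tcp_close")
int BPF_PROG(trace_tcp_close, struct sock *sk, long timeout)
{
	struct tcp_sock *tp;

	/* The helper returns NULL when sk is not a full TCP socket */
	tp = bpf_skc_to_tcp_sock(sk);
	if (!tp)
		return 0;

	bpf_printk("cwnd %u srtt %u\n", tp->snd_cwnd, tp->srtt_us);
	return 0;
}

bpf_skc_to_udp6_sock() and the tcp_timewait/tcp_request variants added earlier in this series follow the same pattern: cast, check for NULL, then read the struct members directly.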
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Cc: Eric Dumazet Link: https://lore.kernel.org/bpf/20200623230815.3988481-1-yhs@fb.com --- include/linux/bpf.h | 1 + include/uapi/linux/bpf.h | 9 ++++++++- kernel/trace/bpf_trace.c | 2 ++ net/core/filter.c | 22 ++++++++++++++++++++++ scripts/bpf_helpers_doc.py | 2 ++ tools/include/uapi/linux/bpf.h | 9 ++++++++- 6 files changed, 43 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index c23998cf6699..3d2ade703a35 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1653,6 +1653,7 @@ extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto; extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto; extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto; extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto; +extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto; const struct bpf_func_proto *bpf_tracing_func_proto( enum bpf_func_id func_id, const struct bpf_prog *prog); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index b9412ab275f3..0cb8ec948816 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3279,6 +3279,12 @@ union bpf_attr { * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer. * Return * *sk* if casting is valid, or NULL otherwise. + * + * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *udp6_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3420,7 +3426,8 @@ union bpf_attr { FN(skc_to_tcp6_sock), \ FN(skc_to_tcp_sock), \ FN(skc_to_tcp_timewait_sock), \ - FN(skc_to_tcp_request_sock), + FN(skc_to_tcp_request_sock), \ + FN(skc_to_udp6_sock), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 48d935b0d87c..5d59dda5f661 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1523,6 +1523,8 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_skc_to_tcp_timewait_sock_proto; case BPF_FUNC_skc_to_tcp_request_sock: return &bpf_skc_to_tcp_request_sock_proto; + case BPF_FUNC_skc_to_udp6_sock: + return &bpf_skc_to_udp6_sock_proto; #endif case BPF_FUNC_seq_printf: return prog->expected_attach_type == BPF_TRACE_ITER ? diff --git a/net/core/filter.c b/net/core/filter.c index 0b4e5aed7e20..c796e141ea8e 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -9369,3 +9369,25 @@ const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto = { .check_btf_id = check_arg_btf_id, .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_REQ], }; + +BPF_CALL_1(bpf_skc_to_udp6_sock, struct sock *, sk) +{ + /* udp6_sock type is not generated in dwarf and hence btf, + * trigger an explicit type generation here. 
+ */ + BTF_TYPE_EMIT(struct udp6_sock); + if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_UDP && + sk->sk_type == SOCK_DGRAM && sk->sk_family == AF_INET6) + return (unsigned long)sk; + + return (unsigned long)NULL; +} + +const struct bpf_func_proto bpf_skc_to_udp6_sock_proto = { + .func = bpf_skc_to_udp6_sock, + .gpl_only = false, + .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, + .arg1_type = ARG_PTR_TO_BTF_ID, + .check_btf_id = check_arg_btf_id, + .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_UDP6], +}; diff --git a/scripts/bpf_helpers_doc.py b/scripts/bpf_helpers_doc.py index d886657c6aaa..6bab40ff442e 100755 --- a/scripts/bpf_helpers_doc.py +++ b/scripts/bpf_helpers_doc.py @@ -425,6 +425,7 @@ class PrinterHelpers(Printer): 'struct tcp_sock', 'struct tcp_timewait_sock', 'struct tcp_request_sock', + 'struct udp6_sock', 'struct __sk_buff', 'struct sk_msg_md', @@ -466,6 +467,7 @@ class PrinterHelpers(Printer): 'struct tcp_sock', 'struct tcp_timewait_sock', 'struct tcp_request_sock', + 'struct udp6_sock', } mapped_types = { 'u8': '__u8', diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index b9412ab275f3..0cb8ec948816 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -3279,6 +3279,12 @@ union bpf_attr { * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer. * Return * *sk* if casting is valid, or NULL otherwise. + * + * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *udp6_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3420,7 +3426,8 @@ union bpf_attr { FN(skc_to_tcp6_sock), \ FN(skc_to_tcp_sock), \ FN(skc_to_tcp_timewait_sock), \ - FN(skc_to_tcp_request_sock), + FN(skc_to_tcp_request_sock), \ + FN(skc_to_udp6_sock), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call -- cgit v1.2.3 From 58ffbba6a39979baa22d2f7e69faeffa2d9c0641 Mon Sep 17 00:00:00 2001 From: Akash Asthana Date: Tue, 23 Jun 2020 16:08:50 +0530 Subject: soc: qcom: geni: Support for ICC voting Add necessary macros and structure variables to support ICC BW voting from individual SE drivers. 
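For illustration only (not part of this patch), an SE driver could wire the paths up at probe time roughly as follows. The geni_icc_*() calls, the icc_paths[]/avg_bw fields, the path indices and GENI_DEFAULT_BW come from this patch; the function name and the DDR bandwidth value are illustrative assumptions.

static int hypothetical_se_icc_init(struct geni_se *se)
{
	int ret;

	/* "qup-memory" names the DDR path; pass NULL if the SE has none */
	ret = geni_icc_get(se, "qup-memory");
	if (ret)
		return ret;

	se->icc_paths[GENI_TO_CORE].avg_bw = GENI_DEFAULT_BW;
	se->icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;
	se->icc_paths[GENI_TO_DDR].avg_bw = Bps_to_icc(1000000);

	ret = geni_icc_set_bw(se);
	if (ret)
		return ret;

	/* Pair geni_icc_enable()/geni_icc_disable() with runtime PM transitions */
	return geni_icc_enable(se);
}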
Signed-off-by: Akash Asthana Reviewed-by: Matthias Kaehlcke Link: https://lore.kernel.org/r/1592908737-7068-2-git-send-email-akashast@codeaurora.org Signed-off-by: Bjorn Andersson --- drivers/soc/qcom/qcom-geni-se.c | 82 +++++++++++++++++++++++++++++++++++++++++ include/linux/qcom-geni-se.h | 38 +++++++++++++++++++ 2 files changed, 120 insertions(+) (limited to 'include') diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c index 7d622ea1274e..950e3470c498 100644 --- a/drivers/soc/qcom/qcom-geni-se.c +++ b/drivers/soc/qcom/qcom-geni-se.c @@ -92,6 +92,9 @@ struct geni_wrapper { struct clk_bulk_data ahb_clks[NUM_AHB_CLKS]; }; +static const char * const icc_path_names[] = {"qup-core", "qup-config", + "qup-memory"}; + #define QUP_HW_VER_REG 0x4 /* Common SE registers */ @@ -720,6 +723,85 @@ void geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len) } EXPORT_SYMBOL(geni_se_rx_dma_unprep); +int geni_icc_get(struct geni_se *se, const char *icc_ddr) +{ + int i, err; + const char *icc_names[] = {"qup-core", "qup-config", icc_ddr}; + + for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) { + if (!icc_names[i]) + continue; + + se->icc_paths[i].path = devm_of_icc_get(se->dev, icc_names[i]); + if (IS_ERR(se->icc_paths[i].path)) + goto err; + } + + return 0; + +err: + err = PTR_ERR(se->icc_paths[i].path); + if (err != -EPROBE_DEFER) + dev_err_ratelimited(se->dev, "Failed to get ICC path '%s': %d\n", + icc_names[i], err); + return err; + +} +EXPORT_SYMBOL(geni_icc_get); + +int geni_icc_set_bw(struct geni_se *se) +{ + int i, ret; + + for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) { + ret = icc_set_bw(se->icc_paths[i].path, + se->icc_paths[i].avg_bw, se->icc_paths[i].avg_bw); + if (ret) { + dev_err_ratelimited(se->dev, "ICC BW voting failed on path '%s': %d\n", + icc_path_names[i], ret); + return ret; + } + } + + return 0; +} +EXPORT_SYMBOL(geni_icc_set_bw); + +/* To do: Replace this by icc_bulk_enable once it's implemented in ICC core */ +int geni_icc_enable(struct geni_se *se) +{ + int i, ret; + + for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) { + ret = icc_enable(se->icc_paths[i].path); + if (ret) { + dev_err_ratelimited(se->dev, "ICC enable failed on path '%s': %d\n", + icc_path_names[i], ret); + return ret; + } + } + + return 0; +} +EXPORT_SYMBOL(geni_icc_enable); + +int geni_icc_disable(struct geni_se *se) +{ + int i, ret; + + for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) { + ret = icc_disable(se->icc_paths[i].path); + if (ret) { + dev_err_ratelimited(se->dev, "ICC disable failed on path '%s': %d\n", + icc_path_names[i], ret); + return ret; + } + } + + return 0; +} +EXPORT_SYMBOL(geni_icc_disable); + static int geni_se_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h index dd464943f717..80dbc01904d6 100644 --- a/include/linux/qcom-geni-se.h +++ b/include/linux/qcom-geni-se.h @@ -6,6 +6,8 @@ #ifndef _LINUX_QCOM_GENI_SE #define _LINUX_QCOM_GENI_SE +#include + /* Transfer mode supported by GENI Serial Engines */ enum geni_se_xfer_mode { GENI_SE_INVALID, @@ -25,6 +27,17 @@ enum geni_se_protocol_type { struct geni_wrapper; struct clk; +enum geni_icc_path_index { + GENI_TO_CORE, + CPU_TO_GENI, + GENI_TO_DDR +}; + +struct geni_icc_path { + struct icc_path *path; + unsigned int avg_bw; +}; + /** * struct geni_se - GENI Serial Engine * @base: Base Address of the Serial Engine's register block @@ -33,6 +46,7 @@ struct clk; * @clk: Handle to the core serial engine clock * 
@num_clk_levels: Number of valid clock levels in clk_perf_tbl * @clk_perf_tbl: Table of clock frequency input to serial engine clock + * @icc_paths: Array of ICC paths for SE */ struct geni_se { void __iomem *base; @@ -41,6 +55,7 @@ struct geni_se { struct clk *clk; unsigned int num_clk_levels; unsigned long *clk_perf_tbl; + struct geni_icc_path icc_paths[3]; }; /* Common SE registers */ @@ -229,6 +244,21 @@ struct geni_se { #define GENI_SE_VERSION_MINOR(ver) ((ver & HW_VER_MINOR_MASK) >> HW_VER_MINOR_SHFT) #define GENI_SE_VERSION_STEP(ver) (ver & HW_VER_STEP_MASK) +/* + * Define bandwidth thresholds that cause the underlying Core 2X interconnect + * clock to run at the named frequency. These baseline values are recommended + * by the hardware team, and are not dynamically scaled with GENI bandwidth + * beyond basic on/off. + */ +#define CORE_2X_19_2_MHZ 960 +#define CORE_2X_50_MHZ 2500 +#define CORE_2X_100_MHZ 5000 +#define CORE_2X_150_MHZ 7500 +#define CORE_2X_200_MHZ 10000 +#define CORE_2X_236_MHZ 16383 + +#define GENI_DEFAULT_BW Bps_to_icc(1000) + #if IS_ENABLED(CONFIG_QCOM_GENI_SE) u32 geni_se_get_qup_hw_version(struct geni_se *se); @@ -416,5 +446,13 @@ int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len, void geni_se_tx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len); void geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len); + +int geni_icc_get(struct geni_se *se, const char *icc_ddr); + +int geni_icc_set_bw(struct geni_se *se); + +int geni_icc_enable(struct geni_se *se); + +int geni_icc_disable(struct geni_se *se); #endif #endif -- cgit v1.2.3 From 048eb908a1f276ca0346f20a3e6e7d707dcd81f3 Mon Sep 17 00:00:00 2001 From: Akash Asthana Date: Tue, 23 Jun 2020 16:08:51 +0530 Subject: soc: qcom-geni-se: Add interconnect support to fix earlycon crash QUP core clock is shared among all the SE drivers present on particular QUP wrapper, the system will reset(unclocked access) if earlycon used after QUP core clock is put to 0 from other SE drivers before real console comes up. As earlycon can't vote for it's QUP core need, to fix this add ICC support to common/QUP wrapper driver and put vote for QUP core from probe on behalf of earlycon and remove vote during earlycon exit call. 
Signed-off-by: Akash Asthana Reported-by: Matthias Kaehlcke Reviewed-by: Matthias Kaehlcke Link: https://lore.kernel.org/r/1592908737-7068-3-git-send-email-akashast@codeaurora.org Signed-off-by: Bjorn Andersson --- drivers/soc/qcom/qcom-geni-se.c | 68 +++++++++++++++++++++++++++++++++++ drivers/tty/serial/qcom_geni_serial.c | 7 ++++ include/linux/qcom-geni-se.h | 2 ++ 3 files changed, 77 insertions(+) (limited to 'include') diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c index 950e3470c498..e2a0ba278b6b 100644 --- a/drivers/soc/qcom/qcom-geni-se.c +++ b/drivers/soc/qcom/qcom-geni-se.c @@ -3,6 +3,7 @@ #include #include +#include #include #include #include @@ -90,11 +91,14 @@ struct geni_wrapper { struct device *dev; void __iomem *base; struct clk_bulk_data ahb_clks[NUM_AHB_CLKS]; + struct geni_icc_path to_core; }; static const char * const icc_path_names[] = {"qup-core", "qup-config", "qup-memory"}; +static struct geni_wrapper *earlycon_wrapper; + #define QUP_HW_VER_REG 0x4 /* Common SE registers */ @@ -802,11 +806,38 @@ int geni_icc_disable(struct geni_se *se) } EXPORT_SYMBOL(geni_icc_disable); +void geni_remove_earlycon_icc_vote(void) +{ + struct geni_wrapper *wrapper; + struct device_node *parent; + struct device_node *child; + + if (!earlycon_wrapper) + return; + + wrapper = earlycon_wrapper; + parent = of_get_next_parent(wrapper->dev->of_node); + for_each_child_of_node(parent, child) { + if (!of_device_is_compatible(child, "qcom,geni-se-qup")) + continue; + wrapper = platform_get_drvdata(of_find_device_by_node(child)); + icc_put(wrapper->to_core.path); + wrapper->to_core.path = NULL; + + } + of_node_put(parent); + + earlycon_wrapper = NULL; +} +EXPORT_SYMBOL(geni_remove_earlycon_icc_vote); + static int geni_se_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct resource *res; struct geni_wrapper *wrapper; + struct console __maybe_unused *bcon; + bool __maybe_unused has_earlycon = false; int ret; wrapper = devm_kzalloc(dev, sizeof(*wrapper), GFP_KERNEL); @@ -829,6 +860,43 @@ static int geni_se_probe(struct platform_device *pdev) } } +#ifdef CONFIG_SERIAL_EARLYCON + for_each_console(bcon) { + if (!strcmp(bcon->name, "qcom_geni")) { + has_earlycon = true; + break; + } + } + if (!has_earlycon) + goto exit; + + wrapper->to_core.path = devm_of_icc_get(dev, "qup-core"); + if (IS_ERR(wrapper->to_core.path)) + return PTR_ERR(wrapper->to_core.path); + /* + * Put minmal BW request on core clocks on behalf of early console. + * The vote will be removed earlycon exit function. + * + * Note: We are putting vote on each QUP wrapper instead only to which + * earlycon is connected because QUP core clock of different wrapper + * share same voltage domain. If core1 is put to 0, then core2 will + * also run at 0, if not voted. Default ICC vote will be removed ASA + * we touch any of the core clock. 
+ * core1 = core2 = max(core1, core2) + */ + ret = icc_set_bw(wrapper->to_core.path, GENI_DEFAULT_BW, + GENI_DEFAULT_BW); + if (ret) { + dev_err(&pdev->dev, "%s: ICC BW voting failed for core: %d\n", + __func__, ret); + return ret; + } + + if (of_get_compatible_child(pdev->dev.of_node, "qcom,geni-debug-uart")) + earlycon_wrapper = wrapper; + of_node_put(pdev->dev.of_node); +#endif +exit: dev_set_drvdata(dev, wrapper); dev_dbg(dev, "GENI SE Driver probed\n"); return devm_of_platform_populate(dev); diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 457c0bf8cbf8..a4468db3b734 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -1121,6 +1121,12 @@ static inline void qcom_geni_serial_enable_early_read(struct geni_se *se, struct console *con) { } #endif +static int qcom_geni_serial_earlycon_exit(struct console *con) +{ + geni_remove_earlycon_icc_vote(); + return 0; +} + static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev, const char *opt) { @@ -1166,6 +1172,7 @@ static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev, writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN); dev->con->write = qcom_geni_serial_earlycon_write; + dev->con->exit = qcom_geni_serial_earlycon_exit; dev->con->setup = NULL; qcom_geni_serial_enable_early_read(&se, dev->con); diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h index 80dbc01904d6..743dd975d1cd 100644 --- a/include/linux/qcom-geni-se.h +++ b/include/linux/qcom-geni-se.h @@ -454,5 +454,7 @@ int geni_icc_set_bw(struct geni_se *se); int geni_icc_enable(struct geni_se *se); int geni_icc_disable(struct geni_se *se); + +void geni_remove_earlycon_icc_vote(void); #endif #endif -- cgit v1.2.3 From a5819b548af0cc0fd0b84fa3e35723c4c36f157c Mon Sep 17 00:00:00 2001 From: Rajendra Nayak Date: Mon, 15 Jun 2020 17:32:39 +0530 Subject: tty: serial: qcom_geni_serial: Use OPP API to set clk/perf state geni serial needs to express a perforamnce state requirement on CX powerdomain depending on the frequency of the clock rates. Use OPP table from DT to register with OPP framework and use dev_pm_opp_set_rate() to set the clk/perf state. 
Signed-off-by: Rajendra Nayak Reviewed-by: Matthias Kaehlcke Acked-by: Greg Kroah-Hartman Cc: Akash Asthana Cc: linux-serial@vger.kernel.org Link: https://lore.kernel.org/r/1592222564-13556-2-git-send-email-rnayak@codeaurora.org Signed-off-by: Bjorn Andersson --- drivers/tty/serial/qcom_geni_serial.c | 29 +++++++++++++++++++++++++---- include/linux/qcom-geni-se.h | 4 ++++ 2 files changed, 29 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index f701c7e9b89d..0300867eab7a 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -963,7 +964,7 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport, goto out_restart_rx; uport->uartclk = clk_rate; - clk_set_rate(port->se.clk, clk_rate); + dev_pm_opp_set_rate(uport->dev, clk_rate); ser_clk_cfg = SER_CLK_EN; ser_clk_cfg |= clk_div << CLK_DIV_SHFT; @@ -1383,13 +1384,25 @@ static int qcom_geni_serial_probe(struct platform_device *pdev) if (of_property_read_bool(pdev->dev.of_node, "cts-rts-swap")) port->cts_rts_swap = true; + port->se.opp_table = dev_pm_opp_set_clkname(&pdev->dev, "se"); + if (IS_ERR(port->se.opp_table)) + return PTR_ERR(port->se.opp_table); + /* OPP table is optional */ + ret = dev_pm_opp_of_add_table(&pdev->dev); + if (!ret) { + port->se.has_opp_table = true; + } else if (ret != -ENODEV) { + dev_err(&pdev->dev, "invalid OPP table in device tree\n"); + return ret; + } + uport->private_data = drv; platform_set_drvdata(pdev, port); port->handle_rx = console ? handle_rx_console : handle_rx_uart; ret = uart_add_one_port(drv, uport); if (ret) - return ret; + goto err; irq_set_status_flags(uport->irq, IRQ_NOAUTOEN); ret = devm_request_irq(uport->dev, uport->irq, qcom_geni_serial_isr, @@ -1397,7 +1410,7 @@ static int qcom_geni_serial_probe(struct platform_device *pdev) if (ret) { dev_err(uport->dev, "Failed to get IRQ ret %d\n", ret); uart_remove_one_port(drv, uport); - return ret; + goto err; } /* @@ -1414,11 +1427,16 @@ static int qcom_geni_serial_probe(struct platform_device *pdev) if (ret) { device_init_wakeup(&pdev->dev, false); uart_remove_one_port(drv, uport); - return ret; + goto err; } } return 0; +err: + if (port->se.has_opp_table) + dev_pm_opp_of_remove_table(&pdev->dev); + dev_pm_opp_put_clkname(port->se.opp_table); + return ret; } static int qcom_geni_serial_remove(struct platform_device *pdev) @@ -1426,6 +1444,9 @@ static int qcom_geni_serial_remove(struct platform_device *pdev) struct qcom_geni_serial_port *port = platform_get_drvdata(pdev); struct uart_driver *drv = port->uport.private_data; + if (port->se.has_opp_table) + dev_pm_opp_of_remove_table(&pdev->dev); + dev_pm_opp_put_clkname(port->se.opp_table); dev_pm_clear_wake_irq(&pdev->dev); device_init_wakeup(&pdev->dev, false); uart_remove_one_port(drv, &port->uport); diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h index 743dd975d1cd..afa511ef1457 100644 --- a/include/linux/qcom-geni-se.h +++ b/include/linux/qcom-geni-se.h @@ -47,6 +47,8 @@ struct geni_icc_path { * @num_clk_levels: Number of valid clock levels in clk_perf_tbl * @clk_perf_tbl: Table of clock frequency input to serial engine clock * @icc_paths: Array of ICC paths for SE + * @opp_table: Pointer to the OPP table + * @has_opp_table: Specifies if the SE has an OPP table */ struct geni_se { void __iomem *base; @@ -56,6 +58,8 @@ struct geni_se { unsigned int 
num_clk_levels; unsigned long *clk_perf_tbl; struct geni_icc_path icc_paths[3]; + struct opp_table *opp_table; + bool has_opp_table; }; /* Common SE registers */ -- cgit v1.2.3 From 19e528dc9af29169fa7cdfa61071805fdef504c6 Mon Sep 17 00:00:00 2001 From: Po Liu Date: Wed, 24 Jun 2020 17:36:28 +0800 Subject: net: qos: add tc police offloading action with max frame size limit Current police offloading support the 'burst'' and 'rate_bytes_ps'. Some hardware own the capability to limit the frame size. If the frame size larger than the setting, the frame would be dropped. For the police action itself already accept the 'mtu' parameter in tc command. But not extend to tc flower offloading. So extend 'mtu' to tc flower offloading. Signed-off-by: Po Liu Signed-off-by: David S. Miller --- include/net/flow_offload.h | 1 + include/net/tc_act/tc_police.h | 10 ++++++++++ net/sched/cls_api.c | 1 + 3 files changed, 12 insertions(+) (limited to 'include') diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 00c15f14c434..c2ef19c6b27d 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -234,6 +234,7 @@ struct flow_action_entry { struct { /* FLOW_ACTION_POLICE */ s64 burst; u64 rate_bytes_ps; + u32 mtu; } police; struct { /* FLOW_ACTION_CT */ int action; diff --git a/include/net/tc_act/tc_police.h b/include/net/tc_act/tc_police.h index f098ad4424be..cd973b10ae8c 100644 --- a/include/net/tc_act/tc_police.h +++ b/include/net/tc_act/tc_police.h @@ -69,4 +69,14 @@ static inline s64 tcf_police_tcfp_burst(const struct tc_action *act) return params->tcfp_burst; } +static inline u32 tcf_police_tcfp_mtu(const struct tc_action *act) +{ + struct tcf_police *police = to_police(act); + struct tcf_police_params *params; + + params = rcu_dereference_protected(police->params, + lockdep_is_held(&police->tcf_lock)); + return params->tcfp_mtu; +} + #endif /* __NET_TC_POLICE_H */ diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index a00a203b2ef5..6aba7d5ba1ec 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -3658,6 +3658,7 @@ int tc_setup_flow_action(struct flow_action *flow_action, entry->police.burst = tcf_police_tcfp_burst(act); entry->police.rate_bytes_ps = tcf_police_rate_bytes_ps(act); + entry->police.mtu = tcf_police_tcfp_mtu(act); } else if (is_tcf_ct(act)) { entry->id = FLOW_ACTION_CT; entry->ct.action = tcf_ct_action(act); -- cgit v1.2.3 From 627e39b1399e72e53895eec6bbec30199ed43de2 Mon Sep 17 00:00:00 2001 From: Po Liu Date: Wed, 24 Jun 2020 17:36:30 +0800 Subject: net: qos: police action add index for tc flower offloading Hardware device may include more than one police entry. Specifying the action's index make it possible for several tc filters to share the same police action when installing the filters. Propagate this index to device drivers through the flow offload intermediate representation, so that drivers could share a single hardware policer between multiple filters. v1->v2 changes: - Update the commit message suggest by Ido Schimmel Signed-off-by: Po Liu Signed-off-by: David S. 
Miller --- include/net/flow_offload.h | 1 + net/sched/cls_api.c | 1 + 2 files changed, 2 insertions(+) (limited to 'include') diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index c2ef19c6b27d..eed98075b1ae 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -232,6 +232,7 @@ struct flow_action_entry { bool truncate; } sample; struct { /* FLOW_ACTION_POLICE */ + u32 index; s64 burst; u64 rate_bytes_ps; u32 mtu; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 6aba7d5ba1ec..fdc4c89ca1fa 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -3659,6 +3659,7 @@ int tc_setup_flow_action(struct flow_action *flow_action, entry->police.rate_bytes_ps = tcf_police_rate_bytes_ps(act); entry->police.mtu = tcf_police_tcfp_mtu(act); + entry->police.index = act->tcfa_index; } else if (is_tcf_ct(act)) { entry->id = FLOW_ACTION_CT; entry->ct.action = tcf_ct_action(act); -- cgit v1.2.3 From 5e48a03bb9bff1728164040d71aa03cdb3cdfca2 Mon Sep 17 00:00:00 2001 From: Prashant Malani Date: Wed, 24 Jun 2020 01:09:23 -0700 Subject: platform/chrome: cros_ec: Add TBT pd_ctrl fields To support Thunderbolt compatibility mode, synchronize ec_response_usb_pd_control_v2 with the Chrome EC version, so that we get the Thunderbolt related control fields and macros. Signed-off-by: Prashant Malani Signed-off-by: Enric Balletbo i Serra --- include/linux/platform_data/cros_ec_commands.h | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index a7b0fc440c35..b808570bdd04 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -4917,15 +4917,26 @@ struct ec_response_usb_pd_control_v1 { #define USBC_PD_CC_UFP_ATTACHED 4 /* UFP attached to usbc */ #define USBC_PD_CC_DFP_ATTACHED 5 /* DPF attached to usbc */ +/* Active/Passive Cable */ +#define USB_PD_CTRL_ACTIVE_CABLE BIT(0) +/* Optical/Non-optical cable */ +#define USB_PD_CTRL_OPTICAL_CABLE BIT(1) +/* 3rd Gen TBT device (or AMA)/2nd gen tbt Adapter */ +#define USB_PD_CTRL_TBT_LEGACY_ADAPTER BIT(2) +/* Active Link Uni-Direction */ +#define USB_PD_CTRL_ACTIVE_LINK_UNIDIR BIT(3) + struct ec_response_usb_pd_control_v2 { uint8_t enabled; uint8_t role; uint8_t polarity; char state[32]; - uint8_t cc_state; /* USBC_PD_CC_*Encoded cc state */ - uint8_t dp_mode; /* Current DP pin mode (MODE_DP_PIN_[A-E]) */ - /* CL:1500994 Current cable type */ - uint8_t reserved_cable_type; + uint8_t cc_state; /* enum pd_cc_states representing cc state */ + uint8_t dp_mode; /* Current DP pin mode (MODE_DP_PIN_[A-E]) */ + uint8_t reserved; /* Reserved for future use */ + uint8_t control_flags; /* USB_PD_CTRL_*flags */ + uint8_t cable_speed; /* TBT_SS_* cable speed */ + uint8_t cable_gen; /* TBT_GEN3_* cable rounded support */ } __ec_align1; #define EC_CMD_USB_PD_PORTS 0x0102 -- cgit v1.2.3 From 590d69796346353878b275c5512c664e3f875f24 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Thu, 19 Dec 2019 16:44:52 -0500 Subject: sched: Force the address order of each sched class descriptor In order to make a micro optimization in pick_next_task(), the order of the sched class descriptor address must be in the same order as their priority to each other. 
That is: &idle_sched_class < &fair_sched_class < &rt_sched_class < &dl_sched_class < &stop_sched_class In order to guarantee this order of the sched class descriptors, add each one into their own data section and force the order in the linker script. Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/157675913272.349305.8936736338884044103.stgit@localhost.localdomain --- include/asm-generic/vmlinux.lds.h | 13 +++++++++++++ kernel/sched/deadline.c | 3 ++- kernel/sched/fair.c | 3 ++- kernel/sched/idle.c | 3 ++- kernel/sched/rt.c | 3 ++- kernel/sched/stop_task.c | 3 ++- 6 files changed, 23 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index db600ef218d7..2186d7b01af6 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -108,6 +108,18 @@ #define SBSS_MAIN .sbss #endif +/* + * The order of the sched class addresses are important, as they are + * used to determine the order of the priority of each sched class in + * relation to each other. + */ +#define SCHED_DATA \ + *(__idle_sched_class) \ + *(__fair_sched_class) \ + *(__rt_sched_class) \ + *(__dl_sched_class) \ + *(__stop_sched_class) + /* * Align to a 32 byte boundary equal to the * alignment gcc 4.5 uses for a struct @@ -388,6 +400,7 @@ .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ __start_rodata = .; \ *(.rodata) *(.rodata.*) \ + SCHED_DATA \ RO_AFTER_INIT_DATA /* Read only after init */ \ . = ALIGN(8); \ __start___tracepoints_ptrs = .; \ diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index d4708e29008f..d9e79462993b 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -2479,7 +2479,8 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p, } } -const struct sched_class dl_sched_class = { +const struct sched_class dl_sched_class + __attribute__((section("__dl_sched_class"))) = { .next = &rt_sched_class, .enqueue_task = enqueue_task_dl, .dequeue_task = dequeue_task_dl, diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 0424a0af5f87..3365f6b07c36 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -11122,7 +11122,8 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task /* * All the scheduling class methods: */ -const struct sched_class fair_sched_class = { +const struct sched_class fair_sched_class + __attribute__((section("__fair_sched_class"))) = { .next = &idle_sched_class, .enqueue_task = enqueue_task_fair, .dequeue_task = dequeue_task_fair, diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index 8d75ca201484..f5806295356b 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -453,7 +453,8 @@ static void update_curr_idle(struct rq *rq) /* * Simple, special scheduling class for the per-CPU idle tasks: */ -const struct sched_class idle_sched_class = { +const struct sched_class idle_sched_class + __attribute__((section("__idle_sched_class"))) = { /* .next is NULL */ /* no enqueue/yield_task for idle tasks */ diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index f395ddb75f38..6543d4430331 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -2429,7 +2429,8 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task) return 0; } -const struct sched_class rt_sched_class = { +const struct sched_class rt_sched_class + __attribute__((section("__rt_sched_class"))) = { .next = &fair_sched_class, .enqueue_task = enqueue_task_rt, 
.dequeue_task = dequeue_task_rt, diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c index 3e50a6a8f1e5..f4bbd54caae0 100644 --- a/kernel/sched/stop_task.c +++ b/kernel/sched/stop_task.c @@ -109,7 +109,8 @@ static void update_curr_stop(struct rq *rq) /* * Simple, special scheduling class for the per-CPU stop tasks: */ -const struct sched_class stop_sched_class = { +const struct sched_class stop_sched_class + __attribute__((section("__stop_sched_class"))) = { .next = &dl_sched_class, .enqueue_task = enqueue_task_stop, -- cgit v1.2.3 From c3a340f7e7eadac7662ab104ceb16432e5a4c6b2 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Thu, 19 Dec 2019 16:44:53 -0500 Subject: sched: Have sched_class_highest define by vmlinux.lds.h Now that the sched_class descriptors are defined by the linker script, and this needs to be aware of the existance of stop_sched_class when SMP is enabled or not, as it is used as the "highest" priority when defined. Move the declaration of sched_class_highest to the same location in the linker script that inserts stop_sched_class, and this will also make it easier to see what should be defined as the highest class, as this linker script location defines the priorities as well. Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20191219214558.682913590@goodmis.org --- include/asm-generic/vmlinux.lds.h | 5 ++++- kernel/sched/core.c | 8 ++++++++ kernel/sched/sched.h | 17 +++++++++-------- 3 files changed, 21 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 2186d7b01af6..66fb84c3dc7e 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -114,11 +114,14 @@ * relation to each other. 
*/ #define SCHED_DATA \ + STRUCT_ALIGN(); \ + __begin_sched_classes = .; \ *(__idle_sched_class) \ *(__fair_sched_class) \ *(__rt_sched_class) \ *(__dl_sched_class) \ - *(__stop_sched_class) + *(__stop_sched_class) \ + __end_sched_classes = .; /* * Align to a 32 byte boundary equal to the diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 0208b71bef80..81640fe0eae8 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6646,6 +6646,14 @@ void __init sched_init(void) unsigned long ptr = 0; int i; + /* Make sure the linker didn't screw up */ + BUG_ON(&idle_sched_class + 1 != &fair_sched_class || + &fair_sched_class + 1 != &rt_sched_class || + &rt_sched_class + 1 != &dl_sched_class); +#ifdef CONFIG_SMP + BUG_ON(&dl_sched_class + 1 != &stop_sched_class); +#endif + wait_bit_init(); #ifdef CONFIG_FAIR_GROUP_SCHED diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 336887607b3d..4165c06d1d7b 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1811,7 +1811,7 @@ struct sched_class { #ifdef CONFIG_FAIR_GROUP_SCHED void (*task_change_group)(struct task_struct *p, int type); #endif -}; +} __aligned(32); /* STRUCT_ALIGN(), vmlinux.lds.h */ static inline void put_prev_task(struct rq *rq, struct task_struct *prev) { @@ -1825,17 +1825,18 @@ static inline void set_next_task(struct rq *rq, struct task_struct *next) next->sched_class->set_next_task(rq, next, false); } -#ifdef CONFIG_SMP -#define sched_class_highest (&stop_sched_class) -#else -#define sched_class_highest (&dl_sched_class) -#endif +/* Defined in include/asm-generic/vmlinux.lds.h */ +extern struct sched_class __begin_sched_classes[]; +extern struct sched_class __end_sched_classes[]; + +#define sched_class_highest (__end_sched_classes - 1) +#define sched_class_lowest (__begin_sched_classes - 1) #define for_class_range(class, _from, _to) \ - for (class = (_from); class != (_to); class = class->next) + for (class = (_from); class != (_to); class--) #define for_each_class(class) \ - for_class_range(class, sched_class_highest, NULL) + for_class_range(class, sched_class_highest, sched_class_lowest) extern const struct sched_class stop_sched_class; extern const struct sched_class dl_sched_class; -- cgit v1.2.3 From 01e377c539ca52a6c753d0fdbe93b3b8fcd66a1c Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 18 Jun 2020 21:08:10 +0200 Subject: sched/core: Remove mmdrop() definition Commit bf2c59fce4074 ("sched/core: Fix illegal RCU from offline CPUs") introduced a definition for mmdrop() but a a few lines above there is already mmdrop() defined as static inline. Remove the newly introduced mmdrop() definition. 
Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200618190810.790211-1-bigeasy@linutronix.de --- include/linux/sched/mm.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include') diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 480a4d1b7dd8..a98604ea76f1 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -49,8 +49,6 @@ static inline void mmdrop(struct mm_struct *mm) __mmdrop(mm); } -void mmdrop(struct mm_struct *mm); - /* * This has to be called after a get_task_mm()/mmget_not_zero() * followed by taking the mmap_lock for writing before modifying the -- cgit v1.2.3 From 10e834099d38dd2c02bf2bd5feaa3997cfcf139f Mon Sep 17 00:00:00 2001 From: Tzung-Bi Shih Date: Thu, 25 Jun 2020 23:35:41 +0800 Subject: ASoC: core: move definition of enum snd_soc_bias_level To fix compilation error: - error: field 'XXX' has incomplete type Moves definition of enum snd_soc_bias_level from soc.h to soc-dapm.h. Signed-off-by: Tzung-Bi Shih Link: https://lore.kernel.org/r/20200625153543.85039-2-tzungbi@google.com Signed-off-by: Mark Brown --- include/sound/soc-dapm.h | 18 ++++++++++++++++++ include/sound/soc.h | 18 ------------------ 2 files changed, 18 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index cc3dcb815282..75467f2ed405 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -376,6 +376,24 @@ struct snd_soc_dapm_widget_list; struct snd_soc_dapm_update; enum snd_soc_dapm_direction; +/* + * Bias levels + * + * @ON: Bias is fully on for audio playback and capture operations. + * @PREPARE: Prepare for audio operations. Called before DAPM switching for + * stream start and stop operations. + * @STANDBY: Low power standby state when no playback/capture operations are + * in progress. NOTE: The transition time between STANDBY and ON + * should be as fast as possible and no longer than 10ms. + * @OFF: Power Off. No restrictions on transition times. + */ +enum snd_soc_bias_level { + SND_SOC_BIAS_OFF = 0, + SND_SOC_BIAS_STANDBY = 1, + SND_SOC_BIAS_PREPARE = 2, + SND_SOC_BIAS_ON = 3, +}; + int dapm_regulator_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event); int dapm_clock_event(struct snd_soc_dapm_widget *w, diff --git a/include/sound/soc.h b/include/sound/soc.h index 33aceadebd03..6791b7570a67 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -368,24 +368,6 @@ #define SOC_ENUM_SINGLE_VIRT_DECL(name, xtexts) \ const struct soc_enum name = SOC_ENUM_SINGLE_VIRT(ARRAY_SIZE(xtexts), xtexts) -/* - * Bias levels - * - * @ON: Bias is fully on for audio playback and capture operations. - * @PREPARE: Prepare for audio operations. Called before DAPM switching for - * stream start and stop operations. - * @STANDBY: Low power standby state when no playback/capture operations are - * in progress. NOTE: The transition time between STANDBY and ON - * should be as fast as possible and no longer than 10ms. - * @OFF: Power Off. No restrictions on transition times. 
- */ -enum snd_soc_bias_level { - SND_SOC_BIAS_OFF = 0, - SND_SOC_BIAS_STANDBY = 1, - SND_SOC_BIAS_PREPARE = 2, - SND_SOC_BIAS_ON = 3, -}; - struct device_node; struct snd_jack; struct snd_soc_card; -- cgit v1.2.3 From 3d62ef4280a377bb2ccaee4e8f6c5093f5b8f9d4 Mon Sep 17 00:00:00 2001 From: Tzung-Bi Shih Date: Thu, 25 Jun 2020 23:35:42 +0800 Subject: ASoC: dapm: declare missing structure prototypes To fix compilation warnings: - struct 'snd_soc_pcm_runtime' declared inside parameter list will not be visible outside of this definition or declaration - struct 'soc_enum' declared inside parameter list will not be visible outside of this definition or declaration Declares the missing structure prototypes. Signed-off-by: Tzung-Bi Shih Link: https://lore.kernel.org/r/20200625153543.85039-3-tzungbi@google.com Signed-off-by: Mark Brown --- include/sound/soc-dapm.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index 75467f2ed405..c3039e97929a 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -16,6 +16,8 @@ #include struct device; +struct snd_soc_pcm_runtime; +struct soc_enum; /* widget has no PM register bit */ #define SND_SOC_NOPM -1 -- cgit v1.2.3 From da6690767cbd344998f36081815c85f3d467e78c Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Thu, 25 Jun 2020 17:36:05 +0100 Subject: regulator: consumer: Supply missing prototypes for 3 core functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit regulator_suspend_enable(), regulator_suspend_disable() and regulator_set_suspend_voltage() are all exported members of the API, but are all missing prototypes. Fixes the following W=1 warning(s): drivers/regulator/core.c:3805:5: warning: no previous prototype for ‘regulator_suspend_enable’ [-Wmissing-prototypes] 3805 | int regulator_suspend_enable(struct regulator_dev *rdev, | ^~~~~~~~~~~~~~~~~~~~~~~~ drivers/regulator/core.c:3812:5: warning: no previous prototype for ‘regulator_suspend_disable’ [-Wmissing-prototypes] 3812 | int regulator_suspend_disable(struct regulator_dev *rdev, | ^~~~~~~~~~~~~~~~~~~~~~~~~ drivers/regulator/core.c:3851:5: warning: no previous prototype for ‘regulator_set_suspend_voltage’ [-Wmissing-prototypes] 3851 | int regulator_set_suspend_voltage(struct regulator *regulator, int min_uV, | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Signed-off-by: Lee Jones Link: https://lore.kernel.org/r/20200625163614.4001403-2-lee.jones@linaro.org Signed-off-by: Mark Brown --- include/linux/regulator/consumer.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include') diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 6a92fd3105a3..2024944fd2f7 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -32,10 +32,12 @@ #define __LINUX_REGULATOR_CONSUMER_H_ #include +#include struct device; struct notifier_block; struct regmap; +struct regulator_dev; /* * Regulator operating modes. 
@@ -277,6 +279,14 @@ int regulator_unregister_notifier(struct regulator *regulator, void devm_regulator_unregister_notifier(struct regulator *regulator, struct notifier_block *nb); +/* regulator suspend */ +int regulator_suspend_enable(struct regulator_dev *rdev, + suspend_state_t state); +int regulator_suspend_disable(struct regulator_dev *rdev, + suspend_state_t state); +int regulator_set_suspend_voltage(struct regulator *regulator, int min_uV, + int max_uV, suspend_state_t state); + /* driver data - core doesn't touch */ void *regulator_get_drvdata(struct regulator *regulator); void regulator_set_drvdata(struct regulator *regulator, void *data); -- cgit v1.2.3 From c6d5d843d9b6e8dca3768250970f0d0a1e3d4fb0 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 24 Jun 2020 11:06:54 +0100 Subject: net: phylink: add phylink_speed_(up|down) interface Add an interface for the phy_speed_(up|down) functions when a driver makes use of phylink. These pass the call through to phylib when we have a normal PHY attached (i.o.w., not a PHY on a SFP module.) Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/phy/phylink.c | 48 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/phylink.h | 2 ++ 2 files changed, 50 insertions(+) (limited to 'include') diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 7ce787c227b3..7cda1646bbf7 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1826,6 +1826,54 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd) } EXPORT_SYMBOL_GPL(phylink_mii_ioctl); +/** + * phylink_speed_down() - set the non-SFP PHY to lowest speed supported by both + * link partners + * @pl: a pointer to a &struct phylink returned from phylink_create() + * @sync: perform action synchronously + * + * If we have a PHY that is not part of a SFP module, then set the speed + * as described in the phy_speed_down() function. Please see this function + * for a description of the @sync parameter. + * + * Returns zero if there is no PHY, otherwise as per phy_speed_down(). + */ +int phylink_speed_down(struct phylink *pl, bool sync) +{ + int ret = 0; + + ASSERT_RTNL(); + + if (!pl->sfp_bus && pl->phydev) + ret = phy_speed_down(pl->phydev, sync); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_speed_down); + +/** + * phylink_speed_up() - restore the advertised speeds prior to the call to + * phylink_speed_down() + * @pl: a pointer to a &struct phylink returned from phylink_create() + * + * If we have a PHY that is not part of a SFP module, then restore the + * PHY speeds as per phy_speed_up(). + * + * Returns zero if there is no PHY, otherwise as per phy_speed_up(). 
+ */ +int phylink_speed_up(struct phylink *pl) +{ + int ret = 0; + + ASSERT_RTNL(); + + if (!pl->sfp_bus && pl->phydev) + ret = phy_speed_up(pl->phydev); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_speed_up); + static void phylink_sfp_attach(void *upstream, struct sfp_bus *bus) { struct phylink *pl = upstream; diff --git a/include/linux/phylink.h b/include/linux/phylink.h index cc5b452a184e..b32b8b45421b 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -392,6 +392,8 @@ int phylink_init_eee(struct phylink *, bool); int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *); int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *); int phylink_mii_ioctl(struct phylink *, struct ifreq *, int); +int phylink_speed_down(struct phylink *pl, bool sync); +int phylink_speed_up(struct phylink *pl); #define phylink_zero(bm) \ bitmap_zero(bm, __ETHTOOL_LINK_MODE_MASK_NBITS) -- cgit v1.2.3 From 92252eec913b2dd5e7b5de11ea3efa2e64d65cf4 Mon Sep 17 00:00:00 2001 From: Dan Murphy Date: Wed, 24 Jun 2020 07:16:02 -0500 Subject: net: phy: Add a helper to return the index for of the internal delay Add a helper function that will return the index in the array for the passed in internal delay value. The helper requires the array, size and delay value. The helper will then return the index for the exact match or return the index for the index to the closest smaller value. Signed-off-by: Dan Murphy Signed-off-by: David S. Miller --- drivers/net/phy/phy_device.c | 99 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/phy.h | 4 ++ 2 files changed, 103 insertions(+) (limited to 'include') diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 29ef4456ac25..6d47485e68f9 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -31,6 +31,7 @@ #include #include #include +#include MODULE_DESCRIPTION("PHY library"); MODULE_AUTHOR("Andy Fleming"); @@ -2708,6 +2709,104 @@ void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause) } EXPORT_SYMBOL(phy_get_pause); +#if IS_ENABLED(CONFIG_OF_MDIO) +static int phy_get_int_delay_property(struct device *dev, const char *name) +{ + s32 int_delay; + int ret; + + ret = device_property_read_u32(dev, name, &int_delay); + if (ret) + return ret; + + return int_delay; +} +#else +static int phy_get_int_delay_property(struct device *dev, const char *name) +{ + return -EINVAL; +} +#endif + +/** + * phy_get_delay_index - returns the index of the internal delay + * @phydev: phy_device struct + * @dev: pointer to the devices device struct + * @delay_values: array of delays the PHY supports + * @size: the size of the delay array + * @is_rx: boolean to indicate to get the rx internal delay + * + * Returns the index within the array of internal delay passed in. + * If the device property is not present then the interface type is checked + * if the interface defines use of internal delay then a 1 is returned otherwise + * a 0 is returned. + * The array must be in ascending order. If PHY does not have an ascending order + * array then size = 0 and the value of the delay property is returned. + * Return -EINVAL if the delay is invalid or cannot be found. 
+ */ +s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev, + const int *delay_values, int size, bool is_rx) +{ + s32 delay; + int i; + + if (is_rx) { + delay = phy_get_int_delay_property(dev, "rx-internal-delay-ps"); + if (delay < 0 && size == 0) { + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) + return 1; + else + return 0; + } + + } else { + delay = phy_get_int_delay_property(dev, "tx-internal-delay-ps"); + if (delay < 0 && size == 0) { + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) + return 1; + else + return 0; + } + } + + if (delay < 0) + return delay; + + if (delay && size == 0) + return delay; + + if (delay < delay_values[0] || delay > delay_values[size - 1]) { + phydev_err(phydev, "Delay %d is out of range\n", delay); + return -EINVAL; + } + + if (delay == delay_values[0]) + return 0; + + for (i = 1; i < size; i++) { + if (delay == delay_values[i]) + return i; + + /* Find an approximate index by looking up the table */ + if (delay > delay_values[i - 1] && + delay < delay_values[i]) { + if (delay - delay_values[i - 1] < + delay_values[i] - delay) + return i - 1; + else + return i; + } + } + + phydev_err(phydev, "error finding internal delay index for %d\n", + delay); + + return -EINVAL; +} +EXPORT_SYMBOL(phy_get_internal_delay); + static bool phy_drv_supports_irq(struct phy_driver *phydrv) { return phydrv->config_intr && phydrv->ack_interrupt; diff --git a/include/linux/phy.h b/include/linux/phy.h index 6fb8f302978d..2c00374dc996 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1443,6 +1443,10 @@ void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx); bool phy_validate_pause(struct phy_device *phydev, struct ethtool_pauseparam *pp); void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause); + +s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev, + const int *delay_values, int size, bool is_rx); + void phy_resolve_pause(unsigned long *local_adv, unsigned long *partner_adv, bool *tx_pause, bool *rx_pause); -- cgit v1.2.3 From f3bdc62fd82ed93dbe4d049eacba310de7eb2a6a Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 17 Jun 2020 15:58:23 +0200 Subject: blktrace: Provide event for request merging Currently blk-mq does not report any event when two requests get merged in the elevator. This then results in difficult to understand sequence of events like: ... 8,0 34 1579 0.608765271 2718 I WS 215023504 + 40 [dbench] 8,0 34 1584 0.609184613 2719 A WS 215023544 + 56 <- (8,4) 2160568 8,0 34 1585 0.609184850 2719 Q WS 215023544 + 56 [dbench] 8,0 34 1586 0.609188524 2719 G WS 215023544 + 56 [dbench] 8,0 3 602 0.609684162 773 D WS 215023504 + 96 [kworker/3:1H] 8,0 34 1591 0.609843593 0 C WS 215023504 + 96 [0] and you can only guess (after quite some headscratching since the above excerpt is intermixed with a lot of other IO) that request 215023544+56 got merged to request 215023504+40. Provide proper event for request merging like we used to do in the legacy block layer. 
Signed-off-by: Jan Kara Signed-off-by: Jens Axboe --- block/blk-merge.c | 2 ++ include/trace/events/block.h | 15 +++++++++++++++ kernel/trace/blktrace.c | 10 ++++++++++ 3 files changed, 27 insertions(+) (limited to 'include') diff --git a/block/blk-merge.c b/block/blk-merge.c index f0b0bae075a0..9c9fb21584b6 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -793,6 +793,8 @@ static struct request *attempt_merge(struct request_queue *q, */ blk_account_io_merge_request(next); + trace_block_rq_merge(q, next); + /* * ownership of bio passed from next to req, return 'next' for * the caller to free diff --git a/include/trace/events/block.h b/include/trace/events/block.h index 93b114226af8..34d64ca306b1 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h @@ -211,6 +211,21 @@ DEFINE_EVENT(block_rq, block_rq_issue, TP_ARGS(q, rq) ); +/** + * block_rq_merge - merge request with another one in the elevator + * @q: queue holding operation + * @rq: block IO operation operation request + * + * Called when block operation request @rq from queue @q is merged to another + * request queued in the elevator. + */ +DEFINE_EVENT(block_rq, block_rq_merge, + + TP_PROTO(struct request_queue *q, struct request *rq), + + TP_ARGS(q, rq) +); + /** * block_bio_bounce - used bounce buffer when processing block operation * @q: queue holding the block operation diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index c086c38f4954..7ba62d68885a 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -860,6 +860,13 @@ static void blk_add_trace_rq_issue(void *ignore, blk_trace_request_get_cgid(q, rq)); } +static void blk_add_trace_rq_merge(void *ignore, + struct request_queue *q, struct request *rq) +{ + blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE, + blk_trace_request_get_cgid(q, rq)); +} + static void blk_add_trace_rq_requeue(void *ignore, struct request_queue *q, struct request *rq) @@ -1144,6 +1151,8 @@ static void blk_register_tracepoints(void) WARN_ON(ret); ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL); WARN_ON(ret); + ret = register_trace_block_rq_merge(blk_add_trace_rq_merge, NULL); + WARN_ON(ret); ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL); WARN_ON(ret); ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL); @@ -1190,6 +1199,7 @@ static void blk_unregister_tracepoints(void) unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL); unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL); unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL); + unregister_trace_block_rq_merge(blk_add_trace_rq_merge, NULL); unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL); unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL); -- cgit v1.2.3 From d037cb4ae20407df89491f9c2d521ac0723aa15d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 18 Jun 2020 17:00:22 +1000 Subject: crypto: api - Prune inclusions in crypto.h We haven't used string.h since the memcpy calls were removed so this patch removes its inclusion. The file uaccess.h isn't needed at all. However, removing it reveals that we do need to add an inclusion for refcount.h. 
Signed-off-by: Herbert Xu --- include/linux/crypto.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 763863dbc079..bc5d2d4bfc3d 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -16,9 +16,8 @@ #include #include #include +#include #include -#include -#include #include /* -- cgit v1.2.3 From fe21b6c3a65ca74cc4d0909635649bea48b115b3 Mon Sep 17 00:00:00 2001 From: Shiraz Saleem Date: Mon, 4 May 2020 09:43:48 -0700 Subject: i40e: Move client header location Move i40e_client.h to include/linux/net/intel/* since its shared between i40iw and i40e. Signed-off-by: Shiraz Saleem Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/infiniband/hw/i40iw/Makefile | 1 - drivers/infiniband/hw/i40iw/i40iw.h | 2 +- drivers/net/ethernet/intel/i40e/i40e.h | 2 +- drivers/net/ethernet/intel/i40e/i40e_client.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_client.h | 203 -------------------------- include/linux/net/intel/i40e_client.h | 203 ++++++++++++++++++++++++++ 6 files changed, 206 insertions(+), 207 deletions(-) delete mode 100644 drivers/net/ethernet/intel/i40e/i40e_client.h create mode 100644 include/linux/net/intel/i40e_client.h (limited to 'include') diff --git a/drivers/infiniband/hw/i40iw/Makefile b/drivers/infiniband/hw/i40iw/Makefile index 8942f8229945..34da9eba8a7c 100644 --- a/drivers/infiniband/hw/i40iw/Makefile +++ b/drivers/infiniband/hw/i40iw/Makefile @@ -1,5 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -ccflags-y := -I $(srctree)/drivers/net/ethernet/intel/i40e obj-$(CONFIG_INFINIBAND_I40IW) += i40iw.o diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h index 49d92638e0db..25747b85a79c 100644 --- a/drivers/infiniband/hw/i40iw/i40iw.h +++ b/drivers/infiniband/hw/i40iw/i40iw.h @@ -45,6 +45,7 @@ #include #include #include +#include #include #include #include @@ -57,7 +58,6 @@ #include "i40iw_d.h" #include "i40iw_hmc.h" -#include #include "i40iw_type.h" #include "i40iw_p.h" #include diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index e95b8da45e07..5ff0828a6f50 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -38,7 +38,7 @@ #include #include "i40e_type.h" #include "i40e_prototype.h" -#include "i40e_client.h" +#include #include #include "i40e_virtchnl_pf.h" #include "i40e_txrx.h" diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index e81530ca08d0..befd3018183f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -3,10 +3,10 @@ #include #include +#include #include "i40e.h" #include "i40e_prototype.h" -#include "i40e_client.h" static const char i40e_client_interface_version_str[] = I40E_CLIENT_VERSION_STR; static struct i40e_client *registered_client; diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h deleted file mode 100644 index 72994baf4941..000000000000 --- a/drivers/net/ethernet/intel/i40e/i40e_client.h +++ /dev/null @@ -1,203 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40E_CLIENT_H_ -#define _I40E_CLIENT_H_ - -#define I40E_CLIENT_STR_LENGTH 10 - -/* Client interface version should be updated anytime there is a change in the - * existing APIs or data structures. 
- */ -#define I40E_CLIENT_VERSION_MAJOR 0 -#define I40E_CLIENT_VERSION_MINOR 01 -#define I40E_CLIENT_VERSION_BUILD 00 -#define I40E_CLIENT_VERSION_STR \ - __stringify(I40E_CLIENT_VERSION_MAJOR) "." \ - __stringify(I40E_CLIENT_VERSION_MINOR) "." \ - __stringify(I40E_CLIENT_VERSION_BUILD) - -struct i40e_client_version { - u8 major; - u8 minor; - u8 build; - u8 rsvd; -}; - -enum i40e_client_state { - __I40E_CLIENT_NULL, - __I40E_CLIENT_REGISTERED -}; - -enum i40e_client_instance_state { - __I40E_CLIENT_INSTANCE_NONE, - __I40E_CLIENT_INSTANCE_OPENED, -}; - -struct i40e_ops; -struct i40e_client; - -/* HW does not define a type value for AEQ; only for RX/TX and CEQ. - * In order for us to keep the interface simple, SW will define a - * unique type value for AEQ. - */ -#define I40E_QUEUE_TYPE_PE_AEQ 0x80 -#define I40E_QUEUE_INVALID_IDX 0xFFFF - -struct i40e_qv_info { - u32 v_idx; /* msix_vector */ - u16 ceq_idx; - u16 aeq_idx; - u8 itr_idx; -}; - -struct i40e_qvlist_info { - u32 num_vectors; - struct i40e_qv_info qv_info[1]; -}; - -#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF - -/* set of LAN parameters useful for clients managed by LAN */ - -/* Struct to hold per priority info */ -struct i40e_prio_qos_params { - u16 qs_handle; /* qs handle for prio */ - u8 tc; /* TC mapped to prio */ - u8 reserved; -}; - -#define I40E_CLIENT_MAX_USER_PRIORITY 8 -/* Struct to hold Client QoS */ -struct i40e_qos_params { - struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY]; -}; - -struct i40e_params { - struct i40e_qos_params qos; - u16 mtu; -}; - -/* Structure to hold Lan device info for a client device */ -struct i40e_info { - struct i40e_client_version version; - u8 lanmac[6]; - struct net_device *netdev; - struct pci_dev *pcidev; - u8 __iomem *hw_addr; - u8 fid; /* function id, PF id or VF id */ -#define I40E_CLIENT_FTYPE_PF 0 -#define I40E_CLIENT_FTYPE_VF 1 - u8 ftype; /* function type, PF or VF */ - void *pf; - - /* All L2 params that could change during the life span of the PF - * and needs to be communicated to the client when they change - */ - struct i40e_qvlist_info *qvlist_info; - struct i40e_params params; - struct i40e_ops *ops; - - u16 msix_count; /* number of msix vectors*/ - /* Array down below will be dynamically allocated based on msix_count */ - struct msix_entry *msix_entries; - u16 itr_index; /* Which ITR index the PE driver is suppose to use */ - u16 fw_maj_ver; /* firmware major version */ - u16 fw_min_ver; /* firmware minor version */ - u32 fw_build; /* firmware build number */ -}; - -#define I40E_CLIENT_RESET_LEVEL_PF 1 -#define I40E_CLIENT_RESET_LEVEL_CORE 2 -#define I40E_CLIENT_VSI_FLAG_TCP_ENABLE BIT(1) - -struct i40e_ops { - /* setup_q_vector_list enables queues with a particular vector */ - int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client, - struct i40e_qvlist_info *qv_info); - - int (*virtchnl_send)(struct i40e_info *ldev, struct i40e_client *client, - u32 vf_id, u8 *msg, u16 len); - - /* If the PE Engine is unresponsive, RDMA driver can request a reset. - * The level helps determine the level of reset being requested. - */ - void (*request_reset)(struct i40e_info *ldev, - struct i40e_client *client, u32 level); - - /* API for the RDMA driver to set certain VSI flags that control - * PE Engine. 
- */ - int (*update_vsi_ctxt)(struct i40e_info *ldev, - struct i40e_client *client, - bool is_vf, u32 vf_id, - u32 flag, u32 valid_flag); -}; - -struct i40e_client_ops { - /* Should be called from register_client() or whenever PF is ready - * to create a specific client instance. - */ - int (*open)(struct i40e_info *ldev, struct i40e_client *client); - - /* Should be called when netdev is unavailable or when unregister - * call comes in. If the close is happenening due to a reset being - * triggered set the reset bit to true. - */ - void (*close)(struct i40e_info *ldev, struct i40e_client *client, - bool reset); - - /* called when some l2 managed parameters changes - mtu */ - void (*l2_param_change)(struct i40e_info *ldev, - struct i40e_client *client, - struct i40e_params *params); - - int (*virtchnl_receive)(struct i40e_info *ldev, - struct i40e_client *client, u32 vf_id, - u8 *msg, u16 len); - - /* called when a VF is reset by the PF */ - void (*vf_reset)(struct i40e_info *ldev, - struct i40e_client *client, u32 vf_id); - - /* called when the number of VFs changes */ - void (*vf_enable)(struct i40e_info *ldev, - struct i40e_client *client, u32 num_vfs); - - /* returns true if VF is capable of specified offload */ - int (*vf_capable)(struct i40e_info *ldev, - struct i40e_client *client, u32 vf_id); -}; - -/* Client device */ -struct i40e_client_instance { - struct list_head list; - struct i40e_info lan_info; - struct i40e_client *client; - unsigned long state; -}; - -struct i40e_client { - struct list_head list; /* list of registered clients */ - char name[I40E_CLIENT_STR_LENGTH]; - struct i40e_client_version version; - unsigned long state; /* client state */ - atomic_t ref_cnt; /* Count of all the client devices of this kind */ - u32 flags; -#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0) -#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2) - u8 type; -#define I40E_CLIENT_IWARP 0 - const struct i40e_client_ops *ops; /* client ops provided by the client */ -}; - -static inline bool i40e_client_is_registered(struct i40e_client *client) -{ - return test_bit(__I40E_CLIENT_REGISTERED, &client->state); -} - -/* used by clients */ -int i40e_register_client(struct i40e_client *client); -int i40e_unregister_client(struct i40e_client *client); - -#endif /* _I40E_CLIENT_H_ */ diff --git a/include/linux/net/intel/i40e_client.h b/include/linux/net/intel/i40e_client.h new file mode 100644 index 000000000000..72994baf4941 --- /dev/null +++ b/include/linux/net/intel/i40e_client.h @@ -0,0 +1,203 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +#ifndef _I40E_CLIENT_H_ +#define _I40E_CLIENT_H_ + +#define I40E_CLIENT_STR_LENGTH 10 + +/* Client interface version should be updated anytime there is a change in the + * existing APIs or data structures. + */ +#define I40E_CLIENT_VERSION_MAJOR 0 +#define I40E_CLIENT_VERSION_MINOR 01 +#define I40E_CLIENT_VERSION_BUILD 00 +#define I40E_CLIENT_VERSION_STR \ + __stringify(I40E_CLIENT_VERSION_MAJOR) "." \ + __stringify(I40E_CLIENT_VERSION_MINOR) "." \ + __stringify(I40E_CLIENT_VERSION_BUILD) + +struct i40e_client_version { + u8 major; + u8 minor; + u8 build; + u8 rsvd; +}; + +enum i40e_client_state { + __I40E_CLIENT_NULL, + __I40E_CLIENT_REGISTERED +}; + +enum i40e_client_instance_state { + __I40E_CLIENT_INSTANCE_NONE, + __I40E_CLIENT_INSTANCE_OPENED, +}; + +struct i40e_ops; +struct i40e_client; + +/* HW does not define a type value for AEQ; only for RX/TX and CEQ. 
+ * In order for us to keep the interface simple, SW will define a + * unique type value for AEQ. + */ +#define I40E_QUEUE_TYPE_PE_AEQ 0x80 +#define I40E_QUEUE_INVALID_IDX 0xFFFF + +struct i40e_qv_info { + u32 v_idx; /* msix_vector */ + u16 ceq_idx; + u16 aeq_idx; + u8 itr_idx; +}; + +struct i40e_qvlist_info { + u32 num_vectors; + struct i40e_qv_info qv_info[1]; +}; + +#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF + +/* set of LAN parameters useful for clients managed by LAN */ + +/* Struct to hold per priority info */ +struct i40e_prio_qos_params { + u16 qs_handle; /* qs handle for prio */ + u8 tc; /* TC mapped to prio */ + u8 reserved; +}; + +#define I40E_CLIENT_MAX_USER_PRIORITY 8 +/* Struct to hold Client QoS */ +struct i40e_qos_params { + struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY]; +}; + +struct i40e_params { + struct i40e_qos_params qos; + u16 mtu; +}; + +/* Structure to hold Lan device info for a client device */ +struct i40e_info { + struct i40e_client_version version; + u8 lanmac[6]; + struct net_device *netdev; + struct pci_dev *pcidev; + u8 __iomem *hw_addr; + u8 fid; /* function id, PF id or VF id */ +#define I40E_CLIENT_FTYPE_PF 0 +#define I40E_CLIENT_FTYPE_VF 1 + u8 ftype; /* function type, PF or VF */ + void *pf; + + /* All L2 params that could change during the life span of the PF + * and needs to be communicated to the client when they change + */ + struct i40e_qvlist_info *qvlist_info; + struct i40e_params params; + struct i40e_ops *ops; + + u16 msix_count; /* number of msix vectors*/ + /* Array down below will be dynamically allocated based on msix_count */ + struct msix_entry *msix_entries; + u16 itr_index; /* Which ITR index the PE driver is suppose to use */ + u16 fw_maj_ver; /* firmware major version */ + u16 fw_min_ver; /* firmware minor version */ + u32 fw_build; /* firmware build number */ +}; + +#define I40E_CLIENT_RESET_LEVEL_PF 1 +#define I40E_CLIENT_RESET_LEVEL_CORE 2 +#define I40E_CLIENT_VSI_FLAG_TCP_ENABLE BIT(1) + +struct i40e_ops { + /* setup_q_vector_list enables queues with a particular vector */ + int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client, + struct i40e_qvlist_info *qv_info); + + int (*virtchnl_send)(struct i40e_info *ldev, struct i40e_client *client, + u32 vf_id, u8 *msg, u16 len); + + /* If the PE Engine is unresponsive, RDMA driver can request a reset. + * The level helps determine the level of reset being requested. + */ + void (*request_reset)(struct i40e_info *ldev, + struct i40e_client *client, u32 level); + + /* API for the RDMA driver to set certain VSI flags that control + * PE Engine. + */ + int (*update_vsi_ctxt)(struct i40e_info *ldev, + struct i40e_client *client, + bool is_vf, u32 vf_id, + u32 flag, u32 valid_flag); +}; + +struct i40e_client_ops { + /* Should be called from register_client() or whenever PF is ready + * to create a specific client instance. + */ + int (*open)(struct i40e_info *ldev, struct i40e_client *client); + + /* Should be called when netdev is unavailable or when unregister + * call comes in. If the close is happenening due to a reset being + * triggered set the reset bit to true. 
+ */ + void (*close)(struct i40e_info *ldev, struct i40e_client *client, + bool reset); + + /* called when some l2 managed parameters changes - mtu */ + void (*l2_param_change)(struct i40e_info *ldev, + struct i40e_client *client, + struct i40e_params *params); + + int (*virtchnl_receive)(struct i40e_info *ldev, + struct i40e_client *client, u32 vf_id, + u8 *msg, u16 len); + + /* called when a VF is reset by the PF */ + void (*vf_reset)(struct i40e_info *ldev, + struct i40e_client *client, u32 vf_id); + + /* called when the number of VFs changes */ + void (*vf_enable)(struct i40e_info *ldev, + struct i40e_client *client, u32 num_vfs); + + /* returns true if VF is capable of specified offload */ + int (*vf_capable)(struct i40e_info *ldev, + struct i40e_client *client, u32 vf_id); +}; + +/* Client device */ +struct i40e_client_instance { + struct list_head list; + struct i40e_info lan_info; + struct i40e_client *client; + unsigned long state; +}; + +struct i40e_client { + struct list_head list; /* list of registered clients */ + char name[I40E_CLIENT_STR_LENGTH]; + struct i40e_client_version version; + unsigned long state; /* client state */ + atomic_t ref_cnt; /* Count of all the client devices of this kind */ + u32 flags; +#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0) +#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2) + u8 type; +#define I40E_CLIENT_IWARP 0 + const struct i40e_client_ops *ops; /* client ops provided by the client */ +}; + +static inline bool i40e_client_is_registered(struct i40e_client *client) +{ + return test_bit(__I40E_CLIENT_REGISTERED, &client->state); +} + +/* used by clients */ +int i40e_register_client(struct i40e_client *client); +int i40e_unregister_client(struct i40e_client *client); + +#endif /* _I40E_CLIENT_H_ */ -- cgit v1.2.3 From 3c98f9ee6bc280499cbcb6f8e42c001c3bd7caa1 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Mon, 6 Jan 2020 16:09:33 -0800 Subject: i40e: remove unused defines Remove all the unused defines as they are just dead weight. Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 24 - drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 497 +-- drivers/net/ethernet/intel/i40e/i40e_common.c | 4 - drivers/net/ethernet/intel/i40e/i40e_dcb.h | 5 - drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 1 - drivers/net/ethernet/intel/i40e/i40e_devids.h | 3 - drivers/net/ethernet/intel/i40e/i40e_hmc.h | 1 - drivers/net/ethernet/intel/i40e/i40e_main.c | 10 +- drivers/net/ethernet/intel/i40e/i40e_osdep.h | 1 - drivers/net/ethernet/intel/i40e/i40e_register.h | 4656 -------------------- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 25 - drivers/net/ethernet/intel/i40e/i40e_type.h | 82 - drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 1 - include/linux/net/intel/i40e_client.h | 9 - 14 files changed, 2 insertions(+), 5317 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 5ff0828a6f50..8151671e5e0e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -60,17 +60,14 @@ (((pf)->hw_features & I40E_HW_RSS_AQ_CAPABLE) ? 4 : 1) #define I40E_DEFAULT_QUEUES_PER_VF 4 #define I40E_MAX_VF_QUEUES 16 -#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */ #define i40e_pf_get_max_q_per_tc(pf) \ (((pf)->hw_features & I40E_HW_128_QP_RSS_CAPABLE) ? 
128 : 64) -#define I40E_FDIR_RING 0 #define I40E_FDIR_RING_COUNT 32 #define I40E_MAX_AQ_BUF_SIZE 4096 #define I40E_AQ_LEN 256 #define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */ #define I40E_MAX_USER_PRIORITY 8 #define I40E_DEFAULT_TRAFFIC_CLASS BIT(0) -#define I40E_DEFAULT_MSG_ENABLE 4 #define I40E_QUEUE_WAIT_RETRY_LIMIT 10 #define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16) @@ -92,10 +89,6 @@ #define I40E_OEM_SNAP_SHIFT 16 #define I40E_OEM_RELEASE_MASK 0x0000ffff -/* The values in here are decimal coded as hex as is the case in the NVM map*/ -#define I40E_CURRENT_NVM_VERSION_HI 0x2 -#define I40E_CURRENT_NVM_VERSION_LO 0x40 - #define I40E_RX_DESC(R, i) \ (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])) #define I40E_TX_DESC(R, i) \ @@ -105,9 +98,6 @@ #define I40E_TX_FDIRDESC(R, i) \ (&(((struct i40e_filter_program_desc *)((R)->desc))[i])) -/* default to trying for four seconds */ -#define I40E_TRY_LINK_TIMEOUT (4 * HZ) - /* BW rate limiting */ #define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */ #define I40E_BW_MBPS_DIVISOR 125000 /* rate / (1000000 / 8) Mbps */ @@ -295,9 +285,6 @@ struct i40e_cloud_filter { u8 tunnel_type; }; -#define I40E_DCB_PRIO_TYPE_STRICT 0 -#define I40E_DCB_PRIO_TYPE_ETS 1 -#define I40E_DCB_STRICT_PRIO_CREDITS 127 /* DCB per TC information data structure */ struct i40e_tc_info { u16 qoffset; /* Queue offset from base queue */ @@ -357,15 +344,6 @@ struct i40e_ddp_old_profile_list { I40E_FLEX_SET_FSIZE(fsize) | \ I40E_FLEX_SET_SRC_WORD(src)) -#define I40E_FLEX_PIT_GET_SRC(flex) (((flex) & \ - I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) >> \ - I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) -#define I40E_FLEX_PIT_GET_DST(flex) (((flex) & \ - I40E_PRTQF_FLX_PIT_DEST_OFF_MASK) >> \ - I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) -#define I40E_FLEX_PIT_GET_FSIZE(flex) (((flex) & \ - I40E_PRTQF_FLX_PIT_FSIZE_MASK) >> \ - I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) #define I40E_MAX_FLEX_SRC_OFFSET 0x1F @@ -390,7 +368,6 @@ struct i40e_ddp_old_profile_list { #define I40E_L4_GLQF_ORT_IDX 35 /* Flex PIT register index */ -#define I40E_FLEX_PIT_IDX_START_L2 0 #define I40E_FLEX_PIT_IDX_START_L3 3 #define I40E_FLEX_PIT_IDX_START_L4 6 @@ -531,7 +508,6 @@ struct i40e_pf { #define I40E_HW_GENEVE_OFFLOAD_CAPABLE BIT(9) #define I40E_HW_PTP_L4_CAPABLE BIT(10) #define I40E_HW_WOL_MC_MAGIC_PKT_WAKE BIT(11) -#define I40E_HW_MPLS_HDR_OFFLOAD_CAPABLE BIT(12) #define I40E_HW_HAVE_CRT_RETIMER BIT(13) #define I40E_HW_OUTER_UDP_CSUM_CAPABLE BIT(14) #define I40E_HW_PHY_CONTROLS_LEDS BIT(15) diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index aa5f1c0aa721..c52910f03cfb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -55,29 +55,17 @@ struct i40e_aq_desc { */ /* command flags and offsets*/ -#define I40E_AQ_FLAG_DD_SHIFT 0 -#define I40E_AQ_FLAG_CMP_SHIFT 1 #define I40E_AQ_FLAG_ERR_SHIFT 2 -#define I40E_AQ_FLAG_VFE_SHIFT 3 #define I40E_AQ_FLAG_LB_SHIFT 9 #define I40E_AQ_FLAG_RD_SHIFT 10 -#define I40E_AQ_FLAG_VFC_SHIFT 11 #define I40E_AQ_FLAG_BUF_SHIFT 12 #define I40E_AQ_FLAG_SI_SHIFT 13 -#define I40E_AQ_FLAG_EI_SHIFT 14 -#define I40E_AQ_FLAG_FE_SHIFT 15 -#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ -#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ #define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ -#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ #define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ #define 
I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ -#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ #define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ #define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ -#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ -#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ /* error codes */ enum i40e_admin_queue_err { @@ -362,13 +350,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context); /* Request resource ownership (direct 0x0008) * Release resource ownership (direct 0x0009) */ -#define I40E_AQ_RESOURCE_NVM 1 -#define I40E_AQ_RESOURCE_SDP 2 -#define I40E_AQ_RESOURCE_ACCESS_READ 1 -#define I40E_AQ_RESOURCE_ACCESS_WRITE 2 -#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 -#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 - struct i40e_aqc_request_resource { __le16 resource_id; __le16 access_type; @@ -384,7 +365,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); */ struct i40e_aqc_list_capabilites { u8 command_flags; -#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 u8 pf_index; u8 reserved[2]; __le32 count; @@ -411,8 +391,6 @@ struct i40e_aqc_list_capabilities_element_resp { #define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 #define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 #define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 -#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 -#define I40E_AQ_CAP_ID_WOL_AND_PROXY 0x0008 #define I40E_AQ_CAP_ID_SRIOV 0x0012 #define I40E_AQ_CAP_ID_VF 0x0013 #define I40E_AQ_CAP_ID_VMDQ 0x0014 @@ -441,11 +419,6 @@ struct i40e_aqc_list_capabilities_element_resp { /* Set CPPM Configuration (direct 0x0103) */ struct i40e_aqc_cppm_configuration { __le16 command_flags; -#define I40E_AQ_CPPM_EN_LTRC 0x0800 -#define I40E_AQ_CPPM_EN_DMCTH 0x1000 -#define I40E_AQ_CPPM_EN_DMCTLX 0x2000 -#define I40E_AQ_CPPM_EN_HPTC 0x4000 -#define I40E_AQ_CPPM_EN_DMARC 0x8000 __le16 ttlx; __le32 dmacr; __le16 dmcth; @@ -459,15 +432,8 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); /* Set ARP Proxy command / response (indirect 0x0104) */ struct i40e_aqc_arp_proxy_data { __le16 command_flags; -#define I40E_AQ_ARP_INIT_IPV4 0x0800 -#define I40E_AQ_ARP_UNSUP_CTL 0x1000 -#define I40E_AQ_ARP_ENA 0x2000 -#define I40E_AQ_ARP_ADD_IPV4 0x4000 -#define I40E_AQ_ARP_DEL_IPV4 0x8000 __le16 table_id; __le32 enabled_offloads; -#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020 -#define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800 __le32 ip_addr; u8 mac_addr[6]; u8 reserved[2]; @@ -482,19 +448,6 @@ struct i40e_aqc_ns_proxy_data { __le16 table_idx_ipv6_0; __le16 table_idx_ipv6_1; __le16 control; -#define I40E_AQ_NS_PROXY_ADD_0 0x0001 -#define I40E_AQ_NS_PROXY_DEL_0 0x0002 -#define I40E_AQ_NS_PROXY_ADD_1 0x0004 -#define I40E_AQ_NS_PROXY_DEL_1 0x0008 -#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010 -#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020 -#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040 -#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080 -#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100 -#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200 -#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400 -#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800 -#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000 u8 mac_addr_0[6]; u8 mac_addr_1[6]; u8 local_mac_addr[6]; @@ -507,7 +460,6 @@ I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data); /* Manage LAA Command (0x0106) - obsolete */ struct i40e_aqc_mng_laa { __le16 command_flags; -#define I40E_AQ_LAA_FLAG_WR 0x8000 u8 reserved[2]; __le32 sal; __le16 sah; @@ -520,11 +472,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa); struct 
i40e_aqc_mac_address_read { __le16 command_flags; #define I40E_AQC_LAN_ADDR_VALID 0x10 -#define I40E_AQC_SAN_ADDR_VALID 0x20 #define I40E_AQC_PORT_ADDR_VALID 0x40 -#define I40E_AQC_WOL_ADDR_VALID 0x80 -#define I40E_AQC_MC_MAG_EN_VALID 0x100 -#define I40E_AQC_ADDR_VALID_MASK 0x3F0 u8 reserved[6]; __le32 addr_high; __le32 addr_low; @@ -548,9 +496,7 @@ struct i40e_aqc_mac_address_write { #define I40E_AQC_WOL_PRESERVE_ON_PFR 0x0200 #define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 #define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 -#define I40E_AQC_WRITE_TYPE_PORT 0x8000 #define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000 -#define I40E_AQC_WRITE_TYPE_MASK 0xC000 __le16 mac_sah; __le32 mac_sal; @@ -573,22 +519,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); struct i40e_aqc_set_wol_filter { __le16 filter_index; -#define I40E_AQC_MAX_NUM_WOL_FILTERS 8 -#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15 -#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \ - I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT) - -#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0 -#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \ - I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT) + __le16 cmd_flags; -#define I40E_AQC_SET_WOL_FILTER 0x8000 -#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000 -#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0 -#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1 __le16 valid_flags; -#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000 -#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000 u8 reserved[2]; __le32 address_high; __le32 address_low; @@ -608,12 +541,6 @@ I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data); struct i40e_aqc_get_wake_reason_completion { u8 reserved_1[2]; __le16 wake_reason; -#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0 -#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \ - I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT) -#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8 -#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \ - I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT) u8 reserved_2[12]; }; @@ -646,25 +573,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp); struct i40e_aqc_switch_config_element_resp { u8 element_type; -#define I40E_AQ_SW_ELEM_TYPE_MAC 1 -#define I40E_AQ_SW_ELEM_TYPE_PF 2 -#define I40E_AQ_SW_ELEM_TYPE_VF 3 -#define I40E_AQ_SW_ELEM_TYPE_EMP 4 -#define I40E_AQ_SW_ELEM_TYPE_BMC 5 -#define I40E_AQ_SW_ELEM_TYPE_PV 16 -#define I40E_AQ_SW_ELEM_TYPE_VEB 17 -#define I40E_AQ_SW_ELEM_TYPE_PA 18 -#define I40E_AQ_SW_ELEM_TYPE_VSI 19 u8 revision; -#define I40E_AQ_SW_ELEM_REV_1 1 __le16 seid; __le16 uplink_seid; __le16 downlink_seid; u8 reserved[3]; u8 connection_type; -#define I40E_AQ_CONN_TYPE_REGULAR 0x1 -#define I40E_AQ_CONN_TYPE_DEFAULT 0x2 -#define I40E_AQ_CONN_TYPE_CASCADED 0x3 __le16 scheduler_id; __le16 element_info; }; @@ -697,12 +611,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics); /* Set Port Parameters command (direct 0x0203) */ struct i40e_aqc_set_port_parameters { __le16 command_flags; -#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 -#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! 
*/ -#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 __le16 bad_frame_vsi; -#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0 -#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF __le16 default_seid; /* reserved for command */ u8 reserved[10]; }; @@ -722,25 +631,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc); /* expect an array of these structs in the response buffer */ struct i40e_aqc_switch_resource_alloc_element_resp { u8 resource_type; -#define I40E_AQ_RESOURCE_TYPE_VEB 0x0 -#define I40E_AQ_RESOURCE_TYPE_VSI 0x1 -#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 -#define I40E_AQ_RESOURCE_TYPE_STAG 0x3 -#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 -#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 -#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 -#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 -#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 -#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 -#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA -#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB -#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC -#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD -#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF -#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 -#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 -#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 -#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 u8 reserved1; __le16 guaranteed; __le16 total; @@ -756,7 +646,6 @@ struct i40e_aqc_set_switch_config { __le16 flags; /* flags used for both fields below */ #define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001 -#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002 __le16 valid_flags; /* The ethertype in switch_tag is dropped on ingress and used * internally by the switch. Set this to zero for the default @@ -789,17 +678,10 @@ struct i40e_aqc_set_switch_config { */ #define I40E_AQ_SET_SWITCH_BIT7_VALID 0x80 -#define I40E_AQ_SET_SWITCH_L4_SRC_PORT 0x40 -#define I40E_AQ_SET_SWITCH_L4_TYPE_RSVD 0x00 #define I40E_AQ_SET_SWITCH_L4_TYPE_TCP 0x10 -#define I40E_AQ_SET_SWITCH_L4_TYPE_UDP 0x20 -#define I40E_AQ_SET_SWITCH_L4_TYPE_BOTH 0x30 -#define I40E_AQ_SET_SWITCH_MODE_DEFAULT 0x00 -#define I40E_AQ_SET_SWITCH_MODE_L4_PORT 0x01 #define I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL 0x02 -#define I40E_AQ_SET_SWITCH_MODE_TUNNEL 0x03 u8 mode; u8 rsvd5[5]; }; @@ -834,19 +716,13 @@ struct i40e_aqc_add_get_update_vsi { __le16 uplink_seid; u8 connection_type; #define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 -#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 -#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 u8 reserved1; u8 vf_id; u8 reserved2; __le16 vsi_flags; -#define I40E_AQ_VSI_TYPE_SHIFT 0x0 -#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) #define I40E_AQ_VSI_TYPE_VF 0x0 #define I40E_AQ_VSI_TYPE_VMDQ2 0x1 #define I40E_AQ_VSI_TYPE_PF 0x2 -#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 -#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 __le32 addr_high; __le32 addr_low; }; @@ -870,24 +746,18 @@ struct i40e_aqc_vsi_properties_data { #define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 #define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 #define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 -#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 -#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 -#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 #define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 #define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 -#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 #define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 /* switch section */ __le16 switch_id; /* 12bit id combined with flags below */ #define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 #define I40E_AQ_VSI_SW_ID_MASK (0xFFF << 
I40E_AQ_VSI_SW_ID_SHIFT) -#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 #define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 #define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 u8 sw_reserved[2]; /* security section */ u8 sec_flags; -#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 #define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 #define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 u8 sec_reserved; @@ -899,78 +769,33 @@ struct i40e_aqc_vsi_properties_data { #define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ I40E_AQ_VSI_PVLAN_MODE_SHIFT) #define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 -#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 #define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 #define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 #define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 #define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ I40E_AQ_VSI_PVLAN_EMOD_SHIFT) #define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 -#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 #define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 #define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 u8 pvlan_reserved[3]; /* ingress egress up sections */ __le32 ingress_table; /* bitmap, 3 bits per up */ -#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 -#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 -#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 -#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 -#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 -#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 -#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 -#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 -#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) __le32 egress_table; /* same defines as for ingress table */ /* cascaded PV section */ __le16 cas_pv_tag; u8 cas_pv_flags; -#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 -#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ - I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) -#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 -#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 -#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 -#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 -#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 -#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 u8 cas_pv_reserved; /* queue mapping section */ __le16 mapping_flags; #define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 #define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 __le16 queue_mapping[16]; -#define I40E_AQ_VSI_QUEUE_SHIFT 0x0 -#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) __le16 tc_mapping[8]; #define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 -#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ - I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) #define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 -#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ - I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) /* queueing option section */ u8 queueing_opt_flags; -#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04 -#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08 #define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 -#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 -#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00 #define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40 u8 queueing_opt_reserved[3]; /* scheduler section 
*/ @@ -995,10 +820,6 @@ I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); */ struct i40e_aqc_add_update_pv { __le16 command_flags; -#define I40E_AQC_PV_FLAG_PV_TYPE 0x1 -#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 -#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 -#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 __le16 uplink_seid; __le16 connected_seid; u8 reserved[10]; @@ -1009,10 +830,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv); struct i40e_aqc_add_update_pv_completion { /* reserved for update; for add also encodes error if rc == ENOSPC */ __le16 pv_seid; -#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 -#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 -#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 -#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 u8 reserved[14]; }; @@ -1026,9 +843,6 @@ struct i40e_aqc_get_pv_params_completion { __le16 seid; __le16 default_stag; __le16 pv_flags; /* same flags as add_pv */ -#define I40E_AQC_GET_PV_PV_TYPE 0x1 -#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 -#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 u8 reserved[8]; __le16 default_port_seid; }; @@ -1041,12 +855,8 @@ struct i40e_aqc_add_veb { __le16 downlink_seid; __le16 veb_flags; #define I40E_AQC_ADD_VEB_FLOATING 0x1 -#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 -#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ - I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) #define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 #define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 -#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 /* deprecated */ #define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS 0x10 u8 enable_tcs; u8 reserved[9]; @@ -1059,10 +869,6 @@ struct i40e_aqc_add_veb_completion { __le16 switch_seid; /* also encodes error if rc == ENOSPC; codes are the same as add_pv */ __le16 veb_seid; -#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 -#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 -#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 -#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 __le16 statistic_index; __le16 vebs_used; __le16 vebs_free; @@ -1095,9 +901,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); struct i40e_aqc_macvlan { __le16 num_addresses; __le16 seid[3]; -#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) #define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 __le32 addr_high; __le32 addr_low; @@ -1111,18 +914,11 @@ struct i40e_aqc_add_macvlan_element_data { __le16 vlan_tag; __le16 flags; #define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 -#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 #define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 -#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 #define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC 0x0010 __le16 queue_number; -#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 -#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ - I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) /* response section */ u8 match_method; -#define I40E_AQC_MM_PERFECT_MATCH 0x01 -#define I40E_AQC_MM_HASH_MATCH 0x02 #define I40E_AQC_MM_ERR_NO_RES 0xFF u8 reserved1[3]; }; @@ -1148,14 +944,10 @@ struct i40e_aqc_remove_macvlan_element_data { __le16 vlan_tag; u8 flags; #define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 -#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 #define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 -#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 u8 reserved[3]; /* reply section */ u8 error_code; -#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 -#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF u8 reply_reserved[3]; }; @@ -1166,30 +958,8 @@ struct i40e_aqc_remove_macvlan_element_data { struct 
i40e_aqc_add_remove_vlan_element_data { __le16 vlan_tag; u8 vlan_flags; -/* flags for add VLAN */ -#define I40E_AQC_ADD_VLAN_LOCAL 0x1 -#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 -#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT) -#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 -#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 -#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 -#define I40E_AQC_VLAN_PTYPE_SHIFT 3 -#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) -#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 -#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 -#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 -#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 -/* flags for remove VLAN */ -#define I40E_AQC_REMOVE_VLAN_ALL 0x1 u8 reserved; u8 result; -/* flags for add VLAN */ -#define I40E_AQC_ADD_VLAN_SUCCESS 0x0 -#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE -#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF -/* flags for remove VLAN */ -#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 -#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF u8 reserved1[3]; }; @@ -1213,9 +983,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes { #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 #define I40E_AQC_SET_VSI_PROMISC_TX 0x8000 __le16 seid; -#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF __le16 vlan_tag; -#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF #define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 u8 reserved[8]; }; @@ -1227,11 +995,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); */ struct i40e_aqc_add_tag { __le16 flags; -#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 __le16 seid; -#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT) __le16 tag; __le16 queue_number; u8 reserved[8]; @@ -1252,9 +1016,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); */ struct i40e_aqc_remove_tag { __le16 seid; -#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT) __le16 tag; u8 reserved[12]; }; @@ -1290,9 +1051,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion); /* Update S/E-Tag (direct 0x0259) */ struct i40e_aqc_update_tag { __le16 seid; -#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT) __le16 old_tag; __le16 new_tag; u8 reserved[10]; @@ -1319,13 +1077,8 @@ struct i40e_aqc_add_remove_control_packet_filter { __le16 flags; #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 __le16 seid; -#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT) __le16 queue; u8 reserved[2]; }; @@ -1351,9 +1104,6 @@ struct i40e_aqc_add_remove_cloud_filters { u8 num_filters; u8 reserved; __le16 seid; -#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) u8 big_buffer_flag; #define I40E_AQC_ADD_CLOUD_CMD_BB 1 u8 reserved2[3]; @@ -1380,9 +1130,6 @@ struct i40e_aqc_cloud_filters_element_data { } raw_v6; } ipaddr; __le16 flags; -#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ - 
I40E_AQC_ADD_CLOUD_FILTER_SHIFT) /* 0x0000 reserved */ /* 0x0001 reserved */ /* 0x0002 reserved */ @@ -1404,36 +1151,20 @@ struct i40e_aqc_cloud_filters_element_data { #define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT 0x0011 /* Dest MAC + L4 Port */ #define I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT 0x0012 /* Dest MAC + VLAN + L4 Port */ -#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 -#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 -#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 #define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 #define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN 0 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_RESERVED 4 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE 5 -#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000 -#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000 -#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP 0x8000 __le32 tenant_id; u8 reserved[4]; __le16 queue_number; -#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \ - I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) u8 reserved2[14]; /* response section */ u8 allocation_result; -#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 -#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF u8 response_reserved[7]; }; @@ -1445,37 +1176,7 @@ I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_cloud_filters_element_data); struct i40e_aqc_cloud_filters_element_bb { struct i40e_aqc_cloud_filters_element_data element; u16 general_fields[32]; -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30 }; I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_cloud_filters_element_bb); @@ -1504,11 +1205,6 @@ I40E_CHECK_STRUCT_LEN(4, i40e_filter_data); struct i40e_aqc_replace_cloud_filters_cmd { u8 valid_flags; -#define I40E_AQC_REPLACE_L1_FILTER 0x0 -#define I40E_AQC_REPLACE_CLOUD_FILTER 0x1 -#define I40E_AQC_GET_CLOUD_FILTERS 0x2 -#define I40E_AQC_MIRROR_CLOUD_FILTER 0x4 -#define 
I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8 u8 old_filter_type; u8 new_filter_type; u8 tr_bit; @@ -1521,25 +1217,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_replace_cloud_filters_cmd); struct i40e_aqc_replace_cloud_filters_cmd_buf { u8 data[32]; -/* Filter type INPUT codes*/ -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED BIT(7) - -/* Field Vector offsets */ -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12 -/* big FLU */ -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14 -/* big FLU */ -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15 - -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37 struct i40e_filter_data filters[8]; }; @@ -1556,8 +1233,6 @@ struct i40e_aqc_add_delete_mirror_rule { #define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 #define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ I40E_AQC_MIRROR_RULE_TYPE_SHIFT) -#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 -#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 #define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 #define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 #define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 @@ -1600,8 +1275,6 @@ struct i40e_aqc_write_ddp_resp { struct i40e_aqc_get_applied_profiles { u8 flags; -#define I40E_AQC_GET_DDP_GET_CONF 0x1 -#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2 u8 rsv[3]; __le32 reserved; __le32 addr_high; @@ -1618,8 +1291,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_applied_profiles); struct i40e_aqc_pfc_ignore { u8 tc_bitmap; u8 command_flags; /* unused on response */ -#define I40E_AQC_PFC_IGNORE_SET 0x80 -#define I40E_AQC_PFC_IGNORE_CLEAR 0x0 u8 reserved[14]; }; @@ -1736,7 +1407,6 @@ struct i40e_aqc_configure_switching_comp_ets_data { u8 reserved[4]; u8 tc_valid_bits; u8 seepage; -#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 u8 tc_strict_priority_flags; u8 reserved1[17]; u8 tc_bw_share_credits[8]; @@ -1977,40 +1647,18 @@ struct i40e_aq_get_phy_abilities_resp { u8 abilities; #define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 #define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 -#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 -#define I40E_AQ_PHY_LINK_ENABLED 0x08 -#define I40E_AQ_PHY_AN_ENABLED 0x10 -#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 -#define I40E_AQ_PHY_FEC_ABILITY_KR 0x40 -#define I40E_AQ_PHY_FEC_ABILITY_RS 0x80 __le16 eee_capability; -#define I40E_AQ_EEE_100BASE_TX 0x0002 -#define I40E_AQ_EEE_1000BASE_T 0x0004 -#define I40E_AQ_EEE_10GBASE_T 0x0008 -#define I40E_AQ_EEE_1000BASE_KX 0x0010 -#define I40E_AQ_EEE_10GBASE_KX4 0x0020 -#define I40E_AQ_EEE_10GBASE_KR 0x0040 __le32 eeer_val; u8 d3_lpan; -#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 u8 phy_type_ext; #define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01 #define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02 #define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04 #define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08 -#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10 -#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20 -#define I40E_AQ_PHY_TYPE_EXT_2_5GBASE_T 0x40 -#define I40E_AQ_PHY_TYPE_EXT_5GBASE_T 0x80 u8 fec_cfg_curr_mod_ext_info; -#define I40E_AQ_ENABLE_FEC_KR 0x01 -#define I40E_AQ_ENABLE_FEC_RS 0x02 #define I40E_AQ_REQUEST_FEC_KR 0x04 #define I40E_AQ_REQUEST_FEC_RS 0x08 #define I40E_AQ_ENABLE_FEC_AUTO 0x10 -#define 
I40E_AQ_FEC -#define I40E_AQ_MODULE_TYPE_EXT_MASK 0xE0 -#define I40E_AQ_MODULE_TYPE_EXT_SHIFT 5 u8 ext_comp_code; u8 phy_id[4]; @@ -2028,7 +1676,6 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */ u8 link_speed; u8 abilities; /* bits 0-2 use the values from get_phy_abilities_resp */ -#define I40E_AQ_PHY_ENABLE_LINK 0x08 #define I40E_AQ_PHY_ENABLE_AN 0x10 #define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20 __le16 eee_capability; @@ -2056,21 +1703,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); struct i40e_aq_set_mac_config { __le16 max_frame_size; u8 params; -#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 -#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 -#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 -#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 -#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 -#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 -#define I40E_AQ_SET_MAC_CONFIG_DROP_BLOCKING_PACKET_EN 0x80 u8 tx_timer_priority; /* bitmap */ __le16 tx_timer_value; __le16 fc_refresh_threshold; @@ -2092,8 +1724,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an); /* Get Link Status cmd & response data structure (direct 0x0607) */ struct i40e_aqc_get_link_status { __le16 command_flags; /* only field set on command */ -#define I40E_AQ_LSE_MASK 0x3 -#define I40E_AQ_LSE_NOP 0x0 #define I40E_AQ_LSE_DISABLE 0x2 #define I40E_AQ_LSE_ENABLE 0x3 /* only response uses this flag */ @@ -2102,44 +1732,16 @@ struct i40e_aqc_get_link_status { u8 link_speed; /* i40e_aq_link_speed */ u8 link_info; #define I40E_AQ_LINK_UP 0x01 /* obsolete */ -#define I40E_AQ_LINK_UP_FUNCTION 0x01 -#define I40E_AQ_LINK_FAULT 0x02 -#define I40E_AQ_LINK_FAULT_TX 0x04 -#define I40E_AQ_LINK_FAULT_RX 0x08 -#define I40E_AQ_LINK_FAULT_REMOTE 0x10 -#define I40E_AQ_LINK_UP_PORT 0x20 #define I40E_AQ_MEDIA_AVAILABLE 0x40 -#define I40E_AQ_SIGNAL_DETECT 0x80 u8 an_info; #define I40E_AQ_AN_COMPLETED 0x01 -#define I40E_AQ_LP_AN_ABILITY 0x02 -#define I40E_AQ_PD_FAULT 0x04 -#define I40E_AQ_FEC_EN 0x08 -#define I40E_AQ_PHY_LOW_POWER 0x10 #define I40E_AQ_LINK_PAUSE_TX 0x20 #define I40E_AQ_LINK_PAUSE_RX 0x40 #define I40E_AQ_QUALIFIED_MODULE 0x80 u8 ext_info; -#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 -#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 -#define I40E_AQ_LINK_TX_SHIFT 0x02 -#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) -#define I40E_AQ_LINK_TX_ACTIVE 0x00 -#define I40E_AQ_LINK_TX_DRAINED 0x01 -#define I40E_AQ_LINK_TX_FLUSHED 0x03 -#define I40E_AQ_LINK_FORCED_40G 0x10 -/* 25G Error Codes */ -#define I40E_AQ_25G_NO_ERR 0X00 -#define I40E_AQ_25G_NOT_PRESENT 0X01 -#define I40E_AQ_25G_NVM_CRC_ERR 0X02 -#define I40E_AQ_25G_SBUS_UCODE_ERR 0X03 -#define I40E_AQ_25G_SERDES_UCODE_ERR 0X04 -#define I40E_AQ_25G_NIMB_UCODE_ERR 0X05 u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ /* Since firmware API 1.7 loopback field keeps power class info as well */ #define I40E_AQ_LOOPBACK_MASK 0x07 -#define I40E_AQ_PWR_CLASS_SHIFT_LB 6 -#define I40E_AQ_PWR_CLASS_MASK_LB (0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB) __le16 max_frame_size; u8 config; #define I40E_AQ_CONFIG_FEC_KR_ENA 0x01 @@ -2149,11 +1751,6 @@ struct i40e_aqc_get_link_status 
{ union { struct { u8 power_desc; -#define I40E_AQ_LINK_POWER_CLASS_1 0x00 -#define I40E_AQ_LINK_POWER_CLASS_2 0x01 -#define I40E_AQ_LINK_POWER_CLASS_3 0x02 -#define I40E_AQ_LINK_POWER_CLASS_4 0x03 -#define I40E_AQ_PWR_CLASS_MASK 0x03 u8 reserved[4]; }; struct { @@ -2171,13 +1768,7 @@ struct i40e_aqc_set_phy_int_mask { __le16 event_mask; #define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 #define I40E_AQ_EVENT_MEDIA_NA 0x0004 -#define I40E_AQ_EVENT_LINK_FAULT 0x0008 -#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 -#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 -#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 -#define I40E_AQ_EVENT_AN_COMPLETED 0x0080 #define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 -#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 u8 reserved1[6]; }; @@ -2209,13 +1800,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); /* Set PHY Debug command (0x0622) */ struct i40e_aqc_set_phy_debug { u8 command_flags; -#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02 -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2 -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \ - I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT) -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00 -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01 -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 /* Disable link manageability on a single port */ #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 /* Disable link manageability on all ports */ @@ -2247,7 +1831,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity); /* Get PHY Register command (0x0629) */ struct i40e_aqc_phy_register_access { u8 phy_interface; -#define I40E_AQ_PHY_REG_ACCESS_INTERNAL 0 #define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1 #define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2 u8 dev_address; @@ -2274,9 +1857,7 @@ struct i40e_aqc_nvm_update { #define I40E_AQ_NVM_LAST_CMD 0x01 #define I40E_AQ_NVM_REARRANGE_TO_FLAT 0x20 #define I40E_AQ_NVM_REARRANGE_TO_STRUCT 0x40 -#define I40E_AQ_NVM_FLASH_ONLY 0x80 #define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1 -#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03 #define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03 #define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01 u8 module_pointer; @@ -2291,9 +1872,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); /* NVM Config Read (indirect 0x0704) */ struct i40e_aqc_nvm_config_read { __le16 cmd_flags; -#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 -#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0 -#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1 __le16 element_count; __le16 element_id; /* Feature/field ID */ __le16 element_id_msw; /* MSWord of field ID */ @@ -2315,16 +1893,8 @@ struct i40e_aqc_nvm_config_write { I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); /* Used for 0x0704 as well as for 0x0705 commands */ -#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1 -#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \ - BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT) -#define I40E_AQ_ANVM_FEATURE 0 -#define I40E_AQ_ANVM_IMMEDIATE_FIELD BIT(FEATURE_OR_IMMEDIATE_SHIFT) struct i40e_aqc_nvm_config_data_feature { __le16 feature_id; -#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01 -#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08 -#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10 __le16 feature_options; __le16 feature_selection; }; @@ -2344,7 +1914,6 @@ I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field); * no command data struct used */ struct i40e_aqc_nvm_oem_post_update { -#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01 u8 sel_data; u8 reserved[7]; }; @@ -2366,9 +1935,6 @@ I40E_CHECK_STRUCT_LEN(0x28, 
i40e_aqc_nvm_oem_post_update_buffer); */ struct i40e_aqc_thermal_sensor { u8 sensor_action; -#define I40E_AQ_THERMAL_SENSOR_READ_CONFIG 0 -#define I40E_AQ_THERMAL_SENSOR_SET_CONFIG 1 -#define I40E_AQ_THERMAL_SENSOR_READ_TEMP 2 u8 reserved[7]; __le32 addr_high; __le32 addr_low; @@ -2421,10 +1987,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write); */ struct i40e_aqc_alternate_write_done { __le16 cmd_flags; -#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1 -#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0 -#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1 -#define I40E_AQ_ALTERNATE_RESET_NEEDED 2 u8 reserved[14]; }; @@ -2433,8 +1995,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done); /* Set OEM mode (direct 0x0905) */ struct i40e_aqc_alternate_set_mode { __le32 mode; -#define I40E_AQ_ALTERNATE_MODE_NONE 0 -#define I40E_AQ_ALTERNATE_MODE_OEM 1 u8 reserved[12]; }; @@ -2460,13 +2020,9 @@ struct i40e_aqc_lldp_get_mib { #define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 #define I40E_AQ_LLDP_MIB_LOCAL 0x0 #define I40E_AQ_LLDP_MIB_REMOTE 0x1 -#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 #define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC #define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 #define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 -#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 -#define I40E_AQ_LLDP_TX_SHIFT 0x4 -#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) /* TX pause flags use I40E_AQ_LINK_TX_* above */ __le16 local_len; __le16 remote_len; @@ -2482,7 +2038,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); */ struct i40e_aqc_lldp_update_mib { u8 command; -#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 #define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 u8 reserved[7]; __le32 addr_high; @@ -2521,7 +2076,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv); /* Stop LLDP (direct 0x0A05) */ struct i40e_aqc_lldp_stop { u8 command; -#define I40E_AQ_LLDP_AGENT_STOP 0x0 #define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 #define I40E_AQ_LLDP_AGENT_STOP_PERSIST 0x2 u8 reserved[15]; @@ -2627,13 +2181,6 @@ I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp); * Used to replace the local MIB of a given LLDP agent. e.g. DCBx */ struct i40e_aqc_lldp_set_local_mib { -#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0 -#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK BIT(SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT) -#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0 -#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1) -#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK \ - BIT(SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT) -#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1 u8 type; u8 reserved0; __le16 length; @@ -2648,9 +2195,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib); * Used for stopping/starting specific LLDP agent. e.g. 
DCBx */ struct i40e_aqc_lldp_stop_start_specific_agent { -#define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0 -#define I40E_AQC_START_SPECIFIC_AGENT_MASK \ - BIT(I40E_AQC_START_SPECIFIC_AGENT_SHIFT) u8 command; u8 reserved[15]; }; @@ -2660,7 +2204,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent); /* Restore LLDP Agent factory settings (direct 0x0A0A) */ struct i40e_aqc_lldp_restore { u8 command; -#define I40E_AQ_LLDP_AGENT_RESTORE_NOT 0x0 #define I40E_AQ_LLDP_AGENT_RESTORE 0x1 u8 reserved[15]; }; @@ -2674,8 +2217,6 @@ struct i40e_aqc_add_udp_tunnel { u8 protocol_type; #define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 #define I40E_AQC_TUNNEL_TYPE_NGE 0x01 -#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 -#define I40E_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11 u8 reserved1[10]; }; @@ -2685,8 +2226,6 @@ struct i40e_aqc_add_udp_tunnel_completion { __le16 udp_port; u8 filter_entry_index; u8 multiple_pfs; -#define I40E_AQC_SINGLE_PF 0x0 -#define I40E_AQC_MULTIPLE_PFS 0x1 u8 total_filters; u8 reserved[11]; }; @@ -2759,16 +2298,7 @@ struct i40e_aqc_tunnel_key_structure { u8 key1_len; /* 0 to 15 */ u8 key2_len; /* 0 to 15 */ u8 flags; -#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 -/* response flags */ -#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 -#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 -#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 u8 network_key_index; -#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0 -#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1 -#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2 -#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3 u8 reserved[10]; }; @@ -2777,9 +2307,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure); /* OEM mode commands (direct 0xFE0x) */ struct i40e_aqc_oem_param_change { __le32 param_type; -#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 -#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 -#define I40E_AQ_OEM_PARAM_MAC 2 __le32 param_value1; __le16 param_value2; u8 reserved[6]; @@ -2789,8 +2316,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); struct i40e_aqc_oem_state_change { __le32 state; -#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 -#define I40E_AQ_OEM_STATE_LINK_UP 0x1 u8 reserved[12]; }; @@ -2826,14 +2351,8 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize); struct i40e_acq_set_test_mode { u8 mode; -#define I40E_AQ_TEST_PARTIAL 0 -#define I40E_AQ_TEST_FULL 1 -#define I40E_AQ_TEST_NVM 2 u8 reserved[3]; u8 command; -#define I40E_AQ_TEST_OPEN 0 -#define I40E_AQ_TEST_CLOSE 1 -#define I40E_AQ_TEST_INC 2 u8 reserved2[3]; __le32 address_high; __le32 address_low; @@ -2874,20 +2393,6 @@ struct i40e_aqc_debug_modify_reg { I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg); /* dump internal data (0xFF08, indirect) */ - -#define I40E_AQ_CLUSTER_ID_AUX 0 -#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1 -#define I40E_AQ_CLUSTER_ID_TXSCHED 2 -#define I40E_AQ_CLUSTER_ID_HMC 3 -#define I40E_AQ_CLUSTER_ID_MAC0 4 -#define I40E_AQ_CLUSTER_ID_MAC1 5 -#define I40E_AQ_CLUSTER_ID_MAC2 6 -#define I40E_AQ_CLUSTER_ID_MAC3 7 -#define I40E_AQ_CLUSTER_ID_DCB 8 -#define I40E_AQ_CLUSTER_ID_EMP_MEM 9 -#define I40E_AQ_CLUSTER_ID_PKT_BUF 10 -#define I40E_AQ_CLUSTER_ID_ALTRAM 11 - struct i40e_aqc_debug_dump_internals { u8 cluster_id; u8 table_id; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 45b90eb11adb..4ab081953e19 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1455,10 +1455,6 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) return gpio_val; } -#define 
I40E_COMBINED_ACTIVITY 0xA -#define I40E_FILTER_ACTIVITY 0xE -#define I40E_LINK_ACTIVITY 0xC -#define I40E_MAC_ACTIVITY 0xD #define I40E_FW_LED BIT(4) #define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \ I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h index ba86ad833bee..2b1a2e81ac73 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -6,10 +6,8 @@ #include "i40e_type.h" -#define I40E_DCBX_STATUS_NOT_STARTED 0 #define I40E_DCBX_STATUS_IN_PROGRESS 1 #define I40E_DCBX_STATUS_DONE 2 -#define I40E_DCBX_STATUS_MULTIPLE_PEERS 3 #define I40E_DCBX_STATUS_DISABLED 7 #define I40E_TLV_TYPE_END 0 @@ -24,7 +22,6 @@ #define I40E_CEE_DCBX_OUI 0x001b21 #define I40E_CEE_DCBX_TYPE 2 -#define I40E_CEE_SUBTYPE_CTRL 1 #define I40E_CEE_SUBTYPE_PG_CFG 2 #define I40E_CEE_SUBTYPE_PFC_CFG 3 #define I40E_CEE_SUBTYPE_APP_PRI 4 @@ -105,9 +102,7 @@ struct i40e_cee_ctrl_tlv { struct i40e_cee_feat_tlv { struct i40e_cee_tlv_hdr hdr; u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */ -#define I40E_CEE_FEAT_TLV_ENABLE_MASK 0x80 #define I40E_CEE_FEAT_TLV_WILLING_MASK 0x40 -#define I40E_CEE_FEAT_TLV_ERR_MASK 0x20 u8 subtype; u8 tlvinfo[1]; }; diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 99ea543dd245..9cb9b781451c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -688,7 +688,6 @@ static void i40e_dbg_dump_vf_all(struct i40e_pf *pf) i40e_dbg_dump_vf(pf, i); } -#define I40E_MAX_DEBUG_OUT_BUFFER (4096*4) /** * i40e_dbg_command_write - write into command datum * @filp: the opened file diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h index bf15a868292f..33df3bf2f73b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h @@ -32,8 +32,5 @@ #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 #define I40E_DEV_ID_SFP_I_X722 0x37D3 -#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ - (d) == I40E_DEV_ID_QSFP_B || \ - (d) == I40E_DEV_ID_QSFP_C) #endif /* _I40E_DEVIDS_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h index 1c78de838857..3113792afaff 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h @@ -14,7 +14,6 @@ struct i40e_hw; #define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */ #define I40E_HMC_PAGED_BP_SIZE 4096 #define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096 -#define I40E_FIRST_VF_FPM_ID 16 struct i40e_hmc_obj_info { u64 base; /* base addr in FPM */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 5d807c8004f8..5f7f5147f9a7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -6492,8 +6492,7 @@ out: return err; } #endif /* CONFIG_I40E_DCB */ -#define SPEED_SIZE 14 -#define FC_SIZE 8 + /** * i40e_print_link_message - print link up or down * @vsi: the VSI for which link needs a message @@ -8950,13 +8949,6 @@ u32 i40e_get_current_atr_cnt(struct i40e_pf *pf) return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters; } -/* We can see up to 256 filter programming desc in transit if the filters are - * being applied really fast; before we see the first - * filter miss error on Rx queue 0. 
Accumulating enough error messages before - * reacting will make sure we don't cause flush too often. - */ -#define I40E_MAX_FD_PROGRAM_ERROR 256 - /** * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table * @pf: board private structure diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h index c302ef2524f8..2f6815b2f8df 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h @@ -26,7 +26,6 @@ do { \ #define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) #define rd32(a, reg) readl((a)->hw_addr + (reg)) -#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg))) #define rd64(a, reg) readq((a)->hw_addr + (reg)) #define i40e_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT) diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index d35d690ca10f..7cd3a08a1891 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -4,53 +4,14 @@ #ifndef _I40E_REGISTER_H_ #define _I40E_REGISTER_H_ -#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */ -#define I40E_GL_ARQBAH_ARQBAH_SHIFT 0 -#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT) -#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */ -#define I40E_GL_ARQBAL_ARQBAL_SHIFT 0 -#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT) -#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */ -#define I40E_GL_ARQH_ARQH_SHIFT 0 -#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT) -#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */ -#define I40E_GL_ARQT_ARQT_SHIFT 0 -#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT) -#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */ -#define I40E_GL_ATQBAH_ATQBAH_SHIFT 0 -#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT) -#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */ -#define I40E_GL_ATQBAL_ATQBAL_SHIFT 0 -#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT) -#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */ -#define I40E_GL_ATQH_ATQH_SHIFT 0 -#define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT) -#define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */ -#define I40E_GL_ATQLEN_ATQLEN_SHIFT 0 -#define I40E_GL_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_GL_ATQLEN_ATQLEN_SHIFT) -#define I40E_GL_ATQLEN_ATQVFE_SHIFT 28 -#define I40E_GL_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQVFE_SHIFT) -#define I40E_GL_ATQLEN_ATQOVFL_SHIFT 29 -#define I40E_GL_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQOVFL_SHIFT) #define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30 #define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT) -#define I40E_GL_ATQLEN_ATQENABLE_SHIFT 31 -#define I40E_GL_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQENABLE_SHIFT) -#define I40E_GL_ATQT 0x00080440 /* Reset: EMPR */ -#define I40E_GL_ATQT_ATQT_SHIFT 0 -#define I40E_GL_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_GL_ATQT_ATQT_SHIFT) #define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */ -#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0 -#define I40E_PF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT) #define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */ -#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0 -#define I40E_PF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT) #define I40E_PF_ARQH 
0x00080380 /* Reset: EMPR */ #define I40E_PF_ARQH_ARQH_SHIFT 0 #define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT) #define I40E_PF_ARQLEN 0x00080280 /* Reset: EMPR */ -#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0 -#define I40E_PF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ARQLEN_ARQLEN_SHIFT) #define I40E_PF_ARQLEN_ARQVFE_SHIFT 28 #define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT) #define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29 @@ -60,20 +21,10 @@ #define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31 #define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ARQLEN_ARQENABLE_SHIFT) #define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */ -#define I40E_PF_ARQT_ARQT_SHIFT 0 -#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT) #define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */ -#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0 -#define I40E_PF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT) #define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */ -#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0 -#define I40E_PF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT) #define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */ -#define I40E_PF_ATQH_ATQH_SHIFT 0 -#define I40E_PF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT) #define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */ -#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0 -#define I40E_PF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT) #define I40E_PF_ATQLEN_ATQVFE_SHIFT 28 #define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT) #define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29 @@ -83,284 +34,13 @@ #define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31 #define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT) #define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */ -#define I40E_PF_ATQT_ATQT_SHIFT 0 -#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT) -#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ARQBAH_MAX_INDEX 127 -#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0 -#define I40E_VF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH_ARQBAH_SHIFT) -#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ARQBAL_MAX_INDEX 127 -#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0 -#define I40E_VF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL_ARQBAL_SHIFT) -#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ARQH_MAX_INDEX 127 -#define I40E_VF_ARQH_ARQH_SHIFT 0 -#define I40E_VF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH_ARQH_SHIFT) -#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ARQLEN_MAX_INDEX 127 -#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0 -#define I40E_VF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN_ARQLEN_SHIFT) -#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28 -#define I40E_VF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQVFE_SHIFT) -#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29 -#define I40E_VF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQOVFL_SHIFT) -#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30 -#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT) -#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31 -#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN_ARQENABLE_SHIFT) -#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ARQT_MAX_INDEX 127 -#define 
I40E_VF_ARQT_ARQT_SHIFT 0 -#define I40E_VF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT_ARQT_SHIFT) -#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ATQBAH_MAX_INDEX 127 -#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0 -#define I40E_VF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH_ATQBAH_SHIFT) -#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ATQBAL_MAX_INDEX 127 -#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0 -#define I40E_VF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL_ATQBAL_SHIFT) -#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ATQH_MAX_INDEX 127 -#define I40E_VF_ATQH_ATQH_SHIFT 0 -#define I40E_VF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH_ATQH_SHIFT) -#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ATQLEN_MAX_INDEX 127 -#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0 -#define I40E_VF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN_ATQLEN_SHIFT) -#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28 -#define I40E_VF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQVFE_SHIFT) -#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29 -#define I40E_VF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQOVFL_SHIFT) -#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30 -#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT) -#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31 -#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN_ATQENABLE_SHIFT) -#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ATQT_MAX_INDEX 127 -#define I40E_VF_ATQT_ATQT_SHIFT 0 -#define I40E_VF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT_ATQT_SHIFT) -#define I40E_PRT_L2TAGSEN 0x001C0B20 /* Reset: CORER */ -#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0 -#define I40E_PRT_L2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_L2TAGSEN_ENABLE_SHIFT) -#define I40E_PFCM_LAN_ERRDATA 0x0010C080 /* Reset: PFR */ -#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0 -#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT) -#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4 -#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT) -#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8 -#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT) -#define I40E_PFCM_LAN_ERRINFO 0x0010C000 /* Reset: PFR */ -#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0 -#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT) -#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4 -#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT) -#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8 -#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT) -#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16 -#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT) -#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24 -#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT) -#define I40E_PFCM_LANCTXCTL 0x0010C300 /* Reset: CORER */ -#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0 -#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK I40E_MASK(0xFFF, 
I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT) -#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12 -#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK I40E_MASK(0x7, I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT) -#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15 -#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT) -#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17 -#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT) -#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3 -#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0 -#define I40E_PFCM_LANCTXDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFCM_LANCTXDATA_DATA_SHIFT) -#define I40E_PFCM_LANCTXSTAT 0x0010C380 /* Reset: CORER */ -#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0 -#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT) -#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1 -#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT) -#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127 -#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0 -#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT) -#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4 -#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT) -#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8 -#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT) -#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127 -#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0 -#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT) -#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4 -#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT) -#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8 -#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT) -#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16 -#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT) -#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24 -#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT) -#define I40E_GLDCB_GENC 0x00083044 /* Reset: CORER */ -#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0 -#define I40E_GLDCB_GENC_PCIRTT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_GENC_PCIRTT_SHIFT) -#define I40E_GLDCB_RUPTI 0x00122618 /* Reset: CORER */ -#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0 -#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT) -#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */ -#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3 -#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT) -#define I40E_PRTDCB_FCRTV 0x001E4600 /* Reset: GLOBR */ -#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0 -#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT) -#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: GLOBR */ -#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3 -#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0 
-#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT) -#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16 -#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT) #define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */ -#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0 -#define I40E_PRTDCB_GENC_RESERVED_1_MASK I40E_MASK(0x3, I40E_PRTDCB_GENC_RESERVED_1_SHIFT) -#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2 -#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT) -#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6 -#define I40E_PRTDCB_GENC_FCOEUP_MASK I40E_MASK(0x7, I40E_PRTDCB_GENC_FCOEUP_SHIFT) -#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9 -#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK I40E_MASK(0x1, I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT) #define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16 #define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT) #define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */ #define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0 #define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT) -#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */ -#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0 -#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT) -#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1 -#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT) -#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2 -#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT) -#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3 -#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT) -#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4 -#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT) -#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */ -#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0 -#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT) -#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1 -#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT) -#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2 -#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT) -#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8 -#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT) -#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7 -#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0 -#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) -#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30 -#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) -#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31 -#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) -#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */ -#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0 -#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT) -#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8 -#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT) -#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16 -#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT) -#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */ -#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0 -#define 
I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT) -#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */ -#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0 -#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT) -#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3 -#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT) -#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6 -#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT) -#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9 -#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT) -#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12 -#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT) -#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15 -#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT) -#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18 -#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT) -#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21 -#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT) -#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7 -#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0 -#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT) -#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */ -#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0 -#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) -#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7 -#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0 -#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT) -#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */ -#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0 -#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT) -#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13 -#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT) -#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30 -#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT) -#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7 -#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0 -#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT) -#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */ -#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0 -#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT) -#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30 -#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT) -#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */ -#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0 -#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT) -#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8 -#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT) -#define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */ -#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0 -#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT) -#define 
I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8 -#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT) -#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */ -#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0 -#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8 -#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9 -#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10 -#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11 -#define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12 -#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13 -#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14 -#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15 -#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT) -#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */ -#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7 -#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0 -#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT) -#define I40E_GLFCOE_RCTL 0x00269B94 /* Reset: CORER */ -#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0 -#define I40E_GLFCOE_RCTL_FCOEVER_MASK I40E_MASK(0xF, I40E_GLFCOE_RCTL_FCOEVER_SHIFT) -#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4 -#define I40E_GLFCOE_RCTL_SAVBAD_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_SAVBAD_SHIFT) -#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5 -#define I40E_GLFCOE_RCTL_ICRC_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_ICRC_SHIFT) -#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16 -#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK I40E_MASK(0x3FFF, I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT) #define I40E_GL_FWSTS 0x00083048 /* Reset: POR */ -#define I40E_GL_FWSTS_FWS0B_SHIFT 0 -#define I40E_GL_FWSTS_FWS0B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS0B_SHIFT) -#define I40E_GL_FWSTS_FWRI_SHIFT 9 -#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT) #define I40E_GL_FWSTS_FWS1B_SHIFT 16 #define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT) #define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0x30, I40E_GL_FWSTS_FWS1B_SHIFT) @@ -369,500 +49,119 @@ #define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK I40E_MASK(0x33, I40E_GL_FWSTS_FWS1B_SHIFT) #define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0xB, I40E_GL_FWSTS_FWS1B_SHIFT) #define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0xC, I40E_GL_FWSTS_FWS1B_SHIFT) -#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */ -#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0 -#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT) -#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4 -#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK I40E_MASK(0x3, I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT) -#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8 -#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT) -#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12 -#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK I40E_MASK(0x7, 
I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT) -#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16 -#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT) -#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20 -#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT) #define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */ #define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29 #define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0 #define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT) #define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3 #define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT) -#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4 -#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT) -#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5 -#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT) -#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6 -#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT) #define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7 #define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) -#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10 -#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT) #define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11 -#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT) #define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12 #define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) -#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17 -#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT) -#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19 -#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT) -#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20 -#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT) -#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26 -#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT) -#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */ -#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0 -#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT) -#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5 -#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT) -#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6 -#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT) -#define I40E_GLGEN_GPIO_STAT 0x0008817C /* Reset: POR */ -#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0 -#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT) -#define I40E_GLGEN_GPIO_TRANSIT 0x00088180 /* Reset: POR */ -#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0 -#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT) -#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_GLGEN_I2CCMD_MAX_INDEX 3 -#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0 -#define I40E_GLGEN_I2CCMD_DATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_I2CCMD_DATA_SHIFT) -#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 
16 -#define I40E_GLGEN_I2CCMD_REGADD_MASK I40E_MASK(0xFF, I40E_GLGEN_I2CCMD_REGADD_SHIFT) -#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24 -#define I40E_GLGEN_I2CCMD_PHYADD_MASK I40E_MASK(0x7, I40E_GLGEN_I2CCMD_PHYADD_SHIFT) -#define I40E_GLGEN_I2CCMD_OP_SHIFT 27 -#define I40E_GLGEN_I2CCMD_OP_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_OP_SHIFT) -#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28 -#define I40E_GLGEN_I2CCMD_RESET_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_RESET_SHIFT) -#define I40E_GLGEN_I2CCMD_R_SHIFT 29 -#define I40E_GLGEN_I2CCMD_R_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_R_SHIFT) -#define I40E_GLGEN_I2CCMD_E_SHIFT 31 -#define I40E_GLGEN_I2CCMD_E_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_E_SHIFT) -#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3 -#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0 -#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK I40E_MASK(0x1F, I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT) -#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5 -#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK I40E_MASK(0x7, I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT) -#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8 -#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT) -#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9 -#define I40E_GLGEN_I2CPARAMS_CLK_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_SHIFT) -#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10 -#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT) -#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11 -#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT) -#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12 -#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT) -#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13 -#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT) -#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14 -#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT) -#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15 -#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT) -#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31 -#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT) -#define I40E_GLGEN_LED_CTL 0x00088178 /* Reset: POR */ -#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0 -#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT) -#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3 -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0 -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK I40E_MASK(0x1FFFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT) -#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17 -#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT) -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18 -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29 -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT) #define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3 -#define 
I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0 -#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT) -#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1 -#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT) -#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5 -#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT) -#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10 -#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT) -#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15 -#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT) -#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20 -#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT) -#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25 -#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT) -#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31 -#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT) #define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_GLGEN_MSCA_MAX_INDEX 3 #define I40E_GLGEN_MSCA_MDIADD_SHIFT 0 -#define I40E_GLGEN_MSCA_MDIADD_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSCA_MDIADD_SHIFT) #define I40E_GLGEN_MSCA_DEVADD_SHIFT 16 -#define I40E_GLGEN_MSCA_DEVADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_DEVADD_SHIFT) #define I40E_GLGEN_MSCA_PHYADD_SHIFT 21 -#define I40E_GLGEN_MSCA_PHYADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_PHYADD_SHIFT) #define I40E_GLGEN_MSCA_OPCODE_SHIFT 26 -#define I40E_GLGEN_MSCA_OPCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_OPCODE_SHIFT) #define I40E_GLGEN_MSCA_STCODE_SHIFT 28 -#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_STCODE_SHIFT) #define I40E_GLGEN_MSCA_MDICMD_SHIFT 30 #define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT) #define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31 #define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1u, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT) #define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_GLGEN_MSRWD_MAX_INDEX 3 #define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0 -#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT) #define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16 #define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT) -#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */ -#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0 -#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT) -#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16 -#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT) #define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */ #define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0 #define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT) #define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2 #define I40E_GLGEN_RSTAT_RESET_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT) -#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4 -#define I40E_GLGEN_RSTAT_CORERCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_CORERCNT_SHIFT) -#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6 -#define 
I40E_GLGEN_RSTAT_GLOBRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT) -#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8 -#define I40E_GLGEN_RSTAT_EMPRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_EMPRCNT_SHIFT) -#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10 -#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT) #define I40E_GLGEN_RSTCTL 0x000B8180 /* Reset: POR */ #define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0 #define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT) -#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8 -#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT) #define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */ #define I40E_GLGEN_RTRIG_CORER_SHIFT 0 #define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT) #define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1 #define I40E_GLGEN_RTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT) -#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2 -#define I40E_GLGEN_RTRIG_EMPFWR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_EMPFWR_SHIFT) #define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */ -#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0 -#define I40E_GLGEN_STAT_HWRSVD0_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD0_SHIFT) -#define I40E_GLGEN_STAT_DCBEN_SHIFT 2 -#define I40E_GLGEN_STAT_DCBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_DCBEN_SHIFT) -#define I40E_GLGEN_STAT_VTEN_SHIFT 3 -#define I40E_GLGEN_STAT_VTEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_VTEN_SHIFT) -#define I40E_GLGEN_STAT_FCOEN_SHIFT 4 -#define I40E_GLGEN_STAT_FCOEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_FCOEN_SHIFT) -#define I40E_GLGEN_STAT_EVBEN_SHIFT 5 -#define I40E_GLGEN_STAT_EVBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_EVBEN_SHIFT) -#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6 -#define I40E_GLGEN_STAT_HWRSVD1_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD1_SHIFT) #define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3 -#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0 -#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT) #define I40E_GLVFGEN_TIMER 0x000881BC /* Reset: CORER */ -#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0 -#define I40E_GLVFGEN_TIMER_GTIME_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVFGEN_TIMER_GTIME_SHIFT) #define I40E_PFGEN_CTRL 0x00092400 /* Reset: PFR */ #define I40E_PFGEN_CTRL_PFSWR_SHIFT 0 #define I40E_PFGEN_CTRL_PFSWR_MASK I40E_MASK(0x1, I40E_PFGEN_CTRL_PFSWR_SHIFT) -#define I40E_PFGEN_DRUN 0x00092500 /* Reset: CORER */ -#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0 -#define I40E_PFGEN_DRUN_DRVUNLD_MASK I40E_MASK(0x1, I40E_PFGEN_DRUN_DRVUNLD_SHIFT) #define I40E_PFGEN_PORTNUM 0x001C0480 /* Reset: CORER */ #define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0 #define I40E_PFGEN_PORTNUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT) -#define I40E_PFGEN_STATE 0x00088000 /* Reset: CORER */ -#define I40E_PFGEN_STATE_RESERVED_0_SHIFT 0 -#define I40E_PFGEN_STATE_RESERVED_0_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_RESERVED_0_SHIFT) -#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1 -#define I40E_PFGEN_STATE_PFFCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFFCEN_SHIFT) -#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2 -#define I40E_PFGEN_STATE_PFLINKEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFLINKEN_SHIFT) -#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3 -#define I40E_PFGEN_STATE_PFSCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFSCEN_SHIFT) #define I40E_PRTGEN_CNF 0x000B8120 /* Reset: POR */ #define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0 #define 
I40E_PRTGEN_CNF_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT) -#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1 -#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT) -#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2 -#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT) -#define I40E_PRTGEN_CNF2 0x000B8160 /* Reset: POR */ -#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0 -#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT) #define I40E_PRTGEN_STATUS 0x000B8100 /* Reset: POR */ -#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0 -#define I40E_PRTGEN_STATUS_PORT_VALID_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_VALID_SHIFT) -#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1 -#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT) #define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFGEN_RSTAT1_MAX_INDEX 127 -#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0 -#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT) #define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127 #define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0 #define I40E_VPGEN_VFRSTAT_VFRD_MASK I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT) #define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127 #define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0 #define I40E_VPGEN_VFRTRIG_VFSWR_MASK I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT) -#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_VSIGEN_RSTAT_MAX_INDEX 383 -#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0 -#define I40E_VSIGEN_RSTAT_VMRD_MASK I40E_MASK(0x1, I40E_VSIGEN_RSTAT_VMRD_SHIFT) -#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_VSIGEN_RTRIG_MAX_INDEX 383 -#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0 -#define I40E_VSIGEN_RTRIG_VMSWR_MASK I40E_MASK(0x1, I40E_VSIGEN_RTRIG_VMSWR_SHIFT) #define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15 #define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0 #define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT) #define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15 -#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0 -#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK I40E_MASK(0xFFFFF, I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT) #define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 /* Reset: CORER */ -#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0 -#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT) #define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15 #define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0 #define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT) #define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15 -#define 
I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0 -#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT) #define I40E_GLHMC_FCOEFMAX 0x000C20D0 /* Reset: CORER */ #define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0 #define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT) #define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 /* Reset: CORER */ -#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0 -#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT) #define I40E_GLHMC_FCOEMAX 0x000C2014 /* Reset: CORER */ -#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0 -#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK I40E_MASK(0x1FFF, I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT) -#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15 -#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0 -#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT) -#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15 -#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0 -#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT) -#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29 -#define I40E_GLHMC_FSIAVCNT_RSVD_MASK I40E_MASK(0x7, I40E_GLHMC_FSIAVCNT_RSVD_SHIFT) -#define I40E_GLHMC_FSIAVMAX 0x000C2068 /* Reset: CORER */ -#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0 -#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT) -#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064 /* Reset: CORER */ -#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0 -#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT) -#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15 -#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0 -#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT) -#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15 -#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0 -#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT) -#define I40E_GLHMC_FSIMCMAX 0x000C2060 /* Reset: CORER */ -#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0 -#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK I40E_MASK(0x3FFF, I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT) -#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c /* Reset: CORER */ -#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0 -#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT) #define I40E_GLHMC_LANQMAX 0x000C2008 /* Reset: CORER */ -#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0 -#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT) #define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15 #define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0 #define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT) #define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_LANRXCNT_MAX_INDEX 
15 -#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0 -#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT) #define I40E_GLHMC_LANRXOBJSZ 0x000C200c /* Reset: CORER */ -#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0 -#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT) #define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15 #define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0 #define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT) -#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24 -#define I40E_GLHMC_LANTXBASE_RSVD_MASK I40E_MASK(0xFF, I40E_GLHMC_LANTXBASE_RSVD_SHIFT) #define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15 -#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0 -#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT) #define I40E_GLHMC_LANTXOBJSZ 0x000C2004 /* Reset: CORER */ -#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0 -#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT) -#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15 -#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0 -#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK I40E_MASK(0xF, I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT) -#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_SDPART_MAX_INDEX 15 -#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0 -#define I40E_GLHMC_SDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_SDPART_PMSDBASE_SHIFT) -#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16 -#define I40E_GLHMC_SDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_SDPART_PMSDSIZE_SHIFT) #define I40E_PFHMC_ERRORDATA 0x000C0500 /* Reset: PFR */ -#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0 -#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK I40E_MASK(0x3FFFFFFF, I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT) #define I40E_PFHMC_ERRORINFO 0x000C0400 /* Reset: PFR */ -#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0 -#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT) -#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7 -#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT) -#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8 -#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK I40E_MASK(0xF, I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT) -#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16 -#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT) -#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31 -#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT) #define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */ #define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0 -#define I40E_PFHMC_PDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_PDINV_PMSDIDX_SHIFT) #define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16 -#define I40E_PFHMC_PDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_PFHMC_PDINV_PMPDIDX_SHIFT) #define I40E_PFHMC_SDCMD 0x000C0000 /* Reset: PFR */ -#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0 -#define I40E_PFHMC_SDCMD_PMSDIDX_MASK I40E_MASK(0xFFF, 
I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
-#define I40E_PFHMC_SDCMD_PMSDWR_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
#define I40E_PFHMC_SDDATAHIGH 0x000C0200 /* Reset: PFR */
-#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
-#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
#define I40E_PFHMC_SDDATALOW 0x000C0100 /* Reset: PFR */
#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
-#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
-#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
-#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK I40E_MASK(0x3FF, I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
-#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
-#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK I40E_MASK(0xFFFFF, I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
-#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ /* Reset: POR */
-#define I40E_GL_GP_FUSE_MAX_INDEX 28
-#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
-#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
-#define I40E_GL_UFUSE 0x00094008 /* Reset: POR */
-#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
-#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
-#define I40E_GL_UFUSE_NIC_ID_SHIFT 2
-#define I40E_GL_UFUSE_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_NIC_ID_SHIFT)
-#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
-#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
-#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
-#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
-#define I40E_EMPINT_GPIO_ENA 0x00088188 /* Reset: POR */
-#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
-#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
-#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
-#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
-#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
-#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
-#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
-#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
-#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
-#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
-#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
-#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
-#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
-#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
-#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
-#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
-#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
-#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
-#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
-#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
-#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
-#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
-#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
-#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
-#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
-#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
-#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
-#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
-#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
-#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
-#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 /* Reset: CORER */
-#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
-#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */
#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
-#define I40E_PFINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
-#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13 -#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT) #define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30 #define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT) -#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31 -#define I40E_PFINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_INTEVENT_SHIFT) #define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */ -#define I40E_PFINT_CEQCTL_MAX_INDEX 511 #define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0 -#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT) #define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11 -#define I40E_PFINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_ITR_INDX_SHIFT) -#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13 -#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT) #define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16 -#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT) -#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27 -#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT) #define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30 #define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT) -#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31 -#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT) #define I40E_GLINT_CTL 0x0003F800 /* Reset: CORER */ -#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT 0 -#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT) #define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT 1 #define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT) -#define I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT 2 -#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT) #define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */ #define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0 #define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT) @@ -872,8 +171,6 @@ #define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT) #define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3 #define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) -#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5 -#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT) #define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24 #define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT) #define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25 @@ -881,7 +178,6 @@ #define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31 #define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT) #define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ -#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511 #define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0 #define I40E_PFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT) #define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1 @@ -891,93 +187,13 @@ #define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3 #define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) #define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5 -#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, 
I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT) #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24 #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT) -#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25 -#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT) -#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31 -#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT) -#define I40E_PFINT_GPIO_ENA 0x00088080 /* Reset: CORER */ -#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0 -#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1 -#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2 -#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3 -#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4 -#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5 -#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6 -#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7 -#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8 -#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9 -#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10 -#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11 -#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12 -#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13 -#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14 -#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15 -#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16 -#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17 -#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18 -#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19 -#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20 -#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK 
I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21 -#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22 -#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23 -#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24 -#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25 -#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26 -#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27 -#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28 -#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29 -#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT) #define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */ #define I40E_PFINT_ICR0_INTEVENT_SHIFT 0 #define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT) #define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1 #define I40E_PFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT) -#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2 -#define I40E_PFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_1_SHIFT) -#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3 -#define I40E_PFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_2_SHIFT) -#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4 -#define I40E_PFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_3_SHIFT) -#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5 -#define I40E_PFINT_ICR0_QUEUE_4_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_4_SHIFT) -#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6 -#define I40E_PFINT_ICR0_QUEUE_5_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_5_SHIFT) -#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7 -#define I40E_PFINT_ICR0_QUEUE_6_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_6_SHIFT) -#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8 -#define I40E_PFINT_ICR0_QUEUE_7_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_7_SHIFT) #define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16 #define I40E_PFINT_ICR0_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT) #define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19 @@ -986,14 +202,8 @@ #define I40E_PFINT_ICR0_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT) #define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21 #define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT) -#define I40E_PFINT_ICR0_GPIO_SHIFT 22 -#define I40E_PFINT_ICR0_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GPIO_SHIFT) #define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23 #define I40E_PFINT_ICR0_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT) -#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24 -#define I40E_PFINT_ICR0_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_STORM_DETECT_SHIFT) -#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25 -#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT) #define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26 #define 
I40E_PFINT_ICR0_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT) #define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28 @@ -1017,10 +227,6 @@ #define I40E_PFINT_ICR0_ENA_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT) #define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23 #define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT) -#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24 -#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT) -#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25 -#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT) #define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26 #define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT) #define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28 @@ -1029,43 +235,17 @@ #define I40E_PFINT_ICR0_ENA_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT) #define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30 #define I40E_PFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT) -#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31 -#define I40E_PFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_RSVD_SHIFT) #define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */ -#define I40E_PFINT_ITR0_MAX_INDEX 2 -#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0 -#define I40E_PFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITR0_INTERVAL_SHIFT) #define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */ -#define I40E_PFINT_ITRN_MAX_INDEX 2 -#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0 -#define I40E_PFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITRN_INTERVAL_SHIFT) #define I40E_PFINT_LNKLST0 0x00038500 /* Reset: PFR */ #define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0 -#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) -#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11 -#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT) #define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ -#define I40E_PFINT_LNKLSTN_MAX_INDEX 511 #define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0 #define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) #define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11 -#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) -#define I40E_PFINT_RATE0 0x00038580 /* Reset: PFR */ -#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0 -#define I40E_PFINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATE0_INTERVAL_SHIFT) -#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6 -#define I40E_PFINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATE0_INTRL_ENA_SHIFT) #define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ -#define I40E_PFINT_RATEN_MAX_INDEX 511 -#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0 -#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT) -#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6 -#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT) #define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */ -#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 -#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) #define I40E_QINT_RQCTL(_Q) (0x0003A000 + 
((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ -#define I40E_QINT_RQCTL_MAX_INDEX 1535 #define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0 #define I40E_QINT_RQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT) #define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11 @@ -1075,13 +255,11 @@ #define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16 #define I40E_QINT_RQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) #define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27 -#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) #define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30 #define I40E_QINT_RQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) #define I40E_QINT_RQCTL_INTEVENT_SHIFT 31 #define I40E_QINT_RQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT) #define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ -#define I40E_QINT_TQCTL_MAX_INDEX 1535 #define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0 #define I40E_QINT_TQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT) #define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11 @@ -1091,160 +269,45 @@ #define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16 #define I40E_QINT_TQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) #define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27 -#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) #define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30 #define I40E_QINT_TQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT) #define I40E_QINT_TQCTL_INTEVENT_SHIFT 31 #define I40E_QINT_TQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT) #define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127 -#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0 -#define I40E_VFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_SHIFT) -#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1 -#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT) -#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2 -#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT) -#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3 -#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5 -#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT) -#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24 -#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT) -#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25 -#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31 -#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT) #define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ -#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511 -#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0 -#define I40E_VFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_SHIFT) #define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1 #define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT) -#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2 -#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT) -#define 
I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3 -#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5 -#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT) -#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24 -#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT) -#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25 -#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31 -#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT) -#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VFINT_ICR0_MAX_INDEX 127 -#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0 -#define I40E_VFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_INTEVENT_SHIFT) -#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1 -#define I40E_VFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_0_SHIFT) -#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2 -#define I40E_VFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_1_SHIFT) -#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3 -#define I40E_VFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_2_SHIFT) -#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4 -#define I40E_VFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_3_SHIFT) -#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25 -#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT) -#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30 -#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT) -#define I40E_VFINT_ICR0_SWINT_SHIFT 31 -#define I40E_VFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_SWINT_SHIFT) -#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127 -#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25 -#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT) -#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30 -#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT) -#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31 -#define I40E_VFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_RSVD_SHIFT) -#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */ -#define I40E_VFINT_ITR0_MAX_INDEX 2 -#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0 -#define I40E_VFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_INTERVAL_SHIFT) -#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */ -#define I40E_VFINT_ITRN_MAX_INDEX 2 -#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0 -#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT) -#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127 -#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 -#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) #define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VPINT_AEQCTL_MAX_INDEX 127 #define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0 -#define 
I40E_VPINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) #define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11 -#define I40E_VPINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_AEQCTL_ITR_INDX_SHIFT) -#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13 -#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT) #define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30 #define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT) -#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31 -#define I40E_VPINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_INTEVENT_SHIFT) #define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: CORER */ -#define I40E_VPINT_CEQCTL_MAX_INDEX 511 #define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0 -#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) #define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11 -#define I40E_VPINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) -#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13 -#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT) #define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16 #define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT) #define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27 #define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) #define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30 #define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT) -#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31 -#define I40E_VPINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_INTEVENT_SHIFT) #define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VPINT_LNKLST0_MAX_INDEX 127 #define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0 #define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) -#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11 -#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT) #define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ -#define I40E_VPINT_LNKLSTN_MAX_INDEX 511 #define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0 #define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) #define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11 #define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) -#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VPINT_RATE0_MAX_INDEX 127 -#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0 -#define I40E_VPINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATE0_INTERVAL_SHIFT) -#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6 -#define I40E_VPINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATE0_INTRL_ENA_SHIFT) -#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ -#define I40E_VPINT_RATEN_MAX_INDEX 511 -#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0 -#define I40E_VPINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATEN_INTERVAL_SHIFT) -#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6 -#define I40E_VPINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATEN_INTRL_ENA_SHIFT) -#define I40E_GL_RDPU_CNTRL 0x00051060 /* Reset: CORER */ -#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0 -#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK 
I40E_MASK(0x1, I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT) -#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1 -#define I40E_GL_RDPU_CNTRL_ECO_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_RDPU_CNTRL_ECO_SHIFT) #define I40E_GLLAN_RCTL_0 0x0012A500 /* Reset: CORER */ #define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0 #define I40E_GLLAN_RCTL_0_PXE_MODE_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT) #define I40E_GLLAN_TSOMSK_F 0x000442D8 /* Reset: CORER */ -#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0 -#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT) #define I40E_GLLAN_TSOMSK_L 0x000442E0 /* Reset: CORER */ -#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0 -#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT) #define I40E_GLLAN_TSOMSK_M 0x000442DC /* Reset: CORER */ -#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0 -#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT) #define I40E_GLLAN_TXPRE_QDIS(_i) (0x000e6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */ -#define I40E_GLLAN_TXPRE_QDIS_MAX_INDEX 11 #define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0 #define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT) -#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT 16 -#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT) #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30 #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT) #define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31 @@ -1257,19 +320,12 @@ #define I40E_PFLAN_QALLOC_VALID_SHIFT 31 #define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PFLAN_QALLOC_VALID_SHIFT) #define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ -#define I40E_QRX_ENA_MAX_INDEX 1535 #define I40E_QRX_ENA_QENA_REQ_SHIFT 0 #define I40E_QRX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT) -#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1 -#define I40E_QRX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QRX_ENA_FAST_QDIS_SHIFT) #define I40E_QRX_ENA_QENA_STAT_SHIFT 2 #define I40E_QRX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT) #define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ -#define I40E_QRX_TAIL_MAX_INDEX 1535 -#define I40E_QRX_TAIL_TAIL_SHIFT 0 -#define I40E_QRX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL_TAIL_SHIFT) #define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ -#define I40E_QTX_CTL_MAX_INDEX 1535 #define I40E_QTX_CTL_PFVF_Q_SHIFT 0 #define I40E_QTX_CTL_PFVF_Q_MASK I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT) #define I40E_QTX_CTL_PF_INDX_SHIFT 2 @@ -1277,43 +333,22 @@ #define I40E_QTX_CTL_VFVM_INDX_SHIFT 7 #define I40E_QTX_CTL_VFVM_INDX_MASK I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT) #define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ -#define I40E_QTX_ENA_MAX_INDEX 1535 #define I40E_QTX_ENA_QENA_REQ_SHIFT 0 #define I40E_QTX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT) -#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1 -#define I40E_QTX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QTX_ENA_FAST_QDIS_SHIFT) #define I40E_QTX_ENA_QENA_STAT_SHIFT 2 #define I40E_QTX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT) #define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ -#define I40E_QTX_HEAD_MAX_INDEX 1535 -#define I40E_QTX_HEAD_HEAD_SHIFT 0 -#define I40E_QTX_HEAD_HEAD_MASK 
I40E_MASK(0x1FFF, I40E_QTX_HEAD_HEAD_SHIFT) -#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16 -#define I40E_QTX_HEAD_RS_PENDING_MASK I40E_MASK(0x1, I40E_QTX_HEAD_RS_PENDING_SHIFT) #define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ -#define I40E_QTX_TAIL_MAX_INDEX 1535 -#define I40E_QTX_TAIL_TAIL_SHIFT 0 -#define I40E_QTX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL_TAIL_SHIFT) #define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VPLAN_MAPENA_MAX_INDEX 127 #define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0 #define I40E_VPLAN_MAPENA_TXRX_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT) #define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */ -#define I40E_VPLAN_QTABLE_MAX_INDEX 15 #define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0 #define I40E_VPLAN_QTABLE_QINDEX_MASK I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT) #define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */ -#define I40E_VSILAN_QBASE_MAX_INDEX 383 -#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0 -#define I40E_VSILAN_QBASE_VSIBASE_MASK I40E_MASK(0x7FF, I40E_VSILAN_QBASE_VSIBASE_SHIFT) #define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11 #define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT) #define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */ /* Reset: PFR */ -#define I40E_VSILAN_QTABLE_MAX_INDEX 7 -#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0 -#define I40E_VSILAN_QTABLE_QINDEX_0_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_0_SHIFT) -#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16 -#define I40E_VSILAN_QTABLE_QINDEX_1_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) #define I40E_PRTGL_SAH 0x001E2140 /* Reset: GLOBR */ #define I40E_PRTGL_SAH_FC_SAH_SHIFT 0 #define I40E_PRTGL_SAH_FC_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT) @@ -1322,789 +357,47 @@ #define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */ #define I40E_PRTGL_SAL_FC_SAL_SHIFT 0 #define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110 /* Reset: GLOBR */ -#define 
I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8 -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8 -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480 /* Reset: GLOBR */ -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0 
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484 /* Reset: GLOBR */ -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT) -#define I40E_GL_FWRESETCNT 0x00083100 /* Reset: POR */ -#define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0 -#define I40E_GL_FWRESETCNT_FWRESETCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT) -#define I40E_GL_MNG_FWSM 0x000B6134 /* Reset: POR */ -#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0 -#define I40E_GL_MNG_FWSM_FW_MODES_MASK I40E_MASK(0x3, I40E_GL_MNG_FWSM_FW_MODES_SHIFT) -#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10 -#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT) -#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11 -#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK I40E_MASK(0xF, 
I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT) -#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15 -#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT) -#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16 -#define I40E_GL_MNG_FWSM_RESET_CNT_MASK I40E_MASK(0x7, I40E_GL_MNG_FWSM_RESET_CNT_SHIFT) -#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19 -#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK I40E_MASK(0x3F, I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT) -#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26 -#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT) -#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27 -#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT) -#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28 -#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT) -#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29 -#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT) -#define I40E_GL_MNG_HWARB_CTRL 0x000B6130 /* Reset: POR */ -#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0 -#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK I40E_MASK(0x1, I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT) -#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ /* Reset: POR */ -#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31 -#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0 -#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT) -#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260 /* Reset: POR */ -#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0 -#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT) -#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ -#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7 -#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0 -#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT) -#define I40E_PRT_MNG_MANC 0x00256A20 /* Reset: POR */ -#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0 -#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT) -#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1 -#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT) -#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17 -#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT) -#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19 -#define I40E_PRT_MNG_MANC_RCV_ALL_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_ALL_SHIFT) -#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25 -#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT) -#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26 -#define I40E_PRT_MNG_MANC_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NET_TYPE_SHIFT) -#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28 -#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT) -#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29 -#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT) -#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ -#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7 -#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0 -#define 
I40E_PRT_MNG_MAVTV_VID_MASK I40E_MASK(0xFFF, I40E_PRT_MNG_MAVTV_VID_SHIFT) -#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ -#define I40E_PRT_MNG_MDEF_MAX_INDEX 7 -#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0 -#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4 -#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5 -#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13 -#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17 -#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21 -#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25 -#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26 -#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27 -#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28 -#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29 -#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30 -#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31 -#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ -#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7 -#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0 -#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4 -#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8 -#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24 -#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25 -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26 -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27 -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK I40E_MASK(0x1, 
I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28 -#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29 -#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30 -#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31 -#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT) -#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3 -#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0 -#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT) -#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16 -#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT) -#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_PRT_MNG_METF_MAX_INDEX 3 -#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0 -#define I40E_PRT_MNG_METF_ETYPE_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_METF_ETYPE_SHIFT) -#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30 -#define I40E_PRT_MNG_METF_POLARITY_MASK I40E_MASK(0x1, I40E_PRT_MNG_METF_POLARITY_SHIFT) -#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */ -#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15 -#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0 -#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT) -#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16 -#define I40E_PRT_MNG_MFUTP_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_UDP_SHIFT) -#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17 -#define I40E_PRT_MNG_MFUTP_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_TCP_SHIFT) -#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18 -#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT) -#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3 -#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0 -#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT) -#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */ -#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15 -#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0 -#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT) -#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_PRT_MNG_MMAH_MAX_INDEX 3 -#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0 -#define I40E_PRT_MNG_MMAH_MMAH_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MMAH_MMAH_SHIFT) -#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_PRT_MNG_MMAL_MAX_INDEX 3 -#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0 -#define I40E_PRT_MNG_MMAL_MMAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MMAL_MMAL_SHIFT) -#define I40E_PRT_MNG_MNGONLY 0x00256A60 /* Reset: POR */ -#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0 -#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT) -#define 
I40E_PRT_MNG_MSFM 0x00256AA0 /* Reset: POR */
-#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
-#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
-#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
-#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
-#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
-#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
-#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
-#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
-#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4
-#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
-#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5
-#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
-#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6
-#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
-#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7
-#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
-#define I40E_MSIX_PBA(_i) (0x00001000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: FLR */
-#define I40E_MSIX_PBA_MAX_INDEX 5
-#define I40E_MSIX_PBA_PENBIT_SHIFT 0
-#define I40E_MSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_PBA_PENBIT_SHIFT)
-#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
-#define I40E_MSIX_TADD_MAX_INDEX 128
-#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
-#define I40E_MSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_MSIX_TADD_MSIXTADD10_SHIFT)
-#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2
-#define I40E_MSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_MSIX_TADD_MSIXTADD_SHIFT)
-#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
-#define I40E_MSIX_TMSG_MAX_INDEX 128
-#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
-#define I40E_MSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
-#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
-#define I40E_MSIX_TUADD_MAX_INDEX 128
-#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
-#define I40E_MSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
-#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
-#define I40E_MSIX_TVCTRL_MAX_INDEX 128
-#define I40E_MSIX_TVCTRL_MASK_SHIFT 0
-#define I40E_MSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT)
-#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */
-#define I40E_VFMSIX_PBA1_MAX_INDEX 19
-#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
-#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT)
-#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TADD1_MAX_INDEX 639
-#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
-#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
-#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2
-#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
-#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TMSG1_MAX_INDEX 639
-#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
-#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
-#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TUADD1_MAX_INDEX 639
-#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
-#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
-#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
-#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
-#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
-#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
-#define I40E_GLNVM_FLA_FL_SCK_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT)
-#define I40E_GLNVM_FLA_FL_CE_SHIFT 1
-#define I40E_GLNVM_FLA_FL_CE_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_CE_SHIFT)
-#define I40E_GLNVM_FLA_FL_SI_SHIFT 2
-#define I40E_GLNVM_FLA_FL_SI_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SI_SHIFT)
-#define I40E_GLNVM_FLA_FL_SO_SHIFT 3
-#define I40E_GLNVM_FLA_FL_SO_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SO_SHIFT)
-#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4
-#define I40E_GLNVM_FLA_FL_REQ_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_REQ_SHIFT)
-#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5
-#define I40E_GLNVM_FLA_FL_GNT_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_GNT_SHIFT)
#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
-#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
-#define I40E_GLNVM_FLA_FL_SADDR_MASK I40E_MASK(0x7FF, I40E_GLNVM_FLA_FL_SADDR_SHIFT)
-#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30
-#define I40E_GLNVM_FLA_FL_BUSY_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_BUSY_SHIFT)
-#define I40E_GLNVM_FLA_FL_DER_SHIFT 31
-#define I40E_GLNVM_FLA_FL_DER_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_DER_SHIFT)
-#define I40E_GLNVM_FLASHID 0x000B6104 /* Reset: POR */
-#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0
-#define I40E_GLNVM_FLASHID_FLASHID_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_FLASHID_FLASHID_SHIFT)
-#define I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT 31
-#define I40E_GLNVM_FLASHID_FLEEP_PERF_MASK I40E_MASK(0x1, I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT)
#define I40E_GLNVM_GENS 0x000B6100 /* Reset: POR */
-#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0
-#define I40E_GLNVM_GENS_NVM_PRES_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_NVM_PRES_SHIFT)
#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
#define I40E_GLNVM_GENS_SR_SIZE_MASK I40E_MASK(0x7, I40E_GLNVM_GENS_SR_SIZE_SHIFT)
-#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8
-#define I40E_GLNVM_GENS_BANK1VAL_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_BANK1VAL_SHIFT)
-#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23
-#define I40E_GLNVM_GENS_ALT_PRST_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_ALT_PRST_SHIFT)
-#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
-#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
-#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset: POR */
-#define I40E_GLNVM_PROTCSR_MAX_INDEX 59
-#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
-#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
#define I40E_GLNVM_SRCTL 0x000B6110 /* Reset: POR */
-#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
-#define I40E_GLNVM_SRCTL_SRBUSY_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
-#define I40E_GLNVM_SRCTL_ADDR_MASK I40E_MASK(0x7FFF, I40E_GLNVM_SRCTL_ADDR_SHIFT)
-#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
-#define I40E_GLNVM_SRCTL_WRITE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_WRITE_SHIFT)
#define I40E_GLNVM_SRCTL_START_SHIFT 30
-#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1u, I40E_GLNVM_SRCTL_DONE_SHIFT)
#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
-#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
-#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
#define I40E_GLNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT)
#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
-#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
-#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
-#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
-#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
-#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
-#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
-#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
-#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
-#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
-#define I40E_GLPCI_BYTCTH 0x0009C484 /* Reset: PCIR */
-#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
-#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
-#define I40E_GLPCI_BYTCTL 0x0009C488 /* Reset: PCIR */
-#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
-#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
-#define I40E_GLPCI_CAPCTRL 0x000BE4A4 /* Reset: PCIR */
-#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
-#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
#define I40E_GLPCI_CAPSUP 0x000BE4A8 /* Reset: PCIR */
-#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
-#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
-#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
-#define I40E_GLPCI_CAPSUP_LTR_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
-#define I40E_GLPCI_CAPSUP_TPH_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
#define I40E_GLPCI_CAPSUP_ARI_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
-#define I40E_GLPCI_CAPSUP_IOV_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
-#define I40E_GLPCI_CAPSUP_ACS_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7 -#define I40E_GLPCI_CAPSUP_SEC_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_SEC_EN_SHIFT) -#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16 -#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT) -#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17 -#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT) -#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18 -#define I40E_GLPCI_CAPSUP_IDO_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IDO_EN_SHIFT) -#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19 -#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT) -#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20 -#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT) -#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30 -#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT) -#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31 -#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT) -#define I40E_GLPCI_CNF 0x000BE4C0 /* Reset: POR */ -#define I40E_GLPCI_CNF_FLEX10_SHIFT 1 -#define I40E_GLPCI_CNF_FLEX10_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_FLEX10_SHIFT) -#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2 -#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT) #define I40E_GLPCI_CNF2 0x000BE494 /* Reset: PCIR */ -#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0 -#define I40E_GLPCI_CNF2_RO_DIS_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_RO_DIS_SHIFT) -#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1 -#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT) #define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2 #define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT) #define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13 #define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT) -#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */ -#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0 -#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT) -#define I40E_GLPCI_GSCL_1 0x0009C48C /* Reset: PCIR */ -#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0 -#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT) -#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1 -#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT) -#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2 -#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT) -#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3 -#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT) -#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4 -#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT) -#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5 -#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT) -#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6 -#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT) -#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7 -#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT) -#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8 -#define 
I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT) -#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9 -#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT) -#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14 -#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT) -#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15 -#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT) -#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28 -#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT) -#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29 -#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT) -#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30 -#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT) -#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31 -#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT) -#define I40E_GLPCI_GSCL_2 0x0009C490 /* Reset: PCIR */ -#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0 -#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT) -#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8 -#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT) -#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16 -#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT) -#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24 -#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT) -#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */ -#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3 -#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0 -#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT) -#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16 -#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT) -#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */ -#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3 -#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0 -#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT) #define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */ -#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0 -#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT) -#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1 -#define I40E_GLPCI_LBARCTRL_BAR32_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_BAR32_SHIFT) -#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3 -#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT) -#define I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT 4 -#define I40E_GLPCI_LBARCTRL_RSVD_4_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT) #define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6 #define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT) -#define I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT 10 -#define I40E_GLPCI_LBARCTRL_RSVD_10_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT) -#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11 -#define 
I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT) -#define I40E_GLPCI_LINKCAP 0x000BE4AC /* Reset: PCIR */ -#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0 -#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK I40E_MASK(0x3F, I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT) -#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6 -#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK I40E_MASK(0x7, I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT) -#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9 -#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK I40E_MASK(0xF, I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT) -#define I40E_GLPCI_PCIERR 0x000BE4FC /* Reset: PCIR */ -#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0 -#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT) -#define I40E_GLPCI_PKTCT 0x0009C4BC /* Reset: PCIR */ -#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0 -#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT) -#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4 /* Reset: PCIR */ -#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0 -#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT) -#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16 -#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT) -#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0 /* Reset: PCIR */ -#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0 -#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT) -#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16 -#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT) -#define I40E_GLPCI_PMSUP 0x000BE4B0 /* Reset: PCIR */ -#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0 -#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT) -#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2 -#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT) -#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5 -#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT) -#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8 -#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT) -#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11 -#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT) -#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14 -#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK I40E_MASK(0x1, I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT) -#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15 -#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT) -#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC /* Reset: PCIR */ -#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0 -#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT) -#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8 -#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT) -#define I40E_GLPCI_PWRDATA 0x000BE490 /* Reset: PCIR */ -#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0 -#define I40E_GLPCI_PWRDATA_D0_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D0_POWER_SHIFT) -#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 
8 -#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT) -#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16 -#define I40E_GLPCI_PWRDATA_D3_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D3_POWER_SHIFT) -#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24 -#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK I40E_MASK(0x3, I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT) -#define I40E_GLPCI_REVID 0x000BE4B4 /* Reset: PCIR */ -#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0 -#define I40E_GLPCI_REVID_NVM_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_REVID_NVM_REVID_SHIFT) -#define I40E_GLPCI_SERH 0x000BE49C /* Reset: PCIR */ -#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0 -#define I40E_GLPCI_SERH_SER_NUM_H_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SERH_SER_NUM_H_SHIFT) -#define I40E_GLPCI_SERL 0x000BE498 /* Reset: PCIR */ -#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0 -#define I40E_GLPCI_SERL_SER_NUM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SERL_SER_NUM_L_SHIFT) -#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8 /* Reset: PCIR */ -#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0 -#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT) -#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC /* Reset: PCIR */ -#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0 -#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT) -#define I40E_GLPCI_SUBVENID 0x000BE48C /* Reset: PCIR */ -#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT 0 -#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT) -#define I40E_GLPCI_UPADD 0x000BE4F8 /* Reset: PCIR */ -#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1 -#define I40E_GLPCI_UPADD_ADDRESS_MASK I40E_MASK(0x7FFFFFFF, I40E_GLPCI_UPADD_ADDRESS_SHIFT) -#define I40E_GLPCI_VENDORID 0x000BE518 /* Reset: PCIR */ -#define I40E_GLPCI_VENDORID_VENDORID_SHIFT 0 -#define I40E_GLPCI_VENDORID_VENDORID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_VENDORID_VENDORID_SHIFT) -#define I40E_GLPCI_VFSUP 0x000BE4B8 /* Reset: PCIR */ -#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0 -#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT) -#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1 -#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT) -#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */ -#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9 -#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT) -#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11 -#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT) #define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */ -#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0 -#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT) -#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3 -#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK I40E_MASK(0x1F, I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT) -#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8 -#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK I40E_MASK(0xFF, I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT) #define I40E_PF_PCI_CIAA 0x0009C080 /* Reset: FLR */ -#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0 -#define I40E_PF_PCI_CIAA_ADDRESS_MASK I40E_MASK(0xFFF, I40E_PF_PCI_CIAA_ADDRESS_SHIFT) #define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12 -#define I40E_PF_PCI_CIAA_VF_NUM_MASK I40E_MASK(0x7F, I40E_PF_PCI_CIAA_VF_NUM_SHIFT) #define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */ -#define I40E_PF_PCI_CIAD_DATA_SHIFT 0 -#define 
I40E_PF_PCI_CIAD_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_PCI_CIAD_DATA_SHIFT) -#define I40E_PFPCI_CLASS 0x000BE400 /* Reset: PCIR */ -#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0 -#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT) -#define I40E_PFPCI_CLASS_RESERVED_1_SHIFT 1 -#define I40E_PFPCI_CLASS_RESERVED_1_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_RESERVED_1_SHIFT) -#define I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT 2 -#define I40E_PFPCI_CLASS_PF_IS_LAN_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT) -#define I40E_PFPCI_CNF 0x000BE000 /* Reset: PCIR */ -#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2 -#define I40E_PFPCI_CNF_MSI_EN_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_MSI_EN_SHIFT) -#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3 -#define I40E_PFPCI_CNF_EXROM_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_EXROM_DIS_SHIFT) -#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4 -#define I40E_PFPCI_CNF_IO_BAR_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_IO_BAR_SHIFT) -#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5 -#define I40E_PFPCI_CNF_INT_PIN_MASK I40E_MASK(0x3, I40E_PFPCI_CNF_INT_PIN_SHIFT) -#define I40E_PFPCI_DEVID 0x000BE080 /* Reset: PCIR */ -#define I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT 0 -#define I40E_PFPCI_DEVID_PF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT) -#define I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT 16 -#define I40E_PFPCI_DEVID_VF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT) -#define I40E_PFPCI_FACTPS 0x0009C180 /* Reset: FLR */ -#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0 -#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK I40E_MASK(0x3, I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT) -#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3 -#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK I40E_MASK(0x1, I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT) -#define I40E_PFPCI_FUNC 0x000BE200 /* Reset: POR */ -#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0 -#define I40E_PFPCI_FUNC_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_FUNC_DIS_SHIFT) -#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1 -#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT) -#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2 -#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT) -#define I40E_PFPCI_FUNC2 0x000BE180 /* Reset: PCIR */ -#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0 -#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT) -#define I40E_PFPCI_ICAUSE 0x0009C200 /* Reset: PFR */ -#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0 -#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT) -#define I40E_PFPCI_IENA 0x0009C280 /* Reset: PFR */ -#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0 -#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT) -#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800 /* Reset: PCIR */ -#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0 -#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT) -#define I40E_PFPCI_PM 0x000BE300 /* Reset: POR */ -#define I40E_PFPCI_PM_PME_EN_SHIFT 0 -#define I40E_PFPCI_PM_PME_EN_MASK I40E_MASK(0x1, I40E_PFPCI_PM_PME_EN_SHIFT) -#define I40E_PFPCI_STATUS1 0x000BE280 /* Reset: POR */ -#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0 -#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK I40E_MASK(0x1, I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT) -#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */ -#define 
I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT 0 -#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT) -#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT 16 -#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT) -#define I40E_PFPCI_VF_FLUSH_DONE 0x0000E400 /* Reset: PCIR */ -#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0 -#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT) -#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: PCIR */ -#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127 -#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0 -#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT) -#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880 /* Reset: PCIR */ -#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0 -#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT) -#define I40E_PFPCI_VMINDEX 0x0009C300 /* Reset: PCIR */ -#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0 -#define I40E_PFPCI_VMINDEX_VMINDEX_MASK I40E_MASK(0x1FF, I40E_PFPCI_VMINDEX_VMINDEX_SHIFT) -#define I40E_PFPCI_VMPEND 0x0009C380 /* Reset: PCIR */ -#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0 -#define I40E_PFPCI_VMPEND_PENDING_MASK I40E_MASK(0x1, I40E_PFPCI_VMPEND_PENDING_SHIFT) #define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */ -#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29 -#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT) #define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30 #define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT) #define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31 #define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT) -#define I40E_PRTPM_EEEC 0x001E4380 /* Reset: GLOBR */ -#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16 -#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT) -#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24 -#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK I40E_MASK(0x3, I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT) -#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26 -#define I40E_PRTPM_EEEC_TEEE_DLY_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TEEE_DLY_SHIFT) -#define I40E_PRTPM_EEEFWD 0x001E4400 /* Reset: GLOBR */ -#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31 -#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK I40E_MASK(0x1, I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT) -#define I40E_PRTPM_EEER 0x001E4360 /* Reset: GLOBR */ -#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0 -#define I40E_PRTPM_EEER_TW_SYSTEM_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEER_TW_SYSTEM_SHIFT) -#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16 -#define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT) -#define I40E_PRTPM_EEETXC 0x001E43E0 /* Reset: GLOBR */ -#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0 -#define I40E_PRTPM_EEETXC_TW_PHY_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEETXC_TW_PHY_SHIFT) -#define I40E_PRTPM_GC 0x000B8140 /* Reset: POR */ -#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0 -#define I40E_PRTPM_GC_EMP_LINK_ON_MASK I40E_MASK(0x1, I40E_PRTPM_GC_EMP_LINK_ON_SHIFT) -#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1 -#define I40E_PRTPM_GC_MNG_VETO_MASK I40E_MASK(0x1, I40E_PRTPM_GC_MNG_VETO_SHIFT) -#define I40E_PRTPM_GC_RATD_SHIFT 2 -#define I40E_PRTPM_GC_RATD_MASK I40E_MASK(0x1, 
I40E_PRTPM_GC_RATD_SHIFT) -#define I40E_PRTPM_GC_LCDMP_SHIFT 3 -#define I40E_PRTPM_GC_LCDMP_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LCDMP_SHIFT) -#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31 -#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT) #define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */ -#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0 -#define I40E_PRTPM_RLPIC_ERLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_RLPIC_ERLPIC_SHIFT) #define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */ -#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0 -#define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT) -#define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GL_PRS_FVBM_MAX_INDEX 3 -#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT 0 -#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_MASK I40E_MASK(0x7F, I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT) -#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT 8 -#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_MASK I40E_MASK(0x3F, I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT) -#define I40E_GL_PRS_FVBM_MSK_ENA_SHIFT 31 -#define I40E_GL_PRS_FVBM_MSK_ENA_MASK I40E_MASK(0x1, I40E_GL_PRS_FVBM_MSK_ENA_SHIFT) -#define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */ -#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0 -#define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT) -#define I40E_GLRPB_GHW 0x000AC830 /* Reset: CORER */ -#define I40E_GLRPB_GHW_GHW_SHIFT 0 -#define I40E_GLRPB_GHW_GHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GHW_GHW_SHIFT) -#define I40E_GLRPB_GLW 0x000AC834 /* Reset: CORER */ -#define I40E_GLRPB_GLW_GLW_SHIFT 0 -#define I40E_GLRPB_GLW_GLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GLW_GLW_SHIFT) -#define I40E_GLRPB_PHW 0x000AC844 /* Reset: CORER */ -#define I40E_GLRPB_PHW_PHW_SHIFT 0 -#define I40E_GLRPB_PHW_PHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PHW_PHW_SHIFT) -#define I40E_GLRPB_PLW 0x000AC848 /* Reset: CORER */ -#define I40E_GLRPB_PLW_PLW_SHIFT 0 -#define I40E_GLRPB_PLW_PLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PLW_PLW_SHIFT) -#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PRTRPB_DHW_MAX_INDEX 7 -#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0 -#define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT) -#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PRTRPB_DLW_MAX_INDEX 7 -#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0 -#define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DLW_DLW_TCN_SHIFT) -#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PRTRPB_DPS_MAX_INDEX 7 -#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0 -#define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT) -#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PRTRPB_SHT_MAX_INDEX 7 -#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0 -#define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT) -#define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */ -#define I40E_PRTRPB_SHW_SHW_SHIFT 0 -#define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT) -#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PRTRPB_SLT_MAX_INDEX 7 -#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0 -#define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT) -#define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: 
CORER */ -#define I40E_PRTRPB_SLW_SLW_SHIFT 0 -#define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT) -#define I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */ -#define I40E_PRTRPB_SPS_SPS_SHIFT 0 -#define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT) -#define I40E_GLQF_CTL 0x00269BA4 /* Reset: CORER */ -#define I40E_GLQF_CTL_HTOEP_SHIFT 1 -#define I40E_GLQF_CTL_HTOEP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_SHIFT) -#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2 -#define I40E_GLQF_CTL_HTOEP_FCOE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_FCOE_SHIFT) -#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3 -#define I40E_GLQF_CTL_PCNT_ALLOC_MASK I40E_MASK(0x7, I40E_GLQF_CTL_PCNT_ALLOC_SHIFT) -#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT 6 -#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT) -#define I40E_GLQF_CTL_RSVD_SHIFT 7 -#define I40E_GLQF_CTL_RSVD_MASK I40E_MASK(0x1, I40E_GLQF_CTL_RSVD_SHIFT) -#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8 -#define I40E_GLQF_CTL_MAXPEBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXPEBLEN_SHIFT) -#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11 -#define I40E_GLQF_CTL_MAXFCBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFCBLEN_SHIFT) -#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14 -#define I40E_GLQF_CTL_MAXFDBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFDBLEN_SHIFT) -#define I40E_GLQF_CTL_FDBEST_SHIFT 17 -#define I40E_GLQF_CTL_FDBEST_MASK I40E_MASK(0xFF, I40E_GLQF_CTL_FDBEST_SHIFT) -#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25 -#define I40E_GLQF_CTL_PROGPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_PROGPRIO_SHIFT) -#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26 -#define I40E_GLQF_CTL_INVALPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_INVALPRIO_SHIFT) -#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27 -#define I40E_GLQF_CTL_IGNORE_IP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_IGNORE_IP_SHIFT) #define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */ #define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0 #define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT) @@ -2112,36 +405,7 @@ #define I40E_GLQF_FDCNT_0_BESTCNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_BESTCNT_SHIFT) #define I40E_GLQF_HKEY(_i) (0x00270140 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */ #define I40E_GLQF_HKEY_MAX_INDEX 12 -#define I40E_GLQF_HKEY_KEY_0_SHIFT 0 -#define I40E_GLQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_0_SHIFT) -#define I40E_GLQF_HKEY_KEY_1_SHIFT 8 -#define I40E_GLQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_1_SHIFT) -#define I40E_GLQF_HKEY_KEY_2_SHIFT 16 -#define I40E_GLQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_2_SHIFT) -#define I40E_GLQF_HKEY_KEY_3_SHIFT 24 -#define I40E_GLQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_3_SHIFT) -#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */ -#define I40E_GLQF_HSYM_MAX_INDEX 63 -#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0 -#define I40E_GLQF_HSYM_SYMH_ENA_MASK I40E_MASK(0x1, I40E_GLQF_HSYM_SYMH_ENA_SHIFT) #define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: CORER */ -#define I40E_GLQF_PCNT_MAX_INDEX 511 -#define I40E_GLQF_PCNT_PCNT_SHIFT 0 -#define I40E_GLQF_PCNT_PCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PCNT_PCNT_SHIFT) -#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ -#define I40E_GLQF_SWAP_MAX_INDEX 1 -#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0 -#define I40E_GLQF_SWAP_OFF0_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC0_SHIFT) -#define 
I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6 -#define I40E_GLQF_SWAP_OFF0_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC1_SHIFT) -#define I40E_GLQF_SWAP_FLEN0_SHIFT 12 -#define I40E_GLQF_SWAP_FLEN0_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN0_SHIFT) -#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16 -#define I40E_GLQF_SWAP_OFF1_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC0_SHIFT) -#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22 -#define I40E_GLQF_SWAP_OFF1_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC1_SHIFT) -#define I40E_GLQF_SWAP_FLEN1_SHIFT 28 -#define I40E_GLQF_SWAP_FLEN1_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN1_SHIFT) #define I40E_PFQF_CTL_0 0x001C0AC0 /* Reset: CORER */ #define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0 #define I40E_PFQF_CTL_0_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEHSIZE_SHIFT) @@ -2159,54 +423,19 @@ #define I40E_PFQF_CTL_0_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT) #define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19 #define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT) -#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20 -#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT) -#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24 -#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT) #define I40E_PFQF_CTL_1 0x00245D80 /* Reset: CORER */ #define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0 #define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT) -#define I40E_PFQF_FDALLOC 0x00246280 /* Reset: CORER */ -#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0 -#define I40E_PFQF_FDALLOC_FDALLOC_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDALLOC_SHIFT) -#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8 -#define I40E_PFQF_FDALLOC_FDBEST_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDBEST_SHIFT) #define I40E_PFQF_FDSTAT 0x00246380 /* Reset: CORER */ #define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0 #define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT) #define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16 #define I40E_PFQF_FDSTAT_BEST_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_BEST_CNT_SHIFT) #define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */ /* Reset: CORER */ -#define I40E_PFQF_HENA_MAX_INDEX 1 -#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0 -#define I40E_PFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_HENA_PTYPE_ENA_SHIFT) #define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */ /* Reset: CORER */ #define I40E_PFQF_HKEY_MAX_INDEX 12 -#define I40E_PFQF_HKEY_KEY_0_SHIFT 0 -#define I40E_PFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_0_SHIFT) -#define I40E_PFQF_HKEY_KEY_1_SHIFT 8 -#define I40E_PFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_1_SHIFT) -#define I40E_PFQF_HKEY_KEY_2_SHIFT 16 -#define I40E_PFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_2_SHIFT) -#define I40E_PFQF_HKEY_KEY_3_SHIFT 24 -#define I40E_PFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_3_SHIFT) #define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_PFQF_HLUT_MAX_INDEX 127 -#define I40E_PFQF_HLUT_LUT0_SHIFT 0 -#define I40E_PFQF_HLUT_LUT0_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT0_SHIFT) -#define I40E_PFQF_HLUT_LUT1_SHIFT 8 -#define I40E_PFQF_HLUT_LUT1_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT1_SHIFT) -#define I40E_PFQF_HLUT_LUT2_SHIFT 16 -#define I40E_PFQF_HLUT_LUT2_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT2_SHIFT) -#define I40E_PFQF_HLUT_LUT3_SHIFT 24 -#define I40E_PFQF_HLUT_LUT3_MASK 
I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT3_SHIFT) -#define I40E_PRTQF_CTL_0 0x00256E60 /* Reset: CORER */ -#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0 -#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK I40E_MASK(0x1, I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT) -#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */ /* Reset: CORER */ -#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63 -#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0 -#define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) #define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */ #define I40E_PRTQF_FD_INSET_MAX_INDEX 63 #define I40E_PRTQF_FD_INSET_INSET_SHIFT 0 @@ -2215,14 +444,7 @@ #define I40E_PRTQF_FD_INSET_MAX_INDEX 63 #define I40E_PRTQF_FD_INSET_INSET_SHIFT 0 #define I40E_PRTQF_FD_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTQF_FD_INSET_INSET_SHIFT) -#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */ -#define I40E_PRTQF_FD_MSK_MAX_INDEX 63 -#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0 -#define I40E_PRTQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRTQF_FD_MSK_MASK_SHIFT) -#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16 -#define I40E_PRTQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_PRTQF_FD_MSK_OFFSET_SHIFT) #define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ /* Reset: CORER */ -#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8 #define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0 #define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) #define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5 @@ -2230,775 +452,148 @@ #define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10 #define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) #define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */ /* Reset: CORER */ -#define I40E_VFQF_HENA1_MAX_INDEX 1 -#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0 -#define I40E_VFQF_HENA1_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA1_PTYPE_ENA_SHIFT) #define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */ /* Reset: CORER */ #define I40E_VFQF_HKEY1_MAX_INDEX 12 -#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0 -#define I40E_VFQF_HKEY1_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_0_SHIFT) -#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8 -#define I40E_VFQF_HKEY1_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_1_SHIFT) -#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16 -#define I40E_VFQF_HKEY1_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_2_SHIFT) -#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24 -#define I40E_VFQF_HKEY1_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_3_SHIFT) #define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */ #define I40E_VFQF_HLUT1_MAX_INDEX 15 -#define I40E_VFQF_HLUT1_LUT0_SHIFT 0 -#define I40E_VFQF_HLUT1_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT0_SHIFT) -#define I40E_VFQF_HLUT1_LUT1_SHIFT 8 -#define I40E_VFQF_HLUT1_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT1_SHIFT) -#define I40E_VFQF_HLUT1_LUT2_SHIFT 16 -#define I40E_VFQF_HLUT1_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT2_SHIFT) -#define I40E_VFQF_HLUT1_LUT3_SHIFT 24 -#define I40E_VFQF_HLUT1_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT3_SHIFT) -#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */ /* Reset: CORER */ -#define I40E_VFQF_HREGION1_MAX_INDEX 7 -#define 
I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0 -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT) -#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1 -#define I40E_VFQF_HREGION1_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_0_SHIFT) -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4 -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT) -#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5 -#define I40E_VFQF_HREGION1_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_1_SHIFT) -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8 -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT) -#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9 -#define I40E_VFQF_HREGION1_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_2_SHIFT) -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12 -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT) -#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13 -#define I40E_VFQF_HREGION1_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_3_SHIFT) -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16 -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT) -#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17 -#define I40E_VFQF_HREGION1_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_4_SHIFT) -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20 -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT) -#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21 -#define I40E_VFQF_HREGION1_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_5_SHIFT) -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24 -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT) -#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25 -#define I40E_VFQF_HREGION1_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_6_SHIFT) -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28 -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT) -#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29 -#define I40E_VFQF_HREGION1_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_7_SHIFT) -#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VPQF_CTL_MAX_INDEX 127 -#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0 -#define I40E_VPQF_CTL_PEHSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEHSIZE_SHIFT) -#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5 -#define I40E_VPQF_CTL_PEDSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEDSIZE_SHIFT) -#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10 -#define I40E_VPQF_CTL_FCHSIZE_MASK I40E_MASK(0xF, I40E_VPQF_CTL_FCHSIZE_SHIFT) -#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14 -#define I40E_VPQF_CTL_FCDSIZE_MASK I40E_MASK(0x3, I40E_VPQF_CTL_FCDSIZE_SHIFT) -#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */ -#define I40E_VSIQF_CTL_MAX_INDEX 383 -#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0 -#define I40E_VSIQF_CTL_FCOE_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_FCOE_ENA_SHIFT) -#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1 -#define I40E_VSIQF_CTL_PETCP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PETCP_ENA_SHIFT) -#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2 -#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT) -#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3 -#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK I40E_MASK(0x1, 
I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT) -#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4 -#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT) -#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5 -#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT) -#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */ /* Reset: PFR */ -#define I40E_VSIQF_TCREGION_MAX_INDEX 3 -#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0 -#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT) -#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9 -#define I40E_VSIQF_TCREGION_TC_SIZE_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE_SHIFT) -#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16 -#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT) -#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25 -#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT) -#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_FCOECRC_MAX_INDEX 143 -#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0 -#define I40E_GL_FCOECRC_FCOECRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOECRC_FCOECRC_SHIFT) -#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_FCOEDDPC_MAX_INDEX 143 -#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0 -#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT) -#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_FCOEDIFEC_MAX_INDEX 143 -#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0 -#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT) -#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143 -#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0 -#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT) -#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_FCOEDIXEC_MAX_INDEX 143 -#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0 -#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT) -#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_FCOEDIXVC_MAX_INDEX 143 -#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0 -#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT) -#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_FCOEDWRCH_MAX_INDEX 143 -#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0 -#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT) -#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_FCOEDWRCL_MAX_INDEX 143 -#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0 -#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT) -#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_FCOEDWTCH_MAX_INDEX 143 -#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0 -#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT) -#define I40E_GL_FCOEDWTCL(_i) 
(0x00348080 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_FCOEDWTCL_MAX_INDEX 143 -#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0 -#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT) -#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_FCOELAST_MAX_INDEX 143 -#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0 -#define I40E_GL_FCOELAST_FCOELAST_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOELAST_FCOELAST_SHIFT) -#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_FCOEPRC_MAX_INDEX 143 -#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0 -#define I40E_GL_FCOEPRC_FCOEPRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPRC_FCOEPRC_SHIFT) -#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_FCOEPTC_MAX_INDEX 143 -#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0 -#define I40E_GL_FCOEPTC_FCOEPTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPTC_FCOEPTC_SHIFT) -#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_FCOERPDC_MAX_INDEX 143 -#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0 -#define I40E_GL_FCOERPDC_FCOERPDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT) -#define I40E_GL_RXERR1_L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_RXERR1_L_MAX_INDEX 143 -#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0 -#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT) -#define I40E_GL_RXERR2_L(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_RXERR2_L_MAX_INDEX 143 -#define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0 -#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT) #define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_BPRCH_MAX_INDEX 3 -#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0 -#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT) #define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_BPRCL_MAX_INDEX 3 -#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0 -#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT) #define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_BPTCH_MAX_INDEX 3 -#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0 -#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT) #define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_BPTCL_MAX_INDEX 3 -#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0 -#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT) #define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_CRCERRS_MAX_INDEX 3 -#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0 -#define I40E_GLPRT_CRCERRS_CRCERRS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_CRCERRS_CRCERRS_SHIFT) #define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_GORCH_MAX_INDEX 3 -#define I40E_GLPRT_GORCH_GORCH_SHIFT 0 -#define I40E_GLPRT_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GORCH_GORCH_SHIFT) #define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_GORCL_MAX_INDEX 3 -#define 
I40E_GLPRT_GORCL_GORCL_SHIFT 0 -#define I40E_GLPRT_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GORCL_GORCL_SHIFT) #define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_GOTCH_MAX_INDEX 3 -#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0 -#define I40E_GLPRT_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GOTCH_GOTCH_SHIFT) #define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_GOTCL_MAX_INDEX 3 -#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0 -#define I40E_GLPRT_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GOTCL_GOTCL_SHIFT) #define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_ILLERRC_MAX_INDEX 3 -#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0 -#define I40E_GLPRT_ILLERRC_ILLERRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ILLERRC_ILLERRC_SHIFT) -#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_LDPC_MAX_INDEX 3 -#define I40E_GLPRT_LDPC_LDPC_SHIFT 0 -#define I40E_GLPRT_LDPC_LDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LDPC_LDPC_SHIFT) #define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3 -#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0 -#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT) #define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3 -#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0 -#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT) #define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_LXONRXC_MAX_INDEX 3 -#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0 -#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT) #define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_LXONTXC_MAX_INDEX 3 -#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0 -#define I40E_GLPRT_LXONTXC_LXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONTXC_LXONTXC_SHIFT) #define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_MLFC_MAX_INDEX 3 -#define I40E_GLPRT_MLFC_MLFC_SHIFT 0 -#define I40E_GLPRT_MLFC_MLFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MLFC_MLFC_SHIFT) #define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_MPRCH_MAX_INDEX 3 -#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0 -#define I40E_GLPRT_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPRCH_MPRCH_SHIFT) #define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_MPRCL_MAX_INDEX 3 -#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0 -#define I40E_GLPRT_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPRCL_MPRCL_SHIFT) #define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_MPTCH_MAX_INDEX 3 -#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0 -#define I40E_GLPRT_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPTCH_MPTCH_SHIFT) #define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_MPTCL_MAX_INDEX 3 -#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0 -#define I40E_GLPRT_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPTCL_MPTCL_SHIFT) #define 
I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_MRFC_MAX_INDEX 3 -#define I40E_GLPRT_MRFC_MRFC_SHIFT 0 -#define I40E_GLPRT_MRFC_MRFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MRFC_MRFC_SHIFT) #define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PRC1023H_MAX_INDEX 3 -#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0 -#define I40E_GLPRT_PRC1023H_PRC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1023H_PRC1023H_SHIFT) #define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PRC1023L_MAX_INDEX 3 -#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0 -#define I40E_GLPRT_PRC1023L_PRC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1023L_PRC1023L_SHIFT) #define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PRC127H_MAX_INDEX 3 -#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0 -#define I40E_GLPRT_PRC127H_PRC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC127H_PRC127H_SHIFT) #define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PRC127L_MAX_INDEX 3 -#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0 -#define I40E_GLPRT_PRC127L_PRC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC127L_PRC127L_SHIFT) #define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PRC1522H_MAX_INDEX 3 -#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0 -#define I40E_GLPRT_PRC1522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1522H_PRC1522H_SHIFT) #define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PRC1522L_MAX_INDEX 3 -#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0 -#define I40E_GLPRT_PRC1522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1522L_PRC1522L_SHIFT) #define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PRC255H_MAX_INDEX 3 -#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0 -#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT) #define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PRC255L_MAX_INDEX 3 -#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0 -#define I40E_GLPRT_PRC255L_PRC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC255L_PRC255L_SHIFT) #define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PRC511H_MAX_INDEX 3 -#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0 -#define I40E_GLPRT_PRC511H_PRC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC511H_PRC511H_SHIFT) #define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PRC511L_MAX_INDEX 3 -#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0 -#define I40E_GLPRT_PRC511L_PRC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC511L_PRC511L_SHIFT) #define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PRC64H_MAX_INDEX 3 -#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0 -#define I40E_GLPRT_PRC64H_PRC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC64H_PRC64H_SHIFT) #define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PRC64L_MAX_INDEX 3 -#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0 -#define I40E_GLPRT_PRC64L_PRC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC64L_PRC64L_SHIFT) #define I40E_GLPRT_PRC9522H(_i) (0x00300544 + 
((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PRC9522H_MAX_INDEX 3 -#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0 -#define I40E_GLPRT_PRC9522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC9522H_PRC1522H_SHIFT) #define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PRC9522L_MAX_INDEX 3 -#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0 -#define I40E_GLPRT_PRC9522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC9522L_PRC1522L_SHIFT) #define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC1023H_MAX_INDEX 3 -#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0 -#define I40E_GLPRT_PTC1023H_PTC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1023H_PTC1023H_SHIFT) #define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC1023L_MAX_INDEX 3 -#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0 -#define I40E_GLPRT_PTC1023L_PTC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1023L_PTC1023L_SHIFT) #define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC127H_MAX_INDEX 3 -#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0 -#define I40E_GLPRT_PTC127H_PTC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC127H_PTC127H_SHIFT) #define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC127L_MAX_INDEX 3 -#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0 -#define I40E_GLPRT_PTC127L_PTC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC127L_PTC127L_SHIFT) #define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC1522H_MAX_INDEX 3 -#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0 -#define I40E_GLPRT_PTC1522H_PTC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1522H_PTC1522H_SHIFT) #define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC1522L_MAX_INDEX 3 -#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0 -#define I40E_GLPRT_PTC1522L_PTC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1522L_PTC1522L_SHIFT) #define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC255H_MAX_INDEX 3 -#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0 -#define I40E_GLPRT_PTC255H_PTC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC255H_PTC255H_SHIFT) #define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC255L_MAX_INDEX 3 -#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0 -#define I40E_GLPRT_PTC255L_PTC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC255L_PTC255L_SHIFT) #define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC511H_MAX_INDEX 3 -#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0 -#define I40E_GLPRT_PTC511H_PTC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC511H_PTC511H_SHIFT) #define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC511L_MAX_INDEX 3 -#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0 -#define I40E_GLPRT_PTC511L_PTC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC511L_PTC511L_SHIFT) #define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC64H_MAX_INDEX 3 -#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0 -#define I40E_GLPRT_PTC64H_PTC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC64H_PTC64H_SHIFT) #define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 
8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC64L_MAX_INDEX 3 -#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0 -#define I40E_GLPRT_PTC64L_PTC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC64L_PTC64L_SHIFT) #define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC9522H_MAX_INDEX 3 -#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0 -#define I40E_GLPRT_PTC9522H_PTC9522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC9522H_PTC9522H_SHIFT) #define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC9522L_MAX_INDEX 3 -#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0 -#define I40E_GLPRT_PTC9522L_PTC9522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC9522L_PTC9522L_SHIFT) #define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ -#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3 -#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0 -#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT) #define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ -#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3 -#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0 -#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT) #define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ -#define I40E_GLPRT_PXONRXC_MAX_INDEX 3 -#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0 -#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT) #define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ -#define I40E_GLPRT_PXONTXC_MAX_INDEX 3 -#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0 -#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT) #define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_RDPC_MAX_INDEX 3 -#define I40E_GLPRT_RDPC_RDPC_SHIFT 0 -#define I40E_GLPRT_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RDPC_RDPC_SHIFT) #define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_RFC_MAX_INDEX 3 -#define I40E_GLPRT_RFC_RFC_SHIFT 0 -#define I40E_GLPRT_RFC_RFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RFC_RFC_SHIFT) #define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_RJC_MAX_INDEX 3 -#define I40E_GLPRT_RJC_RJC_SHIFT 0 -#define I40E_GLPRT_RJC_RJC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RJC_RJC_SHIFT) #define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_RLEC_MAX_INDEX 3 -#define I40E_GLPRT_RLEC_RLEC_SHIFT 0 -#define I40E_GLPRT_RLEC_RLEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RLEC_RLEC_SHIFT) #define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_ROC_MAX_INDEX 3 -#define I40E_GLPRT_ROC_ROC_SHIFT 0 -#define I40E_GLPRT_ROC_ROC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ROC_ROC_SHIFT) #define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_RUC_MAX_INDEX 3 -#define I40E_GLPRT_RUC_RUC_SHIFT 0 -#define I40E_GLPRT_RUC_RUC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUC_RUC_SHIFT) -#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */ /* Reset: 
CORER */ -#define I40E_GLPRT_RUPP_MAX_INDEX 3 -#define I40E_GLPRT_RUPP_RUPP_SHIFT 0 -#define I40E_GLPRT_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUPP_RUPP_SHIFT) #define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ -#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3 -#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0 -#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT) #define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_TDOLD_MAX_INDEX 3 -#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0 -#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT) #define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_UPRCH_MAX_INDEX 3 -#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0 -#define I40E_GLPRT_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPRCH_UPRCH_SHIFT) #define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_UPRCL_MAX_INDEX 3 -#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0 -#define I40E_GLPRT_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPRCL_UPRCL_SHIFT) #define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_UPTCH_MAX_INDEX 3 -#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0 -#define I40E_GLPRT_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPTCH_UPTCH_SHIFT) #define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_UPTCL_MAX_INDEX 3 -#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0 -#define I40E_GLPRT_UPTCL_VUPTCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPTCL_VUPTCH_SHIFT) #define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_BPRCH_MAX_INDEX 15 -#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0 -#define I40E_GLSW_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPRCH_BPRCH_SHIFT) #define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_BPRCL_MAX_INDEX 15 -#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0 -#define I40E_GLSW_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPRCL_BPRCL_SHIFT) #define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_BPTCH_MAX_INDEX 15 -#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0 -#define I40E_GLSW_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPTCH_BPTCH_SHIFT) #define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_BPTCL_MAX_INDEX 15 -#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0 -#define I40E_GLSW_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPTCL_BPTCL_SHIFT) #define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_GORCH_MAX_INDEX 15 -#define I40E_GLSW_GORCH_GORCH_SHIFT 0 -#define I40E_GLSW_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GORCH_GORCH_SHIFT) #define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_GORCL_MAX_INDEX 15 -#define I40E_GLSW_GORCL_GORCL_SHIFT 0 -#define I40E_GLSW_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GORCL_GORCL_SHIFT) #define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_GOTCH_MAX_INDEX 15 -#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0 -#define I40E_GLSW_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GOTCH_GOTCH_SHIFT) 
#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_GOTCL_MAX_INDEX 15 -#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0 -#define I40E_GLSW_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GOTCL_GOTCL_SHIFT) #define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_MPRCH_MAX_INDEX 15 -#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0 -#define I40E_GLSW_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPRCH_MPRCH_SHIFT) #define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_MPRCL_MAX_INDEX 15 -#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0 -#define I40E_GLSW_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPRCL_MPRCL_SHIFT) #define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_MPTCH_MAX_INDEX 15 -#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0 -#define I40E_GLSW_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPTCH_MPTCH_SHIFT) #define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_MPTCL_MAX_INDEX 15 -#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0 -#define I40E_GLSW_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPTCL_MPTCL_SHIFT) #define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_RUPP_MAX_INDEX 15 -#define I40E_GLSW_RUPP_RUPP_SHIFT 0 -#define I40E_GLSW_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_RUPP_RUPP_SHIFT) #define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_TDPC_MAX_INDEX 15 -#define I40E_GLSW_TDPC_TDPC_SHIFT 0 -#define I40E_GLSW_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_TDPC_TDPC_SHIFT) #define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_UPRCH_MAX_INDEX 15 -#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0 -#define I40E_GLSW_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPRCH_UPRCH_SHIFT) #define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_UPRCL_MAX_INDEX 15 -#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0 -#define I40E_GLSW_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPRCL_UPRCL_SHIFT) #define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_UPTCH_MAX_INDEX 15 -#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0 -#define I40E_GLSW_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPTCH_UPTCH_SHIFT) #define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_UPTCL_MAX_INDEX 15 -#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0 -#define I40E_GLSW_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPTCL_UPTCL_SHIFT) #define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_BPRCH_MAX_INDEX 383 -#define I40E_GLV_BPRCH_BPRCH_SHIFT 0 -#define I40E_GLV_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPRCH_BPRCH_SHIFT) #define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_BPRCL_MAX_INDEX 383 -#define I40E_GLV_BPRCL_BPRCL_SHIFT 0 -#define I40E_GLV_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPRCL_BPRCL_SHIFT) #define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_BPTCH_MAX_INDEX 383 -#define I40E_GLV_BPTCH_BPTCH_SHIFT 0 -#define I40E_GLV_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPTCH_BPTCH_SHIFT) #define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* 
_i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_BPTCL_MAX_INDEX 383 -#define I40E_GLV_BPTCL_BPTCL_SHIFT 0 -#define I40E_GLV_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPTCL_BPTCL_SHIFT) #define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_GORCH_MAX_INDEX 383 -#define I40E_GLV_GORCH_GORCH_SHIFT 0 -#define I40E_GLV_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GORCH_GORCH_SHIFT) #define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_GORCL_MAX_INDEX 383 -#define I40E_GLV_GORCL_GORCL_SHIFT 0 -#define I40E_GLV_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GORCL_GORCL_SHIFT) #define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_GOTCH_MAX_INDEX 383 -#define I40E_GLV_GOTCH_GOTCH_SHIFT 0 -#define I40E_GLV_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GOTCH_GOTCH_SHIFT) #define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_GOTCL_MAX_INDEX 383 -#define I40E_GLV_GOTCL_GOTCL_SHIFT 0 -#define I40E_GLV_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GOTCL_GOTCL_SHIFT) #define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_MPRCH_MAX_INDEX 383 -#define I40E_GLV_MPRCH_MPRCH_SHIFT 0 -#define I40E_GLV_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPRCH_MPRCH_SHIFT) #define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_MPRCL_MAX_INDEX 383 -#define I40E_GLV_MPRCL_MPRCL_SHIFT 0 -#define I40E_GLV_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPRCL_MPRCL_SHIFT) #define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_MPTCH_MAX_INDEX 383 -#define I40E_GLV_MPTCH_MPTCH_SHIFT 0 -#define I40E_GLV_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPTCH_MPTCH_SHIFT) #define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_MPTCL_MAX_INDEX 383 -#define I40E_GLV_MPTCL_MPTCL_SHIFT 0 -#define I40E_GLV_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPTCL_MPTCL_SHIFT) #define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_RDPC_MAX_INDEX 383 -#define I40E_GLV_RDPC_RDPC_SHIFT 0 -#define I40E_GLV_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RDPC_RDPC_SHIFT) #define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_RUPP_MAX_INDEX 383 -#define I40E_GLV_RUPP_RUPP_SHIFT 0 -#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT) #define I40E_GLV_TEPC(_i) (0x00344000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_TEPC_MAX_INDEX 383 -#define I40E_GLV_TEPC_TEPC_SHIFT 0 -#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT) #define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_UPRCH_MAX_INDEX 383 -#define I40E_GLV_UPRCH_UPRCH_SHIFT 0 -#define I40E_GLV_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPRCH_UPRCH_SHIFT) #define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_UPRCL_MAX_INDEX 383 -#define I40E_GLV_UPRCL_UPRCL_SHIFT 0 -#define I40E_GLV_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPRCL_UPRCL_SHIFT) #define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_UPTCH_MAX_INDEX 383 -#define 
I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0 -#define I40E_GLV_UPTCH_GLVUPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPTCH_GLVUPTCH_SHIFT) #define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_UPTCL_MAX_INDEX 383 -#define I40E_GLV_UPTCL_UPTCL_SHIFT 0 -#define I40E_GLV_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPTCL_UPTCL_SHIFT) #define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ -#define I40E_GLVEBTC_RBCH_MAX_INDEX 7 -#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0 -#define I40E_GLVEBTC_RBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RBCH_TCBCH_SHIFT) #define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ -#define I40E_GLVEBTC_RBCL_MAX_INDEX 7 -#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0 -#define I40E_GLVEBTC_RBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RBCL_TCBCL_SHIFT) #define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ -#define I40E_GLVEBTC_RPCH_MAX_INDEX 7 -#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0 -#define I40E_GLVEBTC_RPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RPCH_TCPCH_SHIFT) #define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ -#define I40E_GLVEBTC_RPCL_MAX_INDEX 7 -#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0 -#define I40E_GLVEBTC_RPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RPCL_TCPCL_SHIFT) #define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ -#define I40E_GLVEBTC_TBCH_MAX_INDEX 7 -#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0 -#define I40E_GLVEBTC_TBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TBCH_TCBCH_SHIFT) #define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ -#define I40E_GLVEBTC_TBCL_MAX_INDEX 7 -#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0 -#define I40E_GLVEBTC_TBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TBCL_TCBCL_SHIFT) #define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ -#define I40E_GLVEBTC_TPCH_MAX_INDEX 7 -#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0 -#define I40E_GLVEBTC_TPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TPCH_TCPCH_SHIFT) #define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ -#define I40E_GLVEBTC_TPCL_MAX_INDEX 7 -#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0 -#define I40E_GLVEBTC_TPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TPCL_TCPCL_SHIFT) -#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_BPCH_MAX_INDEX 127 -#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0 -#define I40E_GLVEBVL_BPCH_VLBPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_BPCH_VLBPCH_SHIFT) -#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_BPCL_MAX_INDEX 127 -#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0 -#define I40E_GLVEBVL_BPCL_VLBPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_BPCL_VLBPCL_SHIFT) -#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_GORCH_MAX_INDEX 127 -#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0 -#define I40E_GLVEBVL_GORCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GORCH_VLBCH_SHIFT) -#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ 
-#define I40E_GLVEBVL_GORCL_MAX_INDEX 127 -#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0 -#define I40E_GLVEBVL_GORCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GORCL_VLBCL_SHIFT) -#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127 -#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0 -#define I40E_GLVEBVL_GOTCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GOTCH_VLBCH_SHIFT) -#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127 -#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0 -#define I40E_GLVEBVL_GOTCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GOTCL_VLBCL_SHIFT) -#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_MPCH_MAX_INDEX 127 -#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0 -#define I40E_GLVEBVL_MPCH_VLMPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_MPCH_VLMPCH_SHIFT) -#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_MPCL_MAX_INDEX 127 -#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0 -#define I40E_GLVEBVL_MPCL_VLMPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_MPCL_VLMPCL_SHIFT) -#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_UPCH_MAX_INDEX 127 -#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0 -#define I40E_GLVEBVL_UPCH_VLUPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_UPCH_VLUPCH_SHIFT) -#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_UPCL_MAX_INDEX 127 -#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0 -#define I40E_GLVEBVL_UPCL_VLUPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_UPCL_VLUPCL_SHIFT) -#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C /* Reset: CORER */ -#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0 -#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT) -#define I40E_GL_SWR_DEF_ACT(_i) (0x00270200 + ((_i) * 4)) /* _i=0...35 */ /* Reset: CORER */ -#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 35 -#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0 -#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT) -#define I40E_GL_SWR_DEF_ACT_EN(_i) (0x0026CFB8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ -#define I40E_GL_SWR_DEF_ACT_EN_MAX_INDEX 1 -#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0 -#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT) -#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */ -#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0 -#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK I40E_MASK(0x7FFFFFFF, I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT) -#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31 -#define I40E_PRTTSYN_ADJ_SIGN_MASK I40E_MASK(0x1, I40E_PRTTSYN_ADJ_SIGN_SHIFT) -#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1 -#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0 -#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT) -#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1 -#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT) -#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3 -#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT) -#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8 -#define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, 
I40E_PRTTSYN_AUX_0_PULSEW_SHIFT) -#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16 -#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT) -#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1 -#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0 -#define I40E_PRTTSYN_AUX_1_INSTNT_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_INSTNT_SHIFT) -#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1 -#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT) -#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_CLKO_MAX_INDEX 1 -#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0 -#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT) #define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */ -#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0 -#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT) #define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1 #define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT) -#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2 -#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT) -#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3 -#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT) #define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8 #define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT) -#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12 -#define I40E_PRTTSYN_CTL0_TSYNACT_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL0_TSYNACT_SHIFT) #define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31 #define I40E_PRTTSYN_CTL0_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT) #define I40E_PRTTSYN_CTL1 0x00085020 /* Reset: CORER */ #define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0 #define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT) -#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8 -#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT) #define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16 #define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT) -#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20 -#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT) #define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24 -#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) #define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26 #define I40E_PRTTSYN_CTL1_UDP_ENA_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT) #define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31 #define I40E_PRTTSYN_CTL1_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT) -#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1 -#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0 -#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT) -#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1 -#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0 -#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT) #define I40E_PRTTSYN_INC_H 0x001E4060 /* Reset: GLOBR */ -#define 
I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0 -#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK I40E_MASK(0x3F, I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT) #define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */ -#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0 -#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT) #define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3 -#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0 -#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT) #define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3 -#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0 -#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT) #define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */ -#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0 -#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT) -#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1 -#define I40E_PRTTSYN_STAT_0_EVENT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT1_SHIFT) -#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2 -#define I40E_PRTTSYN_STAT_0_TGT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT0_SHIFT) -#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3 -#define I40E_PRTTSYN_STAT_0_TGT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT1_SHIFT) #define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4 #define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT) #define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */ -#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0 -#define I40E_PRTTSYN_STAT_1_RXT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT0_SHIFT) -#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1 -#define I40E_PRTTSYN_STAT_1_RXT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT1_SHIFT) -#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2 -#define I40E_PRTTSYN_STAT_1_RXT2_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT2_SHIFT) -#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3 -#define I40E_PRTTSYN_STAT_1_RXT3_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT3_SHIFT) -#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1 -#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0 -#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT) -#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1 -#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0 -#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT) #define I40E_PRTTSYN_TIME_H 0x001E4120 /* Reset: GLOBR */ -#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0 -#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT) #define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */ -#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0 -#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT) #define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */ -#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0 -#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT) #define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */ -#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0 -#define 
I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT) #define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */ #define I40E_GL_MDET_RX_FUNCTION_SHIFT 0 #define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT) @@ -3033,2304 +628,53 @@ #define I40E_PF_VT_PFALLOC_VALID_SHIFT 31 #define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PF_VT_PFALLOC_VALID_SHIFT) #define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VP_MDET_RX_MAX_INDEX 127 #define I40E_VP_MDET_RX_VALID_SHIFT 0 #define I40E_VP_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT) #define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VP_MDET_TX_MAX_INDEX 127 #define I40E_VP_MDET_TX_VALID_SHIFT 0 #define I40E_VP_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT) -#define I40E_GLPM_WUMC 0x0006C800 /* Reset: POR */ -#define I40E_GLPM_WUMC_NOTCO_SHIFT 0 -#define I40E_GLPM_WUMC_NOTCO_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_NOTCO_SHIFT) -#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1 -#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT) -#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2 -#define I40E_GLPM_WUMC_ROL_MODE_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_ROL_MODE_SHIFT) -#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3 -#define I40E_GLPM_WUMC_RESERVED_4_MASK I40E_MASK(0x1FFF, I40E_GLPM_WUMC_RESERVED_4_SHIFT) -#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16 -#define I40E_GLPM_WUMC_MNG_WU_PF_MASK I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT) #define I40E_PFPM_APM 0x000B8080 /* Reset: POR */ #define I40E_PFPM_APM_APME_SHIFT 0 #define I40E_PFPM_APM_APME_MASK I40E_MASK(0x1, I40E_PFPM_APM_APME_SHIFT) -#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ /* Reset: POR */ -#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7 -#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0 -#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT) -#define I40E_PFPM_WUC 0x0006B200 /* Reset: POR */ -#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5 -#define I40E_PFPM_WUC_EN_APM_D0_MASK I40E_MASK(0x1, I40E_PFPM_WUC_EN_APM_D0_SHIFT) #define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */ -#define I40E_PFPM_WUFC_LNKC_SHIFT 0 -#define I40E_PFPM_WUFC_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_LNKC_SHIFT) #define I40E_PFPM_WUFC_MAG_SHIFT 1 #define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT) -#define I40E_PFPM_WUFC_MNG_SHIFT 3 -#define I40E_PFPM_WUFC_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MNG_SHIFT) -#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4 -#define I40E_PFPM_WUFC_FLX0_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5 -#define I40E_PFPM_WUFC_FLX1_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6 -#define I40E_PFPM_WUFC_FLX2_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7 -#define I40E_PFPM_WUFC_FLX3_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8 -#define I40E_PFPM_WUFC_FLX4_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9 -#define I40E_PFPM_WUFC_FLX5_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10 -#define I40E_PFPM_WUFC_FLX6_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11 -#define 
I40E_PFPM_WUFC_FLX7_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX0_SHIFT 16 -#define I40E_PFPM_WUFC_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_SHIFT) -#define I40E_PFPM_WUFC_FLX1_SHIFT 17 -#define I40E_PFPM_WUFC_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_SHIFT) -#define I40E_PFPM_WUFC_FLX2_SHIFT 18 -#define I40E_PFPM_WUFC_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_SHIFT) -#define I40E_PFPM_WUFC_FLX3_SHIFT 19 -#define I40E_PFPM_WUFC_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_SHIFT) -#define I40E_PFPM_WUFC_FLX4_SHIFT 20 -#define I40E_PFPM_WUFC_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_SHIFT) -#define I40E_PFPM_WUFC_FLX5_SHIFT 21 -#define I40E_PFPM_WUFC_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_SHIFT) -#define I40E_PFPM_WUFC_FLX6_SHIFT 22 -#define I40E_PFPM_WUFC_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_SHIFT) -#define I40E_PFPM_WUFC_FLX7_SHIFT 23 -#define I40E_PFPM_WUFC_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_SHIFT) -#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31 -#define I40E_PFPM_WUFC_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FW_RST_WK_SHIFT) -#define I40E_PFPM_WUS 0x0006B600 /* Reset: POR */ -#define I40E_PFPM_WUS_LNKC_SHIFT 0 -#define I40E_PFPM_WUS_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUS_LNKC_SHIFT) -#define I40E_PFPM_WUS_MAG_SHIFT 1 -#define I40E_PFPM_WUS_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MAG_SHIFT) -#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2 -#define I40E_PFPM_WUS_PME_STATUS_MASK I40E_MASK(0x1, I40E_PFPM_WUS_PME_STATUS_SHIFT) -#define I40E_PFPM_WUS_MNG_SHIFT 3 -#define I40E_PFPM_WUS_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MNG_SHIFT) -#define I40E_PFPM_WUS_FLX0_SHIFT 16 -#define I40E_PFPM_WUS_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX0_SHIFT) -#define I40E_PFPM_WUS_FLX1_SHIFT 17 -#define I40E_PFPM_WUS_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX1_SHIFT) -#define I40E_PFPM_WUS_FLX2_SHIFT 18 -#define I40E_PFPM_WUS_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX2_SHIFT) -#define I40E_PFPM_WUS_FLX3_SHIFT 19 -#define I40E_PFPM_WUS_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX3_SHIFT) -#define I40E_PFPM_WUS_FLX4_SHIFT 20 -#define I40E_PFPM_WUS_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX4_SHIFT) -#define I40E_PFPM_WUS_FLX5_SHIFT 21 -#define I40E_PFPM_WUS_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX5_SHIFT) -#define I40E_PFPM_WUS_FLX6_SHIFT 22 -#define I40E_PFPM_WUS_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX6_SHIFT) -#define I40E_PFPM_WUS_FLX7_SHIFT 23 -#define I40E_PFPM_WUS_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX7_SHIFT) -#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31 -#define I40E_PFPM_WUS_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT) -#define I40E_PRTPM_FHFHR 0x0006C000 /* Reset: POR */ -#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0 -#define I40E_PRTPM_FHFHR_UNICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_UNICAST_SHIFT) -#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1 -#define I40E_PRTPM_FHFHR_MULTICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_MULTICAST_SHIFT) -#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */ -#define I40E_PRTPM_SAH_MAX_INDEX 3 -#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0 -#define I40E_PRTPM_SAH_PFPM_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTPM_SAH_PFPM_SAH_SHIFT) -#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26 -#define I40E_PRTPM_SAH_PF_NUM_MASK I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT) -#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30 -#define I40E_PRTPM_SAH_MC_MAG_EN_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_MC_MAG_EN_SHIFT) -#define I40E_PRTPM_SAH_AV_SHIFT 31 -#define I40E_PRTPM_SAH_AV_MASK I40E_MASK(0x1, 
I40E_PRTPM_SAH_AV_SHIFT) -#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */ -#define I40E_PRTPM_SAL_MAX_INDEX 3 -#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0 -#define I40E_PRTPM_SAL_PFPM_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT) #define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */ -#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0 -#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT) #define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */ -#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0 -#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT) #define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */ -#define I40E_VF_ARQH1_ARQH_SHIFT 0 -#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT) #define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */ -#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0 -#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT) -#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28 -#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT) -#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29 -#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT) -#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30 -#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT) -#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31 -#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) #define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */ -#define I40E_VF_ARQT1_ARQT_SHIFT 0 -#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT) #define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */ -#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0 -#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT) #define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */ -#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0 -#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT) #define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */ -#define I40E_VF_ATQH1_ATQH_SHIFT 0 -#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT) #define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */ -#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0 -#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT) -#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28 -#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT) -#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29 -#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT) -#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30 -#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT) -#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31 -#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) #define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */ -#define I40E_VF_ATQT1_ATQT_SHIFT 0 -#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT) -#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */ -#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0 -#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT) -#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */ -#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0 -#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT) -#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1 -#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, 
I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT) -#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2 -#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT) -#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3 -#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5 -#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT) -#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24 -#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT) -#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25 -#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31 -#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT) -#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */ -#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15 -#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0 -#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT) -#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1 -#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT) -#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2 -#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT) -#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3 -#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5 -#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT) -#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24 -#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT) -#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25 -#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31 -#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT) -#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */ -#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25 -#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT) -#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30 -#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT) -#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31 -#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT) -#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */ -#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0 -#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT) -#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1 -#define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT) -#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2 -#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT) -#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3 -#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT) -#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4 -#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT) -#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25 -#define 
I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT) -#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30 -#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT) -#define I40E_VFINT_ICR01_SWINT_SHIFT 31 -#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT) -#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */ -#define I40E_VFINT_ITR01_MAX_INDEX 2 -#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0 -#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT) -#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */ -#define I40E_VFINT_ITRN1_MAX_INDEX 2 -#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 -#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT) -#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */ -#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 -#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) -#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_QRX_TAIL1_MAX_INDEX 15 -#define I40E_QRX_TAIL1_TAIL_SHIFT 0 -#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT) -#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */ -#define I40E_QTX_TAIL1_MAX_INDEX 15 -#define I40E_QTX_TAIL1_TAIL_SHIFT 0 -#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT) -#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */ -#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0 -#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT) -#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TADD_MAX_INDEX 16 -#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0 -#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT) -#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2 -#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT) -#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TMSG_MAX_INDEX 16 -#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0 -#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT) -#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TUADD_MAX_INDEX 16 -#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0 -#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT) -#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16 -#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0 -#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT) -#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */ -#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0 -#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT) -#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4 -#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT) -#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8 -#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT) -#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */ -#define 
I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0 -#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT) -#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4 -#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT) -#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8 -#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT) -#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16 -#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT) -#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24 -#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT) -#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ -#define I40E_VFQF_HENA_MAX_INDEX 1 -#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0 -#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT) -#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */ -#define I40E_VFQF_HKEY_MAX_INDEX 12 -#define I40E_VFQF_HKEY_KEY_0_SHIFT 0 -#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT) -#define I40E_VFQF_HKEY_KEY_1_SHIFT 8 -#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT) -#define I40E_VFQF_HKEY_KEY_2_SHIFT 16 -#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT) -#define I40E_VFQF_HKEY_KEY_3_SHIFT 24 -#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT) -#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_VFQF_HLUT_MAX_INDEX 15 -#define I40E_VFQF_HLUT_LUT0_SHIFT 0 -#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT) -#define I40E_VFQF_HLUT_LUT1_SHIFT 8 -#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT) -#define I40E_VFQF_HLUT_LUT2_SHIFT 16 -#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT) -#define I40E_VFQF_HLUT_LUT3_SHIFT 24 -#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT) -#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_VFQF_HREGION_MAX_INDEX 7 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT) -#define I40E_VFQF_HREGION_REGION_0_SHIFT 1 -#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT) -#define I40E_VFQF_HREGION_REGION_1_SHIFT 5 -#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT) -#define I40E_VFQF_HREGION_REGION_2_SHIFT 9 -#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT) -#define I40E_VFQF_HREGION_REGION_3_SHIFT 13 -#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16 -#define 
I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT) -#define I40E_VFQF_HREGION_REGION_4_SHIFT 17 -#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT) -#define I40E_VFQF_HREGION_REGION_5_SHIFT 21 -#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT) -#define I40E_VFQF_HREGION_REGION_6_SHIFT 25 -#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT) -#define I40E_VFQF_HREGION_REGION_7_SHIFT 29 -#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT) -#define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */ -#define I40E_MNGSB_FDCRC_CRC_RES_SHIFT 0 -#define I40E_MNGSB_FDCRC_CRC_RES_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCRC_CRC_RES_SHIFT) -#define I40E_MNGSB_FDCS 0x000B7040 /* Reset: POR */ -#define I40E_MNGSB_FDCS_CRC_CONT_SHIFT 2 -#define I40E_MNGSB_FDCS_CRC_CONT_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_CONT_SHIFT) -#define I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT 3 -#define I40E_MNGSB_FDCS_CRC_SEED_EN_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT) -#define I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT 4 -#define I40E_MNGSB_FDCS_CRC_WR_INH_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT) -#define I40E_MNGSB_FDCS_CRC_SEED_SHIFT 8 -#define I40E_MNGSB_FDCS_CRC_SEED_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCS_CRC_SEED_SHIFT) -#define I40E_MNGSB_FDS 0x000B7048 /* Reset: POR */ -#define I40E_MNGSB_FDS_START_BC_SHIFT 0 -#define I40E_MNGSB_FDS_START_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_START_BC_SHIFT) -#define I40E_MNGSB_FDS_LAST_BC_SHIFT 16 -#define I40E_MNGSB_FDS_LAST_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_LAST_BC_SHIFT) -#define I40E_GL_VF_CTRL_RX(_VF) (0x00083600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_GL_VF_CTRL_RX_MAX_INDEX 127 -#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT 0 -#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT) -#define I40E_GL_VF_CTRL_TX(_VF) (0x00083400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_GL_VF_CTRL_TX_MAX_INDEX 127 -#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT 0 -#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT) -#define I40E_GLCM_LAN_CACHESIZE 0x0010C4D8 /* Reset: CORER */ -#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT 0 -#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT) -#define I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT 12 -#define I40E_GLCM_LAN_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT) -#define I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT 16 -#define I40E_GLCM_LAN_CACHESIZE_WAYS_MASK I40E_MASK(0x3FF, I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT) -#define I40E_GLCM_PE_CACHESIZE 0x00138FE4 /* Reset: CORER */ -#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT 0 -#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT) -#define I40E_GLCM_PE_CACHESIZE_SETS_SHIFT 12 -#define I40E_GLCM_PE_CACHESIZE_SETS_MASK I40E_MASK(0xF, 
I40E_GLCM_PE_CACHESIZE_SETS_SHIFT) -#define I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT 16 -#define I40E_GLCM_PE_CACHESIZE_WAYS_MASK I40E_MASK(0x1FF, I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT) -#define I40E_PFCM_PE_ERRDATA 0x00138D00 /* Reset: PFR */ -#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0 -#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT) -#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4 -#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT) -#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8 -#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT) -#define I40E_PFCM_PE_ERRINFO 0x00138C80 /* Reset: PFR */ -#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0 -#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT) -#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4 -#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT) -#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8 -#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT) -#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16 -#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT) -#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24 -#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT) -#define I40E_PRTDCB_TFMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PRTDCB_TFMSTC_MAX_INDEX 7 -#define I40E_PRTDCB_TFMSTC_MSTC_SHIFT 0 -#define I40E_PRTDCB_TFMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TFMSTC_MSTC_SHIFT) -#define I40E_GL_FWSTS_FWROWD_SHIFT 8 -#define I40E_GL_FWSTS_FWROWD_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWROWD_SHIFT) -#define I40E_GLFOC_CACHESIZE 0x000AA0DC /* Reset: CORER */ -#define I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT 0 -#define I40E_GLFOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT) -#define I40E_GLFOC_CACHESIZE_SETS_SHIFT 8 -#define I40E_GLFOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLFOC_CACHESIZE_SETS_SHIFT) -#define I40E_GLFOC_CACHESIZE_WAYS_SHIFT 20 -#define I40E_GLFOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLFOC_CACHESIZE_WAYS_SHIFT) -#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15 -#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0 -#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT) -#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_CEQPART_MAX_INDEX 15 -#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0 -#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT) -#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16 -#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT) -#define I40E_GLHMC_DBCQMAX 0x000C20F0 /* Reset: CORER */ -#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT 0 -#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_MASK I40E_MASK(0x3FFFF, I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT) -#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_DBCQPART_MAX_INDEX 15 -#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0 -#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK 
I40E_MASK(0x3FFF, I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT) -#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16 -#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT) -#define I40E_GLHMC_DBQPMAX 0x000C20EC /* Reset: CORER */ -#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT 0 -#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_MASK I40E_MASK(0x7FFFF, I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT) -#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_DBQPPART_MAX_INDEX 15 -#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0 -#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT) -#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16 -#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT) -#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15 -#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0 -#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT) -#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15 -#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0 -#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT) -#define I40E_GLHMC_PEARPMAX 0x000C2038 /* Reset: CORER */ -#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0 -#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT) -#define I40E_GLHMC_PEARPOBJSZ 0x000C2034 /* Reset: CORER */ -#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0 -#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK I40E_MASK(0x7, I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT) -#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PECQBASE_MAX_INDEX 15 -#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0 -#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT) -#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PECQCNT_MAX_INDEX 15 -#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0 -#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT) -#define I40E_GLHMC_PECQOBJSZ 0x000C2020 /* Reset: CORER */ -#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0 -#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT) -#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15 -#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0 -#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT) -#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15 -#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0 -#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT) -#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c /* Reset: CORER */ -#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0 -#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT) -#define I40E_GLHMC_PEHTMAX 0x000C2030 /* Reset: CORER */ -#define 
I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0 -#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK I40E_MASK(0x1FFFFF, I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT) -#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15 -#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0 -#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT) -#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15 -#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0 -#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT) -#define I40E_GLHMC_PEMRMAX 0x000C2040 /* Reset: CORER */ -#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0 -#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT) -#define I40E_GLHMC_PEMROBJSZ 0x000C203c /* Reset: CORER */ -#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0 -#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT) -#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15 -#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0 -#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT) -#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15 -#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0 -#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT) -#define I40E_GLHMC_PEPBLMAX 0x000C206c /* Reset: CORER */ -#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0 -#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT) -#define I40E_GLHMC_PEPFFIRSTSD 0x000C20E4 /* Reset: CORER */ -#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT 0 -#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_MASK I40E_MASK(0xFFF, I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT) -#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15 -#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0 -#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT) -#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15 -#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0 -#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT) -#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15 -#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0 -#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT) -#define I40E_GLHMC_PEQ1FLMAX 0x000C2058 /* Reset: CORER */ -#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0 -#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT) -#define I40E_GLHMC_PEQ1MAX 0x000C2054 /* Reset: CORER */ -#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0 -#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT) -#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050 /* Reset: CORER */ -#define 
I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0 -#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT) -#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15 -#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0 -#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT) -#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15 -#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0 -#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT) -#define I40E_GLHMC_PEQPOBJSZ 0x000C201c /* Reset: CORER */ -#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0 -#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT) -#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15 -#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0 -#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT) -#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15 -#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0 -#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT) -#define I40E_GLHMC_PESRQMAX 0x000C2028 /* Reset: CORER */ -#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0 -#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT) -#define I40E_GLHMC_PESRQOBJSZ 0x000C2024 /* Reset: CORER */ -#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0 -#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT) -#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15 -#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0 -#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT) -#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15 -#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0 -#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT) -#define I40E_GLHMC_PETIMERMAX 0x000C2084 /* Reset: CORER */ -#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0 -#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT) -#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080 /* Reset: CORER */ -#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0 -#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT) -#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15 -#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0 -#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT) -#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15 -#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0 -#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK 
I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT) -#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15 -#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0 -#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT) -#define I40E_GLHMC_PEXFFLMAX 0x000C204c /* Reset: CORER */ -#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0 -#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK I40E_MASK(0x1FFFFFF, I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT) -#define I40E_GLHMC_PEXFMAX 0x000C2048 /* Reset: CORER */ -#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0 -#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT) -#define I40E_GLHMC_PEXFOBJSZ 0x000C2044 /* Reset: CORER */ -#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0 -#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT) -#define I40E_GLHMC_PFPESDPART(_i) (0x000C0880 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PFPESDPART_MAX_INDEX 15 -#define I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT 0 -#define I40E_GLHMC_PFPESDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT) -#define I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT 16 -#define I40E_GLHMC_PFPESDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT) -#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0 -#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT) -#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31 -#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0 -#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT) -#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16 -#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT) -#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31 -#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0 -#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT) -#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16 -#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT) -#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31 -#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0 -#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT) -#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16 -#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT) -#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0 -#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT) -#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define 
I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0 -#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT) -#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPDINV_MAX_INDEX 31 -#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0 -#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT) -#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT 15 -#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT) -#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16 -#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT) -#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0 -#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT) -#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0 -#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT) -#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0 -#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT) -#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0 -#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT) -#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0 -#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT) -#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0 -#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT) -#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0 -#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT) -#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0 -#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT) -#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0 -#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT) -#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ 
-#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0 -#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT) -#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0 -#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT) -#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0 -#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT) -#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0 -#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT) -#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0 -#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT) -#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0 -#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT) -#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0 -#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT) -#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0 -#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT) -#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0 -#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT) -#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0 -#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT) -#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0 -#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT) -#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0 -#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, 
I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT) -#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0 -#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT) -#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFSDPART_MAX_INDEX 31 -#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0 -#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT) -#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16 -#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT) -#define I40E_GLPBLOC_CACHESIZE 0x000A80BC /* Reset: CORER */ -#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT 0 -#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT) -#define I40E_GLPBLOC_CACHESIZE_SETS_SHIFT 8 -#define I40E_GLPBLOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_CACHESIZE_SETS_SHIFT) -#define I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT 20 -#define I40E_GLPBLOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT) -#define I40E_GLPDOC_CACHESIZE 0x000D0088 /* Reset: CORER */ -#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT 0 -#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT) -#define I40E_GLPDOC_CACHESIZE_SETS_SHIFT 8 -#define I40E_GLPDOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPDOC_CACHESIZE_SETS_SHIFT) -#define I40E_GLPDOC_CACHESIZE_WAYS_SHIFT 20 -#define I40E_GLPDOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPDOC_CACHESIZE_WAYS_SHIFT) -#define I40E_GLPEOC_CACHESIZE 0x000A60E8 /* Reset: CORER */ -#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT 0 -#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT) -#define I40E_GLPEOC_CACHESIZE_SETS_SHIFT 8 -#define I40E_GLPEOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPEOC_CACHESIZE_SETS_SHIFT) -#define I40E_GLPEOC_CACHESIZE_WAYS_SHIFT 20 -#define I40E_GLPEOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPEOC_CACHESIZE_WAYS_SHIFT) -#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15 -#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT) -#define I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT 15 -#define I40E_PFHMC_SDCMD_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT) -#define I40E_GL_PPRS_SPARE 0x000856E0 /* Reset: CORER */ -#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT 0 -#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT) -#define I40E_GL_TLAN_SPARE 0x000E64E0 /* Reset: CORER */ -#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT 0 -#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT) -#define I40E_GL_TUPM_SPARE 0x000a2230 /* Reset: CORER */ -#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT 0 -#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT) -#define I40E_GLGEN_CAR_DEBUG 0x000B81C0 /* Reset: POR */ -#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT 0 -#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT 1 -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_MASK I40E_MASK(0x1, 
I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT 2 -#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT 3 -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT 4 -#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT 5 -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT 6 -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT 7 -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT 8 -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT 9 -#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT 10 -#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT 11 -#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT 12 -#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT 13 -#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT 14 -#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT) -#define I40E_GLGEN_MISC_SPARE 0x000880E0 /* Reset: POR */ -#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT 0 -#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT) -#define I40E_GL_UFUSE_SOC 0x000BE550 /* Reset: POR */ -#define I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT 0 -#define I40E_GL_UFUSE_SOC_PORT_MODE_MASK I40E_MASK(0x3, I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT) -#define I40E_GL_UFUSE_SOC_NIC_ID_SHIFT 2 -#define I40E_GL_UFUSE_SOC_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_SOC_NIC_ID_SHIFT) -#define I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT 3 -#define I40E_GL_UFUSE_SOC_SPARE_FUSES_MASK I40E_MASK(0x1FFF, I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT) #define I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30 #define I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT) #define I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30 #define I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT) -#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30 -#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT) -#define 
I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30 -#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT) -#define I40E_VPLAN_QBASE(_VF) (0x00074800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VPLAN_QBASE_MAX_INDEX 127 -#define I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT 0 -#define I40E_VPLAN_QBASE_VFFIRSTQ_MASK I40E_MASK(0x7FF, I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT) -#define I40E_VPLAN_QBASE_VFNUMQ_SHIFT 11 -#define I40E_VPLAN_QBASE_VFNUMQ_MASK I40E_MASK(0xFF, I40E_VPLAN_QBASE_VFNUMQ_SHIFT) -#define I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT 31 -#define I40E_VPLAN_QBASE_VFQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT) -#define I40E_PRTMAC_LINK_DOWN_COUNTER 0x001E2440 /* Reset: GLOBR */ -#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT 0 -#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT) -#define I40E_GLNVM_AL_REQ 0x000B6164 /* Reset: POR */ -#define I40E_GLNVM_AL_REQ_POR_SHIFT 0 -#define I40E_GLNVM_AL_REQ_POR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_POR_SHIFT) -#define I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT 1 -#define I40E_GLNVM_AL_REQ_PCIE_IMIB_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT) -#define I40E_GLNVM_AL_REQ_GLOBR_SHIFT 2 -#define I40E_GLNVM_AL_REQ_GLOBR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_GLOBR_SHIFT) -#define I40E_GLNVM_AL_REQ_CORER_SHIFT 3 -#define I40E_GLNVM_AL_REQ_CORER_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_CORER_SHIFT) -#define I40E_GLNVM_AL_REQ_PE_SHIFT 4 -#define I40E_GLNVM_AL_REQ_PE_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PE_SHIFT) -#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT 5 -#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT) -#define I40E_GLNVM_ALTIMERS 0x000B6140 /* Reset: POR */ -#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT 0 -#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_MASK I40E_MASK(0xFFF, I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT) -#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT 12 -#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_MASK I40E_MASK(0xFFFFF, I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT) #define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */ #define I40E_GLNVM_FLA_LOCKED_SHIFT 6 #define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT) #define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */ -#define I40E_GLNVM_ULD_PCIER_DONE_SHIFT 0 -#define I40E_GLNVM_ULD_PCIER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_SHIFT) -#define I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT 1 -#define I40E_GLNVM_ULD_PCIER_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT) -#define I40E_GLNVM_ULD_CORER_DONE_SHIFT 3 -#define I40E_GLNVM_ULD_CORER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CORER_DONE_SHIFT) -#define I40E_GLNVM_ULD_GLOBR_DONE_SHIFT 4 -#define I40E_GLNVM_ULD_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_GLOBR_DONE_SHIFT) -#define I40E_GLNVM_ULD_POR_DONE_SHIFT 5 -#define I40E_GLNVM_ULD_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_SHIFT) -#define I40E_GLNVM_ULD_POR_DONE_1_SHIFT 8 -#define I40E_GLNVM_ULD_POR_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_1_SHIFT) -#define I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT 9 -#define I40E_GLNVM_ULD_PCIER_DONE_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT) -#define I40E_GLNVM_ULD_PE_DONE_SHIFT 10 -#define I40E_GLNVM_ULD_PE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PE_DONE_SHIFT) -#define I40E_GLNVM_ULT 0x000B6154 /* Reset: POR */ -#define I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT 0 -#define I40E_GLNVM_ULT_CONF_PCIR_AE_MASK 
I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT) -#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT 1 -#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT) -#define I40E_GLNVM_ULT_RESERVED_1_SHIFT 2 -#define I40E_GLNVM_ULT_RESERVED_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_1_SHIFT) -#define I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT 3 -#define I40E_GLNVM_ULT_CONF_CORE_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT) -#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT 4 -#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT) -#define I40E_GLNVM_ULT_CONF_POR_AE_SHIFT 5 -#define I40E_GLNVM_ULT_CONF_POR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_POR_AE_SHIFT) -#define I40E_GLNVM_ULT_RESERVED_2_SHIFT 6 -#define I40E_GLNVM_ULT_RESERVED_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_2_SHIFT) -#define I40E_GLNVM_ULT_RESERVED_3_SHIFT 7 -#define I40E_GLNVM_ULT_RESERVED_3_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_3_SHIFT) -#define I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT 8 -#define I40E_GLNVM_ULT_CONF_EMP_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT) -#define I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT 9 -#define I40E_GLNVM_ULT_CONF_PCIALT_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT) -#define I40E_GLNVM_ULT_RESERVED_4_SHIFT 10 -#define I40E_GLNVM_ULT_RESERVED_4_MASK I40E_MASK(0x3FFFFF, I40E_GLNVM_ULT_RESERVED_4_SHIFT) -#define I40E_MEM_INIT_DONE_STAT 0x000B615C /* Reset: POR */ -#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT 0 -#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT 1 -#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT 2 -#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT 3 -#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT 4 -#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT 5 -#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT 6 -#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT 7 -#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT 8 -#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT 9 -#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT 10 -#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT 11 -#define 
I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT 12 -#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT 13 -#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT 14 -#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT 15 -#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT 16 -#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT) -#define I40E_MNGSB_DADD 0x000B7030 /* Reset: POR */ -#define I40E_MNGSB_DADD_ADDR_SHIFT 0 -#define I40E_MNGSB_DADD_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DADD_ADDR_SHIFT) -#define I40E_MNGSB_DCNT 0x000B7034 /* Reset: POR */ -#define I40E_MNGSB_DCNT_BYTE_CNT_SHIFT 0 -#define I40E_MNGSB_DCNT_BYTE_CNT_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DCNT_BYTE_CNT_SHIFT) -#define I40E_MNGSB_MSGCTL 0x000B7020 /* Reset: POR */ -#define I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT 0 -#define I40E_MNGSB_MSGCTL_HDR_DWS_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT) -#define I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT 8 -#define I40E_MNGSB_MSGCTL_EXP_RDW_MASK I40E_MASK(0x1FF, I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT) -#define I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT 26 -#define I40E_MNGSB_MSGCTL_MSG_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT) -#define I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT 28 -#define I40E_MNGSB_MSGCTL_TOKEN_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT) -#define I40E_MNGSB_MSGCTL_BARCLR_SHIFT 30 -#define I40E_MNGSB_MSGCTL_BARCLR_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_BARCLR_SHIFT) -#define I40E_MNGSB_MSGCTL_CMDV_SHIFT 31 -#define I40E_MNGSB_MSGCTL_CMDV_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_CMDV_SHIFT) -#define I40E_MNGSB_RDATA 0x000B7300 /* Reset: POR */ -#define I40E_MNGSB_RDATA_DATA_SHIFT 0 -#define I40E_MNGSB_RDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_RDATA_DATA_SHIFT) -#define I40E_MNGSB_RHDR0 0x000B72FC /* Reset: POR */ -#define I40E_MNGSB_RHDR0_DESTINATION_SHIFT 0 -#define I40E_MNGSB_RHDR0_DESTINATION_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_DESTINATION_SHIFT) -#define I40E_MNGSB_RHDR0_SOURCE_SHIFT 8 -#define I40E_MNGSB_RHDR0_SOURCE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_SOURCE_SHIFT) -#define I40E_MNGSB_RHDR0_OPCODE_SHIFT 16 -#define I40E_MNGSB_RHDR0_OPCODE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_OPCODE_SHIFT) -#define I40E_MNGSB_RHDR0_TAG_SHIFT 24 -#define I40E_MNGSB_RHDR0_TAG_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_TAG_SHIFT) -#define I40E_MNGSB_RHDR0_RESPONSE_SHIFT 27 -#define I40E_MNGSB_RHDR0_RESPONSE_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_RESPONSE_SHIFT) -#define I40E_MNGSB_RHDR0_EH_SHIFT 31 -#define I40E_MNGSB_RHDR0_EH_MASK I40E_MASK(0x1, I40E_MNGSB_RHDR0_EH_SHIFT) -#define I40E_MNGSB_RSPCTL 0x000B7024 /* Reset: POR */ -#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT 0 -#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_MASK I40E_MASK(0x1FF, I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT) -#define I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT 26 -#define I40E_MNGSB_RSPCTL_RSP_MODE_MASK I40E_MASK(0x3, 
I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT) -#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT 30 -#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT) -#define I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT 31 -#define I40E_MNGSB_RSPCTL_RSP_ERR_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT) -#define I40E_MNGSB_WDATA 0x000B7100 /* Reset: POR */ -#define I40E_MNGSB_WDATA_DATA_SHIFT 0 -#define I40E_MNGSB_WDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WDATA_DATA_SHIFT) -#define I40E_MNGSB_WHDR0 0x000B70F4 /* Reset: POR */ -#define I40E_MNGSB_WHDR0_RAW_DEST_SHIFT 0 -#define I40E_MNGSB_WHDR0_RAW_DEST_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_RAW_DEST_SHIFT) -#define I40E_MNGSB_WHDR0_DEST_SEL_SHIFT 12 -#define I40E_MNGSB_WHDR0_DEST_SEL_MASK I40E_MASK(0xF, I40E_MNGSB_WHDR0_DEST_SEL_SHIFT) -#define I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT 16 -#define I40E_MNGSB_WHDR0_OPCODE_SEL_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT) -#define I40E_MNGSB_WHDR0_TAG_SHIFT 24 -#define I40E_MNGSB_WHDR0_TAG_MASK I40E_MASK(0x7F, I40E_MNGSB_WHDR0_TAG_SHIFT) -#define I40E_MNGSB_WHDR1 0x000B70F8 /* Reset: POR */ -#define I40E_MNGSB_WHDR1_ADDR_SHIFT 0 -#define I40E_MNGSB_WHDR1_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR1_ADDR_SHIFT) -#define I40E_MNGSB_WHDR2 0x000B70FC /* Reset: POR */ -#define I40E_MNGSB_WHDR2_LENGTH_SHIFT 0 -#define I40E_MNGSB_WHDR2_LENGTH_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR2_LENGTH_SHIFT) -#define I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT 21 -#define I40E_GLPCI_CAPSUP_WAKUP_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT) -#define I40E_GLPCI_CUR_CLNT_COMMON 0x0009CA18 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT) -#define I40E_GLPCI_CUR_CLNT_PIPEMON 0x0009CA20 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_MNG_ALWD 0x0009c514 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT) -#define I40E_GLPCI_CUR_MNG_RSVD 0x0009c594 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_MNG_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT) -#define I40E_GLPCI_CUR_PMAT_ALWD 0x0009c510 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT) -#define I40E_GLPCI_CUR_PMAT_RSVD 0x0009c590 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT) -#define 
I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT) -#define I40E_GLPCI_CUR_RLAN_ALWD 0x0009c500 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT) -#define I40E_GLPCI_CUR_RLAN_RSVD 0x0009c580 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT) -#define I40E_GLPCI_CUR_RXPE_ALWD 0x0009c508 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT) -#define I40E_GLPCI_CUR_RXPE_RSVD 0x0009c588 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT) -#define I40E_GLPCI_CUR_TDPU_ALWD 0x0009c518 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT) -#define I40E_GLPCI_CUR_TDPU_RSVD 0x0009c598 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT) -#define I40E_GLPCI_CUR_TLAN_ALWD 0x0009c504 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT) -#define I40E_GLPCI_CUR_TLAN_RSVD 0x0009c584 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT) -#define I40E_GLPCI_CUR_TXPE_ALWD 0x0009c50C /* Reset: PCIR */ -#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT) -#define I40E_GLPCI_CUR_TXPE_RSVD 0x0009c58c /* Reset: PCIR */ -#define 
I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT) -#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON 0x0009CA28 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT) -#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4 -#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT) -#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10 -#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT) -#define I40E_GLPCI_NPQ_CFG 0x0009CA00 /* Reset: PCIR */ -#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT 0 -#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT) -#define I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT 1 -#define I40E_GLPCI_NPQ_CFG_SMALL_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT) -#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT 2 -#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT) -#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT 6 -#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_MASK I40E_MASK(0x3FF, I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT) -#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT 16 -#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT) -#define I40E_GLPCI_WATMK_CLNT_PIPEMON 0x0009CA30 /* Reset: PCIR */ -#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT) -#define I40E_GLPCI_WATMK_MNG_ALWD 0x0009CB14 /* Reset: PCIR */ -#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT) -#define I40E_GLPCI_WATMK_PMAT_ALWD 0x0009CB10 /* Reset: PCIR */ -#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT) -#define I40E_GLPCI_WATMK_RLAN_ALWD 0x0009CB00 /* Reset: PCIR */ -#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT) -#define I40E_GLPCI_WATMK_RXPE_ALWD 0x0009CB08 /* Reset: PCIR */ -#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, 
I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT) -#define I40E_GLPCI_WATMK_TLAN_ALWD 0x0009CB04 /* Reset: PCIR */ -#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT) -#define I40E_GLPCI_WATMK_TPDU_ALWD 0x0009CB18 /* Reset: PCIR */ -#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT) -#define I40E_GLPCI_WATMK_TXPE_ALWD 0x0009CB0c /* Reset: PCIR */ -#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT) -#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */ -#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0 -#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT) -#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */ -#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0 -#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT) -#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */ -#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0 -#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT) -#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */ -#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0 -#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK I40E_MASK(0xFFFF, I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT) -#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17 -#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT) -#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18 -#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT) -#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */ -#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0 -#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT) -#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15 -#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0 -#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT) -#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15 -#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0 -#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT) -#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15 -#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0 -#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT) -#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC 
/* Reset: PE_CORER */ -#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0 -#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT) -#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */ -#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0 -#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT) -#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */ -#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0 -#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT) -#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26 -#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT) -#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27 -#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT) -#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28 -#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT) -#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29 -#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT) -#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30 -#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT) -#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31 -#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT) -#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */ -#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0 -#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT) -#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */ -#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0 -#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT) -#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */ -#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0 -#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT) -#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31 -#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0 -#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT) -#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31 -#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0 -#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT) -#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31 -#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0 -#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT) -#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31 -#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0 -#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT) -#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8 -#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT) -#define 
I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31 -#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0 -#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT) -#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31 -#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0 -#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT) -#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31 -#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0 -#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT) -#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1 -#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT) -#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2 -#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT) -#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3 -#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT) -#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4 -#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT) -#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31 -#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0 -#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT) -#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31 -#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT) -#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */ -#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0 -#define I40E_PFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_AEQALLOC_AECOUNT_SHIFT) -#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */ -#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0 -#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT) -#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */ -#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0 -#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT) -#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */ -#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0 -#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT) -#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4 -#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT) -#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16 -#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT) -#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31 -#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT) -#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */ -#define I40E_PFPE_CQACK_PECQID_SHIFT 0 -#define I40E_PFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQACK_PECQID_SHIFT) -#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */ -#define I40E_PFPE_CQARM_PECQID_SHIFT 0 -#define 
I40E_PFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQARM_PECQID_SHIFT) -#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */ -#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0 -#define I40E_PFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPDB_WQHEAD_SHIFT) -#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */ -#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0 -#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT) -#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16 -#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT) -#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */ -#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0 -#define I40E_PFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPTAIL_WQTAIL_SHIFT) -#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31 -#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT) -#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */ -#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0 -#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT) -#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */ -#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0 -#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT) -#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */ -#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0 -#define I40E_PFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_PFPE_IPCONFIG0_PEIPID_SHIFT) -#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16 -#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT) -#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */ -#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0 -#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT) -#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */ -#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0 -#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT) -#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */ -#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0 -#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT) -#define I40E_PFPE_UDACTRL 0x00008700 /* Reset: PFR */ -#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0 -#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT) -#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1 -#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT) -#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2 -#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT) -#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3 -#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT) -#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4 -#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT) -#define I40E_PFPE_UDAUCFBQPN 0x00008780 /* Reset: PFR */ -#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0 -#define I40E_PFPE_UDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_PFPE_UDAUCFBQPN_QPN_SHIFT) -#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 
31 -#define I40E_PFPE_UDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_PFPE_UDAUCFBQPN_VALID_SHIFT) -#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */ -#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0 -#define I40E_PFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_PFPE_WQEALLOC_PEQPID_SHIFT) -#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20 -#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT) -#define I40E_PRTDCB_RLPMC 0x0001F140 /* Reset: PE_CORER */ -#define I40E_PRTDCB_RLPMC_TC2PFC_SHIFT 0 -#define I40E_PRTDCB_RLPMC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RLPMC_TC2PFC_SHIFT) -#define I40E_PRTDCB_TCMSTC_RLPM(_i) (0x0001F040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: PE_CORER */ -#define I40E_PRTDCB_TCMSTC_RLPM_MAX_INDEX 7 -#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT 0 -#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT) -#define I40E_PRTDCB_TCPMC_RLPM 0x0001F1A0 /* Reset: PE_CORER */ -#define I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT 0 -#define I40E_PRTDCB_TCPMC_RLPM_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT) -#define I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT 13 -#define I40E_PRTDCB_TCPMC_RLPM_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT) -#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT 30 -#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT) -#define I40E_PRTE_RUPM_TCCNTR03 0x0000DAE0 /* Reset: PE_CORER */ -#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT 0 -#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT) -#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT 8 -#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT) -#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT 16 -#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT) -#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT 24 -#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT) -#define I40E_PRTPE_RUPM_CNTR 0x0000DB20 /* Reset: PE_CORER */ -#define I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT 0 -#define I40E_PRTPE_RUPM_CNTR_COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT) -#define I40E_PRTPE_RUPM_CTL 0x0000DA40 /* Reset: PE_CORER */ -#define I40E_PRTPE_RUPM_CTL_LLTC_SHIFT 13 -#define I40E_PRTPE_RUPM_CTL_LLTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CTL_LLTC_SHIFT) -#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT 30 -#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT) -#define I40E_PRTPE_RUPM_PFCCTL 0x0000DA60 /* Reset: PE_CORER */ -#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT 0 -#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT) -#define I40E_PRTPE_RUPM_PFCPC 0x0000DA80 /* Reset: PE_CORER */ -#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT 0 -#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT) -#define I40E_PRTPE_RUPM_PFCTCC 0x0000DAA0 /* Reset: PE_CORER */ -#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT 0 -#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT) -#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT 16 -#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT) -#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT 31 -#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_MASK I40E_MASK(0x1, 
I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT) -#define I40E_PRTPE_RUPM_PTCTCCNTR47 0x0000DB60 /* Reset: PE_CORER */ -#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT 0 -#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT) -#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT 8 -#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT) -#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT 16 -#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT) -#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT 24 -#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT) -#define I40E_PRTPE_RUPM_PTXTCCNTR03 0x0000DB40 /* Reset: PE_CORER */ -#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT 0 -#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT) -#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT 8 -#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT) -#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT 16 -#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT) -#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT 24 -#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT) -#define I40E_PRTPE_RUPM_TCCNTR47 0x0000DB00 /* Reset: PE_CORER */ -#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT 0 -#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT) -#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT 8 -#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT) -#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT 16 -#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT) -#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT 24 -#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT) -#define I40E_PRTPE_RUPM_THRES 0x0000DA20 /* Reset: PE_CORER */ -#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT 0 -#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT) -#define I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT 8 -#define I40E_PRTPE_RUPM_THRES_MAXSPADS_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT) -#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT 16 -#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT) -#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_AEQALLOC_MAX_INDEX 127 -#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0 -#define I40E_VFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC_AECOUNT_SHIFT) -#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127 -#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0 -#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT) -#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_CCQPLOW_MAX_INDEX 127 -#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0 -#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, 
I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT) -#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127 -#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0 -#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT) -#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4 -#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT) -#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16 -#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT) -#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31 -#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT) -#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_CQACK_MAX_INDEX 127 -#define I40E_VFPE_CQACK_PECQID_SHIFT 0 -#define I40E_VFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK_PECQID_SHIFT) -#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_CQARM_MAX_INDEX 127 -#define I40E_VFPE_CQARM_PECQID_SHIFT 0 -#define I40E_VFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM_PECQID_SHIFT) -#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_CQPDB_MAX_INDEX 127 -#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0 -#define I40E_VFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB_WQHEAD_SHIFT) -#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127 -#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0 -#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT) -#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16 -#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT) -#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_CQPTAIL_MAX_INDEX 127 -#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0 -#define I40E_VFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL_WQTAIL_SHIFT) -#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31 -#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT) -#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127 -#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0 -#define I40E_VFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG0_PEIPID_SHIFT) -#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16 -#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT) -#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127 -#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0 -#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT) -#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127 -#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0 -#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT) -#define 
I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127 -#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0 -#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT) -#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_WQEALLOC_MAX_INDEX 127 -#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0 -#define I40E_VFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC_PEQPID_SHIFT) -#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20 -#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT) -#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0 -#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT) -#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0 -#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT) -#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0 -#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT) -#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0 -#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT) -#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0 -#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT) -#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0 -#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT) -#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0 -#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT) -#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0 -#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT) -#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0 
-#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT) -#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0 -#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT) -#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0 -#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT) -#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0 -#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT) -#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0 -#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT) -#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0 -#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT) -#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0 -#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT) -#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0 -#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT) -#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0 -#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT) -#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0 -#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT) -#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0 -#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT) -#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15 -#define 
I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0 -#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT) -#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0 -#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT) -#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0 -#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT) -#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0 -#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT) -#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0 -#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT) -#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0 -#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT) -#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0 -#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT) -#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0 -#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT) -#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0 -#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT) -#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0 -#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT) -#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0 -#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT) -#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define 
I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0 -#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT) -#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0 -#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT) -#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0 -#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT) -#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0 -#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT) -#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0 -#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT) -#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0 -#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT) -#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0 -#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT) -#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0 -#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT) -#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0 -#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT) -#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0 -#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT) -#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0 -#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT) -#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: 
PE_CORER */ -#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0 -#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT) -#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0 -#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT) -#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0 -#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT) -#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0 -#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT) -#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0 -#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT) -#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15 -#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0 -#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT) -#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15 -#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0 -#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT) -#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15 -#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0 -#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT) -#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15 -#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0 -#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT) -#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15 -#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0 -#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT) -#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15 -#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0 -#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT) -#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define 
I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15 -#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0 -#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT) -#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15 -#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0 -#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT) -#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15 -#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0 -#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT) -#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15 -#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0 -#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT) -#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15 -#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0 -#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT) -#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15 -#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0 -#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT) -#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15 -#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0 -#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT) -#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15 -#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0 -#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT) -#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15 -#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0 -#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT) -#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15 -#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0 -#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT) -#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15 -#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0 -#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT) -#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15 -#define 
I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0 -#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT) -#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15 -#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0 -#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT) -#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15 -#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0 -#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT) -#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15 -#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0 -#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT) -#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15 -#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0 -#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT) -#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15 -#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0 -#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT) -#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15 -#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0 -#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT) -#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0 -#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT) -#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0 -#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT) -#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0 -#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT) -#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0 -#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT) -#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */ -#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0 -#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK I40E_MASK(0xFFFFFF, 
I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT) -#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */ -#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0 -#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT) -#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */ -#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0 -#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT) -#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */ -#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0 -#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT) -#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */ -#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0 -#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT) -#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */ -#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0 -#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT) -#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0 -#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT) -#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0 -#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT) -#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0 -#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT) -#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0 -#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT) -#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0 -#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT) -#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0 -#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT) -#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0 -#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT) -#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0 -#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT) -#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0 -#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK 
I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT) -#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0 -#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT) -#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */ -#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0 -#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT) -#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0 -#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT) -#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0 -#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT) -#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0 -#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT) -#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */ -#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0 -#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT) -#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0 -#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT) -#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0 -#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT) -#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0 -#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT) -#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0 -#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT) -#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0 -#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT) -#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0 -#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT) -#define 
I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0 -#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT) -#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0 -#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT) -#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0 -#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT) -#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0 -#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT) -#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0 -#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT) -#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0 -#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT) -#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0 -#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT) -#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0 -#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT) -#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0 -#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT) -#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0 -#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT) -#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0 -#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, 
I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT) -#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0 -#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT) -#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0 -#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT) -#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0 -#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT) -#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0 -#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT) -#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0 -#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT) -#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0 -#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT) -#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0 -#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT) -#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0 -#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT) -#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0 -#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT) -#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0 -#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT) -#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0 -#define 
I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT) -#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0 -#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT) -#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0 -#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT) -#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0 -#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT) -#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0 -#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT) -#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0 -#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT) -#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0 -#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT) -#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0 -#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT) -#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0 -#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT) -#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0 -#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT) -#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0 -#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT) -#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31 -#define 
I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0 -#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT) -#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0 -#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT) -#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0 -#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT) -#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0 -#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT) -#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0 -#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT) -#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0 -#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT) -#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0 -#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT) -#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0 -#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT) -#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0 -#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT) -#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31 -#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0 -#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT) -#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31 -#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0 -#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT) -#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define 
I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31 -#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0 -#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT) -#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31 -#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0 -#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT) -#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31 -#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0 -#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT) -#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31 -#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0 -#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT) -#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31 -#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0 -#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT) -#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31 -#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0 -#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT) -#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31 -#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0 -#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT) -#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31 -#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0 -#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT) -#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31 -#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0 -#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT) -#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31 -#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0 -#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT) -#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31 -#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0 -#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT) -#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define 
I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31 -#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0 -#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT) -#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31 -#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0 -#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT) -#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31 -#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0 -#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT) -#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31 -#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0 -#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT) -#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31 -#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0 -#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT) -#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31 -#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0 -#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT) -#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31 -#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0 -#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT) -#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31 -#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0 -#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT) -#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31 -#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0 -#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT) -#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31 -#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0 -#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT) -#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31 -#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0 -#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT) -#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0 -#define 
I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT) -#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0 -#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT) -#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0 -#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT) -#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0 -#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT) -#define I40E_GLGEN_PME_TO 0x000B81BC /* Reset: POR */ -#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT 0 -#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_MASK I40E_MASK(0x1, I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT) -#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset: CORER */ -#define I40E_GLQF_APBVT_MAX_INDEX 2047 -#define I40E_GLQF_APBVT_APBVT_SHIFT 0 -#define I40E_GLQF_APBVT_APBVT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_APBVT_APBVT_SHIFT) -#define I40E_GLQF_FD_PCTYPES(_i) (0x00268000 + ((_i) * 4)) /* _i=0...63 */ /* Reset: POR */ -#define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63 -#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0 -#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT) -#define I40E_GLQF_FD_MSK(_i, _j) (0x00267200 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ -#define I40E_GLQF_FD_MSK_MAX_INDEX 1 -#define I40E_GLQF_FD_MSK_MASK_SHIFT 0 -#define I40E_GLQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_FD_MSK_MASK_SHIFT) -#define I40E_GLQF_FD_MSK_OFFSET_SHIFT 16 -#define I40E_GLQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_FD_MSK_OFFSET_SHIFT) #define I40E_GLQF_HASH_INSET(_i, _j) (0x00267600 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ -#define I40E_GLQF_HASH_INSET_MAX_INDEX 1 -#define I40E_GLQF_HASH_INSET_INSET_SHIFT 0 -#define I40E_GLQF_HASH_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_HASH_INSET_INSET_SHIFT) -#define I40E_GLQF_HASH_MSK(_i, _j) (0x00267A00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ -#define I40E_GLQF_HASH_MSK_MAX_INDEX 1 -#define I40E_GLQF_HASH_MSK_MASK_SHIFT 0 -#define I40E_GLQF_HASH_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_HASH_MSK_MASK_SHIFT) -#define I40E_GLQF_HASH_MSK_OFFSET_SHIFT 16 -#define I40E_GLQF_HASH_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_HASH_MSK_OFFSET_SHIFT) #define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */ -#define I40E_GLQF_ORT_MAX_INDEX 63 #define I40E_GLQF_ORT_PIT_INDX_SHIFT 0 #define I40E_GLQF_ORT_PIT_INDX_MASK I40E_MASK(0x1F, I40E_GLQF_ORT_PIT_INDX_SHIFT) #define I40E_GLQF_ORT_FIELD_CNT_SHIFT 5 #define I40E_GLQF_ORT_FIELD_CNT_MASK I40E_MASK(0x3, I40E_GLQF_ORT_FIELD_CNT_SHIFT) #define I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT 7 #define I40E_GLQF_ORT_FLX_PAYLOAD_MASK I40E_MASK(0x1, I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) -#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4)) /* _i=0...23 */ /* Reset: CORER */ -#define I40E_GLQF_PIT_MAX_INDEX 23 
-#define I40E_GLQF_PIT_SOURCE_OFF_SHIFT 0 -#define I40E_GLQF_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_SOURCE_OFF_SHIFT) -#define I40E_GLQF_PIT_FSIZE_SHIFT 5 -#define I40E_GLQF_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_FSIZE_SHIFT) -#define I40E_GLQF_PIT_DEST_OFF_SHIFT 10 -#define I40E_GLQF_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_GLQF_PIT_DEST_OFF_SHIFT) #define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ -#define I40E_GLQF_FDEVICTENA_MAX_INDEX 1 -#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0 -#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT) -#define I40E_GLQF_FDEVICTFLAG 0x00270280 /* Reset: CORER */ -#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT 0 -#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT) -#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT 8 -#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT) -#define I40E_PFQF_CTL_2 0x00270300 /* Reset: CORER */ -#define I40E_PFQF_CTL_2_PEHSIZE_SHIFT 0 -#define I40E_PFQF_CTL_2_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEHSIZE_SHIFT) -#define I40E_PFQF_CTL_2_PEDSIZE_SHIFT 5 -#define I40E_PFQF_CTL_2_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEDSIZE_SHIFT) /* Redefined for X722 family */ -#define I40E_X722_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_X722_PFQF_HLUT_MAX_INDEX 127 -#define I40E_X722_PFQF_HLUT_LUT0_SHIFT 0 -#define I40E_X722_PFQF_HLUT_LUT0_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT0_SHIFT) -#define I40E_X722_PFQF_HLUT_LUT1_SHIFT 8 -#define I40E_X722_PFQF_HLUT_LUT1_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT1_SHIFT) -#define I40E_X722_PFQF_HLUT_LUT2_SHIFT 16 -#define I40E_X722_PFQF_HLUT_LUT2_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT2_SHIFT) -#define I40E_X722_PFQF_HLUT_LUT3_SHIFT 24 -#define I40E_X722_PFQF_HLUT_LUT3_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT3_SHIFT) -#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PFQF_HREGION_MAX_INDEX 7 -#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0 -#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT) -#define I40E_PFQF_HREGION_REGION_0_SHIFT 1 -#define I40E_PFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_0_SHIFT) -#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4 -#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT) -#define I40E_PFQF_HREGION_REGION_1_SHIFT 5 -#define I40E_PFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_1_SHIFT) -#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8 -#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT) -#define I40E_PFQF_HREGION_REGION_2_SHIFT 9 -#define I40E_PFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_2_SHIFT) -#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12 -#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT) -#define I40E_PFQF_HREGION_REGION_3_SHIFT 13 -#define I40E_PFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_3_SHIFT) -#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16 -#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT) -#define I40E_PFQF_HREGION_REGION_4_SHIFT 17 -#define I40E_PFQF_HREGION_REGION_4_MASK 
I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_4_SHIFT) -#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20 -#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT) -#define I40E_PFQF_HREGION_REGION_5_SHIFT 21 -#define I40E_PFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_5_SHIFT) -#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24 -#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT) -#define I40E_PFQF_HREGION_REGION_6_SHIFT 25 -#define I40E_PFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_6_SHIFT) -#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28 -#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT) -#define I40E_PFQF_HREGION_REGION_7_SHIFT 29 -#define I40E_PFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_7_SHIFT) -#define I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT 8 -#define I40E_VSIQF_CTL_RSS_LUT_TYPE_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT) -#define I40E_VSIQF_HKEY(_i, _VSI) (0x002A0000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...12, _VSI=0...383 */ /* Reset: CORER */ -#define I40E_VSIQF_HKEY_MAX_INDEX 12 -#define I40E_VSIQF_HKEY_KEY_0_SHIFT 0 -#define I40E_VSIQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_0_SHIFT) -#define I40E_VSIQF_HKEY_KEY_1_SHIFT 8 -#define I40E_VSIQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_1_SHIFT) -#define I40E_VSIQF_HKEY_KEY_2_SHIFT 16 -#define I40E_VSIQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_2_SHIFT) -#define I40E_VSIQF_HKEY_KEY_3_SHIFT 24 -#define I40E_VSIQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_3_SHIFT) -#define I40E_VSIQF_HLUT(_i, _VSI) (0x00220000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15, _VSI=0...383 */ /* Reset: CORER */ -#define I40E_VSIQF_HLUT_MAX_INDEX 15 -#define I40E_VSIQF_HLUT_LUT0_SHIFT 0 -#define I40E_VSIQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT0_SHIFT) -#define I40E_VSIQF_HLUT_LUT1_SHIFT 8 -#define I40E_VSIQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT1_SHIFT) -#define I40E_VSIQF_HLUT_LUT2_SHIFT 16 -#define I40E_VSIQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT2_SHIFT) -#define I40E_VSIQF_HLUT_LUT3_SHIFT 24 -#define I40E_VSIQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT3_SHIFT) #define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */ -#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT 0 -#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT) -#define I40E_GLGEN_STAT_HALT 0x00390000 /* Reset: CORER */ -#define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0 -#define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT) -#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30 -#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT) -#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30 -#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT) -#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */ -#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0 -#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT) -#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */ -#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0 -#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT) -#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */ -#define 
I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0 -#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT) -#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */ -#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0 -#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT) -#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4 -#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT) -#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16 -#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT) -#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31 -#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT) -#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */ -#define I40E_VFPE_CQACK1_PECQID_SHIFT 0 -#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT) -#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */ -#define I40E_VFPE_CQARM1_PECQID_SHIFT 0 -#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT) -#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */ -#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0 -#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT) -#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */ -#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0 -#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT) -#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16 -#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT) -#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */ -#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0 -#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT) -#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31 -#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT) -#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */ -#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0 -#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT) -#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16 -#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT) -#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */ -#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0 -#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT) -#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */ -#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0 -#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT) -#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */ -#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0 -#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT) -#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */ -#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0 -#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT) -#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20 -#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT) #endif /* _I40E_REGISTER_H_ */ diff 
--git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 5c255977fd58..8d3c9d37e42e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -18,10 +18,7 @@ #define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ #define I40E_ITR_MASK 0x1FFE /* mask for ITR register value */ #define I40E_MIN_ITR 2 /* reg uses 2 usec resolution */ -#define I40E_ITR_100K 10 /* all values below must be even */ -#define I40E_ITR_50K 20 #define I40E_ITR_20K 50 -#define I40E_ITR_18K 60 #define I40E_ITR_8K 122 #define I40E_MAX_ITR 8160 /* maximum value as per datasheet */ #define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC) @@ -52,9 +49,6 @@ static inline u16 i40e_intrl_usec_to_reg(int intrl) else return 0; } -#define I40E_INTRL_8K 125 /* 8000 ints/sec */ -#define I40E_INTRL_62K 16 /* 62500 ints/sec */ -#define I40E_INTRL_83K 12 /* 83333 ints/sec */ #define I40E_QUEUE_END_OF_LIST 0x7FF @@ -73,7 +67,6 @@ enum i40e_dyn_idx_t { /* these are indexes into ITRN registers */ #define I40E_RX_ITR I40E_IDX_ITR0 #define I40E_TX_ITR I40E_IDX_ITR1 -#define I40E_PE_ITR I40E_IDX_ITR2 /* Supported RSS offloads */ #define I40E_DEFAULT_RSS_HENA ( \ @@ -193,13 +186,6 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc, /* How many Rx Buffers do we bundle into one write to the hardware ? */ #define I40E_RX_BUFFER_WRITE 32 /* Must be power of 2 */ -#define I40E_RX_INCREMENT(r, i) \ - do { \ - (i)++; \ - if ((i) == (r)->count) \ - i = 0; \ - r->next_to_clean = i; \ - } while (0) #define I40E_RX_NEXT_DESC(r, i, n) \ do { \ @@ -209,11 +195,6 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc, (n) = I40E_RX_DESC((r), (i)); \ } while (0) -#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n) \ - do { \ - I40E_RX_NEXT_DESC((r), (i), (n)); \ - prefetch((n)); \ - } while (0) #define I40E_MAX_BUFFER_TXD 8 #define I40E_MIN_TX_LEN 17 @@ -262,15 +243,12 @@ static inline unsigned int i40e_txd_use_count(unsigned int size) /* Tx Descriptors needed, worst case */ #define DESC_NEEDED (MAX_SKB_FRAGS + 6) -#define I40E_MIN_DESC_PENDING 4 #define I40E_TX_FLAGS_HW_VLAN BIT(1) #define I40E_TX_FLAGS_SW_VLAN BIT(2) #define I40E_TX_FLAGS_TSO BIT(3) #define I40E_TX_FLAGS_IPV4 BIT(4) #define I40E_TX_FLAGS_IPV6 BIT(5) -#define I40E_TX_FLAGS_FCCRC BIT(6) -#define I40E_TX_FLAGS_FSO BIT(7) #define I40E_TX_FLAGS_TSYN BIT(8) #define I40E_TX_FLAGS_FD_SB BIT(9) #define I40E_TX_FLAGS_UDP_TUNNEL BIT(10) @@ -332,9 +310,7 @@ enum i40e_ring_state_t { /* some useful defines for virtchannel interface, which * is the only remaining user of header split */ -#define I40E_RX_DTYPE_NO_SPLIT 0 #define I40E_RX_DTYPE_HEADER_SPLIT 1 -#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 #define I40E_RX_SPLIT_L2 0x1 #define I40E_RX_SPLIT_IP 0x2 #define I40E_RX_SPLIT_TCP_UDP 0x4 @@ -444,7 +420,6 @@ static inline void set_ring_xdp(struct i40e_ring *ring) #define I40E_ITR_ADAPTIVE_MAX_USECS 0x007e #define I40E_ITR_ADAPTIVE_LATENCY 0x8000 #define I40E_ITR_ADAPTIVE_BULK 0x0000 -#define ITR_IS_BULK(x) (!((x) & I40E_ITR_ADAPTIVE_LATENCY)) struct i40e_ring_container { struct i40e_ring *ring; /* pointer to linked list of ring(s) */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 63e098f7cb63..52410d609ba1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -84,8 +84,6 @@ enum i40e_debug_mask { I40E_GLGEN_MSCA_OPCODE_SHIFT) #define 
I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_MASK(1, \ I40E_GLGEN_MSCA_OPCODE_SHIFT) -#define I40E_MDIO_CLAUSE45_OPCODE_READ_INC_ADDR_MASK I40E_MASK(2, \ - I40E_GLGEN_MSCA_OPCODE_SHIFT) #define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_MASK(3, \ I40E_GLGEN_MSCA_OPCODE_SHIFT) @@ -178,21 +176,9 @@ struct i40e_link_status { u8 module_type[3]; /* 1st byte: module identifier */ #define I40E_MODULE_TYPE_SFP 0x03 -#define I40E_MODULE_TYPE_QSFP 0x0D - /* 2nd byte: ethernet compliance codes for 10/40G */ -#define I40E_MODULE_TYPE_40G_ACTIVE 0x01 -#define I40E_MODULE_TYPE_40G_LR4 0x02 -#define I40E_MODULE_TYPE_40G_SR4 0x04 -#define I40E_MODULE_TYPE_40G_CR4 0x08 -#define I40E_MODULE_TYPE_10G_BASE_SR 0x10 -#define I40E_MODULE_TYPE_10G_BASE_LR 0x20 -#define I40E_MODULE_TYPE_10G_BASE_LRM 0x40 -#define I40E_MODULE_TYPE_10G_BASE_ER 0x80 /* 3rd byte: ethernet compliance codes for 1G */ #define I40E_MODULE_TYPE_1000BASE_SX 0x01 #define I40E_MODULE_TYPE_1000BASE_LX 0x02 -#define I40E_MODULE_TYPE_1000BASE_CX 0x04 -#define I40E_MODULE_TYPE_1000BASE_T 0x08 }; struct i40e_phy_info { @@ -262,9 +248,6 @@ struct i40e_phy_info { /* Capabilities of a PF or a VF or the whole device */ struct i40e_hw_capabilities { u32 switch_mode; -#define I40E_NVM_IMAGE_TYPE_EVB 0x0 -#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2 -#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3 /* Cloud filter modes: * Mode1: Filter on L4 port only @@ -273,14 +256,10 @@ struct i40e_hw_capabilities { */ #define I40E_CLOUD_FILTER_MODE1 0x6 #define I40E_CLOUD_FILTER_MODE2 0x7 -#define I40E_CLOUD_FILTER_MODE3 0x8 #define I40E_SWITCH_MODE_MASK 0xF u32 management_mode; u32 mng_protocols_over_mctp; -#define I40E_MNG_PROTOCOL_PLDM 0x2 -#define I40E_MNG_PROTOCOL_OEM_COMMANDS 0x4 -#define I40E_MNG_PROTOCOL_NCSI 0x8 u32 npar_enable; u32 os2bmc; u32 valid_functions; @@ -294,13 +273,8 @@ struct i40e_hw_capabilities { bool flex10_enable; bool flex10_capable; u32 flex10_mode; -#define I40E_FLEX10_MODE_UNKNOWN 0x0 -#define I40E_FLEX10_MODE_DCC 0x1 -#define I40E_FLEX10_MODE_DCI 0x2 u32 flex10_status; -#define I40E_FLEX10_STATUS_DCC_ERROR 0x1 -#define I40E_FLEX10_STATUS_VC_MODE 0x2 bool sec_rev_disabled; bool update_disabled; @@ -421,11 +395,8 @@ enum i40e_nvmupd_state { #define I40E_NVM_AQE 0xe #define I40E_NVM_EXEC 0xf -#define I40E_NVM_ADAPT_SHIFT 16 -#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT) #define I40E_NVMUPD_MAX_DATA 4096 -#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */ struct i40e_nvm_access { u32 command; @@ -438,7 +409,6 @@ struct i40e_nvm_access { /* (Q)SFP module access definitions */ #define I40E_I2C_EEPROM_DEV_ADDR 0xA0 #define I40E_I2C_EEPROM_DEV_ADDR2 0xA2 -#define I40E_MODULE_TYPE_ADDR 0x00 #define I40E_MODULE_REVISION_ADDR 0x01 #define I40E_MODULE_SFF_8472_COMP 0x5E #define I40E_MODULE_SFF_8472_SWAP 0x5C @@ -547,7 +517,6 @@ struct i40e_dcbx_config { #define I40E_DCBX_MODE_CEE 0x1 #define I40E_DCBX_MODE_IEEE 0x2 u8 app_mode; -#define I40E_DCBX_APPS_NON_WILLING 0x1 u32 numapps; u32 tlv_status; /* CEE mode TLV status */ struct i40e_dcb_ets_config etscfg; @@ -895,9 +864,6 @@ enum i40e_rx_ptype_payload_layer { #define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \ I40E_RXD_QW1_LENGTH_PBUF_SHIFT) -#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52 -#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \ - I40E_RXD_QW1_LENGTH_HBUF_SHIFT) #define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63 #define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT) @@ -926,7 +892,6 @@ enum i40e_rx_desc_pe_status_bits { I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29 }; -#define 
I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38 #define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000 #define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2 @@ -963,8 +928,6 @@ struct i40e_tx_desc { __le64 cmd_type_offset_bsz; }; -#define I40E_TXD_QW1_DTYPE_SHIFT 0 -#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT) enum i40e_tx_desc_dtype_value { I40E_TX_DESC_DTYPE_DATA = 0x0, @@ -980,7 +943,6 @@ enum i40e_tx_desc_dtype_value { }; #define I40E_TXD_QW1_CMD_SHIFT 4 -#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT) enum i40e_tx_desc_cmd_bits { I40E_TX_DESC_CMD_EOP = 0x0001, @@ -1004,8 +966,6 @@ enum i40e_tx_desc_cmd_bits { }; #define I40E_TXD_QW1_OFFSET_SHIFT 16 -#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \ - I40E_TXD_QW1_OFFSET_SHIFT) enum i40e_tx_desc_length_fields { /* Note: These are predefined bit offsets */ @@ -1015,11 +975,8 @@ enum i40e_tx_desc_length_fields { }; #define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34 -#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \ - I40E_TXD_QW1_TX_BUF_SZ_SHIFT) #define I40E_TXD_QW1_L2TAG1_SHIFT 48 -#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT) /* Context descriptors */ struct i40e_tx_context_desc { @@ -1029,11 +986,8 @@ struct i40e_tx_context_desc { __le64 type_cmd_tso_mss; }; -#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0 -#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT) #define I40E_TXD_CTX_QW1_CMD_SHIFT 4 -#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT) enum i40e_tx_ctx_desc_cmd_bits { I40E_TX_CTX_DESC_TSO = 0x01, @@ -1048,19 +1002,10 @@ enum i40e_tx_ctx_desc_cmd_bits { }; #define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30 -#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \ - I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) #define I40E_TXD_CTX_QW1_MSS_SHIFT 50 -#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \ - I40E_TXD_CTX_QW1_MSS_SHIFT) -#define I40E_TXD_CTX_QW1_VSI_SHIFT 50 -#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT) -#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0 -#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \ - I40E_TXD_CTX_QW0_EXT_IP_SHIFT) enum i40e_tx_ctx_desc_eipt_offload { I40E_TX_CTX_EXT_IP_NONE = 0x0, @@ -1070,28 +1015,16 @@ enum i40e_tx_ctx_desc_eipt_offload { }; #define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2 -#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \ - I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT) #define I40E_TXD_CTX_QW0_NATT_SHIFT 9 -#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) -#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11 -#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \ - BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) -#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK #define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12 -#define I40E_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \ - I40E_TXD_CTX_QW0_NATLEN_SHIFT) -#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19 -#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \ - I40E_TXD_CTX_QW0_DECTTL_SHIFT) #define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23 #define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT) @@ -1161,11 +1094,8 @@ enum i40e_filter_program_desc_fd_status { I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) #define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 -#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ - I40E_TXD_FLTR_QW1_CMD_SHIFT) #define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define 
I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT) enum i40e_filter_program_desc_pcmd { I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1, @@ -1316,7 +1246,6 @@ struct i40e_hw_port_stats { #define I40E_NVM_OEM_VER_OFF 0x83 #define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 #define I40E_SR_NVM_WAKE_ON_LAN 0x19 -#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 #define I40E_SR_NVM_EETRACK_LO 0x2D #define I40E_SR_NVM_EETRACK_HI 0x2E #define I40E_SR_VPD_PTR 0x2F @@ -1329,7 +1258,6 @@ struct i40e_hw_port_stats { #define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024 #define I40E_SR_CONTROL_WORD_1_SHIFT 0x06 #define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT) -#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5) #define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12) #define I40E_PTR_TYPE BIT(15) #define I40E_SR_OCP_CFG_WORD0 0x2B @@ -1463,14 +1391,11 @@ struct i40e_lldp_variables { /* Offsets into Alternate Ram */ #define I40E_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */ #define I40E_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */ -#define I40E_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */ -#define I40E_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */ #define I40E_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */ #define I40E_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */ /* Alternate Ram Bandwidth Masks */ #define I40E_ALT_BW_VALUE_MASK 0xFF -#define I40E_ALT_BW_RELATIVE_MASK 0x40000000 #define I40E_ALT_BW_VALID_MASK 0x80000000 /* RSS Hash Table Size */ @@ -1529,9 +1454,7 @@ struct i40e_package_header { /* Generic segment header */ struct i40e_generic_seg_header { #define SEGMENT_TYPE_METADATA 0x00000001 -#define SEGMENT_TYPE_NOTES 0x00000002 #define SEGMENT_TYPE_I40E 0x00000011 -#define SEGMENT_TYPE_X722 0x00000012 u32 type; struct i40e_ddp_version version; u32 size; @@ -1541,7 +1464,6 @@ struct i40e_generic_seg_header { struct i40e_metadata_segment { struct i40e_generic_seg_header header; struct i40e_ddp_version version; -#define I40E_DDP_TRACKID_RDONLY 0 #define I40E_DDP_TRACKID_INVALID 0xFFFFFFFF u32 track_id; char name[I40E_DDP_NAME_SIZE]; @@ -1575,10 +1497,6 @@ struct i40e_profile_section_header { #define SECTION_TYPE_AQ 0x00000801 #define SECTION_TYPE_RB_AQ 0x00001801 #define SECTION_TYPE_NOTE 0x80000000 -#define SECTION_TYPE_NAME 0x80000001 -#define SECTION_TYPE_PROTO 0x80000002 -#define SECTION_TYPE_PCTYPE 0x80000003 -#define SECTION_TYPE_PTYPE 0x80000004 u32 type; u32 offset; u32 size; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 631248c0981a..5491215d81de 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -10,7 +10,6 @@ #define I40E_VIRTCHNL_SUPPORTED_QTYPES 2 -#define I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED 3 #define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10 #define I40E_VLAN_PRIORITY_SHIFT 13 diff --git a/include/linux/net/intel/i40e_client.h b/include/linux/net/intel/i40e_client.h index 72994baf4941..f41387a8969f 100644 --- a/include/linux/net/intel/i40e_client.h +++ b/include/linux/net/intel/i40e_client.h @@ -37,11 +37,6 @@ enum i40e_client_instance_state { struct i40e_ops; struct i40e_client; -/* HW does not define a type value for AEQ; only for RX/TX and CEQ. - * In order for us to keep the interface simple, SW will define a - * unique type value for AEQ. 
- */ -#define I40E_QUEUE_TYPE_PE_AEQ 0x80 #define I40E_QUEUE_INVALID_IDX 0xFFFF struct i40e_qv_info { @@ -56,7 +51,6 @@ struct i40e_qvlist_info { struct i40e_qv_info qv_info[1]; }; -#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF /* set of LAN parameters useful for clients managed by LAN */ @@ -87,7 +81,6 @@ struct i40e_info { u8 __iomem *hw_addr; u8 fid; /* function id, PF id or VF id */ #define I40E_CLIENT_FTYPE_PF 0 -#define I40E_CLIENT_FTYPE_VF 1 u8 ftype; /* function type, PF or VF */ void *pf; @@ -184,8 +177,6 @@ struct i40e_client { unsigned long state; /* client state */ atomic_t ref_cnt; /* Count of all the client devices of this kind */ u32 flags; -#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0) -#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2) u8 type; #define I40E_CLIENT_IWARP 0 const struct i40e_client_ops *ops; /* client ops provided by the client */ -- cgit v1.2.3 From 33d226f504ed72cba3a2b42bbe2a993b3d6d9548 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 02:25:06 +0200 Subject: mtd: nand: Move nand_device forward declaration to the top This structure might be used earlier in this file, so let's move the forward declaration to the top. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529002517.3546-10-miquel.raynal@bootlin.com --- include/linux/mtd/nand.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 0c7483843a32..a1f38c778d0e 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -12,6 +12,8 @@ #include +struct nand_device; + /** * struct nand_memory_organization - Memory organization structure * @bits_per_cell: number of bits per NAND cell @@ -133,8 +135,6 @@ struct nand_bbt { unsigned long *cache; }; -struct nand_device; - /** * struct nand_ops - NAND operations * @erase: erase a specific block. No need to check if the block is bad before -- cgit v1.2.3 From 85f54c5588885cc3b5be4a07498dd0755de9f5cf Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 02:25:10 +0200 Subject: mtd: nand: Rename a core structure Prepare the migration to a generic ECC engine by renaming the nand_ecc_req structure to nand_ecc_props. This structure will be the base of a wider 'nand_ecc' structure. In nand_device, these properties are still named "eccreq" even though "eccprops" might be more descriptive. This is just a transition step; this field will soon be replaced by a much wider structure. The impact of renaming this field now would be huge compared to the benefit.
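As an illustration (hypothetical consumer code, not part of this patch), callers reading the requirements are unaffected by the rename, since the nand_device field keeps its "eccreq" name and members; only the type name changes:

	#include <linux/mtd/nand.h>

	/* Sketch: dump the ECC requirements of a NAND device. Builds the same
	 * way whether the field's type is named nand_ecc_req or nand_ecc_props.
	 */
	static void show_ecc_requirements(struct nand_device *nand)
	{
		pr_info("ECC requirements: strength %u, step size %u bytes\n",
			nand->eccreq.strength, nand->eccreq.step_size);
	}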
Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529002517.3546-14-miquel.raynal@bootlin.com --- include/linux/mtd/nand.h | 8 ++++---- include/linux/mtd/spinand.h | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index a1f38c778d0e..af99041ceaa9 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -116,11 +116,11 @@ struct nand_page_io_req { }; /** - * struct nand_ecc_req - NAND ECC requirements + * struct nand_ecc_props - NAND ECC properties * @strength: ECC strength - * @step_size: ECC step/block size + * @step_size: Number of bytes per step */ -struct nand_ecc_req { +struct nand_ecc_props { unsigned int strength; unsigned int step_size; }; @@ -179,7 +179,7 @@ struct nand_ops { struct nand_device { struct mtd_info mtd; struct nand_memory_organization memorg; - struct nand_ecc_req eccreq; + struct nand_ecc_props eccreq; struct nand_row_converter rowconv; struct nand_bbt bbt; const struct nand_ops *ops; diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index 1077c45721ff..7b78c4ba9b3e 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -309,7 +309,7 @@ struct spinand_info { struct spinand_devid devid; u32 flags; struct nand_memory_organization memorg; - struct nand_ecc_req eccreq; + struct nand_ecc_props eccreq; struct spinand_ecc_info eccinfo; struct { const struct spinand_op_variants *read_cache; -- cgit v1.2.3 From c4cabc08d09e4b107b685e08bdec8a38a91089d8 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:12:55 +0200 Subject: mtd: rawnand: Use unsigned types for nand_chip unsigned values page_shift, phys_erase_shift, bbt_erase_shift, chip_shift, pagemask, subpagesize and badblockbits are all positive values, so declare them as unsigned. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-2-miquel.raynal@bootlin.com --- include/linux/mtd/rawnand.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 65b1c1c18b41..830f2d08937f 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1110,11 +1110,11 @@ struct nand_chip { unsigned int options; unsigned int bbt_options; - int page_shift; - int phys_erase_shift; - int bbt_erase_shift; - int chip_shift; - int pagemask; + unsigned int page_shift; + unsigned int phys_erase_shift; + unsigned int bbt_erase_shift; + unsigned int chip_shift; + unsigned int pagemask; u8 *data_buf; struct { @@ -1122,10 +1122,10 @@ struct nand_chip { int page; } pagecache; - int subpagesize; + unsigned int subpagesize; int onfi_timing_mode_default; unsigned int badblockpos; - int badblockbits; + unsigned int badblockbits; struct nand_id id; struct nand_parameters parameters; -- cgit v1.2.3 From d1f3837a507d73746f9e2118fad20ee5e57e86cc Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:12:56 +0200 Subject: mtd: rawnand: Only use u8 instead of uint8_t in nand_chip structure Mechanical change to avoid using old types. 
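For reference, a minimal illustration (not part of this diff) of the kernel-native fixed-width types from <linux/types.h> that replace the old C99 spellings:

	#include <linux/types.h>

	u8 marker;	/* preferred over uint8_t in kernel code */
	u16 offset;	/* preferred over uint16_t */
	u32 length;	/* preferred over uint32_t */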
Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-3-miquel.raynal@bootlin.com --- include/linux/mtd/rawnand.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 830f2d08937f..cea137778224 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1141,13 +1141,13 @@ struct nand_chip { int (*suspend)(struct nand_chip *chip); void (*resume)(struct nand_chip *chip); - uint8_t *oob_poi; + u8 *oob_poi; struct nand_controller *controller; struct nand_ecc_ctrl ecc; unsigned long buf_align; - uint8_t *bbt; + u8 *bbt; struct nand_bbt_descr *bbt_td; struct nand_bbt_descr *bbt_md; -- cgit v1.2.3 From 8e8b2706e15d16443b1ea61a6f994c08ec5b9486 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:12:57 +0200 Subject: mtd: rawnand: Create a nand_chip operations structure And move nand_chip hooks there. While moving entries from one structure to the other, adapt the documentation style. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-4-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_base.c | 20 ++++++++++---------- drivers/mtd/nand/raw/nand_hynix.c | 2 +- drivers/mtd/nand/raw/nand_macronix.c | 10 +++++----- drivers/mtd/nand/raw/nand_micron.c | 2 +- include/linux/mtd/rawnand.h | 32 ++++++++++++++++++-------------- 5 files changed, 35 insertions(+), 31 deletions(-) (limited to 'include') diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 45124dbb1835..d9cb71e7c0ed 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -3215,10 +3215,10 @@ static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode) if (retry_mode >= chip->read_retries) return -EINVAL; - if (!chip->setup_read_retry) + if (!chip->ops.setup_read_retry) return -EOPNOTSUPP; - return chip->setup_read_retry(chip, retry_mode); + return chip->ops.setup_read_retry(chip, retry_mode); } static void nand_wait_readrdy(struct nand_chip *chip) @@ -4462,8 +4462,8 @@ static int nand_suspend(struct mtd_info *mtd) int ret = 0; mutex_lock(&chip->lock); - if (chip->suspend) - ret = chip->suspend(chip); + if (chip->ops.suspend) + ret = chip->ops.suspend(chip); if (!ret) chip->suspended = 1; mutex_unlock(&chip->lock); @@ -4481,8 +4481,8 @@ static void nand_resume(struct mtd_info *mtd) mutex_lock(&chip->lock); if (chip->suspended) { - if (chip->resume) - chip->resume(chip); + if (chip->ops.resume) + chip->ops.resume(chip); chip->suspended = 0; } else { pr_err("%s called for a chip which is not in suspended state\n", @@ -4511,10 +4511,10 @@ static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) { struct nand_chip *chip = mtd_to_nand(mtd); - if (!chip->lock_area) + if (!chip->ops.lock_area) return -ENOTSUPP; - return chip->lock_area(chip, ofs, len); + return chip->ops.lock_area(chip, ofs, len); } /** @@ -4527,10 +4527,10 @@ static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) { struct nand_chip *chip = mtd_to_nand(mtd); - if (!chip->unlock_area) + if (!chip->ops.unlock_area) return -ENOTSUPP; - return chip->unlock_area(chip, ofs, len); + return chip->ops.unlock_area(chip, ofs, len); } /* Set default functions */ diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c index 7caedaa5b9e5..7d1be53f27f3 100644 --- a/drivers/mtd/nand/raw/nand_hynix.c 
+++ b/drivers/mtd/nand/raw/nand_hynix.c @@ -337,7 +337,7 @@ static int hynix_mlc_1xnm_rr_init(struct nand_chip *chip, rr->nregs = nregs; rr->regs = hynix_1xnm_mlc_read_retry_regs; hynix->read_retry = rr; - chip->setup_read_retry = hynix_nand_setup_read_retry; + chip->ops.setup_read_retry = hynix_nand_setup_read_retry; chip->read_retries = nmodes; out: diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c index 09c254c97b5c..1472f925f386 100644 --- a/drivers/mtd/nand/raw/nand_macronix.c +++ b/drivers/mtd/nand/raw/nand_macronix.c @@ -130,7 +130,7 @@ static void macronix_nand_onfi_init(struct nand_chip *chip) return; chip->read_retries = MACRONIX_NUM_READ_RETRY_MODES; - chip->setup_read_retry = macronix_nand_setup_read_retry; + chip->ops.setup_read_retry = macronix_nand_setup_read_retry; if (p->supports_set_get_features) { bitmap_set(p->set_feature_list, @@ -242,8 +242,8 @@ static void macronix_nand_block_protection_support(struct nand_chip *chip) bitmap_set(chip->parameters.set_feature_list, ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1); - chip->lock_area = mxic_nand_lock; - chip->unlock_area = mxic_nand_unlock; + chip->ops.lock_area = mxic_nand_lock; + chip->ops.unlock_area = mxic_nand_unlock; } static int nand_power_down_op(struct nand_chip *chip) @@ -312,8 +312,8 @@ static void macronix_nand_deep_power_down_support(struct nand_chip *chip) if (i < 0) return; - chip->suspend = mxic_nand_suspend; - chip->resume = mxic_nand_resume; + chip->ops.suspend = mxic_nand_suspend; + chip->ops.resume = mxic_nand_resume; } static int macronix_nand_init(struct nand_chip *chip) diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c index 3589b4fce0d4..4385092a9325 100644 --- a/drivers/mtd/nand/raw/nand_micron.c +++ b/drivers/mtd/nand/raw/nand_micron.c @@ -84,7 +84,7 @@ static int micron_nand_onfi_init(struct nand_chip *chip) struct nand_onfi_vendor_micron *micron = (void *)p->onfi->vendor; chip->read_retries = micron->read_retry_options; - chip->setup_read_retry = micron_nand_setup_read_retry; + chip->ops.setup_read_retry = micron_nand_setup_read_retry; } if (p->supports_set_get_features) { diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index cea137778224..7f9be95ca8dc 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1027,16 +1027,31 @@ struct nand_legacy { struct nand_controller dummy_controller; }; +/** + * struct nand_chip_ops - NAND chip operations + * @suspend: Suspend operation + * @resume: Resume operation + * @lock_area: Lock operation + * @unlock_area: Unlock operation + * @setup_read_retry: Set the read-retry mode (mostly needed for MLC NANDs) + */ +struct nand_chip_ops { + int (*suspend)(struct nand_chip *chip); + void (*resume)(struct nand_chip *chip); + int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len); + int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len); + int (*setup_read_retry)(struct nand_chip *chip, int retry_mode); +}; + /** * struct nand_chip - NAND Private Flash Chip Data * @base: Inherit from the generic NAND device + * @ops: NAND chip operations * @legacy: All legacy fields/hooks. If you develop a new driver, * don't even try to use any of these fields/hooks, and if * you're modifying an existing driver that is using those * fields/hooks, you should consider reworking the driver * avoid using them. - * @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for - * setting the read-retry mode. Mostly needed for MLC NAND. 
* @ecc: [BOARDSPECIFIC] ECC control structure * @buf_align: minimum buffer alignment required by a platform * @oob_poi: "poison value buffer," used for laying out OOB data @@ -1081,8 +1096,6 @@ struct nand_legacy { * @lock: lock protecting the suspended field. Also used to * serialize accesses to the NAND device. * @suspended: set to 1 when the device is suspended, 0 when it's not. - * @suspend: [REPLACEABLE] specific NAND device suspend operation - * @resume: [REPLACEABLE] specific NAND device resume operation * @bbt: [INTERN] bad block table pointer * @bbt_td: [REPLACEABLE] bad block table descriptor for flash * lookup. @@ -1096,17 +1109,13 @@ struct nand_legacy { * @manufacturer: [INTERN] Contains manufacturer information * @manufacturer.desc: [INTERN] Contains manufacturer's description * @manufacturer.priv: [INTERN] Contains manufacturer private information - * @lock_area: [REPLACEABLE] specific NAND chip lock operation - * @unlock_area: [REPLACEABLE] specific NAND chip unlock operation */ struct nand_chip { struct nand_device base; - + struct nand_chip_ops ops; struct nand_legacy legacy; - int (*setup_read_retry)(struct nand_chip *chip, int retry_mode); - unsigned int options; unsigned int bbt_options; @@ -1138,8 +1147,6 @@ struct nand_chip { struct mutex lock; unsigned int suspended : 1; - int (*suspend)(struct nand_chip *chip); - void (*resume)(struct nand_chip *chip); u8 *oob_poi; struct nand_controller *controller; @@ -1159,9 +1166,6 @@ struct nand_chip { const struct nand_manufacturer *desc; void *priv; } manufacturer; - - int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len); - int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len); }; extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops; -- cgit v1.2.3 From 271de009b7c0c1c15f63491a352ab08835462977 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:12:58 +0200 Subject: mtd: rawnand: Rename the manufacturer structure It is currently called nand_manufacturer but could actually be called nand_manufacturer_desc, like its instances, so that the former name is left unused for now. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-5-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/internals.h | 6 +++--- drivers/mtd/nand/raw/nand_base.c | 14 +++++++------- drivers/mtd/nand/raw/nand_ids.c | 16 ++++++++-------- include/linux/mtd/rawnand.h | 2 +- 4 files changed, 19 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h index 03866b0aadea..a518acfd9b3f 100644 --- a/drivers/mtd/nand/raw/internals.h +++ b/drivers/mtd/nand/raw/internals.h @@ -53,12 +53,12 @@ struct nand_manufacturer_ops { }; /** - * struct nand_manufacturer - NAND Flash Manufacturer structure + * struct nand_manufacturer_desc - NAND Flash Manufacturer descriptor * @name: Manufacturer name * @id: manufacturer ID code of device. 
* @ops: manufacturer operations */ -struct nand_manufacturer { +struct nand_manufacturer_desc { int id; char *name; const struct nand_manufacturer_ops *ops; @@ -79,7 +79,7 @@ extern const struct nand_manufacturer_ops toshiba_nand_manuf_ops; extern const struct mtd_pairing_scheme dist3_pairing_scheme; /* Core functions */ -const struct nand_manufacturer *nand_get_manufacturer(u8 id); +const struct nand_manufacturer_desc *nand_get_manufacturer_desc(u8 id); int nand_bbm_get_next_page(struct nand_chip *chip, int page); int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs); int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr, diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index d9cb71e7c0ed..534ee75d0f2b 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -4810,9 +4810,9 @@ static void nand_manufacturer_cleanup(struct nand_chip *chip) } static const char * -nand_manufacturer_name(const struct nand_manufacturer *manufacturer) +nand_manufacturer_name(const struct nand_manufacturer_desc *manufacturer_desc) { - return manufacturer ? manufacturer->name : "Unknown"; + return manufacturer_desc ? manufacturer_desc->name : "Unknown"; } /* @@ -4820,7 +4820,7 @@ nand_manufacturer_name(const struct nand_manufacturer *manufacturer) */ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type) { - const struct nand_manufacturer *manufacturer; + const struct nand_manufacturer_desc *manufacturer_desc; struct mtd_info *mtd = nand_to_mtd(chip); struct nand_memory_organization *memorg; int busw, ret; @@ -4877,8 +4877,8 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type) chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data)); /* Try to identify manufacturer */ - manufacturer = nand_get_manufacturer(maf_id); - chip->manufacturer.desc = manufacturer; + manufacturer_desc = nand_get_manufacturer_desc(maf_id); + chip->manufacturer.desc = manufacturer_desc; if (!type) type = nand_flash_ids; @@ -4957,7 +4957,7 @@ ident_done: */ pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n", maf_id, dev_id); - pr_info("%s %s\n", nand_manufacturer_name(manufacturer), + pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc), mtd->name); pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8, (chip->options & NAND_BUSWIDTH_16) ? 16 : 8); @@ -4992,7 +4992,7 @@ ident_done: pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n", maf_id, dev_id); - pr_info("%s %s\n", nand_manufacturer_name(manufacturer), + pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc), chip->parameters.model); pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n", (int)(targetsize >> 20), nand_is_slc(chip) ? 
"SLC" : "MLC", diff --git a/drivers/mtd/nand/raw/nand_ids.c b/drivers/mtd/nand/raw/nand_ids.c index ba27902fc54b..e0dbc2e316c7 100644 --- a/drivers/mtd/nand/raw/nand_ids.c +++ b/drivers/mtd/nand/raw/nand_ids.c @@ -166,7 +166,7 @@ struct nand_flash_dev nand_flash_ids[] = { }; /* Manufacturer IDs */ -static const struct nand_manufacturer nand_manufacturers[] = { +static const struct nand_manufacturer_desc nand_manufacturer_descs[] = { {NAND_MFR_AMD, "AMD/Spansion", &amd_nand_manuf_ops}, {NAND_MFR_ATO, "ATO"}, {NAND_MFR_EON, "Eon"}, @@ -186,20 +186,20 @@ static const struct nand_manufacturer nand_manufacturers[] = { }; /** - * nand_get_manufacturer - Get manufacturer information from the manufacturer - * ID + * nand_get_manufacturer_desc - Get manufacturer information from the + * manufacturer ID * @id: manufacturer ID * - * Returns a pointer a nand_manufacturer object if the manufacturer is defined + * Returns a nand_manufacturer_desc object if the manufacturer is defined * in the NAND manufacturers database, NULL otherwise. */ -const struct nand_manufacturer *nand_get_manufacturer(u8 id) +const struct nand_manufacturer_desc *nand_get_manufacturer_desc(u8 id) { int i; - for (i = 0; i < ARRAY_SIZE(nand_manufacturers); i++) - if (nand_manufacturers[i].id == id) - return &nand_manufacturers[i]; + for (i = 0; i < ARRAY_SIZE(nand_manufacturer_descs); i++) + if (nand_manufacturer_descs[i].id == id) + return &nand_manufacturer_descs[i]; return NULL; } diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 7f9be95ca8dc..860d3c1020ef 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1163,7 +1163,7 @@ struct nand_chip { void *priv; struct { - const struct nand_manufacturer *desc; + const struct nand_manufacturer_desc *desc; void *priv; } manufacturer; }; -- cgit v1.2.3 From 36017af430e6b2fad0b2ee5476103706160f1379 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:12:59 +0200 Subject: mtd: rawnand: Declare the nand_manufacturer structure out of nand_chip Now that struct nand_manufacturer type is free, use it to store the nand_manufacturer_desc and the manufacturer's private data. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-6-miquel.raynal@bootlin.com --- include/linux/mtd/rawnand.h | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 860d3c1020ef..a3dfa36a9fd5 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1043,10 +1043,21 @@ struct nand_chip_ops { int (*setup_read_retry)(struct nand_chip *chip, int retry_mode); }; +/** + * struct nand_manufacturer - NAND manufacturer structure + * @desc: The manufacturer description + * @priv: Private information for the manufacturer driver + */ +struct nand_manufacturer { + const struct nand_manufacturer_desc *desc; + void *priv; +}; + /** * struct nand_chip - NAND Private Flash Chip Data * @base: Inherit from the generic NAND device * @ops: NAND chip operations + * @manufacturer: Manufacturer information * @legacy: All legacy fields/hooks. If you develop a new driver, * don't even try to use any of these fields/hooks, and if * you're modifying an existing driver that is using those @@ -1106,13 +1117,11 @@ struct nand_chip_ops { * structure which is shared among multiple independent * devices. 
* @priv: [OPTIONAL] pointer to private chip data - * @manufacturer: [INTERN] Contains manufacturer information - * @manufacturer.desc: [INTERN] Contains manufacturer's description - * @manufacturer.priv: [INTERN] Contains manufacturer private information */ struct nand_chip { struct nand_device base; + struct nand_manufacturer manufacturer; struct nand_chip_ops ops; struct nand_legacy legacy; @@ -1161,11 +1170,6 @@ struct nand_chip { struct nand_bbt_descr *badblock_pattern; void *priv; - - struct { - const struct nand_manufacturer_desc *desc; - void *priv; - } manufacturer; }; extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops; -- cgit v1.2.3 From a63674c7cfe62221e05ba73107d9e15d73ff8bbd Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:13:00 +0200 Subject: mtd: rawnand: Reorganize the nand_chip structure Reorder fields in this structure and pack entries by theme: * The main descriptive structures * The data interface details * Bad block information * The device layout * Extra buffers matching the device layout * Internal values * External objects like the ECC controller, the ECC engine and a private data pointer. While at it, adapt the documentation style. I changed on purpose the description of @oob_poi which was weird. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-7-miquel.raynal@bootlin.com --- include/linux/mtd/rawnand.h | 166 ++++++++++++++++++++------------------------ 1 file changed, 76 insertions(+), 90 deletions(-) (limited to 'include') diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index a3dfa36a9fd5..544ec8736793 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1055,120 +1055,106 @@ struct nand_manufacturer { /** * struct nand_chip - NAND Private Flash Chip Data - * @base: Inherit from the generic NAND device - * @ops: NAND chip operations - * @manufacturer: Manufacturer information - * @legacy: All legacy fields/hooks. If you develop a new driver, - * don't even try to use any of these fields/hooks, and if - * you're modifying an existing driver that is using those - * fields/hooks, you should consider reworking the driver - * avoid using them. - * @ecc: [BOARDSPECIFIC] ECC control structure - * @buf_align: minimum buffer alignment required by a platform - * @oob_poi: "poison value buffer," used for laying out OOB data - * before writing - * @page_shift: [INTERN] number of address bits in a page (column - * address bits). - * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock - * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry - * @chip_shift: [INTERN] number of address bits in one chip - * @options: [BOARDSPECIFIC] various chip options. They can partly - * be set to inform nand_scan about special functionality. - * See the defines for further explanation. - * @bbt_options: [INTERN] bad block specific options. All options used - * here must come from bbm.h. By default, these options - * will be copied to the appropriate nand_bbt_descr's. - * @badblockpos: [INTERN] position of the bad block marker in the oob - * area. - * @badblockbits: [INTERN] minimum number of set bits in a good block's - * bad block marker position; i.e., BBM == 11110111b is - * not bad when badblockbits == 7 - * @onfi_timing_mode_default: [INTERN] default ONFI timing mode. 
This field is - * set to the actually used ONFI mode if the chip is - * ONFI compliant or deduced from the datasheet if - * the NAND chip is not ONFI compliant. - * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1 - * @data_buf: [INTERN] buffer for data, size is (page size + oobsize). - * @pagecache: Structure containing page cache related fields - * @pagecache.bitflips: Number of bitflips of the cached page - * @pagecache.page: Page number currently in the cache. -1 means no page is - * currently cached - * @subpagesize: [INTERN] holds the subpagesize - * @id: [INTERN] holds NAND ID - * @parameters: [INTERN] holds generic parameters under an easily - * readable form. - * @data_interface: [INTERN] NAND interface timing information - * @cur_cs: currently selected target. -1 means no target selected, - * otherwise we should always have cur_cs >= 0 && - * cur_cs < nanddev_ntargets(). NAND Controller drivers - * should not modify this value, but they're allowed to - * read it. - * @read_retries: [INTERN] the number of read retry modes supported - * @lock: lock protecting the suspended field. Also used to - * serialize accesses to the NAND device. - * @suspended: set to 1 when the device is suspended, 0 when it's not. - * @bbt: [INTERN] bad block table pointer - * @bbt_td: [REPLACEABLE] bad block table descriptor for flash - * lookup. - * @bbt_md: [REPLACEABLE] bad block table mirror descriptor - * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for initial - * bad block scan. - * @controller: [REPLACEABLE] a pointer to a hardware controller - * structure which is shared among multiple independent - * devices. - * @priv: [OPTIONAL] pointer to private chip data + * @base: Inherit from the generic NAND device + * @id: Holds NAND ID + * @parameters: Holds generic parameters under an easily readable form + * @manufacturer: Manufacturer information + * @ops: NAND chip operations + * @legacy: All legacy fields/hooks. If you develop a new driver, don't even try + * to use any of these fields/hooks, and if you're modifying an + * existing driver that is using those fields/hooks, you should + * consider reworking the driver and avoid using them. + * @options: Various chip options. They can partly be set to inform nand_scan + * about special functionality. See the defines for further + * explanation. + * @onfi_timing_mode_default: Default ONFI timing mode. This field is set to the + * actually used ONFI mode if the chip is ONFI + * compliant or deduced from the datasheet otherwise + * @data_interface: NAND interface timing information + * @bbt_erase_shift: Number of address bits in a bbt entry + * @bbt_options: Bad block table specific options. All options used here must + * come from bbm.h. By default, these options will be copied to + * the appropriate nand_bbt_descr's. 
+ * @badblockpos: Bad block marker position in the oob area + * @badblockbits: Minimum number of set bits in a good block's bad block marker + * position; i.e., BBM = 11110111b is good when badblockbits = 7 + * @bbt_td: Bad block table descriptor for flash lookup + * @bbt_md: Bad block table mirror descriptor + * @badblock_pattern: Bad block scan pattern used for initial bad block scan + * @bbt: Bad block table pointer + * @page_shift: Number of address bits in a page (column address bits) + * @phys_erase_shift: Number of address bits in a physical eraseblock + * @chip_shift: Number of address bits in one chip + * @pagemask: Page number mask = number of (pages / chip) - 1 + * @subpagesize: Holds the subpagesize + * @data_buf: Buffer for data, size is (page size + oobsize) + * @oob_poi: pointer on the OOB area covered by data_buf + * @pagecache: Structure containing page cache related fields + * @pagecache.bitflips: Number of bitflips of the cached page + * @pagecache.page: Page number currently in the cache. -1 means no page is + * currently cached + * @buf_align: Minimum buffer alignment required by a platform + * @lock: Lock protecting the suspended field. Also used to serialize accesses + * to the NAND device + * @suspended: Set to 1 when the device is suspended, 0 when it's not + * @cur_cs: Currently selected target. -1 means no target selected, otherwise we + * should always have cur_cs >= 0 && cur_cs < nanddev_ntargets(). + * NAND Controller drivers should not modify this value, but they're + * allowed to read it. + * @read_retries: The number of read retry modes supported + * @controller: The hardware controller structure which is shared among multiple + * independent devices + * @ecc: The ECC controller structure + * @priv: Chip private data */ - struct nand_chip { struct nand_device base; + struct nand_id id; + struct nand_parameters parameters; struct nand_manufacturer manufacturer; struct nand_chip_ops ops; struct nand_legacy legacy; - unsigned int options; + + /* Data interface */ + int onfi_timing_mode_default; + struct nand_data_interface data_interface; + + /* Bad block information */ + unsigned int bbt_erase_shift; unsigned int bbt_options; + unsigned int badblockpos; + unsigned int badblockbits; + struct nand_bbt_descr *bbt_td; + struct nand_bbt_descr *bbt_md; + struct nand_bbt_descr *badblock_pattern; + u8 *bbt; + /* Device internal layout */ unsigned int page_shift; unsigned int phys_erase_shift; - unsigned int bbt_erase_shift; unsigned int chip_shift; unsigned int pagemask; - u8 *data_buf; + unsigned int subpagesize; + /* Buffers */ + u8 *data_buf; + u8 *oob_poi; struct { unsigned int bitflips; int page; } pagecache; + unsigned long buf_align; - unsigned int subpagesize; - int onfi_timing_mode_default; - unsigned int badblockpos; - unsigned int badblockbits; - - struct nand_id id; - struct nand_parameters parameters; - - struct nand_data_interface data_interface; - - int cur_cs; - - int read_retries; - + /* Internals */ struct mutex lock; unsigned int suspended : 1; + int cur_cs; + int read_retries; - u8 *oob_poi; + /* Externals */ struct nand_controller *controller; - struct nand_ecc_ctrl ecc; - unsigned long buf_align; - - u8 *bbt; - struct nand_bbt_descr *bbt_td; - struct nand_bbt_descr *bbt_md; - - struct nand_bbt_descr *badblock_pattern; - void *priv; }; -- cgit v1.2.3 From e0160cd41fb81fde9ee4612a7ea2dfd631de2638 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:13:12 +0200 Subject: mtd: rawnand: Hide the chip->data_interface indirection As a 
preparation for allocating the data interface structure dynamically (and rename it), let's avoid accessing chip->data_interface directly. Instead, we introduce a helper, nand_get_interface_config(), and use it to retrieve the current data interface configuration out of a nand_chip object. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-19-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/marvell_nand.c | 9 ++++++--- drivers/mtd/nand/raw/meson_nand.c | 8 ++++---- drivers/mtd/nand/raw/nand_base.c | 34 +++++++++++++++++----------------- drivers/mtd/nand/raw/nand_legacy.c | 5 ++++- drivers/mtd/nand/raw/nand_toshiba.c | 2 +- drivers/mtd/nand/raw/stm32_fmc2_nand.c | 2 +- drivers/mtd/nand/raw/tango_nand.c | 2 +- include/linux/mtd/rawnand.h | 11 +++++++++++ 8 files changed, 45 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index 260a0430313e..df859889e4eb 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -1096,6 +1096,8 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip, const u8 *oob_buf, bool raw, int page) { + const struct nand_sdr_timings *sdr = + nand_get_sdr_timings(nand_get_interface_config(chip)); struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip); struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout; @@ -1141,7 +1143,7 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip, return ret; ret = marvell_nfc_wait_op(chip, - PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max)); + PSEC_TO_MSEC(sdr->tPROG_max)); return ret; } @@ -1562,6 +1564,8 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip, const u8 *buf, int oob_required, int page) { + const struct nand_sdr_timings *sdr = + nand_get_sdr_timings(nand_get_interface_config(chip)); struct mtd_info *mtd = nand_to_mtd(chip); const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout; const u8 *data = buf; @@ -1598,8 +1602,7 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip, marvell_nfc_wait_ndrun(chip); } - ret = marvell_nfc_wait_op(chip, - PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max)); + ret = marvell_nfc_wait_op(chip, PSEC_TO_MSEC(sdr->tPROG_max)); marvell_nfc_disable_hw_ecc(chip); diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c index 3f376471f3f7..580b7be0719f 100644 --- a/drivers/mtd/nand/raw/meson_nand.c +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -573,10 +573,10 @@ static int meson_nfc_write_buf(struct nand_chip *nand, u8 *buf, int len) static int meson_nfc_rw_cmd_prepare_and_execute(struct nand_chip *nand, int page, bool in) { + const struct nand_sdr_timings *sdr = + nand_get_sdr_timings(nand_get_interface_config(nand)); struct mtd_info *mtd = nand_to_mtd(nand); struct meson_nfc *nfc = nand_get_controller_data(nand); - const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&nand->data_interface); u32 *addrs = nfc->cmdfifo.rw.addrs; u32 cs = nfc->param.chip_select; u32 cmd0, cmd_num, row_start; @@ -626,9 +626,9 @@ static int meson_nfc_rw_cmd_prepare_and_execute(struct nand_chip *nand, static int meson_nfc_write_page_sub(struct nand_chip *nand, int page, int raw) { - struct mtd_info *mtd = nand_to_mtd(nand); const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&nand->data_interface); + 
nand_get_sdr_timings(nand_get_interface_config(nand)); + struct mtd_info *mtd = nand_to_mtd(nand); struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); struct meson_nfc *nfc = nand_get_controller_data(nand); int data_len, info_len; diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index b4de85794e07..7d393e1d0252 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -773,7 +773,7 @@ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms) return -ENOTSUPP; /* Wait tWB before polling the STATUS reg. */ - timings = nand_get_sdr_timings(&chip->data_interface); + timings = nand_get_sdr_timings(nand_get_interface_config(chip)); ndelay(PSEC_TO_NSEC(timings->tWB_max)); ret = nand_status_op(chip, NULL); @@ -1119,9 +1119,9 @@ static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page, unsigned int offset_in_page, void *buf, unsigned int len) { - struct mtd_info *mtd = nand_to_mtd(chip); const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); + struct mtd_info *mtd = nand_to_mtd(chip); u8 addrs[4]; struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_READ0, 0), @@ -1163,7 +1163,7 @@ static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page, unsigned int len) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); u8 addrs[5]; struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_READ0, 0), @@ -1260,7 +1260,7 @@ int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf, if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_PARAM, 0), NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)), @@ -1315,7 +1315,7 @@ int nand_change_read_column_op(struct nand_chip *chip, if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); u8 addrs[2] = {}; struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_RNDOUT, 0), @@ -1389,9 +1389,9 @@ static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page, unsigned int offset_in_page, const void *buf, unsigned int len, bool prog) { - struct mtd_info *mtd = nand_to_mtd(chip); const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); + struct mtd_info *mtd = nand_to_mtd(chip); u8 addrs[5] = {}; struct nand_op_instr instrs[] = { /* @@ -1514,7 +1514,7 @@ int nand_prog_page_end_op(struct nand_chip *chip) if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)), @@ -1621,7 +1621,7 @@ int nand_change_write_column_op(struct nand_chip *chip, if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); u8 addrs[2]; struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_RNDIN, 0), @@ -1676,7 +1676,7 @@ int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf, if 
(nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_READID, 0), NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)), @@ -1715,7 +1715,7 @@ int nand_status_op(struct nand_chip *chip, u8 *status) { if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_STATUS, PSEC_TO_NSEC(sdr->tADL_min)), @@ -1784,7 +1784,7 @@ int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock) if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); u8 addrs[3] = { page, page >> 8, page >> 16 }; struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_ERASE1, 0), @@ -1843,7 +1843,7 @@ static int nand_set_features_op(struct nand_chip *chip, u8 feature, if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0), NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)), @@ -1890,7 +1890,7 @@ static int nand_get_features_op(struct nand_chip *chip, u8 feature, if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0), NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)), @@ -1947,7 +1947,7 @@ int nand_reset_op(struct nand_chip *chip) { if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)), NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0), @@ -3226,7 +3226,7 @@ static void nand_wait_readrdy(struct nand_chip *chip) if (!(chip->options & NAND_NEED_READRDY)) return; - sdr = nand_get_sdr_timings(&chip->data_interface); + sdr = nand_get_sdr_timings(nand_get_interface_config(chip)); WARN_ON(nand_wait_rdy_op(chip, PSEC_TO_MSEC(sdr->tR_max), 0)); } diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c index 848403dcae03..fe769762e1d8 100644 --- a/drivers/mtd/nand/raw/nand_legacy.c +++ b/drivers/mtd/nand/raw/nand_legacy.c @@ -354,6 +354,9 @@ static void nand_command(struct nand_chip *chip, unsigned int command, static void nand_ccs_delay(struct nand_chip *chip) { + const struct nand_sdr_timings *sdr = + nand_get_sdr_timings(nand_get_interface_config(chip)); + /* * The controller already takes care of waiting for tCCS when the RNDIN * or RNDOUT command is sent, return directly. @@ -366,7 +369,7 @@ static void nand_ccs_delay(struct nand_chip *chip) * (which should be safe for all NANDs). 
*/ if (nand_controller_can_setup_data_iface(chip)) - ndelay(chip->data_interface.timings.sdr.tCCS_min / 1000); + ndelay(sdr->tCCS_min / 1000); else ndelay(500); } diff --git a/drivers/mtd/nand/raw/nand_toshiba.c b/drivers/mtd/nand/raw/nand_toshiba.c index ae069905d7e4..333037bdca41 100644 --- a/drivers/mtd/nand/raw/nand_toshiba.c +++ b/drivers/mtd/nand/raw/nand_toshiba.c @@ -33,7 +33,7 @@ static int toshiba_nand_benand_read_eccstatus_op(struct nand_chip *chip, if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(TOSHIBA_NAND_CMD_ECC_STATUS_READ, PSEC_TO_NSEC(sdr->tADL_min)), diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index 65c9d17b25a3..7320c0fc19ec 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -1308,7 +1308,7 @@ static int stm32_fmc2_nfc_waitrdy(struct nand_chip *chip, dev_warn(nfc->dev, "Waitrdy timeout\n"); /* Wait tWB before R/B# signal is low */ - timings = nand_get_sdr_timings(&chip->data_interface); + timings = nand_get_sdr_timings(nand_get_interface_config(chip)); ndelay(PSEC_TO_NSEC(timings->tWB_max)); /* R/B# signal is low, clear high level flag */ diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c index b3a0d08f1733..648dc7e77f6a 100644 --- a/drivers/mtd/nand/raw/tango_nand.c +++ b/drivers/mtd/nand/raw/tango_nand.c @@ -336,7 +336,7 @@ static int tango_write_page(struct nand_chip *chip, const u8 *buf, if (err) return err; - timings = nand_get_sdr_timings(&chip->data_interface); + timings = nand_get_sdr_timings(nand_get_interface_config(chip)); err = tango_waitrdy(chip, PSEC_TO_MSEC(timings->tR_max)); if (err) return err; diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 544ec8736793..0852df941130 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1203,6 +1203,17 @@ static inline struct device_node *nand_get_flash_node(struct nand_chip *chip) return mtd_get_of_node(nand_to_mtd(chip)); } +/** + * nand_get_interface_config - Retrieve the current interface configuration + * of a NAND chip + * @chip: The NAND chip + */ +static inline const struct nand_data_interface * +nand_get_interface_config(struct nand_chip *chip) +{ + return &chip->data_interface; +} + /* * A helper for defining older NAND chips where the second ID byte fully * defined the chip, including the geometry (chip size, eraseblock size, page -- cgit v1.2.3 From 4c46667b3d67253604ee42840917844548c86657 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:13:13 +0200 Subject: mtd: rawnand: s/data_interface/interface_config/ The name/suffix data_interface is a bit misleading in that the field or functions actually represent a configuration that can be applied by the controller/chip. Let's rename all fields/functions/hooks that are worth renaming. 
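For reference, a minimal sketch of the hook's shape after this rename, assuming a hypothetical acme_nfc controller driver (acme_nfc_exec_op and acme_nfc_program_timings are made-up helpers, and the sketch relies on <linux/mtd/rawnand.h>); the driver conversions in the diff below all follow this pattern:

	static int acme_nfc_setup_interface(struct nand_chip *chip, int csline,
					    const struct nand_interface_config *conf)
	{
		/* Only SDR timings exist for now; reject anything else. */
		const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);

		if (IS_ERR(sdr))
			return PTR_ERR(sdr);

		/* Controllers only validate support in the check-only case. */
		if (csline == NAND_DATA_IFACE_CHECK_ONLY)
			return 0;

		/* Hypothetical helper programming the timing registers. */
		return acme_nfc_program_timings(chip, sdr);
	}

	static const struct nand_controller_ops acme_nfc_controller_ops = {
		.exec_op = acme_nfc_exec_op,
		.setup_interface = acme_nfc_setup_interface,
	};
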
Signed-off-by: Boris Brezillon Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/ams-delta.c | 6 +-- drivers/mtd/nand/raw/arasan-nand-controller.c | 6 +-- drivers/mtd/nand/raw/atmel/nand-controller.c | 34 ++++++------- drivers/mtd/nand/raw/cadence-nand-controller.c | 6 +-- drivers/mtd/nand/raw/denali.c | 8 +-- drivers/mtd/nand/raw/fsmc_nand.c | 6 +-- drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 6 +-- drivers/mtd/nand/raw/internals.h | 12 ++--- drivers/mtd/nand/raw/marvell_nand.c | 9 ++-- drivers/mtd/nand/raw/meson_nand.c | 6 +-- drivers/mtd/nand/raw/mtk_nand.c | 6 +-- drivers/mtd/nand/raw/mxc_nand.c | 20 ++++---- drivers/mtd/nand/raw/mxic_nand.c | 6 +-- drivers/mtd/nand/raw/nand_base.c | 69 +++++++++++++------------- drivers/mtd/nand/raw/nand_legacy.c | 2 +- drivers/mtd/nand/raw/nand_timings.c | 17 ++++--- drivers/mtd/nand/raw/s3c2410.c | 6 +-- drivers/mtd/nand/raw/stm32_fmc2_nand.c | 4 +- drivers/mtd/nand/raw/sunxi_nand.c | 6 +-- drivers/mtd/nand/raw/tango_nand.c | 4 +- drivers/mtd/nand/raw/tegra_nand.c | 6 +-- include/linux/mtd/rawnand.h | 33 ++++++------ 22 files changed, 139 insertions(+), 139 deletions(-) (limited to 'include') diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c index 3711e7a0436c..fdba155416d2 100644 --- a/drivers/mtd/nand/raw/ams-delta.c +++ b/drivers/mtd/nand/raw/ams-delta.c @@ -191,8 +191,8 @@ static int gpio_nand_exec_op(struct nand_chip *this, return ret; } -static int gpio_nand_setup_data_interface(struct nand_chip *this, int csline, - const struct nand_data_interface *cf) +static int gpio_nand_setup_interface(struct nand_chip *this, int csline, + const struct nand_interface_config *cf) { struct gpio_nand *priv = nand_get_controller_data(this); const struct nand_sdr_timings *sdr = nand_get_sdr_timings(cf); @@ -217,7 +217,7 @@ static int gpio_nand_setup_data_interface(struct nand_chip *this, int csline, static const struct nand_controller_ops gpio_nand_ops = { .exec_op = gpio_nand_exec_op, - .setup_data_interface = gpio_nand_setup_data_interface, + .setup_interface = gpio_nand_setup_interface, }; /* diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c index 7141dcccba3c..12c643e97c85 100644 --- a/drivers/mtd/nand/raw/arasan-nand-controller.c +++ b/drivers/mtd/nand/raw/arasan-nand-controller.c @@ -854,8 +854,8 @@ static int anfc_exec_op(struct nand_chip *chip, return nand_op_parser_exec_op(chip, &anfc_op_parser, op, check_only); } -static int anfc_setup_data_interface(struct nand_chip *chip, int target, - const struct nand_data_interface *conf) +static int anfc_setup_interface(struct nand_chip *chip, int target, + const struct nand_interface_config *conf) { struct anand *anand = to_anand(chip); struct arasan_nfc *nfc = to_anfc(chip->controller); @@ -1083,7 +1083,7 @@ static void anfc_detach_chip(struct nand_chip *chip) static const struct nand_controller_ops anfc_ops = { .exec_op = anfc_exec_op, - .setup_data_interface = anfc_setup_data_interface, + .setup_interface = anfc_setup_interface, .attach_chip = anfc_attach_chip, .detach_chip = anfc_detach_chip, }; diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c index 46a3724a788e..c9818f548d07 100644 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c @@ -200,8 +200,8 @@ struct atmel_nand_controller_ops { void (*nand_init)(struct atmel_nand_controller *nc, struct atmel_nand *nand); int (*ecc_init)(struct nand_chip *chip); - int 
(*setup_data_interface)(struct atmel_nand *nand, int csline, - const struct nand_data_interface *conf); + int (*setup_interface)(struct atmel_nand *nand, int csline, + const struct nand_interface_config *conf); }; struct atmel_nand_controller_caps { @@ -1168,7 +1168,7 @@ static int atmel_hsmc_nand_ecc_init(struct nand_chip *chip) } static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand, - const struct nand_data_interface *conf, + const struct nand_interface_config *conf, struct atmel_smc_cs_conf *smcconf) { u32 ncycles, totalcycles, timeps, mckperiodps; @@ -1397,9 +1397,9 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand, return 0; } -static int atmel_smc_nand_setup_data_interface(struct atmel_nand *nand, +static int atmel_smc_nand_setup_interface(struct atmel_nand *nand, int csline, - const struct nand_data_interface *conf) + const struct nand_interface_config *conf) { struct atmel_nand_controller *nc; struct atmel_smc_cs_conf smcconf; @@ -1422,9 +1422,9 @@ static int atmel_smc_nand_setup_data_interface(struct atmel_nand *nand, return 0; } -static int atmel_hsmc_nand_setup_data_interface(struct atmel_nand *nand, +static int atmel_hsmc_nand_setup_interface(struct atmel_nand *nand, int csline, - const struct nand_data_interface *conf) + const struct nand_interface_config *conf) { struct atmel_hsmc_nand_controller *nc; struct atmel_smc_cs_conf smcconf; @@ -1452,8 +1452,8 @@ static int atmel_hsmc_nand_setup_data_interface(struct atmel_nand *nand, return 0; } -static int atmel_nand_setup_data_interface(struct nand_chip *chip, int csline, - const struct nand_data_interface *conf) +static int atmel_nand_setup_interface(struct nand_chip *chip, int csline, + const struct nand_interface_config *conf) { struct atmel_nand *nand = to_atmel_nand(chip); struct atmel_nand_controller *nc; @@ -1464,7 +1464,7 @@ static int atmel_nand_setup_data_interface(struct nand_chip *chip, int csline, (csline < 0 && csline != NAND_DATA_IFACE_CHECK_ONLY)) return -EINVAL; - return nc->caps->ops->setup_data_interface(nand, csline, conf); + return nc->caps->ops->setup_interface(nand, csline, conf); } static void atmel_nand_init(struct atmel_nand_controller *nc, @@ -1483,7 +1483,7 @@ static void atmel_nand_init(struct atmel_nand_controller *nc, chip->legacy.write_buf = atmel_nand_write_buf; chip->legacy.select_chip = atmel_nand_select_chip; - if (!nc->mck || !nc->caps->ops->setup_data_interface) + if (!nc->mck || !nc->caps->ops->setup_interface) chip->options |= NAND_KEEP_TIMINGS; /* Some NANDs require a longer delay than the default one (20us). 
*/ @@ -1956,7 +1956,7 @@ static int atmel_nand_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops atmel_nand_controller_ops = { .attach_chip = atmel_nand_attach_chip, - .setup_data_interface = atmel_nand_setup_data_interface, + .setup_interface = atmel_nand_setup_interface, }; static int atmel_nand_controller_init(struct atmel_nand_controller *nc, @@ -2318,7 +2318,7 @@ static const struct atmel_nand_controller_ops atmel_hsmc_nc_ops = { .remove = atmel_hsmc_nand_controller_remove, .ecc_init = atmel_hsmc_nand_ecc_init, .nand_init = atmel_hsmc_nand_init, - .setup_data_interface = atmel_hsmc_nand_setup_data_interface, + .setup_interface = atmel_hsmc_nand_setup_interface, }; static const struct atmel_nand_controller_caps atmel_sama5_nc_caps = { @@ -2375,10 +2375,10 @@ atmel_smc_nand_controller_remove(struct atmel_nand_controller *nc) /* * The SMC reg layout of at91rm9200 is completely different which prevents us - * from re-using atmel_smc_nand_setup_data_interface() for the - * ->setup_data_interface() hook. + * from re-using atmel_smc_nand_setup_interface() for the + * ->setup_interface() hook. * At this point, there's no support for the at91rm9200 SMC IP, so we leave - * ->setup_data_interface() unassigned. + * ->setup_interface() unassigned. */ static const struct atmel_nand_controller_ops at91rm9200_nc_ops = { .probe = atmel_smc_nand_controller_probe, @@ -2399,7 +2399,7 @@ static const struct atmel_nand_controller_ops atmel_smc_nc_ops = { .remove = atmel_smc_nand_controller_remove, .ecc_init = atmel_nand_ecc_init, .nand_init = atmel_smc_nand_init, - .setup_data_interface = atmel_smc_nand_setup_data_interface, + .setup_interface = atmel_smc_nand_setup_interface, }; static const struct atmel_nand_controller_caps atmel_sam9260_nc_caps = { diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c index c405722adfe1..574cbd3446e5 100644 --- a/drivers/mtd/nand/raw/cadence-nand-controller.c +++ b/drivers/mtd/nand/raw/cadence-nand-controller.c @@ -2303,8 +2303,8 @@ static inline u32 calc_tdvw(u32 trp_cnt, u32 clk_period, u32 trhoh_min, } static int -cadence_nand_setup_data_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf) +cadence_nand_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf) { const struct nand_sdr_timings *sdr; struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); @@ -2690,7 +2690,7 @@ static int cadence_nand_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops cadence_nand_controller_ops = { .attach_chip = cadence_nand_attach_chip, .exec_op = cadence_nand_exec_op, - .setup_data_interface = cadence_nand_setup_data_interface, + .setup_interface = cadence_nand_setup_interface, }; static int cadence_nand_chip_init(struct cdns_nand_ctrl *cdns_ctrl, diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index 4e6e1578aa2d..9d99dade95ce 100644 --- a/drivers/mtd/nand/raw/denali.c +++ b/drivers/mtd/nand/raw/denali.c @@ -761,8 +761,8 @@ static int denali_write_page(struct nand_chip *chip, const u8 *buf, return denali_page_xfer(chip, (void *)buf, mtd->writesize, page, true); } -static int denali_setup_data_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf) +static int denali_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf) { static const unsigned int data_setup_on_host = 10000; struct 
denali_controller *denali = to_denali_controller(chip); @@ -1173,7 +1173,7 @@ static int denali_exec_op(struct nand_chip *chip, static const struct nand_controller_ops denali_controller_ops = { .attach_chip = denali_attach_chip, .exec_op = denali_exec_op, - .setup_data_interface = denali_setup_data_interface, + .setup_interface = denali_setup_interface, }; int denali_chip_init(struct denali_controller *denali, @@ -1230,7 +1230,7 @@ int denali_chip_init(struct denali_controller *denali, chip->buf_align = 16; } - /* clk rate info is needed for setup_data_interface */ + /* clk rate info is needed for setup_interface */ if (!denali->clk_rate || !denali->clk_x_rate) chip->options |= NAND_KEEP_TIMINGS; diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index 3909752b14c5..92ddc41d0ff0 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c @@ -327,8 +327,8 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host, return 0; } -static int fsmc_setup_data_interface(struct nand_chip *nand, int csline, - const struct nand_data_interface *conf) +static int fsmc_setup_interface(struct nand_chip *nand, int csline, + const struct nand_interface_config *conf) { struct fsmc_nand_data *host = nand_to_fsmc(nand); struct fsmc_nand_timings tims; @@ -951,7 +951,7 @@ static int fsmc_nand_attach_chip(struct nand_chip *nand) static const struct nand_controller_ops fsmc_nand_controller_ops = { .attach_chip = fsmc_nand_attach_chip, .exec_op = fsmc_exec_op, - .setup_data_interface = fsmc_setup_data_interface, + .setup_interface = fsmc_setup_interface, }; /** diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 061a8ddda275..5d4aee46cc55 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -736,8 +736,8 @@ static void gpmi_nfc_apply_timings(struct gpmi_nand_data *this) udelay(dll_wait_time_us); } -static int gpmi_setup_data_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf) +static int gpmi_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf) { struct gpmi_nand_data *this = nand_get_controller_data(chip); const struct nand_sdr_timings *sdr; @@ -2400,7 +2400,7 @@ unmap: static const struct nand_controller_ops gpmi_nand_controller_ops = { .attach_chip = gpmi_nand_attach_chip, - .setup_data_interface = gpmi_setup_data_interface, + .setup_interface = gpmi_setup_interface, .exec_op = gpmi_nfc_exec_op, }; diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h index f851ed210d70..114c63a6a349 100644 --- a/drivers/mtd/nand/raw/internals.h +++ b/drivers/mtd/nand/raw/internals.h @@ -84,10 +84,10 @@ int nand_bbm_get_next_page(struct nand_chip *chip, int page); int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs); int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr, int allowbbt); -int onfi_fill_data_interface(struct nand_chip *chip, - struct nand_data_interface *iface, - enum nand_data_interface_type type, - unsigned int timing_mode); +int onfi_fill_interface_config(struct nand_chip *chip, + struct nand_interface_config *iface, + enum nand_interface_type type, + unsigned int timing_mode); unsigned int onfi_find_closest_sdr_mode(const struct nand_sdr_timings *spec_timings); int nand_get_features(struct nand_chip *chip, int addr, u8 *subfeature_param); @@ -133,10 +133,10 @@ static inline int nand_exec_op(struct nand_chip *chip, return 
chip->controller->ops->exec_op(chip, op, false); } -static inline bool nand_controller_can_setup_data_iface(struct nand_chip *chip) +static inline bool nand_controller_can_setup_interface(struct nand_chip *chip) { if (!chip->controller || !chip->controller->ops || - !chip->controller->ops->setup_data_interface) + !chip->controller->ops->setup_interface) return false; if (chip->options & NAND_KEEP_TIMINGS) diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index df859889e4eb..8482d3bd8b1f 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -2308,9 +2308,8 @@ static struct nand_bbt_descr bbt_mirror_descr = { .pattern = bbt_mirror_pattern }; -static int marvell_nfc_setup_data_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface - *conf) +static int marvell_nfc_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf) { struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip); struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); @@ -2511,7 +2510,7 @@ static int marvell_nand_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops marvell_nand_controller_ops = { .attach_chip = marvell_nand_attach_chip, .exec_op = marvell_nfc_exec_op, - .setup_data_interface = marvell_nfc_setup_data_interface, + .setup_interface = marvell_nfc_setup_interface, }; static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc, @@ -2647,7 +2646,7 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc, /* * Save a reference value for timing registers before - * ->setup_data_interface() is called. + * ->setup_interface() is called. */ marvell_nand->ndtr0 = readl_relaxed(nfc->regs + NDTR0); marvell_nand->ndtr1 = readl_relaxed(nfc->regs + NDTR1); diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c index 580b7be0719f..0e5829a2b54f 100644 --- a/drivers/mtd/nand/raw/meson_nand.c +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -1097,8 +1097,8 @@ static int meson_chip_buffer_init(struct nand_chip *nand) } static -int meson_nfc_setup_data_interface(struct nand_chip *nand, int csline, - const struct nand_data_interface *conf) +int meson_nfc_setup_interface(struct nand_chip *nand, int csline, + const struct nand_interface_config *conf) { struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); const struct nand_sdr_timings *timings; @@ -1222,7 +1222,7 @@ static int meson_nand_attach_chip(struct nand_chip *nand) static const struct nand_controller_ops meson_nand_controller_ops = { .attach_chip = meson_nand_attach_chip, .detach_chip = meson_nand_detach_chip, - .setup_data_interface = meson_nfc_setup_data_interface, + .setup_interface = meson_nfc_setup_interface, .exec_op = meson_nfc_exec_op, }; diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c index ca8457626d53..ad1b55dab211 100644 --- a/drivers/mtd/nand/raw/mtk_nand.c +++ b/drivers/mtd/nand/raw/mtk_nand.c @@ -531,8 +531,8 @@ static int mtk_nfc_exec_op(struct nand_chip *chip, return ret; } -static int mtk_nfc_setup_data_interface(struct nand_chip *chip, int csline, - const struct nand_data_interface *conf) +static int mtk_nfc_setup_interface(struct nand_chip *chip, int csline, + const struct nand_interface_config *conf) { struct mtk_nfc *nfc = nand_get_controller_data(chip); const struct nand_sdr_timings *timings; @@ -1357,7 +1357,7 @@ static int mtk_nfc_attach_chip(struct nand_chip *chip) static const 
struct nand_controller_ops mtk_nfc_controller_ops = { .attach_chip = mtk_nfc_attach_chip, - .setup_data_interface = mtk_nfc_setup_data_interface, + .setup_interface = mtk_nfc_setup_interface, .exec_op = mtk_nfc_exec_op, }; diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c index 09dacb83cb5a..07c41e8bae2d 100644 --- a/drivers/mtd/nand/raw/mxc_nand.c +++ b/drivers/mtd/nand/raw/mxc_nand.c @@ -137,8 +137,8 @@ struct mxc_nand_devtype_data { u32 (*get_ecc_status)(struct mxc_nand_host *); const struct mtd_ooblayout_ops *ooblayout; void (*select_chip)(struct nand_chip *chip, int cs); - int (*setup_data_interface)(struct nand_chip *chip, int csline, - const struct nand_data_interface *conf); + int (*setup_interface)(struct nand_chip *chip, int csline, + const struct nand_interface_config *conf); void (*enable_hwecc)(struct nand_chip *chip, bool enable); /* @@ -1139,8 +1139,8 @@ static void preset_v1(struct mtd_info *mtd) writew(0x4, NFC_V1_V2_WRPROT); } -static int mxc_nand_v2_setup_data_interface(struct nand_chip *chip, int csline, - const struct nand_data_interface *conf) +static int mxc_nand_v2_setup_interface(struct nand_chip *chip, int csline, + const struct nand_interface_config *conf) { struct mxc_nand_host *host = nand_get_controller_data(chip); int tRC_min_ns, tRC_ps, ret; @@ -1521,7 +1521,7 @@ static const struct mxc_nand_devtype_data imx25_nand_devtype_data = { .get_ecc_status = get_ecc_status_v2, .ooblayout = &mxc_v2_ooblayout_ops, .select_chip = mxc_nand_select_chip_v2, - .setup_data_interface = mxc_nand_v2_setup_data_interface, + .setup_interface = mxc_nand_v2_setup_interface, .enable_hwecc = mxc_nand_enable_hwecc_v1_v2, .irqpending_quirk = 0, .needs_ip = 0, @@ -1738,17 +1738,17 @@ static int mxcnd_attach_chip(struct nand_chip *chip) return 0; } -static int mxcnd_setup_data_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf) +static int mxcnd_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf) { struct mxc_nand_host *host = nand_get_controller_data(chip); - return host->devtype_data->setup_data_interface(chip, chipnr, conf); + return host->devtype_data->setup_interface(chip, chipnr, conf); } static const struct nand_controller_ops mxcnd_controller_ops = { .attach_chip = mxcnd_attach_chip, - .setup_data_interface = mxcnd_setup_data_interface, + .setup_interface = mxcnd_setup_interface, }; static int mxcnd_probe(struct platform_device *pdev) @@ -1809,7 +1809,7 @@ static int mxcnd_probe(struct platform_device *pdev) if (err < 0) return err; - if (!host->devtype_data->setup_data_interface) + if (!host->devtype_data->setup_interface) this->options |= NAND_KEEP_TIMINGS; if (host->devtype_data->needs_ip) { diff --git a/drivers/mtd/nand/raw/mxic_nand.c b/drivers/mtd/nand/raw/mxic_nand.c index 57f36721f4c6..d66b5b0971fa 100644 --- a/drivers/mtd/nand/raw/mxic_nand.c +++ b/drivers/mtd/nand/raw/mxic_nand.c @@ -451,8 +451,8 @@ static int mxic_nfc_exec_op(struct nand_chip *chip, return ret; } -static int mxic_nfc_setup_data_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf) +static int mxic_nfc_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf) { struct mxic_nand_ctlr *nfc = nand_get_controller_data(chip); const struct nand_sdr_timings *sdr; @@ -480,7 +480,7 @@ static int mxic_nfc_setup_data_interface(struct nand_chip *chip, int chipnr, static const struct nand_controller_ops mxic_nand_controller_ops = { 
.exec_op = mxic_nfc_exec_op, - .setup_data_interface = mxic_nfc_setup_data_interface, + .setup_interface = mxic_nfc_setup_interface, }; static int mxic_nfc_probe(struct platform_device *pdev) diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 7d393e1d0252..4fa18fb68d62 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -898,7 +898,7 @@ static bool nand_supports_set_features(struct nand_chip *chip, int addr) } /** - * nand_reset_data_interface - Reset data interface and timings + * nand_reset_interface - Reset data interface and timings * @chip: The NAND chip * @chipnr: Internal die id * @@ -906,11 +906,12 @@ static bool nand_supports_set_features(struct nand_chip *chip, int addr) * * Returns 0 for success or negative error code otherwise. */ -static int nand_reset_data_interface(struct nand_chip *chip, int chipnr) +static int nand_reset_interface(struct nand_chip *chip, int chipnr) { + const struct nand_controller_ops *ops = chip->controller->ops; int ret; - if (!nand_controller_can_setup_data_iface(chip)) + if (!nand_controller_can_setup_interface(chip)) return 0; /* @@ -927,9 +928,9 @@ static int nand_reset_data_interface(struct nand_chip *chip, int chipnr) * timings to timing mode 0. */ - onfi_fill_data_interface(chip, &chip->data_interface, NAND_SDR_IFACE, 0); - ret = chip->controller->ops->setup_data_interface(chip, chipnr, - &chip->data_interface); + onfi_fill_interface_config(chip, &chip->interface_config, + NAND_SDR_IFACE, 0); + ret = ops->setup_interface(chip, chipnr, &chip->interface_config); if (ret) pr_err("Failed to configure data interface to SDR timing mode 0\n"); @@ -937,7 +938,7 @@ static int nand_reset_data_interface(struct nand_chip *chip, int chipnr) } /** - * nand_setup_data_interface - Setup the best data interface and timings + * nand_setup_interface - Setup the best data interface and timings * @chip: The NAND chip * @chipnr: Internal die id * @@ -946,13 +947,13 @@ static int nand_reset_data_interface(struct nand_chip *chip, int chipnr) * * Returns 0 for success or negative error code otherwise. */ -static int nand_setup_data_interface(struct nand_chip *chip, int chipnr) +static int nand_setup_interface(struct nand_chip *chip, int chipnr) { - u8 mode = chip->data_interface.timings.mode; + u8 mode = chip->interface_config.timings.mode; u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { mode, }; int ret; - if (!nand_controller_can_setup_data_iface(chip)) + if (!nand_controller_can_setup_interface(chip)) return 0; /* Change the mode on the chip side (if supported by the NAND chip) */ @@ -966,8 +967,8 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr) } /* Change the mode on the controller side */ - ret = chip->controller->ops->setup_data_interface(chip, chipnr, - &chip->data_interface); + ret = chip->controller->ops->setup_interface(chip, chipnr, + &chip->interface_config); if (ret) return ret; @@ -996,7 +997,7 @@ err_reset_chip: * Fallback to mode 0 if the chip explicitly did not ack the chosen * timing mode. 
*/ - nand_reset_data_interface(chip, chipnr); + nand_reset_interface(chip, chipnr); nand_select_target(chip, chipnr); nand_reset_op(chip); nand_deselect_target(chip); @@ -1005,7 +1006,7 @@ err_reset_chip: } /** - * nand_choose_data_interface - find the best data interface and timings + * nand_choose_interface_config - find the best data interface and timings * @chip: The NAND chip * * Find the best data interface and NAND timings supported by the chip @@ -1013,16 +1014,16 @@ err_reset_chip: * First tries to retrieve supported timing modes from ONFI information, * and if the NAND chip does not support ONFI, relies on the * ->onfi_timing_mode_default specified in the nand_ids table. After this - * function nand_chip->data_interface is initialized with the best timing mode + * function nand_chip->interface_ is initialized with the best timing mode * available. * * Returns 0 for success or negative error code otherwise. */ -static int nand_choose_data_interface(struct nand_chip *chip) +static int nand_choose_interface_config(struct nand_chip *chip) { int modes, mode, ret; - if (!nand_controller_can_setup_data_iface(chip)) + if (!nand_controller_can_setup_interface(chip)) return 0; /* @@ -1040,8 +1041,8 @@ static int nand_choose_data_interface(struct nand_chip *chip) } for (mode = fls(modes) - 1; mode >= 0; mode--) { - ret = onfi_fill_data_interface(chip, &chip->data_interface, - NAND_SDR_IFACE, mode); + ret = onfi_fill_interface_config(chip, &chip->interface_config, + NAND_SDR_IFACE, mode); if (ret) continue; @@ -1049,9 +1050,9 @@ static int nand_choose_data_interface(struct nand_chip *chip) * Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the * controller supports the requested timings. */ - ret = chip->controller->ops->setup_data_interface(chip, + ret = chip->controller->ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY, - &chip->data_interface); + &chip->interface_config); if (!ret) { chip->onfi_timing_mode_default = mode; break; @@ -2477,17 +2478,17 @@ EXPORT_SYMBOL_GPL(nand_subop_get_data_len); * @chipnr: Internal die id * * Save the timings data structure, then apply SDR timings mode 0 (see - * nand_reset_data_interface for details), do the reset operation, and - * apply back the previous timings. + * nand_reset_interface for details), do the reset operation, and apply + * back the previous timings. * * Returns 0 on success, a negative error code otherwise. */ int nand_reset(struct nand_chip *chip, int chipnr) { - struct nand_data_interface saved_data_intf = chip->data_interface; + struct nand_interface_config saved_intf_config = chip->interface_config; int ret; - ret = nand_reset_data_interface(chip, chipnr); + ret = nand_reset_interface(chip, chipnr); if (ret) return ret; @@ -2503,18 +2504,18 @@ int nand_reset(struct nand_chip *chip, int chipnr) return ret; /* - * A nand_reset_data_interface() put both the NAND chip and the NAND + * A nand_reset_interface() put both the NAND chip and the NAND * controller in timings mode 0. If the default mode for this chip is * also 0, no need to proceed to the change again. Plus, at probe time, - * nand_setup_data_interface() uses ->set/get_features() which would + * nand_setup_interface() uses ->set/get_features() which would * fail anyway as the parameter page is not available yet. 
*/ - if (!memcmp(&chip->data_interface, &saved_data_intf, - sizeof(saved_data_intf))) + if (!memcmp(&chip->interface_config, &saved_intf_config, + sizeof(saved_intf_config))) return 0; - chip->data_interface = saved_data_intf; - ret = nand_setup_data_interface(chip, chipnr); + chip->interface_config = saved_intf_config; + ret = nand_setup_interface(chip, chipnr); if (ret) return ret; @@ -5183,7 +5184,7 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips, mutex_init(&chip->lock); /* Enforce the right timings for reset/detection */ - onfi_fill_data_interface(chip, &chip->data_interface, NAND_SDR_IFACE, 0); + onfi_fill_interface_config(chip, &chip->interface_config, NAND_SDR_IFACE, 0); ret = nand_dt_init(chip); if (ret) @@ -5971,13 +5972,13 @@ static int nand_scan_tail(struct nand_chip *chip) mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4); /* Find the fastest data interface for this chip */ - ret = nand_choose_data_interface(chip); + ret = nand_choose_interface_config(chip); if (ret) goto err_nanddev_cleanup; /* Enter fastest possible mode on all dies. */ for (i = 0; i < nanddev_ntargets(&chip->base); i++) { - ret = nand_setup_data_interface(chip, i); + ret = nand_setup_interface(chip, i); if (ret) goto err_nanddev_cleanup; } diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c index fe769762e1d8..2bcc03714432 100644 --- a/drivers/mtd/nand/raw/nand_legacy.c +++ b/drivers/mtd/nand/raw/nand_legacy.c @@ -368,7 +368,7 @@ static void nand_ccs_delay(struct nand_chip *chip) * Wait tCCS_min if it is correctly defined, otherwise wait 500ns * (which should be safe for all NANDs). */ - if (nand_controller_can_setup_data_iface(chip)) + if (nand_controller_can_setup_interface(chip)) ndelay(sdr->tCCS_min / 1000); else ndelay(500); diff --git a/drivers/mtd/nand/raw/nand_timings.c b/drivers/mtd/nand/raw/nand_timings.c index efff3583c549..bf05b4bceaa0 100644 --- a/drivers/mtd/nand/raw/nand_timings.c +++ b/drivers/mtd/nand/raw/nand_timings.c @@ -19,7 +19,7 @@ * * These four values are tweaked to be more accurate in the case of ONFI chips. 
*/ -static const struct nand_data_interface onfi_sdr_timings[] = { +static const struct nand_interface_config onfi_sdr_timings[] = { /* Mode 0 */ { .type = NAND_SDR_IFACE, @@ -340,16 +340,17 @@ onfi_find_closest_sdr_mode(const struct nand_sdr_timings *spec_timings) } /** - * onfi_fill_data_interface - Initialize a data interface from a given ONFI mode + * onfi_fill_interface_config - Initialize an interface config from a given + * ONFI mode * @chip: The NAND chip - * @iface: The data interface to fill - * @type: The data interface type + * @iface: The interface configuration to fill + * @type: The interface type * @timing_mode: The ONFI timing mode */ -int onfi_fill_data_interface(struct nand_chip *chip, - struct nand_data_interface *iface, - enum nand_data_interface_type type, - unsigned int timing_mode) +int onfi_fill_interface_config(struct nand_chip *chip, + struct nand_interface_config *iface, + enum nand_interface_type type, + unsigned int timing_mode) { struct onfi_params *onfi = chip->parameters.onfi; diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c index f86dff311464..f121a3ae294c 100644 --- a/drivers/mtd/nand/raw/s3c2410.c +++ b/drivers/mtd/nand/raw/s3c2410.c @@ -808,8 +808,8 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info, return -ENODEV; } -static int s3c2410_nand_setup_data_interface(struct nand_chip *chip, int csline, - const struct nand_data_interface *conf) +static int s3c2410_nand_setup_interface(struct nand_chip *chip, int csline, + const struct nand_interface_config *conf) { struct mtd_info *mtd = nand_to_mtd(chip); struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); @@ -999,7 +999,7 @@ static int s3c2410_nand_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops s3c24xx_nand_controller_ops = { .attach_chip = s3c2410_nand_attach_chip, - .setup_data_interface = s3c2410_nand_setup_data_interface, + .setup_interface = s3c2410_nand_setup_interface, }; static const struct of_device_id s3c24xx_nand_dt_ids[] = { diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index 7320c0fc19ec..a4140af43ed4 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -1546,7 +1546,7 @@ static void stm32_fmc2_nfc_calc_timings(struct nand_chip *chip, } static int stm32_fmc2_nfc_setup_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf) + const struct nand_interface_config *conf) { const struct nand_sdr_timings *sdrt; @@ -1764,7 +1764,7 @@ static int stm32_fmc2_nfc_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops stm32_fmc2_nfc_controller_ops = { .attach_chip = stm32_fmc2_nfc_attach_chip, .exec_op = stm32_fmc2_nfc_exec_op, - .setup_data_interface = stm32_fmc2_nfc_setup_interface, + .setup_interface = stm32_fmc2_nfc_setup_interface, }; static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc, diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index ffbc1651fadc..9c50c2b965e1 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -1376,8 +1376,8 @@ static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration, #define sunxi_nand_lookup_timing(l, p, c) \ _sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c) -static int sunxi_nfc_setup_data_interface(struct nand_chip *nand, int csline, - const struct nand_data_interface *conf) +static int sunxi_nfc_setup_interface(struct nand_chip 
*nand, int csline, + const struct nand_interface_config *conf) { struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller); @@ -1920,7 +1920,7 @@ static int sunxi_nfc_exec_op(struct nand_chip *nand, static const struct nand_controller_ops sunxi_nand_controller_ops = { .attach_chip = sunxi_nand_attach_chip, - .setup_data_interface = sunxi_nfc_setup_data_interface, + .setup_interface = sunxi_nfc_setup_interface, .exec_op = sunxi_nfc_exec_op, }; diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c index 648dc7e77f6a..bdb965ae7a4a 100644 --- a/drivers/mtd/nand/raw/tango_nand.c +++ b/drivers/mtd/nand/raw/tango_nand.c @@ -515,7 +515,7 @@ static u32 to_ticks(int kHz, int ps) } static int tango_set_timings(struct nand_chip *chip, int csline, - const struct nand_data_interface *conf) + const struct nand_interface_config *conf) { const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf); struct tango_nfc *nfc = to_tango_nfc(chip->controller); @@ -565,7 +565,7 @@ static int tango_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops tango_controller_ops = { .attach_chip = tango_attach_chip, - .setup_data_interface = tango_set_timings, + .setup_interface = tango_set_timings, .exec_op = tango_exec_op, }; diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c index f9d046b2cd3b..6b6212ffa01c 100644 --- a/drivers/mtd/nand/raw/tegra_nand.c +++ b/drivers/mtd/nand/raw/tegra_nand.c @@ -813,8 +813,8 @@ static void tegra_nand_setup_timing(struct tegra_nand_controller *ctrl, writel_relaxed(reg, ctrl->regs + TIMING_2); } -static int tegra_nand_setup_data_interface(struct nand_chip *chip, int csline, - const struct nand_data_interface *conf) +static int tegra_nand_setup_interface(struct nand_chip *chip, int csline, + const struct nand_interface_config *conf) { struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller); const struct nand_sdr_timings *timings; @@ -1053,7 +1053,7 @@ static int tegra_nand_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops tegra_nand_controller_ops = { .attach_chip = &tegra_nand_attach_chip, .exec_op = tegra_nand_exec_op, - .setup_data_interface = tegra_nand_setup_data_interface, + .setup_interface = tegra_nand_setup_interface, }; static int tegra_nand_chips_init(struct device *dev, diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 0852df941130..2ca56eef0f07 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -492,22 +492,22 @@ struct nand_sdr_timings { }; /** - * enum nand_data_interface_type - NAND interface timing type + * enum nand_interface_type - NAND interface type * @NAND_SDR_IFACE: Single Data Rate interface */ -enum nand_data_interface_type { +enum nand_interface_type { NAND_SDR_IFACE, }; /** - * struct nand_data_interface - NAND interface timing + * struct nand_interface_config - NAND interface timing * @type: type of the timing * @timings: The timing information * @timings.mode: Timing mode as defined in the specification * @timings.sdr: Use it when @type is %NAND_SDR_IFACE. 
*/ -struct nand_data_interface { - enum nand_data_interface_type type; +struct nand_interface_config { + enum nand_interface_type type; struct nand_timings { unsigned int mode; union { @@ -521,7 +521,7 @@ struct nand_data_interface { * @conf: The data interface */ static inline const struct nand_sdr_timings * -nand_get_sdr_timings(const struct nand_data_interface *conf) +nand_get_sdr_timings(const struct nand_interface_config *conf) { if (conf->type != NAND_SDR_IFACE) return ERR_PTR(-EINVAL); @@ -944,11 +944,10 @@ static inline void nand_op_trace(const char *prefix, * This method replaces chip->legacy.cmdfunc(), * chip->legacy.{read,write}_{buf,byte,word}(), * chip->legacy.dev_ready() and chip->legacy.waifunc(). - * @setup_data_interface: setup the data interface and timing. If - * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this - * means the configuration should not be applied but - * only checked. - * This hook is optional. + * @setup_interface: setup the data interface and timing. If chipnr is set to + * %NAND_DATA_IFACE_CHECK_ONLY this means the configuration + * should not be applied but only checked. + * This hook is optional. */ struct nand_controller_ops { int (*attach_chip)(struct nand_chip *chip); @@ -956,8 +955,8 @@ struct nand_controller_ops { int (*exec_op)(struct nand_chip *chip, const struct nand_operation *op, bool check_only); - int (*setup_data_interface)(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf); + int (*setup_interface)(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf); }; /** @@ -1070,7 +1069,7 @@ struct nand_manufacturer { * @onfi_timing_mode_default: Default ONFI timing mode. This field is set to the * actually used ONFI mode if the chip is ONFI * compliant or deduced from the datasheet otherwise - * @data_interface: NAND interface timing information + * @interface_config: NAND interface timing information * @bbt_erase_shift: Number of address bits in a bbt entry * @bbt_options: Bad block table specific options. All options used here must * come from bbm.h. By default, these options will be copied to @@ -1118,7 +1117,7 @@ struct nand_chip { /* Data interface */ int onfi_timing_mode_default; - struct nand_data_interface data_interface; + struct nand_interface_config interface_config; /* Bad block information */ unsigned int bbt_erase_shift; @@ -1208,10 +1207,10 @@ static inline struct device_node *nand_get_flash_node(struct nand_chip *chip) * of a NAND chip * @chip: The NAND chip */ -static inline const struct nand_data_interface * +static inline const struct nand_interface_config * nand_get_interface_config(struct nand_chip *chip) { - return &chip->data_interface; + return &chip->interface_config; } /* -- cgit v1.2.3 From 26d014f0400e5ff54cc80c8329e3adbd74db1e04 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:13:16 +0200 Subject: mtd: rawnand: Add the ->choose_interface_config() hook This hook can be overloaded by NAND manufacturer drivers to propose alternative timings when not following the main standards. In this case, the manufacturer drivers is responsible for choosing the best interface configuration that fits both the controller and chip capabilities. 
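A minimal sketch of how a manufacturer driver could use this hook, assuming a hypothetical acme_nand driver; the hook prototype and nand_choose_best_sdr_timings() are taken from the diffs below, while the driver name and the timing values are placeholders only:

	static int acme_nand_choose_interface_config(struct nand_chip *chip,
						     struct nand_interface_config *iface)
	{
		/*
		 * Vendor-specific SDR timings (placeholder picosecond values);
		 * passing NULL instead falls back to the ONFI-advertised modes.
		 */
		static const struct nand_sdr_timings acme_sdr_timings = {
			.tPROG_max = 700000000,
			.tR_max = 100000000,
			.tCCS_min = 500000,
		};

		return nand_choose_best_sdr_timings(chip, iface, &acme_sdr_timings);
	}

	/* Hooked from the manufacturer init path, before timings are chosen. */
	static void acme_nand_init(struct nand_chip *chip)
	{
		chip->ops.choose_interface_config = acme_nand_choose_interface_config;
	}
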
Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-23-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_base.c | 17 +++++++++++------ include/linux/mtd/rawnand.h | 3 +++ 2 files changed, 14 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 742d099df5c6..2f4eba1a1082 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -1066,18 +1066,23 @@ int nand_choose_best_sdr_timings(struct nand_chip *chip, * @chip: The NAND chip * * Find the best data interface and NAND timings supported by the chip - * and the driver. - * First tries to retrieve supported timing modes from ONFI information, - * and if the NAND chip does not support ONFI, relies on the - * ->onfi_timing_mode_default specified in the nand_ids table. After this - * function nand_chip->interface_ is initialized with the best timing mode - * available. + * and the driver. Eventually let the NAND manufacturer driver propose his own + * set of timings. + * + * After this function nand_chip->interface_config is initialized with the best + * timing mode available. + * + * Returns 0 for success or negative error code otherwise. */ static int nand_choose_interface_config(struct nand_chip *chip) { if (!nand_controller_can_setup_interface(chip)) return 0; + if (chip->ops.choose_interface_config) + return chip->ops.choose_interface_config(chip, + &chip->interface_config); + return nand_choose_best_sdr_timings(chip, &chip->interface_config, NULL); } diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 2ca56eef0f07..316a02189da1 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1033,6 +1033,7 @@ struct nand_legacy { * @lock_area: Lock operation * @unlock_area: Unlock operation * @setup_read_retry: Set the read-retry mode (mostly needed for MLC NANDs) + * @choose_interface_config: Choose the best interface configuration */ struct nand_chip_ops { int (*suspend)(struct nand_chip *chip); @@ -1040,6 +1041,8 @@ struct nand_chip_ops { int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len); int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len); int (*setup_read_retry)(struct nand_chip *chip, int retry_mode); + int (*choose_interface_config)(struct nand_chip *chip, + struct nand_interface_config *iface); }; /** -- cgit v1.2.3 From a69ad11168dca68b3f0adc6882422f4a2e2cb050 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:13:21 +0200 Subject: mtd: rawnand: Get rid of the default ONFI timing mode The ->choose_interface_config() hook is here for manufacturer drivers to provide a better timing interface than the default one, so the onfi_timing_mode_default field is not needed anymore.
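For illustration only (a worked example, not from the patch): with the default mode removed, the starting point now comes solely from the ONFI parameter page, where async_timing_mode is a bitmask of supported SDR modes, so the highest set bit wins, as in the hunk below:

	/* assume the chip reports async_timing_mode == 0x1f, i.e. modes 0-4 */
	best_mode = fls(chip->parameters.onfi->async_timing_mode) - 1;
	/* fls(0x1f) - 1 == 4: try mode 4 first, then fall back to slower ones */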
Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-28-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_base.c | 19 ++++--------------- include/linux/mtd/rawnand.h | 9 --------- 2 files changed, 4 insertions(+), 24 deletions(-) (limited to 'include') diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 2f4eba1a1082..753328f106c1 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -1012,10 +1012,8 @@ err_reset_chip: * @iface: the interface configuration (can eventually be updated) * @spec_timings: specific timings, when not fitting the ONFI specification * - * If specific timings are provided, use them. Otherwise, try to retrieve - * supported timing modes from ONFI information. Finally, if the NAND chip does - * not follow the ONFI specification, rely on the ->default_timing_mode - * specified in the nand_ids table. + * If specific timings are provided, use them. Otherwise, retrieve supported + * timing modes from ONFI information. */ int nand_choose_best_sdr_timings(struct nand_chip *chip, struct nand_interface_config *iface, @@ -1038,15 +1036,8 @@ int nand_choose_best_sdr_timings(struct nand_chip *chip, /* Fallback to slower modes */ best_mode = iface->timings.mode; - } else { - if (chip->parameters.onfi) { - unsigned int onfi_modes; - - onfi_modes = chip->parameters.onfi->async_timing_mode; - best_mode = fls(onfi_modes) - 1; - } else { - best_mode = chip->onfi_timing_mode_default; - } + } else if (chip->parameters.onfi) { + best_mode = fls(chip->parameters.onfi->async_timing_mode) - 1; } for (mode = best_mode; mode >= 0; mode--) { @@ -4767,8 +4758,6 @@ static bool find_full_id_nand(struct nand_chip *chip, chip->options |= type->options; chip->base.eccreq.strength = NAND_ECC_STRENGTH(type); chip->base.eccreq.step_size = NAND_ECC_STEP(type); - chip->onfi_timing_mode_default = - type->onfi_timing_mode_default; chip->parameters.model = kstrdup(type->name, GFP_KERNEL); if (!chip->parameters.model) diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 316a02189da1..a2427c67d38b 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1069,9 +1069,6 @@ struct nand_manufacturer { * @options: Various chip options. They can partly be set to inform nand_scan * about special functionality. See the defines for further * explanation. - * @onfi_timing_mode_default: Default ONFI timing mode. This field is set to the - * actually used ONFI mode if the chip is ONFI - * compliant or deduced from the datasheet otherwise * @interface_config: NAND interface timing information * @bbt_erase_shift: Number of address bits in a bbt entry * @bbt_options: Bad block table specific options. All options used here must @@ -1119,7 +1116,6 @@ struct nand_chip { unsigned int options; /* Data interface */ - int onfi_timing_mode_default; struct nand_interface_config interface_config; /* Bad block information */ @@ -1268,10 +1264,6 @@ nand_get_interface_config(struct nand_chip *chip) * @ecc_step_ds in nand_chip{}, also from the datasheet. * For example, the "4bit ECC for each 512Byte" can be set with * NAND_ECC_INFO(4, 512). - * @onfi_timing_mode_default: the default ONFI timing mode entered after a NAND - * reset. Should be deduced from timings described - * in the datasheet. 
- * */ struct nand_flash_dev { char *name; @@ -1292,7 +1284,6 @@ struct nand_flash_dev { uint16_t strength_ds; uint16_t step_ds; } ecc; - int onfi_timing_mode_default; }; int nand_create_bbt(struct nand_chip *chip); -- cgit v1.2.3 From 35b6bcc970f759d4a86d6221d09ca28ea20467c8 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:13:22 +0200 Subject: mtd: rawnand: Allocate the interface configurations dynamically Instead of manipulating the statically allocated structure and copy timings around, allocate one at identification time and save it in the nand_chip structure once it has been initialized. All NAND chips using the same interface configuration during reset and startup, we define a helper to retrieve a single reset interface configuration object, shared across all NAND chips. We use a second pointer to always have a reference on the currently applied interface configuration, which may either point to the "best interface configuration" or to the "default reset interface configuration". Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-29-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/internals.h | 1 + drivers/mtd/nand/raw/nand_base.c | 84 +++++++++++++++++++++++-------------- drivers/mtd/nand/raw/nand_timings.c | 6 +++ include/linux/mtd/rawnand.h | 11 +++-- 4 files changed, 67 insertions(+), 35 deletions(-) (limited to 'include') diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h index 5ebfbb89e572..012876e14317 100644 --- a/drivers/mtd/nand/raw/internals.h +++ b/drivers/mtd/nand/raw/internals.h @@ -93,6 +93,7 @@ onfi_find_closest_sdr_mode(const struct nand_sdr_timings *spec_timings); int nand_choose_best_sdr_timings(struct nand_chip *chip, struct nand_interface_config *iface, struct nand_sdr_timings *spec_timings); +const struct nand_interface_config *nand_get_reset_interface_config(void); int nand_get_features(struct nand_chip *chip, int addr, u8 *subfeature_param); int nand_set_features(struct nand_chip *chip, int addr, u8 *subfeature_param); int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf, diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 753328f106c1..0c768cb88f96 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -928,9 +928,9 @@ static int nand_reset_interface(struct nand_chip *chip, int chipnr) * timings to timing mode 0. */ - onfi_fill_interface_config(chip, &chip->interface_config, - NAND_SDR_IFACE, 0); - ret = ops->setup_interface(chip, chipnr, &chip->interface_config); + chip->current_interface_config = nand_get_reset_interface_config(); + ret = ops->setup_interface(chip, chipnr, + chip->current_interface_config); if (ret) pr_err("Failed to configure data interface to SDR timing mode 0\n"); @@ -949,13 +949,25 @@ static int nand_reset_interface(struct nand_chip *chip, int chipnr) */ static int nand_setup_interface(struct nand_chip *chip, int chipnr) { - u8 mode = chip->interface_config.timings.mode; - u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { mode, }; + const struct nand_controller_ops *ops = chip->controller->ops; + u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { }; int ret; if (!nand_controller_can_setup_interface(chip)) return 0; + /* + * A nand_reset_interface() put both the NAND chip and the NAND + * controller in timings mode 0. If the default mode for this chip is + * also 0, no need to proceed to the change again. 
Plus, at probe time, + * nand_setup_interface() uses ->set/get_features() which would + * fail anyway as the parameter page is not available yet. + */ + if (!chip->best_interface_config) + return 0; + + tmode_param[0] = chip->best_interface_config->timings.mode; + /* Change the mode on the chip side (if supported by the NAND chip) */ if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) { nand_select_target(chip, chipnr); @@ -967,14 +979,13 @@ static int nand_setup_interface(struct nand_chip *chip, int chipnr) } /* Change the mode on the controller side */ - ret = chip->controller->ops->setup_interface(chip, chipnr, - &chip->interface_config); + ret = ops->setup_interface(chip, chipnr, chip->best_interface_config); if (ret) return ret; /* Check the mode has been accepted by the chip, if supported */ if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) - return 0; + goto update_interface_config; memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN); nand_select_target(chip, chipnr); @@ -984,12 +995,15 @@ static int nand_setup_interface(struct nand_chip *chip, int chipnr) if (ret) goto err_reset_chip; - if (tmode_param[0] != mode) { + if (tmode_param[0] != chip->best_interface_config->timings.mode) { pr_warn("timing mode %d not acknowledged by the NAND chip\n", - mode); + chip->best_interface_config->timings.mode); goto err_reset_chip; } +update_interface_config: + chip->current_interface_config = chip->best_interface_config; + return 0; err_reset_chip: @@ -1031,8 +1045,10 @@ int nand_choose_best_sdr_timings(struct nand_chip *chip, /* Verify the controller supports the requested interface */ ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY, iface); - if (!ret) + if (!ret) { + chip->best_interface_config = iface; return ret; + } /* Fallback to slower modes */ best_mode = iface->timings.mode; @@ -1046,9 +1062,11 @@ int nand_choose_best_sdr_timings(struct nand_chip *chip, ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY, iface); if (!ret) - return 0; + break; } + chip->best_interface_config = iface; + return 0; } @@ -1067,15 +1085,25 @@ int nand_choose_best_sdr_timings(struct nand_chip *chip, */ static int nand_choose_interface_config(struct nand_chip *chip) { + struct nand_interface_config *iface; + int ret; + if (!nand_controller_can_setup_interface(chip)) return 0; + iface = kzalloc(sizeof(*iface), GFP_KERNEL); + if (!iface) + return -ENOMEM; + if (chip->ops.choose_interface_config) - return chip->ops.choose_interface_config(chip, - &chip->interface_config); + ret = chip->ops.choose_interface_config(chip, iface); + else + ret = nand_choose_best_sdr_timings(chip, iface, NULL); - return nand_choose_best_sdr_timings(chip, &chip->interface_config, - NULL); + if (ret) + kfree(iface); + + return ret; } /** @@ -2501,7 +2529,6 @@ EXPORT_SYMBOL_GPL(nand_subop_get_data_len); */ int nand_reset(struct nand_chip *chip, int chipnr) { - struct nand_interface_config saved_intf_config = chip->interface_config; int ret; ret = nand_reset_interface(chip, chipnr); @@ -2519,18 +2546,6 @@ int nand_reset(struct nand_chip *chip, int chipnr) if (ret) return ret; - /* - * A nand_reset_interface() put both the NAND chip and the NAND - * controller in timings mode 0. If the default mode for this chip is - * also 0, no need to proceed to the change again. Plus, at probe time, - * nand_setup_interface() uses ->set/get_features() which would - * fail anyway as the parameter page is not available yet. 
- */ - if (!memcmp(&chip->interface_config, &saved_intf_config, - sizeof(saved_intf_config))) - return 0; - - chip->interface_config = saved_intf_config; ret = nand_setup_interface(chip, chipnr); if (ret) return ret; @@ -5198,7 +5213,7 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips, mutex_init(&chip->lock); /* Enforce the right timings for reset/detection */ - onfi_fill_interface_config(chip, &chip->interface_config, NAND_SDR_IFACE, 0); + chip->current_interface_config = nand_get_reset_interface_config(); ret = nand_dt_init(chip); if (ret) @@ -5994,7 +6009,7 @@ static int nand_scan_tail(struct nand_chip *chip) for (i = 0; i < nanddev_ntargets(&chip->base); i++) { ret = nand_setup_interface(chip, i); if (ret) - goto err_nanddev_cleanup; + goto err_free_interface_config; } /* Check, if we should skip the bad block table scan */ @@ -6004,10 +6019,12 @@ static int nand_scan_tail(struct nand_chip *chip) /* Build bad block table */ ret = nand_create_bbt(chip); if (ret) - goto err_nanddev_cleanup; + goto err_free_interface_config; return 0; +err_free_interface_config: + kfree(chip->best_interface_config); err_nanddev_cleanup: nanddev_cleanup(&chip->base); @@ -6101,6 +6118,9 @@ void nand_cleanup(struct nand_chip *chip) & NAND_BBT_DYNAMICSTRUCT) kfree(chip->badblock_pattern); + /* Free the data interface */ + kfree(chip->best_interface_config); + /* Free manufacturer priv data. */ nand_manufacturer_cleanup(chip); diff --git a/drivers/mtd/nand/raw/nand_timings.c b/drivers/mtd/nand/raw/nand_timings.c index 1e22006c79ba..94d832646487 100644 --- a/drivers/mtd/nand/raw/nand_timings.c +++ b/drivers/mtd/nand/raw/nand_timings.c @@ -292,6 +292,12 @@ static const struct nand_interface_config onfi_sdr_timings[] = { }, }; +/* All NAND chips share the same reset data interface: SDR mode 0 */ +const struct nand_interface_config *nand_get_reset_interface_config(void) +{ + return &onfi_sdr_timings[0]; +} + /** * onfi_find_closest_sdr_mode - Derive the closest ONFI SDR timing mode given a * set of timings diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index a2427c67d38b..a725b620aca2 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1069,7 +1069,11 @@ struct nand_manufacturer { * @options: Various chip options. They can partly be set to inform nand_scan * about special functionality. See the defines for further * explanation. - * @interface_config: NAND interface timing information + * @current_interface_config: The currently used NAND interface configuration + * @best_interface_config: The best NAND interface configuration which fits both + * the NAND chip and NAND controller constraints. If + * unset, the default reset interface configuration must + * be used. * @bbt_erase_shift: Number of address bits in a bbt entry * @bbt_options: Bad block table specific options. All options used here must * come from bbm.h. 
By default, these options will be copied to @@ -1116,7 +1120,8 @@ struct nand_chip { unsigned int options; /* Data interface */ - struct nand_interface_config interface_config; + const struct nand_interface_config *current_interface_config; + struct nand_interface_config *best_interface_config; /* Bad block information */ unsigned int bbt_erase_shift; @@ -1209,7 +1214,7 @@ static inline struct device_node *nand_get_flash_node(struct nand_chip *chip) static inline const struct nand_interface_config * nand_get_interface_config(struct nand_chip *chip) { - return &chip->interface_config; + return chip->current_interface_config; } /* -- cgit v1.2.3 From bccb48c89fe3c09f1cbb7c8612e31f5daa1d4541 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Mon, 1 Jun 2020 20:13:21 +0200 Subject: batman-adv: Fix typos and grammar in documentation Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- Documentation/networking/batman-adv.rst | 6 ++-- include/uapi/linux/batadv_packet.h | 50 ++++++++++++++++----------------- include/uapi/linux/batman_adv.h | 4 +-- net/batman-adv/bat_iv_ogm.c | 8 +++--- net/batman-adv/bat_v_elp.c | 10 +++---- net/batman-adv/bat_v_ogm.c | 14 ++++----- net/batman-adv/bridge_loop_avoidance.c | 6 ++-- net/batman-adv/distributed-arp-table.c | 2 +- net/batman-adv/fragmentation.c | 6 ++-- net/batman-adv/hard-interface.c | 14 ++++----- net/batman-adv/log.h | 6 ++-- net/batman-adv/main.c | 2 +- net/batman-adv/main.h | 6 ++-- net/batman-adv/multicast.c | 21 +++++++------- net/batman-adv/netlink.c | 2 +- net/batman-adv/network-coding.c | 14 ++++----- net/batman-adv/originator.c | 8 +++--- net/batman-adv/routing.c | 4 +-- net/batman-adv/send.c | 4 +-- net/batman-adv/soft-interface.c | 2 +- net/batman-adv/tp_meter.c | 12 ++++---- net/batman-adv/translation-table.c | 10 +++---- net/batman-adv/tvlv.c | 4 +-- net/batman-adv/types.h | 12 ++++---- 24 files changed, 114 insertions(+), 113 deletions(-) (limited to 'include') diff --git a/Documentation/networking/batman-adv.rst b/Documentation/networking/batman-adv.rst index 02af49b08635..74821d29a22f 100644 --- a/Documentation/networking/batman-adv.rst +++ b/Documentation/networking/batman-adv.rst @@ -73,7 +73,7 @@ lower value. This will make the mesh more responsive to topology changes, but will also increase the overhead. Information about the current state can be accessed via the batadv generic -netlink family. batctl provides human readable version via its debug tables +netlink family. batctl provides a human readable version via its debug tables subcommands. @@ -115,8 +115,8 @@ are prefixed with "batman-adv:" So to see just these messages try:: $ dmesg | grep batman-adv When investigating problems with your mesh network, it is sometimes necessary to -see more detail debug messages. This must be enabled when compiling the -batman-adv module. When building batman-adv as part of kernel, use "make +see more detailed debug messages. This must be enabled when compiling the +batman-adv module. When building batman-adv as part of the kernel, use "make menuconfig" and enable the option ``B.A.T.M.A.N. debugging`` (``CONFIG_BATMAN_ADV_DEBUG=y``). diff --git a/include/uapi/linux/batadv_packet.h b/include/uapi/linux/batadv_packet.h index 0ae34c85ef9e..9c8604c5b5f6 100644 --- a/include/uapi/linux/batadv_packet.h +++ b/include/uapi/linux/batadv_packet.h @@ -72,8 +72,8 @@ enum batadv_subtype { /** * enum batadv_iv_flags - flags used in B.A.T.M.A.N. 
IV OGM packets - * @BATADV_NOT_BEST_NEXT_HOP: flag is set when ogm packet is forwarded and was - * previously received from someone else than the best neighbor. + * @BATADV_NOT_BEST_NEXT_HOP: flag is set when the ogm packet is forwarded and + * was previously received from someone other than the best neighbor. * @BATADV_PRIMARIES_FIRST_HOP: flag unused. * @BATADV_DIRECTLINK: flag is for the first hop or if rebroadcasted from a * one hop neighbor on the interface where it was originally received. @@ -195,8 +195,8 @@ struct batadv_bla_claim_dst { /** * struct batadv_ogm_packet - ogm (routing protocol) packet * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @flags: contains routing relevant flags - see enum batadv_iv_flags * @seqno: sequence identification * @orig: address of the source node @@ -247,7 +247,7 @@ struct batadv_ogm2_packet { /** * struct batadv_elp_packet - elp (neighbor discovery) packet * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header + * @version: batman-adv protocol version, part of the general header * @orig: originator mac address * @seqno: sequence number * @elp_interval: currently used ELP sending interval in ms @@ -265,15 +265,15 @@ struct batadv_elp_packet { /** * struct batadv_icmp_header - common members among all the ICMP packets * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @msg_type: ICMP packet type * @dst: address of the destination node * @orig: address of the source node * @uid: local ICMP socket identifier * @align: not used - useful for alignment purposes only * - * This structure is used for ICMP packets parsing only and it is never sent + * This structure is used for ICMP packet parsing only and it is never sent * over the wire. The alignment field at the end is there to ensure that * members are padded the same way as they are in real packets. 
*/ @@ -291,8 +291,8 @@ struct batadv_icmp_header { /** * struct batadv_icmp_packet - ICMP packet * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @msg_type: ICMP packet type * @dst: address of the destination node * @orig: address of the source node @@ -315,8 +315,8 @@ struct batadv_icmp_packet { /** * struct batadv_icmp_tp_packet - ICMP TP Meter packet * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @msg_type: ICMP packet type * @dst: address of the destination node * @orig: address of the source node @@ -358,8 +358,8 @@ enum batadv_icmp_tp_subtype { /** * struct batadv_icmp_packet_rr - ICMP RouteRecord packet * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @msg_type: ICMP packet type * @dst: address of the destination node * @orig: address of the source node @@ -397,8 +397,8 @@ struct batadv_icmp_packet_rr { /** * struct batadv_unicast_packet - unicast packet for network payload * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @ttvn: translation table version number * @dest: originator destination of the unicast packet */ @@ -433,8 +433,8 @@ struct batadv_unicast_4addr_packet { /** * struct batadv_frag_packet - fragmented packet * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @dest: final destination used when routing fragments * @orig: originator of the fragment used when merging the packet * @no: fragment number within this sequence @@ -467,8 +467,8 @@ struct batadv_frag_packet { /** * struct batadv_bcast_packet - broadcast packet for network payload * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @reserved: reserved byte for alignment * @seqno: sequence identification * @orig: originator of the broadcast packet @@ -488,10 +488,10 @@ struct batadv_bcast_packet { /** * struct batadv_coded_packet - network coded packet * @packet_type: 
batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @first_source: original source of first included packet - * @first_orig_dest: original destinal of first included packet + * @first_orig_dest: original destination of first included packet * @first_crc: checksum of first included packet * @first_ttvn: tt-version number of first included packet * @second_ttl: ttl of second packet @@ -523,8 +523,8 @@ struct batadv_coded_packet { /** * struct batadv_unicast_tvlv_packet - generic unicast packet with tvlv payload * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @reserved: reserved field (for packet alignment) * @src: address of the source * @dst: address of the destination diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h index 617c180ff0c8..8cf2ad11ead9 100644 --- a/include/uapi/linux/batman_adv.h +++ b/include/uapi/linux/batman_adv.h @@ -69,7 +69,7 @@ enum batadv_tt_client_flags { /** * @BATADV_TT_CLIENT_TEMP: this global client has been detected to be - * part of the network but no nnode has already announced it + * part of the network but no node has already announced it */ BATADV_TT_CLIENT_TEMP = (1 << 11), }; @@ -131,7 +131,7 @@ enum batadv_gw_modes { /** @BATADV_GW_MODE_CLIENT: send DHCP requests to gw servers */ BATADV_GW_MODE_CLIENT, - /** @BATADV_GW_MODE_SERVER: announce itself as gatway server */ + /** @BATADV_GW_MODE_SERVER: announce itself as gateway server */ BATADV_GW_MODE_SERVER, }; diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index e87f19c82e8d..5b3a41983156 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -134,7 +134,7 @@ static u8 batadv_ring_buffer_avg(const u8 lq_recv[]) * * Return: the originator object corresponding to the passed mac address or NULL * on failure. - * If the object does not exists it is created an initialised. + * If the object does not exist, it is created and initialised. 
*/ static struct batadv_orig_node * batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr) @@ -871,7 +871,7 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface) } /** - * batadv_iv_orig_ifinfo_sum() - Get bcast_own sum for originator over iterface + * batadv_iv_orig_ifinfo_sum() - Get bcast_own sum for originator over interface * @orig_node: originator which reproadcasted the OGMs directly * @if_outgoing: interface which transmitted the original OGM and received the * direct rebroadcast @@ -1554,7 +1554,7 @@ static void batadv_iv_ogm_process_reply(struct batadv_ogm_packet *ogm_packet, * batadv_iv_ogm_process() - process an incoming batman iv OGM * @skb: the skb containing the OGM * @ogm_offset: offset to the OGM which should be processed (for aggregates) - * @if_incoming: the interface where this packet was receved + * @if_incoming: the interface where this packet was received */ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset, struct batadv_hard_iface *if_incoming) @@ -2288,7 +2288,7 @@ batadv_iv_ogm_neigh_dump_hardif(struct sk_buff *msg, u32 portid, u32 seq, * @msg: Netlink message to dump into * @cb: Control block containing additional options * @bat_priv: The bat priv with all the soft interface information - * @single_hardif: Limit dump to this hard interfaace + * @single_hardif: Limit dump to this hard interface */ static void batadv_iv_ogm_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb, diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c index 0bdefa35da98..d35aca0e969a 100644 --- a/net/batman-adv/bat_v_elp.c +++ b/net/batman-adv/bat_v_elp.c @@ -60,7 +60,7 @@ static void batadv_v_elp_start_timer(struct batadv_hard_iface *hard_iface) * @neigh: the neighbour for which the throughput has to be obtained * * Return: The throughput towards the given neighbour in multiples of 100kpbs - * (a value of '1' equals to 0.1Mbps, '10' equals 1Mbps, etc). + * (a value of '1' equals 0.1Mbps, '10' equals 1Mbps, etc). */ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh) { @@ -183,8 +183,8 @@ void batadv_v_elp_throughput_metric_update(struct work_struct *work) * * Sends a predefined number of unicast wifi packets to a given neighbour in * order to trigger the throughput estimation on this link by the RC algorithm. - * Packets are sent only if there there is not enough payload unicast traffic - * towards this neighbour.. + * Packets are sent only if there is not enough payload unicast traffic towards + * this neighbour.. * * Return: True on success and false in case of error during skb preparation. */ @@ -244,7 +244,7 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh) * batadv_v_elp_periodic_work() - ELP periodic task per interface * @work: work queue item * - * Emits broadcast ELP message in regular intervals. + * Emits broadcast ELP messages in regular intervals. */ static void batadv_v_elp_periodic_work(struct work_struct *work) { @@ -499,7 +499,7 @@ orig_free: * @skb: the received packet * @if_incoming: the interface this packet was received through * - * Return: NET_RX_SUCCESS and consumes the skb if the packet was peoperly + * Return: NET_RX_SUCCESS and consumes the skb if the packet was properly * processed or NET_RX_DROP in case of failure. 
*/ int batadv_v_elp_packet_recv(struct sk_buff *skb, diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index 18028b9f95f0..0d404f7bcd9f 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -47,9 +47,9 @@ * @bat_priv: the bat priv with all the soft interface information * @addr: the address of the originator * - * Return: the orig_node corresponding to the specified address. If such object - * does not exist it is allocated here. In case of allocation failure returns - * NULL. + * Return: the orig_node corresponding to the specified address. If such an + * object does not exist, it is allocated here. In case of allocation failure + * returns NULL. */ struct batadv_orig_node *batadv_v_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr) @@ -172,7 +172,7 @@ static bool batadv_v_ogm_queue_left(struct sk_buff *skb, * batadv_v_ogm_aggr_list_free - free all elements in an aggregation queue * @hard_iface: the interface holding the aggregation queue * - * Empties the OGMv2 aggregation queue and frees all the skbs it contained. + * Empties the OGMv2 aggregation queue and frees all the skbs it contains. * * Caller needs to hold the hard_iface->bat_v.aggr_list.lock. */ @@ -378,7 +378,7 @@ static void batadv_v_ogm_send(struct work_struct *work) * batadv_v_ogm_aggr_work() - OGM queue periodic task per interface * @work: work queue item * - * Emits aggregated OGM message in regular intervals. + * Emits aggregated OGM messages in regular intervals. */ void batadv_v_ogm_aggr_work(struct work_struct *work) { @@ -399,7 +399,7 @@ void batadv_v_ogm_aggr_work(struct work_struct *work) * batadv_v_ogm_iface_enable() - prepare an interface for B.A.T.M.A.N. V * @hard_iface: the interface to prepare * - * Takes care of scheduling own OGM sending routine for this interface. + * Takes care of scheduling its own OGM sending routine for this interface. * * Return: 0 on success or a negative error code otherwise */ @@ -847,7 +847,7 @@ batadv_v_ogm_aggr_packet(int buff_pos, int packet_len, * batadv_v_ogm_process() - process an incoming batman v OGM * @skb: the skb containing the OGM * @ogm_offset: offset to the OGM which should be processed (for aggregates) - * @if_incoming: the interface where this packet was receved + * @if_incoming: the interface where this packet was received */ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset, struct batadv_hard_iface *if_incoming) diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 41cc87f06b14..91a04ca373dc 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -992,7 +992,7 @@ static bool batadv_handle_claim(struct batadv_priv *bat_priv, * @hw_dst: the Hardware destination in the ARP Header * @ethhdr: pointer to the Ethernet header of the claim frame * - * checks if it is a claim packet and if its on the same group. + * checks if it is a claim packet and if it's on the same group. * This function also applies the group ID of the sender * if it is in the same mesh. * @@ -1757,7 +1757,7 @@ void batadv_bla_free(struct batadv_priv *bat_priv) * @vid: the VLAN ID of the frame * * Checks if this packet is a loop detect frame which has been sent by us, - * throw an uevent and log the event if that is the case. + * throws an uevent and logs the event if that is the case. * * Return: true if it is a loop detect frame which is to be dropped, false * otherwise. 
@@ -1815,7 +1815,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb, * * we have to race for a claim * * if the frame is allowed on the LAN * - * in these cases, the skb is further handled by this function + * In these cases, the skb is further handled by this function * * Return: true if handled, otherwise it returns false and the caller shall * further process the skb. diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index b85da4b7a77b..0e6e53e9b5f3 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -666,7 +666,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst, * @vid: VLAN identifier * @packet_subtype: unicast4addr packet subtype to use * - * This function copies the skb with pskb_copy() and is sent as unicast packet + * This function copies the skb with pskb_copy() and is sent as a unicast packet * to each of the selected candidates. * * Return: true if the packet is sent to at least one candidate, false diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c index 7cad97644d05..9fdbe3068153 100644 --- a/net/batman-adv/fragmentation.c +++ b/net/batman-adv/fragmentation.c @@ -102,8 +102,8 @@ static int batadv_frag_size_limit(void) * * Caller must hold chain->lock. * - * Return: true if chain is empty and caller can just insert the new fragment - * without searching for the right position. + * Return: true if chain is empty and the caller can just insert the new + * fragment without searching for the right position. */ static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain, u16 seqno) @@ -306,7 +306,7 @@ free: * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb * to NULL; 3) Error: Return false and free skb. * - * Return: true when packet is merged or buffered, false when skb is not not + * Return: true when the packet is merged or buffered, false when skb is not not * used. */ bool batadv_frag_skb_buffer(struct sk_buff **skb, diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 3a256af92784..53c27c67cc11 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -138,10 +138,10 @@ static bool batadv_mutual_parents(const struct net_device *dev1, * @net_dev: the device to check * * If the user creates any virtual device on top of a batman-adv interface, it - * is important to prevent this new interface to be used to create a new mesh - * network (this behaviour would lead to a batman-over-batman configuration). - * This function recursively checks all the fathers of the device passed as - * argument looking for a batman-adv soft interface. + * is important to prevent this new interface from being used to create a new + * mesh network (this behaviour would lead to a batman-over-batman + * configuration). This function recursively checks all the fathers of the + * device passed as argument looking for a batman-adv soft interface. * * Return: true if the device is descendant of a batman-adv mesh interface (or * if it is a batman-adv interface itself), false otherwise @@ -680,8 +680,8 @@ batadv_hardif_deactivate_interface(struct batadv_hard_iface *hard_iface) * @slave: the interface enslaved in another master * @master: the master from which slave has to be removed * - * Invoke ndo_del_slave on master passing slave as argument. In this way slave - * is free'd and master can correctly change its internal state. 
+ * Invoke ndo_del_slave on master passing slave as argument. In this way the + * slave is free'd and the master can correctly change its internal state. * * Return: 0 on success, a negative value representing the error otherwise */ @@ -818,7 +818,7 @@ err: * @soft_iface: soft interface to check * * This function is only using RCU for locking - the result can therefore be - * off when another functions is modifying the list at the same time. The + * off when another function is modifying the list at the same time. The * caller can use the rtnl_lock to make sure that the count is accurate. * * Return: number of connected/enslaved hard interfaces diff --git a/net/batman-adv/log.h b/net/batman-adv/log.h index f9884dc56cf3..979864c0fa6b 100644 --- a/net/batman-adv/log.h +++ b/net/batman-adv/log.h @@ -69,7 +69,7 @@ int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...) __printf(2, 3); /** - * _batadv_dbg() - Store debug output with(out) ratelimiting + * _batadv_dbg() - Store debug output with(out) rate limiting * @type: type of debug message * @bat_priv: the bat priv with all the soft interface information * @ratelimited: whether output should be rate limited @@ -95,7 +95,7 @@ static inline void _batadv_dbg(int type __always_unused, #endif /** - * batadv_dbg() - Store debug output without ratelimiting + * batadv_dbg() - Store debug output without rate limiting * @type: type of debug message * @bat_priv: the bat priv with all the soft interface information * @arg: format string and variable arguments @@ -104,7 +104,7 @@ static inline void _batadv_dbg(int type __always_unused, _batadv_dbg(type, bat_priv, 0, ## arg) /** - * batadv_dbg_ratelimited() - Store debug output with ratelimiting + * batadv_dbg_ratelimited() - Store debug output with rate limiting * @type: type of debug message * @bat_priv: the bat priv with all the soft interface information * @arg: format string and variable arguments diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index d8a255c85e77..519c08c2cfba 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -666,7 +666,7 @@ unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len) * @vid: the VLAN identifier for which the AP isolation attributed as to be * looked up * - * Return: true if AP isolation is on for the VLAN idenfied by vid, false + * Return: true if AP isolation is on for the VLAN identified by vid, false * otherwise */ bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid) diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 42b8d1e76dea..0393bb9ed3d0 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -308,7 +308,7 @@ static inline bool batadv_has_timed_out(unsigned long timestamp, * @y: value to compare @x against * * It handles overflows/underflows and can correctly check for a predecessor - * unless the variable sequence number has grown by more then + * unless the variable sequence number has grown by more than * 2**(bitwidth(x)-1)-1. 
* * This means that for a u8 with the maximum value 255, it would think: @@ -330,11 +330,11 @@ static inline bool batadv_has_timed_out(unsigned long timestamp, /** * batadv_seq_after() - Checks if a sequence number x is a successor of y - * @x: potential sucessor of @y + * @x: potential successor of @y * @y: value to compare @x against * * It handles overflows/underflows and can correctly check for a successor - * unless the variable sequence number has grown by more then + * unless the variable sequence number has grown by more than * 2**(bitwidth(x)-1)-1. * * This means that for a u8 with the maximum value 255, it would think: diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c index 9ebdc1e864b9..bdc4a1fba1c6 100644 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c @@ -510,7 +510,7 @@ batadv_mcast_mla_softif_get_ipv6(struct net_device *dev, * the given mcast_list. In general, multicast listeners provided by * your multicast receiving applications run directly on this node. * - * If there is a bridge interface on top of dev, collects from that one + * If there is a bridge interface on top of dev, collect from that one * instead. Just like with IP addresses and routes, multicast listeners * will(/should) register to the bridge interface instead of an * enslaved bat0. @@ -832,8 +832,8 @@ batadv_mcast_bridge_log(struct batadv_priv *bat_priv, * @bat_priv: the bat priv with all the soft interface information * @flags: TVLV flags indicating the new multicast state * - * Whenever the multicast TVLV flags this nodes announces change this notifies - * userspace via the 'mcast' log level. + * Whenever the multicast TVLV flags this node announces change, this function + * should be used to notify userspace about the change. */ static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags) { @@ -1244,7 +1244,7 @@ batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv) * @ethhdr: an ethernet header to determine the protocol family from * * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 or - * BATADV_MCAST_WANT_ALL_IPV6 flag, depending on the provided ethhdr, set and + * BATADV_MCAST_WANT_ALL_IPV6 flag, depending on the provided ethhdr, sets and * increases its refcount. */ static struct batadv_orig_node * @@ -1693,7 +1693,7 @@ batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv, } /** - * batadv_mcast_forw_send() - send packet to any detected multicast recpient + * batadv_mcast_forw_send() - send packet to any detected multicast recipient * @bat_priv: the bat priv with all the soft interface information * @skb: the multicast packet to transmit * @vid: the vlan identifier @@ -1742,7 +1742,8 @@ int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb, * @mcast_flags: flags indicating the new multicast state * * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator, - * orig, has toggled then this method updates counter and list accordingly. + * orig, has toggled then this method updates the counter and the list + * accordingly. * * Caller needs to hold orig->mcast_handler_lock. */ @@ -1787,7 +1788,7 @@ static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv, * @mcast_flags: flags indicating the new multicast state * * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has - * toggled then this method updates counter and list accordingly. + * toggled then this method updates the counter and the list accordingly. * * Caller needs to hold orig->mcast_handler_lock. 
*/ @@ -1832,7 +1833,7 @@ static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv, * @mcast_flags: flags indicating the new multicast state * * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has - * toggled then this method updates counter and list accordingly. + * toggled then this method updates the counter and the list accordingly. * * Caller needs to hold orig->mcast_handler_lock. */ @@ -1877,7 +1878,7 @@ static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv, * @mcast_flags: flags indicating the new multicast state * * If the BATADV_MCAST_WANT_NO_RTR4 flag of this originator, orig, has - * toggled then this method updates counter and list accordingly. + * toggled then this method updates the counter and the list accordingly. * * Caller needs to hold orig->mcast_handler_lock. */ @@ -1922,7 +1923,7 @@ static void batadv_mcast_want_rtr4_update(struct batadv_priv *bat_priv, * @mcast_flags: flags indicating the new multicast state * * If the BATADV_MCAST_WANT_NO_RTR6 flag of this originator, orig, has - * toggled then this method updates counter and list accordingly. + * toggled then this method updates the counter and the list accordingly. * * Caller needs to hold orig->mcast_handler_lock. */ diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c index 02ed073f95a9..cfb00dfa468a 100644 --- a/net/batman-adv/netlink.c +++ b/net/batman-adv/netlink.c @@ -640,7 +640,7 @@ batadv_netlink_tp_meter_put(struct sk_buff *msg, u32 cookie) * @bat_priv: the bat priv with all the soft interface information * @dst: destination of tp_meter session * @result: reason for tp meter session stop - * @test_time: total time ot the tp_meter session + * @test_time: total time of the tp_meter session * @total_bytes: bytes acked to the receiver * @cookie: cookie of tp_meter session * diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index b0469d15da0e..48d707850f3e 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@ -134,7 +134,7 @@ static void batadv_nc_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv, } /** - * batadv_nc_mesh_init() - initialise coding hash table and start house keeping + * batadv_nc_mesh_init() - initialise coding hash table and start housekeeping * @bat_priv: the bat priv with all the soft interface information * * Return: 0 on success or negative error number in case of failure @@ -700,7 +700,7 @@ batadv_nc_process_nc_paths(struct batadv_priv *bat_priv, } /** - * batadv_nc_worker() - periodic task for house keeping related to network + * batadv_nc_worker() - periodic task for housekeeping related to network * coding * @work: kernel work struct */ @@ -1316,7 +1316,7 @@ batadv_nc_path_search(struct batadv_priv *bat_priv, } /** - * batadv_nc_skb_src_search() - Loops through the list of neighoring nodes of + * batadv_nc_skb_src_search() - Loops through the list of neighboring nodes of * the skb's sender (may be equal to the originator). * @bat_priv: the bat priv with all the soft interface information * @skb: data skb to forward @@ -1402,10 +1402,10 @@ static void batadv_nc_skb_store_before_coding(struct batadv_priv *bat_priv, * @neigh_node: next hop to forward packet to * @ethhdr: pointer to the ethernet header inside the skb * - * Loops through list of neighboring nodes the next hop has a good connection to - * (receives OGMs with a sufficient quality). 
We need to find a neighbor of our - * next hop that potentially sent a packet which our next hop also received - * (overheard) and has stored for later decoding. + * Loops through the list of neighboring nodes the next hop has a good + * connection to (receives OGMs with a sufficient quality). We need to find a + * neighbor of our next hop that potentially sent a packet which our next hop + * also received (overheard) and has stored for later decoding. * * Return: true if the skb was consumed (encoded packet sent) or false otherwise */ diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 5b0c2fffc214..805d8969bdfb 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -325,7 +325,7 @@ void batadv_neigh_node_put(struct batadv_neigh_node *neigh_node) * @if_outgoing: the interface where the payload packet has been received or * the OGM should be sent to * - * Return: the neighbor which should be router for this orig_node/iface. + * Return: the neighbor which should be the router for this orig_node/iface. * * The object is returned with refcounter increased by 1. */ @@ -515,7 +515,7 @@ out: * Looks for and possibly returns a neighbour belonging to this originator list * which is connected through the provided hard interface. * - * Return: neighbor when found. Othwerwise NULL + * Return: neighbor when found. Otherwise NULL */ static struct batadv_neigh_node * batadv_neigh_node_get(const struct batadv_orig_node *orig_node, @@ -620,7 +620,7 @@ batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface, * * Looks for and possibly returns a neighbour belonging to this hard interface. * - * Return: neighbor when found. Othwerwise NULL + * Return: neighbor when found. Otherwise NULL */ struct batadv_hardif_neigh_node * batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface, @@ -999,7 +999,7 @@ void batadv_originator_free(struct batadv_priv *bat_priv) * @bat_priv: the bat priv with all the soft interface information * @addr: the mac address of the originator * - * Creates a new originator object and initialise all the generic fields. + * Creates a new originator object and initialises all the generic fields. * The new object is not added to the originator list. * * Return: the newly created object or NULL on failure. diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index d343382e9664..27cdf5e4349a 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -449,7 +449,7 @@ free_skb: * @skb: packet to check * @hdr_size: size of header to pull * - * Check for short header and bad addresses in given packet. + * Checks for short header and bad addresses in the given packet. * * Return: negative value when check fails and 0 otherwise. The negative value * depends on the reason: -ENODATA for bad header, -EBADR for broadcast @@ -1113,7 +1113,7 @@ free_skb: * @recv_if: interface that the skb is received on * * This function does one of the three following things: 1) Forward fragment, if - * the assembled packet will exceed our MTU; 2) Buffer fragment, if we till + * the assembled packet will exceed our MTU; 2) Buffer fragment, if we still * lack further fragments; 3) Merge fragments, if we have all needed parts. * * Return: NET_RX_DROP if the skb is not consumed, NET_RX_SUCCESS otherwise. 
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index 7f8ade04e08e..d267b94800d6 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c @@ -605,8 +605,8 @@ bool batadv_forw_packet_steal(struct batadv_forw_packet *forw_packet, * given hard_iface. If hard_iface is NULL forwarding packets on all hard * interfaces will be claimed. * - * The packets are being moved from the forw_list to the cleanup_list and - * by that allows already running threads to notice the claiming. + * The packets are being moved from the forw_list to the cleanup_list. This + * makes it possible for already running threads to notice the claim. */ static void batadv_forw_packet_list_steal(struct hlist_head *forw_list, diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index f1f1c86f3419..23833a0ba5e6 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -406,7 +406,7 @@ end: * @hdr_size: size of already parsed batman-adv header * @orig_node: originator from which the batman-adv packet was sent * - * Sends a ethernet frame to the receive path of the local @soft_iface. + * Sends an ethernet frame to the receive path of the local @soft_iface. * skb->data has still point to the batman-adv header with the size @hdr_size. * The caller has to have parsed this header already and made sure that at least * @hdr_size bytes are still available for pull in @skb. diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c index bd2ac570c42c..db7e3774825b 100644 --- a/net/batman-adv/tp_meter.c +++ b/net/batman-adv/tp_meter.c @@ -66,7 +66,7 @@ /** * BATADV_TP_MAX_RTO - Maximum sender timeout. If the sender RTO gets beyond - * such amound of milliseconds, the receiver is considered unreachable and the + * such amount of milliseconds, the receiver is considered unreachable and the * connection is killed */ #define BATADV_TP_MAX_RTO 30000 @@ -108,10 +108,10 @@ static u32 batadv_tp_session_cookie(const u8 session[2], u8 icmp_uid) * batadv_tp_cwnd() - compute the new cwnd size * @base: base cwnd size value * @increment: the value to add to base to get the new size - * @min: minumim cwnd value (usually MSS) + * @min: minimum cwnd value (usually MSS) * - * Return the new cwnd size and ensures it does not exceed the Advertised - * Receiver Window size. It is wrap around safe. + * Return the new cwnd size and ensure it does not exceed the Advertised + * Receiver Window size. It is wrapped around safely. * For details refer to Section 3.1 of RFC5681 * * Return: new congestion window size in bytes @@ -254,7 +254,7 @@ static void batadv_tp_batctl_error_notify(enum batadv_tp_meter_reason reason, * @dst: the other endpoint MAC address to look for * * Look for a tp_vars object matching dst as end_point and return it after - * having incremented the refcounter. Return NULL is not found + * having increment the refcounter. Return NULL is not found * * Return: matching tp_vars or NULL when no tp_vars with @dst was found */ @@ -291,7 +291,7 @@ static struct batadv_tp_vars *batadv_tp_list_find(struct batadv_priv *bat_priv, * @session: session identifier * * Look for a tp_vars object matching dst as end_point, session as tp meter - * session and return it after having incremented the refcounter. Return NULL + * session and return it after having increment the refcounter. 
Return NULL * is not found * * Return: matching tp_vars or NULL when no tp_vars was found diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index a9635c882fe0..98a0aaaf0d50 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -301,7 +301,7 @@ void batadv_tt_global_entry_put(struct batadv_tt_global_entry *tt_global_entry) * @vid: VLAN identifier * * Return: the number of originators advertising the given address/data - * (excluding ourself). + * (excluding our self). */ int batadv_tt_global_hash_count(struct batadv_priv *bat_priv, const u8 *addr, unsigned short vid) @@ -842,7 +842,7 @@ out: * table. In case of success the value is updated with the real amount of * reserved bytes * Allocate the needed amount of memory for the entire TT TVLV and write its - * header made up by one tvlv_tt_data object and a series of tvlv_tt_vlan_data + * header made up of one tvlv_tt_data object and a series of tvlv_tt_vlan_data * objects, one per active VLAN served by the originator node. * * Return: the size of the allocated buffer or 0 in case of failure. @@ -1674,7 +1674,7 @@ out: * the function argument. * If a TT local entry exists for this non-mesh client remove it. * - * The caller must hold orig_node refcount. + * The caller must hold the orig_node refcount. * * Return: true if the new entry has been added, false otherwise */ @@ -1839,7 +1839,7 @@ out: * @bat_priv: the bat priv with all the soft interface information * @tt_global_entry: global translation table entry to be analyzed * - * This functon assumes the caller holds rcu_read_lock(). + * This function assumes the caller holds rcu_read_lock(). * Return: best originator list entry or NULL on errors. */ static struct batadv_tt_orig_list_entry * @@ -1887,7 +1887,7 @@ batadv_transtable_best_orig(struct batadv_priv *bat_priv, * @tt_global_entry: global translation table entry to be printed * @seq: debugfs table seq_file struct * - * This functon assumes the caller holds rcu_read_lock(). + * This function assumes the caller holds rcu_read_lock(). */ static void batadv_tt_global_print_entry(struct batadv_priv *bat_priv, diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c index 0963a43ad996..6a23a566cde1 100644 --- a/net/batman-adv/tvlv.c +++ b/net/batman-adv/tvlv.c @@ -353,8 +353,8 @@ end: * @tvlv_value: tvlv content * @tvlv_value_len: tvlv content length * - * Return: success if handler was not found or the return value of the handler - * callback. + * Return: success if the handler was not found or the return value of the + * handler callback. */ static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv, struct batadv_tvlv_handler *tvlv_handler, diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index d152b8e81f61..cc151e1f23b2 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -455,8 +455,8 @@ struct batadv_orig_node { spinlock_t tt_buff_lock; /** - * @tt_lock: prevents from updating the table while reading it. Table - * update is made up by two operations (data structure update and + * @tt_lock: avoids concurrent read from and write to the table. Table + * update is made up of two operations (data structure update and * metadata -CRC/TTVN-recalculation) and they have to be executed * atomically in order to avoid another thread to read the * table/metadata between those. 
@@ -748,7 +748,7 @@ struct batadv_neigh_ifinfo { * struct batadv_bcast_duplist_entry - structure for LAN broadcast suppression */ struct batadv_bcast_duplist_entry { - /** @orig: mac address of orig node orginating the broadcast */ + /** @orig: mac address of orig node originating the broadcast */ u8 orig[ETH_ALEN]; /** @crc: crc32 checksum of broadcast payload */ @@ -1010,7 +1010,7 @@ struct batadv_priv_tt { /** * @commit_lock: prevents from executing a local TT commit while reading - * the local table. The local TT commit is made up by two operations + * the local table. The local TT commit is made up of two operations * (data structure update and metadata -CRC/TTVN- recalculation) and * they have to be executed atomically in order to avoid another thread * to read the table/metadata between those. @@ -1024,7 +1024,7 @@ struct batadv_priv_tt { #ifdef CONFIG_BATMAN_ADV_BLA /** - * struct batadv_priv_bla - per mesh interface bridge loope avoidance data + * struct batadv_priv_bla - per mesh interface bridge loop avoidance data */ struct batadv_priv_bla { /** @num_requests: number of bla requests in flight */ @@ -1718,7 +1718,7 @@ struct batadv_priv { spinlock_t softif_vlan_list_lock; #ifdef CONFIG_BATMAN_ADV_BLA - /** @bla: bridge loope avoidance data */ + /** @bla: bridge loop avoidance data */ struct batadv_priv_bla bla; #endif -- cgit v1.2.3 From 3bda14d09dc5789a895ab02b7dcfcec19b0a65b3 Mon Sep 17 00:00:00 2001 From: Linus Lüssing Date: Mon, 1 Jun 2020 22:35:22 +0200 Subject: batman-adv: Introduce a configurable per interface hop penalty MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In some setups multiple hard interfaces with similar link qualities or throughput values are available. But people have expressed the desire to consider one of them as a backup only. Some creative solutions are currently in use: Such people are configuring multiple batman-adv mesh/soft interfaces, wire them together with some veth pairs and then tune the hop penalty to achieve an effect similar to a tunable per interface hop penalty. This patch introduces a new, configurable, per hard interface hop penalty to simplify such setups. Signed-off-by: Linus Lüssing Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- include/uapi/linux/batman_adv.h | 3 ++- net/batman-adv/bat_iv_ogm.c | 17 +++++++++-------- net/batman-adv/bat_v_ogm.c | 13 ++++++++++--- net/batman-adv/hard-interface.c | 2 ++ net/batman-adv/netlink.c | 12 +++++++++++- net/batman-adv/types.h | 6 ++++++ 6 files changed, 40 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h index 8cf2ad11ead9..bb0ae945b36a 100644 --- a/include/uapi/linux/batman_adv.h +++ b/include/uapi/linux/batman_adv.h @@ -427,7 +427,8 @@ enum batadv_nl_attrs { /** * @BATADV_ATTR_HOP_PENALTY: defines the penalty which will be applied - * to an originator message's tq-field on every hop. 
+ * to an originator message's tq-field on every hop and/or per + * hard interface */ BATADV_ATTR_HOP_PENALTY, diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 5b3a41983156..a4faf5f904d9 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -1075,10 +1075,10 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node, struct batadv_neigh_ifinfo *neigh_ifinfo; u8 total_count; u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own; + unsigned int tq_iface_hop_penalty = BATADV_TQ_MAX_VALUE; unsigned int neigh_rq_inv_cube, neigh_rq_max_cube; unsigned int tq_asym_penalty, inv_asym_penalty; unsigned int combined_tq; - unsigned int tq_iface_penalty; bool ret = false; /* find corresponding one hop neighbor */ @@ -1157,31 +1157,32 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node, inv_asym_penalty = BATADV_TQ_MAX_VALUE * neigh_rq_inv_cube; inv_asym_penalty /= neigh_rq_max_cube; tq_asym_penalty = BATADV_TQ_MAX_VALUE - inv_asym_penalty; + tq_iface_hop_penalty -= atomic_read(&if_incoming->hop_penalty); /* penalize if the OGM is forwarded on the same interface. WiFi * interfaces and other half duplex devices suffer from throughput * drops as they can't send and receive at the same time. */ - tq_iface_penalty = BATADV_TQ_MAX_VALUE; if (if_outgoing && if_incoming == if_outgoing && batadv_is_wifi_hardif(if_outgoing)) - tq_iface_penalty = batadv_hop_penalty(BATADV_TQ_MAX_VALUE, - bat_priv); + tq_iface_hop_penalty = batadv_hop_penalty(tq_iface_hop_penalty, + bat_priv); combined_tq = batadv_ogm_packet->tq * tq_own * tq_asym_penalty * - tq_iface_penalty; + tq_iface_hop_penalty; combined_tq /= BATADV_TQ_MAX_VALUE * BATADV_TQ_MAX_VALUE * BATADV_TQ_MAX_VALUE; batadv_ogm_packet->tq = combined_tq; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, - "bidirectional: orig = %pM neigh = %pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, iface_penalty: %3i, total tq: %3i, if_incoming = %s, if_outgoing = %s\n", + "bidirectional: orig = %pM neigh = %pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, iface_hop_penalty: %3i, total tq: %3i, if_incoming = %s, if_outgoing = %s\n", orig_node->orig, orig_neigh_node->orig, total_count, - neigh_rq_count, tq_own, tq_asym_penalty, tq_iface_penalty, - batadv_ogm_packet->tq, if_incoming->net_dev->name, + neigh_rq_count, tq_own, tq_asym_penalty, + tq_iface_hop_penalty, batadv_ogm_packet->tq, + if_incoming->net_dev->name, if_outgoing ? if_outgoing->net_dev->name : "DEFAULT"); /* if link has the minimum required transmission quality diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index 0d404f7bcd9f..0f8495b9eeb1 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -455,15 +455,17 @@ unlock: * @throughput: the current throughput * * Apply a penalty on the current throughput metric value based on the - * characteristic of the interface where the OGM has been received. The return - * value is computed as follows: + * characteristic of the interface where the OGM has been received. + * + * Initially the per hardif hop penalty is applied to the throughput. After + * that the return value is then computed as follows: * - throughput * 50% if the incoming and outgoing interface are the * same WiFi interface and the throughput is above * 1MBit/s * - throughput if the outgoing interface is the default * interface (i.e. 
this OGM is processed for the * internal table and not forwarded) - * - throughput * hop penalty otherwise + * - throughput * node hop penalty otherwise * * Return: the penalised throughput metric. */ @@ -472,9 +474,14 @@ static u32 batadv_v_forward_penalty(struct batadv_priv *bat_priv, struct batadv_hard_iface *if_outgoing, u32 throughput) { + int if_hop_penalty = atomic_read(&if_incoming->hop_penalty); int hop_penalty = atomic_read(&bat_priv->hop_penalty); int hop_penalty_max = BATADV_TQ_MAX_VALUE; + /* Apply per hardif hop penalty */ + throughput = throughput * (hop_penalty_max - if_hop_penalty) / + hop_penalty_max; + /* Don't apply hop penalty in default originator table. */ if (if_outgoing == BATADV_IF_DEFAULT) return throughput; diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 53c27c67cc11..fa06b51c0144 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -939,6 +939,8 @@ batadv_hardif_add_interface(struct net_device *net_dev) if (batadv_is_wifi_hardif(hard_iface)) hard_iface->num_bcasts = BATADV_NUM_BCASTS_WIRELESS; + atomic_set(&hard_iface->hop_penalty, 0); + batadv_v_hardif_init(hard_iface); batadv_check_known_mac_addr(hard_iface->net_dev); diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c index cfb00dfa468a..dc193618a761 100644 --- a/net/batman-adv/netlink.c +++ b/net/batman-adv/netlink.c @@ -826,6 +826,10 @@ static int batadv_netlink_hardif_fill(struct sk_buff *msg, goto nla_put_failure; } + if (nla_put_u8(msg, BATADV_ATTR_HOP_PENALTY, + atomic_read(&hard_iface->hop_penalty))) + goto nla_put_failure; + #ifdef CONFIG_BATMAN_ADV_BATMAN_V if (nla_put_u32(msg, BATADV_ATTR_ELP_INTERVAL, atomic_read(&hard_iface->bat_v.elp_interval))) @@ -920,9 +924,15 @@ static int batadv_netlink_set_hardif(struct sk_buff *skb, { struct batadv_hard_iface *hard_iface = info->user_ptr[1]; struct batadv_priv *bat_priv = info->user_ptr[0]; + struct nlattr *attr; + + if (info->attrs[BATADV_ATTR_HOP_PENALTY]) { + attr = info->attrs[BATADV_ATTR_HOP_PENALTY]; + + atomic_set(&hard_iface->hop_penalty, nla_get_u8(attr)); + } #ifdef CONFIG_BATMAN_ADV_BATMAN_V - struct nlattr *attr; if (info->attrs[BATADV_ATTR_ELP_INTERVAL]) { attr = info->attrs[BATADV_ATTR_ELP_INTERVAL]; diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index cc151e1f23b2..ed519efa3c36 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -208,6 +208,12 @@ struct batadv_hard_iface { /** @rcu: struct used for freeing in an RCU-safe manner */ struct rcu_head rcu; + /** + * @hop_penalty: penalty which will be applied to the tq-field + * of an OGM received via this interface + */ + atomic_t hop_penalty; + /** @bat_iv: per hard-interface B.A.T.M.A.N. IV data */ struct batadv_hard_iface_bat_iv bat_iv; -- cgit v1.2.3 From 5c45a918263e4da142b9cf40a1dfcb4134d454a2 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Tue, 23 Jun 2020 09:08:59 +0200 Subject: net: netdevice.h: add a description for napi_defer_hard_irqs Changeset 6f8b12d661d0 ("net: napi: add hard irqs deferral feature") added a new element at struct net_device. Add a description for it, based on what's described at the changeset which added such feature. 
Fixes: 6f8b12d661d0 ("net: napi: add hard irqs deferral feature") Signed-off-by: Mauro Carvalho Chehab Link: https://lore.kernel.org/r/807a3840e7bc1562adefadb0535c9f47e6ab52e0.1592895969.git.mchehab+huawei@kernel.org Signed-off-by: Jonathan Corbet --- include/linux/netdevice.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 6fc613ed8eae..f3ca52958a17 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1742,6 +1742,8 @@ enum netdev_priv_flags { * @real_num_rx_queues: Number of RX queues currently active in device * @xdp_prog: XDP sockets filter program pointer * @gro_flush_timeout: timeout for GRO layer in NAPI + * @napi_defer_hard_irqs: If not zero, provides a counter that would + * allow to avoid NIC hard IRQ, on busy queues. * * @rx_handler: handler for received packets * @rx_handler_data: XXX: need comments on this one -- cgit v1.2.3 From 5d682f5ec9d1c49e7fe2945e586aa264692859f0 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Tue, 23 Jun 2020 09:09:01 +0200 Subject: net: pylink.h: add kernel-doc descriptions for new fields at phylink_config Some fields were moved from struct phylink into phylink_config. Update the kernel-doc markups for the config struct accordingly Fixes: 5c05c1dbb177 ("net: phylink, dsa: eliminate phylink_fixed_state_cb()") Reviewed-by: Russell King Signed-off-by: Mauro Carvalho Chehab Link: https://lore.kernel.org/r/34970f447ff86415a6cef10a785fbef81c2819a7.1592895969.git.mchehab+huawei@kernel.org Signed-off-by: Jonathan Corbet --- include/linux/phylink.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/linux/phylink.h b/include/linux/phylink.h index cc5b452a184e..02ff1419d4be 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -62,6 +62,10 @@ enum phylink_op_type { * @dev: a pointer to a struct device associated with the MAC * @type: operation type of PHYLINK instance * @pcs_poll: MAC PCS cannot provide link change interrupt + * @poll_fixed_state: if true, starts link_poll, + * if MAC link is at %MLO_AN_FIXED mode. + * @get_fixed_state: callback to execute to determine the fixed link state, + * if MAC link is at %MLO_AN_FIXED mode. */ struct phylink_config { struct device *dev; -- cgit v1.2.3 From 21b9cb34385d93d661a9134adf19eae2bd734218 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Tue, 23 Jun 2020 09:09:03 +0200 Subject: fs: fs.h: fix a kernel-doc parameter description Changeset 3b0311e7ca71 ("vfs: track per-sb writeback errors and report them to syncfs") added a variant of filemap_sample_wb_err(), but it forgot to rename the arguments at the kernel-doc markup. Fix it. 
Fix those warnings: ./include/linux/fs.h:2845: warning: Function parameter or member 'file' not described in 'file_sample_sb_err' ./include/linux/fs.h:2845: warning: Excess function parameter 'mapping' description in 'file_sample_sb_err' Fixes: 3b0311e7ca71 ("vfs: track per-sb writeback errors and report them to syncfs") Signed-off-by: Mauro Carvalho Chehab Reviewed-by: Jan Kara Link: https://lore.kernel.org/r/7b33bbceb29ac80874622a2bc84127bb10103245.1592895969.git.mchehab+huawei@kernel.org Signed-off-by: Jonathan Corbet --- include/linux/fs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/fs.h b/include/linux/fs.h index 6c4ab4dc1cd7..7e17ecc461d5 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2829,7 +2829,7 @@ static inline errseq_t filemap_sample_wb_err(struct address_space *mapping) /** * file_sample_sb_err - sample the current errseq_t to test for later errors - * @mapping: mapping to be sampled + * @file: file pointer to be sampled * * Grab the most current superblock-level errseq_t value for the given * struct file. -- cgit v1.2.3 From 15d737f8a14e73fcf25f6f797630279a203ce99c Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Tue, 23 Jun 2020 09:09:04 +0200 Subject: kcsan: fix a kernel-doc warning One of the kernel-doc markups there have two "note" sections: ./include/linux/kcsan-checks.h:346: warning: duplicate section name 'Note' While this is not the case here, duplicated sections can cause build issues on Sphinx. So, let's change the notes section to use, instead, a list for those 2 notes at the same function. Signed-off-by: Mauro Carvalho Chehab Acked-by: Marco Elver Link: https://lore.kernel.org/r/20f7995fab2ba85ce723203e9a7c822a55cca2af.1592895969.git.mchehab+huawei@kernel.org Signed-off-by: Jonathan Corbet --- include/linux/kcsan-checks.h | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h index 7b0b9c44f5f3..c5f6c1dcf7e3 100644 --- a/include/linux/kcsan-checks.h +++ b/include/linux/kcsan-checks.h @@ -337,11 +337,13 @@ static inline void __kcsan_disable_current(void) { } * release_for_reuse(obj); * } * - * Note: ASSERT_EXCLUSIVE_ACCESS_SCOPED(), if applicable, performs more thorough - * checking if a clear scope where no concurrent accesses are expected exists. + * Note: * - * Note: For cases where the object is freed, `KASAN `_ is a better - * fit to detect use-after-free bugs. + * 1. ASSERT_EXCLUSIVE_ACCESS_SCOPED(), if applicable, performs more thorough + * checking if a clear scope where no concurrent accesses are expected exists. + * + * 2. For cases where the object is freed, `KASAN `_ is a better + * fit to detect use-after-free bugs. * * @var: variable to assert on */ -- cgit v1.2.3 From 985098a05eee6bf5caca7e997e02a5b15242cfa0 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Tue, 23 Jun 2020 09:09:10 +0200 Subject: docs: fix references for DMA*.txt files As we moved those files to core-api, fix references to point to their newer locations. 
Signed-off-by: Mauro Carvalho Chehab Link: https://lore.kernel.org/r/37b2fd159fbc7655dbf33b3eb1215396a25f6344.1592895969.git.mchehab+huawei@kernel.org Signed-off-by: Jonathan Corbet --- Documentation/PCI/pci.rst | 6 +++--- Documentation/block/biodoc.rst | 2 +- Documentation/bus-virt-phys-mapping.txt | 2 +- Documentation/core-api/dma-api.rst | 6 +++--- Documentation/core-api/dma-isa-lpc.rst | 2 +- Documentation/driver-api/usb/dma.rst | 6 +++--- Documentation/translations/ko_KR/memory-barriers.txt | 6 +++--- arch/ia64/hp/common/sba_iommu.c | 12 ++++++------ arch/parisc/kernel/pci-dma.c | 2 +- arch/x86/include/asm/dma-mapping.h | 4 ++-- arch/x86/kernel/amd_gart_64.c | 2 +- drivers/parisc/sba_iommu.c | 14 +++++++------- include/linux/dma-mapping.h | 2 +- include/media/videobuf-dma-sg.h | 2 +- kernel/dma/debug.c | 2 +- 15 files changed, 35 insertions(+), 35 deletions(-) (limited to 'include') diff --git a/Documentation/PCI/pci.rst b/Documentation/PCI/pci.rst index 8c016d8c9862..d10d3fe604c5 100644 --- a/Documentation/PCI/pci.rst +++ b/Documentation/PCI/pci.rst @@ -265,7 +265,7 @@ Set the DMA mask size --------------------- .. note:: If anything below doesn't make sense, please refer to - Documentation/DMA-API.txt. This section is just a reminder that + :doc:`/core-api/dma-api`. This section is just a reminder that drivers need to indicate DMA capabilities of the device and is not an authoritative source for DMA interfaces. @@ -291,7 +291,7 @@ Many 64-bit "PCI" devices (before PCI-X) and some PCI-X devices are Setup shared control data ------------------------- Once the DMA masks are set, the driver can allocate "consistent" (a.k.a. shared) -memory. See Documentation/DMA-API.txt for a full description of +memory. See :doc:`/core-api/dma-api` for a full description of the DMA APIs. This section is just a reminder that it needs to be done before enabling DMA on the device. @@ -421,7 +421,7 @@ owners if there is one. Then clean up "consistent" buffers which contain the control data. -See Documentation/DMA-API.txt for details on unmapping interfaces. +See :doc:`/core-api/dma-api` for details on unmapping interfaces. Unregister from other subsystems diff --git a/Documentation/block/biodoc.rst b/Documentation/block/biodoc.rst index b964796ec9c7..ba7f45d0271c 100644 --- a/Documentation/block/biodoc.rst +++ b/Documentation/block/biodoc.rst @@ -196,7 +196,7 @@ a virtual address mapping (unlike the earlier scheme of virtual address do not have a corresponding kernel virtual address space mapping) and low-memory pages. -Note: Please refer to Documentation/DMA-API-HOWTO.txt for a discussion +Note: Please refer to :doc:`/core-api/dma-api-howto` for a discussion on PCI high mem DMA aspects and mapping of scatter gather lists, and support for 64 bit PCI. diff --git a/Documentation/bus-virt-phys-mapping.txt b/Documentation/bus-virt-phys-mapping.txt index 4bb07c2f3e7d..c7bc99cd2e21 100644 --- a/Documentation/bus-virt-phys-mapping.txt +++ b/Documentation/bus-virt-phys-mapping.txt @@ -8,7 +8,7 @@ How to access I/O mapped memory from within device drivers The virt_to_bus() and bus_to_virt() functions have been superseded by the functionality provided by the PCI DMA interface - (see Documentation/DMA-API-HOWTO.txt). They continue + (see :doc:`/core-api/dma-api-howto`). They continue to be documented below for historical purposes, but new code must not use them. 
--davidm 00/12/12 diff --git a/Documentation/core-api/dma-api.rst b/Documentation/core-api/dma-api.rst index 2d8d2fed7317..63b4a2f20867 100644 --- a/Documentation/core-api/dma-api.rst +++ b/Documentation/core-api/dma-api.rst @@ -5,7 +5,7 @@ Dynamic DMA mapping using the generic device :Author: James E.J. Bottomley This document describes the DMA API. For a more gentle introduction -of the API (and actual examples), see Documentation/DMA-API-HOWTO.txt. +of the API (and actual examples), see :doc:`/core-api/dma-api-howto`. This API is split into two pieces. Part I describes the basic API. Part II describes extensions for supporting non-consistent memory @@ -471,7 +471,7 @@ without the _attrs suffixes, except that they pass an optional dma_attrs. The interpretation of DMA attributes is architecture-specific, and -each attribute should be documented in Documentation/DMA-attributes.txt. +each attribute should be documented in :doc:`/core-api/dma-attributes`. If dma_attrs are 0, the semantics of each of these functions is identical to those of the corresponding function @@ -484,7 +484,7 @@ for DMA:: #include /* DMA_ATTR_FOO should be defined in linux/dma-mapping.h and - * documented in Documentation/DMA-attributes.txt */ + * documented in Documentation/core-api/dma-attributes.rst */ ... unsigned long attr; diff --git a/Documentation/core-api/dma-isa-lpc.rst b/Documentation/core-api/dma-isa-lpc.rst index b1ec7b16c21f..e59a3d35a93d 100644 --- a/Documentation/core-api/dma-isa-lpc.rst +++ b/Documentation/core-api/dma-isa-lpc.rst @@ -17,7 +17,7 @@ To do ISA style DMA you need to include two headers:: #include The first is the generic DMA API used to convert virtual addresses to -bus addresses (see Documentation/DMA-API.txt for details). +bus addresses (see :doc:`/core-api/dma-api` for details). The second contains the routines specific to ISA DMA transfers. Since this is not present on all platforms make sure you construct your diff --git a/Documentation/driver-api/usb/dma.rst b/Documentation/driver-api/usb/dma.rst index 59d5aee89e37..2b3dbd3265b4 100644 --- a/Documentation/driver-api/usb/dma.rst +++ b/Documentation/driver-api/usb/dma.rst @@ -10,7 +10,7 @@ API overview The big picture is that USB drivers can continue to ignore most DMA issues, though they still must provide DMA-ready buffers (see -``Documentation/DMA-API-HOWTO.txt``). That's how they've worked through +:doc:`/core-api/dma-api-howto`). That's how they've worked through the 2.4 (and earlier) kernels, or they can now be DMA-aware. DMA-aware usb drivers: @@ -60,7 +60,7 @@ and effects like cache-trashing can impose subtle penalties. force a consistent memory access ordering by using memory barriers. It's not using a streaming DMA mapping, so it's good for small transfers on systems where the I/O would otherwise thrash an IOMMU mapping. (See - ``Documentation/DMA-API-HOWTO.txt`` for definitions of "coherent" and + :doc:`/core-api/dma-api-howto` for definitions of "coherent" and "streaming" DMA mappings.) Asking for 1/Nth of a page (as well as asking for N pages) is reasonably @@ -91,7 +91,7 @@ Working with existing buffers Existing buffers aren't usable for DMA without first being mapped into the DMA address space of the device. However, most buffers passed to your driver can safely be used with such DMA mapping. 
(See the first section -of Documentation/DMA-API-HOWTO.txt, titled "What memory is DMA-able?") +of :doc:`/core-api/dma-api-howto`, titled "What memory is DMA-able?") - When you're using scatterlists, you can map everything at once. On some systems, this kicks in an IOMMU and turns the scatterlists into single diff --git a/Documentation/translations/ko_KR/memory-barriers.txt b/Documentation/translations/ko_KR/memory-barriers.txt index 34d041d68f78..604cee350e53 100644 --- a/Documentation/translations/ko_KR/memory-barriers.txt +++ b/Documentation/translations/ko_KR/memory-barriers.txt @@ -570,8 +570,8 @@ ACQUIRE 는 해당 오퍼레이션의 로드 부분에만 적용되고 RELEASE [*] 버스 마스터링 DMA 와 일관성에 대해서는 다음을 참고하시기 바랍니다: Documentation/driver-api/pci/pci.rst - Documentation/DMA-API-HOWTO.txt - Documentation/DMA-API.txt + Documentation/core-api/dma-api-howto.rst + Documentation/core-api/dma-api.rst 데이터 의존성 배리어 (역사적) @@ -1907,7 +1907,7 @@ Mandatory 배리어들은 SMP 시스템에서도 UP 시스템에서도 SMP 효 writel_relaxed() 와 같은 완화된 I/O 접근자들에 대한 자세한 내용을 위해서는 "커널 I/O 배리어의 효과" 섹션을, consistent memory 에 대한 자세한 내용을 - 위해선 Documentation/DMA-API.txt 문서를 참고하세요. + 위해선 Documentation/core-api/dma-api.rst 문서를 참고하세요. ========================= diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index a806227c1fad..656a4888c300 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -907,7 +907,7 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) * @dir: dma direction * @attrs: optional dma attributes * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static dma_addr_t sba_map_page(struct device *dev, struct page *page, unsigned long poff, size_t size, @@ -1028,7 +1028,7 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size) * @dir: R/W or both. * @attrs: optional dma attributes * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction dir, unsigned long attrs) @@ -1105,7 +1105,7 @@ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, * @size: number of bytes mapped in driver buffer. * @dma_handle: IOVA of new buffer. * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static void * sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, @@ -1162,7 +1162,7 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, * @vaddr: virtual address IOVA of "consistent" buffer. * @dma_handler: IO virtual address of "consistent" buffer. * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static void sba_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs) @@ -1425,7 +1425,7 @@ static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, * @dir: R/W or both. * @attrs: optional dma attributes * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction dir, @@ -1524,7 +1524,7 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, * @dir: R/W or both. 
* @attrs: optional dma attributes * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction dir, diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index 70cd24bdcfec..4f1596bb1936 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c @@ -3,7 +3,7 @@ ** PARISC 1.1 Dynamic DMA mapping support. ** This implementation is for PA-RISC platforms that do not support ** I/O TLBs (aka DMA address translation hardware). -** See Documentation/DMA-API-HOWTO.txt for interface definitions. +** See Documentation/core-api/dma-api-howto.rst for interface definitions. ** ** (c) Copyright 1999,2000 Hewlett-Packard Company ** (c) Copyright 2000 Grant Grundler diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 6b15a24930e0..fed67eafcacc 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -3,8 +3,8 @@ #define _ASM_X86_DMA_MAPPING_H /* - * IOMMU interface. See Documentation/DMA-API-HOWTO.txt and - * Documentation/DMA-API.txt for documentation. + * IOMMU interface. See Documentation/core-api/dma-api-howto.rst and + * Documentation/core-api/dma-api.rst for documentation. */ #include diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c index 17cb5b933dcf..e89031e9c847 100644 --- a/arch/x86/kernel/amd_gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c @@ -6,7 +6,7 @@ * This allows to use PCI devices that only support 32bit addresses on systems * with more than 4GB. * - * See Documentation/DMA-API-HOWTO.txt for the interface specification. + * See Documentation/core-api/dma-api-howto.rst for the interface specification. * * Copyright 2002 Andi Kleen, SuSE Labs. */ diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 7e112829d250..5368452eb5a6 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -666,7 +666,7 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) * @dev: instance of PCI owned by the driver that's asking * @mask: number of address bits this PCI device can handle * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static int sba_dma_supported( struct device *dev, u64 mask) { @@ -698,7 +698,7 @@ static int sba_dma_supported( struct device *dev, u64 mask) * @size: number of bytes to map in driver buffer. * @direction: R/W or both. * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static dma_addr_t sba_map_single(struct device *dev, void *addr, size_t size, @@ -788,7 +788,7 @@ sba_map_page(struct device *dev, struct page *page, unsigned long offset, * @size: number of bytes mapped in driver buffer. * @direction: R/W or both. * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, @@ -867,7 +867,7 @@ sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, * @size: number of bytes mapped in driver buffer. * @dma_handle: IOVA of new buffer. 
* - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) @@ -898,7 +898,7 @@ static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle * @vaddr: virtual address IOVA of "consistent" buffer. * @dma_handler: IO virtual address of "consistent" buffer. * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static void sba_free(struct device *hwdev, size_t size, void *vaddr, @@ -933,7 +933,7 @@ int dump_run_sg = 0; * @nents: number of entries in list * @direction: R/W or both. * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, @@ -1017,7 +1017,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, * @nents: number of entries in list * @direction: R/W or both. * - * See Documentation/DMA-API-HOWTO.txt + * See Documentation/core-api/dma-api-howto.rst */ static void sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 78f677cf45ab..ef2b153ddbd9 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -14,7 +14,7 @@ /** * List of possible attributes associated with a DMA mapping. The semantics - * of each attribute should be defined in Documentation/DMA-attributes.txt. + * of each attribute should be defined in Documentation/core-api/dma-attributes.rst. */ /* diff --git a/include/media/videobuf-dma-sg.h b/include/media/videobuf-dma-sg.h index b89d5e31f172..34450f7ad510 100644 --- a/include/media/videobuf-dma-sg.h +++ b/include/media/videobuf-dma-sg.h @@ -31,7 +31,7 @@ * does memory allocation too using vmalloc_32(). * * videobuf_dma_*() - * see Documentation/DMA-API-HOWTO.txt, these functions to + * see Documentation/core-api/dma-api-howto.rst, these functions to * basically the same. The map function does also build a * scatterlist for the buffer (and unmap frees it ...) * diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c index 36c962a86bf2..f97f088ace7e 100644 --- a/kernel/dma/debug.c +++ b/kernel/dma/debug.c @@ -1071,7 +1071,7 @@ static void check_unmap(struct dma_debug_entry *ref) /* * Drivers should use dma_mapping_error() to check the returned * addresses of dma_map_single() and dma_map_page(). - * If not, print this warning message. See Documentation/DMA-API.txt. + * If not, print this warning message. See Documentation/core-api/dma-api.rst. */ if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { err_printk(ref->dev, entry, -- cgit v1.2.3 From d4cdd146d0db900b2eb6c2d28cba719b3bf0a928 Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Fri, 29 May 2020 22:46:20 +0100 Subject: kunit: generalize kunit_resource API beyond allocated resources In its original form, the kunit resources API - consisting the struct kunit_resource and associated functions - was focused on adding allocated resources during test operation that would be automatically cleaned up on test completion. The recent RFC patch proposing converting KASAN tests to KUnit [1] showed another potential model - where outside of test context, but with a pointer to the test state, we wish to access/update test-related data, but expressly want to avoid allocations. 
It turns out we can generalize the kunit_resource to support static resources where the struct kunit_resource * is passed in and initialized for us. As part of this work, we also change the "allocation" field to the more general "data" name, as instead of associating an allocation, we can associate a pointer to static data. Static data is distinguished by a NULL free functions. A test is added to cover using kunit_add_resource() with a static resource and data. Finally we also make use of the kernel's krefcount interfaces to manage reference counting of KUnit resources. The motivation for this is simple; if we have kernel threads accessing and using resources (say via kunit_find_resource()) we need to ensure we do not remove said resources (or indeed free them if they were dynamically allocated) until the reference count reaches zero. A new function - kunit_put_resource() - is added to handle this, and it should be called after a thread using kunit_find_resource() is finished with the retrieved resource. We ensure that the functions needed to look up, use and drop reference count are "static inline"-defined so that they can be used by builtin code as well as modules in the case that KUnit is built as a module. A cosmetic change here also; I've tried moving to kunit_[action]_resource() as the format of function names for consistency and readability. [1] https://lkml.org/lkml/2020/2/26/1286 Signed-off-by: Alan Maguire Reviewed-by: Brendan Higgins Signed-off-by: Shuah Khan --- include/kunit/test.h | 156 +++++++++++++++++++++++++++++++++++++--------- lib/kunit/kunit-test.c | 74 ++++++++++++++++------ lib/kunit/string-stream.c | 14 ++--- lib/kunit/test.c | 153 ++++++++++++++++++++++++--------------------- 4 files changed, 268 insertions(+), 129 deletions(-) (limited to 'include') diff --git a/include/kunit/test.h b/include/kunit/test.h index 47e61e1d5337..f9b914ebfd75 100644 --- a/include/kunit/test.h +++ b/include/kunit/test.h @@ -15,6 +15,7 @@ #include #include #include +#include struct kunit_resource; @@ -23,13 +24,19 @@ typedef void (*kunit_resource_free_t)(struct kunit_resource *); /** * struct kunit_resource - represents a *test managed resource* - * @allocation: for the user to store arbitrary data. + * @data: for the user to store arbitrary data. * @free: a user supplied function to free the resource. Populated by - * kunit_alloc_resource(). + * kunit_resource_alloc(). * * Represents a *test managed resource*, a resource which will automatically be * cleaned up at the end of a test case. * + * Resources are reference counted so if a resource is retrieved via + * kunit_alloc_and_get_resource() or kunit_find_resource(), we need + * to call kunit_put_resource() to reduce the resource reference count + * when finished with it. Note that kunit_alloc_resource() does not require a + * kunit_resource_put() because it does not retrieve the resource itself. + * * Example: * * .. 
code-block:: c @@ -42,9 +49,9 @@ typedef void (*kunit_resource_free_t)(struct kunit_resource *); * static int kunit_kmalloc_init(struct kunit_resource *res, void *context) * { * struct kunit_kmalloc_params *params = context; - * res->allocation = kmalloc(params->size, params->gfp); + * res->data = kmalloc(params->size, params->gfp); * - * if (!res->allocation) + * if (!res->data) * return -ENOMEM; * * return 0; @@ -52,30 +59,26 @@ typedef void (*kunit_resource_free_t)(struct kunit_resource *); * * static void kunit_kmalloc_free(struct kunit_resource *res) * { - * kfree(res->allocation); + * kfree(res->data); * } * * void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp) * { * struct kunit_kmalloc_params params; - * struct kunit_resource *res; * * params.size = size; * params.gfp = gfp; * - * res = kunit_alloc_resource(test, kunit_kmalloc_init, + * return kunit_alloc_resource(test, kunit_kmalloc_init, * kunit_kmalloc_free, &params); - * if (res) - * return res->allocation; - * - * return NULL; * } */ struct kunit_resource { - void *allocation; - kunit_resource_free_t free; + void *data; /* private: internal use only. */ + kunit_resource_free_t free; + struct kref refcount; struct list_head node; }; @@ -283,6 +286,64 @@ struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test, gfp_t internal_gfp, void *context); +/** + * kunit_get_resource() - Hold resource for use. Should not need to be used + * by most users as we automatically get resources + * retrieved by kunit_find_resource*(). + * @res: resource + */ +static inline void kunit_get_resource(struct kunit_resource *res) +{ + kref_get(&res->refcount); +} + +/* + * Called when refcount reaches zero via kunit_put_resources(); + * should not be called directly. + */ +static inline void kunit_release_resource(struct kref *kref) +{ + struct kunit_resource *res = container_of(kref, struct kunit_resource, + refcount); + + /* If free function is defined, resource was dynamically allocated. */ + if (res->free) { + res->free(res); + kfree(res); + } +} + +/** + * kunit_put_resource() - When caller is done with retrieved resource, + * kunit_put_resource() should be called to drop + * reference count. The resource list maintains + * a reference count on resources, so if no users + * are utilizing a resource and it is removed from + * the resource list, it will be freed via the + * associated free function (if any). Only + * needs to be used if we alloc_and_get() or + * find() resource. + * @res: resource + */ +static inline void kunit_put_resource(struct kunit_resource *res) +{ + kref_put(&res->refcount, kunit_release_resource); +} + +/** + * kunit_add_resource() - Add a *test managed resource*. + * @test: The test context object. + * @init: a user-supplied function to initialize the result (if needed). If + * none is supplied, the resource data value is simply set to @data. + * If an init function is supplied, @data is passed to it instead. + * @free: a user-supplied function to free the resource (if needed). + * @data: value to pass to init function or set in resource data field. + */ +int kunit_add_resource(struct kunit *test, + kunit_resource_init_t init, + kunit_resource_free_t free, + struct kunit_resource *res, + void *data); /** * kunit_alloc_resource() - Allocates a *test managed resource*. * @test: The test context object. @@ -295,7 +356,7 @@ struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test, * cleaned up at the end of a test case. See &struct kunit_resource for an * example.
* - * NOTE: KUnit needs to allocate memory for each kunit_resource object. You must + * Note: KUnit needs to allocate memory for a kunit_resource object. You must * specify an @internal_gfp that is compatible with the use context of your * resource. */ @@ -307,54 +368,89 @@ static inline void *kunit_alloc_resource(struct kunit *test, { struct kunit_resource *res; - res = kunit_alloc_and_get_resource(test, init, free, internal_gfp, - context); + res = kzalloc(sizeof(*res), internal_gfp); + if (!res) + return NULL; - if (res) - return res->allocation; + if (!kunit_add_resource(test, init, free, res, context)) + return res->data; return NULL; } typedef bool (*kunit_resource_match_t)(struct kunit *test, - const void *res, + struct kunit_resource *res, void *match_data); /** * kunit_resource_instance_match() - Match a resource with the same instance. * @test: Test case to which the resource belongs. - * @res: The data stored in kunit_resource->allocation. + * @res: The resource. * @match_data: The resource pointer to match against. * * An instance of kunit_resource_match_t that matches a resource whose * allocation matches @match_data. */ static inline bool kunit_resource_instance_match(struct kunit *test, - const void *res, + struct kunit_resource *res, void *match_data) { - return res == match_data; + return res->data == match_data; } /** - * kunit_resource_destroy() - Find a kunit_resource and destroy it. + * kunit_find_resource() - Find a resource using match function/data. + * @test: Test case to which the resource belongs. + * @match: match function to be applied to resources/match data. + * @match_data: data to be used in matching. + */ +static inline struct kunit_resource * +kunit_find_resource(struct kunit *test, + kunit_resource_match_t match, + void *match_data) +{ + struct kunit_resource *res, *found = NULL; + + spin_lock(&test->lock); + + list_for_each_entry_reverse(res, &test->resources, node) { + if (match(test, res, (void *)match_data)) { + found = res; + kunit_get_resource(found); + break; + } + } + + spin_unlock(&test->lock); + + return found; +} + +/** + * kunit_destroy_resource() - Find a kunit_resource and destroy it. * @test: Test case to which the resource belongs. * @match: Match function. Returns whether a given resource matches @match_data. - * @free: Must match free on the kunit_resource to free. * @match_data: Data passed into @match. * - * Free the latest kunit_resource of @test for which @free matches the - * kunit_resource_free_t associated with the resource and for which @match - * returns true. - * * RETURNS: * 0 if kunit_resource is found and freed, -ENOENT if not found. */ -int kunit_resource_destroy(struct kunit *test, +int kunit_destroy_resource(struct kunit *test, kunit_resource_match_t match, - kunit_resource_free_t free, void *match_data); +/** + * kunit_remove_resource: remove resource from resource list associated with + * test. + * @test: The test context object. + * @res: The resource to be removed. + * + * Note that the resource will not be immediately freed since it is likely + * the caller has a reference to it via alloc_and_get() or find(); + * in this case a final call to kunit_put_resource() is required. + */ +void kunit_remove_resource(struct kunit *test, struct kunit_resource *res); + /** * kunit_kmalloc() - Like kmalloc() except the allocation is *test managed*. * @test: The test context object. 
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c index 4f3d36a72f8f..03f3ecaa1ef9 100644 --- a/lib/kunit/kunit-test.c +++ b/lib/kunit/kunit-test.c @@ -118,14 +118,14 @@ static int fake_resource_init(struct kunit_resource *res, void *context) { struct kunit_test_resource_context *ctx = context; - res->allocation = &ctx->is_resource_initialized; + res->data = &ctx->is_resource_initialized; ctx->is_resource_initialized = true; return 0; } static void fake_resource_free(struct kunit_resource *res) { - bool *is_resource_initialized = res->allocation; + bool *is_resource_initialized = res->data; *is_resource_initialized = false; } @@ -154,11 +154,21 @@ static void kunit_resource_test_alloc_resource(struct kunit *test) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, res); KUNIT_EXPECT_PTR_EQ(test, &ctx->is_resource_initialized, - (bool *) res->allocation); + (bool *)res->data); KUNIT_EXPECT_TRUE(test, list_is_last(&res->node, &ctx->test.resources)); KUNIT_EXPECT_PTR_EQ(test, free, res->free); + + kunit_put_resource(res); } +/* + * Note: tests below use kunit_alloc_and_get_resource(), so as a consequence + * they have a reference to the associated resource that they must release + * via kunit_put_resource(). In normal operation, users will only + * have to do this for cases where they use kunit_find_resource(), and the + * kunit_alloc_resource() function will be used (which does not take a + * resource reference). + */ static void kunit_resource_test_destroy_resource(struct kunit *test) { struct kunit_test_resource_context *ctx = test->priv; @@ -169,11 +179,12 @@ static void kunit_resource_test_destroy_resource(struct kunit *test) GFP_KERNEL, ctx); + kunit_put_resource(res); + KUNIT_ASSERT_FALSE(test, - kunit_resource_destroy(&ctx->test, + kunit_destroy_resource(&ctx->test, kunit_resource_instance_match, - res->free, - res->allocation)); + res->data)); KUNIT_EXPECT_FALSE(test, ctx->is_resource_initialized); KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources)); @@ -191,6 +202,7 @@ static void kunit_resource_test_cleanup_resources(struct kunit *test) fake_resource_free, GFP_KERNEL, ctx); + kunit_put_resource(resources[i]); } kunit_cleanup(&ctx->test); @@ -221,14 +233,14 @@ static int fake_resource_2_init(struct kunit_resource *res, void *context) KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, allocate_order, 2); - res->allocation = ctx; + res->data = ctx; return 0; } static void fake_resource_2_free(struct kunit_resource *res) { - struct kunit_test_resource_context *ctx = res->allocation; + struct kunit_test_resource_context *ctx = res->data; KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, free_order, 2); } @@ -236,23 +248,26 @@ static void fake_resource_2_free(struct kunit_resource *res) static int fake_resource_1_init(struct kunit_resource *res, void *context) { struct kunit_test_resource_context *ctx = context; + struct kunit_resource *res2; - kunit_alloc_and_get_resource(&ctx->test, - fake_resource_2_init, - fake_resource_2_free, - GFP_KERNEL, - ctx); + res2 = kunit_alloc_and_get_resource(&ctx->test, + fake_resource_2_init, + fake_resource_2_free, + GFP_KERNEL, + ctx); KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, allocate_order, 1); - res->allocation = ctx; + res->data = ctx; + + kunit_put_resource(res2); return 0; } static void fake_resource_1_free(struct kunit_resource *res) { - struct kunit_test_resource_context *ctx = res->allocation; + struct kunit_test_resource_context *ctx = res->data; KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, free_order, 1); } @@ -265,13 +280,14 @@ static void fake_resource_1_free(struct kunit_resource 
*res) static void kunit_resource_test_proper_free_ordering(struct kunit *test) { struct kunit_test_resource_context *ctx = test->priv; + struct kunit_resource *res; /* fake_resource_1 allocates a fake_resource_2 in its init. */ - kunit_alloc_and_get_resource(&ctx->test, - fake_resource_1_init, - fake_resource_1_free, - GFP_KERNEL, - ctx); + res = kunit_alloc_and_get_resource(&ctx->test, + fake_resource_1_init, + fake_resource_1_free, + GFP_KERNEL, + ctx); /* * Since fake_resource_2_init calls KUNIT_RESOURCE_TEST_MARK_ORDER @@ -281,6 +297,8 @@ static void kunit_resource_test_proper_free_ordering(struct kunit *test) KUNIT_EXPECT_EQ(test, ctx->allocate_order[0], 2); KUNIT_EXPECT_EQ(test, ctx->allocate_order[1], 1); + kunit_put_resource(res); + kunit_cleanup(&ctx->test); /* @@ -292,6 +310,21 @@ static void kunit_resource_test_proper_free_ordering(struct kunit *test) KUNIT_EXPECT_EQ(test, ctx->free_order[1], 2); } +static void kunit_resource_test_static(struct kunit *test) +{ + struct kunit_test_resource_context ctx; + struct kunit_resource res; + + KUNIT_EXPECT_EQ(test, kunit_add_resource(test, NULL, NULL, &res, &ctx), + 0); + + KUNIT_EXPECT_PTR_EQ(test, res.data, (void *)&ctx); + + kunit_cleanup(test); + + KUNIT_EXPECT_TRUE(test, list_empty(&test->resources)); +} + static int kunit_resource_test_init(struct kunit *test) { struct kunit_test_resource_context *ctx = @@ -320,6 +353,7 @@ static struct kunit_case kunit_resource_test_cases[] = { KUNIT_CASE(kunit_resource_test_destroy_resource), KUNIT_CASE(kunit_resource_test_cleanup_resources), KUNIT_CASE(kunit_resource_test_proper_free_ordering), + KUNIT_CASE(kunit_resource_test_static), {} }; diff --git a/lib/kunit/string-stream.c b/lib/kunit/string-stream.c index 350392013c14..141789ca8949 100644 --- a/lib/kunit/string-stream.c +++ b/lib/kunit/string-stream.c @@ -33,14 +33,14 @@ static int string_stream_fragment_init(struct kunit_resource *res, if (!frag->fragment) return -ENOMEM; - res->allocation = frag; + res->data = frag; return 0; } static void string_stream_fragment_free(struct kunit_resource *res) { - struct string_stream_fragment *frag = res->allocation; + struct string_stream_fragment *frag = res->data; list_del(&frag->node); kunit_kfree(frag->test, frag->fragment); @@ -65,9 +65,8 @@ static struct string_stream_fragment *alloc_string_stream_fragment( static int string_stream_fragment_destroy(struct string_stream_fragment *frag) { - return kunit_resource_destroy(frag->test, + return kunit_destroy_resource(frag->test, kunit_resource_instance_match, - string_stream_fragment_free, frag); } @@ -179,7 +178,7 @@ static int string_stream_init(struct kunit_resource *res, void *context) if (!stream) return -ENOMEM; - res->allocation = stream; + res->data = stream; stream->gfp = ctx->gfp; stream->test = ctx->test; INIT_LIST_HEAD(&stream->fragments); @@ -190,7 +189,7 @@ static int string_stream_init(struct kunit_resource *res, void *context) static void string_stream_free(struct kunit_resource *res) { - struct string_stream *stream = res->allocation; + struct string_stream *stream = res->data; string_stream_clear(stream); } @@ -211,8 +210,7 @@ struct string_stream *alloc_string_stream(struct kunit *test, gfp_t gfp) int string_stream_destroy(struct string_stream *stream) { - return kunit_resource_destroy(stream->test, + return kunit_destroy_resource(stream->test, kunit_resource_instance_match, - string_stream_free, stream); } diff --git a/lib/kunit/test.c b/lib/kunit/test.c index ccb2ffad8dcf..569b7f9f17e4 100644 --- a/lib/kunit/test.c +++ 
b/lib/kunit/test.c @@ -8,6 +8,7 @@ #include #include +#include #include #include "debugfs.h" @@ -406,90 +407,92 @@ void __kunit_test_suites_exit(struct kunit_suite **suites) } EXPORT_SYMBOL_GPL(__kunit_test_suites_exit); -struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test, - kunit_resource_init_t init, - kunit_resource_free_t free, - gfp_t internal_gfp, - void *context) +/* + * Used for static resources and when a kunit_resource * has been created by + * kunit_alloc_resource(). When an init function is supplied, @data is passed + * into the init function; otherwise, we simply set the resource data field to + * the data value passed in. + */ +int kunit_add_resource(struct kunit *test, + kunit_resource_init_t init, + kunit_resource_free_t free, + struct kunit_resource *res, + void *data) { - struct kunit_resource *res; - int ret; + int ret = 0; - res = kzalloc(sizeof(*res), internal_gfp); - if (!res) - return NULL; + res->free = free; + kref_init(&res->refcount); - ret = init(res, context); - if (ret) - return NULL; + if (init) { + ret = init(res, data); + if (ret) + return ret; + } else { + res->data = data; + } - res->free = free; spin_lock(&test->lock); list_add_tail(&res->node, &test->resources); + /* refcount for list is established by kref_init() */ spin_unlock(&test->lock); - return res; -} -EXPORT_SYMBOL_GPL(kunit_alloc_and_get_resource); - -static void kunit_resource_free(struct kunit *test, struct kunit_resource *res) -{ - res->free(res); - kfree(res); + return ret; } +EXPORT_SYMBOL_GPL(kunit_add_resource); -static struct kunit_resource *kunit_resource_find(struct kunit *test, - kunit_resource_match_t match, - kunit_resource_free_t free, - void *match_data) +struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test, + kunit_resource_init_t init, + kunit_resource_free_t free, + gfp_t internal_gfp, + void *data) { - struct kunit_resource *resource; + struct kunit_resource *res; + int ret; - lockdep_assert_held(&test->lock); + res = kzalloc(sizeof(*res), internal_gfp); + if (!res) + return NULL; - list_for_each_entry_reverse(resource, &test->resources, node) { - if (resource->free != free) - continue; - if (match(test, resource->allocation, match_data)) - return resource; + ret = kunit_add_resource(test, init, free, res, data); + if (!ret) { + /* + * bump refcount for get; kunit_resource_put() should be called + * when done. 
+ */ + kunit_get_resource(res); + return res; } - return NULL; } +EXPORT_SYMBOL_GPL(kunit_alloc_and_get_resource); -static struct kunit_resource *kunit_resource_remove( - struct kunit *test, - kunit_resource_match_t match, - kunit_resource_free_t free, - void *match_data) +void kunit_remove_resource(struct kunit *test, struct kunit_resource *res) { - struct kunit_resource *resource; - spin_lock(&test->lock); - resource = kunit_resource_find(test, match, free, match_data); - if (resource) - list_del(&resource->node); + list_del(&res->node); spin_unlock(&test->lock); - - return resource; + kunit_put_resource(res); } +EXPORT_SYMBOL_GPL(kunit_remove_resource); -int kunit_resource_destroy(struct kunit *test, - kunit_resource_match_t match, - kunit_resource_free_t free, +int kunit_destroy_resource(struct kunit *test, kunit_resource_match_t match, void *match_data) { - struct kunit_resource *resource; - - resource = kunit_resource_remove(test, match, free, match_data); + struct kunit_resource *res = kunit_find_resource(test, match, + match_data); - if (!resource) + if (!res) return -ENOENT; - kunit_resource_free(test, resource); + kunit_remove_resource(test, res); + + /* We have a reference also via _find(); drop it. */ + kunit_put_resource(res); + return 0; } -EXPORT_SYMBOL_GPL(kunit_resource_destroy); +EXPORT_SYMBOL_GPL(kunit_destroy_resource); struct kunit_kmalloc_params { size_t size; @@ -500,8 +503,8 @@ static int kunit_kmalloc_init(struct kunit_resource *res, void *context) { struct kunit_kmalloc_params *params = context; - res->allocation = kmalloc(params->size, params->gfp); - if (!res->allocation) + res->data = kmalloc(params->size, params->gfp); + if (!res->data) return -ENOMEM; return 0; @@ -509,7 +512,7 @@ static int kunit_kmalloc_init(struct kunit_resource *res, void *context) static void kunit_kmalloc_free(struct kunit_resource *res) { - kfree(res->allocation); + kfree(res->data); } void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp) @@ -529,20 +532,25 @@ EXPORT_SYMBOL_GPL(kunit_kmalloc); void kunit_kfree(struct kunit *test, const void *ptr) { - int rc; + struct kunit_resource *res; - rc = kunit_resource_destroy(test, - kunit_resource_instance_match, - kunit_kmalloc_free, - (void *)ptr); + res = kunit_find_resource(test, kunit_resource_instance_match, + (void *)ptr); + + /* + * Removing the resource from the list of resources drops the + * reference count to 1; the final put will trigger the free. + */ + kunit_remove_resource(test, res); + + kunit_put_resource(res); - WARN_ON(rc); } EXPORT_SYMBOL_GPL(kunit_kfree); void kunit_cleanup(struct kunit *test) { - struct kunit_resource *resource; + struct kunit_resource *res; /* * test->resources is a stack - each allocation must be freed in the @@ -559,13 +567,16 @@ void kunit_cleanup(struct kunit *test) spin_unlock(&test->lock); break; } - resource = list_last_entry(&test->resources, - struct kunit_resource, - node); - list_del(&resource->node); + res = list_last_entry(&test->resources, + struct kunit_resource, + node); + /* + * Need to unlock here as a resource may remove another + * resource, and this can't happen if the test->lock + * is held. 
+ */ spin_unlock(&test->lock); - - kunit_resource_free(test, resource); + kunit_remove_resource(test, res); } } EXPORT_SYMBOL_GPL(kunit_cleanup); -- cgit v1.2.3 From 725aca9585956676687c4cb803e88f770b0df2b2 Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Fri, 29 May 2020 22:46:21 +0100 Subject: kunit: add support for named resources The kunit resources API allows for custom initialization and cleanup code (init/fini); here a new resource add function sets the "struct kunit_resource" "name" field, and calls the standard add function. Having a simple way to name resources is useful in cases such as multithreaded tests where a set of resources are shared among threads; a pointer to the "struct kunit *" test state then is all that is needed to retrieve and use named resources. Support is provided to add, find and destroy named resources; the latter two are simply wrappers that use a "match-by-name" callback. If an attempt to add a resource with a name that already exists is made kunit_add_named_resource() will return -EEXIST. Signed-off-by: Alan Maguire Reviewed-by: Brendan Higgins Signed-off-by: Shuah Khan --- include/kunit/test.h | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++ lib/kunit/kunit-test.c | 37 ++++++++++++++++++++++++++++++++++ lib/kunit/test.c | 24 ++++++++++++++++++++++ 3 files changed, 115 insertions(+) (limited to 'include') diff --git a/include/kunit/test.h b/include/kunit/test.h index f9b914ebfd75..59f3144f009a 100644 --- a/include/kunit/test.h +++ b/include/kunit/test.h @@ -72,9 +72,15 @@ typedef void (*kunit_resource_free_t)(struct kunit_resource *); * return kunit_alloc_resource(test, kunit_kmalloc_init, * kunit_kmalloc_free, &params); * } + * + * Resources can also be named, with lookup/removal done on a name + * basis also. kunit_add_named_resource(), kunit_find_named_resource() + * and kunit_destroy_named_resource(). Resource names must be + * unique within the test instance. */ struct kunit_resource { void *data; + const char *name; /* optional name */ /* private: internal use only. */ kunit_resource_free_t free; @@ -344,6 +350,21 @@ int kunit_add_resource(struct kunit *test, kunit_resource_free_t free, struct kunit_resource *res, void *data); + +/** + * kunit_add_named_resource() - Add a named *test managed resource*. + * @test: The test context object. + * @init: a user-supplied function to initialize the resource data, if needed. + * @free: a user-supplied function to free the resource data, if needed. + * @name_data: name and data to be set for resource. + */ +int kunit_add_named_resource(struct kunit *test, + kunit_resource_init_t init, + kunit_resource_free_t free, + struct kunit_resource *res, + const char *name, + void *data); + /** * kunit_alloc_resource() - Allocates a *test managed resource*. * @test: The test context object. @@ -398,6 +419,19 @@ static inline bool kunit_resource_instance_match(struct kunit *test, return res->data == match_data; } +/** + * kunit_resource_name_match() - Match a resource with the same name. + * @test: Test case to which the resource belongs. + * @res: The resource. + * @match_name: The name to match against. + */ +static inline bool kunit_resource_name_match(struct kunit *test, + struct kunit_resource *res, + void *match_name) +{ + return res->name && strcmp(res->name, match_name) == 0; +} + /** * kunit_find_resource() - Find a resource using match function/data. * @test: Test case to which the resource belongs.
@@ -426,6 +460,19 @@ kunit_find_resource(struct kunit *test, return found; } +/** + * kunit_find_named_resource() - Find a resource using match name. + * @test: Test case to which the resource belongs. + * @name: match name. + */ +static inline struct kunit_resource * +kunit_find_named_resource(struct kunit *test, + const char *name) +{ + return kunit_find_resource(test, kunit_resource_name_match, + (void *)name); +} + /** * kunit_destroy_resource() - Find a kunit_resource and destroy it. * @test: Test case to which the resource belongs. @@ -439,6 +486,13 @@ int kunit_destroy_resource(struct kunit *test, kunit_resource_match_t match, void *match_data); +static inline int kunit_destroy_named_resource(struct kunit *test, + const char *name) +{ + return kunit_destroy_resource(test, kunit_resource_name_match, + (void *)name); +} + /** * kunit_remove_resource: remove resource from resource list associated with * test. diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c index 03f3ecaa1ef9..69f902440a0e 100644 --- a/lib/kunit/kunit-test.c +++ b/lib/kunit/kunit-test.c @@ -325,6 +325,42 @@ static void kunit_resource_test_static(struct kunit *test) KUNIT_EXPECT_TRUE(test, list_empty(&test->resources)); } +static void kunit_resource_test_named(struct kunit *test) +{ + struct kunit_resource res1, res2, *found = NULL; + struct kunit_test_resource_context ctx; + + KUNIT_EXPECT_EQ(test, + kunit_add_named_resource(test, NULL, NULL, &res1, + "resource_1", &ctx), + 0); + KUNIT_EXPECT_PTR_EQ(test, res1.data, (void *)&ctx); + + KUNIT_EXPECT_EQ(test, + kunit_add_named_resource(test, NULL, NULL, &res1, + "resource_1", &ctx), + -EEXIST); + + KUNIT_EXPECT_EQ(test, + kunit_add_named_resource(test, NULL, NULL, &res2, + "resource_2", &ctx), + 0); + + found = kunit_find_named_resource(test, "resource_1"); + + KUNIT_EXPECT_PTR_EQ(test, found, &res1); + + if (found) + kunit_put_resource(&res1); + + KUNIT_EXPECT_EQ(test, kunit_destroy_named_resource(test, "resource_2"), + 0); + + kunit_cleanup(test); + + KUNIT_EXPECT_TRUE(test, list_empty(&test->resources)); +} + static int kunit_resource_test_init(struct kunit *test) { struct kunit_test_resource_context *ctx = @@ -354,6 +390,7 @@ static struct kunit_case kunit_resource_test_cases[] = { KUNIT_CASE(kunit_resource_test_cleanup_resources), KUNIT_CASE(kunit_resource_test_proper_free_ordering), KUNIT_CASE(kunit_resource_test_static), + KUNIT_CASE(kunit_resource_test_named), {} }; diff --git a/lib/kunit/test.c b/lib/kunit/test.c index 569b7f9f17e4..c36037200310 100644 --- a/lib/kunit/test.c +++ b/lib/kunit/test.c @@ -441,6 +441,30 @@ int kunit_add_resource(struct kunit *test, } EXPORT_SYMBOL_GPL(kunit_add_resource); +int kunit_add_named_resource(struct kunit *test, + kunit_resource_init_t init, + kunit_resource_free_t free, + struct kunit_resource *res, + const char *name, + void *data) +{ + struct kunit_resource *existing; + + if (!name) + return -EINVAL; + + existing = kunit_find_named_resource(test, name); + if (existing) { + kunit_put_resource(existing); + return -EEXIST; + } + + res->name = name; + + return kunit_add_resource(test, init, free, res, data); +} +EXPORT_SYMBOL_GPL(kunit_add_named_resource); + struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test, kunit_resource_init_t init, kunit_resource_free_t free, -- cgit v1.2.3 From 333740981f94fa80326cc8e5d2da105f17bc1dd5 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Fri, 26 Jun 2020 17:53:23 +0200 Subject: net: mdio: add a forward declaration for reset_control to mdio.h This header 
refers to struct reset_control but doesn't include any reset header. The structure definition is probably somehow indirectly pulled in since no warnings are reported but for the sake of correctness add the forward declaration for struct reset_control. Signed-off-by: Bartosz Golaszewski Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- include/linux/mdio.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/mdio.h b/include/linux/mdio.h index 36d2e0673d03..898cbf00332a 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -18,6 +18,7 @@ struct gpio_desc; struct mii_bus; +struct reset_control; /* Multiple levels of nesting are possible. However typically this is * limited to nested DSA like layer, a MUX layer, and the normal -- cgit v1.2.3 From adf4f9d49c74a812757c5c67879ece0e54b75417 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sun, 7 Jun 2020 23:51:23 +0200 Subject: irqchip/vic: Drop cascaded intialization call We got rid of the last user of the cascaded intialization from board files so drop this API. Signed-off-by: Linus Walleij Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200607215124.48638-1-linus.walleij@linaro.org --- drivers/irqchip/irq-vic.c | 21 --------------------- include/linux/irqchip/arm-vic.h | 2 -- 2 files changed, 23 deletions(-) (limited to 'include') diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c index 3c87d925f74c..927ff2c1bf67 100644 --- a/drivers/irqchip/irq-vic.c +++ b/drivers/irqchip/irq-vic.c @@ -481,27 +481,6 @@ void __init vic_init(void __iomem *base, unsigned int irq_start, __vic_init(base, 0, irq_start, vic_sources, resume_sources, NULL); } -/** - * vic_init_cascaded() - initialise a cascaded vectored interrupt controller - * @base: iomem base address - * @parent_irq: the parent IRQ we're cascaded off - * @vic_sources: bitmask of interrupt sources to allow - * @resume_sources: bitmask of interrupt sources to allow for resume - * - * This returns the base for the new interrupts or negative on error. - */ -int __init vic_init_cascaded(void __iomem *base, unsigned int parent_irq, - u32 vic_sources, u32 resume_sources) -{ - struct vic_device *v; - - v = &vic_devices[vic_id]; - __vic_init(base, parent_irq, 0, vic_sources, resume_sources, NULL); - /* Return out acquired base */ - return v->irq; -} -EXPORT_SYMBOL_GPL(vic_init_cascaded); - #ifdef CONFIG_OF static int __init vic_of_init(struct device_node *node, struct device_node *parent) diff --git a/include/linux/irqchip/arm-vic.h b/include/linux/irqchip/arm-vic.h index a158b97242c7..2a4b6a5d8522 100644 --- a/include/linux/irqchip/arm-vic.h +++ b/include/linux/irqchip/arm-vic.h @@ -19,7 +19,5 @@ struct pt_regs; void __vic_init(void __iomem *base, int parent_irq, int irq_start, u32 vic_sources, u32 resume_sources, struct device_node *node); void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources); -int vic_init_cascaded(void __iomem *base, unsigned int parent_irq, - u32 vic_sources, u32 resume_sources); #endif -- cgit v1.2.3 From b0b92ab6a86e59779c2b17c5f611b04120fdfbb6 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sun, 7 Jun 2020 23:51:24 +0200 Subject: irqchip/vic: Cut down the external API There are registers and functions in the header file that are only used inside the driver. Move these into the driver. 
Signed-off-by: Linus Walleij Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200607215124.48638-2-linus.walleij@linaro.org --- drivers/irqchip/irq-vic.c | 5 ++++- include/linux/irqchip/arm-vic.h | 9 --------- 2 files changed, 4 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c index 927ff2c1bf67..bc235db8a4c5 100644 --- a/drivers/irqchip/irq-vic.c +++ b/drivers/irqchip/irq-vic.c @@ -27,7 +27,10 @@ #define VIC_IRQ_STATUS 0x00 #define VIC_FIQ_STATUS 0x04 +#define VIC_RAW_STATUS 0x08 #define VIC_INT_SELECT 0x0c /* 1 = FIQ, 0 = IRQ */ +#define VIC_INT_ENABLE 0x10 /* 1 = enable, 0 = disable */ +#define VIC_INT_ENABLE_CLEAR 0x14 #define VIC_INT_SOFT 0x18 #define VIC_INT_SOFT_CLEAR 0x1c #define VIC_PROTECT 0x20 @@ -428,7 +431,7 @@ static void __init vic_init_st(void __iomem *base, unsigned int irq_start, vic_register(base, 0, irq_start, vic_sources, 0, node); } -void __init __vic_init(void __iomem *base, int parent_irq, int irq_start, +static void __init __vic_init(void __iomem *base, int parent_irq, int irq_start, u32 vic_sources, u32 resume_sources, struct device_node *node) { diff --git a/include/linux/irqchip/arm-vic.h b/include/linux/irqchip/arm-vic.h index 2a4b6a5d8522..f2b11d1df23d 100644 --- a/include/linux/irqchip/arm-vic.h +++ b/include/linux/irqchip/arm-vic.h @@ -9,15 +9,6 @@ #include -#define VIC_RAW_STATUS 0x08 -#define VIC_INT_ENABLE 0x10 /* 1 = enable, 0 = disable */ -#define VIC_INT_ENABLE_CLEAR 0x14 - -struct device_node; -struct pt_regs; - -void __vic_init(void __iomem *base, int parent_irq, int irq_start, - u32 vic_sources, u32 resume_sources, struct device_node *node); void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources); #endif -- cgit v1.2.3 From 89778093d38d547cd80f6097659d1cf1c2dd4d9d Mon Sep 17 00:00:00 2001 From: Oscar Carter Date: Sat, 30 May 2020 16:34:28 +0200 Subject: drivers/acpi: Add new macro ACPI_DECLARE_SUBTABLE_PROBE_ENTRY In an effort to enable -Wcast-function-type in the top-level Makefile to support Control Flow Integrity builds, there are the need to remove all the function callback casts. To do this, create a new macro called ACPI_DECLARE_SUBTABLE_PROBE_ENTRY to initialize the acpi_probe_entry struct using the probe_subtbl field instead of the probe_table field. This is a previous work to be able to modify the IRQCHIP_ACPI_DECLARE macro to use this new defined macro. Even though these two commented fields are part of a union, this is necessary to avoid function cast mismatches. That is, due to the IRQCHIP_ACPI_DECLARE invocations use as last parameter a function with the protoype "int (*func)(struct acpi_subtable_header *, const unsigned long)" it's necessary that this macro initialize the probe_subtbl field of the acpi_probe_entry struct and not the probe_table field. Co-developed-by: Marc Zyngier Signed-off-by: Marc Zyngier Signed-off-by: Oscar Carter Acked-by: Rafael J. 
Wysocki Link: https://lore.kernel.org/r/20200530143430.5203-2-oscar.carter@gmx.com --- include/linux/acpi.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'include') diff --git a/include/linux/acpi.h b/include/linux/acpi.h index d661cd0ee64d..cf74e044a570 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -1154,6 +1154,17 @@ struct acpi_probe_entry { .driver_data = data, \ } +#define ACPI_DECLARE_SUBTABLE_PROBE_ENTRY(table, name, table_id, \ + subtable, valid, data, fn) \ + static const struct acpi_probe_entry __acpi_probe_##name \ + __used __section(__##table##_acpi_probe_table) = { \ + .id = table_id, \ + .type = subtable, \ + .subtable_valid = valid, \ + .probe_subtbl = fn, \ + .driver_data = data, \ + } + #define ACPI_PROBE_TABLE(name) __##name##_acpi_probe_table #define ACPI_PROBE_TABLE_END(name) __##name##_acpi_probe_table_end -- cgit v1.2.3 From aba3c7ed3fcf74524b7072615028827d5e5750d7 Mon Sep 17 00:00:00 2001 From: Oscar Carter Date: Sat, 30 May 2020 16:34:29 +0200 Subject: drivers/irqchip: Use new macro ACPI_DECLARE_SUBTABLE_PROBE_ENTRY In an effort to enable -Wcast-function-type in the top-level Makefile to support Control Flow Integrity builds, there are the need to remove all the function callback casts. To do this, modify the IRQCHIP_ACPI_DECLARE macro to use the new defined macro ACPI_DECLARE_SUBTABLE_PROBE_ENTRY instead of the macro ACPI_DECLARE_PROBE_ENTRY. This is necessary to be able to initialize the the acpi_probe_entry struct using the probe_subtbl field instead of the probe_table field and avoid function cast mismatches. Also, modify the prototype of the functions used by the invocation of the IRQCHIP_ACPI_DECLARE macro to match all the parameters. Co-developed-by: Marc Zyngier Signed-off-by: Marc Zyngier Signed-off-by: Oscar Carter Acked-by: Rafael J. 
Wysocki Link: https://lore.kernel.org/r/20200530143430.5203-3-oscar.carter@gmx.com --- drivers/irqchip/irq-gic-v3.c | 2 +- drivers/irqchip/irq-gic.c | 2 +- include/linux/irqchip.h | 5 +++-- 3 files changed, 5 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index cc46bc2d634b..324f280ff606 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -2116,7 +2116,7 @@ static void __init gic_acpi_setup_kvm_info(void) } static int __init -gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end) +gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_generic_distributor *dist; struct fwnode_handle *domain_handle; diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 00de05abd3c3..71bd64884ae5 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -1592,7 +1592,7 @@ static void __init gic_acpi_setup_kvm_info(void) gic_set_kvm_info(&gic_v2_kvm_info); } -static int __init gic_v2_acpi_init(struct acpi_subtable_header *header, +static int __init gic_v2_acpi_init(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_generic_distributor *dist; diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h index 950e4b2458f0..447f22880a69 100644 --- a/include/linux/irqchip.h +++ b/include/linux/irqchip.h @@ -39,8 +39,9 @@ * @fn: initialization function */ #define IRQCHIP_ACPI_DECLARE(name, subtable, validate, data, fn) \ - ACPI_DECLARE_PROBE_ENTRY(irqchip, name, ACPI_SIG_MADT, \ - subtable, validate, data, fn) + ACPI_DECLARE_SUBTABLE_PROBE_ENTRY(irqchip, name, \ + ACPI_SIG_MADT, subtable, \ + validate, data, fn) #ifdef CONFIG_IRQCHIP void irqchip_init(void); -- cgit v1.2.3 From 8ebf642f3d809b59f57d0d408189a2218294e269 Mon Sep 17 00:00:00 2001 From: Oscar Carter Date: Sat, 30 May 2020 16:34:30 +0200 Subject: drivers/acpi: Remove function cast Remove the function cast in the ACPI_DECLARE_PROBE_ENTRY macro to ensure that the functions passed as a last parameter to this macro have the right prototype. This is an effort to enable -Wcast-function-type in the top-level Makefile to support Control Flow Integrity builds. Suggested-by: Marc Zyngier Signed-off-by: Oscar Carter Signed-off-by: Marc Zyngier Acked-by: Rafael J. 
Wysocki Link: https://lore.kernel.org/r/20200530143430.5203-4-oscar.carter@gmx.com --- include/linux/acpi.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/acpi.h b/include/linux/acpi.h index cf74e044a570..1cda2d32e4c4 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -1143,16 +1143,16 @@ struct acpi_probe_entry { kernel_ulong_t driver_data; }; -#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \ +#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, \ + valid, data, fn) \ static const struct acpi_probe_entry __acpi_probe_##name \ - __used __section(__##table##_acpi_probe_table) \ - = { \ + __used __section(__##table##_acpi_probe_table) = { \ .id = table_id, \ .type = subtable, \ .subtable_valid = valid, \ - .probe_table = (acpi_tbl_table_handler)fn, \ - .driver_data = data, \ - } + .probe_table = fn, \ + .driver_data = data, \ + } #define ACPI_DECLARE_SUBTABLE_PROBE_ENTRY(table, name, table_id, \ subtable, valid, data, fn) \ -- cgit v1.2.3 From 167cbce27444be3203081b97ea65178c4088b062 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 10 Jun 2020 17:51:21 +0200 Subject: serial: core: drop unnecessary gpio include Drop the recently added gpio include from the serial-core header in favour of a forward declaration and instead include the gpio header only where needed. Signed-off-by: Johan Hovold Link: https://lore.kernel.org/r/20200610155121.14014-1-johan@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_port.c | 1 + drivers/tty/serial/serial_core.c | 1 + include/linux/serial_core.h | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 1632f7d25acc..d64ca77d9cfa 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 57840cf90388..5750f3628357 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 9fd550e7946a..653c653ad0c2 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -10,7 +10,6 @@ #include #include #include -#include #include #include #include @@ -30,6 +29,7 @@ struct uart_port; struct serial_struct; struct device; +struct gpio_desc; /* * This structure describes all the operations that can be done on the -- cgit v1.2.3 From 9205d7b1c1cffa827c23bdbf35e04c7cbe1e1f10 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Thu, 25 Jun 2020 22:59:41 -0700 Subject: net/mlx5: Avoid RDMA file inclusion in core driver mlx5 cq.h does not depend on RDMA verbs. Remove RDMA verbs file inclusion. 
Signed-off-by: Parav Pandit Signed-off-by: Saeed Mahameed --- include/linux/mlx5/cq.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index b5a9399e07ee..7bfb67363434 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -33,7 +33,6 @@ #ifndef MLX5_CORE_CQ_H #define MLX5_CORE_CQ_H -#include #include #include -- cgit v1.2.3 From 2d1b69ed65ee033aa541518cc9f6a815296ac493 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Thu, 25 Jun 2020 22:59:43 -0700 Subject: net/mlx5: kTLS, Improve TLS params layout structures Add explicit WQE segment structures for the TLS static and progress params. According to the HW spec, TISN is not part of the progress params context, take it out of it. Rename the control segment tisn field as it could hold either a TIS or a TIR number. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 14 +++++++++----- .../net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c | 2 +- include/linux/mlx5/device.h | 9 +++++++++ include/linux/mlx5/mlx5_ifc.h | 5 +---- include/linux/mlx5/qp.h | 2 +- 7 files changed, 23 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index bfd3e1161bc6..31cac239563d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -182,7 +182,7 @@ mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map, static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg) { - return cseg && !!cseg->tisn; + return cseg && !!cseg->tis_tir_num; } static inline u8 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h index c6180892cfcb..806ed185dd4c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h @@ -19,7 +19,7 @@ #define MLX5E_KTLS_PROGRESS_WQE_SZ \ (offsetof(struct mlx5e_tx_wqe, tls_progress_params_ctx) + \ - MLX5_ST_SZ_BYTES(tls_progress_params)) + sizeof(struct mlx5_wqe_tls_progress_params_seg)) #define MLX5E_KTLS_PROGRESS_WQEBBS \ (DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index 3cd78d9503c1..ad7300f19815 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -64,7 +64,7 @@ build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn, cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) | STATIC_PARAMS_DS_CNT); cseg->fm_ce_se = fence ? 
MLX5_FENCE_MODE_INITIATOR_SMALL : 0; - cseg->tisn = cpu_to_be32(priv_tx->tisn << 8); + cseg->tis_tir_num = cpu_to_be32(priv_tx->tisn << 8); ucseg->flags = MLX5_UMR_INLINE; ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16); @@ -75,10 +75,14 @@ build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn, static void fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx) { - MLX5_SET(tls_progress_params, ctx, tisn, priv_tx->tisn); - MLX5_SET(tls_progress_params, ctx, record_tracker_state, + struct mlx5_wqe_tls_progress_params_seg *params; + + params = ctx; + + params->tis_tir_num = cpu_to_be32(priv_tx->tisn); + MLX5_SET(tls_progress_params, params->ctx, record_tracker_state, MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START); - MLX5_SET(tls_progress_params, ctx, auth_state, + MLX5_SET(tls_progress_params, params->ctx, auth_state, MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD); } @@ -284,7 +288,7 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP); cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); - cseg->tisn = cpu_to_be32(tisn << 8); + cseg->tis_tir_num = cpu_to_be32(tisn << 8); cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0; fsz = skb_frag_size(frag); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c index 05454a843b28..72d26fbc8d5b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -305,7 +305,7 @@ err_out: void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg, struct mlx5e_accel_tx_tls_state *state) { - cseg->tisn = cpu_to_be32(state->tls_tisn << 8); + cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8); } static int tls_update_resync_sn(struct net_device *netdev, diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 1bc27aca648b..57db125e5802 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -458,6 +458,15 @@ enum { MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS = 0x2, }; +struct mlx5_wqe_tls_static_params_seg { + u8 ctx[MLX5_ST_SZ_BYTES(tls_static_params)]; +}; + +struct mlx5_wqe_tls_progress_params_seg { + __be32 tis_tir_num; + u8 ctx[MLX5_ST_SZ_BYTES(tls_progress_params)]; +}; + enum { MLX5_SET_PORT_RESET_QKEY = 0, MLX5_SET_PORT_GUID0 = 16, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 116bd9bb347f..a227518c70cf 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -10638,16 +10638,13 @@ struct mlx5_ifc_tls_static_params_bits { }; struct mlx5_ifc_tls_progress_params_bits { - u8 reserved_at_0[0x8]; - u8 tisn[0x18]; - u8 next_record_tcp_sn[0x20]; u8 hw_resync_tcp_sn[0x20]; u8 record_tracker_state[0x2]; u8 auth_state[0x2]; - u8 reserved_at_64[0x4]; + u8 reserved_at_44[0x4]; u8 hw_offset_record_number[0x18]; }; diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index b8992b861ae6..36492a1342cf 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -209,7 +209,7 @@ struct mlx5_wqe_ctrl_seg { __be32 general_id; __be32 imm; __be32 umr_mkey; - __be32 tisn; + __be32 tis_tir_num; }; }; -- cgit v1.2.3 From acb5a07aaf2723cd273a4089e62611a414fb1c35 Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Mon, 8 Jun 2020 12:42:52 +0300 Subject: Revert "net/tls: Add force_resync for driver 
resync" This reverts commit b3ae2459f89773adcbf16fef4b68deaaa3be1929. Revert the force resync API. Not in use. To be replaced by a better async resync API downstream. Signed-off-by: Boris Pismenny Signed-off-by: Tariq Toukan Reviewed-by: Maxim Mikityanskiy Signed-off-by: Saeed Mahameed --- include/net/tls.h | 12 +----------- net/tls/tls_device.c | 9 +++------ 2 files changed, 4 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/net/tls.h b/include/net/tls.h index 3212d3c214a9..ca5f7f437289 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -607,22 +607,12 @@ tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction) #endif /* The TLS context is valid until sk_destruct is called */ -#define RESYNC_REQ (1 << 0) -#define RESYNC_REQ_FORCE (1 << 1) static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); - atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ); -} - -static inline void tls_offload_rx_force_resync_request(struct sock *sk) -{ - struct tls_context *tls_ctx = tls_get_ctx(sk); - struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); - - atomic64_set(&rx_ctx->resync_req, RESYNC_REQ | RESYNC_REQ_FORCE); + atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | 1); } static inline void diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 0e55f8365ce2..a562ebaaa33c 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -694,11 +694,10 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_offload_context_rx *rx_ctx; - bool is_req_pending, is_force_resync; u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE]; + u32 sock_data, is_req_pending; struct tls_prot_info *prot; s64 resync_req; - u32 sock_data; u32 req_seq; if (tls_ctx->rx_conf != TLS_HW) @@ -713,11 +712,9 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) resync_req = atomic64_read(&rx_ctx->resync_req); req_seq = resync_req >> 32; seq += TLS_HEADER_SIZE - 1; - is_req_pending = resync_req & RESYNC_REQ; - is_force_resync = resync_req & RESYNC_REQ_FORCE; + is_req_pending = resync_req; - if (likely(!is_req_pending) || - (!is_force_resync && req_seq != seq) || + if (likely(!is_req_pending) || req_seq != seq || !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0)) return; break; -- cgit v1.2.3 From ed9b7646b06a2ed2450dd9437fc7d1ad2783140c Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Mon, 8 Jun 2020 19:11:38 +0300 Subject: net/tls: Add asynchronous resync This patch adds support for asynchronous resynchronization in tls_device. Async resync follows two distinct stages: 1. The NIC driver indicates that it would like to resync on some TLS record within the received packet (P), but the driver does not know (yet) which of the TLS records within the packet. At this stage, the NIC driver will query the device to find the exact TCP sequence for resync (tcpsn), however, the driver does not wait for the device to provide the response. 2. Eventually, the device responds, and the driver provides the tcpsn within the resync packet to KTLS. Now, KTLS can check the tcpsn against any processed TLS records within packet P, and also against any record that is processed in the future within packet P. 
The asynchronous resync path simplifies the device driver, as it can save bits on the packet completion (32-bit TCP sequence), and pass this information on an asynchronous command instead. Signed-off-by: Boris Pismenny Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- include/net/tls.h | 38 +++++++++++++++++++++++++++++++++++++- net/tls/tls_device.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/tls.h b/include/net/tls.h index ca5f7f437289..c875c0a445a6 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -291,11 +291,19 @@ struct tlsdev_ops { enum tls_offload_sync_type { TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0, TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1, + TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC = 2, }; #define TLS_DEVICE_RESYNC_NH_START_IVAL 2 #define TLS_DEVICE_RESYNC_NH_MAX_IVAL 128 +#define TLS_DEVICE_RESYNC_ASYNC_LOGMAX 13 +struct tls_offload_resync_async { + atomic64_t req; + u32 loglen; + u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX]; +}; + struct tls_offload_context_rx { /* sw must be the first member of tls_offload_context_rx */ struct tls_sw_context_rx sw; @@ -314,6 +322,10 @@ struct tls_offload_context_rx { u32 decrypted_failed; u32 decrypted_tgt; } resync_nh; + /* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC */ + struct { + struct tls_offload_resync_async *resync_async; + }; }; u8 driver_state[] __aligned(8); /* The TLS layer reserves room for driver specific state @@ -606,13 +618,37 @@ tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction) } #endif +#define RESYNC_REQ BIT(0) +#define RESYNC_REQ_ASYNC BIT(1) /* The TLS context is valid until sk_destruct is called */ static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); - atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | 1); + atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ); +} + +/* Log all TLS record header TCP sequences in [seq, seq+len] */ +static inline void +tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); + + atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) | + (len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC); + rx_ctx->resync_async->loglen = 0; +} + +static inline void +tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); + + atomic64_set(&rx_ctx->resync_async->req, + ((u64)ntohl(seq) << 32) | RESYNC_REQ); } static inline void diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index a562ebaaa33c..18fa6067bb7f 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -690,6 +690,47 @@ static void tls_device_resync_rx(struct tls_context *tls_ctx, TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC); } +static bool +tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async, + s64 resync_req, u32 *seq) +{ + u32 is_async = resync_req & RESYNC_REQ_ASYNC; + u32 req_seq = resync_req >> 32; + u32 req_end = req_seq + ((resync_req >> 16) & 0xffff); + + if (is_async) { + /* asynchronous stage: log all headers seq such that + * req_seq <= seq <= end_seq, and wait for real resync request + */ + if 
(between(*seq, req_seq, req_end) && + resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX) + resync_async->log[resync_async->loglen++] = *seq; + + return false; + } + + /* synchronous stage: check against the logged entries and + * proceed to check the next entries if no match was found + */ + while (resync_async->loglen) { + if (req_seq == resync_async->log[resync_async->loglen - 1] && + atomic64_try_cmpxchg(&resync_async->req, + &resync_req, 0)) { + resync_async->loglen = 0; + *seq = req_seq; + return true; + } + resync_async->loglen--; + } + + if (req_seq == *seq && + atomic64_try_cmpxchg(&resync_async->req, + &resync_req, 0)) + return true; + + return false; +} + void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) { struct tls_context *tls_ctx = tls_get_ctx(sk); @@ -736,6 +777,16 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) seq += rcd_len; tls_bigint_increment(rcd_sn, prot->rec_seq_size); break; + case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC: + resync_req = atomic64_read(&rx_ctx->resync_async->req); + is_req_pending = resync_req; + if (likely(!is_req_pending)) + return; + + if (!tls_device_rx_resync_async(rx_ctx->resync_async, + resync_req, &seq)) + return; + break; } tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn); -- cgit v1.2.3 From 322b598be4d9b9090cda560c4caab78704615ab4 Mon Sep 17 00:00:00 2001 From: Xu Yilun Date: Tue, 16 Jun 2020 12:08:44 +0800 Subject: fpga: dfl: introduce interrupt trigger setting API FPGA user applications may be interested in interrupts generated by DFL features. For example, users can implement their own FPGA logics with interrupts enabled in AFU (Accelerated Function Unit, dynamic region of DFL based FPGA). So user applications need to be notified to handle these interrupts. In order to allow userspace applications to monitor interrupts, driver requires userspace to provide eventfds as interrupt notification channels. Applications then poll/select on the eventfds to get notified. This patch introduces a generic helper functions to do eventfds binding with given interrupts. Sub feature drivers are expected to use XXX_GET_IRQ_NUM to query irq info, and XXX_SET_IRQ to set eventfds for interrupts. This patch also introduces helper functions for these 2 ioctls. 
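As a hypothetical sketch of the resulting user-space flow: the DFL_FPGA_XXX_GET_IRQ_NUM / DFL_FPGA_XXX_SET_IRQ names below are placeholders for the per-feature ioctls that later patches define on top of these helpers (only struct dfl_fpga_irq_set and the in-kernel helpers come from this patch).

    #include <stdint.h>
    #include <stdlib.h>
    #include <unistd.h>
    #include <poll.h>
    #include <sys/eventfd.h>
    #include <sys/ioctl.h>
    #include <linux/fpga-dfl.h>

    /* Bind an eventfd to irq index 0 of a DFL feature and block until it fires. */
    static int wait_for_feature_irq(int feature_fd)
    {
            struct dfl_fpga_irq_set *irqs;
            struct pollfd pfd;
            uint64_t count;
            uint32_t nr_irqs;
            int efd, ret;

            /* placeholder ioctl, backed by dfl_feature_ioctl_get_num_irqs() */
            if (ioctl(feature_fd, DFL_FPGA_XXX_GET_IRQ_NUM, &nr_irqs) || !nr_irqs)
                    return -1;

            efd = eventfd(0, 0);
            irqs = malloc(sizeof(*irqs) + sizeof(int32_t));
            irqs->start = 0;
            irqs->count = 1;
            irqs->evtfds[0] = efd;

            /* placeholder ioctl, backed by dfl_feature_ioctl_set_irq() */
            ret = ioctl(feature_fd, DFL_FPGA_XXX_SET_IRQ, irqs);
            free(irqs);
            if (ret)
                    return -1;

            pfd.fd = efd;
            pfd.events = POLLIN;
            poll(&pfd, 1, -1);  /* dfl_irq_handler() signals the eventfd */
            return read(efd, &count, sizeof(count)) == sizeof(count) ? 0 : -1;
    }

Passing a negative evtfds[n] (or a NULL fds array in-kernel) unbinds the corresponding interrupts, per dfl_fpga_set_irq_triggers().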
Signed-off-by: Luwei Kang Signed-off-by: Wu Hao Signed-off-by: Xu Yilun Signed-off-by: Tom Rix Reviewed-by: Marcelo Tosatti Acked-by: Wu Hao Signed-off-by: Moritz Fischer --- drivers/fpga/dfl.c | 157 ++++++++++++++++++++++++++++++++++++++++++ drivers/fpga/dfl.h | 16 +++++ include/uapi/linux/fpga-dfl.h | 13 ++++ 3 files changed, 186 insertions(+) (limited to 'include') diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c index d915331fb52c..649958a36e62 100644 --- a/drivers/fpga/dfl.c +++ b/drivers/fpga/dfl.c @@ -10,7 +10,9 @@ * Wu Hao * Xiao Guangrong */ +#include #include +#include #include "dfl.h" @@ -533,6 +535,7 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo) unsigned int i; /* save resource information for each feature */ + feature->dev = fdev; feature->id = finfo->fid; feature->resource_index = index; feature->ioaddr = finfo->ioaddr; @@ -1393,6 +1396,160 @@ done: } EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_vf); +static irqreturn_t dfl_irq_handler(int irq, void *arg) +{ + struct eventfd_ctx *trigger = arg; + + eventfd_signal(trigger, 1); + return IRQ_HANDLED; +} + +static int do_set_irq_trigger(struct dfl_feature *feature, unsigned int idx, + int fd) +{ + struct platform_device *pdev = feature->dev; + struct eventfd_ctx *trigger; + int irq, ret; + + irq = feature->irq_ctx[idx].irq; + + if (feature->irq_ctx[idx].trigger) { + free_irq(irq, feature->irq_ctx[idx].trigger); + kfree(feature->irq_ctx[idx].name); + eventfd_ctx_put(feature->irq_ctx[idx].trigger); + feature->irq_ctx[idx].trigger = NULL; + } + + if (fd < 0) + return 0; + + feature->irq_ctx[idx].name = + kasprintf(GFP_KERNEL, "fpga-irq[%u](%s-%llx)", idx, + dev_name(&pdev->dev), feature->id); + if (!feature->irq_ctx[idx].name) + return -ENOMEM; + + trigger = eventfd_ctx_fdget(fd); + if (IS_ERR(trigger)) { + ret = PTR_ERR(trigger); + goto free_name; + } + + ret = request_irq(irq, dfl_irq_handler, 0, + feature->irq_ctx[idx].name, trigger); + if (!ret) { + feature->irq_ctx[idx].trigger = trigger; + return ret; + } + + eventfd_ctx_put(trigger); +free_name: + kfree(feature->irq_ctx[idx].name); + + return ret; +} + +/** + * dfl_fpga_set_irq_triggers - set eventfd triggers for dfl feature interrupts + * + * @feature: dfl sub feature. + * @start: start of irq index in this dfl sub feature. + * @count: number of irqs. + * @fds: eventfds to bind with irqs. unbind related irq if fds[n] is negative. + * unbind "count" specified number of irqs if fds ptr is NULL. + * + * Bind given eventfds with irqs in this dfl sub feature. Unbind related irq if + * fds[n] is negative. Unbind "count" specified number of irqs if fds ptr is + * NULL. + * + * Return: 0 on success, negative error code otherwise. + */ +int dfl_fpga_set_irq_triggers(struct dfl_feature *feature, unsigned int start, + unsigned int count, int32_t *fds) +{ + unsigned int i; + int ret = 0; + + /* overflow */ + if (unlikely(start + count < start)) + return -EINVAL; + + /* exceeds nr_irqs */ + if (start + count > feature->nr_irqs) + return -EINVAL; + + for (i = 0; i < count; i++) { + int fd = fds ? fds[i] : -1; + + ret = do_set_irq_trigger(feature, start + i, fd); + if (ret) { + while (i--) + do_set_irq_trigger(feature, start + i, -1); + break; + } + } + + return ret; +} +EXPORT_SYMBOL_GPL(dfl_fpga_set_irq_triggers); + +/** + * dfl_feature_ioctl_get_num_irqs - dfl feature _GET_IRQ_NUM ioctl interface. 
+ * @pdev: the feature device which has the sub feature + * @feature: the dfl sub feature + * @arg: ioctl argument + * + * Return: 0 on success, negative error code otherwise. + */ +long dfl_feature_ioctl_get_num_irqs(struct platform_device *pdev, + struct dfl_feature *feature, + unsigned long arg) +{ + return put_user(feature->nr_irqs, (__u32 __user *)arg); +} +EXPORT_SYMBOL_GPL(dfl_feature_ioctl_get_num_irqs); + +/** + * dfl_feature_ioctl_set_irq - dfl feature _SET_IRQ ioctl interface. + * @pdev: the feature device which has the sub feature + * @feature: the dfl sub feature + * @arg: ioctl argument + * + * Return: 0 on success, negative error code otherwise. + */ +long dfl_feature_ioctl_set_irq(struct platform_device *pdev, + struct dfl_feature *feature, + unsigned long arg) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct dfl_fpga_irq_set hdr; + s32 *fds; + long ret; + + if (!feature->nr_irqs) + return -ENOENT; + + if (copy_from_user(&hdr, (void __user *)arg, sizeof(hdr))) + return -EFAULT; + + if (!hdr.count || (hdr.start + hdr.count > feature->nr_irqs) || + (hdr.start + hdr.count < hdr.start)) + return -EINVAL; + + fds = memdup_user((void __user *)(arg + sizeof(hdr)), + hdr.count * sizeof(s32)); + if (IS_ERR(fds)) + return PTR_ERR(fds); + + mutex_lock(&pdata->lock); + ret = dfl_fpga_set_irq_triggers(feature, hdr.start, hdr.count, fds); + mutex_unlock(&pdata->lock); + + kfree(fds); + return ret; +} +EXPORT_SYMBOL_GPL(dfl_feature_ioctl_set_irq); + static void __exit dfl_fpga_exit(void) { dfl_chardev_uinit(); diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h index e1f3ab86560e..a32dfba2a88b 100644 --- a/drivers/fpga/dfl.h +++ b/drivers/fpga/dfl.h @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -214,14 +215,19 @@ struct dfl_feature_driver { * struct dfl_feature_irq_ctx - dfl private feature interrupt context * * @irq: Linux IRQ number of this interrupt. + * @trigger: eventfd context to signal when interrupt happens. + * @name: irq name needed when requesting irq. */ struct dfl_feature_irq_ctx { int irq; + struct eventfd_ctx *trigger; + char *name; }; /** * struct dfl_feature - sub feature of the feature devices * + * @dev: ptr to pdev of the feature device which has the sub feature. * @id: sub feature id. * @resource_index: each sub feature has one mmio resource for its registers. * this index is used to find its mmio resource from the @@ -233,6 +239,7 @@ struct dfl_feature_irq_ctx { * @priv: priv data of this feature. 
*/ struct dfl_feature { + struct platform_device *dev; u64 id; int resource_index; void __iomem *ioaddr; @@ -503,4 +510,13 @@ int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id); int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id); void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev); int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vf); +int dfl_fpga_set_irq_triggers(struct dfl_feature *feature, unsigned int start, + unsigned int count, int32_t *fds); +long dfl_feature_ioctl_get_num_irqs(struct platform_device *pdev, + struct dfl_feature *feature, + unsigned long arg); +long dfl_feature_ioctl_set_irq(struct platform_device *pdev, + struct dfl_feature *feature, + unsigned long arg); + #endif /* __FPGA_DFL_H */ diff --git a/include/uapi/linux/fpga-dfl.h b/include/uapi/linux/fpga-dfl.h index ec70a0746e59..7331350f3067 100644 --- a/include/uapi/linux/fpga-dfl.h +++ b/include/uapi/linux/fpga-dfl.h @@ -151,6 +151,19 @@ struct dfl_fpga_port_dma_unmap { #define DFL_FPGA_PORT_DMA_UNMAP _IO(DFL_FPGA_MAGIC, DFL_PORT_BASE + 4) +/** + * struct dfl_fpga_irq_set - the argument for DFL_FPGA_XXX_SET_IRQ ioctl. + * + * @start: Index of the first irq. + * @count: The number of eventfd handler. + * @evtfds: Eventfd handlers. + */ +struct dfl_fpga_irq_set { + __u32 start; + __u32 count; + __s32 evtfds[]; +}; + /* IOCTLs for FME file descriptor */ /** -- cgit v1.2.3 From fe80536acf8397827be77f9b8ada384b90e790d0 Mon Sep 17 00:00:00 2001 From: Martin Date: Sun, 28 Jun 2020 23:18:23 +0530 Subject: bareudp: Added attribute to enable & disable rx metadata collection Metadata need not be collected in receive if the packet from bareudp device is not targeted to openvswitch. Signed-off-by: Martin Signed-off-by: David S. Miller --- Documentation/networking/bareudp.rst | 6 ++++-- drivers/net/bareudp.c | 23 +++++++++++++++++------ include/net/bareudp.h | 1 + include/uapi/linux/if_link.h | 1 + 4 files changed, 23 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/Documentation/networking/bareudp.rst b/Documentation/networking/bareudp.rst index 465a8b251bfe..0e00636d8d74 100644 --- a/Documentation/networking/bareudp.rst +++ b/Documentation/networking/bareudp.rst @@ -48,5 +48,7 @@ enabled. The bareudp device could be used along with OVS or flower filter in TC. The OVS or TC flower layer must set the tunnel information in SKB dst field before sending packet buffer to the bareudp device for transmission. On reception the -bareudp device extracts and stores the tunnel information in SKB dst field before -passing the packet buffer to the network stack. +bareudp device decapsulates the udp header and passes the inner packet to the +network stack. If RX_COLLECT_METADATA flag is enabled in the device the tunnel +information will be stored in the SKB dst field before the packet buffer is +passed to the network stack. 
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index 3dd46cd55114..108a8cafc4f8 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -46,6 +46,7 @@ struct bareudp_dev { __be16 port; u16 sport_min; bool multi_proto_mode; + bool rx_collect_metadata; struct socket __rcu *sock; struct list_head next; /* bareudp node on namespace list */ struct gro_cells gro_cells; @@ -125,13 +126,14 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) bareudp->dev->stats.rx_dropped++; goto drop; } - - tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0); - if (!tun_dst) { - bareudp->dev->stats.rx_dropped++; - goto drop; + if (bareudp->rx_collect_metadata) { + tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0); + if (!tun_dst) { + bareudp->dev->stats.rx_dropped++; + goto drop; + } + skb_dst_set(skb, &tun_dst->dst); } - skb_dst_set(skb, &tun_dst->dst); skb->dev = bareudp->dev; oiph = skb_network_header(skb); skb_reset_network_header(skb); @@ -575,6 +577,9 @@ static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf, if (data[IFLA_BAREUDP_MULTIPROTO_MODE]) conf->multi_proto_mode = true; + if (data[IFLA_BAREUDP_RX_COLLECT_METADATA]) + conf->rx_collect_metadata = true; + return 0; } @@ -612,6 +617,8 @@ static int bareudp_configure(struct net *net, struct net_device *dev, bareudp->ethertype = conf->ethertype; bareudp->sport_min = conf->sport_min; bareudp->multi_proto_mode = conf->multi_proto_mode; + bareudp->rx_collect_metadata = conf->rx_collect_metadata; + err = register_netdevice(dev); if (err) return err; @@ -669,6 +676,7 @@ static size_t bareudp_get_size(const struct net_device *dev) nla_total_size(sizeof(__be16)) + /* IFLA_BAREUDP_ETHERTYPE */ nla_total_size(sizeof(__u16)) + /* IFLA_BAREUDP_SRCPORT_MIN */ nla_total_size(0) + /* IFLA_BAREUDP_MULTIPROTO_MODE */ + nla_total_size(0) + /* IFLA_BAREUDP_RX_COLLECT_METADATA */ 0; } @@ -685,6 +693,9 @@ static int bareudp_fill_info(struct sk_buff *skb, const struct net_device *dev) if (bareudp->multi_proto_mode && nla_put_flag(skb, IFLA_BAREUDP_MULTIPROTO_MODE)) goto nla_put_failure; + if (bareudp->rx_collect_metadata && + nla_put_flag(skb, IFLA_BAREUDP_RX_COLLECT_METADATA)) + goto nla_put_failure; return 0; diff --git a/include/net/bareudp.h b/include/net/bareudp.h index dc65a0d71d9b..3dd5f9a8d01c 100644 --- a/include/net/bareudp.h +++ b/include/net/bareudp.h @@ -12,6 +12,7 @@ struct bareudp_conf { __be16 port; u16 sport_min; bool multi_proto_mode; + bool rx_collect_metadata; }; struct net_device *bareudp_dev_create(struct net *net, const char *name, diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index a009365ad67b..cc185a007ade 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -600,6 +600,7 @@ enum { IFLA_BAREUDP_ETHERTYPE, IFLA_BAREUDP_SRCPORT_MIN, IFLA_BAREUDP_MULTIPROTO_MODE, + IFLA_BAREUDP_RX_COLLECT_METADATA, __IFLA_BAREUDP_MAX }; -- cgit v1.2.3 From 6fc3e68f5b35c4861b28733fa32f636db7188746 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Sun, 28 Jun 2020 17:32:25 +0800 Subject: sctp: use list_is_singular in sctp_list_single_entry Use list_is_singular() instead of open-coding. Signed-off-by: Geliang Tang Signed-off-by: David S. 
Miller --- include/net/sctp/sctp.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index f8bcb75bb044..e3bd198b00ae 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -412,7 +412,7 @@ static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk) /* Tests if the list has one and only one entry. */ static inline int sctp_list_single_entry(struct list_head *head) { - return (head->next != head) && (head->next == head->prev); + return list_is_singular(head); } static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk) -- cgit v1.2.3 From cea0f76a483d1270ac6f6513964e3e75193dda48 Mon Sep 17 00:00:00 2001 From: Anurag Kumar Vulisha Date: Mon, 29 Jun 2020 15:00:52 +0300 Subject: dt-bindings: phy: Add DT bindings for Xilinx ZynqMP PSGTR PHY Add DT bindings for the Xilinx ZynqMP PHY. ZynqMP SoCs have a High Speed Processing System Gigabit Transceiver which provides PHY capabilities to USB, SATA, PCIE, Display Port and Ehernet SGMII controllers. Signed-off-by: Anurag Kumar Vulisha Signed-off-by: Laurent Pinchart Reviewed-by: Rob Herring Link: https://lore.kernel.org/r/20200629120054.29338-2-laurent.pinchart@ideasonboard.com Signed-off-by: Vinod Koul --- .../devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml | 105 +++++++++++++++++++++ include/dt-bindings/phy/phy.h | 1 + 2 files changed, 106 insertions(+) create mode 100644 Documentation/devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml (limited to 'include') diff --git a/Documentation/devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml b/Documentation/devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml new file mode 100644 index 000000000000..09e3cde7ebca --- /dev/null +++ b/Documentation/devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml @@ -0,0 +1,105 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/phy/xlnx,zynqmp-psgtr.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Xilinx ZynqMP Gigabit Transceiver PHY Device Tree Bindings + +maintainers: + - Laurent Pinchart + +description: | + This binding describes the Xilinx ZynqMP Gigabit Transceiver (GTR) PHY. The + GTR provides four lanes and is used by USB, SATA, PCIE, Display port and + Ethernet SGMII controllers. + +properties: + "#phy-cells": + const: 4 + description: | + The cells contain the following arguments. + + - description: The GTR lane + minimum: 0 + maximum: 3 + - description: The PHY type + enum: + - PHY_TYPE_DP + - PHY_TYPE_PCIE + - PHY_TYPE_SATA + - PHY_TYPE_SGMII + - PHY_TYPE_USB + - description: The PHY instance + minimum: 0 + maximum: 1 # for DP, SATA or USB + maximum: 3 # for PCIE or SGMII + - description: The reference clock number + minimum: 0 + maximum: 3 + + compatible: + enum: + - xlnx,zynqmp-psgtr-v1.1 + - xlnx,zynqmp-psgtr + + clocks: + minItems: 1 + maxItems: 4 + description: | + Clock for each PS_MGTREFCLK[0-3] reference clock input. Unconnected + inputs shall not have an entry. + + clock-names: + minItems: 1 + maxItems: 4 + items: + pattern: "^ref[0-3]$" + + reg: + items: + - description: SERDES registers block + - description: SIOU registers block + + reg-names: + items: + - const: serdes + - const: siou + + xlnx,tx-termination-fix: + description: | + Include this for fixing functional issue with the TX termination + resistance in GT, which can be out of spec for the XCZU9EG silicon + version. 
+ type: boolean + +required: + - "#phy-cells" + - compatible + - reg + - reg-names + +if: + properties: + compatible: + const: xlnx,zynqmp-psgtr-v1.1 + +then: + properties: + xlnx,tx-termination-fix: false + +additionalProperties: false + +examples: + - | + phy: phy@fd400000 { + compatible = "xlnx,zynqmp-psgtr-v1.1"; + reg = <0x0 0xfd400000 0x0 0x40000>, + <0x0 0xfd3d0000 0x0 0x1000>; + reg-names = "serdes", "siou"; + clocks = <&refclks 3>, <&refclks 2>, <&refclks 0>; + clock-names = "ref1", "ref2", "ref3"; + #phy-cells = <4>; + }; + +... diff --git a/include/dt-bindings/phy/phy.h b/include/dt-bindings/phy/phy.h index 3727ef72138b..36e8c241cf48 100644 --- a/include/dt-bindings/phy/phy.h +++ b/include/dt-bindings/phy/phy.h @@ -18,5 +18,6 @@ #define PHY_TYPE_UFS 5 #define PHY_TYPE_DP 6 #define PHY_TYPE_XPCS 7 +#define PHY_TYPE_SGMII 8 #endif /* _DT_BINDINGS_PHY */ -- cgit v1.2.3 From db9819c76c1fd48c30699381c94bba5c95dd467e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 27 Jun 2020 09:31:47 +0200 Subject: block: remove bio_disassociate_blkg bio_disassociate_blkg has two callers, of which one immediately assigns a new value to >bi_blkg. Just open code the function in the two callers. Acked-by: Tejun Heo Reviewed-by: Johannes Thumshirn Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/bio.c | 27 ++++++++------------------- include/linux/bio.h | 2 -- 2 files changed, 8 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/block/bio.c b/block/bio.c index fb5533416fa6..8aef4460b32e 100644 --- a/block/bio.c +++ b/block/bio.c @@ -234,8 +234,12 @@ fallback: void bio_uninit(struct bio *bio) { - bio_disassociate_blkg(bio); - +#ifdef CONFIG_BLK_CGROUP + if (bio->bi_blkg) { + blkg_put(bio->bi_blkg); + bio->bi_blkg = NULL; + } +#endif if (bio_integrity(bio)) bio_integrity_free(bio); @@ -1625,21 +1629,6 @@ EXPORT_SYMBOL(bioset_init_from_src); #ifdef CONFIG_BLK_CGROUP -/** - * bio_disassociate_blkg - puts back the blkg reference if associated - * @bio: target bio - * - * Helper to disassociate the blkg from @bio if a blkg is associated. 
- */ -void bio_disassociate_blkg(struct bio *bio) -{ - if (bio->bi_blkg) { - blkg_put(bio->bi_blkg); - bio->bi_blkg = NULL; - } -} -EXPORT_SYMBOL_GPL(bio_disassociate_blkg); - /** * __bio_associate_blkg - associate a bio with the a blkg * @bio: target bio @@ -1656,8 +1645,8 @@ EXPORT_SYMBOL_GPL(bio_disassociate_blkg); */ static void __bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg) { - bio_disassociate_blkg(bio); - + if (bio->bi_blkg) + blkg_put(bio->bi_blkg); bio->bi_blkg = blkg_tryget_closest(blkg); } diff --git a/include/linux/bio.h b/include/linux/bio.h index 0282f8aa8593..4cd229e175c0 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -497,13 +497,11 @@ static inline void bio_associate_blkg_from_page(struct bio *bio, #endif #ifdef CONFIG_BLK_CGROUP -void bio_disassociate_blkg(struct bio *bio); void bio_associate_blkg(struct bio *bio); void bio_associate_blkg_from_css(struct bio *bio, struct cgroup_subsys_state *css); void bio_clone_blkg_association(struct bio *dst, struct bio *src); #else /* CONFIG_BLK_CGROUP */ -static inline void bio_disassociate_blkg(struct bio *bio) { } static inline void bio_associate_blkg(struct bio *bio) { } static inline void bio_associate_blkg_from_css(struct bio *bio, struct cgroup_subsys_state *css) -- cgit v1.2.3 From a18b9b1590ca64f877588700de32c9ad236f405c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 27 Jun 2020 09:31:50 +0200 Subject: block: move bio_associate_blkg_from_page to mm/page_io.c bio_associate_blkg_from_page is a special purpose helper for swap bios that doesn't need access to bio internals. Move it to the swap code instead of having it in bio.c. Acked-by: Tejun Heo Reviewed-by: Johannes Thumshirn Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/bio.c | 26 -------------------------- include/linux/bio.h | 7 ------- mm/page_io.c | 17 +++++++++++++++++ 3 files changed, 17 insertions(+), 33 deletions(-) (limited to 'include') diff --git a/block/bio.c b/block/bio.c index bc8de2432e36..901d22715dd4 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1659,32 +1659,6 @@ void bio_associate_blkg_from_css(struct bio *bio, } EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css); -#ifdef CONFIG_MEMCG -/** - * bio_associate_blkg_from_page - associate a bio with the page's blkg - * @bio: target bio - * @page: the page to lookup the blkcg from - * - * Associate @bio with the blkg from @page's owning memcg and the respective - * request_queue. If cgroup_e_css returns %NULL, fall back to the queue's - * root_blkg. 
- */ -void bio_associate_blkg_from_page(struct bio *bio, struct page *page) -{ - struct cgroup_subsys_state *css; - - if (!page->mem_cgroup) - return; - - rcu_read_lock(); - - css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys); - bio_associate_blkg_from_css(bio, css); - - rcu_read_unlock(); -} -#endif /* CONFIG_MEMCG */ - /** * bio_associate_blkg - associate a bio with a blkg * @bio: target bio diff --git a/include/linux/bio.h b/include/linux/bio.h index 4cd229e175c0..c6d765382926 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -489,13 +489,6 @@ do { \ #define bio_dev(bio) \ disk_devt((bio)->bi_disk) -#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) -void bio_associate_blkg_from_page(struct bio *bio, struct page *page); -#else -static inline void bio_associate_blkg_from_page(struct bio *bio, - struct page *page) { } -#endif - #ifdef CONFIG_BLK_CGROUP void bio_associate_blkg(struct bio *bio); void bio_associate_blkg_from_css(struct bio *bio, diff --git a/mm/page_io.c b/mm/page_io.c index e8726f3e3820..ccda76790088 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -277,6 +277,23 @@ static inline void count_swpout_vm_event(struct page *page) count_vm_events(PSWPOUT, hpage_nr_pages(page)); } +#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) +static void bio_associate_blkg_from_page(struct bio *bio, struct page *page) +{ + struct cgroup_subsys_state *css; + + if (!page->mem_cgroup) + return; + + rcu_read_lock(); + css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys); + bio_associate_blkg_from_css(bio, css); + rcu_read_unlock(); +} +#else +#define bio_associate_blkg_from_page(bio, page) do { } while (0) +#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */ + int __swap_writepage(struct page *page, struct writeback_control *wbc, bio_end_io_t end_write_func) { -- cgit v1.2.3 From 28fc591ff9d64cbc2b780be95ee3fde8f6ade7fd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 27 Jun 2020 09:31:51 +0200 Subject: block: move the bio cgroup associatation helpers to blk-cgroup.c Keep the cgroup code together. Acked-by: Tejun Heo Reviewed-by: Johannes Thumshirn Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/bio.c | 75 --------------------------------- block/blk-cgroup.c | 103 ++++++++++++++++++++++++++++++++++++++++++++- include/linux/blk-cgroup.h | 30 ------------- 3 files changed, 101 insertions(+), 107 deletions(-) (limited to 'include') diff --git a/block/bio.c b/block/bio.c index 901d22715dd4..fc1299f9d86a 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1627,81 +1627,6 @@ int bioset_init_from_src(struct bio_set *bs, struct bio_set *src) } EXPORT_SYMBOL(bioset_init_from_src); -#ifdef CONFIG_BLK_CGROUP -/** - * bio_associate_blkg_from_css - associate a bio with a specified css - * @bio: target bio - * @css: target css - * - * Associate @bio with the blkg found by combining the css's blkg and the - * request_queue of the @bio. An association failure is handled by walking up - * the blkg tree. Therefore, the blkg associated can be anything between @blkg - * and q->root_blkg. This situation only happens when a cgroup is dying and - * then the remaining bios will spill to the closest alive blkg. - * - * A reference will be taken on the blkg and will be released when @bio is - * freed. 
- */ -void bio_associate_blkg_from_css(struct bio *bio, - struct cgroup_subsys_state *css) -{ - struct request_queue *q = bio->bi_disk->queue; - struct blkcg_gq *blkg = q->root_blkg; - - if (bio->bi_blkg) - blkg_put(bio->bi_blkg); - - rcu_read_lock(); - if (css && css->parent) - blkg = blkg_lookup_create(css_to_blkcg(css), q); - bio->bi_blkg = blkg_tryget_closest(blkg); - rcu_read_unlock(); -} -EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css); - -/** - * bio_associate_blkg - associate a bio with a blkg - * @bio: target bio - * - * Associate @bio with the blkg found from the bio's css and request_queue. - * If one is not found, bio_lookup_blkg() creates the blkg. If a blkg is - * already associated, the css is reused and association redone as the - * request_queue may have changed. - */ -void bio_associate_blkg(struct bio *bio) -{ - struct cgroup_subsys_state *css; - - rcu_read_lock(); - - if (bio->bi_blkg) - css = &bio_blkcg(bio)->css; - else - css = blkcg_css(); - - bio_associate_blkg_from_css(bio, css); - - rcu_read_unlock(); -} -EXPORT_SYMBOL_GPL(bio_associate_blkg); - -/** - * bio_clone_blkg_association - clone blkg association from src to dst bio - * @dst: destination bio - * @src: source bio - */ -void bio_clone_blkg_association(struct bio *dst, struct bio *src) -{ - if (src->bi_blkg) { - if (dst->bi_blkg) - blkg_put(dst->bi_blkg); - blkg_get(src->bi_blkg); - dst->bi_blkg = src->bi_blkg; - } -} -EXPORT_SYMBOL_GPL(bio_clone_blkg_association); -#endif /* CONFIG_BLK_CGROUP */ - static void __init biovec_init_slabs(void) { int i; diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 0ecc897b225c..bb0607bfd771 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -328,7 +328,7 @@ err_free_blkg: * Returns the blkg or the closest blkg if blkg_create() fails as it walks * down from root. */ -struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, +static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, struct request_queue *q) { struct blkcg_gq *blkg; @@ -377,7 +377,7 @@ struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, * This looks up or creates the blkg representing the unique pair * of the blkcg and the request_queue. */ -struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, +static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, struct request_queue *q) { struct blkcg_gq *blkg = blkg_lookup(blkcg, q); @@ -1727,6 +1727,105 @@ void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta) atomic64_add(delta, &blkg->delay_nsec); } +/** + * blkg_tryget_closest - try and get a blkg ref on the closet blkg + * @blkg: blkg to get + * + * This needs to be called rcu protected. As the failure mode here is to walk + * up the blkg tree, this ensure that the blkg->parent pointers are always + * valid. This returns the blkg that it ended up taking a reference on or %NULL + * if no reference was taken. + */ +static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg) +{ + struct blkcg_gq *ret_blkg = NULL; + + WARN_ON_ONCE(!rcu_read_lock_held()); + + while (blkg) { + if (blkg_tryget(blkg)) { + ret_blkg = blkg; + break; + } + blkg = blkg->parent; + } + + return ret_blkg; +} + +/** + * bio_associate_blkg_from_css - associate a bio with a specified css + * @bio: target bio + * @css: target css + * + * Associate @bio with the blkg found by combining the css's blkg and the + * request_queue of the @bio. An association failure is handled by walking up + * the blkg tree. Therefore, the blkg associated can be anything between @blkg + * and q->root_blkg. 
This situation only happens when a cgroup is dying and + * then the remaining bios will spill to the closest alive blkg. + * + * A reference will be taken on the blkg and will be released when @bio is + * freed. + */ +void bio_associate_blkg_from_css(struct bio *bio, + struct cgroup_subsys_state *css) +{ + struct request_queue *q = bio->bi_disk->queue; + struct blkcg_gq *blkg = q->root_blkg; + + if (bio->bi_blkg) + blkg_put(bio->bi_blkg); + + rcu_read_lock(); + if (css && css->parent) + blkg = blkg_lookup_create(css_to_blkcg(css), q); + bio->bi_blkg = blkg_tryget_closest(blkg); + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css); + +/** + * bio_associate_blkg - associate a bio with a blkg + * @bio: target bio + * + * Associate @bio with the blkg found from the bio's css and request_queue. + * If one is not found, bio_lookup_blkg() creates the blkg. If a blkg is + * already associated, the css is reused and association redone as the + * request_queue may have changed. + */ +void bio_associate_blkg(struct bio *bio) +{ + struct cgroup_subsys_state *css; + + rcu_read_lock(); + + if (bio->bi_blkg) + css = &bio_blkcg(bio)->css; + else + css = blkcg_css(); + + bio_associate_blkg_from_css(bio, css); + + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(bio_associate_blkg); + +/** + * bio_clone_blkg_association - clone blkg association from src to dst bio + * @dst: destination bio + * @src: source bio + */ +void bio_clone_blkg_association(struct bio *dst, struct bio *src) +{ + if (src->bi_blkg) { + if (dst->bi_blkg) + blkg_put(dst->bi_blkg); + blkg_get(src->bi_blkg); + dst->bi_blkg = src->bi_blkg; + } +} +EXPORT_SYMBOL_GPL(bio_clone_blkg_association); + static int __init blkcg_init(void) { blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio", diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index a57ebe2f00ab..60df97202314 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -183,10 +183,6 @@ extern bool blkcg_debug_stats; struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg, struct request_queue *q, bool update_hint); -struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, - struct request_queue *q); -struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, - struct request_queue *q); int blkcg_init_queue(struct request_queue *q); void blkcg_exit_queue(struct request_queue *q); @@ -480,32 +476,6 @@ static inline bool blkg_tryget(struct blkcg_gq *blkg) return blkg && percpu_ref_tryget(&blkg->refcnt); } -/** - * blkg_tryget_closest - try and get a blkg ref on the closet blkg - * @blkg: blkg to get - * - * This needs to be called rcu protected. As the failure mode here is to walk - * up the blkg tree, this ensure that the blkg->parent pointers are always - * valid. This returns the blkg that it ended up taking a reference on or %NULL - * if no reference was taken. - */ -static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg) -{ - struct blkcg_gq *ret_blkg = NULL; - - WARN_ON_ONCE(!rcu_read_lock_held()); - - while (blkg) { - if (blkg_tryget(blkg)) { - ret_blkg = blkg; - break; - } - blkg = blkg->parent; - } - - return ret_blkg; -} - /** * blkg_put - put a blkg reference * @blkg: blkg to put -- cgit v1.2.3 From 81630e27fff3dc1ccbf64ecb48a70170d7071545 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 27 Jun 2020 09:31:55 +0200 Subject: blk-cgroup: remove the !bio->bi_blkg check in blkcg_bio_issue_check This is purely a sanity check for grave programming errors. Remove it to simplify further work in this area. 
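For context, a hedged sketch of why the removed branch really is just a sanity check: by the time a bio reaches blkcg_bio_issue_check(), the association helpers above have already taken a reference on some blkg, falling back to q->root_blkg, so bi_blkg is only NULL after a programming error. The wrapper below is illustrative only and not a kernel function; bio_associate_blkg() and blkcg_bio_issue_check() are the real entry points.

/*
 * Illustrative ordering sketch, not kernel code: association happens
 * before the issue check, and it always ends with a reference on some
 * blkg (the queue's root blkg at worst), so the NULL check is redundant
 * on any correct path.
 */
static bool sketch_submit_path(struct request_queue *q, struct bio *bio)
{
	bio_associate_blkg(bio);		/* leaves bio->bi_blkg non-NULL */
	return blkcg_bio_issue_check(q, bio);	/* may rely on bio->bi_blkg */
}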
Acked-by: Tejun Heo Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- include/linux/blk-cgroup.h | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) (limited to 'include') diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 60df97202314..8e86b598316c 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -543,24 +543,11 @@ static inline void blkcg_bio_issue_init(struct bio *bio) static inline bool blkcg_bio_issue_check(struct request_queue *q, struct bio *bio) { - struct blkcg_gq *blkg; + struct blkcg_gq *blkg = bio->bi_blkg; bool throtl = false; rcu_read_lock(); - - if (!bio->bi_blkg) { - char b[BDEVNAME_SIZE]; - - WARN_ONCE(1, - "no blkg associated for bio on block-device: %s\n", - bio_devname(bio, b)); - bio_associate_blkg(bio); - } - - blkg = bio->bi_blkg; - throtl = blk_throtl_bio(q, blkg, bio); - if (!throtl) { struct blkg_iostat_set *bis; int rwd, cpu; -- cgit v1.2.3 From 93b8063804b62b55248e16499d853e1b20eff905 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 27 Jun 2020 09:31:57 +0200 Subject: blk-cgroup: move rcu locking from blkcg_bio_issue_check to blk_throtl_bio The only thing in blkcg_bio_issue_check that needs to be under rcu_read_lock is blk_throtl_bio, so move the locking there. Acked-by: Tejun Heo Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-throttle.c | 3 ++- include/linux/blk-cgroup.h | 2 -- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 209fdd8939fb..ac0083450500 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -2168,7 +2168,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, bool throttled = false; struct throtl_data *td = tg->td; - WARN_ON_ONCE(!rcu_read_lock_held()); + rcu_read_lock(); /* see throtl_charge_bio() */ if (bio_flagged(bio, BIO_THROTTLED)) @@ -2273,6 +2273,7 @@ out: if (throttled || !td->track_bio_latency) bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY; #endif + rcu_read_unlock(); return throttled; } diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 8e86b598316c..8ab043c911f2 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -546,7 +546,6 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q, struct blkcg_gq *blkg = bio->bi_blkg; bool throtl = false; - rcu_read_lock(); throtl = blk_throtl_bio(q, blkg, bio); if (!throtl) { struct blkg_iostat_set *bis; @@ -582,7 +581,6 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q, blkcg_bio_issue_init(bio); - rcu_read_unlock(); return !throtl; } -- cgit v1.2.3 From db18a53e5ba840993a3fc908dec648402ed740bd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 27 Jun 2020 09:31:58 +0200 Subject: blk-cgroup: remove blkcg_bio_issue_check blkcg_bio_issue_check is a giant inline function that does three entirely different things. Factor out the blk-cgroup related bio initalization into a new helper, and the open code the sequence in the only caller, relying on the fact that all the actual functionality is stubbed out for non-cgroup builds. 
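The three pieces of the removed inline end up as the short open-coded sequence sketched below; compare the blk-core.c hunk in the diff that follows. The wrapper name is illustrative, while blk_throtl_bio(), blk_cgroup_bio_start() and blkcg_bio_issue_init() are the helpers this patch introduces or keeps.

/*
 * Illustrative summary of the sequence open-coded in
 * generic_make_request_checks() by this patch.
 */
static bool sketch_cgroup_issue(struct bio *bio)
{
	if (blk_throtl_bio(bio)) {		/* 1) throttling decision */
		blkcg_bio_issue_init(bio);	/* still stamp the issue time */
		return false;			/* bio held back by throttling */
	}
	blk_cgroup_bio_start(bio);		/* 2) per-cpu cgroup I/O accounting */
	blkcg_bio_issue_init(bio);		/* 3) bio issue initialization */
	return true;
}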
Acked-by: Tejun Heo Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-cgroup.c | 34 ++++++++++++++++++++++++++++ block/blk-core.c | 7 +++++- block/blk-throttle.c | 5 +++-- block/blk.h | 2 ++ include/linux/blk-cgroup.h | 56 ++-------------------------------------------- 5 files changed, 47 insertions(+), 57 deletions(-) (limited to 'include') diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index d21ec2acd716..1ce94afc03bc 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1813,6 +1813,40 @@ void bio_clone_blkg_association(struct bio *dst, struct bio *src) } EXPORT_SYMBOL_GPL(bio_clone_blkg_association); +static int blk_cgroup_io_type(struct bio *bio) +{ + if (op_is_discard(bio->bi_opf)) + return BLKG_IOSTAT_DISCARD; + if (op_is_write(bio->bi_opf)) + return BLKG_IOSTAT_WRITE; + return BLKG_IOSTAT_READ; +} + +void blk_cgroup_bio_start(struct bio *bio) +{ + int rwd = blk_cgroup_io_type(bio), cpu; + struct blkg_iostat_set *bis; + + cpu = get_cpu(); + bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu); + u64_stats_update_begin(&bis->sync); + + /* + * If the bio is flagged with BIO_CGROUP_ACCT it means this is a split + * bio and we would have already accounted for the size of the bio. + */ + if (!bio_flagged(bio, BIO_CGROUP_ACCT)) { + bio_set_flag(bio, BIO_CGROUP_ACCT); + bis->cur.bytes[rwd] += bio->bi_iter.bi_size; + } + bis->cur.ios[rwd]++; + + u64_stats_update_end(&bis->sync); + if (cgroup_subsys_on_dfl(io_cgrp_subsys)) + cgroup_rstat_updated(bio->bi_blkg->blkcg->css.cgroup, cpu); + put_cpu(); +} + static int __init blkcg_init(void) { blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio", diff --git a/block/blk-core.c b/block/blk-core.c index a9769c1a2875..76cfd5709f66 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1073,8 +1073,13 @@ generic_make_request_checks(struct bio *bio) if (unlikely(!current->io_context)) create_task_io_context(current, GFP_ATOMIC, q->node); - if (!blkcg_bio_issue_check(q, bio)) + if (blk_throtl_bio(bio)) { + blkcg_bio_issue_init(bio); return false; + } + + blk_cgroup_bio_start(bio); + blkcg_bio_issue_init(bio); if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) { trace_block_bio_queue(q, bio); diff --git a/block/blk-throttle.c b/block/blk-throttle.c index ac0083450500..9d00f62c05ec 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -2158,9 +2158,10 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td) } #endif -bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, - struct bio *bio) +bool blk_throtl_bio(struct bio *bio) { + struct request_queue *q = bio->bi_disk->queue; + struct blkcg_gq *blkg = bio->bi_blkg; struct throtl_qnode *qn = NULL; struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg); struct throtl_service_queue *sq; diff --git a/block/blk.h b/block/blk.h index 3a120a070dac..41a50880c94e 100644 --- a/block/blk.h +++ b/block/blk.h @@ -288,10 +288,12 @@ int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node); extern int blk_throtl_init(struct request_queue *q); extern void blk_throtl_exit(struct request_queue *q); extern void blk_throtl_register_queue(struct request_queue *q); +bool blk_throtl_bio(struct bio *bio); #else /* CONFIG_BLK_DEV_THROTTLING */ static inline int blk_throtl_init(struct request_queue *q) { return 0; } static inline void blk_throtl_exit(struct request_queue *q) { } static inline void blk_throtl_register_queue(struct request_queue *q) { } +static inline bool blk_throtl_bio(struct bio *bio) { return false; } #endif /* 
CONFIG_BLK_DEV_THROTTLING */ #ifdef CONFIG_BLK_DEV_THROTTLING_LOW extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page); diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 8ab043c911f2..431b2d18bf40 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -517,14 +517,6 @@ static inline void blkg_put(struct blkcg_gq *blkg) if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \ (p_blkg)->q, false))) -#ifdef CONFIG_BLK_DEV_THROTTLING -extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, - struct bio *bio); -#else -static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, - struct bio *bio) { return false; } -#endif - bool __blkcg_punt_bio_submit(struct bio *bio); static inline bool blkcg_punt_bio_submit(struct bio *bio) @@ -540,50 +532,6 @@ static inline void blkcg_bio_issue_init(struct bio *bio) bio_issue_init(&bio->bi_issue, bio_sectors(bio)); } -static inline bool blkcg_bio_issue_check(struct request_queue *q, - struct bio *bio) -{ - struct blkcg_gq *blkg = bio->bi_blkg; - bool throtl = false; - - throtl = blk_throtl_bio(q, blkg, bio); - if (!throtl) { - struct blkg_iostat_set *bis; - int rwd, cpu; - - if (op_is_discard(bio->bi_opf)) - rwd = BLKG_IOSTAT_DISCARD; - else if (op_is_write(bio->bi_opf)) - rwd = BLKG_IOSTAT_WRITE; - else - rwd = BLKG_IOSTAT_READ; - - cpu = get_cpu(); - bis = per_cpu_ptr(blkg->iostat_cpu, cpu); - u64_stats_update_begin(&bis->sync); - - /* - * If the bio is flagged with BIO_CGROUP_ACCT it means this is a - * split bio and we would have already accounted for the size of - * the bio. - */ - if (!bio_flagged(bio, BIO_CGROUP_ACCT)) { - bio_set_flag(bio, BIO_CGROUP_ACCT); - bis->cur.bytes[rwd] += bio->bi_iter.bi_size; - } - bis->cur.ios[rwd]++; - - u64_stats_update_end(&bis->sync); - if (cgroup_subsys_on_dfl(io_cgrp_subsys)) - cgroup_rstat_updated(blkg->blkcg->css.cgroup, cpu); - put_cpu(); - } - - blkcg_bio_issue_init(bio); - - return !throtl; -} - static inline void blkcg_use_delay(struct blkcg_gq *blkg) { if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0)) @@ -657,6 +605,7 @@ static inline void blkcg_clear_delay(struct blkcg_gq *blkg) atomic_dec(&blkg->blkcg->css.cgroup->congestion_count); } +void blk_cgroup_bio_start(struct bio *bio); void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta); void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay); void blkcg_maybe_throttle_current(void); @@ -710,8 +659,7 @@ static inline void blkg_put(struct blkcg_gq *blkg) { } static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; } static inline void blkcg_bio_issue_init(struct bio *bio) { } -static inline bool blkcg_bio_issue_check(struct request_queue *q, - struct bio *bio) { return true; } +static inline void blk_cgroup_bio_start(struct bio *bio) { } #define blk_queue_for_each_rl(rl, q) \ for ((rl) = &(q)->root_rl; (rl); (rl) = NULL) -- cgit v1.2.3 From 42fdc5e49c2be97db112d410d07044e0e2c7d5bb Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 29 Jun 2020 17:08:34 +0200 Subject: blk-mq: remove the BLK_MQ_REQ_INTERNAL flag Just check for a non-NULL elevator directly to make the code more clear. 
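The flag was only ever a proxy for "this queue has an I/O scheduler attached", so each test of it can be replaced by the direct check sketched below; the helper name is illustrative, the fields are the ones the diff touches.

/* Illustrative: the two conditions are equivalent, so the flag can go */
static inline bool uses_sched_tags(struct blk_mq_alloc_data *data)
{
	/* before: data->flags & BLK_MQ_REQ_INTERNAL */
	/* after:  an attached elevator implies scheduler (internal) tags */
	return data->q->elevator != NULL;
}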
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 4 ++-- block/blk-mq.c | 10 +++------- block/blk-mq.h | 2 +- include/linux/blk-mq.h | 2 -- 4 files changed, 6 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index ae722f8b13fb..281367b04527 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -90,9 +90,9 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx, static int __blk_mq_get_tag(struct blk_mq_alloc_data *data, struct sbitmap_queue *bt) { - if (!(data->flags & BLK_MQ_REQ_INTERNAL) && - !hctx_may_queue(data->hctx, bt)) + if (!data->q->elevator && !hctx_may_queue(data->hctx, bt)) return BLK_MQ_NO_TAG; + if (data->shallow_depth) return __sbitmap_queue_get_shallow(bt, data->shallow_depth); else diff --git a/block/blk-mq.c b/block/blk-mq.c index d07e55455726..72d3034fe39d 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -279,7 +279,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, struct request *rq = tags->static_rqs[tag]; req_flags_t rq_flags = 0; - if (data->flags & BLK_MQ_REQ_INTERNAL) { + if (data->q->elevator) { rq->tag = BLK_MQ_NO_TAG; rq->internal_tag = tag; } else { @@ -364,8 +364,6 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data) data->flags |= BLK_MQ_REQ_NOWAIT; if (e) { - data->flags |= BLK_MQ_REQ_INTERNAL; - /* * Flush requests are special and go directly to the * dispatch list. Don't include reserved tags in the @@ -380,7 +378,7 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data) retry: data->ctx = blk_mq_get_ctx(q); data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx); - if (!(data->flags & BLK_MQ_REQ_INTERNAL)) + if (!e) blk_mq_tag_busy(data->hctx); /* @@ -476,9 +474,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask); data.ctx = __blk_mq_get_ctx(q, cpu); - if (q->elevator) - data.flags |= BLK_MQ_REQ_INTERNAL; - else + if (!q->elevator) blk_mq_tag_busy(data.hctx); ret = -EWOULDBLOCK; diff --git a/block/blk-mq.h b/block/blk-mq.h index b3ce0f3a2ad2..c6330335767c 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -159,7 +159,7 @@ struct blk_mq_alloc_data { static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data) { - if (data->flags & BLK_MQ_REQ_INTERNAL) + if (data->q->elevator) return data->hctx->sched_tags; return data->hctx->tags; diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 1641ec6cd7e5..8986e88a986b 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -447,8 +447,6 @@ enum { BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0), /* allocate from reserved pool */ BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1), - /* allocate internal/sched tag */ - BLK_MQ_REQ_INTERNAL = (__force blk_mq_req_flags_t)(1 << 2), /* set RQF_PREEMPT */ BLK_MQ_REQ_PREEMPT = (__force blk_mq_req_flags_t)(1 << 3), }; -- cgit v1.2.3 From cbba1d719534b77b857267890b0f54f0f0a90de4 Mon Sep 17 00:00:00 2001 From: Andrzej Pietrasiewicz Date: Mon, 29 Jun 2020 14:29:17 +0200 Subject: thermal: Add current mode to thermal zone device Prepare for changing the place where the mode is stored: now it is in drivers, which might or might not implement get_mode()/set_mode() methods. A lot of cleanup can be done thanks to storing it in struct tzd. The get_mode() methods will become redundant. 
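Once the mode is cached in struct thermal_zone_device, a driver-side get_mode() collapses to the one-liner sketched below, which is the shape of every callback the follow-up patch deletes; the function name here is illustrative only.

/* Illustrative: with tzd->mode present, each get_mode() reduces to this */
static int example_get_mode(struct thermal_zone_device *tz,
			    enum thermal_device_mode *mode)
{
	*mode = tz->mode;	/* the core already tracks the current mode */
	return 0;
}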
Signed-off-by: Andrzej Pietrasiewicz Reviewed-by: Guenter Roeck Reviewed-by: Bartlomiej Zolnierkiewicz Reviewed-by: Amit Kucheria Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20200629122925.21729-4-andrzej.p@collabora.com --- include/linux/thermal.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 216185bb3014..5f91d7f04512 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -128,6 +128,7 @@ struct thermal_cooling_device { * @trip_temp_attrs: attributes for trip points for sysfs: trip temperature * @trip_type_attrs: attributes for trip points for sysfs: trip type * @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis + * @mode: current mode of this thermal zone * @devdata: private pointer for device private data * @trips: number of trip points the thermal zone supports * @trips_disabled; bitmap for disabled trips @@ -170,6 +171,7 @@ struct thermal_zone_device { struct thermal_attr *trip_temp_attrs; struct thermal_attr *trip_type_attrs; struct thermal_attr *trip_hyst_attrs; + enum thermal_device_mode mode; void *devdata; int trips; unsigned long trips_disabled; /* bitmap for disabled trips */ -- cgit v1.2.3 From 1ee14820fd8ee79c4fc191155f48c985f28040e2 Mon Sep 17 00:00:00 2001 From: Andrzej Pietrasiewicz Date: Mon, 29 Jun 2020 14:29:19 +0200 Subject: thermal: remove get_mode() operation of drivers get_mode() is now redundant, as the state is stored in struct thermal_zone_device. Consequently the "mode" attribute in sysfs can always be visible, because it is always possible to get the mode from struct tzd. Signed-off-by: Andrzej Pietrasiewicz [for acerhdf] Acked-by: Peter Kaestle Reviewed-by: Bartlomiej Zolnierkiewicz Reviewed-by: Amit Kucheria Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20200629122925.21729-6-andrzej.p@collabora.com --- drivers/acpi/thermal.c | 9 ------- drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 19 -------------- drivers/platform/x86/acerhdf.c | 12 --------- drivers/thermal/da9062-thermal.c | 8 ------ drivers/thermal/imx_thermal.c | 9 ------- .../intel/int340x_thermal/int3400_thermal.c | 9 ------- drivers/thermal/intel/intel_quark_dts_thermal.c | 8 ------ drivers/thermal/thermal_core.c | 7 +---- drivers/thermal/thermal_of.c | 9 ------- drivers/thermal/thermal_sysfs.c | 30 ++-------------------- include/linux/thermal.h | 2 -- 11 files changed, 3 insertions(+), 119 deletions(-) (limited to 'include') diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index 4ba273f49d87..592be97c4456 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -525,14 +525,6 @@ static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp) return 0; } -static int thermal_get_mode(struct thermal_zone_device *thermal, - enum thermal_device_mode *mode) -{ - *mode = thermal->mode; - - return 0; -} - static int thermal_set_mode(struct thermal_zone_device *thermal, enum thermal_device_mode mode) { @@ -847,7 +839,6 @@ static struct thermal_zone_device_ops acpi_thermal_zone_ops = { .bind = acpi_thermal_bind_cooling_device, .unbind = acpi_thermal_unbind_cooling_device, .get_temp = thermal_get_temp, - .get_mode = thermal_get_mode, .set_mode = thermal_set_mode, .get_trip_type = thermal_get_trip_type, .get_trip_temp = thermal_get_trip_temp, diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index 51667ed99c21..ad61b2db30b8 100644 --- 
a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -275,14 +275,6 @@ static int mlxsw_thermal_unbind(struct thermal_zone_device *tzdev, return 0; } -static int mlxsw_thermal_get_mode(struct thermal_zone_device *tzdev, - enum thermal_device_mode *mode) -{ - *mode = tzdev->mode; - - return 0; -} - static int mlxsw_thermal_set_mode(struct thermal_zone_device *tzdev, enum thermal_device_mode mode) { @@ -402,7 +394,6 @@ static int mlxsw_thermal_trend_get(struct thermal_zone_device *tzdev, static struct thermal_zone_device_ops mlxsw_thermal_ops = { .bind = mlxsw_thermal_bind, .unbind = mlxsw_thermal_unbind, - .get_mode = mlxsw_thermal_get_mode, .set_mode = mlxsw_thermal_set_mode, .get_temp = mlxsw_thermal_get_temp, .get_trip_type = mlxsw_thermal_get_trip_type, @@ -461,14 +452,6 @@ static int mlxsw_thermal_module_unbind(struct thermal_zone_device *tzdev, return err; } -static int mlxsw_thermal_module_mode_get(struct thermal_zone_device *tzdev, - enum thermal_device_mode *mode) -{ - *mode = tzdev->mode; - - return 0; -} - static int mlxsw_thermal_module_mode_set(struct thermal_zone_device *tzdev, enum thermal_device_mode mode) { @@ -606,7 +589,6 @@ static int mlxsw_thermal_module_trend_get(struct thermal_zone_device *tzdev, static struct thermal_zone_device_ops mlxsw_thermal_module_ops = { .bind = mlxsw_thermal_module_bind, .unbind = mlxsw_thermal_module_unbind, - .get_mode = mlxsw_thermal_module_mode_get, .set_mode = mlxsw_thermal_module_mode_set, .get_temp = mlxsw_thermal_module_temp_get, .get_trip_type = mlxsw_thermal_module_trip_type_get, @@ -645,7 +627,6 @@ static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev, static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = { .bind = mlxsw_thermal_module_bind, .unbind = mlxsw_thermal_module_unbind, - .get_mode = mlxsw_thermal_module_mode_get, .set_mode = mlxsw_thermal_module_mode_set, .get_temp = mlxsw_thermal_gearbox_temp_get, .get_trip_type = mlxsw_thermal_module_trip_type_get, diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c index 6f21015e5fd9..58c4e1caaa09 100644 --- a/drivers/platform/x86/acerhdf.c +++ b/drivers/platform/x86/acerhdf.c @@ -413,17 +413,6 @@ static inline void acerhdf_enable_kernelmode(void) pr_notice("kernel mode fan control ON\n"); } -static int acerhdf_get_mode(struct thermal_zone_device *thermal, - enum thermal_device_mode *mode) -{ - if (verbose) - pr_notice("kernel mode fan control %d\n", kernelmode); - - *mode = thermal->mode; - - return 0; -} - /* * set operation mode; * enabled: the thermal layer of the kernel takes care about @@ -490,7 +479,6 @@ static struct thermal_zone_device_ops acerhdf_dev_ops = { .bind = acerhdf_bind, .unbind = acerhdf_unbind, .get_temp = acerhdf_get_ec_temp, - .get_mode = acerhdf_get_mode, .set_mode = acerhdf_set_mode, .get_trip_type = acerhdf_get_trip_type, .get_trip_hyst = acerhdf_get_trip_hyst, diff --git a/drivers/thermal/da9062-thermal.c b/drivers/thermal/da9062-thermal.c index a14c7981c7c7..a7ac8afb063e 100644 --- a/drivers/thermal/da9062-thermal.c +++ b/drivers/thermal/da9062-thermal.c @@ -120,13 +120,6 @@ static irqreturn_t da9062_thermal_irq_handler(int irq, void *data) return IRQ_HANDLED; } -static int da9062_thermal_get_mode(struct thermal_zone_device *z, - enum thermal_device_mode *mode) -{ - *mode = z->mode; - return 0; -} - static int da9062_thermal_get_trip_type(struct thermal_zone_device *z, int trip, enum thermal_trip_type *type) @@ -179,7 +172,6 @@ static int 
da9062_thermal_get_temp(struct thermal_zone_device *z, static struct thermal_zone_device_ops da9062_thermal_ops = { .get_temp = da9062_thermal_get_temp, - .get_mode = da9062_thermal_get_mode, .get_trip_type = da9062_thermal_get_trip_type, .get_trip_temp = da9062_thermal_get_trip_temp, }; diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index 9a1114d721b6..2c7ee5da608a 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c @@ -330,14 +330,6 @@ static int imx_get_temp(struct thermal_zone_device *tz, int *temp) return 0; } -static int imx_get_mode(struct thermal_zone_device *tz, - enum thermal_device_mode *mode) -{ - *mode = tz->mode; - - return 0; -} - static int imx_set_mode(struct thermal_zone_device *tz, enum thermal_device_mode mode) { @@ -464,7 +456,6 @@ static struct thermal_zone_device_ops imx_tz_ops = { .bind = imx_bind, .unbind = imx_unbind, .get_temp = imx_get_temp, - .get_mode = imx_get_mode, .set_mode = imx_set_mode, .get_trip_type = imx_get_trip_type, .get_trip_temp = imx_get_trip_temp, diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index f65b2fc09198..9a622aaf29dd 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -377,14 +377,6 @@ static int int3400_thermal_get_temp(struct thermal_zone_device *thermal, return 0; } -static int int3400_thermal_get_mode(struct thermal_zone_device *thermal, - enum thermal_device_mode *mode) -{ - *mode = thermal->mode; - - return 0; -} - static int int3400_thermal_set_mode(struct thermal_zone_device *thermal, enum thermal_device_mode mode) { @@ -412,7 +404,6 @@ static int int3400_thermal_set_mode(struct thermal_zone_device *thermal, static struct thermal_zone_device_ops int3400_thermal_ops = { .get_temp = int3400_thermal_get_temp, - .get_mode = int3400_thermal_get_mode, .set_mode = int3400_thermal_set_mode, }; diff --git a/drivers/thermal/intel/intel_quark_dts_thermal.c b/drivers/thermal/intel/intel_quark_dts_thermal.c index d77cb3df5ade..c4879b4bfbf1 100644 --- a/drivers/thermal/intel/intel_quark_dts_thermal.c +++ b/drivers/thermal/intel/intel_quark_dts_thermal.c @@ -308,13 +308,6 @@ static int sys_get_curr_temp(struct thermal_zone_device *tzd, return 0; } -static int sys_get_mode(struct thermal_zone_device *tzd, - enum thermal_device_mode *mode) -{ - *mode = tzd->mode; - return 0; -} - static int sys_set_mode(struct thermal_zone_device *tzd, enum thermal_device_mode mode) { @@ -336,7 +329,6 @@ static struct thermal_zone_device_ops tzone_ops = { .get_trip_type = sys_get_trip_type, .set_trip_temp = sys_set_trip_temp, .get_crit_temp = sys_get_crit_temp, - .get_mode = sys_get_mode, .set_mode = sys_set_mode, }; diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index b71196eaf90e..14d3b1b94c4f 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -1456,7 +1456,6 @@ static int thermal_pm_notify(struct notifier_block *nb, unsigned long mode, void *_unused) { struct thermal_zone_device *tz; - enum thermal_device_mode tz_mode; switch (mode) { case PM_HIBERNATION_PREPARE: @@ -1469,11 +1468,7 @@ static int thermal_pm_notify(struct notifier_block *nb, case PM_POST_SUSPEND: atomic_set(&in_suspend, 0); list_for_each_entry(tz, &thermal_tz_list, node) { - tz_mode = THERMAL_DEVICE_ENABLED; - if (tz->ops->get_mode) - tz->ops->get_mode(tz, &tz_mode); - - if (tz_mode == THERMAL_DEVICE_DISABLED) + if 
(tz->mode == THERMAL_DEVICE_DISABLED) continue; thermal_zone_device_init(tz); diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c index c495b1e48ef2..ba65d48a48cb 100644 --- a/drivers/thermal/thermal_of.c +++ b/drivers/thermal/thermal_of.c @@ -267,14 +267,6 @@ static int of_thermal_unbind(struct thermal_zone_device *thermal, return 0; } -static int of_thermal_get_mode(struct thermal_zone_device *tz, - enum thermal_device_mode *mode) -{ - *mode = tz->mode; - - return 0; -} - static int of_thermal_set_mode(struct thermal_zone_device *tz, enum thermal_device_mode mode) { @@ -389,7 +381,6 @@ static int of_thermal_get_crit_temp(struct thermal_zone_device *tz, } static struct thermal_zone_device_ops of_thermal_ops = { - .get_mode = of_thermal_get_mode, .set_mode = of_thermal_set_mode, .get_trip_type = of_thermal_get_trip_type, diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c index aa99edb4dff7..096370977068 100644 --- a/drivers/thermal/thermal_sysfs.c +++ b/drivers/thermal/thermal_sysfs.c @@ -49,18 +49,9 @@ static ssize_t mode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct thermal_zone_device *tz = to_thermal_zone(dev); - enum thermal_device_mode mode; - int result; - - if (!tz->ops->get_mode) - return -EPERM; - result = tz->ops->get_mode(tz, &mode); - if (result) - return result; - - return sprintf(buf, "%s\n", mode == THERMAL_DEVICE_ENABLED ? "enabled" - : "disabled"); + return sprintf(buf, "%s\n", tz->mode == THERMAL_DEVICE_ENABLED ? + "enabled" : "disabled"); } static ssize_t @@ -428,30 +419,13 @@ static struct attribute_group thermal_zone_attribute_group = { .attrs = thermal_zone_dev_attrs, }; -/* We expose mode only if .get_mode is present */ static struct attribute *thermal_zone_mode_attrs[] = { &dev_attr_mode.attr, NULL, }; -static umode_t thermal_zone_mode_is_visible(struct kobject *kobj, - struct attribute *attr, - int attrno) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct thermal_zone_device *tz; - - tz = container_of(dev, struct thermal_zone_device, device); - - if (tz->ops->get_mode) - return attr->mode; - - return 0; -} - static struct attribute_group thermal_zone_mode_attribute_group = { .attrs = thermal_zone_mode_attrs, - .is_visible = thermal_zone_mode_is_visible, }; /* We expose passive only if passive trips are present */ diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 5f91d7f04512..a808f6fa2777 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -76,8 +76,6 @@ struct thermal_zone_device_ops { struct thermal_cooling_device *); int (*get_temp) (struct thermal_zone_device *, int *); int (*set_trips) (struct thermal_zone_device *, int, int); - int (*get_mode) (struct thermal_zone_device *, - enum thermal_device_mode *); int (*set_mode) (struct thermal_zone_device *, enum thermal_device_mode); int (*get_trip_type) (struct thermal_zone_device *, int, -- cgit v1.2.3 From ac5d9ecc74d8beee8c87f1441e4adaf4e9fe90c5 Mon Sep 17 00:00:00 2001 From: Andrzej Pietrasiewicz Date: Mon, 29 Jun 2020 14:29:20 +0200 Subject: thermal: Add mode helpers Prepare for making the drivers not access tzd's private members. 
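With these helpers a driver is no longer expected to touch tz->mode directly. A hedged sketch of the intended call pattern follows; struct example_thermal_data and the suspend/resume hooks are hypothetical, while thermal_zone_device_disable()/_enable() are the helpers added by this patch (the imx_thermal suspend path converted a few patches later uses them the same way).

struct example_thermal_data {			/* hypothetical driver state */
	struct thermal_zone_device *tz;
};

static int example_thermal_suspend(struct device *dev)
{
	struct example_thermal_data *data = dev_get_drvdata(dev);

	/* the core updates tz->mode and notifies the driver via its mode callback */
	return thermal_zone_device_disable(data->tz);
}

static int example_thermal_resume(struct device *dev)
{
	struct example_thermal_data *data = dev_get_drvdata(dev);

	return thermal_zone_device_enable(data->tz);
}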
Signed-off-by: Andrzej Pietrasiewicz Reviewed-by: Bartlomiej Zolnierkiewicz [staticize thermal_zone_device_set_mode()] Signed-off-by: kernel test robot Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20200629122925.21729-7-andrzej.p@collabora.com --- drivers/thermal/thermal_core.c | 53 ++++++++++++++++++++++++++++++++++++++++++ include/linux/thermal.h | 13 +++++++++++ 2 files changed, 66 insertions(+) (limited to 'include') diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 14d3b1b94c4f..f02c57c986f0 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -459,6 +459,59 @@ static void thermal_zone_device_reset(struct thermal_zone_device *tz) thermal_zone_device_init(tz); } +static int thermal_zone_device_set_mode(struct thermal_zone_device *tz, + enum thermal_device_mode mode) +{ + int ret = 0; + + mutex_lock(&tz->lock); + + /* do nothing if mode isn't changing */ + if (mode == tz->mode) { + mutex_unlock(&tz->lock); + + return ret; + } + + if (tz->ops->set_mode) + ret = tz->ops->set_mode(tz, mode); + + if (!ret) + tz->mode = mode; + + mutex_unlock(&tz->lock); + + thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED); + + return ret; +} + +int thermal_zone_device_enable(struct thermal_zone_device *tz) +{ + return thermal_zone_device_set_mode(tz, THERMAL_DEVICE_ENABLED); +} +EXPORT_SYMBOL_GPL(thermal_zone_device_enable); + +int thermal_zone_device_disable(struct thermal_zone_device *tz) +{ + return thermal_zone_device_set_mode(tz, THERMAL_DEVICE_DISABLED); +} +EXPORT_SYMBOL_GPL(thermal_zone_device_disable); + +int thermal_zone_device_is_enabled(struct thermal_zone_device *tz) +{ + enum thermal_device_mode mode; + + mutex_lock(&tz->lock); + + mode = tz->mode; + + mutex_unlock(&tz->lock); + + return mode == THERMAL_DEVICE_ENABLED; +} +EXPORT_SYMBOL_GPL(thermal_zone_device_is_enabled); + void thermal_zone_device_update(struct thermal_zone_device *tz, enum thermal_notify_event event) { diff --git a/include/linux/thermal.h b/include/linux/thermal.h index a808f6fa2777..df013c39ba9b 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -416,6 +416,9 @@ int thermal_zone_get_offset(struct thermal_zone_device *tz); void thermal_cdev_update(struct thermal_cooling_device *); void thermal_notify_framework(struct thermal_zone_device *, int); +int thermal_zone_device_enable(struct thermal_zone_device *tz); +int thermal_zone_device_disable(struct thermal_zone_device *tz); +int thermal_zone_device_is_enabled(struct thermal_zone_device *tz); #else static inline struct thermal_zone_device *thermal_zone_device_register( const char *type, int trips, int mask, void *devdata, @@ -463,6 +466,16 @@ static inline void thermal_cdev_update(struct thermal_cooling_device *cdev) static inline void thermal_notify_framework(struct thermal_zone_device *tz, int trip) { } + +static inline int thermal_zone_device_enable(struct thermal_zone_device *tz) +{ return -ENODEV; } + +static inline int thermal_zone_device_disable(struct thermal_zone_device *tz) +{ return -ENODEV; } + +static inline int +thermal_zone_device_is_enabled(struct thermal_zone_device *tz) +{ return -ENODEV; } #endif /* CONFIG_THERMAL */ #endif /* __THERMAL_H__ */ -- cgit v1.2.3 From f5e50bf4d3ef0aba4d5414c9ed51fa4a02e2ed12 Mon Sep 17 00:00:00 2001 From: Andrzej Pietrasiewicz Date: Mon, 29 Jun 2020 14:29:25 +0200 Subject: thermal: Rename set_mode() to change_mode() set_mode() is only called when tzd's mode is about to change. 
Actual setting is performed in thermal_core, in thermal_zone_device_set_mode(). The meaning of set_mode() callback is actually to notify the driver about the mode being changed and giving the driver a chance to oppose such change. To better reflect the purpose of the method rename it to change_mode() Signed-off-by: Andrzej Pietrasiewicz [for acerhdf] Acked-by: Peter Kaestle Reviewed-by: Bartlomiej Zolnierkiewicz Reviewed-by: Amit Kucheria Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20200629122925.21729-12-andrzej.p@collabora.com --- drivers/platform/x86/acerhdf.c | 6 +++--- drivers/thermal/imx_thermal.c | 8 ++++---- drivers/thermal/intel/int340x_thermal/int3400_thermal.c | 6 +++--- drivers/thermal/intel/intel_quark_dts_thermal.c | 6 +++--- drivers/thermal/thermal_core.c | 4 ++-- include/linux/thermal.h | 2 +- 6 files changed, 16 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c index 76323855c80c..f816a8a13039 100644 --- a/drivers/platform/x86/acerhdf.c +++ b/drivers/platform/x86/acerhdf.c @@ -413,8 +413,8 @@ static inline void acerhdf_enable_kernelmode(void) * the temperature and the fan. * disabled: the BIOS takes control of the fan. */ -static int acerhdf_set_mode(struct thermal_zone_device *thermal, - enum thermal_device_mode mode) +static int acerhdf_change_mode(struct thermal_zone_device *thermal, + enum thermal_device_mode mode) { if (mode == THERMAL_DEVICE_DISABLED && kernelmode) acerhdf_revert_to_bios_mode(); @@ -473,7 +473,7 @@ static struct thermal_zone_device_ops acerhdf_dev_ops = { .bind = acerhdf_bind, .unbind = acerhdf_unbind, .get_temp = acerhdf_get_ec_temp, - .set_mode = acerhdf_set_mode, + .change_mode = acerhdf_change_mode, .get_trip_type = acerhdf_get_trip_type, .get_trip_hyst = acerhdf_get_trip_hyst, .get_trip_temp = acerhdf_get_trip_temp, diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index a02398118d88..9700ae39feb7 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c @@ -330,8 +330,8 @@ static int imx_get_temp(struct thermal_zone_device *tz, int *temp) return 0; } -static int imx_set_mode(struct thermal_zone_device *tz, - enum thermal_device_mode mode) +static int imx_change_mode(struct thermal_zone_device *tz, + enum thermal_device_mode mode) { struct imx_thermal_data *data = tz->devdata; struct regmap *map = data->tempmon; @@ -447,7 +447,7 @@ static struct thermal_zone_device_ops imx_tz_ops = { .bind = imx_bind, .unbind = imx_unbind, .get_temp = imx_get_temp, - .set_mode = imx_set_mode, + .change_mode = imx_change_mode, .get_trip_type = imx_get_trip_type, .get_trip_temp = imx_get_trip_temp, .get_crit_temp = imx_get_crit_temp, @@ -860,7 +860,7 @@ static int __maybe_unused imx_thermal_suspend(struct device *dev) * Need to disable thermal sensor, otherwise, when thermal core * try to get temperature before thermal sensor resume, a wrong * temperature will be read as the thermal sensor is powered - * down. This is done in set_mode() operation called from + * down. 
This is done in change_mode() operation called from * thermal_zone_device_disable() */ ret = thermal_zone_device_disable(data->tz); diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index ce49d3b100d5..d3732f624913 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -377,8 +377,8 @@ static int int3400_thermal_get_temp(struct thermal_zone_device *thermal, return 0; } -static int int3400_thermal_set_mode(struct thermal_zone_device *thermal, - enum thermal_device_mode mode) +static int int3400_thermal_change_mode(struct thermal_zone_device *thermal, + enum thermal_device_mode mode) { struct int3400_thermal_priv *priv = thermal->devdata; int result = 0; @@ -399,7 +399,7 @@ static int int3400_thermal_set_mode(struct thermal_zone_device *thermal, static struct thermal_zone_device_ops int3400_thermal_ops = { .get_temp = int3400_thermal_get_temp, - .set_mode = int3400_thermal_set_mode, + .change_mode = int3400_thermal_change_mode, }; static struct thermal_zone_params int3400_thermal_params = { diff --git a/drivers/thermal/intel/intel_quark_dts_thermal.c b/drivers/thermal/intel/intel_quark_dts_thermal.c index e29c3e330b17..3eafc6b0e6c3 100644 --- a/drivers/thermal/intel/intel_quark_dts_thermal.c +++ b/drivers/thermal/intel/intel_quark_dts_thermal.c @@ -298,8 +298,8 @@ static int sys_get_curr_temp(struct thermal_zone_device *tzd, return 0; } -static int sys_set_mode(struct thermal_zone_device *tzd, - enum thermal_device_mode mode) +static int sys_change_mode(struct thermal_zone_device *tzd, + enum thermal_device_mode mode) { int ret; @@ -319,7 +319,7 @@ static struct thermal_zone_device_ops tzone_ops = { .get_trip_type = sys_get_trip_type, .set_trip_temp = sys_set_trip_temp, .get_crit_temp = sys_get_crit_temp, - .set_mode = sys_set_mode, + .change_mode = sys_change_mode, }; static void free_soc_dts(struct soc_sensor_entry *aux_entry) diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index e613f5c07bad..a61e91513584 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -482,8 +482,8 @@ static int thermal_zone_device_set_mode(struct thermal_zone_device *tz, return ret; } - if (tz->ops->set_mode) - ret = tz->ops->set_mode(tz, mode); + if (tz->ops->change_mode) + ret = tz->ops->change_mode(tz, mode); if (!ret) tz->mode = mode; diff --git a/include/linux/thermal.h b/include/linux/thermal.h index df013c39ba9b..b9efaa780d88 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -76,7 +76,7 @@ struct thermal_zone_device_ops { struct thermal_cooling_device *); int (*get_temp) (struct thermal_zone_device *, int *); int (*set_trips) (struct thermal_zone_device *, int, int); - int (*set_mode) (struct thermal_zone_device *, + int (*change_mode) (struct thermal_zone_device *, enum thermal_device_mode); int (*get_trip_type) (struct thermal_zone_device *, int, enum thermal_trip_type *); -- cgit v1.2.3 From 2cdb54c93a7e5beb6f3f8b63575d9fb664dfc603 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Tue, 21 Apr 2020 19:04:05 +0200 Subject: docs: RCU: Convert rculist_nulls.txt to ReST - Add a SPDX header; - Adjust document title; - Some whitespace fixes and new line breaks; - Mark literal blocks as such; - Add it to RCU/index.rst. Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Paul E. 
McKenney --- Documentation/RCU/index.rst | 1 + Documentation/RCU/rculist_nulls.rst | 194 ++++++++++++++++++++++++++++++++++++ Documentation/RCU/rculist_nulls.txt | 172 -------------------------------- include/linux/rculist_nulls.h | 2 +- net/core/sock.c | 4 +- 5 files changed, 198 insertions(+), 175 deletions(-) create mode 100644 Documentation/RCU/rculist_nulls.rst delete mode 100644 Documentation/RCU/rculist_nulls.txt (limited to 'include') diff --git a/Documentation/RCU/index.rst b/Documentation/RCU/index.rst index fa7a2a8949b7..577a47e27f5d 100644 --- a/Documentation/RCU/index.rst +++ b/Documentation/RCU/index.rst @@ -17,6 +17,7 @@ RCU concepts rcu_dereference whatisRCU rcu + rculist_nulls listRCU NMI-RCU UP diff --git a/Documentation/RCU/rculist_nulls.rst b/Documentation/RCU/rculist_nulls.rst new file mode 100644 index 000000000000..d40374221d69 --- /dev/null +++ b/Documentation/RCU/rculist_nulls.rst @@ -0,0 +1,194 @@ +.. SPDX-License-Identifier: GPL-2.0 + +================================================= +Using RCU hlist_nulls to protect list and objects +================================================= + +This section describes how to use hlist_nulls to +protect read-mostly linked lists and +objects using SLAB_TYPESAFE_BY_RCU allocations. + +Please read the basics in Documentation/RCU/listRCU.rst + +Using special makers (called 'nulls') is a convenient way +to solve following problem : + +A typical RCU linked list managing objects which are +allocated with SLAB_TYPESAFE_BY_RCU kmem_cache can +use following algos : + +1) Lookup algo +-------------- + +:: + + rcu_read_lock() + begin: + obj = lockless_lookup(key); + if (obj) { + if (!try_get_ref(obj)) // might fail for free objects + goto begin; + /* + * Because a writer could delete object, and a writer could + * reuse these object before the RCU grace period, we + * must check key after getting the reference on object + */ + if (obj->key != key) { // not the object we expected + put_ref(obj); + goto begin; + } + } + rcu_read_unlock(); + +Beware that lockless_lookup(key) cannot use traditional hlist_for_each_entry_rcu() +but a version with an additional memory barrier (smp_rmb()) + +:: + + lockless_lookup(key) + { + struct hlist_node *node, *next; + for (pos = rcu_dereference((head)->first); + pos && ({ next = pos->next; smp_rmb(); prefetch(next); 1; }) && + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); + pos = rcu_dereference(next)) + if (obj->key == key) + return obj; + return NULL; + } + +And note the traditional hlist_for_each_entry_rcu() misses this smp_rmb():: + + struct hlist_node *node; + for (pos = rcu_dereference((head)->first); + pos && ({ prefetch(pos->next); 1; }) && + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); + pos = rcu_dereference(pos->next)) + if (obj->key == key) + return obj; + return NULL; + +Quoting Corey Minyard:: + + "If the object is moved from one list to another list in-between the + time the hash is calculated and the next field is accessed, and the + object has moved to the end of a new list, the traversal will not + complete properly on the list it should have, since the object will + be on the end of the new list and there's not a way to tell it's on a + new list and restart the list traversal. I think that this can be + solved by pre-fetching the "next" field (with proper barriers) before + checking the key." + +2) Insert algo +-------------- + +We need to make sure a reader cannot read the new 'obj->obj_next' value +and previous value of 'obj->key'. 
Or else, an item could be deleted +from a chain, and inserted into another chain. If new chain was empty +before the move, 'next' pointer is NULL, and lockless reader can +not detect it missed following items in original chain. + +:: + + /* + * Please note that new inserts are done at the head of list, + * not in the middle or end. + */ + obj = kmem_cache_alloc(...); + lock_chain(); // typically a spin_lock() + obj->key = key; + /* + * we need to make sure obj->key is updated before obj->next + * or obj->refcnt + */ + smp_wmb(); + atomic_set(&obj->refcnt, 1); + hlist_add_head_rcu(&obj->obj_node, list); + unlock_chain(); // typically a spin_unlock() + + +3) Remove algo +-------------- +Nothing special here, we can use a standard RCU hlist deletion. +But thanks to SLAB_TYPESAFE_BY_RCU, beware a deleted object can be reused +very very fast (before the end of RCU grace period) + +:: + + if (put_last_reference_on(obj) { + lock_chain(); // typically a spin_lock() + hlist_del_init_rcu(&obj->obj_node); + unlock_chain(); // typically a spin_unlock() + kmem_cache_free(cachep, obj); + } + + + +-------------------------------------------------------------------------- + +With hlist_nulls we can avoid extra smp_rmb() in lockless_lookup() +and extra smp_wmb() in insert function. + +For example, if we choose to store the slot number as the 'nulls' +end-of-list marker for each slot of the hash table, we can detect +a race (some writer did a delete and/or a move of an object +to another chain) checking the final 'nulls' value if +the lookup met the end of chain. If final 'nulls' value +is not the slot number, then we must restart the lookup at +the beginning. If the object was moved to the same chain, +then the reader doesn't care : It might eventually +scan the list again without harm. + + +1) lookup algo +-------------- + +:: + + head = &table[slot]; + rcu_read_lock(); + begin: + hlist_nulls_for_each_entry_rcu(obj, node, head, member) { + if (obj->key == key) { + if (!try_get_ref(obj)) // might fail for free objects + goto begin; + if (obj->key != key) { // not the object we expected + put_ref(obj); + goto begin; + } + goto out; + } + /* + * if the nulls value we got at the end of this lookup is + * not the expected one, we must restart lookup. + * We probably met an item that was moved to another chain. + */ + if (get_nulls_value(node) != slot) + goto begin; + obj = NULL; + + out: + rcu_read_unlock(); + +2) Insert function +------------------ + +:: + + /* + * Please note that new inserts are done at the head of list, + * not in the middle or end. + */ + obj = kmem_cache_alloc(cachep); + lock_chain(); // typically a spin_lock() + obj->key = key; + /* + * changes to obj->key must be visible before refcnt one + */ + smp_wmb(); + atomic_set(&obj->refcnt, 1); + /* + * insert obj in RCU way (readers might be traversing chain) + */ + hlist_nulls_add_head_rcu(&obj->obj_node, list); + unlock_chain(); // typically a spin_unlock() diff --git a/Documentation/RCU/rculist_nulls.txt b/Documentation/RCU/rculist_nulls.txt deleted file mode 100644 index 23f115dc87cf..000000000000 --- a/Documentation/RCU/rculist_nulls.txt +++ /dev/null @@ -1,172 +0,0 @@ -Using hlist_nulls to protect read-mostly linked lists and -objects using SLAB_TYPESAFE_BY_RCU allocations. 
- -Please read the basics in Documentation/RCU/listRCU.rst - -Using special makers (called 'nulls') is a convenient way -to solve following problem : - -A typical RCU linked list managing objects which are -allocated with SLAB_TYPESAFE_BY_RCU kmem_cache can -use following algos : - -1) Lookup algo --------------- -rcu_read_lock() -begin: -obj = lockless_lookup(key); -if (obj) { - if (!try_get_ref(obj)) // might fail for free objects - goto begin; - /* - * Because a writer could delete object, and a writer could - * reuse these object before the RCU grace period, we - * must check key after getting the reference on object - */ - if (obj->key != key) { // not the object we expected - put_ref(obj); - goto begin; - } -} -rcu_read_unlock(); - -Beware that lockless_lookup(key) cannot use traditional hlist_for_each_entry_rcu() -but a version with an additional memory barrier (smp_rmb()) - -lockless_lookup(key) -{ - struct hlist_node *node, *next; - for (pos = rcu_dereference((head)->first); - pos && ({ next = pos->next; smp_rmb(); prefetch(next); 1; }) && - ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); - pos = rcu_dereference(next)) - if (obj->key == key) - return obj; - return NULL; - -And note the traditional hlist_for_each_entry_rcu() misses this smp_rmb() : - - struct hlist_node *node; - for (pos = rcu_dereference((head)->first); - pos && ({ prefetch(pos->next); 1; }) && - ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); - pos = rcu_dereference(pos->next)) - if (obj->key == key) - return obj; - return NULL; -} - -Quoting Corey Minyard : - -"If the object is moved from one list to another list in-between the - time the hash is calculated and the next field is accessed, and the - object has moved to the end of a new list, the traversal will not - complete properly on the list it should have, since the object will - be on the end of the new list and there's not a way to tell it's on a - new list and restart the list traversal. I think that this can be - solved by pre-fetching the "next" field (with proper barriers) before - checking the key." - -2) Insert algo : ----------------- - -We need to make sure a reader cannot read the new 'obj->obj_next' value -and previous value of 'obj->key'. Or else, an item could be deleted -from a chain, and inserted into another chain. If new chain was empty -before the move, 'next' pointer is NULL, and lockless reader can -not detect it missed following items in original chain. - -/* - * Please note that new inserts are done at the head of list, - * not in the middle or end. - */ -obj = kmem_cache_alloc(...); -lock_chain(); // typically a spin_lock() -obj->key = key; -/* - * we need to make sure obj->key is updated before obj->next - * or obj->refcnt - */ -smp_wmb(); -atomic_set(&obj->refcnt, 1); -hlist_add_head_rcu(&obj->obj_node, list); -unlock_chain(); // typically a spin_unlock() - - -3) Remove algo --------------- -Nothing special here, we can use a standard RCU hlist deletion. -But thanks to SLAB_TYPESAFE_BY_RCU, beware a deleted object can be reused -very very fast (before the end of RCU grace period) - -if (put_last_reference_on(obj) { - lock_chain(); // typically a spin_lock() - hlist_del_init_rcu(&obj->obj_node); - unlock_chain(); // typically a spin_unlock() - kmem_cache_free(cachep, obj); -} - - - --------------------------------------------------------------------------- -With hlist_nulls we can avoid extra smp_rmb() in lockless_lookup() -and extra smp_wmb() in insert function. 
- -For example, if we choose to store the slot number as the 'nulls' -end-of-list marker for each slot of the hash table, we can detect -a race (some writer did a delete and/or a move of an object -to another chain) checking the final 'nulls' value if -the lookup met the end of chain. If final 'nulls' value -is not the slot number, then we must restart the lookup at -the beginning. If the object was moved to the same chain, -then the reader doesn't care : It might eventually -scan the list again without harm. - - -1) lookup algo - - head = &table[slot]; - rcu_read_lock(); -begin: - hlist_nulls_for_each_entry_rcu(obj, node, head, member) { - if (obj->key == key) { - if (!try_get_ref(obj)) // might fail for free objects - goto begin; - if (obj->key != key) { // not the object we expected - put_ref(obj); - goto begin; - } - goto out; - } -/* - * if the nulls value we got at the end of this lookup is - * not the expected one, we must restart lookup. - * We probably met an item that was moved to another chain. - */ - if (get_nulls_value(node) != slot) - goto begin; - obj = NULL; - -out: - rcu_read_unlock(); - -2) Insert function : --------------------- - -/* - * Please note that new inserts are done at the head of list, - * not in the middle or end. - */ -obj = kmem_cache_alloc(cachep); -lock_chain(); // typically a spin_lock() -obj->key = key; -/* - * changes to obj->key must be visible before refcnt one - */ -smp_wmb(); -atomic_set(&obj->refcnt, 1); -/* - * insert obj in RCU way (readers might be traversing chain) - */ -hlist_nulls_add_head_rcu(&obj->obj_node, list); -unlock_chain(); // typically a spin_unlock() diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h index 9670b54b484a..ff3e94779e73 100644 --- a/include/linux/rculist_nulls.h +++ b/include/linux/rculist_nulls.h @@ -162,7 +162,7 @@ static inline void hlist_nulls_add_fake(struct hlist_nulls_node *n) * The barrier() is needed to make sure compiler doesn't cache first element [1], * as this loop can be restarted [2] * [1] Documentation/core-api/atomic_ops.rst around line 114 - * [2] Documentation/RCU/rculist_nulls.txt around line 146 + * [2] Documentation/RCU/rculist_nulls.rst around line 146 */ #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ for (({barrier();}), \ diff --git a/net/core/sock.c b/net/core/sock.c index d832c650287c..6921a85a1177 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1973,7 +1973,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) /* * Before updating sk_refcnt, we must commit prior changes to memory - * (Documentation/RCU/rculist_nulls.txt for details) + * (Documentation/RCU/rculist_nulls.rst for details) */ smp_wmb(); refcount_set(&newsk->sk_refcnt, 2); @@ -3035,7 +3035,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) sk_rx_queue_clear(sk); /* * Before updating sk_refcnt, we must commit prior changes to memory - * (Documentation/RCU/rculist_nulls.txt for details) + * (Documentation/RCU/rculist_nulls.rst for details) */ smp_wmb(); refcount_set(&sk->sk_refcnt, 1); -- cgit v1.2.3 From 88748e330040ecf4681a2c8f344fd386862bf913 Mon Sep 17 00:00:00 2001 From: Madhuparna Bhowmik Date: Mon, 4 May 2020 08:05:05 -0400 Subject: trace: events: rcu: Change description of rcu_dyntick trace event The different strings used for describing the polarity are Start, End and StillNonIdle. Since StillIdle is not used in any trace point for rcu_dyntick, it can be removed and StillNonIdle can be added in the description. 
Because StillNonIdle is used in a few tracepoints for rcu_dyntick. Similarly, USER, IDLE and IRQ are used for describing context in the rcu_dyntick tracepoints. Since, "KERNEL" is not used for any of the rcu_dyntick tracepoints, remove it from the description. Signed-off-by: Madhuparna Bhowmik Signed-off-by: Joel Fernandes (Google) Signed-off-by: Paul E. McKenney --- include/trace/events/rcu.h | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index f9a7811148e2..af274d1532bf 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h @@ -435,11 +435,12 @@ TRACE_EVENT_RCU(rcu_fqs, #endif /* #if defined(CONFIG_TREE_RCU) */ /* - * Tracepoint for dyntick-idle entry/exit events. These take a string - * as argument: "Start" for entering dyntick-idle mode, "Startirq" for - * entering it from irq/NMI, "End" for leaving it, "Endirq" for leaving it - * to irq/NMI, "--=" for events moving towards idle, and "++=" for events - * moving away from idle. + * Tracepoint for dyntick-idle entry/exit events. These take 2 strings + * as argument: + * polarity: "Start", "End", "StillNonIdle" for entering, exiting or still not + * being in dyntick-idle mode. + * context: "USER" or "IDLE" or "IRQ". + * NMIs nested in IRQs are inferred with dynticks_nesting > 1 in IRQ context. * * These events also take a pair of numbers, which indicate the nesting * depth before and after the event of interest, and a third number that is -- cgit v1.2.3 From 24692fa22c30cb8fcfcabdc07a3c82964475b639 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Mon, 15 Jun 2020 08:46:49 +0200 Subject: rcu: Fix some kernel-doc warnings The current code provokes some kernel-doc warnings: ./kernel/rcu/tree.c:2915: warning: Function parameter or member 'count' not described in 'kfree_rcu_cpu' ./include/linux/rculist.h:517: warning: bad line: [@right ][node2 ... ] ./include/linux/rculist.h:2: WARNING: Unexpected indentation. This commit therefore moves the comment for "count" to the kernel-doc markup and adds a missing "*" on one kernel-doc continuation line. Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Paul E. McKenney --- include/linux/rculist.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/rculist.h b/include/linux/rculist.h index df587d181844..7eed65b5f713 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -512,7 +512,7 @@ static inline void hlist_replace_rcu(struct hlist_node *old, * @right: The hlist head on the right * * The lists start out as [@left ][node1 ... ] and - [@right ][node2 ... ] + * [@right ][node2 ... ] * The lists end up as [@left ][node2 ... ] * [@right ][node1 ... ] */ -- cgit v1.2.3 From c408b215f58f7156bb6bafb64c0263ee907033df Mon Sep 17 00:00:00 2001 From: "Uladzislau Rezki (Sony)" Date: Mon, 25 May 2020 23:47:55 +0200 Subject: rcu: Rename *_kfree_callback/*_kfree_rcu_offset/kfree_call_* The following changes are introduced: 1. Rename rcu_invoke_kfree_callback() to rcu_invoke_kvfree_callback(), as well as the associated trace events, so the rcu_kfree_callback(), becomes rcu_kvfree_callback(). The reason is to be aligned with kvfree() notation. 2. Rename __is_kfree_rcu_offset to __is_kvfree_rcu_offset. All RCU paths use kvfree() now instead of kfree(), thus rename it. 3. Rename kfree_call_rcu() to the kvfree_call_rcu(). The reason is, it is capable of freeing vmalloc() memory now. 
Do the same with __kfree_rcu() macro, it becomes __kvfree_rcu(), the goal is the same. Reviewed-by: Joel Fernandes (Google) Co-developed-by: Joel Fernandes (Google) Signed-off-by: Joel Fernandes (Google) Signed-off-by: Uladzislau Rezki (Sony) Signed-off-by: Paul E. McKenney --- include/linux/rcupdate.h | 14 +++++++------- include/linux/rcutiny.h | 2 +- include/linux/rcutree.h | 2 +- include/trace/events/rcu.h | 8 ++++---- kernel/rcu/tiny.c | 4 ++-- kernel/rcu/tree.c | 16 ++++++++-------- 6 files changed, 23 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 659cbfa7581a..b344fc800a9b 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -828,17 +828,17 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) /* * Does the specified offset indicate that the corresponding rcu_head - * structure can be handled by kfree_rcu()? + * structure can be handled by kvfree_rcu()? */ -#define __is_kfree_rcu_offset(offset) ((offset) < 4096) +#define __is_kvfree_rcu_offset(offset) ((offset) < 4096) /* * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain. */ -#define __kfree_rcu(head, offset) \ +#define __kvfree_rcu(head, offset) \ do { \ - BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \ - kfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \ + BUILD_BUG_ON(!__is_kvfree_rcu_offset(offset)); \ + kvfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \ } while (0) /** @@ -857,7 +857,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) * Because the functions are not allowed in the low-order 4096 bytes of * kernel virtual memory, offsets up to 4095 bytes can be accommodated. * If the offset is larger than 4095 bytes, a compile-time error will - * be generated in __kfree_rcu(). If this error is triggered, you can + * be generated in __kvfree_rcu(). If this error is triggered, you can * either fall back to use of call_rcu() or rearrange the structure to * position the rcu_head structure into the first 4096 bytes. * @@ -872,7 +872,7 @@ do { \ typeof (ptr) ___p = (ptr); \ \ if (___p) \ - __kfree_rcu(&((___p)->rhf), offsetof(typeof(*(ptr)), rhf)); \ + __kvfree_rcu(&((___p)->rhf), offsetof(typeof(*(ptr)), rhf)); \ } while (0) /* diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 8512caeb7682..fb2eb39c484f 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -34,7 +34,7 @@ static inline void synchronize_rcu_expedited(void) synchronize_rcu(); } -static inline void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func) +static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func) { call_rcu(head, func); } diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index d5cc9d675987..d2f4064ebd1d 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -33,7 +33,7 @@ static inline void rcu_virt_note_context_switch(int cpu) } void synchronize_rcu_expedited(void); -void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func); +void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func); void rcu_barrier(void); bool rcu_eqs_special_set(int cpu); diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index f9a7811148e2..0ee93d0b1daa 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h @@ -506,13 +506,13 @@ TRACE_EVENT_RCU(rcu_callback, /* * Tracepoint for the registration of a single RCU callback of the special - * kfree() form. 
The first argument is the RCU type, the second argument + * kvfree() form. The first argument is the RCU type, the second argument * is a pointer to the RCU callback, the third argument is the offset * of the callback within the enclosing RCU-protected data structure, * the fourth argument is the number of lazy callbacks queued, and the * fifth argument is the total number of callbacks queued. */ -TRACE_EVENT_RCU(rcu_kfree_callback, +TRACE_EVENT_RCU(rcu_kvfree_callback, TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset, long qlen), @@ -596,12 +596,12 @@ TRACE_EVENT_RCU(rcu_invoke_callback, /* * Tracepoint for the invocation of a single RCU callback of the special - * kfree() form. The first argument is the RCU flavor, the second + * kvfree() form. The first argument is the RCU flavor, the second * argument is a pointer to the RCU callback, and the third argument * is the offset of the callback within the enclosing RCU-protected * data structure. */ -TRACE_EVENT_RCU(rcu_invoke_kfree_callback, +TRACE_EVENT_RCU(rcu_invoke_kvfree_callback, TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset), diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c index 4b99f7b88bee..aa897c3f2e92 100644 --- a/kernel/rcu/tiny.c +++ b/kernel/rcu/tiny.c @@ -85,8 +85,8 @@ static inline bool rcu_reclaim_tiny(struct rcu_head *head) unsigned long offset = (unsigned long)head->func; rcu_lock_acquire(&rcu_callback_map); - if (__is_kfree_rcu_offset(offset)) { - trace_rcu_invoke_kfree_callback("", head, offset); + if (__is_kvfree_rcu_offset(offset)) { + trace_rcu_invoke_kvfree_callback("", head, offset); kvfree((void *)head - offset); rcu_lock_release(&rcu_callback_map); return true; diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 67c4b984c499..f22c47e72287 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2905,8 +2905,8 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func) return; // Enqueued onto ->nocb_bypass, so just leave. // If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock. rcu_segcblist_enqueue(&rdp->cblist, head); - if (__is_kfree_rcu_offset((unsigned long)func)) - trace_rcu_kfree_callback(rcu_state.name, head, + if (__is_kvfree_rcu_offset((unsigned long)func)) + trace_rcu_kvfree_callback(rcu_state.name, head, (unsigned long)func, rcu_segcblist_n_cbs(&rdp->cblist)); else @@ -3146,7 +3146,7 @@ static void kfree_rcu_work(struct work_struct *work) bkvhead[i]->records); } else { // vmalloc() / vfree(). for (j = 0; j < bkvhead[i]->nr_records; j++) { - trace_rcu_invoke_kfree_callback( + trace_rcu_invoke_kvfree_callback( rcu_state.name, bkvhead[i]->records[j], 0); @@ -3179,9 +3179,9 @@ static void kfree_rcu_work(struct work_struct *work) next = head->next; debug_rcu_head_unqueue((struct rcu_head *)ptr); rcu_lock_acquire(&rcu_callback_map); - trace_rcu_invoke_kfree_callback(rcu_state.name, head, offset); + trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset); - if (!WARN_ON_ONCE(!__is_kfree_rcu_offset(offset))) + if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset))) kvfree(ptr); rcu_lock_release(&rcu_callback_map); @@ -3344,12 +3344,12 @@ kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr) * one, that is used only when the main path can not be maintained temporary, * due to memory pressure. * - * Each kfree_call_rcu() request is added to a batch. The batch will be drained + * Each kvfree_call_rcu() request is added to a batch. The batch will be drained * every KFREE_DRAIN_JIFFIES number of jiffies. 
All the objects in the batch will * be free'd in workqueue context. This allows us to: batch requests together to * reduce the number of grace periods during heavy kfree_rcu()/kvfree_rcu() load. */ -void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func) +void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func) { unsigned long flags; struct kfree_rcu_cpu *krcp; @@ -3388,7 +3388,7 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func) unlock_return: krc_this_cpu_unlock(krcp, flags); } -EXPORT_SYMBOL_GPL(kfree_call_rcu); +EXPORT_SYMBOL_GPL(kvfree_call_rcu); static unsigned long kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc) -- cgit v1.2.3 From ce4dce123fdcb5f209752d13f9f06926be65fc78 Mon Sep 17 00:00:00 2001 From: "Uladzislau Rezki (Sony)" Date: Mon, 25 May 2020 23:47:57 +0200 Subject: rcu: Introduce 2 arg kvfree_rcu() interface kvmalloc() can allocate two types of objects: SLAB backed and vmalloc backed. How it behaves depends on requested object's size and memory pressure. Add a kvfree_rcu() interface that can free memory allocated via kvmalloc(). It is a simple alias to kfree_rcu() which can now handle either type of object. struct test_kvfree_rcu { struct rcu_head rcu; unsigned char array[100]; }; struct test_kvfree_rcu *p; p = kvmalloc(10 * PAGE_SIZE); if (p) kvfree_rcu(p, rcu); Signed-off-by: Uladzislau Rezki (Sony) Co-developed-by: Joel Fernandes (Google) Reviewed-by: Joel Fernandes (Google) Signed-off-by: Joel Fernandes (Google) Signed-off-by: Paul E. McKenney --- include/linux/rcupdate.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'include') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index b344fc800a9b..51b26ab02878 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -875,6 +875,15 @@ do { \ __kvfree_rcu(&((___p)->rhf), offsetof(typeof(*(ptr)), rhf)); \ } while (0) +/** + * kvfree_rcu() - kvfree an object after a grace period. + * @ptr: pointer to kvfree + * @rhf: the name of the struct rcu_head within the type of @ptr. + * + * Same as kfree_rcu(), just simple alias. + */ +#define kvfree_rcu(ptr, rhf) kfree_rcu(ptr, rhf) + /* * Place this after a lock-acquisition primitive to guarantee that * an UNLOCK+LOCK pair acts as a full barrier. This guarantee applies -- cgit v1.2.3 From 3042f83f19bec2e0cd356f72b39e4d816e8cd5ff Mon Sep 17 00:00:00 2001 From: "Uladzislau Rezki (Sony)" Date: Mon, 25 May 2020 23:47:58 +0200 Subject: rcu: Support reclaim for head-less object Update the kvfree_call_rcu() function with head-less support. This allows RCU to reclaim objects without an embedded rcu_head. tree-RCU: We introduce two chains of arrays to store SLAB-backed and vmalloc pointers, each. Storage in either of these arrays does not require embedding an rcu_head within the object. Maintaining the arrays may become impossible due to high memory pressure. For such cases there is an emergency path. Objects with rcu_head inside are just queued on a backup rcu_head list. Later on that list is drained. As for the head-less variant, as the current context can sleep, the following emergency measures are applied: a) Synchronously wait until a grace period has elapsed. b) Call kvfree(). tiny-RCU: For double argument calls, there are no new changes in behavior. For single argument call, kvfree() is directly inlined on the current stack after a synchronize_rcu() call. Note that for tiny-RCU, any call to synchronize_rcu() is actually a quiescent state, therefore it does nothing. 
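A minimal caller-side sketch of the constraint described above, assuming a head-less object obtained from kvmalloc(); nbytes and the surrounding context are placeholders, not part of this patch:

	void *p = kvmalloc(nbytes, GFP_KERNEL);
	...
	/*
	 * Head-less reclaim: no rcu_head is embedded in the object, so
	 * under memory pressure the slow path may fall back to
	 * synchronize_rcu() followed by kvfree(), i.e. this call is
	 * allowed to sleep.
	 */
	might_sleep();
	kvfree_call_rcu(NULL, (rcu_callback_t) p);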
Reviewed-by: Joel Fernandes (Google) Signed-off-by: Uladzislau Rezki (Sony) Signed-off-by: Joel Fernandes (Google) Co-developed-by: Joel Fernandes (Google) Signed-off-by: Paul E. McKenney --- include/linux/rcutiny.h | 18 +++++++++++++++++- kernel/rcu/tree.c | 45 +++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 60 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index fb2eb39c484f..5cc9637cac16 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -34,9 +34,25 @@ static inline void synchronize_rcu_expedited(void) synchronize_rcu(); } +/* + * Add one more declaration of kvfree() here. It is + * not so straight forward to just include + * where it is defined due to getting many compile + * errors caused by that include. + */ +extern void kvfree(const void *addr); + static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func) { - call_rcu(head, func); + if (head) { + call_rcu(head, func); + return; + } + + // kvfree_rcu(one_arg) call. + might_sleep(); + synchronize_rcu(); + kvfree((void *) func); } void rcu_qs(void); diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index f22c47e72287..01f29e4500ba 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3314,6 +3314,13 @@ kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr) if (IS_ENABLED(CONFIG_PREEMPT_RT)) return false; + /* + * NOTE: For one argument of kvfree_rcu() we can + * drop the lock and get the page in sleepable + * context. That would allow to maintain an array + * for the CONFIG_PREEMPT_RT as well if no cached + * pages are available. + */ bnode = (struct kvfree_rcu_bulk_data *) __get_free_page(GFP_NOWAIT | __GFP_NOWARN); } @@ -3353,16 +3360,33 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func) { unsigned long flags; struct kfree_rcu_cpu *krcp; + bool success; void *ptr; + if (head) { + ptr = (void *) head - (unsigned long) func; + } else { + /* + * Please note there is a limitation for the head-less + * variant, that is why there is a clear rule for such + * objects: it can be used from might_sleep() context + * only. For other places please embed an rcu_head to + * your data. + */ + might_sleep(); + ptr = (unsigned long *) func; + } + krcp = krc_this_cpu_lock(&flags); - ptr = (void *)head - (unsigned long)func; // Queue the object but don't yet schedule the batch. if (debug_rcu_head_queue(ptr)) { // Probable double kfree_rcu(), just leak. WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n", __func__, head); + + // Mark as success and leave. + success = true; goto unlock_return; } @@ -3370,10 +3394,16 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func) * Under high memory pressure GFP_NOWAIT can fail, * in that case the emergency path is maintained. */ - if (unlikely(!kvfree_call_rcu_add_ptr_to_bulk(krcp, ptr))) { + success = kvfree_call_rcu_add_ptr_to_bulk(krcp, ptr); + if (!success) { + if (head == NULL) + // Inline if kvfree_rcu(one_arg) call. + goto unlock_return; + head->func = func; head->next = krcp->head; krcp->head = head; + success = true; } WRITE_ONCE(krcp->count, krcp->count + 1); @@ -3387,6 +3417,17 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func) unlock_return: krc_this_cpu_unlock(krcp, flags); + + /* + * Inline kvfree() after synchronize_rcu(). We can do + * it from might_sleep() context only, so the current + * CPU can pass the QS state. 
+ */ + if (!success) { + debug_rcu_head_unqueue((struct rcu_head *) ptr); + synchronize_rcu(); + kvfree(ptr); + } } EXPORT_SYMBOL_GPL(kvfree_call_rcu); -- cgit v1.2.3 From 1835f475e3518ade61e25a57572c78b953778656 Mon Sep 17 00:00:00 2001 From: "Uladzislau Rezki (Sony)" Date: Mon, 25 May 2020 23:47:59 +0200 Subject: rcu: Introduce single argument kvfree_rcu() interface Make kvfree_rcu() capable of freeing objects that will not embed an rcu_head within it. This saves storage overhead in such objects. Reclaiming headless objects this way requires only a single argument (pointer to the object). After this patch, there are two ways to use kvfree_rcu(): a) kvfree_rcu(ptr, rhf); struct X { struct rcu_head rhf; unsigned char data[100]; }; void *ptr = kvmalloc(sizeof(struct X), GFP_KERNEL); if (ptr) kvfree_rcu(ptr, rhf); b) kvfree_rcu(ptr); void *ptr = kvmalloc(some_bytes, GFP_KERNEL); if (ptr) kvfree_rcu(ptr); Note that the headless usage (example b) can only be used in a code that can sleep. This is enforced by the CONFIG_DEBUG_ATOMIC_SLEEP option. Co-developed-by: Joel Fernandes (Google) Reviewed-by: Joel Fernandes (Google) Signed-off-by: Uladzislau Rezki (Sony) Signed-off-by: Paul E. McKenney --- include/linux/rcupdate.h | 38 ++++++++++++++++++++++++++++++++++---- 1 file changed, 34 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 51b26ab02878..d15d46db61f7 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -877,12 +877,42 @@ do { \ /** * kvfree_rcu() - kvfree an object after a grace period. - * @ptr: pointer to kvfree - * @rhf: the name of the struct rcu_head within the type of @ptr. * - * Same as kfree_rcu(), just simple alias. + * This macro consists of one or two arguments and it is + * based on whether an object is head-less or not. If it + * has a head then a semantic stays the same as it used + * to be before: + * + * kvfree_rcu(ptr, rhf); + * + * where @ptr is a pointer to kvfree(), @rhf is the name + * of the rcu_head structure within the type of @ptr. + * + * When it comes to head-less variant, only one argument + * is passed and that is just a pointer which has to be + * freed after a grace period. Therefore the semantic is + * + * kvfree_rcu(ptr); + * + * where @ptr is a pointer to kvfree(). + * + * Please note, head-less way of freeing is permitted to + * use from a context that has to follow might_sleep() + * annotation. Otherwise, please switch and embed the + * rcu_head structure within the type of @ptr. */ -#define kvfree_rcu(ptr, rhf) kfree_rcu(ptr, rhf) +#define kvfree_rcu(...) KVFREE_GET_MACRO(__VA_ARGS__, \ + kvfree_rcu_arg_2, kvfree_rcu_arg_1)(__VA_ARGS__) + +#define KVFREE_GET_MACRO(_1, _2, NAME, ...) NAME +#define kvfree_rcu_arg_2(ptr, rhf) kfree_rcu(ptr, rhf) +#define kvfree_rcu_arg_1(ptr) \ +do { \ + typeof(ptr) ___p = (ptr); \ + \ + if (___p) \ + kvfree_call_rcu(NULL, (rcu_callback_t) (___p)); \ +} while (0) /* * Place this after a lock-acquisition primitive to guarantee that -- cgit v1.2.3 From c7dcf8106f7570b133b05ff68fd4100064965d9d Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 12 Jun 2020 13:11:29 -0700 Subject: rcu-tasks: Fix synchronize_rcu_tasks_trace() header comment The synchronize_rcu_tasks_trace() header comment incorrectly claims that any number of things delimit RCU Tasks Trace read-side critical sections, when in fact only rcu_read_lock_trace() and rcu_read_unlock_trace() do so. 
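For reference, a minimal sketch of the delimiters in question; the reader body shown is a hypothetical placeholder:

	rcu_read_lock_trace();		/* begin read-side critical section */
	access_trace_protected_data();	/* hypothetical reader work */
	rcu_read_unlock_trace();	/* end read-side critical section */

	/* An updater blocks until all such readers have exited: */
	synchronize_rcu_tasks_trace();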
This commit therefore fixes this comment, and, while in the area, fixes a typo in the rcu_read_lock_trace() header comment. Reported-by: Alexei Starovoitov Signed-off-by: Paul E. McKenney --- include/linux/rcupdate_trace.h | 4 ++-- kernel/rcu/tasks.h | 9 ++++----- 2 files changed, 6 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h index 4c25a41f8b27..d9015aac78c6 100644 --- a/include/linux/rcupdate_trace.h +++ b/include/linux/rcupdate_trace.h @@ -36,8 +36,8 @@ void rcu_read_unlock_trace_special(struct task_struct *t, int nesting); /** * rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section * - * When synchronize_rcu_trace() is invoked by one task, then that task - * is guaranteed to block until all other tasks exit their read-side + * When synchronize_rcu_tasks_trace() is invoked by one task, then that + * task is guaranteed to block until all other tasks exit their read-side * critical sections. Similarly, if call_rcu_trace() is invoked on one * task while other tasks are within RCU read-side critical sections, * invocation of the corresponding RCU callback is deferred until after diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index ce23f6cc5043..a77298c1d126 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -1118,11 +1118,10 @@ EXPORT_SYMBOL_GPL(call_rcu_tasks_trace); * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period * * Control will return to the caller some time after a trace rcu-tasks - * grace period has elapsed, in other words after all currently - * executing rcu-tasks read-side critical sections have elapsed. These - * read-side critical sections are delimited by calls to schedule(), - * cond_resched_tasks_rcu_qs(), userspace execution, and (in theory, - * anyway) cond_resched(). + * grace period has elapsed, in other words after all currently executing + * rcu-tasks read-side critical sections have elapsed. These read-side + * critical sections are delimited by calls to rcu_read_lock_trace() + * and rcu_read_unlock_trace(). * * This is a very specialized primitive, intended only for a few uses in * tracing and other situations requiring manipulation of function preambles -- cgit v1.2.3 From 4a5f133c15b77c4018e8d7996541868ac94afb4f Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 24 Apr 2020 11:21:40 -0700 Subject: rcutorture: Add races with task-exit processing Several variants of Linux-kernel RCU interact with task-exit processing, including preemptible RCU, Tasks RCU, and Tasks Trace RCU. This commit therefore adds testing of this interaction to rcutorture by adding rcutorture.read_exit_burst and rcutorture.read_exit_delay kernel-boot parameters. These kernel parameters control the frequency and spacing of special read-then-exit kthreads that are spawned. [ paulmck: Apply feedback from Dan Carpenter's static checker. ] [ paulmck: Reduce latency to avoid false-positive shutdown hangs. ] Signed-off-by: Paul E. 
McKenney --- Documentation/admin-guide/kernel-parameters.txt | 14 +++ include/linux/torture.h | 5 ++ kernel/rcu/rcutorture.c | 112 +++++++++++++++++++++++- 3 files changed, 128 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index fb95fad81c79..a0dcc925c8a2 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -4258,6 +4258,20 @@ Set time (jiffies) between CPU-hotplug operations, or zero to disable CPU-hotplug testing. + rcutorture.read_exit= [KNL] + Set the number of read-then-exit kthreads used + to test the interaction of RCU updaters and + task-exit processing. + + rcutorture.read_exit_burst= [KNL] + The number of times in a given read-then-exit + episode that a set of read-then-exit kthreads + is spawned. + + rcutorture.read_exit_delay= [KNL] + The delay, in seconds, between successive + read-then-exit testing episodes. + rcutorture.shuffle_interval= [KNL] Set task-shuffle interval (s). Shuffling tasks allows some CPUs to go into dyntick-idle mode diff --git a/include/linux/torture.h b/include/linux/torture.h index 629b66e6c161..7f65bd1dd307 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -55,6 +55,11 @@ struct torture_random_state { #define DEFINE_TORTURE_RANDOM_PERCPU(name) \ DEFINE_PER_CPU(struct torture_random_state, name) unsigned long torture_random(struct torture_random_state *trsp); +static inline void torture_random_init(struct torture_random_state *trsp) +{ + trsp->trs_state = 0; + trsp->trs_count = 0; +} /* Task shuffler, which causes CPUs to occasionally go idle. */ void torture_shuffle_task_register(struct task_struct *tp); diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index efb792e13fca..2621a339c8a4 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -109,6 +109,10 @@ torture_param(int, object_debug, 0, torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)"); torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable"); +torture_param(int, read_exit_delay, 13, + "Delay between read-then-exit episodes (s)"); +torture_param(int, read_exit_burst, 16, + "# of read-then-exit bursts per episode, zero to disable"); torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles"); torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable."); torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable."); @@ -146,6 +150,7 @@ static struct task_struct *stall_task; static struct task_struct *fwd_prog_task; static struct task_struct **barrier_cbs_tasks; static struct task_struct *barrier_task; +static struct task_struct *read_exit_task; #define RCU_TORTURE_PIPE_LEN 10 @@ -177,6 +182,7 @@ static long n_rcu_torture_boosts; static atomic_long_t n_rcu_torture_timers; static long n_barrier_attempts; static long n_barrier_successes; /* did rcu_barrier test succeed? 
*/ +static unsigned long n_read_exits; static struct list_head rcu_torture_removed; static unsigned long shutdown_jiffies; @@ -1539,10 +1545,11 @@ rcu_torture_stats_print(void) n_rcu_torture_boosts, atomic_long_read(&n_rcu_torture_timers)); torture_onoff_stats(); - pr_cont("barrier: %ld/%ld:%ld\n", + pr_cont("barrier: %ld/%ld:%ld ", data_race(n_barrier_successes), data_race(n_barrier_attempts), data_race(n_rcu_torture_barrier_error)); + pr_cont("read-exits: %ld\n", data_race(n_read_exits)); pr_alert("%s%s ", torture_type, TORTURE_FLAG); if (atomic_read(&n_rcu_torture_mberror) || @@ -1634,7 +1641,8 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d " "stall_cpu_block=%d " "n_barrier_cbs=%d " - "onoff_interval=%d onoff_holdoff=%d\n", + "onoff_interval=%d onoff_holdoff=%d " + "read_exit_delay=%d read_exit_burst=%d\n", torture_type, tag, nrealreaders, nfakewriters, stat_interval, verbose, test_no_idle_hz, shuffle_interval, stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, @@ -1643,7 +1651,8 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff, stall_cpu_block, n_barrier_cbs, - onoff_interval, onoff_holdoff); + onoff_interval, onoff_holdoff, + read_exit_delay, read_exit_burst); } static int rcutorture_booster_cleanup(unsigned int cpu) @@ -2338,6 +2347,99 @@ static bool rcu_torture_can_boost(void) return true; } +static bool read_exit_child_stop; +static bool read_exit_child_stopped; +static wait_queue_head_t read_exit_wq; + +// Child kthread which just does an rcutorture reader and exits. +static int rcu_torture_read_exit_child(void *trsp_in) +{ + struct torture_random_state *trsp = trsp_in; + + set_user_nice(current, MAX_NICE); + // Minimize time between reading and exiting. + while (!kthread_should_stop()) + schedule_timeout_uninterruptible(1); + (void)rcu_torture_one_read(trsp); + return 0; +} + +// Parent kthread which creates and destroys read-exit child kthreads. +static int rcu_torture_read_exit(void *unused) +{ + int count = 0; + bool errexit = false; + int i; + struct task_struct *tsp; + DEFINE_TORTURE_RANDOM(trs); + + // Allocate and initialize. + set_user_nice(current, MAX_NICE); + VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test"); + + // Each pass through this loop does one read-exit episode. + do { + if (++count > read_exit_burst) { + VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode"); + rcu_barrier(); // Wait for task_struct free, avoid OOM. + for (i = 0; i < read_exit_delay; i++) { + schedule_timeout_uninterruptible(HZ); + if (READ_ONCE(read_exit_child_stop)) + break; + } + if (!READ_ONCE(read_exit_child_stop)) + VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode"); + count = 0; + } + if (READ_ONCE(read_exit_child_stop)) + break; + // Spawn child. + tsp = kthread_run(rcu_torture_read_exit_child, + &trs, "%s", + "rcu_torture_read_exit_child"); + if (IS_ERR(tsp)) { + VERBOSE_TOROUT_ERRSTRING("out of memory"); + errexit = true; + tsp = NULL; + break; + } + cond_resched(); + kthread_stop(tsp); + n_read_exits ++; + stutter_wait("rcu_torture_read_exit"); + } while (!errexit && !READ_ONCE(read_exit_child_stop)); + + // Clean up and exit. + smp_store_release(&read_exit_child_stopped, true); // After reaping. + smp_mb(); // Store before wakeup. 
+ wake_up(&read_exit_wq); + while (!torture_must_stop()) + schedule_timeout_uninterruptible(1); + torture_kthread_stopping("rcu_torture_read_exit"); + return 0; +} + +static int rcu_torture_read_exit_init(void) +{ + if (read_exit_burst <= 0) + return -EINVAL; + init_waitqueue_head(&read_exit_wq); + read_exit_child_stop = false; + read_exit_child_stopped = false; + return torture_create_kthread(rcu_torture_read_exit, NULL, + read_exit_task); +} + +static void rcu_torture_read_exit_cleanup(void) +{ + if (!read_exit_task) + return; + WRITE_ONCE(read_exit_child_stop, true); + smp_mb(); // Above write before wait. + wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped)); + torture_stop_kthread(rcutorture_read_exit, read_exit_task); +} + static enum cpuhp_state rcutor_hp; static void @@ -2359,6 +2461,7 @@ rcu_torture_cleanup(void) } show_rcu_gp_kthreads(); + rcu_torture_read_exit_cleanup(); rcu_torture_barrier_cleanup(); torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task); torture_stop_kthread(rcu_torture_stall, stall_task); @@ -2680,6 +2783,9 @@ rcu_torture_init(void) if (firsterr) goto unwind; firsterr = rcu_torture_barrier_init(); + if (firsterr) + goto unwind; + firsterr = rcu_torture_read_exit_init(); if (firsterr) goto unwind; if (object_debug) -- cgit v1.2.3 From c93773c1a3fedf6c3f6fa12833e2b74a9897c3e3 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 12 Feb 2020 13:29:15 -0800 Subject: rculist: Add ASSERT_EXCLUSIVE_ACCESS() to __list_splice_init_rcu() After the sync() in __list_splice_init_rcu(), there should be no readers traversing the old list. This commit therefore enlists the help of KCSAN to verify this condition via a pair of calls to ASSERT_EXCLUSIVE_ACCESS(). Signed-off-by: Paul E. McKenney Cc: Marco Elver --- include/linux/rculist.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/linux/rculist.h b/include/linux/rculist.h index df587d181844..2ebd112f86f7 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -248,6 +248,8 @@ static inline void __list_splice_init_rcu(struct list_head *list, */ sync(); + ASSERT_EXCLUSIVE_ACCESS(*first); + ASSERT_EXCLUSIVE_ACCESS(*last); /* * Readers are finished with the source list, so perform splice. -- cgit v1.2.3 From 142240398e50e5fe3171bcf2459856603be13a39 Mon Sep 17 00:00:00 2001 From: Richard Guy Briggs Date: Sat, 27 Jun 2020 23:24:19 -0400 Subject: audit: add gfp parameter to audit_log_nfcfg Fixed an inconsistent use of GFP flags in nft_obj_notify() that used GFP_KERNEL when a GFP flag was passed in to that function. Given this allocated memory was then used in audit_log_nfcfg() it led to an audit of all other GFP allocations in net/netfilter/nf_tables_api.c and a modification of audit_log_nfcfg() to accept a GFP parameter. 
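For reference, a sketch of the resulting call with the allocation-flags argument appended (a hypothetical caller registering a table from process context; table and private are whatever that caller already has in scope):

	audit_log_nfcfg(table->name, table->af, private->number,
			AUDIT_XT_OP_REGISTER, GFP_KERNEL);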
Reported-by: Dan Carptenter Signed-off-by: Richard Guy Briggs Signed-off-by: Paul Moore --- include/linux/audit.h | 8 ++++---- kernel/auditsc.c | 4 ++-- net/bridge/netfilter/ebtables.c | 6 +++--- net/netfilter/nf_tables_api.c | 33 +++++++++++++++++++++------------ net/netfilter/x_tables.c | 5 +++-- 5 files changed, 33 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/include/linux/audit.h b/include/linux/audit.h index 604ede630580..d93739f7a35a 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -404,7 +404,7 @@ extern void __audit_fanotify(unsigned int response); extern void __audit_tk_injoffset(struct timespec64 offset); extern void __audit_ntp_log(const struct audit_ntp_data *ad); extern void __audit_log_nfcfg(const char *name, u8 af, unsigned int nentries, - enum audit_nfcfgop op); + enum audit_nfcfgop op, gfp_t gfp); static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) { @@ -542,10 +542,10 @@ static inline void audit_ntp_log(const struct audit_ntp_data *ad) static inline void audit_log_nfcfg(const char *name, u8 af, unsigned int nentries, - enum audit_nfcfgop op) + enum audit_nfcfgop op, gfp_t gfp) { if (audit_enabled) - __audit_log_nfcfg(name, af, nentries, op); + __audit_log_nfcfg(name, af, nentries, op, gfp); } extern int audit_n_rules; @@ -683,7 +683,7 @@ static inline void audit_ptrace(struct task_struct *t) static inline void audit_log_nfcfg(const char *name, u8 af, unsigned int nentries, - enum audit_nfcfgop op) + enum audit_nfcfgop op, gfp_t gfp) { } #define audit_n_rules 0 diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 3a9100e95fda..eae1a599ffe3 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -2572,12 +2572,12 @@ void __audit_ntp_log(const struct audit_ntp_data *ad) } void __audit_log_nfcfg(const char *name, u8 af, unsigned int nentries, - enum audit_nfcfgop op) + enum audit_nfcfgop op, gfp_t gfp) { struct audit_buffer *ab; char comm[sizeof(current->comm)]; - ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_NETFILTER_CFG); + ab = audit_log_start(audit_context(), gfp, AUDIT_NETFILTER_CFG); if (!ab) return; audit_log_format(ab, "table=%s family=%u entries=%u op=%s", diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index c83ffe912163..b13b49b9f75c 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1047,7 +1047,7 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl, vfree(counterstmp); audit_log_nfcfg(repl->name, AF_BRIDGE, repl->nentries, - AUDIT_XT_OP_REPLACE); + AUDIT_XT_OP_REPLACE, GFP_KERNEL); return ret; free_unlock: @@ -1123,7 +1123,7 @@ static void __ebt_unregister_table(struct net *net, struct ebt_table *table) list_del(&table->list); mutex_unlock(&ebt_mutex); audit_log_nfcfg(table->name, AF_BRIDGE, table->private->nentries, - AUDIT_XT_OP_UNREGISTER); + AUDIT_XT_OP_UNREGISTER, GFP_KERNEL); EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size, ebt_cleanup_entry, net, NULL); if (table->private->nentries) @@ -1218,7 +1218,7 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table, } audit_log_nfcfg(repl->name, AF_BRIDGE, repl->nentries, - AUDIT_XT_OP_REGISTER); + AUDIT_XT_OP_REGISTER, GFP_KERNEL); return ret; free_unlock: mutex_unlock(&ebt_mutex); diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 164700273947..f7ff91479647 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -702,7 +702,8 @@ static void 
nf_tables_table_notify(const struct nft_ctx *ctx, int event) ctx->table->use, event == NFT_MSG_NEWTABLE ? AUDIT_NFT_OP_TABLE_REGISTER : - AUDIT_NFT_OP_TABLE_UNREGISTER); + AUDIT_NFT_OP_TABLE_UNREGISTER, + GFP_KERNEL); kfree(buf); if (!ctx->report && @@ -1448,7 +1449,8 @@ static void nf_tables_chain_notify(const struct nft_ctx *ctx, int event) ctx->chain->use, event == NFT_MSG_NEWCHAIN ? AUDIT_NFT_OP_CHAIN_REGISTER : - AUDIT_NFT_OP_CHAIN_UNREGISTER); + AUDIT_NFT_OP_CHAIN_UNREGISTER, + GFP_KERNEL); kfree(buf); if (!ctx->report && @@ -2724,7 +2726,8 @@ static void nf_tables_rule_notify(const struct nft_ctx *ctx, rule->handle, event == NFT_MSG_NEWRULE ? AUDIT_NFT_OP_RULE_REGISTER : - AUDIT_NFT_OP_RULE_UNREGISTER); + AUDIT_NFT_OP_RULE_UNREGISTER, + GFP_KERNEL); kfree(buf); if (!ctx->report && @@ -3737,7 +3740,8 @@ static void nf_tables_set_notify(const struct nft_ctx *ctx, set->field_count, event == NFT_MSG_NEWSET ? AUDIT_NFT_OP_SET_REGISTER : - AUDIT_NFT_OP_SET_UNREGISTER); + AUDIT_NFT_OP_SET_UNREGISTER, + gfp_flags); kfree(buf); if (!ctx->report && @@ -4864,7 +4868,8 @@ static void nf_tables_setelem_notify(const struct nft_ctx *ctx, set->handle, event == NFT_MSG_NEWSETELEM ? AUDIT_NFT_OP_SETELEM_REGISTER : - AUDIT_NFT_OP_SETELEM_UNREGISTER); + AUDIT_NFT_OP_SETELEM_UNREGISTER, + GFP_KERNEL); kfree(buf); if (!ctx->report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES)) @@ -5956,7 +5961,8 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb) audit_log_nfcfg(buf, family, obj->handle, - AUDIT_NFT_OP_OBJ_RESET); + AUDIT_NFT_OP_OBJ_RESET, + GFP_KERNEL); kfree(buf); } @@ -6071,13 +6077,14 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk, reset = true; if (reset) { - char *buf = kasprintf(GFP_KERNEL, "%s:%llu;?:0", + char *buf = kasprintf(GFP_ATOMIC, "%s:%llu;?:0", table->name, table->handle); audit_log_nfcfg(buf, family, obj->handle, - AUDIT_NFT_OP_OBJ_RESET); + AUDIT_NFT_OP_OBJ_RESET, + GFP_KERNEL); kfree(buf); } @@ -6156,7 +6163,7 @@ void nft_obj_notify(struct net *net, const struct nft_table *table, { struct sk_buff *skb; int err; - char *buf = kasprintf(GFP_KERNEL, "%s:%llu;?:0", + char *buf = kasprintf(gfp, "%s:%llu;?:0", table->name, table->handle); audit_log_nfcfg(buf, @@ -6164,7 +6171,8 @@ void nft_obj_notify(struct net *net, const struct nft_table *table, obj->handle, event == NFT_MSG_NEWOBJ ? AUDIT_NFT_OP_OBJ_REGISTER : - AUDIT_NFT_OP_OBJ_UNREGISTER); + AUDIT_NFT_OP_OBJ_UNREGISTER, + GFP_KERNEL); kfree(buf); if (!report && @@ -6954,7 +6962,8 @@ static void nf_tables_flowtable_notify(struct nft_ctx *ctx, flowtable->hooknum, event == NFT_MSG_NEWFLOWTABLE ? AUDIT_NFT_OP_FLOWTABLE_REGISTER : - AUDIT_NFT_OP_FLOWTABLE_UNREGISTER); + AUDIT_NFT_OP_FLOWTABLE_UNREGISTER, + GFP_KERNEL); kfree(buf); if (ctx->report && @@ -7078,7 +7087,7 @@ static void nf_tables_gen_notify(struct net *net, struct sk_buff *skb, int err; audit_log_nfcfg("?:0;?:0", 0, net->nft.base_seq, - AUDIT_NFT_OP_GEN_REGISTER); + AUDIT_NFT_OP_GEN_REGISTER, GFP_KERNEL); if (nlmsg_report(nlh) && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES)) diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 99a468be4a59..9ad8f3ff66f5 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -1410,7 +1410,8 @@ xt_replace_table(struct xt_table *table, audit_log_nfcfg(table->name, table->af, private->number, !private->number ? 
AUDIT_XT_OP_REGISTER : - AUDIT_XT_OP_REPLACE); + AUDIT_XT_OP_REPLACE, + GFP_KERNEL); return private; } EXPORT_SYMBOL_GPL(xt_replace_table); @@ -1473,7 +1474,7 @@ void *xt_unregister_table(struct xt_table *table) list_del(&table->list); mutex_unlock(&xt[table->af].mutex); audit_log_nfcfg(table->name, table->af, private->number, - AUDIT_XT_OP_UNREGISTER); + AUDIT_XT_OP_UNREGISTER, GFP_KERNEL); kfree(table); return private; -- cgit v1.2.3 From aebe4426ccaa4838f36ea805cdf7d76503e65117 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sat, 27 Jun 2020 01:45:25 +0300 Subject: net: sched: Pass root lock to Qdisc_ops.enqueue A following patch introduces qevents, points in qdisc algorithm where packet can be processed by user-defined filters. Should this processing lead to a situation where a new packet is to be enqueued on the same port, holding the root lock would lead to deadlocks. To solve the issue, qevent handler needs to unlock and relock the root lock when necessary. To that end, add the root lock argument to the qdisc op enqueue, and propagate throughout. Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- include/net/sch_generic.h | 6 ++++-- net/core/dev.c | 4 ++-- net/sched/sch_atm.c | 4 ++-- net/sched/sch_blackhole.c | 2 +- net/sched/sch_cake.c | 2 +- net/sched/sch_cbq.c | 4 ++-- net/sched/sch_cbs.c | 18 +++++++++--------- net/sched/sch_choke.c | 2 +- net/sched/sch_codel.c | 2 +- net/sched/sch_drr.c | 4 ++-- net/sched/sch_dsmark.c | 4 ++-- net/sched/sch_etf.c | 2 +- net/sched/sch_ets.c | 4 ++-- net/sched/sch_fifo.c | 6 +++--- net/sched/sch_fq.c | 2 +- net/sched/sch_fq_codel.c | 2 +- net/sched/sch_fq_pie.c | 2 +- net/sched/sch_generic.c | 4 ++-- net/sched/sch_gred.c | 2 +- net/sched/sch_hfsc.c | 6 +++--- net/sched/sch_hhf.c | 2 +- net/sched/sch_htb.c | 4 ++-- net/sched/sch_multiq.c | 4 ++-- net/sched/sch_netem.c | 8 ++++---- net/sched/sch_pie.c | 2 +- net/sched/sch_plug.c | 2 +- net/sched/sch_prio.c | 6 +++--- net/sched/sch_qfq.c | 4 ++-- net/sched/sch_red.c | 4 ++-- net/sched/sch_sfb.c | 4 ++-- net/sched/sch_sfq.c | 2 +- net/sched/sch_skbprio.c | 2 +- net/sched/sch_taprio.c | 4 ++-- net/sched/sch_tbf.c | 10 +++++----- net/sched/sch_teql.c | 4 ++-- 35 files changed, 73 insertions(+), 71 deletions(-) (limited to 'include') diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index c510b03b9751..fceb3d63c925 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -57,6 +57,7 @@ struct qdisc_skb_head { struct Qdisc { int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch, + spinlock_t *root_lock, struct sk_buff **to_free); struct sk_buff * (*dequeue)(struct Qdisc *sch); unsigned int flags; @@ -241,6 +242,7 @@ struct Qdisc_ops { int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch, + spinlock_t *root_lock, struct sk_buff **to_free); struct sk_buff * (*dequeue)(struct Qdisc *); struct sk_buff * (*peek)(struct Qdisc *); @@ -788,11 +790,11 @@ static inline void qdisc_calculate_pkt_len(struct sk_buff *skb, #endif } -static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { qdisc_calculate_pkt_len(skb, sch); - return sch->enqueue(skb, sch, to_free); + return sch->enqueue(skb, sch, root_lock, to_free); } static inline void _bstats_update(struct gnet_stats_basic_packed *bstats, diff --git a/net/core/dev.c b/net/core/dev.c index 3a46b86cbd67..c02bae927812 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3749,7 +3749,7 
@@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, qdisc_calculate_pkt_len(skb, q); if (q->flags & TCQ_F_NOLOCK) { - rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK; + rc = q->enqueue(skb, q, NULL, &to_free) & NET_XMIT_MASK; qdisc_run(q); if (unlikely(to_free)) @@ -3792,7 +3792,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, qdisc_run_end(q); rc = NET_XMIT_SUCCESS; } else { - rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK; + rc = q->enqueue(skb, q, root_lock, &to_free) & NET_XMIT_MASK; if (qdisc_run_begin(q)) { if (unlikely(contended)) { spin_unlock(&q->busylock); diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index ee12ca9f55b4..fb6b16c4e46d 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -374,7 +374,7 @@ static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl, /* --------------------------- Qdisc operations ---------------------------- */ -static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct atm_qdisc_data *p = qdisc_priv(sch); @@ -432,7 +432,7 @@ done: #endif } - ret = qdisc_enqueue(skb, flow->q, to_free); + ret = qdisc_enqueue(skb, flow->q, root_lock, to_free); if (ret != NET_XMIT_SUCCESS) { drop: __maybe_unused if (net_xmit_drop_count(ret)) { diff --git a/net/sched/sch_blackhole.c b/net/sched/sch_blackhole.c index a7f7667ae984..187644657c4f 100644 --- a/net/sched/sch_blackhole.c +++ b/net/sched/sch_blackhole.c @@ -13,7 +13,7 @@ #include #include -static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { qdisc_drop(skb, sch, to_free); diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 65a95cb094e8..e9c502dd29a2 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -1687,7 +1687,7 @@ hash: static void cake_reconfigure(struct Qdisc *sch); -static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct cake_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 39b427dc7512..052d4a1af69a 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -356,7 +356,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl) } static int -cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch, +cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct cbq_sched_data *q = qdisc_priv(sch); @@ -373,7 +373,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch, return ret; } - ret = qdisc_enqueue(skb, cl->q, to_free); + ret = qdisc_enqueue(skb, cl->q, root_lock, to_free); if (ret == NET_XMIT_SUCCESS) { sch->q.qlen++; cbq_mark_toplevel(q, cl); diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c index 2eaac2ff380f..7af15ebe07f7 100644 --- a/net/sched/sch_cbs.c +++ b/net/sched/sch_cbs.c @@ -77,7 +77,7 @@ struct cbs_sched_data { s64 sendslope; /* in bytes/s */ s64 idleslope; /* in bytes/s */ struct qdisc_watchdog watchdog; - int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch, + int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free); struct sk_buff *(*dequeue)(struct Qdisc *sch); struct Qdisc *qdisc; @@ -85,13 +85,13 @@ struct cbs_sched_data { }; 
static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch, - struct Qdisc *child, + struct Qdisc *child, spinlock_t *root_lock, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb); int err; - err = child->ops->enqueue(skb, child, to_free); + err = child->ops->enqueue(skb, child, root_lock, to_free); if (err != NET_XMIT_SUCCESS) return err; @@ -101,16 +101,16 @@ static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch, return NET_XMIT_SUCCESS; } -static int cbs_enqueue_offload(struct sk_buff *skb, struct Qdisc *sch, +static int cbs_enqueue_offload(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct cbs_sched_data *q = qdisc_priv(sch); struct Qdisc *qdisc = q->qdisc; - return cbs_child_enqueue(skb, sch, qdisc, to_free); + return cbs_child_enqueue(skb, sch, qdisc, root_lock, to_free); } -static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch, +static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct cbs_sched_data *q = qdisc_priv(sch); @@ -124,15 +124,15 @@ static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch, q->last = ktime_get_ns(); } - return cbs_child_enqueue(skb, sch, qdisc, to_free); + return cbs_child_enqueue(skb, sch, qdisc, root_lock, to_free); } -static int cbs_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int cbs_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct cbs_sched_data *q = qdisc_priv(sch); - return q->enqueue(skb, sch, to_free); + return q->enqueue(skb, sch, root_lock, to_free); } /* timediff is in ns, slope is in bytes/s */ diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c index bd618b00d319..baf3faee31aa 100644 --- a/net/sched/sch_choke.c +++ b/net/sched/sch_choke.c @@ -210,7 +210,7 @@ static bool choke_match_random(const struct choke_sched_data *q, return choke_match_flow(oskb, nskb); } -static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct choke_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c index 30169b3adbbb..1d94837abdd8 100644 --- a/net/sched/sch_codel.c +++ b/net/sched/sch_codel.c @@ -108,7 +108,7 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch) return skb; } -static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct codel_sched_data *q; diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index 07a2b0b35495..0d5c9a8ec61d 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c @@ -337,7 +337,7 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch, return NULL; } -static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb); @@ -355,7 +355,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, } first = !cl->qdisc->q.qlen; - err = qdisc_enqueue(skb, cl->qdisc, to_free); + err = qdisc_enqueue(skb, cl->qdisc, root_lock, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { if (net_xmit_drop_count(err)) { cl->qstats.drops++; diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 
05605b30bef3..fbe49fffcdbb 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -198,7 +198,7 @@ static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl, /* --------------------------- Qdisc operations ---------------------------- */ -static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb); @@ -267,7 +267,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, } } - err = qdisc_enqueue(skb, p->q, to_free); + err = qdisc_enqueue(skb, p->q, root_lock, to_free); if (err != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(err)) qdisc_qstats_drop(sch); diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c index c48f91075b5c..7a7c50a68115 100644 --- a/net/sched/sch_etf.c +++ b/net/sched/sch_etf.c @@ -160,7 +160,7 @@ static void report_sock_error(struct sk_buff *skb, u32 err, u8 code) } static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch, - struct sk_buff **to_free) + spinlock_t *root_lock, struct sk_buff **to_free) { struct etf_sched_data *q = qdisc_priv(sch); struct rb_node **p = &q->head.rb_root.rb_node, *parent = NULL; diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c index a87e9159338c..373dc5855d4e 100644 --- a/net/sched/sch_ets.c +++ b/net/sched/sch_ets.c @@ -415,7 +415,7 @@ static struct ets_class *ets_classify(struct sk_buff *skb, struct Qdisc *sch, return &q->classes[band]; } -static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb); @@ -433,7 +433,7 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, } first = !cl->qdisc->q.qlen; - err = qdisc_enqueue(skb, cl->qdisc, to_free); + err = qdisc_enqueue(skb, cl->qdisc, root_lock, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { if (net_xmit_drop_count(err)) { cl->qstats.drops++; diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c index a579a4131d22..b4da5b624ad8 100644 --- a/net/sched/sch_fifo.c +++ b/net/sched/sch_fifo.c @@ -16,7 +16,7 @@ /* 1 band FIFO pseudo-"scheduler" */ -static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit)) @@ -25,7 +25,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, return qdisc_drop(skb, sch, to_free); } -static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { if (likely(sch->q.qlen < sch->limit)) @@ -34,7 +34,7 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, return qdisc_drop(skb, sch, to_free); } -static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { unsigned int prev_backlog; diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index 2fb76fc0cc31..a90d745c41e0 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -439,7 +439,7 @@ static bool fq_packet_beyond_horizon(const struct sk_buff *skb, return unlikely((s64)skb->tstamp > (s64)(q->ktime_cache + q->horizon)); } -static int 
fq_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct fq_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 459a784056c0..6bf979f95509 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -181,7 +181,7 @@ static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets, return idx; } -static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct fq_codel_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c index fb760cee824e..a27a250ab8f9 100644 --- a/net/sched/sch_fq_pie.c +++ b/net/sched/sch_fq_pie.c @@ -125,7 +125,7 @@ static inline void flow_queue_add(struct fq_pie_flow *flow, skb->next = NULL; } -static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct fq_pie_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 265a61d011df..715cde1df9e4 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -520,7 +520,7 @@ EXPORT_SYMBOL(netif_carrier_off); cheaper. */ -static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, +static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, spinlock_t *root_lock, struct sk_buff **to_free) { __qdisc_drop(skb, to_free); @@ -614,7 +614,7 @@ static inline struct skb_array *band2list(struct pfifo_fast_priv *priv, return &priv->q[band]; } -static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, +static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, spinlock_t *root_lock, struct sk_buff **to_free) { int band = prio2band[skb->priority & TC_PRIO_MAX]; diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index 8599c6f31b05..7d67c6cd6605 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c @@ -161,7 +161,7 @@ static bool gred_per_vq_red_flags_used(struct gred_sched *table) return false; } -static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct gred_sched_data *q = NULL; diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 433f2190960f..7f6670044f0a 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1528,8 +1528,8 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb) return -1; } -static int -hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) +static int hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, + struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb); struct hfsc_class *cl; @@ -1545,7 +1545,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) } first = !cl->qdisc->q.qlen; - err = qdisc_enqueue(skb, cl->qdisc, to_free); + err = qdisc_enqueue(skb, cl->qdisc, root_lock, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { if (net_xmit_drop_count(err)) { cl->qstats.drops++; diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c index 420ede875322..ddc6bf1d85d0 100644 --- a/net/sched/sch_hhf.c +++ b/net/sched/sch_hhf.c @@ -368,7 +368,7 @@ static unsigned int hhf_drop(struct 
Qdisc *sch, struct sk_buff **to_free) return bucket - q->buckets; } -static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct hhf_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 8184c87da8be..52fc513688b1 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -576,7 +576,7 @@ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl) cl->prio_activity = 0; } -static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { int uninitialized_var(ret); @@ -599,7 +599,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, __qdisc_drop(skb, to_free); return ret; #endif - } else if ((ret = qdisc_enqueue(skb, cl->leaf.q, + } else if ((ret = qdisc_enqueue(skb, cl->leaf.q, root_lock, to_free)) != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(ret)) { qdisc_qstats_drop(sch); diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index 1330ad224931..648611f5c105 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c @@ -57,7 +57,7 @@ multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) } static int -multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch, +multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct Qdisc *qdisc; @@ -74,7 +74,7 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch, } #endif - ret = qdisc_enqueue(skb, qdisc, to_free); + ret = qdisc_enqueue(skb, qdisc, root_lock, to_free); if (ret == NET_XMIT_SUCCESS) { sch->q.qlen++; return NET_XMIT_SUCCESS; diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 84f82771cdf5..8fb17483a34f 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -431,7 +431,7 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch, * NET_XMIT_DROP: queue length didn't change. * NET_XMIT_SUCCESS: one skb was queued. */ -static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct netem_sched_data *q = qdisc_priv(sch); @@ -480,7 +480,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, u32 dupsave = q->duplicate; /* prevent duplicating a dup... 
*/ q->duplicate = 0; - rootq->enqueue(skb2, rootq, to_free); + rootq->enqueue(skb2, rootq, root_lock, to_free); q->duplicate = dupsave; rc_drop = NET_XMIT_SUCCESS; } @@ -604,7 +604,7 @@ finish_segs: skb_mark_not_on_list(segs); qdisc_skb_cb(segs)->pkt_len = segs->len; last_len = segs->len; - rc = qdisc_enqueue(segs, sch, to_free); + rc = qdisc_enqueue(segs, sch, root_lock, to_free); if (rc != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(rc)) qdisc_qstats_drop(sch); @@ -720,7 +720,7 @@ deliver: struct sk_buff *to_free = NULL; int err; - err = qdisc_enqueue(skb, q->qdisc, &to_free); + err = qdisc_enqueue(skb, q->qdisc, NULL, &to_free); kfree_skb_list(to_free); if (err != NET_XMIT_SUCCESS && net_xmit_drop_count(err)) { diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c index c65077f0c0f3..b305313b64e3 100644 --- a/net/sched/sch_pie.c +++ b/net/sched/sch_pie.c @@ -82,7 +82,7 @@ bool pie_drop_early(struct Qdisc *sch, struct pie_params *params, } EXPORT_SYMBOL_GPL(pie_drop_early); -static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct pie_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c index cbc2ebca4548..e5f8b4769b4d 100644 --- a/net/sched/sch_plug.c +++ b/net/sched/sch_plug.c @@ -84,7 +84,7 @@ struct plug_sched_data { u32 pkts_to_release; }; -static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct plug_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 647941702f9f..a3e187f2603c 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -65,8 +65,8 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) return q->queues[band]; } -static int -prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) +static int prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, + struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb); struct Qdisc *qdisc; @@ -83,7 +83,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) } #endif - ret = qdisc_enqueue(skb, qdisc, to_free); + ret = qdisc_enqueue(skb, qdisc, root_lock, to_free); if (ret == NET_XMIT_SUCCESS) { sch->qstats.backlog += len; sch->q.qlen++; diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 0b05ac7c848e..ede854516825 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -1194,7 +1194,7 @@ static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q) return agg; } -static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb), gso_segs; @@ -1225,7 +1225,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, gso_segs = skb_is_gso(skb) ? 
skb_shinfo(skb)->gso_segs : 1; first = !cl->qdisc->q.qlen; - err = qdisc_enqueue(skb, cl->qdisc, to_free); + err = qdisc_enqueue(skb, cl->qdisc, root_lock, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { pr_debug("qfq_enqueue: enqueue failed %d\n", err); if (net_xmit_drop_count(err)) { diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 555a1b9e467f..6ace7d757e8b 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -65,7 +65,7 @@ static int red_use_nodrop(struct red_sched_data *q) return q->flags & TC_RED_NODROP; } -static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct red_sched_data *q = qdisc_priv(sch); @@ -118,7 +118,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, break; } - ret = qdisc_enqueue(skb, child, to_free); + ret = qdisc_enqueue(skb, child, root_lock, to_free); if (likely(ret == NET_XMIT_SUCCESS)) { qdisc_qstats_backlog_inc(sch, skb); sch->q.qlen++; diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c index 4074c50ac3d7..d2a6e78262bb 100644 --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c @@ -276,7 +276,7 @@ static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl, return false; } -static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { @@ -399,7 +399,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, } enqueue: - ret = qdisc_enqueue(skb, child, to_free); + ret = qdisc_enqueue(skb, child, root_lock, to_free); if (likely(ret == NET_XMIT_SUCCESS)) { qdisc_qstats_backlog_inc(sch, skb); sch->q.qlen++; diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 5a6def5e4e6d..46cdefd69e44 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -343,7 +343,7 @@ static int sfq_headdrop(const struct sfq_sched_data *q) } static int -sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) +sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct sfq_sched_data *q = qdisc_priv(sch); unsigned int hash, dropped; diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c index 7a5e4c454715..f75f237c4436 100644 --- a/net/sched/sch_skbprio.c +++ b/net/sched/sch_skbprio.c @@ -65,7 +65,7 @@ static u16 calc_new_low_prio(const struct skbprio_sched_data *q) return SKBPRIO_MAX_PRIORITY - 1; } -static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { const unsigned int max_priority = SKBPRIO_MAX_PRIORITY - 1; diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index e981992634dd..daef2ff60a98 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -410,7 +410,7 @@ done: return txtime; } -static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct taprio_sched *q = qdisc_priv(sch); @@ -435,7 +435,7 @@ static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, qdisc_qstats_backlog_inc(sch, skb); sch->q.qlen++; - return qdisc_enqueue(skb, child, to_free); + return qdisc_enqueue(skb, child, root_lock, to_free); } static struct sk_buff *taprio_peek_soft(struct Qdisc *sch) diff --git a/net/sched/sch_tbf.c 
b/net/sched/sch_tbf.c index 78e79029dc63..c3eb5cdb83a8 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -187,7 +187,7 @@ static int tbf_offload_dump(struct Qdisc *sch) /* GSO packet is too big, segment it so that tbf can transmit * each segment in time */ -static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch, +static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct tbf_sched_data *q = qdisc_priv(sch); @@ -206,7 +206,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch, skb_mark_not_on_list(segs); qdisc_skb_cb(segs)->pkt_len = segs->len; len += segs->len; - ret = qdisc_enqueue(segs, q->qdisc, to_free); + ret = qdisc_enqueue(segs, q->qdisc, root_lock, to_free); if (ret != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(ret)) qdisc_qstats_drop(sch); @@ -221,7 +221,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch, return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP; } -static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct tbf_sched_data *q = qdisc_priv(sch); @@ -231,10 +231,10 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch, if (qdisc_pkt_len(skb) > q->max_size) { if (skb_is_gso(skb) && skb_gso_validate_mac_len(skb, q->max_size)) - return tbf_segment(skb, sch, to_free); + return tbf_segment(skb, sch, root_lock, to_free); return qdisc_drop(skb, sch, to_free); } - ret = qdisc_enqueue(skb, q->qdisc, to_free); + ret = qdisc_enqueue(skb, q->qdisc, root_lock, to_free); if (ret != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(ret)) qdisc_qstats_drop(sch); diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index 689ef6f3ded8..511964653476 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c @@ -72,8 +72,8 @@ struct teql_sched_data { /* "teql*" qdisc routines */ -static int -teql_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) +static int teql_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, + struct sk_buff **to_free) { struct net_device *dev = qdisc_dev(sch); struct teql_sched_data *q = qdisc_priv(sch); -- cgit v1.2.3 From 3625750f05ecce21a0fce429c1ff85acfffb461b Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sat, 27 Jun 2020 01:45:26 +0300 Subject: net: sched: Introduce helpers for qevent blocks Qevents are attach points for TC blocks, where filters can be put that are executed when "interesting events" take place in a qdisc. The data to keep and the functions to invoke to maintain a qevent will be largely the same between qevents. Therefore introduce sched-wide helpers for qevent management. Currently, similarly to ingress and egress blocks of clsact pseudo-qdisc, blocks attachment cannot be changed after the qdisc is created. To that end, add a helper tcf_qevent_validate_change(), which verifies whether block index attribute is not attached, or if it is, whether its value matches the current one (i.e. there is no material change). The function tcf_qevent_handle() should be invoked when qdisc hits the "interesting event" corresponding to a block. This function releases root lock for the duration of executing the attached filters, to allow packets generated through user actions (notably mirred) to be reinserted to the same qdisc tree. Signed-off-by: Petr Machata Signed-off-by: David S. 
Miller --- include/net/pkt_cls.h | 49 +++++++++++++++++++++ net/sched/cls_api.c | 119 ++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 168 insertions(+) (limited to 'include') diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index ff017e5b3ea2..690a7f49c8f9 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -32,6 +32,12 @@ struct tcf_block_ext_info { u32 block_index; }; +struct tcf_qevent { + struct tcf_block *block; + struct tcf_block_ext_info info; + struct tcf_proto __rcu *filter_chain; +}; + struct tcf_block_cb; bool tcf_queue_work(struct rcu_work *rwork, work_func_t func); @@ -553,6 +559,49 @@ int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp, void *cb_priv, u32 *flags, unsigned int *in_hw_count); unsigned int tcf_exts_num_actions(struct tcf_exts *exts); +#ifdef CONFIG_NET_CLS_ACT +int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch, + enum flow_block_binder_type binder_type, + struct nlattr *block_index_attr, + struct netlink_ext_ack *extack); +void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch); +int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr, + struct netlink_ext_ack *extack); +struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb, + spinlock_t *root_lock, struct sk_buff **to_free, int *ret); +int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe); +#else +static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch, + enum flow_block_binder_type binder_type, + struct nlattr *block_index_attr, + struct netlink_ext_ack *extack) +{ + return 0; +} + +static inline void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch) +{ +} + +static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr, + struct netlink_ext_ack *extack) +{ + return 0; +} + +static inline struct sk_buff * +tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb, + spinlock_t *root_lock, struct sk_buff **to_free, int *ret) +{ + return skb; +} + +static inline int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe) +{ + return 0; +} +#endif + struct tc_cls_u32_knode { struct tcf_exts *exts; struct tcf_result *res; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 5bfa6b985bb8..1b14d5f57e7f 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -3748,6 +3748,125 @@ unsigned int tcf_exts_num_actions(struct tcf_exts *exts) } EXPORT_SYMBOL(tcf_exts_num_actions); +#ifdef CONFIG_NET_CLS_ACT +static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr, + u32 *p_block_index, + struct netlink_ext_ack *extack) +{ + *p_block_index = nla_get_u32(block_index_attr); + if (!*p_block_index) { + NL_SET_ERR_MSG(extack, "Block number may not be zero"); + return -EINVAL; + } + + return 0; +} + +int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch, + enum flow_block_binder_type binder_type, + struct nlattr *block_index_attr, + struct netlink_ext_ack *extack) +{ + u32 block_index; + int err; + + if (!block_index_attr) + return 0; + + err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack); + if (err) + return err; + + if (!block_index) + return 0; + + qe->info.binder_type = binder_type; + qe->info.chain_head_change = tcf_chain_head_change_dflt; + qe->info.chain_head_change_priv = &qe->filter_chain; + qe->info.block_index = block_index; + + return tcf_block_get_ext(&qe->block, sch, 
&qe->info, extack); +} +EXPORT_SYMBOL(tcf_qevent_init); + +void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch) +{ + if (qe->info.block_index) + tcf_block_put_ext(qe->block, sch, &qe->info); +} +EXPORT_SYMBOL(tcf_qevent_destroy); + +int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr, + struct netlink_ext_ack *extack) +{ + u32 block_index; + int err; + + if (!block_index_attr) + return 0; + + err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack); + if (err) + return err; + + /* Bounce newly-configured block or change in block. */ + if (block_index != qe->info.block_index) { + NL_SET_ERR_MSG(extack, "Change of blocks is not supported"); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(tcf_qevent_validate_change); + +struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb, + spinlock_t *root_lock, struct sk_buff **to_free, int *ret) +{ + struct tcf_result cl_res; + struct tcf_proto *fl; + + if (!qe->info.block_index) + return skb; + + fl = rcu_dereference_bh(qe->filter_chain); + + if (root_lock) + spin_unlock(root_lock); + + switch (tcf_classify(skb, fl, &cl_res, false)) { + case TC_ACT_SHOT: + qdisc_qstats_drop(sch); + __qdisc_drop(skb, to_free); + *ret = __NET_XMIT_BYPASS; + return NULL; + case TC_ACT_STOLEN: + case TC_ACT_QUEUED: + case TC_ACT_TRAP: + __qdisc_drop(skb, to_free); + *ret = __NET_XMIT_STOLEN; + return NULL; + case TC_ACT_REDIRECT: + skb_do_redirect(skb); + *ret = __NET_XMIT_STOLEN; + return NULL; + } + + if (root_lock) + spin_lock(root_lock); + + return skb; +} +EXPORT_SYMBOL(tcf_qevent_handle); + +int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe) +{ + if (!qe->info.block_index) + return 0; + return nla_put_u32(skb, attr_name, qe->info.block_index); +} +EXPORT_SYMBOL(tcf_qevent_dump); +#endif + static __net_init int tcf_net_init(struct net *net) { struct tcf_net *tn = net_generic(net, tcf_net_id); -- cgit v1.2.3 From aee9caa03fc3c8b02f8f31824354d85f30e562e0 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sat, 27 Jun 2020 01:45:28 +0300 Subject: net: sched: sch_red: Add qevents "early_drop" and "mark" In order to allow acting on dropped and/or ECN-marked packets, add two new qevents to the RED qdisc: "early_drop" and "mark". Filters attached at "early_drop" block are executed as packets are early-dropped, those attached at the "mark" block are executed as packets are ECN-marked. Two new attributes are introduced: TCA_RED_EARLY_DROP_BLOCK with the block index for the "early_drop" qevent, and TCA_RED_MARK_BLOCK for the "mark" qevent. Absence of these attributes signifies "don't care": no block is allocated in that case, or the existing blocks are left intact in case of the change callback. For purposes of offloading, blocks attached to these qevents appear with newly-introduced binder types, FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP and FLOW_BLOCK_BINDER_TYPE_RED_MARK. Signed-off-by: Petr Machata Signed-off-by: David S. 
Miller --- include/net/flow_offload.h | 2 ++ include/uapi/linux/pkt_sched.h | 2 ++ net/sched/sch_red.c | 58 ++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 60 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 3bafb5124ac0..3e793ac66baf 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -424,6 +424,8 @@ enum flow_block_binder_type { FLOW_BLOCK_BINDER_TYPE_UNSPEC, FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS, FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS, + FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP, + FLOW_BLOCK_BINDER_TYPE_RED_MARK, }; struct flow_block { diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index a95f3ae7ab37..9e7c2c607845 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -257,6 +257,8 @@ enum { TCA_RED_STAB, TCA_RED_MAX_P, TCA_RED_FLAGS, /* bitfield32 */ + TCA_RED_EARLY_DROP_BLOCK, /* u32 */ + TCA_RED_MARK_BLOCK, /* u32 */ __TCA_RED_MAX, }; diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 225ce370e5a8..de2be4d04ed6 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -46,6 +46,8 @@ struct red_sched_data { struct red_vars vars; struct red_stats stats; struct Qdisc *qdisc; + struct tcf_qevent qe_early_drop; + struct tcf_qevent qe_mark; }; #define TC_RED_SUPPORTED_FLAGS (TC_RED_HISTORIC_FLAGS | TC_RED_NODROP) @@ -92,6 +94,9 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_ if (INET_ECN_set_ce(skb)) { q->stats.prob_mark++; + skb = tcf_qevent_handle(&q->qe_mark, sch, skb, root_lock, to_free, &ret); + if (!skb) + return NET_XMIT_CN | ret; } else if (!red_use_nodrop(q)) { q->stats.prob_drop++; goto congestion_drop; @@ -109,6 +114,9 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_ if (INET_ECN_set_ce(skb)) { q->stats.forced_mark++; + skb = tcf_qevent_handle(&q->qe_mark, sch, skb, root_lock, to_free, &ret); + if (!skb) + return NET_XMIT_CN | ret; } else if (!red_use_nodrop(q)) { q->stats.forced_drop++; goto congestion_drop; @@ -129,6 +137,10 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_ return ret; congestion_drop: + skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, root_lock, to_free, &ret); + if (!skb) + return NET_XMIT_CN | ret; + qdisc_drop(skb, sch, to_free); return NET_XMIT_CN; } @@ -202,6 +214,8 @@ static void red_destroy(struct Qdisc *sch) { struct red_sched_data *q = qdisc_priv(sch); + tcf_qevent_destroy(&q->qe_mark, sch); + tcf_qevent_destroy(&q->qe_early_drop, sch); del_timer_sync(&q->adapt_timer); red_offload(sch, false); qdisc_put(q->qdisc); @@ -213,6 +227,8 @@ static const struct nla_policy red_policy[TCA_RED_MAX + 1] = { [TCA_RED_STAB] = { .len = RED_STAB_SIZE }, [TCA_RED_MAX_P] = { .type = NLA_U32 }, [TCA_RED_FLAGS] = NLA_POLICY_BITFIELD32(TC_RED_SUPPORTED_FLAGS), + [TCA_RED_EARLY_DROP_BLOCK] = { .type = NLA_U32 }, + [TCA_RED_MARK_BLOCK] = { .type = NLA_U32 }, }; static int __red_change(struct Qdisc *sch, struct nlattr **tb, @@ -328,12 +344,38 @@ static int red_init(struct Qdisc *sch, struct nlattr *opt, q->qdisc = &noop_qdisc; q->sch = sch; timer_setup(&q->adapt_timer, red_adaptative_timer, 0); - return __red_change(sch, tb, extack); + + err = __red_change(sch, tb, extack); + if (err) + return err; + + err = tcf_qevent_init(&q->qe_early_drop, sch, + FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP, + tb[TCA_RED_EARLY_DROP_BLOCK], extack); + if (err) + goto err_early_drop_init; + + err = 
tcf_qevent_init(&q->qe_mark, sch, + FLOW_BLOCK_BINDER_TYPE_RED_MARK, + tb[TCA_RED_MARK_BLOCK], extack); + if (err) + goto err_mark_init; + + return 0; + +err_mark_init: + tcf_qevent_destroy(&q->qe_early_drop, sch); +err_early_drop_init: + del_timer_sync(&q->adapt_timer); + red_offload(sch, false); + qdisc_put(q->qdisc); + return err; } static int red_change(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { + struct red_sched_data *q = qdisc_priv(sch); struct nlattr *tb[TCA_RED_MAX + 1]; int err; @@ -345,6 +387,16 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt, if (err < 0) return err; + err = tcf_qevent_validate_change(&q->qe_early_drop, + tb[TCA_RED_EARLY_DROP_BLOCK], extack); + if (err) + return err; + + err = tcf_qevent_validate_change(&q->qe_mark, + tb[TCA_RED_MARK_BLOCK], extack); + if (err) + return err; + return __red_change(sch, tb, extack); } @@ -389,7 +441,9 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb) if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) || nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P) || nla_put_bitfield32(skb, TCA_RED_FLAGS, - q->flags, TC_RED_SUPPORTED_FLAGS)) + q->flags, TC_RED_SUPPORTED_FLAGS) || + tcf_qevent_dump(skb, TCA_RED_MARK_BLOCK, &q->qe_mark) || + tcf_qevent_dump(skb, TCA_RED_EARLY_DROP_BLOCK, &q->qe_early_drop)) goto nla_put_failure; return nla_nest_end(skb, opts); -- cgit v1.2.3 From 5f035af76e51cd622abc6564d5512ffeb9e06917 Mon Sep 17 00:00:00 2001 From: Po Liu Date: Mon, 29 Jun 2020 14:54:16 +0800 Subject: net:qos: police action offloading parameter 'burst' change to the original value Since 'tcfp_burst' with TICK factor, driver side always need to recover it to the original value, this patch moves the generic calculation and recover to the 'burst' original value before offloading to device driver. Signed-off-by: Po Liu Acked-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- drivers/net/dsa/ocelot/felix.c | 4 +-- drivers/net/dsa/sja1105/sja1105_flower.c | 16 +++++------ drivers/net/dsa/sja1105/sja1105_main.c | 4 +-- drivers/net/ethernet/freescale/enetc/enetc_qos.c | 8 +----- drivers/net/ethernet/mscc/ocelot_flower.c | 4 +-- drivers/net/ethernet/mscc/ocelot_net.c | 4 +-- .../net/ethernet/netronome/nfp/flower/qos_conf.c | 6 ++-- include/net/dsa.h | 2 +- include/net/flow_offload.h | 2 +- include/net/tc_act/tc_police.h | 32 ++++++++++++++++++++-- net/sched/cls_api.c | 2 +- 11 files changed, 48 insertions(+), 36 deletions(-) (limited to 'include') diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 25046777c993..75020af7f7a4 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -746,9 +746,7 @@ static int felix_port_policer_add(struct dsa_switch *ds, int port, struct ocelot *ocelot = ds->priv; struct ocelot_policer pol = { .rate = div_u64(policer->rate_bytes_per_sec, 1000) * 8, - .burst = div_u64(policer->rate_bytes_per_sec * - PSCHED_NS2TICKS(policer->burst), - PSCHED_TICKS_PER_SEC), + .burst = policer->burst, }; return ocelot_port_policer_add(ocelot, port, &pol); diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c index 9ee8968610cd..12e76020bea3 100644 --- a/drivers/net/dsa/sja1105/sja1105_flower.c +++ b/drivers/net/dsa/sja1105/sja1105_flower.c @@ -31,7 +31,7 @@ static int sja1105_setup_bcast_policer(struct sja1105_private *priv, struct netlink_ext_ack *extack, unsigned long cookie, int port, u64 rate_bytes_per_sec, - s64 burst) + u32 burst) { struct sja1105_rule *rule = sja1105_rule_find(priv, cookie); struct sja1105_l2_policing_entry *policing; @@ -79,9 +79,8 @@ static int sja1105_setup_bcast_policer(struct sja1105_private *priv, policing[rule->bcast_pol.sharindx].rate = div_u64(rate_bytes_per_sec * 512, 1000000); - policing[rule->bcast_pol.sharindx].smax = div_u64(rate_bytes_per_sec * - PSCHED_NS2TICKS(burst), - PSCHED_TICKS_PER_SEC); + policing[rule->bcast_pol.sharindx].smax = burst; + /* TODO: support per-flow MTU */ policing[rule->bcast_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; @@ -103,7 +102,7 @@ static int sja1105_setup_tc_policer(struct sja1105_private *priv, struct netlink_ext_ack *extack, unsigned long cookie, int port, int tc, u64 rate_bytes_per_sec, - s64 burst) + u32 burst) { struct sja1105_rule *rule = sja1105_rule_find(priv, cookie); struct sja1105_l2_policing_entry *policing; @@ -152,9 +151,8 @@ static int sja1105_setup_tc_policer(struct sja1105_private *priv, policing[rule->tc_pol.sharindx].rate = div_u64(rate_bytes_per_sec * 512, 1000000); - policing[rule->tc_pol.sharindx].smax = div_u64(rate_bytes_per_sec * - PSCHED_NS2TICKS(burst), - PSCHED_TICKS_PER_SEC); + policing[rule->tc_pol.sharindx].smax = burst; + /* TODO: support per-flow MTU */ policing[rule->tc_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; @@ -177,7 +175,7 @@ static int sja1105_flower_policer(struct sja1105_private *priv, int port, unsigned long cookie, struct sja1105_key *key, u64 rate_bytes_per_sec, - s64 burst) + u32 burst) { switch (key->type) { case SJA1105_KEY_BCAST: diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index 789b288cc78b..5079e4aeef80 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -3324,9 +3324,7 @@ static int sja1105_port_policer_add(struct dsa_switch *ds, int port, */ policing[port].rate = div_u64(512 * 
policer->rate_bytes_per_sec, 1000000); - policing[port].smax = div_u64(policer->rate_bytes_per_sec * - PSCHED_NS2TICKS(policer->burst), - PSCHED_TICKS_PER_SEC); + policing[port].smax = policer->burst; return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING); } diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index 4f670cbdf186..b8b336179d82 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -1241,8 +1241,6 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, /* Flow meter and max frame size */ if (entryp) { if (entryp->police.burst) { - u64 temp; - fmi = kzalloc(sizeof(*fmi), GFP_KERNEL); if (!fmi) { err = -ENOMEM; @@ -1250,11 +1248,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, } refcount_set(&fmi->refcount, 1); fmi->cir = entryp->police.rate_bytes_ps; - /* Convert to original burst value */ - temp = entryp->police.burst * fmi->cir; - temp = div_u64(temp, 1000000000ULL); - - fmi->cbs = temp; + fmi->cbs = entryp->police.burst; fmi->index = entryp->police.index; filter->flags |= ENETC_PSFP_FLAGS_FMI; filter->fmi_index = fmi->index; diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index f2a85b06a6e7..ec1b6e2572ba 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -12,7 +12,6 @@ static int ocelot_flower_parse_action(struct flow_cls_offload *f, struct ocelot_vcap_filter *filter) { const struct flow_action_entry *a; - s64 burst; u64 rate; int i; @@ -35,8 +34,7 @@ static int ocelot_flower_parse_action(struct flow_cls_offload *f, filter->action = OCELOT_VCAP_ACTION_POLICE; rate = a->police.rate_bytes_ps; filter->pol.rate = div_u64(rate, 1000) * 8; - burst = rate * PSCHED_NS2TICKS(a->police.burst); - filter->pol.burst = div_u64(burst, PSCHED_TICKS_PER_SEC); + filter->pol.burst = a->police.burst; break; default: return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 5868ff753232..41a1b5f6df95 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -74,9 +74,7 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv, } pol.rate = (u32)div_u64(action->police.rate_bytes_ps, 1000) * 8; - pol.burst = (u32)div_u64(action->police.rate_bytes_ps * - PSCHED_NS2TICKS(action->police.burst), - PSCHED_TICKS_PER_SEC); + pol.burst = action->police.burst; err = ocelot_port_policer_add(ocelot, port, &pol); if (err) { diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c index bb327d48d1ab..d4ce8f9ef3cc 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c @@ -69,7 +69,8 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev, struct nfp_repr *repr; struct sk_buff *skb; u32 netdev_port_id; - u64 burst, rate; + u32 burst; + u64 rate; if (!nfp_netdev_is_nfp_repr(netdev)) { NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port"); @@ -104,8 +105,7 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev, } rate = action->police.rate_bytes_ps; - burst = div_u64(rate * PSCHED_NS2TICKS(action->police.burst), - PSCHED_TICKS_PER_SEC); + burst = action->police.burst; 
netdev_port_id = nfp_repr_get_port_id(netdev); skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config), diff --git a/include/net/dsa.h b/include/net/dsa.h index 50389772c597..4046ccd1945d 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -144,7 +144,7 @@ struct dsa_mall_mirror_tc_entry { /* TC port policer entry */ struct dsa_mall_policer_tc_entry { - s64 burst; + u32 burst; u64 rate_bytes_per_sec; }; diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 3e793ac66baf..de395498440d 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -233,7 +233,7 @@ struct flow_action_entry { } sample; struct { /* FLOW_ACTION_POLICE */ u32 index; - s64 burst; + u32 burst; u64 rate_bytes_ps; u32 mtu; } police; diff --git a/include/net/tc_act/tc_police.h b/include/net/tc_act/tc_police.h index cd973b10ae8c..6d1e26b709b5 100644 --- a/include/net/tc_act/tc_police.h +++ b/include/net/tc_act/tc_police.h @@ -59,14 +59,42 @@ static inline u64 tcf_police_rate_bytes_ps(const struct tc_action *act) return params->rate.rate_bytes_ps; } -static inline s64 tcf_police_tcfp_burst(const struct tc_action *act) +static inline u32 tcf_police_burst(const struct tc_action *act) { struct tcf_police *police = to_police(act); struct tcf_police_params *params; + u32 burst; params = rcu_dereference_protected(police->params, lockdep_is_held(&police->tcf_lock)); - return params->tcfp_burst; + + /* + * "rate" bytes "burst" nanoseconds + * ------------ * ------------------- + * 1 second 2^6 ticks + * + * ------------------------------------ + * NSEC_PER_SEC nanoseconds + * ------------------------ + * 2^6 ticks + * + * "rate" bytes "burst" nanoseconds 2^6 ticks + * = ------------ * ------------------- * ------------------------ + * 1 second 2^6 ticks NSEC_PER_SEC nanoseconds + * + * "rate" * "burst" + * = ---------------- bytes/nanosecond + * NSEC_PER_SEC^2 + * + * + * "rate" * "burst" + * = ---------------- bytes/second + * NSEC_PER_SEC + */ + burst = div_u64(params->tcfp_burst * params->rate.rate_bytes_ps, + NSEC_PER_SEC); + + return burst; } static inline u32 tcf_police_tcfp_mtu(const struct tc_action *act) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 1b14d5f57e7f..e9e119ea6813 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -3660,7 +3660,7 @@ int tc_setup_flow_action(struct flow_action *flow_action, tcf_sample_get_group(entry, act); } else if (is_tcf_police(act)) { entry->id = FLOW_ACTION_POLICE; - entry->police.burst = tcf_police_tcfp_burst(act); + entry->police.burst = tcf_police_burst(act); entry->police.rate_bytes_ps = tcf_police_rate_bytes_ps(act); entry->police.mtu = tcf_police_tcfp_mtu(act); -- cgit v1.2.3 From ecc31c60240b9808a274befc5db6b8a249a6ade1 Mon Sep 17 00:00:00 2001 From: Amit Cohen Date: Mon, 29 Jun 2020 23:46:16 +0300 Subject: ethtool: Add link extended state Currently, drivers can only tell whether the link is up/down using LINKSTATE_GET, but no additional information is given. Add attributes to LINKSTATE_GET command in order to allow drivers to expose the user more information in addition to link state to ease the debug process, for example, reason for link down state. Extended state consists of two attributes - link_ext_state and link_ext_substate. The idea is to avoid 'vendor specific' states in order to prevent drivers to use specific link_ext_state that can be in the future common link_ext_state. The substates allows drivers to add more information to the common link_ext_state. 
For example, vendor can expose 'Autoneg' as link_ext_state and add 'No partner detected during force mode' as link_ext_substate. If a driver cannot pinpoint the extended state with the substate accuracy, it is free to expose only the extended state and omit the substate attribute. Signed-off-by: Amit Cohen Reviewed-by: Jiri Pirko Reviewed-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- include/linux/ethtool.h | 23 ++++++++++++ include/uapi/linux/ethtool.h | 70 ++++++++++++++++++++++++++++++++++++ include/uapi/linux/ethtool_netlink.h | 2 ++ net/ethtool/linkstate.c | 52 ++++++++++++++++++++++++--- 4 files changed, 143 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index a23b26eab479..48ad3b6a0150 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -86,6 +86,22 @@ struct net_device; u32 ethtool_op_get_link(struct net_device *dev); int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti); + +/** + * struct ethtool_link_ext_state_info - link extended state and substate. + */ +struct ethtool_link_ext_state_info { + enum ethtool_link_ext_state link_ext_state; + union { + enum ethtool_link_ext_substate_autoneg autoneg; + enum ethtool_link_ext_substate_link_training link_training; + enum ethtool_link_ext_substate_link_logical_mismatch link_logical_mismatch; + enum ethtool_link_ext_substate_bad_signal_integrity bad_signal_integrity; + enum ethtool_link_ext_substate_cable_issue cable_issue; + u8 __link_ext_substate; + }; +}; + /** * ethtool_rxfh_indir_default - get default value for RX flow hash indirection * @index: Index in RX flow hash indirection table @@ -245,6 +261,11 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, * @get_link: Report whether physical link is up. Will only be called if * the netdev is up. Should usually be set to ethtool_op_get_link(), * which uses netif_carrier_ok(). + * @get_link_ext_state: Report link extended state. Should set link_ext_state and + * link_ext_substate (link_ext_substate of 0 means link_ext_substate is unknown, + * do not attach ext_substate attribute to netlink message). If link_ext_state + * and link_ext_substate are unknown, return -ENODATA. If not implemented, + * link_ext_state and link_ext_substate will not be sent to userspace. * @get_eeprom: Read data from the device EEPROM. * Should fill in the magic field. Don't need to check len for zero * or wraparound. 
Fill in the data argument with the eeprom values @@ -384,6 +405,8 @@ struct ethtool_ops { void (*set_msglevel)(struct net_device *, u32); int (*nway_reset)(struct net_device *); u32 (*get_link)(struct net_device *); + int (*get_link_ext_state)(struct net_device *, + struct ethtool_link_ext_state_info *); int (*get_eeprom_len)(struct net_device *); int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index f4662b3a9e1e..d1413538ef30 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -579,6 +579,76 @@ struct ethtool_pauseparam { __u32 tx_pause; }; +/** + * enum ethtool_link_ext_state - link extended state + */ +enum ethtool_link_ext_state { + ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_STATE_NO_CABLE, + ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, + ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE, + ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE, + ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED, + ETHTOOL_LINK_EXT_STATE_OVERHEAT, +}; + +/** + * enum ethtool_link_ext_substate_autoneg - more information in addition to + * ETHTOOL_LINK_EXT_STATE_AUTONEG. + */ +enum ethtool_link_ext_substate_autoneg { + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED = 1, + ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NEXT_PAGE_EXCHANGE_FAILED, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED_FORCE_MODE, + ETHTOOL_LINK_EXT_SUBSTATE_AN_FEC_MISMATCH_DURING_OVERRIDE, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD, +}; + +/** + * enum ethtool_link_ext_substate_link_training - more information in addition to + * ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE. + */ +enum ethtool_link_ext_substate_link_training { + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED = 1, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY, + ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT, +}; + +/** + * enum ethtool_link_ext_substate_logical_mismatch - more information in addition + * to ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH. + */ +enum ethtool_link_ext_substate_link_logical_mismatch { + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK = 1, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED, +}; + +/** + * enum ethtool_link_ext_substate_bad_signal_integrity - more information in + * addition to ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY. + */ +enum ethtool_link_ext_substate_bad_signal_integrity { + ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS = 1, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE, +}; + +/** + * enum ethtool_link_ext_substate_cable_issue - more information in + * addition to ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE. 
+ */ +enum ethtool_link_ext_substate_cable_issue { + ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE = 1, + ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE, +}; + #define ETH_GSTRING_LEN 32 /** diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h index 4dda5e4244a7..c12ce4df4b6b 100644 --- a/include/uapi/linux/ethtool_netlink.h +++ b/include/uapi/linux/ethtool_netlink.h @@ -236,6 +236,8 @@ enum { ETHTOOL_A_LINKSTATE_LINK, /* u8 */ ETHTOOL_A_LINKSTATE_SQI, /* u32 */ ETHTOOL_A_LINKSTATE_SQI_MAX, /* u32 */ + ETHTOOL_A_LINKSTATE_EXT_STATE, /* u8 */ + ETHTOOL_A_LINKSTATE_EXT_SUBSTATE, /* u8 */ /* add new constants above here */ __ETHTOOL_A_LINKSTATE_CNT, diff --git a/net/ethtool/linkstate.c b/net/ethtool/linkstate.c index afe5ac8a0f00..4834091ec24c 100644 --- a/net/ethtool/linkstate.c +++ b/net/ethtool/linkstate.c @@ -9,10 +9,12 @@ struct linkstate_req_info { }; struct linkstate_reply_data { - struct ethnl_reply_data base; - int link; - int sqi; - int sqi_max; + struct ethnl_reply_data base; + int link; + int sqi; + int sqi_max; + bool link_ext_state_provided; + struct ethtool_link_ext_state_info ethtool_link_ext_state_info; }; #define LINKSTATE_REPDATA(__reply_base) \ @@ -25,6 +27,8 @@ linkstate_get_policy[ETHTOOL_A_LINKSTATE_MAX + 1] = { [ETHTOOL_A_LINKSTATE_LINK] = { .type = NLA_REJECT }, [ETHTOOL_A_LINKSTATE_SQI] = { .type = NLA_REJECT }, [ETHTOOL_A_LINKSTATE_SQI_MAX] = { .type = NLA_REJECT }, + [ETHTOOL_A_LINKSTATE_EXT_STATE] = { .type = NLA_REJECT }, + [ETHTOOL_A_LINKSTATE_EXT_SUBSTATE] = { .type = NLA_REJECT }, }; static int linkstate_get_sqi(struct net_device *dev) @@ -61,6 +65,23 @@ static int linkstate_get_sqi_max(struct net_device *dev) mutex_unlock(&phydev->lock); return ret; +}; + +static int linkstate_get_link_ext_state(struct net_device *dev, + struct linkstate_reply_data *data) +{ + int err; + + if (!dev->ethtool_ops->get_link_ext_state) + return -EOPNOTSUPP; + + err = dev->ethtool_ops->get_link_ext_state(dev, &data->ethtool_link_ext_state_info); + if (err) + return err; + + data->link_ext_state_provided = true; + + return 0; } static int linkstate_prepare_data(const struct ethnl_req_info *req_base, @@ -86,6 +107,12 @@ static int linkstate_prepare_data(const struct ethnl_req_info *req_base, goto out; data->sqi_max = ret; + if (dev->flags & IFF_UP) { + ret = linkstate_get_link_ext_state(dev, data); + if (ret < 0 && ret != -EOPNOTSUPP && ret != -ENODATA) + goto out; + } + ret = 0; out: ethnl_ops_complete(dev); @@ -107,6 +134,12 @@ static int linkstate_reply_size(const struct ethnl_req_info *req_base, if (data->sqi_max != -EOPNOTSUPP) len += nla_total_size(sizeof(u32)); + if (data->link_ext_state_provided) + len += nla_total_size(sizeof(u8)); /* LINKSTATE_EXT_STATE */ + + if (data->ethtool_link_ext_state_info.__link_ext_substate) + len += nla_total_size(sizeof(u8)); /* LINKSTATE_EXT_SUBSTATE */ + return len; } @@ -128,6 +161,17 @@ static int linkstate_fill_reply(struct sk_buff *skb, nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI_MAX, data->sqi_max)) return -EMSGSIZE; + if (data->link_ext_state_provided) { + if (nla_put_u8(skb, ETHTOOL_A_LINKSTATE_EXT_STATE, + data->ethtool_link_ext_state_info.link_ext_state)) + return -EMSGSIZE; + + if (data->ethtool_link_ext_state_info.__link_ext_substate && + nla_put_u8(skb, ETHTOOL_A_LINKSTATE_EXT_SUBSTATE, + data->ethtool_link_ext_state_info.__link_ext_substate)) + return -EMSGSIZE; + } + return 0; } -- cgit v1.2.3 From 970471914c67b70df24def6b2a30cc42acbebded Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker 
Date: Tue, 16 Jun 2020 16:47:14 +0200 Subject: iommu: Allow page responses without PASID Some PCIe devices do not expect a PASID value in PRI Page Responses. If the "PRG Response PASID Required" bit in the PRI capability is zero, then the OS should not set the PASID field. Similarly on Arm SMMU, responses to stall events do not have a PASID. Currently iommu_page_response() systematically checks that the PASID in the page response corresponds to the one in the page request. This can't work with virtualization because a page response coming from a guest OS won't have a PASID if the passed-through device does not require one. Add a flag to page requests that declares whether the corresponding response needs to have a PASID. When this flag isn't set, allow page responses without PASID. Reported-by: Shameerali Kolothum Thodi Signed-off-by: Jean-Philippe Brucker Link: https://lore.kernel.org/r/20200616144712.748818-1-jean-philippe@linaro.org Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 23 +++++++++++++++++------ include/uapi/linux/iommu.h | 6 +++++- 2 files changed, 22 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index d43120eb1dc5..1ed1e14a1f0c 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1185,11 +1185,12 @@ EXPORT_SYMBOL_GPL(iommu_report_device_fault); int iommu_page_response(struct device *dev, struct iommu_page_response *msg) { - bool pasid_valid; + bool needs_pasid; int ret = -EINVAL; struct iommu_fault_event *evt; struct iommu_fault_page_request *prm; struct dev_iommu *param = dev->iommu; + bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID; struct iommu_domain *domain = iommu_get_domain_for_dev(dev); if (!domain || !domain->ops->page_response) @@ -1214,14 +1215,24 @@ int iommu_page_response(struct device *dev, */ list_for_each_entry(evt, ¶m->fault_param->faults, list) { prm = &evt->fault.prm; - pasid_valid = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID; + if (prm->grpid != msg->grpid) + continue; - if ((pasid_valid && prm->pasid != msg->pasid) || - prm->grpid != msg->grpid) + /* + * If the PASID is required, the corresponding request is + * matched using the group ID, the PASID valid bit and the PASID + * value. Otherwise only the group ID matches request and + * response. + */ + needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID; + if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid)) continue; - /* Sanitize the reply */ - msg->flags = pasid_valid ? IOMMU_PAGE_RESP_PASID_VALID : 0; + if (!needs_pasid && has_pasid) { + /* No big deal, just clear it. */ + msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID; + msg->pasid = 0; + } ret = domain->ops->page_response(dev, evt, msg); list_del(&evt->list); diff --git a/include/uapi/linux/iommu.h b/include/uapi/linux/iommu.h index e907b7091a46..c2b2caf9ed41 100644 --- a/include/uapi/linux/iommu.h +++ b/include/uapi/linux/iommu.h @@ -81,7 +81,10 @@ struct iommu_fault_unrecoverable { /** * struct iommu_fault_page_request - Page Request data * @flags: encodes whether the corresponding fields are valid and whether this - * is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values) + * is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values). + * When IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID is set, the page response + * must have the same PASID value as the page request. When it is clear, + * the page response should not have a PASID. 
* @pasid: Process Address Space ID * @grpid: Page Request Group Index * @perm: requested page permissions (IOMMU_FAULT_PERM_* values) @@ -92,6 +95,7 @@ struct iommu_fault_page_request { #define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID (1 << 0) #define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE (1 << 1) #define IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA (1 << 2) +#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID (1 << 3) __u32 flags; __u32 pasid; __u32 grpid; -- cgit v1.2.3 From ca37faf3d7005b5588f045edfac1d82799c408a7 Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Tue, 30 Jun 2020 10:17:56 +0200 Subject: iommu: Move sg_table wrapper out of CONFIG_IOMMU_SUPPORT Move the recently added sg_table wrapper out of CONFIG_IOMMU_SUPPORT to let the client code copile also when IOMMU support is disabled. Fixes: 48530d9fab0d ("iommu: add generic helper for mapping sgtable objects") Signed-off-by: Marek Szyprowski Link: https://lore.kernel.org/r/20200630081756.18526-1-m.szyprowski@samsung.com Signed-off-by: Joerg Roedel --- include/linux/iommu.h | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 5f0b7859d2eb..5657d4fef9f2 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -457,22 +457,6 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io extern void iommu_set_fault_handler(struct iommu_domain *domain, iommu_fault_handler_t handler, void *token); -/** - * iommu_map_sgtable - Map the given buffer to the IOMMU domain - * @domain: The IOMMU domain to perform the mapping - * @iova: The start address to map the buffer - * @sgt: The sg_table object describing the buffer - * @prot: IOMMU protection bits - * - * Creates a mapping at @iova for the buffer described by a scatterlist - * stored in the given sg_table object in the provided IOMMU domain. - */ -static inline size_t iommu_map_sgtable(struct iommu_domain *domain, - unsigned long iova, struct sg_table *sgt, int prot) -{ - return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot); -} - extern void iommu_get_resv_regions(struct device *dev, struct list_head *list); extern void iommu_put_resv_regions(struct device *dev, struct list_head *list); extern void generic_iommu_put_resv_regions(struct device *dev, @@ -1079,6 +1063,22 @@ static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev) } #endif /* CONFIG_IOMMU_API */ +/** + * iommu_map_sgtable - Map the given buffer to the IOMMU domain + * @domain: The IOMMU domain to perform the mapping + * @iova: The start address to map the buffer + * @sgt: The sg_table object describing the buffer + * @prot: IOMMU protection bits + * + * Creates a mapping at @iova for the buffer described by a scatterlist + * stored in the given sg_table object in the provided IOMMU domain. 
+ */ +static inline size_t iommu_map_sgtable(struct iommu_domain *domain, + unsigned long iova, struct sg_table *sgt, int prot) +{ + return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot); +} + #ifdef CONFIG_IOMMU_DEBUGFS extern struct dentry *iommu_debugfs_dir; void iommu_debugfs_setup(void); -- cgit v1.2.3 From bad0d73b657412058c4d7773ff0d50291bfe1905 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Tue, 9 Jun 2020 14:45:03 +0100 Subject: firmware: arm_scmi: Use signed integer to report transfer status Currently the trace event 'scmi_xfer_end' reports the status of the transfer using the unsigned status field read from the firmware which may not be easy to interpret. It may also miss to emit any timeouts that happen in the driver resulting in emitting garbage in the status field in those scenarios. Let us use signed integer so that error values are emitted out after they are mapped from firmware error formats to standard linux error codes. While at this, also include any timeouts in the driver itself. Link: https://lore.kernel.org/r/20200609134503.55860-1-sudeep.holla@arm.com Cc: Jim Quinlan Cc: Lukasz Luba Reviewed-by: Lukasz Luba Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/driver.c | 3 +-- include/trace/events/scmi.h | 6 +++--- 2 files changed, 4 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 7483cacf63f9..136acbe2f4a1 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -392,8 +392,7 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer) info->desc->ops->mark_txdone(cinfo, ret); trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id, - xfer->hdr.protocol_id, xfer->hdr.seq, - xfer->hdr.status); + xfer->hdr.protocol_id, xfer->hdr.seq, ret); return ret; } diff --git a/include/trace/events/scmi.h b/include/trace/events/scmi.h index f076c430d243..f3a4b4d60714 100644 --- a/include/trace/events/scmi.h +++ b/include/trace/events/scmi.h @@ -35,7 +35,7 @@ TRACE_EVENT(scmi_xfer_begin, TRACE_EVENT(scmi_xfer_end, TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq, - u32 status), + int status), TP_ARGS(transfer_id, msg_id, protocol_id, seq, status), TP_STRUCT__entry( @@ -43,7 +43,7 @@ TRACE_EVENT(scmi_xfer_end, __field(u8, msg_id) __field(u8, protocol_id) __field(u16, seq) - __field(u32, status) + __field(int, status) ), TP_fast_assign( @@ -54,7 +54,7 @@ TRACE_EVENT(scmi_xfer_end, __entry->status = status; ), - TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u status=%u", + TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u status=%d", __entry->transfer_id, __entry->msg_id, __entry->protocol_id, __entry->seq, __entry->status) ); -- cgit v1.2.3 From 1909872ff20fc378ec6a44ea1a2b2966d834e504 Mon Sep 17 00:00:00 2001 From: Nicola Mazzucato Date: Wed, 17 Jun 2020 10:43:31 +0100 Subject: firmware: arm_scmi: Add fast_switch_possible() interface Add a new fast_switch_possible interface to the existing perf_ops to export the information of whether or not fast_switch is possible for a given device. This can be used by the cpufreq driver and framework to choose proper mechanism for frequency change. 
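A minimal sketch (not taken from the patch) of how a cpufreq driver could consume the new callback; the scmi_example_* names and the way the handle is stored are assumptions made for illustration, only the fast_switch_possible() signature and the perf_ops member come from the SCMI code:

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/errno.h>
#include <linux/scmi_protocol.h>

/* Hypothetical: a real driver would save the handle at probe time. */
static const struct scmi_handle *scmi_example_handle;

static int scmi_example_cpufreq_init(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);

	if (!cpu_dev)
		return -ENODEV;

	/* Ask the firmware layer whether a per-domain fast channel exists
	 * and let the cpufreq core pick the switching mechanism from that. */
	policy->fast_switch_possible =
		scmi_example_handle->perf_ops->fast_switch_possible(scmi_example_handle,
								    cpu_dev);
	return 0;
}

The decision itself stays inside the SCMI layer: as the hunk below shows, fast switching is only reported when the domain carries fast-channel info with a level-set address.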
Link: https://lore.kernel.org/r/20200617094332.8391-1-nicola.mazzucato@arm.com Suggested-by: Lukasz Luba Signed-off-by: Nicola Mazzucato Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/perf.c | 12 ++++++++++++ include/linux/scmi_protocol.h | 2 ++ 2 files changed, 14 insertions(+) (limited to 'include') diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index eadc171e254b..7b8d7cebdac9 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c @@ -697,6 +697,17 @@ static int scmi_dvfs_est_power_get(const struct scmi_handle *handle, u32 domain, return ret; } +static bool scmi_fast_switch_possible(const struct scmi_handle *handle, + struct device *dev) +{ + struct perf_dom_info *dom; + struct scmi_perf_info *pi = handle->perf_priv; + + dom = pi->dom_info + scmi_dev_domain_id(dev); + + return dom->fc_info && dom->fc_info->level_set_addr; +} + static struct scmi_perf_ops perf_ops = { .limits_set = scmi_perf_limits_set, .limits_get = scmi_perf_limits_get, @@ -708,6 +719,7 @@ static struct scmi_perf_ops perf_ops = { .freq_set = scmi_dvfs_freq_set, .freq_get = scmi_dvfs_freq_get, .est_power_get = scmi_dvfs_est_power_get, + .fast_switch_possible = scmi_fast_switch_possible, }; static int scmi_perf_protocol_init(struct scmi_handle *handle) diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index ce2f5c28b2df..73911d156a39 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -118,6 +118,8 @@ struct scmi_perf_ops { unsigned long *rate, bool poll); int (*est_power_get)(const struct scmi_handle *handle, u32 domain, unsigned long *rate, unsigned long *power); + bool (*fast_switch_possible)(const struct scmi_handle *handle, + struct device *dev); }; /** -- cgit v1.2.3 From 7999096fa9cfd0253497c8d2ed9a5a1537521d25 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 12 Jun 2020 16:57:37 +1000 Subject: iov_iter: Move unnecessary inclusion of crypto/hash.h The header file linux/uio.h includes crypto/hash.h which pulls in most of the Crypto API. Since linux/uio.h is used throughout the kernel this means that every tiny bit of change to the Crypto API causes the entire kernel to get rebuilt. This patch fixes this by moving it into lib/iov_iter.c instead where it is actually used. This patch also fixes the ifdef to use CRYPTO_HASH instead of just CRYPTO which does not guarantee the existence of ahash. Unfortunately a number of drivers were relying on linux/uio.h to provide access to linux/slab.h. This patch adds inclusions of linux/slab.h as detected by build failures. Also skbuff.h was relying on this to provide a declaration for ahash_request. This patch adds a forward declaration instead. 
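The same decoupling pattern in miniature, using hypothetical widget/heavy names rather than the real uio/crypto headers (a sketch, not code from the patch): a header that only handles a type through a pointer can get by with a forward declaration, and the heavy include moves into the single translation unit that dereferences the type. heavy.h and heavy_ctx_run() are invented for the example.

/* widget.h, included widely across the tree, so it must stay light. */
#ifndef _EXAMPLE_WIDGET_H
#define _EXAMPLE_WIDGET_H

struct heavy_ctx;			/* forward declaration, no #include "heavy.h" */

struct widget {
	struct heavy_ctx *ctx;		/* used only through a pointer, size not needed */
};

int widget_process(struct widget *w);

#endif /* _EXAMPLE_WIDGET_H */

/* widget.c, the one translation unit that needs the full definition. */
#include "widget.h"
#include "heavy.h"			/* hypothetical heavy header, pulled in here only */

int widget_process(struct widget *w)
{
	return heavy_ctx_run(w->ctx);	/* dereferencing requires the complete type */
}

Every user of widget.h then stops rebuilding when heavy.h changes, which is the effect the patch gets by keeping crypto/hash.h out of linux/uio.h.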
Signed-off-by: Herbert Xu Signed-off-by: Al Viro --- arch/s390/lib/test_unwind.c | 1 + drivers/dma/sf-pdma/sf-pdma.c | 1 + drivers/dma/st_fdma.c | 1 + drivers/dma/uniphier-xdmac.c | 1 + drivers/misc/uacce/uacce.c | 1 + drivers/mtd/mtdpstore.c | 1 + drivers/mtd/nand/raw/cadence-nand-controller.c | 1 + drivers/remoteproc/qcom_q6v5_mss.c | 1 + drivers/soc/qcom/pdr_interface.c | 1 + fs/btrfs/inode.c | 1 + include/linux/skbuff.h | 1 + include/linux/socket.h | 1 + include/linux/uio.h | 1 - lib/iov_iter.c | 3 ++- 14 files changed, 14 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/arch/s390/lib/test_unwind.c b/arch/s390/lib/test_unwind.c index 32b7a30b2485..eb382ceaa116 100644 --- a/arch/s390/lib/test_unwind.c +++ b/arch/s390/lib/test_unwind.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c index 5c118c7e02bd..6e530dca6d9e 100644 --- a/drivers/dma/sf-pdma/sf-pdma.c +++ b/drivers/dma/sf-pdma/sf-pdma.c @@ -20,6 +20,7 @@ #include #include #include +#include #include "sf-pdma.h" diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c index 67087dbe2f9f..962b6e05287b 100644 --- a/drivers/dma/st_fdma.c +++ b/drivers/dma/st_fdma.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "st_fdma.h" diff --git a/drivers/dma/uniphier-xdmac.c b/drivers/dma/uniphier-xdmac.c index 7b2f8a8c2d31..16b19654873d 100644 --- a/drivers/dma/uniphier-xdmac.c +++ b/drivers/dma/uniphier-xdmac.c @@ -12,6 +12,7 @@ #include #include #include +#include #include "dmaengine.h" #include "virt-dma.h" diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c index 107028e77ca3..e45bfd409cc5 100644 --- a/drivers/misc/uacce/uacce.c +++ b/drivers/misc/uacce/uacce.c @@ -4,6 +4,7 @@ #include #include #include +#include #include static struct class *uacce_class; diff --git a/drivers/mtd/mtdpstore.c b/drivers/mtd/mtdpstore.c index a4fe6060b960..a3ae8778f6a9 100644 --- a/drivers/mtd/mtdpstore.c +++ b/drivers/mtd/mtdpstore.c @@ -7,6 +7,7 @@ #include #include #include +#include static struct mtdpstore_context { int index; diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c index c405722adfe1..c4f273e2fe78 100644 --- a/drivers/mtd/nand/raw/cadence-nand-controller.c +++ b/drivers/mtd/nand/raw/cadence-nand-controller.c @@ -17,6 +17,7 @@ #include #include #include +#include /* * HPNFC can work in 3 modes: diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c index feb70283b6a2..903b2bb97e12 100644 --- a/drivers/remoteproc/qcom_q6v5_mss.c +++ b/drivers/remoteproc/qcom_q6v5_mss.c @@ -26,6 +26,7 @@ #include #include #include +#include #include "remoteproc_internal.h" #include "qcom_common.h" diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c index bdcf16f88a97..a90d707da689 100644 --- a/drivers/soc/qcom/pdr_interface.c +++ b/drivers/soc/qcom/pdr_interface.c @@ -5,6 +5,7 @@ #include #include +#include #include #include diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index d04c82c88418..d901d53e4f03 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3,6 +3,7 @@ * Copyright (C) 2007 Oracle. All rights reserved. 
*/ +#include #include #include #include diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 0c0377fc00c2..1530e81a6cce 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -238,6 +238,7 @@ SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +struct ahash_request; struct net_device; struct scatterlist; struct pipe_inode_info; diff --git a/include/linux/socket.h b/include/linux/socket.h index 04d2bc97f497..e9cb30d8cbfb 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -10,6 +10,7 @@ #include /* __user */ #include +struct file; struct pid; struct cred; struct socket; diff --git a/include/linux/uio.h b/include/linux/uio.h index 9576fd8158d7..3835a8a8e9ea 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -7,7 +7,6 @@ #include #include -#include #include struct page; diff --git a/lib/iov_iter.c b/lib/iov_iter.c index bf538c2bec77..5e40786c8f12 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only +#include #include #include #include @@ -1567,7 +1568,7 @@ EXPORT_SYMBOL(csum_and_copy_to_iter); size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, struct iov_iter *i) { -#ifdef CONFIG_CRYPTO +#ifdef CONFIG_CRYPTO_HASH struct ahash_request *hash = hashp; struct scatterlist sg; size_t copied; -- cgit v1.2.3 From 65c763694398a2de63803b264dcf906b47f9d4c1 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Tue, 30 Jun 2020 18:24:56 +0800 Subject: blk-mq: pass request queue into get/put budget callback blk-mq budget is abstract from scsi's device queue depth, and it is always per-request-queue instead of hctx. It can be quite absurd to get a budget from one hctx, then dequeue a request from scheduler queue, and this request may not belong to this hctx, at least for bfq and deadline. So fix the mess and always pass request queue to get/put budget callback. Signed-off-by: Ming Lei Tested-by: Baolin Wang Reviewed-by: Johannes Thumshirn Reviewed-by: Christoph Hellwig Reviewed-by: Douglas Anderson Reviewed-by: Sagi Grimberg Cc: Sagi Grimberg Cc: Baolin Wang Cc: Christoph Hellwig Cc: Douglas Anderson Signed-off-by: Jens Axboe --- block/blk-mq-sched.c | 8 ++++---- block/blk-mq.c | 8 ++++---- block/blk-mq.h | 12 ++++-------- drivers/scsi/scsi_lib.c | 8 +++----- include/linux/blk-mq.h | 4 ++-- 5 files changed, 17 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index fdcc2c1dd178..a31e281e9d31 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -108,12 +108,12 @@ static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) break; } - if (!blk_mq_get_dispatch_budget(hctx)) + if (!blk_mq_get_dispatch_budget(q)) break; rq = e->type->ops.dispatch_request(hctx); if (!rq) { - blk_mq_put_dispatch_budget(hctx); + blk_mq_put_dispatch_budget(q); /* * We're releasing without dispatching. Holding the * budget could have blocked any "hctx"s with the @@ -173,12 +173,12 @@ static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx) if (!sbitmap_any_bit_set(&hctx->ctx_map)) break; - if (!blk_mq_get_dispatch_budget(hctx)) + if (!blk_mq_get_dispatch_budget(q)) break; rq = blk_mq_dequeue_from_ctx(hctx, ctx); if (!rq) { - blk_mq_put_dispatch_budget(hctx); + blk_mq_put_dispatch_budget(q); /* * We're releasing without dispatching. 
Holding the * budget could have blocked any "hctx"s with the diff --git a/block/blk-mq.c b/block/blk-mq.c index 72d3034fe39d..8cdc868f4249 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1284,7 +1284,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, rq = list_first_entry(list, struct request, queuelist); hctx = rq->mq_hctx; - if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) { + if (!got_budget && !blk_mq_get_dispatch_budget(q)) { blk_mq_put_driver_tag(rq); no_budget_avail = true; break; @@ -1299,7 +1299,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, * we'll re-run it below. */ if (!blk_mq_mark_tag_wait(hctx, rq)) { - blk_mq_put_dispatch_budget(hctx); + blk_mq_put_dispatch_budget(q); /* * For non-shared tags, the RESTART check * will suffice. @@ -1947,11 +1947,11 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, if (q->elevator && !bypass_insert) goto insert; - if (!blk_mq_get_dispatch_budget(hctx)) + if (!blk_mq_get_dispatch_budget(q)) goto insert; if (!blk_mq_get_driver_tag(rq)) { - blk_mq_put_dispatch_budget(hctx); + blk_mq_put_dispatch_budget(q); goto insert; } diff --git a/block/blk-mq.h b/block/blk-mq.h index c6330335767c..e4af193d39ac 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -179,20 +179,16 @@ unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part); void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part, unsigned int inflight[2]); -static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx) +static inline void blk_mq_put_dispatch_budget(struct request_queue *q) { - struct request_queue *q = hctx->queue; - if (q->mq_ops->put_budget) - q->mq_ops->put_budget(hctx); + q->mq_ops->put_budget(q); } -static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx) +static inline bool blk_mq_get_dispatch_budget(struct request_queue *q) { - struct request_queue *q = hctx->queue; - if (q->mq_ops->get_budget) - return q->mq_ops->get_budget(hctx); + return q->mq_ops->get_budget(q); return true; } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 6ca91d09eca1..534b85e87c80 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1597,17 +1597,15 @@ static void scsi_mq_done(struct scsi_cmnd *cmd) blk_mq_complete_request(cmd->request); } -static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx) +static void scsi_mq_put_budget(struct request_queue *q) { - struct request_queue *q = hctx->queue; struct scsi_device *sdev = q->queuedata; atomic_dec(&sdev->device_busy); } -static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx) +static bool scsi_mq_get_budget(struct request_queue *q) { - struct request_queue *q = hctx->queue; struct scsi_device *sdev = q->queuedata; return scsi_dev_queue_ready(q, sdev); @@ -1674,7 +1672,7 @@ out_dec_target_busy: if (scsi_target(sdev)->can_queue > 0) atomic_dec(&scsi_target(sdev)->target_busy); out_put_budget: - scsi_mq_put_budget(hctx); + scsi_mq_put_budget(q); switch (ret) { case BLK_STS_OK: break; diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 8986e88a986b..faa340b70123 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -270,8 +270,8 @@ struct blk_mq_queue_data { typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); typedef void (commit_rqs_fn)(struct blk_mq_hw_ctx *); -typedef bool (get_budget_fn)(struct blk_mq_hw_ctx *); -typedef void (put_budget_fn)(struct blk_mq_hw_ctx *); 
+typedef bool (get_budget_fn)(struct request_queue *); +typedef void (put_budget_fn)(struct request_queue *); typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool); typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); -- cgit v1.2.3 From e3f88cdb8fdd5876ef3a0373e35ba7f2598fcf17 Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Tue, 9 Jun 2020 04:54:33 +0800 Subject: soundwire: add definitions for 1.2 spec Add definitions for register offsets and bit fields from the MIPI SoundWire 1.2 specification (available to MIPI members at https://members.mipi.org/wg/All-Members/document/download/78371) Signed-off-by: Pierre-Louis Bossart Reviewed-by: Kai Vehmanen Signed-off-by: Bard Liao Link: https://lore.kernel.org/r/20200608205436.2402-2-yung-chuan.liao@linux.intel.com Signed-off-by: Vinod Koul --- include/linux/soundwire/sdw_registers.h | 107 +++++++++++++++++++++++++++++++- 1 file changed, 106 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/soundwire/sdw_registers.h b/include/linux/soundwire/sdw_registers.h index a686f7988156..12f9ffc3eb3b 100644 --- a/include/linux/soundwire/sdw_registers.h +++ b/include/linux/soundwire/sdw_registers.h @@ -12,7 +12,7 @@ #define SDW_REG_SHIFT(n) (ffs(n) - 1) /* - * SDW registers as defined by MIPI 1.1 Spec + * SDW registers as defined by MIPI 1.2 Spec */ #define SDW_REGADDR GENMASK(14, 0) #define SDW_SCP_ADDRPAGE2_MASK GENMASK(22, 15) @@ -43,6 +43,8 @@ #define SDW_DP0_INT_TEST_FAIL BIT(0) #define SDW_DP0_INT_PORT_READY BIT(1) #define SDW_DP0_INT_BRA_FAILURE BIT(2) +#define SDW_DP0_SDCA_CASCADE BIT(3) +/* BIT(4) not allocated in SoundWire specification 1.2 */ #define SDW_DP0_INT_IMPDEF1 BIT(5) #define SDW_DP0_INT_IMPDEF2 BIT(6) #define SDW_DP0_INT_IMPDEF3 BIT(7) @@ -106,6 +108,10 @@ #define SDW_SCP_ADDRPAGE2 0x49 #define SDW_SCP_KEEPEREN 0x4A #define SDW_SCP_BANKDELAY 0x4B +#define SDW_SCP_COMMIT 0x4C +#define SDW_SCP_BUS_CLOCK_BASE 0x4D +#define SDW_SCP_BASE_CLOCK_FREQ GENMASK(2, 0) +/* 0x4E is not allocated in SoundWire specification 1.2 */ #define SDW_SCP_TESTMODE 0x4F #define SDW_SCP_DEVID_0 0x50 #define SDW_SCP_DEVID_1 0x51 @@ -114,12 +120,111 @@ #define SDW_SCP_DEVID_4 0x54 #define SDW_SCP_DEVID_5 0x55 +/* Both INT and STATUS register are same */ +#define SDW_SCP_SDCA_INT1 0x58 +#define SDW_SCP_SDCA_INT_SDCA_0 BIT(0) +#define SDW_SCP_SDCA_INT_SDCA_1 BIT(1) +#define SDW_SCP_SDCA_INT_SDCA_2 BIT(2) +#define SDW_SCP_SDCA_INT_SDCA_3 BIT(3) +#define SDW_SCP_SDCA_INT_SDCA_4 BIT(4) +#define SDW_SCP_SDCA_INT_SDCA_5 BIT(5) +#define SDW_SCP_SDCA_INT_SDCA_6 BIT(6) +#define SDW_SCP_SDCA_INT_SDCA_7 BIT(7) + +#define SDW_SCP_SDCA_INT2 0x59 +#define SDW_SCP_SDCA_INT_SDCA_8 BIT(0) +#define SDW_SCP_SDCA_INT_SDCA_9 BIT(1) +#define SDW_SCP_SDCA_INT_SDCA_10 BIT(2) +#define SDW_SCP_SDCA_INT_SDCA_11 BIT(3) +#define SDW_SCP_SDCA_INT_SDCA_12 BIT(4) +#define SDW_SCP_SDCA_INT_SDCA_13 BIT(5) +#define SDW_SCP_SDCA_INT_SDCA_14 BIT(6) +#define SDW_SCP_SDCA_INT_SDCA_15 BIT(7) + +#define SDW_SCP_SDCA_INT3 0x5A +#define SDW_SCP_SDCA_INT_SDCA_16 BIT(0) +#define SDW_SCP_SDCA_INT_SDCA_17 BIT(1) +#define SDW_SCP_SDCA_INT_SDCA_18 BIT(2) +#define SDW_SCP_SDCA_INT_SDCA_19 BIT(3) +#define SDW_SCP_SDCA_INT_SDCA_20 BIT(4) +#define SDW_SCP_SDCA_INT_SDCA_21 BIT(5) +#define SDW_SCP_SDCA_INT_SDCA_22 BIT(6) +#define SDW_SCP_SDCA_INT_SDCA_23 BIT(7) + +#define SDW_SCP_SDCA_INT4 0x5B +#define SDW_SCP_SDCA_INT_SDCA_24 BIT(0) +#define SDW_SCP_SDCA_INT_SDCA_25 BIT(1) 
+#define SDW_SCP_SDCA_INT_SDCA_26 BIT(2) +#define SDW_SCP_SDCA_INT_SDCA_27 BIT(3) +#define SDW_SCP_SDCA_INT_SDCA_28 BIT(4) +#define SDW_SCP_SDCA_INT_SDCA_29 BIT(5) +#define SDW_SCP_SDCA_INT_SDCA_30 BIT(6) +/* BIT(7) not allocated in SoundWire 1.2 specification */ + +#define SDW_SCP_SDCA_INTMASK1 0x5C +#define SDW_SCP_SDCA_INTMASK_SDCA_0 BIT(0) +#define SDW_SCP_SDCA_INTMASK_SDCA_1 BIT(1) +#define SDW_SCP_SDCA_INTMASK_SDCA_2 BIT(2) +#define SDW_SCP_SDCA_INTMASK_SDCA_3 BIT(3) +#define SDW_SCP_SDCA_INTMASK_SDCA_4 BIT(4) +#define SDW_SCP_SDCA_INTMASK_SDCA_5 BIT(5) +#define SDW_SCP_SDCA_INTMASK_SDCA_6 BIT(6) +#define SDW_SCP_SDCA_INTMASK_SDCA_7 BIT(7) + +#define SDW_SCP_SDCA_INTMASK2 0x5D +#define SDW_SCP_SDCA_INTMASK_SDCA_8 BIT(0) +#define SDW_SCP_SDCA_INTMASK_SDCA_9 BIT(1) +#define SDW_SCP_SDCA_INTMASK_SDCA_10 BIT(2) +#define SDW_SCP_SDCA_INTMASK_SDCA_11 BIT(3) +#define SDW_SCP_SDCA_INTMASK_SDCA_12 BIT(4) +#define SDW_SCP_SDCA_INTMASK_SDCA_13 BIT(5) +#define SDW_SCP_SDCA_INTMASK_SDCA_14 BIT(6) +#define SDW_SCP_SDCA_INTMASK_SDCA_15 BIT(7) + +#define SDW_SCP_SDCA_INTMASK3 0x5E +#define SDW_SCP_SDCA_INTMASK_SDCA_16 BIT(0) +#define SDW_SCP_SDCA_INTMASK_SDCA_17 BIT(1) +#define SDW_SCP_SDCA_INTMASK_SDCA_18 BIT(2) +#define SDW_SCP_SDCA_INTMASK_SDCA_19 BIT(3) +#define SDW_SCP_SDCA_INTMASK_SDCA_20 BIT(4) +#define SDW_SCP_SDCA_INTMASK_SDCA_21 BIT(5) +#define SDW_SCP_SDCA_INTMASK_SDCA_22 BIT(6) +#define SDW_SCP_SDCA_INTMASK_SDCA_23 BIT(7) + +#define SDW_SCP_SDCA_INTMASK4 0x5F +#define SDW_SCP_SDCA_INTMASK_SDCA_24 BIT(0) +#define SDW_SCP_SDCA_INTMASK_SDCA_25 BIT(1) +#define SDW_SCP_SDCA_INTMASK_SDCA_26 BIT(2) +#define SDW_SCP_SDCA_INTMASK_SDCA_27 BIT(3) +#define SDW_SCP_SDCA_INTMASK_SDCA_28 BIT(4) +#define SDW_SCP_SDCA_INTMASK_SDCA_29 BIT(5) +#define SDW_SCP_SDCA_INTMASK_SDCA_30 BIT(6) +/* BIT(7) not allocated in SoundWire 1.2 specification */ + /* Banked Registers */ #define SDW_SCP_FRAMECTRL_B0 0x60 #define SDW_SCP_FRAMECTRL_B1 (0x60 + SDW_BANK1_OFFSET) #define SDW_SCP_NEXTFRAME_B0 0x61 #define SDW_SCP_NEXTFRAME_B1 (0x61 + SDW_BANK1_OFFSET) +#define SDW_SCP_BUSCLOCK_SCALE_B0 0x62 +#define SDW_SCP_BUSCLOCK_SCALE_B1 (0x62 + SDW_BANK1_OFFSET) +#define SDW_SCP_CLOCK_SCALE GENMASK(3, 0) + +/* PHY registers - CTRL and STAT are the same address */ +#define SDW_SCP_PHY_OUT_CTRL_0 0x80 +#define SDW_SCP_PHY_OUT_CTRL_1 0x81 +#define SDW_SCP_PHY_OUT_CTRL_2 0x82 +#define SDW_SCP_PHY_OUT_CTRL_3 0x83 +#define SDW_SCP_PHY_OUT_CTRL_4 0x84 +#define SDW_SCP_PHY_OUT_CTRL_5 0x85 +#define SDW_SCP_PHY_OUT_CTRL_6 0x86 +#define SDW_SCP_PHY_OUT_CTRL_7 0x87 + +#define SDW_SCP_CAP_LOAD_CTRL GENMASK(2, 0) +#define SDW_SCP_DRIVE_STRENGTH_CTRL GENMASK(5, 3) +#define SDW_SCP_SLEW_TIME_CTRL GENMASK(7, 6) + /* Both INT and STATUS register is same */ #define SDW_DPN_INT(n) (0x0 + SDW_DPN_SIZE * (n)) #define SDW_DPN_INTMASK(n) (0x1 + SDW_DPN_SIZE * (n)) -- cgit v1.2.3 From b5924268d6700821fbd8afe815073b05cebaa308 Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Tue, 9 Jun 2020 04:54:35 +0800 Subject: soundwire: extend SDW_SLAVE_ENTRY The SoundWire 1.2 specification adds new capabilities that were not present in previous version, such as the class ID. To enable support for class drivers, and well as drivers that address a specific version, all fields of the sdw_device_id structure need to be exposed. For SoundWire 1.0 and 1.1 devices, a wildcard is used so class and version information are ignored. 
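A hedged usage sketch of the extended entry macro added in the sdw.h hunk below; every numeric ID here is a placeholder rather than a real codec, and the include list is approximate:

/* Hypothetical codec driver id table; all IDs below are placeholders. */
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/soundwire/sdw.h>

static const struct sdw_device_id example_sdw_ids[] = {
	/* legacy form: version and class stay 0 and act as wildcards */
	SDW_SLAVE_ENTRY(0x1234, 0x5678, 0),
	/* extended form: also match on sdw_version and class_id */
	SDW_SLAVE_ENTRY_EXT(0x1234, 0x9abc, 0x3, 0x1, 0),
	{ },
};
MODULE_DEVICE_TABLE(sdw, example_sdw_ids);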
Signed-off-by: Pierre-Louis Bossart Reviewed-by: Rander Wang Reviewed-by: Guennadi Liakhovetski Signed-off-by: Bard Liao Link: https://lore.kernel.org/r/20200608205436.2402-4-yung-chuan.liao@linux.intel.com Signed-off-by: Vinod Koul --- drivers/soundwire/bus_type.c | 13 +++++++++---- include/linux/mod_devicetable.h | 2 ++ include/linux/soundwire/sdw.h | 11 +++++++---- scripts/mod/devicetable-offsets.c | 2 ++ scripts/mod/file2alias.c | 6 +++++- 5 files changed, 25 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c index c8d948c09d9d..6fba55898cf0 100644 --- a/drivers/soundwire/bus_type.c +++ b/drivers/soundwire/bus_type.c @@ -24,7 +24,11 @@ sdw_get_device_id(struct sdw_slave *slave, struct sdw_driver *drv) for (id = drv->id_table; id && id->mfg_id; id++) if (slave->id.mfg_id == id->mfg_id && - slave->id.part_id == id->part_id) + slave->id.part_id == id->part_id && + (!id->sdw_version || + slave->id.sdw_version == id->sdw_version) && + (!id->class_id || + slave->id.class_id == id->class_id)) return id; return NULL; @@ -47,10 +51,11 @@ static int sdw_bus_match(struct device *dev, struct device_driver *ddrv) int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size) { - /* modalias is sdw:mp */ + /* modalias is sdw:mpvc */ - return snprintf(buf, size, "sdw:m%04Xp%04X\n", - slave->id.mfg_id, slave->id.part_id); + return snprintf(buf, size, "sdw:m%04Xp%04Xv%02Xc%02X\n", + slave->id.mfg_id, slave->id.part_id, + slave->id.sdw_version, slave->id.class_id); } int sdw_slave_uevent(struct device *dev, struct kobj_uevent_env *env) diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 8d764aab29de..f8585e3a2c43 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -251,6 +251,8 @@ struct hda_device_id { struct sdw_device_id { __u16 mfg_id; __u16 part_id; + __u8 sdw_version; + __u8 class_id; kernel_ulong_t driver_data; }; diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index 9c27a32df9bb..64c9314cb903 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -426,8 +426,7 @@ int sdw_slave_read_prop(struct sdw_slave *slave); * struct sdw_slave_id - Slave ID * @mfg_id: MIPI Manufacturer ID * @part_id: Device Part ID - * @class_id: MIPI Class ID, unused now. 
- * Currently a placeholder in MIPI SoundWire Spec + * @class_id: MIPI Class ID (defined starting with SoundWire 1.2 spec) * @unique_id: Device unique ID * @sdw_version: SDW version implemented * @@ -659,10 +658,14 @@ struct sdw_driver { struct device_driver driver; }; -#define SDW_SLAVE_ENTRY(_mfg_id, _part_id, _drv_data) \ - { .mfg_id = (_mfg_id), .part_id = (_part_id), \ +#define SDW_SLAVE_ENTRY_EXT(_mfg_id, _part_id, _version, _c_id, _drv_data) \ + { .mfg_id = (_mfg_id), .part_id = (_part_id), \ + .sdw_version = (_version), .class_id = (_c_id), \ .driver_data = (unsigned long)(_drv_data) } +#define SDW_SLAVE_ENTRY(_mfg_id, _part_id, _drv_data) \ + SDW_SLAVE_ENTRY_EXT((_mfg_id), (_part_id), 0, 0, (_drv_data)) + int sdw_handle_slave_status(struct sdw_bus *bus, enum sdw_slave_status status[]); diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c index 010be8ba2116..27007c18e754 100644 --- a/scripts/mod/devicetable-offsets.c +++ b/scripts/mod/devicetable-offsets.c @@ -216,6 +216,8 @@ int main(void) DEVID(sdw_device_id); DEVID_FIELD(sdw_device_id, mfg_id); DEVID_FIELD(sdw_device_id, part_id); + DEVID_FIELD(sdw_device_id, sdw_version); + DEVID_FIELD(sdw_device_id, class_id); DEVID(fsl_mc_device_id); DEVID_FIELD(fsl_mc_device_id, vendor); diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index 9599e2a3f1e6..2417dd1dee33 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -1258,15 +1258,19 @@ static int do_hda_entry(const char *filename, void *symval, char *alias) return 1; } -/* Looks like: sdw:mNpN */ +/* Looks like: sdw:mNpNvNcN */ static int do_sdw_entry(const char *filename, void *symval, char *alias) { DEF_FIELD(symval, sdw_device_id, mfg_id); DEF_FIELD(symval, sdw_device_id, part_id); + DEF_FIELD(symval, sdw_device_id, sdw_version); + DEF_FIELD(symval, sdw_device_id, class_id); strcpy(alias, "sdw:"); ADD(alias, "m", mfg_id != 0, mfg_id); ADD(alias, "p", part_id != 0, part_id); + ADD(alias, "v", sdw_version != 0, sdw_version); + ADD(alias, "c", class_id != 0, class_id); add_wildcard(alias); return 1; -- cgit v1.2.3 From 29d158f906907ce8e52ea75ec87b4e35461f2018 Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Tue, 9 Jun 2020 04:54:36 +0800 Subject: soundwire: bus: initialize bus clock base and scale registers The SoundWire 1.2 specification adds new registers to allow for seamless clock changes while audio transfers are on-going. Program them following the specification. Note that dynamic clock changes are not supported for now, this only adds the register initialization. 
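The mapping itself is simple integer arithmetic. A rough user-space rendering with example frequencies follows; the kernel function below additionally checks that mclk is a multiple of the current bus clock and that the scale is a power of two no larger than 64:

/* User-space sketch of the base/scale mapping performed by the new
 * sdw_slave_set_frequency(); the frequencies are example values. */
#include <stdio.h>

int main(void)
{
	unsigned int mclk = 9600000;           /* example platform mclk      */
	unsigned int curr_dr_freq = 4800000;   /* example double-rate freq   */
	unsigned int curr = curr_dr_freq >> 1; /* bus clock = 2.4 MHz        */
	unsigned int base_code = 0, scale, idx = 0;

	/* 9.6 MHz divides 19.2 MHz, so the 19.2 MHz base code is chosen */
	if (!(19200000 % mclk))
		base_code = 0x1;               /* SDW_SCP_BASE_CLOCK_19200000_HZ */

	scale = mclk / curr;                   /* 9.6 MHz / 2.4 MHz = 4       */
	while ((1u << idx) < scale)            /* ilog2(4) = 2                */
		idx++;
	idx++;                                 /* register stores index + 1   */

	printf("base=0x%x scale_index=%u\n", base_code, idx); /* 0x1, 3 */
	return 0;
}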
Signed-off-by: Pierre-Louis Bossart Reviewed-by: Kai Vehmanen Reviewed-by: Guennadi Liakhovetski Signed-off-by: Bard Liao Link: https://lore.kernel.org/r/20200608205436.2402-5-yung-chuan.liao@linux.intel.com Signed-off-by: Vinod Koul --- drivers/soundwire/bus.c | 107 ++++++++++++++++++++++++++++++++ include/linux/soundwire/sdw_registers.h | 10 +++ 2 files changed, 117 insertions(+) (limited to 'include') diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c index fcfba1c38267..e6e0fb9a81b4 100644 --- a/drivers/soundwire/bus.c +++ b/drivers/soundwire/bus.c @@ -1070,12 +1070,119 @@ int sdw_configure_dpn_intr(struct sdw_slave *slave, return ret; } +static int sdw_slave_set_frequency(struct sdw_slave *slave) +{ + u32 mclk_freq = slave->bus->prop.mclk_freq; + u32 curr_freq = slave->bus->params.curr_dr_freq >> 1; + unsigned int scale; + u8 scale_index; + u8 base; + int ret; + + /* + * frequency base and scale registers are required for SDCA + * devices. They may also be used for 1.2+/non-SDCA devices, + * but we will need a DisCo property to cover this case + */ + if (!slave->id.class_id) + return 0; + + if (!mclk_freq) { + dev_err(&slave->dev, + "no bus MCLK, cannot set SDW_SCP_BUS_CLOCK_BASE\n"); + return -EINVAL; + } + + /* + * map base frequency using Table 89 of SoundWire 1.2 spec. + * The order of the tests just follows the specification, this + * is not a selection between possible values or a search for + * the best value but just a mapping. Only one case per platform + * is relevant. + * Some BIOS have inconsistent values for mclk_freq but a + * correct root so we force the mclk_freq to avoid variations. + */ + if (!(19200000 % mclk_freq)) { + mclk_freq = 19200000; + base = SDW_SCP_BASE_CLOCK_19200000_HZ; + } else if (!(24000000 % mclk_freq)) { + mclk_freq = 24000000; + base = SDW_SCP_BASE_CLOCK_24000000_HZ; + } else if (!(24576000 % mclk_freq)) { + mclk_freq = 24576000; + base = SDW_SCP_BASE_CLOCK_24576000_HZ; + } else if (!(22579200 % mclk_freq)) { + mclk_freq = 22579200; + base = SDW_SCP_BASE_CLOCK_22579200_HZ; + } else if (!(32000000 % mclk_freq)) { + mclk_freq = 32000000; + base = SDW_SCP_BASE_CLOCK_32000000_HZ; + } else { + dev_err(&slave->dev, + "Unsupported clock base, mclk %d\n", + mclk_freq); + return -EINVAL; + } + + if (mclk_freq % curr_freq) { + dev_err(&slave->dev, + "mclk %d is not multiple of bus curr_freq %d\n", + mclk_freq, curr_freq); + return -EINVAL; + } + + scale = mclk_freq / curr_freq; + + /* + * map scale to Table 90 of SoundWire 1.2 spec - and check + * that the scale is a power of two and maximum 64 + */ + scale_index = ilog2(scale); + + if (BIT(scale_index) != scale || scale_index > 6) { + dev_err(&slave->dev, + "No match found for scale %d, bus mclk %d curr_freq %d\n", + scale, mclk_freq, curr_freq); + return -EINVAL; + } + scale_index++; + + ret = sdw_write(slave, SDW_SCP_BUS_CLOCK_BASE, base); + if (ret < 0) { + dev_err(&slave->dev, + "SDW_SCP_BUS_CLOCK_BASE write failed:%d\n", ret); + return ret; + } + + /* initialize scale for both banks */ + ret = sdw_write(slave, SDW_SCP_BUSCLOCK_SCALE_B0, scale_index); + if (ret < 0) { + dev_err(&slave->dev, + "SDW_SCP_BUSCLOCK_SCALE_B0 write failed:%d\n", ret); + return ret; + } + ret = sdw_write(slave, SDW_SCP_BUSCLOCK_SCALE_B1, scale_index); + if (ret < 0) + dev_err(&slave->dev, + "SDW_SCP_BUSCLOCK_SCALE_B1 write failed:%d\n", ret); + + dev_dbg(&slave->dev, + "Configured bus base %d, scale %d, mclk %d, curr_freq %d\n", + base, scale_index, mclk_freq, curr_freq); + + return ret; +} + static int 
sdw_initialize_slave(struct sdw_slave *slave) { struct sdw_slave_prop *prop = &slave->prop; int ret; u8 val; + ret = sdw_slave_set_frequency(slave); + if (ret < 0) + return ret; + /* * Set bus clash, parity and SCP implementation * defined interrupt mask diff --git a/include/linux/soundwire/sdw_registers.h b/include/linux/soundwire/sdw_registers.h index 12f9ffc3eb3b..5d3c271af7d1 100644 --- a/include/linux/soundwire/sdw_registers.h +++ b/include/linux/soundwire/sdw_registers.h @@ -109,8 +109,18 @@ #define SDW_SCP_KEEPEREN 0x4A #define SDW_SCP_BANKDELAY 0x4B #define SDW_SCP_COMMIT 0x4C + #define SDW_SCP_BUS_CLOCK_BASE 0x4D #define SDW_SCP_BASE_CLOCK_FREQ GENMASK(2, 0) +#define SDW_SCP_BASE_CLOCK_UNKNOWN 0x0 +#define SDW_SCP_BASE_CLOCK_19200000_HZ 0x1 +#define SDW_SCP_BASE_CLOCK_24000000_HZ 0x2 +#define SDW_SCP_BASE_CLOCK_24576000_HZ 0x3 +#define SDW_SCP_BASE_CLOCK_22579200_HZ 0x4 +#define SDW_SCP_BASE_CLOCK_32000000_HZ 0x5 +#define SDW_SCP_BASE_CLOCK_RESERVED 0x6 +#define SDW_SCP_BASE_CLOCK_IMP_DEF 0x7 + /* 0x4E is not allocated in SoundWire specification 1.2 */ #define SDW_SCP_TESTMODE 0x4F #define SDW_SCP_DEVID_0 0x50 -- cgit v1.2.3 From 857ca89711de3dbcc674d58a6d7d297ee0bd34e1 Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Sun, 21 Jun 2020 18:40:30 +0300 Subject: ipvs: register hooks only with services Keep the IPVS hooks registered in Netfilter only while there are configured virtual services. This saves CPU cycles while IPVS is loaded but not used. Signed-off-by: Julian Anastasov Reviewed-by: Simon Horman Signed-off-by: Pablo Neira Ayuso --- include/net/ip_vs.h | 5 +++ net/netfilter/ipvs/ip_vs_core.c | 80 ++++++++++++++++++++++++++++++++--------- net/netfilter/ipvs/ip_vs_ctl.c | 23 ++++++++++-- 3 files changed, 89 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 83be2d93b407..0c9881241323 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -874,6 +874,7 @@ struct netns_ipvs { struct ip_vs_stats tot_stats; /* Statistics & est. */ int num_services; /* no of virtual services */ + int num_services6; /* IPv6 virtual services */ /* Trash for destinations */ struct list_head dest_trash; @@ -960,6 +961,7 @@ struct netns_ipvs { * are not supported when synchronization is enabled. 
*/ unsigned int mixed_address_family_dests; + unsigned int hooks_afmask; /* &1=AF_INET, &2=AF_INET6 */ }; #define DEFAULT_SYNC_THRESHOLD 3 @@ -1670,6 +1672,9 @@ static inline void ip_vs_unregister_conntrack(struct ip_vs_service *svc) #endif } +int ip_vs_register_hooks(struct netns_ipvs *ipvs, unsigned int af); +void ip_vs_unregister_hooks(struct netns_ipvs *ipvs, unsigned int af); + static inline int ip_vs_dest_conn_overhead(struct ip_vs_dest *dest) { diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index aa6a603a2425..ca3670152565 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -2256,7 +2256,7 @@ ip_vs_forward_icmp_v6(void *priv, struct sk_buff *skb, #endif -static const struct nf_hook_ops ip_vs_ops[] = { +static const struct nf_hook_ops ip_vs_ops4[] = { /* After packet filtering, change source only for VS/NAT */ { .hook = ip_vs_reply4, @@ -2302,7 +2302,10 @@ static const struct nf_hook_ops ip_vs_ops[] = { .hooknum = NF_INET_FORWARD, .priority = 100, }, +}; + #ifdef CONFIG_IP_VS_IPV6 +static const struct nf_hook_ops ip_vs_ops6[] = { /* After packet filtering, change source only for VS/NAT */ { .hook = ip_vs_reply6, @@ -2348,8 +2351,64 @@ static const struct nf_hook_ops ip_vs_ops[] = { .hooknum = NF_INET_FORWARD, .priority = 100, }, -#endif }; +#endif + +int ip_vs_register_hooks(struct netns_ipvs *ipvs, unsigned int af) +{ + const struct nf_hook_ops *ops; + unsigned int count; + unsigned int afmask; + int ret = 0; + + if (af == AF_INET6) { +#ifdef CONFIG_IP_VS_IPV6 + ops = ip_vs_ops6; + count = ARRAY_SIZE(ip_vs_ops6); + afmask = 2; +#else + return -EINVAL; +#endif + } else { + ops = ip_vs_ops4; + count = ARRAY_SIZE(ip_vs_ops4); + afmask = 1; + } + + if (!(ipvs->hooks_afmask & afmask)) { + ret = nf_register_net_hooks(ipvs->net, ops, count); + if (ret >= 0) + ipvs->hooks_afmask |= afmask; + } + return ret; +} + +void ip_vs_unregister_hooks(struct netns_ipvs *ipvs, unsigned int af) +{ + const struct nf_hook_ops *ops; + unsigned int count; + unsigned int afmask; + + if (af == AF_INET6) { +#ifdef CONFIG_IP_VS_IPV6 + ops = ip_vs_ops6; + count = ARRAY_SIZE(ip_vs_ops6); + afmask = 2; +#else + return; +#endif + } else { + ops = ip_vs_ops4; + count = ARRAY_SIZE(ip_vs_ops4); + afmask = 1; + } + + if (ipvs->hooks_afmask & afmask) { + nf_unregister_net_hooks(ipvs->net, ops, count); + ipvs->hooks_afmask &= ~afmask; + } +} + /* * Initialize IP Virtual Server netns mem. 
*/ @@ -2425,19 +2484,6 @@ static void __net_exit __ip_vs_cleanup_batch(struct list_head *net_list) } } -static int __net_init __ip_vs_dev_init(struct net *net) -{ - int ret; - - ret = nf_register_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); - if (ret < 0) - goto hook_fail; - return 0; - -hook_fail: - return ret; -} - static void __net_exit __ip_vs_dev_cleanup_batch(struct list_head *net_list) { struct netns_ipvs *ipvs; @@ -2446,7 +2492,8 @@ static void __net_exit __ip_vs_dev_cleanup_batch(struct list_head *net_list) EnterFunction(2); list_for_each_entry(net, net_list, exit_list) { ipvs = net_ipvs(net); - nf_unregister_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); + ip_vs_unregister_hooks(ipvs, AF_INET); + ip_vs_unregister_hooks(ipvs, AF_INET6); ipvs->enable = 0; /* Disable packet reception */ smp_wmb(); ip_vs_sync_net_cleanup(ipvs); @@ -2462,7 +2509,6 @@ static struct pernet_operations ipvs_core_ops = { }; static struct pernet_operations ipvs_core_dev_ops = { - .init = __ip_vs_dev_init, .exit_batch = __ip_vs_dev_cleanup_batch, }; diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 412656c34f20..0eed388c960b 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1272,6 +1272,7 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u, struct ip_vs_scheduler *sched = NULL; struct ip_vs_pe *pe = NULL; struct ip_vs_service *svc = NULL; + int ret_hooks = -1; /* increase the module use count */ if (!ip_vs_use_count_inc()) @@ -1313,6 +1314,14 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u, } #endif + if ((u->af == AF_INET && !ipvs->num_services) || + (u->af == AF_INET6 && !ipvs->num_services6)) { + ret = ip_vs_register_hooks(ipvs, u->af); + if (ret < 0) + goto out_err; + ret_hooks = ret; + } + svc = kzalloc(sizeof(struct ip_vs_service), GFP_KERNEL); if (svc == NULL) { IP_VS_DBG(1, "%s(): no memory\n", __func__); @@ -1374,6 +1383,8 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u, /* Count only IPv4 services for old get/setsockopt interface */ if (svc->af == AF_INET) ipvs->num_services++; + else if (svc->af == AF_INET6) + ipvs->num_services6++; /* Hash the service into the service table */ ip_vs_svc_hash(svc); @@ -1385,6 +1396,8 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u, out_err: + if (ret_hooks >= 0) + ip_vs_unregister_hooks(ipvs, u->af); if (svc != NULL) { ip_vs_unbind_scheduler(svc, sched); ip_vs_service_free(svc); @@ -1500,9 +1513,15 @@ static void __ip_vs_del_service(struct ip_vs_service *svc, bool cleanup) struct ip_vs_pe *old_pe; struct netns_ipvs *ipvs = svc->ipvs; - /* Count only IPv4 services for old get/setsockopt interface */ - if (svc->af == AF_INET) + if (svc->af == AF_INET) { ipvs->num_services--; + if (!ipvs->num_services) + ip_vs_unregister_hooks(ipvs, svc->af); + } else if (svc->af == AF_INET6) { + ipvs->num_services6--; + if (!ipvs->num_services6) + ip_vs_unregister_hooks(ipvs, svc->af); + } ip_vs_stop_estimator(svc->ipvs, &svc->stats); -- cgit v1.2.3 From e8cf9c8c4c18cda752fadce2da4f2a88d1e3d9e2 Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Fri, 12 Jun 2020 17:28:41 +0800 Subject: tracing: not necessary to undefine DEFINE_EVENT again After un-define DEFINE_EVENT in Stage 2, DEFINE_EVENT is not defined to a specific form. It is not necessary to un-define it again. Let's skip this. 
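For context, trace_events.h expands the same event list several times, redefining the per-event macros before each pass. A small stand-alone sketch of that staged pattern (hypothetical MY_EVENT macro and file names, not the real tracing macros) shows why a macro already defined empty does not need to be undefined and redefined before later passes:

/* events.list -- hypothetical event list, stands in for TRACE_INCLUDE_FILE */
MY_EVENT(alpha)
MY_EVENT(beta)

/* stages.c -- staged expansion: the list is included once per stage,
 * with MY_EVENT redefined before each pass that needs it. */
#include <stdio.h>

enum example_events {
#define MY_EVENT(name) EV_##name,      /* stage 1: emit an enum entry */
#include "events.list"
#undef MY_EVENT
	EV_MAX
};

#define MY_EVENT(name)                 /* stage 2: expand to nothing  */
#include "events.list"
/* Later stages that also ignore MY_EVENT can simply reuse the empty
 * definition; repeating "#undef MY_EVENT / #define MY_EVENT(name)"
 * before each of them is redundant, which is the point of this cleanup. */
#include "events.list"
#undef MY_EVENT

int main(void)
{
	printf("%d events\n", EV_MAX);
	return 0;
}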
Link: http://lkml.kernel.org/r/20200612092844.56107-2-richard.weiyang@linux.alibaba.com Signed-off-by: Wei Yang Signed-off-by: Steven Rostedt (VMware) --- include/trace/trace_events.h | 6 ------ 1 file changed, 6 deletions(-) (limited to 'include') diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h index 502c7be50b8d..a4994761bdaf 100644 --- a/include/trace/trace_events.h +++ b/include/trace/trace_events.h @@ -443,9 +443,6 @@ static struct trace_event_fields trace_event_fields_##call[] = { \ tstruct \ {} }; -#undef DEFINE_EVENT -#define DEFINE_EVENT(template, name, proto, args) - #undef DEFINE_EVENT_PRINT #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) @@ -523,9 +520,6 @@ static inline notrace int trace_event_get_offsets_##call( \ return __data_size; \ } -#undef DEFINE_EVENT -#define DEFINE_EVENT(template, name, proto, args) - #undef DEFINE_EVENT_PRINT #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) -- cgit v1.2.3 From b6f9eb87078b18cea7015e2575234e9b6b296c2f Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Fri, 12 Jun 2020 17:28:42 +0800 Subject: tracing: not necessary re-define DEFINE_EVENT_PRINT The definition of DEFINE_EVENT_PRINT is not changed after previous one, so not necessary to re-define is as the same form. Link: http://lkml.kernel.org/r/20200612092844.56107-3-richard.weiyang@linux.alibaba.com Signed-off-by: Wei Yang Signed-off-by: Steven Rostedt (VMware) --- include/trace/trace_events.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'include') diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h index a4994761bdaf..5219f81b9d74 100644 --- a/include/trace/trace_events.h +++ b/include/trace/trace_events.h @@ -520,10 +520,6 @@ static inline notrace int trace_event_get_offsets_##call( \ return __data_size; \ } -#undef DEFINE_EVENT_PRINT -#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ - DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) - #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) /* -- cgit v1.2.3 From 61df16fcafad810ea5dcaa640d0fe3e039d8e652 Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Fri, 12 Jun 2020 17:28:43 +0800 Subject: tracing: define DEFINE_EVENT_PRINT not related to DEFINE_EVENT Current definition define DEFINE_EVENT_PRINT to be DEFINE_EVENT. Actually, at this point DEFINE_EVENT is already an empty macro. Let's cut the relationship between DEFINE_EVENT_PRINT and DEFINE_EVENT. 
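A tiny preprocessor sketch (made-up macro names) of why the indirection can be dropped: once the macro being delegated to expands to nothing, delegating and defining the wrapper empty directly produce identical expansions, so the direct form is clearer:

#include <stdio.h>

#define BASE(x)                 /* expands to nothing                   */
#define OLD_WRAPPER(x) BASE(x)  /* indirect: still expands to nothing   */
#define NEW_WRAPPER(x)          /* direct empty definition: same result */

OLD_WRAPPER(alpha)              /* both lines vanish after preprocessing */
NEW_WRAPPER(beta)

int main(void)
{
	printf("compiles identically with either form\n");
	return 0;
}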
Link: http://lkml.kernel.org/r/20200612092844.56107-4-richard.weiyang@linux.alibaba.com Signed-off-by: Wei Yang Signed-off-by: Steven Rostedt (VMware) --- include/trace/trace_events.h | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h index 5219f81b9d74..43023c3e9d74 100644 --- a/include/trace/trace_events.h +++ b/include/trace/trace_events.h @@ -210,8 +210,7 @@ TRACE_MAKE_SYSTEM_STR(); #define DEFINE_EVENT(template, name, proto, args) #undef DEFINE_EVENT_PRINT -#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ - DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) +#define DEFINE_EVENT_PRINT(template, name, proto, args, print) #undef TRACE_EVENT_FLAGS #define TRACE_EVENT_FLAGS(event, flag) @@ -444,8 +443,7 @@ static struct trace_event_fields trace_event_fields_##call[] = { \ {} }; #undef DEFINE_EVENT_PRINT -#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ - DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) +#define DEFINE_EVENT_PRINT(template, name, proto, args, print) #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) -- cgit v1.2.3 From e6bc5b3f423825220a3232ddf08399658c918671 Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Fri, 12 Jun 2020 17:28:44 +0800 Subject: tracing: not necessary to define DEFINE_EVENT_PRINT to be empty again After the previous cleanup, DEFINE_EVENT_PRINT's definition has no relationship with DEFINE_EVENT. So After we re-define DEFINE_EVENT, it is not necessary to define DEFINE_EVENT_PRINT to be empty again. Link: http://lkml.kernel.org/r/20200612092844.56107-5-richard.weiyang@linux.alibaba.com Signed-off-by: Wei Yang Signed-off-by: Steven Rostedt (VMware) --- include/trace/trace_events.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'include') diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h index 43023c3e9d74..1bc3e7bba9a4 100644 --- a/include/trace/trace_events.h +++ b/include/trace/trace_events.h @@ -709,9 +709,6 @@ static inline void ftrace_test_probe_##call(void) \ check_trace_callback_type_##call(trace_event_raw_event_##template); \ } -#undef DEFINE_EVENT_PRINT -#define DEFINE_EVENT_PRINT(template, name, proto, args, print) - #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) #undef __entry -- cgit v1.2.3 From a6ed3ebca49b62d7a917287b9986feff4e9fa7b1 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 30 Jun 2020 15:27:46 +0100 Subject: net/tls: fix sign extension issue when left shifting u16 value Left shifting the u16 value promotes it to a int and then it gets sign extended to a u64. If len << 16 is greater than 0x7fffffff then the upper bits get set to 1 because of the implicit sign extension. Fix this by casting len to u64 before shifting it. Addresses-Coverity: ("integer handling issues") Fixes: ed9b7646b06a ("net/tls: Add asynchronous resync") Signed-off-by: Colin Ian King Reviewed-by: Tariq Toukan Signed-off-by: David S. 
Miller --- include/net/tls.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/tls.h b/include/net/tls.h index c875c0a445a6..e5dac7e74e79 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -637,7 +637,7 @@ tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len) struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) | - (len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC); + ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC); rx_ctx->resync_async->loglen = 0; } -- cgit v1.2.3 From b23d7a5f4a07af02343cdd28fe1f7488bac3afda Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 25 Jun 2020 15:34:03 +1000 Subject: ring-buffer: speed up buffer resets by avoiding synchronize_rcu for each CPU On a 144 thread system, `perf ftrace` takes about 20 seconds to start up, due to calling synchronize_rcu() for each CPU. cat /proc/108560/stack 0xc0003e7eb336f470 __switch_to+0x2e0/0x480 __wait_rcu_gp+0x20c/0x220 synchronize_rcu+0x9c/0xc0 ring_buffer_reset_cpu+0x88/0x2e0 tracing_reset_online_cpus+0x84/0xe0 tracing_open+0x1d4/0x1f0 On a system with 10x more threads, it starts to become an annoyance. Batch these up so we disable all the per-cpu buffers first, then synchronize_rcu() once, then reset each of the buffers. This brings the time down to about 0.5s. Link: https://lkml.kernel.org/r/20200625053403.2386972-1-npiggin@gmail.com Tested-by: Anton Blanchard Acked-by: Paul E. McKenney Signed-off-by: Nicholas Piggin Signed-off-by: Steven Rostedt (VMware) --- include/linux/ring_buffer.h | 1 + kernel/trace/ring_buffer.c | 85 +++++++++++++++++++++++++++++++++++++-------- kernel/trace/trace.c | 4 +-- 3 files changed, 73 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index c76b2f3b3ac4..136ea0997e6d 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -143,6 +143,7 @@ bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter); unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu); void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu); +void ring_buffer_reset_online_cpus(struct trace_buffer *buffer); void ring_buffer_reset(struct trace_buffer *buffer); #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 802bb38d9c81..ed1941304f69 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -270,6 +270,9 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data); #define for_each_buffer_cpu(buffer, cpu) \ for_each_cpu(cpu, buffer->cpumask) +#define for_each_online_buffer_cpu(buffer, cpu) \ + for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask) + #define TS_SHIFT 27 #define TS_MASK ((1ULL << TS_SHIFT) - 1) #define TS_DELTA_TEST (~TS_MASK) @@ -4790,6 +4793,26 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) rb_head_page_activate(cpu_buffer); } +/* Must have disabled the cpu buffer then done a synchronize_rcu */ +static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); + + if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) + goto out; + + arch_spin_lock(&cpu_buffer->lock); + + rb_reset_cpu(cpu_buffer); + + arch_spin_unlock(&cpu_buffer->lock); + + out: + raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); +} + /** * ring_buffer_reset_cpu - 
reset a ring buffer per CPU buffer * @buffer: The ring buffer to reset a per cpu buffer of @@ -4798,7 +4821,6 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; - unsigned long flags; if (!cpumask_test_cpu(cpu, buffer->cpumask)) return; @@ -4809,24 +4831,42 @@ void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu) /* Make sure all commits have finished */ synchronize_rcu(); - raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); + reset_disabled_cpu_buffer(cpu_buffer); - if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) - goto out; + atomic_dec(&cpu_buffer->record_disabled); + atomic_dec(&cpu_buffer->resize_disabled); +} +EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); - arch_spin_lock(&cpu_buffer->lock); +/** + * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer + * @buffer: The ring buffer to reset a per cpu buffer of + * @cpu: The CPU buffer to be reset + */ +void ring_buffer_reset_online_cpus(struct trace_buffer *buffer) +{ + struct ring_buffer_per_cpu *cpu_buffer; + int cpu; - rb_reset_cpu(cpu_buffer); + for_each_online_buffer_cpu(buffer, cpu) { + cpu_buffer = buffer->buffers[cpu]; - arch_spin_unlock(&cpu_buffer->lock); + atomic_inc(&cpu_buffer->resize_disabled); + atomic_inc(&cpu_buffer->record_disabled); + } - out: - raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); + /* Make sure all commits have finished */ + synchronize_rcu(); - atomic_dec(&cpu_buffer->record_disabled); - atomic_dec(&cpu_buffer->resize_disabled); + for_each_online_buffer_cpu(buffer, cpu) { + cpu_buffer = buffer->buffers[cpu]; + + reset_disabled_cpu_buffer(cpu_buffer); + + atomic_dec(&cpu_buffer->record_disabled); + atomic_dec(&cpu_buffer->resize_disabled); + } } -EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); /** * ring_buffer_reset - reset a ring buffer @@ -4834,10 +4874,27 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); */ void ring_buffer_reset(struct trace_buffer *buffer) { + struct ring_buffer_per_cpu *cpu_buffer; int cpu; - for_each_buffer_cpu(buffer, cpu) - ring_buffer_reset_cpu(buffer, cpu); + for_each_buffer_cpu(buffer, cpu) { + cpu_buffer = buffer->buffers[cpu]; + + atomic_inc(&cpu_buffer->resize_disabled); + atomic_inc(&cpu_buffer->record_disabled); + } + + /* Make sure all commits have finished */ + synchronize_rcu(); + + for_each_buffer_cpu(buffer, cpu) { + cpu_buffer = buffer->buffers[cpu]; + + reset_disabled_cpu_buffer(cpu_buffer); + + atomic_dec(&cpu_buffer->record_disabled); + atomic_dec(&cpu_buffer->resize_disabled); + } } EXPORT_SYMBOL_GPL(ring_buffer_reset); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 64c5b8146cca..4aab712f9567 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2003,7 +2003,6 @@ static void tracing_reset_cpu(struct array_buffer *buf, int cpu) void tracing_reset_online_cpus(struct array_buffer *buf) { struct trace_buffer *buffer = buf->buffer; - int cpu; if (!buffer) return; @@ -2015,8 +2014,7 @@ void tracing_reset_online_cpus(struct array_buffer *buf) buf->time_start = buffer_ftrace_now(buf, buf->cpu); - for_each_online_cpu(cpu) - ring_buffer_reset_cpu(buffer, cpu); + ring_buffer_reset_online_cpus(buffer); ring_buffer_record_enable(buffer); } -- cgit v1.2.3 From ab81e23cf77905896a5e51422eb58e8763507b85 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 29 Jun 2020 14:05:07 +0300 Subject: net: qed: correct existing SPDX tags QLogic QED drivers source code is dual licensed 
under GPL-2.0/BSD-3-Clause. Correct already existing but wrong SPDX tags to match the actual license. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/Makefile | 3 ++- drivers/net/ethernet/qlogic/qed/qed_debug.c | 2 +- drivers/net/ethernet/qlogic/qed/qed_debug.h | 2 +- drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c | 3 ++- drivers/net/ethernet/qlogic/qed/qed_selftest.h | 3 ++- include/linux/qed/fcoe_common.h | 2 +- include/linux/qed/qed_fcoe_if.h | 3 ++- 7 files changed, 11 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index a0acb94d65f0..cee566faba2f 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ b/drivers/net/ethernet/qlogic/qed/Makefile @@ -1,4 +1,5 @@ -# SPDX-License-Identifier: GPL-2.0 +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) + obj-$(CONFIG_QED) := qed.o qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 81e8fbe4a05b..8b14e6852daf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0-only +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015 QLogic Corporation */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.h b/drivers/net/ethernet/qlogic/qed/qed_debug.h index edf99d296bd1..685696878ec2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.h +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015 QLogic Corporation */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c index 6c16158d8090..56b7567d7a60 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c @@ -1,4 +1,5 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) + #include #include #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.h b/drivers/net/ethernet/qlogic/qed/qed_selftest.h index ad00d082fec8..d8121fd39bc1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_selftest.h +++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.h @@ -1,4 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ + #ifndef _QED_SELFTEST_API_H #define _QED_SELFTEST_API_H #include diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h index 98cfc195abe2..a669d7d84284 100644 --- a/include/linux/qed/fcoe_common.h +++ b/include/linux/qed/fcoe_common.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015 QLogic Corporation */ diff --git a/include/linux/qed/qed_fcoe_if.h b/include/linux/qed/qed_fcoe_if.h index 46082480a2c3..65d0317ef67e 100644 --- a/include/linux/qed/qed_fcoe_if.h +++ b/include/linux/qed/qed_fcoe_if.h @@ -1,4 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ + #ifndef _QED_FCOE_IF_H #define _QED_FCOE_IF_H #include -- cgit v1.2.3 From 
1f4d4ed6acc5b74974fa038e5e9d2e19f3b532f3 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 29 Jun 2020 14:05:08 +0300 Subject: net: qed: convert to SPDX License Identifiers QLogic QED drivers source code is dual licensed under GPL-2.0/BSD-3-Clause. Remove all the boilerplates in the existing code and replace it with the correct SPDX tag. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_cxt.h | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_dcbx.h | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_dev.c | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_fcoe.c | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_fcoe.h | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_hw.c | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_hw.h | 29 +-------------------- .../net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_init_ops.c | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_init_ops.h | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_int.c | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_int.h | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_iscsi.h | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 30 ++-------------------- drivers/net/ethernet/qlogic/qed/qed_iwarp.h | 30 ++-------------------- drivers/net/ethernet/qlogic/qed/qed_l2.c | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_l2.h | 30 ++-------------------- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_ll2.h | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_main.c | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_ooo.c | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_ooo.h | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_ptp.c | 30 ++-------------------- drivers/net/ethernet/qlogic/qed/qed_rdma.c | 30 ++-------------------- drivers/net/ethernet/qlogic/qed/qed_rdma.h | 30 ++-------------------- drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_roce.c | 30 ++-------------------- drivers/net/ethernet/qlogic/qed/qed_roce.h | 30 ++-------------------- drivers/net/ethernet/qlogic/qed/qed_selftest.c | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_sp.h | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_spq.c | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_sriov.h | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_vf.c | 29 +-------------------- drivers/net/ethernet/qlogic/qed/qed_vf.h | 29 +-------------------- 
include/linux/qed/common_hsi.h | 29 +-------------------- include/linux/qed/eth_common.h | 29 +-------------------- include/linux/qed/iscsi_common.h | 29 +-------------------- include/linux/qed/iwarp_common.h | 29 +-------------------- include/linux/qed/qed_chain.h | 29 +-------------------- include/linux/qed/qed_eth_if.h | 29 +-------------------- include/linux/qed/qed_if.h | 29 +-------------------- include/linux/qed/qed_iov_if.h | 29 +-------------------- include/linux/qed/qed_iscsi_if.h | 29 +-------------------- include/linux/qed/qed_ll2_if.h | 29 +-------------------- include/linux/qed/qed_rdma_if.h | 30 ++-------------------- include/linux/qed/qede_rdma.h | 30 ++-------------------- include/linux/qed/rdma_common.h | 29 +-------------------- include/linux/qed/roce_common.h | 29 +-------------------- include/linux/qed/storage_common.h | 29 +-------------------- include/linux/qed/tcp_common.h | 29 +-------------------- 60 files changed, 70 insertions(+), 1680 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index a49743d56b9c..52130dcbfe4c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 08ba9d54ab63..3985dd746ca2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index ce08ae8d8498..05b28919653a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_CXT_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 5c6a276f69ac..a72523298307 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h index 01f253ea4b22..537d60de4e2b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_DCBX_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 3aa51374e727..fa7c10e8aa7a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index eb4808b3bf67..1f2122c699cc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_DEV_API_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c index 4c7fa391fd33..91d6cdf4abe8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h index 027a76ac839a..bf324736c7cb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_FCOE_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index f00460d00cab..ebbca7d999a4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_HSI_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index 5fa251489536..bdbb8fa8d399 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h index f5b109b04b66..68f44b747565 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_HW_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index 2f1049b0b93a..72ff2e5c5f24 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c index 5a6e4ac4fef4..74c425640d67 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h index e9e8ade50ed3..cf33c41e0952 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_INIT_OPS_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index b7b974f0ef21..be336d47c934 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index e09db3386367..6fca82f6c7fa 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_INT_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index 7245a615517a..164b4d953b67 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h index 225c75b02a06..8b6518a31b7e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_ISCSI_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 5409a2da6106..7fac39744275 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -1,34 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #include #include #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h index c1b2057d23b8..83ca05cd74d7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h @@ -1,34 +1,8 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #ifndef _QED_IWARP_H #define _QED_IWARP_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 29810a1aa210..750098e60c64 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index 7127d5aaac42..dce39f5a87ca 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -1,34 +1,8 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #ifndef _QED_L2_H #define _QED_L2_H #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 4afd8572ada6..47da6d4e226c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index 288642d526b7..c0d13bd6c3a6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_LL2_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 11367a248d55..d00335cc145b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 9624616806e7..c17b140aa7ae 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 5750b4c5ef63..351a13215ddd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_MCP_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c index ffac4ac87394..d01f91f7f661 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.h b/drivers/net/ethernet/qlogic/qed/qed_ooo.h index 49c4e75b15b1..2731c392a3f4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ooo.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_OOO_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c index 3e613058e225..f10ddf9d1704 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c @@ -1,34 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #include #include "qed.h" #include "qed_dev_api.h" diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 19c0c8864da1..9a3541f159dc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -1,34 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #include #include #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h index 1e69d5bb0a70..a20397a395cf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h @@ -1,34 +1,8 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #ifndef _QED_RDMA_H #define _QED_RDMA_H #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 3dcb6ff58e73..bdfd90748042 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef REG_ADDR_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index 7271dd7166e5..1e03d66e33d1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -1,34 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #include #include #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h index f801f39fde61..9178904bf0e9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.h +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h @@ -1,34 +1,8 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #ifndef _QED_ROCE_H #define _QED_ROCE_H #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.c b/drivers/net/ethernet/qlogic/qed/qed_selftest.c index cf1d4476f9d8..d24ee1ea8d3c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_selftest.c +++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2016 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index b7b4fbbbccfe..4f646e101074 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_SP_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 900bc603e30a..23d630b37199 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 790c28d696a0..18c59981cab7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 20679fd4204b..6d3c6d4f6308 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index 368e88565783..43dfaf410332 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_SRIOV_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index adc2c8f3d48e..c800f8812492 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index 033409db86ae..60d2bb64e65f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_VF_H diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 2c4737e6694a..294d01eae5cb 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2016 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _COMMON_HSI_H diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h index 95f5fd615852..a9566ef3c2ce 100644 --- a/include/linux/qed/eth_common.h +++ b/include/linux/qed/eth_common.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef __ETH_COMMON__ diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h index 2f0a771a9176..7ca89fb9247f 100644 --- a/include/linux/qed/iscsi_common.h +++ b/include/linux/qed/iscsi_common.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef __ISCSI_COMMON__ diff --git a/include/linux/qed/iwarp_common.h b/include/linux/qed/iwarp_common.h index c6cfd39cd910..23583e644257 100644 --- a/include/linux/qed/iwarp_common.h +++ b/include/linux/qed/iwarp_common.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef __IWARP_COMMON__ diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 6d15040c642c..e6e25197f7cb 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_CHAIN_H diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index a1310482c4ed..7803dedbcb52 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_ETH_IF_H diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 8cb76405cbce..4a36608ff3a8 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_IF_H diff --git a/include/linux/qed/qed_iov_if.h b/include/linux/qed/qed_iov_if.h index ac2e6a3199a3..c2ca8196def9 100644 --- a/include/linux/qed/qed_iov_if.h +++ b/include/linux/qed/qed_iov_if.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_IOV_IF_H diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h index d0df1bec5357..89912c6c440c 100644 --- a/include/linux/qed/qed_iscsi_if.h +++ b/include/linux/qed/qed_iscsi_if.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_ISCSI_IF_H diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index 1313c34d9a68..79cac277e38b 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_LL2_IF_H diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h index 2d3ddd2b85e0..041a5b005a82 100644 --- a/include/linux/qed/qed_rdma_if.h +++ b/include/linux/qed/qed_rdma_if.h @@ -1,34 +1,8 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #ifndef _QED_RDMA_IF_H #define _QED_RDMA_IF_H #include diff --git a/include/linux/qed/qede_rdma.h b/include/linux/qed/qede_rdma.h index 5a00c7a473bf..20ed7f2c55bb 100644 --- a/include/linux/qed/qede_rdma.h +++ b/include/linux/qed/qede_rdma.h @@ -1,34 +1,8 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qedr NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #ifndef QEDE_ROCE_H #define QEDE_ROCE_H diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h index 480a57eb36cc..3e3f01136c06 100644 --- a/include/linux/qed/rdma_common.h +++ b/include/linux/qed/rdma_common.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef __RDMA_COMMON__ diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h index 473fba76aa77..89065f023813 100644 --- a/include/linux/qed/roce_common.h +++ b/include/linux/qed/roce_common.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef __ROCE_COMMON__ diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h index 9a973ffbbff5..34f069c79067 100644 --- a/include/linux/qed/storage_common.h +++ b/include/linux/qed/storage_common.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef __STORAGE_COMMON__ diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h index 4a4845193539..925e7cb7a582 100644 --- a/include/linux/qed/tcp_common.h +++ b/include/linux/qed/tcp_common.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
 */
 #ifndef __TCP_COMMON__
-- cgit v1.2.3

From 663eacd899ac131dcfc2279184c1ce3c4fd2815f Mon Sep 17 00:00:00 2001
From: Alexander Lobakin
Date: Mon, 29 Jun 2020 14:05:09 +0300
Subject: net: qed: update copyright years

Set the actual copyright holder and years in all qed source files.

Signed-off-by: Alexander Lobakin
Signed-off-by: Igor Russkikh
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/qlogic/qed/Makefile | 1 +
 drivers/net/ethernet/qlogic/qed/qed.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_cxt.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_cxt.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_dcbx.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_debug.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_debug.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_dev.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_fcoe.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_fcoe.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_hsi.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_hw.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_hw.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_init_ops.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_init_ops.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_int.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_int.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_iscsi.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_iwarp.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_l2.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_l2.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_ll2.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_ll2.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_main.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_mcp.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_mcp.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_ooo.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_ooo.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_ptp.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_rdma.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_rdma.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_roce.c | 1 +
drivers/net/ethernet/qlogic/qed/qed_roce.h | 1 + drivers/net/ethernet/qlogic/qed/qed_selftest.c | 1 + drivers/net/ethernet/qlogic/qed/qed_selftest.h | 1 + drivers/net/ethernet/qlogic/qed/qed_sp.h | 1 + drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 1 + drivers/net/ethernet/qlogic/qed/qed_spq.c | 1 + drivers/net/ethernet/qlogic/qed/qed_sriov.c | 1 + drivers/net/ethernet/qlogic/qed/qed_sriov.h | 1 + drivers/net/ethernet/qlogic/qed/qed_vf.c | 1 + include/linux/qed/common_hsi.h | 1 + include/linux/qed/eth_common.h | 1 + include/linux/qed/fcoe_common.h | 1 + include/linux/qed/iscsi_common.h | 1 + include/linux/qed/iwarp_common.h | 1 + include/linux/qed/qed_chain.h | 1 + include/linux/qed/qed_eth_if.h | 1 + include/linux/qed/qed_fcoe_if.h | 1 + include/linux/qed/qed_if.h | 1 + include/linux/qed/qed_iov_if.h | 1 + include/linux/qed/qed_iscsi_if.h | 1 + include/linux/qed/qed_ll2_if.h | 1 + include/linux/qed/qed_rdma_if.h | 1 + include/linux/qed/qede_rdma.h | 1 + include/linux/qed/rdma_common.h | 1 + include/linux/qed/roce_common.h | 1 + include/linux/qed/storage_common.h | 1 + include/linux/qed/tcp_common.h | 1 + 66 files changed, 66 insertions(+) (limited to 'include') diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index cee566faba2f..4176bbf2a22b 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ b/drivers/net/ethernet/qlogic/qed/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +# Copyright (c) 2019-2020 Marvell International Ltd. obj-$(CONFIG_QED) := qed.o diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 52130dcbfe4c..68fe745c9b99 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 3985dd746ca2..e72d25854d79 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index 05b28919653a..8b64495f8745 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_CXT_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index a72523298307..9f16a3a66007 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
*/ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h index 537d60de4e2b..ba5f3927034c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_DCBX_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 8b14e6852daf..45cbe1c87106 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.h b/drivers/net/ethernet/qlogic/qed/qed_debug.h index 685696878ec2..e71af82d3200 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.h +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_DEBUGFS_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index fa7c10e8aa7a..74142896b707 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index 1f2122c699cc..395d4932c262 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_DEV_API_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c index 91d6cdf4abe8..a10e57bba6b9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h index bf324736c7cb..13c5ccfe06e7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
*/ #ifndef _QED_FCOE_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index ebbca7d999a4..d7e70625bdc4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_HSI_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index bdbb8fa8d399..e8c777f30207 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h index 68f44b747565..6a61b88b544d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_HW_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index 72ff2e5c5f24..1eb48ea80484 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c index 74c425640d67..736702fb81b5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h index cf33c41e0952..a573c8921982 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_INIT_OPS_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index be336d47c934..297983d66e2f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
*/ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 6fca82f6c7fa..aea04b121586 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_INT_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index 164b4d953b67..f9d9e21cb66b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h index 8b6518a31b7e..d6af7ea19bbb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_ISCSI_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 7fac39744275..b7a0a717ee6d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h index 83ca05cd74d7..c3872cd9457f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_IWARP_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 750098e60c64..03dc804c92a9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index dce39f5a87ca..8eceeebb1a7b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
*/ #ifndef _QED_L2_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 47da6d4e226c..cce6fd27c042 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index c0d13bd6c3a6..8356c7d4a193 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_LL2_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index d00335cc145b..0cd6b8bf023a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index c17b140aa7ae..25433d162a54 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 351a13215ddd..d77b4c262cff 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_MCP_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c index 56b7567d7a60..1dd01e0373ab 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* Copyright (c) 2019-2020 Marvell International Ltd. */ #include #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c index d01f91f7f661..88353aa404dc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
*/ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.h b/drivers/net/ethernet/qlogic/qed/qed_ooo.h index 2731c392a3f4..3a7e1b59d6fc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ooo.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_OOO_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c index f10ddf9d1704..3bd2f8c961c9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 9a3541f159dc..59d916693654 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h index a20397a395cf..fba43adbb68e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_RDMA_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index bdfd90748042..9db22be42476 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef REG_ADDR_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index 1e03d66e33d1..d5db07db65b1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h index 9178904bf0e9..3a4a2d72a826 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.h +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
*/ #ifndef _QED_ROCE_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.c b/drivers/net/ethernet/qlogic/qed/qed_selftest.c index d24ee1ea8d3c..6e70781ab87c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_selftest.c +++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2016 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.h b/drivers/net/ethernet/qlogic/qed/qed_selftest.h index d8121fd39bc1..e27dd9a4547e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_selftest.h +++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.h @@ -1,4 +1,5 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_SELFTEST_API_H #define _QED_SELFTEST_API_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 4f646e101074..35539e951bee 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_SP_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 23d630b37199..745d76d13732 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 18c59981cab7..ee89a8f4f585 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 6d3c6d4f6308..fcf4d82da161 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index 43dfaf410332..552892c45670 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
*/ #ifndef _QED_SRIOV_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index c800f8812492..72a38d53d33f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 294d01eae5cb..977807e1be53 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2016 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _COMMON_HSI_H diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h index a9566ef3c2ce..cd1207ad4ada 100644 --- a/include/linux/qed/eth_common.h +++ b/include/linux/qed/eth_common.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef __ETH_COMMON__ diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h index a669d7d84284..68eda1c21cde 100644 --- a/include/linux/qed/fcoe_common.h +++ b/include/linux/qed/fcoe_common.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef __FCOE_COMMON__ diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h index 7ca89fb9247f..157019f716f1 100644 --- a/include/linux/qed/iscsi_common.h +++ b/include/linux/qed/iscsi_common.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef __ISCSI_COMMON__ diff --git a/include/linux/qed/iwarp_common.h b/include/linux/qed/iwarp_common.h index 23583e644257..14f9e4c0ddd9 100644 --- a/include/linux/qed/iwarp_common.h +++ b/include/linux/qed/iwarp_common.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef __IWARP_COMMON__ diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index e6e25197f7cb..92cdc79e5019 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_CHAIN_H diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index 7803dedbcb52..812a4d751163 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
*/ #ifndef _QED_ETH_IF_H diff --git a/include/linux/qed/qed_fcoe_if.h b/include/linux/qed/qed_fcoe_if.h index 65d0317ef67e..16752eca5cbd 100644 --- a/include/linux/qed/qed_fcoe_if.h +++ b/include/linux/qed/qed_fcoe_if.h @@ -1,4 +1,5 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_FCOE_IF_H #define _QED_FCOE_IF_H diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 4a36608ff3a8..5ca081cd2ed9 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_IF_H diff --git a/include/linux/qed/qed_iov_if.h b/include/linux/qed/qed_iov_if.h index c2ca8196def9..8e31a28e51b9 100644 --- a/include/linux/qed/qed_iov_if.h +++ b/include/linux/qed/qed_iov_if.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_IOV_IF_H diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h index 89912c6c440c..04180d9af560 100644 --- a/include/linux/qed/qed_iscsi_if.h +++ b/include/linux/qed/qed_iscsi_if.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_ISCSI_IF_H diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index 79cac277e38b..2f64ed79cee9 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_LL2_IF_H diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h index 041a5b005a82..f464d85e88a4 100644 --- a/include/linux/qed/qed_rdma_if.h +++ b/include/linux/qed/qed_rdma_if.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_RDMA_IF_H diff --git a/include/linux/qed/qede_rdma.h b/include/linux/qed/qede_rdma.h index 20ed7f2c55bb..072da2f6da37 100644 --- a/include/linux/qed/qede_rdma.h +++ b/include/linux/qed/qede_rdma.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qedr NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef QEDE_ROCE_H diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h index 3e3f01136c06..bab078b25834 100644 --- a/include/linux/qed/rdma_common.h +++ b/include/linux/qed/rdma_common.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
*/ #ifndef __RDMA_COMMON__ diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h index 89065f023813..ccddd7a96b67 100644 --- a/include/linux/qed/roce_common.h +++ b/include/linux/qed/roce_common.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef __ROCE_COMMON__ diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h index 34f069c79067..91896e8793bf 100644 --- a/include/linux/qed/storage_common.h +++ b/include/linux/qed/storage_common.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef __STORAGE_COMMON__ diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h index 925e7cb7a582..2b2c87d10e0a 100644 --- a/include/linux/qed/tcp_common.h +++ b/include/linux/qed/tcp_common.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef __TCP_COMMON__ -- cgit v1.2.3 From 8b11c20a658de159fcf18ebce4f2acfdf747ff25 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Mon, 29 Jun 2020 14:03:41 +0200 Subject: phy: un-inline devm_mdiobus_register() Functions should only be static inline if they're very short. This devres helper is already over 10 lines and it will grow soon as we'll be improving upon its approach. Pull it into mdio_devres.c. Signed-off-by: Bartosz Golaszewski Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/Makefile | 2 +- drivers/net/phy/mdio_devres.c | 18 ++++++++++++++++++ include/linux/phy.h | 15 ++------------- 3 files changed, 21 insertions(+), 14 deletions(-) create mode 100644 drivers/net/phy/mdio_devres.c (limited to 'include') diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index dc9e53b511d6..896afdcac437 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -3,7 +3,7 @@ libphy-y := phy.o phy-c45.o phy-core.o phy_device.o \ linkmode.o -mdio-bus-y += mdio_bus.o mdio_device.o +mdio-bus-y += mdio_bus.o mdio_device.o mdio_devres.o ifdef CONFIG_MDIO_DEVICE obj-y += mdio-boardinfo.o diff --git a/drivers/net/phy/mdio_devres.c b/drivers/net/phy/mdio_devres.c new file mode 100644 index 000000000000..f0b4b6cfe5e3 --- /dev/null +++ b/drivers/net/phy/mdio_devres.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include + +int __devm_mdiobus_register(struct mii_bus *bus, struct module *owner) +{ + int ret; + + if (!bus->is_managed) + return -EPERM; + + ret = __mdiobus_register(bus, owner); + if (!ret) + bus->is_managed_registered = 1; + + return ret; +} +EXPORT_SYMBOL(__devm_mdiobus_register); diff --git a/include/linux/phy.h b/include/linux/phy.h index ecc7640705b9..7c75e1c90141 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -322,20 +322,9 @@ static inline struct mii_bus *mdiobus_alloc(void) } int __mdiobus_register(struct mii_bus *bus, struct module *owner); +int __devm_mdiobus_register(struct mii_bus *bus, struct module *owner); #define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE) -static inline int devm_mdiobus_register(struct mii_bus *bus) -{ - int ret; - - if (!bus->is_managed) - return -EPERM; - - ret = mdiobus_register(bus); - if (!ret) - bus->is_managed_registered = 1; - - return ret; -} +#define devm_mdiobus_register(bus) __devm_mdiobus_register(bus, THIS_MODULE) void mdiobus_unregister(struct mii_bus *bus); void mdiobus_free(struct mii_bus *bus); -- cgit v1.2.3 From ac3a68d56651c3dad2c12c7afce065fe15267f44 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Mon, 29 Jun 2020 14:03:43 +0200 Subject: net: phy: don't abuse devres in devm_mdiobus_register() We currently have two managed helpers for mdiobus - devm_mdiobus_alloc() and devm_mdiobus_register(). The idea behind devres is that the release callback releases whatever resource the devm function allocates. In the mdiobus case however there's no devres associated with the device by devm_mdiobus_register(). Instead the release callback for devm_mdiobus_alloc(): _devm_mdiobus_free() unregisters the device if it is marked as managed. This all seems wrong. The managed structure shouldn't need to know or care about whether it's managed or not - and this is the case now for struct mii_bus. The devres wrapper should be opaque to the managed resource. This changeset makes devm_mdiobus_alloc() and devm_mdiobus_register() conform to common devres standards: devm_mdiobus_alloc() allocates a devres structure and registers a callback that will call mdiobus_free(). __devm_mdiobus_register() allocated another devres and registers a callback that will unregister the bus. Signed-off-by: Bartosz Golaszewski Signed-off-by: David S. 
Miller --- Documentation/driver-api/driver-model/devres.rst | 1 - drivers/net/ethernet/realtek/r8169_main.c | 2 +- drivers/net/phy/mdio_bus.c | 73 --------------------- drivers/net/phy/mdio_devres.c | 83 ++++++++++++++++++++++-- include/linux/phy.h | 10 ++- 5 files changed, 82 insertions(+), 87 deletions(-) (limited to 'include') diff --git a/Documentation/driver-api/driver-model/devres.rst b/Documentation/driver-api/driver-model/devres.rst index 5463fc8a60c1..e0333d66a7f4 100644 --- a/Documentation/driver-api/driver-model/devres.rst +++ b/Documentation/driver-api/driver-model/devres.rst @@ -342,7 +342,6 @@ LED MDIO devm_mdiobus_alloc() devm_mdiobus_alloc_size() - devm_mdiobus_free() devm_mdiobus_register() MEM diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 07a33af1f64e..745e1739e9c8 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -5012,7 +5012,7 @@ static int r8169_mdio_register(struct rtl8169_private *tp) new_bus->read = r8169_mdio_read_reg; new_bus->write = r8169_mdio_write_reg; - ret = devm_mdiobus_register(new_bus); + ret = devm_mdiobus_register(&pdev->dev, new_bus); if (ret) return ret; diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 134f82d72da8..46b33701ad4b 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -165,79 +165,6 @@ struct mii_bus *mdiobus_alloc_size(size_t size) } EXPORT_SYMBOL(mdiobus_alloc_size); -static void _devm_mdiobus_free(struct device *dev, void *res) -{ - struct mii_bus *bus = *(struct mii_bus **)res; - - if (bus->is_managed_registered && bus->state == MDIOBUS_REGISTERED) - mdiobus_unregister(bus); - - mdiobus_free(bus); -} - -static int devm_mdiobus_match(struct device *dev, void *res, void *data) -{ - struct mii_bus **r = res; - - if (WARN_ON(!r || !*r)) - return 0; - - return *r == data; -} - -/** - * devm_mdiobus_alloc_size - Resource-managed mdiobus_alloc_size() - * @dev: Device to allocate mii_bus for - * @sizeof_priv: Space to allocate for private structure. - * - * Managed mdiobus_alloc_size. mii_bus allocated with this function is - * automatically freed on driver detach. - * - * If an mii_bus allocated with this function needs to be freed separately, - * devm_mdiobus_free() must be used. - * - * RETURNS: - * Pointer to allocated mii_bus on success, NULL on failure. - */ -struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv) -{ - struct mii_bus **ptr, *bus; - - ptr = devres_alloc(_devm_mdiobus_free, sizeof(*ptr), GFP_KERNEL); - if (!ptr) - return NULL; - - /* use raw alloc_dr for kmalloc caller tracing */ - bus = mdiobus_alloc_size(sizeof_priv); - if (bus) { - *ptr = bus; - devres_add(dev, ptr); - bus->is_managed = 1; - } else { - devres_free(ptr); - } - - return bus; -} -EXPORT_SYMBOL_GPL(devm_mdiobus_alloc_size); - -/** - * devm_mdiobus_free - Resource-managed mdiobus_free() - * @dev: Device this mii_bus belongs to - * @bus: the mii_bus associated with the device - * - * Free mii_bus allocated with devm_mdiobus_alloc_size(). 
- */ -void devm_mdiobus_free(struct device *dev, struct mii_bus *bus) -{ - int rc; - - rc = devres_release(dev, _devm_mdiobus_free, - devm_mdiobus_match, bus); - WARN_ON(rc); -} -EXPORT_SYMBOL_GPL(devm_mdiobus_free); - /** * mdiobus_release - mii_bus device release callback * @d: the target struct device that contains the mii_bus diff --git a/drivers/net/phy/mdio_devres.c b/drivers/net/phy/mdio_devres.c index 3ee887733d4a..0b9bd9a61378 100644 --- a/drivers/net/phy/mdio_devres.c +++ b/drivers/net/phy/mdio_devres.c @@ -1,25 +1,96 @@ // SPDX-License-Identifier: GPL-2.0-or-later +#include #include +#include + +struct mdiobus_devres { + struct mii_bus *mii; +}; + +static void devm_mdiobus_free(struct device *dev, void *this) +{ + struct mdiobus_devres *dr = this; + + mdiobus_free(dr->mii); +} + +/** + * devm_mdiobus_alloc_size - Resource-managed mdiobus_alloc_size() + * @dev: Device to allocate mii_bus for + * @sizeof_priv: Space to allocate for private structure + * + * Managed mdiobus_alloc_size. mii_bus allocated with this function is + * automatically freed on driver detach. + * + * RETURNS: + * Pointer to allocated mii_bus on success, NULL on out-of-memory error. + */ +struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv) +{ + struct mdiobus_devres *dr; + + dr = devres_alloc(devm_mdiobus_free, sizeof(*dr), GFP_KERNEL); + if (!dr) + return NULL; + + dr->mii = mdiobus_alloc_size(sizeof_priv); + if (!dr->mii) { + devres_free(dr); + return NULL; + } + + devres_add(dev, dr); + return dr->mii; +} +EXPORT_SYMBOL(devm_mdiobus_alloc_size); + +static void devm_mdiobus_unregister(struct device *dev, void *this) +{ + struct mdiobus_devres *dr = this; + + mdiobus_unregister(dr->mii); +} + +static int mdiobus_devres_match(struct device *dev, + void *this, void *match_data) +{ + struct mdiobus_devres *res = this; + struct mii_bus *mii = match_data; + + return mii == res->mii; +} /** * __devm_mdiobus_register - Resource-managed variant of mdiobus_register() + * @dev: Device to register mii_bus for * @bus: MII bus structure to register * @owner: Owning module * * Returns 0 on success, negative error number on failure. 
*/ -int __devm_mdiobus_register(struct mii_bus *bus, struct module *owner) +int __devm_mdiobus_register(struct device *dev, struct mii_bus *bus, + struct module *owner) { + struct mdiobus_devres *dr; int ret; - if (!bus->is_managed) - return -EPERM; + if (WARN_ON(!devres_find(dev, devm_mdiobus_free, + mdiobus_devres_match, bus))) + return -EINVAL; + + dr = devres_alloc(devm_mdiobus_unregister, sizeof(*dr), GFP_KERNEL); + if (!dr) + return -ENOMEM; ret = __mdiobus_register(bus, owner); - if (!ret) - bus->is_managed_registered = 1; + if (ret) { + devres_free(dr); + return ret; + } - return ret; + dr->mii = bus; + devres_add(dev, dr); + return 0; } EXPORT_SYMBOL(__devm_mdiobus_register); diff --git a/include/linux/phy.h b/include/linux/phy.h index 7c75e1c90141..101a48fa6750 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -261,9 +261,6 @@ struct mii_bus { int (*reset)(struct mii_bus *bus); struct mdio_bus_stats stats[PHY_MAX_ADDR]; - unsigned int is_managed:1; /* is device-managed */ - unsigned int is_managed_registered:1; - /* * A lock to ensure that only one thing can read/write * the MDIO bus at a time @@ -322,9 +319,11 @@ static inline struct mii_bus *mdiobus_alloc(void) } int __mdiobus_register(struct mii_bus *bus, struct module *owner); -int __devm_mdiobus_register(struct mii_bus *bus, struct module *owner); +int __devm_mdiobus_register(struct device *dev, struct mii_bus *bus, + struct module *owner); #define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE) -#define devm_mdiobus_register(bus) __devm_mdiobus_register(bus, THIS_MODULE) +#define devm_mdiobus_register(dev, bus) \ + __devm_mdiobus_register(dev, bus, THIS_MODULE) void mdiobus_unregister(struct mii_bus *bus); void mdiobus_free(struct mii_bus *bus); @@ -335,7 +334,6 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev) } struct mii_bus *mdio_find_bus(const char *mdio_name); -void devm_mdiobus_free(struct device *dev, struct mii_bus *bus); struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); #define PHY_INTERRUPT_DISABLED false -- cgit v1.2.3 From a0bd96f5aed2108505386733abe76f37362c2f50 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Mon, 29 Jun 2020 14:03:44 +0200 Subject: of: mdio: remove the 'extern' keyword from function declarations The 'extern' keyword in headers doesn't have any benefit. Remove them all from the of_mdio.h header. Signed-off-by: Bartosz Golaszewski Signed-off-by: David S. 
Miller --- include/linux/of_mdio.h | 37 ++++++++++++++++++------------------- 1 file changed, 18 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h index 0f61a4ac6bcf..ba8e157f24ad 100644 --- a/include/linux/of_mdio.h +++ b/include/linux/of_mdio.h @@ -12,27 +12,26 @@ #include #if IS_ENABLED(CONFIG_OF_MDIO) -extern bool of_mdiobus_child_is_phy(struct device_node *child); -extern int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np); -extern struct phy_device *of_phy_find_device(struct device_node *phy_np); -extern struct phy_device *of_phy_connect(struct net_device *dev, - struct device_node *phy_np, - void (*hndlr)(struct net_device *), - u32 flags, phy_interface_t iface); -extern struct phy_device * +bool of_mdiobus_child_is_phy(struct device_node *child); +int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np); +struct phy_device *of_phy_find_device(struct device_node *phy_np); +struct phy_device * +of_phy_connect(struct net_device *dev, struct device_node *phy_np, + void (*hndlr)(struct net_device *), u32 flags, + phy_interface_t iface); +struct phy_device * of_phy_get_and_connect(struct net_device *dev, struct device_node *np, void (*hndlr)(struct net_device *)); -struct phy_device *of_phy_attach(struct net_device *dev, - struct device_node *phy_np, u32 flags, - phy_interface_t iface); - -extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); -extern int of_phy_register_fixed_link(struct device_node *np); -extern void of_phy_deregister_fixed_link(struct device_node *np); -extern bool of_phy_is_fixed_link(struct device_node *np); -extern int of_mdiobus_phy_device_register(struct mii_bus *mdio, - struct phy_device *phy, - struct device_node *child, u32 addr); +struct phy_device * +of_phy_attach(struct net_device *dev, struct device_node *phy_np, + u32 flags, phy_interface_t iface); + +struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); +int of_phy_register_fixed_link(struct device_node *np); +void of_phy_deregister_fixed_link(struct device_node *np); +bool of_phy_is_fixed_link(struct device_node *np); +int of_mdiobus_phy_device_register(struct mii_bus *mdio, struct phy_device *phy, + struct device_node *child, u32 addr); static inline int of_mdio_parse_addr(struct device *dev, const struct device_node *np) -- cgit v1.2.3 From 14eeb6e086d6b9004c600e5f0f62bacb458ecfba Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Mon, 29 Jun 2020 14:03:45 +0200 Subject: of: mdio: provide devm_of_mdiobus_register() Implement a managed variant of of_mdiobus_register(). We need to make mdio_devres into its own module because otherwise we'd hit circular sumbol dependencies between phylib and of_mdio. Signed-off-by: Bartosz Golaszewski Signed-off-by: David S. 
Miller --- Documentation/driver-api/driver-model/devres.rst | 1 + drivers/net/phy/Makefile | 4 ++- drivers/net/phy/mdio_devres.c | 37 ++++++++++++++++++++++++ include/linux/of_mdio.h | 3 ++ 4 files changed, 44 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/Documentation/driver-api/driver-model/devres.rst b/Documentation/driver-api/driver-model/devres.rst index e0333d66a7f4..eaaaafc21134 100644 --- a/Documentation/driver-api/driver-model/devres.rst +++ b/Documentation/driver-api/driver-model/devres.rst @@ -343,6 +343,7 @@ MDIO devm_mdiobus_alloc() devm_mdiobus_alloc_size() devm_mdiobus_register() + devm_of_mdiobus_register() MEM devm_free_pages() diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 896afdcac437..c9a9adf194d5 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -3,7 +3,8 @@ libphy-y := phy.o phy-c45.o phy-core.o phy_device.o \ linkmode.o -mdio-bus-y += mdio_bus.o mdio_device.o mdio_devres.o +mdio-bus-y += mdio_bus.o mdio_device.o +mdio-devres-y += mdio_devres.o ifdef CONFIG_MDIO_DEVICE obj-y += mdio-boardinfo.o @@ -17,6 +18,7 @@ libphy-y += $(mdio-bus-y) else obj-$(CONFIG_MDIO_DEVICE) += mdio-bus.o endif +obj-$(CONFIG_MDIO_DEVICE) += mdio-devres.o libphy-$(CONFIG_SWPHY) += swphy.o libphy-$(CONFIG_LED_TRIGGER_PHY) += phy_led_triggers.o diff --git a/drivers/net/phy/mdio_devres.c b/drivers/net/phy/mdio_devres.c index 0b9bd9a61378..b560e99695df 100644 --- a/drivers/net/phy/mdio_devres.c +++ b/drivers/net/phy/mdio_devres.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later #include +#include #include #include @@ -94,3 +95,39 @@ int __devm_mdiobus_register(struct device *dev, struct mii_bus *bus, return 0; } EXPORT_SYMBOL(__devm_mdiobus_register); + +#if IS_ENABLED(CONFIG_OF_MDIO) +/** + * devm_of_mdiobus_register - Resource managed variant of of_mdiobus_register() + * @dev: Device to register mii_bus for + * @mdio: MII bus structure to register + * @np: Device node to parse + */ +int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio, + struct device_node *np) +{ + struct mdiobus_devres *dr; + int ret; + + if (WARN_ON(!devres_find(dev, devm_mdiobus_free, + mdiobus_devres_match, mdio))) + return -EINVAL; + + dr = devres_alloc(devm_mdiobus_unregister, sizeof(*dr), GFP_KERNEL); + if (!dr) + return -ENOMEM; + + ret = of_mdiobus_register(mdio, np); + if (ret) { + devres_free(dr); + return ret; + } + + dr->mii = mdio; + devres_add(dev, dr); + return 0; +} +EXPORT_SYMBOL(devm_of_mdiobus_register); +#endif /* CONFIG_OF_MDIO */ + +MODULE_LICENSE("GPL"); diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h index ba8e157f24ad..1efb88d9f892 100644 --- a/include/linux/of_mdio.h +++ b/include/linux/of_mdio.h @@ -8,12 +8,15 @@ #ifndef __LINUX_OF_MDIO_H #define __LINUX_OF_MDIO_H +#include #include #include #if IS_ENABLED(CONFIG_OF_MDIO) bool of_mdiobus_child_is_phy(struct device_node *child); int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np); +int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio, + struct device_node *np); struct phy_device *of_phy_find_device(struct device_node *phy_np); struct phy_device * of_phy_connect(struct net_device *dev, struct device_node *phy_np, -- cgit v1.2.3 From e1915eec54a6b4902664eaf6fb7a20de5624c4dd Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Fri, 26 Jun 2020 22:37:41 +0200 Subject: backlight: sky81452: Convert to GPIO descriptors The SKY81452 backlight driver just obtains a GPIO (named "gpios" in the device tree) drives it high 
and leaves it high until the driver is removed. Switch to use GPIO descriptors for this, simple and straight-forward. Cc: Gyungoh Yoo Signed-off-by: Linus Walleij Reviewed-by: Daniel Thompson Signed-off-by: Lee Jones --- drivers/video/backlight/sky81452-backlight.c | 18 ++++-------------- include/linux/platform_data/sky81452-backlight.h | 6 ++++-- 2 files changed, 8 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/drivers/video/backlight/sky81452-backlight.c b/drivers/video/backlight/sky81452-backlight.c index 2355f00f5773..81d2c8f3ca50 100644 --- a/drivers/video/backlight/sky81452-backlight.c +++ b/drivers/video/backlight/sky81452-backlight.c @@ -8,12 +8,11 @@ #include #include -#include +#include #include #include #include #include -#include #include #include #include @@ -182,7 +181,7 @@ static struct sky81452_bl_platform_data *sky81452_bl_parse_dt( pdata->ignore_pwm = of_property_read_bool(np, "skyworks,ignore-pwm"); pdata->dpwm_mode = of_property_read_bool(np, "skyworks,dpwm-mode"); pdata->phase_shift = of_property_read_bool(np, "skyworks,phase-shift"); - pdata->gpio_enable = of_get_gpio(np, 0); + pdata->gpiod_enable = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH); ret = of_property_count_u32_elems(np, "led-sources"); if (ret < 0) { @@ -264,15 +263,6 @@ static int sky81452_bl_probe(struct platform_device *pdev) return PTR_ERR(pdata); } - if (gpio_is_valid(pdata->gpio_enable)) { - ret = devm_gpio_request_one(dev, pdata->gpio_enable, - GPIOF_OUT_INIT_HIGH, "sky81452-en"); - if (ret < 0) { - dev_err(dev, "failed to request GPIO. err=%d\n", ret); - return ret; - } - } - ret = sky81452_bl_init_device(regmap, pdata); if (ret < 0) { dev_err(dev, "failed to initialize. err=%d\n", ret); @@ -312,8 +302,8 @@ static int sky81452_bl_remove(struct platform_device *pdev) bd->props.brightness = 0; backlight_update_status(bd); - if (gpio_is_valid(pdata->gpio_enable)) - gpio_set_value_cansleep(pdata->gpio_enable, 0); + if (pdata->gpiod_enable) + gpiod_set_value_cansleep(pdata->gpiod_enable, 0); return 0; } diff --git a/include/linux/platform_data/sky81452-backlight.h b/include/linux/platform_data/sky81452-backlight.h index 02653d92d84f..d6f46670d923 100644 --- a/include/linux/platform_data/sky81452-backlight.h +++ b/include/linux/platform_data/sky81452-backlight.h @@ -9,11 +9,13 @@ #ifndef _SKY81452_BACKLIGHT_H #define _SKY81452_BACKLIGHT_H +#include + /** * struct sky81452_platform_data * @name: backlight driver name. If it is not defined, default name is lcd-backlight. - * @gpio_enable:GPIO number which control EN pin + * @gpios_enable:GPIO descriptor which control EN pin * @enable: Enable mask for current sink channel 1, 2, 3, 4, 5 and 6. * @ignore_pwm: true if DPWMI should be ignored. * @dpwm_mode: true is DPWM dimming mode, otherwise Analog dimming mode. @@ -23,7 +25,7 @@ */ struct sky81452_bl_platform_data { const char *name; - int gpio_enable; + struct gpio_desc *gpiod_enable; unsigned int enable; bool ignore_pwm; bool dpwm_mode; -- cgit v1.2.3 From 08bf73a6f056d84fd52a58c5d165523dd84be535 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Fri, 26 Jun 2020 22:37:42 +0200 Subject: backlight: sky81452: Privatize platform data The only way the platform data for the SKY81452 ever gets populated is through the device tree. The MFD device is bothered with this for no reason at all. Just allocate the platform data in the driver and be happy. 
Cc: Gyungoh Yoo Signed-off-by: Linus Walleij Reviewed-by: Daniel Thompson Signed-off-by: Lee Jones --- drivers/mfd/sky81452.c | 2 -- drivers/video/backlight/sky81452-backlight.c | 34 +++++++++++++++++----- include/linux/mfd/sky81452.h | 2 -- include/linux/platform_data/sky81452-backlight.h | 37 ------------------------ 4 files changed, 27 insertions(+), 48 deletions(-) delete mode 100644 include/linux/platform_data/sky81452-backlight.h (limited to 'include') diff --git a/drivers/mfd/sky81452.c b/drivers/mfd/sky81452.c index 76eedfae8553..3ad35bf0c015 100644 --- a/drivers/mfd/sky81452.c +++ b/drivers/mfd/sky81452.c @@ -47,8 +47,6 @@ static int sky81452_probe(struct i2c_client *client, memset(cells, 0, sizeof(cells)); cells[0].name = "sky81452-backlight"; cells[0].of_compatible = "skyworks,sky81452-backlight"; - cells[0].platform_data = pdata->bl_pdata; - cells[0].pdata_size = sizeof(*pdata->bl_pdata); cells[1].name = "sky81452-regulator"; cells[1].platform_data = pdata->regulator_init_data; cells[1].pdata_size = sizeof(*pdata->regulator_init_data); diff --git a/drivers/video/backlight/sky81452-backlight.c b/drivers/video/backlight/sky81452-backlight.c index 81d2c8f3ca50..83ccb3d940fa 100644 --- a/drivers/video/backlight/sky81452-backlight.c +++ b/drivers/video/backlight/sky81452-backlight.c @@ -15,7 +15,6 @@ #include #include #include -#include #include /* registers */ @@ -41,6 +40,29 @@ #define SKY81452_DEFAULT_NAME "lcd-backlight" #define SKY81452_MAX_BRIGHTNESS (SKY81452_CS + 1) +/** + * struct sky81452_platform_data + * @name: backlight driver name. + If it is not defined, default name is lcd-backlight. + * @gpios_enable:GPIO descriptor which control EN pin + * @enable: Enable mask for current sink channel 1, 2, 3, 4, 5 and 6. + * @ignore_pwm: true if DPWMI should be ignored. + * @dpwm_mode: true is DPWM dimming mode, otherwise Analog dimming mode. + * @phase_shift:true is phase shift mode. + * @short_detecion_threshold: It should be one of 4, 5, 6 and 7V. + * @boost_current_limit: It should be one of 2300, 2750mA. 
+ */ +struct sky81452_bl_platform_data { + const char *name; + struct gpio_desc *gpiod_enable; + unsigned int enable; + bool ignore_pwm; + bool dpwm_mode; + bool phase_shift; + unsigned int short_detection_threshold; + unsigned int boost_current_limit; +}; + #define CTZ(b) __builtin_ctz(b) static int sky81452_bl_update_status(struct backlight_device *bd) @@ -251,17 +273,15 @@ static int sky81452_bl_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct regmap *regmap = dev_get_drvdata(dev->parent); - struct sky81452_bl_platform_data *pdata = dev_get_platdata(dev); + struct sky81452_bl_platform_data *pdata; struct backlight_device *bd; struct backlight_properties props; const char *name; int ret; - if (!pdata) { - pdata = sky81452_bl_parse_dt(dev); - if (IS_ERR(pdata)) - return PTR_ERR(pdata); - } + pdata = sky81452_bl_parse_dt(dev); + if (IS_ERR(pdata)) + return PTR_ERR(pdata); ret = sky81452_bl_init_device(regmap, pdata); if (ret < 0) { diff --git a/include/linux/mfd/sky81452.h b/include/linux/mfd/sky81452.h index d469aa481243..b08570ff34df 100644 --- a/include/linux/mfd/sky81452.h +++ b/include/linux/mfd/sky81452.h @@ -9,11 +9,9 @@ #ifndef _SKY81452_H #define _SKY81452_H -#include #include struct sky81452_platform_data { - struct sky81452_bl_platform_data *bl_pdata; struct regulator_init_data *regulator_init_data; }; diff --git a/include/linux/platform_data/sky81452-backlight.h b/include/linux/platform_data/sky81452-backlight.h deleted file mode 100644 index d6f46670d923..000000000000 --- a/include/linux/platform_data/sky81452-backlight.h +++ /dev/null @@ -1,37 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * sky81452.h SKY81452 backlight driver - * - * Copyright 2014 Skyworks Solutions Inc. - * Author : Gyungoh Yoo - */ - -#ifndef _SKY81452_BACKLIGHT_H -#define _SKY81452_BACKLIGHT_H - -#include - -/** - * struct sky81452_platform_data - * @name: backlight driver name. - If it is not defined, default name is lcd-backlight. - * @gpios_enable:GPIO descriptor which control EN pin - * @enable: Enable mask for current sink channel 1, 2, 3, 4, 5 and 6. - * @ignore_pwm: true if DPWMI should be ignored. - * @dpwm_mode: true is DPWM dimming mode, otherwise Analog dimming mode. - * @phase_shift:true is phase shift mode. - * @short_detecion_threshold: It should be one of 4, 5, 6 and 7V. - * @boost_current_limit: It should be one of 2300, 2750mA. - */ -struct sky81452_bl_platform_data { - const char *name; - struct gpio_desc *gpiod_enable; - unsigned int enable; - bool ignore_pwm; - bool dpwm_mode; - bool phase_shift; - unsigned int short_detection_threshold; - unsigned int boost_current_limit; -}; - -#endif -- cgit v1.2.3 From 43ff98695cc01779407d489fd546359c188222b4 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sun, 28 Jun 2020 19:54:45 -0700 Subject: usb: fix kernel-doc warnings and formatting in Fix kernel-doc warnings in : ../include/linux/usb.h:713: warning: Function parameter or member 'use_generic_driver' not described in 'usb_device' ../include/linux/usb.h:1253: warning: Function parameter or member 'match' not described in 'usb_device_driver' ../include/linux/usb.h:1253: warning: Function parameter or member 'id_table' not described in 'usb_device_driver' Also drop an extra blank line and fix indentation. 
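For context, each of these warnings means a struct member lacks a matching @name: line in the kernel-doc block above its definition. A minimal sketch of the convention kernel-doc expects, using a hypothetical structure rather than anything from this patch:

/**
 * struct foo - one-line description of the structure
 * @bar: what the bar member holds
 * @baz: what the baz member is used for
 */
struct foo {
	int bar;	/* every member needs an @bar-style line above */
	bool baz;
};

Continuation text for a member is indented under its @name: line, which is what the hunk below does for @hub_delay, while @use_generic_driver, @match and @id_table gain the descriptions they were missing.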
Fixes: 77419aa403ca ("USB: Fallback to generic driver when specific driver fails") Fixes: 88b7381a939d ("USB: Select better matching USB drivers when available") Signed-off-by: Randy Dunlap Cc: Bastien Nocera Link: https://lore.kernel.org/r/7014bab2-268c-69f6-7ef5-57fbd45c8b08@infradead.org Signed-off-by: Greg Kroah-Hartman --- include/linux/usb.h | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/usb.h b/include/linux/usb.h index 9f3c721c70dc..c86e4ec4d00f 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -620,9 +620,9 @@ struct usb3_lpm_parameters { * Management to be disabled for this usb_device. This count should only * be manipulated by those functions, with the bandwidth_mutex is held. * @hub_delay: cached value consisting of: - * parent->hub_delay + wHubDelay + tTPTransmissionDelay (40ns) - * + * parent->hub_delay + wHubDelay + tTPTransmissionDelay (40ns) * Will be used as wValue for SetIsochDelay requests. + * @use_generic_driver: ask driver core to reprobe using the generic driver. * * Notes: * Usbcore drivers should not set usbdev->state directly. Instead use @@ -1215,6 +1215,7 @@ struct usb_driver { * struct usb_device_driver - identifies USB device driver to usbcore * @name: The driver name should be unique among USB drivers, * and should normally be the same as the module name. + * @match: If set, used for better device/driver matching. * @probe: Called to see if the driver is willing to manage a particular * device. If it is, probe returns zero and uses dev_set_drvdata() * to associate driver-specific data with the device. If unwilling @@ -1227,13 +1228,16 @@ struct usb_driver { * @dev_groups: Attributes attached to the device that will be created once it * is bound to the driver. * @drvwrap: Driver-model core structure wrapper. + * @id_table: used with @match() to select better matching driver at + * probe() time. * @supports_autosuspend: if set to 0, the USB core will not allow autosuspend * for devices bound to this driver. * @generic_subclass: if set to 1, the generic USB driver's probe, disconnect, * resume and suspend functions will be called in addition to the driver's * own, so this part of the setup does not need to be replicated. * - * USB drivers must provide all the fields listed above except drvwrap. + * USB drivers must provide all the fields listed above except drvwrap, + * match, and id_table. */ struct usb_device_driver { const char *name; -- cgit v1.2.3 From f470a6554854123f763f6273e8266bd6ee89a97a Mon Sep 17 00:00:00 2001 From: Heikki Krogerus Date: Wed, 1 Jul 2020 14:56:15 +0300 Subject: usb: typec: Combine the definitions for Accessory and USB modes There is no need to describe them sparately. Signed-off-by: Heikki Krogerus Link: https://lore.kernel.org/r/20200701115618.22482-2-heikki.krogerus@linux.intel.com Signed-off-by: Greg Kroah-Hartman --- include/linux/usb/typec_altmode.h | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/linux/usb/typec_altmode.h b/include/linux/usb/typec_altmode.h index d834e236c6df..a4b65eaa0f62 100644 --- a/include/linux/usb/typec_altmode.h +++ b/include/linux/usb/typec_altmode.h @@ -95,13 +95,7 @@ enum { * * Port drivers can use TYPEC_MODE_AUDIO and TYPEC_MODE_DEBUG as the mode * value for typec_set_mode() when accessory modes are supported. 
- */ -enum { - TYPEC_MODE_AUDIO = TYPEC_STATE_MODAL, /* Audio Accessory */ - TYPEC_MODE_DEBUG, /* Debug Accessory */ -}; - -/* + * * USB4 also requires that the pins on the connector are repurposed, just like * Alternate Modes. USB4 mode is however not entered with the Enter Mode Command * like the Alternate Modes are, but instead with a special Enter_USB Message. @@ -112,9 +106,11 @@ enum { * state values, just like the Accessory Modes. */ enum { - TYPEC_MODE_USB2 = TYPEC_MODE_DEBUG, /* USB 2.0 mode */ + TYPEC_MODE_USB2 = TYPEC_STATE_MODAL, /* USB 2.0 mode */ TYPEC_MODE_USB3, /* USB 3.2 mode */ - TYPEC_MODE_USB4 /* USB4 mode */ + TYPEC_MODE_USB4, /* USB4 mode */ + TYPEC_MODE_AUDIO, /* Audio Accessory */ + TYPEC_MODE_DEBUG, /* Debug Accessory */ }; #define TYPEC_MODAL_STATE(_state_) ((_state_) + TYPEC_STATE_MODAL) -- cgit v1.2.3 From ad8db94d6813dc659bd4de0531a8a1150559eafb Mon Sep 17 00:00:00 2001 From: Heikki Krogerus Date: Wed, 1 Jul 2020 14:56:16 +0300 Subject: usb: typec: Add data structure for Enter_USB message This data structure can be delivered to the mux drivers when Enter_USB Message is used exactly the same way as the Alternate Mode specific data structures are delivered to the mux drivers when Enter Mode Messages are used. The Enter_USB data structure shall have all details related to the Enter_USB Message, most importantly the Enter_USB Date Object that was used. Signed-off-by: Heikki Krogerus Link: https://lore.kernel.org/r/20200701115618.22482-3-heikki.krogerus@linux.intel.com Signed-off-by: Greg Kroah-Hartman --- include/linux/usb/typec.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'include') diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h index 5daa1c49761c..9cb1bec94b71 100644 --- a/include/linux/usb/typec.h +++ b/include/linux/usb/typec.h @@ -72,6 +72,20 @@ enum typec_orientation { TYPEC_ORIENTATION_REVERSE, }; +/* + * struct enter_usb_data - Enter_USB Message details + * @eudo: Enter_USB Data Object + * @active_link_training: Active Cable Plug Link Training + * + * @active_link_training is a flag that should be set with uni-directional SBRX + * communication, and left 0 with passive cables and with bi-directional SBRX + * communication. + */ +struct enter_usb_data { + u32 eudo; + unsigned char active_link_training:1; +}; + /* * struct usb_pd_identity - USB Power Delivery identity data * @id_header: ID Header VDO -- cgit v1.2.3 From f695ca3886ce72b027af7aa6040cd420cae2088c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 1 Jul 2020 10:59:39 +0200 Subject: block: remove the request_queue argument from blk_queue_split The queue can be trivially derived from the bio, so pass one less argument. 
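As a hedged illustration of the new calling convention (the driver below is hypothetical, not one of the files touched by this patch), callers simply drop the queue argument and the helper derives it from bio->bi_disk->queue:

static blk_qc_t mydrv_make_request(struct request_queue *q, struct bio *bio)
{
	/* was: blk_queue_split(q, &bio); the queue now comes from the bio */
	blk_queue_split(&bio);

	/* ... process the (possibly split) bio ... */
	return BLK_QC_T_NONE;
}

The driver-side conversions in the patch below are all of this one-line form.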
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-merge.c | 21 ++++++++++----------- block/blk-mq.c | 2 +- block/blk.h | 3 +-- drivers/block/drbd/drbd_req.c | 2 +- drivers/block/pktcdvd.c | 2 +- drivers/block/ps3vram.c | 2 +- drivers/block/rsxx/dev.c | 2 +- drivers/block/umem.c | 2 +- drivers/lightnvm/pblk-init.c | 4 ++-- drivers/md/dm.c | 2 +- drivers/md/md.c | 2 +- drivers/nvme/host/multipath.c | 9 ++++----- drivers/s390/block/dcssblk.c | 2 +- drivers/s390/block/xpram.c | 2 +- include/linux/blkdev.h | 2 +- 15 files changed, 28 insertions(+), 31 deletions(-) (limited to 'include') diff --git a/block/blk-merge.c b/block/blk-merge.c index 9c9fb21584b6..20fa22906041 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -283,20 +283,20 @@ split: /** * __blk_queue_split - split a bio and submit the second half - * @q: [in] request queue pointer * @bio: [in, out] bio to be split * @nr_segs: [out] number of segments in the first bio * * Split a bio into two bios, chain the two bios, submit the second half and * store a pointer to the first half in *@bio. If the second bio is still too * big it will be split by a recursive call to this function. Since this - * function may allocate a new bio from @q->bio_split, it is the responsibility - * of the caller to ensure that @q is only released after processing of the + * function may allocate a new bio from @bio->bi_disk->queue->bio_split, it is + * the responsibility of the caller to ensure that + * @bio->bi_disk->queue->bio_split is only released after processing of the * split bio has finished. */ -void __blk_queue_split(struct request_queue *q, struct bio **bio, - unsigned int *nr_segs) +void __blk_queue_split(struct bio **bio, unsigned int *nr_segs) { + struct request_queue *q = (*bio)->bi_disk->queue; struct bio *split = NULL; switch (bio_op(*bio)) { @@ -345,20 +345,19 @@ void __blk_queue_split(struct request_queue *q, struct bio **bio, /** * blk_queue_split - split a bio and submit the second half - * @q: [in] request queue pointer * @bio: [in, out] bio to be split * * Split a bio into two bios, chains the two bios, submit the second half and * store a pointer to the first half in *@bio. Since this function may allocate - * a new bio from @q->bio_split, it is the responsibility of the caller to - * ensure that @q is only released after processing of the split bio has - * finished. + * a new bio from @bio->bi_disk->queue->bio_split, it is the responsibility of + * the caller to ensure that @bio->bi_disk->queue->bio_split is only released + * after processing of the split bio has finished. 
*/ -void blk_queue_split(struct request_queue *q, struct bio **bio) +void blk_queue_split(struct bio **bio) { unsigned int nr_segs; - __blk_queue_split(q, bio, &nr_segs); + __blk_queue_split(bio, &nr_segs); } EXPORT_SYMBOL(blk_queue_split); diff --git a/block/blk-mq.c b/block/blk-mq.c index 65e0846fd065..dbadb7defd61 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2166,7 +2166,7 @@ blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) blk_status_t ret; blk_queue_bounce(q, &bio); - __blk_queue_split(q, &bio, &nr_segs); + __blk_queue_split(&bio, &nr_segs); if (!bio_integrity_prep(bio)) goto queue_exit; diff --git a/block/blk.h b/block/blk.h index 0184a31fe4df..0114fd92c8a0 100644 --- a/block/blk.h +++ b/block/blk.h @@ -220,8 +220,7 @@ ssize_t part_timeout_show(struct device *, struct device_attribute *, char *); ssize_t part_timeout_store(struct device *, struct device_attribute *, const char *, size_t); -void __blk_queue_split(struct request_queue *q, struct bio **bio, - unsigned int *nr_segs); +void __blk_queue_split(struct bio **bio, unsigned int *nr_segs); int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs); int ll_front_merge_fn(struct request *req, struct bio *bio, diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 3f09b2ab9778..936868047422 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -1598,7 +1598,7 @@ blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio) struct drbd_device *device = bio->bi_disk->private_data; unsigned long start_jif; - blk_queue_split(q, &bio); + blk_queue_split(&bio); start_jif = jiffies; diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 27a33adc41e4..29b0c62dc86c 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2434,7 +2434,7 @@ static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio) char b[BDEVNAME_SIZE]; struct bio *split; - blk_queue_split(q, &bio); + blk_queue_split(&bio); pd = q->queuedata; if (!pd) { diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c index 5a1d1d137c72..76cc584aa763 100644 --- a/drivers/block/ps3vram.c +++ b/drivers/block/ps3vram.c @@ -593,7 +593,7 @@ static blk_qc_t ps3vram_make_request(struct request_queue *q, struct bio *bio) dev_dbg(&dev->core, "%s\n", __func__); - blk_queue_split(q, &bio); + blk_queue_split(&bio); spin_lock_irq(&priv->lock); busy = !bio_list_empty(&priv->list); diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c index 6a4d8d26e32c..1d52bc73dd0f 100644 --- a/drivers/block/rsxx/dev.c +++ b/drivers/block/rsxx/dev.c @@ -123,7 +123,7 @@ static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio) struct rsxx_bio_meta *bio_meta; blk_status_t st = BLK_STS_IOERR; - blk_queue_split(q, &bio); + blk_queue_split(&bio); might_sleep(); diff --git a/drivers/block/umem.c b/drivers/block/umem.c index 5498f1cf36b3..3b89c07f9e9d 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c @@ -527,7 +527,7 @@ static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio) (unsigned long long)bio->bi_iter.bi_sector, bio->bi_iter.bi_size); - blk_queue_split(q, &bio); + blk_queue_split(&bio); spin_lock_irq(&card->lock); *card->biotail = bio; diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c index 6e677ff62cc9..7a4a1b7a941d 100644 --- a/drivers/lightnvm/pblk-init.c +++ b/drivers/lightnvm/pblk-init.c @@ -63,7 +63,7 @@ static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio 
*bio) * constraint. Writes can be of arbitrary size. */ if (bio_data_dir(bio) == READ) { - blk_queue_split(q, &bio); + blk_queue_split(&bio); pblk_submit_read(pblk, bio); } else { /* Prevent deadlock in the case of a modest LUN configuration @@ -71,7 +71,7 @@ static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio) * leaves at least 256KB available for user I/O. */ if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl)) - blk_queue_split(q, &bio); + blk_queue_split(&bio); pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER); } diff --git a/drivers/md/dm.c b/drivers/md/dm.c index c8d91f271c27..5aa7a604f4cb 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1776,7 +1776,7 @@ static blk_qc_t dm_process_bio(struct mapped_device *md, */ if (current->bio_list) { if (is_abnormal_io(bio)) - blk_queue_split(md->queue, &bio); + blk_queue_split(&bio); else dm_queue_split(md, ti, &bio); } diff --git a/drivers/md/md.c b/drivers/md/md.c index f567f536b529..ff20868e5e1b 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -475,7 +475,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) return BLK_QC_T_NONE; } - blk_queue_split(q, &bio); + blk_queue_split(&bio); if (mddev == NULL || mddev->pers == NULL) { bio_io_error(bio); diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index da78e499947a..5a5205ea570a 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -301,12 +301,11 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q, int srcu_idx; /* - * The namespace might be going away and the bio might - * be moved to a different queue via blk_steal_bios(), - * so we need to use the bio_split pool from the original - * queue to allocate the bvecs from. + * The namespace might be going away and the bio might be moved to a + * different queue via blk_steal_bios(), so we need to use the bio_split + * pool from the original queue to allocate the bvecs from. 
*/ - blk_queue_split(q, &bio); + blk_queue_split(&bio); srcu_idx = srcu_read_lock(&head->srcu); ns = nvme_find_path(head); diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 384edffe5cb4..dfe21eb72760 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -878,7 +878,7 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio) unsigned long source_addr; unsigned long bytes_done; - blk_queue_split(q, &bio); + blk_queue_split(&bio); bytes_done = 0; dev_info = bio->bi_disk->private_data; diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 45a04daec89e..5456f0ad5a40 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -191,7 +191,7 @@ static blk_qc_t xpram_make_request(struct request_queue *q, struct bio *bio) unsigned long page_addr; unsigned long bytes; - blk_queue_split(q, &bio); + blk_queue_split(&bio); if ((bio->bi_iter.bi_sector & 7) != 0 || (bio->bi_iter.bi_size & 4095) != 0) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 15497782c176..d002defc1789 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -871,7 +871,7 @@ extern void blk_rq_unprep_clone(struct request *rq); extern blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq); extern int blk_rq_append_bio(struct request *rq, struct bio **bio); -extern void blk_queue_split(struct request_queue *, struct bio **); +extern void blk_queue_split(struct bio **); extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int); extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t, unsigned int, void __user *); -- cgit v1.2.3 From c62b37d96b6eb3ec5ae4cbe00db107bf15aebc93 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 1 Jul 2020 10:59:43 +0200 Subject: block: move ->make_request_fn to struct block_device_operations The make_request_fn is a little weird in that it sits directly in struct request_queue instead of an operation vector. Replace it with a block_device_operations method called submit_bio (which describes much better what it does). Also remove the request_queue argument to it, as the queue can be derived pretty trivially from the bio. 
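A hedged sketch of the resulting driver-side pattern, mirroring the nfblock and simdisk conversions further down ("mydrv" is a hypothetical driver used only for illustration):

static blk_qc_t mydrv_submit_bio(struct bio *bio)
{
	struct mydrv_device *dev = bio->bi_disk->private_data;

	/* ... transfer data for dev here, then complete the bio ... */
	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static const struct block_device_operations mydrv_fops = {
	.owner		= THIS_MODULE,
	.submit_bio	= mydrv_submit_bio,
};

The handler moves out of struct request_queue and into the disk's block_device_operations, and the queue is now allocated with a plain blk_alloc_queue(NUMA_NO_NODE) call, with no callback passed in.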
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- Documentation/block/biodoc.rst | 2 +- Documentation/block/writeback_cache_control.rst | 2 +- arch/m68k/emu/nfblock.c | 5 ++- arch/xtensa/platforms/iss/simdisk.c | 5 ++- block/blk-cgroup.c | 2 +- block/blk-core.c | 53 +++++++++---------------- block/blk-mq.c | 10 ++--- block/blk.h | 2 - drivers/block/brd.c | 5 ++- drivers/block/drbd/drbd_int.h | 2 +- drivers/block/drbd/drbd_main.c | 9 +++-- drivers/block/drbd/drbd_req.c | 2 +- drivers/block/null_blk_main.c | 17 ++++++-- drivers/block/pktcdvd.c | 11 ++--- drivers/block/ps3vram.c | 15 ++++--- drivers/block/rsxx/dev.c | 7 +++- drivers/block/umem.c | 5 ++- drivers/block/zram/zram_drv.c | 11 ++--- drivers/lightnvm/core.c | 8 +--- drivers/lightnvm/pblk-init.c | 12 ++++-- drivers/md/bcache/request.c | 4 +- drivers/md/bcache/request.h | 4 +- drivers/md/bcache/super.c | 23 +++++++---- drivers/md/dm.c | 23 ++++++----- drivers/md/md.c | 5 ++- drivers/nvdimm/blk.c | 5 ++- drivers/nvdimm/btt.c | 5 ++- drivers/nvdimm/pmem.c | 5 ++- drivers/nvme/host/core.c | 1 + drivers/nvme/host/multipath.c | 5 +-- drivers/nvme/host/nvme.h | 1 + drivers/s390/block/dcssblk.c | 9 ++--- drivers/s390/block/xpram.c | 6 +-- include/linux/blk-mq.h | 2 +- include/linux/blkdev.h | 7 +--- include/linux/lightnvm.h | 3 +- 36 files changed, 153 insertions(+), 140 deletions(-) (limited to 'include') diff --git a/Documentation/block/biodoc.rst b/Documentation/block/biodoc.rst index b964796ec9c7..267384159bf7 100644 --- a/Documentation/block/biodoc.rst +++ b/Documentation/block/biodoc.rst @@ -1036,7 +1036,7 @@ Now the generic block layer performs partition-remapping early and thus provides drivers with a sector number relative to whole device, rather than having to take partition number into account in order to arrive at the true sector number. The routine blk_partition_remap() is invoked by -generic_make_request even before invoking the queue specific make_request_fn, +generic_make_request even before invoking the queue specific ->submit_bio, so the i/o scheduler also gets to operate on whole disk sector numbers. This should typically not require changes to block drivers, it just never gets to invoke its own partition sector offset calculations since all bios diff --git a/Documentation/block/writeback_cache_control.rst b/Documentation/block/writeback_cache_control.rst index 2c752c57c14c..b208488d0aae 100644 --- a/Documentation/block/writeback_cache_control.rst +++ b/Documentation/block/writeback_cache_control.rst @@ -47,7 +47,7 @@ the Forced Unit Access is implemented. The REQ_PREFLUSH and REQ_FUA flags may both be set on a single bio. 
-Implementation details for make_request_fn based block drivers +Implementation details for bio based block drivers -------------------------------------------------------------- These drivers will always see the REQ_PREFLUSH and REQ_FUA bits as they sit diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c index 87e8b1700acd..92d26c812441 100644 --- a/arch/m68k/emu/nfblock.c +++ b/arch/m68k/emu/nfblock.c @@ -59,7 +59,7 @@ struct nfhd_device { struct gendisk *disk; }; -static blk_qc_t nfhd_make_request(struct request_queue *queue, struct bio *bio) +static blk_qc_t nfhd_submit_bio(struct bio *bio) { struct nfhd_device *dev = bio->bi_disk->private_data; struct bio_vec bvec; @@ -93,6 +93,7 @@ static int nfhd_getgeo(struct block_device *bdev, struct hd_geometry *geo) static const struct block_device_operations nfhd_ops = { .owner = THIS_MODULE, + .submit_bio = nfhd_submit_bio, .getgeo = nfhd_getgeo, }; @@ -118,7 +119,7 @@ static int __init nfhd_init_one(int id, u32 blocks, u32 bsize) dev->bsize = bsize; dev->bshift = ffs(bsize) - 10; - dev->queue = blk_alloc_queue(nfhd_make_request, NUMA_NO_NODE); + dev->queue = blk_alloc_queue(NUMA_NO_NODE); if (dev->queue == NULL) goto free_dev; diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c index 31b5020077a0..5107140dbb7e 100644 --- a/arch/xtensa/platforms/iss/simdisk.c +++ b/arch/xtensa/platforms/iss/simdisk.c @@ -101,7 +101,7 @@ static void simdisk_transfer(struct simdisk *dev, unsigned long sector, spin_unlock(&dev->lock); } -static blk_qc_t simdisk_make_request(struct request_queue *q, struct bio *bio) +static blk_qc_t simdisk_submit_bio(struct bio *bio) { struct simdisk *dev = bio->bi_disk->private_data; struct bio_vec bvec; @@ -144,6 +144,7 @@ static void simdisk_release(struct gendisk *disk, fmode_t mode) static const struct block_device_operations simdisk_ops = { .owner = THIS_MODULE, + .submit_bio = simdisk_submit_bio, .open = simdisk_open, .release = simdisk_release, }; @@ -267,7 +268,7 @@ static int __init simdisk_setup(struct simdisk *dev, int which, spin_lock_init(&dev->lock); dev->users = 0; - dev->queue = blk_alloc_queue(simdisk_make_request, NUMA_NO_NODE); + dev->queue = blk_alloc_queue(NUMA_NO_NODE); if (dev->queue == NULL) { pr_err("blk_alloc_queue failed\n"); goto out_alloc_queue; diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 748c4b2a9273..594f1d0b0e5a 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1012,7 +1012,7 @@ static int blkcg_css_online(struct cgroup_subsys_state *css) * blkcg_init_queue - initialize blkcg part of request queue * @q: request_queue to initialize * - * Called from __blk_alloc_queue(). Responsible for initializing blkcg + * Called from blk_alloc_queue(). Responsible for initializing blkcg * part of new request_queue @q. * * RETURNS: diff --git a/block/blk-core.c b/block/blk-core.c index 28f60985dc75..cb07a726dd71 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -283,7 +283,7 @@ EXPORT_SYMBOL(blk_dump_rq_flags); * A block device may call blk_sync_queue to ensure that any * such activity is cancelled, thus allowing it to release resources * that the callbacks might use. The caller must already have made sure - * that its ->make_request_fn will not re-add plugging prior to calling + * that its ->submit_bio will not re-add plugging prior to calling * this function. 
* * This function does not cancel any asynchronous activity arising @@ -510,7 +510,7 @@ static void blk_timeout_work(struct work_struct *work) { } -struct request_queue *__blk_alloc_queue(int node_id) +struct request_queue *blk_alloc_queue(int node_id) { struct request_queue *q; int ret; @@ -575,6 +575,7 @@ struct request_queue *__blk_alloc_queue(int node_id) blk_queue_dma_alignment(q, 511); blk_set_default_limits(&q->limits); + q->nr_requests = BLKDEV_MAX_RQ; return q; @@ -592,21 +593,6 @@ fail_q: kmem_cache_free(blk_requestq_cachep, q); return NULL; } - -struct request_queue *blk_alloc_queue(make_request_fn make_request, int node_id) -{ - struct request_queue *q; - - if (WARN_ON_ONCE(!make_request)) - return NULL; - - q = __blk_alloc_queue(node_id); - if (!q) - return NULL; - q->make_request_fn = make_request; - q->nr_requests = BLKDEV_MAX_RQ; - return q; -} EXPORT_SYMBOL(blk_alloc_queue); /** @@ -1088,15 +1074,15 @@ end_io: static blk_qc_t do_make_request(struct bio *bio) { - struct request_queue *q = bio->bi_disk->queue; + struct gendisk *disk = bio->bi_disk; blk_qc_t ret = BLK_QC_T_NONE; if (blk_crypto_bio_prep(&bio)) { - if (!q->make_request_fn) - return blk_mq_make_request(q, bio); - ret = q->make_request_fn(q, bio); + if (!disk->fops->submit_bio) + return blk_mq_submit_bio(bio); + ret = disk->fops->submit_bio(bio); } - blk_queue_exit(q); + blk_queue_exit(disk->queue); return ret; } @@ -1113,10 +1099,9 @@ blk_qc_t generic_make_request(struct bio *bio) { /* * bio_list_on_stack[0] contains bios submitted by the current - * make_request_fn. - * bio_list_on_stack[1] contains bios that were submitted before - * the current make_request_fn, but that haven't been processed - * yet. + * ->submit_bio. + * bio_list_on_stack[1] contains bios that were submitted before the + * current ->submit_bio_bio, but that haven't been processed yet. */ struct bio_list bio_list_on_stack[2]; blk_qc_t ret = BLK_QC_T_NONE; @@ -1125,10 +1110,10 @@ blk_qc_t generic_make_request(struct bio *bio) goto out; /* - * We only want one ->make_request_fn to be active at a time, else + * We only want one ->submit_bio to be active at a time, else * stack usage with stacked devices could be a problem. So use * current->bio_list to keep a list of requests submited by a - * make_request_fn function. current->bio_list is also used as a + * ->submit_bio method. current->bio_list is also used as a * flag to say if generic_make_request is currently active in this * task or not. If it is NULL, then no make_request is active. If * it is non-NULL, then a make_request is active, and new requests @@ -1146,12 +1131,12 @@ blk_qc_t generic_make_request(struct bio *bio) * We pretend that we have just taken it off a longer list, so * we assign bio_list to a pointer to the bio_list_on_stack, * thus initialising the bio_list of new bios to be - * added. ->make_request() may indeed add some more bios + * added. ->submit_bio() may indeed add some more bios * through a recursive call to generic_make_request. If it * did, we find a non-NULL value in bio_list and re-enter the loop * from the top. In this case we really did just take the bio * of the top of the list (no pretending) and so remove it from - * bio_list, and call into ->make_request() again. + * bio_list, and call into ->submit_bio() again. 
*/ BUG_ON(bio->bi_next); bio_list_init(&bio_list_on_stack[0]); @@ -1201,9 +1186,9 @@ EXPORT_SYMBOL(generic_make_request); */ blk_qc_t direct_make_request(struct bio *bio) { - struct request_queue *q = bio->bi_disk->queue; + struct gendisk *disk = bio->bi_disk; - if (WARN_ON_ONCE(q->make_request_fn)) { + if (WARN_ON_ONCE(!disk->queue->mq_ops)) { bio_io_error(bio); return BLK_QC_T_NONE; } @@ -1212,10 +1197,10 @@ blk_qc_t direct_make_request(struct bio *bio) if (unlikely(bio_queue_enter(bio))) return BLK_QC_T_NONE; if (!blk_crypto_bio_prep(&bio)) { - blk_queue_exit(q); + blk_queue_exit(disk->queue); return BLK_QC_T_NONE; } - return blk_mq_make_request(q, bio); + return blk_mq_submit_bio(bio); } EXPORT_SYMBOL_GPL(direct_make_request); diff --git a/block/blk-mq.c b/block/blk-mq.c index dbadb7defd61..948987e9b6ab 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2136,8 +2136,7 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq) } /** - * blk_mq_make_request - Create and send a request to block device. - * @q: Request queue pointer. + * blk_mq_submit_bio - Create and send a request to block device. * @bio: Bio pointer. * * Builds up a request structure from @q and @bio and send to the device. The @@ -2151,8 +2150,9 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq) * * Returns: Request queue cookie. */ -blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) +blk_qc_t blk_mq_submit_bio(struct bio *bio) { + struct request_queue *q = bio->bi_disk->queue; const int is_sync = op_is_sync(bio->bi_opf); const int is_flush_fua = op_is_flush(bio->bi_opf); struct blk_mq_alloc_data data = { @@ -2277,7 +2277,7 @@ queue_exit: blk_queue_exit(q); return BLK_QC_T_NONE; } -EXPORT_SYMBOL_GPL(blk_mq_make_request); /* only for request based dm */ +EXPORT_SYMBOL_GPL(blk_mq_submit_bio); /* only for request based dm */ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, unsigned int hctx_idx) @@ -3017,7 +3017,7 @@ struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set, { struct request_queue *uninit_q, *q; - uninit_q = __blk_alloc_queue(set->numa_node); + uninit_q = blk_alloc_queue(set->numa_node); if (!uninit_q) return ERR_PTR(-ENOMEM); uninit_q->queuedata = queuedata; diff --git a/block/blk.h b/block/blk.h index 0114fd92c8a0..9dcf51c94096 100644 --- a/block/blk.h +++ b/block/blk.h @@ -419,8 +419,6 @@ static inline void part_nr_sects_write(struct hd_struct *part, sector_t size) #endif } -struct request_queue *__blk_alloc_queue(int node_id); - int bio_add_hw_page(struct request_queue *q, struct bio *bio, struct page *page, unsigned int len, unsigned int offset, unsigned int max_sectors, bool *same_page); diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 2fb25c348d53..2723a70eb855 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -282,7 +282,7 @@ out: return err; } -static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio) +static blk_qc_t brd_submit_bio(struct bio *bio) { struct brd_device *brd = bio->bi_disk->private_data; struct bio_vec bvec; @@ -330,6 +330,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector, static const struct block_device_operations brd_fops = { .owner = THIS_MODULE, + .submit_bio = brd_submit_bio, .rw_page = brd_rw_page, }; @@ -381,7 +382,7 @@ static struct brd_device *brd_alloc(int i) spin_lock_init(&brd->brd_lock); INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC); - brd->brd_queue = blk_alloc_queue(brd_make_request, NUMA_NO_NODE); + 
brd->brd_queue = blk_alloc_queue(NUMA_NO_NODE); if (!brd->brd_queue) goto out_free_dev; diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 33d0831c99b6..0327408da79c 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1451,7 +1451,7 @@ extern void conn_free_crypto(struct drbd_connection *connection); /* drbd_req */ extern void do_submit(struct work_struct *ws); extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long); -extern blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio); +extern blk_qc_t drbd_submit_bio(struct bio *bio); extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req); extern int is_valid_ar_handle(struct drbd_request *, sector_t); diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 26f4e0aa7393..2b05de0896e2 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -132,9 +132,10 @@ wait_queue_head_t drbd_pp_wait; DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5); static const struct block_device_operations drbd_ops = { - .owner = THIS_MODULE, - .open = drbd_open, - .release = drbd_release, + .owner = THIS_MODULE, + .submit_bio = drbd_submit_bio, + .open = drbd_open, + .release = drbd_release, }; struct bio *bio_alloc_drbd(gfp_t gfp_mask) @@ -2801,7 +2802,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig drbd_init_set_defaults(device); - q = blk_alloc_queue(drbd_make_request, NUMA_NO_NODE); + q = blk_alloc_queue(NUMA_NO_NODE); if (!q) goto out_no_q; device->rq_queue = q; diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 936868047422..c7e14c9a6e5f 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -1593,7 +1593,7 @@ void do_submit(struct work_struct *ws) } } -blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio) +blk_qc_t drbd_submit_bio(struct bio *bio) { struct drbd_device *device = bio->bi_disk->private_data; unsigned long start_jif; diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index 93ce0a00b2ae..907c6858aec0 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c @@ -1388,7 +1388,7 @@ static struct nullb_queue *nullb_to_queue(struct nullb *nullb) return &nullb->queues[index]; } -static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio) +static blk_qc_t null_submit_bio(struct bio *bio) { sector_t sector = bio->bi_iter.bi_sector; sector_t nr_sectors = bio_sectors(bio); @@ -1575,7 +1575,13 @@ static void null_config_discard(struct nullb *nullb) blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q); } -static const struct block_device_operations null_ops = { +static const struct block_device_operations null_bio_ops = { + .owner = THIS_MODULE, + .submit_bio = null_submit_bio, + .report_zones = null_report_zones, +}; + +static const struct block_device_operations null_rq_ops = { .owner = THIS_MODULE, .report_zones = null_report_zones, }; @@ -1647,7 +1653,10 @@ static int null_gendisk_register(struct nullb *nullb) disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO; disk->major = null_major; disk->first_minor = nullb->index; - disk->fops = &null_ops; + if (queue_is_mq(nullb->q)) + disk->fops = &null_rq_ops; + else + disk->fops = &null_bio_ops; disk->private_data = nullb; disk->queue = nullb->q; strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); @@ -1792,7 +1801,7 @@ static int 
null_add_dev(struct nullb_device *dev) goto out_cleanup_tags; } } else if (dev->queue_mode == NULL_Q_BIO) { - nullb->q = blk_alloc_queue(null_queue_bio, dev->home_node); + nullb->q = blk_alloc_queue(dev->home_node); if (!nullb->q) { rv = -ENOMEM; goto out_cleanup_queues; diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 29b0c62dc86c..5588bd4cd267 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -36,7 +36,7 @@ * block device, assembling the pieces to full packets and queuing them to the * packet I/O scheduler. * - * At the top layer there is a custom make_request_fn function that forwards + * At the top layer there is a custom ->submit_bio function that forwards * read requests directly to the iosched queue and puts write requests in the * unaligned write queue. A kernel thread performs the necessary read * gathering to convert the unaligned writes to aligned writes and then feeds @@ -2428,7 +2428,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio) } } -static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio) +static blk_qc_t pkt_submit_bio(struct bio *bio) { struct pktcdvd_device *pd; char b[BDEVNAME_SIZE]; @@ -2436,7 +2436,7 @@ static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio) blk_queue_split(&bio); - pd = q->queuedata; + pd = bio->bi_disk->queue->queuedata; if (!pd) { pr_err("%s incorrect request queue\n", bio_devname(bio, b)); goto end_io; @@ -2480,7 +2480,7 @@ static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio) split = bio; } - pkt_make_request_write(q, split); + pkt_make_request_write(bio->bi_disk->queue, split); } while (split != bio); return BLK_QC_T_NONE; @@ -2685,6 +2685,7 @@ static char *pkt_devnode(struct gendisk *disk, umode_t *mode) static const struct block_device_operations pktcdvd_ops = { .owner = THIS_MODULE, + .submit_bio = pkt_submit_bio, .open = pkt_open, .release = pkt_close, .ioctl = pkt_ioctl, @@ -2749,7 +2750,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) disk->flags = GENHD_FL_REMOVABLE; strcpy(disk->disk_name, pd->name); disk->private_data = pd; - disk->queue = blk_alloc_queue(pkt_make_request, NUMA_NO_NODE); + disk->queue = blk_alloc_queue(NUMA_NO_NODE); if (!disk->queue) goto out_mem2; diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c index 76cc584aa763..1088798c8dd0 100644 --- a/drivers/block/ps3vram.c +++ b/drivers/block/ps3vram.c @@ -90,12 +90,6 @@ struct ps3vram_priv { static int ps3vram_major; - -static const struct block_device_operations ps3vram_fops = { - .owner = THIS_MODULE, -}; - - #define DMA_NOTIFIER_HANDLE_BASE 0x66604200 /* first DMA notifier handle */ #define DMA_NOTIFIER_OFFSET_BASE 0x1000 /* first DMA notifier offset */ #define DMA_NOTIFIER_SIZE 0x40 @@ -585,7 +579,7 @@ out: return next; } -static blk_qc_t ps3vram_make_request(struct request_queue *q, struct bio *bio) +static blk_qc_t ps3vram_submit_bio(struct bio *bio) { struct ps3_system_bus_device *dev = bio->bi_disk->private_data; struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); @@ -610,6 +604,11 @@ static blk_qc_t ps3vram_make_request(struct request_queue *q, struct bio *bio) return BLK_QC_T_NONE; } +static const struct block_device_operations ps3vram_fops = { + .owner = THIS_MODULE, + .submit_bio = ps3vram_submit_bio, +}; + static int ps3vram_probe(struct ps3_system_bus_device *dev) { struct ps3vram_priv *priv; @@ -737,7 +736,7 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev) ps3vram_proc_init(dev); - queue 
= blk_alloc_queue(ps3vram_make_request, NUMA_NO_NODE); + queue = blk_alloc_queue(NUMA_NO_NODE); if (!queue) { dev_err(&dev->core, "blk_alloc_queue failed\n"); error = -ENOMEM; diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c index 1d52bc73dd0f..edacefff6e35 100644 --- a/drivers/block/rsxx/dev.c +++ b/drivers/block/rsxx/dev.c @@ -50,6 +50,8 @@ struct rsxx_bio_meta { static struct kmem_cache *bio_meta_pool; +static blk_qc_t rsxx_submit_bio(struct bio *bio); + /*----------------- Block Device Operations -----------------*/ static int rsxx_blkdev_ioctl(struct block_device *bdev, fmode_t mode, @@ -92,6 +94,7 @@ static int rsxx_getgeo(struct block_device *bdev, struct hd_geometry *geo) static const struct block_device_operations rsxx_fops = { .owner = THIS_MODULE, + .submit_bio = rsxx_submit_bio, .getgeo = rsxx_getgeo, .ioctl = rsxx_blkdev_ioctl, }; @@ -117,7 +120,7 @@ static void bio_dma_done_cb(struct rsxx_cardinfo *card, } } -static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio) +static blk_qc_t rsxx_submit_bio(struct bio *bio) { struct rsxx_cardinfo *card = bio->bi_disk->private_data; struct rsxx_bio_meta *bio_meta; @@ -233,7 +236,7 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card) return -ENOMEM; } - card->queue = blk_alloc_queue(rsxx_make_request, NUMA_NO_NODE); + card->queue = blk_alloc_queue(NUMA_NO_NODE); if (!card->queue) { dev_err(CARD_TO_DEV(card), "Failed queue alloc\n"); unregister_blkdev(card->major, DRIVER_NAME); diff --git a/drivers/block/umem.c b/drivers/block/umem.c index 3b89c07f9e9d..2b95d7b33b91 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c @@ -519,7 +519,7 @@ static int mm_check_plugged(struct cardinfo *card) return !!blk_check_plugged(mm_unplug, card, sizeof(struct blk_plug_cb)); } -static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio) +static blk_qc_t mm_submit_bio(struct bio *bio) { struct cardinfo *card = bio->bi_disk->private_data; @@ -779,6 +779,7 @@ static int mm_getgeo(struct block_device *bdev, struct hd_geometry *geo) static const struct block_device_operations mm_fops = { .owner = THIS_MODULE, + .submit_bio = mm_submit_bio, .getgeo = mm_getgeo, .revalidate_disk = mm_revalidate, }; @@ -886,7 +887,7 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) card->biotail = &card->bio; spin_lock_init(&card->lock); - card->queue = blk_alloc_queue(mm_make_request, NUMA_NO_NODE); + card->queue = blk_alloc_queue(NUMA_NO_NODE); if (!card->queue) goto failed_alloc; diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 0564e3f38408..f9a57f147ee1 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -793,9 +793,9 @@ static void zram_sync_read(struct work_struct *work) } /* - * Block layer want one ->make_request_fn to be active at a time - * so if we use chained IO with parent IO in same context, - * it's a deadlock. To avoid, it, it uses worker thread context. + * Block layer want one ->submit_bio to be active at a time, so if we use + * chained IO with parent IO in same context, it's a deadlock. To avoid that, + * use a worker thread context. */ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec, unsigned long entry, struct bio *bio) @@ -1584,7 +1584,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio) /* * Handler function for all zram I/O requests. 
*/ -static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio) +static blk_qc_t zram_submit_bio(struct bio *bio) { struct zram *zram = bio->bi_disk->private_data; @@ -1813,6 +1813,7 @@ static int zram_open(struct block_device *bdev, fmode_t mode) static const struct block_device_operations zram_devops = { .open = zram_open, + .submit_bio = zram_submit_bio, .swap_slot_free_notify = zram_slot_free_notify, .rw_page = zram_rw_page, .owner = THIS_MODULE @@ -1891,7 +1892,7 @@ static int zram_add(void) #ifdef CONFIG_ZRAM_WRITEBACK spin_lock_init(&zram->wb_limit_lock); #endif - queue = blk_alloc_queue(zram_make_request, NUMA_NO_NODE); + queue = blk_alloc_queue(NUMA_NO_NODE); if (!queue) { pr_err("Error allocating disk queue for device %d\n", device_id); diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index db38a68abb6c..fe78bf0fdce5 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -236,10 +236,6 @@ err_dev: return tgt_dev; } -static const struct block_device_operations nvm_fops = { - .owner = THIS_MODULE, -}; - static struct nvm_tgt_type *__nvm_find_target_type(const char *name) { struct nvm_tgt_type *tt; @@ -380,7 +376,7 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create) goto err_dev; } - tqueue = blk_alloc_queue(tt->make_rq, dev->q->node); + tqueue = blk_alloc_queue(dev->q->node); if (!tqueue) { ret = -ENOMEM; goto err_disk; @@ -390,7 +386,7 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create) tdisk->flags = GENHD_FL_EXT_DEVT; tdisk->major = 0; tdisk->first_minor = 0; - tdisk->fops = &nvm_fops; + tdisk->fops = tt->bops; tdisk->queue = tqueue; targetdata = tt->init(tgt_dev, tdisk, create->flags); diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c index 7a4a1b7a941d..b6246f73895c 100644 --- a/drivers/lightnvm/pblk-init.c +++ b/drivers/lightnvm/pblk-init.c @@ -47,9 +47,9 @@ static struct pblk_global_caches pblk_caches = { struct bio_set pblk_bio_set; -static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio) +static blk_qc_t pblk_submit_bio(struct bio *bio) { - struct pblk *pblk = q->queuedata; + struct pblk *pblk = bio->bi_disk->queue->queuedata; if (bio_op(bio) == REQ_OP_DISCARD) { pblk_discard(pblk, bio); @@ -79,6 +79,12 @@ static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio) return BLK_QC_T_NONE; } +static const struct block_device_operations pblk_bops = { + .owner = THIS_MODULE, + .submit_bio = pblk_submit_bio, +}; + + static size_t pblk_trans_map_size(struct pblk *pblk) { int entry_size = 8; @@ -1280,7 +1286,7 @@ static struct nvm_tgt_type tt_pblk = { .name = "pblk", .version = {1, 0, 0}, - .make_rq = pblk_make_rq, + .bops = &pblk_bops, .capacity = pblk_capacity, .init = pblk_init, diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 7acf024e99f3..fc5702b10074 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -1158,7 +1158,7 @@ static void quit_max_writeback_rate(struct cache_set *c, /* Cached devices - read & write stuff */ -blk_qc_t cached_dev_make_request(struct request_queue *q, struct bio *bio) +blk_qc_t cached_dev_submit_bio(struct bio *bio) { struct search *s; struct bcache_device *d = bio->bi_disk->private_data; @@ -1291,7 +1291,7 @@ static void flash_dev_nodata(struct closure *cl) continue_at(cl, search_free, NULL); } -blk_qc_t flash_dev_make_request(struct request_queue *q, struct bio *bio) +blk_qc_t flash_dev_submit_bio(struct bio *bio) { struct search *s; 
struct closure *cl; diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h index bb005c93dd72..82b38366a95d 100644 --- a/drivers/md/bcache/request.h +++ b/drivers/md/bcache/request.h @@ -37,10 +37,10 @@ unsigned int bch_get_congested(const struct cache_set *c); void bch_data_insert(struct closure *cl); void bch_cached_dev_request_init(struct cached_dev *dc); -blk_qc_t cached_dev_make_request(struct request_queue *q, struct bio *bio); +blk_qc_t cached_dev_submit_bio(struct bio *bio); void bch_flash_dev_request_init(struct bcache_device *d); -blk_qc_t flash_dev_make_request(struct request_queue *q, struct bio *bio); +blk_qc_t flash_dev_submit_bio(struct bio *bio); extern struct kmem_cache *bch_search_cache; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 21aa168113d3..de13f6e91696 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -680,7 +680,16 @@ static int ioctl_dev(struct block_device *b, fmode_t mode, return d->ioctl(d, mode, cmd, arg); } -static const struct block_device_operations bcache_ops = { +static const struct block_device_operations bcache_cached_ops = { + .submit_bio = cached_dev_submit_bio, + .open = open_dev, + .release = release_dev, + .ioctl = ioctl_dev, + .owner = THIS_MODULE, +}; + +static const struct block_device_operations bcache_flash_ops = { + .submit_bio = flash_dev_submit_bio, .open = open_dev, .release = release_dev, .ioctl = ioctl_dev, @@ -820,8 +829,8 @@ static void bcache_device_free(struct bcache_device *d) } static int bcache_device_init(struct bcache_device *d, unsigned int block_size, - sector_t sectors, make_request_fn make_request_fn, - struct block_device *cached_bdev) + sector_t sectors, struct block_device *cached_bdev, + const struct block_device_operations *ops) { struct request_queue *q; const size_t max_stripes = min_t(size_t, INT_MAX, @@ -868,10 +877,10 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size, d->disk->major = bcache_major; d->disk->first_minor = idx_to_first_minor(idx); - d->disk->fops = &bcache_ops; + d->disk->fops = ops; d->disk->private_data = d; - q = blk_alloc_queue(make_request_fn, NUMA_NO_NODE); + q = blk_alloc_queue(NUMA_NO_NODE); if (!q) return -ENOMEM; @@ -1355,7 +1364,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size) ret = bcache_device_init(&dc->disk, block_size, dc->bdev->bd_part->nr_sects - dc->sb.data_offset, - cached_dev_make_request, dc->bdev); + dc->bdev, &bcache_cached_ops); if (ret) return ret; @@ -1468,7 +1477,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u) kobject_init(&d->kobj, &bch_flash_dev_ktype); if (bcache_device_init(d, block_bytes(c), u->sectors, - flash_dev_make_request, NULL)) + NULL, &bcache_flash_ops)) goto err; bcache_device_attach(d, c, u - c->uuids); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 5aa7a604f4cb..5acfaba3700d 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1770,7 +1770,7 @@ static blk_qc_t dm_process_bio(struct mapped_device *md, } /* - * If in ->make_request_fn we need to use blk_queue_split(), otherwise + * If in ->queue_bio we need to use blk_queue_split(), otherwise * queue_limits for abnormal requests (e.g. discard, writesame, etc) * won't be imposed. 
*/ @@ -1787,7 +1787,7 @@ static blk_qc_t dm_process_bio(struct mapped_device *md, return __split_and_process_bio(md, map, bio); } -static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) +static blk_qc_t dm_submit_bio(struct bio *bio) { struct mapped_device *md = bio->bi_disk->private_data; blk_qc_t ret = BLK_QC_T_NONE; @@ -1798,12 +1798,12 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio) /* * We are called with a live reference on q_usage_counter, but * that one will be released as soon as we return. Grab an - * extra one as blk_mq_make_request expects to be able to - * consume a reference (which lives until the request is freed - * in case a request is allocated). + * extra one as blk_mq_submit_bio expects to be able to consume + * a reference (which lives until the request is freed in case a + * request is allocated). */ - percpu_ref_get(&q->q_usage_counter); - return blk_mq_make_request(q, bio); + percpu_ref_get(&bio->bi_disk->queue->q_usage_counter); + return blk_mq_submit_bio(bio); } map = dm_get_live_table(md, &srcu_idx); @@ -1988,11 +1988,11 @@ static struct mapped_device *alloc_dev(int minor) spin_lock_init(&md->uevent_lock); /* - * default to bio-based required ->make_request_fn until DM - * table is loaded and md->type established. If request-based - * table is loaded: blk-mq will override accordingly. + * default to bio-based until DM table is loaded and md->type + * established. If request-based table is loaded: blk-mq will + * override accordingly. */ - md->queue = blk_alloc_queue(dm_make_request, numa_node_id); + md->queue = blk_alloc_queue(numa_node_id); if (!md->queue) goto bad; @@ -3232,6 +3232,7 @@ static const struct pr_ops dm_pr_ops = { }; static const struct block_device_operations dm_blk_dops = { + .submit_bio = dm_submit_bio, .open = dm_blk_open, .release = dm_blk_close, .ioctl = dm_blk_ioctl, diff --git a/drivers/md/md.c b/drivers/md/md.c index ff20868e5e1b..7b7cb49be351 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -463,7 +463,7 @@ check_suspended: } EXPORT_SYMBOL(md_handle_request); -static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) +static blk_qc_t md_submit_bio(struct bio *bio) { const int rw = bio_data_dir(bio); const int sgrp = op_stat_group(bio_op(bio)); @@ -5641,7 +5641,7 @@ static int md_alloc(dev_t dev, char *name) mddev->hold_active = UNTIL_STOP; error = -ENOMEM; - mddev->queue = blk_alloc_queue(md_make_request, NUMA_NO_NODE); + mddev->queue = blk_alloc_queue(NUMA_NO_NODE); if (!mddev->queue) goto abort; @@ -7823,6 +7823,7 @@ static int md_revalidate(struct gendisk *disk) static const struct block_device_operations md_fops = { .owner = THIS_MODULE, + .submit_bio = md_submit_bio, .open = md_open, .release = md_release, .ioctl = md_ioctl, diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c index 39030a324d7f..1f718381a045 100644 --- a/drivers/nvdimm/blk.c +++ b/drivers/nvdimm/blk.c @@ -162,7 +162,7 @@ static int nsblk_do_bvec(struct nd_namespace_blk *nsblk, return err; } -static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio) +static blk_qc_t nd_blk_submit_bio(struct bio *bio) { struct bio_integrity_payload *bip; struct nd_namespace_blk *nsblk = bio->bi_disk->private_data; @@ -225,6 +225,7 @@ static int nsblk_rw_bytes(struct nd_namespace_common *ndns, static const struct block_device_operations nd_blk_fops = { .owner = THIS_MODULE, + .submit_bio = nd_blk_submit_bio, .revalidate_disk = nvdimm_revalidate_disk, }; @@ -250,7 +251,7 @@ static int 
nsblk_attach_disk(struct nd_namespace_blk *nsblk) internal_nlba = div_u64(nsblk->size, nsblk_internal_lbasize(nsblk)); available_disk_size = internal_nlba * nsblk_sector_size(nsblk); - q = blk_alloc_queue(nd_blk_make_request, NUMA_NO_NODE); + q = blk_alloc_queue(NUMA_NO_NODE); if (!q) return -ENOMEM; if (devm_add_action_or_reset(dev, nd_blk_release_queue, q)) diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 48e9d169b6f9..412d21d8f643 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -1439,7 +1439,7 @@ static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip, return ret; } -static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio) +static blk_qc_t btt_submit_bio(struct bio *bio) { struct bio_integrity_payload *bip = bio_integrity(bio); struct btt *btt = bio->bi_disk->private_data; @@ -1512,6 +1512,7 @@ static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo) static const struct block_device_operations btt_fops = { .owner = THIS_MODULE, + .submit_bio = btt_submit_bio, .rw_page = btt_rw_page, .getgeo = btt_getgeo, .revalidate_disk = nvdimm_revalidate_disk, @@ -1523,7 +1524,7 @@ static int btt_blk_init(struct btt *btt) struct nd_namespace_common *ndns = nd_btt->ndns; /* create a new disk and request queue for btt */ - btt->btt_queue = blk_alloc_queue(btt_make_request, NUMA_NO_NODE); + btt->btt_queue = blk_alloc_queue(NUMA_NO_NODE); if (!btt->btt_queue) return -ENOMEM; diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index d25e66fd942d..94790e6e0e4c 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -189,7 +189,7 @@ static blk_status_t pmem_do_write(struct pmem_device *pmem, return rc; } -static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio) +static blk_qc_t pmem_submit_bio(struct bio *bio) { int ret = 0; blk_status_t rc = 0; @@ -281,6 +281,7 @@ __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff, static const struct block_device_operations pmem_fops = { .owner = THIS_MODULE, + .submit_bio = pmem_submit_bio, .rw_page = pmem_rw_page, .revalidate_disk = nvdimm_revalidate_disk, }; @@ -423,7 +424,7 @@ static int pmem_attach_disk(struct device *dev, return -EBUSY; } - q = blk_alloc_queue(pmem_make_request, dev_to_node(dev)); + q = blk_alloc_queue(dev_to_node(dev)); if (!q) return -ENOMEM; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 6810c8812aed..5192a024dc1b 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2178,6 +2178,7 @@ static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode) const struct block_device_operations nvme_ns_head_ops = { .owner = THIS_MODULE, + .submit_bio = nvme_ns_head_submit_bio, .open = nvme_ns_head_open, .release = nvme_ns_head_release, .ioctl = nvme_ioctl, diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 5a5205ea570a..89afcf943bf8 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -291,8 +291,7 @@ static bool nvme_available_path(struct nvme_ns_head *head) return false; } -static blk_qc_t nvme_ns_head_make_request(struct request_queue *q, - struct bio *bio) +blk_qc_t nvme_ns_head_submit_bio(struct bio *bio) { struct nvme_ns_head *head = bio->bi_disk->private_data; struct device *dev = disk_to_dev(head->disk); @@ -374,7 +373,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || !multipath) return 0; - q = 
blk_alloc_queue(nvme_ns_head_make_request, ctrl->numa_node); + q = blk_alloc_queue(ctrl->numa_node); if (!q) goto out; blk_queue_flag_set(QUEUE_FLAG_NONROT, q); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 61780c38f51f..9f2b0e0b4558 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -586,6 +586,7 @@ void nvme_mpath_stop(struct nvme_ctrl *ctrl); bool nvme_mpath_clear_current_path(struct nvme_ns *ns); void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl); struct nvme_ns *nvme_find_path(struct nvme_ns_head *head); +blk_qc_t nvme_ns_head_submit_bio(struct bio *bio); static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) { diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index dfe21eb72760..35666c3537de 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -31,8 +31,7 @@ static int dcssblk_open(struct block_device *bdev, fmode_t mode); static void dcssblk_release(struct gendisk *disk, fmode_t mode); -static blk_qc_t dcssblk_make_request(struct request_queue *q, - struct bio *bio); +static blk_qc_t dcssblk_submit_bio(struct bio *bio); static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn); @@ -41,6 +40,7 @@ static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0"; static int dcssblk_major; static const struct block_device_operations dcssblk_devops = { .owner = THIS_MODULE, + .submit_bio = dcssblk_submit_bio, .open = dcssblk_open, .release = dcssblk_release, }; @@ -651,8 +651,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char } dev_info->gd->major = dcssblk_major; dev_info->gd->fops = &dcssblk_devops; - dev_info->dcssblk_queue = - blk_alloc_queue(dcssblk_make_request, NUMA_NO_NODE); + dev_info->dcssblk_queue = blk_alloc_queue(NUMA_NO_NODE); dev_info->gd->queue = dev_info->dcssblk_queue; dev_info->gd->private_data = dev_info; blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096); @@ -868,7 +867,7 @@ dcssblk_release(struct gendisk *disk, fmode_t mode) } static blk_qc_t -dcssblk_make_request(struct request_queue *q, struct bio *bio) +dcssblk_submit_bio(struct bio *bio) { struct dcssblk_dev_info *dev_info; struct bio_vec bvec; diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 5456f0ad5a40..c2536f7767b3 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -182,7 +182,7 @@ static unsigned long xpram_highest_page_index(void) /* * Block device make request function. 
*/ -static blk_qc_t xpram_make_request(struct request_queue *q, struct bio *bio) +static blk_qc_t xpram_submit_bio(struct bio *bio) { xpram_device_t *xdev = bio->bi_disk->private_data; struct bio_vec bvec; @@ -250,6 +250,7 @@ static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo) static const struct block_device_operations xpram_devops = { .owner = THIS_MODULE, + .submit_bio = xpram_submit_bio, .getgeo = xpram_getgeo, }; @@ -343,8 +344,7 @@ static int __init xpram_setup_blkdev(void) xpram_disks[i] = alloc_disk(1); if (!xpram_disks[i]) goto out; - xpram_queues[i] = blk_alloc_queue(xpram_make_request, - NUMA_NO_NODE); + xpram_queues[i] = blk_alloc_queue(NUMA_NO_NODE); if (!xpram_queues[i]) { put_disk(xpram_disks[i]); goto out; diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index faa340b70123..23230c1d031e 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -596,6 +596,6 @@ static inline void blk_mq_cleanup_rq(struct request *rq) rq->q->mq_ops->cleanup_rq(rq); } -blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio); +blk_qc_t blk_mq_submit_bio(struct bio *bio); #endif diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index d002defc1789..083ffc5bc51b 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -286,8 +286,6 @@ static inline unsigned short req_get_ioprio(struct request *req) struct blk_queue_ctx; -typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio); - struct bio_vec; enum blk_eh_timer_return { @@ -398,8 +396,6 @@ struct request_queue { struct blk_queue_stats *stats; struct rq_qos *rq_qos; - make_request_fn *make_request_fn; - const struct blk_mq_ops *mq_ops; /* sw queues */ @@ -1162,7 +1158,7 @@ static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq, extern void blk_dump_rq_flags(struct request *, char *); bool __must_check blk_get_queue(struct request_queue *); -struct request_queue *blk_alloc_queue(make_request_fn make_request, int node_id); +struct request_queue *blk_alloc_queue(int node_id); extern void blk_put_queue(struct request_queue *); extern void blk_set_queue_dying(struct request_queue *); @@ -1778,6 +1774,7 @@ static inline void blk_ksm_unregister(struct request_queue *q) { } struct block_device_operations { + blk_qc_t (*submit_bio) (struct bio *bio); int (*open) (struct block_device *, fmode_t); void (*release) (struct gendisk *, fmode_t); int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int); diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index ee8ec2e68055..1db223710b28 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -631,7 +631,6 @@ static inline int nvm_next_ppa_in_chk(struct nvm_tgt_dev *dev, return last; } -typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *); typedef sector_t (nvm_tgt_capacity_fn)(void *); typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *, int flags); @@ -650,7 +649,7 @@ struct nvm_tgt_type { int flags; /* target entry points */ - nvm_tgt_make_rq_fn *make_rq; + const struct block_device_operations *bops; nvm_tgt_capacity_fn *capacity; /* module-specific init/teardown */ -- cgit v1.2.3 From ed00aabd5eb9fb44d6aff1173234a2e911b9fead Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 1 Jul 2020 10:59:44 +0200 Subject: block: rename generic_make_request to submit_bio_noacct generic_make_request has always been very confusingly misnamed, so rename it to submit_bio_noacct to make it clear that it is 
submit_bio minus accounting and a few checks. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- Documentation/block/biodoc.rst | 2 +- Documentation/fault-injection/fault-injection.rst | 2 +- Documentation/trace/ftrace.rst | 4 +-- block/bio.c | 14 +++++----- block/blk-core.c | 32 +++++++++++------------ block/blk-crypto-fallback.c | 2 +- block/blk-crypto.c | 2 +- block/blk-merge.c | 2 +- block/blk-throttle.c | 4 +-- block/bounce.c | 2 +- drivers/block/drbd/drbd_int.h | 6 ++--- drivers/block/drbd/drbd_main.c | 2 +- drivers/block/drbd/drbd_receiver.c | 2 +- drivers/block/drbd/drbd_req.c | 2 +- drivers/block/drbd/drbd_worker.c | 2 +- drivers/block/pktcdvd.c | 2 +- drivers/lightnvm/pblk-read.c | 2 +- drivers/md/bcache/bcache.h | 2 +- drivers/md/bcache/btree.c | 2 +- drivers/md/bcache/request.c | 7 +++-- drivers/md/dm-cache-target.c | 6 ++--- drivers/md/dm-clone-target.c | 10 +++---- drivers/md/dm-crypt.c | 6 ++--- drivers/md/dm-delay.c | 2 +- drivers/md/dm-era-target.c | 2 +- drivers/md/dm-integrity.c | 4 +-- drivers/md/dm-mpath.c | 2 +- drivers/md/dm-raid1.c | 2 +- drivers/md/dm-snap-persistent.c | 2 +- drivers/md/dm-snap.c | 6 ++--- drivers/md/dm-thin.c | 4 +-- drivers/md/dm-verity-target.c | 2 +- drivers/md/dm-writecache.c | 2 +- drivers/md/dm-zoned-target.c | 2 +- drivers/md/dm.c | 10 +++---- drivers/md/md-faulty.c | 4 +-- drivers/md/md-linear.c | 4 +-- drivers/md/md-multipath.c | 4 +-- drivers/md/raid0.c | 8 +++--- drivers/md/raid1.c | 14 +++++----- drivers/md/raid10.c | 28 ++++++++++---------- drivers/md/raid5.c | 10 +++---- drivers/nvme/host/multipath.c | 2 +- include/linux/blkdev.h | 2 +- 44 files changed, 115 insertions(+), 118 deletions(-) (limited to 'include') diff --git a/Documentation/block/biodoc.rst b/Documentation/block/biodoc.rst index 267384159bf7..afda5e30a82e 100644 --- a/Documentation/block/biodoc.rst +++ b/Documentation/block/biodoc.rst @@ -1036,7 +1036,7 @@ Now the generic block layer performs partition-remapping early and thus provides drivers with a sector number relative to whole device, rather than having to take partition number into account in order to arrive at the true sector number. The routine blk_partition_remap() is invoked by -generic_make_request even before invoking the queue specific ->submit_bio, +submit_bio_noacct even before invoking the queue specific ->submit_bio, so the i/o scheduler also gets to operate on whole disk sector numbers. This should typically not require changes to block drivers, it just never gets to invoke its own partition sector offset calculations since all bios diff --git a/Documentation/fault-injection/fault-injection.rst b/Documentation/fault-injection/fault-injection.rst index f51bb21d20e4..f850ad018b70 100644 --- a/Documentation/fault-injection/fault-injection.rst +++ b/Documentation/fault-injection/fault-injection.rst @@ -24,7 +24,7 @@ Available fault injection capabilities injects disk IO errors on devices permitted by setting /sys/block//make-it-fail or - /sys/block///make-it-fail. (generic_make_request()) + /sys/block///make-it-fail. 
(submit_bio_noacct()) - fail_mmc_request diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst index 430a16283103..80ba765a8237 100644 --- a/Documentation/trace/ftrace.rst +++ b/Documentation/trace/ftrace.rst @@ -1453,7 +1453,7 @@ function-trace, we get a much larger output:: => __blk_run_queue_uncond => __blk_run_queue => blk_queue_bio - => generic_make_request + => submit_bio_noacct => submit_bio => submit_bh => __ext3_get_inode_loc @@ -1738,7 +1738,7 @@ tracers. => __blk_run_queue_uncond => __blk_run_queue => blk_queue_bio - => generic_make_request + => submit_bio_noacct => submit_bio => submit_bh => ext3_bread diff --git a/block/bio.c b/block/bio.c index fc1299f9d86a..ef91782fd668 100644 --- a/block/bio.c +++ b/block/bio.c @@ -358,7 +358,7 @@ static void bio_alloc_rescue(struct work_struct *work) if (!bio) break; - generic_make_request(bio); + submit_bio_noacct(bio); } } @@ -416,19 +416,19 @@ static void punt_bios_to_rescuer(struct bio_set *bs) * submit the previously allocated bio for IO before attempting to allocate * a new one. Failure to do so can cause deadlocks under memory pressure. * - * Note that when running under generic_make_request() (i.e. any block + * Note that when running under submit_bio_noacct() (i.e. any block * driver), bios are not submitted until after you return - see the code in - * generic_make_request() that converts recursion into iteration, to prevent + * submit_bio_noacct() that converts recursion into iteration, to prevent * stack overflows. * * This would normally mean allocating multiple bios under - * generic_make_request() would be susceptible to deadlocks, but we have + * submit_bio_noacct() would be susceptible to deadlocks, but we have * deadlock avoidance code that resubmits any blocked bios from a rescuer * thread. * * However, we do not guarantee forward progress for allocations from other * mempools. Doing multiple allocations from the same mempool under - * generic_make_request() should be avoided - instead, use bio_set's front_pad + * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad * for per bio allocations. * * RETURNS: @@ -457,14 +457,14 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs, nr_iovecs > 0)) return NULL; /* - * generic_make_request() converts recursion to iteration; this + * submit_bio_noacct() converts recursion to iteration; this * means if we're running beneath it, any bios we allocate and * submit will not be submitted (and thus freed) until after we * return. * * This exposes us to a potential deadlock if we allocate * multiple bios from the same bio_set() while running - * underneath generic_make_request(). If we were to allocate + * underneath submit_bio_noacct(). If we were to allocate * multiple bios (say a stacking block driver that was splitting * bios), we would deadlock if we exhausted the mempool's * reserve. 
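The pattern these bio.c comments describe shows up repeatedly in the hunks further down (blk-merge, bounce, pblk-read): a stacking driver splits an oversized bio, chains the pieces, and feeds the remainder back through submit_bio_noacct(), relying on the recursion-to-iteration logic there instead of recursing itself. A condensed sketch, with max_sectors and my_bio_set standing in for whatever limit and bio_set the driver actually uses:

    struct bio *split;

    split = bio_split(bio, max_sectors, GFP_NOIO, &my_bio_set);
    bio_chain(split, bio);
    /*
     * The remainder is only queued on current->bio_list here; it is
     * really issued once the current ->submit_bio call unwinds.
     */
    submit_bio_noacct(bio);
    bio = split;
    /* continue processing 'bio', now the first max_sectors worth */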
diff --git a/block/blk-core.c b/block/blk-core.c index cb07a726dd71..ff9a88d2d244 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -956,8 +956,7 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q, return BLK_STS_OK; } -static noinline_for_stack bool -generic_make_request_checks(struct bio *bio) +static noinline_for_stack bool submit_bio_checks(struct bio *bio) { struct request_queue *q = bio->bi_disk->queue; blk_status_t status = BLK_STS_IOERR; @@ -985,9 +984,8 @@ generic_make_request_checks(struct bio *bio) } /* - * Filter flush bio's early so that make_request based - * drivers without flush support don't have to worry - * about them. + * Filter flush bio's early so that bio based drivers without flush + * support don't have to worry about them. */ if (op_is_flush(bio->bi_opf) && !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) { @@ -1072,7 +1070,7 @@ end_io: return false; } -static blk_qc_t do_make_request(struct bio *bio) +static blk_qc_t __submit_bio(struct bio *bio) { struct gendisk *disk = bio->bi_disk; blk_qc_t ret = BLK_QC_T_NONE; @@ -1087,7 +1085,7 @@ static blk_qc_t do_make_request(struct bio *bio) } /** - * generic_make_request - re-submit a bio to the block device layer for I/O + * submit_bio_noacct - re-submit a bio to the block device layer for I/O * @bio: The bio describing the location in memory and on the device. * * This is a version of submit_bio() that shall only be used for I/O that is @@ -1095,7 +1093,7 @@ static blk_qc_t do_make_request(struct bio *bio) * systems and other upper level users of the block layer should use * submit_bio() instead. */ -blk_qc_t generic_make_request(struct bio *bio) +blk_qc_t submit_bio_noacct(struct bio *bio) { /* * bio_list_on_stack[0] contains bios submitted by the current @@ -1106,7 +1104,7 @@ blk_qc_t generic_make_request(struct bio *bio) struct bio_list bio_list_on_stack[2]; blk_qc_t ret = BLK_QC_T_NONE; - if (!generic_make_request_checks(bio)) + if (!submit_bio_checks(bio)) goto out; /* @@ -1114,7 +1112,7 @@ blk_qc_t generic_make_request(struct bio *bio) * stack usage with stacked devices could be a problem. So use * current->bio_list to keep a list of requests submited by a * ->submit_bio method. current->bio_list is also used as a - * flag to say if generic_make_request is currently active in this + * flag to say if submit_bio_noacct is currently active in this * task or not. If it is NULL, then no make_request is active. If * it is non-NULL, then a make_request is active, and new requests * should be added at the tail @@ -1132,7 +1130,7 @@ blk_qc_t generic_make_request(struct bio *bio) * we assign bio_list to a pointer to the bio_list_on_stack, * thus initialising the bio_list of new bios to be * added. ->submit_bio() may indeed add some more bios - * through a recursive call to generic_make_request. If it + * through a recursive call to submit_bio_noacct. If it * did, we find a non-NULL value in bio_list and re-enter the loop * from the top. 
In this case we really did just take the bio * of the top of the list (no pretending) and so remove it from @@ -1150,7 +1148,7 @@ blk_qc_t generic_make_request(struct bio *bio) /* Create a fresh bio_list for all subordinate requests */ bio_list_on_stack[1] = bio_list_on_stack[0]; bio_list_init(&bio_list_on_stack[0]); - ret = do_make_request(bio); + ret = __submit_bio(bio); /* sort new bios into those for a lower level * and those for the same level @@ -1174,13 +1172,13 @@ blk_qc_t generic_make_request(struct bio *bio) out: return ret; } -EXPORT_SYMBOL(generic_make_request); +EXPORT_SYMBOL(submit_bio_noacct); /** * direct_make_request - hand a buffer directly to its device driver for I/O * @bio: The bio describing the location in memory and on the device. * - * This function behaves like generic_make_request(), but does not protect + * This function behaves like submit_bio_noacct(), but does not protect * against recursion. Must only be used if the called driver is known * to be blk-mq based. */ @@ -1192,7 +1190,7 @@ blk_qc_t direct_make_request(struct bio *bio) bio_io_error(bio); return BLK_QC_T_NONE; } - if (!generic_make_request_checks(bio)) + if (!submit_bio_checks(bio)) return BLK_QC_T_NONE; if (unlikely(bio_queue_enter(bio))) return BLK_QC_T_NONE; @@ -1263,13 +1261,13 @@ blk_qc_t submit_bio(struct bio *bio) blk_qc_t ret; psi_memstall_enter(&pflags); - ret = generic_make_request(bio); + ret = submit_bio_noacct(bio); psi_memstall_leave(&pflags); return ret; } - return generic_make_request(bio); + return submit_bio_noacct(bio); } EXPORT_SYMBOL(submit_bio); diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c index 6e49688a2d80..c162b754efbd 100644 --- a/block/blk-crypto-fallback.c +++ b/block/blk-crypto-fallback.c @@ -228,7 +228,7 @@ static bool blk_crypto_split_bio_if_needed(struct bio **bio_ptr) return false; } bio_chain(split_bio, bio); - generic_make_request(bio); + submit_bio_noacct(bio); *bio_ptr = split_bio; } diff --git a/block/blk-crypto.c b/block/blk-crypto.c index 6533c9b36ab8..2d5e60023b08 100644 --- a/block/blk-crypto.c +++ b/block/blk-crypto.c @@ -239,7 +239,7 @@ void __blk_crypto_free_request(struct request *rq) * kernel crypto API. When the crypto API fallback is used for encryption, * blk-crypto may choose to split the bio into 2 - the first one that will * continue to be processed and the second one that will be resubmitted via - * generic_make_request. A bounce bio will be allocated to encrypt the contents + * submit_bio_noacct. A bounce bio will be allocated to encrypt the contents * of the aforementioned "first one", and *bio_ptr will be updated to this * bounce bio. 
* diff --git a/block/blk-merge.c b/block/blk-merge.c index 20fa22906041..5196dc145270 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -338,7 +338,7 @@ void __blk_queue_split(struct bio **bio, unsigned int *nr_segs) bio_chain(split, *bio); trace_block_split(q, split, (*bio)->bi_iter.bi_sector); - generic_make_request(*bio); + submit_bio_noacct(*bio); *bio = split; } } diff --git a/block/blk-throttle.c b/block/blk-throttle.c index ad37043297ed..fee3325edf27 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -1339,8 +1339,8 @@ static void blk_throtl_dispatch_work_fn(struct work_struct *work) if (!bio_list_empty(&bio_list_on_stack)) { blk_start_plug(&plug); - while((bio = bio_list_pop(&bio_list_on_stack))) - generic_make_request(bio); + while ((bio = bio_list_pop(&bio_list_on_stack))) + submit_bio_noacct(bio); blk_finish_plug(&plug); } } diff --git a/block/bounce.c b/block/bounce.c index c3aaed070124..431be88a0240 100644 --- a/block/bounce.c +++ b/block/bounce.c @@ -309,7 +309,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, if (!passthrough && sectors < bio_sectors(*bio_orig)) { bio = bio_split(*bio_orig, sectors, GFP_NOIO, &bounce_bio_split); bio_chain(bio, *bio_orig); - generic_make_request(*bio_orig); + submit_bio_noacct(*bio_orig); *bio_orig = bio; } bio = bounce_clone_bio(*bio_orig, GFP_NOIO, passthrough ? NULL : diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 0327408da79c..fe6cb99eb917 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1576,12 +1576,12 @@ void drbd_set_my_capacity(struct drbd_device *device, sector_t size); /* * used to submit our private bio */ -static inline void drbd_generic_make_request(struct drbd_device *device, +static inline void drbd_submit_bio_noacct(struct drbd_device *device, int fault_type, struct bio *bio) { __release(local); if (!bio->bi_disk) { - drbd_err(device, "drbd_generic_make_request: bio->bi_disk == NULL\n"); + drbd_err(device, "drbd_submit_bio_noacct: bio->bi_disk == NULL\n"); bio->bi_status = BLK_STS_IOERR; bio_endio(bio); return; @@ -1590,7 +1590,7 @@ static inline void drbd_generic_make_request(struct drbd_device *device, if (drbd_insert_fault(device, fault_type)) bio_io_error(bio); else - generic_make_request(bio); + submit_bio_noacct(bio); } void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev, diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 2b05de0896e2..7c34cc0ad8cc 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2325,7 +2325,7 @@ static void do_retry(struct work_struct *ws) * workqueues instead. */ - /* We are not just doing generic_make_request(), + /* We are not just doing submit_bio_noacct(), * as we want to keep the start_time information. 
*/ inc_ap_bio(device); __drbd_make_request(device, bio, start_jif); diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 3a3f2b6a821f..c74f561b4eab 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -1723,7 +1723,7 @@ next_bio: bios = bios->bi_next; bio->bi_next = NULL; - drbd_generic_make_request(device, fault_type, bio); + drbd_submit_bio_noacct(device, fault_type, bio); } while (bios); return 0; diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index c7e14c9a6e5f..674be09b2da9 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -1164,7 +1164,7 @@ drbd_submit_req_private_bio(struct drbd_request *req) else if (bio_op(bio) == REQ_OP_DISCARD) drbd_process_discard_or_zeroes_req(req, EE_TRIM); else - generic_make_request(bio); + submit_bio_noacct(bio); put_ldev(device); } else bio_io_error(bio); diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 2b89c9f2ca70..7c903de5c4e1 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -1525,7 +1525,7 @@ int w_restart_disk_io(struct drbd_work *w, int cancel) drbd_req_make_private_bio(req, req->master_bio); bio_set_dev(req->private_bio, device->ldev->backing_bdev); - generic_make_request(req->private_bio); + submit_bio_noacct(req->private_bio); return 0; } diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 5588bd4cd267..4becc1efe775 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -913,7 +913,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd) } atomic_inc(&pd->cdrw.pending_bios); - generic_make_request(bio); + submit_bio_noacct(bio); } } diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c index 140927ebf41e..c28537a489bc 100644 --- a/drivers/lightnvm/pblk-read.c +++ b/drivers/lightnvm/pblk-read.c @@ -320,7 +320,7 @@ split_retry: split_bio = bio_split(bio, nr_secs * NR_PHY_IN_LOG, GFP_KERNEL, &pblk_bio_set); bio_chain(split_bio, bio); - generic_make_request(bio); + submit_bio_noacct(bio); /* New bio contains first N sectors of the previous one, so * we can continue to use existing rqd, but we need to shrink diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 221e0191b687..3c708e8b5e2d 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -929,7 +929,7 @@ static inline void closure_bio_submit(struct cache_set *c, bio_endio(bio); return; } - generic_make_request(bio); + submit_bio_noacct(bio); } /* diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 6548a601edf0..d5c51e332046 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -959,7 +959,7 @@ err: * bch_btree_node_get - find a btree node in the cache and lock it, reading it * in from disk if necessary. * - * If IO is necessary and running under generic_make_request, returns -EAGAIN. + * If IO is necessary and running under submit_bio_noacct, returns -EAGAIN. * * The btree node will have either a read or a write lock held, depending on * level and op->lock. 
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index fc5702b10074..dd012ebface0 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -1115,7 +1115,7 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio) !blk_queue_discard(bdev_get_queue(dc->bdev))) bio->bi_end_io(bio); else - generic_make_request(bio); + submit_bio_noacct(bio); } static void quit_max_writeback_rate(struct cache_set *c, @@ -1197,7 +1197,7 @@ blk_qc_t cached_dev_submit_bio(struct bio *bio) if (!bio->bi_iter.bi_size) { /* * can't call bch_journal_meta from under - * generic_make_request + * submit_bio_noacct */ continue_at_nobarrier(&s->cl, cached_dev_nodata, @@ -1311,8 +1311,7 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio) if (!bio->bi_iter.bi_size) { /* - * can't call bch_journal_meta from under - * generic_make_request + * can't call bch_journal_meta from under submit_bio_noacct */ continue_at_nobarrier(&s->cl, flash_dev_nodata, diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index d3bb355819a4..9eccced92896 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -886,7 +886,7 @@ static void accounted_complete(struct cache *cache, struct bio *bio) static void accounted_request(struct cache *cache, struct bio *bio) { accounted_begin(cache, bio); - generic_make_request(bio); + submit_bio_noacct(bio); } static void issue_op(struct bio *bio, void *context) @@ -1792,7 +1792,7 @@ static bool process_bio(struct cache *cache, struct bio *bio) bool commit_needed; if (map_bio(cache, bio, get_bio_block(cache, bio), &commit_needed) == DM_MAPIO_REMAPPED) - generic_make_request(bio); + submit_bio_noacct(bio); return commit_needed; } @@ -1858,7 +1858,7 @@ static bool process_discard_bio(struct cache *cache, struct bio *bio) if (cache->features.discard_passdown) { remap_to_origin(cache, bio); - generic_make_request(bio); + submit_bio_noacct(bio); } else bio_endio(bio); diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c index 5ce96ddf1ce1..59ed8a67c2e3 100644 --- a/drivers/md/dm-clone-target.c +++ b/drivers/md/dm-clone-target.c @@ -330,7 +330,7 @@ static void submit_bios(struct bio_list *bios) blk_start_plug(&plug); while ((bio = bio_list_pop(bios))) - generic_make_request(bio); + submit_bio_noacct(bio); blk_finish_plug(&plug); } @@ -346,7 +346,7 @@ static void submit_bios(struct bio_list *bios) static void issue_bio(struct clone *clone, struct bio *bio) { if (!bio_triggers_commit(clone, bio)) { - generic_make_request(bio); + submit_bio_noacct(bio); return; } @@ -473,7 +473,7 @@ static void complete_discard_bio(struct clone *clone, struct bio *bio, bool succ bio_region_range(clone, bio, &rs, &nr_regions); trim_bio(bio, region_to_sector(clone, rs), nr_regions << clone->region_shift); - generic_make_request(bio); + submit_bio_noacct(bio); } else bio_endio(bio); } @@ -865,7 +865,7 @@ static void hydration_overwrite(struct dm_clone_region_hydration *hd, struct bio bio->bi_private = hd; atomic_inc(&hd->clone->hydrations_in_flight); - generic_make_request(bio); + submit_bio_noacct(bio); } /* @@ -1281,7 +1281,7 @@ static void process_deferred_flush_bios(struct clone *clone) */ bio_endio(bio); } else { - generic_make_request(bio); + submit_bio_noacct(bio); } } } diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 000ddfab5ba0..ad324abb8c49 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1789,7 +1789,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) 
return 1; } - generic_make_request(clone); + submit_bio_noacct(clone); return 0; } @@ -1815,7 +1815,7 @@ static void kcryptd_io_write(struct dm_crypt_io *io) { struct bio *clone = io->ctx.bio_out; - generic_make_request(clone); + submit_bio_noacct(clone); } #define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node) @@ -1893,7 +1893,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) clone->bi_iter.bi_sector = cc->start + io->sector; if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) { - generic_make_request(clone); + submit_bio_noacct(clone); return; } diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index f496213f8b67..2628a832787b 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -72,7 +72,7 @@ static void flush_bios(struct bio *bio) while (bio) { n = bio->bi_next; bio->bi_next = NULL; - generic_make_request(bio); + submit_bio_noacct(bio); bio = n; } } diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c index bdb84b8e7162..566ddbdb16a4 100644 --- a/drivers/md/dm-era-target.c +++ b/drivers/md/dm-era-target.c @@ -1265,7 +1265,7 @@ static void process_deferred_bios(struct era *era) bio_io_error(bio); else while ((bio = bio_list_pop(&marked_bios))) - generic_make_request(bio); + submit_bio_noacct(bio); } static void process_rpc_calls(struct era *era) diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 81dc5ff08909..ae866e469e1b 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -2115,12 +2115,12 @@ offload_to_thread: dio->in_flight = (atomic_t)ATOMIC_INIT(1); dio->completion = NULL; - generic_make_request(bio); + submit_bio_noacct(bio); return; } - generic_make_request(bio); + submit_bio_noacct(bio); if (need_sync_io) { wait_for_completion_io(&read_comp); diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 78cff42d987e..73bb23de6336 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -677,7 +677,7 @@ static void process_queued_bios(struct work_struct *work) bio_endio(bio); break; case DM_MAPIO_REMAPPED: - generic_make_request(bio); + submit_bio_noacct(bio); break; case DM_MAPIO_SUBMITTED: break; diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 2f655d9f4200..fa09bc4e4c54 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -779,7 +779,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) wakeup_mirrord(ms); } else { map_bio(get_default_mirror(ms), bio); - generic_make_request(bio); + submit_bio_noacct(bio); } } } diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 963d3774c93e..2d1d4a4c399c 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -252,7 +252,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int op, /* * Issue the synchronous I/O from a different thread - * to avoid generic_make_request recursion. + * to avoid submit_bio_noacct recursion. 
*/ INIT_WORK_ONSTACK(&req.work, do_metadata); queue_work(ps->metadata_wq, &req.work); diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 6b11a266299f..4668b2cd98f4 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -1568,7 +1568,7 @@ static void flush_bios(struct bio *bio) while (bio) { n = bio->bi_next; bio->bi_next = NULL; - generic_make_request(bio); + submit_bio_noacct(bio); bio = n; } } @@ -1588,7 +1588,7 @@ static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio) bio->bi_next = NULL; r = do_origin(s->origin, bio, false); if (r == DM_MAPIO_REMAPPED) - generic_make_request(bio); + submit_bio_noacct(bio); bio = n; } } @@ -1829,7 +1829,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe, bio->bi_end_io = full_bio_end_io; bio->bi_private = callback_data; - generic_make_request(bio); + submit_bio_noacct(bio); } static struct dm_snap_pending_exception * diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index fa8d5464c1fb..fe2de2888709 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -758,7 +758,7 @@ static void issue(struct thin_c *tc, struct bio *bio) struct pool *pool = tc->pool; if (!bio_triggers_commit(tc, bio)) { - generic_make_request(bio); + submit_bio_noacct(bio); return; } @@ -2394,7 +2394,7 @@ static void process_deferred_bios(struct pool *pool) if (bio->bi_opf & REQ_PREFLUSH) bio_endio(bio); else - generic_make_request(bio); + submit_bio_noacct(bio); } } diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index eec9f252e935..75fa4d9b7617 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -681,7 +681,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio) verity_submit_prefetch(v, io); - generic_make_request(bio); + submit_bio_noacct(bio); return DM_MAPIO_SUBMITTED; } diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 74f3c506f084..62421554b838 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -1238,7 +1238,7 @@ static int writecache_flush_thread(void *data) bio_end_sector(bio)); wc_unlock(wc); bio_set_dev(bio, wc->dev->bdev); - generic_make_request(bio); + submit_bio_noacct(bio); } else { writecache_flush(wc); wc_unlock(wc); diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index a907a9446c0b..05a3cfefe937 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -140,7 +140,7 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone, bio_advance(bio, clone->bi_iter.bi_size); refcount_inc(&bioctx->ref); - generic_make_request(clone); + submit_bio_noacct(clone); if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone)) zone->wp_block += nr_blocks; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 5acfaba3700d..b32b539dbace 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1305,7 +1305,7 @@ static blk_qc_t __map_bio(struct dm_target_io *tio) if (md->type == DM_TYPE_NVME_BIO_BASED) ret = direct_make_request(clone); else - ret = generic_make_request(clone); + ret = submit_bio_noacct(clone); break; case DM_MAPIO_KILL: free_tio(tio); @@ -1652,7 +1652,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, error = __split_and_process_non_flush(&ci); if (current->bio_list && ci.sector_count && !error) { /* - * Remainder must be passed to generic_make_request() + * Remainder must be passed to submit_bio_noacct() * so that it gets handled *after* bios already submitted * have been completely processed. 
* We take a clone of the original to store in @@ -1677,7 +1677,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, bio_chain(b, bio); trace_block_split(md->queue, b, bio->bi_iter.bi_sector); - ret = generic_make_request(bio); + ret = submit_bio_noacct(bio); break; } } @@ -1745,7 +1745,7 @@ static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struc bio_chain(split, *bio); trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector); - generic_make_request(*bio); + submit_bio_noacct(*bio); *bio = split; } } @@ -2500,7 +2500,7 @@ static void dm_wq_work(struct work_struct *work) break; if (dm_request_based(md)) - (void) generic_make_request(c); + (void) submit_bio_noacct(c); else (void) dm_process_bio(md, map, c); } diff --git a/drivers/md/md-faulty.c b/drivers/md/md-faulty.c index 50ad4ba86f0e..fda4cb3f936f 100644 --- a/drivers/md/md-faulty.c +++ b/drivers/md/md-faulty.c @@ -169,7 +169,7 @@ static bool faulty_make_request(struct mddev *mddev, struct bio *bio) if (bio_data_dir(bio) == WRITE) { /* write request */ if (atomic_read(&conf->counters[WriteAll])) { - /* special case - don't decrement, don't generic_make_request, + /* special case - don't decrement, don't submit_bio_noacct, * just fail immediately */ bio_io_error(bio); @@ -214,7 +214,7 @@ static bool faulty_make_request(struct mddev *mddev, struct bio *bio) } else bio_set_dev(bio, conf->rdev->bdev); - generic_make_request(bio); + submit_bio_noacct(bio); return true; } diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c index 26c75c0199fa..8efada3ee16f 100644 --- a/drivers/md/md-linear.c +++ b/drivers/md/md-linear.c @@ -267,7 +267,7 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio) struct bio *split = bio_split(bio, end_sector - bio_sector, GFP_NOIO, &mddev->bio_set); bio_chain(split, bio); - generic_make_request(bio); + submit_bio_noacct(bio); bio = split; } @@ -286,7 +286,7 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio) bio_sector); mddev_check_writesame(mddev, bio); mddev_check_write_zeroes(mddev, bio); - generic_make_request(bio); + submit_bio_noacct(bio); } return true; diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c index 152f9e65a226..277fdfd9ee54 100644 --- a/drivers/md/md-multipath.c +++ b/drivers/md/md-multipath.c @@ -131,7 +131,7 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio) mp_bh->bio.bi_private = mp_bh; mddev_check_writesame(mddev, &mp_bh->bio); mddev_check_write_zeroes(mddev, &mp_bh->bio); - generic_make_request(&mp_bh->bio); + submit_bio_noacct(&mp_bh->bio); return true; } @@ -348,7 +348,7 @@ static void multipathd(struct md_thread *thread) bio->bi_opf |= REQ_FAILFAST_TRANSPORT; bio->bi_end_io = multipath_end_request; bio->bi_private = mp_bh; - generic_make_request(bio); + submit_bio_noacct(bio); } } spin_unlock_irqrestore(&conf->device_lock, flags); diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 322386ff5d22..e9e91c8d8afc 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -495,7 +495,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio) zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO, &mddev->bio_set); bio_chain(split, bio); - generic_make_request(bio); + submit_bio_noacct(bio); bio = split; end = zone->zone_end; } else @@ -559,7 +559,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio) trace_block_bio_remap(bdev_get_queue(rdev->bdev), discard_bio, disk_devt(mddev->gendisk), bio->bi_iter.bi_sector); - 
generic_make_request(discard_bio); + submit_bio_noacct(discard_bio); } bio_endio(bio); } @@ -600,7 +600,7 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio) struct bio *split = bio_split(bio, sectors, GFP_NOIO, &mddev->bio_set); bio_chain(split, bio); - generic_make_request(bio); + submit_bio_noacct(bio); bio = split; } @@ -633,7 +633,7 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio) disk_devt(mddev->gendisk), bio_sector); mddev_check_writesame(mddev, bio); mddev_check_write_zeroes(mddev, bio); - generic_make_request(bio); + submit_bio_noacct(bio); return true; } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index dcd27f3da84e..2aa2649cca66 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -834,7 +834,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio) /* Just ignore it */ bio_endio(bio); else - generic_make_request(bio); + submit_bio_noacct(bio); bio = next; cond_resched(); } @@ -1312,7 +1312,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, struct bio *split = bio_split(bio, max_sectors, gfp, &conf->bio_split); bio_chain(split, bio); - generic_make_request(bio); + submit_bio_noacct(bio); bio = split; r1_bio->master_bio = bio; r1_bio->sectors = max_sectors; @@ -1338,7 +1338,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, trace_block_bio_remap(read_bio->bi_disk->queue, read_bio, disk_devt(mddev->gendisk), r1_bio->sector); - generic_make_request(read_bio); + submit_bio_noacct(read_bio); } static void raid1_write_request(struct mddev *mddev, struct bio *bio, @@ -1483,7 +1483,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, struct bio *split = bio_split(bio, max_sectors, GFP_NOIO, &conf->bio_split); bio_chain(split, bio); - generic_make_request(bio); + submit_bio_noacct(bio); bio = split; r1_bio->master_bio = bio; r1_bio->sectors = max_sectors; @@ -2240,7 +2240,7 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) atomic_inc(&r1_bio->remaining); md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio)); - generic_make_request(wbio); + submit_bio_noacct(wbio); } put_sync_write_buf(r1_bio, 1); @@ -2926,7 +2926,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, md_sync_acct_bio(bio, nr_sectors); if (read_targets == 1) bio->bi_opf &= ~MD_FAILFAST; - generic_make_request(bio); + submit_bio_noacct(bio); } } } else { @@ -2935,7 +2935,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, md_sync_acct_bio(bio, nr_sectors); if (read_targets == 1) bio->bi_opf &= ~MD_FAILFAST; - generic_make_request(bio); + submit_bio_noacct(bio); } return nr_sectors; } diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index ec136e44aef7..e45fd56cf584 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -917,7 +917,7 @@ static void flush_pending_writes(struct r10conf *conf) /* Just ignore it */ bio_endio(bio); else - generic_make_request(bio); + submit_bio_noacct(bio); bio = next; } blk_finish_plug(&plug); @@ -1102,7 +1102,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) /* Just ignore it */ bio_endio(bio); else - generic_make_request(bio); + submit_bio_noacct(bio); bio = next; } kfree(plug); @@ -1194,7 +1194,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, gfp, &conf->bio_split); bio_chain(split, bio); allow_barrier(conf); - generic_make_request(bio); + submit_bio_noacct(bio); wait_barrier(conf); bio = split; 
r10_bio->master_bio = bio; @@ -1221,7 +1221,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, trace_block_bio_remap(read_bio->bi_disk->queue, read_bio, disk_devt(mddev->gendisk), r10_bio->sector); - generic_make_request(read_bio); + submit_bio_noacct(read_bio); return; } @@ -1479,7 +1479,7 @@ retry_write: GFP_NOIO, &conf->bio_split); bio_chain(split, bio); allow_barrier(conf); - generic_make_request(bio); + submit_bio_noacct(bio); wait_barrier(conf); bio = split; r10_bio->master_bio = bio; @@ -2099,7 +2099,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) tbio->bi_opf |= MD_FAILFAST; tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset; bio_set_dev(tbio, conf->mirrors[d].rdev->bdev); - generic_make_request(tbio); + submit_bio_noacct(tbio); } /* Now write out to any replacement devices @@ -2118,7 +2118,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) atomic_inc(&r10_bio->remaining); md_sync_acct(conf->mirrors[d].replacement->bdev, bio_sectors(tbio)); - generic_make_request(tbio); + submit_bio_noacct(tbio); } done: @@ -2241,7 +2241,7 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) wbio = r10_bio->devs[1].bio; wbio2 = r10_bio->devs[1].repl_bio; /* Need to test wbio2->bi_end_io before we call - * generic_make_request as if the former is NULL, + * submit_bio_noacct as if the former is NULL, * the latter is free to free wbio2. */ if (wbio2 && !wbio2->bi_end_io) @@ -2249,13 +2249,13 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) if (wbio->bi_end_io) { atomic_inc(&conf->mirrors[d].rdev->nr_pending); md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio)); - generic_make_request(wbio); + submit_bio_noacct(wbio); } if (wbio2) { atomic_inc(&conf->mirrors[d].replacement->nr_pending); md_sync_acct(conf->mirrors[d].replacement->bdev, bio_sectors(wbio2)); - generic_make_request(wbio2); + submit_bio_noacct(wbio2); } } @@ -2889,7 +2889,7 @@ static void raid10_set_cluster_sync_high(struct r10conf *conf) * a number of r10_bio structures, one for each out-of-sync device. * As we setup these structures, we collect all bio's together into a list * which we then process collectively to add pages, and then process again - * to pass to generic_make_request. + * to pass to submit_bio_noacct. * * The r10_bio structures are linked using a borrowed master_bio pointer. * This link is counted in ->remaining. 
When the r10_bio that points to NULL @@ -3496,7 +3496,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, if (bio->bi_end_io == end_sync_read) { md_sync_acct_bio(bio, nr_sectors); bio->bi_status = 0; - generic_make_request(bio); + submit_bio_noacct(bio); } } @@ -4654,7 +4654,7 @@ read_more: md_sync_acct_bio(read_bio, r10_bio->sectors); atomic_inc(&r10_bio->remaining); read_bio->bi_next = NULL; - generic_make_request(read_bio); + submit_bio_noacct(read_bio); sectors_done += nr_sectors; if (sector_nr <= last) goto read_more; @@ -4717,7 +4717,7 @@ static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio) md_sync_acct_bio(b, r10_bio->sectors); atomic_inc(&r10_bio->remaining); b->bi_next = NULL; - generic_make_request(b); + submit_bio_noacct(b); } end_reshape_request(r10_bio); } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index ab8067f9ce8c..8dea4398b191 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -873,7 +873,7 @@ static void dispatch_bio_list(struct bio_list *tmp) struct bio *bio; while ((bio = bio_list_pop(tmp))) - generic_make_request(bio); + submit_bio_noacct(bio); } static int cmp_stripe(void *priv, struct list_head *a, struct list_head *b) @@ -1151,7 +1151,7 @@ again: if (should_defer && op_is_write(op)) bio_list_add(&pending_bios, bi); else - generic_make_request(bi); + submit_bio_noacct(bi); } if (rrdev) { if (s->syncing || s->expanding || s->expanded @@ -1201,7 +1201,7 @@ again: if (should_defer && op_is_write(op)) bio_list_add(&pending_bios, rbi); else - generic_make_request(rbi); + submit_bio_noacct(rbi); } if (!rdev && !rrdev) { if (op_is_write(op)) @@ -5289,7 +5289,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio) trace_block_bio_remap(align_bi->bi_disk->queue, align_bi, disk_devt(mddev->gendisk), raid_bio->bi_iter.bi_sector); - generic_make_request(align_bi); + submit_bio_noacct(align_bi); return 1; } else { rcu_read_unlock(); @@ -5309,7 +5309,7 @@ static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio) struct r5conf *conf = mddev->private; split = bio_split(raid_bio, sectors, GFP_NOIO, &conf->bio_split); bio_chain(split, raid_bio); - generic_make_request(raid_bio); + submit_bio_noacct(raid_bio); raid_bio = split; } diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 89afcf943bf8..f07fa47c251d 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -351,7 +351,7 @@ static void nvme_requeue_work(struct work_struct *work) * path. 
*/ bio->bi_disk = head->disk; - generic_make_request(bio); + submit_bio_noacct(bio); } } diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 083ffc5bc51b..b73cfa6a5141 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -852,7 +852,7 @@ static inline void rq_flush_dcache_pages(struct request *rq) extern int blk_register_queue(struct gendisk *disk); extern void blk_unregister_queue(struct gendisk *disk); -extern blk_qc_t generic_make_request(struct bio *bio); +blk_qc_t submit_bio_noacct(struct bio *bio); extern blk_qc_t direct_make_request(struct bio *bio); extern void blk_rq_init(struct request_queue *q, struct request *rq); extern void blk_put_request(struct request *); -- cgit v1.2.3 From 5a6c35f9af416114588298aa7a90b15bbed15a41 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 1 Jul 2020 10:59:47 +0200 Subject: block: remove direct_make_request Now that submit_bio_noacct has a decent blk-mq fast path there is no more need for this bypass. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-core.c | 28 ---------------------------- drivers/md/dm.c | 5 +---- drivers/nvme/host/multipath.c | 2 +- include/linux/blkdev.h | 1 - 4 files changed, 2 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/block/blk-core.c b/block/blk-core.c index 2ff166f0d24e..bf882b8d8445 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1211,34 +1211,6 @@ blk_qc_t submit_bio_noacct(struct bio *bio) } EXPORT_SYMBOL(submit_bio_noacct); -/** - * direct_make_request - hand a buffer directly to its device driver for I/O - * @bio: The bio describing the location in memory and on the device. - * - * This function behaves like submit_bio_noacct(), but does not protect - * against recursion. Must only be used if the called driver is known - * to be blk-mq based. 
- */ -blk_qc_t direct_make_request(struct bio *bio) -{ - struct gendisk *disk = bio->bi_disk; - - if (WARN_ON_ONCE(!disk->queue->mq_ops)) { - bio_io_error(bio); - return BLK_QC_T_NONE; - } - if (!submit_bio_checks(bio)) - return BLK_QC_T_NONE; - if (unlikely(bio_queue_enter(bio))) - return BLK_QC_T_NONE; - if (!blk_crypto_bio_prep(&bio)) { - blk_queue_exit(disk->queue); - return BLK_QC_T_NONE; - } - return blk_mq_submit_bio(bio); -} -EXPORT_SYMBOL_GPL(direct_make_request); - /** * submit_bio - submit a bio to the block device layer for I/O * @bio: The &struct bio which describes the I/O diff --git a/drivers/md/dm.c b/drivers/md/dm.c index b32b539dbace..2cb33896198c 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1302,10 +1302,7 @@ static blk_qc_t __map_bio(struct dm_target_io *tio) /* the bio has been remapped so dispatch it */ trace_block_bio_remap(clone->bi_disk->queue, clone, bio_dev(io->orig_bio), sector); - if (md->type == DM_TYPE_NVME_BIO_BASED) - ret = direct_make_request(clone); - else - ret = submit_bio_noacct(clone); + ret = submit_bio_noacct(clone); break; case DM_MAPIO_KILL: free_tio(tio); diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index f07fa47c251d..a986ac52c4cc 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -314,7 +314,7 @@ blk_qc_t nvme_ns_head_submit_bio(struct bio *bio) trace_block_bio_remap(bio->bi_disk->queue, bio, disk_devt(ns->head->disk), bio->bi_iter.bi_sector); - ret = direct_make_request(bio); + ret = submit_bio_noacct(bio); } else if (nvme_available_path(head)) { dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n"); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index b73cfa6a5141..1cc913ffdbe2 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -853,7 +853,6 @@ static inline void rq_flush_dcache_pages(struct request *rq) extern int blk_register_queue(struct gendisk *disk); extern void blk_unregister_queue(struct gendisk *disk); blk_qc_t submit_bio_noacct(struct bio *bio); -extern blk_qc_t direct_make_request(struct bio *bio); extern void blk_rq_init(struct request_queue *q, struct request *rq); extern void blk_put_request(struct request *); extern struct request *blk_get_request(struct request_queue *, unsigned int op, -- cgit v1.2.3 From 6b7b181b67aa8177e57732723106a0411570a86d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 26 Jun 2020 10:01:55 +0200 Subject: block: remove the bd_block_size field from struct block_device We can trivially calculate the block size from the inodes i_blkbits variable. Use that instead of keeping two redundant copies of the information in slightly different formats. 
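A minimal sketch of the equivalence this change relies on, using the same names as the hunks below (the sketch_ helper is illustrative and not part of the patch):

  /* The removed bd_block_size field and the inode's i_blkbits carry the
   * same information: blksize_bits() maps a byte size to its power-of-two
   * exponent, and shifting by i_blkbits recovers the byte size again.
   */
  static inline unsigned int sketch_block_size(struct block_device *bdev)
  {
  	return 1 << bdev->bd_inode->i_blkbits;	/* was: bdev->bd_block_size */
  }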
Reviewed-by: Johannes Thumshirn Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- fs/block_dev.c | 9 ++------- include/linux/blk_types.h | 1 - include/linux/blkdev.h | 2 +- 3 files changed, 3 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/fs/block_dev.c b/fs/block_dev.c index 8b7a9c76d33e..06d31e459048 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -105,10 +105,7 @@ EXPORT_SYMBOL(invalidate_bdev); static void set_init_blocksize(struct block_device *bdev) { - unsigned bsize = bdev_logical_block_size(bdev); - - bdev->bd_block_size = bsize; - bdev->bd_inode->i_blkbits = blksize_bits(bsize); + bdev->bd_inode->i_blkbits = blksize_bits(bdev_logical_block_size(bdev)); } int set_blocksize(struct block_device *bdev, int size) @@ -122,9 +119,8 @@ int set_blocksize(struct block_device *bdev, int size) return -EINVAL; /* Don't change the size if it is same as current */ - if (bdev->bd_block_size != size) { + if (bdev->bd_inode->i_blkbits != blksize_bits(size)) { sync_blockdev(bdev); - bdev->bd_block_size = size; bdev->bd_inode->i_blkbits = blksize_bits(size); kill_bdev(bdev); } @@ -889,7 +885,6 @@ struct block_device *bdget(dev_t dev) bdev->bd_contains = NULL; bdev->bd_super = NULL; bdev->bd_inode = inode; - bdev->bd_block_size = i_blocksize(inode); bdev->bd_part_count = 0; bdev->bd_invalidated = 0; inode->i_mode = S_IFBLK; diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index a602132cbe32..b01cd19bbe8a 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -33,7 +33,6 @@ struct block_device { struct list_head bd_holder_disks; #endif struct block_device * bd_contains; - unsigned bd_block_size; u8 bd_partno; struct hd_struct * bd_part; /* number of times partitions within this device have been opened. */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 1cc913ffdbe2..408eb66a82fd 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1543,7 +1543,7 @@ static inline unsigned int blksize_bits(unsigned int size) static inline unsigned int block_size(struct block_device *bdev) { - return bdev->bd_block_size; + return 1 << bdev->bd_inode->i_blkbits; } int kblockd_schedule_work(struct work_struct *work); -- cgit v1.2.3 From e556f6ba10f0f3c3484a1597382ceaec1e7bc700 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 26 Jun 2020 10:01:56 +0200 Subject: block: remove the bd_queue field from struct block_device Just use bd_disk->queue instead. 
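A minimal sketch of the conversion pattern applied throughout the hunks below (the helper name is illustrative; the patch simply open-codes bd_disk->queue at each call site):

  static inline struct request_queue *sketch_bdev_queue(struct block_device *bdev)
  {
  	/* was: return bdev->bd_queue; */
  	return bdev->bd_disk->queue;
  }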
Reviewed-by: Johannes Thumshirn Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- drivers/dax/super.c | 2 +- drivers/md/md.c | 2 +- drivers/nvme/target/core.c | 2 +- fs/block_dev.c | 11 ++++------- fs/direct-io.c | 4 ++-- fs/xfs/xfs_pwork.c | 2 +- include/linux/blk_types.h | 1 - mm/swapfile.c | 2 +- 8 files changed, 11 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 8e32345be0f7..f50828526331 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -59,7 +59,7 @@ EXPORT_SYMBOL(bdev_dax_pgoff); #if IS_ENABLED(CONFIG_FS_DAX) struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev) { - if (!blk_queue_dax(bdev->bd_queue)) + if (!blk_queue_dax(bdev->bd_disk->queue)) return NULL; return dax_get_by_host(bdev->bd_disk->disk_name); } diff --git a/drivers/md/md.c b/drivers/md/md.c index 7b7cb49be351..8bb69c61afe0 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -199,7 +199,7 @@ static int rdevs_init_serial(struct mddev *mddev) static int rdev_need_serial(struct md_rdev *rdev) { return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 && - rdev->bdev->bd_queue->nr_hw_queues != 1 && + rdev->bdev->bd_disk->queue->nr_hw_queues != 1 && test_bit(WriteMostly, &rdev->flags)); } diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 6e2f623e472e..6816507fba58 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -467,7 +467,7 @@ static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns) return -EINVAL; } - if (!blk_queue_pci_p2pdma(ns->bdev->bd_queue)) { + if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) { pr_err("peer-to-peer DMA is not supported by the driver of %s\n", ns->device_path); return -EINVAL; diff --git a/fs/block_dev.c b/fs/block_dev.c index 06d31e459048..68cb08bc1b7a 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -693,12 +693,12 @@ int bdev_read_page(struct block_device *bdev, sector_t sector, if (!ops->rw_page || bdev_get_integrity(bdev)) return result; - result = blk_queue_enter(bdev->bd_queue, 0); + result = blk_queue_enter(bdev->bd_disk->queue, 0); if (result) return result; result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, REQ_OP_READ); - blk_queue_exit(bdev->bd_queue); + blk_queue_exit(bdev->bd_disk->queue); return result; } @@ -729,7 +729,7 @@ int bdev_write_page(struct block_device *bdev, sector_t sector, if (!ops->rw_page || bdev_get_integrity(bdev)) return -EOPNOTSUPP; - result = blk_queue_enter(bdev->bd_queue, 0); + result = blk_queue_enter(bdev->bd_disk->queue, 0); if (result) return result; @@ -742,7 +742,7 @@ int bdev_write_page(struct block_device *bdev, sector_t sector, clean_page_buffers(page); unlock_page(page); } - blk_queue_exit(bdev->bd_queue); + blk_queue_exit(bdev->bd_disk->queue); return result; } @@ -1568,7 +1568,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) if (!bdev->bd_openers) { first_open = true; bdev->bd_disk = disk; - bdev->bd_queue = disk->queue; bdev->bd_contains = bdev; bdev->bd_partno = partno; @@ -1589,7 +1588,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) disk_put_part(bdev->bd_part); bdev->bd_part = NULL; bdev->bd_disk = NULL; - bdev->bd_queue = NULL; mutex_unlock(&bdev->bd_mutex); disk_unblock_events(disk); put_disk_and_module(disk); @@ -1666,7 +1664,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) disk_put_part(bdev->bd_part); bdev->bd_disk = NULL; bdev->bd_part = NULL; - 
bdev->bd_queue = NULL; if (bdev != bdev->bd_contains) __blkdev_put(bdev->bd_contains, mode, 1); bdev->bd_contains = NULL; diff --git a/fs/direct-io.c b/fs/direct-io.c index 6d5370eac2a8..183299892465 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -1387,8 +1387,8 @@ ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, * Attempt to prefetch the pieces we likely need later. */ prefetch(&bdev->bd_disk->part_tbl); - prefetch(bdev->bd_queue); - prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES); + prefetch(bdev->bd_disk->queue); + prefetch((char *)bdev->bd_disk->queue + SMP_CACHE_BYTES); return do_blockdev_direct_IO(iocb, inode, bdev, iter, get_block, end_io, submit_io, flags); diff --git a/fs/xfs/xfs_pwork.c b/fs/xfs/xfs_pwork.c index 4bcc3e61056c..b03333f1c84a 100644 --- a/fs/xfs/xfs_pwork.c +++ b/fs/xfs/xfs_pwork.c @@ -132,5 +132,5 @@ xfs_pwork_guess_datadev_parallelism( * For now we'll go with the most conservative setting possible, * which is two threads for an SSD and 1 thread everywhere else. */ - return blk_queue_nonrot(btp->bt_bdev->bd_queue) ? 2 : 1; + return blk_queue_nonrot(btp->bt_bdev->bd_disk->queue) ? 2 : 1; } diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index b01cd19bbe8a..667cd365fd04 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -39,7 +39,6 @@ struct block_device { unsigned bd_part_count; int bd_invalidated; struct gendisk * bd_disk; - struct request_queue * bd_queue; struct backing_dev_info *bd_bdi; struct list_head bd_list; /* diff --git a/mm/swapfile.c b/mm/swapfile.c index 987276c557d1..6c26916e95fd 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -2929,7 +2929,7 @@ static int claim_swapfile(struct swap_info_struct *p, struct inode *inode) * write only restriction. Hence zoned block devices are not * suitable for swapping. Disallow them here. */ - if (blk_queue_is_zoned(p->bdev->bd_queue)) + if (blk_queue_is_zoned(p->bdev->bd_disk->queue)) return -EINVAL; p->flags |= SWP_BLKDEV; } else if (S_ISREG(inode->i_mode)) { -- cgit v1.2.3 From 47b5e00322a3033851ab304f3c3873aebdfb4979 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 26 Jun 2020 10:01:57 +0200 Subject: block: remove the unused bd_private field from struct block_device Reviewed-by: Johannes Thumshirn Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- include/linux/blk_types.h | 7 ------- 1 file changed, 7 deletions(-) (limited to 'include') diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 667cd365fd04..b5f7105806e4 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -41,13 +41,6 @@ struct block_device { struct gendisk * bd_disk; struct backing_dev_info *bd_bdi; struct list_head bd_list; - /* - * Private data. You must have bd_claim'ed the block_device - * to use this. NOTE: bd_claim allows an owner to claim - * the same device multiple times, the owner must take special - * care to not mess up bd_private for that case. - */ - unsigned long bd_private; /* The counter of freeze processes */ int bd_fsfreeze_count; -- cgit v1.2.3 From 1008fe6dc36dd87dfd02d4307f49162f0b4f1665 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 26 Jun 2020 10:01:58 +0200 Subject: block: remove the all_bdevs list Instead just iterate over the inodes for the block device superblock. 
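A minimal sketch of the new iteration, mirroring the nr_blockdev_pages() hunk below; blockdev_superblock is the bdev filesystem's superblock, so no private all_bdevs list or bdev_lock round trip is needed:

  long sketch_nr_blockdev_pages(void)
  {
  	struct inode *inode;
  	long ret = 0;

  	/* walk every bdev inode on the blockdev superblock */
  	spin_lock(&blockdev_superblock->s_inode_list_lock);
  	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list)
  		ret += inode->i_mapping->nrpages;
  	spin_unlock(&blockdev_superblock->s_inode_list_lock);

  	return ret;
  }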
Reviewed-by: Johannes Thumshirn Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- fs/block_dev.c | 22 +++++++--------------- include/linux/blk_types.h | 1 - 2 files changed, 7 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/fs/block_dev.c b/fs/block_dev.c index 68cb08bc1b7a..2d2fcb50e78e 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -773,7 +773,6 @@ static void init_once(void *foo) memset(bdev, 0, sizeof(*bdev)); mutex_init(&bdev->bd_mutex); - INIT_LIST_HEAD(&bdev->bd_list); #ifdef CONFIG_SYSFS INIT_LIST_HEAD(&bdev->bd_holder_disks); #endif @@ -789,9 +788,6 @@ static void bdev_evict_inode(struct inode *inode) truncate_inode_pages_final(&inode->i_data); invalidate_inode_buffers(inode); /* is it needed here? */ clear_inode(inode); - spin_lock(&bdev_lock); - list_del_init(&bdev->bd_list); - spin_unlock(&bdev_lock); /* Detach inode from wb early as bdi_put() may free bdi->wb */ inode_detach_wb(inode); if (bdev->bd_bdi != &noop_backing_dev_info) { @@ -866,8 +862,6 @@ static int bdev_set(struct inode *inode, void *data) return 0; } -static LIST_HEAD(all_bdevs); - struct block_device *bdget(dev_t dev) { struct block_device *bdev; @@ -892,9 +886,6 @@ struct block_device *bdget(dev_t dev) inode->i_bdev = bdev; inode->i_data.a_ops = &def_blk_aops; mapping_set_gfp_mask(&inode->i_data, GFP_USER); - spin_lock(&bdev_lock); - list_add(&bdev->bd_list, &all_bdevs); - spin_unlock(&bdev_lock); unlock_new_inode(inode); } return bdev; @@ -915,13 +906,14 @@ EXPORT_SYMBOL(bdgrab); long nr_blockdev_pages(void) { - struct block_device *bdev; + struct inode *inode; long ret = 0; - spin_lock(&bdev_lock); - list_for_each_entry(bdev, &all_bdevs, bd_list) { - ret += bdev->bd_inode->i_mapping->nrpages; - } - spin_unlock(&bdev_lock); + + spin_lock(&blockdev_superblock->s_inode_list_lock); + list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) + ret += inode->i_mapping->nrpages; + spin_unlock(&blockdev_superblock->s_inode_list_lock); + return ret; } diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index b5f7105806e4..07facaf62b72 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -40,7 +40,6 @@ struct block_device { int bd_invalidated; struct gendisk * bd_disk; struct backing_dev_info *bd_bdi; - struct list_head bd_list; /* The counter of freeze processes */ int bd_fsfreeze_count; -- cgit v1.2.3 From d141b8bc5773cbbaf5b8530f08f94fc10fff9e8c Mon Sep 17 00:00:00 2001 From: Song Liu Date: Mon, 29 Jun 2020 23:28:43 -0700 Subject: perf: Expose get/put_callchain_entry() Sanitize and expose get/put_callchain_entry(). This would be used by bpf stack map. 
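A minimal sketch of the get/put pairing an outside caller is expected to follow once these helpers are exported (the caller name is hypothetical; the error handling mirrors get_perf_callchain() below, where a NULL return means the recursion context has already been dropped):

  static struct perf_callchain_entry *sketch_grab_entry(void)
  {
  	struct perf_callchain_entry *entry;
  	int rctx;

  	entry = get_callchain_entry(&rctx);
  	if (!entry)
  		return NULL;	/* nothing to put on failure */

  	/* ... fill entry->ip[] and entry->nr ... */

  	put_callchain_entry(rctx);
  	return entry;
  }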
Suggested-by: Peter Zijlstra Signed-off-by: Song Liu Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200630062846.664389-2-songliubraving@fb.com --- include/linux/perf_event.h | 2 ++ kernel/events/callchain.c | 13 ++++++------- 2 files changed, 8 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index b4bb32082342..00ab5efa3833 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1244,6 +1244,8 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs); extern int get_callchain_buffers(int max_stack); extern void put_callchain_buffers(void); +extern struct perf_callchain_entry *get_callchain_entry(int *rctx); +extern void put_callchain_entry(int rctx); extern int sysctl_perf_event_max_stack; extern int sysctl_perf_event_max_contexts_per_stack; diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c index 334d48b16c36..c6ce894e4ce9 100644 --- a/kernel/events/callchain.c +++ b/kernel/events/callchain.c @@ -149,7 +149,7 @@ void put_callchain_buffers(void) } } -static struct perf_callchain_entry *get_callchain_entry(int *rctx) +struct perf_callchain_entry *get_callchain_entry(int *rctx) { int cpu; struct callchain_cpus_entries *entries; @@ -159,8 +159,10 @@ static struct perf_callchain_entry *get_callchain_entry(int *rctx) return NULL; entries = rcu_dereference(callchain_cpus_entries); - if (!entries) + if (!entries) { + put_recursion_context(this_cpu_ptr(callchain_recursion), *rctx); return NULL; + } cpu = smp_processor_id(); @@ -168,7 +170,7 @@ static struct perf_callchain_entry *get_callchain_entry(int *rctx) (*rctx * perf_callchain_entry__sizeof())); } -static void +void put_callchain_entry(int rctx) { put_recursion_context(this_cpu_ptr(callchain_recursion), rctx); @@ -183,11 +185,8 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, int rctx; entry = get_callchain_entry(&rctx); - if (rctx == -1) - return NULL; - if (!entry) - goto exit_put; + return NULL; ctx.entry = entry; ctx.max_stack = max_stack; -- cgit v1.2.3 From fa28dcb82a38f8e3993b0fae9106b1a80b59e4f0 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Mon, 29 Jun 2020 23:28:44 -0700 Subject: bpf: Introduce helper bpf_get_task_stack() Introduce helper bpf_get_task_stack(), which dumps stack trace of given task. This is different to bpf_get_stack(), which gets stack track of current task. One potential use case of bpf_get_task_stack() is to call it from bpf_iter__task and dump all /proc//stack to a seq_file. bpf_get_task_stack() uses stack_trace_save_tsk() instead of get_perf_callchain() for kernel stack. The benefit of this choice is that stack_trace_save_tsk() doesn't require changes in arch/. The downside of using stack_trace_save_tsk() is that stack_trace_save_tsk() dumps the stack trace to unsigned long array. For 32-bit systems, we need to translate it to u64 array. 
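A rough BPF-side usage sketch for the task-iterator case mentioned above (a hypothetical program: the vmlinux.h include, buffer size and bpf_printk output are illustrative and not part of this patch):

  /* sketch_task_stack.bpf.c */
  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>

  #define NUM_FRAMES 64
  __u64 entries[NUM_FRAMES];	/* global array to stay within the BPF stack limit */

  SEC("iter/task")
  int dump_task_stack(struct bpf_iter__task *ctx)
  {
  	struct task_struct *task = ctx->task;
  	long nbytes;

  	if (!task)
  		return 0;

  	/* flags == 0: kernel stack, no skipped frames, no build ids */
  	nbytes = bpf_get_task_stack(task, entries, sizeof(entries), 0);
  	if (nbytes > 0)
  		bpf_printk("pid %d: %ld stack bytes\n", task->pid, nbytes);

  	return 0;
  }

  char LICENSE[] SEC("license") = "GPL";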
Signed-off-by: Song Liu Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200630062846.664389-3-songliubraving@fb.com --- include/linux/bpf.h | 1 + include/uapi/linux/bpf.h | 37 +++++++++++++++++++- kernel/bpf/stackmap.c | 77 +++++++++++++++++++++++++++++++++++++++--- kernel/bpf/verifier.c | 4 ++- kernel/trace/bpf_trace.c | 2 ++ scripts/bpf_helpers_doc.py | 2 ++ tools/include/uapi/linux/bpf.h | 37 +++++++++++++++++++- 7 files changed, 153 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 3d2ade703a35..0cd7f6884c5c 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1627,6 +1627,7 @@ extern const struct bpf_func_proto bpf_get_current_uid_gid_proto; extern const struct bpf_func_proto bpf_get_current_comm_proto; extern const struct bpf_func_proto bpf_get_stackid_proto; extern const struct bpf_func_proto bpf_get_stack_proto; +extern const struct bpf_func_proto bpf_get_task_stack_proto; extern const struct bpf_func_proto bpf_sock_map_update_proto; extern const struct bpf_func_proto bpf_sock_hash_update_proto; extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 0cb8ec948816..da9bf35a26f8 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3285,6 +3285,39 @@ union bpf_attr { * Dynamically cast a *sk* pointer to a *udp6_sock* pointer. * Return * *sk* if casting is valid, or NULL otherwise. + * + * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags) + * Description + * Return a user or a kernel stack in bpf program provided buffer. + * To achieve this, the helper needs *task*, which is a valid + * pointer to struct task_struct. To store the stacktrace, the + * bpf program provides *buf* with a nonnegative *size*. + * + * The last argument, *flags*, holds the number of stack frames to + * skip (from 0 to 255), masked with + * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set + * the following flags: + * + * **BPF_F_USER_STACK** + * Collect a user space stack instead of a kernel stack. + * **BPF_F_USER_BUILD_ID** + * Collect buildid+offset instead of ips for user stack, + * only valid if **BPF_F_USER_STACK** is also specified. + * + * **bpf_get_task_stack**\ () can collect up to + * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject + * to sufficient large buffer size. Note that + * this limit can be controlled with the **sysctl** program, and + * that it should be manually increased in order to profile long + * user stacks (such as stacks for Java programs). To do so, use: + * + * :: + * + * # sysctl kernel.perf_event_max_stack= + * Return + * A non-negative value equal to or less than *size* on success, + * or a negative error in case of failure. 
+ * */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3427,7 +3460,9 @@ union bpf_attr { FN(skc_to_tcp_sock), \ FN(skc_to_tcp_timewait_sock), \ FN(skc_to_tcp_request_sock), \ - FN(skc_to_udp6_sock), + FN(skc_to_udp6_sock), \ + FN(get_task_stack), \ + /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 071f98d0f7c6..5ad72ab2276b 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -348,6 +348,40 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, } } +static struct perf_callchain_entry * +get_callchain_entry_for_task(struct task_struct *task, u32 init_nr) +{ + struct perf_callchain_entry *entry; + int rctx; + + entry = get_callchain_entry(&rctx); + + if (!entry) + return NULL; + + entry->nr = init_nr + + stack_trace_save_tsk(task, (unsigned long *)(entry->ip + init_nr), + sysctl_perf_event_max_stack - init_nr, 0); + + /* stack_trace_save_tsk() works on unsigned long array, while + * perf_callchain_entry uses u64 array. For 32-bit systems, it is + * necessary to fix this mismatch. + */ + if (__BITS_PER_LONG != 64) { + unsigned long *from = (unsigned long *) entry->ip; + u64 *to = entry->ip; + int i; + + /* copy data from the end to avoid using extra buffer */ + for (i = entry->nr - 1; i >= (int)init_nr; i--) + to[i] = (u64)(from[i]); + } + + put_callchain_entry(rctx); + + return entry; +} + BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map, u64, flags) { @@ -448,8 +482,8 @@ const struct bpf_func_proto bpf_get_stackid_proto = { .arg3_type = ARG_ANYTHING, }; -BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size, - u64, flags) +static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task, + void *buf, u32 size, u64 flags) { u32 init_nr, trace_nr, copy_len, elem_size, num_elem; bool user_build_id = flags & BPF_F_USER_BUILD_ID; @@ -471,13 +505,22 @@ BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size, if (unlikely(size % elem_size)) goto clear; + /* cannot get valid user stack for task without user_mode regs */ + if (task && user && !user_mode(regs)) + goto err_fault; + num_elem = size / elem_size; if (sysctl_perf_event_max_stack < num_elem) init_nr = 0; else init_nr = sysctl_perf_event_max_stack - num_elem; - trace = get_perf_callchain(regs, init_nr, kernel, user, - sysctl_perf_event_max_stack, false, false); + + if (kernel && task) + trace = get_callchain_entry_for_task(task, init_nr); + else + trace = get_perf_callchain(regs, init_nr, kernel, user, + sysctl_perf_event_max_stack, + false, false); if (unlikely(!trace)) goto err_fault; @@ -505,6 +548,12 @@ clear: return err; } +BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size, + u64, flags) +{ + return __bpf_get_stack(regs, NULL, buf, size, flags); +} + const struct bpf_func_proto bpf_get_stack_proto = { .func = bpf_get_stack, .gpl_only = true, @@ -515,6 +564,26 @@ const struct bpf_func_proto bpf_get_stack_proto = { .arg4_type = ARG_ANYTHING, }; +BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf, + u32, size, u64, flags) +{ + struct pt_regs *regs = task_pt_regs(task); + + return __bpf_get_stack(regs, task, buf, size, flags); +} + +static int bpf_get_task_stack_btf_ids[5]; +const struct bpf_func_proto bpf_get_task_stack_proto = { + .func = bpf_get_task_stack, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_BTF_ID, + 
.arg2_type = ARG_PTR_TO_UNINIT_MEM, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, + .arg4_type = ARG_ANYTHING, + .btf_id = bpf_get_task_stack_btf_ids, +}; + /* Called from eBPF program */ static void *stack_map_lookup_elem(struct bpf_map *map, void *key) { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 7de98906ddf4..b608185e1ffd 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -4864,7 +4864,9 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn if (err) return err; - if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) { + if ((func_id == BPF_FUNC_get_stack || + func_id == BPF_FUNC_get_task_stack) && + !env->prog->has_callchain_buf) { const char *err_str; #ifdef CONFIG_PERF_EVENTS diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 5d59dda5f661..977ba3b6f6c6 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1137,6 +1137,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_ringbuf_query_proto; case BPF_FUNC_jiffies64: return &bpf_jiffies64_proto; + case BPF_FUNC_get_task_stack: + return &bpf_get_task_stack_proto; default: return NULL; } diff --git a/scripts/bpf_helpers_doc.py b/scripts/bpf_helpers_doc.py index 6bab40ff442e..6843376733df 100755 --- a/scripts/bpf_helpers_doc.py +++ b/scripts/bpf_helpers_doc.py @@ -426,6 +426,7 @@ class PrinterHelpers(Printer): 'struct tcp_timewait_sock', 'struct tcp_request_sock', 'struct udp6_sock', + 'struct task_struct', 'struct __sk_buff', 'struct sk_msg_md', @@ -468,6 +469,7 @@ class PrinterHelpers(Printer): 'struct tcp_timewait_sock', 'struct tcp_request_sock', 'struct udp6_sock', + 'struct task_struct', } mapped_types = { 'u8': '__u8', diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 0cb8ec948816..da9bf35a26f8 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -3285,6 +3285,39 @@ union bpf_attr { * Dynamically cast a *sk* pointer to a *udp6_sock* pointer. * Return * *sk* if casting is valid, or NULL otherwise. + * + * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags) + * Description + * Return a user or a kernel stack in bpf program provided buffer. + * To achieve this, the helper needs *task*, which is a valid + * pointer to struct task_struct. To store the stacktrace, the + * bpf program provides *buf* with a nonnegative *size*. + * + * The last argument, *flags*, holds the number of stack frames to + * skip (from 0 to 255), masked with + * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set + * the following flags: + * + * **BPF_F_USER_STACK** + * Collect a user space stack instead of a kernel stack. + * **BPF_F_USER_BUILD_ID** + * Collect buildid+offset instead of ips for user stack, + * only valid if **BPF_F_USER_STACK** is also specified. + * + * **bpf_get_task_stack**\ () can collect up to + * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject + * to sufficient large buffer size. Note that + * this limit can be controlled with the **sysctl** program, and + * that it should be manually increased in order to profile long + * user stacks (such as stacks for Java programs). To do so, use: + * + * :: + * + * # sysctl kernel.perf_event_max_stack= + * Return + * A non-negative value equal to or less than *size* on success, + * or a negative error in case of failure. 
+ * */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3427,7 +3460,9 @@ union bpf_attr { FN(skc_to_tcp_sock), \ FN(skc_to_tcp_timewait_sock), \ FN(skc_to_tcp_request_sock), \ - FN(skc_to_udp6_sock), + FN(skc_to_udp6_sock), \ + FN(get_task_stack), \ + /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call -- cgit v1.2.3 From 1fc2dd1864c2b18860fb619caeee758504c3aac8 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Wed, 1 Jul 2020 16:53:40 +0100 Subject: firmware: arm_scmi: Add notification protocol-registration Add the core SCMI notifications protocol-registration support: allow protocols to register their own set of supported events, during their initialization phase. Notification core can track multiple platform instances by their handles. Link: https://lore.kernel.org/r/20200701155348.52864-2-cristian.marussi@arm.com Reviewed-by: Jonathan Cameron Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/Makefile | 2 +- drivers/firmware/arm_scmi/common.h | 4 + drivers/firmware/arm_scmi/notify.c | 440 +++++++++++++++++++++++++++++++++++++ drivers/firmware/arm_scmi/notify.h | 56 +++++ include/linux/scmi_protocol.h | 3 + 5 files changed, 504 insertions(+), 1 deletion(-) create mode 100644 drivers/firmware/arm_scmi/notify.c create mode 100644 drivers/firmware/arm_scmi/notify.h (limited to 'include') diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile index 70c5a8c986a5..6f9cbc4aef22 100644 --- a/drivers/firmware/arm_scmi/Makefile +++ b/drivers/firmware/arm_scmi/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o scmi-transport.o scmi-bus-y = bus.o -scmi-driver-y = driver.o +scmi-driver-y = driver.o notify.o scmi-transport-y = shmem.o scmi-transport-$(CONFIG_MAILBOX) += mailbox.o scmi-transport-$(CONFIG_HAVE_ARM_SMCCC_DISCOVERY) += smc.o diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index 31fe5a22a011..c113e578cc6c 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -6,6 +6,8 @@ * * Copyright (C) 2018 ARM Ltd. */ +#ifndef _SCMI_COMMON_H +#define _SCMI_COMMON_H #include #include @@ -235,3 +237,5 @@ void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem, void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem); bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem, struct scmi_xfer *xfer); + +#endif /* _SCMI_COMMON_H */ diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c new file mode 100644 index 000000000000..0505433043d8 --- /dev/null +++ b/drivers/firmware/arm_scmi/notify.c @@ -0,0 +1,440 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Notification support + * + * Copyright (C) 2020 ARM Ltd. + */ +/** + * DOC: Theory of operation + * + * SCMI Protocol specification allows the platform to signal events to + * interested agents via notification messages: this is an implementation + * of the dispatch and delivery of such notifications to the interested users + * inside the Linux kernel. + * + * An SCMI Notification core instance is initialized for each active platform + * instance identified by the means of the usual &struct scmi_handle. 
+ * + * Each SCMI Protocol implementation, during its initialization, registers with + * this core its set of supported events using scmi_register_protocol_events(): + * all the needed descriptors are stored in the &struct registered_protocols and + * &struct registered_events arrays. + */ + +#define dev_fmt(fmt) "SCMI Notifications - " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "notify.h" + +#define SCMI_MAX_PROTO 256 + +#define PROTO_ID_MASK GENMASK(31, 24) +#define EVT_ID_MASK GENMASK(23, 16) +#define SRC_ID_MASK GENMASK(15, 0) + +/* + * Builds an unsigned 32bit key from the given input tuple to be used + * as a key in hashtables. + */ +#define MAKE_HASH_KEY(p, e, s) \ + (FIELD_PREP(PROTO_ID_MASK, (p)) | \ + FIELD_PREP(EVT_ID_MASK, (e)) | \ + FIELD_PREP(SRC_ID_MASK, (s))) + +#define MAKE_ALL_SRCS_KEY(p, e) MAKE_HASH_KEY((p), (e), SRC_ID_MASK) + +struct scmi_registered_events_desc; + +/** + * struct scmi_notify_instance - Represents an instance of the notification + * core + * @gid: GroupID used for devres + * @handle: A reference to the platform instance + * @registered_protocols: A statically allocated array containing pointers to + * all the registered protocol-level specific information + * related to events' handling + * + * Each platform instance, represented by a handle, has its own instance of + * the notification subsystem represented by this structure. + */ +struct scmi_notify_instance { + void *gid; + struct scmi_handle *handle; + struct scmi_registered_events_desc **registered_protocols; +}; + +/** + * struct events_queue - Describes a queue and its associated worker + * @sz: Size in bytes of the related kfifo + * @kfifo: A dedicated Kernel kfifo descriptor + * + * Each protocol has its own dedicated events_queue descriptor. + */ +struct events_queue { + size_t sz; + struct kfifo kfifo; +}; + +/** + * struct scmi_event_header - A utility header + * @timestamp: The timestamp, in nanoseconds (boottime), which was associated + * to this event as soon as it entered the SCMI RX ISR + * @evt_id: Event ID (corresponds to the Event MsgID for this Protocol) + * @payld_sz: Effective size of the embedded message payload which follows + * @payld: A reference to the embedded event payload + * + * This header is prepended to each received event message payload before + * queueing it on the related &struct events_queue. + */ +struct scmi_event_header { + u64 timestamp; + u8 evt_id; + size_t payld_sz; + u8 payld[]; +} __packed; + +struct scmi_registered_event; + +/** + * struct scmi_registered_events_desc - Protocol Specific information + * @id: Protocol ID + * @ops: Protocol specific and event-related operations + * @equeue: The embedded per-protocol events_queue + * @ni: A reference to the initialized instance descriptor + * @eh: A reference to pre-allocated buffer to be used as a scratch area by the + * deferred worker when fetching data from the kfifo + * @eh_sz: Size of the pre-allocated buffer @eh + * @in_flight: A reference to an in flight &struct scmi_registered_event + * @num_events: Number of events in @registered_events + * @registered_events: A dynamically allocated array holding all the registered + * events' descriptors, whose fixed-size is determined at + * compile time. + * + * All protocols that register at least one event have their protocol-specific + * information stored here, together with the embedded allocated events_queue. 
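Referring back to the MAKE_HASH_KEY()/MAKE_ALL_SRCS_KEY() macros defined earlier in this file, a worked example of the key layout (the 0x11 protocol ID is the SCMI power protocol, used here purely for illustration):

/*
 *   31        24 23        16 15                0
 *  +------------+------------+------------------+
 *  |  proto_id  |   evt_id   |      src_id      |
 *  +------------+------------+------------------+
 *
 *  MAKE_HASH_KEY(0x11, 0x0, 3)  == 0x11000003  (power, event 0, domain 3)
 *  MAKE_ALL_SRCS_KEY(0x11, 0x0) == 0x1100ffff  (src_id field saturated)
 */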
+ * These descriptors are stored in the @registered_protocols array at protocol + * registration time. + * + * Once these descriptors are successfully registered, they are NEVER again + * removed or modified since protocols do not unregister ever, so that, once + * we safely grab a NON-NULL reference from the array we can keep it and use it. + */ +struct scmi_registered_events_desc { + u8 id; + const struct scmi_event_ops *ops; + struct events_queue equeue; + struct scmi_notify_instance *ni; + struct scmi_event_header *eh; + size_t eh_sz; + void *in_flight; + int num_events; + struct scmi_registered_event **registered_events; +}; + +/** + * struct scmi_registered_event - Event Specific Information + * @proto: A reference to the associated protocol descriptor + * @evt: A reference to the associated event descriptor (as provided at + * registration time) + * @report: A pre-allocated buffer used by the deferred worker to fill a + * customized event report + * @num_sources: The number of possible sources for this event as stated at + * events' registration time + * @sources: A reference to a dynamically allocated array used to refcount the + * events' enable requests for all the existing sources + * @sources_mtx: A mutex to serialize the access to @sources + * + * All registered events are represented by one of these structures that are + * stored in the @registered_events array at protocol registration time. + * + * Once these descriptors are successfully registered, they are NEVER again + * removed or modified since protocols do not unregister ever, so that once we + * safely grab a NON-NULL reference from the table we can keep it and use it. + */ +struct scmi_registered_event { + struct scmi_registered_events_desc *proto; + const struct scmi_event *evt; + void *report; + u32 num_sources; + refcount_t *sources; + /* locking to serialize the access to sources */ + struct mutex sources_mtx; +}; + +/** + * scmi_kfifo_free() - Devres action helper to free the kfifo + * @kfifo: The kfifo to free + */ +static void scmi_kfifo_free(void *kfifo) +{ + kfifo_free((struct kfifo *)kfifo); +} + +/** + * scmi_initialize_events_queue() - Allocate/Initialize a kfifo buffer + * @ni: A reference to the notification instance to use + * @equeue: The events_queue to initialize + * @sz: Size of the kfifo buffer to allocate + * + * Allocate a buffer for the kfifo and initialize it. + * + * Return: 0 on Success + */ +static int scmi_initialize_events_queue(struct scmi_notify_instance *ni, + struct events_queue *equeue, size_t sz) +{ + if (kfifo_alloc(&equeue->kfifo, sz, GFP_KERNEL)) + return -ENOMEM; + /* Size could have been roundup to power-of-two */ + equeue->sz = kfifo_size(&equeue->kfifo); + + return devm_add_action_or_reset(ni->handle->dev, scmi_kfifo_free, + &equeue->kfifo); +} + +/** + * scmi_allocate_registered_events_desc() - Allocate a registered events' + * descriptor + * @ni: A reference to the &struct scmi_notify_instance notification instance + * to use + * @proto_id: Protocol ID + * @queue_sz: Size of the associated queue to allocate + * @eh_sz: Size of the event header scratch area to pre-allocate + * @num_events: Number of events to support (size of @registered_events) + * @ops: Pointer to a struct holding references to protocol specific helpers + * needed during events handling + * + * It is supposed to be called only once for each protocol at protocol + * initialization time, so it warns if the requested protocol is found already + * registered. 
+ * + * Return: The allocated and registered descriptor on Success + */ +static struct scmi_registered_events_desc * +scmi_allocate_registered_events_desc(struct scmi_notify_instance *ni, + u8 proto_id, size_t queue_sz, size_t eh_sz, + int num_events, + const struct scmi_event_ops *ops) +{ + int ret; + struct scmi_registered_events_desc *pd; + + /* Ensure protocols are up to date */ + smp_rmb(); + if (WARN_ON(ni->registered_protocols[proto_id])) + return ERR_PTR(-EINVAL); + + pd = devm_kzalloc(ni->handle->dev, sizeof(*pd), GFP_KERNEL); + if (!pd) + return ERR_PTR(-ENOMEM); + pd->id = proto_id; + pd->ops = ops; + pd->ni = ni; + + ret = scmi_initialize_events_queue(ni, &pd->equeue, queue_sz); + if (ret) + return ERR_PTR(ret); + + pd->eh = devm_kzalloc(ni->handle->dev, eh_sz, GFP_KERNEL); + if (!pd->eh) + return ERR_PTR(-ENOMEM); + pd->eh_sz = eh_sz; + + pd->registered_events = devm_kcalloc(ni->handle->dev, num_events, + sizeof(char *), GFP_KERNEL); + if (!pd->registered_events) + return ERR_PTR(-ENOMEM); + pd->num_events = num_events; + + return pd; +} + +/** + * scmi_register_protocol_events() - Register Protocol Events with the core + * @handle: The handle identifying the platform instance against which the + * the protocol's events are registered + * @proto_id: Protocol ID + * @queue_sz: Size in bytes of the associated queue to be allocated + * @ops: Protocol specific event-related operations + * @evt: Event descriptor array + * @num_events: Number of events in @evt array + * @num_sources: Number of possible sources for this protocol on this + * platform. + * + * Used by SCMI Protocols initialization code to register with the notification + * core the list of supported events and their descriptors: takes care to + * pre-allocate and store all needed descriptors, scratch buffers and event + * queues. 
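To make the calling convention concrete, a minimal registration sketch follows; every foo_* identifier and the payload/report sizes are hypothetical, and the real per-protocol hook-ups land in later patches of this series:

/* one descriptor per supported event */
static const struct scmi_event foo_events[] = {
	{
		.id = 0x0,		/* event message_id within the protocol */
		.max_payld_sz = 12,	/* largest notification payload, bytes  */
		.max_report_sz = 24,	/* largest report handed to users       */
	},
};

static int foo_set_notify_enabled(const struct scmi_handle *handle,
				  u8 evt_id, u32 src_id, bool enable)
{
	/* send the protocol-specific NOTIFY command to the platform here */
	return 0;
}

static const struct scmi_event_ops foo_event_ops = {
	.set_notify_enabled = foo_set_notify_enabled,
};

/* from the protocol's own init, where handle and num_domains are known */
scmi_register_protocol_events(handle, proto_id, SCMI_PROTO_QUEUE_SZ,
			      &foo_event_ops, foo_events,
			      ARRAY_SIZE(foo_events), num_domains);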
+ * + * Return: 0 on Success + */ +int scmi_register_protocol_events(const struct scmi_handle *handle, + u8 proto_id, size_t queue_sz, + const struct scmi_event_ops *ops, + const struct scmi_event *evt, int num_events, + int num_sources) +{ + int i; + size_t payld_sz = 0; + struct scmi_registered_events_desc *pd; + struct scmi_notify_instance *ni; + + if (!ops || !evt) + return -EINVAL; + + /* Ensure notify_priv is updated */ + smp_rmb(); + if (!handle->notify_priv) + return -ENOMEM; + ni = handle->notify_priv; + + /* Attach to the notification main devres group */ + if (!devres_open_group(ni->handle->dev, ni->gid, GFP_KERNEL)) + return -ENOMEM; + + for (i = 0; i < num_events; i++) + payld_sz = max_t(size_t, payld_sz, evt[i].max_payld_sz); + payld_sz += sizeof(struct scmi_event_header); + + pd = scmi_allocate_registered_events_desc(ni, proto_id, queue_sz, + payld_sz, num_events, ops); + if (IS_ERR(pd)) + goto err; + + for (i = 0; i < num_events; i++, evt++) { + struct scmi_registered_event *r_evt; + + r_evt = devm_kzalloc(ni->handle->dev, sizeof(*r_evt), + GFP_KERNEL); + if (!r_evt) + goto err; + r_evt->proto = pd; + r_evt->evt = evt; + + r_evt->sources = devm_kcalloc(ni->handle->dev, num_sources, + sizeof(refcount_t), GFP_KERNEL); + if (!r_evt->sources) + goto err; + r_evt->num_sources = num_sources; + mutex_init(&r_evt->sources_mtx); + + r_evt->report = devm_kzalloc(ni->handle->dev, + evt->max_report_sz, GFP_KERNEL); + if (!r_evt->report) + goto err; + + pd->registered_events[i] = r_evt; + /* Ensure events are updated */ + smp_wmb(); + dev_dbg(handle->dev, "registered event - %lX\n", + MAKE_ALL_SRCS_KEY(r_evt->proto->id, r_evt->evt->id)); + } + + /* Register protocol and events...it will never be removed */ + ni->registered_protocols[proto_id] = pd; + /* Ensure protocols are updated */ + smp_wmb(); + + devres_close_group(ni->handle->dev, ni->gid); + + return 0; + +err: + dev_warn(handle->dev, "Proto:%X - Registration Failed !\n", proto_id); + /* A failing protocol registration does not trigger full failure */ + devres_close_group(ni->handle->dev, ni->gid); + + return -ENOMEM; +} + +/** + * scmi_notification_init() - Initializes Notification Core Support + * @handle: The handle identifying the platform instance to initialize + * + * This function lays out all the basic resources needed by the notification + * core instance identified by the provided handle: once done, all of the + * SCMI Protocols can register their events with the core during their own + * initializations. + * + * Note that failing to initialize the core notifications support does not + * cause the whole SCMI Protocols stack to fail its initialization. + * + * SCMI Notification Initialization happens in 2 steps: + * * initialization: basic common allocations (this function) + * * registration: protocols asynchronously come into life and registers their + * own supported list of events with the core; this causes + * further per-protocol allocations + * + * Any user's callback registration attempt, referring a still not registered + * event, will be registered as pending and finalized later (if possible) + * by scmi_protocols_late_init() work. + * This allows for lazy initialization of SCMI Protocols due to late (or + * missing) SCMI drivers' modules loading. 
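The expected call site is not part of this patch (the driver core wiring is added separately), but the non-fatal behaviour described above suggests a probe-time sketch along these lines:

/* in the SCMI core probe path, once the handle has been set up */
if (scmi_notification_init(handle))
	dev_err(dev, "SCMI Notifications NOT available.\n");

/* ... and symmetrically on the teardown path */
scmi_notification_exit(handle);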
+ * + * Return: 0 on Success + */ +int scmi_notification_init(struct scmi_handle *handle) +{ + void *gid; + struct scmi_notify_instance *ni; + + gid = devres_open_group(handle->dev, NULL, GFP_KERNEL); + if (!gid) + return -ENOMEM; + + ni = devm_kzalloc(handle->dev, sizeof(*ni), GFP_KERNEL); + if (!ni) + goto err; + + ni->gid = gid; + ni->handle = handle; + + ni->registered_protocols = devm_kcalloc(handle->dev, SCMI_MAX_PROTO, + sizeof(char *), GFP_KERNEL); + if (!ni->registered_protocols) + goto err; + + handle->notify_priv = ni; + /* Ensure handle is up to date */ + smp_wmb(); + + dev_info(handle->dev, "Core Enabled.\n"); + + devres_close_group(handle->dev, ni->gid); + + return 0; + +err: + dev_warn(handle->dev, "Initialization Failed.\n"); + devres_release_group(handle->dev, NULL); + return -ENOMEM; +} + +/** + * scmi_notification_exit() - Shutdown and clean Notification core + * @handle: The handle identifying the platform instance to shutdown + */ +void scmi_notification_exit(struct scmi_handle *handle) +{ + struct scmi_notify_instance *ni; + + /* Ensure notify_priv is updated */ + smp_rmb(); + if (!handle->notify_priv) + return; + ni = handle->notify_priv; + + devres_release_group(ni->handle->dev, ni->gid); +} diff --git a/drivers/firmware/arm_scmi/notify.h b/drivers/firmware/arm_scmi/notify.h new file mode 100644 index 000000000000..48702e42995f --- /dev/null +++ b/drivers/firmware/arm_scmi/notify.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * System Control and Management Interface (SCMI) Message Protocol + * notification header file containing some definitions, structures + * and function prototypes related to SCMI Notification handling. + * + * Copyright (C) 2020 ARM Ltd. + */ +#ifndef _SCMI_NOTIFY_H +#define _SCMI_NOTIFY_H + +#include +#include + +#define SCMI_PROTO_QUEUE_SZ 4096 + +/** + * struct scmi_event - Describes an event to be supported + * @id: Event ID + * @max_payld_sz: Max possible size for the payload of a notification message + * @max_report_sz: Max possible size for the report of a notification message + * + * Each SCMI protocol, during its initialization phase, can describe the events + * it wishes to support in a few struct scmi_event and pass them to the core + * using scmi_register_protocol_events(). + */ +struct scmi_event { + u8 id; + size_t max_payld_sz; + size_t max_report_sz; +}; + +/** + * struct scmi_event_ops - Protocol helpers called by the notification core. + * @set_notify_enabled: Enable/disable the required evt_id/src_id notifications + * using the proper custom protocol commands. + * Return 0 on Success + * + * Context: Helpers described in &struct scmi_event_ops are called only in + * process context. 
+ */ +struct scmi_event_ops { + int (*set_notify_enabled)(const struct scmi_handle *handle, + u8 evt_id, u32 src_id, bool enabled); +}; + +int scmi_notification_init(struct scmi_handle *handle); +void scmi_notification_exit(struct scmi_handle *handle); + +int scmi_register_protocol_events(const struct scmi_handle *handle, + u8 proto_id, size_t queue_sz, + const struct scmi_event_ops *ops, + const struct scmi_event *evt, int num_events, + int num_sources); + +#endif /* _SCMI_NOTIFY_H */ diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 73911d156a39..9a34b02a9cce 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -233,6 +233,8 @@ struct scmi_reset_ops { * protocol(for internal use only) * @reset_priv: pointer to private data structure specific to reset * protocol(for internal use only) + * @notify_priv: pointer to private data structure specific to notifications + * (for internal use only) */ struct scmi_handle { struct device *dev; @@ -248,6 +250,7 @@ struct scmi_handle { void *power_priv; void *sensor_priv; void *reset_priv; + void *notify_priv; }; enum scmi_std_protocol { -- cgit v1.2.3 From e7c215f358a350c4bc326b9cea86763f480a97f9 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Wed, 1 Jul 2020 16:53:41 +0100 Subject: firmware: arm_scmi: Add notification callbacks-registration Add the core SCMI notifications callbacks-registration support: allow users to register their own callbacks against the desired events. Whenever a registration request is issued against a still non existent event, mark such request as pending for later processing, in order to account for possible late initializations of SCMI Protocols associated to loadable drivers. Link: https://lore.kernel.org/r/20200701155348.52864-3-cristian.marussi@arm.com Reviewed-by: Jonathan Cameron Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/notify.c | 726 +++++++++++++++++++++++++++++++++++++ include/linux/scmi_protocol.h | 46 +++ 2 files changed, 772 insertions(+) (limited to 'include') diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c index 0505433043d8..79615eef0148 100644 --- a/drivers/firmware/arm_scmi/notify.c +++ b/drivers/firmware/arm_scmi/notify.c @@ -19,18 +19,50 @@ * this core its set of supported events using scmi_register_protocol_events(): * all the needed descriptors are stored in the &struct registered_protocols and * &struct registered_events arrays. + * + * Kernel users interested in some specific event can register their callbacks + * providing the usual notifier_block descriptor, since this core implements + * events' delivery using the standard Kernel notification chains machinery. + * + * Given the number of possible events defined by SCMI and the extensibility + * of the SCMI Protocol itself, the underlying notification chains are created + * and destroyed dynamically on demand depending on the number of users + * effectively registered for an event, so that no support structures or chains + * are allocated until at least one user has registered a notifier_block for + * such event. Similarly, events' generation itself is enabled at the platform + * level only after at least one user has registered, and it is shutdown after + * the last user for that event has gone. + * + * All users provided callbacks and allocated notification-chains are stored in + * the @registered_events_handlers hashtable. 
Callbacks' registration requests + * for still to be registered events are instead kept in the dedicated common + * hashtable @pending_events_handlers. + * + * An event is identified univocally by the tuple (proto_id, evt_id, src_id) + * and is served by its own dedicated notification chain; information contained + * in such tuples is used, in a few different ways, to generate the needed + * hash-keys. + * + * Here proto_id and evt_id are simply the protocol_id and message_id numbers + * as described in the SCMI Protocol specification, while src_id represents an + * optional, protocol dependent, source identifier (like domain_id, perf_id + * or sensor_id and so forth). */ #define dev_fmt(fmt) "SCMI Notifications - " fmt +#define pr_fmt(fmt) "SCMI Notifications - " fmt #include #include #include #include #include +#include #include #include +#include #include +#include #include #include #include @@ -55,6 +87,86 @@ #define MAKE_ALL_SRCS_KEY(p, e) MAKE_HASH_KEY((p), (e), SRC_ID_MASK) +/* + * Assumes that the stored obj includes its own hash-key in a field named 'key': + * with this simplification this macro can be equally used for all the objects' + * types hashed by this implementation. + * + * @__ht: The hashtable name + * @__obj: A pointer to the object type to be retrieved from the hashtable; + * it will be used as a cursor while scanning the hastable and it will + * be possibly left as NULL when @__k is not found + * @__k: The key to search for + */ +#define KEY_FIND(__ht, __obj, __k) \ +({ \ + typeof(__k) k_ = __k; \ + typeof(__obj) obj_; \ + \ + hash_for_each_possible((__ht), obj_, hash, k_) \ + if (obj_->key == k_) \ + break; \ + __obj = obj_; \ +}) + +#define KEY_XTRACT_PROTO_ID(key) FIELD_GET(PROTO_ID_MASK, (key)) +#define KEY_XTRACT_EVT_ID(key) FIELD_GET(EVT_ID_MASK, (key)) +#define KEY_XTRACT_SRC_ID(key) FIELD_GET(SRC_ID_MASK, (key)) + +/* + * A set of macros used to access safely @registered_protocols and + * @registered_events arrays; these are fixed in size and each entry is possibly + * populated at protocols' registration time and then only read but NEVER + * modified or removed. 
+ */ +#define SCMI_GET_PROTO(__ni, __pid) \ +({ \ + typeof(__ni) ni_ = __ni; \ + struct scmi_registered_events_desc *__pd = NULL; \ + \ + if (ni_) \ + __pd = READ_ONCE(ni_->registered_protocols[(__pid)]); \ + __pd; \ +}) + +#define SCMI_GET_REVT_FROM_PD(__pd, __eid) \ +({ \ + typeof(__pd) pd_ = __pd; \ + typeof(__eid) eid_ = __eid; \ + struct scmi_registered_event *__revt = NULL; \ + \ + if (pd_ && eid_ < pd_->num_events) \ + __revt = READ_ONCE(pd_->registered_events[eid_]); \ + __revt; \ +}) + +#define SCMI_GET_REVT(__ni, __pid, __eid) \ +({ \ + struct scmi_registered_event *__revt; \ + struct scmi_registered_events_desc *__pd; \ + \ + __pd = SCMI_GET_PROTO((__ni), (__pid)); \ + __revt = SCMI_GET_REVT_FROM_PD(__pd, (__eid)); \ + __revt; \ +}) + +/* A couple of utility macros to limit cruft when calling protocols' helpers */ +#define REVT_NOTIFY_SET_STATUS(revt, eid, sid, state) \ +({ \ + typeof(revt) r = revt; \ + r->proto->ops->set_notify_enabled(r->proto->ni->handle, \ + (eid), (sid), (state)); \ +}) + +#define REVT_NOTIFY_ENABLE(revt, eid, sid) \ + REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), true) + +#define REVT_NOTIFY_DISABLE(revt, eid, sid) \ + REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), false) + +#define SCMI_PENDING_HASH_SZ 4 +#define SCMI_REGISTERED_HASH_SZ 6 + struct scmi_registered_events_desc; /** @@ -62,9 +174,13 @@ struct scmi_registered_events_desc; * core * @gid: GroupID used for devres * @handle: A reference to the platform instance + * @init_work: A work item to perform final initializations of pending handlers + * @pending_mtx: A mutex to protect @pending_events_handlers * @registered_protocols: A statically allocated array containing pointers to * all the registered protocol-level specific information * related to events' handling + * @pending_events_handlers: An hashtable containing all pending events' + * handlers descriptors * * Each platform instance, represented by a handle, has its own instance of * the notification subsystem represented by this structure. @@ -72,7 +188,11 @@ struct scmi_registered_events_desc; struct scmi_notify_instance { void *gid; struct scmi_handle *handle; + struct work_struct init_work; + /* lock to protect pending_events_handlers */ + struct mutex pending_mtx; struct scmi_registered_events_desc **registered_protocols; + DECLARE_HASHTABLE(pending_events_handlers, SCMI_PENDING_HASH_SZ); }; /** @@ -121,6 +241,9 @@ struct scmi_registered_event; * @registered_events: A dynamically allocated array holding all the registered * events' descriptors, whose fixed-size is determined at * compile time. + * @registered_mtx: A mutex to protect @registered_events_handlers + * @registered_events_handlers: An hashtable containing all events' handlers + * descriptors registered for this protocol * * All protocols that register at least one event have their protocol-specific * information stored here, together with the embedded allocated events_queue. 
@@ -141,6 +264,9 @@ struct scmi_registered_events_desc { void *in_flight; int num_events; struct scmi_registered_event **registered_events; + /* mutex to protect registered_events_handlers */ + struct mutex registered_mtx; + DECLARE_HASHTABLE(registered_events_handlers, SCMI_REGISTERED_HASH_SZ); }; /** @@ -173,6 +299,38 @@ struct scmi_registered_event { struct mutex sources_mtx; }; +/** + * struct scmi_event_handler - Event handler information + * @key: The used hashkey + * @users: A reference count for number of active users for this handler + * @r_evt: A reference to the associated registered event; when this is NULL + * this handler is pending, which means that identifies a set of + * callbacks intended to be attached to an event which is still not + * known nor registered by any protocol at that point in time + * @chain: The notification chain dedicated to this specific event tuple + * @hash: The hlist_node used for collision handling + * @enabled: A boolean which records if event's generation has been already + * enabled for this handler as a whole + * + * This structure collects all the information needed to process a received + * event identified by the tuple (proto_id, evt_id, src_id). + * These descriptors are stored in a per-protocol @registered_events_handlers + * table using as a key a value derived from that tuple. + */ +struct scmi_event_handler { + u32 key; + refcount_t users; + struct scmi_registered_event *r_evt; + struct blocking_notifier_head chain; + struct hlist_node hash; + bool enabled; +}; + +#define IS_HNDL_PENDING(hndl) (!(hndl)->r_evt) + +static void scmi_put_handler_unlocked(struct scmi_notify_instance *ni, + struct scmi_event_handler *hndl); + /** * scmi_kfifo_free() - Devres action helper to free the kfifo * @kfifo: The kfifo to free @@ -258,6 +416,10 @@ scmi_allocate_registered_events_desc(struct scmi_notify_instance *ni, return ERR_PTR(-ENOMEM); pd->num_events = num_events; + /* Initialize per protocol handlers table */ + mutex_init(&pd->registered_mtx); + hash_init(pd->registered_events_handlers); + return pd; } @@ -349,6 +511,12 @@ int scmi_register_protocol_events(const struct scmi_handle *handle, devres_close_group(ni->handle->dev, ni->gid); + /* + * Finalize any pending events' handler which could have been waiting + * for this protocol's events registration. + */ + schedule_work(&ni->init_work); + return 0; err: @@ -359,6 +527,558 @@ err: return -ENOMEM; } +/** + * scmi_allocate_event_handler() - Allocate Event handler + * @ni: A reference to the notification instance to use + * @evt_key: 32bit key uniquely bind to the event identified by the tuple + * (proto_id, evt_id, src_id) + * + * Allocate an event handler and related notification chain associated with + * the provided event handler key. + * Note that, at this point, a related registered_event is still to be + * associated to this handler descriptor (hndl->r_evt == NULL), so the handler + * is initialized as pending. + * + * Context: Assumes to be called with @pending_mtx already acquired. 
+ * Return: the freshly allocated structure on Success + */ +static struct scmi_event_handler * +scmi_allocate_event_handler(struct scmi_notify_instance *ni, u32 evt_key) +{ + struct scmi_event_handler *hndl; + + hndl = kzalloc(sizeof(*hndl), GFP_KERNEL); + if (!hndl) + return NULL; + hndl->key = evt_key; + BLOCKING_INIT_NOTIFIER_HEAD(&hndl->chain); + refcount_set(&hndl->users, 1); + /* New handlers are created pending */ + hash_add(ni->pending_events_handlers, &hndl->hash, hndl->key); + + return hndl; +} + +/** + * scmi_free_event_handler() - Free the provided Event handler + * @hndl: The event handler structure to free + * + * Context: Assumes to be called with proper locking acquired depending + * on the situation. + */ +static void scmi_free_event_handler(struct scmi_event_handler *hndl) +{ + hash_del(&hndl->hash); + kfree(hndl); +} + +/** + * scmi_bind_event_handler() - Helper to attempt binding an handler to an event + * @ni: A reference to the notification instance to use + * @hndl: The event handler to bind + * + * If an associated registered event is found, move the handler from the pending + * into the registered table. + * + * Context: Assumes to be called with @pending_mtx already acquired. + * + * Return: 0 on Success + */ +static inline int scmi_bind_event_handler(struct scmi_notify_instance *ni, + struct scmi_event_handler *hndl) +{ + struct scmi_registered_event *r_evt; + + r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(hndl->key), + KEY_XTRACT_EVT_ID(hndl->key)); + if (!r_evt) + return -EINVAL; + + /* Remove from pending and insert into registered */ + hash_del(&hndl->hash); + hndl->r_evt = r_evt; + mutex_lock(&r_evt->proto->registered_mtx); + hash_add(r_evt->proto->registered_events_handlers, + &hndl->hash, hndl->key); + mutex_unlock(&r_evt->proto->registered_mtx); + + return 0; +} + +/** + * scmi_valid_pending_handler() - Helper to check pending status of handlers + * @ni: A reference to the notification instance to use + * @hndl: The event handler to check + * + * An handler is considered pending when its r_evt == NULL, because the related + * event was still unknown at handler's registration time; anyway, since all + * protocols register their supported events once for all at protocols' + * initialization time, a pending handler cannot be considered valid anymore if + * the underlying event (which it is waiting for), belongs to an already + * initialized and registered protocol. + * + * Return: 0 on Success + */ +static inline int scmi_valid_pending_handler(struct scmi_notify_instance *ni, + struct scmi_event_handler *hndl) +{ + struct scmi_registered_events_desc *pd; + + if (!IS_HNDL_PENDING(hndl)) + return -EINVAL; + + pd = SCMI_GET_PROTO(ni, KEY_XTRACT_PROTO_ID(hndl->key)); + if (pd) + return -EINVAL; + + return 0; +} + +/** + * scmi_register_event_handler() - Register whenever possible an Event handler + * @ni: A reference to the notification instance to use + * @hndl: The event handler to register + * + * At first try to bind an event handler to its associated event, then check if + * it was at least a valid pending handler: if it was not bound nor valid return + * false. + * + * Valid pending incomplete bindings will be periodically retried by a dedicated + * worker which is kicked each time a new protocol completes its own + * registration phase. + * + * Context: Assumes to be called with @pending_mtx acquired. 
+ * + * Return: 0 on Success + */ +static int scmi_register_event_handler(struct scmi_notify_instance *ni, + struct scmi_event_handler *hndl) +{ + int ret; + + ret = scmi_bind_event_handler(ni, hndl); + if (!ret) { + dev_dbg(ni->handle->dev, "registered NEW handler - key:%X\n", + hndl->key); + } else { + ret = scmi_valid_pending_handler(ni, hndl); + if (!ret) + dev_dbg(ni->handle->dev, + "registered PENDING handler - key:%X\n", + hndl->key); + } + + return ret; +} + +/** + * __scmi_event_handler_get_ops() - Utility to get or create an event handler + * @ni: A reference to the notification instance to use + * @evt_key: The event key to use + * @create: A boolean flag to specify if a handler must be created when + * not already existent + * + * Search for the desired handler matching the key in both the per-protocol + * registered table and the common pending table: + * * if found adjust users refcount + * * if not found and @create is true, create and register the new handler: + * handler could end up being registered as pending if no matching event + * could be found. + * + * An handler is guaranteed to reside in one and only one of the tables at + * any one time; to ensure this the whole search and create is performed + * holding the @pending_mtx lock, with @registered_mtx additionally acquired + * if needed. + * + * Note that when a nested acquisition of these mutexes is needed the locking + * order is always (same as in @init_work): + * 1. pending_mtx + * 2. registered_mtx + * + * Events generation is NOT enabled right after creation within this routine + * since at creation time we usually want to have all setup and ready before + * events really start flowing. + * + * Return: A properly refcounted handler on Success, NULL on Failure + */ +static inline struct scmi_event_handler * +__scmi_event_handler_get_ops(struct scmi_notify_instance *ni, + u32 evt_key, bool create) +{ + struct scmi_registered_event *r_evt; + struct scmi_event_handler *hndl = NULL; + + r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key), + KEY_XTRACT_EVT_ID(evt_key)); + + mutex_lock(&ni->pending_mtx); + /* Search registered events at first ... if possible at all */ + if (r_evt) { + mutex_lock(&r_evt->proto->registered_mtx); + hndl = KEY_FIND(r_evt->proto->registered_events_handlers, + hndl, evt_key); + if (hndl) + refcount_inc(&hndl->users); + mutex_unlock(&r_evt->proto->registered_mtx); + } + + /* ...then amongst pending. 
*/ + if (!hndl) { + hndl = KEY_FIND(ni->pending_events_handlers, hndl, evt_key); + if (hndl) + refcount_inc(&hndl->users); + } + + /* Create if still not found and required */ + if (!hndl && create) { + hndl = scmi_allocate_event_handler(ni, evt_key); + if (hndl && scmi_register_event_handler(ni, hndl)) { + dev_dbg(ni->handle->dev, + "purging UNKNOWN handler - key:%X\n", + hndl->key); + /* this hndl can be only a pending one */ + scmi_put_handler_unlocked(ni, hndl); + hndl = NULL; + } + } + mutex_unlock(&ni->pending_mtx); + + return hndl; +} + +static struct scmi_event_handler * +scmi_get_handler(struct scmi_notify_instance *ni, u32 evt_key) +{ + return __scmi_event_handler_get_ops(ni, evt_key, false); +} + +static struct scmi_event_handler * +scmi_get_or_create_handler(struct scmi_notify_instance *ni, u32 evt_key) +{ + return __scmi_event_handler_get_ops(ni, evt_key, true); +} + +/** + * __scmi_enable_evt() - Enable/disable events generation + * @r_evt: The registered event to act upon + * @src_id: The src_id to act upon + * @enable: The action to perform: true->Enable, false->Disable + * + * Takes care of proper refcounting while performing enable/disable: handles + * the special case of ALL sources requests by itself. + * Returns successfully if at least one of the required src_id has been + * successfully enabled/disabled. + * + * Return: 0 on Success + */ +static inline int __scmi_enable_evt(struct scmi_registered_event *r_evt, + u32 src_id, bool enable) +{ + int retvals = 0; + u32 num_sources; + refcount_t *sid; + + if (src_id == SRC_ID_MASK) { + src_id = 0; + num_sources = r_evt->num_sources; + } else if (src_id < r_evt->num_sources) { + num_sources = 1; + } else { + return -EINVAL; + } + + mutex_lock(&r_evt->sources_mtx); + if (enable) { + for (; num_sources; src_id++, num_sources--) { + int ret = 0; + + sid = &r_evt->sources[src_id]; + if (refcount_read(sid) == 0) { + ret = REVT_NOTIFY_ENABLE(r_evt, r_evt->evt->id, + src_id); + if (!ret) + refcount_set(sid, 1); + } else { + refcount_inc(sid); + } + retvals += !ret; + } + } else { + for (; num_sources; src_id++, num_sources--) { + sid = &r_evt->sources[src_id]; + if (refcount_dec_and_test(sid)) + REVT_NOTIFY_DISABLE(r_evt, + r_evt->evt->id, src_id); + } + retvals = 1; + } + mutex_unlock(&r_evt->sources_mtx); + + return retvals ? 0 : -EINVAL; +} + +static int scmi_enable_events(struct scmi_event_handler *hndl) +{ + int ret = 0; + + if (!hndl->enabled) { + ret = __scmi_enable_evt(hndl->r_evt, + KEY_XTRACT_SRC_ID(hndl->key), true); + if (!ret) + hndl->enabled = true; + } + + return ret; +} + +static int scmi_disable_events(struct scmi_event_handler *hndl) +{ + int ret = 0; + + if (hndl->enabled) { + ret = __scmi_enable_evt(hndl->r_evt, + KEY_XTRACT_SRC_ID(hndl->key), false); + if (!ret) + hndl->enabled = false; + } + + return ret; +} + +/** + * scmi_put_handler_unlocked() - Put an event handler + * @ni: A reference to the notification instance to use + * @hndl: The event handler to act upon + * + * After having got exclusive access to the registered handlers hashtable, + * update the refcount and if @hndl is no more in use by anyone: + * * ask for events' generation disabling + * * unregister and free the handler itself + * + * Context: Assumes all the proper locking has been managed by the caller. 
+ */ +static void scmi_put_handler_unlocked(struct scmi_notify_instance *ni, + struct scmi_event_handler *hndl) +{ + if (refcount_dec_and_test(&hndl->users)) { + if (!IS_HNDL_PENDING(hndl)) + scmi_disable_events(hndl); + scmi_free_event_handler(hndl); + } +} + +static void scmi_put_handler(struct scmi_notify_instance *ni, + struct scmi_event_handler *hndl) +{ + struct scmi_registered_event *r_evt = hndl->r_evt; + + mutex_lock(&ni->pending_mtx); + if (r_evt) + mutex_lock(&r_evt->proto->registered_mtx); + + scmi_put_handler_unlocked(ni, hndl); + + if (r_evt) + mutex_unlock(&r_evt->proto->registered_mtx); + mutex_unlock(&ni->pending_mtx); +} + +/** + * scmi_event_handler_enable_events() - Enable events associated to an handler + * @hndl: The Event handler to act upon + * + * Return: 0 on Success + */ +static int scmi_event_handler_enable_events(struct scmi_event_handler *hndl) +{ + if (scmi_enable_events(hndl)) { + pr_err("Failed to ENABLE events for key:%X !\n", hndl->key); + return -EINVAL; + } + + return 0; +} + +/** + * scmi_register_notifier() - Register a notifier_block for an event + * @handle: The handle identifying the platform instance against which the + * callback is registered + * @proto_id: Protocol ID + * @evt_id: Event ID + * @src_id: Source ID, when NULL register for events coming form ALL possible + * sources + * @nb: A standard notifier block to register for the specified event + * + * Generic helper to register a notifier_block against a protocol event. + * + * A notifier_block @nb will be registered for each distinct event identified + * by the tuple (proto_id, evt_id, src_id) on a dedicated notification chain + * so that: + * + * (proto_X, evt_Y, src_Z) --> chain_X_Y_Z + * + * @src_id meaning is protocol specific and identifies the origin of the event + * (like domain_id, sensor_id and so forth). + * + * @src_id can be NULL to signify that the caller is interested in receiving + * notifications from ALL the available sources for that protocol OR simply that + * the protocol does not support distinct sources. + * + * As soon as one user for the specified tuple appears, an handler is created, + * and that specific event's generation is enabled at the platform level, unless + * an associated registered event is found missing, meaning that the needed + * protocol is still to be initialized and the handler has just been registered + * as still pending. + * + * Return: 0 on Success + */ +static int scmi_register_notifier(const struct scmi_handle *handle, + u8 proto_id, u8 evt_id, u32 *src_id, + struct notifier_block *nb) +{ + int ret = 0; + u32 evt_key; + struct scmi_event_handler *hndl; + struct scmi_notify_instance *ni; + + /* Ensure notify_priv is updated */ + smp_rmb(); + if (!handle->notify_priv) + return -ENODEV; + ni = handle->notify_priv; + + evt_key = MAKE_HASH_KEY(proto_id, evt_id, + src_id ? 
*src_id : SRC_ID_MASK); + hndl = scmi_get_or_create_handler(ni, evt_key); + if (!hndl) + return -EINVAL; + + blocking_notifier_chain_register(&hndl->chain, nb); + + /* Enable events for not pending handlers */ + if (!IS_HNDL_PENDING(hndl)) { + ret = scmi_event_handler_enable_events(hndl); + if (ret) + scmi_put_handler(ni, hndl); + } + + return ret; +} + +/** + * scmi_unregister_notifier() - Unregister a notifier_block for an event + * @handle: The handle identifying the platform instance against which the + * callback is unregistered + * @proto_id: Protocol ID + * @evt_id: Event ID + * @src_id: Source ID + * @nb: The notifier_block to unregister + * + * Takes care to unregister the provided @nb from the notification chain + * associated to the specified event and, if there are no more users for the + * event handler, frees also the associated event handler structures. + * (this could possibly cause disabling of event's generation at platform level) + * + * Return: 0 on Success + */ +static int scmi_unregister_notifier(const struct scmi_handle *handle, + u8 proto_id, u8 evt_id, u32 *src_id, + struct notifier_block *nb) +{ + u32 evt_key; + struct scmi_event_handler *hndl; + struct scmi_notify_instance *ni; + + /* Ensure notify_priv is updated */ + smp_rmb(); + if (!handle->notify_priv) + return -ENODEV; + ni = handle->notify_priv; + + evt_key = MAKE_HASH_KEY(proto_id, evt_id, + src_id ? *src_id : SRC_ID_MASK); + hndl = scmi_get_handler(ni, evt_key); + if (!hndl) + return -EINVAL; + + /* + * Note that this chain unregistration call is safe on its own + * being internally protected by an rwsem. + */ + blocking_notifier_chain_unregister(&hndl->chain, nb); + scmi_put_handler(ni, hndl); + + /* + * This balances the initial get issued in @scmi_register_notifier. + * If this notifier_block happened to be the last known user callback + * for this event, the handler is here freed and the event's generation + * stopped. + * + * Note that, an ongoing concurrent lookup on the delivery workqueue + * path could still hold the refcount to 1 even after this routine + * completes: in such a case it will be the final put on the delivery + * path which will finally free this unused handler. + */ + scmi_put_handler(ni, hndl); + + return 0; +} + +/** + * scmi_protocols_late_init() - Worker for late initialization + * @work: The work item to use associated to the proper SCMI instance + * + * This kicks in whenever a new protocol has completed its own registration via + * scmi_register_protocol_events(): it is in charge of scanning the table of + * pending handlers (registered by users while the related protocol was still + * not initialized) and finalizing their initialization whenever possible; + * invalid pending handlers are purged at this point in time. 
+ */ +static void scmi_protocols_late_init(struct work_struct *work) +{ + int bkt; + struct scmi_event_handler *hndl; + struct scmi_notify_instance *ni; + struct hlist_node *tmp; + + ni = container_of(work, struct scmi_notify_instance, init_work); + + /* Ensure protocols and events are up to date */ + smp_rmb(); + + mutex_lock(&ni->pending_mtx); + hash_for_each_safe(ni->pending_events_handlers, bkt, tmp, hndl, hash) { + int ret; + + ret = scmi_bind_event_handler(ni, hndl); + if (!ret) { + dev_dbg(ni->handle->dev, + "finalized PENDING handler - key:%X\n", + hndl->key); + ret = scmi_event_handler_enable_events(hndl); + } else { + ret = scmi_valid_pending_handler(ni, hndl); + } + if (ret) { + dev_dbg(ni->handle->dev, + "purging PENDING handler - key:%X\n", + hndl->key); + /* this hndl can be only a pending one */ + scmi_put_handler_unlocked(ni, hndl); + } + } + mutex_unlock(&ni->pending_mtx); +} + +/* + * notify_ops are attached to the handle so that can be accessed + * directly from an scmi_driver to register its own notifiers. + */ +static struct scmi_notify_ops notify_ops = { + .register_event_notifier = scmi_register_notifier, + .unregister_event_notifier = scmi_unregister_notifier, +}; + /** * scmi_notification_init() - Initializes Notification Core Support * @handle: The handle identifying the platform instance to initialize @@ -406,6 +1126,12 @@ int scmi_notification_init(struct scmi_handle *handle) if (!ni->registered_protocols) goto err; + mutex_init(&ni->pending_mtx); + hash_init(ni->pending_events_handlers); + + INIT_WORK(&ni->init_work, scmi_protocols_late_init); + + handle->notify_ops = ¬ify_ops; handle->notify_priv = ni; /* Ensure handle is up to date */ smp_wmb(); diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 9a34b02a9cce..26957d6872a5 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -9,6 +9,7 @@ #define _LINUX_SCMI_PROTOCOL_H #include +#include #include #define SCMI_MAX_STR_SIZE 16 @@ -213,6 +214,49 @@ struct scmi_reset_ops { int (*deassert)(const struct scmi_handle *handle, u32 domain); }; +/** + * struct scmi_notify_ops - represents notifications' operations provided by + * SCMI core + * @register_event_notifier: Register a notifier_block for the requested event + * @unregister_event_notifier: Unregister a notifier_block for the requested + * event + * + * A user can register/unregister its own notifier_block against the wanted + * platform instance regarding the desired event identified by the + * tuple: (proto_id, evt_id, src_id) using the provided register/unregister + * interface where: + * + * @handle: The handle identifying the platform instance to use + * @proto_id: The protocol ID as in SCMI Specification + * @evt_id: The message ID of the desired event as in SCMI Specification + * @src_id: A pointer to the desired source ID if different sources are + * possible for the protocol (like domain_id, sensor_id...etc) + * + * @src_id can be provided as NULL if it simply does NOT make sense for + * the protocol at hand, OR if the user is explicitly interested in + * receiving notifications from ANY existent source associated to the + * specified proto_id / evt_id. 
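A minimal user-side sketch of this interface (hypothetical driver code; it relies on the power-protocol event ID and report layout introduced later in this series):

static int foo_power_state_cb(struct notifier_block *nb,
			      unsigned long evt_id, void *data)
{
	const struct scmi_power_state_changed_report *r = data;

	pr_info("power domain %u moved to state 0x%x\n",
		r->domain_id, r->power_state);
	return NOTIFY_OK;
}

static struct notifier_block foo_power_nb = {
	.notifier_call = foo_power_state_cb,
};

/* from an scmi_driver probe routine, with a valid handle */
u32 domain_id = 0;

handle->notify_ops->register_event_notifier(handle, SCMI_PROTOCOL_POWER,
					    SCMI_EVENT_POWER_STATE_CHANGED,
					    &domain_id, &foo_power_nb);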
+ * + * Received notifications are finally delivered to the registered users, + * invoking the callback provided with the notifier_block *nb as follows: + * + * int user_cb(nb, evt_id, report) + * + * with: + * + * @nb: The notifier block provided by the user + * @evt_id: The message ID of the delivered event + * @report: A custom struct describing the specific event delivered + */ +struct scmi_notify_ops { + int (*register_event_notifier)(const struct scmi_handle *handle, + u8 proto_id, u8 evt_id, u32 *src_id, + struct notifier_block *nb); + int (*unregister_event_notifier)(const struct scmi_handle *handle, + u8 proto_id, u8 evt_id, u32 *src_id, + struct notifier_block *nb); +}; + /** * struct scmi_handle - Handle returned to ARM SCMI clients for usage. * @@ -223,6 +267,7 @@ struct scmi_reset_ops { * @clk_ops: pointer to set of clock protocol operations * @sensor_ops: pointer to set of sensor protocol operations * @reset_ops: pointer to set of reset protocol operations + * @notify_ops: pointer to set of notifications related operations * @perf_priv: pointer to private data structure specific to performance * protocol(for internal use only) * @clk_priv: pointer to private data structure specific to clock @@ -244,6 +289,7 @@ struct scmi_handle { struct scmi_power_ops *power_ops; struct scmi_sensor_ops *sensor_ops; struct scmi_reset_ops *reset_ops; + struct scmi_notify_ops *notify_ops; /* for protocol internal use */ void *perf_priv; void *clk_priv; -- cgit v1.2.3 From e27077bc04d5a2e09a0860ca086e1d55adf6a16d Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Wed, 1 Jul 2020 16:53:44 +0100 Subject: firmware: arm_scmi: Add power notifications support Make SCMI power protocol register with the notification core. Link: https://lore.kernel.org/r/20200701155348.52864-6-cristian.marussi@arm.com Reviewed-by: Jonathan Cameron Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/power.c | 92 ++++++++++++++++++++++++++++++++++++--- include/linux/scmi_protocol.h | 12 +++++ 2 files changed, 98 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c index cf7f0312381b..4f6757980739 100644 --- a/drivers/firmware/arm_scmi/power.c +++ b/drivers/firmware/arm_scmi/power.c @@ -5,19 +5,18 @@ * Copyright (C) 2018 ARM Ltd. 
*/ +#define pr_fmt(fmt) "SCMI Notifications POWER - " fmt + +#include + #include "common.h" +#include "notify.h" enum scmi_power_protocol_cmd { POWER_DOMAIN_ATTRIBUTES = 0x3, POWER_STATE_SET = 0x4, POWER_STATE_GET = 0x5, POWER_STATE_NOTIFY = 0x6, - POWER_STATE_CHANGE_REQUESTED_NOTIFY = 0x7, -}; - -enum scmi_power_protocol_notify { - POWER_STATE_CHANGED = 0x0, - POWER_STATE_CHANGE_REQUESTED = 0x1, }; struct scmi_msg_resp_power_attributes { @@ -48,6 +47,12 @@ struct scmi_power_state_notify { __le32 notify_enable; }; +struct scmi_power_state_notify_payld { + __le32 agent_id; + __le32 domain_id; + __le32 power_state; +}; + struct power_dom_info { bool state_set_sync; bool state_set_async; @@ -186,6 +191,75 @@ static struct scmi_power_ops power_ops = { .state_get = scmi_power_state_get, }; +static int scmi_power_request_notify(const struct scmi_handle *handle, + u32 domain, bool enable) +{ + int ret; + struct scmi_xfer *t; + struct scmi_power_state_notify *notify; + + ret = scmi_xfer_get_init(handle, POWER_STATE_NOTIFY, + SCMI_PROTOCOL_POWER, sizeof(*notify), 0, &t); + if (ret) + return ret; + + notify = t->tx.buf; + notify->domain = cpu_to_le32(domain); + notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0; + + ret = scmi_do_xfer(handle, t); + + scmi_xfer_put(handle, t); + return ret; +} + +static int scmi_power_set_notify_enabled(const struct scmi_handle *handle, + u8 evt_id, u32 src_id, bool enable) +{ + int ret; + + ret = scmi_power_request_notify(handle, src_id, enable); + if (ret) + pr_debug("FAIL_ENABLE - evt[%X] dom[%d] - ret:%d\n", + evt_id, src_id, ret); + + return ret; +} + +static void *scmi_power_fill_custom_report(const struct scmi_handle *handle, + u8 evt_id, u64 timestamp, + const void *payld, size_t payld_sz, + void *report, u32 *src_id) +{ + const struct scmi_power_state_notify_payld *p = payld; + struct scmi_power_state_changed_report *r = report; + + if (evt_id != SCMI_EVENT_POWER_STATE_CHANGED || sizeof(*p) != payld_sz) + return NULL; + + r->timestamp = timestamp; + r->agent_id = le32_to_cpu(p->agent_id); + r->domain_id = le32_to_cpu(p->domain_id); + r->power_state = le32_to_cpu(p->power_state); + *src_id = r->domain_id; + + return r; +} + +static const struct scmi_event power_events[] = { + { + .id = SCMI_EVENT_POWER_STATE_CHANGED, + .max_payld_sz = sizeof(struct scmi_power_state_notify_payld), + .max_report_sz = + sizeof(struct scmi_power_state_changed_report), + }, +}; + +static const struct scmi_event_ops power_event_ops = { + .set_notify_enabled = scmi_power_set_notify_enabled, + .fill_custom_report = scmi_power_fill_custom_report, +}; + static int scmi_power_protocol_init(struct scmi_handle *handle) { int domain; @@ -214,6 +288,12 @@ static int scmi_power_protocol_init(struct scmi_handle *handle) scmi_power_domain_attributes_get(handle, domain, dom); } + scmi_register_protocol_events(handle, + SCMI_PROTOCOL_POWER, SCMI_PROTO_QUEUE_SZ, + &power_event_ops, power_events, + ARRAY_SIZE(power_events), + pinfo->num_domains); + pinfo->version = version; handle->power_ops = &power_ops; handle->power_priv = pinfo; diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 26957d6872a5..81dc3b132fda 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -375,4 +375,16 @@ typedef int (*scmi_prot_init_fn_t)(struct scmi_handle *); int scmi_protocol_register(int protocol_id, scmi_prot_init_fn_t fn); void scmi_protocol_unregister(int protocol_id); +/* SCMI Notification API - Custom Event Reports */ +enum scmi_notification_events { 
+ SCMI_EVENT_POWER_STATE_CHANGED = 0x0, +}; + +struct scmi_power_state_changed_report { + u64 timestamp; + u32 agent_id; + u32 domain_id; + u32 power_state; +}; + #endif /* _LINUX_SCMI_PROTOCOL_H */ -- cgit v1.2.3 From fb5086dc4746184a9325fc25411226a750fb252c Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Wed, 1 Jul 2020 16:53:45 +0100 Subject: firmware: arm_scmi: Add perf notifications support Make SCMI perf protocol register with the notification core. Link: https://lore.kernel.org/r/20200701155348.52864-7-cristian.marussi@arm.com Reviewed-by: Jonathan Cameron Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/perf.c | 139 +++++++++++++++++++++++++++++++++++++-- include/linux/scmi_protocol.h | 17 +++++ 2 files changed, 151 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index 7b8d7cebdac9..8bcad96e06ca 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c @@ -5,15 +5,19 @@ * Copyright (C) 2018 ARM Ltd. */ +#define pr_fmt(fmt) "SCMI Notifications PERF - " fmt + #include #include #include #include #include #include +#include #include #include "common.h" +#include "notify.h" enum scmi_performance_protocol_cmd { PERF_DOMAIN_ATTRIBUTES = 0x3, @@ -27,11 +31,6 @@ enum scmi_performance_protocol_cmd { PERF_DESCRIBE_FASTCHANNEL = 0xb, }; -enum scmi_performance_protocol_notify { - PERFORMANCE_LIMITS_CHANGED = 0x0, - PERFORMANCE_LEVEL_CHANGED = 0x1, -}; - struct scmi_opp { u32 perf; u32 power; @@ -86,6 +85,19 @@ struct scmi_perf_notify_level_or_limits { __le32 notify_enable; }; +struct scmi_perf_limits_notify_payld { + __le32 agent_id; + __le32 domain_id; + __le32 range_max; + __le32 range_min; +}; + +struct scmi_perf_level_notify_payld { + __le32 agent_id; + __le32 domain_id; + __le32 performance_level; +}; + struct scmi_msg_resp_perf_describe_levels { __le16 num_returned; __le16 num_remaining; @@ -158,6 +170,11 @@ struct scmi_perf_info { struct perf_dom_info *dom_info; }; +static enum scmi_performance_protocol_cmd evt_2_cmd[] = { + PERF_NOTIFY_LIMITS, + PERF_NOTIFY_LEVEL, +}; + static int scmi_perf_attributes_get(const struct scmi_handle *handle, struct scmi_perf_info *pi) { @@ -488,6 +505,29 @@ static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain, return scmi_perf_mb_level_get(handle, domain, level, poll); } +static int scmi_perf_level_limits_notify(const struct scmi_handle *handle, + u32 domain, int message_id, + bool enable) +{ + int ret; + struct scmi_xfer *t; + struct scmi_perf_notify_level_or_limits *notify; + + ret = scmi_xfer_get_init(handle, message_id, SCMI_PROTOCOL_PERF, + sizeof(*notify), 0, &t); + if (ret) + return ret; + + notify = t->tx.buf; + notify->domain = cpu_to_le32(domain); + notify->notify_enable = enable ? 
cpu_to_le32(BIT(0)) : 0; + + ret = scmi_do_xfer(handle, t); + + scmi_xfer_put(handle, t); + return ret; +} + static bool scmi_perf_fc_size_is_valid(u32 msg, u32 size) { if ((msg == PERF_LEVEL_GET || msg == PERF_LEVEL_SET) && size == 4) @@ -722,6 +762,89 @@ static struct scmi_perf_ops perf_ops = { .fast_switch_possible = scmi_fast_switch_possible, }; +static int scmi_perf_set_notify_enabled(const struct scmi_handle *handle, + u8 evt_id, u32 src_id, bool enable) +{ + int ret, cmd_id; + + if (evt_id >= ARRAY_SIZE(evt_2_cmd)) + return -EINVAL; + + cmd_id = evt_2_cmd[evt_id]; + ret = scmi_perf_level_limits_notify(handle, src_id, cmd_id, enable); + if (ret) + pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n", + evt_id, src_id, ret); + + return ret; +} + +static void *scmi_perf_fill_custom_report(const struct scmi_handle *handle, + u8 evt_id, u64 timestamp, + const void *payld, size_t payld_sz, + void *report, u32 *src_id) +{ + void *rep = NULL; + + switch (evt_id) { + case SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED: + { + const struct scmi_perf_limits_notify_payld *p = payld; + struct scmi_perf_limits_report *r = report; + + if (sizeof(*p) != payld_sz) + break; + + r->timestamp = timestamp; + r->agent_id = le32_to_cpu(p->agent_id); + r->domain_id = le32_to_cpu(p->domain_id); + r->range_max = le32_to_cpu(p->range_max); + r->range_min = le32_to_cpu(p->range_min); + *src_id = r->domain_id; + rep = r; + break; + } + case SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED: + { + const struct scmi_perf_level_notify_payld *p = payld; + struct scmi_perf_level_report *r = report; + + if (sizeof(*p) != payld_sz) + break; + + r->timestamp = timestamp; + r->agent_id = le32_to_cpu(p->agent_id); + r->domain_id = le32_to_cpu(p->domain_id); + r->performance_level = le32_to_cpu(p->performance_level); + *src_id = r->domain_id; + rep = r; + break; + } + default: + break; + } + + return rep; +} + +static const struct scmi_event perf_events[] = { + { + .id = SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED, + .max_payld_sz = sizeof(struct scmi_perf_limits_notify_payld), + .max_report_sz = sizeof(struct scmi_perf_limits_report), + }, + { + .id = SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED, + .max_payld_sz = sizeof(struct scmi_perf_level_notify_payld), + .max_report_sz = sizeof(struct scmi_perf_level_report), + }, +}; + +static const struct scmi_event_ops perf_event_ops = { + .set_notify_enabled = scmi_perf_set_notify_enabled, + .fill_custom_report = scmi_perf_fill_custom_report, +}; + static int scmi_perf_protocol_init(struct scmi_handle *handle) { int domain; @@ -754,6 +877,12 @@ static int scmi_perf_protocol_init(struct scmi_handle *handle) scmi_perf_domain_init_fc(handle, domain, &dom->fc_info); } + scmi_register_protocol_events(handle, + SCMI_PROTOCOL_PERF, SCMI_PROTO_QUEUE_SZ, + &perf_event_ops, perf_events, + ARRAY_SIZE(perf_events), + pinfo->num_domains); + pinfo->version = version; handle->perf_ops = &perf_ops; handle->perf_priv = pinfo; diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 81dc3b132fda..65ad83ddde70 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -378,6 +378,8 @@ void scmi_protocol_unregister(int protocol_id); /* SCMI Notification API - Custom Event Reports */ enum scmi_notification_events { SCMI_EVENT_POWER_STATE_CHANGED = 0x0, + SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED = 0x0, + SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED = 0x1, }; struct scmi_power_state_changed_report { @@ -387,4 +389,19 @@ struct scmi_power_state_changed_report { u32 power_state; }; +struct 
scmi_perf_limits_report { + u64 timestamp; + u32 agent_id; + u32 domain_id; + u32 range_max; + u32 range_min; +}; + +struct scmi_perf_level_report { + u64 timestamp; + u32 agent_id; + u32 domain_id; + u32 performance_level; +}; + #endif /* _LINUX_SCMI_PROTOCOL_H */ -- cgit v1.2.3 From 128e3e9311a95bab6b267b7a93eb9ebe2347dbda Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Wed, 1 Jul 2020 16:53:46 +0100 Subject: firmware: arm_scmi: Add sensor notifications support Make SCMI sensor protocol register with the notification core. Link: https://lore.kernel.org/r/20200701155348.52864-8-cristian.marussi@arm.com Reviewed-by: Jonathan Cameron Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/sensors.c | 69 ++++++++++++++++++++++++++++++++++--- include/linux/scmi_protocol.h | 13 ++++--- 2 files changed, 72 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c index db1b1ab303da..2120ac4787c9 100644 --- a/drivers/firmware/arm_scmi/sensors.c +++ b/drivers/firmware/arm_scmi/sensors.c @@ -5,7 +5,12 @@ * Copyright (C) 2018 ARM Ltd. */ +#define pr_fmt(fmt) "SCMI Notifications SENSOR - " fmt + +#include + #include "common.h" +#include "notify.h" enum scmi_sensor_protocol_cmd { SENSOR_DESCRIPTION_GET = 0x3, @@ -14,10 +19,6 @@ enum scmi_sensor_protocol_cmd { SENSOR_READING_GET = 0x6, }; -enum scmi_sensor_protocol_notify { - SENSOR_TRIP_POINT_EVENT = 0x0, -}; - struct scmi_msg_resp_sensor_attributes { __le16 num_sensors; u8 max_requests; @@ -71,6 +72,12 @@ struct scmi_msg_sensor_reading_get { #define SENSOR_READ_ASYNC BIT(0) }; +struct scmi_sensor_trip_notify_payld { + __le32 agent_id; + __le32 sensor_id; + __le32 trip_point_desc; +}; + struct sensors_info { u32 version; int num_sensors; @@ -271,11 +278,57 @@ static int scmi_sensor_count_get(const struct scmi_handle *handle) static struct scmi_sensor_ops sensor_ops = { .count_get = scmi_sensor_count_get, .info_get = scmi_sensor_info_get, - .trip_point_notify = scmi_sensor_trip_point_notify, .trip_point_config = scmi_sensor_trip_point_config, .reading_get = scmi_sensor_reading_get, }; +static int scmi_sensor_set_notify_enabled(const struct scmi_handle *handle, + u8 evt_id, u32 src_id, bool enable) +{ + int ret; + + ret = scmi_sensor_trip_point_notify(handle, src_id, enable); + if (ret) + pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n", + evt_id, src_id, ret); + + return ret; +} + +static void *scmi_sensor_fill_custom_report(const struct scmi_handle *handle, + u8 evt_id, u64 timestamp, + const void *payld, size_t payld_sz, + void *report, u32 *src_id) +{ + const struct scmi_sensor_trip_notify_payld *p = payld; + struct scmi_sensor_trip_point_report *r = report; + + if (evt_id != SCMI_EVENT_SENSOR_TRIP_POINT_EVENT || + sizeof(*p) != payld_sz) + return NULL; + + r->timestamp = timestamp; + r->agent_id = le32_to_cpu(p->agent_id); + r->sensor_id = le32_to_cpu(p->sensor_id); + r->trip_point_desc = le32_to_cpu(p->trip_point_desc); + *src_id = r->sensor_id; + + return r; +} + +static const struct scmi_event sensor_events[] = { + { + .id = SCMI_EVENT_SENSOR_TRIP_POINT_EVENT, + .max_payld_sz = sizeof(struct scmi_sensor_trip_notify_payld), + .max_report_sz = sizeof(struct scmi_sensor_trip_point_report), + }, +}; + +static const struct scmi_event_ops sensor_event_ops = { + .set_notify_enabled = scmi_sensor_set_notify_enabled, + .fill_custom_report = scmi_sensor_fill_custom_report, +}; + static int scmi_sensors_protocol_init(struct 
scmi_handle *handle) { u32 version; @@ -299,6 +352,12 @@ static int scmi_sensors_protocol_init(struct scmi_handle *handle) scmi_sensor_description_get(handle, sinfo); + scmi_register_protocol_events(handle, + SCMI_PROTOCOL_SENSOR, SCMI_PROTO_QUEUE_SZ, + &sensor_event_ops, sensor_events, + ARRAY_SIZE(sensor_events), + sinfo->num_sensors); + sinfo->version = version; handle->sensor_ops = &sensor_ops; handle->sensor_priv = sinfo; diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 65ad83ddde70..d0ea4a5037e7 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -176,18 +176,13 @@ enum scmi_sensor_class { * * @count_get: get the count of sensors provided by SCMI * @info_get: get the information of the specified sensor - * @trip_point_notify: control notifications on cross-over events for - * the trip-points * @trip_point_config: selects and configures a trip-point of interest * @reading_get: gets the current value of the sensor */ struct scmi_sensor_ops { int (*count_get)(const struct scmi_handle *handle); - const struct scmi_sensor_info *(*info_get) (const struct scmi_handle *handle, u32 sensor_id); - int (*trip_point_notify)(const struct scmi_handle *handle, - u32 sensor_id, bool enable); int (*trip_point_config)(const struct scmi_handle *handle, u32 sensor_id, u8 trip_id, u64 trip_value); int (*reading_get)(const struct scmi_handle *handle, u32 sensor_id, @@ -380,6 +375,7 @@ enum scmi_notification_events { SCMI_EVENT_POWER_STATE_CHANGED = 0x0, SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED = 0x0, SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED = 0x1, + SCMI_EVENT_SENSOR_TRIP_POINT_EVENT = 0x0, }; struct scmi_power_state_changed_report { @@ -404,4 +400,11 @@ struct scmi_perf_level_report { u32 performance_level; }; +struct scmi_sensor_trip_point_report { + u64 timestamp; + u32 agent_id; + u32 sensor_id; + u32 trip_point_desc; +}; + #endif /* _LINUX_SCMI_PROTOCOL_H */ -- cgit v1.2.3 From 469ca1822d64e4a786935576edb696c52119aa11 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Wed, 1 Jul 2020 16:53:47 +0100 Subject: firmware: arm_scmi: Add reset notifications support Make SCMI reset protocol register with the notification core. Link: https://lore.kernel.org/r/20200701155348.52864-9-cristian.marussi@arm.com Reviewed-by: Jonathan Cameron Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/reset.c | 96 +++++++++++++++++++++++++++++++++++++-- include/linux/scmi_protocol.h | 8 ++++ 2 files changed, 100 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c index de73054554f3..fb7cb517900b 100644 --- a/drivers/firmware/arm_scmi/reset.c +++ b/drivers/firmware/arm_scmi/reset.c @@ -5,7 +5,12 @@ * Copyright (C) 2019 ARM Ltd. 
*/ +#define pr_fmt(fmt) "SCMI Notifications RESET - " fmt + +#include + #include "common.h" +#include "notify.h" enum scmi_reset_protocol_cmd { RESET_DOMAIN_ATTRIBUTES = 0x3, @@ -13,10 +18,6 @@ enum scmi_reset_protocol_cmd { RESET_NOTIFY = 0x5, }; -enum scmi_reset_protocol_notify { - RESET_ISSUED = 0x0, -}; - #define NUM_RESET_DOMAIN_MASK 0xffff #define RESET_NOTIFY_ENABLE BIT(0) @@ -40,6 +41,18 @@ struct scmi_msg_reset_domain_reset { #define ARCH_COLD_RESET (ARCH_RESET_TYPE | COLD_RESET_STATE) }; +struct scmi_msg_reset_notify { + __le32 id; + __le32 event_control; +#define RESET_TP_NOTIFY_ALL BIT(0) +}; + +struct scmi_reset_issued_notify_payld { + __le32 agent_id; + __le32 domain_id; + __le32 reset_state; +}; + struct reset_dom_info { bool async_reset; bool reset_notify; @@ -190,6 +203,75 @@ static struct scmi_reset_ops reset_ops = { .deassert = scmi_reset_domain_deassert, }; +static int scmi_reset_notify(const struct scmi_handle *handle, u32 domain_id, + bool enable) +{ + int ret; + u32 evt_cntl = enable ? RESET_TP_NOTIFY_ALL : 0; + struct scmi_xfer *t; + struct scmi_msg_reset_notify *cfg; + + ret = scmi_xfer_get_init(handle, RESET_NOTIFY, + SCMI_PROTOCOL_RESET, sizeof(*cfg), 0, &t); + if (ret) + return ret; + + cfg = t->tx.buf; + cfg->id = cpu_to_le32(domain_id); + cfg->event_control = cpu_to_le32(evt_cntl); + + ret = scmi_do_xfer(handle, t); + + scmi_xfer_put(handle, t); + return ret; +} + +static int scmi_reset_set_notify_enabled(const struct scmi_handle *handle, + u8 evt_id, u32 src_id, bool enable) +{ + int ret; + + ret = scmi_reset_notify(handle, src_id, enable); + if (ret) + pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n", + evt_id, src_id, ret); + + return ret; +} + +static void *scmi_reset_fill_custom_report(const struct scmi_handle *handle, + u8 evt_id, u64 timestamp, + const void *payld, size_t payld_sz, + void *report, u32 *src_id) +{ + const struct scmi_reset_issued_notify_payld *p = payld; + struct scmi_reset_issued_report *r = report; + + if (evt_id != SCMI_EVENT_RESET_ISSUED || sizeof(*p) != payld_sz) + return NULL; + + r->timestamp = timestamp; + r->agent_id = le32_to_cpu(p->agent_id); + r->domain_id = le32_to_cpu(p->domain_id); + r->reset_state = le32_to_cpu(p->reset_state); + *src_id = r->domain_id; + + return r; +} + +static const struct scmi_event reset_events[] = { + { + .id = SCMI_EVENT_RESET_ISSUED, + .max_payld_sz = sizeof(struct scmi_reset_issued_notify_payld), + .max_report_sz = sizeof(struct scmi_reset_issued_report), + }, +}; + +static const struct scmi_event_ops reset_event_ops = { + .set_notify_enabled = scmi_reset_set_notify_enabled, + .fill_custom_report = scmi_reset_fill_custom_report, +}; + static int scmi_reset_protocol_init(struct scmi_handle *handle) { int domain; @@ -218,6 +300,12 @@ static int scmi_reset_protocol_init(struct scmi_handle *handle) scmi_reset_domain_attributes_get(handle, domain, dom); } + scmi_register_protocol_events(handle, + SCMI_PROTOCOL_RESET, SCMI_PROTO_QUEUE_SZ, + &reset_event_ops, reset_events, + ARRAY_SIZE(reset_events), + pinfo->num_domains); + pinfo->version = version; handle->reset_ops = &reset_ops; handle->reset_priv = pinfo; diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index d0ea4a5037e7..d04d66be596d 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -376,6 +376,7 @@ enum scmi_notification_events { SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED = 0x0, SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED = 0x1, SCMI_EVENT_SENSOR_TRIP_POINT_EVENT = 0x0, + SCMI_EVENT_RESET_ISSUED = 
0x0, }; struct scmi_power_state_changed_report { @@ -407,4 +408,11 @@ struct scmi_sensor_trip_point_report { u32 trip_point_desc; }; +struct scmi_reset_issued_report { + u64 timestamp; + u32 agent_id; + u32 domain_id; + u32 reset_state; +}; + #endif /* _LINUX_SCMI_PROTOCOL_H */ -- cgit v1.2.3 From 585dfab3fb80e67b3a54790b3d5ef2991feb3950 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Wed, 1 Jul 2020 16:53:48 +0100 Subject: firmware: arm_scmi: Add base notifications support Make SCMI base protocol register with the notification core. Link: https://lore.kernel.org/r/20200701155348.52864-10-cristian.marussi@arm.com Reviewed-by: Jonathan Cameron Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/base.c | 108 +++++++++++++++++++++++++++++++++++++-- include/linux/scmi_protocol.h | 9 ++++ 2 files changed, 113 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c index ce7d9203e41b..54f378e946f1 100644 --- a/drivers/firmware/arm_scmi/base.c +++ b/drivers/firmware/arm_scmi/base.c @@ -5,7 +5,15 @@ * Copyright (C) 2018 ARM Ltd. */ +#define pr_fmt(fmt) "SCMI Notifications BASE - " fmt + +#include + #include "common.h" +#include "notify.h" + +#define SCMI_BASE_NUM_SOURCES 1 +#define SCMI_BASE_MAX_CMD_ERR_COUNT 1024 enum scmi_base_protocol_cmd { BASE_DISCOVER_VENDOR = 0x3, @@ -19,16 +27,25 @@ enum scmi_base_protocol_cmd { BASE_RESET_AGENT_CONFIGURATION = 0xb, }; -enum scmi_base_protocol_notify { - BASE_ERROR_EVENT = 0x0, -}; - struct scmi_msg_resp_base_attributes { u8 num_protocols; u8 num_agents; __le16 reserved; }; +struct scmi_msg_base_error_notify { + __le32 event_control; +#define BASE_TP_NOTIFY_ALL BIT(0) +}; + +struct scmi_base_error_notify_payld { + __le32 agent_id; + __le32 error_status; +#define IS_FATAL_ERROR(x) ((x) & BIT(31)) +#define ERROR_CMD_COUNT(x) FIELD_GET(GENMASK(9, 0), (x)) + __le64 msg_reports[SCMI_BASE_MAX_CMD_ERR_COUNT]; +}; + /** * scmi_base_attributes_get() - gets the implementation details * that are associated with the base protocol. @@ -222,6 +239,83 @@ static int scmi_base_discover_agent_get(const struct scmi_handle *handle, return ret; } +static int scmi_base_error_notify(const struct scmi_handle *handle, bool enable) +{ + int ret; + u32 evt_cntl = enable ? BASE_TP_NOTIFY_ALL : 0; + struct scmi_xfer *t; + struct scmi_msg_base_error_notify *cfg; + + ret = scmi_xfer_get_init(handle, BASE_NOTIFY_ERRORS, + SCMI_PROTOCOL_BASE, sizeof(*cfg), 0, &t); + if (ret) + return ret; + + cfg = t->tx.buf; + cfg->event_control = cpu_to_le32(evt_cntl); + + ret = scmi_do_xfer(handle, t); + + scmi_xfer_put(handle, t); + return ret; +} + +static int scmi_base_set_notify_enabled(const struct scmi_handle *handle, + u8 evt_id, u32 src_id, bool enable) +{ + int ret; + + ret = scmi_base_error_notify(handle, enable); + if (ret) + pr_debug("FAIL_ENABLED - evt[%X] ret:%d\n", evt_id, ret); + + return ret; +} + +static void *scmi_base_fill_custom_report(const struct scmi_handle *handle, + u8 evt_id, u64 timestamp, + const void *payld, size_t payld_sz, + void *report, u32 *src_id) +{ + int i; + const struct scmi_base_error_notify_payld *p = payld; + struct scmi_base_error_report *r = report; + + /* + * BaseError notification payload is variable in size but + * up to a maximum length determined by the struct ponted by p. + * Instead payld_sz is the effective length of this notification + * payload so cannot be greater of the maximum allowed size as + * pointed by p. 
+ */ + if (evt_id != SCMI_EVENT_BASE_ERROR_EVENT || sizeof(*p) < payld_sz) + return NULL; + + r->timestamp = timestamp; + r->agent_id = le32_to_cpu(p->agent_id); + r->fatal = IS_FATAL_ERROR(le32_to_cpu(p->error_status)); + r->cmd_count = ERROR_CMD_COUNT(le32_to_cpu(p->error_status)); + for (i = 0; i < r->cmd_count; i++) + r->reports[i] = le64_to_cpu(p->msg_reports[i]); + *src_id = 0; + + return r; +} + +static const struct scmi_event base_events[] = { + { + .id = SCMI_EVENT_BASE_ERROR_EVENT, + .max_payld_sz = sizeof(struct scmi_base_error_notify_payld), + .max_report_sz = sizeof(struct scmi_base_error_report) + + SCMI_BASE_MAX_CMD_ERR_COUNT * sizeof(u64), + }, +}; + +static const struct scmi_event_ops base_event_ops = { + .set_notify_enabled = scmi_base_set_notify_enabled, + .fill_custom_report = scmi_base_fill_custom_report, +}; + int scmi_base_protocol_init(struct scmi_handle *h) { int id, ret; @@ -256,6 +350,12 @@ int scmi_base_protocol_init(struct scmi_handle *h) dev_dbg(dev, "Found %d protocol(s) %d agent(s)\n", rev->num_protocols, rev->num_agents); + scmi_register_protocol_events(handle, SCMI_PROTOCOL_BASE, + (4 * SCMI_PROTO_QUEUE_SZ), + &base_event_ops, base_events, + ARRAY_SIZE(base_events), + SCMI_BASE_NUM_SOURCES); + for (id = 0; id < rev->num_agents; id++) { scmi_base_discover_agent_get(handle, id, name); dev_dbg(dev, "Agent %d: %s\n", id, name); diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index d04d66be596d..46d98be92466 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -377,6 +377,7 @@ enum scmi_notification_events { SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED = 0x1, SCMI_EVENT_SENSOR_TRIP_POINT_EVENT = 0x0, SCMI_EVENT_RESET_ISSUED = 0x0, + SCMI_EVENT_BASE_ERROR_EVENT = 0x0, }; struct scmi_power_state_changed_report { @@ -415,4 +416,12 @@ struct scmi_reset_issued_report { u32 reset_state; }; +struct scmi_base_error_report { + u64 timestamp; + u32 agent_id; + bool fatal; + u16 cmd_count; + u64 reports[0]; +}; + #endif /* _LINUX_SCMI_PROTOCOL_H */ -- cgit v1.2.3 From 3022c6a1b4b76c40aab9dcdf5142c501f3d3ae8c Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 25 Jun 2020 16:51:03 -0700 Subject: driver-core: Introduce DEVICE_ATTR_ADMIN_{RO,RW} A common pattern for using plain DEVICE_ATTR() instead of DEVICE_ATTR_RO() and DEVICE_ATTR_RW() is for attributes that want to limit read to only root. I.e. many users of DEVICE_ATTR() are specifying 0400 or 0600 for permissions. Given the expectation that CAP_SYS_ADMIN is needed to access these sensitive attributes and an explicit helper with the _ADMIN_ identifier for DEVICE_ATTR_ADMIN_{RO,RW}. Cc: "Rafael J. 
Wysocki" Signed-off-by: Dan Williams Link: https://lore.kernel.org/r/159312906372.1850128.11611897078988158727.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Greg Kroah-Hartman --- include/linux/device.h | 4 ++++ include/linux/sysfs.h | 7 +++++++ 2 files changed, 11 insertions(+) (limited to 'include') diff --git a/include/linux/device.h b/include/linux/device.h index 15460a5ac024..d7c2570368fa 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -128,8 +128,12 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, __ATTR_PREALLOC(_name, _mode, _show, _store) #define DEVICE_ATTR_RW(_name) \ struct device_attribute dev_attr_##_name = __ATTR_RW(_name) +#define DEVICE_ATTR_ADMIN_RW(_name) \ + struct device_attribute dev_attr_##_name = __ATTR_RW_MODE(_name, 0600) #define DEVICE_ATTR_RO(_name) \ struct device_attribute dev_attr_##_name = __ATTR_RO(_name) +#define DEVICE_ATTR_ADMIN_RO(_name) \ + struct device_attribute dev_attr_##_name = __ATTR_RO_MODE(_name, 0400) #define DEVICE_ATTR_WO(_name) \ struct device_attribute dev_attr_##_name = __ATTR_WO(_name) #define DEVICE_ULONG_ATTR(_name, _mode, _var) \ diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 86067dbe7745..34e84122f635 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -123,6 +123,13 @@ struct attribute_group { .show = _name##_show, \ } +#define __ATTR_RW_MODE(_name, _mode) { \ + .attr = { .name = __stringify(_name), \ + .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \ + .show = _name##_show, \ + .store = _name##_store, \ +} + #define __ATTR_WO(_name) { \ .attr = { .name = __stringify(_name), .mode = 0200 }, \ .store = _name##_store, \ -- cgit v1.2.3 From 9fc54012d7aec4b23f2042dc37b26738f3e68942 Mon Sep 17 00:00:00 2001 From: Richard Guy Briggs Date: Tue, 30 Jun 2020 18:17:07 -0400 Subject: audit: remove unused !CONFIG_AUDITSYSCALL __audit_inode* stubs Added 14 years ago in commit 73241ccca0f7 ("[PATCH] Collect more inode information during syscall processing.") but never used however needlessly churned no less than 10 times since. Remove the unused __audit_inode* stubs in the !CONFIG_AUDITSYSCALL case. Signed-off-by: Richard Guy Briggs Signed-off-by: Paul Moore --- include/linux/audit.h | 8 -------- 1 file changed, 8 deletions(-) (limited to 'include') diff --git a/include/linux/audit.h b/include/linux/audit.h index d93739f7a35a..b5478c64bc69 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -579,14 +579,6 @@ static inline struct filename *audit_reusename(const __user char *name) } static inline void audit_getname(struct filename *name) { } -static inline void __audit_inode(struct filename *name, - const struct dentry *dentry, - unsigned int flags) -{ } -static inline void __audit_inode_child(struct inode *parent, - const struct dentry *dentry, - const unsigned char type) -{ } static inline void audit_inode(struct filename *name, const struct dentry *dentry, unsigned int aflags) -- cgit v1.2.3 From ca7d8b980b67f133317525c4273e144116ee1ae5 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Tue, 16 Jun 2020 23:03:48 +0300 Subject: memory: add Renesas RPC-IF driver Add the memory driver for Renesas RPC-IF which registers either SPI or HyperFLash device depending on the contents of the device tree subnode. It also provides the absract "back end" device APIs that can be used by the "front end" SPI/MTD drivers to talk to the real hardware. Based on the original patch by Mason Yang . 
Signed-off-by: Sergei Shtylyov Link: https://lore.kernel.org/r/9a3606ec-d4d0-c63a-4fb6-631ab38e621c@cogentembedded.com Signed-off-by: Mark Brown --- drivers/memory/Kconfig | 9 + drivers/memory/Makefile | 1 + drivers/memory/renesas-rpc-if.c | 603 ++++++++++++++++++++++++++++++++++++++++ include/memory/renesas-rpc-if.h | 87 ++++++ 4 files changed, 700 insertions(+) create mode 100644 drivers/memory/renesas-rpc-if.c create mode 100644 include/memory/renesas-rpc-if.h (limited to 'include') diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig index 04368ee2a809..e438d79857da 100644 --- a/drivers/memory/Kconfig +++ b/drivers/memory/Kconfig @@ -174,6 +174,15 @@ config PL353_SMC This driver is for the ARM PL351/PL353 Static Memory Controller(SMC) module. +config RENESAS_RPCIF + tristate "Renesas RPC-IF driver" + depends on ARCH_RENESAS + select REGMAP_MMIO + help + This supports Renesas R-Car Gen3 RPC-IF which provides either SPI + host or HyperFlash. You'll have to select individual components + under the corresponding menu. + source "drivers/memory/samsung/Kconfig" source "drivers/memory/tegra/Kconfig" diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile index 6d7e3e64ba62..d105f8ebe8b8 100644 --- a/drivers/memory/Makefile +++ b/drivers/memory/Makefile @@ -22,6 +22,7 @@ obj-$(CONFIG_JZ4780_NEMC) += jz4780-nemc.o obj-$(CONFIG_MTK_SMI) += mtk-smi.o obj-$(CONFIG_DA8XX_DDRCTL) += da8xx-ddrctl.o obj-$(CONFIG_PL353_SMC) += pl353-smc.o +obj-$(CONFIG_RENESAS_RPCIF) += renesas-rpc-if.o obj-$(CONFIG_SAMSUNG_MC) += samsung/ obj-$(CONFIG_TEGRA_MC) += tegra/ diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c new file mode 100644 index 000000000000..88f51ec8f1d1 --- /dev/null +++ b/drivers/memory/renesas-rpc-if.c @@ -0,0 +1,603 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Renesas RPC-IF core driver + * + * Copyright (C) 2018-2019 Renesas Solutions Corp. + * Copyright (C) 2019 Macronix International Co., Ltd. + * Copyright (C) 2019-2020 Cogent Embedded, Inc. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define RPCIF_CMNCR 0x0000 /* R/W */ +#define RPCIF_CMNCR_MD BIT(31) +#define RPCIF_CMNCR_SFDE BIT(24) /* undocumented but must be set */ +#define RPCIF_CMNCR_MOIIO3(val) (((val) & 0x3) << 22) +#define RPCIF_CMNCR_MOIIO2(val) (((val) & 0x3) << 20) +#define RPCIF_CMNCR_MOIIO1(val) (((val) & 0x3) << 18) +#define RPCIF_CMNCR_MOIIO0(val) (((val) & 0x3) << 16) +#define RPCIF_CMNCR_MOIIO_HIZ (RPCIF_CMNCR_MOIIO0(3) | \ + RPCIF_CMNCR_MOIIO1(3) | \ + RPCIF_CMNCR_MOIIO2(3) | RPCIF_CMNCR_MOIIO3(3)) +#define RPCIF_CMNCR_IO3FV(val) (((val) & 0x3) << 14) /* undocumented */ +#define RPCIF_CMNCR_IO2FV(val) (((val) & 0x3) << 12) /* undocumented */ +#define RPCIF_CMNCR_IO0FV(val) (((val) & 0x3) << 8) +#define RPCIF_CMNCR_IOFV_HIZ (RPCIF_CMNCR_IO0FV(3) | RPCIF_CMNCR_IO2FV(3) | \ + RPCIF_CMNCR_IO3FV(3)) +#define RPCIF_CMNCR_BSZ(val) (((val) & 0x3) << 0) + +#define RPCIF_SSLDR 0x0004 /* R/W */ +#define RPCIF_SSLDR_SPNDL(d) (((d) & 0x7) << 16) +#define RPCIF_SSLDR_SLNDL(d) (((d) & 0x7) << 8) +#define RPCIF_SSLDR_SCKDL(d) (((d) & 0x7) << 0) + +#define RPCIF_DRCR 0x000C /* R/W */ +#define RPCIF_DRCR_SSLN BIT(24) +#define RPCIF_DRCR_RBURST(v) ((((v) - 1) & 0x1F) << 16) +#define RPCIF_DRCR_RCF BIT(9) +#define RPCIF_DRCR_RBE BIT(8) +#define RPCIF_DRCR_SSLE BIT(0) + +#define RPCIF_DRCMR 0x0010 /* R/W */ +#define RPCIF_DRCMR_CMD(c) (((c) & 0xFF) << 16) +#define RPCIF_DRCMR_OCMD(c) (((c) & 0xFF) << 0) + +#define RPCIF_DREAR 0x0014 /* R/W */ +#define RPCIF_DREAR_EAV(c) (((c) & 0xF) << 16) +#define RPCIF_DREAR_EAC(c) (((c) & 0x7) << 0) + +#define RPCIF_DROPR 0x0018 /* R/W */ + +#define RPCIF_DRENR 0x001C /* R/W */ +#define RPCIF_DRENR_CDB(o) (u32)((((o) & 0x3) << 30)) +#define RPCIF_DRENR_OCDB(o) (((o) & 0x3) << 28) +#define RPCIF_DRENR_ADB(o) (((o) & 0x3) << 24) +#define RPCIF_DRENR_OPDB(o) (((o) & 0x3) << 20) +#define RPCIF_DRENR_DRDB(o) (((o) & 0x3) << 16) +#define RPCIF_DRENR_DME BIT(15) +#define RPCIF_DRENR_CDE BIT(14) +#define RPCIF_DRENR_OCDE BIT(12) +#define RPCIF_DRENR_ADE(v) (((v) & 0xF) << 8) +#define RPCIF_DRENR_OPDE(v) (((v) & 0xF) << 4) + +#define RPCIF_SMCR 0x0020 /* R/W */ +#define RPCIF_SMCR_SSLKP BIT(8) +#define RPCIF_SMCR_SPIRE BIT(2) +#define RPCIF_SMCR_SPIWE BIT(1) +#define RPCIF_SMCR_SPIE BIT(0) + +#define RPCIF_SMCMR 0x0024 /* R/W */ +#define RPCIF_SMCMR_CMD(c) (((c) & 0xFF) << 16) +#define RPCIF_SMCMR_OCMD(c) (((c) & 0xFF) << 0) + +#define RPCIF_SMADR 0x0028 /* R/W */ + +#define RPCIF_SMOPR 0x002C /* R/W */ +#define RPCIF_SMOPR_OPD3(o) (((o) & 0xFF) << 24) +#define RPCIF_SMOPR_OPD2(o) (((o) & 0xFF) << 16) +#define RPCIF_SMOPR_OPD1(o) (((o) & 0xFF) << 8) +#define RPCIF_SMOPR_OPD0(o) (((o) & 0xFF) << 0) + +#define RPCIF_SMENR 0x0030 /* R/W */ +#define RPCIF_SMENR_CDB(o) (((o) & 0x3) << 30) +#define RPCIF_SMENR_OCDB(o) (((o) & 0x3) << 28) +#define RPCIF_SMENR_ADB(o) (((o) & 0x3) << 24) +#define RPCIF_SMENR_OPDB(o) (((o) & 0x3) << 20) +#define RPCIF_SMENR_SPIDB(o) (((o) & 0x3) << 16) +#define RPCIF_SMENR_DME BIT(15) +#define RPCIF_SMENR_CDE BIT(14) +#define RPCIF_SMENR_OCDE BIT(12) +#define RPCIF_SMENR_ADE(v) (((v) & 0xF) << 8) +#define RPCIF_SMENR_OPDE(v) (((v) & 0xF) << 4) +#define RPCIF_SMENR_SPIDE(v) (((v) & 0xF) << 0) + +#define RPCIF_SMRDR0 0x0038 /* R */ +#define RPCIF_SMRDR1 0x003C /* R */ +#define RPCIF_SMWDR0 0x0040 /* W */ +#define RPCIF_SMWDR1 0x0044 /* W */ + +#define RPCIF_CMNSR 0x0048 /* R */ +#define RPCIF_CMNSR_SSLF BIT(1) +#define RPCIF_CMNSR_TEND BIT(0) + +#define RPCIF_DRDMCR 
0x0058 /* R/W */ +#define RPCIF_DMDMCR_DMCYC(v) ((((v) - 1) & 0x1F) << 0) + +#define RPCIF_DRDRENR 0x005C /* R/W */ +#define RPCIF_DRDRENR_HYPE(v) (((v) & 0x7) << 12) +#define RPCIF_DRDRENR_ADDRE BIT(8) +#define RPCIF_DRDRENR_OPDRE BIT(4) +#define RPCIF_DRDRENR_DRDRE BIT(0) + +#define RPCIF_SMDMCR 0x0060 /* R/W */ +#define RPCIF_SMDMCR_DMCYC(v) ((((v) - 1) & 0x1F) << 0) + +#define RPCIF_SMDRENR 0x0064 /* R/W */ +#define RPCIF_SMDRENR_HYPE(v) (((v) & 0x7) << 12) +#define RPCIF_SMDRENR_ADDRE BIT(8) +#define RPCIF_SMDRENR_OPDRE BIT(4) +#define RPCIF_SMDRENR_SPIDRE BIT(0) + +#define RPCIF_PHYCNT 0x007C /* R/W */ +#define RPCIF_PHYCNT_CAL BIT(31) +#define RPCIF_PHYCNT_OCTA(v) (((v) & 0x3) << 22) +#define RPCIF_PHYCNT_EXDS BIT(21) +#define RPCIF_PHYCNT_OCT BIT(20) +#define RPCIF_PHYCNT_DDRCAL BIT(19) +#define RPCIF_PHYCNT_HS BIT(18) +#define RPCIF_PHYCNT_STRTIM(v) (((v) & 0x7) << 15) +#define RPCIF_PHYCNT_WBUF2 BIT(4) +#define RPCIF_PHYCNT_WBUF BIT(2) +#define RPCIF_PHYCNT_PHYMEM(v) (((v) & 0x3) << 0) + +#define RPCIF_PHYOFFSET1 0x0080 /* R/W */ +#define RPCIF_PHYOFFSET1_DDRTMG(v) (((v) & 0x3) << 28) + +#define RPCIF_PHYOFFSET2 0x0084 /* R/W */ +#define RPCIF_PHYOFFSET2_OCTTMG(v) (((v) & 0x7) << 8) + +#define RPCIF_PHYINT 0x0088 /* R/W */ +#define RPCIF_PHYINT_WPVAL BIT(1) + +#define RPCIF_DIRMAP_SIZE 0x4000000 + +static const struct regmap_range rpcif_volatile_ranges[] = { + regmap_reg_range(RPCIF_SMRDR0, RPCIF_SMRDR1), + regmap_reg_range(RPCIF_SMWDR0, RPCIF_SMWDR1), + regmap_reg_range(RPCIF_CMNSR, RPCIF_CMNSR), +}; + +static const struct regmap_access_table rpcif_volatile_table = { + .yes_ranges = rpcif_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(rpcif_volatile_ranges), +}; + +static const struct regmap_config rpcif_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .fast_io = true, + .max_register = RPCIF_PHYINT, + .volatile_table = &rpcif_volatile_table, +}; + +int rpcif_sw_init(struct rpcif *rpc, struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct resource *res; + void __iomem *base; + + rpc->dev = dev; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + rpc->regmap = devm_regmap_init_mmio(&pdev->dev, base, + &rpcif_regmap_config); + if (IS_ERR(rpc->regmap)) { + dev_err(&pdev->dev, + "failed to init regmap for rpcif, error %ld\n", + PTR_ERR(rpc->regmap)); + return PTR_ERR(rpc->regmap); + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap"); + rpc->size = resource_size(res); + rpc->dirmap = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(rpc->dirmap)) + rpc->dirmap = NULL; + + rpc->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); + if (IS_ERR(rpc->rstc)) + return PTR_ERR(rpc->rstc); + + return 0; +} +EXPORT_SYMBOL(rpcif_sw_init); + +void rpcif_enable_rpm(struct rpcif *rpc) +{ + pm_runtime_enable(rpc->dev); +} +EXPORT_SYMBOL(rpcif_enable_rpm); + +void rpcif_disable_rpm(struct rpcif *rpc) +{ + pm_runtime_put_sync(rpc->dev); +} +EXPORT_SYMBOL(rpcif_disable_rpm); + +void rpcif_hw_init(struct rpcif *rpc, bool hyperflash) +{ + u32 dummy; + + pm_runtime_get_sync(rpc->dev); + + /* + * NOTE: The 0x260 are undocumented bits, but they must be set. + * RPCIF_PHYCNT_STRTIM is strobe timing adjustment bits, + * 0x0 : the delay is biggest, + * 0x1 : the delay is 2nd biggest, + * On H3 ES1.x, the value should be 0, while on others, + * the value should be 7. 
+ */ + regmap_write(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_STRTIM(7) | + RPCIF_PHYCNT_PHYMEM(hyperflash ? 3 : 0) | 0x260); + + /* + * NOTE: The 0x1511144 are undocumented bits, but they must be set + * for RPCIF_PHYOFFSET1. + * The 0x31 are undocumented bits, but they must be set + * for RPCIF_PHYOFFSET2. + */ + regmap_write(rpc->regmap, RPCIF_PHYOFFSET1, 0x1511144 | + RPCIF_PHYOFFSET1_DDRTMG(3)); + regmap_write(rpc->regmap, RPCIF_PHYOFFSET2, 0x31 | + RPCIF_PHYOFFSET2_OCTTMG(4)); + + if (hyperflash) + regmap_update_bits(rpc->regmap, RPCIF_PHYINT, + RPCIF_PHYINT_WPVAL, 0); + + regmap_write(rpc->regmap, RPCIF_CMNCR, RPCIF_CMNCR_SFDE | + RPCIF_CMNCR_MOIIO_HIZ | RPCIF_CMNCR_IOFV_HIZ | + RPCIF_CMNCR_BSZ(hyperflash ? 1 : 0)); + /* Set RCF after BSZ update */ + regmap_write(rpc->regmap, RPCIF_DRCR, RPCIF_DRCR_RCF); + /* Dummy read according to spec */ + regmap_read(rpc->regmap, RPCIF_DRCR, &dummy); + regmap_write(rpc->regmap, RPCIF_SSLDR, RPCIF_SSLDR_SPNDL(7) | + RPCIF_SSLDR_SLNDL(7) | RPCIF_SSLDR_SCKDL(7)); + + pm_runtime_put(rpc->dev); + + rpc->bus_size = hyperflash ? 2 : 1; +} +EXPORT_SYMBOL(rpcif_hw_init); + +static int wait_msg_xfer_end(struct rpcif *rpc) +{ + u32 sts; + + return regmap_read_poll_timeout(rpc->regmap, RPCIF_CMNSR, sts, + sts & RPCIF_CMNSR_TEND, 0, + USEC_PER_SEC); +} + +static u8 rpcif_bits_set(struct rpcif *rpc, u32 nbytes) +{ + if (rpc->bus_size == 2) + nbytes /= 2; + nbytes = clamp(nbytes, 1U, 4U); + return GENMASK(3, 4 - nbytes); +} + +static u8 rpcif_bit_size(u8 buswidth) +{ + return buswidth > 4 ? 2 : ilog2(buswidth); +} + +void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs, + size_t *len) +{ + rpc->smcr = 0; + rpc->smadr = 0; + rpc->enable = 0; + rpc->command = 0; + rpc->option = 0; + rpc->dummy = 0; + rpc->ddr = 0; + rpc->xferlen = 0; + + if (op->cmd.buswidth) { + rpc->enable = RPCIF_SMENR_CDE | + RPCIF_SMENR_CDB(rpcif_bit_size(op->cmd.buswidth)); + rpc->command = RPCIF_SMCMR_CMD(op->cmd.opcode); + if (op->cmd.ddr) + rpc->ddr = RPCIF_SMDRENR_HYPE(0x5); + } + if (op->ocmd.buswidth) { + rpc->enable |= RPCIF_SMENR_OCDE | + RPCIF_SMENR_OCDB(rpcif_bit_size(op->ocmd.buswidth)); + rpc->command |= RPCIF_SMCMR_OCMD(op->ocmd.opcode); + } + + if (op->addr.buswidth) { + rpc->enable |= + RPCIF_SMENR_ADB(rpcif_bit_size(op->addr.buswidth)); + if (op->addr.nbytes == 4) + rpc->enable |= RPCIF_SMENR_ADE(0xF); + else + rpc->enable |= RPCIF_SMENR_ADE(GENMASK( + 2, 3 - op->addr.nbytes)); + if (op->addr.ddr) + rpc->ddr |= RPCIF_SMDRENR_ADDRE; + + if (offs && len) + rpc->smadr = *offs; + else + rpc->smadr = op->addr.val; + } + + if (op->dummy.buswidth) { + rpc->enable |= RPCIF_SMENR_DME; + rpc->dummy = RPCIF_SMDMCR_DMCYC(op->dummy.ncycles / + op->dummy.buswidth); + } + + if (op->option.buswidth) { + rpc->enable |= RPCIF_SMENR_OPDE( + rpcif_bits_set(rpc, op->option.nbytes)) | + RPCIF_SMENR_OPDB(rpcif_bit_size(op->option.buswidth)); + if (op->option.ddr) + rpc->ddr |= RPCIF_SMDRENR_OPDRE; + rpc->option = op->option.val; + } + + rpc->dir = op->data.dir; + if (op->data.buswidth) { + u32 nbytes; + + rpc->buffer = op->data.buf.in; + switch (op->data.dir) { + case RPCIF_DATA_IN: + rpc->smcr = RPCIF_SMCR_SPIRE; + break; + case RPCIF_DATA_OUT: + rpc->smcr = RPCIF_SMCR_SPIWE; + break; + default: + break; + } + if (op->data.ddr) + rpc->ddr |= RPCIF_SMDRENR_SPIDRE; + + if (offs && len) + nbytes = *len; + else + nbytes = op->data.nbytes; + rpc->xferlen = nbytes; + + rpc->enable |= RPCIF_SMENR_SPIDE(rpcif_bits_set(rpc, nbytes)) | + 
RPCIF_SMENR_SPIDB(rpcif_bit_size(op->data.buswidth)); + } +} +EXPORT_SYMBOL(rpcif_prepare); + +int rpcif_manual_xfer(struct rpcif *rpc) +{ + u32 smenr, smcr, pos = 0, max = 4; + int ret = 0; + + if (rpc->bus_size == 2) + max = 8; + + pm_runtime_get_sync(rpc->dev); + + regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, + RPCIF_PHYCNT_CAL, RPCIF_PHYCNT_CAL); + regmap_update_bits(rpc->regmap, RPCIF_CMNCR, + RPCIF_CMNCR_MD, RPCIF_CMNCR_MD); + regmap_write(rpc->regmap, RPCIF_SMCMR, rpc->command); + regmap_write(rpc->regmap, RPCIF_SMOPR, rpc->option); + regmap_write(rpc->regmap, RPCIF_SMDMCR, rpc->dummy); + regmap_write(rpc->regmap, RPCIF_SMDRENR, rpc->ddr); + smenr = rpc->enable; + + switch (rpc->dir) { + case RPCIF_DATA_OUT: + while (pos < rpc->xferlen) { + u32 nbytes = rpc->xferlen - pos; + u32 data[2]; + + smcr = rpc->smcr | RPCIF_SMCR_SPIE; + if (nbytes > max) { + nbytes = max; + smcr |= RPCIF_SMCR_SSLKP; + } + + memcpy(data, rpc->buffer + pos, nbytes); + if (nbytes > 4) { + regmap_write(rpc->regmap, RPCIF_SMWDR1, + data[0]); + regmap_write(rpc->regmap, RPCIF_SMWDR0, + data[1]); + } else if (nbytes > 2) { + regmap_write(rpc->regmap, RPCIF_SMWDR0, + data[0]); + } else { + regmap_write(rpc->regmap, RPCIF_SMWDR0, + data[0] << 16); + } + + regmap_write(rpc->regmap, RPCIF_SMADR, + rpc->smadr + pos); + regmap_write(rpc->regmap, RPCIF_SMENR, smenr); + regmap_write(rpc->regmap, RPCIF_SMCR, smcr); + ret = wait_msg_xfer_end(rpc); + if (ret) + goto err_out; + + pos += nbytes; + smenr = rpc->enable & + ~RPCIF_SMENR_CDE & ~RPCIF_SMENR_ADE(0xF); + } + break; + case RPCIF_DATA_IN: + /* + * RPC-IF spoils the data for the commands without an address + * phase (like RDID) in the manual mode, so we'll have to work + * around this issue by using the external address space read + * mode instead. 
+ */ + if (!(smenr & RPCIF_SMENR_ADE(0xF)) && rpc->dirmap) { + u32 dummy; + + regmap_update_bits(rpc->regmap, RPCIF_CMNCR, + RPCIF_CMNCR_MD, 0); + regmap_write(rpc->regmap, RPCIF_DRCR, + RPCIF_DRCR_RBURST(32) | RPCIF_DRCR_RBE); + regmap_write(rpc->regmap, RPCIF_DRCMR, rpc->command); + regmap_write(rpc->regmap, RPCIF_DREAR, + RPCIF_DREAR_EAC(1)); + regmap_write(rpc->regmap, RPCIF_DROPR, rpc->option); + regmap_write(rpc->regmap, RPCIF_DRENR, + smenr & ~RPCIF_SMENR_SPIDE(0xF)); + regmap_write(rpc->regmap, RPCIF_DRDMCR, rpc->dummy); + regmap_write(rpc->regmap, RPCIF_DRDRENR, rpc->ddr); + memcpy_fromio(rpc->buffer, rpc->dirmap, rpc->xferlen); + regmap_write(rpc->regmap, RPCIF_DRCR, RPCIF_DRCR_RCF); + /* Dummy read according to spec */ + regmap_read(rpc->regmap, RPCIF_DRCR, &dummy); + break; + } + while (pos < rpc->xferlen) { + u32 nbytes = rpc->xferlen - pos; + u32 data[2]; + + if (nbytes > max) + nbytes = max; + + regmap_write(rpc->regmap, RPCIF_SMADR, + rpc->smadr + pos); + regmap_write(rpc->regmap, RPCIF_SMENR, smenr); + regmap_write(rpc->regmap, RPCIF_SMCR, + rpc->smcr | RPCIF_SMCR_SPIE); + ret = wait_msg_xfer_end(rpc); + if (ret) + goto err_out; + + if (nbytes > 4) { + regmap_read(rpc->regmap, RPCIF_SMRDR1, + &data[0]); + regmap_read(rpc->regmap, RPCIF_SMRDR0, + &data[1]); + } else if (nbytes > 2) { + regmap_read(rpc->regmap, RPCIF_SMRDR0, + &data[0]); + } else { + regmap_read(rpc->regmap, RPCIF_SMRDR0, + &data[0]); + data[0] >>= 16; + } + memcpy(rpc->buffer + pos, data, nbytes); + + pos += nbytes; + } + break; + default: + regmap_write(rpc->regmap, RPCIF_SMENR, rpc->enable); + regmap_write(rpc->regmap, RPCIF_SMCR, + rpc->smcr | RPCIF_SMCR_SPIE); + ret = wait_msg_xfer_end(rpc); + if (ret) + goto err_out; + } + +exit: + pm_runtime_put(rpc->dev); + return ret; + +err_out: + ret = reset_control_reset(rpc->rstc); + rpcif_hw_init(rpc, rpc->bus_size == 2); + goto exit; +} +EXPORT_SYMBOL(rpcif_manual_xfer); + +ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf) +{ + loff_t from = offs & (RPCIF_DIRMAP_SIZE - 1); + size_t size = RPCIF_DIRMAP_SIZE - from; + + if (len > size) + len = size; + + pm_runtime_get_sync(rpc->dev); + + regmap_update_bits(rpc->regmap, RPCIF_CMNCR, RPCIF_CMNCR_MD, 0); + regmap_write(rpc->regmap, RPCIF_DRCR, 0); + regmap_write(rpc->regmap, RPCIF_DRCMR, rpc->command); + regmap_write(rpc->regmap, RPCIF_DREAR, + RPCIF_DREAR_EAV(offs >> 25) | RPCIF_DREAR_EAC(1)); + regmap_write(rpc->regmap, RPCIF_DROPR, rpc->option); + regmap_write(rpc->regmap, RPCIF_DRENR, + rpc->enable & ~RPCIF_SMENR_SPIDE(0xF)); + regmap_write(rpc->regmap, RPCIF_DRDMCR, rpc->dummy); + regmap_write(rpc->regmap, RPCIF_DRDRENR, rpc->ddr); + + memcpy_fromio(buf, rpc->dirmap + from, len); + + pm_runtime_put(rpc->dev); + + return len; +} +EXPORT_SYMBOL(rpcif_dirmap_read); + +static int rpcif_probe(struct platform_device *pdev) +{ + struct platform_device *vdev; + struct device_node *flash; + const char *name; + + flash = of_get_next_child(pdev->dev.of_node, NULL); + if (!flash) { + dev_warn(&pdev->dev, "no flash node found\n"); + return -ENODEV; + } + + if (of_device_is_compatible(flash, "jedec,spi-nor")) { + name = "rpc-if-spi"; + } else if (of_device_is_compatible(flash, "cfi-flash")) { + name = "rpc-if-hyperflash"; + } else { + dev_warn(&pdev->dev, "unknown flash type\n"); + return -ENODEV; + } + + vdev = platform_device_alloc(name, pdev->id); + if (!vdev) + return -ENOMEM; + vdev->dev.parent = &pdev->dev; + platform_set_drvdata(pdev, vdev); + return platform_device_add(vdev); +} + +static 
int rpcif_remove(struct platform_device *pdev) +{ + struct platform_device *vdev = platform_get_drvdata(pdev); + + platform_device_unregister(vdev); + + return 0; +} + +static const struct of_device_id rpcif_of_match[] = { + { .compatible = "renesas,rcar-gen3-rpc-if", }, + {}, +}; +MODULE_DEVICE_TABLE(of, rpcif_of_match); + +static struct platform_driver rpcif_driver = { + .probe = rpcif_probe, + .remove = rpcif_remove, + .driver = { + .name = "rpc-if", + .of_match_table = rpcif_of_match, + }, +}; +module_platform_driver(rpcif_driver); + +MODULE_DESCRIPTION("Renesas RPC-IF core driver"); +MODULE_LICENSE("GPL v2"); diff --git a/include/memory/renesas-rpc-if.h b/include/memory/renesas-rpc-if.h new file mode 100644 index 000000000000..9ad136682c47 --- /dev/null +++ b/include/memory/renesas-rpc-if.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Renesas RPC-IF core driver + * + * Copyright (C) 2018~2019 Renesas Solutions Corp. + * Copyright (C) 2019 Macronix International Co., Ltd. + * Copyright (C) 2019-2020 Cogent Embedded, Inc. + */ + +#ifndef __RENESAS_RPC_IF_H +#define __RENESAS_RPC_IF_H + +#include + +enum rpcif_data_dir { + RPCIF_NO_DATA, + RPCIF_DATA_IN, + RPCIF_DATA_OUT, +}; + +struct rpcif_op { + struct { + u8 buswidth; + u8 opcode; + bool ddr; + } cmd, ocmd; + + struct { + u8 nbytes; + u8 buswidth; + bool ddr; + u64 val; + } addr; + + struct { + u8 ncycles; + u8 buswidth; + } dummy; + + struct { + u8 nbytes; + u8 buswidth; + bool ddr; + u32 val; + } option; + + struct { + u8 buswidth; + unsigned int nbytes; + enum rpcif_data_dir dir; + bool ddr; + union { + void *in; + const void *out; + } buf; + } data; +}; + +struct rpcif { + struct device *dev; + void __iomem *dirmap; + struct regmap *regmap; + struct reset_control *rstc; + size_t size; + enum rpcif_data_dir dir; + u8 bus_size; + void *buffer; + u32 xferlen; + u32 smcr; + u32 smadr; + u32 command; /* DRCMR or SMCMR */ + u32 option; /* DROPR or SMOPR */ + u32 enable; /* DRENR or SMENR */ + u32 dummy; /* DRDMCR or SMDMCR */ + u32 ddr; /* DRDRENR or SMDRENR */ +}; + +int rpcif_sw_init(struct rpcif *rpc, struct device *dev); +void rpcif_hw_init(struct rpcif *rpc, bool hyperflash); +void rpcif_enable_rpm(struct rpcif *rpc); +void rpcif_disable_rpm(struct rpcif *rpc); +void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs, + size_t *len); +int rpcif_manual_xfer(struct rpcif *rpc); +ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf); + +#endif // __RENESAS_RPC_IF_H -- cgit v1.2.3 From d40f0b6f2e21f2400ae8b1b120d11877d9ffd8ec Mon Sep 17 00:00:00 2001 From: Douglas Anderson Date: Mon, 29 Jun 2020 16:41:06 -0700 Subject: spi: Avoid setting the chip select if we don't need to On some SPI controllers (like spi-geni-qcom) setting the chip select is a heavy operation. For instance on spi-geni-qcom, with the current code, is was measured as taking upwards of 20 us. Even on SPI controllers that aren't as heavy, setting the chip select is at least something like a MMIO operation over some peripheral bus which isn't as fast as a RAM access. While it would be good to find ways to mitigate problems like this in the drivers for those SPI controllers, it can also be noted that the SPI framework could also help out. Specifically, in some situations, we can see the SPI framework calling the driver's set_cs() with the same parameter several times in a row. 
This is specifically observed when looking at the way the Chrome OS EC SPI driver (cros_ec_spi) works but other drivers likely trip it to some extent. Let's solve this by caching the chip select state in the core and only calling into the controller if there was a change. We check not only the "enable" state but also the chip select mode (active high or active low) since controllers may care about both the mode and the enable flag in their callback. Signed-off-by: Douglas Anderson Link: https://lore.kernel.org/r/20200629164103.1.Ied8e8ad8bbb2df7f947e3bc5ea1c315e041785a2@changeid Signed-off-by: Mark Brown --- drivers/spi/spi.c | 11 +++++++++++ include/linux/spi/spi.h | 4 ++++ 2 files changed, 15 insertions(+) (limited to 'include') diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 6fa56590bba2..d4ba723a30da 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -778,6 +778,17 @@ static void spi_set_cs(struct spi_device *spi, bool enable) { bool enable1 = enable; + /* + * Avoid calling into the driver (or doing delays) if the chip select + * isn't actually changing from the last time this was called. + */ + if ((spi->controller->last_cs_enable == enable) && + (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH))) + return; + + spi->controller->last_cs_enable = enable; + spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH; + if (!spi->controller->set_cs_timing) { if (enable1) spi_delay_exec(&spi->controller->cs_setup, NULL); diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index b4917df79637..0e67a9a3a1d3 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -368,6 +368,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * @cur_msg_prepared: spi_prepare_message was called for the currently * in-flight message * @cur_msg_mapped: message has been mapped for DMA + * @last_cs_enable: was enable true on the last call to set_cs. + * @last_cs_mode_high: was (mode & SPI_CS_HIGH) true on the last call to set_cs. * @xfer_completion: used by core transfer_one_message() * @busy: message pump is busy * @running: message pump is running @@ -604,6 +606,8 @@ struct spi_controller { bool auto_runtime_pm; bool cur_msg_prepared; bool cur_msg_mapped; + bool last_cs_enable; + bool last_cs_mode_high; bool fallback; struct completion xfer_completion; size_t max_dma_len; -- cgit v1.2.3 From 2cef30d7bd8b8fbddeb74e3753c29d4248c094e0 Mon Sep 17 00:00:00 2001 From: Denis Kirjanov Date: Mon, 29 Jun 2020 16:13:27 +0300 Subject: xen: netif.h: add a new extra type for XDP The patch adds a new extra type to be able to diffirentiate between RX responses on xen-netfront side with the adjusted offset required for XDP processing. The offset value from a guest is passed via xenstore. Signed-off-by: Denis Kirjanov Signed-off-by: David S. Miller --- include/xen/interface/io/netif.h | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h index 4f20dbc42910..2194322c3c7f 100644 --- a/include/xen/interface/io/netif.h +++ b/include/xen/interface/io/netif.h @@ -160,6 +160,19 @@ * be applied if it is set. */ +/* + * "xdp-headroom" is used to request that extra space is added + * for XDP processing. The value is measured in bytes and passed by + * the frontend to be consistent between both ends. + * If the value is greater than zero that means that + * an RX response is going to be passed to an XDP program for processing. 
+ * XEN_NETIF_MAX_XDP_HEADROOM defines the maximum headroom offset in bytes + * + * "feature-xdp-headroom" is set to "1" by the netback side like other features + * so a guest can check if an XDP program can be processed. + */ +#define XEN_NETIF_MAX_XDP_HEADROOM 0x7FFF + /* * Control ring * ============ @@ -846,7 +859,8 @@ struct xen_netif_tx_request { #define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */ #define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */ #define XEN_NETIF_EXTRA_TYPE_HASH (4) /* u.hash */ -#define XEN_NETIF_EXTRA_TYPE_MAX (5) +#define XEN_NETIF_EXTRA_TYPE_XDP (5) /* u.xdp */ +#define XEN_NETIF_EXTRA_TYPE_MAX (6) /* xen_netif_extra_info_t flags. */ #define _XEN_NETIF_EXTRA_FLAG_MORE (0) @@ -879,6 +893,10 @@ struct xen_netif_extra_info { uint8_t algorithm; uint8_t value[4]; } hash; + struct { + uint16_t headroom; + uint16_t pad[2]; + } xdp; uint16_t pad[3]; } u; }; -- cgit v1.2.3 From a3b658cfb66497525278cbf852913a04dbaae992 Mon Sep 17 00:00:00 2001 From: Jarod Wilson Date: Tue, 30 Jun 2020 14:49:41 -0400 Subject: bonding: allow xfrm offload setup post-module-load At the moment, bonding xfrm crypto offload can only be set up if the bonding module is loaded with active-backup mode already set. We need to be able to make this work with bonds set to AB after the bonding driver has already been loaded. So what's done here is: 1) move #define BOND_XFRM_FEATURES to net/bonding.h so it can be used by both bond_main.c and bond_options.c 2) set BOND_XFRM_FEATURES in bond_dev->hw_features universally, rather than only when loading in AB mode 3) wire up xfrmdev_ops universally too 4) disable BOND_XFRM_FEATURES in bond_dev->features if not AB 5) exit early (non-AB case) from bond_ipsec_offload_ok, to prevent a performance hit from traversing into the underlying drivers 5) toggle BOND_XFRM_FEATURES in bond_dev->wanted_features and call netdev_change_features() from bond_option_mode_set() In my local testing, I can change bonding modes back and forth on the fly, have hardware offload work when I'm in AB, and see no performance penalty to non-AB software encryption, despite having xfrm bits all wired up for all modes now. Fixes: 18cb261afd7b ("bonding: support hardware encryption offload to slaves") Reported-by: Huy Nguyen CC: Saeed Mahameed CC: Jay Vosburgh CC: Veaceslav Falico CC: Andy Gospodarek CC: "David S. Miller" CC: Jeff Kirsher CC: Jakub Kicinski CC: Steffen Klassert CC: Herbert Xu CC: netdev@vger.kernel.org CC: intel-wired-lan@lists.osuosl.org Signed-off-by: Jarod Wilson Signed-off-by: David S. 
Miller --- drivers/net/bonding/bond_main.c | 19 ++++++++++--------- drivers/net/bonding/bond_options.c | 8 ++++++++ include/net/bonding.h | 5 +++++ 3 files changed, 23 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b3479584cc16..2adf6ce20a38 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -434,6 +434,9 @@ static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) struct slave *curr_active = rtnl_dereference(bond->curr_active_slave); struct net_device *slave_dev = curr_active->dev; + if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) + return true; + if (!(slave_dev->xfrmdev_ops && slave_dev->xfrmdev_ops->xdo_dev_offload_ok)) { slave_warn(bond_dev, slave_dev, "%s: no slave xdo_dev_offload_ok\n", __func__); @@ -1218,11 +1221,6 @@ static netdev_features_t bond_fix_features(struct net_device *dev, #define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ NETIF_F_RXCSUM | NETIF_F_ALL_TSO) -#ifdef CONFIG_XFRM_OFFLOAD -#define BOND_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \ - NETIF_F_GSO_ESP) -#endif /* CONFIG_XFRM_OFFLOAD */ - #define BOND_MPLS_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ NETIF_F_ALL_TSO) @@ -4654,8 +4652,7 @@ void bond_setup(struct net_device *bond_dev) #ifdef CONFIG_XFRM_OFFLOAD /* set up xfrm device ops (only supported in active-backup right now) */ - if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) - bond_dev->xfrmdev_ops = &bond_xfrmdev_ops; + bond_dev->xfrmdev_ops = &bond_xfrmdev_ops; bond->xs = NULL; #endif /* CONFIG_XFRM_OFFLOAD */ @@ -4678,11 +4675,15 @@ void bond_setup(struct net_device *bond_dev) bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4; #ifdef CONFIG_XFRM_OFFLOAD - if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) - bond_dev->hw_features |= BOND_XFRM_FEATURES; + bond_dev->hw_features |= BOND_XFRM_FEATURES; #endif /* CONFIG_XFRM_OFFLOAD */ bond_dev->features |= bond_dev->hw_features; bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; +#ifdef CONFIG_XFRM_OFFLOAD + /* Disable XFRM features if this isn't an active-backup config */ + if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) + bond_dev->features &= ~BOND_XFRM_FEATURES; +#endif /* CONFIG_XFRM_OFFLOAD */ } /* Destroy a bonding device. 
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index ddb3916d3506..9abfaae1c6f7 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -767,6 +767,14 @@ static int bond_option_mode_set(struct bonding *bond, if (newval->value == BOND_MODE_ALB) bond->params.tlb_dynamic_lb = 1; +#ifdef CONFIG_XFRM_OFFLOAD + if (newval->value == BOND_MODE_ACTIVEBACKUP) + bond->dev->wanted_features |= BOND_XFRM_FEATURES; + else + bond->dev->wanted_features &= ~BOND_XFRM_FEATURES; + netdev_change_features(bond->dev); +#endif /* CONFIG_XFRM_OFFLOAD */ + /* don't cache arp_validate between modes */ bond->params.arp_validate = BOND_ARP_VALIDATE_NONE; bond->params.mode = newval->value; diff --git a/include/net/bonding.h b/include/net/bonding.h index a00e1764e9b1..7d132cc1e584 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -86,6 +86,11 @@ #define bond_for_each_slave_rcu(bond, pos, iter) \ netdev_for_each_lower_private_rcu((bond)->dev, pos, iter) +#ifdef CONFIG_XFRM_OFFLOAD +#define BOND_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \ + NETIF_F_GSO_ESP) +#endif /* CONFIG_XFRM_OFFLOAD */ + #ifdef CONFIG_NET_POLL_CONTROLLER extern atomic_t netpoll_block_tx; -- cgit v1.2.3 From 62e9dd177732843ae6c5b9d2ed61e7c9538fa276 Mon Sep 17 00:00:00 2001 From: Shyam Sundar Date: Tue, 30 Jun 2020 03:22:28 -0700 Subject: scsi: qla2xxx: Change in PUREX to handle FPIN ELS requests SAN Congestion Management generates ELS pkts whose size can vary and be > 64 bytes. Change the PUREX handling code to support non-standard ELS pkt size. Link: https://lore.kernel.org/r/20200630102229.29660-2-njavali@marvell.com Reviewed-by: Himanshu Madhani Signed-off-by: Shyam Sundar Signed-off-by: Arun Easi Signed-off-by: Nilesh Javali Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_def.h | 15 +++++- drivers/scsi/qla2xxx/qla_gbl.h | 3 +- drivers/scsi/qla2xxx/qla_isr.c | 116 ++++++++++++++++++++++++++++++----------- drivers/scsi/qla2xxx/qla_mbx.c | 22 ++++++-- drivers/scsi/qla2xxx/qla_os.c | 19 +++++-- include/uapi/scsi/fc/fc_els.h | 2 + 6 files changed, 134 insertions(+), 43 deletions(-) (limited to 'include') diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 42dbf90d4651..9a0f2314fe7b 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -34,6 +34,8 @@ #include #include +#include + /* Big endian Fibre Channel S_ID (source ID) or D_ID (destination ID). */ typedef struct { uint8_t domain; @@ -1304,7 +1306,6 @@ static inline bool qla2xxx_is_valid_mbs(unsigned int mbs) #define RNID_TYPE_ASIC_TEMP 0xC #define ELS_CMD_MAP_SIZE 32 -#define ELS_COMMAND_RDP 0x18 /* * Firmware state codes from get firmware state mailbox command @@ -4522,10 +4523,19 @@ struct active_regions { #define QLA_SET_DATA_RATE_NOLR 1 #define QLA_SET_DATA_RATE_LR 2 /* Set speed and initiate LR */ +#define QLA_DEFAULT_PAYLOAD_SIZE 64 +/* + * This item might be allocated with a size > sizeof(struct purex_item). + * The "size" variable gives the size of the payload (which + * is variable) starting at "iocb". 
+ */ struct purex_item { struct list_head list; struct scsi_qla_host *vha; - void (*process_item)(struct scsi_qla_host *vha, void *pkt); + void (*process_item)(struct scsi_qla_host *vha, + struct purex_item *pkt); + atomic_t in_use; + uint16_t size; struct { uint8_t iocb[64]; } iocb; @@ -4725,6 +4735,7 @@ typedef struct scsi_qla_host { struct list_head head; spinlock_t lock; } purex_list; + struct purex_item default_item; struct name_list_extended gnl; /* Count of active session/fcport */ diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 061f91b521b3..9cf33d05e3f8 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -229,7 +229,8 @@ void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *, int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *); int qla24xx_post_relogin_work(struct scsi_qla_host *vha); void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *); -void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt); +void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, + struct purex_item *pkt); /* * Global Functions in qla_mid.c source file. diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index cf0800546740..3bbfff20e3a6 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -31,35 +31,11 @@ const char *const port_state_str[] = { "ONLINE" }; -static void qla24xx_purex_iocb(scsi_qla_host_t *vha, void *pkt, - void (*process_item)(struct scsi_qla_host *vha, void *pkt)) -{ - struct purex_list *list = &vha->purex_list; - struct purex_item *item; - ulong flags; - - item = kzalloc(sizeof(*item), GFP_KERNEL); - if (!item) { - ql_log(ql_log_warn, vha, 0x5092, - ">> Failed allocate purex list item.\n"); - return; - } - - item->vha = vha; - item->process_item = process_item; - memcpy(&item->iocb, pkt, sizeof(item->iocb)); - - spin_lock_irqsave(&list->lock, flags); - list_add_tail(&item->list, &list->head); - spin_unlock_irqrestore(&list->lock, flags); - - set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags); -} - static void -qla24xx_process_abts(struct scsi_qla_host *vha, void *pkt) +qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt) { - struct abts_entry_24xx *abts = pkt; + struct abts_entry_24xx *abts = + (struct abts_entry_24xx *)&pkt->iocb; struct qla_hw_data *ha = vha->hw; struct els_entry_24xx *rsp_els; struct abts_entry_24xx *abts_rsp; @@ -789,6 +765,74 @@ qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) } } +struct purex_item * +qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size) +{ + struct purex_item *item = NULL; + uint8_t item_hdr_size = sizeof(*item); + + if (size > QLA_DEFAULT_PAYLOAD_SIZE) { + item = kzalloc(item_hdr_size + + (size - QLA_DEFAULT_PAYLOAD_SIZE), GFP_ATOMIC); + } else { + if (atomic_inc_return(&vha->default_item.in_use) == 1) { + item = &vha->default_item; + goto initialize_purex_header; + } else { + item = kzalloc(item_hdr_size, GFP_ATOMIC); + } + } + if (!item) { + ql_log(ql_log_warn, vha, 0x5092, + ">> Failed allocate purex list item.\n"); + + return NULL; + } + +initialize_purex_header: + item->vha = vha; + item->size = size; + return item; +} + +static void +qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt, + void (*process_item)(struct scsi_qla_host *vha, + struct purex_item *pkt)) +{ + struct purex_list *list = &vha->purex_list; + ulong flags; + + pkt->process_item = process_item; + + spin_lock_irqsave(&list->lock, flags); + list_add_tail(&pkt->list, 
&list->head); + spin_unlock_irqrestore(&list->lock, flags); + + set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags); +} + +/** + * qla24xx_copy_std_pkt() - Copy over purex ELS which is + * contained in a single IOCB. + * purex packet. + * @vha: SCSI driver HA context + * @pkt: ELS packet + */ +struct purex_item +*qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt) +{ + struct purex_item *item; + + item = qla24xx_alloc_purex_item(vha, + QLA_DEFAULT_PAYLOAD_SIZE); + if (!item) + return item; + + memcpy(&item->iocb, pkt, sizeof(item->iocb)); + return item; +} + /** * qla2x00_async_event() - Process aynchronous events. * @vha: SCSI driver HA context @@ -3229,6 +3273,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, { struct sts_entry_24xx *pkt; struct qla_hw_data *ha = vha->hw; + struct purex_item *pure_item; if (!ha->flags.fw_started) return; @@ -3280,8 +3325,12 @@ process_err: break; case ABTS_RECV_24XX: if (qla_ini_mode_enabled(vha)) { - qla24xx_purex_iocb(vha, pkt, - qla24xx_process_abts); + pure_item = qla24xx_copy_std_pkt(vha, pkt); + if (!pure_item) + break; + + qla24xx_queue_purex_item(vha, pure_item, + qla24xx_process_abts); break; } if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || @@ -3332,13 +3381,18 @@ process_err: { struct purex_entry_24xx *purex = (void *)pkt; - if (purex->els_frame_payload[3] != ELS_COMMAND_RDP) { + if (purex->els_frame_payload[3] != ELS_RDP) { ql_dbg(ql_dbg_init, vha, 0x5091, "Discarding ELS Request opcode %#x...\n", purex->els_frame_payload[3]); break; } - qla24xx_purex_iocb(vha, pkt, qla24xx_process_purex_rdp); + pure_item = qla24xx_copy_std_pkt(vha, pkt); + if (!pure_item) + break; + + qla24xx_queue_purex_item(vha, pure_item, + qla24xx_process_purex_rdp); break; } default: diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index df31ee0d59b2..b29d3831ea73 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -59,6 +59,7 @@ static struct rom_cmd { { MBC_IOCB_COMMAND_A64 }, { MBC_GET_ADAPTER_LOOP_ID }, { MBC_READ_SFP }, + { MBC_SET_RNID_PARAMS }, { MBC_GET_RNID_PARAMS }, { MBC_GET_SET_ZIO_THRESHOLD }, }; @@ -4866,6 +4867,7 @@ qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma, return rval; } +#define PUREX_CMD_COUNT 2 int qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha) { @@ -4874,12 +4876,12 @@ qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha) mbx_cmd_t *mcp = &mc; uint8_t *els_cmd_map; dma_addr_t els_cmd_map_dma; - uint cmd_opcode = ELS_COMMAND_RDP; - uint index = cmd_opcode / 8; - uint bit = cmd_opcode % 8; + uint8_t cmd_opcode[PUREX_CMD_COUNT]; + uint8_t i, index, purex_bit; struct qla_hw_data *ha = vha->hw; - if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) && !IS_QLA27XX(ha)) + if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) && + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return QLA_SUCCESS; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1197, @@ -4893,7 +4895,17 @@ qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha) return QLA_MEMORY_ALLOC_FAILED; } - els_cmd_map[index] |= 1 << bit; + memset(els_cmd_map, 0, ELS_CMD_MAP_SIZE); + + /* List of Purex ELS */ + cmd_opcode[0] = ELS_FPIN; + cmd_opcode[1] = ELS_RDP; + + for (i = 0; i < PUREX_CMD_COUNT; i++) { + index = cmd_opcode[i] / 8; + purex_bit = cmd_opcode[i] % 8; + els_cmd_map[index] |= 1 << purex_bit; + } mcp->mb[0] = MBC_SET_RNID_PARAMS; mcp->mb[1] = RNID_TYPE_ELS_CMD << 8; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index e92fad99338c..80ce22cfc1b9 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ 
b/drivers/scsi/qla2xxx/qla_os.c @@ -5893,10 +5893,12 @@ qla25xx_rdp_port_speed_currently(struct qla_hw_data *ha) * vha: SCSI qla host * purex: RDP request received by HBA */ -void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt) +void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, + struct purex_item *item) { struct qla_hw_data *ha = vha->hw; - struct purex_entry_24xx *purex = pkt; + struct purex_entry_24xx *purex = + (struct purex_entry_24xx *)&item->iocb; dma_addr_t rsp_els_dma; dma_addr_t rsp_payload_dma; dma_addr_t stat_dma; @@ -6306,6 +6308,15 @@ dealloc: rsp_els, rsp_els_dma); } +void +qla24xx_free_purex_item(struct purex_item *item) +{ + if (item == &item->vha->default_item) + memset(&item->vha->default_item, 0, sizeof(struct purex_item)); + else + kfree(item); +} + void qla24xx_process_purex_list(struct purex_list *list) { struct list_head head = LIST_HEAD_INIT(head); @@ -6318,8 +6329,8 @@ void qla24xx_process_purex_list(struct purex_list *list) list_for_each_entry_safe(item, next, &head, list) { list_del(&item->list); - item->process_item(item->vha, &item->iocb); - kfree(item); + item->process_item(item->vha, item); + qla24xx_free_purex_item(item); } } diff --git a/include/uapi/scsi/fc/fc_els.h b/include/uapi/scsi/fc/fc_els.h index 66318c44acd7..8c704e510e39 100644 --- a/include/uapi/scsi/fc/fc_els.h +++ b/include/uapi/scsi/fc/fc_els.h @@ -41,6 +41,7 @@ enum fc_els_cmd { ELS_REC = 0x13, /* read exchange concise */ ELS_SRR = 0x14, /* sequence retransmission request */ ELS_FPIN = 0x16, /* Fabric Performance Impact Notification */ + ELS_RDP = 0x18, /* Read Diagnostic Parameters */ ELS_RDF = 0x19, /* Register Diagnostic Functions */ ELS_PRLI = 0x20, /* process login */ ELS_PRLO = 0x21, /* process logout */ @@ -110,6 +111,7 @@ enum fc_els_cmd { [ELS_REC] = "REC", \ [ELS_SRR] = "SRR", \ [ELS_FPIN] = "FPIN", \ + [ELS_RDP] = "RDP", \ [ELS_RDF] = "RDF", \ [ELS_PRLI] = "PRLI", \ [ELS_PRLO] = "PRLO", \ -- cgit v1.2.3 From 10dd8573b09e84b81539d939d55ebdb6a36c5f3a Mon Sep 17 00:00:00 2001 From: Quentin Perret Date: Mon, 29 Jun 2020 13:54:59 +0530 Subject: cpufreq: Register governors at core_initcall Currently, most CPUFreq governors are registered at the core_initcall time when the given governor is the default one, and the module_init time otherwise. In preparation for letting users specify the default governor on the kernel command line, change all of them to be registered at the core_initcall unconditionally, as it is already the case for the schedutil and performance governors. This will allow us to assume that builtin governors have been registered before the built-in CPUFreq drivers probe. And since all governors have similar init/exit patterns now, introduce two new macros, cpufreq_governor_{init,exit}(), to factorize the code. Acked-by: Viresh Kumar Signed-off-by: Quentin Perret Signed-off-by: Viresh Kumar [ rjw: Changelog ] Signed-off-by: Rafael J. 
Wysocki --- arch/powerpc/platforms/cell/cpufreq_spudemand.c | 26 ++----------------------- drivers/cpufreq/cpufreq_conservative.c | 22 +++++---------------- drivers/cpufreq/cpufreq_ondemand.c | 24 ++++++----------------- drivers/cpufreq/cpufreq_performance.c | 14 ++----------- drivers/cpufreq/cpufreq_powersave.c | 18 +++-------------- drivers/cpufreq/cpufreq_userspace.c | 18 +++-------------- include/linux/cpufreq.h | 14 +++++++++++++ kernel/sched/cpufreq_schedutil.c | 6 +----- 8 files changed, 36 insertions(+), 106 deletions(-) (limited to 'include') diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c index 55b31eadb3c8..ca7849e113d7 100644 --- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c +++ b/arch/powerpc/platforms/cell/cpufreq_spudemand.c @@ -126,30 +126,8 @@ static struct cpufreq_governor spu_governor = { .stop = spu_gov_stop, .owner = THIS_MODULE, }; - -/* - * module init and destoy - */ - -static int __init spu_gov_init(void) -{ - int ret; - - ret = cpufreq_register_governor(&spu_governor); - if (ret) - printk(KERN_ERR "registration of governor failed\n"); - return ret; -} - -static void __exit spu_gov_exit(void) -{ - cpufreq_unregister_governor(&spu_governor); -} - - -module_init(spu_gov_init); -module_exit(spu_gov_exit); +cpufreq_governor_init(spu_governor); +cpufreq_governor_exit(spu_governor); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Christian Krafft "); - diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 737ff3b9c2c0..aa39ff31ec9f 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -322,17 +322,7 @@ static struct dbs_governor cs_governor = { .start = cs_start, }; -#define CPU_FREQ_GOV_CONSERVATIVE (&cs_governor.gov) - -static int __init cpufreq_gov_dbs_init(void) -{ - return cpufreq_register_governor(CPU_FREQ_GOV_CONSERVATIVE); -} - -static void __exit cpufreq_gov_dbs_exit(void) -{ - cpufreq_unregister_governor(CPU_FREQ_GOV_CONSERVATIVE); -} +#define CPU_FREQ_GOV_CONSERVATIVE (cs_governor.gov) MODULE_AUTHOR("Alexander Clouter "); MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for " @@ -343,11 +333,9 @@ MODULE_LICENSE("GPL"); #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE struct cpufreq_governor *cpufreq_default_governor(void) { - return CPU_FREQ_GOV_CONSERVATIVE; + return &CPU_FREQ_GOV_CONSERVATIVE; } - -core_initcall(cpufreq_gov_dbs_init); -#else -module_init(cpufreq_gov_dbs_init); #endif -module_exit(cpufreq_gov_dbs_exit); + +cpufreq_governor_init(CPU_FREQ_GOV_CONSERVATIVE); +cpufreq_governor_exit(CPU_FREQ_GOV_CONSERVATIVE); diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 82a4d37ddecb..ac361a8b1d3b 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -408,7 +408,7 @@ static struct dbs_governor od_dbs_gov = { .start = od_start, }; -#define CPU_FREQ_GOV_ONDEMAND (&od_dbs_gov.gov) +#define CPU_FREQ_GOV_ONDEMAND (od_dbs_gov.gov) static void od_set_powersave_bias(unsigned int powersave_bias) { @@ -429,7 +429,7 @@ static void od_set_powersave_bias(unsigned int powersave_bias) continue; policy = cpufreq_cpu_get_raw(cpu); - if (!policy || policy->governor != CPU_FREQ_GOV_ONDEMAND) + if (!policy || policy->governor != &CPU_FREQ_GOV_ONDEMAND) continue; policy_dbs = policy->governor_data; @@ -461,16 +461,6 @@ void od_unregister_powersave_bias_handler(void) } EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler); -static int 
__init cpufreq_gov_dbs_init(void) -{ - return cpufreq_register_governor(CPU_FREQ_GOV_ONDEMAND); -} - -static void __exit cpufreq_gov_dbs_exit(void) -{ - cpufreq_unregister_governor(CPU_FREQ_GOV_ONDEMAND); -} - MODULE_AUTHOR("Venkatesh Pallipadi "); MODULE_AUTHOR("Alexey Starikovskiy "); MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for " @@ -480,11 +470,9 @@ MODULE_LICENSE("GPL"); #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND struct cpufreq_governor *cpufreq_default_governor(void) { - return CPU_FREQ_GOV_ONDEMAND; + return &CPU_FREQ_GOV_ONDEMAND; } - -core_initcall(cpufreq_gov_dbs_init); -#else -module_init(cpufreq_gov_dbs_init); #endif -module_exit(cpufreq_gov_dbs_exit); + +cpufreq_governor_init(CPU_FREQ_GOV_ONDEMAND); +cpufreq_governor_exit(CPU_FREQ_GOV_ONDEMAND); diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c index def9afe0f5b8..71c1d9aba772 100644 --- a/drivers/cpufreq/cpufreq_performance.c +++ b/drivers/cpufreq/cpufreq_performance.c @@ -23,16 +23,6 @@ static struct cpufreq_governor cpufreq_gov_performance = { .limits = cpufreq_gov_performance_limits, }; -static int __init cpufreq_gov_performance_init(void) -{ - return cpufreq_register_governor(&cpufreq_gov_performance); -} - -static void __exit cpufreq_gov_performance_exit(void) -{ - cpufreq_unregister_governor(&cpufreq_gov_performance); -} - #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE struct cpufreq_governor *cpufreq_default_governor(void) { @@ -50,5 +40,5 @@ MODULE_AUTHOR("Dominik Brodowski "); MODULE_DESCRIPTION("CPUfreq policy governor 'performance'"); MODULE_LICENSE("GPL"); -core_initcall(cpufreq_gov_performance_init); -module_exit(cpufreq_gov_performance_exit); +cpufreq_governor_init(cpufreq_gov_performance); +cpufreq_governor_exit(cpufreq_gov_performance); diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c index 1ae66019eb83..7749522355b5 100644 --- a/drivers/cpufreq/cpufreq_powersave.c +++ b/drivers/cpufreq/cpufreq_powersave.c @@ -23,16 +23,6 @@ static struct cpufreq_governor cpufreq_gov_powersave = { .owner = THIS_MODULE, }; -static int __init cpufreq_gov_powersave_init(void) -{ - return cpufreq_register_governor(&cpufreq_gov_powersave); -} - -static void __exit cpufreq_gov_powersave_exit(void) -{ - cpufreq_unregister_governor(&cpufreq_gov_powersave); -} - MODULE_AUTHOR("Dominik Brodowski "); MODULE_DESCRIPTION("CPUfreq policy governor 'powersave'"); MODULE_LICENSE("GPL"); @@ -42,9 +32,7 @@ struct cpufreq_governor *cpufreq_default_governor(void) { return &cpufreq_gov_powersave; } - -core_initcall(cpufreq_gov_powersave_init); -#else -module_init(cpufreq_gov_powersave_init); #endif -module_exit(cpufreq_gov_powersave_exit); + +cpufreq_governor_init(cpufreq_gov_powersave); +cpufreq_governor_exit(cpufreq_gov_powersave); diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c index b43e7cd502c5..50a4d7846580 100644 --- a/drivers/cpufreq/cpufreq_userspace.c +++ b/drivers/cpufreq/cpufreq_userspace.c @@ -126,16 +126,6 @@ static struct cpufreq_governor cpufreq_gov_userspace = { .owner = THIS_MODULE, }; -static int __init cpufreq_gov_userspace_init(void) -{ - return cpufreq_register_governor(&cpufreq_gov_userspace); -} - -static void __exit cpufreq_gov_userspace_exit(void) -{ - cpufreq_unregister_governor(&cpufreq_gov_userspace); -} - MODULE_AUTHOR("Dominik Brodowski , " "Russell King "); MODULE_DESCRIPTION("CPUfreq policy governor 'userspace'"); @@ -146,9 +136,7 @@ struct cpufreq_governor 
*cpufreq_default_governor(void) { return &cpufreq_gov_userspace; } - -core_initcall(cpufreq_gov_userspace_init); -#else -module_init(cpufreq_gov_userspace_init); #endif -module_exit(cpufreq_gov_userspace_exit); + +cpufreq_governor_init(cpufreq_gov_userspace); +cpufreq_governor_exit(cpufreq_gov_userspace); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 3494f6763597..e62b022cb07e 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -577,6 +577,20 @@ unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy); int cpufreq_register_governor(struct cpufreq_governor *governor); void cpufreq_unregister_governor(struct cpufreq_governor *governor); +#define cpufreq_governor_init(__governor) \ +static int __init __governor##_init(void) \ +{ \ + return cpufreq_register_governor(&__governor); \ +} \ +core_initcall(__governor##_init) + +#define cpufreq_governor_exit(__governor) \ +static void __exit __governor##_exit(void) \ +{ \ + return cpufreq_unregister_governor(&__governor); \ +} \ +module_exit(__governor##_exit) + struct cpufreq_governor *cpufreq_default_governor(void); struct cpufreq_governor *cpufreq_fallback_governor(void); diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 7fbaee24c824..402a09af9f43 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -909,11 +909,7 @@ struct cpufreq_governor *cpufreq_default_governor(void) } #endif -static int __init sugov_register(void) -{ - return cpufreq_register_governor(&schedutil_gov); -} -core_initcall(sugov_register); +cpufreq_governor_init(schedutil_gov); #ifdef CONFIG_ENERGY_MODEL extern bool sched_energy_update; -- cgit v1.2.3 From 67dd0772396905a108c37202d4a5949ab69131a0 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Mon, 29 Jun 2020 08:50:05 +0200 Subject: device: remove 'extern' attribute from function prototypes in device.h Functions are declared 'extern' implicitly by the compiler. There's no reason to prepend every prototype with it. Remove the 'extern' keyword from all function declarations in linux/device.h. 
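[Editor's illustration, not part of the patch: the change is purely cosmetic because a file-scope function prototype already has external linkage whether or not 'extern' is spelled out. The identifiers below are made up for the example and do not come from device.h.]

/* Illustration only: both prototypes declare the same function with
 * external linkage; the 'extern' keyword adds nothing. */
extern int probe_widget(int id);   /* old style */
int probe_widget(int id);          /* identical meaning after the cleanup */

int probe_widget(int id)
{
        return id ? 0 : -1;
}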
Signed-off-by: Bartosz Golaszewski Link: https://lore.kernel.org/r/20200629065008.27620-4-brgl@bgdev.pl Signed-off-by: Greg Kroah-Hartman --- include/linux/device.h | 223 ++++++++++++++++++++++++------------------------- 1 file changed, 107 insertions(+), 116 deletions(-) (limited to 'include') diff --git a/include/linux/device.h b/include/linux/device.h index d7c2570368fa..9a62f7f43d55 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -149,68 +149,66 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, struct device_attribute dev_attr_##_name = \ __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) -extern int device_create_file(struct device *device, - const struct device_attribute *entry); -extern void device_remove_file(struct device *dev, - const struct device_attribute *attr); -extern bool device_remove_file_self(struct device *dev, - const struct device_attribute *attr); -extern int __must_check device_create_bin_file(struct device *dev, +int device_create_file(struct device *device, + const struct device_attribute *entry); +void device_remove_file(struct device *dev, + const struct device_attribute *attr); +bool device_remove_file_self(struct device *dev, + const struct device_attribute *attr); +int __must_check device_create_bin_file(struct device *dev, const struct bin_attribute *attr); -extern void device_remove_bin_file(struct device *dev, - const struct bin_attribute *attr); +void device_remove_bin_file(struct device *dev, + const struct bin_attribute *attr); /* device resource management */ typedef void (*dr_release_t)(struct device *dev, void *res); typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data); #ifdef CONFIG_DEBUG_DEVRES -extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, - int nid, const char *name) __malloc; +void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, + int nid, const char *name) __malloc; #define devres_alloc(release, size, gfp) \ __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release) #define devres_alloc_node(release, size, gfp, nid) \ __devres_alloc_node(release, size, gfp, nid, #release) #else -extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, - int nid) __malloc; +void *devres_alloc_node(dr_release_t release, size_t size, + gfp_t gfp, int nid) __malloc; static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp) { return devres_alloc_node(release, size, gfp, NUMA_NO_NODE); } #endif -extern void devres_for_each_res(struct device *dev, dr_release_t release, - dr_match_t match, void *match_data, - void (*fn)(struct device *, void *, void *), - void *data); -extern void devres_free(void *res); -extern void devres_add(struct device *dev, void *res); -extern void *devres_find(struct device *dev, dr_release_t release, - dr_match_t match, void *match_data); -extern void *devres_get(struct device *dev, void *new_res, - dr_match_t match, void *match_data); -extern void *devres_remove(struct device *dev, dr_release_t release, - dr_match_t match, void *match_data); -extern int devres_destroy(struct device *dev, dr_release_t release, - dr_match_t match, void *match_data); -extern int devres_release(struct device *dev, dr_release_t release, - dr_match_t match, void *match_data); +void devres_for_each_res(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data, + void (*fn)(struct device *, void *, void *), + void *data); +void devres_free(void *res); +void devres_add(struct 
device *dev, void *res); +void *devres_find(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data); +void *devres_get(struct device *dev, void *new_res, + dr_match_t match, void *match_data); +void *devres_remove(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data); +int devres_destroy(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data); +int devres_release(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data); /* devres group */ -extern void * __must_check devres_open_group(struct device *dev, void *id, - gfp_t gfp); -extern void devres_close_group(struct device *dev, void *id); -extern void devres_remove_group(struct device *dev, void *id); -extern int devres_release_group(struct device *dev, void *id); +void * __must_check devres_open_group(struct device *dev, void *id, gfp_t gfp); +void devres_close_group(struct device *dev, void *id); +void devres_remove_group(struct device *dev, void *id); +int devres_release_group(struct device *dev, void *id); /* managed devm_k.alloc/kfree for device drivers */ -extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc; -extern __printf(3, 0) -char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, - va_list ap) __malloc; -extern __printf(3, 4) -char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) __malloc; +void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc; +__printf(3, 0) char *devm_kvasprintf(struct device *dev, gfp_t gfp, + const char *fmt, va_list ap) __malloc; +__printf(3, 4) char *devm_kasprintf(struct device *dev, gfp_t gfp, + const char *fmt, ...) __malloc; static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) { return devm_kmalloc(dev, size, gfp | __GFP_ZERO); @@ -230,16 +228,14 @@ static inline void *devm_kcalloc(struct device *dev, { return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO); } -extern void devm_kfree(struct device *dev, const void *p); -extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc; -extern const char *devm_kstrdup_const(struct device *dev, - const char *s, gfp_t gfp); -extern void *devm_kmemdup(struct device *dev, const void *src, size_t len, - gfp_t gfp); +void devm_kfree(struct device *dev, const void *p); +char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc; +const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp); +void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp); -extern unsigned long devm_get_free_pages(struct device *dev, - gfp_t gfp_mask, unsigned int order); -extern void devm_free_pages(struct device *dev, unsigned long addr); +unsigned long devm_get_free_pages(struct device *dev, + gfp_t gfp_mask, unsigned int order); +void devm_free_pages(struct device *dev, unsigned long addr); void __iomem *devm_ioremap_resource(struct device *dev, const struct resource *res); @@ -655,8 +651,7 @@ static inline const char *dev_name(const struct device *dev) return kobject_name(&dev->kobj); } -extern __printf(2, 3) -int dev_set_name(struct device *dev, const char *name, ...); +__printf(2, 3) int dev_set_name(struct device *dev, const char *name, ...); #ifdef CONFIG_NUMA static inline int dev_to_node(struct device *dev) @@ -813,39 +808,38 @@ static inline bool dev_has_sync_state(struct device *dev) /* * High level routines for use by the bus drivers */ -extern int __must_check device_register(struct device *dev); 
-extern void device_unregister(struct device *dev); -extern void device_initialize(struct device *dev); -extern int __must_check device_add(struct device *dev); -extern void device_del(struct device *dev); -extern int device_for_each_child(struct device *dev, void *data, - int (*fn)(struct device *dev, void *data)); -extern int device_for_each_child_reverse(struct device *dev, void *data, - int (*fn)(struct device *dev, void *data)); -extern struct device *device_find_child(struct device *dev, void *data, - int (*match)(struct device *dev, void *data)); -extern struct device *device_find_child_by_name(struct device *parent, - const char *name); -extern int device_rename(struct device *dev, const char *new_name); -extern int device_move(struct device *dev, struct device *new_parent, - enum dpm_order dpm_order); -extern int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid); -extern const char *device_get_devnode(struct device *dev, - umode_t *mode, kuid_t *uid, kgid_t *gid, - const char **tmp); +int __must_check device_register(struct device *dev); +void device_unregister(struct device *dev); +void device_initialize(struct device *dev); +int __must_check device_add(struct device *dev); +void device_del(struct device *dev); +int device_for_each_child(struct device *dev, void *data, + int (*fn)(struct device *dev, void *data)); +int device_for_each_child_reverse(struct device *dev, void *data, + int (*fn)(struct device *dev, void *data)); +struct device *device_find_child(struct device *dev, void *data, + int (*match)(struct device *dev, void *data)); +struct device *device_find_child_by_name(struct device *parent, + const char *name); +int device_rename(struct device *dev, const char *new_name); +int device_move(struct device *dev, struct device *new_parent, + enum dpm_order dpm_order); +int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid); +const char *device_get_devnode(struct device *dev, umode_t *mode, kuid_t *uid, + kgid_t *gid, const char **tmp); static inline bool device_supports_offline(struct device *dev) { return dev->bus && dev->bus->offline && dev->bus->online; } -extern void lock_device_hotplug(void); -extern void unlock_device_hotplug(void); -extern int lock_device_hotplug_sysfs(void); -extern int device_offline(struct device *dev); -extern int device_online(struct device *dev); -extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); -extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode); +void lock_device_hotplug(void); +void unlock_device_hotplug(void); +int lock_device_hotplug_sysfs(void); +int device_offline(struct device *dev); +int device_online(struct device *dev); +void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); +void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode); void device_set_of_node_from_dev(struct device *dev, const struct device *dev2); static inline int dev_num_vf(struct device *dev) @@ -858,14 +852,13 @@ static inline int dev_num_vf(struct device *dev) /* * Root device objects for grouping under /sys/devices */ -extern struct device *__root_device_register(const char *name, - struct module *owner); +struct device *__root_device_register(const char *name, struct module *owner); /* This is a macro to avoid include problems with THIS_MODULE */ #define root_device_register(name) \ __root_device_register(name, THIS_MODULE) -extern void root_device_unregister(struct device *root); +void root_device_unregister(struct device 
*root); static inline void *dev_get_platdata(const struct device *dev) { @@ -876,33 +869,31 @@ static inline void *dev_get_platdata(const struct device *dev) * Manual binding of a device to driver. See drivers/base/bus.c * for information on use. */ -extern int __must_check device_bind_driver(struct device *dev); -extern void device_release_driver(struct device *dev); -extern int __must_check device_attach(struct device *dev); -extern int __must_check driver_attach(struct device_driver *drv); -extern void device_initial_probe(struct device *dev); -extern int __must_check device_reprobe(struct device *dev); +int __must_check device_bind_driver(struct device *dev); +void device_release_driver(struct device *dev); +int __must_check device_attach(struct device *dev); +int __must_check driver_attach(struct device_driver *drv); +void device_initial_probe(struct device *dev); +int __must_check device_reprobe(struct device *dev); -extern bool device_is_bound(struct device *dev); +bool device_is_bound(struct device *dev); /* * Easy functions for dynamically creating devices on the fly */ -extern __printf(5, 6) -struct device *device_create(struct class *cls, struct device *parent, - dev_t devt, void *drvdata, - const char *fmt, ...); -extern __printf(6, 7) -struct device *device_create_with_groups(struct class *cls, - struct device *parent, dev_t devt, void *drvdata, - const struct attribute_group **groups, - const char *fmt, ...); -extern void device_destroy(struct class *cls, dev_t devt); - -extern int __must_check device_add_groups(struct device *dev, - const struct attribute_group **groups); -extern void device_remove_groups(struct device *dev, - const struct attribute_group **groups); +__printf(5, 6) struct device * +device_create(struct class *cls, struct device *parent, dev_t devt, + void *drvdata, const char *fmt, ...); +__printf(6, 7) struct device * +device_create_with_groups(struct class *cls, struct device *parent, dev_t devt, + void *drvdata, const struct attribute_group **groups, + const char *fmt, ...); +void device_destroy(struct class *cls, dev_t devt); + +int __must_check device_add_groups(struct device *dev, + const struct attribute_group **groups); +void device_remove_groups(struct device *dev, + const struct attribute_group **groups); static inline int __must_check device_add_group(struct device *dev, const struct attribute_group *grp) @@ -920,14 +911,14 @@ static inline void device_remove_group(struct device *dev, return device_remove_groups(dev, groups); } -extern int __must_check devm_device_add_groups(struct device *dev, +int __must_check devm_device_add_groups(struct device *dev, const struct attribute_group **groups); -extern void devm_device_remove_groups(struct device *dev, - const struct attribute_group **groups); -extern int __must_check devm_device_add_group(struct device *dev, - const struct attribute_group *grp); -extern void devm_device_remove_group(struct device *dev, - const struct attribute_group *grp); +void devm_device_remove_groups(struct device *dev, + const struct attribute_group **groups); +int __must_check devm_device_add_group(struct device *dev, + const struct attribute_group *grp); +void devm_device_remove_group(struct device *dev, + const struct attribute_group *grp); /* * Platform "fixup" functions - allow the platform to have their say @@ -944,21 +935,21 @@ extern int (*platform_notify_remove)(struct device *dev); * get_device - atomically increment the reference count for the device. 
* */ -extern struct device *get_device(struct device *dev); -extern void put_device(struct device *dev); -extern bool kill_device(struct device *dev); +struct device *get_device(struct device *dev); +void put_device(struct device *dev); +bool kill_device(struct device *dev); #ifdef CONFIG_DEVTMPFS -extern int devtmpfs_mount(void); +int devtmpfs_mount(void); #else static inline int devtmpfs_mount(void) { return 0; } #endif /* drivers/base/power/shutdown.c */ -extern void device_shutdown(void); +void device_shutdown(void); /* debugging and troubleshooting/diagnostic helpers. */ -extern const char *dev_driver_string(const struct device *dev); +const char *dev_driver_string(const struct device *dev); /* Device links interface. */ struct device_link *device_link_add(struct device *consumer, -- cgit v1.2.3 From 36847f9e3e56c192ef95e7669df38189443530a0 Mon Sep 17 00:00:00 2001 From: Richard Gong Date: Mon, 15 Jun 2020 09:29:05 -0500 Subject: firmware: stratix10-svc: correct reconfig flag and timeout values Correct the incorrect flag value for COMMAND_RECONFIG_FLAG_PARTIAL and increase FPGA reconfig timeout values so that Intel service layer and FPGA manager drivers can work with all versions of firmware. Signed-off-by: Richard Gong Link: https://lore.kernel.org/r/1592231348-31334-2-git-send-email-richard.gong@linux.intel.com Signed-off-by: Greg Kroah-Hartman --- include/linux/firmware/intel/stratix10-svc-client.h | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h index 64213c3e82f5..040bc3f3bedd 100644 --- a/include/linux/firmware/intel/stratix10-svc-client.h +++ b/include/linux/firmware/intel/stratix10-svc-client.h @@ -54,18 +54,17 @@ * Flag bit for COMMAND_RECONFIG * * COMMAND_RECONFIG_FLAG_PARTIAL: - * Set to FPGA configuration type (full or partial), the default - * is full reconfig. + * Set to FPGA configuration type (full or partial). */ -#define COMMAND_RECONFIG_FLAG_PARTIAL 0 +#define COMMAND_RECONFIG_FLAG_PARTIAL 1 /** * Timeout settings for service clients: * timeout value used in Stratix10 FPGA manager driver. * timeout value used in RSU driver */ -#define SVC_RECONFIG_REQUEST_TIMEOUT_MS 100 -#define SVC_RECONFIG_BUFFER_TIMEOUT_MS 240 +#define SVC_RECONFIG_REQUEST_TIMEOUT_MS 300 +#define SVC_RECONFIG_BUFFER_TIMEOUT_MS 720 #define SVC_RSU_REQUEST_TIMEOUT_MS 300 struct stratix10_svc_chan; -- cgit v1.2.3 From bf0e5bf68a207b14727caf13da576339590a9504 Mon Sep 17 00:00:00 2001 From: Richard Gong Date: Mon, 15 Jun 2020 09:29:06 -0500 Subject: firmware: stratix10-svc: extend svc to support new RSU features Extend Intel Stratix10 service layer driver to support new RSU DCMF versions and max retry parameter. DCMF = Decision Configuration Management Firmware. The max retry parameter is the maximum times the images is allowed to reload itself before giving up and starting RSU failover flow. 
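[Editor's sketch, not driver code: the helper below shows how a caller might unpack the four 32-bit DCMF version words from the two 64-bit SMC return registers, assuming the low-word-first packing implied by the "a1 = dcmf1 | dcmf0, a2 = dcmf3 | dcmf2" comment added to stratix10-smc.h further down in this patch. The function name and types are illustrative.]

#include <stdint.h>

/* a1 carries dcmf0 (low half) and dcmf1 (high half); a2 carries dcmf2
 * and dcmf3 in the same low/high order. */
static void unpack_dcmf_versions(uint64_t a1, uint64_t a2, uint32_t dcmf[4])
{
        dcmf[0] = (uint32_t)a1;
        dcmf[1] = (uint32_t)(a1 >> 32);
        dcmf[2] = (uint32_t)a2;
        dcmf[3] = (uint32_t)(a2 >> 32);
}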
Signed-off-by: Richard Gong Link: https://lore.kernel.org/r/1592231348-31334-3-git-send-email-richard.gong@linux.intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/firmware/stratix10-svc.c | 17 +++++++++ include/linux/firmware/intel/stratix10-smc.h | 43 ++++++++++++++++++++++ .../linux/firmware/intel/stratix10-svc-client.h | 8 ++++ 3 files changed, 68 insertions(+) (limited to 'include') diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c index e0db8dbfc9d1..3aa489dba30a 100644 --- a/drivers/firmware/stratix10-svc.c +++ b/drivers/firmware/stratix10-svc.c @@ -305,9 +305,15 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data, cb_data->status = BIT(SVC_STATUS_COMPLETED); break; case COMMAND_RSU_RETRY: + case COMMAND_RSU_MAX_RETRY: cb_data->status = BIT(SVC_STATUS_OK); cb_data->kaddr1 = &res.a1; break; + case COMMAND_RSU_DCMF_VERSION: + cb_data->status = BIT(SVC_STATUS_OK); + cb_data->kaddr1 = &res.a1; + cb_data->kaddr2 = &res.a2; + break; default: pr_warn("it shouldn't happen\n"); break; @@ -406,6 +412,16 @@ static int svc_normal_to_secure_thread(void *data) a1 = 0; a2 = 0; break; + case COMMAND_RSU_MAX_RETRY: + a0 = INTEL_SIP_SMC_RSU_MAX_RETRY; + a1 = 0; + a2 = 0; + break; + case COMMAND_RSU_DCMF_VERSION: + a0 = INTEL_SIP_SMC_RSU_DCMF_VERSION; + a1 = 0; + a2 = 0; + break; default: pr_warn("it shouldn't happen\n"); break; @@ -474,6 +490,7 @@ static int svc_normal_to_secure_thread(void *data) * doesn't support RSU notify or retry */ if ((pdata->command == COMMAND_RSU_RETRY) || + (pdata->command == COMMAND_RSU_MAX_RETRY) || (pdata->command == COMMAND_RSU_NOTIFY)) { cbdata->status = BIT(SVC_STATUS_NO_SUPPORT); diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h index 682dbf694007..c3e5ab014caf 100644 --- a/include/linux/firmware/intel/stratix10-smc.h +++ b/include/linux/firmware/intel/stratix10-smc.h @@ -361,3 +361,46 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) #define INTEL_SIP_SMC_FUNCID_RSU_RETRY_COUNTER 15 #define INTEL_SIP_SMC_RSU_RETRY_COUNTER \ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_RETRY_COUNTER) + +/** + * Request INTEL_SIP_SMC_RSU_DCMF_VERSION + * + * Sync call used by service driver at EL1 to query DCMF (Decision + * Configuration Management Firmware) version from FW + * + * Call register usage: + * a0 INTEL_SIP_SMC_RSU_DCMF_VERSION + * a1-7 not used + * + * Return status + * a0 INTEL_SIP_SMC_STATUS_OK + * a1 dcmf1 | dcmf0 + * a2 dcmf3 | dcmf2 + * + * Or + * + * a0 INTEL_SIP_SMC_RSU_ERROR + */ +#define INTEL_SIP_SMC_FUNCID_RSU_DCMF_VERSION 16 +#define INTEL_SIP_SMC_RSU_DCMF_VERSION \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_DCMF_VERSION) + +/** + * Request INTEL_SIP_SMC_RSU_MAX_RETRY + * + * Sync call used by service driver at EL1 to query max retry value from FW + * + * Call register usage: + * a0 INTEL_SIP_SMC_RSU_MAX_RETRY + * a1-7 not used + * + * Return status + * a0 INTEL_SIP_SMC_STATUS_OK + * a1 max retry value + * + * Or + * a0 INTEL_SIP_SMC_RSU_ERROR + */ +#define INTEL_SIP_SMC_FUNCID_RSU_MAX_RETRY 18 +#define INTEL_SIP_SMC_RSU_MAX_RETRY \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_MAX_RETRY) diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h index 040bc3f3bedd..a93d85932eb9 100644 --- a/include/linux/firmware/intel/stratix10-svc-client.h +++ b/include/linux/firmware/intel/stratix10-svc-client.h @@ -98,6 +98,12 @@ struct 
stratix10_svc_chan; * * @COMMAND_RSU_RETRY: query firmware for the current image's retry counter, * return status is SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_RSU_MAX_RETRY: query firmware for the max retry value, + * return status is SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_RSU_DCMF_VERSION: query firmware for the DCMF version, return status + * is SVC_STATUS_OK or SVC_STATUS_ERROR */ enum stratix10_svc_command_code { COMMAND_NOOP = 0, @@ -109,6 +115,8 @@ enum stratix10_svc_command_code { COMMAND_RSU_UPDATE, COMMAND_RSU_NOTIFY, COMMAND_RSU_RETRY, + COMMAND_RSU_MAX_RETRY, + COMMAND_RSU_DCMF_VERSION, }; /** -- cgit v1.2.3 From 1c537b2d729698717f01fcea13721818be5adde7 Mon Sep 17 00:00:00 2001 From: Anand K Mistry Date: Thu, 2 Jul 2020 16:23:17 +1000 Subject: regulator: mt6397: Move buck modes into header file This will allow device trees to make use of these constants. Signed-off-by: Anand K Mistry Link: https://lore.kernel.org/r/20200702162231.v2.1.Icf69e2041b1af4548347018186c3ba6310f53e66@changeid Signed-off-by: Mark Brown --- drivers/regulator/mt6397-regulator.c | 4 +--- include/dt-bindings/regulator/mediatek,mt6397-regulator.h | 15 +++++++++++++++ 2 files changed, 16 insertions(+), 3 deletions(-) create mode 100644 include/dt-bindings/regulator/mediatek,mt6397-regulator.h (limited to 'include') diff --git a/drivers/regulator/mt6397-regulator.c b/drivers/regulator/mt6397-regulator.c index 269c2a6028e8..d51e98ce1138 100644 --- a/drivers/regulator/mt6397-regulator.c +++ b/drivers/regulator/mt6397-regulator.c @@ -13,9 +13,7 @@ #include #include #include - -#define MT6397_BUCK_MODE_AUTO 0 -#define MT6397_BUCK_MODE_FORCE_PWM 1 +#include /* * MT6397 regulators' information diff --git a/include/dt-bindings/regulator/mediatek,mt6397-regulator.h b/include/dt-bindings/regulator/mediatek,mt6397-regulator.h new file mode 100644 index 000000000000..99869a8665cf --- /dev/null +++ b/include/dt-bindings/regulator/mediatek,mt6397-regulator.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _DT_BINDINGS_REGULATOR_MEDIATEK_MT6397_H_ +#define _DT_BINDINGS_REGULATOR_MEDIATEK_MT6397_H_ + +/* + * Buck mode constants which may be used in devicetree properties (eg. + * regulator-initial-mode, regulator-allowed-modes). + * See the manufacturer's datasheet for more information on these modes. + */ + +#define MT6397_BUCK_MODE_AUTO 0 +#define MT6397_BUCK_MODE_FORCE_PWM 1 + +#endif -- cgit v1.2.3 From 6c8b65950b1c75b9e41b5bf314f5d7b81df91272 Mon Sep 17 00:00:00 2001 From: Anand K Mistry Date: Thu, 2 Jul 2020 13:15:22 +1000 Subject: regulator: da9211: Move buck modes into header file This will allow device trees to make use of these constants. 
Signed-off-by: Anand K Mistry Link: https://lore.kernel.org/r/20200702131350.1.I96e67ab7b4568287eb939e8a572cbc03e87f1aa0@changeid Signed-off-by: Mark Brown --- drivers/regulator/da9211-regulator.c | 5 +---- include/dt-bindings/regulator/dlg,da9211-regulator.h | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 4 deletions(-) create mode 100644 include/dt-bindings/regulator/dlg,da9211-regulator.h (limited to 'include') diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c index 2ea4362ffa5c..1f9b75b41346 100644 --- a/drivers/regulator/da9211-regulator.c +++ b/drivers/regulator/da9211-regulator.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "da9211-regulator.h" /* DEVICE IDs */ @@ -24,10 +25,6 @@ #define DA9213_DEVICE_ID 0x23 #define DA9215_DEVICE_ID 0x24 -#define DA9211_BUCK_MODE_SLEEP 1 -#define DA9211_BUCK_MODE_SYNC 2 -#define DA9211_BUCK_MODE_AUTO 3 - /* DA9211 REGULATOR IDs */ #define DA9211_ID_BUCKA 0 #define DA9211_ID_BUCKB 1 diff --git a/include/dt-bindings/regulator/dlg,da9211-regulator.h b/include/dt-bindings/regulator/dlg,da9211-regulator.h new file mode 100644 index 000000000000..cdce2d54c8ba --- /dev/null +++ b/include/dt-bindings/regulator/dlg,da9211-regulator.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _DT_BINDINGS_REGULATOR_DLG_DA9211_H +#define _DT_BINDINGS_REGULATOR_DLG_DA9211_H + +/* + * These buck mode constants may be used to specify values in device tree + * properties (e.g. regulator-initial-mode, regulator-allowed-modes). + * A description of the following modes is in the manufacturers datasheet. + */ + +#define DA9211_BUCK_MODE_SLEEP 1 +#define DA9211_BUCK_MODE_SYNC 2 +#define DA9211_BUCK_MODE_AUTO 3 + +#endif -- cgit v1.2.3 From c8d141ce1b85d29aba008c8caa1faf174e564843 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 1 Jul 2020 19:15:55 +0200 Subject: USB: Fix up terminology in include files USB is a HOST/DEVICE protocol, as per the specification and all documentation. Fix up terms that are not applicable to make things match up with the terms used through the rest of the USB stack. Acked-by: Felipe Balbi Link: https://lore.kernel.org/r/20200701171555.3198836-1-gregkh@linuxfoundation.org Signed-off-by: Greg Kroah-Hartman --- include/linux/usb.h | 2 +- include/linux/usb/ch9.h | 8 ++++---- include/linux/usb/gadget.h | 7 ++++--- 3 files changed, 9 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/usb.h b/include/linux/usb.h index c86e4ec4d00f..c28fc391444a 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -422,7 +422,7 @@ struct usb_devmap { * Allocated per bus (tree of devices) we have: */ struct usb_bus { - struct device *controller; /* host/master side hardware */ + struct device *controller; /* host side hardware */ struct device *sysdev; /* as seen from firmware or bus */ int busnum; /* Bus number (in order of reg) */ const char *bus_name; /* stable id (PCI slot_name etc) */ diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h index 58b83066bea4..604c6c514a50 100644 --- a/include/linux/usb/ch9.h +++ b/include/linux/usb/ch9.h @@ -6,13 +6,13 @@ * Wireless USB 1.0 (spread around). Linux has several APIs in C that * need these: * - * - the master/host side Linux-USB kernel driver API; + * - the host side Linux-USB kernel driver API; * - the "usbfs" user space API; and - * - the Linux "gadget" slave/device/peripheral side driver API. + * - the Linux "gadget" device/peripheral side driver API. 
* * USB 2.0 adds an additional "On The Go" (OTG) mode, which lets systems - * act either as a USB master/host or as a USB slave/device. That means - * the master and slave side APIs benefit from working well together. + * act either as a USB host or as a USB device. That means the host and + * device side APIs benefit from working well together. * * There's also "Wireless USB", using low power short range radios for * peripheral interconnection but otherwise building on the USB framework. diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 6a178177e4c9..298b334e2951 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -4,7 +4,8 @@ * * We call the USB code inside a Linux-based peripheral device a "gadget" * driver, except for the hardware-specific bus glue. One USB host can - * master many USB gadgets, but the gadgets are only slaved to one host. + * talk to many USB gadgets, but the gadgets are only able to communicate + * to one host. * * * (C) Copyright 2002-2004 by David Brownell @@ -328,7 +329,7 @@ struct usb_gadget_ops { }; /** - * struct usb_gadget - represents a usb slave device + * struct usb_gadget - represents a usb device * @work: (internal use) Workqueue to be used for sysfs_notify() * @udc: struct usb_udc pointer for this gadget * @ops: Function pointers used to access hardware-specific operations. @@ -602,7 +603,7 @@ static inline int usb_gadget_activate(struct usb_gadget *gadget) /*-------------------------------------------------------------------------*/ /** - * struct usb_gadget_driver - driver for usb 'slave' devices + * struct usb_gadget_driver - driver for usb gadget devices * @function: String describing the gadget's function * @max_speed: Highest speed the driver handles. * @setup: Invoked for ep0 control requests that aren't handled by -- cgit v1.2.3 From e4266b991fead8eb996688e82ff39f6cc59ef7dd Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Thu, 2 Jul 2020 10:13:05 +0200 Subject: bridge: uapi: mrp: Extend MRP attributes to get the status Add MRP attribute IFLA_BRIDGE_MRP_INFO to allow the userspace to get the current state of the MRP instances. This is a nested attribute that contains other attributes like, ring id, index of primary and secondary port, priority, ring state, ring role. Signed-off-by: Horatiu Vultur Acked-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- include/uapi/linux/if_bridge.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'include') diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index caa6914a3e53..c114c1c2bd53 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -166,6 +166,7 @@ enum { IFLA_BRIDGE_MRP_RING_STATE, IFLA_BRIDGE_MRP_RING_ROLE, IFLA_BRIDGE_MRP_START_TEST, + IFLA_BRIDGE_MRP_INFO, __IFLA_BRIDGE_MRP_MAX, }; @@ -228,6 +229,22 @@ enum { #define IFLA_BRIDGE_MRP_START_TEST_MAX (__IFLA_BRIDGE_MRP_START_TEST_MAX - 1) +enum { + IFLA_BRIDGE_MRP_INFO_UNSPEC, + IFLA_BRIDGE_MRP_INFO_RING_ID, + IFLA_BRIDGE_MRP_INFO_P_IFINDEX, + IFLA_BRIDGE_MRP_INFO_S_IFINDEX, + IFLA_BRIDGE_MRP_INFO_PRIO, + IFLA_BRIDGE_MRP_INFO_RING_STATE, + IFLA_BRIDGE_MRP_INFO_RING_ROLE, + IFLA_BRIDGE_MRP_INFO_TEST_INTERVAL, + IFLA_BRIDGE_MRP_INFO_TEST_MAX_MISS, + IFLA_BRIDGE_MRP_INFO_TEST_MONITOR, + __IFLA_BRIDGE_MRP_INFO_MAX, +}; + +#define IFLA_BRIDGE_MRP_INFO_MAX (__IFLA_BRIDGE_MRP_INFO_MAX - 1) + struct br_mrp_instance { __u32 ring_id; __u32 p_ifindex; -- cgit v1.2.3 From 36a8e8e26542056bbd7eb5e047cadee30587d230 Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Thu, 2 Jul 2020 10:13:07 +0200 Subject: bridge: Extend br_fill_ifinfo to return MPR status This patch extends the function br_fill_ifinfo to return also the MRP status for each instance on a bridge. It also adds a new filter RTEXT_FILTER_MRP to return the MRP status only when this is set, not to interfer with the vlans. The MRP status is return only on the bridge interfaces. Signed-off-by: Horatiu Vultur Acked-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- include/uapi/linux/rtnetlink.h | 1 + net/bridge/br_netlink.c | 25 ++++++++++++++++++++++++- 2 files changed, 25 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 879e64950a0a..9b814c92de12 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -778,6 +778,7 @@ enum { #define RTEXT_FILTER_BRVLAN (1 << 1) #define RTEXT_FILTER_BRVLAN_COMPRESSED (1 << 2) #define RTEXT_FILTER_SKIP_STATS (1 << 3) +#define RTEXT_FILTER_MRP (1 << 4) /* End of information exported to user level */ diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 240e260e3461..c532fa65c983 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -453,6 +453,28 @@ static int br_fill_ifinfo(struct sk_buff *skb, rcu_read_unlock(); if (err) goto nla_put_failure; + + nla_nest_end(skb, af); + } + + if (filter_mask & RTEXT_FILTER_MRP) { + struct nlattr *af; + int err; + + if (!br_mrp_enabled(br) || port) + goto done; + + af = nla_nest_start_noflag(skb, IFLA_AF_SPEC); + if (!af) + goto nla_put_failure; + + rcu_read_lock(); + err = br_mrp_fill_info(skb, br); + rcu_read_unlock(); + + if (err) + goto nla_put_failure; + nla_nest_end(skb, af); } @@ -516,7 +538,8 @@ int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_bridge_port *port = br_port_get_rtnl(dev); if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) && - !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) + !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) && + !(filter_mask & RTEXT_FILTER_MRP)) return 0; return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags, -- cgit v1.2.3 From d473f4dc2f95c8c856b1659ced3502802b7d2fbe Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Sun, 21 Jun 2020 13:41:47 +0300 Subject: RDMA/mlx5: Introduce ODP prefetch counter For debugging purpose it will be 
easier to understand if prefetch works okay if it has its own counter. Introduce ODP prefetch counter and count per MR the total number of prefetched pages. In addition remove comment which is not relevant anymore and anyway not in the correct place. Link: https://lore.kernel.org/r/20200621104147.53795-1-leon@kernel.org Signed-off-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/odp.c | 19 ++++++++++--------- drivers/infiniband/hw/mlx5/restrack.c | 3 +++ include/rdma/ib_verbs.h | 1 + 3 files changed, 14 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 7d2ec9ee5097..ee88b32d143d 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -913,11 +913,6 @@ next_mr: if (ret < 0) goto srcu_unlock; - /* - * When prefetching a page, page fault is generated - * in order to bring the page to the main memory. - * In the current flow, page faults are being counted. - */ mlx5_update_odp_stats(mr, faults, ret); npages += ret; @@ -1755,12 +1750,17 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *w) struct prefetch_mr_work *work = container_of(w, struct prefetch_mr_work, work); u32 bytes_mapped = 0; + int ret; u32 i; - for (i = 0; i < work->num_sge; ++i) - pagefault_mr(work->frags[i].mr, work->frags[i].io_virt, - work->frags[i].length, &bytes_mapped, - work->pf_flags); + for (i = 0; i < work->num_sge; ++i) { + ret = pagefault_mr(work->frags[i].mr, work->frags[i].io_virt, + work->frags[i].length, &bytes_mapped, + work->pf_flags); + if (ret <= 0) + continue; + mlx5_update_odp_stats(work->frags[i].mr, prefetch, ret); + } destroy_prefetch_work(work); } @@ -1818,6 +1818,7 @@ static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd, &bytes_mapped, pf_flags); if (ret < 0) goto out; + mlx5_update_odp_stats(mr, prefetch, ret); } ret = 0; diff --git a/drivers/infiniband/hw/mlx5/restrack.c b/drivers/infiniband/hw/mlx5/restrack.c index 224a63975822..32c6d0397946 100644 --- a/drivers/infiniband/hw/mlx5/restrack.c +++ b/drivers/infiniband/hw/mlx5/restrack.c @@ -99,6 +99,9 @@ int mlx5_ib_fill_stat_mr_entry(struct sk_buff *msg, msg, "page_invalidations", atomic64_read(&mr->odp_stats.invalidations))) goto err_table; + if (rdma_nl_stat_hwcounter_entry(msg, "page_prefetch", + atomic64_read(&mr->odp_stats.prefetch))) + goto err_table; nla_nest_end(msg, table_attr); return 0; diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 1e902a8f1713..f6b51a709818 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2271,6 +2271,7 @@ struct rdma_netdev_alloc_params { struct ib_odp_counters { atomic64_t faults; atomic64_t invalidations; + atomic64_t prefetch; }; struct ib_counters { -- cgit v1.2.3 From c14f61a89c1335f95d9b37624ee157fb1fd424ee Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Fri, 3 Jul 2020 12:08:22 +0200 Subject: ASoC: rt5670: Remove struct rt5670_platform_data platform_data is an obsolete concept, instead device_properties, set through e.g. device-tree, should be used. struct rt5670_platform_data is only used internally by the rt5670 codec driver, so lets remove it before someone starts relying on it. 
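[Editor's sketch of the device-property pattern the message alludes to; the property names below are hypothetical examples and are not defined by any rt5670 binding. A probe routine can pull the same knobs from device properties instead of a platform_data struct:]

#include <linux/i2c.h>
#include <linux/property.h>

static int example_codec_probe(struct i2c_client *i2c)
{
        u32 jd_mode = 0;
        bool in2_diff;

        /* Defaults are kept when a property is absent. */
        device_property_read_u32(&i2c->dev, "example,jd-mode", &jd_mode);
        in2_diff = device_property_read_bool(&i2c->dev,
                                             "example,in2-differential");

        dev_info(&i2c->dev, "jd_mode=%u in2_diff=%d\n", jd_mode, in2_diff);
        return 0;
}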
Signed-off-by: Hans de Goede Reviewed-by: Pierre-Louis Bossart Link: https://lore.kernel.org/r/20200703100823.258033-2-hdegoede@redhat.com Signed-off-by: Mark Brown --- include/sound/rt5670.h | 26 ---------------------- sound/soc/codecs/rt5670.c | 55 +++++++++++++++++++++-------------------------- sound/soc/codecs/rt5670.h | 16 +++++++++++--- 3 files changed, 38 insertions(+), 59 deletions(-) delete mode 100644 include/sound/rt5670.h (limited to 'include') diff --git a/include/sound/rt5670.h b/include/sound/rt5670.h deleted file mode 100644 index 02e1d7778354..000000000000 --- a/include/sound/rt5670.h +++ /dev/null @@ -1,26 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * linux/sound/rt5670.h -- Platform data for RT5670 - * - * Copyright 2014 Realtek Microelectronics - */ - -#ifndef __LINUX_SND_RT5670_H -#define __LINUX_SND_RT5670_H - -struct rt5670_platform_data { - int jd_mode; - bool in2_diff; - bool dev_gpio; - bool gpio1_is_ext_spk_en; - - bool dmic_en; - unsigned int dmic1_data_pin; - /* 0 = GPIO6; 1 = IN2P; 3 = GPIO7*/ - unsigned int dmic2_data_pin; - /* 0 = GPIO8; 1 = IN3N; */ - unsigned int dmic3_data_pin; - /* 0 = GPIO9; 1 = GPIO10; 2 = GPIO5*/ -}; - -#endif diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c index 91c68c8965d1..3ccaeb985176 100644 --- a/sound/soc/codecs/rt5670.c +++ b/sound/soc/codecs/rt5670.c @@ -25,7 +25,6 @@ #include #include #include -#include #include "rl6231.h" #include "rt5670.h" @@ -518,7 +517,7 @@ static int rt5670_irq_detection(void *data) struct snd_soc_jack *jack = rt5670->jack; int val, btn_type, report = jack->status; - if (rt5670->pdata.jd_mode == 1) /* 2 port */ + if (rt5670->jd_mode == 1) /* 2 port */ val = snd_soc_component_read(rt5670->component, RT5670_A_JD_CTRL1) & 0x0070; else val = snd_soc_component_read(rt5670->component, RT5670_A_JD_CTRL1) & 0x0020; @@ -1454,7 +1453,7 @@ static int rt5670_spk_event(struct snd_soc_dapm_widget *w, struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); struct rt5670_priv *rt5670 = snd_soc_component_get_drvdata(component); - if (!rt5670->pdata.gpio1_is_ext_spk_en) + if (!rt5670->gpio1_is_ext_spk_en) return 0; switch (event) { @@ -2624,7 +2623,7 @@ static int rt5670_set_bias_level(struct snd_soc_component *component, RT5670_LDO_SEL_MASK, 0x3); break; case SND_SOC_BIAS_OFF: - if (rt5670->pdata.jd_mode) + if (rt5670->jd_mode) snd_soc_component_update_bits(component, RT5670_PWR_ANLG1, RT5670_PWR_VREF1 | RT5670_PWR_MB | RT5670_PWR_BG | RT5670_PWR_VREF2 | @@ -2927,7 +2926,6 @@ static const struct dmi_system_id dmi_platform_intel_quirks[] = { static int rt5670_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { - struct rt5670_platform_data *pdata = dev_get_platdata(&i2c->dev); struct rt5670_priv *rt5670; int ret; unsigned int val; @@ -2940,9 +2938,6 @@ static int rt5670_i2c_probe(struct i2c_client *i2c, i2c_set_clientdata(i2c, rt5670); - if (pdata) - rt5670->pdata = *pdata; - dmi_check_system(dmi_platform_intel_quirks); if (quirk_override) { dev_info(&i2c->dev, "Overriding quirk 0x%x => 0x%x\n", @@ -2951,56 +2946,56 @@ static int rt5670_i2c_probe(struct i2c_client *i2c, } if (rt5670_quirk & RT5670_DEV_GPIO) { - rt5670->pdata.dev_gpio = true; + rt5670->dev_gpio = true; dev_info(&i2c->dev, "quirk dev_gpio\n"); } if (rt5670_quirk & RT5670_GPIO1_IS_EXT_SPK_EN) { - rt5670->pdata.gpio1_is_ext_spk_en = true; + rt5670->gpio1_is_ext_spk_en = true; dev_info(&i2c->dev, "quirk GPIO1 is external speaker enable\n"); } if (rt5670_quirk & RT5670_IN2_DIFF) { - 
rt5670->pdata.in2_diff = true; + rt5670->in2_diff = true; dev_info(&i2c->dev, "quirk IN2_DIFF\n"); } if (rt5670_quirk & RT5670_DMIC_EN) { - rt5670->pdata.dmic_en = true; + rt5670->dmic_en = true; dev_info(&i2c->dev, "quirk DMIC enabled\n"); } if (rt5670_quirk & RT5670_DMIC1_IN2P) { - rt5670->pdata.dmic1_data_pin = RT5670_DMIC_DATA_IN2P; + rt5670->dmic1_data_pin = RT5670_DMIC_DATA_IN2P; dev_info(&i2c->dev, "quirk DMIC1 on IN2P pin\n"); } if (rt5670_quirk & RT5670_DMIC1_GPIO6) { - rt5670->pdata.dmic1_data_pin = RT5670_DMIC_DATA_GPIO6; + rt5670->dmic1_data_pin = RT5670_DMIC_DATA_GPIO6; dev_info(&i2c->dev, "quirk DMIC1 on GPIO6 pin\n"); } if (rt5670_quirk & RT5670_DMIC1_GPIO7) { - rt5670->pdata.dmic1_data_pin = RT5670_DMIC_DATA_GPIO7; + rt5670->dmic1_data_pin = RT5670_DMIC_DATA_GPIO7; dev_info(&i2c->dev, "quirk DMIC1 on GPIO7 pin\n"); } if (rt5670_quirk & RT5670_DMIC2_INR) { - rt5670->pdata.dmic2_data_pin = RT5670_DMIC_DATA_IN3N; + rt5670->dmic2_data_pin = RT5670_DMIC_DATA_IN3N; dev_info(&i2c->dev, "quirk DMIC2 on INR pin\n"); } if (rt5670_quirk & RT5670_DMIC2_GPIO8) { - rt5670->pdata.dmic2_data_pin = RT5670_DMIC_DATA_GPIO8; + rt5670->dmic2_data_pin = RT5670_DMIC_DATA_GPIO8; dev_info(&i2c->dev, "quirk DMIC2 on GPIO8 pin\n"); } if (rt5670_quirk & RT5670_DMIC3_GPIO5) { - rt5670->pdata.dmic3_data_pin = RT5670_DMIC_DATA_GPIO5; + rt5670->dmic3_data_pin = RT5670_DMIC_DATA_GPIO5; dev_info(&i2c->dev, "quirk DMIC3 on GPIO5 pin\n"); } if (rt5670_quirk & RT5670_JD_MODE1) { - rt5670->pdata.jd_mode = 1; + rt5670->jd_mode = 1; dev_info(&i2c->dev, "quirk JD mode 1\n"); } if (rt5670_quirk & RT5670_JD_MODE2) { - rt5670->pdata.jd_mode = 2; + rt5670->jd_mode = 2; dev_info(&i2c->dev, "quirk JD mode 2\n"); } if (rt5670_quirk & RT5670_JD_MODE3) { - rt5670->pdata.jd_mode = 3; + rt5670->jd_mode = 3; dev_info(&i2c->dev, "quirk JD mode 3\n"); } @@ -3041,11 +3036,11 @@ static int rt5670_i2c_probe(struct i2c_client *i2c, regmap_update_bits(rt5670->regmap, RT5670_DIG_MISC, RT5670_MCLK_DET, RT5670_MCLK_DET); - if (rt5670->pdata.in2_diff) + if (rt5670->in2_diff) regmap_update_bits(rt5670->regmap, RT5670_IN2, RT5670_IN_DF2, RT5670_IN_DF2); - if (rt5670->pdata.dev_gpio) { + if (rt5670->dev_gpio) { /* for push button */ regmap_write(rt5670->regmap, RT5670_IL_CMD, 0x0000); regmap_write(rt5670->regmap, RT5670_IL_CMD2, 0x0010); @@ -3057,14 +3052,14 @@ static int rt5670_i2c_probe(struct i2c_client *i2c, RT5670_GP1_PF_MASK, RT5670_GP1_PF_OUT); } - if (rt5670->pdata.gpio1_is_ext_spk_en) { + if (rt5670->gpio1_is_ext_spk_en) { regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL1, RT5670_GP1_PIN_MASK, RT5670_GP1_PIN_GPIO1); regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL2, RT5670_GP1_PF_MASK, RT5670_GP1_PF_OUT); } - if (rt5670->pdata.jd_mode) { + if (rt5670->jd_mode) { regmap_update_bits(rt5670->regmap, RT5670_GLB_CLK, RT5670_SCLK_SRC_MASK, RT5670_SCLK_SRC_RCCLK); rt5670->sysclk = 0; @@ -3079,7 +3074,7 @@ static int rt5670_i2c_probe(struct i2c_client *i2c, RT5670_JD_TRI_CBJ_SEL_MASK | RT5670_JD_TRI_HPO_SEL_MASK, RT5670_JD_CBJ_JD1_1 | RT5670_JD_HPO_JD1_1); - switch (rt5670->pdata.jd_mode) { + switch (rt5670->jd_mode) { case 1: regmap_update_bits(rt5670->regmap, RT5670_A_JD_CTRL1, RT5670_JD1_MODE_MASK, @@ -3100,12 +3095,12 @@ static int rt5670_i2c_probe(struct i2c_client *i2c, } } - if (rt5670->pdata.dmic_en) { + if (rt5670->dmic_en) { regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL1, RT5670_GP2_PIN_MASK, RT5670_GP2_PIN_DMIC1_SCL); - switch (rt5670->pdata.dmic1_data_pin) { + switch (rt5670->dmic1_data_pin) { case 
RT5670_DMIC_DATA_IN2P: regmap_update_bits(rt5670->regmap, RT5670_DMIC_CTRL1, RT5670_DMIC_1_DP_MASK, @@ -3134,7 +3129,7 @@ static int rt5670_i2c_probe(struct i2c_client *i2c, break; } - switch (rt5670->pdata.dmic2_data_pin) { + switch (rt5670->dmic2_data_pin) { case RT5670_DMIC_DATA_IN3N: regmap_update_bits(rt5670->regmap, RT5670_DMIC_CTRL1, RT5670_DMIC_2_DP_MASK, @@ -3154,7 +3149,7 @@ static int rt5670_i2c_probe(struct i2c_client *i2c, break; } - switch (rt5670->pdata.dmic3_data_pin) { + switch (rt5670->dmic3_data_pin) { case RT5670_DMIC_DATA_GPIO5: regmap_update_bits(rt5670->regmap, RT5670_DMIC_CTRL2, RT5670_DMIC_3_DP_MASK, diff --git a/sound/soc/codecs/rt5670.h b/sound/soc/codecs/rt5670.h index de0203369b7c..657420805918 100644 --- a/sound/soc/codecs/rt5670.h +++ b/sound/soc/codecs/rt5670.h @@ -9,8 +9,6 @@ #ifndef __RT5670_H__ #define __RT5670_H__ -#include - /* Info */ #define RT5670_RESET 0x00 #define RT5670_VENDOR_ID 0xfd @@ -1988,11 +1986,23 @@ int rt5670_sel_asrc_clk_src(struct snd_soc_component *component, struct rt5670_priv { struct snd_soc_component *component; - struct rt5670_platform_data pdata; struct regmap *regmap; struct snd_soc_jack *jack; struct snd_soc_jack_gpio hp_gpio; + int jd_mode; + bool in2_diff; + bool dev_gpio; + bool gpio1_is_ext_spk_en; + + bool dmic_en; + unsigned int dmic1_data_pin; + /* 0 = GPIO6; 1 = IN2P; 3 = GPIO7*/ + unsigned int dmic2_data_pin; + /* 0 = GPIO8; 1 = IN3N; */ + unsigned int dmic3_data_pin; + /* 0 = GPIO9; 1 = GPIO10; 2 = GPIO5*/ + int sysclk; int sysclk_src; int lrck[RT5670_AIFS]; -- cgit v1.2.3 From 4dca650991e4175b8d5bae7ff6f1637a0c42be4a Mon Sep 17 00:00:00 2001 From: Michael Guralnik Date: Wed, 20 May 2020 13:59:06 +0300 Subject: net/mlx5: Enable QP number request when creating IPoIB underlay QP If in the process of creating the underlay QP for an IPoIB interface the user has set the address and specifically the 1st-3rd bytes representing the QP number, use the requested QP number when creating the underlay QP. For a user to be able to request a QP number on QP creation, the MKEY_BY_NAME NVCONFIG should be set. As mkey_by_name and qp_by_name are coupled in FW. This requires driver to query the mkey_by_name max cap during initialization and set the current cap if it was enabled in FW. 
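[Editor's standalone example mirroring the packing used in mlx5i_create_underlay_qp() in the hunk below: bytes 1-3 of the interface hardware address form the requested 24-bit QP number, so 0x00 0x12 0x34 in those positions requests QPN 0x001234. The helper name is illustrative.]

#include <stdio.h>

static unsigned int qpn_from_addr(const unsigned char *dev_addr)
{
        /* bytes 1-3 of the address carry the requested underlay QPN */
        return (dev_addr[1] << 16) + (dev_addr[2] << 8) + dev_addr[3];
}

int main(void)
{
        unsigned char addr[] = { 0x80, 0x00, 0x12, 0x34 };

        printf("requested qpn = 0x%06x\n", qpn_from_addr(addr));
        return 0;
}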
Signed-off-by: Michael Guralnik Reviewed-by: Saeed Mahameed Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 7 +++++++ drivers/net/ethernet/mellanox/mlx5/core/main.c | 3 +++ include/linux/mlx5/mlx5_ifc.h | 9 +++++++-- 3 files changed, 17 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 690b822c6152..d1266d8fed97 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -226,13 +226,20 @@ void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv) int mlx5i_create_underlay_qp(struct mlx5e_priv *priv) { + unsigned char *dev_addr = priv->netdev->dev_addr; u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {}; u32 in[MLX5_ST_SZ_DW(create_qp_in)] = {}; struct mlx5i_priv *ipriv = priv->ppriv; void *addr_path; + int qpn = 0; int ret = 0; void *qpc; + if (MLX5_CAP_GEN(priv->mdev, mkey_by_name)) { + qpn = (dev_addr[1] << 16) + (dev_addr[2] << 8) + dev_addr[3]; + MLX5_SET(create_qp_in, in, input_qpn, qpn); + } + qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD); MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 8b658908f044..623785fe74b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -557,6 +557,9 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) if (MLX5_CAP_GEN_MAX(dev, release_all_pages)) MLX5_SET(cmd_hca_cap, set_hca_cap, release_all_pages, 1); + if (MLX5_CAP_GEN_MAX(dev, mkey_by_name)) + MLX5_SET(cmd_hca_cap, set_hca_cap, mkey_by_name, 1); + return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE); } diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index a227518c70cf..3786888cb1ba 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1392,7 +1392,10 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 bf[0x1]; u8 driver_version[0x1]; u8 pad_tx_eth_packet[0x1]; - u8 reserved_at_263[0x8]; + u8 reserved_at_263[0x3]; + u8 mkey_by_name[0x1]; + u8 reserved_at_267[0x4]; + u8 log_bf_reg_size[0x5]; u8 reserved_at_270[0x8]; @@ -7712,8 +7715,10 @@ struct mlx5_ifc_create_qp_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 reserved_at_40[0x8]; + u8 input_qpn[0x18]; + u8 reserved_at_60[0x20]; u8 opt_param_mask[0x20]; u8 ece[0x20]; -- cgit v1.2.3 From f0a5e4d7a594e0fe237d3dfafb069bb82f80f42f Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Wed, 1 Jul 2020 18:17:19 +0300 Subject: ipvs: allow connection reuse for unconfirmed conntrack MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit YangYuxi is reporting that connection reuse is causing one-second delay when SYN hits existing connection in TIME_WAIT state. Such delay was added to give time to expire both the IPVS connection and the corresponding conntrack. This was considered a rare case at that time but it is causing problem for some environments such as Kubernetes. 
As nf_conntrack_tcp_packet() can decide to release the conntrack in TIME_WAIT state and to replace it with a fresh NEW conntrack, we can use this to allow rescheduling just by tuning our check: if the conntrack is confirmed we can not schedule it to different real server and the one-second delay still applies but if new conntrack was created, we are free to select new real server without any delays. YangYuxi lists some of the problem reports: - One second connection delay in masquerading mode: https://marc.info/?t=151683118100004&r=1&w=2 - IPVS low throughput #70747 https://github.com/kubernetes/kubernetes/issues/70747 - Apache Bench can fill up ipvs service proxy in seconds #544 https://github.com/cloudnativelabs/kube-router/issues/544 - Additional 1s latency in `host -> service IP -> pod` https://github.com/kubernetes/kubernetes/issues/90854 Fixes: f719e3754ee2 ("ipvs: drop first packet to redirect conntrack") Co-developed-by: YangYuxi Signed-off-by: YangYuxi Signed-off-by: Julian Anastasov Reviewed-by: Simon Horman Signed-off-by: Pablo Neira Ayuso --- include/net/ip_vs.h | 10 ++++------ net/netfilter/ipvs/ip_vs_core.c | 12 +++++++----- 2 files changed, 11 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 0c9881241323..011f407b76fe 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -1626,18 +1626,16 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp) } #endif /* CONFIG_IP_VS_NFCT */ -/* Really using conntrack? */ -static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp, - struct sk_buff *skb) +/* Using old conntrack that can not be redirected to another real server? */ +static inline bool ip_vs_conn_uses_old_conntrack(struct ip_vs_conn *cp, + struct sk_buff *skb) { #ifdef CONFIG_IP_VS_NFCT enum ip_conntrack_info ctinfo; struct nf_conn *ct; - if (!(cp->flags & IP_VS_CONN_F_NFCT)) - return false; ct = nf_ct_get(skb, &ctinfo); - if (ct) + if (ct && nf_ct_is_confirmed(ct)) return true; #endif return false; diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index ca3670152565..b4a6b7662f3f 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -2066,14 +2066,14 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int conn_reuse_mode = sysctl_conn_reuse_mode(ipvs); if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) { - bool uses_ct = false, resched = false; + bool old_ct = false, resched = false; if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest && unlikely(!atomic_read(&cp->dest->weight))) { resched = true; - uses_ct = ip_vs_conn_uses_conntrack(cp, skb); + old_ct = ip_vs_conn_uses_old_conntrack(cp, skb); } else if (is_new_conn_expected(cp, conn_reuse_mode)) { - uses_ct = ip_vs_conn_uses_conntrack(cp, skb); + old_ct = ip_vs_conn_uses_old_conntrack(cp, skb); if (!atomic_read(&cp->n_control)) { resched = true; } else { @@ -2081,15 +2081,17 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int * that uses conntrack while it is still * referenced by controlled connection(s). 
*/ - resched = !uses_ct; + resched = !old_ct; } } if (resched) { + if (!old_ct) + cp->flags &= ~IP_VS_CONN_F_NFCT; if (!atomic_read(&cp->n_control)) ip_vs_conn_expire_now(cp); __ip_vs_conn_put(cp); - if (uses_ct) + if (old_ct) return NF_DROP; cp = NULL; } -- cgit v1.2.3 From 74cccc3d38438b346e40a4f8133cff3f0839ff84 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 30 Jun 2020 19:21:11 +0200 Subject: netfilter: nf_tables: add NFTA_CHAIN_ID attribute This netlink attribute allows you to refer to chains inside a transaction as an alternative to the name and the handle. The chain binding support requires this new chain ID approach. Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tables.h | 3 +++ include/uapi/linux/netfilter/nf_tables.h | 2 ++ net/netfilter/nf_tables_api.c | 15 ++++++++++++--- 3 files changed, 17 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 6f0f6fca9ac3..3e5226684017 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -1433,6 +1433,7 @@ struct nft_trans_chain { char *name; struct nft_stats __percpu *stats; u8 policy; + u32 chain_id; }; #define nft_trans_chain_update(trans) \ @@ -1443,6 +1444,8 @@ struct nft_trans_chain { (((struct nft_trans_chain *)trans->data)->stats) #define nft_trans_chain_policy(trans) \ (((struct nft_trans_chain *)trans->data)->policy) +#define nft_trans_chain_id(trans) \ + (((struct nft_trans_chain *)trans->data)->chain_id) struct nft_trans_table { bool update; diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 4565456c0ef4..477779595b78 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -196,6 +196,7 @@ enum nft_table_attributes { * @NFTA_CHAIN_TYPE: type name of the string (NLA_NUL_STRING) * @NFTA_CHAIN_COUNTERS: counter specification of the chain (NLA_NESTED: nft_counter_attributes) * @NFTA_CHAIN_FLAGS: chain flags + * @NFTA_CHAIN_ID: uniquely identifies a chain in a transaction (NLA_U32) */ enum nft_chain_attributes { NFTA_CHAIN_UNSPEC, @@ -209,6 +210,7 @@ enum nft_chain_attributes { NFTA_CHAIN_COUNTERS, NFTA_CHAIN_PAD, NFTA_CHAIN_FLAGS, + NFTA_CHAIN_ID, __NFTA_CHAIN_MAX }; #define NFTA_CHAIN_MAX (__NFTA_CHAIN_MAX - 1) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 7647ecfa0d40..650ef0dd0773 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -280,9 +280,15 @@ static struct nft_trans *nft_trans_chain_add(struct nft_ctx *ctx, int msg_type) if (trans == NULL) return ERR_PTR(-ENOMEM); - if (msg_type == NFT_MSG_NEWCHAIN) + if (msg_type == NFT_MSG_NEWCHAIN) { nft_activate_next(ctx->net, ctx->chain); + if (ctx->nla[NFTA_CHAIN_ID]) { + nft_trans_chain_id(trans) = + ntohl(nla_get_be32(ctx->nla[NFTA_CHAIN_ID])); + } + } + list_add_tail(&trans->list, &ctx->net->nft.commit_list); return trans; } @@ -1274,6 +1280,7 @@ static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = { .len = NFT_MODULE_AUTOLOAD_LIMIT }, [NFTA_CHAIN_COUNTERS] = { .type = NLA_NESTED }, [NFTA_CHAIN_FLAGS] = { .type = NLA_U32 }, + [NFTA_CHAIN_ID] = { .type = NLA_U32 }, }; static const struct nla_policy nft_hook_policy[NFTA_HOOK_MAX + 1] = { @@ -2154,9 +2161,9 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk, const struct nfgenmsg *nfmsg = nlmsg_data(nlh); u8 genmask = nft_genmask_next(net); int family = nfmsg->nfgen_family; + struct 
nft_chain *chain = NULL; const struct nlattr *attr; struct nft_table *table; - struct nft_chain *chain; u8 policy = NF_ACCEPT; struct nft_ctx ctx; u64 handle = 0; @@ -2181,7 +2188,7 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk, return PTR_ERR(chain); } attr = nla[NFTA_CHAIN_HANDLE]; - } else { + } else if (nla[NFTA_CHAIN_NAME]) { chain = nft_chain_lookup(net, table, attr, genmask); if (IS_ERR(chain)) { if (PTR_ERR(chain) != -ENOENT) { @@ -2190,6 +2197,8 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk, } chain = NULL; } + } else if (!nla[NFTA_CHAIN_ID]) { + return -EINVAL; } if (nla[NFTA_CHAIN_POLICY]) { -- cgit v1.2.3 From 837830a4b439bfeb86c70b0115c280377c84714b Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 30 Jun 2020 19:21:16 +0200 Subject: netfilter: nf_tables: add NFTA_RULE_CHAIN_ID attribute This new netlink attribute allows you to add rules to chains by the chain ID. Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nf_tables.h | 1 + net/netfilter/nf_tables_api.c | 36 ++++++++++++++++++++++++++++---- 2 files changed, 33 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 477779595b78..2304d1b7ba5e 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -240,6 +240,7 @@ enum nft_rule_attributes { NFTA_RULE_PAD, NFTA_RULE_ID, NFTA_RULE_POSITION_ID, + NFTA_RULE_CHAIN_ID, __NFTA_RULE_MAX }; #define NFTA_RULE_MAX (__NFTA_RULE_MAX - 1) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 650ef0dd0773..fbe8f9209813 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2153,6 +2153,22 @@ err: return err; } +static struct nft_chain *nft_chain_lookup_byid(const struct net *net, + const struct nlattr *nla) +{ + u32 id = ntohl(nla_get_be32(nla)); + struct nft_trans *trans; + + list_for_each_entry(trans, &net->nft.commit_list, list) { + struct nft_chain *chain = trans->ctx.chain; + + if (trans->msg_type == NFT_MSG_NEWCHAIN && + id == nft_trans_chain_id(trans)) + return chain; + } + return ERR_PTR(-ENOENT); +} + static int nf_tables_newchain(struct net *net, struct sock *nlsk, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[], @@ -2633,6 +2649,7 @@ static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = { .len = NFT_USERDATA_MAXLEN }, [NFTA_RULE_ID] = { .type = NLA_U32 }, [NFTA_RULE_POSITION_ID] = { .type = NLA_U32 }, + [NFTA_RULE_CHAIN_ID] = { .type = NLA_U32 }, }; static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net, @@ -3039,10 +3056,21 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk, return PTR_ERR(table); } - chain = nft_chain_lookup(net, table, nla[NFTA_RULE_CHAIN], genmask); - if (IS_ERR(chain)) { - NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]); - return PTR_ERR(chain); + if (nla[NFTA_RULE_CHAIN]) { + chain = nft_chain_lookup(net, table, nla[NFTA_RULE_CHAIN], + genmask); + if (IS_ERR(chain)) { + NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]); + return PTR_ERR(chain); + } + } else if (nla[NFTA_RULE_CHAIN_ID]) { + chain = nft_chain_lookup_byid(net, nla[NFTA_RULE_CHAIN_ID]); + if (IS_ERR(chain)) { + NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN_ID]); + return PTR_ERR(chain); + } + } else { + return -EINVAL; } if (nla[NFTA_RULE_HANDLE]) { -- cgit v1.2.3 From 51d70f181ff4e2c996ddf256af1efecd7d5864e5 Mon Sep 17 00:00:00 2001 From: Pablo 
Neira Ayuso Date: Tue, 30 Jun 2020 19:21:21 +0200 Subject: netfilter: nf_tables: add NFTA_VERDICT_CHAIN_ID attribute This netlink attribute allows you to identify the chain to jump/goto by means of the chain ID. Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nf_tables.h | 2 ++ net/netfilter/nf_tables_api.c | 16 +++++++++++++--- 2 files changed, 15 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 2304d1b7ba5e..683e75126d68 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -471,11 +471,13 @@ enum nft_data_attributes { * * @NFTA_VERDICT_CODE: nf_tables verdict (NLA_U32: enum nft_verdicts) * @NFTA_VERDICT_CHAIN: jump target chain name (NLA_STRING) + * @NFTA_VERDICT_CHAIN_ID: jump target chain ID (NLA_U32) */ enum nft_verdict_attributes { NFTA_VERDICT_UNSPEC, NFTA_VERDICT_CODE, NFTA_VERDICT_CHAIN, + NFTA_VERDICT_CHAIN_ID, __NFTA_VERDICT_MAX }; #define NFTA_VERDICT_MAX (__NFTA_VERDICT_MAX - 1) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index fbe8f9209813..d86602797a69 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -8242,6 +8242,7 @@ static const struct nla_policy nft_verdict_policy[NFTA_VERDICT_MAX + 1] = { [NFTA_VERDICT_CODE] = { .type = NLA_U32 }, [NFTA_VERDICT_CHAIN] = { .type = NLA_STRING, .len = NFT_CHAIN_MAXNAMELEN - 1 }, + [NFTA_VERDICT_CHAIN_ID] = { .type = NLA_U32 }, }; static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, @@ -8278,10 +8279,19 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, break; case NFT_JUMP: case NFT_GOTO: - if (!tb[NFTA_VERDICT_CHAIN]) + if (tb[NFTA_VERDICT_CHAIN]) { + chain = nft_chain_lookup(ctx->net, ctx->table, + tb[NFTA_VERDICT_CHAIN], + genmask); + } else if (tb[NFTA_VERDICT_CHAIN_ID]) { + chain = nft_chain_lookup_byid(ctx->net, + tb[NFTA_VERDICT_CHAIN_ID]); + if (IS_ERR(chain)) + return PTR_ERR(chain); + } else { return -EINVAL; - chain = nft_chain_lookup(ctx->net, ctx->table, - tb[NFTA_VERDICT_CHAIN], genmask); + } + if (IS_ERR(chain)) return PTR_ERR(chain); if (nft_is_base_chain(chain)) -- cgit v1.2.3 From 67c49de4ad862c567088c5119cf125e566f56e7f Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 30 Jun 2020 19:21:25 +0200 Subject: netfilter: nf_tables: expose enum nft_chain_flags through UAPI This enum definition was never exposed through UAPI. Rename NFT_BASE_CHAIN to NFT_CHAIN_BASE for consistency. 
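With the enum now exported, userspace that dumps chains can interpret NFTA_CHAIN_FLAGS directly. A small sketch, assuming the attribute has already been extracted with libmnl; the kernel sends the flags as a big-endian 32-bit value (nla_put_be32):

#include <libmnl/libmnl.h>
#include <linux/netfilter/nf_tables.h>
#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>

/* Return true if a dumped chain carries the base-chain flag. */
static bool chain_attr_is_base(const struct nlattr *flags_attr)
{
	uint32_t flags = ntohl(mnl_attr_get_u32(flags_attr));

	return flags & NFT_CHAIN_BASE;
}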
Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tables.h | 7 +------ include/uapi/linux/netfilter/nf_tables.h | 5 +++++ net/netfilter/nf_tables_api.c | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 3e5226684017..6d1e7da6e00a 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -921,11 +921,6 @@ static inline void nft_set_elem_update_expr(const struct nft_set_ext *ext, (expr) != (last); \ (expr) = nft_expr_next(expr)) -enum nft_chain_flags { - NFT_BASE_CHAIN = 0x1, - NFT_CHAIN_HW_OFFLOAD = 0x2, -}; - #define NFT_CHAIN_POLICY_UNSET U8_MAX /** @@ -1036,7 +1031,7 @@ static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chai static inline bool nft_is_base_chain(const struct nft_chain *chain) { - return chain->flags & NFT_BASE_CHAIN; + return chain->flags & NFT_CHAIN_BASE; } int __nft_release_basechain(struct nft_ctx *ctx); diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 683e75126d68..2cf7cc3b50c1 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -184,6 +184,11 @@ enum nft_table_attributes { }; #define NFTA_TABLE_MAX (__NFTA_TABLE_MAX - 1) +enum nft_chain_flags { + NFT_CHAIN_BASE = (1 << 0), + NFT_CHAIN_HW_OFFLOAD = (1 << 1), +}; + /** * enum nft_chain_attributes - nf_tables chain netlink attributes * diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index d86602797a69..b7582a1c8dce 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -1903,7 +1903,7 @@ static int nft_basechain_init(struct nft_base_chain *basechain, u8 family, nft_basechain_hook_init(&basechain->ops, family, hook, chain); } - chain->flags |= NFT_BASE_CHAIN | flags; + chain->flags |= NFT_CHAIN_BASE | flags; basechain->policy = NF_ACCEPT; if (chain->flags & NFT_CHAIN_HW_OFFLOAD && nft_chain_offload_priority(basechain) < 0) @@ -2255,7 +2255,7 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk, if (nlh->nlmsg_flags & NLM_F_REPLACE) return -EOPNOTSUPP; - flags |= chain->flags & NFT_BASE_CHAIN; + flags |= chain->flags & NFT_CHAIN_BASE; return nf_tables_updchain(&ctx, genmask, policy, flags); } -- cgit v1.2.3 From d0e2c7de92c7f2b3d355ad76b0bb9fc43d1beb87 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 30 Jun 2020 19:21:36 +0200 Subject: netfilter: nf_tables: add NFT_CHAIN_BINDING This new chain flag specifies that: * the kernel dynamically allocates the chain name, if no chain name is specified. * If the immediate expression that refers to this chain is removed, then this bound chain (and its content) is destroyed. 
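Tying the series together: a client creates such a chain by sending NFT_MSG_NEWCHAIN without NFTA_CHAIN_NAME, carrying a transaction-local NFTA_CHAIN_ID and NFT_CHAIN_BINDING, and then points a rule or verdict at it via NFTA_RULE_CHAIN_ID or NFTA_VERDICT_CHAIN_ID. A minimal libmnl sketch of the chain half; the table name, ID value and family are illustrative, and a real client sends this inside an nfnetlink batch:

#include <libmnl/libmnl.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nf_tables.h>
#include <arpa/inet.h>
#include <stdint.h>

/* Build a NEWCHAIN request for an anonymous, bound chain: the kernel
 * generates a "__chainNN" name and tears the chain down again when the
 * immediate expression that jumps to it is removed.
 */
static struct nlmsghdr *build_bound_chain(char *buf, uint32_t seq)
{
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfg;

	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = (NFNL_SUBSYS_NFTABLES << 8) | NFT_MSG_NEWCHAIN;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE;
	nlh->nlmsg_seq = seq;

	nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
	nfg->nfgen_family = NFPROTO_INET;
	nfg->version = NFNETLINK_V0;
	nfg->res_id = 0;

	mnl_attr_put_strz(nlh, NFTA_CHAIN_TABLE, "filter");
	mnl_attr_put_u32(nlh, NFTA_CHAIN_ID, htonl(1));
	mnl_attr_put_u32(nlh, NFTA_CHAIN_FLAGS, htonl(NFT_CHAIN_BINDING));

	return nlh;
}

The rule that binds to this chain then carries the same ID in NFTA_VERDICT_CHAIN_ID of its immediate verdict (or NFTA_RULE_CHAIN_ID for rule placement), which is what nft_immediate_init() below uses to mark the chain as bound.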
Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tables.h | 13 ++++- include/uapi/linux/netfilter/nf_tables.h | 1 + net/netfilter/nf_tables_api.c | 86 +++++++++++++++++++++++++++----- net/netfilter/nft_immediate.c | 51 +++++++++++++++++++ 4 files changed, 138 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 6d1e7da6e00a..822c26766330 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -899,6 +899,8 @@ static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule) return (void *)&rule->data[rule->dlen]; } +void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *rule); + static inline void nft_set_elem_update_expr(const struct nft_set_ext *ext, struct nft_regs *regs, const struct nft_pktinfo *pkt) @@ -944,7 +946,8 @@ struct nft_chain { struct nft_table *table; u64 handle; u32 use; - u8 flags:6, + u8 flags:5, + bound:1, genmask:2; char *name; @@ -989,6 +992,14 @@ int nft_chain_validate_dependency(const struct nft_chain *chain, int nft_chain_validate_hooks(const struct nft_chain *chain, unsigned int hook_flags); +static inline bool nft_chain_is_bound(struct nft_chain *chain) +{ + return (chain->flags & NFT_CHAIN_BINDING) && chain->bound; +} + +void nft_chain_del(struct nft_chain *chain); +void nf_tables_chain_destroy(struct nft_ctx *ctx); + struct nft_stats { u64 bytes; u64 pkts; diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 2cf7cc3b50c1..e00b4ae6174e 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -187,6 +187,7 @@ enum nft_table_attributes { enum nft_chain_flags { NFT_CHAIN_BASE = (1 << 0), NFT_CHAIN_HW_OFFLOAD = (1 << 1), + NFT_CHAIN_BINDING = (1 << 2), }; /** diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index a7cb9c07802b..b8a970dad213 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -1056,6 +1056,9 @@ static int nft_flush_table(struct nft_ctx *ctx) if (!nft_is_active_next(ctx->net, chain)) continue; + if (nft_chain_is_bound(chain)) + continue; + ctx->chain = chain; err = nft_delrule_by_chain(ctx); @@ -1098,6 +1101,9 @@ static int nft_flush_table(struct nft_ctx *ctx) if (!nft_is_active_next(ctx->net, chain)) continue; + if (nft_chain_is_bound(chain)) + continue; + ctx->chain = chain; err = nft_delchain(ctx); @@ -1413,13 +1419,12 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net, lockdep_commit_lock_is_held(net)); if (nft_dump_stats(skb, stats)) goto nla_put_failure; - - if ((chain->flags & NFT_CHAIN_HW_OFFLOAD) && - nla_put_be32(skb, NFTA_CHAIN_FLAGS, - htonl(NFT_CHAIN_HW_OFFLOAD))) - goto nla_put_failure; } + if (chain->flags && + nla_put_be32(skb, NFTA_CHAIN_FLAGS, htonl(chain->flags))) + goto nla_put_failure; + if (nla_put_be32(skb, NFTA_CHAIN_USE, htonl(chain->use))) goto nla_put_failure; @@ -1621,7 +1626,7 @@ static void nf_tables_chain_free_chain_rules(struct nft_chain *chain) kvfree(chain->rules_next); } -static void nf_tables_chain_destroy(struct nft_ctx *ctx) +void nf_tables_chain_destroy(struct nft_ctx *ctx) { struct nft_chain *chain = ctx->chain; struct nft_hook *hook, *next; @@ -1928,6 +1933,8 @@ static int nft_chain_add(struct nft_table *table, struct nft_chain *chain) return 0; } +static u64 chain_id; + static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, u8 policy, u32 flags) { @@ 
-1936,6 +1943,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, struct nft_base_chain *basechain; struct nft_stats __percpu *stats; struct net *net = ctx->net; + char name[NFT_NAME_MAXLEN]; struct nft_trans *trans; struct nft_chain *chain; struct nft_rule **rules; @@ -1947,6 +1955,9 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, if (nla[NFTA_CHAIN_HOOK]) { struct nft_chain_hook hook; + if (flags & NFT_CHAIN_BINDING) + return -EOPNOTSUPP; + err = nft_chain_parse_hook(net, nla, &hook, family, true); if (err < 0) return err; @@ -1976,16 +1987,33 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, return err; } } else { + if (flags & NFT_CHAIN_BASE) + return -EINVAL; + if (flags & NFT_CHAIN_HW_OFFLOAD) + return -EOPNOTSUPP; + chain = kzalloc(sizeof(*chain), GFP_KERNEL); if (chain == NULL) return -ENOMEM; + + chain->flags = flags; } ctx->chain = chain; INIT_LIST_HEAD(&chain->rules); chain->handle = nf_tables_alloc_handle(table); chain->table = table; - chain->name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL); + + if (nla[NFTA_CHAIN_NAME]) { + chain->name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL); + } else { + if (!(flags & NFT_CHAIN_BINDING)) + return -EINVAL; + + snprintf(name, sizeof(name), "__chain%llu", ++chain_id); + chain->name = kstrdup(name, GFP_KERNEL); + } + if (!chain->name) { err = -ENOMEM; goto err1; @@ -2976,8 +3004,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx, kfree(rule); } -static void nf_tables_rule_release(const struct nft_ctx *ctx, - struct nft_rule *rule) +void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *rule) { nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE); nf_tables_rule_destroy(ctx, rule); @@ -3075,6 +3102,9 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk, NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]); return PTR_ERR(chain); } + if (nft_chain_is_bound(chain)) + return -EOPNOTSUPP; + } else if (nla[NFTA_RULE_CHAIN_ID]) { chain = nft_chain_lookup_byid(net, nla[NFTA_RULE_CHAIN_ID]); if (IS_ERR(chain)) { @@ -3294,6 +3324,8 @@ static int nf_tables_delrule(struct net *net, struct sock *nlsk, NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]); return PTR_ERR(chain); } + if (nft_chain_is_bound(chain)) + return -EOPNOTSUPP; } nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla); @@ -5330,11 +5362,24 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk, */ void nft_data_hold(const struct nft_data *data, enum nft_data_types type) { + struct nft_chain *chain; + struct nft_rule *rule; + if (type == NFT_DATA_VERDICT) { switch (data->verdict.code) { case NFT_JUMP: case NFT_GOTO: - data->verdict.chain->use++; + chain = data->verdict.chain; + chain->use++; + + if (!nft_chain_is_bound(chain)) + break; + + chain->table->use++; + list_for_each_entry(rule, &chain->rules, list) + chain->use++; + + nft_chain_add(chain->table, chain); break; } } @@ -7474,7 +7519,7 @@ static void nft_obj_del(struct nft_object *obj) list_del_rcu(&obj->list); } -static void nft_chain_del(struct nft_chain *chain) +void nft_chain_del(struct nft_chain *chain) { struct nft_table *table = chain->table; @@ -7825,6 +7870,10 @@ static int __nf_tables_abort(struct net *net, bool autoload) kfree(nft_trans_chain_name(trans)); nft_trans_destroy(trans); } else { + if (nft_chain_is_bound(trans->ctx.chain)) { + nft_trans_destroy(trans); + break; + } trans->ctx.table->use--; nft_chain_del(trans->ctx.chain); nf_tables_unregister_hook(trans->ctx.net, 
@@ -8321,10 +8370,23 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, static void nft_verdict_uninit(const struct nft_data *data) { + struct nft_chain *chain; + struct nft_rule *rule; + switch (data->verdict.code) { case NFT_JUMP: case NFT_GOTO: - data->verdict.chain->use--; + chain = data->verdict.chain; + chain->use--; + + if (!nft_chain_is_bound(chain)) + break; + + chain->table->use--; + list_for_each_entry(rule, &chain->rules, list) + chain->use--; + + nft_chain_del(chain); break; } } diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c index c7f0ef73d939..9e556638bb32 100644 --- a/net/netfilter/nft_immediate.c +++ b/net/netfilter/nft_immediate.c @@ -54,6 +54,23 @@ static int nft_immediate_init(const struct nft_ctx *ctx, if (err < 0) goto err1; + if (priv->dreg == NFT_REG_VERDICT) { + struct nft_chain *chain = priv->data.verdict.chain; + + switch (priv->data.verdict.code) { + case NFT_JUMP: + case NFT_GOTO: + if (nft_chain_is_bound(chain)) { + err = -EBUSY; + goto err1; + } + chain->bound = true; + break; + default: + break; + } + } + return 0; err1: @@ -81,6 +98,39 @@ static void nft_immediate_deactivate(const struct nft_ctx *ctx, return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg)); } +static void nft_immediate_destroy(const struct nft_ctx *ctx, + const struct nft_expr *expr) +{ + const struct nft_immediate_expr *priv = nft_expr_priv(expr); + const struct nft_data *data = &priv->data; + struct nft_ctx chain_ctx; + struct nft_chain *chain; + struct nft_rule *rule; + + if (priv->dreg != NFT_REG_VERDICT) + return; + + switch (data->verdict.code) { + case NFT_JUMP: + case NFT_GOTO: + chain = data->verdict.chain; + + if (!nft_chain_is_bound(chain)) + break; + + chain_ctx = *ctx; + chain_ctx.chain = chain; + + list_for_each_entry(rule, &chain->rules, list) + nf_tables_rule_release(&chain_ctx, rule); + + nf_tables_chain_destroy(&chain_ctx); + break; + default: + break; + } +} + static int nft_immediate_dump(struct sk_buff *skb, const struct nft_expr *expr) { const struct nft_immediate_expr *priv = nft_expr_priv(expr); @@ -170,6 +220,7 @@ static const struct nft_expr_ops nft_imm_ops = { .init = nft_immediate_init, .activate = nft_immediate_activate, .deactivate = nft_immediate_deactivate, + .destroy = nft_immediate_destroy, .dump = nft_immediate_dump, .validate = nft_immediate_validate, .offload = nft_immediate_offload, -- cgit v1.2.3 From c1f79a2eefdcc0aef5d7a911c27a3f75f1936ecd Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Sat, 4 Jul 2020 02:51:28 +0200 Subject: netfilter: nf_tables: reject unsupported chain flags Bail out if userspace sends unsupported chain flags. 
Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nf_tables.h | 3 +++ net/netfilter/nf_tables_api.c | 3 +++ 2 files changed, 6 insertions(+) (limited to 'include') diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index e00b4ae6174e..42f351c1f5c5 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -189,6 +189,9 @@ enum nft_chain_flags { NFT_CHAIN_HW_OFFLOAD = (1 << 1), NFT_CHAIN_BINDING = (1 << 2), }; +#define NFT_CHAIN_FLAGS (NFT_CHAIN_BASE | \ + NFT_CHAIN_HW_OFFLOAD | \ + NFT_CHAIN_BINDING) /** * enum nft_chain_attributes - nf_tables chain netlink attributes diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index b8a970dad213..f96785586f64 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2285,6 +2285,9 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk, else if (chain) flags = chain->flags; + if (flags & ~NFT_CHAIN_FLAGS) + return -EOPNOTSUPP; + nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla); if (chain != NULL) { -- cgit v1.2.3 From 8fa88a88d573093868565a1afba43b5ae5b3a316 Mon Sep 17 00:00:00 2001 From: Valentin Schneider Date: Fri, 3 Jul 2020 16:56:45 +0100 Subject: genirq: Remove preflow handler support That was put in place for sparc64, and blackfin also used it for some time; sparc64 no longer uses those, and blackfin is dead. As there are no more users, remove preflow handlers. Signed-off-by: Valentin Schneider Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/20200703155645.29703-3-valentin.schneider@arm.com --- include/linux/irqdesc.h | 15 --------------- include/linux/irqhandler.h | 1 - kernel/irq/Kconfig | 4 ---- kernel/irq/chip.c | 13 ------------- 4 files changed, 33 deletions(-) (limited to 'include') diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 8f2820c5e69e..5745491303e0 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -22,7 +22,6 @@ struct pt_regs; * @irq_common_data: per irq and chip data passed down to chip functions * @kstat_irqs: irq stats per cpu * @handle_irq: highlevel irq-events handler - * @preflow_handler: handler called before the flow handler (currently used by sparc) * @action: the irq action chain * @status_use_accessors: status information * @core_internal_state__do_not_mess_with_it: core internal status information @@ -58,9 +57,6 @@ struct irq_desc { struct irq_data irq_data; unsigned int __percpu *kstat_irqs; irq_flow_handler_t handle_irq; -#ifdef CONFIG_IRQ_PREFLOW_FASTEOI - irq_preflow_handler_t preflow_handler; -#endif struct irqaction *action; /* IRQ action list */ unsigned int status_use_accessors; unsigned int core_internal_state__do_not_mess_with_it; @@ -268,15 +264,4 @@ irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class, } } -#ifdef CONFIG_IRQ_PREFLOW_FASTEOI -static inline void -__irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler) -{ - struct irq_desc *desc; - - desc = irq_to_desc(irq); - desc->preflow_handler = handler; -} -#endif - #endif diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h index 1e6f4e7123d6..c30f454a9518 100644 --- a/include/linux/irqhandler.h +++ b/include/linux/irqhandler.h @@ -10,6 +10,5 @@ struct irq_desc; struct irq_data; typedef void (*irq_flow_handler_t)(struct irq_desc *desc); -typedef void (*irq_preflow_handler_t)(struct irq_data *data); #endif diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig 
index 20512252ecc9..10a5aff4eecc 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig @@ -51,10 +51,6 @@ config GENERIC_IRQ_INJECTION config HARDIRQS_SW_RESEND bool -# Preflow handler support for fasteoi (sparc64) -config IRQ_PREFLOW_FASTEOI - bool - # Edge style eoi based handler (cell) config IRQ_EDGE_EOI_HANDLER bool diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 41e7e37a0928..75bbaa8b38f1 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -656,16 +656,6 @@ out_unlock: } EXPORT_SYMBOL_GPL(handle_level_irq); -#ifdef CONFIG_IRQ_PREFLOW_FASTEOI -static inline void preflow_handler(struct irq_desc *desc) -{ - if (desc->preflow_handler) - desc->preflow_handler(&desc->irq_data); -} -#else -static inline void preflow_handler(struct irq_desc *desc) { } -#endif - static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip) { if (!(desc->istate & IRQS_ONESHOT)) { @@ -721,7 +711,6 @@ void handle_fasteoi_irq(struct irq_desc *desc) if (desc->istate & IRQS_ONESHOT) mask_irq(desc); - preflow_handler(desc); handle_irq_event(desc); cond_unmask_eoi_irq(desc, chip); @@ -1231,7 +1220,6 @@ void handle_fasteoi_ack_irq(struct irq_desc *desc) /* Start handling the irq */ desc->irq_data.chip->irq_ack(&desc->irq_data); - preflow_handler(desc); handle_irq_event(desc); cond_unmask_eoi_irq(desc, chip); @@ -1281,7 +1269,6 @@ void handle_fasteoi_mask_irq(struct irq_desc *desc) if (desc->istate & IRQS_ONESHOT) mask_irq(desc); - preflow_handler(desc); handle_irq_event(desc); cond_unmask_eoi_irq(desc, chip); -- cgit v1.2.3 From 98f803cfa76eb67d0e429ba76a39471f95d83675 Mon Sep 17 00:00:00 2001 From: Jeff Chase Date: Tue, 23 Jun 2020 01:59:49 +0200 Subject: media: cec: add adap_controls_phys_addr option Use of the cec notifier framework is required to support CEC_CAP_CONNECTOR_INFO but some devices do not want physical address updates from the notifier. This adds an option to allow registering with a cec notifier without getting address updates. 
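As a rough illustration of where a driver sets the new flag, it goes between allocating the adapter and registering with the notifier; everything here apart from the documented CEC calls and the new field (the function name, ops table, names and error handling) is a sketch, not code from this patch:

#include <linux/device.h>
#include <linux/err.h>
#include <media/cec.h>
#include <media/cec-notifier.h>

static int my_cec_register(struct device *dev, struct device *hdmi_dev,
			   const struct cec_adap_ops *ops, void *priv)
{
	struct cec_adapter *adap;
	struct cec_notifier *notifier;

	adap = cec_allocate_adapter(ops, priv, "my-cec",
				    CEC_CAP_DEFAULTS | CEC_CAP_CONNECTOR_INFO,
				    CEC_MAX_LOG_ADDRS);
	if (IS_ERR(adap))
		return PTR_ERR(adap);

	/* This hardware tracks HPD/EDID itself, so skip the notifier's
	 * physical address updates while still receiving connector info.
	 */
	adap->adap_controls_phys_addr = true;

	notifier = cec_notifier_cec_adap_register(hdmi_dev, NULL, adap);
	if (!notifier) {
		cec_delete_adapter(adap);
		return -ENOMEM;
	}

	return cec_register_adapter(adap, dev);
}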
[hans: document the new adap_controls_phys_addr bool] Signed-off-by: Jeff Chase Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/cec/core/cec-notifier.c | 11 +++++++---- include/media/cec.h | 6 ++++++ 2 files changed, 13 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/media/cec/core/cec-notifier.c b/drivers/media/cec/core/cec-notifier.c index 517e0035fc99..95f363bb1d19 100644 --- a/drivers/media/cec/core/cec-notifier.c +++ b/drivers/media/cec/core/cec-notifier.c @@ -116,7 +116,8 @@ cec_notifier_conn_register(struct device *hdmi_dev, const char *port_name, else memset(&n->conn_info, 0, sizeof(n->conn_info)); if (n->cec_adap) { - cec_phys_addr_invalidate(n->cec_adap); + if (!n->cec_adap->adap_controls_phys_addr) + cec_phys_addr_invalidate(n->cec_adap); cec_s_conn_info(n->cec_adap, conn_info); } mutex_unlock(&n->lock); @@ -133,7 +134,8 @@ void cec_notifier_conn_unregister(struct cec_notifier *n) memset(&n->conn_info, 0, sizeof(n->conn_info)); n->phys_addr = CEC_PHYS_ADDR_INVALID; if (n->cec_adap) { - cec_phys_addr_invalidate(n->cec_adap); + if (!n->cec_adap->adap_controls_phys_addr) + cec_phys_addr_invalidate(n->cec_adap); cec_s_conn_info(n->cec_adap, NULL); } mutex_unlock(&n->lock); @@ -158,7 +160,8 @@ cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *port_name, n->cec_adap = adap; adap->conn_info = n->conn_info; adap->notifier = n; - cec_s_phys_addr(adap, n->phys_addr, false); + if (!adap->adap_controls_phys_addr) + cec_s_phys_addr(adap, n->phys_addr, false); mutex_unlock(&n->lock); return n; } @@ -185,7 +188,7 @@ void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa) mutex_lock(&n->lock); n->phys_addr = pa; - if (n->cec_adap) + if (n->cec_adap && !n->cec_adap->adap_controls_phys_addr) cec_s_phys_addr(n->cec_adap, n->phys_addr, false); mutex_unlock(&n->lock); } diff --git a/include/media/cec.h b/include/media/cec.h index 32f7c695d7b5..c48b5f2e4b50 100644 --- a/include/media/cec.h +++ b/include/media/cec.h @@ -172,6 +172,11 @@ struct cec_adap_ops { * @is_configured: the CEC adapter is configured (i.e. has claimed LAs) * @cec_pin_is_high: if true then the CEC pin is high. Only used with the * CEC pin framework. + * @adap_controls_phys_addr: if true, then the CEC adapter controls the + * physical address, i.e. the CEC hardware can detect HPD changes and + * read the EDID and is not dependent on an external HDMI driver. + * Drivers that need this can set this field to true after the + * cec_allocate_adapter() call. * @last_initiator: the initiator of the last transmitted message. * @monitor_all_cnt: number of filehandles monitoring all msgs * @monitor_pin_cnt: number of filehandles monitoring pin changes @@ -222,6 +227,7 @@ struct cec_adapter { bool is_configuring; bool is_configured; bool cec_pin_is_high; + bool adap_controls_phys_addr; u8 last_initiator; u32 monitor_all_cnt; u32 monitor_pin_cnt; -- cgit v1.2.3 From ce548396a4334a4c6e9faada8db89dacf35822b2 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Fri, 26 Jun 2020 13:53:18 +0200 Subject: media: mach-omap1: board-ams-delta.c: remove soc_camera dependencies The soc_camera driver is about to be removed, so drop camera support from this board. Note that the soc_camera driver itself has long since been deprecated and can't be compiled anymore (it depends on BROKEN), so camera support on this board has been broken for a long time (at least since 4.6 when the omap1_camera.c was removed from soc_camera). 
Signed-off-by: Hans Verkuil Cc: Tony Lindgren Acked-by: Arnd Bergmann Signed-off-by: Mauro Carvalho Chehab --- arch/arm/mach-omap1/board-ams-delta.c | 32 ------------------ arch/arm/mach-omap1/camera.h | 14 -------- arch/arm/mach-omap1/devices.c | 43 ------------------------ include/linux/platform_data/media/omap1_camera.h | 32 ------------------ 4 files changed, 121 deletions(-) delete mode 100644 arch/arm/mach-omap1/camera.h delete mode 100644 include/linux/platform_data/media/omap1_camera.h (limited to 'include') diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c index 8d32894ecd2e..2ee527c00284 100644 --- a/arch/arm/mach-omap1/board-ams-delta.c +++ b/arch/arm/mach-omap1/board-ams-delta.c @@ -29,8 +29,6 @@ #include #include -#include - #include #include #include @@ -40,7 +38,6 @@ #include #include -#include "camera.h" #include #include "ams-delta-fiq.h" @@ -459,12 +456,6 @@ static struct gpiod_lookup_table leds_gpio_table = { }, }; -static struct i2c_board_info ams_delta_camera_board_info[] = { - { - I2C_BOARD_INFO("ov6650", 0x60), - }, -}; - #ifdef CONFIG_LEDS_TRIGGERS DEFINE_LED_TRIGGER(ams_delta_camera_led_trigger); @@ -483,27 +474,6 @@ static int ams_delta_camera_power(struct device *dev, int power) #define ams_delta_camera_power NULL #endif -static struct soc_camera_link ams_delta_iclink = { - .bus_id = 0, /* OMAP1 SoC camera bus */ - .i2c_adapter_id = 1, - .board_info = &ams_delta_camera_board_info[0], - .module_name = "ov6650", - .power = ams_delta_camera_power, -}; - -static struct platform_device ams_delta_camera_device = { - .name = "soc-camera-pdrv", - .id = 0, - .dev = { - .platform_data = &ams_delta_iclink, - }, -}; - -static struct omap1_cam_platform_data ams_delta_camera_platform_data = { - .camexclk_khz = 12000, /* default 12MHz clock, no extra DPLL */ - .lclk_khz_max = 1334, /* results in 5fps CIF, 10fps QCIF */ -}; - static struct platform_device ams_delta_audio_device = { .name = "ams-delta-audio", .id = -1, @@ -598,7 +568,6 @@ static struct platform_device *ams_delta_devices[] __initdata = { &latch1_gpio_device, &latch2_gpio_device, &ams_delta_kp_device, - &ams_delta_camera_device, &ams_delta_audio_device, &ams_delta_serio_device, &ams_delta_nand_device, @@ -750,7 +719,6 @@ static void __init ams_delta_init(void) omap_register_i2c_bus(1, 100, NULL, 0); omap1_usb_init(&ams_delta_usb_config); - omap1_set_camera_info(&ams_delta_camera_platform_data); #ifdef CONFIG_LEDS_TRIGGERS led_trigger_register_simple("ams_delta_camera", &ams_delta_camera_led_trigger); diff --git a/arch/arm/mach-omap1/camera.h b/arch/arm/mach-omap1/camera.h deleted file mode 100644 index 8018f410d024..000000000000 --- a/arch/arm/mach-omap1/camera.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __ASM_ARCH_CAMERA_H_ -#define __ASM_ARCH_CAMERA_H_ - -#include - -void omap1_camera_init(void *); - -static inline void omap1_set_camera_info(struct omap1_cam_platform_data *info) -{ - omap1_camera_init(info); -} - -#endif /* __ASM_ARCH_CAMERA_H_ */ diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c index 3c4900ac72fc..eb0f09edb3d1 100644 --- a/arch/arm/mach-omap1/devices.c +++ b/arch/arm/mach-omap1/devices.c @@ -21,7 +21,6 @@ #include #include -#include "camera.h" #include #include "common.h" @@ -258,48 +257,6 @@ static inline void omap_init_spi100k(void) } #endif - -#define OMAP1_CAMERA_BASE 0xfffb6800 -#define OMAP1_CAMERA_IOSIZE 0x1c - -static struct resource omap1_camera_resources[] = { - [0] = { - .start = 
OMAP1_CAMERA_BASE, - .end = OMAP1_CAMERA_BASE + OMAP1_CAMERA_IOSIZE - 1, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = INT_CAMERA, - .flags = IORESOURCE_IRQ, - }, -}; - -static u64 omap1_camera_dma_mask = DMA_BIT_MASK(32); - -static struct platform_device omap1_camera_device = { - .name = "omap1-camera", - .id = 0, /* This is used to put cameras on this interface */ - .dev = { - .dma_mask = &omap1_camera_dma_mask, - .coherent_dma_mask = DMA_BIT_MASK(32), - }, - .num_resources = ARRAY_SIZE(omap1_camera_resources), - .resource = omap1_camera_resources, -}; - -void __init omap1_camera_init(void *info) -{ - struct platform_device *dev = &omap1_camera_device; - int ret; - - dev->dev.platform_data = info; - - ret = platform_device_register(dev); - if (ret) - dev_err(&dev->dev, "unable to register device: %d\n", ret); -} - - /*-------------------------------------------------------------------------*/ static inline void omap_init_sti(void) {} diff --git a/include/linux/platform_data/media/omap1_camera.h b/include/linux/platform_data/media/omap1_camera.h deleted file mode 100644 index 386439db68de..000000000000 --- a/include/linux/platform_data/media/omap1_camera.h +++ /dev/null @@ -1,32 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Header for V4L2 SoC Camera driver for OMAP1 Camera Interface - * - * Copyright (C) 2010, Janusz Krzysztofik - */ - -#ifndef __MEDIA_OMAP1_CAMERA_H_ -#define __MEDIA_OMAP1_CAMERA_H_ - -#include - -#define OMAP1_CAMERA_IOSIZE 0x1c - -enum omap1_cam_vb_mode { - OMAP1_CAM_DMA_CONTIG = 0, - OMAP1_CAM_DMA_SG, -}; - -#define OMAP1_CAMERA_MIN_BUF_COUNT(x) ((x) == OMAP1_CAM_DMA_CONTIG ? 3 : 2) - -struct omap1_cam_platform_data { - unsigned long camexclk_khz; - unsigned long lclk_khz_max; - unsigned long flags; -}; - -#define OMAP1_CAMERA_LCLK_RISING BIT(0) -#define OMAP1_CAMERA_RST_LOW BIT(1) -#define OMAP1_CAMERA_RST_HIGH BIT(2) - -#endif /* __MEDIA_OMAP1_CAMERA_H_ */ -- cgit v1.2.3 From e7eab49132ba819632c3bb9cd5b8342f2cdeb939 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Fri, 26 Jun 2020 13:53:20 +0200 Subject: media: staging/media/soc_camera: remove this driver The soc_camera driver (and related soc_camera-dependent sensor drivers) is obsolete and depends on BROKEN for a long time now. Nobody is using it, so it is time to kill it off. 
Signed-off-by: Hans Verkuil Cc: Arnd Bergmann Signed-off-by: Mauro Carvalho Chehab --- drivers/staging/media/Kconfig | 2 - drivers/staging/media/Makefile | 1 - drivers/staging/media/soc_camera/Kconfig | 51 - drivers/staging/media/soc_camera/Makefile | 7 - drivers/staging/media/soc_camera/TODO | 4 - drivers/staging/media/soc_camera/imx074.c | 492 ------ drivers/staging/media/soc_camera/mt9t031.c | 853 --------- drivers/staging/media/soc_camera/soc-camera.rst | 171 -- drivers/staging/media/soc_camera/soc_camera.c | 2164 ----------------------- drivers/staging/media/soc_camera/soc_mediabus.c | 529 ------ drivers/staging/media/soc_camera/soc_mt9v022.c | 1008 ----------- drivers/staging/media/soc_camera/soc_ov5642.c | 1085 ------------ drivers/staging/media/soc_camera/soc_ov9740.c | 992 ----------- include/media/drv-intf/soc_mediabus.h | 107 -- 14 files changed, 7466 deletions(-) delete mode 100644 drivers/staging/media/soc_camera/Kconfig delete mode 100644 drivers/staging/media/soc_camera/Makefile delete mode 100644 drivers/staging/media/soc_camera/TODO delete mode 100644 drivers/staging/media/soc_camera/imx074.c delete mode 100644 drivers/staging/media/soc_camera/mt9t031.c delete mode 100644 drivers/staging/media/soc_camera/soc-camera.rst delete mode 100644 drivers/staging/media/soc_camera/soc_camera.c delete mode 100644 drivers/staging/media/soc_camera/soc_mediabus.c delete mode 100644 drivers/staging/media/soc_camera/soc_mt9v022.c delete mode 100644 drivers/staging/media/soc_camera/soc_ov5642.c delete mode 100644 drivers/staging/media/soc_camera/soc_ov9740.c delete mode 100644 include/media/drv-intf/soc_mediabus.h (limited to 'include') diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig index 4bb1eca6f597..71d077762698 100644 --- a/drivers/staging/media/Kconfig +++ b/drivers/staging/media/Kconfig @@ -42,8 +42,6 @@ source "drivers/staging/media/tegra-video/Kconfig" source "drivers/staging/media/ipu3/Kconfig" -source "drivers/staging/media/soc_camera/Kconfig" - source "drivers/staging/media/phy-rockchip-dphy-rx0/Kconfig" source "drivers/staging/media/rkisp1/Kconfig" diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile index 71a47b61836d..17ececa1e095 100644 --- a/drivers/staging/media/Makefile +++ b/drivers/staging/media/Makefile @@ -10,7 +10,6 @@ obj-$(CONFIG_VIDEO_TEGRA) += tegra-video/ obj-$(CONFIG_TEGRA_VDE) += tegra-vde/ obj-$(CONFIG_VIDEO_HANTRO) += hantro/ obj-$(CONFIG_VIDEO_IPU3_IMGU) += ipu3/ -obj-$(CONFIG_SOC_CAMERA) += soc_camera/ obj-$(CONFIG_PHY_ROCKCHIP_DPHY_RX0) += phy-rockchip-dphy-rx0/ obj-$(CONFIG_VIDEO_ROCKCHIP_ISP1) += rkisp1/ obj-$(CONFIG_VIDEO_USBVISION) += usbvision/ diff --git a/drivers/staging/media/soc_camera/Kconfig b/drivers/staging/media/soc_camera/Kconfig deleted file mode 100644 index 4a54db121574..000000000000 --- a/drivers/staging/media/soc_camera/Kconfig +++ /dev/null @@ -1,51 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -config SOC_CAMERA - tristate "SoC camera support" - depends on VIDEO_V4L2 && HAS_DMA && I2C && BROKEN - select VIDEOBUF2_CORE - help - SoC Camera is a common API to several cameras, not connecting - over a bus like PCI or USB. For example some i2c camera connected - directly to the data bus of an SoC. - -comment "soc_camera sensor drivers" - -config SOC_CAMERA_MT9M111 - tristate "legacy soc_camera mt9m111, mt9m112 and mt9m131 support" - depends on SOC_CAMERA && I2C - select VIDEO_MT9M111 - help - This driver supports MT9M111, MT9M112 and MT9M131 cameras from - Micron/Aptina. 
- This is the legacy configuration which shouldn't be used anymore, - while VIDEO_MT9M111 should be used instead. - -config SOC_CAMERA_MT9V022 - tristate "mt9v022 and mt9v024 support" - depends on SOC_CAMERA && I2C - help - This driver supports MT9V022 cameras from Micron - -config SOC_CAMERA_OV5642 - tristate "ov5642 camera support" - depends on SOC_CAMERA && I2C - help - This is a V4L2 camera driver for the OmniVision OV5642 sensor - -config SOC_CAMERA_OV9740 - tristate "ov9740 camera support" - depends on SOC_CAMERA && I2C - help - This is a ov9740 camera driver - -config SOC_CAMERA_IMX074 - tristate "imx074 support (DEPRECATED)" - depends on SOC_CAMERA && I2C - help - This driver supports IMX074 cameras from Sony - -config SOC_CAMERA_MT9T031 - tristate "mt9t031 support (DEPRECATED)" - depends on SOC_CAMERA && I2C - help - This driver supports MT9T031 cameras from Micron. diff --git a/drivers/staging/media/soc_camera/Makefile b/drivers/staging/media/soc_camera/Makefile deleted file mode 100644 index 3a351bd629f5..000000000000 --- a/drivers/staging/media/soc_camera/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_SOC_CAMERA) += soc_camera.o soc_mediabus.o -obj-$(CONFIG_SOC_CAMERA_MT9V022) += soc_mt9v022.o -obj-$(CONFIG_SOC_CAMERA_OV5642) += soc_ov5642.o -obj-$(CONFIG_SOC_CAMERA_OV9740) += soc_ov9740.o -obj-$(CONFIG_SOC_CAMERA_IMX074) += imx074.o -obj-$(CONFIG_SOC_CAMERA_MT9T031) += mt9t031.o diff --git a/drivers/staging/media/soc_camera/TODO b/drivers/staging/media/soc_camera/TODO deleted file mode 100644 index 932af6443b67..000000000000 --- a/drivers/staging/media/soc_camera/TODO +++ /dev/null @@ -1,4 +0,0 @@ -The SoC camera framework is obsolete and scheduled for removal in the near -future. Developers are encouraged to convert the drivers to use the -regular V4L2 API if these drivers are still needed (and if someone has the -hardware). 
diff --git a/drivers/staging/media/soc_camera/imx074.c b/drivers/staging/media/soc_camera/imx074.c deleted file mode 100644 index 14240b74cdd0..000000000000 --- a/drivers/staging/media/soc_camera/imx074.c +++ /dev/null @@ -1,492 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Driver for IMX074 CMOS Image Sensor from Sony - * - * Copyright (C) 2010, Guennadi Liakhovetski - * - * Partially inspired by the IMX074 driver from the Android / MSM tree - */ -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -/* IMX074 registers */ - -#define MODE_SELECT 0x0100 -#define IMAGE_ORIENTATION 0x0101 -#define GROUPED_PARAMETER_HOLD 0x0104 - -/* Integration Time */ -#define COARSE_INTEGRATION_TIME_HI 0x0202 -#define COARSE_INTEGRATION_TIME_LO 0x0203 -/* Gain */ -#define ANALOGUE_GAIN_CODE_GLOBAL_HI 0x0204 -#define ANALOGUE_GAIN_CODE_GLOBAL_LO 0x0205 - -/* PLL registers */ -#define PRE_PLL_CLK_DIV 0x0305 -#define PLL_MULTIPLIER 0x0307 -#define PLSTATIM 0x302b -#define VNDMY_ABLMGSHLMT 0x300a -#define Y_OPBADDR_START_DI 0x3014 -/* mode setting */ -#define FRAME_LENGTH_LINES_HI 0x0340 -#define FRAME_LENGTH_LINES_LO 0x0341 -#define LINE_LENGTH_PCK_HI 0x0342 -#define LINE_LENGTH_PCK_LO 0x0343 -#define YADDR_START 0x0347 -#define YADDR_END 0x034b -#define X_OUTPUT_SIZE_MSB 0x034c -#define X_OUTPUT_SIZE_LSB 0x034d -#define Y_OUTPUT_SIZE_MSB 0x034e -#define Y_OUTPUT_SIZE_LSB 0x034f -#define X_EVEN_INC 0x0381 -#define X_ODD_INC 0x0383 -#define Y_EVEN_INC 0x0385 -#define Y_ODD_INC 0x0387 - -#define HMODEADD 0x3001 -#define VMODEADD 0x3016 -#define VAPPLINE_START 0x3069 -#define VAPPLINE_END 0x306b -#define SHUTTER 0x3086 -#define HADDAVE 0x30e8 -#define LANESEL 0x3301 - -/* IMX074 supported geometry */ -#define IMX074_WIDTH 1052 -#define IMX074_HEIGHT 780 - -/* IMX074 has only one fixed colorspace per pixelcode */ -struct imx074_datafmt { - u32 code; - enum v4l2_colorspace colorspace; -}; - -struct imx074 { - struct v4l2_subdev subdev; - const struct imx074_datafmt *fmt; - struct v4l2_clk *clk; -}; - -static const struct imx074_datafmt imx074_colour_fmts[] = { - {MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB}, -}; - -static struct imx074 *to_imx074(const struct i2c_client *client) -{ - return container_of(i2c_get_clientdata(client), struct imx074, subdev); -} - -/* Find a data format by a pixel code in an array */ -static const struct imx074_datafmt *imx074_find_datafmt(u32 code) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(imx074_colour_fmts); i++) - if (imx074_colour_fmts[i].code == code) - return imx074_colour_fmts + i; - - return NULL; -} - -static int reg_write(struct i2c_client *client, const u16 addr, const u8 data) -{ - struct i2c_adapter *adap = client->adapter; - struct i2c_msg msg; - unsigned char tx[3]; - int ret; - - msg.addr = client->addr; - msg.buf = tx; - msg.len = 3; - msg.flags = 0; - - tx[0] = addr >> 8; - tx[1] = addr & 0xff; - tx[2] = data; - - ret = i2c_transfer(adap, &msg, 1); - - mdelay(2); - - return ret == 1 ? 
0 : -EIO; -} - -static int reg_read(struct i2c_client *client, const u16 addr) -{ - u8 buf[2] = {addr >> 8, addr & 0xff}; - int ret; - struct i2c_msg msgs[] = { - { - .addr = client->addr, - .flags = 0, - .len = 2, - .buf = buf, - }, { - .addr = client->addr, - .flags = I2C_M_RD, - .len = 2, - .buf = buf, - }, - }; - - ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); - if (ret < 0) { - dev_warn(&client->dev, "Reading register %x from %x failed\n", - addr, client->addr); - return ret; - } - - return buf[0] & 0xff; /* no sign-extension */ -} - -static int imx074_set_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *mf = &format->format; - const struct imx074_datafmt *fmt = imx074_find_datafmt(mf->code); - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct imx074 *priv = to_imx074(client); - - if (format->pad) - return -EINVAL; - - dev_dbg(sd->v4l2_dev->dev, "%s(%u)\n", __func__, mf->code); - - if (!fmt) { - /* MIPI CSI could have changed the format, double-check */ - if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) - return -EINVAL; - mf->code = imx074_colour_fmts[0].code; - mf->colorspace = imx074_colour_fmts[0].colorspace; - } - - mf->width = IMX074_WIDTH; - mf->height = IMX074_HEIGHT; - mf->field = V4L2_FIELD_NONE; - - if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) - priv->fmt = fmt; - else - cfg->try_fmt = *mf; - - return 0; -} - -static int imx074_get_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *mf = &format->format; - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct imx074 *priv = to_imx074(client); - - const struct imx074_datafmt *fmt = priv->fmt; - - if (format->pad) - return -EINVAL; - - mf->code = fmt->code; - mf->colorspace = fmt->colorspace; - mf->width = IMX074_WIDTH; - mf->height = IMX074_HEIGHT; - mf->field = V4L2_FIELD_NONE; - - return 0; -} - -static int imx074_get_selection(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_selection *sel) -{ - if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE) - return -EINVAL; - - sel->r.left = 0; - sel->r.top = 0; - sel->r.width = IMX074_WIDTH; - sel->r.height = IMX074_HEIGHT; - - switch (sel->target) { - case V4L2_SEL_TGT_CROP_BOUNDS: - case V4L2_SEL_TGT_CROP: - return 0; - default: - return -EINVAL; - } -} - -static int imx074_enum_mbus_code(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_mbus_code_enum *code) -{ - if (code->pad || - (unsigned int)code->index >= ARRAY_SIZE(imx074_colour_fmts)) - return -EINVAL; - - code->code = imx074_colour_fmts[code->index].code; - return 0; -} - -static int imx074_s_stream(struct v4l2_subdev *sd, int enable) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - - /* MODE_SELECT: stream or standby */ - return reg_write(client, MODE_SELECT, !!enable); -} - -static int imx074_s_power(struct v4l2_subdev *sd, int on) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - struct imx074 *priv = to_imx074(client); - - return soc_camera_set_power(&client->dev, ssdd, priv->clk, on); -} - -static int imx074_g_mbus_config(struct v4l2_subdev *sd, - struct v4l2_mbus_config *cfg) -{ - cfg->type = V4L2_MBUS_CSI2_DPHY; - cfg->flags = V4L2_MBUS_CSI2_2_LANE | - V4L2_MBUS_CSI2_CHANNEL_0 | - V4L2_MBUS_CSI2_CONTINUOUS_CLOCK; - - return 0; -} - -static const struct 
v4l2_subdev_video_ops imx074_subdev_video_ops = { - .s_stream = imx074_s_stream, - .g_mbus_config = imx074_g_mbus_config, -}; - -static const struct v4l2_subdev_core_ops imx074_subdev_core_ops = { - .s_power = imx074_s_power, -}; - -static const struct v4l2_subdev_pad_ops imx074_subdev_pad_ops = { - .enum_mbus_code = imx074_enum_mbus_code, - .get_selection = imx074_get_selection, - .get_fmt = imx074_get_fmt, - .set_fmt = imx074_set_fmt, -}; - -static const struct v4l2_subdev_ops imx074_subdev_ops = { - .core = &imx074_subdev_core_ops, - .video = &imx074_subdev_video_ops, - .pad = &imx074_subdev_pad_ops, -}; - -static int imx074_video_probe(struct i2c_client *client) -{ - struct v4l2_subdev *subdev = i2c_get_clientdata(client); - int ret; - u16 id; - - ret = imx074_s_power(subdev, 1); - if (ret < 0) - return ret; - - /* Read sensor Model ID */ - ret = reg_read(client, 0); - if (ret < 0) - goto done; - - id = ret << 8; - - ret = reg_read(client, 1); - if (ret < 0) - goto done; - - id |= ret; - - dev_info(&client->dev, "Chip ID 0x%04x detected\n", id); - - if (id != 0x74) { - ret = -ENODEV; - goto done; - } - - /* PLL Setting EXTCLK=24MHz, 22.5times */ - reg_write(client, PLL_MULTIPLIER, 0x2D); - reg_write(client, PRE_PLL_CLK_DIV, 0x02); - reg_write(client, PLSTATIM, 0x4B); - - /* 2-lane mode */ - reg_write(client, 0x3024, 0x00); - - reg_write(client, IMAGE_ORIENTATION, 0x00); - - /* select RAW mode: - * 0x08+0x08 = top 8 bits - * 0x0a+0x08 = compressed 8-bits - * 0x0a+0x0a = 10 bits - */ - reg_write(client, 0x0112, 0x08); - reg_write(client, 0x0113, 0x08); - - /* Base setting for High frame mode */ - reg_write(client, VNDMY_ABLMGSHLMT, 0x80); - reg_write(client, Y_OPBADDR_START_DI, 0x08); - reg_write(client, 0x3015, 0x37); - reg_write(client, 0x301C, 0x01); - reg_write(client, 0x302C, 0x05); - reg_write(client, 0x3031, 0x26); - reg_write(client, 0x3041, 0x60); - reg_write(client, 0x3051, 0x24); - reg_write(client, 0x3053, 0x34); - reg_write(client, 0x3057, 0xC0); - reg_write(client, 0x305C, 0x09); - reg_write(client, 0x305D, 0x07); - reg_write(client, 0x3060, 0x30); - reg_write(client, 0x3065, 0x00); - reg_write(client, 0x30AA, 0x08); - reg_write(client, 0x30AB, 0x1C); - reg_write(client, 0x30B0, 0x32); - reg_write(client, 0x30B2, 0x83); - reg_write(client, 0x30D3, 0x04); - reg_write(client, 0x3106, 0x78); - reg_write(client, 0x310C, 0x82); - reg_write(client, 0x3304, 0x05); - reg_write(client, 0x3305, 0x04); - reg_write(client, 0x3306, 0x11); - reg_write(client, 0x3307, 0x02); - reg_write(client, 0x3308, 0x0C); - reg_write(client, 0x3309, 0x06); - reg_write(client, 0x330A, 0x08); - reg_write(client, 0x330B, 0x04); - reg_write(client, 0x330C, 0x08); - reg_write(client, 0x330D, 0x06); - reg_write(client, 0x330E, 0x01); - reg_write(client, 0x3381, 0x00); - - /* V : 1/2V-addition (1,3), H : 1/2H-averaging (1,3) -> Full HD */ - /* 1608 = 1560 + 48 (black lines) */ - reg_write(client, FRAME_LENGTH_LINES_HI, 0x06); - reg_write(client, FRAME_LENGTH_LINES_LO, 0x48); - reg_write(client, YADDR_START, 0x00); - reg_write(client, YADDR_END, 0x2F); - /* 0x838 == 2104 */ - reg_write(client, X_OUTPUT_SIZE_MSB, 0x08); - reg_write(client, X_OUTPUT_SIZE_LSB, 0x38); - /* 0x618 == 1560 */ - reg_write(client, Y_OUTPUT_SIZE_MSB, 0x06); - reg_write(client, Y_OUTPUT_SIZE_LSB, 0x18); - reg_write(client, X_EVEN_INC, 0x01); - reg_write(client, X_ODD_INC, 0x03); - reg_write(client, Y_EVEN_INC, 0x01); - reg_write(client, Y_ODD_INC, 0x03); - reg_write(client, HMODEADD, 0x00); - reg_write(client, VMODEADD, 0x16); - 
reg_write(client, VAPPLINE_START, 0x24); - reg_write(client, VAPPLINE_END, 0x53); - reg_write(client, SHUTTER, 0x00); - reg_write(client, HADDAVE, 0x80); - - reg_write(client, LANESEL, 0x00); - - reg_write(client, GROUPED_PARAMETER_HOLD, 0x00); /* off */ - - ret = 0; - -done: - imx074_s_power(subdev, 0); - return ret; -} - -static int imx074_probe(struct i2c_client *client, - const struct i2c_device_id *did) -{ - struct imx074 *priv; - struct i2c_adapter *adapter = client->adapter; - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - int ret; - - if (!ssdd) { - dev_err(&client->dev, "IMX074: missing platform data!\n"); - return -EINVAL; - } - - if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { - dev_warn(&adapter->dev, - "I2C-Adapter doesn't support I2C_FUNC_SMBUS_BYTE\n"); - return -EIO; - } - - priv = devm_kzalloc(&client->dev, sizeof(struct imx074), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - v4l2_i2c_subdev_init(&priv->subdev, client, &imx074_subdev_ops); - - priv->fmt = &imx074_colour_fmts[0]; - - priv->clk = v4l2_clk_get(&client->dev, "mclk"); - if (IS_ERR(priv->clk)) { - dev_info(&client->dev, "Error %ld getting clock\n", PTR_ERR(priv->clk)); - return -EPROBE_DEFER; - } - - ret = soc_camera_power_init(&client->dev, ssdd); - if (ret < 0) - goto epwrinit; - - ret = imx074_video_probe(client); - if (ret < 0) - goto eprobe; - - ret = v4l2_async_register_subdev(&priv->subdev); - if (!ret) - return 0; - -epwrinit: -eprobe: - v4l2_clk_put(priv->clk); - return ret; -} - -static int imx074_remove(struct i2c_client *client) -{ - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - struct imx074 *priv = to_imx074(client); - - v4l2_async_unregister_subdev(&priv->subdev); - v4l2_clk_put(priv->clk); - - if (ssdd->free_bus) - ssdd->free_bus(ssdd); - - return 0; -} - -static const struct i2c_device_id imx074_id[] = { - { "imx074", 0 }, - { } -}; -MODULE_DEVICE_TABLE(i2c, imx074_id); - -static struct i2c_driver imx074_i2c_driver = { - .driver = { - .name = "imx074", - }, - .probe = imx074_probe, - .remove = imx074_remove, - .id_table = imx074_id, -}; - -module_i2c_driver(imx074_i2c_driver); - -MODULE_DESCRIPTION("Sony IMX074 Camera driver"); -MODULE_AUTHOR("Guennadi Liakhovetski "); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/staging/media/soc_camera/mt9t031.c b/drivers/staging/media/soc_camera/mt9t031.c deleted file mode 100644 index c14f23221544..000000000000 --- a/drivers/staging/media/soc_camera/mt9t031.c +++ /dev/null @@ -1,853 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Driver for MT9T031 CMOS Image Sensor from Micron - * - * Copyright (C) 2008, Guennadi Liakhovetski, DENX Software Engineering - */ -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -/* - * ATTENTION: this driver still cannot be used outside of the soc-camera - * framework because of its PM implementation, using the video_device node. - * If hardware becomes available for testing, alternative PM approaches shall - * be considered and tested. 
- */ - -/* - * mt9t031 i2c address 0x5d - * The platform has to define struct i2c_board_info objects and link to them - * from struct soc_camera_host_desc - */ - -/* mt9t031 selected register addresses */ -#define MT9T031_CHIP_VERSION 0x00 -#define MT9T031_ROW_START 0x01 -#define MT9T031_COLUMN_START 0x02 -#define MT9T031_WINDOW_HEIGHT 0x03 -#define MT9T031_WINDOW_WIDTH 0x04 -#define MT9T031_HORIZONTAL_BLANKING 0x05 -#define MT9T031_VERTICAL_BLANKING 0x06 -#define MT9T031_OUTPUT_CONTROL 0x07 -#define MT9T031_SHUTTER_WIDTH_UPPER 0x08 -#define MT9T031_SHUTTER_WIDTH 0x09 -#define MT9T031_PIXEL_CLOCK_CONTROL 0x0a -#define MT9T031_FRAME_RESTART 0x0b -#define MT9T031_SHUTTER_DELAY 0x0c -#define MT9T031_RESET 0x0d -#define MT9T031_READ_MODE_1 0x1e -#define MT9T031_READ_MODE_2 0x20 -#define MT9T031_READ_MODE_3 0x21 -#define MT9T031_ROW_ADDRESS_MODE 0x22 -#define MT9T031_COLUMN_ADDRESS_MODE 0x23 -#define MT9T031_GLOBAL_GAIN 0x35 -#define MT9T031_CHIP_ENABLE 0xF8 - -#define MT9T031_MAX_HEIGHT 1536 -#define MT9T031_MAX_WIDTH 2048 -#define MT9T031_MIN_HEIGHT 2 -#define MT9T031_MIN_WIDTH 18 -#define MT9T031_HORIZONTAL_BLANK 142 -#define MT9T031_VERTICAL_BLANK 25 -#define MT9T031_COLUMN_SKIP 32 -#define MT9T031_ROW_SKIP 20 - -struct mt9t031 { - struct v4l2_subdev subdev; - struct v4l2_ctrl_handler hdl; - struct { - /* exposure/auto-exposure cluster */ - struct v4l2_ctrl *autoexposure; - struct v4l2_ctrl *exposure; - }; - struct v4l2_rect rect; /* Sensor window */ - struct v4l2_clk *clk; - u16 xskip; - u16 yskip; - unsigned int total_h; - unsigned short y_skip_top; /* Lines to skip at the top */ -}; - -static struct mt9t031 *to_mt9t031(const struct i2c_client *client) -{ - return container_of(i2c_get_clientdata(client), struct mt9t031, subdev); -} - -static int reg_read(struct i2c_client *client, const u8 reg) -{ - return i2c_smbus_read_word_swapped(client, reg); -} - -static int reg_write(struct i2c_client *client, const u8 reg, - const u16 data) -{ - return i2c_smbus_write_word_swapped(client, reg, data); -} - -static int reg_set(struct i2c_client *client, const u8 reg, - const u16 data) -{ - int ret; - - ret = reg_read(client, reg); - if (ret < 0) - return ret; - return reg_write(client, reg, ret | data); -} - -static int reg_clear(struct i2c_client *client, const u8 reg, - const u16 data) -{ - int ret; - - ret = reg_read(client, reg); - if (ret < 0) - return ret; - return reg_write(client, reg, ret & ~data); -} - -static int set_shutter(struct i2c_client *client, const u32 data) -{ - int ret; - - ret = reg_write(client, MT9T031_SHUTTER_WIDTH_UPPER, data >> 16); - - if (ret >= 0) - ret = reg_write(client, MT9T031_SHUTTER_WIDTH, data & 0xffff); - - return ret; -} - -static int get_shutter(struct i2c_client *client, u32 *data) -{ - int ret; - - ret = reg_read(client, MT9T031_SHUTTER_WIDTH_UPPER); - *data = ret << 16; - - if (ret >= 0) - ret = reg_read(client, MT9T031_SHUTTER_WIDTH); - *data |= ret & 0xffff; - - return ret < 0 ? ret : 0; -} - -static int mt9t031_idle(struct i2c_client *client) -{ - int ret; - - /* Disable chip output, synchronous option update */ - ret = reg_write(client, MT9T031_RESET, 1); - if (ret >= 0) - ret = reg_write(client, MT9T031_RESET, 0); - if (ret >= 0) - ret = reg_clear(client, MT9T031_OUTPUT_CONTROL, 2); - - return ret >= 0 ? 
0 : -EIO; -} - -static int mt9t031_s_stream(struct v4l2_subdev *sd, int enable) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; - - if (enable) - /* Switch to master "normal" mode */ - ret = reg_set(client, MT9T031_OUTPUT_CONTROL, 2); - else - /* Stop sensor readout */ - ret = reg_clear(client, MT9T031_OUTPUT_CONTROL, 2); - - if (ret < 0) - return -EIO; - - return 0; -} - -/* target must be _even_ */ -static u16 mt9t031_skip(s32 *source, s32 target, s32 max) -{ - unsigned int skip; - - if (*source < target + target / 2) { - *source = target; - return 1; - } - - skip = min(max, *source + target / 2) / target; - if (skip > 8) - skip = 8; - *source = target * skip; - - return skip; -} - -/* rect is the sensor rectangle, the caller guarantees parameter validity */ -static int mt9t031_set_params(struct i2c_client *client, - struct v4l2_rect *rect, u16 xskip, u16 yskip) -{ - struct mt9t031 *mt9t031 = to_mt9t031(client); - int ret; - u16 xbin, ybin; - const u16 hblank = MT9T031_HORIZONTAL_BLANK, - vblank = MT9T031_VERTICAL_BLANK; - - xbin = min(xskip, (u16)3); - ybin = min(yskip, (u16)3); - - /* - * Could just do roundup(rect->left, [xy]bin * 2); but this is cheaper. - * There is always a valid suitably aligned value. The worst case is - * xbin = 3, width = 2048. Then we will start at 36, the last read out - * pixel will be 2083, which is < 2085 - first black pixel. - * - * MT9T031 datasheet imposes window left border alignment, depending on - * the selected xskip. Failing to conform to this requirement produces - * dark horizontal stripes in the image. However, even obeying to this - * requirement doesn't eliminate the stripes in all configurations. They - * appear "locally reproducibly," but can differ between tests under - * different lighting conditions. - */ - switch (xbin) { - case 1: - rect->left &= ~1; - break; - case 2: - rect->left &= ~3; - break; - case 3: - rect->left = rect->left > roundup(MT9T031_COLUMN_SKIP, 6) ? - (rect->left / 6) * 6 : roundup(MT9T031_COLUMN_SKIP, 6); - } - - rect->top &= ~1; - - dev_dbg(&client->dev, "skip %u:%u, rect %ux%u@%u:%u\n", - xskip, yskip, rect->width, rect->height, rect->left, rect->top); - - /* Disable register update, reconfigure atomically */ - ret = reg_set(client, MT9T031_OUTPUT_CONTROL, 1); - if (ret < 0) - return ret; - - /* Blanking and start values - default... 
*/ - ret = reg_write(client, MT9T031_HORIZONTAL_BLANKING, hblank); - if (ret >= 0) - ret = reg_write(client, MT9T031_VERTICAL_BLANKING, vblank); - - if (yskip != mt9t031->yskip || xskip != mt9t031->xskip) { - /* Binning, skipping */ - if (ret >= 0) - ret = reg_write(client, MT9T031_COLUMN_ADDRESS_MODE, - ((xbin - 1) << 4) | (xskip - 1)); - if (ret >= 0) - ret = reg_write(client, MT9T031_ROW_ADDRESS_MODE, - ((ybin - 1) << 4) | (yskip - 1)); - } - dev_dbg(&client->dev, "new physical left %u, top %u\n", - rect->left, rect->top); - - /* - * The caller provides a supported format, as guaranteed by - * .set_fmt(FORMAT_TRY), soc_camera_s_selection() and soc_camera_cropcap() - */ - if (ret >= 0) - ret = reg_write(client, MT9T031_COLUMN_START, rect->left); - if (ret >= 0) - ret = reg_write(client, MT9T031_ROW_START, rect->top); - if (ret >= 0) - ret = reg_write(client, MT9T031_WINDOW_WIDTH, rect->width - 1); - if (ret >= 0) - ret = reg_write(client, MT9T031_WINDOW_HEIGHT, - rect->height + mt9t031->y_skip_top - 1); - if (ret >= 0 && v4l2_ctrl_g_ctrl(mt9t031->autoexposure) == V4L2_EXPOSURE_AUTO) { - mt9t031->total_h = rect->height + mt9t031->y_skip_top + vblank; - - ret = set_shutter(client, mt9t031->total_h); - } - - /* Re-enable register update, commit all changes */ - if (ret >= 0) - ret = reg_clear(client, MT9T031_OUTPUT_CONTROL, 1); - - if (ret >= 0) { - mt9t031->rect = *rect; - mt9t031->xskip = xskip; - mt9t031->yskip = yskip; - } - - return ret < 0 ? ret : 0; -} - -static int mt9t031_set_selection(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_selection *sel) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct mt9t031 *mt9t031 = to_mt9t031(client); - struct v4l2_rect rect = sel->r; - - if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE || - sel->target != V4L2_SEL_TGT_CROP) - return -EINVAL; - - rect.width = ALIGN(rect.width, 2); - rect.height = ALIGN(rect.height, 2); - - soc_camera_limit_side(&rect.left, &rect.width, - MT9T031_COLUMN_SKIP, MT9T031_MIN_WIDTH, MT9T031_MAX_WIDTH); - - soc_camera_limit_side(&rect.top, &rect.height, - MT9T031_ROW_SKIP, MT9T031_MIN_HEIGHT, MT9T031_MAX_HEIGHT); - - return mt9t031_set_params(client, &rect, mt9t031->xskip, mt9t031->yskip); -} - -static int mt9t031_get_selection(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_selection *sel) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct mt9t031 *mt9t031 = to_mt9t031(client); - - if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE) - return -EINVAL; - - switch (sel->target) { - case V4L2_SEL_TGT_CROP_BOUNDS: - sel->r.left = MT9T031_COLUMN_SKIP; - sel->r.top = MT9T031_ROW_SKIP; - sel->r.width = MT9T031_MAX_WIDTH; - sel->r.height = MT9T031_MAX_HEIGHT; - return 0; - case V4L2_SEL_TGT_CROP: - sel->r = mt9t031->rect; - return 0; - default: - return -EINVAL; - } -} - -static int mt9t031_get_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *mf = &format->format; - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct mt9t031 *mt9t031 = to_mt9t031(client); - - if (format->pad) - return -EINVAL; - - mf->width = mt9t031->rect.width / mt9t031->xskip; - mf->height = mt9t031->rect.height / mt9t031->yskip; - mf->code = MEDIA_BUS_FMT_SBGGR10_1X10; - mf->colorspace = V4L2_COLORSPACE_SRGB; - mf->field = V4L2_FIELD_NONE; - - return 0; -} - -/* - * If a user window larger than sensor window is requested, we'll increase the - * sensor window. 
- */ -static int mt9t031_set_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *mf = &format->format; - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct mt9t031 *mt9t031 = to_mt9t031(client); - u16 xskip, yskip; - struct v4l2_rect rect = mt9t031->rect; - - if (format->pad) - return -EINVAL; - - mf->code = MEDIA_BUS_FMT_SBGGR10_1X10; - mf->colorspace = V4L2_COLORSPACE_SRGB; - v4l_bound_align_image( - &mf->width, MT9T031_MIN_WIDTH, MT9T031_MAX_WIDTH, 1, - &mf->height, MT9T031_MIN_HEIGHT, MT9T031_MAX_HEIGHT, 1, 0); - - if (format->which == V4L2_SUBDEV_FORMAT_TRY) { - cfg->try_fmt = *mf; - return 0; - } - - /* - * Width and height are within limits. - * S_FMT: use binning and skipping for scaling - */ - xskip = mt9t031_skip(&rect.width, mf->width, MT9T031_MAX_WIDTH); - yskip = mt9t031_skip(&rect.height, mf->height, MT9T031_MAX_HEIGHT); - - mf->code = MEDIA_BUS_FMT_SBGGR10_1X10; - mf->colorspace = V4L2_COLORSPACE_SRGB; - - /* mt9t031_set_params() doesn't change width and height */ - return mt9t031_set_params(client, &rect, xskip, yskip); -} - -#ifdef CONFIG_VIDEO_ADV_DEBUG -static int mt9t031_g_register(struct v4l2_subdev *sd, - struct v4l2_dbg_register *reg) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - - if (reg->reg > 0xff) - return -EINVAL; - - reg->size = 1; - reg->val = reg_read(client, reg->reg); - - if (reg->val > 0xffff) - return -EIO; - - return 0; -} - -static int mt9t031_s_register(struct v4l2_subdev *sd, - const struct v4l2_dbg_register *reg) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - - if (reg->reg > 0xff) - return -EINVAL; - - if (reg_write(client, reg->reg, reg->val) < 0) - return -EIO; - - return 0; -} -#endif - -static int mt9t031_g_volatile_ctrl(struct v4l2_ctrl *ctrl) -{ - struct mt9t031 *mt9t031 = container_of(ctrl->handler, - struct mt9t031, hdl); - const u32 shutter_max = MT9T031_MAX_HEIGHT + MT9T031_VERTICAL_BLANK; - s32 min, max; - - switch (ctrl->id) { - case V4L2_CID_EXPOSURE_AUTO: - min = mt9t031->exposure->minimum; - max = mt9t031->exposure->maximum; - mt9t031->exposure->val = - (shutter_max / 2 + (mt9t031->total_h - 1) * (max - min)) - / shutter_max + min; - break; - } - return 0; -} - -static int mt9t031_s_ctrl(struct v4l2_ctrl *ctrl) -{ - struct mt9t031 *mt9t031 = container_of(ctrl->handler, - struct mt9t031, hdl); - struct v4l2_subdev *sd = &mt9t031->subdev; - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct v4l2_ctrl *exp = mt9t031->exposure; - int data; - - switch (ctrl->id) { - case V4L2_CID_VFLIP: - if (ctrl->val) - data = reg_set(client, MT9T031_READ_MODE_2, 0x8000); - else - data = reg_clear(client, MT9T031_READ_MODE_2, 0x8000); - if (data < 0) - return -EIO; - return 0; - case V4L2_CID_HFLIP: - if (ctrl->val) - data = reg_set(client, MT9T031_READ_MODE_2, 0x4000); - else - data = reg_clear(client, MT9T031_READ_MODE_2, 0x4000); - if (data < 0) - return -EIO; - return 0; - case V4L2_CID_GAIN: - /* See Datasheet Table 7, Gain settings. 
*/ - if (ctrl->val <= ctrl->default_value) { - /* Pack it into 0..1 step 0.125, register values 0..8 */ - unsigned long range = ctrl->default_value - ctrl->minimum; - data = ((ctrl->val - (s32)ctrl->minimum) * 8 + range / 2) / range; - - dev_dbg(&client->dev, "Setting gain %d\n", data); - data = reg_write(client, MT9T031_GLOBAL_GAIN, data); - if (data < 0) - return -EIO; - } else { - /* Pack it into 1.125..128 variable step, register values 9..0x7860 */ - /* We assume qctrl->maximum - qctrl->default_value - 1 > 0 */ - unsigned long range = ctrl->maximum - ctrl->default_value - 1; - /* calculated gain: map 65..127 to 9..1024 step 0.125 */ - unsigned long gain = ((ctrl->val - (s32)ctrl->default_value - 1) * - 1015 + range / 2) / range + 9; - - if (gain <= 32) /* calculated gain 9..32 -> 9..32 */ - data = gain; - else if (gain <= 64) /* calculated gain 33..64 -> 0x51..0x60 */ - data = ((gain - 32) * 16 + 16) / 32 + 80; - else - /* calculated gain 65..1024 -> (1..120) << 8 + 0x60 */ - data = (((gain - 64 + 7) * 32) & 0xff00) | 0x60; - - dev_dbg(&client->dev, "Set gain from 0x%x to 0x%x\n", - reg_read(client, MT9T031_GLOBAL_GAIN), data); - data = reg_write(client, MT9T031_GLOBAL_GAIN, data); - if (data < 0) - return -EIO; - } - return 0; - - case V4L2_CID_EXPOSURE_AUTO: - if (ctrl->val == V4L2_EXPOSURE_MANUAL) { - unsigned int range = exp->maximum - exp->minimum; - unsigned int shutter = ((exp->val - (s32)exp->minimum) * 1048 + - range / 2) / range + 1; - u32 old; - - get_shutter(client, &old); - dev_dbg(&client->dev, "Set shutter from %u to %u\n", - old, shutter); - if (set_shutter(client, shutter) < 0) - return -EIO; - } else { - const u16 vblank = MT9T031_VERTICAL_BLANK; - mt9t031->total_h = mt9t031->rect.height + - mt9t031->y_skip_top + vblank; - - if (set_shutter(client, mt9t031->total_h) < 0) - return -EIO; - } - return 0; - default: - return -EINVAL; - } - return 0; -} - -/* - * Power Management: - * This function does nothing for now but must be present for pm to work - */ -static int mt9t031_runtime_suspend(struct device *dev) -{ - return 0; -} - -/* - * Power Management: - * COLUMN_ADDRESS_MODE and ROW_ADDRESS_MODE are not rewritten if unchanged - * they are however changed at reset if the platform hook is present - * thus we rewrite them with the values stored by the driver - */ -static int mt9t031_runtime_resume(struct device *dev) -{ - struct video_device *vdev = to_video_device(dev); - struct v4l2_subdev *sd = soc_camera_vdev_to_subdev(vdev); - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct mt9t031 *mt9t031 = to_mt9t031(client); - - int ret; - u16 xbin, ybin; - - xbin = min(mt9t031->xskip, (u16)3); - ybin = min(mt9t031->yskip, (u16)3); - - ret = reg_write(client, MT9T031_COLUMN_ADDRESS_MODE, - ((xbin - 1) << 4) | (mt9t031->xskip - 1)); - if (ret < 0) - return ret; - - ret = reg_write(client, MT9T031_ROW_ADDRESS_MODE, - ((ybin - 1) << 4) | (mt9t031->yskip - 1)); - if (ret < 0) - return ret; - - return 0; -} - -static const struct dev_pm_ops mt9t031_dev_pm_ops = { - .runtime_suspend = mt9t031_runtime_suspend, - .runtime_resume = mt9t031_runtime_resume, -}; - -static const struct device_type mt9t031_dev_type = { - .name = "MT9T031", - .pm = &mt9t031_dev_pm_ops, -}; - -static int mt9t031_s_power(struct v4l2_subdev *sd, int on) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - struct video_device *vdev = soc_camera_i2c_to_vdev(client); - struct mt9t031 *mt9t031 = to_mt9t031(client); - int 
ret; - - if (on) { - ret = soc_camera_power_on(&client->dev, ssdd, mt9t031->clk); - if (ret < 0) - return ret; - if (vdev) - /* Not needed during probing, when vdev isn't available yet */ - vdev->dev.type = &mt9t031_dev_type; - } else { - if (vdev) - vdev->dev.type = NULL; - soc_camera_power_off(&client->dev, ssdd, mt9t031->clk); - } - - return 0; -} - -/* - * Interface active, can use i2c. If it fails, it can indeed mean, that - * this wasn't our capture interface, so, we wait for the right one - */ -static int mt9t031_video_probe(struct i2c_client *client) -{ - struct mt9t031 *mt9t031 = to_mt9t031(client); - s32 data; - int ret; - - ret = mt9t031_s_power(&mt9t031->subdev, 1); - if (ret < 0) - return ret; - - ret = mt9t031_idle(client); - if (ret < 0) { - dev_err(&client->dev, "Failed to initialise the camera\n"); - goto done; - } - - /* Read out the chip version register */ - data = reg_read(client, MT9T031_CHIP_VERSION); - - switch (data) { - case 0x1621: - break; - default: - dev_err(&client->dev, - "No MT9T031 chip detected, register read %x\n", data); - ret = -ENODEV; - goto done; - } - - dev_info(&client->dev, "Detected a MT9T031 chip ID %x\n", data); - - ret = v4l2_ctrl_handler_setup(&mt9t031->hdl); - -done: - mt9t031_s_power(&mt9t031->subdev, 0); - - return ret; -} - -static int mt9t031_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct mt9t031 *mt9t031 = to_mt9t031(client); - - *lines = mt9t031->y_skip_top; - - return 0; -} - -static const struct v4l2_ctrl_ops mt9t031_ctrl_ops = { - .g_volatile_ctrl = mt9t031_g_volatile_ctrl, - .s_ctrl = mt9t031_s_ctrl, -}; - -static const struct v4l2_subdev_core_ops mt9t031_subdev_core_ops = { - .s_power = mt9t031_s_power, -#ifdef CONFIG_VIDEO_ADV_DEBUG - .g_register = mt9t031_g_register, - .s_register = mt9t031_s_register, -#endif -}; - -static int mt9t031_enum_mbus_code(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_mbus_code_enum *code) -{ - if (code->pad || code->index) - return -EINVAL; - - code->code = MEDIA_BUS_FMT_SBGGR10_1X10; - return 0; -} - -static int mt9t031_g_mbus_config(struct v4l2_subdev *sd, - struct v4l2_mbus_config *cfg) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - - cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_PCLK_SAMPLE_RISING | - V4L2_MBUS_PCLK_SAMPLE_FALLING | V4L2_MBUS_HSYNC_ACTIVE_HIGH | - V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_DATA_ACTIVE_HIGH; - cfg->type = V4L2_MBUS_PARALLEL; - cfg->flags = soc_camera_apply_board_flags(ssdd, cfg); - - return 0; -} - -static int mt9t031_s_mbus_config(struct v4l2_subdev *sd, - const struct v4l2_mbus_config *cfg) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - - if (soc_camera_apply_board_flags(ssdd, cfg) & - V4L2_MBUS_PCLK_SAMPLE_FALLING) - return reg_clear(client, MT9T031_PIXEL_CLOCK_CONTROL, 0x8000); - else - return reg_set(client, MT9T031_PIXEL_CLOCK_CONTROL, 0x8000); -} - -static const struct v4l2_subdev_video_ops mt9t031_subdev_video_ops = { - .s_stream = mt9t031_s_stream, - .g_mbus_config = mt9t031_g_mbus_config, - .s_mbus_config = mt9t031_s_mbus_config, -}; - -static const struct v4l2_subdev_sensor_ops mt9t031_subdev_sensor_ops = { - .g_skip_top_lines = mt9t031_g_skip_top_lines, -}; - -static const struct v4l2_subdev_pad_ops mt9t031_subdev_pad_ops = { - .enum_mbus_code = mt9t031_enum_mbus_code, - 
.get_selection = mt9t031_get_selection, - .set_selection = mt9t031_set_selection, - .get_fmt = mt9t031_get_fmt, - .set_fmt = mt9t031_set_fmt, -}; - -static const struct v4l2_subdev_ops mt9t031_subdev_ops = { - .core = &mt9t031_subdev_core_ops, - .video = &mt9t031_subdev_video_ops, - .sensor = &mt9t031_subdev_sensor_ops, - .pad = &mt9t031_subdev_pad_ops, -}; - -static int mt9t031_probe(struct i2c_client *client, - const struct i2c_device_id *did) -{ - struct mt9t031 *mt9t031; - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - struct i2c_adapter *adapter = client->adapter; - int ret; - - if (!ssdd) { - dev_err(&client->dev, "MT9T031 driver needs platform data\n"); - return -EINVAL; - } - - if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) { - dev_warn(&adapter->dev, - "I2C-Adapter doesn't support I2C_FUNC_SMBUS_WORD\n"); - return -EIO; - } - - mt9t031 = devm_kzalloc(&client->dev, sizeof(struct mt9t031), GFP_KERNEL); - if (!mt9t031) - return -ENOMEM; - - v4l2_i2c_subdev_init(&mt9t031->subdev, client, &mt9t031_subdev_ops); - v4l2_ctrl_handler_init(&mt9t031->hdl, 5); - v4l2_ctrl_new_std(&mt9t031->hdl, &mt9t031_ctrl_ops, - V4L2_CID_VFLIP, 0, 1, 1, 0); - v4l2_ctrl_new_std(&mt9t031->hdl, &mt9t031_ctrl_ops, - V4L2_CID_HFLIP, 0, 1, 1, 0); - v4l2_ctrl_new_std(&mt9t031->hdl, &mt9t031_ctrl_ops, - V4L2_CID_GAIN, 0, 127, 1, 64); - - /* - * Simulated autoexposure. If enabled, we calculate shutter width - * ourselves in the driver based on vertical blanking and frame width - */ - mt9t031->autoexposure = v4l2_ctrl_new_std_menu(&mt9t031->hdl, - &mt9t031_ctrl_ops, V4L2_CID_EXPOSURE_AUTO, 1, 0, - V4L2_EXPOSURE_AUTO); - mt9t031->exposure = v4l2_ctrl_new_std(&mt9t031->hdl, &mt9t031_ctrl_ops, - V4L2_CID_EXPOSURE, 1, 255, 1, 255); - - mt9t031->subdev.ctrl_handler = &mt9t031->hdl; - if (mt9t031->hdl.error) - return mt9t031->hdl.error; - - v4l2_ctrl_auto_cluster(2, &mt9t031->autoexposure, - V4L2_EXPOSURE_MANUAL, true); - - mt9t031->y_skip_top = 0; - mt9t031->rect.left = MT9T031_COLUMN_SKIP; - mt9t031->rect.top = MT9T031_ROW_SKIP; - mt9t031->rect.width = MT9T031_MAX_WIDTH; - mt9t031->rect.height = MT9T031_MAX_HEIGHT; - - mt9t031->xskip = 1; - mt9t031->yskip = 1; - - mt9t031->clk = v4l2_clk_get(&client->dev, "mclk"); - if (IS_ERR(mt9t031->clk)) { - ret = PTR_ERR(mt9t031->clk); - goto eclkget; - } - - ret = mt9t031_video_probe(client); - if (ret) { - v4l2_clk_put(mt9t031->clk); -eclkget: - v4l2_ctrl_handler_free(&mt9t031->hdl); - } - - return ret; -} - -static int mt9t031_remove(struct i2c_client *client) -{ - struct mt9t031 *mt9t031 = to_mt9t031(client); - - v4l2_clk_put(mt9t031->clk); - v4l2_device_unregister_subdev(&mt9t031->subdev); - v4l2_ctrl_handler_free(&mt9t031->hdl); - - return 0; -} - -static const struct i2c_device_id mt9t031_id[] = { - { "mt9t031", 0 }, - { } -}; -MODULE_DEVICE_TABLE(i2c, mt9t031_id); - -static struct i2c_driver mt9t031_i2c_driver = { - .driver = { - .name = "mt9t031", - }, - .probe = mt9t031_probe, - .remove = mt9t031_remove, - .id_table = mt9t031_id, -}; - -module_i2c_driver(mt9t031_i2c_driver); - -MODULE_DESCRIPTION("Micron MT9T031 Camera driver"); -MODULE_AUTHOR("Guennadi Liakhovetski "); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/staging/media/soc_camera/soc-camera.rst b/drivers/staging/media/soc_camera/soc-camera.rst deleted file mode 100644 index 7c39711aebf8..000000000000 --- a/drivers/staging/media/soc_camera/soc-camera.rst +++ /dev/null @@ -1,171 +0,0 @@ -.. 
SPDX-License-Identifier: GPL-2.0 - -The Soc-Camera Drivers -====================== - -Author: Guennadi Liakhovetski - -Terminology ------------ - -The following terms are used in this document: - - camera / camera device / camera sensor - a video-camera sensor chip, capable - of connecting to a variety of systems and interfaces, typically uses i2c for - control and configuration, and a parallel or a serial bus for data. - - camera host - an interface, to which a camera is connected. Typically a - specialised interface, present on many SoCs, e.g. PXA27x and PXA3xx, SuperH, - i.MX27, i.MX31. - - camera host bus - a connection between a camera host and a camera. Can be - parallel or serial, consists of data and control lines, e.g. clock, vertical - and horizontal synchronization signals. - -Purpose of the soc-camera subsystem ------------------------------------ - -The soc-camera subsystem initially provided a unified API between camera host -drivers and camera sensor drivers. Later the soc-camera sensor API has been -replaced with the V4L2 standard subdev API. This also made camera driver re-use -with non-soc-camera hosts possible. The camera host API to the soc-camera core -has been preserved. - -Soc-camera implements a V4L2 interface to the user, currently only the "mmap" -method is supported by host drivers. However, the soc-camera core also provides -support for the "read" method. - -The subsystem has been designed to support multiple camera host interfaces and -multiple cameras per interface, although most applications have only one camera -sensor. - -Existing drivers ----------------- - -As of 3.7 there are seven host drivers in the mainline: atmel-isi.c, -mx1_camera.c (broken, scheduled for removal), mx2_camera.c, mx3_camera.c, -omap1_camera.c, pxa_camera.c, sh_mobile_ceu_camera.c, and multiple sensor -drivers under drivers/media/i2c/soc_camera/. - -Camera host API ---------------- - -A host camera driver is registered using the - -.. code-block:: none - - soc_camera_host_register(struct soc_camera_host *); - -function. The host object can be initialized as follows: - -.. code-block:: none - - struct soc_camera_host *ici; - ici->drv_name = DRV_NAME; - ici->ops = &camera_host_ops; - ici->priv = pcdev; - ici->v4l2_dev.dev = &pdev->dev; - ici->nr = pdev->id; - -All camera host methods are passed in a struct soc_camera_host_ops: - -.. code-block:: none - - static struct soc_camera_host_ops camera_host_ops = { - .owner = THIS_MODULE, - .add = camera_add_device, - .remove = camera_remove_device, - .set_fmt = camera_set_fmt_cap, - .try_fmt = camera_try_fmt_cap, - .init_videobuf2 = camera_init_videobuf2, - .poll = camera_poll, - .querycap = camera_querycap, - .set_bus_param = camera_set_bus_param, - /* The rest of host operations are optional */ - }; - -.add and .remove methods are called when a sensor is attached to or detached -from the host. .set_bus_param is used to configure physical connection -parameters between the host and the sensor. .init_videobuf2 is called by -soc-camera core when a video-device is opened, the host driver would typically -call vb2_queue_init() in this method. Further video-buffer management is -implemented completely by the specific camera host driver. If the host driver -supports non-standard pixel format conversion, it should implement a -.get_formats and, possibly, a .put_formats operations. See below for more -details about format conversion. The rest of the methods are called from -respective V4L2 operations. 
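For orientation, a camera host driver's platform probe() would typically embed and fill the struct soc_camera_host shown above, register it, and unregister it again on removal. The sketch below is illustrative only and is not part of this patch: the names my_camera_dev, my_camera_probe and my_camera_remove are placeholders, and it assumes the per-host private structure embeds the soc_camera_host as a member called ici, while DRV_NAME and camera_host_ops are the definitions from the preceding example.

.. code-block:: none

    /* Hypothetical per-host state embedding the soc_camera_host */
    struct my_camera_dev {
        struct soc_camera_host ici;
        /* further host-specific state (clocks, DMA, IRQ, ...) */
    };

    static int my_camera_probe(struct platform_device *pdev)
    {
        struct my_camera_dev *pcdev;
        int ret;

        pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL);
        if (!pcdev)
            return -ENOMEM;

        /* Describe this host to the soc-camera core */
        pcdev->ici.drv_name = DRV_NAME;
        pcdev->ici.ops = &camera_host_ops;
        pcdev->ici.priv = pcdev;
        pcdev->ici.v4l2_dev.dev = &pdev->dev;
        pcdev->ici.nr = pdev->id;

        ret = soc_camera_host_register(&pcdev->ici);
        if (ret)
            return ret;

        platform_set_drvdata(pdev, pcdev);
        return 0;
    }

    static int my_camera_remove(struct platform_device *pdev)
    {
        struct my_camera_dev *pcdev = platform_get_drvdata(pdev);

        soc_camera_host_unregister(&pcdev->ici);
        return 0;
    }

Registration makes the host visible to the core, which then matches it against sensor descriptors by interface number (the .nr field) and probes the attached cameras.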
- -Camera API ----------- - -Sensor drivers can use struct soc_camera_link, typically provided by the -platform, and used to specify to which camera host bus the sensor is connected, -and optionally provide platform .power and .reset methods for the camera. This -struct is provided to the camera driver via the I2C client device platform data -and can be obtained, using the soc_camera_i2c_to_link() macro. Care should be -taken, when using soc_camera_vdev_to_subdev() and when accessing struct -soc_camera_device, using v4l2_get_subdev_hostdata(): both only work, when -running on an soc-camera host. The actual camera driver operation is implemented -using the V4L2 subdev API. Additionally soc-camera camera drivers can use -auxiliary soc-camera helper functions like soc_camera_power_on() and -soc_camera_power_off(), which switch regulators, provided by the platform and call -board-specific power switching methods. soc_camera_apply_board_flags() takes -camera bus configuration capability flags and applies any board transformations, -e.g. signal polarity inversion. soc_mbus_get_fmtdesc() can be used to obtain a -pixel format descriptor, corresponding to a certain media-bus pixel format code. -soc_camera_limit_side() can be used to restrict beginning and length of a frame -side, based on camera capabilities. - -VIDIOC_S_CROP and VIDIOC_S_FMT behaviour ----------------------------------------- - -Above user ioctls modify image geometry as follows: - -VIDIOC_S_CROP: sets location and sizes of the sensor window. Unit is one sensor -pixel. Changing sensor window sizes preserves any scaling factors, therefore -user window sizes change as well. - -VIDIOC_S_FMT: sets user window. Should preserve previously set sensor window as -much as possible by modifying scaling factors. If the sensor window cannot be -preserved precisely, it may be changed too. - -In soc-camera there are two locations, where scaling and cropping can take -place: in the camera driver and in the host driver. User ioctls are first passed -to the host driver, which then generally passes them down to the camera driver. -It is more efficient to perform scaling and cropping in the camera driver to -save camera bus bandwidth and maximise the framerate. However, if the camera -driver failed to set the required parameters with sufficient precision, the host -driver may decide to also use its own scaling and cropping to fulfill the user's -request. - -Camera drivers are interfaced to the soc-camera core and to host drivers over -the v4l2-subdev API, which is completely functional, it doesn't pass any data. -Therefore all camera drivers shall reply to .g_fmt() requests with their current -output geometry. This is necessary to correctly configure the camera bus. -.s_fmt() and .try_fmt() have to be implemented too. Sensor window and scaling -factors have to be maintained by camera drivers internally. According to the -V4L2 API all capture drivers must support the VIDIOC_CROPCAP ioctl, hence we -rely on camera drivers implementing .cropcap(). If the camera driver does not -support cropping, it may choose to not implement .s_crop(), but to enable -cropping support by the camera host driver at least the .g_crop method must be -implemented. - -User window geometry is kept in .user_width and .user_height fields in struct -soc_camera_device and used by the soc-camera core and host drivers. The core -updates these fields upon successful completion of a .s_fmt() call, but if these -fields change elsewhere, e.g. 
during .s_crop() processing, the host driver is -responsible for updating them. - -Format conversion ------------------ - -V4L2 distinguishes between pixel formats, as they are stored in memory, and as -they are transferred over a media bus. Soc-camera provides support to -conveniently manage these formats. A table of standard transformations is -maintained by soc-camera core, which describes, what FOURCC pixel format will -be obtained, if a media-bus pixel format is stored in memory according to -certain rules. E.g. if MEDIA_BUS_FMT_YUYV8_2X8 data is sampled with 8 bits per -sample and stored in memory in the little-endian order with no gaps between -bytes, data in memory will represent the V4L2_PIX_FMT_YUYV FOURCC format. These -standard transformations will be used by soc-camera or by camera host drivers to -configure camera drivers to produce the FOURCC format, requested by the user, -using the VIDIOC_S_FMT ioctl(). Apart from those standard format conversions, -host drivers can also provide their own conversion rules by implementing a -.get_formats and, if required, a .put_formats methods. diff --git a/drivers/staging/media/soc_camera/soc_camera.c b/drivers/staging/media/soc_camera/soc_camera.c deleted file mode 100644 index 39f513f69b89..000000000000 --- a/drivers/staging/media/soc_camera/soc_camera.c +++ /dev/null @@ -1,2164 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * camera image capture (abstract) bus driver - * - * Copyright (C) 2008, Guennadi Liakhovetski - * - * This driver provides an interface between platform-specific camera - * buses and camera devices. It should be used if the camera is - * connected not over a "proper" bus like PCI or USB, but over a - * special bus, like, for example, the Quick Capture interface on PXA270 - * SoCs. Later it should also be used for i.MX31 SoCs from Freescale. - * It can handle multiple cameras and / or multiple buses, which can - * be used, e.g., in stereo-vision applications. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* Default to VGA resolution */ -#define DEFAULT_WIDTH 640 -#define DEFAULT_HEIGHT 480 - -#define MAP_MAX_NUM 32 -static DECLARE_BITMAP(device_map, MAP_MAX_NUM); -static LIST_HEAD(hosts); -static LIST_HEAD(devices); -/* - * Protects lists and bitmaps of hosts and devices. - * Lock nesting: Ok to take ->host_lock under list_lock. 
- */ -static DEFINE_MUTEX(list_lock); - -struct soc_camera_async_client { - struct v4l2_async_subdev *sensor; - struct v4l2_async_notifier notifier; - struct platform_device *pdev; - struct list_head list; /* needed for clean up */ -}; - -static int soc_camera_video_start(struct soc_camera_device *icd); -static int video_dev_create(struct soc_camera_device *icd); - -int soc_camera_power_on(struct device *dev, struct soc_camera_subdev_desc *ssdd, - struct v4l2_clk *clk) -{ - int ret; - bool clock_toggle; - - if (clk && (!ssdd->unbalanced_power || - !test_and_set_bit(0, &ssdd->clock_state))) { - ret = v4l2_clk_enable(clk); - if (ret < 0) { - dev_err(dev, "Cannot enable clock: %d\n", ret); - return ret; - } - clock_toggle = true; - } else { - clock_toggle = false; - } - - ret = regulator_bulk_enable(ssdd->sd_pdata.num_regulators, - ssdd->sd_pdata.regulators); - if (ret < 0) { - dev_err(dev, "Cannot enable regulators\n"); - goto eregenable; - } - - if (ssdd->power) { - ret = ssdd->power(dev, 1); - if (ret < 0) { - dev_err(dev, - "Platform failed to power-on the camera.\n"); - goto epwron; - } - } - - return 0; - -epwron: - regulator_bulk_disable(ssdd->sd_pdata.num_regulators, - ssdd->sd_pdata.regulators); -eregenable: - if (clock_toggle) - v4l2_clk_disable(clk); - - return ret; -} -EXPORT_SYMBOL(soc_camera_power_on); - -int soc_camera_power_off(struct device *dev, struct soc_camera_subdev_desc *ssdd, - struct v4l2_clk *clk) -{ - int ret = 0; - int err; - - if (ssdd->power) { - err = ssdd->power(dev, 0); - if (err < 0) { - dev_err(dev, - "Platform failed to power-off the camera.\n"); - ret = err; - } - } - - err = regulator_bulk_disable(ssdd->sd_pdata.num_regulators, - ssdd->sd_pdata.regulators); - if (err < 0) { - dev_err(dev, "Cannot disable regulators\n"); - ret = ret ? 
: err; - } - - if (clk && (!ssdd->unbalanced_power || test_and_clear_bit(0, &ssdd->clock_state))) - v4l2_clk_disable(clk); - - return ret; -} -EXPORT_SYMBOL(soc_camera_power_off); - -int soc_camera_power_init(struct device *dev, struct soc_camera_subdev_desc *ssdd) -{ - /* Should not have any effect in synchronous case */ - return devm_regulator_bulk_get(dev, ssdd->sd_pdata.num_regulators, - ssdd->sd_pdata.regulators); -} -EXPORT_SYMBOL(soc_camera_power_init); - -static int __soc_camera_power_on(struct soc_camera_device *icd) -{ - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - int ret; - - ret = v4l2_subdev_call(sd, core, s_power, 1); - if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) - return ret; - - return 0; -} - -static int __soc_camera_power_off(struct soc_camera_device *icd) -{ - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - int ret; - - ret = v4l2_subdev_call(sd, core, s_power, 0); - if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) - return ret; - - return 0; -} - -static int soc_camera_clock_start(struct soc_camera_host *ici) -{ - int ret; - - if (!ici->ops->clock_start) - return 0; - - mutex_lock(&ici->clk_lock); - ret = ici->ops->clock_start(ici); - mutex_unlock(&ici->clk_lock); - - return ret; -} - -static void soc_camera_clock_stop(struct soc_camera_host *ici) -{ - if (!ici->ops->clock_stop) - return; - - mutex_lock(&ici->clk_lock); - ici->ops->clock_stop(ici); - mutex_unlock(&ici->clk_lock); -} - -const struct soc_camera_format_xlate *soc_camera_xlate_by_fourcc( - struct soc_camera_device *icd, unsigned int fourcc) -{ - unsigned int i; - - for (i = 0; i < icd->num_user_formats; i++) - if (icd->user_formats[i].host_fmt->fourcc == fourcc) - return icd->user_formats + i; - return NULL; -} -EXPORT_SYMBOL(soc_camera_xlate_by_fourcc); - -/** - * soc_camera_apply_board_flags() - apply platform SOCAM_SENSOR_INVERT_* flags - * @ssdd: camera platform parameters - * @cfg: media bus configuration - * @return: resulting flags - */ -unsigned long soc_camera_apply_board_flags(struct soc_camera_subdev_desc *ssdd, - const struct v4l2_mbus_config *cfg) -{ - unsigned long f, flags = cfg->flags; - - /* If only one of the two polarities is supported, switch to the opposite */ - if (ssdd->flags & SOCAM_SENSOR_INVERT_HSYNC) { - f = flags & (V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW); - if (f == V4L2_MBUS_HSYNC_ACTIVE_HIGH || f == V4L2_MBUS_HSYNC_ACTIVE_LOW) - flags ^= V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW; - } - - if (ssdd->flags & SOCAM_SENSOR_INVERT_VSYNC) { - f = flags & (V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW); - if (f == V4L2_MBUS_VSYNC_ACTIVE_HIGH || f == V4L2_MBUS_VSYNC_ACTIVE_LOW) - flags ^= V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW; - } - - if (ssdd->flags & SOCAM_SENSOR_INVERT_PCLK) { - f = flags & (V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING); - if (f == V4L2_MBUS_PCLK_SAMPLE_RISING || f == V4L2_MBUS_PCLK_SAMPLE_FALLING) - flags ^= V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING; - } - - return flags; -} -EXPORT_SYMBOL(soc_camera_apply_board_flags); - -#define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \ - ((x) >> 24) & 0xff - -static int soc_camera_try_fmt(struct soc_camera_device *icd, - struct v4l2_format *f) -{ - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - const struct soc_camera_format_xlate *xlate; - struct v4l2_pix_format *pix = &f->fmt.pix; - int ret; - - dev_dbg(icd->pdev, "TRY_FMT(%c%c%c%c, %ux%u)\n", - 
pixfmtstr(pix->pixelformat), pix->width, pix->height); - - if (pix->pixelformat != V4L2_PIX_FMT_JPEG && - !(ici->capabilities & SOCAM_HOST_CAP_STRIDE)) { - pix->bytesperline = 0; - pix->sizeimage = 0; - } - - ret = ici->ops->try_fmt(icd, f); - if (ret < 0) - return ret; - - xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); - if (!xlate) - return -EINVAL; - - ret = soc_mbus_bytes_per_line(pix->width, xlate->host_fmt); - if (ret < 0) - return ret; - - pix->bytesperline = max_t(u32, pix->bytesperline, ret); - - ret = soc_mbus_image_size(xlate->host_fmt, pix->bytesperline, - pix->height); - if (ret < 0) - return ret; - - pix->sizeimage = max_t(u32, pix->sizeimage, ret); - - return 0; -} - -static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv, - struct v4l2_format *f) -{ - struct soc_camera_device *icd = file->private_data; - - WARN_ON(priv != file->private_data); - - /* Only single-plane capture is supported so far */ - if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) - return -EINVAL; - - /* limit format to hardware capabilities */ - return soc_camera_try_fmt(icd, f); -} - -static int soc_camera_enum_input(struct file *file, void *priv, - struct v4l2_input *inp) -{ - struct soc_camera_device *icd = file->private_data; - - if (inp->index != 0) - return -EINVAL; - - /* default is camera */ - inp->type = V4L2_INPUT_TYPE_CAMERA; - inp->std = icd->vdev->tvnorms; - strscpy(inp->name, "Camera", sizeof(inp->name)); - - return 0; -} - -static int soc_camera_g_input(struct file *file, void *priv, unsigned int *i) -{ - *i = 0; - - return 0; -} - -static int soc_camera_s_input(struct file *file, void *priv, unsigned int i) -{ - if (i > 0) - return -EINVAL; - - return 0; -} - -static int soc_camera_s_std(struct file *file, void *priv, v4l2_std_id a) -{ - struct soc_camera_device *icd = file->private_data; - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - - return v4l2_subdev_call(sd, video, s_std, a); -} - -static int soc_camera_g_std(struct file *file, void *priv, v4l2_std_id *a) -{ - struct soc_camera_device *icd = file->private_data; - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - - return v4l2_subdev_call(sd, video, g_std, a); -} - -static int soc_camera_enum_framesizes(struct file *file, void *fh, - struct v4l2_frmsizeenum *fsize) -{ - struct soc_camera_device *icd = file->private_data; - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - - return ici->ops->enum_framesizes(icd, fsize); -} - -static int soc_camera_reqbufs(struct file *file, void *priv, - struct v4l2_requestbuffers *p) -{ - int ret; - struct soc_camera_device *icd = file->private_data; - - WARN_ON(priv != file->private_data); - - if (icd->streamer && icd->streamer != file) - return -EBUSY; - - ret = vb2_reqbufs(&icd->vb2_vidq, p); - if (!ret) - icd->streamer = p->count ? 
file : NULL; - return ret; -} - -static int soc_camera_querybuf(struct file *file, void *priv, - struct v4l2_buffer *p) -{ - struct soc_camera_device *icd = file->private_data; - - WARN_ON(priv != file->private_data); - - return vb2_querybuf(&icd->vb2_vidq, p); -} - -static int soc_camera_qbuf(struct file *file, void *priv, - struct v4l2_buffer *p) -{ - struct soc_camera_device *icd = file->private_data; - - WARN_ON(priv != file->private_data); - - if (icd->streamer != file) - return -EBUSY; - - return vb2_qbuf(&icd->vb2_vidq, NULL, p); -} - -static int soc_camera_dqbuf(struct file *file, void *priv, - struct v4l2_buffer *p) -{ - struct soc_camera_device *icd = file->private_data; - - WARN_ON(priv != file->private_data); - - if (icd->streamer != file) - return -EBUSY; - - return vb2_dqbuf(&icd->vb2_vidq, p, file->f_flags & O_NONBLOCK); -} - -static int soc_camera_create_bufs(struct file *file, void *priv, - struct v4l2_create_buffers *create) -{ - struct soc_camera_device *icd = file->private_data; - int ret; - - if (icd->streamer && icd->streamer != file) - return -EBUSY; - - ret = vb2_create_bufs(&icd->vb2_vidq, create); - if (!ret) - icd->streamer = file; - return ret; -} - -static int soc_camera_prepare_buf(struct file *file, void *priv, - struct v4l2_buffer *b) -{ - struct soc_camera_device *icd = file->private_data; - - return vb2_prepare_buf(&icd->vb2_vidq, NULL, b); -} - -static int soc_camera_expbuf(struct file *file, void *priv, - struct v4l2_exportbuffer *p) -{ - struct soc_camera_device *icd = file->private_data; - - if (icd->streamer && icd->streamer != file) - return -EBUSY; - return vb2_expbuf(&icd->vb2_vidq, p); -} - -/* Always entered with .host_lock held */ -static int soc_camera_init_user_formats(struct soc_camera_device *icd) -{ - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - unsigned int i, fmts = 0, raw_fmts = 0; - int ret; - struct v4l2_subdev_mbus_code_enum code = { - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - }; - - while (!v4l2_subdev_call(sd, pad, enum_mbus_code, NULL, &code)) { - raw_fmts++; - code.index++; - } - - if (!ici->ops->get_formats) - /* - * Fallback mode - the host will have to serve all - * sensor-provided formats one-to-one to the user - */ - fmts = raw_fmts; - else - /* - * First pass - only count formats this host-sensor - * configuration can provide - */ - for (i = 0; i < raw_fmts; i++) { - ret = ici->ops->get_formats(icd, i, NULL); - if (ret < 0) - return ret; - fmts += ret; - } - - if (!fmts) - return -ENXIO; - - icd->user_formats = - vmalloc(array_size(fmts, - sizeof(struct soc_camera_format_xlate))); - if (!icd->user_formats) - return -ENOMEM; - - dev_dbg(icd->pdev, "Found %d supported formats.\n", fmts); - - /* Second pass - actually fill data formats */ - fmts = 0; - for (i = 0; i < raw_fmts; i++) - if (!ici->ops->get_formats) { - code.index = i; - v4l2_subdev_call(sd, pad, enum_mbus_code, NULL, &code); - icd->user_formats[fmts].host_fmt = - soc_mbus_get_fmtdesc(code.code); - if (icd->user_formats[fmts].host_fmt) - icd->user_formats[fmts++].code = code.code; - } else { - ret = ici->ops->get_formats(icd, i, - &icd->user_formats[fmts]); - if (ret < 0) - goto egfmt; - fmts += ret; - } - - icd->num_user_formats = fmts; - icd->current_fmt = &icd->user_formats[0]; - - return 0; - -egfmt: - vfree(icd->user_formats); - return ret; -} - -/* Always entered with .host_lock held */ -static void soc_camera_free_user_formats(struct soc_camera_device *icd) -{ - struct soc_camera_host *ici 
= to_soc_camera_host(icd->parent); - - if (ici->ops->put_formats) - ici->ops->put_formats(icd); - icd->current_fmt = NULL; - icd->num_user_formats = 0; - vfree(icd->user_formats); - icd->user_formats = NULL; -} - -/* Called with .vb_lock held, or from the first open(2), see comment there */ -static int soc_camera_set_fmt(struct soc_camera_device *icd, - struct v4l2_format *f) -{ - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - struct v4l2_pix_format *pix = &f->fmt.pix; - int ret; - - dev_dbg(icd->pdev, "S_FMT(%c%c%c%c, %ux%u)\n", - pixfmtstr(pix->pixelformat), pix->width, pix->height); - - /* We always call try_fmt() before set_fmt() or set_selection() */ - ret = soc_camera_try_fmt(icd, f); - if (ret < 0) - return ret; - - ret = ici->ops->set_fmt(icd, f); - if (ret < 0) { - return ret; - } else if (!icd->current_fmt || - icd->current_fmt->host_fmt->fourcc != pix->pixelformat) { - dev_err(icd->pdev, - "Host driver hasn't set up current format correctly!\n"); - return -EINVAL; - } - - icd->user_width = pix->width; - icd->user_height = pix->height; - icd->bytesperline = pix->bytesperline; - icd->sizeimage = pix->sizeimage; - icd->colorspace = pix->colorspace; - icd->field = pix->field; - - dev_dbg(icd->pdev, "set width: %d height: %d\n", - icd->user_width, icd->user_height); - - /* set physical bus parameters */ - return ici->ops->set_bus_param(icd); -} - -static int soc_camera_add_device(struct soc_camera_device *icd) -{ - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - int ret; - - if (ici->icd) - return -EBUSY; - - if (!icd->clk) { - ret = soc_camera_clock_start(ici); - if (ret < 0) - return ret; - } - - if (ici->ops->add) { - ret = ici->ops->add(icd); - if (ret < 0) - goto eadd; - } - - ici->icd = icd; - - return 0; - -eadd: - if (!icd->clk) - soc_camera_clock_stop(ici); - return ret; -} - -static void soc_camera_remove_device(struct soc_camera_device *icd) -{ - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - - if (WARN_ON(icd != ici->icd)) - return; - - if (ici->ops->remove) - ici->ops->remove(icd); - if (!icd->clk) - soc_camera_clock_stop(ici); - ici->icd = NULL; -} - -static int soc_camera_open(struct file *file) -{ - struct video_device *vdev = video_devdata(file); - struct soc_camera_device *icd; - struct soc_camera_host *ici; - int ret; - - /* - * Don't mess with the host during probe: wait until the loop in - * scan_add_host() completes. Also protect against a race with - * soc_camera_host_unregister(). - */ - if (mutex_lock_interruptible(&list_lock)) - return -ERESTARTSYS; - - if (!vdev || !video_is_registered(vdev)) { - mutex_unlock(&list_lock); - return -ENODEV; - } - - icd = video_get_drvdata(vdev); - ici = to_soc_camera_host(icd->parent); - - ret = try_module_get(ici->ops->owner) ? 
0 : -ENODEV; - mutex_unlock(&list_lock); - - if (ret < 0) { - dev_err(icd->pdev, "Couldn't lock capture bus driver.\n"); - return ret; - } - - if (!to_soc_camera_control(icd)) { - /* No device driver attached */ - ret = -ENODEV; - goto econtrol; - } - - if (mutex_lock_interruptible(&ici->host_lock)) { - ret = -ERESTARTSYS; - goto elockhost; - } - icd->use_count++; - - /* Now we really have to activate the camera */ - if (icd->use_count == 1) { - struct soc_camera_desc *sdesc = to_soc_camera_desc(icd); - /* Restore parameters before the last close() per V4L2 API */ - struct v4l2_format f = { - .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, - .fmt.pix = { - .width = icd->user_width, - .height = icd->user_height, - .field = icd->field, - .colorspace = icd->colorspace, - .pixelformat = - icd->current_fmt->host_fmt->fourcc, - }, - }; - - /* The camera could have been already on, try to reset */ - if (sdesc->subdev_desc.reset) - if (icd->control) - sdesc->subdev_desc.reset(icd->control); - - ret = soc_camera_add_device(icd); - if (ret < 0) { - dev_err(icd->pdev, "Couldn't activate the camera: %d\n", ret); - goto eiciadd; - } - - ret = __soc_camera_power_on(icd); - if (ret < 0) - goto epower; - - pm_runtime_enable(&icd->vdev->dev); - ret = pm_runtime_resume(&icd->vdev->dev); - if (ret < 0 && ret != -ENOSYS) - goto eresume; - - /* - * Try to configure with default parameters. Notice: this is the - * very first open, so, we cannot race against other calls, - * apart from someone else calling open() simultaneously, but - * .host_lock is protecting us against it. - */ - ret = soc_camera_set_fmt(icd, &f); - if (ret < 0) - goto esfmt; - - ret = ici->ops->init_videobuf2(&icd->vb2_vidq, icd); - if (ret < 0) - goto einitvb; - v4l2_ctrl_handler_setup(&icd->ctrl_handler); - } - mutex_unlock(&ici->host_lock); - - file->private_data = icd; - dev_dbg(icd->pdev, "camera device open\n"); - - return 0; - - /* - * All errors are entered with the .host_lock held, first four also - * with use_count == 1 - */ -einitvb: -esfmt: - pm_runtime_disable(&icd->vdev->dev); -eresume: - __soc_camera_power_off(icd); -epower: - soc_camera_remove_device(icd); -eiciadd: - icd->use_count--; - mutex_unlock(&ici->host_lock); -elockhost: -econtrol: - module_put(ici->ops->owner); - - return ret; -} - -static int soc_camera_close(struct file *file) -{ - struct soc_camera_device *icd = file->private_data; - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - - mutex_lock(&ici->host_lock); - if (icd->streamer == file) { - if (ici->ops->init_videobuf2) - vb2_queue_release(&icd->vb2_vidq); - icd->streamer = NULL; - } - icd->use_count--; - if (!icd->use_count) { - pm_runtime_suspend(&icd->vdev->dev); - pm_runtime_disable(&icd->vdev->dev); - - __soc_camera_power_off(icd); - - soc_camera_remove_device(icd); - } - - mutex_unlock(&ici->host_lock); - - module_put(ici->ops->owner); - - dev_dbg(icd->pdev, "camera device close\n"); - - return 0; -} - -static ssize_t soc_camera_read(struct file *file, char __user *buf, - size_t count, loff_t *ppos) -{ - struct soc_camera_device *icd = file->private_data; - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - - dev_dbg(icd->pdev, "read called, buf %p\n", buf); - - if (ici->ops->init_videobuf2 && icd->vb2_vidq.io_modes & VB2_READ) - return vb2_read(&icd->vb2_vidq, buf, count, ppos, - file->f_flags & O_NONBLOCK); - - dev_err(icd->pdev, "camera device read not implemented\n"); - - return -EINVAL; -} - -static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma) -{ - struct 
soc_camera_device *icd = file->private_data; - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - int err; - - dev_dbg(icd->pdev, "mmap called, vma=%p\n", vma); - - if (icd->streamer != file) - return -EBUSY; - - if (mutex_lock_interruptible(&ici->host_lock)) - return -ERESTARTSYS; - err = vb2_mmap(&icd->vb2_vidq, vma); - mutex_unlock(&ici->host_lock); - - dev_dbg(icd->pdev, "vma start=0x%08lx, size=%ld, ret=%d\n", - (unsigned long)vma->vm_start, - (unsigned long)vma->vm_end - (unsigned long)vma->vm_start, - err); - - return err; -} - -static __poll_t soc_camera_poll(struct file *file, poll_table *pt) -{ - struct soc_camera_device *icd = file->private_data; - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - __poll_t res = EPOLLERR; - - if (icd->streamer != file) - return EPOLLERR; - - mutex_lock(&ici->host_lock); - res = ici->ops->poll(file, pt); - mutex_unlock(&ici->host_lock); - return res; -} - -static const struct v4l2_file_operations soc_camera_fops = { - .owner = THIS_MODULE, - .open = soc_camera_open, - .release = soc_camera_close, - .unlocked_ioctl = video_ioctl2, - .read = soc_camera_read, - .mmap = soc_camera_mmap, - .poll = soc_camera_poll, -}; - -static int soc_camera_s_fmt_vid_cap(struct file *file, void *priv, - struct v4l2_format *f) -{ - struct soc_camera_device *icd = file->private_data; - int ret; - - WARN_ON(priv != file->private_data); - - if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { - dev_warn(icd->pdev, "Wrong buf-type %d\n", f->type); - return -EINVAL; - } - - if (icd->streamer && icd->streamer != file) - return -EBUSY; - - if (vb2_is_streaming(&icd->vb2_vidq)) { - dev_err(icd->pdev, "S_FMT denied: queue initialised\n"); - return -EBUSY; - } - - ret = soc_camera_set_fmt(icd, f); - - if (!ret && !icd->streamer) - icd->streamer = file; - - return ret; -} - -static int soc_camera_enum_fmt_vid_cap(struct file *file, void *priv, - struct v4l2_fmtdesc *f) -{ - struct soc_camera_device *icd = file->private_data; - const struct soc_mbus_pixelfmt *format; - - WARN_ON(priv != file->private_data); - - if (f->index >= icd->num_user_formats) - return -EINVAL; - - format = icd->user_formats[f->index].host_fmt; - - f->pixelformat = format->fourcc; - return 0; -} - -static int soc_camera_g_fmt_vid_cap(struct file *file, void *priv, - struct v4l2_format *f) -{ - struct soc_camera_device *icd = file->private_data; - struct v4l2_pix_format *pix = &f->fmt.pix; - - WARN_ON(priv != file->private_data); - - if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) - return -EINVAL; - - pix->width = icd->user_width; - pix->height = icd->user_height; - pix->bytesperline = icd->bytesperline; - pix->sizeimage = icd->sizeimage; - pix->field = icd->field; - pix->pixelformat = icd->current_fmt->host_fmt->fourcc; - pix->colorspace = icd->colorspace; - dev_dbg(icd->pdev, "current_fmt->fourcc: 0x%08x\n", - icd->current_fmt->host_fmt->fourcc); - return 0; -} - -static int soc_camera_querycap(struct file *file, void *priv, - struct v4l2_capability *cap) -{ - struct soc_camera_device *icd = file->private_data; - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - - WARN_ON(priv != file->private_data); - - strscpy(cap->driver, ici->drv_name, sizeof(cap->driver)); - return ici->ops->querycap(ici, cap); -} - -static int soc_camera_streamon(struct file *file, void *priv, - enum v4l2_buf_type i) -{ - struct soc_camera_device *icd = file->private_data; - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - int ret; - - WARN_ON(priv != file->private_data); - - if (i != 
V4L2_BUF_TYPE_VIDEO_CAPTURE) - return -EINVAL; - - if (icd->streamer != file) - return -EBUSY; - - /* This calls buf_queue from host driver's videobuf2_queue_ops */ - ret = vb2_streamon(&icd->vb2_vidq, i); - if (!ret) - v4l2_subdev_call(sd, video, s_stream, 1); - - return ret; -} - -static int soc_camera_streamoff(struct file *file, void *priv, - enum v4l2_buf_type i) -{ - struct soc_camera_device *icd = file->private_data; - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - int ret; - - WARN_ON(priv != file->private_data); - - if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE) - return -EINVAL; - - if (icd->streamer != file) - return -EBUSY; - - /* - * This calls buf_release from host driver's videobuf2_queue_ops for all - * remaining buffers. When the last buffer is freed, stop capture - */ - ret = vb2_streamoff(&icd->vb2_vidq, i); - - v4l2_subdev_call(sd, video, s_stream, 0); - - return ret; -} - -static int soc_camera_g_selection(struct file *file, void *fh, - struct v4l2_selection *s) -{ - struct soc_camera_device *icd = file->private_data; - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - - /* With a wrong type no need to try to fall back to cropping */ - if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) - return -EINVAL; - - return ici->ops->get_selection(icd, s); -} - -static int soc_camera_s_selection(struct file *file, void *fh, - struct v4l2_selection *s) -{ - struct soc_camera_device *icd = file->private_data; - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - int ret; - - /* In all these cases cropping emulation will not help */ - if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || - (s->target != V4L2_SEL_TGT_COMPOSE && - s->target != V4L2_SEL_TGT_CROP)) - return -EINVAL; - - if (s->target == V4L2_SEL_TGT_COMPOSE) { - /* No output size change during a running capture! 
*/ - if (vb2_is_streaming(&icd->vb2_vidq) && - (icd->user_width != s->r.width || - icd->user_height != s->r.height)) - return -EBUSY; - - /* - * Only one user is allowed to change the output format, touch - * buffers, start / stop streaming, poll for data - */ - if (icd->streamer && icd->streamer != file) - return -EBUSY; - } - - if (s->target == V4L2_SEL_TGT_CROP && - vb2_is_streaming(&icd->vb2_vidq) && - ici->ops->set_liveselection) - ret = ici->ops->set_liveselection(icd, s); - else - ret = ici->ops->set_selection(icd, s); - if (!ret && - s->target == V4L2_SEL_TGT_COMPOSE) { - icd->user_width = s->r.width; - icd->user_height = s->r.height; - if (!icd->streamer) - icd->streamer = file; - } - - return ret; -} - -static int soc_camera_g_parm(struct file *file, void *fh, - struct v4l2_streamparm *a) -{ - struct soc_camera_device *icd = file->private_data; - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - - if (ici->ops->get_parm) - return ici->ops->get_parm(icd, a); - - return -ENOIOCTLCMD; -} - -static int soc_camera_s_parm(struct file *file, void *fh, - struct v4l2_streamparm *a) -{ - struct soc_camera_device *icd = file->private_data; - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - - if (ici->ops->set_parm) - return ici->ops->set_parm(icd, a); - - return -ENOIOCTLCMD; -} - -static int soc_camera_probe(struct soc_camera_host *ici, - struct soc_camera_device *icd); - -/* So far this function cannot fail */ -static void scan_add_host(struct soc_camera_host *ici) -{ - struct soc_camera_device *icd; - - mutex_lock(&list_lock); - - list_for_each_entry(icd, &devices, list) - if (icd->iface == ici->nr) { - struct soc_camera_desc *sdesc = to_soc_camera_desc(icd); - struct soc_camera_subdev_desc *ssdd = &sdesc->subdev_desc; - - /* The camera could have been already on, try to reset */ - if (ssdd->reset) - if (icd->control) - ssdd->reset(icd->control); - - icd->parent = ici->v4l2_dev.dev; - - /* Ignore errors */ - soc_camera_probe(ici, icd); - } - - mutex_unlock(&list_lock); -} - -/* - * It is invalid to call v4l2_clk_enable() after a successful probing - * asynchronously outside of V4L2 operations, i.e. with .host_lock not held. - */ -static int soc_camera_clk_enable(struct v4l2_clk *clk) -{ - struct soc_camera_device *icd = clk->priv; - struct soc_camera_host *ici; - - if (!icd || !icd->parent) - return -ENODEV; - - ici = to_soc_camera_host(icd->parent); - - if (!try_module_get(ici->ops->owner)) - return -ENODEV; - - /* - * If a different client is currently being probed, the host will tell - * you to go - */ - return soc_camera_clock_start(ici); -} - -static void soc_camera_clk_disable(struct v4l2_clk *clk) -{ - struct soc_camera_device *icd = clk->priv; - struct soc_camera_host *ici; - - if (!icd || !icd->parent) - return; - - ici = to_soc_camera_host(icd->parent); - - soc_camera_clock_stop(ici); - - module_put(ici->ops->owner); -} - -/* - * Eventually, it would be more logical to make the respective host the clock - * owner, but then we would have to copy this struct for each ici. Besides, it - * would introduce the circular dependency problem, unless we port all client - * drivers to release the clock, when not in use. 
- */ -static const struct v4l2_clk_ops soc_camera_clk_ops = { - .owner = THIS_MODULE, - .enable = soc_camera_clk_enable, - .disable = soc_camera_clk_disable, -}; - -static int soc_camera_dyn_pdev(struct soc_camera_desc *sdesc, - struct soc_camera_async_client *sasc) -{ - struct platform_device *pdev; - int ret, i; - - mutex_lock(&list_lock); - i = find_first_zero_bit(device_map, MAP_MAX_NUM); - if (i < MAP_MAX_NUM) - set_bit(i, device_map); - mutex_unlock(&list_lock); - if (i >= MAP_MAX_NUM) - return -ENOMEM; - - pdev = platform_device_alloc("soc-camera-pdrv", i); - if (!pdev) - return -ENOMEM; - - ret = platform_device_add_data(pdev, sdesc, sizeof(*sdesc)); - if (ret < 0) { - platform_device_put(pdev); - return ret; - } - - sasc->pdev = pdev; - - return 0; -} - -static struct soc_camera_device *soc_camera_add_pdev(struct soc_camera_async_client *sasc) -{ - struct platform_device *pdev = sasc->pdev; - int ret; - - ret = platform_device_add(pdev); - if (ret < 0 || !pdev->dev.driver) - return NULL; - - return platform_get_drvdata(pdev); -} - -/* Locking: called with .host_lock held */ -static int soc_camera_probe_finish(struct soc_camera_device *icd) -{ - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - struct v4l2_subdev_format fmt = { - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - }; - struct v4l2_mbus_framefmt *mf = &fmt.format; - int ret; - - sd->grp_id = soc_camera_grp_id(icd); - v4l2_set_subdev_hostdata(sd, icd); - - v4l2_subdev_call(sd, video, g_tvnorms, &icd->vdev->tvnorms); - - ret = v4l2_ctrl_add_handler(&icd->ctrl_handler, sd->ctrl_handler, - NULL, true); - if (ret < 0) - return ret; - - ret = soc_camera_add_device(icd); - if (ret < 0) { - dev_err(icd->pdev, "Couldn't activate the camera: %d\n", ret); - return ret; - } - - /* At this point client .probe() should have run already */ - ret = soc_camera_init_user_formats(icd); - if (ret < 0) - goto eusrfmt; - - icd->field = V4L2_FIELD_ANY; - - ret = soc_camera_video_start(icd); - if (ret < 0) - goto evidstart; - - /* Try to improve our guess of a reasonable window format */ - if (!v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt)) { - icd->user_width = mf->width; - icd->user_height = mf->height; - icd->colorspace = mf->colorspace; - icd->field = mf->field; - } - soc_camera_remove_device(icd); - - return 0; - -evidstart: - soc_camera_free_user_formats(icd); -eusrfmt: - soc_camera_remove_device(icd); - - return ret; -} - -#ifdef CONFIG_I2C_BOARDINFO -static int soc_camera_i2c_init(struct soc_camera_device *icd, - struct soc_camera_desc *sdesc) -{ - struct soc_camera_subdev_desc *ssdd; - struct i2c_client *client; - struct soc_camera_host *ici; - struct soc_camera_host_desc *shd = &sdesc->host_desc; - struct i2c_adapter *adap; - struct v4l2_subdev *subdev; - char clk_name[V4L2_CLK_NAME_SIZE]; - int ret; - - /* First find out how we link the main client */ - if (icd->sasc) { - /* Async non-OF probing handled by the subdevice list */ - return -EPROBE_DEFER; - } - - ici = to_soc_camera_host(icd->parent); - adap = i2c_get_adapter(shd->i2c_adapter_id); - if (!adap) { - dev_err(icd->pdev, "Cannot get I2C adapter #%d. No driver?\n", - shd->i2c_adapter_id); - return -ENODEV; - } - - ssdd = kmemdup(&sdesc->subdev_desc, sizeof(*ssdd), GFP_KERNEL); - if (!ssdd) { - ret = -ENOMEM; - goto ealloc; - } - /* - * In synchronous case we request regulators ourselves in - * soc_camera_pdrv_probe(), make sure the subdevice driver doesn't try - * to allocate them again. 
- */ - ssdd->sd_pdata.num_regulators = 0; - ssdd->sd_pdata.regulators = NULL; - shd->board_info->platform_data = ssdd; - - v4l2_clk_name_i2c(clk_name, sizeof(clk_name), - shd->i2c_adapter_id, shd->board_info->addr); - - icd->clk = v4l2_clk_register(&soc_camera_clk_ops, clk_name, icd); - if (IS_ERR(icd->clk)) { - ret = PTR_ERR(icd->clk); - goto eclkreg; - } - - subdev = v4l2_i2c_new_subdev_board(&ici->v4l2_dev, adap, - shd->board_info, NULL); - if (!subdev) { - ret = -ENODEV; - goto ei2cnd; - } - - client = v4l2_get_subdevdata(subdev); - - /* Use to_i2c_client(dev) to recover the i2c client */ - icd->control = &client->dev; - - return 0; -ei2cnd: - v4l2_clk_unregister(icd->clk); - icd->clk = NULL; -eclkreg: - kfree(ssdd); -ealloc: - i2c_put_adapter(adap); - return ret; -} - -static void soc_camera_i2c_free(struct soc_camera_device *icd) -{ - struct i2c_client *client = - to_i2c_client(to_soc_camera_control(icd)); - struct i2c_adapter *adap; - struct soc_camera_subdev_desc *ssdd; - - icd->control = NULL; - if (icd->sasc) - return; - - adap = client->adapter; - ssdd = client->dev.platform_data; - v4l2_device_unregister_subdev(i2c_get_clientdata(client)); - i2c_unregister_device(client); - i2c_put_adapter(adap); - kfree(ssdd); - v4l2_clk_unregister(icd->clk); - icd->clk = NULL; -} - -/* - * V4L2 asynchronous notifier callbacks. They are all called under a v4l2-async - * internal global mutex, therefore cannot race against other asynchronous - * events. Until notifier->complete() (soc_camera_async_complete()) is called, - * the video device node is not registered and no V4L fops can occur. Unloading - * of the host driver also calls a v4l2-async function, so also there we're - * protected. - */ -static int soc_camera_async_bound(struct v4l2_async_notifier *notifier, - struct v4l2_subdev *sd, - struct v4l2_async_subdev *asd) -{ - struct soc_camera_async_client *sasc = container_of(notifier, - struct soc_camera_async_client, notifier); - struct soc_camera_device *icd = platform_get_drvdata(sasc->pdev); - - if (asd == sasc->sensor && !WARN_ON(icd->control)) { - struct i2c_client *client = v4l2_get_subdevdata(sd); - - /* - * Only now we get subdevice-specific information like - * regulators, flags, callbacks, etc. 
- */ - if (client) { - struct soc_camera_desc *sdesc = to_soc_camera_desc(icd); - struct soc_camera_subdev_desc *ssdd = - soc_camera_i2c_to_desc(client); - if (ssdd) { - memcpy(&sdesc->subdev_desc, ssdd, - sizeof(sdesc->subdev_desc)); - if (ssdd->reset) - ssdd->reset(&client->dev); - } - - icd->control = &client->dev; - } - } - - return 0; -} - -static void soc_camera_async_unbind(struct v4l2_async_notifier *notifier, - struct v4l2_subdev *sd, - struct v4l2_async_subdev *asd) -{ - struct soc_camera_async_client *sasc = container_of(notifier, - struct soc_camera_async_client, notifier); - struct soc_camera_device *icd = platform_get_drvdata(sasc->pdev); - - icd->control = NULL; - - if (icd->clk) { - v4l2_clk_unregister(icd->clk); - icd->clk = NULL; - } -} - -static int soc_camera_async_complete(struct v4l2_async_notifier *notifier) -{ - struct soc_camera_async_client *sasc = container_of(notifier, - struct soc_camera_async_client, notifier); - struct soc_camera_device *icd = platform_get_drvdata(sasc->pdev); - - if (to_soc_camera_control(icd)) { - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - int ret; - - mutex_lock(&list_lock); - ret = soc_camera_probe(ici, icd); - mutex_unlock(&list_lock); - if (ret < 0) - return ret; - } - - return 0; -} - -static const struct v4l2_async_notifier_operations soc_camera_async_ops = { - .bound = soc_camera_async_bound, - .unbind = soc_camera_async_unbind, - .complete = soc_camera_async_complete, -}; - -static int scan_async_group(struct soc_camera_host *ici, - struct v4l2_async_subdev **asd, unsigned int size) -{ - struct soc_camera_async_subdev *sasd; - struct soc_camera_async_client *sasc; - struct soc_camera_device *icd; - struct soc_camera_desc sdesc = {.host_desc.bus_id = ici->nr,}; - char clk_name[V4L2_CLK_NAME_SIZE]; - unsigned int i; - int ret; - - /* First look for a sensor */ - for (i = 0; i < size; i++) { - sasd = container_of(asd[i], struct soc_camera_async_subdev, asd); - if (sasd->role == SOCAM_SUBDEV_DATA_SOURCE) - break; - } - - if (i >= size || asd[i]->match_type != V4L2_ASYNC_MATCH_I2C) { - /* All useless */ - dev_err(ici->v4l2_dev.dev, "No I2C data source found!\n"); - return -ENODEV; - } - - /* Or shall this be managed by the soc-camera device? 
*/ - sasc = devm_kzalloc(ici->v4l2_dev.dev, sizeof(*sasc), GFP_KERNEL); - if (!sasc) - return -ENOMEM; - - /* HACK: just need a != NULL */ - sdesc.host_desc.board_info = ERR_PTR(-ENODATA); - - ret = soc_camera_dyn_pdev(&sdesc, sasc); - if (ret < 0) - goto eallocpdev; - - sasc->sensor = &sasd->asd; - - icd = soc_camera_add_pdev(sasc); - if (!icd) { - ret = -ENOMEM; - goto eaddpdev; - } - - v4l2_async_notifier_init(&sasc->notifier); - - for (i = 0; i < size; i++) { - ret = v4l2_async_notifier_add_subdev(&sasc->notifier, asd[i]); - if (ret) - goto eaddasd; - } - - sasc->notifier.ops = &soc_camera_async_ops; - - icd->sasc = sasc; - icd->parent = ici->v4l2_dev.dev; - - v4l2_clk_name_i2c(clk_name, sizeof(clk_name), - sasd->asd.match.i2c.adapter_id, - sasd->asd.match.i2c.address); - - icd->clk = v4l2_clk_register(&soc_camera_clk_ops, clk_name, icd); - if (IS_ERR(icd->clk)) { - ret = PTR_ERR(icd->clk); - goto eclkreg; - } - - ret = v4l2_async_notifier_register(&ici->v4l2_dev, &sasc->notifier); - if (!ret) - return 0; - - v4l2_clk_unregister(icd->clk); -eclkreg: - icd->clk = NULL; -eaddasd: - v4l2_async_notifier_cleanup(&sasc->notifier); - platform_device_del(sasc->pdev); -eaddpdev: - platform_device_put(sasc->pdev); -eallocpdev: - devm_kfree(ici->v4l2_dev.dev, sasc); - dev_err(ici->v4l2_dev.dev, "group probe failed: %d\n", ret); - - return ret; -} - -static void scan_async_host(struct soc_camera_host *ici) -{ - struct v4l2_async_subdev **asd; - int j; - - for (j = 0, asd = ici->asd; ici->asd_sizes[j]; j++) { - scan_async_group(ici, asd, ici->asd_sizes[j]); - asd += ici->asd_sizes[j]; - } -} -#else -#define soc_camera_i2c_init(icd, sdesc) (-ENODEV) -#define soc_camera_i2c_free(icd) do {} while (0) -#define scan_async_host(ici) do {} while (0) -#endif - -#ifdef CONFIG_OF - -struct soc_of_info { - struct soc_camera_async_subdev sasd; - struct soc_camera_async_client sasc; - struct v4l2_async_subdev *subdev; -}; - -static int soc_of_bind(struct soc_camera_host *ici, - struct device_node *ep, - struct device_node *remote) -{ - struct soc_camera_device *icd; - struct soc_camera_desc sdesc = {.host_desc.bus_id = ici->nr,}; - struct soc_camera_async_client *sasc; - struct soc_of_info *info; - struct i2c_client *client; - char clk_name[V4L2_CLK_NAME_SIZE]; - int ret; - - /* allocate a new subdev and add match info to it */ - info = devm_kzalloc(ici->v4l2_dev.dev, sizeof(struct soc_of_info), - GFP_KERNEL); - if (!info) - return -ENOMEM; - - info->sasd.asd.match.fwnode = of_fwnode_handle(remote); - info->sasd.asd.match_type = V4L2_ASYNC_MATCH_FWNODE; - info->subdev = &info->sasd.asd; - - /* Or shall this be managed by the soc-camera device? 
*/ - sasc = &info->sasc; - - /* HACK: just need a != NULL */ - sdesc.host_desc.board_info = ERR_PTR(-ENODATA); - - ret = soc_camera_dyn_pdev(&sdesc, sasc); - if (ret < 0) - goto eallocpdev; - - sasc->sensor = &info->sasd.asd; - - icd = soc_camera_add_pdev(sasc); - if (!icd) { - ret = -ENOMEM; - goto eaddpdev; - } - - v4l2_async_notifier_init(&sasc->notifier); - - ret = v4l2_async_notifier_add_subdev(&sasc->notifier, info->subdev); - if (ret) { - of_node_put(remote); - goto eaddasd; - } - - sasc->notifier.ops = &soc_camera_async_ops; - - icd->sasc = sasc; - icd->parent = ici->v4l2_dev.dev; - - client = of_find_i2c_device_by_node(remote); - - if (client) - v4l2_clk_name_i2c(clk_name, sizeof(clk_name), - client->adapter->nr, client->addr); - else - v4l2_clk_name_of(clk_name, sizeof(clk_name), remote); - - icd->clk = v4l2_clk_register(&soc_camera_clk_ops, clk_name, icd); - if (IS_ERR(icd->clk)) { - ret = PTR_ERR(icd->clk); - goto eclkreg; - } - - ret = v4l2_async_notifier_register(&ici->v4l2_dev, &sasc->notifier); - if (!ret) - return 0; - - v4l2_clk_unregister(icd->clk); -eclkreg: - icd->clk = NULL; -eaddasd: - v4l2_async_notifier_cleanup(&sasc->notifier); - platform_device_del(sasc->pdev); -eaddpdev: - platform_device_put(sasc->pdev); -eallocpdev: - devm_kfree(ici->v4l2_dev.dev, info); - dev_err(ici->v4l2_dev.dev, "group probe failed: %d\n", ret); - - return ret; -} - -static void scan_of_host(struct soc_camera_host *ici) -{ - struct device *dev = ici->v4l2_dev.dev; - struct device_node *np = dev->of_node; - struct device_node *epn = NULL, *rem; - unsigned int i; - - for (i = 0; ; i++) { - epn = of_graph_get_next_endpoint(np, epn); - if (!epn) - break; - - rem = of_graph_get_remote_port_parent(epn); - if (!rem) { - dev_notice(dev, "no remote for %pOF\n", epn); - continue; - } - - /* so we now have a remote node to connect */ - if (!i) - soc_of_bind(ici, epn, rem); - - if (i) { - dev_err(dev, "multiple subdevices aren't supported yet!\n"); - break; - } - } - - of_node_put(epn); -} - -#else -static inline void scan_of_host(struct soc_camera_host *ici) { } -#endif - -/* Called during host-driver probe */ -static int soc_camera_probe(struct soc_camera_host *ici, - struct soc_camera_device *icd) -{ - struct soc_camera_desc *sdesc = to_soc_camera_desc(icd); - struct soc_camera_host_desc *shd = &sdesc->host_desc; - struct device *control = NULL; - int ret; - - dev_info(icd->pdev, "Probing %s\n", dev_name(icd->pdev)); - - /* - * Currently the subdev with the largest number of controls (13) is - * ov6550. So let's pick 16 as a hint for the control handler. Note - * that this is a hint only: too large and you waste some memory, too - * small and there is a (very) small performance hit when looking up - * controls in the internal hash. - */ - ret = v4l2_ctrl_handler_init(&icd->ctrl_handler, 16); - if (ret < 0) - return ret; - - /* Must have icd->vdev before registering the device */ - ret = video_dev_create(icd); - if (ret < 0) - goto evdc; - - /* - * ..._video_start() will create a device node, video_register_device() - * itself is protected against concurrent open() calls, but we also have - * to protect our data also during client probing. 
- */ - - /* Non-i2c cameras, e.g., soc_camera_platform, have no board_info */ - if (shd->board_info) { - ret = soc_camera_i2c_init(icd, sdesc); - if (ret < 0 && ret != -EPROBE_DEFER) - goto eadd; - } else if (!shd->add_device || !shd->del_device) { - ret = -EINVAL; - goto eadd; - } else { - ret = soc_camera_clock_start(ici); - if (ret < 0) - goto eadd; - - if (shd->module_name) - ret = request_module(shd->module_name); - - ret = shd->add_device(icd); - if (ret < 0) - goto eadddev; - - /* - * FIXME: this is racy, have to use driver-binding notification, - * when it is available - */ - control = to_soc_camera_control(icd); - if (!control || !control->driver || !dev_get_drvdata(control) || - !try_module_get(control->driver->owner)) { - shd->del_device(icd); - ret = -ENODEV; - goto enodrv; - } - } - - mutex_lock(&ici->host_lock); - ret = soc_camera_probe_finish(icd); - mutex_unlock(&ici->host_lock); - if (ret < 0) - goto efinish; - - return 0; - -efinish: - if (shd->board_info) { - soc_camera_i2c_free(icd); - } else { - shd->del_device(icd); - module_put(control->driver->owner); -enodrv: -eadddev: - soc_camera_clock_stop(ici); - } -eadd: - if (icd->vdev) { - video_device_release(icd->vdev); - icd->vdev = NULL; - } -evdc: - v4l2_ctrl_handler_free(&icd->ctrl_handler); - return ret; -} - -/* - * This is called on device_unregister, which only means we have to disconnect - * from the host, but not remove ourselves from the device list. With - * asynchronous client probing this can also be called without - * soc_camera_probe_finish() having run. Careful with clean up. - */ -static int soc_camera_remove(struct soc_camera_device *icd) -{ - struct soc_camera_desc *sdesc = to_soc_camera_desc(icd); - struct video_device *vdev = icd->vdev; - - v4l2_ctrl_handler_free(&icd->ctrl_handler); - if (vdev) { - video_unregister_device(vdev); - icd->vdev = NULL; - } - - if (sdesc->host_desc.board_info) { - soc_camera_i2c_free(icd); - } else { - struct device *dev = to_soc_camera_control(icd); - struct device_driver *drv = dev ? 
dev->driver : NULL; - if (drv) { - sdesc->host_desc.del_device(icd); - module_put(drv->owner); - } - } - - if (icd->num_user_formats) - soc_camera_free_user_formats(icd); - - if (icd->clk) { - /* For the synchronous case */ - v4l2_clk_unregister(icd->clk); - icd->clk = NULL; - } - - if (icd->sasc) - platform_device_unregister(icd->sasc->pdev); - - return 0; -} - -static int default_g_selection(struct soc_camera_device *icd, - struct v4l2_selection *sel) -{ - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - struct v4l2_subdev_selection sdsel = { - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - .target = sel->target, - }; - int ret; - - ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sdsel); - if (ret) - return ret; - sel->r = sdsel.r; - return 0; -} - -static int default_s_selection(struct soc_camera_device *icd, - struct v4l2_selection *sel) -{ - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - struct v4l2_subdev_selection sdsel = { - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - .target = sel->target, - .flags = sel->flags, - .r = sel->r, - }; - int ret; - - ret = v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel); - if (ret) - return ret; - sel->r = sdsel.r; - return 0; -} - -static int default_g_parm(struct soc_camera_device *icd, - struct v4l2_streamparm *a) -{ - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - - return v4l2_g_parm_cap(icd->vdev, sd, a); -} - -static int default_s_parm(struct soc_camera_device *icd, - struct v4l2_streamparm *a) -{ - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - - return v4l2_s_parm_cap(icd->vdev, sd, a); -} - -static int default_enum_framesizes(struct soc_camera_device *icd, - struct v4l2_frmsizeenum *fsize) -{ - int ret; - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - const struct soc_camera_format_xlate *xlate; - struct v4l2_subdev_frame_size_enum fse = { - .index = fsize->index, - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - }; - - xlate = soc_camera_xlate_by_fourcc(icd, fsize->pixel_format); - if (!xlate) - return -EINVAL; - fse.code = xlate->code; - - ret = v4l2_subdev_call(sd, pad, enum_frame_size, NULL, &fse); - if (ret < 0) - return ret; - - if (fse.min_width == fse.max_width && - fse.min_height == fse.max_height) { - fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE; - fsize->discrete.width = fse.min_width; - fsize->discrete.height = fse.min_height; - return 0; - } - fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS; - fsize->stepwise.min_width = fse.min_width; - fsize->stepwise.max_width = fse.max_width; - fsize->stepwise.min_height = fse.min_height; - fsize->stepwise.max_height = fse.max_height; - fsize->stepwise.step_width = 1; - fsize->stepwise.step_height = 1; - return 0; -} - -int soc_camera_host_register(struct soc_camera_host *ici) -{ - struct soc_camera_host *ix; - int ret; - - if (!ici || !ici->ops || - !ici->ops->try_fmt || - !ici->ops->set_fmt || - !ici->ops->set_bus_param || - !ici->ops->querycap || - !ici->ops->init_videobuf2 || - !ici->ops->poll || - !ici->v4l2_dev.dev) - return -EINVAL; - - if (!ici->ops->set_selection) - ici->ops->set_selection = default_s_selection; - if (!ici->ops->get_selection) - ici->ops->get_selection = default_g_selection; - if (!ici->ops->set_parm) - ici->ops->set_parm = default_s_parm; - if (!ici->ops->get_parm) - ici->ops->get_parm = default_g_parm; - if (!ici->ops->enum_framesizes) - ici->ops->enum_framesizes = default_enum_framesizes; - - mutex_lock(&list_lock); - list_for_each_entry(ix, &hosts, list) { - if (ix->nr == ici->nr) { - ret = -EBUSY; - goto edevreg; - } - } - - ret = 
v4l2_device_register(ici->v4l2_dev.dev, &ici->v4l2_dev); - if (ret < 0) - goto edevreg; - - list_add_tail(&ici->list, &hosts); - mutex_unlock(&list_lock); - - mutex_init(&ici->host_lock); - mutex_init(&ici->clk_lock); - - if (ici->v4l2_dev.dev->of_node) - scan_of_host(ici); - else if (ici->asd_sizes) - /* - * No OF, host with a list of subdevices. Don't try to mix - * modes by initialising some groups statically and some - * dynamically! - */ - scan_async_host(ici); - else - /* Legacy: static platform devices from board data */ - scan_add_host(ici); - - return 0; - -edevreg: - mutex_unlock(&list_lock); - return ret; -} -EXPORT_SYMBOL(soc_camera_host_register); - -/* Unregister all clients! */ -void soc_camera_host_unregister(struct soc_camera_host *ici) -{ - struct soc_camera_device *icd, *tmp; - struct soc_camera_async_client *sasc; - LIST_HEAD(notifiers); - - mutex_lock(&list_lock); - list_del(&ici->list); - list_for_each_entry(icd, &devices, list) - if (icd->iface == ici->nr && icd->sasc) { - /* as long as we hold the device, sasc won't be freed */ - get_device(icd->pdev); - list_add(&icd->sasc->list, ¬ifiers); - } - mutex_unlock(&list_lock); - - list_for_each_entry(sasc, ¬ifiers, list) { - /* Must call unlocked to avoid AB-BA dead-lock */ - v4l2_async_notifier_unregister(&sasc->notifier); - v4l2_async_notifier_cleanup(&sasc->notifier); - put_device(&sasc->pdev->dev); - } - - mutex_lock(&list_lock); - - list_for_each_entry_safe(icd, tmp, &devices, list) - if (icd->iface == ici->nr) - soc_camera_remove(icd); - - mutex_unlock(&list_lock); - - v4l2_device_unregister(&ici->v4l2_dev); -} -EXPORT_SYMBOL(soc_camera_host_unregister); - -/* Image capture device */ -static int soc_camera_device_register(struct soc_camera_device *icd) -{ - struct soc_camera_device *ix; - int num = -1, i; - - mutex_lock(&list_lock); - for (i = 0; i < 256 && num < 0; i++) { - num = i; - /* Check if this index is available on this interface */ - list_for_each_entry(ix, &devices, list) { - if (ix->iface == icd->iface && ix->devnum == i) { - num = -1; - break; - } - } - } - - if (num < 0) { - /* - * ok, we have 256 cameras on this host... - * man, stay reasonable... 
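For illustration only, the first-free-devnum scan in soc_camera_device_register() above amounts to picking the lowest index in 0..255 not already taken on the same host interface. A minimal stand-alone sketch (hypothetical first_free_devnum() helper and cam_entry type, not the kernel code):

#include <stdio.h>

/* Stand-in for the (iface, devnum) pairs kept on the global device list. */
struct cam_entry {
	int iface;
	int devnum;
};

/* Lowest devnum in [0, 255] unused on the given interface, or -1 if full. */
static int first_free_devnum(const struct cam_entry *used, int n, int iface)
{
	for (int num = 0; num < 256; num++) {
		int taken = 0;

		for (int i = 0; i < n; i++) {
			if (used[i].iface == iface && used[i].devnum == num) {
				taken = 1;
				break;
			}
		}
		if (!taken)
			return num;
	}
	return -1;
}

int main(void)
{
	struct cam_entry used[] = { { 0, 0 }, { 0, 1 }, { 1, 0 } };

	/* Interface 0 already uses 0 and 1, so the next free number is 2. */
	printf("%d\n", first_free_devnum(used, 3, 0));
	return 0;
}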
- */ - mutex_unlock(&list_lock); - return -ENOMEM; - } - - icd->devnum = num; - icd->use_count = 0; - icd->host_priv = NULL; - - /* - * Dynamically allocated devices set the bit earlier, but it doesn't hurt setting - * it again - */ - i = to_platform_device(icd->pdev)->id; - if (i < 0) - /* One static (legacy) soc-camera platform device */ - i = 0; - if (i >= MAP_MAX_NUM) { - mutex_unlock(&list_lock); - return -EBUSY; - } - set_bit(i, device_map); - list_add_tail(&icd->list, &devices); - mutex_unlock(&list_lock); - - return 0; -} - -static const struct v4l2_ioctl_ops soc_camera_ioctl_ops = { - .vidioc_querycap = soc_camera_querycap, - .vidioc_try_fmt_vid_cap = soc_camera_try_fmt_vid_cap, - .vidioc_g_fmt_vid_cap = soc_camera_g_fmt_vid_cap, - .vidioc_s_fmt_vid_cap = soc_camera_s_fmt_vid_cap, - .vidioc_enum_fmt_vid_cap = soc_camera_enum_fmt_vid_cap, - .vidioc_enum_input = soc_camera_enum_input, - .vidioc_g_input = soc_camera_g_input, - .vidioc_s_input = soc_camera_s_input, - .vidioc_s_std = soc_camera_s_std, - .vidioc_g_std = soc_camera_g_std, - .vidioc_enum_framesizes = soc_camera_enum_framesizes, - .vidioc_reqbufs = soc_camera_reqbufs, - .vidioc_querybuf = soc_camera_querybuf, - .vidioc_qbuf = soc_camera_qbuf, - .vidioc_dqbuf = soc_camera_dqbuf, - .vidioc_create_bufs = soc_camera_create_bufs, - .vidioc_prepare_buf = soc_camera_prepare_buf, - .vidioc_expbuf = soc_camera_expbuf, - .vidioc_streamon = soc_camera_streamon, - .vidioc_streamoff = soc_camera_streamoff, - .vidioc_g_selection = soc_camera_g_selection, - .vidioc_s_selection = soc_camera_s_selection, - .vidioc_g_parm = soc_camera_g_parm, - .vidioc_s_parm = soc_camera_s_parm, -}; - -static int video_dev_create(struct soc_camera_device *icd) -{ - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - struct video_device *vdev = video_device_alloc(); - - if (!vdev) - return -ENOMEM; - - strscpy(vdev->name, ici->drv_name, sizeof(vdev->name)); - - vdev->v4l2_dev = &ici->v4l2_dev; - vdev->fops = &soc_camera_fops; - vdev->ioctl_ops = &soc_camera_ioctl_ops; - vdev->release = video_device_release; - vdev->ctrl_handler = &icd->ctrl_handler; - vdev->lock = &ici->host_lock; - - icd->vdev = vdev; - - return 0; -} - -/* - * Called from soc_camera_probe() above with .host_lock held - */ -static int soc_camera_video_start(struct soc_camera_device *icd) -{ - const struct device_type *type = icd->vdev->dev.type; - int ret; - - if (!icd->parent) - return -ENODEV; - - video_set_drvdata(icd->vdev, icd); - if (icd->vdev->tvnorms == 0) { - /* disable the STD API if there are no tvnorms defined */ - v4l2_disable_ioctl(icd->vdev, VIDIOC_G_STD); - v4l2_disable_ioctl(icd->vdev, VIDIOC_S_STD); - v4l2_disable_ioctl(icd->vdev, VIDIOC_ENUMSTD); - } - ret = video_register_device(icd->vdev, VFL_TYPE_VIDEO, -1); - if (ret < 0) { - dev_err(icd->pdev, "video_register_device failed: %d\n", ret); - return ret; - } - - /* Restore device type, possibly set by the subdevice driver */ - icd->vdev->dev.type = type; - - return 0; -} - -static int soc_camera_pdrv_probe(struct platform_device *pdev) -{ - struct soc_camera_desc *sdesc = pdev->dev.platform_data; - struct soc_camera_subdev_desc *ssdd = &sdesc->subdev_desc; - struct soc_camera_device *icd; - int ret; - - if (!sdesc) - return -EINVAL; - - icd = devm_kzalloc(&pdev->dev, sizeof(*icd), GFP_KERNEL); - if (!icd) - return -ENOMEM; - - /* - * In the asynchronous case ssdd->num_regulators == 0 yet, so, the below - * regulator allocation is a dummy. 
They are actually requested by the - * subdevice driver, using soc_camera_power_init(). Also note, that in - * that case regulators are attached to the I2C device and not to the - * camera platform device. - */ - ret = devm_regulator_bulk_get(&pdev->dev, ssdd->sd_pdata.num_regulators, - ssdd->sd_pdata.regulators); - if (ret < 0) - return ret; - - icd->iface = sdesc->host_desc.bus_id; - icd->sdesc = sdesc; - icd->pdev = &pdev->dev; - platform_set_drvdata(pdev, icd); - - icd->user_width = DEFAULT_WIDTH; - icd->user_height = DEFAULT_HEIGHT; - - return soc_camera_device_register(icd); -} - -/* - * Only called on rmmod for each platform device, since they are not - * hot-pluggable. Now we know, that all our users - hosts and devices have - * been unloaded already - */ -static int soc_camera_pdrv_remove(struct platform_device *pdev) -{ - struct soc_camera_device *icd = platform_get_drvdata(pdev); - int i; - - if (!icd) - return -EINVAL; - - i = pdev->id; - if (i < 0) - i = 0; - - /* - * In synchronous mode with static platform devices this is called in a - * loop from drivers/base/dd.c::driver_detach(), no parallel execution, - * no need to lock. In asynchronous case the caller - - * soc_camera_host_unregister() - already holds the lock - */ - if (test_bit(i, device_map)) { - clear_bit(i, device_map); - list_del(&icd->list); - } - - return 0; -} - -static struct platform_driver __refdata soc_camera_pdrv = { - .probe = soc_camera_pdrv_probe, - .remove = soc_camera_pdrv_remove, - .driver = { - .name = "soc-camera-pdrv", - }, -}; - -module_platform_driver(soc_camera_pdrv); - -MODULE_DESCRIPTION("Image capture bus driver"); -MODULE_AUTHOR("Guennadi Liakhovetski "); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:soc-camera-pdrv"); diff --git a/drivers/staging/media/soc_camera/soc_mediabus.c b/drivers/staging/media/soc_camera/soc_mediabus.c deleted file mode 100644 index 2aa646c89c1f..000000000000 --- a/drivers/staging/media/soc_camera/soc_mediabus.c +++ /dev/null @@ -1,529 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * soc-camera media bus helper routines - * - * Copyright (C) 2009, Guennadi Liakhovetski - */ -#include -#include - -#include -#include -#include - -static const struct soc_mbus_lookup mbus_fmt[] = { -{ - .code = MEDIA_BUS_FMT_YUYV8_2X8, - .fmt = { - .fourcc = V4L2_PIX_FMT_YUYV, - .name = "YUYV", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADHI, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_YVYU8_2X8, - .fmt = { - .fourcc = V4L2_PIX_FMT_YVYU, - .name = "YVYU", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADHI, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_UYVY8_2X8, - .fmt = { - .fourcc = V4L2_PIX_FMT_UYVY, - .name = "UYVY", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADHI, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_VYUY8_2X8, - .fmt = { - .fourcc = V4L2_PIX_FMT_VYUY, - .name = "VYUY", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADHI, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE, - .fmt = { - .fourcc = V4L2_PIX_FMT_RGB555, - .name = "RGB555", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADHI, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE, - .fmt = { - .fourcc = V4L2_PIX_FMT_RGB555X, - .name = 
"RGB555X", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADHI, - .order = SOC_MBUS_ORDER_BE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_RGB565_2X8_LE, - .fmt = { - .fourcc = V4L2_PIX_FMT_RGB565, - .name = "RGB565", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADHI, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_RGB565_2X8_BE, - .fmt = { - .fourcc = V4L2_PIX_FMT_RGB565X, - .name = "RGB565X", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADHI, - .order = SOC_MBUS_ORDER_BE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_RGB666_1X18, - .fmt = { - .fourcc = V4L2_PIX_FMT_RGB32, - .name = "RGB666/32bpp", - .bits_per_sample = 18, - .packing = SOC_MBUS_PACKING_EXTEND32, - .order = SOC_MBUS_ORDER_LE, - }, -}, { - .code = MEDIA_BUS_FMT_RGB888_1X24, - .fmt = { - .fourcc = V4L2_PIX_FMT_RGB32, - .name = "RGB888/32bpp", - .bits_per_sample = 24, - .packing = SOC_MBUS_PACKING_EXTEND32, - .order = SOC_MBUS_ORDER_LE, - }, -}, { - .code = MEDIA_BUS_FMT_RGB888_2X12_BE, - .fmt = { - .fourcc = V4L2_PIX_FMT_RGB32, - .name = "RGB888/32bpp", - .bits_per_sample = 12, - .packing = SOC_MBUS_PACKING_EXTEND32, - .order = SOC_MBUS_ORDER_BE, - }, -}, { - .code = MEDIA_BUS_FMT_RGB888_2X12_LE, - .fmt = { - .fourcc = V4L2_PIX_FMT_RGB32, - .name = "RGB888/32bpp", - .bits_per_sample = 12, - .packing = SOC_MBUS_PACKING_EXTEND32, - .order = SOC_MBUS_ORDER_LE, - }, -}, { - .code = MEDIA_BUS_FMT_SBGGR8_1X8, - .fmt = { - .fourcc = V4L2_PIX_FMT_SBGGR8, - .name = "Bayer 8 BGGR", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_NONE, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_SBGGR10_1X10, - .fmt = { - .fourcc = V4L2_PIX_FMT_SBGGR10, - .name = "Bayer 10 BGGR", - .bits_per_sample = 10, - .packing = SOC_MBUS_PACKING_EXTEND16, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_Y8_1X8, - .fmt = { - .fourcc = V4L2_PIX_FMT_GREY, - .name = "Grey", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_NONE, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_Y10_1X10, - .fmt = { - .fourcc = V4L2_PIX_FMT_Y10, - .name = "Grey 10bit", - .bits_per_sample = 10, - .packing = SOC_MBUS_PACKING_EXTEND16, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, - .fmt = { - .fourcc = V4L2_PIX_FMT_SBGGR10, - .name = "Bayer 10 BGGR", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADHI, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE, - .fmt = { - .fourcc = V4L2_PIX_FMT_SBGGR10, - .name = "Bayer 10 BGGR", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADLO, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE, - .fmt = { - .fourcc = V4L2_PIX_FMT_SBGGR10, - .name = "Bayer 10 BGGR", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADHI, - .order = SOC_MBUS_ORDER_BE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE, - .fmt = { - .fourcc = V4L2_PIX_FMT_SBGGR10, - .name = "Bayer 10 BGGR", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADLO, - .order = SOC_MBUS_ORDER_BE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_JPEG_1X8, - .fmt = { - 
.fourcc = V4L2_PIX_FMT_JPEG, - .name = "JPEG", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_VARIABLE, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE, - .fmt = { - .fourcc = V4L2_PIX_FMT_RGB444, - .name = "RGB444", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADHI, - .order = SOC_MBUS_ORDER_BE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_YUYV8_1_5X8, - .fmt = { - .fourcc = V4L2_PIX_FMT_YUV420, - .name = "YUYV 4:2:0", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_1_5X8, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_YVYU8_1_5X8, - .fmt = { - .fourcc = V4L2_PIX_FMT_YVU420, - .name = "YVYU 4:2:0", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_1_5X8, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_UYVY8_1X16, - .fmt = { - .fourcc = V4L2_PIX_FMT_UYVY, - .name = "UYVY 16bit", - .bits_per_sample = 16, - .packing = SOC_MBUS_PACKING_EXTEND16, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_VYUY8_1X16, - .fmt = { - .fourcc = V4L2_PIX_FMT_VYUY, - .name = "VYUY 16bit", - .bits_per_sample = 16, - .packing = SOC_MBUS_PACKING_EXTEND16, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_YUYV8_1X16, - .fmt = { - .fourcc = V4L2_PIX_FMT_YUYV, - .name = "YUYV 16bit", - .bits_per_sample = 16, - .packing = SOC_MBUS_PACKING_EXTEND16, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_YVYU8_1X16, - .fmt = { - .fourcc = V4L2_PIX_FMT_YVYU, - .name = "YVYU 16bit", - .bits_per_sample = 16, - .packing = SOC_MBUS_PACKING_EXTEND16, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_SGRBG8_1X8, - .fmt = { - .fourcc = V4L2_PIX_FMT_SGRBG8, - .name = "Bayer 8 GRBG", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_NONE, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, - .fmt = { - .fourcc = V4L2_PIX_FMT_SGRBG10DPCM8, - .name = "Bayer 10 BGGR DPCM 8", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_NONE, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_SGBRG10_1X10, - .fmt = { - .fourcc = V4L2_PIX_FMT_SGBRG10, - .name = "Bayer 10 GBRG", - .bits_per_sample = 10, - .packing = SOC_MBUS_PACKING_EXTEND16, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_SGRBG10_1X10, - .fmt = { - .fourcc = V4L2_PIX_FMT_SGRBG10, - .name = "Bayer 10 GRBG", - .bits_per_sample = 10, - .packing = SOC_MBUS_PACKING_EXTEND16, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_SRGGB10_1X10, - .fmt = { - .fourcc = V4L2_PIX_FMT_SRGGB10, - .name = "Bayer 10 RGGB", - .bits_per_sample = 10, - .packing = SOC_MBUS_PACKING_EXTEND16, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_SBGGR12_1X12, - .fmt = { - .fourcc = V4L2_PIX_FMT_SBGGR12, - .name = "Bayer 12 BGGR", - .bits_per_sample = 12, - .packing = SOC_MBUS_PACKING_EXTEND16, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_SGBRG12_1X12, - .fmt = { - .fourcc = V4L2_PIX_FMT_SGBRG12, - .name = "Bayer 12 GBRG", - .bits_per_sample = 12, - 
.packing = SOC_MBUS_PACKING_EXTEND16, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_SGRBG12_1X12, - .fmt = { - .fourcc = V4L2_PIX_FMT_SGRBG12, - .name = "Bayer 12 GRBG", - .bits_per_sample = 12, - .packing = SOC_MBUS_PACKING_EXTEND16, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, { - .code = MEDIA_BUS_FMT_SRGGB12_1X12, - .fmt = { - .fourcc = V4L2_PIX_FMT_SRGGB12, - .name = "Bayer 12 RGGB", - .bits_per_sample = 12, - .packing = SOC_MBUS_PACKING_EXTEND16, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PACKED, - }, -}, -}; - -int soc_mbus_samples_per_pixel(const struct soc_mbus_pixelfmt *mf, - unsigned int *numerator, unsigned int *denominator) -{ - switch (mf->packing) { - case SOC_MBUS_PACKING_NONE: - case SOC_MBUS_PACKING_EXTEND16: - *numerator = 1; - *denominator = 1; - return 0; - case SOC_MBUS_PACKING_EXTEND32: - *numerator = 1; - *denominator = 1; - return 0; - case SOC_MBUS_PACKING_2X8_PADHI: - case SOC_MBUS_PACKING_2X8_PADLO: - *numerator = 2; - *denominator = 1; - return 0; - case SOC_MBUS_PACKING_1_5X8: - *numerator = 3; - *denominator = 2; - return 0; - case SOC_MBUS_PACKING_VARIABLE: - *numerator = 0; - *denominator = 1; - return 0; - } - return -EINVAL; -} -EXPORT_SYMBOL(soc_mbus_samples_per_pixel); - -s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf) -{ - if (mf->layout != SOC_MBUS_LAYOUT_PACKED) - return width * mf->bits_per_sample / 8; - - switch (mf->packing) { - case SOC_MBUS_PACKING_NONE: - return width * mf->bits_per_sample / 8; - case SOC_MBUS_PACKING_2X8_PADHI: - case SOC_MBUS_PACKING_2X8_PADLO: - case SOC_MBUS_PACKING_EXTEND16: - return width * 2; - case SOC_MBUS_PACKING_1_5X8: - return width * 3 / 2; - case SOC_MBUS_PACKING_VARIABLE: - return 0; - case SOC_MBUS_PACKING_EXTEND32: - return width * 4; - } - return -EINVAL; -} -EXPORT_SYMBOL(soc_mbus_bytes_per_line); - -s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf, - u32 bytes_per_line, u32 height) -{ - if (mf->layout == SOC_MBUS_LAYOUT_PACKED) - return bytes_per_line * height; - - switch (mf->packing) { - case SOC_MBUS_PACKING_2X8_PADHI: - case SOC_MBUS_PACKING_2X8_PADLO: - return bytes_per_line * height * 2; - case SOC_MBUS_PACKING_1_5X8: - return bytes_per_line * height * 3 / 2; - default: - return -EINVAL; - } -} -EXPORT_SYMBOL(soc_mbus_image_size); - -const struct soc_mbus_pixelfmt *soc_mbus_find_fmtdesc( - u32 code, - const struct soc_mbus_lookup *lookup, - int n) -{ - int i; - - for (i = 0; i < n; i++) - if (lookup[i].code == code) - return &lookup[i].fmt; - - return NULL; -} -EXPORT_SYMBOL(soc_mbus_find_fmtdesc); - -const struct soc_mbus_pixelfmt *soc_mbus_get_fmtdesc( - u32 code) -{ - return soc_mbus_find_fmtdesc(code, mbus_fmt, ARRAY_SIZE(mbus_fmt)); -} -EXPORT_SYMBOL(soc_mbus_get_fmtdesc); - -unsigned int soc_mbus_config_compatible(const struct v4l2_mbus_config *cfg, - unsigned int flags) -{ - unsigned long common_flags; - bool hsync = true, vsync = true, pclk, data, mode; - bool mipi_lanes, mipi_clock; - - common_flags = cfg->flags & flags; - - switch (cfg->type) { - case V4L2_MBUS_PARALLEL: - hsync = common_flags & (V4L2_MBUS_HSYNC_ACTIVE_HIGH | - V4L2_MBUS_HSYNC_ACTIVE_LOW); - vsync = common_flags & (V4L2_MBUS_VSYNC_ACTIVE_HIGH | - V4L2_MBUS_VSYNC_ACTIVE_LOW); - /* fall through */ - case V4L2_MBUS_BT656: - pclk = common_flags & (V4L2_MBUS_PCLK_SAMPLE_RISING | - V4L2_MBUS_PCLK_SAMPLE_FALLING); - data = common_flags & (V4L2_MBUS_DATA_ACTIVE_HIGH | - V4L2_MBUS_DATA_ACTIVE_LOW); - 
mode = common_flags & (V4L2_MBUS_MASTER | V4L2_MBUS_SLAVE); - return (!hsync || !vsync || !pclk || !data || !mode) ? - 0 : common_flags; - case V4L2_MBUS_CSI2_DPHY: - mipi_lanes = common_flags & V4L2_MBUS_CSI2_LANES; - mipi_clock = common_flags & (V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK | - V4L2_MBUS_CSI2_CONTINUOUS_CLOCK); - return (!mipi_lanes || !mipi_clock) ? 0 : common_flags; - default: - WARN_ON(1); - return -EINVAL; - } - return 0; -} -EXPORT_SYMBOL(soc_mbus_config_compatible); - -static int __init soc_mbus_init(void) -{ - return 0; -} - -static void __exit soc_mbus_exit(void) -{ -} - -module_init(soc_mbus_init); -module_exit(soc_mbus_exit); - -MODULE_DESCRIPTION("soc-camera media bus interface"); -MODULE_AUTHOR("Guennadi Liakhovetski "); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/staging/media/soc_camera/soc_mt9v022.c b/drivers/staging/media/soc_camera/soc_mt9v022.c deleted file mode 100644 index 1739a618846d..000000000000 --- a/drivers/staging/media/soc_camera/soc_mt9v022.c +++ /dev/null @@ -1,1008 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Driver for MT9V022 CMOS Image Sensor from Micron - * - * Copyright (C) 2008, Guennadi Liakhovetski - */ -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -/* - * mt9v022 i2c address 0x48, 0x4c, 0x58, 0x5c - * The platform has to define struct i2c_board_info objects and link to them - * from struct soc_camera_host_desc - */ - -static char *sensor_type; -module_param(sensor_type, charp, S_IRUGO); -MODULE_PARM_DESC(sensor_type, "Sensor type: \"colour\" or \"monochrome\""); - -/* mt9v022 selected register addresses */ -#define MT9V022_CHIP_VERSION 0x00 -#define MT9V022_COLUMN_START 0x01 -#define MT9V022_ROW_START 0x02 -#define MT9V022_WINDOW_HEIGHT 0x03 -#define MT9V022_WINDOW_WIDTH 0x04 -#define MT9V022_HORIZONTAL_BLANKING 0x05 -#define MT9V022_VERTICAL_BLANKING 0x06 -#define MT9V022_CHIP_CONTROL 0x07 -#define MT9V022_SHUTTER_WIDTH1 0x08 -#define MT9V022_SHUTTER_WIDTH2 0x09 -#define MT9V022_SHUTTER_WIDTH_CTRL 0x0a -#define MT9V022_TOTAL_SHUTTER_WIDTH 0x0b -#define MT9V022_RESET 0x0c -#define MT9V022_READ_MODE 0x0d -#define MT9V022_MONITOR_MODE 0x0e -#define MT9V022_PIXEL_OPERATION_MODE 0x0f -#define MT9V022_LED_OUT_CONTROL 0x1b -#define MT9V022_ADC_MODE_CONTROL 0x1c -#define MT9V022_REG32 0x20 -#define MT9V022_ANALOG_GAIN 0x35 -#define MT9V022_BLACK_LEVEL_CALIB_CTRL 0x47 -#define MT9V022_PIXCLK_FV_LV 0x74 -#define MT9V022_DIGITAL_TEST_PATTERN 0x7f -#define MT9V022_AEC_AGC_ENABLE 0xAF -#define MT9V022_MAX_TOTAL_SHUTTER_WIDTH 0xBD - -/* mt9v024 partial list register addresses changes with respect to mt9v022 */ -#define MT9V024_PIXCLK_FV_LV 0x72 -#define MT9V024_MAX_TOTAL_SHUTTER_WIDTH 0xAD - -/* Progressive scan, master, defaults */ -#define MT9V022_CHIP_CONTROL_DEFAULT 0x188 - -#define MT9V022_MAX_WIDTH 752 -#define MT9V022_MAX_HEIGHT 480 -#define MT9V022_MIN_WIDTH 48 -#define MT9V022_MIN_HEIGHT 32 -#define MT9V022_COLUMN_SKIP 1 -#define MT9V022_ROW_SKIP 4 - -#define MT9V022_HORIZONTAL_BLANKING_MIN 43 -#define MT9V022_HORIZONTAL_BLANKING_MAX 1023 -#define MT9V022_HORIZONTAL_BLANKING_DEF 94 -#define MT9V022_VERTICAL_BLANKING_MIN 2 -#define MT9V022_VERTICAL_BLANKING_MAX 3000 -#define MT9V022_VERTICAL_BLANKING_DEF 45 - -#define is_mt9v022_rev3(id) (id == 0x1313) -#define is_mt9v024(id) (id == 0x1324) - -/* MT9V022 has only one fixed colorspace per pixelcode */ -struct mt9v022_datafmt { - u32 code; - enum v4l2_colorspace colorspace; -}; - -/* Find a data format by a 
pixel code in an array */ -static const struct mt9v022_datafmt *mt9v022_find_datafmt( - u32 code, const struct mt9v022_datafmt *fmt, - int n) -{ - int i; - for (i = 0; i < n; i++) - if (fmt[i].code == code) - return fmt + i; - - return NULL; -} - -static const struct mt9v022_datafmt mt9v022_colour_fmts[] = { - /* - * Order important: first natively supported, - * second supported with a GPIO extender - */ - {MEDIA_BUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB}, - {MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB}, -}; - -static const struct mt9v022_datafmt mt9v022_monochrome_fmts[] = { - /* Order important - see above */ - {MEDIA_BUS_FMT_Y10_1X10, V4L2_COLORSPACE_JPEG}, - {MEDIA_BUS_FMT_Y8_1X8, V4L2_COLORSPACE_JPEG}, -}; - -/* only registers with different addresses on different mt9v02x sensors */ -struct mt9v02x_register { - u8 max_total_shutter_width; - u8 pixclk_fv_lv; -}; - -static const struct mt9v02x_register mt9v022_register = { - .max_total_shutter_width = MT9V022_MAX_TOTAL_SHUTTER_WIDTH, - .pixclk_fv_lv = MT9V022_PIXCLK_FV_LV, -}; - -static const struct mt9v02x_register mt9v024_register = { - .max_total_shutter_width = MT9V024_MAX_TOTAL_SHUTTER_WIDTH, - .pixclk_fv_lv = MT9V024_PIXCLK_FV_LV, -}; - -enum mt9v022_model { - MT9V022IX7ATM, - MT9V022IX7ATC, -}; - -struct mt9v022 { - struct v4l2_subdev subdev; - struct v4l2_ctrl_handler hdl; - struct { - /* exposure/auto-exposure cluster */ - struct v4l2_ctrl *autoexposure; - struct v4l2_ctrl *exposure; - }; - struct { - /* gain/auto-gain cluster */ - struct v4l2_ctrl *autogain; - struct v4l2_ctrl *gain; - }; - struct v4l2_ctrl *hblank; - struct v4l2_ctrl *vblank; - struct v4l2_rect rect; /* Sensor window */ - struct v4l2_clk *clk; - const struct mt9v022_datafmt *fmt; - const struct mt9v022_datafmt *fmts; - const struct mt9v02x_register *reg; - int num_fmts; - enum mt9v022_model model; - u16 chip_control; - u16 chip_version; - unsigned short y_skip_top; /* Lines to skip at the top */ -}; - -static struct mt9v022 *to_mt9v022(const struct i2c_client *client) -{ - return container_of(i2c_get_clientdata(client), struct mt9v022, subdev); -} - -static int reg_read(struct i2c_client *client, const u8 reg) -{ - return i2c_smbus_read_word_swapped(client, reg); -} - -static int reg_write(struct i2c_client *client, const u8 reg, - const u16 data) -{ - return i2c_smbus_write_word_swapped(client, reg, data); -} - -static int reg_set(struct i2c_client *client, const u8 reg, - const u16 data) -{ - int ret; - - ret = reg_read(client, reg); - if (ret < 0) - return ret; - return reg_write(client, reg, ret | data); -} - -static int reg_clear(struct i2c_client *client, const u8 reg, - const u16 data) -{ - int ret; - - ret = reg_read(client, reg); - if (ret < 0) - return ret; - return reg_write(client, reg, ret & ~data); -} - -static int mt9v022_init(struct i2c_client *client) -{ - struct mt9v022 *mt9v022 = to_mt9v022(client); - int ret; - - /* - * Almost the default mode: master, parallel, simultaneous, and an - * undocumented bit 0x200, which is present in table 7, but not in 8, - * plus snapshot mode to disable scan for now - */ - mt9v022->chip_control |= 0x10; - ret = reg_write(client, MT9V022_CHIP_CONTROL, mt9v022->chip_control); - if (!ret) - ret = reg_write(client, MT9V022_READ_MODE, 0x300); - - /* All defaults */ - if (!ret) - /* AEC, AGC on */ - ret = reg_set(client, MT9V022_AEC_AGC_ENABLE, 0x3); - if (!ret) - ret = reg_write(client, MT9V022_ANALOG_GAIN, 16); - if (!ret) - ret = reg_write(client, MT9V022_TOTAL_SHUTTER_WIDTH, 480); - if (!ret) - ret = 
reg_write(client, mt9v022->reg->max_total_shutter_width, 480); - if (!ret) - /* default - auto */ - ret = reg_clear(client, MT9V022_BLACK_LEVEL_CALIB_CTRL, 1); - if (!ret) - ret = reg_write(client, MT9V022_DIGITAL_TEST_PATTERN, 0); - if (!ret) - return v4l2_ctrl_handler_setup(&mt9v022->hdl); - - return ret; -} - -static int mt9v022_s_stream(struct v4l2_subdev *sd, int enable) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct mt9v022 *mt9v022 = to_mt9v022(client); - - if (enable) { - /* Switch to master "normal" mode */ - mt9v022->chip_control &= ~0x10; - if (is_mt9v022_rev3(mt9v022->chip_version) || - is_mt9v024(mt9v022->chip_version)) { - /* - * Unset snapshot mode specific settings: clear bit 9 - * and bit 2 in reg. 0x20 when in normal mode. - */ - if (reg_clear(client, MT9V022_REG32, 0x204)) - return -EIO; - } - } else { - /* Switch to snapshot mode */ - mt9v022->chip_control |= 0x10; - if (is_mt9v022_rev3(mt9v022->chip_version) || - is_mt9v024(mt9v022->chip_version)) { - /* - * Required settings for snapshot mode: set bit 9 - * (RST enable) and bit 2 (CR enable) in reg. 0x20 - * See TechNote TN0960 or TN-09-225. - */ - if (reg_set(client, MT9V022_REG32, 0x204)) - return -EIO; - } - } - - if (reg_write(client, MT9V022_CHIP_CONTROL, mt9v022->chip_control) < 0) - return -EIO; - return 0; -} - -static int mt9v022_set_selection(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_selection *sel) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct mt9v022 *mt9v022 = to_mt9v022(client); - struct v4l2_rect rect = sel->r; - int min_row, min_blank; - int ret; - - if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE || - sel->target != V4L2_SEL_TGT_CROP) - return -EINVAL; - - /* Bayer format - even size lengths */ - if (mt9v022->fmts == mt9v022_colour_fmts) { - rect.width = ALIGN(rect.width, 2); - rect.height = ALIGN(rect.height, 2); - /* Let the user play with the starting pixel */ - } - - soc_camera_limit_side(&rect.left, &rect.width, - MT9V022_COLUMN_SKIP, MT9V022_MIN_WIDTH, MT9V022_MAX_WIDTH); - - soc_camera_limit_side(&rect.top, &rect.height, - MT9V022_ROW_SKIP, MT9V022_MIN_HEIGHT, MT9V022_MAX_HEIGHT); - - /* Like in example app. Contradicts the datasheet though */ - ret = reg_read(client, MT9V022_AEC_AGC_ENABLE); - if (ret >= 0) { - if (ret & 1) /* Autoexposure */ - ret = reg_write(client, mt9v022->reg->max_total_shutter_width, - rect.height + mt9v022->y_skip_top + 43); - /* - * If autoexposure is off, there is no need to set - * MT9V022_TOTAL_SHUTTER_WIDTH here. Autoexposure can be off - * only if the user has set exposure manually, using the - * V4L2_CID_EXPOSURE_AUTO with the value V4L2_EXPOSURE_MANUAL. - * In this case the register MT9V022_TOTAL_SHUTTER_WIDTH - * already contains the correct value. - */ - } - /* Setup frame format: defaults apart from width and height */ - if (!ret) - ret = reg_write(client, MT9V022_COLUMN_START, rect.left); - if (!ret) - ret = reg_write(client, MT9V022_ROW_START, rect.top); - /* - * mt9v022: min total row time is 660 columns, min blanking is 43 - * mt9v024: min total row time is 690 columns, min blanking is 61 - */ - if (is_mt9v024(mt9v022->chip_version)) { - min_row = 690; - min_blank = 61; - } else { - min_row = 660; - min_blank = 43; - } - if (!ret) - ret = v4l2_ctrl_s_ctrl(mt9v022->hblank, - rect.width > min_row - min_blank ? 
- min_blank : min_row - rect.width); - if (!ret) - ret = v4l2_ctrl_s_ctrl(mt9v022->vblank, 45); - if (!ret) - ret = reg_write(client, MT9V022_WINDOW_WIDTH, rect.width); - if (!ret) - ret = reg_write(client, MT9V022_WINDOW_HEIGHT, - rect.height + mt9v022->y_skip_top); - - if (ret < 0) - return ret; - - dev_dbg(&client->dev, "Frame %dx%d pixel\n", rect.width, rect.height); - - mt9v022->rect = rect; - - return 0; -} - -static int mt9v022_get_selection(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_selection *sel) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct mt9v022 *mt9v022 = to_mt9v022(client); - - if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE) - return -EINVAL; - - switch (sel->target) { - case V4L2_SEL_TGT_CROP_BOUNDS: - sel->r.left = MT9V022_COLUMN_SKIP; - sel->r.top = MT9V022_ROW_SKIP; - sel->r.width = MT9V022_MAX_WIDTH; - sel->r.height = MT9V022_MAX_HEIGHT; - return 0; - case V4L2_SEL_TGT_CROP: - sel->r = mt9v022->rect; - return 0; - default: - return -EINVAL; - } -} - -static int mt9v022_get_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *mf = &format->format; - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct mt9v022 *mt9v022 = to_mt9v022(client); - - if (format->pad) - return -EINVAL; - - mf->width = mt9v022->rect.width; - mf->height = mt9v022->rect.height; - mf->code = mt9v022->fmt->code; - mf->colorspace = mt9v022->fmt->colorspace; - mf->field = V4L2_FIELD_NONE; - - return 0; -} - -static int mt9v022_s_fmt(struct v4l2_subdev *sd, - const struct mt9v022_datafmt *fmt, - struct v4l2_mbus_framefmt *mf) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct mt9v022 *mt9v022 = to_mt9v022(client); - struct v4l2_subdev_selection sel = { - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - .target = V4L2_SEL_TGT_CROP, - .r.left = mt9v022->rect.left, - .r.top = mt9v022->rect.top, - .r.width = mf->width, - .r.height = mf->height, - }; - int ret; - - /* - * The caller provides a supported format, as verified per call to - * .set_fmt(FORMAT_TRY), datawidth is from our supported format list - */ - switch (mf->code) { - case MEDIA_BUS_FMT_Y8_1X8: - case MEDIA_BUS_FMT_Y10_1X10: - if (mt9v022->model != MT9V022IX7ATM) - return -EINVAL; - break; - case MEDIA_BUS_FMT_SBGGR8_1X8: - case MEDIA_BUS_FMT_SBGGR10_1X10: - if (mt9v022->model != MT9V022IX7ATC) - return -EINVAL; - break; - default: - return -EINVAL; - } - - /* No support for scaling on this camera, just crop. 
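The horizontal blanking programmed in mt9v022_set_selection() above keeps the total row time at or above the sensor minimum (660 columns with at least 43 columns of blanking on mt9v022, 690/61 on mt9v024): wide windows get the minimum blanking, narrow windows get the shortfall made up in blanking. A small stand-alone sketch of that arithmetic (illustrative mt9v02x_hblank() helper, not the driver code):

#include <stdio.h>

/*
 * Blanking such that width + blanking never drops below the minimum total
 * row time; mirrors the expression passed to v4l2_ctrl_s_ctrl() above.
 */
static int mt9v02x_hblank(int width, int min_row, int min_blank)
{
	return width > min_row - min_blank ? min_blank : min_row - width;
}

int main(void)
{
	/* Full 752-pixel mt9v022 window: only the minimum 43 columns. */
	printf("752 -> %d\n", mt9v02x_hblank(752, 660, 43));
	/* 400-pixel window: blanking grows to 260 so that 400 + 260 >= 660. */
	printf("400 -> %d\n", mt9v02x_hblank(400, 660, 43));
	return 0;
}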
*/ - ret = mt9v022_set_selection(sd, NULL, &sel); - if (!ret) { - mf->width = mt9v022->rect.width; - mf->height = mt9v022->rect.height; - mt9v022->fmt = fmt; - mf->colorspace = fmt->colorspace; - } - - return ret; -} - -static int mt9v022_set_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *mf = &format->format; - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct mt9v022 *mt9v022 = to_mt9v022(client); - const struct mt9v022_datafmt *fmt; - int align = mf->code == MEDIA_BUS_FMT_SBGGR8_1X8 || - mf->code == MEDIA_BUS_FMT_SBGGR10_1X10; - - if (format->pad) - return -EINVAL; - - v4l_bound_align_image(&mf->width, MT9V022_MIN_WIDTH, - MT9V022_MAX_WIDTH, align, - &mf->height, MT9V022_MIN_HEIGHT + mt9v022->y_skip_top, - MT9V022_MAX_HEIGHT + mt9v022->y_skip_top, align, 0); - - fmt = mt9v022_find_datafmt(mf->code, mt9v022->fmts, - mt9v022->num_fmts); - if (!fmt) { - fmt = mt9v022->fmt; - mf->code = fmt->code; - } - - mf->colorspace = fmt->colorspace; - - if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) - return mt9v022_s_fmt(sd, fmt, mf); - cfg->try_fmt = *mf; - return 0; -} - -#ifdef CONFIG_VIDEO_ADV_DEBUG -static int mt9v022_g_register(struct v4l2_subdev *sd, - struct v4l2_dbg_register *reg) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - - if (reg->reg > 0xff) - return -EINVAL; - - reg->size = 2; - reg->val = reg_read(client, reg->reg); - - if (reg->val > 0xffff) - return -EIO; - - return 0; -} - -static int mt9v022_s_register(struct v4l2_subdev *sd, - const struct v4l2_dbg_register *reg) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - - if (reg->reg > 0xff) - return -EINVAL; - - if (reg_write(client, reg->reg, reg->val) < 0) - return -EIO; - - return 0; -} -#endif - -static int mt9v022_s_power(struct v4l2_subdev *sd, int on) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - struct mt9v022 *mt9v022 = to_mt9v022(client); - - return soc_camera_set_power(&client->dev, ssdd, mt9v022->clk, on); -} - -static int mt9v022_g_volatile_ctrl(struct v4l2_ctrl *ctrl) -{ - struct mt9v022 *mt9v022 = container_of(ctrl->handler, - struct mt9v022, hdl); - struct v4l2_subdev *sd = &mt9v022->subdev; - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct v4l2_ctrl *gain = mt9v022->gain; - struct v4l2_ctrl *exp = mt9v022->exposure; - unsigned long range; - int data; - - switch (ctrl->id) { - case V4L2_CID_AUTOGAIN: - data = reg_read(client, MT9V022_ANALOG_GAIN); - if (data < 0) - return -EIO; - - range = gain->maximum - gain->minimum; - gain->val = ((data - 16) * range + 24) / 48 + gain->minimum; - return 0; - case V4L2_CID_EXPOSURE_AUTO: - data = reg_read(client, MT9V022_TOTAL_SHUTTER_WIDTH); - if (data < 0) - return -EIO; - - range = exp->maximum - exp->minimum; - exp->val = ((data - 1) * range + 239) / 479 + exp->minimum; - return 0; - case V4L2_CID_HBLANK: - data = reg_read(client, MT9V022_HORIZONTAL_BLANKING); - if (data < 0) - return -EIO; - ctrl->val = data; - return 0; - case V4L2_CID_VBLANK: - data = reg_read(client, MT9V022_VERTICAL_BLANKING); - if (data < 0) - return -EIO; - ctrl->val = data; - return 0; - } - return -EINVAL; -} - -static int mt9v022_s_ctrl(struct v4l2_ctrl *ctrl) -{ - struct mt9v022 *mt9v022 = container_of(ctrl->handler, - struct mt9v022, hdl); - struct v4l2_subdev *sd = &mt9v022->subdev; - struct i2c_client *client = v4l2_get_subdevdata(sd); - int data; - - switch (ctrl->id) { - 
case V4L2_CID_VFLIP: - if (ctrl->val) - data = reg_set(client, MT9V022_READ_MODE, 0x10); - else - data = reg_clear(client, MT9V022_READ_MODE, 0x10); - if (data < 0) - return -EIO; - return 0; - case V4L2_CID_HFLIP: - if (ctrl->val) - data = reg_set(client, MT9V022_READ_MODE, 0x20); - else - data = reg_clear(client, MT9V022_READ_MODE, 0x20); - if (data < 0) - return -EIO; - return 0; - case V4L2_CID_AUTOGAIN: - if (ctrl->val) { - if (reg_set(client, MT9V022_AEC_AGC_ENABLE, 0x2) < 0) - return -EIO; - } else { - struct v4l2_ctrl *gain = mt9v022->gain; - /* mt9v022 has minimum == default */ - unsigned long range = gain->maximum - gain->minimum; - /* Valid values 16 to 64, 32 to 64 must be even. */ - unsigned long gain_val = ((gain->val - (s32)gain->minimum) * - 48 + range / 2) / range + 16; - - if (gain_val >= 32) - gain_val &= ~1; - - /* - * The user wants to set gain manually, hope, she - * knows, what she's doing... Switch AGC off. - */ - if (reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x2) < 0) - return -EIO; - - dev_dbg(&client->dev, "Setting gain from %d to %lu\n", - reg_read(client, MT9V022_ANALOG_GAIN), gain_val); - if (reg_write(client, MT9V022_ANALOG_GAIN, gain_val) < 0) - return -EIO; - } - return 0; - case V4L2_CID_EXPOSURE_AUTO: - if (ctrl->val == V4L2_EXPOSURE_AUTO) { - data = reg_set(client, MT9V022_AEC_AGC_ENABLE, 0x1); - } else { - struct v4l2_ctrl *exp = mt9v022->exposure; - unsigned long range = exp->maximum - exp->minimum; - unsigned long shutter = ((exp->val - (s32)exp->minimum) * - 479 + range / 2) / range + 1; - - /* - * The user wants to set shutter width manually, hope, - * she knows, what she's doing... Switch AEC off. - */ - data = reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x1); - if (data < 0) - return -EIO; - dev_dbg(&client->dev, "Shutter width from %d to %lu\n", - reg_read(client, MT9V022_TOTAL_SHUTTER_WIDTH), - shutter); - if (reg_write(client, MT9V022_TOTAL_SHUTTER_WIDTH, - shutter) < 0) - return -EIO; - } - return 0; - case V4L2_CID_HBLANK: - if (reg_write(client, MT9V022_HORIZONTAL_BLANKING, - ctrl->val) < 0) - return -EIO; - return 0; - case V4L2_CID_VBLANK: - if (reg_write(client, MT9V022_VERTICAL_BLANKING, - ctrl->val) < 0) - return -EIO; - return 0; - } - return -EINVAL; -} - -/* - * Interface active, can use i2c. If it fails, it can indeed mean, that - * this wasn't our capture interface, so, we wait for the right one - */ -static int mt9v022_video_probe(struct i2c_client *client) -{ - struct mt9v022 *mt9v022 = to_mt9v022(client); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - s32 data; - int ret; - unsigned long flags; - - ret = mt9v022_s_power(&mt9v022->subdev, 1); - if (ret < 0) - return ret; - - /* Read out the chip version register */ - data = reg_read(client, MT9V022_CHIP_VERSION); - - /* must be 0x1311, 0x1313 or 0x1324 */ - if (data != 0x1311 && data != 0x1313 && data != 0x1324) { - ret = -ENODEV; - dev_info(&client->dev, "No MT9V022 found, ID register 0x%x\n", - data); - goto ei2c; - } - - mt9v022->chip_version = data; - - mt9v022->reg = is_mt9v024(data) ? 
&mt9v024_register : - &mt9v022_register; - - /* Soft reset */ - ret = reg_write(client, MT9V022_RESET, 1); - if (ret < 0) - goto ei2c; - /* 15 clock cycles */ - udelay(200); - if (reg_read(client, MT9V022_RESET)) { - dev_err(&client->dev, "Resetting MT9V022 failed!\n"); - if (ret > 0) - ret = -EIO; - goto ei2c; - } - - /* Set monochrome or colour sensor type */ - if (sensor_type && (!strcmp("colour", sensor_type) || - !strcmp("color", sensor_type))) { - ret = reg_write(client, MT9V022_PIXEL_OPERATION_MODE, 4 | 0x11); - mt9v022->model = MT9V022IX7ATC; - mt9v022->fmts = mt9v022_colour_fmts; - } else { - ret = reg_write(client, MT9V022_PIXEL_OPERATION_MODE, 0x11); - mt9v022->model = MT9V022IX7ATM; - mt9v022->fmts = mt9v022_monochrome_fmts; - } - - if (ret < 0) - goto ei2c; - - mt9v022->num_fmts = 0; - - /* - * This is a 10bit sensor, so by default we only allow 10bit. - * The platform may support different bus widths due to - * different routing of the data lines. - */ - if (ssdd->query_bus_param) - flags = ssdd->query_bus_param(ssdd); - else - flags = SOCAM_DATAWIDTH_10; - - if (flags & SOCAM_DATAWIDTH_10) - mt9v022->num_fmts++; - else - mt9v022->fmts++; - - if (flags & SOCAM_DATAWIDTH_8) - mt9v022->num_fmts++; - - mt9v022->fmt = &mt9v022->fmts[0]; - - dev_info(&client->dev, "Detected a MT9V022 chip ID %x, %s sensor\n", - data, mt9v022->model == MT9V022IX7ATM ? - "monochrome" : "colour"); - - ret = mt9v022_init(client); - if (ret < 0) - dev_err(&client->dev, "Failed to initialise the camera\n"); - -ei2c: - mt9v022_s_power(&mt9v022->subdev, 0); - return ret; -} - -static int mt9v022_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct mt9v022 *mt9v022 = to_mt9v022(client); - - *lines = mt9v022->y_skip_top; - - return 0; -} - -static const struct v4l2_ctrl_ops mt9v022_ctrl_ops = { - .g_volatile_ctrl = mt9v022_g_volatile_ctrl, - .s_ctrl = mt9v022_s_ctrl, -}; - -static const struct v4l2_subdev_core_ops mt9v022_subdev_core_ops = { -#ifdef CONFIG_VIDEO_ADV_DEBUG - .g_register = mt9v022_g_register, - .s_register = mt9v022_s_register, -#endif - .s_power = mt9v022_s_power, -}; - -static int mt9v022_enum_mbus_code(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_mbus_code_enum *code) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct mt9v022 *mt9v022 = to_mt9v022(client); - - if (code->pad || code->index >= mt9v022->num_fmts) - return -EINVAL; - - code->code = mt9v022->fmts[code->index].code; - return 0; -} - -static int mt9v022_g_mbus_config(struct v4l2_subdev *sd, - struct v4l2_mbus_config *cfg) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - - cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_SLAVE | - V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING | - V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW | - V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW | - V4L2_MBUS_DATA_ACTIVE_HIGH; - cfg->type = V4L2_MBUS_PARALLEL; - cfg->flags = soc_camera_apply_board_flags(ssdd, cfg); - - return 0; -} - -static int mt9v022_s_mbus_config(struct v4l2_subdev *sd, - const struct v4l2_mbus_config *cfg) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - struct mt9v022 *mt9v022 = to_mt9v022(client); - unsigned long flags = soc_camera_apply_board_flags(ssdd, cfg); - unsigned int bps = 
soc_mbus_get_fmtdesc(mt9v022->fmt->code)->bits_per_sample; - int ret; - u16 pixclk = 0; - - if (ssdd->set_bus_param) { - ret = ssdd->set_bus_param(ssdd, 1 << (bps - 1)); - if (ret) - return ret; - } else if (bps != 10) { - /* - * Without board specific bus width settings we only support the - * sensors native bus width - */ - return -EINVAL; - } - - if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING) - pixclk |= 0x10; - - if (!(flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)) - pixclk |= 0x1; - - if (!(flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)) - pixclk |= 0x2; - - ret = reg_write(client, mt9v022->reg->pixclk_fv_lv, pixclk); - if (ret < 0) - return ret; - - if (!(flags & V4L2_MBUS_MASTER)) - mt9v022->chip_control &= ~0x8; - - ret = reg_write(client, MT9V022_CHIP_CONTROL, mt9v022->chip_control); - if (ret < 0) - return ret; - - dev_dbg(&client->dev, "Calculated pixclk 0x%x, chip control 0x%x\n", - pixclk, mt9v022->chip_control); - - return 0; -} - -static const struct v4l2_subdev_video_ops mt9v022_subdev_video_ops = { - .s_stream = mt9v022_s_stream, - .g_mbus_config = mt9v022_g_mbus_config, - .s_mbus_config = mt9v022_s_mbus_config, -}; - -static const struct v4l2_subdev_sensor_ops mt9v022_subdev_sensor_ops = { - .g_skip_top_lines = mt9v022_g_skip_top_lines, -}; - -static const struct v4l2_subdev_pad_ops mt9v022_subdev_pad_ops = { - .enum_mbus_code = mt9v022_enum_mbus_code, - .get_selection = mt9v022_get_selection, - .set_selection = mt9v022_set_selection, - .get_fmt = mt9v022_get_fmt, - .set_fmt = mt9v022_set_fmt, -}; - -static const struct v4l2_subdev_ops mt9v022_subdev_ops = { - .core = &mt9v022_subdev_core_ops, - .video = &mt9v022_subdev_video_ops, - .sensor = &mt9v022_subdev_sensor_ops, - .pad = &mt9v022_subdev_pad_ops, -}; - -static int mt9v022_probe(struct i2c_client *client, - const struct i2c_device_id *did) -{ - struct mt9v022 *mt9v022; - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - struct i2c_adapter *adapter = client->adapter; - struct mt9v022_platform_data *pdata; - int ret; - - if (!ssdd) { - dev_err(&client->dev, "MT9V022 driver needs platform data\n"); - return -EINVAL; - } - - if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) { - dev_warn(&adapter->dev, - "I2C-Adapter doesn't support I2C_FUNC_SMBUS_WORD\n"); - return -EIO; - } - - mt9v022 = devm_kzalloc(&client->dev, sizeof(struct mt9v022), GFP_KERNEL); - if (!mt9v022) - return -ENOMEM; - - pdata = ssdd->drv_priv; - v4l2_i2c_subdev_init(&mt9v022->subdev, client, &mt9v022_subdev_ops); - v4l2_ctrl_handler_init(&mt9v022->hdl, 6); - v4l2_ctrl_new_std(&mt9v022->hdl, &mt9v022_ctrl_ops, - V4L2_CID_VFLIP, 0, 1, 1, 0); - v4l2_ctrl_new_std(&mt9v022->hdl, &mt9v022_ctrl_ops, - V4L2_CID_HFLIP, 0, 1, 1, 0); - mt9v022->autogain = v4l2_ctrl_new_std(&mt9v022->hdl, &mt9v022_ctrl_ops, - V4L2_CID_AUTOGAIN, 0, 1, 1, 1); - mt9v022->gain = v4l2_ctrl_new_std(&mt9v022->hdl, &mt9v022_ctrl_ops, - V4L2_CID_GAIN, 0, 127, 1, 64); - - /* - * Simulated autoexposure. 
If enabled, we calculate shutter width - * ourselves in the driver based on vertical blanking and frame width - */ - mt9v022->autoexposure = v4l2_ctrl_new_std_menu(&mt9v022->hdl, - &mt9v022_ctrl_ops, V4L2_CID_EXPOSURE_AUTO, 1, 0, - V4L2_EXPOSURE_AUTO); - mt9v022->exposure = v4l2_ctrl_new_std(&mt9v022->hdl, &mt9v022_ctrl_ops, - V4L2_CID_EXPOSURE, 1, 255, 1, 255); - - mt9v022->hblank = v4l2_ctrl_new_std(&mt9v022->hdl, &mt9v022_ctrl_ops, - V4L2_CID_HBLANK, MT9V022_HORIZONTAL_BLANKING_MIN, - MT9V022_HORIZONTAL_BLANKING_MAX, 1, - MT9V022_HORIZONTAL_BLANKING_DEF); - - mt9v022->vblank = v4l2_ctrl_new_std(&mt9v022->hdl, &mt9v022_ctrl_ops, - V4L2_CID_VBLANK, MT9V022_VERTICAL_BLANKING_MIN, - MT9V022_VERTICAL_BLANKING_MAX, 1, - MT9V022_VERTICAL_BLANKING_DEF); - - mt9v022->subdev.ctrl_handler = &mt9v022->hdl; - if (mt9v022->hdl.error) { - int err = mt9v022->hdl.error; - - dev_err(&client->dev, "control initialisation err %d\n", err); - return err; - } - v4l2_ctrl_auto_cluster(2, &mt9v022->autoexposure, - V4L2_EXPOSURE_MANUAL, true); - v4l2_ctrl_auto_cluster(2, &mt9v022->autogain, 0, true); - - mt9v022->chip_control = MT9V022_CHIP_CONTROL_DEFAULT; - - /* - * On some platforms the first read out line is corrupted. - * Workaround it by skipping if indicated by platform data. - */ - mt9v022->y_skip_top = pdata ? pdata->y_skip_top : 0; - mt9v022->rect.left = MT9V022_COLUMN_SKIP; - mt9v022->rect.top = MT9V022_ROW_SKIP; - mt9v022->rect.width = MT9V022_MAX_WIDTH; - mt9v022->rect.height = MT9V022_MAX_HEIGHT; - - mt9v022->clk = v4l2_clk_get(&client->dev, "mclk"); - if (IS_ERR(mt9v022->clk)) { - ret = PTR_ERR(mt9v022->clk); - goto eclkget; - } - - ret = mt9v022_video_probe(client); - if (ret) { - v4l2_clk_put(mt9v022->clk); -eclkget: - v4l2_ctrl_handler_free(&mt9v022->hdl); - } - - return ret; -} - -static int mt9v022_remove(struct i2c_client *client) -{ - struct mt9v022 *mt9v022 = to_mt9v022(client); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - - v4l2_clk_put(mt9v022->clk); - v4l2_device_unregister_subdev(&mt9v022->subdev); - if (ssdd->free_bus) - ssdd->free_bus(ssdd); - v4l2_ctrl_handler_free(&mt9v022->hdl); - - return 0; -} -static const struct i2c_device_id mt9v022_id[] = { - { "mt9v022", 0 }, - { } -}; -MODULE_DEVICE_TABLE(i2c, mt9v022_id); - -static struct i2c_driver mt9v022_i2c_driver = { - .driver = { - .name = "mt9v022", - }, - .probe = mt9v022_probe, - .remove = mt9v022_remove, - .id_table = mt9v022_id, -}; - -module_i2c_driver(mt9v022_i2c_driver); - -MODULE_DESCRIPTION("Micron MT9V022 Camera driver"); -MODULE_AUTHOR("Guennadi Liakhovetski "); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/media/soc_camera/soc_ov5642.c b/drivers/staging/media/soc_camera/soc_ov5642.c deleted file mode 100644 index 39ae24dca65f..000000000000 --- a/drivers/staging/media/soc_camera/soc_ov5642.c +++ /dev/null @@ -1,1085 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Driver for OV5642 CMOS Image Sensor from Omnivision - * - * Copyright (C) 2011, Bastian Hecht - * - * Based on Sony IMX074 Camera Driver - * Copyright (C) 2010, Guennadi Liakhovetski - * - * Based on Omnivision OV7670 Camera Driver - * Copyright (C) 2006-7 Jonathan Corbet - */ -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -/* OV5642 registers */ -#define REG_CHIP_ID_HIGH 0x300a -#define REG_CHIP_ID_LOW 0x300b - -#define REG_WINDOW_START_X_HIGH 0x3800 -#define REG_WINDOW_START_X_LOW 0x3801 -#define REG_WINDOW_START_Y_HIGH 0x3802 -#define 
REG_WINDOW_START_Y_LOW 0x3803 -#define REG_WINDOW_WIDTH_HIGH 0x3804 -#define REG_WINDOW_WIDTH_LOW 0x3805 -#define REG_WINDOW_HEIGHT_HIGH 0x3806 -#define REG_WINDOW_HEIGHT_LOW 0x3807 -#define REG_OUT_WIDTH_HIGH 0x3808 -#define REG_OUT_WIDTH_LOW 0x3809 -#define REG_OUT_HEIGHT_HIGH 0x380a -#define REG_OUT_HEIGHT_LOW 0x380b -#define REG_OUT_TOTAL_WIDTH_HIGH 0x380c -#define REG_OUT_TOTAL_WIDTH_LOW 0x380d -#define REG_OUT_TOTAL_HEIGHT_HIGH 0x380e -#define REG_OUT_TOTAL_HEIGHT_LOW 0x380f -#define REG_OUTPUT_FORMAT 0x4300 -#define REG_ISP_CTRL_01 0x5001 -#define REG_AVG_WINDOW_END_X_HIGH 0x5682 -#define REG_AVG_WINDOW_END_X_LOW 0x5683 -#define REG_AVG_WINDOW_END_Y_HIGH 0x5686 -#define REG_AVG_WINDOW_END_Y_LOW 0x5687 - -/* active pixel array size */ -#define OV5642_SENSOR_SIZE_X 2592 -#define OV5642_SENSOR_SIZE_Y 1944 - -/* - * About OV5642 resolution, cropping and binning: - * This sensor supports it all, at least in the feature description. - * Unfortunately, no combination of appropriate registers settings could make - * the chip work the intended way. As it works with predefined register lists, - * some undocumented registers are presumably changed there to achieve their - * goals. - * This driver currently only works for resolutions up to 720 lines with a - * 1:1 scale. Hopefully these restrictions will be removed in the future. - */ -#define OV5642_MAX_WIDTH OV5642_SENSOR_SIZE_X -#define OV5642_MAX_HEIGHT 720 - -/* default sizes */ -#define OV5642_DEFAULT_WIDTH 1280 -#define OV5642_DEFAULT_HEIGHT OV5642_MAX_HEIGHT - -/* minimum extra blanking */ -#define BLANKING_EXTRA_WIDTH 500 -#define BLANKING_EXTRA_HEIGHT 20 - -/* - * the sensor's autoexposure is buggy when setting total_height low. - * It tries to expose longer than 1 frame period without taking care of it - * and this leads to weird output. So we set 1000 lines as minimum. 
- */ -#define BLANKING_MIN_HEIGHT 1000 - -struct regval_list { - u16 reg_num; - u8 value; -}; - -static struct regval_list ov5642_default_regs_init[] = { - { 0x3103, 0x93 }, - { 0x3008, 0x82 }, - { 0x3017, 0x7f }, - { 0x3018, 0xfc }, - { 0x3810, 0xc2 }, - { 0x3615, 0xf0 }, - { 0x3000, 0x0 }, - { 0x3001, 0x0 }, - { 0x3002, 0x0 }, - { 0x3003, 0x0 }, - { 0x3004, 0xff }, - { 0x3030, 0x2b }, - { 0x3011, 0x8 }, - { 0x3010, 0x10 }, - { 0x3604, 0x60 }, - { 0x3622, 0x60 }, - { 0x3621, 0x9 }, - { 0x3709, 0x0 }, - { 0x4000, 0x21 }, - { 0x401d, 0x22 }, - { 0x3600, 0x54 }, - { 0x3605, 0x4 }, - { 0x3606, 0x3f }, - { 0x3c01, 0x80 }, - { 0x300d, 0x22 }, - { 0x3623, 0x22 }, - { 0x5000, 0x4f }, - { 0x5020, 0x4 }, - { 0x5181, 0x79 }, - { 0x5182, 0x0 }, - { 0x5185, 0x22 }, - { 0x5197, 0x1 }, - { 0x5500, 0xa }, - { 0x5504, 0x0 }, - { 0x5505, 0x7f }, - { 0x5080, 0x8 }, - { 0x300e, 0x18 }, - { 0x4610, 0x0 }, - { 0x471d, 0x5 }, - { 0x4708, 0x6 }, - { 0x370c, 0xa0 }, - { 0x5687, 0x94 }, - { 0x501f, 0x0 }, - { 0x5000, 0x4f }, - { 0x5001, 0xcf }, - { 0x4300, 0x30 }, - { 0x4300, 0x30 }, - { 0x460b, 0x35 }, - { 0x471d, 0x0 }, - { 0x3002, 0xc }, - { 0x3002, 0x0 }, - { 0x4713, 0x3 }, - { 0x471c, 0x50 }, - { 0x4721, 0x2 }, - { 0x4402, 0x90 }, - { 0x460c, 0x22 }, - { 0x3815, 0x44 }, - { 0x3503, 0x7 }, - { 0x3501, 0x73 }, - { 0x3502, 0x80 }, - { 0x350b, 0x0 }, - { 0x3818, 0xc8 }, - { 0x3824, 0x11 }, - { 0x3a00, 0x78 }, - { 0x3a1a, 0x4 }, - { 0x3a13, 0x30 }, - { 0x3a18, 0x0 }, - { 0x3a19, 0x7c }, - { 0x3a08, 0x12 }, - { 0x3a09, 0xc0 }, - { 0x3a0a, 0xf }, - { 0x3a0b, 0xa0 }, - { 0x350c, 0x7 }, - { 0x350d, 0xd0 }, - { 0x3a0d, 0x8 }, - { 0x3a0e, 0x6 }, - { 0x3500, 0x0 }, - { 0x3501, 0x0 }, - { 0x3502, 0x0 }, - { 0x350a, 0x0 }, - { 0x350b, 0x0 }, - { 0x3503, 0x0 }, - { 0x3a0f, 0x3c }, - { 0x3a10, 0x32 }, - { 0x3a1b, 0x3c }, - { 0x3a1e, 0x32 }, - { 0x3a11, 0x80 }, - { 0x3a1f, 0x20 }, - { 0x3030, 0x2b }, - { 0x3a02, 0x0 }, - { 0x3a03, 0x7d }, - { 0x3a04, 0x0 }, - { 0x3a14, 0x0 }, - { 0x3a15, 0x7d }, - { 0x3a16, 0x0 }, - { 0x3a00, 0x78 }, - { 0x3a08, 0x9 }, - { 0x3a09, 0x60 }, - { 0x3a0a, 0x7 }, - { 0x3a0b, 0xd0 }, - { 0x3a0d, 0x10 }, - { 0x3a0e, 0xd }, - { 0x4407, 0x4 }, - { 0x5193, 0x70 }, - { 0x589b, 0x0 }, - { 0x589a, 0xc0 }, - { 0x401e, 0x20 }, - { 0x4001, 0x42 }, - { 0x401c, 0x6 }, - { 0x3825, 0xac }, - { 0x3827, 0xc }, - { 0x528a, 0x1 }, - { 0x528b, 0x4 }, - { 0x528c, 0x8 }, - { 0x528d, 0x10 }, - { 0x528e, 0x20 }, - { 0x528f, 0x28 }, - { 0x5290, 0x30 }, - { 0x5292, 0x0 }, - { 0x5293, 0x1 }, - { 0x5294, 0x0 }, - { 0x5295, 0x4 }, - { 0x5296, 0x0 }, - { 0x5297, 0x8 }, - { 0x5298, 0x0 }, - { 0x5299, 0x10 }, - { 0x529a, 0x0 }, - { 0x529b, 0x20 }, - { 0x529c, 0x0 }, - { 0x529d, 0x28 }, - { 0x529e, 0x0 }, - { 0x529f, 0x30 }, - { 0x5282, 0x0 }, - { 0x5300, 0x0 }, - { 0x5301, 0x20 }, - { 0x5302, 0x0 }, - { 0x5303, 0x7c }, - { 0x530c, 0x0 }, - { 0x530d, 0xc }, - { 0x530e, 0x20 }, - { 0x530f, 0x80 }, - { 0x5310, 0x20 }, - { 0x5311, 0x80 }, - { 0x5308, 0x20 }, - { 0x5309, 0x40 }, - { 0x5304, 0x0 }, - { 0x5305, 0x30 }, - { 0x5306, 0x0 }, - { 0x5307, 0x80 }, - { 0x5314, 0x8 }, - { 0x5315, 0x20 }, - { 0x5319, 0x30 }, - { 0x5316, 0x10 }, - { 0x5317, 0x0 }, - { 0x5318, 0x2 }, - { 0x5380, 0x1 }, - { 0x5381, 0x0 }, - { 0x5382, 0x0 }, - { 0x5383, 0x4e }, - { 0x5384, 0x0 }, - { 0x5385, 0xf }, - { 0x5386, 0x0 }, - { 0x5387, 0x0 }, - { 0x5388, 0x1 }, - { 0x5389, 0x15 }, - { 0x538a, 0x0 }, - { 0x538b, 0x31 }, - { 0x538c, 0x0 }, - { 0x538d, 0x0 }, - { 0x538e, 0x0 }, - { 0x538f, 0xf }, - { 0x5390, 0x0 }, - { 0x5391, 0xab }, - { 0x5392, 0x0 }, - { 
0x5393, 0xa2 }, - { 0x5394, 0x8 }, - { 0x5480, 0x14 }, - { 0x5481, 0x21 }, - { 0x5482, 0x36 }, - { 0x5483, 0x57 }, - { 0x5484, 0x65 }, - { 0x5485, 0x71 }, - { 0x5486, 0x7d }, - { 0x5487, 0x87 }, - { 0x5488, 0x91 }, - { 0x5489, 0x9a }, - { 0x548a, 0xaa }, - { 0x548b, 0xb8 }, - { 0x548c, 0xcd }, - { 0x548d, 0xdd }, - { 0x548e, 0xea }, - { 0x548f, 0x1d }, - { 0x5490, 0x5 }, - { 0x5491, 0x0 }, - { 0x5492, 0x4 }, - { 0x5493, 0x20 }, - { 0x5494, 0x3 }, - { 0x5495, 0x60 }, - { 0x5496, 0x2 }, - { 0x5497, 0xb8 }, - { 0x5498, 0x2 }, - { 0x5499, 0x86 }, - { 0x549a, 0x2 }, - { 0x549b, 0x5b }, - { 0x549c, 0x2 }, - { 0x549d, 0x3b }, - { 0x549e, 0x2 }, - { 0x549f, 0x1c }, - { 0x54a0, 0x2 }, - { 0x54a1, 0x4 }, - { 0x54a2, 0x1 }, - { 0x54a3, 0xed }, - { 0x54a4, 0x1 }, - { 0x54a5, 0xc5 }, - { 0x54a6, 0x1 }, - { 0x54a7, 0xa5 }, - { 0x54a8, 0x1 }, - { 0x54a9, 0x6c }, - { 0x54aa, 0x1 }, - { 0x54ab, 0x41 }, - { 0x54ac, 0x1 }, - { 0x54ad, 0x20 }, - { 0x54ae, 0x0 }, - { 0x54af, 0x16 }, - { 0x54b0, 0x1 }, - { 0x54b1, 0x20 }, - { 0x54b2, 0x0 }, - { 0x54b3, 0x10 }, - { 0x54b4, 0x0 }, - { 0x54b5, 0xf0 }, - { 0x54b6, 0x0 }, - { 0x54b7, 0xdf }, - { 0x5402, 0x3f }, - { 0x5403, 0x0 }, - { 0x3406, 0x0 }, - { 0x5180, 0xff }, - { 0x5181, 0x52 }, - { 0x5182, 0x11 }, - { 0x5183, 0x14 }, - { 0x5184, 0x25 }, - { 0x5185, 0x24 }, - { 0x5186, 0x6 }, - { 0x5187, 0x8 }, - { 0x5188, 0x8 }, - { 0x5189, 0x7c }, - { 0x518a, 0x60 }, - { 0x518b, 0xb2 }, - { 0x518c, 0xb2 }, - { 0x518d, 0x44 }, - { 0x518e, 0x3d }, - { 0x518f, 0x58 }, - { 0x5190, 0x46 }, - { 0x5191, 0xf8 }, - { 0x5192, 0x4 }, - { 0x5193, 0x70 }, - { 0x5194, 0xf0 }, - { 0x5195, 0xf0 }, - { 0x5196, 0x3 }, - { 0x5197, 0x1 }, - { 0x5198, 0x4 }, - { 0x5199, 0x12 }, - { 0x519a, 0x4 }, - { 0x519b, 0x0 }, - { 0x519c, 0x6 }, - { 0x519d, 0x82 }, - { 0x519e, 0x0 }, - { 0x5025, 0x80 }, - { 0x3a0f, 0x38 }, - { 0x3a10, 0x30 }, - { 0x3a1b, 0x3a }, - { 0x3a1e, 0x2e }, - { 0x3a11, 0x60 }, - { 0x3a1f, 0x10 }, - { 0x5688, 0xa6 }, - { 0x5689, 0x6a }, - { 0x568a, 0xea }, - { 0x568b, 0xae }, - { 0x568c, 0xa6 }, - { 0x568d, 0x6a }, - { 0x568e, 0x62 }, - { 0x568f, 0x26 }, - { 0x5583, 0x40 }, - { 0x5584, 0x40 }, - { 0x5580, 0x2 }, - { 0x5000, 0xcf }, - { 0x5800, 0x27 }, - { 0x5801, 0x19 }, - { 0x5802, 0x12 }, - { 0x5803, 0xf }, - { 0x5804, 0x10 }, - { 0x5805, 0x15 }, - { 0x5806, 0x1e }, - { 0x5807, 0x2f }, - { 0x5808, 0x15 }, - { 0x5809, 0xd }, - { 0x580a, 0xa }, - { 0x580b, 0x9 }, - { 0x580c, 0xa }, - { 0x580d, 0xc }, - { 0x580e, 0x12 }, - { 0x580f, 0x19 }, - { 0x5810, 0xb }, - { 0x5811, 0x7 }, - { 0x5812, 0x4 }, - { 0x5813, 0x3 }, - { 0x5814, 0x3 }, - { 0x5815, 0x6 }, - { 0x5816, 0xa }, - { 0x5817, 0xf }, - { 0x5818, 0xa }, - { 0x5819, 0x5 }, - { 0x581a, 0x1 }, - { 0x581b, 0x0 }, - { 0x581c, 0x0 }, - { 0x581d, 0x3 }, - { 0x581e, 0x8 }, - { 0x581f, 0xc }, - { 0x5820, 0xa }, - { 0x5821, 0x5 }, - { 0x5822, 0x1 }, - { 0x5823, 0x0 }, - { 0x5824, 0x0 }, - { 0x5825, 0x3 }, - { 0x5826, 0x8 }, - { 0x5827, 0xc }, - { 0x5828, 0xe }, - { 0x5829, 0x8 }, - { 0x582a, 0x6 }, - { 0x582b, 0x4 }, - { 0x582c, 0x5 }, - { 0x582d, 0x7 }, - { 0x582e, 0xb }, - { 0x582f, 0x12 }, - { 0x5830, 0x18 }, - { 0x5831, 0x10 }, - { 0x5832, 0xc }, - { 0x5833, 0xa }, - { 0x5834, 0xb }, - { 0x5835, 0xe }, - { 0x5836, 0x15 }, - { 0x5837, 0x19 }, - { 0x5838, 0x32 }, - { 0x5839, 0x1f }, - { 0x583a, 0x18 }, - { 0x583b, 0x16 }, - { 0x583c, 0x17 }, - { 0x583d, 0x1e }, - { 0x583e, 0x26 }, - { 0x583f, 0x53 }, - { 0x5840, 0x10 }, - { 0x5841, 0xf }, - { 0x5842, 0xd }, - { 0x5843, 0xc }, - { 0x5844, 0xe }, - { 0x5845, 0x9 }, - { 0x5846, 0x11 }, - 
{ 0x5847, 0x10 }, - { 0x5848, 0x10 }, - { 0x5849, 0x10 }, - { 0x584a, 0x10 }, - { 0x584b, 0xe }, - { 0x584c, 0x10 }, - { 0x584d, 0x10 }, - { 0x584e, 0x11 }, - { 0x584f, 0x10 }, - { 0x5850, 0xf }, - { 0x5851, 0xc }, - { 0x5852, 0xf }, - { 0x5853, 0x10 }, - { 0x5854, 0x10 }, - { 0x5855, 0xf }, - { 0x5856, 0xe }, - { 0x5857, 0xb }, - { 0x5858, 0x10 }, - { 0x5859, 0xd }, - { 0x585a, 0xd }, - { 0x585b, 0xc }, - { 0x585c, 0xc }, - { 0x585d, 0xc }, - { 0x585e, 0xb }, - { 0x585f, 0xc }, - { 0x5860, 0xc }, - { 0x5861, 0xc }, - { 0x5862, 0xd }, - { 0x5863, 0x8 }, - { 0x5864, 0x11 }, - { 0x5865, 0x18 }, - { 0x5866, 0x18 }, - { 0x5867, 0x19 }, - { 0x5868, 0x17 }, - { 0x5869, 0x19 }, - { 0x586a, 0x16 }, - { 0x586b, 0x13 }, - { 0x586c, 0x13 }, - { 0x586d, 0x12 }, - { 0x586e, 0x13 }, - { 0x586f, 0x16 }, - { 0x5870, 0x14 }, - { 0x5871, 0x12 }, - { 0x5872, 0x10 }, - { 0x5873, 0x11 }, - { 0x5874, 0x11 }, - { 0x5875, 0x16 }, - { 0x5876, 0x14 }, - { 0x5877, 0x11 }, - { 0x5878, 0x10 }, - { 0x5879, 0xf }, - { 0x587a, 0x10 }, - { 0x587b, 0x14 }, - { 0x587c, 0x13 }, - { 0x587d, 0x12 }, - { 0x587e, 0x11 }, - { 0x587f, 0x11 }, - { 0x5880, 0x12 }, - { 0x5881, 0x15 }, - { 0x5882, 0x14 }, - { 0x5883, 0x15 }, - { 0x5884, 0x15 }, - { 0x5885, 0x15 }, - { 0x5886, 0x13 }, - { 0x5887, 0x17 }, - { 0x3710, 0x10 }, - { 0x3632, 0x51 }, - { 0x3702, 0x10 }, - { 0x3703, 0xb2 }, - { 0x3704, 0x18 }, - { 0x370b, 0x40 }, - { 0x370d, 0x3 }, - { 0x3631, 0x1 }, - { 0x3632, 0x52 }, - { 0x3606, 0x24 }, - { 0x3620, 0x96 }, - { 0x5785, 0x7 }, - { 0x3a13, 0x30 }, - { 0x3600, 0x52 }, - { 0x3604, 0x48 }, - { 0x3606, 0x1b }, - { 0x370d, 0xb }, - { 0x370f, 0xc0 }, - { 0x3709, 0x1 }, - { 0x3823, 0x0 }, - { 0x5007, 0x0 }, - { 0x5009, 0x0 }, - { 0x5011, 0x0 }, - { 0x5013, 0x0 }, - { 0x519e, 0x0 }, - { 0x5086, 0x0 }, - { 0x5087, 0x0 }, - { 0x5088, 0x0 }, - { 0x5089, 0x0 }, - { 0x302b, 0x0 }, - { 0x3503, 0x7 }, - { 0x3011, 0x8 }, - { 0x350c, 0x2 }, - { 0x350d, 0xe4 }, - { 0x3621, 0xc9 }, - { 0x370a, 0x81 }, - { 0xffff, 0xff }, -}; - -static struct regval_list ov5642_default_regs_finalise[] = { - { 0x3810, 0xc2 }, - { 0x3818, 0xc9 }, - { 0x381c, 0x10 }, - { 0x381d, 0xa0 }, - { 0x381e, 0x5 }, - { 0x381f, 0xb0 }, - { 0x3820, 0x0 }, - { 0x3821, 0x0 }, - { 0x3824, 0x11 }, - { 0x3a08, 0x1b }, - { 0x3a09, 0xc0 }, - { 0x3a0a, 0x17 }, - { 0x3a0b, 0x20 }, - { 0x3a0d, 0x2 }, - { 0x3a0e, 0x1 }, - { 0x401c, 0x4 }, - { 0x5682, 0x5 }, - { 0x5683, 0x0 }, - { 0x5686, 0x2 }, - { 0x5687, 0xcc }, - { 0x5001, 0x4f }, - { 0x589b, 0x6 }, - { 0x589a, 0xc5 }, - { 0x3503, 0x0 }, - { 0x460c, 0x20 }, - { 0x460b, 0x37 }, - { 0x471c, 0xd0 }, - { 0x471d, 0x5 }, - { 0x3815, 0x1 }, - { 0x3818, 0xc1 }, - { 0x501f, 0x0 }, - { 0x5002, 0xe0 }, - { 0x4300, 0x32 }, /* UYVY */ - { 0x3002, 0x1c }, - { 0x4800, 0x14 }, - { 0x4801, 0xf }, - { 0x3007, 0x3b }, - { 0x300e, 0x4 }, - { 0x4803, 0x50 }, - { 0x3815, 0x1 }, - { 0x4713, 0x2 }, - { 0x4842, 0x1 }, - { 0x300f, 0xe }, - { 0x3003, 0x3 }, - { 0x3003, 0x1 }, - { 0xffff, 0xff }, -}; - -struct ov5642_datafmt { - u32 code; - enum v4l2_colorspace colorspace; -}; - -struct ov5642 { - struct v4l2_subdev subdev; - const struct ov5642_datafmt *fmt; - struct v4l2_rect crop_rect; - struct v4l2_clk *clk; - - /* blanking information */ - int total_width; - int total_height; -}; - -static const struct ov5642_datafmt ov5642_colour_fmts[] = { - {MEDIA_BUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_JPEG}, -}; - -static struct ov5642 *to_ov5642(const struct i2c_client *client) -{ - return container_of(i2c_get_clientdata(client), struct ov5642, subdev); -} - -/* Find a data 
format by a pixel code in an array */ -static const struct ov5642_datafmt - *ov5642_find_datafmt(u32 code) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(ov5642_colour_fmts); i++) - if (ov5642_colour_fmts[i].code == code) - return ov5642_colour_fmts + i; - - return NULL; -} - -static int reg_read(struct i2c_client *client, u16 reg, u8 *val) -{ - int ret; - /* We have 16-bit i2c addresses - care for endianness */ - unsigned char data[2] = { reg >> 8, reg & 0xff }; - - ret = i2c_master_send(client, data, 2); - if (ret < 2) { - dev_err(&client->dev, "%s: i2c read error, reg: %x\n", - __func__, reg); - return ret < 0 ? ret : -EIO; - } - - ret = i2c_master_recv(client, val, 1); - if (ret < 1) { - dev_err(&client->dev, "%s: i2c read error, reg: %x\n", - __func__, reg); - return ret < 0 ? ret : -EIO; - } - return 0; -} - -static int reg_write(struct i2c_client *client, u16 reg, u8 val) -{ - int ret; - unsigned char data[3] = { reg >> 8, reg & 0xff, val }; - - ret = i2c_master_send(client, data, 3); - if (ret < 3) { - dev_err(&client->dev, "%s: i2c write error, reg: %x\n", - __func__, reg); - return ret < 0 ? ret : -EIO; - } - - return 0; -} - -/* - * convenience function to write 16 bit register values that are split up - * into two consecutive high and low parts - */ -static int reg_write16(struct i2c_client *client, u16 reg, u16 val16) -{ - int ret; - - ret = reg_write(client, reg, val16 >> 8); - if (ret) - return ret; - return reg_write(client, reg + 1, val16 & 0x00ff); -} - -#ifdef CONFIG_VIDEO_ADV_DEBUG -static int ov5642_get_register(struct v4l2_subdev *sd, - struct v4l2_dbg_register *reg) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; - u8 val; - - if (reg->reg & ~0xffff) - return -EINVAL; - - reg->size = 1; - - ret = reg_read(client, reg->reg, &val); - if (!ret) - reg->val = (__u64)val; - - return ret; -} - -static int ov5642_set_register(struct v4l2_subdev *sd, - const struct v4l2_dbg_register *reg) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - - if (reg->reg & ~0xffff || reg->val & ~0xff) - return -EINVAL; - - return reg_write(client, reg->reg, reg->val); -} -#endif - -static int ov5642_write_array(struct i2c_client *client, - struct regval_list *vals) -{ - while (vals->reg_num != 0xffff || vals->value != 0xff) { - int ret = reg_write(client, vals->reg_num, vals->value); - if (ret < 0) - return ret; - vals++; - } - dev_dbg(&client->dev, "Register list loaded\n"); - return 0; -} - -static int ov5642_set_resolution(struct v4l2_subdev *sd) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct ov5642 *priv = to_ov5642(client); - int width = priv->crop_rect.width; - int height = priv->crop_rect.height; - int total_width = priv->total_width; - int total_height = priv->total_height; - int start_x = (OV5642_SENSOR_SIZE_X - width) / 2; - int start_y = (OV5642_SENSOR_SIZE_Y - height) / 2; - int ret; - - /* - * This should set the starting point for cropping. - * Doesn't work so far. - */ - ret = reg_write16(client, REG_WINDOW_START_X_HIGH, start_x); - if (!ret) - ret = reg_write16(client, REG_WINDOW_START_Y_HIGH, start_y); - if (!ret) { - priv->crop_rect.left = start_x; - priv->crop_rect.top = start_y; - } - - if (!ret) - ret = reg_write16(client, REG_WINDOW_WIDTH_HIGH, width); - if (!ret) - ret = reg_write16(client, REG_WINDOW_HEIGHT_HIGH, height); - if (ret) - return ret; - priv->crop_rect.width = width; - priv->crop_rect.height = height; - - /* Set the output window size. Only 1:1 scale is supported so far. 
*/ - ret = reg_write16(client, REG_OUT_WIDTH_HIGH, width); - if (!ret) - ret = reg_write16(client, REG_OUT_HEIGHT_HIGH, height); - - /* Total width = output size + blanking */ - if (!ret) - ret = reg_write16(client, REG_OUT_TOTAL_WIDTH_HIGH, total_width); - if (!ret) - ret = reg_write16(client, REG_OUT_TOTAL_HEIGHT_HIGH, total_height); - - /* Sets the window for AWB calculations */ - if (!ret) - ret = reg_write16(client, REG_AVG_WINDOW_END_X_HIGH, width); - if (!ret) - ret = reg_write16(client, REG_AVG_WINDOW_END_Y_HIGH, height); - - return ret; -} - -static int ov5642_set_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *mf = &format->format; - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct ov5642 *priv = to_ov5642(client); - const struct ov5642_datafmt *fmt = ov5642_find_datafmt(mf->code); - - if (format->pad) - return -EINVAL; - - mf->width = priv->crop_rect.width; - mf->height = priv->crop_rect.height; - - if (!fmt) { - if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) - return -EINVAL; - mf->code = ov5642_colour_fmts[0].code; - mf->colorspace = ov5642_colour_fmts[0].colorspace; - } - - mf->field = V4L2_FIELD_NONE; - - if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) - priv->fmt = fmt; - else - cfg->try_fmt = *mf; - return 0; -} - -static int ov5642_get_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *mf = &format->format; - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct ov5642 *priv = to_ov5642(client); - - const struct ov5642_datafmt *fmt = priv->fmt; - - if (format->pad) - return -EINVAL; - - mf->code = fmt->code; - mf->colorspace = fmt->colorspace; - mf->width = priv->crop_rect.width; - mf->height = priv->crop_rect.height; - mf->field = V4L2_FIELD_NONE; - - return 0; -} - -static int ov5642_enum_mbus_code(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_mbus_code_enum *code) -{ - if (code->pad || code->index >= ARRAY_SIZE(ov5642_colour_fmts)) - return -EINVAL; - - code->code = ov5642_colour_fmts[code->index].code; - return 0; -} - -static int ov5642_set_selection(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_selection *sel) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct ov5642 *priv = to_ov5642(client); - struct v4l2_rect rect = sel->r; - int ret; - - if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE || - sel->target != V4L2_SEL_TGT_CROP) - return -EINVAL; - - v4l_bound_align_image(&rect.width, 48, OV5642_MAX_WIDTH, 1, - &rect.height, 32, OV5642_MAX_HEIGHT, 1, 0); - - priv->crop_rect.width = rect.width; - priv->crop_rect.height = rect.height; - priv->total_width = rect.width + BLANKING_EXTRA_WIDTH; - priv->total_height = max_t(int, rect.height + - BLANKING_EXTRA_HEIGHT, - BLANKING_MIN_HEIGHT); - priv->crop_rect.width = rect.width; - priv->crop_rect.height = rect.height; - - ret = ov5642_write_array(client, ov5642_default_regs_init); - if (!ret) - ret = ov5642_set_resolution(sd); - if (!ret) - ret = ov5642_write_array(client, ov5642_default_regs_finalise); - - return ret; -} - -static int ov5642_get_selection(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_selection *sel) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct ov5642 *priv = to_ov5642(client); - - if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE) - return -EINVAL; - - switch (sel->target) { - case 
V4L2_SEL_TGT_CROP_BOUNDS: - sel->r.left = 0; - sel->r.top = 0; - sel->r.width = OV5642_MAX_WIDTH; - sel->r.height = OV5642_MAX_HEIGHT; - return 0; - case V4L2_SEL_TGT_CROP: - sel->r = priv->crop_rect; - return 0; - default: - return -EINVAL; - } -} - -static int ov5642_g_mbus_config(struct v4l2_subdev *sd, - struct v4l2_mbus_config *cfg) -{ - cfg->type = V4L2_MBUS_CSI2_DPHY; - cfg->flags = V4L2_MBUS_CSI2_2_LANE | V4L2_MBUS_CSI2_CHANNEL_0 | - V4L2_MBUS_CSI2_CONTINUOUS_CLOCK; - - return 0; -} - -static int ov5642_s_power(struct v4l2_subdev *sd, int on) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - struct ov5642 *priv = to_ov5642(client); - int ret; - - if (!on) - return soc_camera_power_off(&client->dev, ssdd, priv->clk); - - ret = soc_camera_power_on(&client->dev, ssdd, priv->clk); - if (ret < 0) - return ret; - - ret = ov5642_write_array(client, ov5642_default_regs_init); - if (!ret) - ret = ov5642_set_resolution(sd); - if (!ret) - ret = ov5642_write_array(client, ov5642_default_regs_finalise); - - return ret; -} - -static const struct v4l2_subdev_video_ops ov5642_subdev_video_ops = { - .g_mbus_config = ov5642_g_mbus_config, -}; - -static const struct v4l2_subdev_pad_ops ov5642_subdev_pad_ops = { - .enum_mbus_code = ov5642_enum_mbus_code, - .get_selection = ov5642_get_selection, - .set_selection = ov5642_set_selection, - .get_fmt = ov5642_get_fmt, - .set_fmt = ov5642_set_fmt, -}; - -static const struct v4l2_subdev_core_ops ov5642_subdev_core_ops = { - .s_power = ov5642_s_power, -#ifdef CONFIG_VIDEO_ADV_DEBUG - .g_register = ov5642_get_register, - .s_register = ov5642_set_register, -#endif -}; - -static const struct v4l2_subdev_ops ov5642_subdev_ops = { - .core = &ov5642_subdev_core_ops, - .video = &ov5642_subdev_video_ops, - .pad = &ov5642_subdev_pad_ops, -}; - -static int ov5642_video_probe(struct i2c_client *client) -{ - struct v4l2_subdev *subdev = i2c_get_clientdata(client); - int ret; - u8 id_high, id_low; - u16 id; - - ret = ov5642_s_power(subdev, 1); - if (ret < 0) - return ret; - - /* Read sensor Model ID */ - ret = reg_read(client, REG_CHIP_ID_HIGH, &id_high); - if (ret < 0) - goto done; - - id = id_high << 8; - - ret = reg_read(client, REG_CHIP_ID_LOW, &id_low); - if (ret < 0) - goto done; - - id |= id_low; - - dev_info(&client->dev, "Chip ID 0x%04x detected\n", id); - - if (id != 0x5642) { - ret = -ENODEV; - goto done; - } - - ret = 0; - -done: - ov5642_s_power(subdev, 0); - return ret; -} - -static int ov5642_probe(struct i2c_client *client, - const struct i2c_device_id *did) -{ - struct ov5642 *priv; - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - int ret; - - if (!ssdd) { - dev_err(&client->dev, "OV5642: missing platform data!\n"); - return -EINVAL; - } - - priv = devm_kzalloc(&client->dev, sizeof(struct ov5642), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - v4l2_i2c_subdev_init(&priv->subdev, client, &ov5642_subdev_ops); - - priv->fmt = &ov5642_colour_fmts[0]; - - priv->crop_rect.width = OV5642_DEFAULT_WIDTH; - priv->crop_rect.height = OV5642_DEFAULT_HEIGHT; - priv->crop_rect.left = (OV5642_MAX_WIDTH - OV5642_DEFAULT_WIDTH) / 2; - priv->crop_rect.top = (OV5642_MAX_HEIGHT - OV5642_DEFAULT_HEIGHT) / 2; - priv->total_width = OV5642_DEFAULT_WIDTH + BLANKING_EXTRA_WIDTH; - priv->total_height = BLANKING_MIN_HEIGHT; - - priv->clk = v4l2_clk_get(&client->dev, "mclk"); - if (IS_ERR(priv->clk)) - return PTR_ERR(priv->clk); - - ret = ov5642_video_probe(client); - if (ret < 
0) - v4l2_clk_put(priv->clk); - - return ret; -} - -static int ov5642_remove(struct i2c_client *client) -{ - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - struct ov5642 *priv = to_ov5642(client); - - v4l2_clk_put(priv->clk); - if (ssdd->free_bus) - ssdd->free_bus(ssdd); - - return 0; -} - -static const struct i2c_device_id ov5642_id[] = { - { "ov5642", 0 }, - { } -}; -MODULE_DEVICE_TABLE(i2c, ov5642_id); - -#if IS_ENABLED(CONFIG_OF) -static const struct of_device_id ov5642_of_match[] = { - { .compatible = "ovti,ov5642" }, - { }, -}; -MODULE_DEVICE_TABLE(of, ov5642_of_match); -#endif - -static struct i2c_driver ov5642_i2c_driver = { - .driver = { - .name = "ov5642", - .of_match_table = of_match_ptr(ov5642_of_match), - }, - .probe = ov5642_probe, - .remove = ov5642_remove, - .id_table = ov5642_id, -}; - -module_i2c_driver(ov5642_i2c_driver); - -MODULE_DESCRIPTION("Omnivision OV5642 Camera driver"); -MODULE_AUTHOR("Bastian Hecht "); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/staging/media/soc_camera/soc_ov9740.c b/drivers/staging/media/soc_camera/soc_ov9740.c deleted file mode 100644 index 7c765595d85f..000000000000 --- a/drivers/staging/media/soc_camera/soc_ov9740.c +++ /dev/null @@ -1,992 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * OmniVision OV9740 Camera Driver - * - * Copyright (C) 2011 NVIDIA Corporation - * - * Based on ov9640 camera driver. - */ -#include -#include -#include -#include -#include - -#include -#include -#include - -#define to_ov9740(sd) container_of(sd, struct ov9740_priv, subdev) - -/* General Status Registers */ -#define OV9740_MODEL_ID_HI 0x0000 -#define OV9740_MODEL_ID_LO 0x0001 -#define OV9740_REVISION_NUMBER 0x0002 -#define OV9740_MANUFACTURER_ID 0x0003 -#define OV9740_SMIA_VERSION 0x0004 - -/* General Setup Registers */ -#define OV9740_MODE_SELECT 0x0100 -#define OV9740_IMAGE_ORT 0x0101 -#define OV9740_SOFTWARE_RESET 0x0103 -#define OV9740_GRP_PARAM_HOLD 0x0104 -#define OV9740_MSK_CORRUP_FM 0x0105 - -/* Timing Setting */ -#define OV9740_FRM_LENGTH_LN_HI 0x0340 /* VTS */ -#define OV9740_FRM_LENGTH_LN_LO 0x0341 /* VTS */ -#define OV9740_LN_LENGTH_PCK_HI 0x0342 /* HTS */ -#define OV9740_LN_LENGTH_PCK_LO 0x0343 /* HTS */ -#define OV9740_X_ADDR_START_HI 0x0344 -#define OV9740_X_ADDR_START_LO 0x0345 -#define OV9740_Y_ADDR_START_HI 0x0346 -#define OV9740_Y_ADDR_START_LO 0x0347 -#define OV9740_X_ADDR_END_HI 0x0348 -#define OV9740_X_ADDR_END_LO 0x0349 -#define OV9740_Y_ADDR_END_HI 0x034a -#define OV9740_Y_ADDR_END_LO 0x034b -#define OV9740_X_OUTPUT_SIZE_HI 0x034c -#define OV9740_X_OUTPUT_SIZE_LO 0x034d -#define OV9740_Y_OUTPUT_SIZE_HI 0x034e -#define OV9740_Y_OUTPUT_SIZE_LO 0x034f - -/* IO Control Registers */ -#define OV9740_IO_CREL00 0x3002 -#define OV9740_IO_CREL01 0x3004 -#define OV9740_IO_CREL02 0x3005 -#define OV9740_IO_OUTPUT_SEL01 0x3026 -#define OV9740_IO_OUTPUT_SEL02 0x3027 - -/* AWB Registers */ -#define OV9740_AWB_MANUAL_CTRL 0x3406 - -/* Analog Control Registers */ -#define OV9740_ANALOG_CTRL01 0x3601 -#define OV9740_ANALOG_CTRL02 0x3602 -#define OV9740_ANALOG_CTRL03 0x3603 -#define OV9740_ANALOG_CTRL04 0x3604 -#define OV9740_ANALOG_CTRL10 0x3610 -#define OV9740_ANALOG_CTRL12 0x3612 -#define OV9740_ANALOG_CTRL15 0x3615 -#define OV9740_ANALOG_CTRL20 0x3620 -#define OV9740_ANALOG_CTRL21 0x3621 -#define OV9740_ANALOG_CTRL22 0x3622 -#define OV9740_ANALOG_CTRL30 0x3630 -#define OV9740_ANALOG_CTRL31 0x3631 -#define OV9740_ANALOG_CTRL32 0x3632 -#define OV9740_ANALOG_CTRL33 0x3633 - -/* Sensor Control */ -#define 
OV9740_SENSOR_CTRL03 0x3703 -#define OV9740_SENSOR_CTRL04 0x3704 -#define OV9740_SENSOR_CTRL05 0x3705 -#define OV9740_SENSOR_CTRL07 0x3707 - -/* Timing Control */ -#define OV9740_TIMING_CTRL17 0x3817 -#define OV9740_TIMING_CTRL19 0x3819 -#define OV9740_TIMING_CTRL33 0x3833 -#define OV9740_TIMING_CTRL35 0x3835 - -/* Banding Filter */ -#define OV9740_AEC_MAXEXPO_60_H 0x3a02 -#define OV9740_AEC_MAXEXPO_60_L 0x3a03 -#define OV9740_AEC_B50_STEP_HI 0x3a08 -#define OV9740_AEC_B50_STEP_LO 0x3a09 -#define OV9740_AEC_B60_STEP_HI 0x3a0a -#define OV9740_AEC_B60_STEP_LO 0x3a0b -#define OV9740_AEC_CTRL0D 0x3a0d -#define OV9740_AEC_CTRL0E 0x3a0e -#define OV9740_AEC_MAXEXPO_50_H 0x3a14 -#define OV9740_AEC_MAXEXPO_50_L 0x3a15 - -/* AEC/AGC Control */ -#define OV9740_AEC_ENABLE 0x3503 -#define OV9740_GAIN_CEILING_01 0x3a18 -#define OV9740_GAIN_CEILING_02 0x3a19 -#define OV9740_AEC_HI_THRESHOLD 0x3a11 -#define OV9740_AEC_3A1A 0x3a1a -#define OV9740_AEC_CTRL1B_WPT2 0x3a1b -#define OV9740_AEC_CTRL0F_WPT 0x3a0f -#define OV9740_AEC_CTRL10_BPT 0x3a10 -#define OV9740_AEC_CTRL1E_BPT2 0x3a1e -#define OV9740_AEC_LO_THRESHOLD 0x3a1f - -/* BLC Control */ -#define OV9740_BLC_AUTO_ENABLE 0x4002 -#define OV9740_BLC_MODE 0x4005 - -/* VFIFO */ -#define OV9740_VFIFO_READ_START_HI 0x4608 -#define OV9740_VFIFO_READ_START_LO 0x4609 - -/* DVP Control */ -#define OV9740_DVP_VSYNC_CTRL02 0x4702 -#define OV9740_DVP_VSYNC_MODE 0x4704 -#define OV9740_DVP_VSYNC_CTRL06 0x4706 - -/* PLL Setting */ -#define OV9740_PLL_MODE_CTRL01 0x3104 -#define OV9740_PRE_PLL_CLK_DIV 0x0305 -#define OV9740_PLL_MULTIPLIER 0x0307 -#define OV9740_VT_SYS_CLK_DIV 0x0303 -#define OV9740_VT_PIX_CLK_DIV 0x0301 -#define OV9740_PLL_CTRL3010 0x3010 -#define OV9740_VFIFO_CTRL00 0x460e - -/* ISP Control */ -#define OV9740_ISP_CTRL00 0x5000 -#define OV9740_ISP_CTRL01 0x5001 -#define OV9740_ISP_CTRL03 0x5003 -#define OV9740_ISP_CTRL05 0x5005 -#define OV9740_ISP_CTRL12 0x5012 -#define OV9740_ISP_CTRL19 0x5019 -#define OV9740_ISP_CTRL1A 0x501a -#define OV9740_ISP_CTRL1E 0x501e -#define OV9740_ISP_CTRL1F 0x501f -#define OV9740_ISP_CTRL20 0x5020 -#define OV9740_ISP_CTRL21 0x5021 - -/* AWB */ -#define OV9740_AWB_CTRL00 0x5180 -#define OV9740_AWB_CTRL01 0x5181 -#define OV9740_AWB_CTRL02 0x5182 -#define OV9740_AWB_CTRL03 0x5183 -#define OV9740_AWB_ADV_CTRL01 0x5184 -#define OV9740_AWB_ADV_CTRL02 0x5185 -#define OV9740_AWB_ADV_CTRL03 0x5186 -#define OV9740_AWB_ADV_CTRL04 0x5187 -#define OV9740_AWB_ADV_CTRL05 0x5188 -#define OV9740_AWB_ADV_CTRL06 0x5189 -#define OV9740_AWB_ADV_CTRL07 0x518a -#define OV9740_AWB_ADV_CTRL08 0x518b -#define OV9740_AWB_ADV_CTRL09 0x518c -#define OV9740_AWB_ADV_CTRL10 0x518d -#define OV9740_AWB_ADV_CTRL11 0x518e -#define OV9740_AWB_CTRL0F 0x518f -#define OV9740_AWB_CTRL10 0x5190 -#define OV9740_AWB_CTRL11 0x5191 -#define OV9740_AWB_CTRL12 0x5192 -#define OV9740_AWB_CTRL13 0x5193 -#define OV9740_AWB_CTRL14 0x5194 - -/* MIPI Control */ -#define OV9740_MIPI_CTRL00 0x4800 -#define OV9740_MIPI_3837 0x3837 -#define OV9740_MIPI_CTRL01 0x4801 -#define OV9740_MIPI_CTRL03 0x4803 -#define OV9740_MIPI_CTRL05 0x4805 -#define OV9740_VFIFO_RD_CTRL 0x4601 -#define OV9740_MIPI_CTRL_3012 0x3012 -#define OV9740_SC_CMMM_MIPI_CTR 0x3014 - -#define OV9740_MAX_WIDTH 1280 -#define OV9740_MAX_HEIGHT 720 - -/* Misc. 
structures */ -struct ov9740_reg { - u16 reg; - u8 val; -}; - -struct ov9740_priv { - struct v4l2_subdev subdev; - struct v4l2_ctrl_handler hdl; - struct v4l2_clk *clk; - - u16 model; - u8 revision; - u8 manid; - u8 smiaver; - - bool flag_vflip; - bool flag_hflip; - - /* For suspend/resume. */ - struct v4l2_mbus_framefmt current_mf; - bool current_enable; -}; - -static const struct ov9740_reg ov9740_defaults[] = { - /* Software Reset */ - { OV9740_SOFTWARE_RESET, 0x01 }, - - /* Banding Filter */ - { OV9740_AEC_B50_STEP_HI, 0x00 }, - { OV9740_AEC_B50_STEP_LO, 0xe8 }, - { OV9740_AEC_CTRL0E, 0x03 }, - { OV9740_AEC_MAXEXPO_50_H, 0x15 }, - { OV9740_AEC_MAXEXPO_50_L, 0xc6 }, - { OV9740_AEC_B60_STEP_HI, 0x00 }, - { OV9740_AEC_B60_STEP_LO, 0xc0 }, - { OV9740_AEC_CTRL0D, 0x04 }, - { OV9740_AEC_MAXEXPO_60_H, 0x18 }, - { OV9740_AEC_MAXEXPO_60_L, 0x20 }, - - /* LC */ - { 0x5842, 0x02 }, { 0x5843, 0x5e }, { 0x5844, 0x04 }, { 0x5845, 0x32 }, - { 0x5846, 0x03 }, { 0x5847, 0x29 }, { 0x5848, 0x02 }, { 0x5849, 0xcc }, - - /* Un-documented OV9740 registers */ - { 0x5800, 0x29 }, { 0x5801, 0x25 }, { 0x5802, 0x20 }, { 0x5803, 0x21 }, - { 0x5804, 0x26 }, { 0x5805, 0x2e }, { 0x5806, 0x11 }, { 0x5807, 0x0c }, - { 0x5808, 0x09 }, { 0x5809, 0x0a }, { 0x580a, 0x0e }, { 0x580b, 0x16 }, - { 0x580c, 0x06 }, { 0x580d, 0x02 }, { 0x580e, 0x00 }, { 0x580f, 0x00 }, - { 0x5810, 0x04 }, { 0x5811, 0x0a }, { 0x5812, 0x05 }, { 0x5813, 0x02 }, - { 0x5814, 0x00 }, { 0x5815, 0x00 }, { 0x5816, 0x03 }, { 0x5817, 0x09 }, - { 0x5818, 0x0f }, { 0x5819, 0x0a }, { 0x581a, 0x07 }, { 0x581b, 0x08 }, - { 0x581c, 0x0b }, { 0x581d, 0x14 }, { 0x581e, 0x28 }, { 0x581f, 0x23 }, - { 0x5820, 0x1d }, { 0x5821, 0x1e }, { 0x5822, 0x24 }, { 0x5823, 0x2a }, - { 0x5824, 0x4f }, { 0x5825, 0x6f }, { 0x5826, 0x5f }, { 0x5827, 0x7f }, - { 0x5828, 0x9f }, { 0x5829, 0x5f }, { 0x582a, 0x8f }, { 0x582b, 0x9e }, - { 0x582c, 0x8f }, { 0x582d, 0x9f }, { 0x582e, 0x4f }, { 0x582f, 0x87 }, - { 0x5830, 0x86 }, { 0x5831, 0x97 }, { 0x5832, 0xae }, { 0x5833, 0x3f }, - { 0x5834, 0x8e }, { 0x5835, 0x7c }, { 0x5836, 0x7e }, { 0x5837, 0xaf }, - { 0x5838, 0x8f }, { 0x5839, 0x8f }, { 0x583a, 0x9f }, { 0x583b, 0x7f }, - { 0x583c, 0x5f }, - - /* Y Gamma */ - { 0x5480, 0x07 }, { 0x5481, 0x18 }, { 0x5482, 0x2c }, { 0x5483, 0x4e }, - { 0x5484, 0x5e }, { 0x5485, 0x6b }, { 0x5486, 0x77 }, { 0x5487, 0x82 }, - { 0x5488, 0x8c }, { 0x5489, 0x95 }, { 0x548a, 0xa4 }, { 0x548b, 0xb1 }, - { 0x548c, 0xc6 }, { 0x548d, 0xd8 }, { 0x548e, 0xe9 }, - - /* UV Gamma */ - { 0x5490, 0x0f }, { 0x5491, 0xff }, { 0x5492, 0x0d }, { 0x5493, 0x05 }, - { 0x5494, 0x07 }, { 0x5495, 0x1a }, { 0x5496, 0x04 }, { 0x5497, 0x01 }, - { 0x5498, 0x03 }, { 0x5499, 0x53 }, { 0x549a, 0x02 }, { 0x549b, 0xeb }, - { 0x549c, 0x02 }, { 0x549d, 0xa0 }, { 0x549e, 0x02 }, { 0x549f, 0x67 }, - { 0x54a0, 0x02 }, { 0x54a1, 0x3b }, { 0x54a2, 0x02 }, { 0x54a3, 0x18 }, - { 0x54a4, 0x01 }, { 0x54a5, 0xe7 }, { 0x54a6, 0x01 }, { 0x54a7, 0xc3 }, - { 0x54a8, 0x01 }, { 0x54a9, 0x94 }, { 0x54aa, 0x01 }, { 0x54ab, 0x72 }, - { 0x54ac, 0x01 }, { 0x54ad, 0x57 }, - - /* AWB */ - { OV9740_AWB_CTRL00, 0xf0 }, - { OV9740_AWB_CTRL01, 0x00 }, - { OV9740_AWB_CTRL02, 0x41 }, - { OV9740_AWB_CTRL03, 0x42 }, - { OV9740_AWB_ADV_CTRL01, 0x8a }, - { OV9740_AWB_ADV_CTRL02, 0x61 }, - { OV9740_AWB_ADV_CTRL03, 0xce }, - { OV9740_AWB_ADV_CTRL04, 0xa8 }, - { OV9740_AWB_ADV_CTRL05, 0x17 }, - { OV9740_AWB_ADV_CTRL06, 0x1f }, - { OV9740_AWB_ADV_CTRL07, 0x27 }, - { OV9740_AWB_ADV_CTRL08, 0x41 }, - { OV9740_AWB_ADV_CTRL09, 0x34 }, - { OV9740_AWB_ADV_CTRL10, 0xf0 }, - { 
OV9740_AWB_ADV_CTRL11, 0x10 }, - { OV9740_AWB_CTRL0F, 0xff }, - { OV9740_AWB_CTRL10, 0x00 }, - { OV9740_AWB_CTRL11, 0xff }, - { OV9740_AWB_CTRL12, 0x00 }, - { OV9740_AWB_CTRL13, 0xff }, - { OV9740_AWB_CTRL14, 0x00 }, - - /* CIP */ - { 0x530d, 0x12 }, - - /* CMX */ - { 0x5380, 0x01 }, { 0x5381, 0x00 }, { 0x5382, 0x00 }, { 0x5383, 0x17 }, - { 0x5384, 0x00 }, { 0x5385, 0x01 }, { 0x5386, 0x00 }, { 0x5387, 0x00 }, - { 0x5388, 0x00 }, { 0x5389, 0xe0 }, { 0x538a, 0x00 }, { 0x538b, 0x20 }, - { 0x538c, 0x00 }, { 0x538d, 0x00 }, { 0x538e, 0x00 }, { 0x538f, 0x16 }, - { 0x5390, 0x00 }, { 0x5391, 0x9c }, { 0x5392, 0x00 }, { 0x5393, 0xa0 }, - { 0x5394, 0x18 }, - - /* 50/60 Detection */ - { 0x3c0a, 0x9c }, { 0x3c0b, 0x3f }, - - /* Output Select */ - { OV9740_IO_OUTPUT_SEL01, 0x00 }, - { OV9740_IO_OUTPUT_SEL02, 0x00 }, - { OV9740_IO_CREL00, 0x00 }, - { OV9740_IO_CREL01, 0x00 }, - { OV9740_IO_CREL02, 0x00 }, - - /* AWB Control */ - { OV9740_AWB_MANUAL_CTRL, 0x00 }, - - /* Analog Control */ - { OV9740_ANALOG_CTRL03, 0xaa }, - { OV9740_ANALOG_CTRL32, 0x2f }, - { OV9740_ANALOG_CTRL20, 0x66 }, - { OV9740_ANALOG_CTRL21, 0xc0 }, - { OV9740_ANALOG_CTRL31, 0x52 }, - { OV9740_ANALOG_CTRL33, 0x50 }, - { OV9740_ANALOG_CTRL30, 0xca }, - { OV9740_ANALOG_CTRL04, 0x0c }, - { OV9740_ANALOG_CTRL01, 0x40 }, - { OV9740_ANALOG_CTRL02, 0x16 }, - { OV9740_ANALOG_CTRL10, 0xa1 }, - { OV9740_ANALOG_CTRL12, 0x24 }, - { OV9740_ANALOG_CTRL22, 0x9f }, - { OV9740_ANALOG_CTRL15, 0xf0 }, - - /* Sensor Control */ - { OV9740_SENSOR_CTRL03, 0x42 }, - { OV9740_SENSOR_CTRL04, 0x10 }, - { OV9740_SENSOR_CTRL05, 0x45 }, - { OV9740_SENSOR_CTRL07, 0x14 }, - - /* Timing Control */ - { OV9740_TIMING_CTRL33, 0x04 }, - { OV9740_TIMING_CTRL35, 0x02 }, - { OV9740_TIMING_CTRL19, 0x6e }, - { OV9740_TIMING_CTRL17, 0x94 }, - - /* AEC/AGC Control */ - { OV9740_AEC_ENABLE, 0x10 }, - { OV9740_GAIN_CEILING_01, 0x00 }, - { OV9740_GAIN_CEILING_02, 0x7f }, - { OV9740_AEC_HI_THRESHOLD, 0xa0 }, - { OV9740_AEC_3A1A, 0x05 }, - { OV9740_AEC_CTRL1B_WPT2, 0x50 }, - { OV9740_AEC_CTRL0F_WPT, 0x50 }, - { OV9740_AEC_CTRL10_BPT, 0x4c }, - { OV9740_AEC_CTRL1E_BPT2, 0x4c }, - { OV9740_AEC_LO_THRESHOLD, 0x26 }, - - /* BLC Control */ - { OV9740_BLC_AUTO_ENABLE, 0x45 }, - { OV9740_BLC_MODE, 0x18 }, - - /* DVP Control */ - { OV9740_DVP_VSYNC_CTRL02, 0x04 }, - { OV9740_DVP_VSYNC_MODE, 0x00 }, - { OV9740_DVP_VSYNC_CTRL06, 0x08 }, - - /* PLL Setting */ - { OV9740_PLL_MODE_CTRL01, 0x20 }, - { OV9740_PRE_PLL_CLK_DIV, 0x03 }, - { OV9740_PLL_MULTIPLIER, 0x4c }, - { OV9740_VT_SYS_CLK_DIV, 0x01 }, - { OV9740_VT_PIX_CLK_DIV, 0x08 }, - { OV9740_PLL_CTRL3010, 0x01 }, - { OV9740_VFIFO_CTRL00, 0x82 }, - - /* Timing Setting */ - /* VTS */ - { OV9740_FRM_LENGTH_LN_HI, 0x03 }, - { OV9740_FRM_LENGTH_LN_LO, 0x07 }, - /* HTS */ - { OV9740_LN_LENGTH_PCK_HI, 0x06 }, - { OV9740_LN_LENGTH_PCK_LO, 0x62 }, - - /* MIPI Control */ - { OV9740_MIPI_CTRL00, 0x44 }, /* 0x64 for discontinuous clk */ - { OV9740_MIPI_3837, 0x01 }, - { OV9740_MIPI_CTRL01, 0x0f }, - { OV9740_MIPI_CTRL03, 0x05 }, - { OV9740_MIPI_CTRL05, 0x10 }, - { OV9740_VFIFO_RD_CTRL, 0x16 }, - { OV9740_MIPI_CTRL_3012, 0x70 }, - { OV9740_SC_CMMM_MIPI_CTR, 0x01 }, - - /* YUYV order */ - { OV9740_ISP_CTRL19, 0x02 }, -}; - -static u32 ov9740_codes[] = { - MEDIA_BUS_FMT_YUYV8_2X8, -}; - -/* read a register */ -static int ov9740_reg_read(struct i2c_client *client, u16 reg, u8 *val) -{ - int ret; - struct i2c_msg msg[] = { - { - .addr = client->addr, - .flags = 0, - .len = 2, - .buf = (u8 *)®, - }, - { - .addr = client->addr, - .flags = I2C_M_RD, - .len = 
1, - .buf = val, - }, - }; - - reg = swab16(reg); - - ret = i2c_transfer(client->adapter, msg, 2); - if (ret < 0) { - dev_err(&client->dev, "Failed reading register 0x%04x!\n", reg); - return ret; - } - - return 0; -} - -/* write a register */ -static int ov9740_reg_write(struct i2c_client *client, u16 reg, u8 val) -{ - struct i2c_msg msg; - struct { - u16 reg; - u8 val; - } __packed buf; - int ret; - - reg = swab16(reg); - - buf.reg = reg; - buf.val = val; - - msg.addr = client->addr; - msg.flags = 0; - msg.len = 3; - msg.buf = (u8 *)&buf; - - ret = i2c_transfer(client->adapter, &msg, 1); - if (ret < 0) { - dev_err(&client->dev, "Failed writing register 0x%04x!\n", reg); - return ret; - } - - return 0; -} - - -/* Read a register, alter its bits, write it back */ -static int ov9740_reg_rmw(struct i2c_client *client, u16 reg, u8 set, u8 unset) -{ - u8 val; - int ret; - - ret = ov9740_reg_read(client, reg, &val); - if (ret < 0) { - dev_err(&client->dev, - "[Read]-Modify-Write of register 0x%04x failed!\n", - reg); - return ret; - } - - val |= set; - val &= ~unset; - - ret = ov9740_reg_write(client, reg, val); - if (ret < 0) { - dev_err(&client->dev, - "Read-Modify-[Write] of register 0x%04x failed!\n", - reg); - return ret; - } - - return 0; -} - -static int ov9740_reg_write_array(struct i2c_client *client, - const struct ov9740_reg *regarray, - int regarraylen) -{ - int i; - int ret; - - for (i = 0; i < regarraylen; i++) { - ret = ov9740_reg_write(client, - regarray[i].reg, regarray[i].val); - if (ret < 0) - return ret; - } - - return 0; -} - -/* Start/Stop streaming from the device */ -static int ov9740_s_stream(struct v4l2_subdev *sd, int enable) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct ov9740_priv *priv = to_ov9740(sd); - int ret; - - /* Program orientation register. */ - if (priv->flag_vflip) - ret = ov9740_reg_rmw(client, OV9740_IMAGE_ORT, 0x2, 0); - else - ret = ov9740_reg_rmw(client, OV9740_IMAGE_ORT, 0, 0x2); - if (ret < 0) - return ret; - - if (priv->flag_hflip) - ret = ov9740_reg_rmw(client, OV9740_IMAGE_ORT, 0x1, 0); - else - ret = ov9740_reg_rmw(client, OV9740_IMAGE_ORT, 0, 0x1); - if (ret < 0) - return ret; - - if (enable) { - dev_dbg(&client->dev, "Enabling Streaming\n"); - /* Start Streaming */ - ret = ov9740_reg_write(client, OV9740_MODE_SELECT, 0x01); - - } else { - dev_dbg(&client->dev, "Disabling Streaming\n"); - /* Software Reset */ - ret = ov9740_reg_write(client, OV9740_SOFTWARE_RESET, 0x01); - if (!ret) - /* Setting Streaming to Standby */ - ret = ov9740_reg_write(client, OV9740_MODE_SELECT, - 0x00); - } - - priv->current_enable = enable; - - return ret; -} - -/* select nearest higher resolution for capture */ -static void ov9740_res_roundup(u32 *width, u32 *height) -{ - /* Width must be a multiple of 4 pixels. */ - *width = ALIGN(*width, 4); - - /* Max resolution is 1280x720 (720p). */ - if (*width > OV9740_MAX_WIDTH) - *width = OV9740_MAX_WIDTH; - - if (*height > OV9740_MAX_HEIGHT) - *height = OV9740_MAX_HEIGHT; -} - -/* Setup registers according to resolution and color encoding */ -static int ov9740_set_res(struct i2c_client *client, u32 width, u32 height) -{ - u32 x_start; - u32 y_start; - u32 x_end; - u32 y_end; - bool scaling = false; - u32 scale_input_x; - u32 scale_input_y; - int ret; - - if ((width != OV9740_MAX_WIDTH) || (height != OV9740_MAX_HEIGHT)) - scaling = true; - - /* - * Try to use as much of the sensor area as possible when supporting - * smaller resolutions. 
Depending on the aspect ratio of the - * chosen resolution, we can either use the full width of the sensor, - * or the full height of the sensor (or both if the aspect ratio is - * the same as 1280x720. - */ - if ((OV9740_MAX_WIDTH * height) > (OV9740_MAX_HEIGHT * width)) { - scale_input_x = (OV9740_MAX_HEIGHT * width) / height; - scale_input_y = OV9740_MAX_HEIGHT; - } else { - scale_input_x = OV9740_MAX_WIDTH; - scale_input_y = (OV9740_MAX_WIDTH * height) / width; - } - - /* These describe the area of the sensor to use. */ - x_start = (OV9740_MAX_WIDTH - scale_input_x) / 2; - y_start = (OV9740_MAX_HEIGHT - scale_input_y) / 2; - x_end = x_start + scale_input_x - 1; - y_end = y_start + scale_input_y - 1; - - ret = ov9740_reg_write(client, OV9740_X_ADDR_START_HI, x_start >> 8); - if (ret) - goto done; - ret = ov9740_reg_write(client, OV9740_X_ADDR_START_LO, x_start & 0xff); - if (ret) - goto done; - ret = ov9740_reg_write(client, OV9740_Y_ADDR_START_HI, y_start >> 8); - if (ret) - goto done; - ret = ov9740_reg_write(client, OV9740_Y_ADDR_START_LO, y_start & 0xff); - if (ret) - goto done; - - ret = ov9740_reg_write(client, OV9740_X_ADDR_END_HI, x_end >> 8); - if (ret) - goto done; - ret = ov9740_reg_write(client, OV9740_X_ADDR_END_LO, x_end & 0xff); - if (ret) - goto done; - ret = ov9740_reg_write(client, OV9740_Y_ADDR_END_HI, y_end >> 8); - if (ret) - goto done; - ret = ov9740_reg_write(client, OV9740_Y_ADDR_END_LO, y_end & 0xff); - if (ret) - goto done; - - ret = ov9740_reg_write(client, OV9740_X_OUTPUT_SIZE_HI, width >> 8); - if (ret) - goto done; - ret = ov9740_reg_write(client, OV9740_X_OUTPUT_SIZE_LO, width & 0xff); - if (ret) - goto done; - ret = ov9740_reg_write(client, OV9740_Y_OUTPUT_SIZE_HI, height >> 8); - if (ret) - goto done; - ret = ov9740_reg_write(client, OV9740_Y_OUTPUT_SIZE_LO, height & 0xff); - if (ret) - goto done; - - ret = ov9740_reg_write(client, OV9740_ISP_CTRL1E, scale_input_x >> 8); - if (ret) - goto done; - ret = ov9740_reg_write(client, OV9740_ISP_CTRL1F, scale_input_x & 0xff); - if (ret) - goto done; - ret = ov9740_reg_write(client, OV9740_ISP_CTRL20, scale_input_y >> 8); - if (ret) - goto done; - ret = ov9740_reg_write(client, OV9740_ISP_CTRL21, scale_input_y & 0xff); - if (ret) - goto done; - - ret = ov9740_reg_write(client, OV9740_VFIFO_READ_START_HI, - (scale_input_x - width) >> 8); - if (ret) - goto done; - ret = ov9740_reg_write(client, OV9740_VFIFO_READ_START_LO, - (scale_input_x - width) & 0xff); - if (ret) - goto done; - - ret = ov9740_reg_write(client, OV9740_ISP_CTRL00, 0xff); - if (ret) - goto done; - ret = ov9740_reg_write(client, OV9740_ISP_CTRL01, 0xef | - (scaling << 4)); - if (ret) - goto done; - ret = ov9740_reg_write(client, OV9740_ISP_CTRL03, 0xff); - -done: - return ret; -} - -/* set the format we will capture in */ -static int ov9740_s_fmt(struct v4l2_subdev *sd, - struct v4l2_mbus_framefmt *mf) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct ov9740_priv *priv = to_ov9740(sd); - int ret; - - ret = ov9740_reg_write_array(client, ov9740_defaults, - ARRAY_SIZE(ov9740_defaults)); - if (ret < 0) - return ret; - - ret = ov9740_set_res(client, mf->width, mf->height); - if (ret < 0) - return ret; - - priv->current_mf = *mf; - return ret; -} - -static int ov9740_set_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *mf = &format->format; - - if (format->pad) - return -EINVAL; - - ov9740_res_roundup(&mf->width, &mf->height); - - mf->field = 
V4L2_FIELD_NONE; - mf->code = MEDIA_BUS_FMT_YUYV8_2X8; - mf->colorspace = V4L2_COLORSPACE_SRGB; - - if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) - return ov9740_s_fmt(sd, mf); - cfg->try_fmt = *mf; - return 0; -} - -static int ov9740_enum_mbus_code(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_mbus_code_enum *code) -{ - if (code->pad || code->index >= ARRAY_SIZE(ov9740_codes)) - return -EINVAL; - - code->code = ov9740_codes[code->index]; - - return 0; -} - -static int ov9740_get_selection(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_selection *sel) -{ - if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE) - return -EINVAL; - - switch (sel->target) { - case V4L2_SEL_TGT_CROP_BOUNDS: - case V4L2_SEL_TGT_CROP: - sel->r.left = 0; - sel->r.top = 0; - sel->r.width = OV9740_MAX_WIDTH; - sel->r.height = OV9740_MAX_HEIGHT; - return 0; - default: - return -EINVAL; - } -} - -/* Set status of additional camera capabilities */ -static int ov9740_s_ctrl(struct v4l2_ctrl *ctrl) -{ - struct ov9740_priv *priv = - container_of(ctrl->handler, struct ov9740_priv, hdl); - - switch (ctrl->id) { - case V4L2_CID_VFLIP: - priv->flag_vflip = ctrl->val; - break; - case V4L2_CID_HFLIP: - priv->flag_hflip = ctrl->val; - break; - default: - return -EINVAL; - } - - return 0; -} - -static int ov9740_s_power(struct v4l2_subdev *sd, int on) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - struct ov9740_priv *priv = to_ov9740(sd); - int ret; - - if (on) { - ret = soc_camera_power_on(&client->dev, ssdd, priv->clk); - if (ret < 0) - return ret; - - if (priv->current_enable) { - ov9740_s_fmt(sd, &priv->current_mf); - ov9740_s_stream(sd, 1); - } - } else { - if (priv->current_enable) { - ov9740_s_stream(sd, 0); - priv->current_enable = true; - } - - soc_camera_power_off(&client->dev, ssdd, priv->clk); - } - - return 0; -} - -#ifdef CONFIG_VIDEO_ADV_DEBUG -static int ov9740_get_register(struct v4l2_subdev *sd, - struct v4l2_dbg_register *reg) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; - u8 val; - - if (reg->reg & ~0xffff) - return -EINVAL; - - reg->size = 2; - - ret = ov9740_reg_read(client, reg->reg, &val); - if (ret) - return ret; - - reg->val = (__u64)val; - - return ret; -} - -static int ov9740_set_register(struct v4l2_subdev *sd, - const struct v4l2_dbg_register *reg) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - - if (reg->reg & ~0xffff || reg->val & ~0xff) - return -EINVAL; - - return ov9740_reg_write(client, reg->reg, reg->val); -} -#endif - -static int ov9740_video_probe(struct i2c_client *client) -{ - struct v4l2_subdev *sd = i2c_get_clientdata(client); - struct ov9740_priv *priv = to_ov9740(sd); - u8 modelhi, modello; - int ret; - - ret = ov9740_s_power(&priv->subdev, 1); - if (ret < 0) - return ret; - - /* - * check and show product ID and manufacturer ID - */ - ret = ov9740_reg_read(client, OV9740_MODEL_ID_HI, &modelhi); - if (ret < 0) - goto done; - - ret = ov9740_reg_read(client, OV9740_MODEL_ID_LO, &modello); - if (ret < 0) - goto done; - - priv->model = (modelhi << 8) | modello; - - ret = ov9740_reg_read(client, OV9740_REVISION_NUMBER, &priv->revision); - if (ret < 0) - goto done; - - ret = ov9740_reg_read(client, OV9740_MANUFACTURER_ID, &priv->manid); - if (ret < 0) - goto done; - - ret = ov9740_reg_read(client, OV9740_SMIA_VERSION, &priv->smiaver); - if (ret < 0) - goto done; - - if (priv->model != 0x9740) { - ret = 
-ENODEV; - goto done; - } - - dev_info(&client->dev, "ov9740 Model ID 0x%04x, Revision 0x%02x, Manufacturer 0x%02x, SMIA Version 0x%02x\n", - priv->model, priv->revision, priv->manid, priv->smiaver); - - ret = v4l2_ctrl_handler_setup(&priv->hdl); - -done: - ov9740_s_power(&priv->subdev, 0); - return ret; -} - -/* Request bus settings on camera side */ -static int ov9740_g_mbus_config(struct v4l2_subdev *sd, - struct v4l2_mbus_config *cfg) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - - cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER | - V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_HIGH | - V4L2_MBUS_DATA_ACTIVE_HIGH; - cfg->type = V4L2_MBUS_PARALLEL; - cfg->flags = soc_camera_apply_board_flags(ssdd, cfg); - - return 0; -} - -static const struct v4l2_subdev_video_ops ov9740_video_ops = { - .s_stream = ov9740_s_stream, - .g_mbus_config = ov9740_g_mbus_config, -}; - -static const struct v4l2_subdev_core_ops ov9740_core_ops = { - .s_power = ov9740_s_power, -#ifdef CONFIG_VIDEO_ADV_DEBUG - .g_register = ov9740_get_register, - .s_register = ov9740_set_register, -#endif -}; - -static const struct v4l2_subdev_pad_ops ov9740_pad_ops = { - .enum_mbus_code = ov9740_enum_mbus_code, - .get_selection = ov9740_get_selection, - .set_fmt = ov9740_set_fmt, -}; - -static const struct v4l2_subdev_ops ov9740_subdev_ops = { - .core = &ov9740_core_ops, - .video = &ov9740_video_ops, - .pad = &ov9740_pad_ops, -}; - -static const struct v4l2_ctrl_ops ov9740_ctrl_ops = { - .s_ctrl = ov9740_s_ctrl, -}; - -/* - * i2c_driver function - */ -static int ov9740_probe(struct i2c_client *client, - const struct i2c_device_id *did) -{ - struct ov9740_priv *priv; - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - int ret; - - if (!ssdd) { - dev_err(&client->dev, "Missing platform_data for driver\n"); - return -EINVAL; - } - - priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - v4l2_i2c_subdev_init(&priv->subdev, client, &ov9740_subdev_ops); - v4l2_ctrl_handler_init(&priv->hdl, 13); - v4l2_ctrl_new_std(&priv->hdl, &ov9740_ctrl_ops, - V4L2_CID_VFLIP, 0, 1, 1, 0); - v4l2_ctrl_new_std(&priv->hdl, &ov9740_ctrl_ops, - V4L2_CID_HFLIP, 0, 1, 1, 0); - priv->subdev.ctrl_handler = &priv->hdl; - if (priv->hdl.error) - return priv->hdl.error; - - priv->clk = v4l2_clk_get(&client->dev, "mclk"); - if (IS_ERR(priv->clk)) { - ret = PTR_ERR(priv->clk); - goto eclkget; - } - - ret = ov9740_video_probe(client); - if (ret < 0) { - v4l2_clk_put(priv->clk); -eclkget: - v4l2_ctrl_handler_free(&priv->hdl); - } - - return ret; -} - -static int ov9740_remove(struct i2c_client *client) -{ - struct ov9740_priv *priv = i2c_get_clientdata(client); - - v4l2_clk_put(priv->clk); - v4l2_device_unregister_subdev(&priv->subdev); - v4l2_ctrl_handler_free(&priv->hdl); - return 0; -} - -static const struct i2c_device_id ov9740_id[] = { - { "ov9740", 0 }, - { } -}; -MODULE_DEVICE_TABLE(i2c, ov9740_id); - -static struct i2c_driver ov9740_i2c_driver = { - .driver = { - .name = "ov9740", - }, - .probe = ov9740_probe, - .remove = ov9740_remove, - .id_table = ov9740_id, -}; - -module_i2c_driver(ov9740_i2c_driver); - -MODULE_DESCRIPTION("SoC Camera driver for OmniVision OV9740"); -MODULE_AUTHOR("Andrew Chew "); -MODULE_LICENSE("GPL v2"); diff --git a/include/media/drv-intf/soc_mediabus.h b/include/media/drv-intf/soc_mediabus.h deleted file mode 100644 index 361f8852c9fc..000000000000 --- 
a/include/media/drv-intf/soc_mediabus.h +++ /dev/null @@ -1,107 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * SoC-camera Media Bus API extensions - * - * Copyright (C) 2009, Guennadi Liakhovetski - */ - -#ifndef SOC_MEDIABUS_H -#define SOC_MEDIABUS_H - -#include -#include - -/** - * enum soc_mbus_packing - data packing types on the media-bus - * @SOC_MBUS_PACKING_NONE: no packing, bit-for-bit transfer to RAM, one - * sample represents one pixel - * @SOC_MBUS_PACKING_2X8_PADHI: 16 bits transferred in 2 8-bit samples, in the - * possibly incomplete byte high bits are padding - * @SOC_MBUS_PACKING_2X8_PADLO: as above, but low bits are padding - * @SOC_MBUS_PACKING_EXTEND16: sample width (e.g., 10 bits) has to be extended - * to 16 bits - * @SOC_MBUS_PACKING_VARIABLE: compressed formats with variable packing - * @SOC_MBUS_PACKING_1_5X8: used for packed YUV 4:2:0 formats, where 4 - * pixels occupy 6 bytes in RAM - * @SOC_MBUS_PACKING_EXTEND32: sample width (e.g., 24 bits) has to be extended - * to 32 bits - */ -enum soc_mbus_packing { - SOC_MBUS_PACKING_NONE, - SOC_MBUS_PACKING_2X8_PADHI, - SOC_MBUS_PACKING_2X8_PADLO, - SOC_MBUS_PACKING_EXTEND16, - SOC_MBUS_PACKING_VARIABLE, - SOC_MBUS_PACKING_1_5X8, - SOC_MBUS_PACKING_EXTEND32, -}; - -/** - * enum soc_mbus_order - sample order on the media bus - * @SOC_MBUS_ORDER_LE: least significant sample first - * @SOC_MBUS_ORDER_BE: most significant sample first - */ -enum soc_mbus_order { - SOC_MBUS_ORDER_LE, - SOC_MBUS_ORDER_BE, -}; - -/** - * enum soc_mbus_layout - planes layout in memory - * @SOC_MBUS_LAYOUT_PACKED: color components packed - * @SOC_MBUS_LAYOUT_PLANAR_2Y_U_V: YUV components stored in 3 planes (4:2:2) - * @SOC_MBUS_LAYOUT_PLANAR_2Y_C: YUV components stored in a luma and a - * chroma plane (C plane is half the size - * of Y plane) - * @SOC_MBUS_LAYOUT_PLANAR_Y_C: YUV components stored in a luma and a - * chroma plane (C plane is the same size - * as Y plane) - */ -enum soc_mbus_layout { - SOC_MBUS_LAYOUT_PACKED = 0, - SOC_MBUS_LAYOUT_PLANAR_2Y_U_V, - SOC_MBUS_LAYOUT_PLANAR_2Y_C, - SOC_MBUS_LAYOUT_PLANAR_Y_C, -}; - -/** - * struct soc_mbus_pixelfmt - Data format on the media bus - * @fourcc: Fourcc code, that will be obtained if the data is - * stored in memory in the following way: - * @packing: Type of sample-packing, that has to be used - * @order: Sample order when storing in memory - * @bits_per_sample: How many bits the bridge has to sample - */ -struct soc_mbus_pixelfmt { - u32 fourcc; - enum soc_mbus_packing packing; - enum soc_mbus_order order; - enum soc_mbus_layout layout; - u8 bits_per_sample; -}; - -/** - * struct soc_mbus_lookup - Lookup FOURCC IDs by mediabus codes for pass-through - * @code: mediabus pixel-code - * @fmt: pixel format description - */ -struct soc_mbus_lookup { - u32 code; - struct soc_mbus_pixelfmt fmt; -}; - -const struct soc_mbus_pixelfmt *soc_mbus_find_fmtdesc( - u32 code, - const struct soc_mbus_lookup *lookup, - int n); -const struct soc_mbus_pixelfmt *soc_mbus_get_fmtdesc( - u32 code); -s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf); -s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf, - u32 bytes_per_line, u32 height); -int soc_mbus_samples_per_pixel(const struct soc_mbus_pixelfmt *mf, - unsigned int *numerator, unsigned int *denominator); -unsigned int soc_mbus_config_compatible(const struct v4l2_mbus_config *cfg, - unsigned int flags); - -#endif -- cgit v1.2.3 From 19d1d03ed232242c7d6246179c7c2a83ee3e107f Mon Sep 17 00:00:00 2001 From: Hans Verkuil 
Date: Fri, 26 Jun 2020 13:53:21 +0200 Subject: media: soc_camera.h: remove this unused header The soc_camera driver has been removed and all board files that used it have been fixed. This header can now be removed altogether. Signed-off-by: Hans Verkuil Cc: Arnd Bergmann Signed-off-by: Mauro Carvalho Chehab --- include/media/soc_camera.h | 397 --------------------------------------------- 1 file changed, 397 deletions(-) delete mode 100644 include/media/soc_camera.h (limited to 'include') diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h deleted file mode 100644 index 331c343a5b5a..000000000000 --- a/include/media/soc_camera.h +++ /dev/null @@ -1,397 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * camera image capture (abstract) bus driver header - * - * Copyright (C) 2006, Sascha Hauer, Pengutronix - * Copyright (C) 2008, Guennadi Liakhovetski - */ - -#ifndef SOC_CAMERA_H -#define SOC_CAMERA_H - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -struct file; -struct soc_camera_desc; -struct soc_camera_async_client; - -struct soc_camera_device { - struct list_head list; /* list of all registered devices */ - struct soc_camera_desc *sdesc; - struct device *pdev; /* Platform device */ - struct device *parent; /* Camera host device */ - struct device *control; /* E.g., the i2c client */ - s32 user_width; - s32 user_height; - u32 bytesperline; /* for padding, zero if unused */ - u32 sizeimage; - enum v4l2_colorspace colorspace; - unsigned char iface; /* Host number */ - unsigned char devnum; /* Device number per host */ - struct soc_camera_sense *sense; /* See comment in struct definition */ - struct video_device *vdev; - struct v4l2_ctrl_handler ctrl_handler; - const struct soc_camera_format_xlate *current_fmt; - struct soc_camera_format_xlate *user_formats; - int num_user_formats; - enum v4l2_field field; /* Preserve field over close() */ - void *host_priv; /* Per-device host private data */ - /* soc_camera.c private count. Only accessed with .host_lock held */ - int use_count; - struct file *streamer; /* stream owner */ - struct v4l2_clk *clk; - /* Asynchronous subdevice management */ - struct soc_camera_async_client *sasc; - /* video buffer queue */ - struct vb2_queue vb2_vidq; -}; - -/* Host supports programmable stride */ -#define SOCAM_HOST_CAP_STRIDE (1 << 0) - -enum soc_camera_subdev_role { - SOCAM_SUBDEV_DATA_SOURCE = 1, - SOCAM_SUBDEV_DATA_SINK, - SOCAM_SUBDEV_DATA_PROCESSOR, -}; - -struct soc_camera_async_subdev { - struct v4l2_async_subdev asd; - enum soc_camera_subdev_role role; -}; - -struct soc_camera_host { - struct v4l2_device v4l2_dev; - struct list_head list; - struct mutex host_lock; /* Main synchronisation lock */ - struct mutex clk_lock; /* Protect pipeline modifications */ - unsigned char nr; /* Host number */ - u32 capabilities; - struct soc_camera_device *icd; /* Currently attached client */ - void *priv; - const char *drv_name; - struct soc_camera_host_ops *ops; - struct v4l2_async_subdev **asd; /* Flat array, arranged in groups */ - unsigned int *asd_sizes; /* 0-terminated array of asd group sizes */ -}; - -struct soc_camera_host_ops { - struct module *owner; - int (*add)(struct soc_camera_device *); - void (*remove)(struct soc_camera_device *); - int (*clock_start)(struct soc_camera_host *); - void (*clock_stop)(struct soc_camera_host *); - /* - * .get_formats() is called for each client device format, but - * .put_formats() is only called once. 
Further, if any of the calls to - * .get_formats() fail, .put_formats() will not be called at all, the - * failing .get_formats() must then clean up internally. - */ - int (*get_formats)(struct soc_camera_device *, unsigned int, - struct soc_camera_format_xlate *); - void (*put_formats)(struct soc_camera_device *); - int (*get_selection)(struct soc_camera_device *, struct v4l2_selection *); - int (*set_selection)(struct soc_camera_device *, struct v4l2_selection *); - /* - * The difference to .set_selection() is, that .set_liveselection is not allowed - * to change the output sizes - */ - int (*set_liveselection)(struct soc_camera_device *, struct v4l2_selection *); - int (*set_fmt)(struct soc_camera_device *, struct v4l2_format *); - int (*try_fmt)(struct soc_camera_device *, struct v4l2_format *); - int (*init_videobuf2)(struct vb2_queue *, - struct soc_camera_device *); - int (*querycap)(struct soc_camera_host *, struct v4l2_capability *); - int (*set_bus_param)(struct soc_camera_device *); - int (*get_parm)(struct soc_camera_device *, struct v4l2_streamparm *); - int (*set_parm)(struct soc_camera_device *, struct v4l2_streamparm *); - int (*enum_framesizes)(struct soc_camera_device *, struct v4l2_frmsizeenum *); - __poll_t (*poll)(struct file *, poll_table *); -}; - -#define SOCAM_SENSOR_INVERT_PCLK (1 << 0) -#define SOCAM_SENSOR_INVERT_MCLK (1 << 1) -#define SOCAM_SENSOR_INVERT_HSYNC (1 << 2) -#define SOCAM_SENSOR_INVERT_VSYNC (1 << 3) -#define SOCAM_SENSOR_INVERT_DATA (1 << 4) - -struct i2c_board_info; -struct regulator_bulk_data; - -struct soc_camera_subdev_desc { - /* Per camera SOCAM_SENSOR_* bus flags */ - unsigned long flags; - - /* sensor driver private platform data */ - void *drv_priv; - - /* - * Set unbalanced_power to true to deal with legacy drivers, failing to - * balance their calls to subdevice's .s_power() method. clock_state is - * then used internally by helper functions, it shouldn't be touched by - * drivers or the platform code. - */ - bool unbalanced_power; - unsigned long clock_state; - - /* Optional callbacks to power on or off and reset the sensor */ - int (*power)(struct device *, int); - int (*reset)(struct device *); - - /* - * some platforms may support different data widths than the sensors - * native ones due to different data line routing. Let the board code - * overwrite the width flags. - */ - int (*set_bus_param)(struct soc_camera_subdev_desc *, unsigned long flags); - unsigned long (*query_bus_param)(struct soc_camera_subdev_desc *); - void (*free_bus)(struct soc_camera_subdev_desc *); - - /* Optional regulators that have to be managed on power on/off events */ - struct v4l2_subdev_platform_data sd_pdata; -}; - -struct soc_camera_host_desc { - /* Camera bus id, used to match a camera and a bus */ - int bus_id; - int i2c_adapter_id; - struct i2c_board_info *board_info; - const char *module_name; - - /* - * For non-I2C devices platform has to provide methods to add a device - * to the system and to remove it - */ - int (*add_device)(struct soc_camera_device *); - void (*del_device)(struct soc_camera_device *); -}; - -/* - * Platform data for "soc-camera-pdrv" - * This MUST be kept binary-identical to struct soc_camera_link below, until - * it is completely replaced by this one, after which we can split it into its - * two components. - */ -struct soc_camera_desc { - struct soc_camera_subdev_desc subdev_desc; - struct soc_camera_host_desc host_desc; -}; - -/* Prepare to replace this struct: don't change its layout any more! 
*/ -struct soc_camera_link { - /* - * Subdevice part - keep at top and compatible to - * struct soc_camera_subdev_desc - */ - - /* Per camera SOCAM_SENSOR_* bus flags */ - unsigned long flags; - - void *priv; - - /* Set by platforms to handle misbehaving drivers */ - bool unbalanced_power; - /* Used by soc-camera helper functions */ - unsigned long clock_state; - - /* Optional callbacks to power on or off and reset the sensor */ - int (*power)(struct device *, int); - int (*reset)(struct device *); - /* - * some platforms may support different data widths than the sensors - * native ones due to different data line routing. Let the board code - * overwrite the width flags. - */ - int (*set_bus_param)(struct soc_camera_link *, unsigned long flags); - unsigned long (*query_bus_param)(struct soc_camera_link *); - void (*free_bus)(struct soc_camera_link *); - - /* Optional regulators that have to be managed on power on/off events */ - struct regulator_bulk_data *regulators; - int num_regulators; - - void *host_priv; - - /* - * Host part - keep at bottom and compatible to - * struct soc_camera_host_desc - */ - - /* Camera bus id, used to match a camera and a bus */ - int bus_id; - int i2c_adapter_id; - struct i2c_board_info *board_info; - const char *module_name; - - /* - * For non-I2C devices platform has to provide methods to add a device - * to the system and to remove it - */ - int (*add_device)(struct soc_camera_device *); - void (*del_device)(struct soc_camera_device *); -}; - -static inline struct soc_camera_host *to_soc_camera_host( - const struct device *dev) -{ - struct v4l2_device *v4l2_dev = dev_get_drvdata(dev); - - return container_of(v4l2_dev, struct soc_camera_host, v4l2_dev); -} - -static inline struct soc_camera_desc *to_soc_camera_desc( - const struct soc_camera_device *icd) -{ - return icd->sdesc; -} - -static inline struct device *to_soc_camera_control( - const struct soc_camera_device *icd) -{ - return icd->control; -} - -static inline struct v4l2_subdev *soc_camera_to_subdev( - const struct soc_camera_device *icd) -{ - struct device *control = to_soc_camera_control(icd); - return dev_get_drvdata(control); -} - -int soc_camera_host_register(struct soc_camera_host *ici); -void soc_camera_host_unregister(struct soc_camera_host *ici); - -const struct soc_camera_format_xlate *soc_camera_xlate_by_fourcc( - struct soc_camera_device *icd, unsigned int fourcc); - -/** - * struct soc_camera_format_xlate - match between host and sensor formats - * @code: code of a sensor provided format - * @host_fmt: host format after host translation from code - * - * Host and sensor translation structure. Used in table of host and sensor - * formats matchings in soc_camera_device. A host can override the generic list - * generation by implementing get_formats(), and use it for format checks and - * format setup. - */ -struct soc_camera_format_xlate { - u32 code; - const struct soc_mbus_pixelfmt *host_fmt; -}; - -#define SOCAM_SENSE_PCLK_CHANGED (1 << 0) - -/** - * This struct can be attached to struct soc_camera_device by the host driver - * to request sense from the camera, for example, when calling .set_fmt(). The - * host then can check which flags are set and verify respective values if any. - * For example, if SOCAM_SENSE_PCLK_CHANGED is set, it means, pixclock has - * changed during this operation. After completion the host should detach sense. - * - * @flags ored SOCAM_SENSE_* flags - * @master_clock if the host wants to be informed about pixel-clock - * change, it better set master_clock. 
- * @pixel_clock_max maximum pixel clock frequency supported by the host, - * camera is not allowed to exceed this. - * @pixel_clock if the camera driver changed pixel clock during this - * operation, it sets SOCAM_SENSE_PCLK_CHANGED, uses - * master_clock to calculate the new pixel-clock and - * sets this field. - */ -struct soc_camera_sense { - unsigned long flags; - unsigned long master_clock; - unsigned long pixel_clock_max; - unsigned long pixel_clock; -}; - -#define SOCAM_DATAWIDTH(x) BIT((x) - 1) -#define SOCAM_DATAWIDTH_4 SOCAM_DATAWIDTH(4) -#define SOCAM_DATAWIDTH_8 SOCAM_DATAWIDTH(8) -#define SOCAM_DATAWIDTH_9 SOCAM_DATAWIDTH(9) -#define SOCAM_DATAWIDTH_10 SOCAM_DATAWIDTH(10) -#define SOCAM_DATAWIDTH_12 SOCAM_DATAWIDTH(12) -#define SOCAM_DATAWIDTH_15 SOCAM_DATAWIDTH(15) -#define SOCAM_DATAWIDTH_16 SOCAM_DATAWIDTH(16) -#define SOCAM_DATAWIDTH_18 SOCAM_DATAWIDTH(18) -#define SOCAM_DATAWIDTH_24 SOCAM_DATAWIDTH(24) - -#define SOCAM_DATAWIDTH_MASK (SOCAM_DATAWIDTH_4 | SOCAM_DATAWIDTH_8 | \ - SOCAM_DATAWIDTH_9 | SOCAM_DATAWIDTH_10 | \ - SOCAM_DATAWIDTH_12 | SOCAM_DATAWIDTH_15 | \ - SOCAM_DATAWIDTH_16 | SOCAM_DATAWIDTH_18 | \ - SOCAM_DATAWIDTH_24) - -static inline void soc_camera_limit_side(int *start, int *length, - unsigned int start_min, - unsigned int length_min, unsigned int length_max) -{ - if (*length < length_min) - *length = length_min; - else if (*length > length_max) - *length = length_max; - - if (*start < start_min) - *start = start_min; - else if (*start > start_min + length_max - *length) - *start = start_min + length_max - *length; -} - -unsigned long soc_camera_apply_board_flags(struct soc_camera_subdev_desc *ssdd, - const struct v4l2_mbus_config *cfg); - -int soc_camera_power_init(struct device *dev, struct soc_camera_subdev_desc *ssdd); -int soc_camera_power_on(struct device *dev, struct soc_camera_subdev_desc *ssdd, - struct v4l2_clk *clk); -int soc_camera_power_off(struct device *dev, struct soc_camera_subdev_desc *ssdd, - struct v4l2_clk *clk); - -static inline int soc_camera_set_power(struct device *dev, - struct soc_camera_subdev_desc *ssdd, struct v4l2_clk *clk, bool on) -{ - return on ? soc_camera_power_on(dev, ssdd, clk) - : soc_camera_power_off(dev, ssdd, clk); -} - -/* This is only temporary here - until v4l2-subdev begins to link to video_device */ -#include -static inline struct video_device *soc_camera_i2c_to_vdev(const struct i2c_client *client) -{ - struct v4l2_subdev *sd = i2c_get_clientdata(client); - struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd); - return icd ? 
icd->vdev : NULL; -} - -static inline struct soc_camera_subdev_desc *soc_camera_i2c_to_desc(const struct i2c_client *client) -{ - return client->dev.platform_data; -} - -static inline struct v4l2_subdev *soc_camera_vdev_to_subdev(struct video_device *vdev) -{ - struct soc_camera_device *icd = video_get_drvdata(vdev); - return soc_camera_to_subdev(icd); -} - -static inline struct soc_camera_device *soc_camera_from_vb2q(const struct vb2_queue *vq) -{ - return container_of(vq, struct soc_camera_device, vb2_vidq); -} - -static inline u32 soc_camera_grp_id(const struct soc_camera_device *icd) -{ - return (icd->iface << 8) | (icd->devnum + 1); -} - -void soc_camera_lock(struct vb2_queue *vq); -void soc_camera_unlock(struct vb2_queue *vq); - -#endif -- cgit v1.2.3 From d3246337d00fcb635e4ade45f748f155e4056b28 Mon Sep 17 00:00:00 2001 From: Benoit Parrot Date: Thu, 28 May 2020 15:26:04 +0200 Subject: media: v4l2-rect.h: add enclosed rectangle helper Add a helper function to check if one rectangle is enclosed inside another. Signed-off-by: Benoit Parrot Acked-by: Andrzej Pietrasiewicz Reviewed-by: Lad Prabhakar Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/media/v4l2-rect.h | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'include') diff --git a/include/media/v4l2-rect.h b/include/media/v4l2-rect.h index 8800a640c224..bd587d0c0dc3 100644 --- a/include/media/v4l2-rect.h +++ b/include/media/v4l2-rect.h @@ -184,4 +184,24 @@ static inline bool v4l2_rect_overlap(const struct v4l2_rect *r1, return true; } +/** + * v4l2_rect_enclosed() - is r1 enclosed in r2? + * @r1: rectangle. + * @r2: rectangle. + * + * Returns true if @r1 is enclosed in @r2. + */ +static inline bool v4l2_rect_enclosed(struct v4l2_rect *r1, + struct v4l2_rect *r2) +{ + if (r1->left < r2->left || r1->top < r2->top) + return false; + if (r1->left + r1->width > r2->left + r2->width) + return false; + if (r1->top + r1->height > r2->top + r2->height) + return false; + + return true; +} + #endif -- cgit v1.2.3 From b820935b35479e823eb6ce133cdbe371dced7e95 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sun, 21 Jun 2020 02:23:43 +0200 Subject: media: vb2: Print the queue pointer in debug messages When debugging issues that involve more than one video queue, messages related to multiple queues get interleaved without any easy way to tell which queue they relate to. Fix this by adding a queue name to vb2_queue, and printing it in all debug messages in the vb2 core and V4L2 layers. If the name isn't set by drivers, it is automatically filled with the queue direction and address. Signed-off-by: Laurent Pinchart Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/common/videobuf2/videobuf2-core.c | 226 ++++++++++++------------ drivers/media/common/videobuf2/videobuf2-v4l2.c | 68 ++++--- include/media/videobuf2-core.h | 4 + include/media/videobuf2-v4l2.h | 13 ++ 4 files changed, 174 insertions(+), 137 deletions(-) (limited to 'include') diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c index 37d0186ba330..abaf28e057eb 100644 --- a/drivers/media/common/videobuf2/videobuf2-core.c +++ b/drivers/media/common/videobuf2/videobuf2-core.c @@ -34,10 +34,11 @@ static int debug; module_param(debug, int, 0644); -#define dprintk(level, fmt, arg...) \ - do { \ - if (debug >= level) \ - pr_info("%s: " fmt, __func__, ## arg); \ +#define dprintk(q, level, fmt, arg...) 
\ + do { \ + if (debug >= level) \ + pr_info("[%s] %s: " fmt, (q)->name, __func__, \ + ## arg); \ } while (0) #ifdef CONFIG_VIDEO_ADV_DEBUG @@ -51,8 +52,8 @@ module_param(debug, int, 0644); */ #define log_memop(vb, op) \ - dprintk(2, "call_memop(%p, %d, %s)%s\n", \ - (vb)->vb2_queue, (vb)->index, #op, \ + dprintk((vb)->vb2_queue, 2, "call_memop(%d, %s)%s\n", \ + (vb)->index, #op, \ (vb)->vb2_queue->mem_ops->op ? "" : " (nop)") #define call_memop(vb, op, args...) \ @@ -90,7 +91,7 @@ module_param(debug, int, 0644); }) #define log_qop(q, op) \ - dprintk(2, "call_qop(%p, %s)%s\n", q, #op, \ + dprintk(q, 2, "call_qop(%s)%s\n", #op, \ (q)->ops->op ? "" : " (nop)") #define call_qop(q, op, args...) \ @@ -113,8 +114,8 @@ module_param(debug, int, 0644); }) #define log_vb_qop(vb, op, args...) \ - dprintk(2, "call_vb_qop(%p, %d, %s)%s\n", \ - (vb)->vb2_queue, (vb)->index, #op, \ + dprintk((vb)->vb2_queue, 2, "call_vb_qop(%d, %s)%s\n", \ + (vb)->index, #op, \ (vb)->vb2_queue->ops->op ? "" : " (nop)") #define call_vb_qop(vb, op, args...) \ @@ -246,7 +247,8 @@ static void __vb2_buf_mem_free(struct vb2_buffer *vb) for (plane = 0; plane < vb->num_planes; ++plane) { call_void_memop(vb, put, vb->planes[plane].mem_priv); vb->planes[plane].mem_priv = NULL; - dprintk(3, "freed plane %d of buffer %d\n", plane, vb->index); + dprintk(vb->vb2_queue, 3, "freed plane %d of buffer %d\n", + plane, vb->index); } } @@ -354,7 +356,7 @@ static void __setup_offsets(struct vb2_buffer *vb) for (plane = 0; plane < vb->num_planes; ++plane) { vb->planes[plane].m.offset = off; - dprintk(3, "buffer %d, plane %d offset 0x%08lx\n", + dprintk(q, 3, "buffer %d, plane %d offset 0x%08lx\n", vb->index, plane, off); off += vb->planes[plane].length; @@ -385,7 +387,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory, /* Allocate videobuf buffer structures */ vb = kzalloc(q->buf_struct_size, GFP_KERNEL); if (!vb) { - dprintk(1, "memory alloc for buffer struct failed\n"); + dprintk(q, 1, "memory alloc for buffer struct failed\n"); break; } @@ -407,7 +409,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory, if (memory == VB2_MEMORY_MMAP) { ret = __vb2_buf_mem_alloc(vb); if (ret) { - dprintk(1, "failed allocating memory for buffer %d\n", + dprintk(q, 1, "failed allocating memory for buffer %d\n", buffer); q->bufs[vb->index] = NULL; kfree(vb); @@ -421,7 +423,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory, */ ret = call_vb_qop(vb, buf_init, vb); if (ret) { - dprintk(1, "buffer %d %p initialization failed\n", + dprintk(q, 1, "buffer %d %p initialization failed\n", buffer, vb); __vb2_buf_mem_free(vb); q->bufs[vb->index] = NULL; @@ -431,7 +433,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory, } } - dprintk(3, "allocated %d buffers, %d plane(s) each\n", + dprintk(q, 3, "allocated %d buffers, %d plane(s) each\n", buffer, num_planes); return buffer; @@ -483,7 +485,7 @@ static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers) if (q->bufs[buffer] == NULL) continue; if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) { - dprintk(1, "preparing buffers, cannot free\n"); + dprintk(q, 1, "preparing buffers, cannot free\n"); return -EAGAIN; } } @@ -661,12 +663,12 @@ int vb2_verify_memory_type(struct vb2_queue *q, { if (memory != VB2_MEMORY_MMAP && memory != VB2_MEMORY_USERPTR && memory != VB2_MEMORY_DMABUF) { - dprintk(1, "unsupported memory type\n"); + dprintk(q, 1, "unsupported memory type\n"); return -EINVAL; } if (type != 
q->type) { - dprintk(1, "requested type is incorrect\n"); + dprintk(q, 1, "requested type is incorrect\n"); return -EINVAL; } @@ -675,17 +677,17 @@ int vb2_verify_memory_type(struct vb2_queue *q, * are available. */ if (memory == VB2_MEMORY_MMAP && __verify_mmap_ops(q)) { - dprintk(1, "MMAP for current setup unsupported\n"); + dprintk(q, 1, "MMAP for current setup unsupported\n"); return -EINVAL; } if (memory == VB2_MEMORY_USERPTR && __verify_userptr_ops(q)) { - dprintk(1, "USERPTR for current setup unsupported\n"); + dprintk(q, 1, "USERPTR for current setup unsupported\n"); return -EINVAL; } if (memory == VB2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) { - dprintk(1, "DMABUF for current setup unsupported\n"); + dprintk(q, 1, "DMABUF for current setup unsupported\n"); return -EINVAL; } @@ -695,7 +697,7 @@ int vb2_verify_memory_type(struct vb2_queue *q, * do the memory and type validation. */ if (vb2_fileio_is_active(q)) { - dprintk(1, "file io in progress\n"); + dprintk(q, 1, "file io in progress\n"); return -EBUSY; } return 0; @@ -717,7 +719,7 @@ static bool verify_consistency_attr(struct vb2_queue *q, bool consistent_mem) bool queue_is_consistent = !(q->dma_attrs & DMA_ATTR_NON_CONSISTENT); if (consistent_mem != queue_is_consistent) { - dprintk(1, "memory consistency model mismatch\n"); + dprintk(q, 1, "memory consistency model mismatch\n"); return false; } return true; @@ -736,12 +738,12 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, consistent_mem = false; if (q->streaming) { - dprintk(1, "streaming active\n"); + dprintk(q, 1, "streaming active\n"); return -EBUSY; } if (q->waiting_in_dqbuf && *count) { - dprintk(1, "another dup()ped fd is waiting for a buffer\n"); + dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n"); return -EBUSY; } @@ -755,7 +757,7 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, mutex_lock(&q->mmap_lock); if (debug && q->memory == VB2_MEMORY_MMAP && __buffers_in_use(q)) - dprintk(1, "memory in use, orphaning buffers\n"); + dprintk(q, 1, "memory in use, orphaning buffers\n"); /* * Call queue_cancel to clean up any buffers in the @@ -807,7 +809,7 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes); if (allocated_buffers == 0) { - dprintk(1, "memory allocation failed\n"); + dprintk(q, 1, "memory allocation failed\n"); return -ENOMEM; } @@ -882,13 +884,13 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, consistent_mem = false; if (q->num_buffers == VB2_MAX_FRAME) { - dprintk(1, "maximum number of buffers already allocated\n"); + dprintk(q, 1, "maximum number of buffers already allocated\n"); return -ENOBUFS; } if (!q->num_buffers) { if (q->waiting_in_dqbuf && *count) { - dprintk(1, "another dup()ped fd is waiting for a buffer\n"); + dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n"); return -EBUSY; } memset(q->alloc_devs, 0, sizeof(q->alloc_devs)); @@ -897,7 +899,7 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, q->waiting_for_buffers = !q->is_output; } else { if (q->memory != memory) { - dprintk(1, "memory model mismatch\n"); + dprintk(q, 1, "memory model mismatch\n"); return -EINVAL; } if (!verify_consistency_attr(q, consistent_mem)) @@ -924,7 +926,7 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes); if (allocated_buffers == 0) { - dprintk(1, 
"memory allocation failed\n"); + dprintk(q, 1, "memory allocation failed\n"); return -ENOMEM; } @@ -1013,7 +1015,7 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state) */ vb->cnt_buf_done++; #endif - dprintk(4, "done processing on buffer %d, state: %d\n", + dprintk(q, 4, "done processing on buffer %d, state: %d\n", vb->index, state); if (state != VB2_BUF_STATE_QUEUED) @@ -1099,12 +1101,12 @@ static int __prepare_userptr(struct vb2_buffer *vb) && vb->planes[plane].length == planes[plane].length) continue; - dprintk(3, "userspace address for plane %d changed, reacquiring memory\n", + dprintk(q, 3, "userspace address for plane %d changed, reacquiring memory\n", plane); /* Check if the provided plane buffer is large enough */ if (planes[plane].length < vb->planes[plane].min_length) { - dprintk(1, "provided buffer size %u is less than setup size %u for plane %d\n", + dprintk(q, 1, "provided buffer size %u is less than setup size %u for plane %d\n", planes[plane].length, vb->planes[plane].min_length, plane); @@ -1134,7 +1136,7 @@ static int __prepare_userptr(struct vb2_buffer *vb) planes[plane].m.userptr, planes[plane].length, q->dma_dir); if (IS_ERR(mem_priv)) { - dprintk(1, "failed acquiring userspace memory for plane %d\n", + dprintk(q, 1, "failed acquiring userspace memory for plane %d\n", plane); ret = PTR_ERR(mem_priv); goto err; @@ -1161,14 +1163,14 @@ static int __prepare_userptr(struct vb2_buffer *vb) */ ret = call_vb_qop(vb, buf_init, vb); if (ret) { - dprintk(1, "buffer initialization failed\n"); + dprintk(q, 1, "buffer initialization failed\n"); goto err; } } ret = call_vb_qop(vb, buf_prepare, vb); if (ret) { - dprintk(1, "buffer preparation failed\n"); + dprintk(q, 1, "buffer preparation failed\n"); call_void_vb_qop(vb, buf_cleanup, vb); goto err; } @@ -1211,7 +1213,7 @@ static int __prepare_dmabuf(struct vb2_buffer *vb) struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd); if (IS_ERR_OR_NULL(dbuf)) { - dprintk(1, "invalid dmabuf fd for plane %d\n", + dprintk(q, 1, "invalid dmabuf fd for plane %d\n", plane); ret = -EINVAL; goto err; @@ -1222,7 +1224,7 @@ static int __prepare_dmabuf(struct vb2_buffer *vb) planes[plane].length = dbuf->size; if (planes[plane].length < vb->planes[plane].min_length) { - dprintk(1, "invalid dmabuf length %u for plane %d, minimum length %u\n", + dprintk(q, 1, "invalid dmabuf length %u for plane %d, minimum length %u\n", planes[plane].length, plane, vb->planes[plane].min_length); dma_buf_put(dbuf); @@ -1237,7 +1239,7 @@ static int __prepare_dmabuf(struct vb2_buffer *vb) continue; } - dprintk(3, "buffer for plane %d changed\n", plane); + dprintk(q, 3, "buffer for plane %d changed\n", plane); if (!reacquired) { reacquired = true; @@ -1257,7 +1259,7 @@ static int __prepare_dmabuf(struct vb2_buffer *vb) q->alloc_devs[plane] ? 
: q->dev, dbuf, planes[plane].length, q->dma_dir); if (IS_ERR(mem_priv)) { - dprintk(1, "failed to attach dmabuf\n"); + dprintk(q, 1, "failed to attach dmabuf\n"); ret = PTR_ERR(mem_priv); dma_buf_put(dbuf); goto err; @@ -1278,7 +1280,7 @@ static int __prepare_dmabuf(struct vb2_buffer *vb) ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv); if (ret) { - dprintk(1, "failed to map dmabuf for plane %d\n", + dprintk(q, 1, "failed to map dmabuf for plane %d\n", plane); goto err; } @@ -1303,14 +1305,14 @@ static int __prepare_dmabuf(struct vb2_buffer *vb) */ ret = call_vb_qop(vb, buf_init, vb); if (ret) { - dprintk(1, "buffer initialization failed\n"); + dprintk(q, 1, "buffer initialization failed\n"); goto err; } } ret = call_vb_qop(vb, buf_prepare, vb); if (ret) { - dprintk(1, "buffer preparation failed\n"); + dprintk(q, 1, "buffer preparation failed\n"); call_void_vb_qop(vb, buf_cleanup, vb); goto err; } @@ -1345,7 +1347,7 @@ static int __buf_prepare(struct vb2_buffer *vb) int ret; if (q->error) { - dprintk(1, "fatal error occurred on queue\n"); + dprintk(q, 1, "fatal error occurred on queue\n"); return -EIO; } @@ -1356,7 +1358,7 @@ static int __buf_prepare(struct vb2_buffer *vb) if (q->is_output) { ret = call_vb_qop(vb, buf_out_validate, vb); if (ret) { - dprintk(1, "buffer validation failed\n"); + dprintk(q, 1, "buffer validation failed\n"); return ret; } } @@ -1380,7 +1382,7 @@ static int __buf_prepare(struct vb2_buffer *vb) } if (ret) { - dprintk(1, "buffer preparation failed: %d\n", ret); + dprintk(q, 1, "buffer preparation failed: %d\n", ret); vb->state = orig_state; return ret; } @@ -1488,12 +1490,12 @@ int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb) vb = q->bufs[index]; if (vb->state != VB2_BUF_STATE_DEQUEUED) { - dprintk(1, "invalid buffer state %d\n", + dprintk(q, 1, "invalid buffer state %d\n", vb->state); return -EINVAL; } if (vb->prepared) { - dprintk(1, "buffer already prepared\n"); + dprintk(q, 1, "buffer already prepared\n"); return -EINVAL; } @@ -1504,7 +1506,7 @@ int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb) /* Fill buffer information for the userspace */ call_void_bufop(q, fill_user_buffer, vb, pb); - dprintk(2, "prepare of buffer %d succeeded\n", vb->index); + dprintk(q, 2, "prepare of buffer %d succeeded\n", vb->index); return 0; } @@ -1542,7 +1544,7 @@ static int vb2_start_streaming(struct vb2_queue *q) q->start_streaming_called = 0; - dprintk(1, "driver refused to start streaming\n"); + dprintk(q, 1, "driver refused to start streaming\n"); /* * If you see this warning, then the driver isn't cleaning up properly * after a failed start_streaming(). 
See the start_streaming() @@ -1580,7 +1582,7 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb, int ret; if (q->error) { - dprintk(1, "fatal error occurred on queue\n"); + dprintk(q, 1, "fatal error occurred on queue\n"); return -EIO; } @@ -1588,14 +1590,14 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb, if (!req && vb->state != VB2_BUF_STATE_IN_REQUEST && q->requires_requests) { - dprintk(1, "qbuf requires a request\n"); + dprintk(q, 1, "qbuf requires a request\n"); return -EBADR; } if ((req && q->uses_qbuf) || (!req && vb->state != VB2_BUF_STATE_IN_REQUEST && q->uses_requests)) { - dprintk(1, "queue in wrong mode (qbuf vs requests)\n"); + dprintk(q, 1, "queue in wrong mode (qbuf vs requests)\n"); return -EBUSY; } @@ -1604,7 +1606,7 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb, q->uses_requests = 1; if (vb->state != VB2_BUF_STATE_DEQUEUED) { - dprintk(1, "buffer %d not in dequeued state\n", + dprintk(q, 1, "buffer %d not in dequeued state\n", vb->index); return -EINVAL; } @@ -1612,7 +1614,7 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb, if (q->is_output && !vb->prepared) { ret = call_vb_qop(vb, buf_out_validate, vb); if (ret) { - dprintk(1, "buffer validation failed\n"); + dprintk(q, 1, "buffer validation failed\n"); return ret; } } @@ -1648,7 +1650,7 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb, call_void_bufop(q, fill_user_buffer, vb, pb); } - dprintk(2, "qbuf of buffer %d succeeded\n", vb->index); + dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index); return 0; } @@ -1665,10 +1667,10 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb, } break; case VB2_BUF_STATE_PREPARING: - dprintk(1, "buffer still being prepared\n"); + dprintk(q, 1, "buffer still being prepared\n"); return -EINVAL; default: - dprintk(1, "invalid buffer state %d\n", vb->state); + dprintk(q, 1, "invalid buffer state %d\n", vb->state); return -EINVAL; } @@ -1710,7 +1712,7 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb, return ret; } - dprintk(2, "qbuf of buffer %d succeeded\n", vb->index); + dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index); return 0; } EXPORT_SYMBOL_GPL(vb2_core_qbuf); @@ -1736,22 +1738,22 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking) int ret; if (q->waiting_in_dqbuf) { - dprintk(1, "another dup()ped fd is waiting for a buffer\n"); + dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n"); return -EBUSY; } if (!q->streaming) { - dprintk(1, "streaming off, will not wait for buffers\n"); + dprintk(q, 1, "streaming off, will not wait for buffers\n"); return -EINVAL; } if (q->error) { - dprintk(1, "Queue in error state, will not wait for buffers\n"); + dprintk(q, 1, "Queue in error state, will not wait for buffers\n"); return -EIO; } if (q->last_buffer_dequeued) { - dprintk(3, "last buffer dequeued already, will not wait for buffers\n"); + dprintk(q, 3, "last buffer dequeued already, will not wait for buffers\n"); return -EPIPE; } @@ -1763,7 +1765,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking) } if (nonblocking) { - dprintk(3, "nonblocking and no buffers to dequeue, will not wait\n"); + dprintk(q, 3, "nonblocking and no buffers to dequeue, will not wait\n"); return -EAGAIN; } @@ -1778,7 +1780,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking) /* * All locks have been released, it is safe to sleep now. 
*/ - dprintk(3, "will sleep waiting for buffers\n"); + dprintk(q, 3, "will sleep waiting for buffers\n"); ret = wait_event_interruptible(q->done_wq, !list_empty(&q->done_list) || !q->streaming || q->error); @@ -1790,7 +1792,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking) call_void_qop(q, wait_finish, q); q->waiting_in_dqbuf = 0; if (ret) { - dprintk(1, "sleep was interrupted\n"); + dprintk(q, 1, "sleep was interrupted\n"); return ret; } } @@ -1838,7 +1840,7 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb, int vb2_wait_for_all_buffers(struct vb2_queue *q) { if (!q->streaming) { - dprintk(1, "streaming off, will not wait for buffers\n"); + dprintk(q, 1, "streaming off, will not wait for buffers\n"); return -EINVAL; } @@ -1876,13 +1878,13 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb, switch (vb->state) { case VB2_BUF_STATE_DONE: - dprintk(3, "returning done buffer\n"); + dprintk(q, 3, "returning done buffer\n"); break; case VB2_BUF_STATE_ERROR: - dprintk(3, "returning done buffer with errors\n"); + dprintk(q, 3, "returning done buffer with errors\n"); break; default: - dprintk(1, "invalid buffer state\n"); + dprintk(q, 1, "invalid buffer state\n"); return -EINVAL; } @@ -1913,7 +1915,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb, media_request_put(vb->request); vb->request = NULL; - dprintk(2, "dqbuf of buffer %d, with state %d\n", + dprintk(q, 2, "dqbuf of buffer %d, with state %d\n", vb->index, vb->state); return 0; @@ -2029,22 +2031,22 @@ int vb2_core_streamon(struct vb2_queue *q, unsigned int type) int ret; if (type != q->type) { - dprintk(1, "invalid stream type\n"); + dprintk(q, 1, "invalid stream type\n"); return -EINVAL; } if (q->streaming) { - dprintk(3, "already streaming\n"); + dprintk(q, 3, "already streaming\n"); return 0; } if (!q->num_buffers) { - dprintk(1, "no buffers have been allocated\n"); + dprintk(q, 1, "no buffers have been allocated\n"); return -EINVAL; } if (q->num_buffers < q->min_buffers_needed) { - dprintk(1, "need at least %u allocated buffers\n", + dprintk(q, 1, "need at least %u allocated buffers\n", q->min_buffers_needed); return -EINVAL; } @@ -2064,7 +2066,7 @@ int vb2_core_streamon(struct vb2_queue *q, unsigned int type) q->streaming = 1; - dprintk(3, "successful\n"); + dprintk(q, 3, "successful\n"); return 0; } EXPORT_SYMBOL_GPL(vb2_core_streamon); @@ -2080,7 +2082,7 @@ EXPORT_SYMBOL_GPL(vb2_queue_error); int vb2_core_streamoff(struct vb2_queue *q, unsigned int type) { if (type != q->type) { - dprintk(1, "invalid stream type\n"); + dprintk(q, 1, "invalid stream type\n"); return -EINVAL; } @@ -2097,7 +2099,7 @@ int vb2_core_streamoff(struct vb2_queue *q, unsigned int type) q->waiting_for_buffers = !q->is_output; q->last_buffer_dequeued = false; - dprintk(3, "successful\n"); + dprintk(q, 3, "successful\n"); return 0; } EXPORT_SYMBOL_GPL(vb2_core_streamoff); @@ -2140,39 +2142,39 @@ int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type, struct dma_buf *dbuf; if (q->memory != VB2_MEMORY_MMAP) { - dprintk(1, "queue is not currently set up for mmap\n"); + dprintk(q, 1, "queue is not currently set up for mmap\n"); return -EINVAL; } if (!q->mem_ops->get_dmabuf) { - dprintk(1, "queue does not support DMA buffer exporting\n"); + dprintk(q, 1, "queue does not support DMA buffer exporting\n"); return -EINVAL; } if (flags & ~(O_CLOEXEC | O_ACCMODE)) { - dprintk(1, "queue does support only O_CLOEXEC and access mode flags\n"); + dprintk(q, 1, 
"queue does support only O_CLOEXEC and access mode flags\n"); return -EINVAL; } if (type != q->type) { - dprintk(1, "invalid buffer type\n"); + dprintk(q, 1, "invalid buffer type\n"); return -EINVAL; } if (index >= q->num_buffers) { - dprintk(1, "buffer index out of range\n"); + dprintk(q, 1, "buffer index out of range\n"); return -EINVAL; } vb = q->bufs[index]; if (plane >= vb->num_planes) { - dprintk(1, "buffer plane out of range\n"); + dprintk(q, 1, "buffer plane out of range\n"); return -EINVAL; } if (vb2_fileio_is_active(q)) { - dprintk(1, "expbuf: file io in progress\n"); + dprintk(q, 1, "expbuf: file io in progress\n"); return -EBUSY; } @@ -2181,20 +2183,20 @@ int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type, dbuf = call_ptr_memop(vb, get_dmabuf, vb_plane->mem_priv, flags & O_ACCMODE); if (IS_ERR_OR_NULL(dbuf)) { - dprintk(1, "failed to export buffer %d, plane %d\n", + dprintk(q, 1, "failed to export buffer %d, plane %d\n", index, plane); return -EINVAL; } ret = dma_buf_fd(dbuf, flags & ~O_ACCMODE); if (ret < 0) { - dprintk(3, "buffer %d, plane %d failed to export (%d)\n", + dprintk(q, 3, "buffer %d, plane %d failed to export (%d)\n", index, plane, ret); dma_buf_put(dbuf); return ret; } - dprintk(3, "buffer %d, plane %d exported as %d descriptor\n", + dprintk(q, 3, "buffer %d, plane %d exported as %d descriptor\n", index, plane, ret); *fd = ret; @@ -2211,7 +2213,7 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma) unsigned long length; if (q->memory != VB2_MEMORY_MMAP) { - dprintk(1, "queue is not currently set up for mmap\n"); + dprintk(q, 1, "queue is not currently set up for mmap\n"); return -EINVAL; } @@ -2219,17 +2221,17 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma) * Check memory area access mode. */ if (!(vma->vm_flags & VM_SHARED)) { - dprintk(1, "invalid vma flags, VM_SHARED needed\n"); + dprintk(q, 1, "invalid vma flags, VM_SHARED needed\n"); return -EINVAL; } if (q->is_output) { if (!(vma->vm_flags & VM_WRITE)) { - dprintk(1, "invalid vma flags, VM_WRITE needed\n"); + dprintk(q, 1, "invalid vma flags, VM_WRITE needed\n"); return -EINVAL; } } else { if (!(vma->vm_flags & VM_READ)) { - dprintk(1, "invalid vma flags, VM_READ needed\n"); + dprintk(q, 1, "invalid vma flags, VM_READ needed\n"); return -EINVAL; } } @@ -2237,7 +2239,7 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma) mutex_lock(&q->mmap_lock); if (vb2_fileio_is_active(q)) { - dprintk(1, "mmap: file io in progress\n"); + dprintk(q, 1, "mmap: file io in progress\n"); ret = -EBUSY; goto unlock; } @@ -2258,7 +2260,7 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma) */ length = PAGE_ALIGN(vb->planes[plane].length); if (length < (vma->vm_end - vma->vm_start)) { - dprintk(1, + dprintk(q, 1, "MMAP invalid, as it would overflow buffer length\n"); ret = -EINVAL; goto unlock; @@ -2278,7 +2280,7 @@ unlock: if (ret) return ret; - dprintk(3, "buffer %d, plane %d successfully mapped\n", buffer, plane); + dprintk(q, 3, "buffer %d, plane %d successfully mapped\n", buffer, plane); return 0; } EXPORT_SYMBOL_GPL(vb2_mmap); @@ -2297,7 +2299,7 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q, int ret; if (q->memory != VB2_MEMORY_MMAP) { - dprintk(1, "queue is not currently set up for mmap\n"); + dprintk(q, 1, "queue is not currently set up for mmap\n"); return -EINVAL; } @@ -2349,6 +2351,10 @@ int vb2_core_queue_init(struct vb2_queue *q) else q->dma_dir = q->is_output ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE; + if (q->name[0] == '\0') + snprintf(q->name, sizeof(q->name), "%s-%p", + q->is_output ? "out" : "cap", q); + return 0; } EXPORT_SYMBOL_GPL(vb2_core_queue_init); @@ -2537,7 +2543,7 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read) */ count = 1; - dprintk(3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n", + dprintk(q, 3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n", (read) ? "read" : "write", count, q->fileio_read_once, q->fileio_write_immediately); @@ -2635,7 +2641,7 @@ static int __vb2_cleanup_fileio(struct vb2_queue *q) fileio->count = 0; vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count); kfree(fileio); - dprintk(3, "file io emulator closed\n"); + dprintk(q, 3, "file io emulator closed\n"); } return 0; } @@ -2664,7 +2670,7 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_ unsigned index; int ret; - dprintk(3, "mode %s, offset %ld, count %zd, %sblocking\n", + dprintk(q, 3, "mode %s, offset %ld, count %zd, %sblocking\n", read ? "read" : "write", (long)*ppos, count, nonblock ? "non" : ""); @@ -2672,7 +2678,7 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_ return -EINVAL; if (q->waiting_in_dqbuf) { - dprintk(3, "another dup()ped fd is %s\n", + dprintk(q, 3, "another dup()ped fd is %s\n", read ? "reading" : "writing"); return -EBUSY; } @@ -2682,7 +2688,7 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_ */ if (!vb2_fileio_is_active(q)) { ret = __vb2_init_fileio(q, read); - dprintk(3, "vb2_init_fileio result: %d\n", ret); + dprintk(q, 3, "vb2_init_fileio result: %d\n", ret); if (ret) return ret; } @@ -2699,7 +2705,7 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_ * Call vb2_dqbuf to get buffer back. */ ret = vb2_core_dqbuf(q, &index, NULL, nonblock); - dprintk(5, "vb2_dqbuf result: %d\n", ret); + dprintk(q, 5, "vb2_dqbuf result: %d\n", ret); if (ret) return ret; fileio->dq_count += 1; @@ -2730,20 +2736,20 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_ */ if (buf->pos + count > buf->size) { count = buf->size - buf->pos; - dprintk(5, "reducing read count: %zd\n", count); + dprintk(q, 5, "reducing read count: %zd\n", count); } /* * Transfer data to userspace. */ - dprintk(3, "copying %zd bytes - buffer %d, offset %u\n", + dprintk(q, 3, "copying %zd bytes - buffer %d, offset %u\n", count, index, buf->pos); if (read) ret = copy_to_user(data, buf->vaddr + buf->pos, count); else ret = copy_from_user(buf->vaddr + buf->pos, data, count); if (ret) { - dprintk(3, "error copying data\n"); + dprintk(q, 3, "error copying data\n"); return -EFAULT; } @@ -2763,7 +2769,7 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_ * Check if this is the last buffer to read. 
*/ if (read && fileio->read_once && fileio->dq_count == 1) { - dprintk(3, "read limit reached\n"); + dprintk(q, 3, "read limit reached\n"); return __vb2_cleanup_fileio(q); } @@ -2775,7 +2781,7 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_ if (copy_timestamp) b->timestamp = ktime_get_ns(); ret = vb2_core_qbuf(q, index, NULL, NULL); - dprintk(5, "vb2_dbuf result: %d\n", ret); + dprintk(q, 5, "vb2_dbuf result: %d\n", ret); if (ret) return ret; @@ -2862,7 +2868,7 @@ static int vb2_thread(void *data) if (!threadio->stop) ret = vb2_core_dqbuf(q, &index, NULL, 0); call_void_qop(q, wait_prepare, q); - dprintk(5, "file io: vb2_dqbuf result: %d\n", ret); + dprintk(q, 5, "file io: vb2_dqbuf result: %d\n", ret); if (!ret) vb = q->bufs[index]; } @@ -2916,7 +2922,7 @@ int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv, threadio->priv = priv; ret = __vb2_init_fileio(q, !q->is_output); - dprintk(3, "file io: vb2_init_fileio result: %d\n", ret); + dprintk(q, 3, "file io: vb2_init_fileio result: %d\n", ret); if (ret) goto nomem; q->threadio = threadio; diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c index 559a229cac41..98477a82c810 100644 --- a/drivers/media/common/videobuf2/videobuf2-v4l2.c +++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c @@ -35,10 +35,11 @@ static int debug; module_param(debug, int, 0644); -#define dprintk(level, fmt, arg...) \ +#define dprintk(q, level, fmt, arg...) \ do { \ if (debug >= level) \ - pr_info("vb2-v4l2: %s: " fmt, __func__, ## arg); \ + pr_info("vb2-v4l2: [%p] %s: " fmt, \ + (q)->name, __func__, ## arg); \ } while (0) /* Flags that are set by us */ @@ -66,12 +67,14 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer /* Is memory for copying plane information present? */ if (b->m.planes == NULL) { - dprintk(1, "multi-planar buffer passed but planes array not provided\n"); + dprintk(vb->vb2_queue, 1, + "multi-planar buffer passed but planes array not provided\n"); return -EINVAL; } if (b->length < vb->num_planes || b->length > VB2_MAX_PLANES) { - dprintk(1, "incorrect planes array length, expected %d, got %d\n", + dprintk(vb->vb2_queue, 1, + "incorrect planes array length, expected %d, got %d\n", vb->num_planes, b->length); return -EINVAL; } @@ -179,7 +182,7 @@ static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b ret = __verify_length(vb, b); if (ret < 0) { - dprintk(1, "plane parameters verification failed: %d\n", ret); + dprintk(q, 1, "plane parameters verification failed: %d\n", ret); return ret; } if (b->field == V4L2_FIELD_ALTERNATE && q->is_output) { @@ -192,7 +195,7 @@ static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b * that just says that it is either a top or a bottom field, * but not which of the two it is. 
*/ - dprintk(1, "the field is incorrectly set to ALTERNATE for an output buffer\n"); + dprintk(q, 1, "the field is incorrectly set to ALTERNATE for an output buffer\n"); return -EINVAL; } vbuf->sequence = 0; @@ -395,23 +398,23 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *md int ret; if (b->type != q->type) { - dprintk(1, "%s: invalid buffer type\n", opname); + dprintk(q, 1, "%s: invalid buffer type\n", opname); return -EINVAL; } if (b->index >= q->num_buffers) { - dprintk(1, "%s: buffer index out of range\n", opname); + dprintk(q, 1, "%s: buffer index out of range\n", opname); return -EINVAL; } if (q->bufs[b->index] == NULL) { /* Should never happen */ - dprintk(1, "%s: buffer is NULL\n", opname); + dprintk(q, 1, "%s: buffer is NULL\n", opname); return -EINVAL; } if (b->memory != q->memory) { - dprintk(1, "%s: invalid memory type\n", opname); + dprintk(q, 1, "%s: invalid memory type\n", opname); return -EINVAL; } @@ -423,7 +426,7 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *md if (!is_prepare && (b->flags & V4L2_BUF_FLAG_REQUEST_FD) && vb->state != VB2_BUF_STATE_DEQUEUED) { - dprintk(1, "%s: buffer is not in dequeued state\n", opname); + dprintk(q, 1, "%s: buffer is not in dequeued state\n", opname); return -EINVAL; } @@ -442,19 +445,19 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *md if (!(b->flags & V4L2_BUF_FLAG_REQUEST_FD)) { if (q->requires_requests) { - dprintk(1, "%s: queue requires requests\n", opname); + dprintk(q, 1, "%s: queue requires requests\n", opname); return -EBADR; } if (q->uses_requests) { - dprintk(1, "%s: queue uses requests\n", opname); + dprintk(q, 1, "%s: queue uses requests\n", opname); return -EBUSY; } return 0; } else if (!q->supports_requests) { - dprintk(1, "%s: queue does not support requests\n", opname); + dprintk(q, 1, "%s: queue does not support requests\n", opname); return -EBADR; } else if (q->uses_qbuf) { - dprintk(1, "%s: queue does not use requests\n", opname); + dprintk(q, 1, "%s: queue does not use requests\n", opname); return -EBUSY; } @@ -484,13 +487,13 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *md return -EINVAL; if (b->request_fd < 0) { - dprintk(1, "%s: request_fd < 0\n", opname); + dprintk(q, 1, "%s: request_fd < 0\n", opname); return -EINVAL; } req = media_request_get_by_fd(mdev, b->request_fd); if (IS_ERR(req)) { - dprintk(1, "%s: invalid request_fd\n", opname); + dprintk(q, 1, "%s: invalid request_fd\n", opname); return PTR_ERR(req); } @@ -500,7 +503,7 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *md */ if (req->state != MEDIA_REQUEST_STATE_IDLE && req->state != MEDIA_REQUEST_STATE_UPDATING) { - dprintk(1, "%s: request is not idle\n", opname); + dprintk(q, 1, "%s: request is not idle\n", opname); media_request_put(req); return -EBUSY; } @@ -683,12 +686,12 @@ int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b) int ret; if (b->type != q->type) { - dprintk(1, "wrong buffer type\n"); + dprintk(q, 1, "wrong buffer type\n"); return -EINVAL; } if (b->index >= q->num_buffers) { - dprintk(1, "buffer index out of range\n"); + dprintk(q, 1, "buffer index out of range\n"); return -EINVAL; } vb = q->bufs[b->index]; @@ -743,7 +746,7 @@ int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev, int ret; if (vb2_fileio_is_active(q)) { - dprintk(1, "file io in progress\n"); + dprintk(q, 1, "file io in progress\n"); return -EBUSY; } @@ -823,7 +826,7 @@ int 
vb2_qbuf(struct vb2_queue *q, struct media_device *mdev, int ret; if (vb2_fileio_is_active(q)) { - dprintk(1, "file io in progress\n"); + dprintk(q, 1, "file io in progress\n"); return -EBUSY; } @@ -842,12 +845,12 @@ int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking) int ret; if (vb2_fileio_is_active(q)) { - dprintk(1, "file io in progress\n"); + dprintk(q, 1, "file io in progress\n"); return -EBUSY; } if (b->type != q->type) { - dprintk(1, "invalid buffer type\n"); + dprintk(q, 1, "invalid buffer type\n"); return -EINVAL; } @@ -871,7 +874,7 @@ EXPORT_SYMBOL_GPL(vb2_dqbuf); int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type) { if (vb2_fileio_is_active(q)) { - dprintk(1, "file io in progress\n"); + dprintk(q, 1, "file io in progress\n"); return -EBUSY; } return vb2_core_streamon(q, type); @@ -881,7 +884,7 @@ EXPORT_SYMBOL_GPL(vb2_streamon); int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type) { if (vb2_fileio_is_active(q)) { - dprintk(1, "file io in progress\n"); + dprintk(q, 1, "file io in progress\n"); return -EBUSY; } return vb2_core_streamoff(q, type); @@ -895,7 +898,7 @@ int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb) } EXPORT_SYMBOL_GPL(vb2_expbuf); -int vb2_queue_init(struct vb2_queue *q) +int vb2_queue_init_name(struct vb2_queue *q, const char *name) { /* * Sanity check @@ -931,8 +934,19 @@ int vb2_queue_init(struct vb2_queue *q) */ q->quirk_poll_must_check_waiting_for_buffers = true; + if (name) + strscpy(q->name, name, sizeof(q->name)); + else + q->name[0] = '\0'; + return vb2_core_queue_init(q); } +EXPORT_SYMBOL_GPL(vb2_queue_init_name); + +int vb2_queue_init(struct vb2_queue *q) +{ + return vb2_queue_init_name(q, NULL); +} EXPORT_SYMBOL_GPL(vb2_queue_init); void vb2_queue_release(struct vb2_queue *q) diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index 9e68fe043a6c..52ef92049073 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -561,6 +561,8 @@ struct vb2_buf_ops { * when a buffer with the %V4L2_BUF_FLAG_LAST is dequeued. * @fileio: file io emulator internal data, used only if emulator is active * @threadio: thread io internal data, used only if thread is active + * @name: queue name, used for logging purpose. Initialized automatically + * if left empty by drivers. */ struct vb2_queue { unsigned int type; @@ -622,6 +624,8 @@ struct vb2_queue { struct vb2_fileio_data *fileio; struct vb2_threadio_data *threadio; + char name[32]; + #ifdef CONFIG_VIDEO_ADV_DEBUG /* * Counters for how often these queue-related ops are diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h index 59bf33a12648..b7b5a9cb5a28 100644 --- a/include/media/videobuf2-v4l2.h +++ b/include/media/videobuf2-v4l2.h @@ -236,6 +236,19 @@ int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type); */ int __must_check vb2_queue_init(struct vb2_queue *q); +/** + * vb2_queue_init_name() - initialize a videobuf2 queue with a name + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @name: the queue name + * + * This function initializes the vb2_queue exactly like vb2_queue_init(), + * and additionally sets the queue name. The queue name is used for logging + * purpose, and should uniquely identify the queue within the context of the + * device it belongs to. This is useful to attribute kernel log messages to the + * right queue for m2m devices or other devices that handle multiple queues. 
+ */ +int __must_check vb2_queue_init_name(struct vb2_queue *q, const char *name); + /** * vb2_queue_release() - stop streaming, release the queue and free memory * @q: pointer to &struct vb2_queue with videobuf2 queue. -- cgit v1.2.3 From 5fec25f2cb959cb5f189d7f6127bee3efc782530 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Wed, 24 Jun 2020 16:34:57 -0500 Subject: umh: Capture the pid in umh_pipe_setup The pid in struct subprocess_info is only used by umh_clean_and_save_pid to write the pid into umh_info. Instead always capture the pid on struct umh_info in umh_pipe_setup, removing code that is specific to user mode drivers from the common user path of user mode helpers. v1: https://lkml.kernel.org/r/87h7uygf9i.fsf_-_@x220.int.ebiederm.org v2: https://lkml.kernel.org/r/875zb97iix.fsf_-_@x220.int.ebiederm.org Link: https://lkml.kernel.org/r/20200702164140.4468-1-ebiederm@xmission.com Reviewed-by: Greg Kroah-Hartman Acked-by: Alexei Starovoitov Tested-by: Alexei Starovoitov Signed-off-by: "Eric W. Biederman" --- include/linux/umh.h | 1 - kernel/umh.c | 5 ++--- 2 files changed, 2 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/umh.h b/include/linux/umh.h index 0c08de356d0d..aae16a0ebd0f 100644 --- a/include/linux/umh.h +++ b/include/linux/umh.h @@ -25,7 +25,6 @@ struct subprocess_info { struct file *file; int wait; int retval; - pid_t pid; int (*init)(struct subprocess_info *info, struct cred *new); void (*cleanup)(struct subprocess_info *info); void *data; diff --git a/kernel/umh.c b/kernel/umh.c index 79f139a7ca03..c2a582b3a2bf 100644 --- a/kernel/umh.c +++ b/kernel/umh.c @@ -102,7 +102,6 @@ static int call_usermodehelper_exec_async(void *data) commit_creds(new); - sub_info->pid = task_pid_nr(current); if (sub_info->file) { retval = do_execve_file(sub_info->file, sub_info->argv, sub_info->envp); @@ -468,6 +467,7 @@ static int umh_pipe_setup(struct subprocess_info *info, struct cred *new) umh_info->pipe_to_umh = to_umh[1]; umh_info->pipe_from_umh = from_umh[0]; + umh_info->pid = task_pid_nr(current); return 0; } @@ -476,13 +476,12 @@ static void umh_clean_and_save_pid(struct subprocess_info *info) struct umh_info *umh_info = info->data; /* cleanup if umh_pipe_setup() was successful but exec failed */ - if (info->pid && info->retval) { + if (info->retval) { fput(umh_info->pipe_to_umh); fput(umh_info->pipe_from_umh); } argv_free(info->argv); - umh_info->pid = info->pid; } /** -- cgit v1.2.3 From 21d598280675c463ea1b264fab06e9614aacd1e1 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Wed, 24 Jun 2020 17:01:18 -0500 Subject: umh: Remove call_usermodehelper_setup_file. The only caller of call_usermodehelper_setup_file is fork_usermode_blob. In fork_usermode_blob replace call_usermodehelper_setup_file with call_usermodehelper_setup and delete fork_usermodehelper_setup_file. For this to work the argv_free is moved from umh_clean_and_save_pid to fork_usermode_blob. v1: https://lkml.kernel.org/r/87zh8qf0mp.fsf_-_@x220.int.ebiederm.org v2: https://lkml.kernel.org/r/87o8p163u1.fsf_-_@x220.int.ebiederm.org Link: https://lkml.kernel.org/r/20200702164140.4468-4-ebiederm@xmission.com Reviewed-by: Greg Kroah-Hartman Acked-by: Alexei Starovoitov Tested-by: Alexei Starovoitov Signed-off-by: "Eric W. 
Biederman" --- include/linux/umh.h | 3 --- kernel/umh.c | 42 +++++++++++------------------------------- 2 files changed, 11 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/include/linux/umh.h b/include/linux/umh.h index aae16a0ebd0f..de08af00c68a 100644 --- a/include/linux/umh.h +++ b/include/linux/umh.h @@ -39,9 +39,6 @@ call_usermodehelper_setup(const char *path, char **argv, char **envp, int (*init)(struct subprocess_info *info, struct cred *new), void (*cleanup)(struct subprocess_info *), void *data); -struct subprocess_info *call_usermodehelper_setup_file(struct file *file, - int (*init)(struct subprocess_info *info, struct cred *new), - void (*cleanup)(struct subprocess_info *), void *data); struct umh_info { const char *cmdline; struct file *pipe_to_umh; diff --git a/kernel/umh.c b/kernel/umh.c index 26c3d493f168..b8fa9b99b366 100644 --- a/kernel/umh.c +++ b/kernel/umh.c @@ -402,33 +402,6 @@ struct subprocess_info *call_usermodehelper_setup(const char *path, char **argv, } EXPORT_SYMBOL(call_usermodehelper_setup); -struct subprocess_info *call_usermodehelper_setup_file(struct file *file, - int (*init)(struct subprocess_info *info, struct cred *new), - void (*cleanup)(struct subprocess_info *info), void *data) -{ - struct subprocess_info *sub_info; - struct umh_info *info = data; - const char *cmdline = (info->cmdline) ? info->cmdline : "usermodehelper"; - - sub_info = kzalloc(sizeof(struct subprocess_info), GFP_KERNEL); - if (!sub_info) - return NULL; - - sub_info->argv = argv_split(GFP_KERNEL, cmdline, NULL); - if (!sub_info->argv) { - kfree(sub_info); - return NULL; - } - - INIT_WORK(&sub_info->work, call_usermodehelper_exec_work); - sub_info->path = "none"; - sub_info->file = file; - sub_info->init = init; - sub_info->cleanup = cleanup; - sub_info->data = data; - return sub_info; -} - static int umd_setup(struct subprocess_info *info, struct cred *new) { struct umh_info *umh_info = info->data; @@ -479,8 +452,6 @@ static void umd_cleanup(struct subprocess_info *info) fput(umh_info->pipe_to_umh); fput(umh_info->pipe_from_umh); } - - argv_free(info->argv); } /** @@ -501,7 +472,9 @@ static void umd_cleanup(struct subprocess_info *info) */ int fork_usermode_blob(void *data, size_t len, struct umh_info *info) { + const char *cmdline = (info->cmdline) ? info->cmdline : "usermodehelper"; struct subprocess_info *sub_info; + char **argv = NULL; struct file *file; ssize_t written; loff_t pos = 0; @@ -520,11 +493,16 @@ int fork_usermode_blob(void *data, size_t len, struct umh_info *info) } err = -ENOMEM; - sub_info = call_usermodehelper_setup_file(file, umd_setup, umd_cleanup, - info); + argv = argv_split(GFP_KERNEL, cmdline, NULL); + if (!argv) + goto out; + + sub_info = call_usermodehelper_setup("none", argv, NULL, GFP_KERNEL, + umd_setup, umd_cleanup, info); if (!sub_info) goto out; + sub_info->file = file; err = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC); if (!err) { mutex_lock(&umh_list_lock); @@ -532,6 +510,8 @@ int fork_usermode_blob(void *data, size_t len, struct umh_info *info) mutex_unlock(&umh_list_lock); } out: + if (argv) + argv_free(argv); fput(file); return err; } -- cgit v1.2.3 From 884c5e683b67dbc52892e24c29eed864f330ec08 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 26 Jun 2020 12:23:00 -0500 Subject: umh: Separate the user mode driver and the user mode helper support This makes it clear which code is part of the core user mode helper support and which code is needed to implement user mode drivers. 
This makes the kernel smaller for everyone who does not use a usermode driver. v1: https://lkml.kernel.org/r/87tuyyf0ln.fsf_-_@x220.int.ebiederm.org v2: https://lkml.kernel.org/r/87imf963s6.fsf_-_@x220.int.ebiederm.org Link: https://lkml.kernel.org/r/20200702164140.4468-5-ebiederm@xmission.com Reviewed-by: Greg Kroah-Hartman Acked-by: Alexei Starovoitov Tested-by: Alexei Starovoitov Signed-off-by: "Eric W. Biederman" --- include/linux/bpfilter.h | 2 +- include/linux/sched.h | 8 --- include/linux/umh.h | 10 --- include/linux/usermode_driver.h | 30 +++++++++ kernel/Makefile | 1 + kernel/exit.c | 1 + kernel/umh.c | 139 -------------------------------------- kernel/usermode_driver.c | 146 ++++++++++++++++++++++++++++++++++++++++ 8 files changed, 179 insertions(+), 158 deletions(-) create mode 100644 include/linux/usermode_driver.h create mode 100644 kernel/usermode_driver.c (limited to 'include') diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h index d815622cd31e..d6d6206052a6 100644 --- a/include/linux/bpfilter.h +++ b/include/linux/bpfilter.h @@ -3,7 +3,7 @@ #define _LINUX_BPFILTER_H #include -#include +#include struct sock; int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, diff --git a/include/linux/sched.h b/include/linux/sched.h index b62e6aaf28f0..59d1e92bb88e 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2020,14 +2020,6 @@ static inline void rseq_execve(struct task_struct *t) #endif -void __exit_umh(struct task_struct *tsk); - -static inline void exit_umh(struct task_struct *tsk) -{ - if (unlikely(tsk->flags & PF_UMH)) - __exit_umh(tsk); -} - #ifdef CONFIG_DEBUG_RSEQ void rseq_syscall(struct pt_regs *regs); diff --git a/include/linux/umh.h b/include/linux/umh.h index de08af00c68a..73173c4a07e5 100644 --- a/include/linux/umh.h +++ b/include/linux/umh.h @@ -39,16 +39,6 @@ call_usermodehelper_setup(const char *path, char **argv, char **envp, int (*init)(struct subprocess_info *info, struct cred *new), void (*cleanup)(struct subprocess_info *), void *data); -struct umh_info { - const char *cmdline; - struct file *pipe_to_umh; - struct file *pipe_from_umh; - struct list_head list; - void (*cleanup)(struct umh_info *info); - pid_t pid; -}; -int fork_usermode_blob(void *data, size_t len, struct umh_info *info); - extern int call_usermodehelper_exec(struct subprocess_info *info, int wait); diff --git a/include/linux/usermode_driver.h b/include/linux/usermode_driver.h new file mode 100644 index 000000000000..c5f6dc950227 --- /dev/null +++ b/include/linux/usermode_driver.h @@ -0,0 +1,30 @@ +#ifndef __LINUX_USERMODE_DRIVER_H__ +#define __LINUX_USERMODE_DRIVER_H__ + +#include + +#ifdef CONFIG_BPFILTER +void __exit_umh(struct task_struct *tsk); + +static inline void exit_umh(struct task_struct *tsk) +{ + if (unlikely(tsk->flags & PF_UMH)) + __exit_umh(tsk); +} +#else +static inline void exit_umh(struct task_struct *tsk) +{ +} +#endif + +struct umh_info { + const char *cmdline; + struct file *pipe_to_umh; + struct file *pipe_from_umh; + struct list_head list; + void (*cleanup)(struct umh_info *info); + pid_t pid; +}; +int fork_usermode_blob(void *data, size_t len, struct umh_info *info); + +#endif /* __LINUX_USERMODE_DRIVER_H__ */ diff --git a/kernel/Makefile b/kernel/Makefile index f3218bc5ec69..43928759893a 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -12,6 +12,7 @@ obj-y = fork.o exec_domain.o panic.o \ notifier.o ksysfs.o cred.o reboot.o \ async.o range.o smpboot.o ucount.o +obj-$(CONFIG_BPFILTER) += usermode_driver.o 
obj-$(CONFIG_MODULES) += kmod.o obj-$(CONFIG_MULTIUSER) += groups.o diff --git a/kernel/exit.c b/kernel/exit.c index 727150f28103..a081deea52ca 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -63,6 +63,7 @@ #include #include #include +#include #include #include diff --git a/kernel/umh.c b/kernel/umh.c index b8fa9b99b366..3e4e453d45c8 100644 --- a/kernel/umh.c +++ b/kernel/umh.c @@ -26,8 +26,6 @@ #include #include #include -#include -#include #include @@ -38,8 +36,6 @@ static kernel_cap_t usermodehelper_bset = CAP_FULL_SET; static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET; static DEFINE_SPINLOCK(umh_sysctl_lock); static DECLARE_RWSEM(umhelper_sem); -static LIST_HEAD(umh_list); -static DEFINE_MUTEX(umh_list_lock); static void call_usermodehelper_freeinfo(struct subprocess_info *info) { @@ -402,121 +398,6 @@ struct subprocess_info *call_usermodehelper_setup(const char *path, char **argv, } EXPORT_SYMBOL(call_usermodehelper_setup); -static int umd_setup(struct subprocess_info *info, struct cred *new) -{ - struct umh_info *umh_info = info->data; - struct file *from_umh[2]; - struct file *to_umh[2]; - int err; - - /* create pipe to send data to umh */ - err = create_pipe_files(to_umh, 0); - if (err) - return err; - err = replace_fd(0, to_umh[0], 0); - fput(to_umh[0]); - if (err < 0) { - fput(to_umh[1]); - return err; - } - - /* create pipe to receive data from umh */ - err = create_pipe_files(from_umh, 0); - if (err) { - fput(to_umh[1]); - replace_fd(0, NULL, 0); - return err; - } - err = replace_fd(1, from_umh[1], 0); - fput(from_umh[1]); - if (err < 0) { - fput(to_umh[1]); - replace_fd(0, NULL, 0); - fput(from_umh[0]); - return err; - } - - umh_info->pipe_to_umh = to_umh[1]; - umh_info->pipe_from_umh = from_umh[0]; - umh_info->pid = task_pid_nr(current); - current->flags |= PF_UMH; - return 0; -} - -static void umd_cleanup(struct subprocess_info *info) -{ - struct umh_info *umh_info = info->data; - - /* cleanup if umh_setup() was successful but exec failed */ - if (info->retval) { - fput(umh_info->pipe_to_umh); - fput(umh_info->pipe_from_umh); - } -} - -/** - * fork_usermode_blob - fork a blob of bytes as a usermode process - * @data: a blob of bytes that can be do_execv-ed as a file - * @len: length of the blob - * @info: information about usermode process (shouldn't be NULL) - * - * If info->cmdline is set it will be used as command line for the - * user process, else "usermodehelper" is used. - * - * Returns either negative error or zero which indicates success - * in executing a blob of bytes as a usermode process. In such - * case 'struct umh_info *info' is populated with two pipes - * and a pid of the process. The caller is responsible for health - * check of the user process, killing it via pid, and closing the - * pipes when user process is no longer needed. - */ -int fork_usermode_blob(void *data, size_t len, struct umh_info *info) -{ - const char *cmdline = (info->cmdline) ? 
info->cmdline : "usermodehelper"; - struct subprocess_info *sub_info; - char **argv = NULL; - struct file *file; - ssize_t written; - loff_t pos = 0; - int err; - - file = shmem_kernel_file_setup("", len, 0); - if (IS_ERR(file)) - return PTR_ERR(file); - - written = kernel_write(file, data, len, &pos); - if (written != len) { - err = written; - if (err >= 0) - err = -ENOMEM; - goto out; - } - - err = -ENOMEM; - argv = argv_split(GFP_KERNEL, cmdline, NULL); - if (!argv) - goto out; - - sub_info = call_usermodehelper_setup("none", argv, NULL, GFP_KERNEL, - umd_setup, umd_cleanup, info); - if (!sub_info) - goto out; - - sub_info->file = file; - err = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC); - if (!err) { - mutex_lock(&umh_list_lock); - list_add(&info->list, &umh_list); - mutex_unlock(&umh_list_lock); - } -out: - if (argv) - argv_free(argv); - fput(file); - return err; -} -EXPORT_SYMBOL_GPL(fork_usermode_blob); - /** * call_usermodehelper_exec - start a usermode application * @sub_info: information about the subprocessa @@ -678,26 +559,6 @@ static int proc_cap_handler(struct ctl_table *table, int write, return 0; } -void __exit_umh(struct task_struct *tsk) -{ - struct umh_info *info; - pid_t pid = tsk->pid; - - mutex_lock(&umh_list_lock); - list_for_each_entry(info, &umh_list, list) { - if (info->pid == pid) { - list_del(&info->list); - mutex_unlock(&umh_list_lock); - goto out; - } - } - mutex_unlock(&umh_list_lock); - return; -out: - if (info->cleanup) - info->cleanup(info); -} - struct ctl_table usermodehelper_table[] = { { .procname = "bset", diff --git a/kernel/usermode_driver.c b/kernel/usermode_driver.c new file mode 100644 index 000000000000..5b05863af855 --- /dev/null +++ b/kernel/usermode_driver.c @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * umd - User mode driver support + */ +#include +#include +#include + +static LIST_HEAD(umh_list); +static DEFINE_MUTEX(umh_list_lock); + +static int umd_setup(struct subprocess_info *info, struct cred *new) +{ + struct umh_info *umh_info = info->data; + struct file *from_umh[2]; + struct file *to_umh[2]; + int err; + + /* create pipe to send data to umh */ + err = create_pipe_files(to_umh, 0); + if (err) + return err; + err = replace_fd(0, to_umh[0], 0); + fput(to_umh[0]); + if (err < 0) { + fput(to_umh[1]); + return err; + } + + /* create pipe to receive data from umh */ + err = create_pipe_files(from_umh, 0); + if (err) { + fput(to_umh[1]); + replace_fd(0, NULL, 0); + return err; + } + err = replace_fd(1, from_umh[1], 0); + fput(from_umh[1]); + if (err < 0) { + fput(to_umh[1]); + replace_fd(0, NULL, 0); + fput(from_umh[0]); + return err; + } + + umh_info->pipe_to_umh = to_umh[1]; + umh_info->pipe_from_umh = from_umh[0]; + umh_info->pid = task_pid_nr(current); + current->flags |= PF_UMH; + return 0; +} + +static void umd_cleanup(struct subprocess_info *info) +{ + struct umh_info *umh_info = info->data; + + /* cleanup if umh_setup() was successful but exec failed */ + if (info->retval) { + fput(umh_info->pipe_to_umh); + fput(umh_info->pipe_from_umh); + } +} + +/** + * fork_usermode_blob - fork a blob of bytes as a usermode process + * @data: a blob of bytes that can be do_execv-ed as a file + * @len: length of the blob + * @info: information about usermode process (shouldn't be NULL) + * + * If info->cmdline is set it will be used as command line for the + * user process, else "usermodehelper" is used. 
+ * + * Returns either negative error or zero which indicates success + * in executing a blob of bytes as a usermode process. In such + * case 'struct umh_info *info' is populated with two pipes + * and a pid of the process. The caller is responsible for health + * check of the user process, killing it via pid, and closing the + * pipes when user process is no longer needed. + */ +int fork_usermode_blob(void *data, size_t len, struct umh_info *info) +{ + const char *cmdline = (info->cmdline) ? info->cmdline : "usermodehelper"; + struct subprocess_info *sub_info; + char **argv = NULL; + struct file *file; + ssize_t written; + loff_t pos = 0; + int err; + + file = shmem_kernel_file_setup("", len, 0); + if (IS_ERR(file)) + return PTR_ERR(file); + + written = kernel_write(file, data, len, &pos); + if (written != len) { + err = written; + if (err >= 0) + err = -ENOMEM; + goto out; + } + + err = -ENOMEM; + argv = argv_split(GFP_KERNEL, cmdline, NULL); + if (!argv) + goto out; + + sub_info = call_usermodehelper_setup("none", argv, NULL, GFP_KERNEL, + umd_setup, umd_cleanup, info); + if (!sub_info) + goto out; + + sub_info->file = file; + err = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC); + if (!err) { + mutex_lock(&umh_list_lock); + list_add(&info->list, &umh_list); + mutex_unlock(&umh_list_lock); + } +out: + if (argv) + argv_free(argv); + fput(file); + return err; +} +EXPORT_SYMBOL_GPL(fork_usermode_blob); + +void __exit_umh(struct task_struct *tsk) +{ + struct umh_info *info; + pid_t pid = tsk->pid; + + mutex_lock(&umh_list_lock); + list_for_each_entry(info, &umh_list, list) { + if (info->pid == pid) { + list_del(&info->list); + mutex_unlock(&umh_list_lock); + goto out; + } + } + mutex_unlock(&umh_list_lock); + return; +out: + if (info->cleanup) + info->cleanup(info); +} + -- cgit v1.2.3 From 74be2d3b80af1bb264c3b9905b52c15efc03c0fe Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 26 Jun 2020 11:16:06 -0500 Subject: umd: For clarity rename umh_info umd_info This structure is only used for user mode drivers so change the prefix from umh to umd to make that clear. v1: https://lkml.kernel.org/r/87o8p6f0kw.fsf_-_@x220.int.ebiederm.org v2: https://lkml.kernel.org/r/878sg563po.fsf_-_@x220.int.ebiederm.org Link: https://lkml.kernel.org/r/20200702164140.4468-6-ebiederm@xmission.com Reviewed-by: Greg Kroah-Hartman Acked-by: Alexei Starovoitov Tested-by: Alexei Starovoitov Signed-off-by: "Eric W. 
Biederman" --- include/linux/bpfilter.h | 2 +- include/linux/usermode_driver.h | 6 +++--- kernel/usermode_driver.c | 20 ++++++++++---------- net/ipv4/bpfilter/sockopt.c | 2 +- 4 files changed, 15 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h index d6d6206052a6..ec9972d822e0 100644 --- a/include/linux/bpfilter.h +++ b/include/linux/bpfilter.h @@ -11,7 +11,7 @@ int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen); struct bpfilter_umh_ops { - struct umh_info info; + struct umd_info info; /* since ip_getsockopt() can run in parallel, serialize access to umh */ struct mutex lock; int (*sockopt)(struct sock *sk, int optname, diff --git a/include/linux/usermode_driver.h b/include/linux/usermode_driver.h index c5f6dc950227..7131ea611bab 100644 --- a/include/linux/usermode_driver.h +++ b/include/linux/usermode_driver.h @@ -17,14 +17,14 @@ static inline void exit_umh(struct task_struct *tsk) } #endif -struct umh_info { +struct umd_info { const char *cmdline; struct file *pipe_to_umh; struct file *pipe_from_umh; struct list_head list; - void (*cleanup)(struct umh_info *info); + void (*cleanup)(struct umd_info *info); pid_t pid; }; -int fork_usermode_blob(void *data, size_t len, struct umh_info *info); +int fork_usermode_blob(void *data, size_t len, struct umd_info *info); #endif /* __LINUX_USERMODE_DRIVER_H__ */ diff --git a/kernel/usermode_driver.c b/kernel/usermode_driver.c index 5b05863af855..e73550e946d6 100644 --- a/kernel/usermode_driver.c +++ b/kernel/usermode_driver.c @@ -11,7 +11,7 @@ static DEFINE_MUTEX(umh_list_lock); static int umd_setup(struct subprocess_info *info, struct cred *new) { - struct umh_info *umh_info = info->data; + struct umd_info *umd_info = info->data; struct file *from_umh[2]; struct file *to_umh[2]; int err; @@ -43,21 +43,21 @@ static int umd_setup(struct subprocess_info *info, struct cred *new) return err; } - umh_info->pipe_to_umh = to_umh[1]; - umh_info->pipe_from_umh = from_umh[0]; - umh_info->pid = task_pid_nr(current); + umd_info->pipe_to_umh = to_umh[1]; + umd_info->pipe_from_umh = from_umh[0]; + umd_info->pid = task_pid_nr(current); current->flags |= PF_UMH; return 0; } static void umd_cleanup(struct subprocess_info *info) { - struct umh_info *umh_info = info->data; + struct umd_info *umd_info = info->data; /* cleanup if umh_setup() was successful but exec failed */ if (info->retval) { - fput(umh_info->pipe_to_umh); - fput(umh_info->pipe_from_umh); + fput(umd_info->pipe_to_umh); + fput(umd_info->pipe_from_umh); } } @@ -72,12 +72,12 @@ static void umd_cleanup(struct subprocess_info *info) * * Returns either negative error or zero which indicates success * in executing a blob of bytes as a usermode process. In such - * case 'struct umh_info *info' is populated with two pipes + * case 'struct umd_info *info' is populated with two pipes * and a pid of the process. The caller is responsible for health * check of the user process, killing it via pid, and closing the * pipes when user process is no longer needed. */ -int fork_usermode_blob(void *data, size_t len, struct umh_info *info) +int fork_usermode_blob(void *data, size_t len, struct umd_info *info) { const char *cmdline = (info->cmdline) ? 
info->cmdline : "usermodehelper"; struct subprocess_info *sub_info; @@ -126,7 +126,7 @@ EXPORT_SYMBOL_GPL(fork_usermode_blob); void __exit_umh(struct task_struct *tsk) { - struct umh_info *info; + struct umd_info *info; pid_t pid = tsk->pid; mutex_lock(&umh_list_lock); diff --git a/net/ipv4/bpfilter/sockopt.c b/net/ipv4/bpfilter/sockopt.c index 0480918bfc7c..c0dbcc86fcdb 100644 --- a/net/ipv4/bpfilter/sockopt.c +++ b/net/ipv4/bpfilter/sockopt.c @@ -12,7 +12,7 @@ struct bpfilter_umh_ops bpfilter_ops; EXPORT_SYMBOL_GPL(bpfilter_ops); -static void bpfilter_umh_cleanup(struct umh_info *info) +static void bpfilter_umh_cleanup(struct umd_info *info) { mutex_lock(&bpfilter_ops.lock); bpfilter_ops.stop = true; -- cgit v1.2.3 From 1199c6c3da5197e9924a906b9de71b8d0ac62a01 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 25 Jun 2020 11:38:08 -0500 Subject: umd: Rename umd_info.cmdline umd_info.driver_name The only thing supplied in the cmdline today is the driver name so rename the field to clarify the code. As this value is always supplied stop trying to handle the case of a NULL cmdline. Additionally since we now have a name we can count on use the driver_name any place where the code is looking for a name of the binary. v1: https://lkml.kernel.org/r/87imfef0k3.fsf_-_@x220.int.ebiederm.org v2: https://lkml.kernel.org/r/87366d63os.fsf_-_@x220.int.ebiederm.org Link: https://lkml.kernel.org/r/20200702164140.4468-7-ebiederm@xmission.com Reviewed-by: Greg Kroah-Hartman Acked-by: Alexei Starovoitov Tested-by: Alexei Starovoitov Signed-off-by: "Eric W. Biederman" --- include/linux/usermode_driver.h | 2 +- kernel/usermode_driver.c | 11 ++++------- net/ipv4/bpfilter/sockopt.c | 2 +- 3 files changed, 6 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/linux/usermode_driver.h b/include/linux/usermode_driver.h index 7131ea611bab..48cf25e3145d 100644 --- a/include/linux/usermode_driver.h +++ b/include/linux/usermode_driver.h @@ -18,7 +18,7 @@ static inline void exit_umh(struct task_struct *tsk) #endif struct umd_info { - const char *cmdline; + const char *driver_name; struct file *pipe_to_umh; struct file *pipe_from_umh; struct list_head list; diff --git a/kernel/usermode_driver.c b/kernel/usermode_driver.c index e73550e946d6..46d60d855e93 100644 --- a/kernel/usermode_driver.c +++ b/kernel/usermode_driver.c @@ -67,9 +67,6 @@ static void umd_cleanup(struct subprocess_info *info) * @len: length of the blob * @info: information about usermode process (shouldn't be NULL) * - * If info->cmdline is set it will be used as command line for the - * user process, else "usermodehelper" is used. - * * Returns either negative error or zero which indicates success * in executing a blob of bytes as a usermode process. In such * case 'struct umd_info *info' is populated with two pipes @@ -79,7 +76,6 @@ static void umd_cleanup(struct subprocess_info *info) */ int fork_usermode_blob(void *data, size_t len, struct umd_info *info) { - const char *cmdline = (info->cmdline) ? 
info->cmdline : "usermodehelper"; struct subprocess_info *sub_info; char **argv = NULL; struct file *file; @@ -87,7 +83,7 @@ int fork_usermode_blob(void *data, size_t len, struct umd_info *info) loff_t pos = 0; int err; - file = shmem_kernel_file_setup("", len, 0); + file = shmem_kernel_file_setup(info->driver_name, len, 0); if (IS_ERR(file)) return PTR_ERR(file); @@ -100,11 +96,12 @@ int fork_usermode_blob(void *data, size_t len, struct umd_info *info) } err = -ENOMEM; - argv = argv_split(GFP_KERNEL, cmdline, NULL); + argv = argv_split(GFP_KERNEL, info->driver_name, NULL); if (!argv) goto out; - sub_info = call_usermodehelper_setup("none", argv, NULL, GFP_KERNEL, + sub_info = call_usermodehelper_setup(info->driver_name, argv, NULL, + GFP_KERNEL, umd_setup, umd_cleanup, info); if (!sub_info) goto out; diff --git a/net/ipv4/bpfilter/sockopt.c b/net/ipv4/bpfilter/sockopt.c index c0dbcc86fcdb..5050de28333d 100644 --- a/net/ipv4/bpfilter/sockopt.c +++ b/net/ipv4/bpfilter/sockopt.c @@ -70,7 +70,7 @@ static int __init bpfilter_sockopt_init(void) { mutex_init(&bpfilter_ops.lock); bpfilter_ops.stop = true; - bpfilter_ops.info.cmdline = "bpfilter_umh"; + bpfilter_ops.info.driver_name = "bpfilter_umh"; bpfilter_ops.info.cleanup = &bpfilter_umh_cleanup; return 0; -- cgit v1.2.3 From e2dc9bf3f5275ca372001541e5f26af572976e65 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 25 Jun 2020 13:12:59 -0500 Subject: umd: Transform fork_usermode_blob into fork_usermode_driver Instead of loading a binary blob into a temporary file with shmem_kernel_file_setup load a binary blob into a temporary tmpfs filesystem. This means that the blob can be stored in an init section and discared, and it means the binary blob will have a filename so can be executed normally. The only tricky thing about this code is that in the helper function blob_to_mnt __fput_sync is used. That is because a file can not be executed if it is still open for write, and the ordinary delayed close for kernel threads does not happen soon enough, which causes the following exec to fail. The function umd_load_blob is not called with any locks so this should be safe. Executing the blob normally winds up correcting several problems with the user mode driver code discovered by Tetsuo Handa[1]. By passing an ordinary filename into the exec, it is no longer necessary to figure out how to turn a O_RDWR file descriptor into a properly referende counted O_EXEC file descriptor that forbids all writes. For path based LSMs there are no new special cases. [1] https://lore.kernel.org/linux-fsdevel/2a8775b4-1dd5-9d5c-aa42-9872445e0942@i-love.sakura.ne.jp/ v1: https://lkml.kernel.org/r/87d05mf0j9.fsf_-_@x220.int.ebiederm.org v2: https://lkml.kernel.org/r/87wo3p4p35.fsf_-_@x220.int.ebiederm.org Link: https://lkml.kernel.org/r/20200702164140.4468-8-ebiederm@xmission.com Reviewed-by: Greg Kroah-Hartman Acked-by: Alexei Starovoitov Tested-by: Alexei Starovoitov Signed-off-by: "Eric W. 
Biederman" --- include/linux/usermode_driver.h | 6 +- kernel/usermode_driver.c | 126 +++++++++++++++++++++++++++++++--------- net/bpfilter/bpfilter_kern.c | 14 ++++- 3 files changed, 113 insertions(+), 33 deletions(-) (limited to 'include') diff --git a/include/linux/usermode_driver.h b/include/linux/usermode_driver.h index 48cf25e3145d..97c919b7147c 100644 --- a/include/linux/usermode_driver.h +++ b/include/linux/usermode_driver.h @@ -2,6 +2,7 @@ #define __LINUX_USERMODE_DRIVER_H__ #include +#include #ifdef CONFIG_BPFILTER void __exit_umh(struct task_struct *tsk); @@ -23,8 +24,11 @@ struct umd_info { struct file *pipe_from_umh; struct list_head list; void (*cleanup)(struct umd_info *info); + struct path wd; pid_t pid; }; -int fork_usermode_blob(void *data, size_t len, struct umd_info *info); +int umd_load_blob(struct umd_info *info, const void *data, size_t len); +int umd_unload_blob(struct umd_info *info); +int fork_usermode_driver(struct umd_info *info); #endif /* __LINUX_USERMODE_DRIVER_H__ */ diff --git a/kernel/usermode_driver.c b/kernel/usermode_driver.c index 46d60d855e93..a86798759f83 100644 --- a/kernel/usermode_driver.c +++ b/kernel/usermode_driver.c @@ -4,11 +4,98 @@ */ #include #include +#include +#include +#include #include static LIST_HEAD(umh_list); static DEFINE_MUTEX(umh_list_lock); +static struct vfsmount *blob_to_mnt(const void *data, size_t len, const char *name) +{ + struct file_system_type *type; + struct vfsmount *mnt; + struct file *file; + ssize_t written; + loff_t pos = 0; + + type = get_fs_type("tmpfs"); + if (!type) + return ERR_PTR(-ENODEV); + + mnt = kern_mount(type); + put_filesystem(type); + if (IS_ERR(mnt)) + return mnt; + + file = file_open_root(mnt->mnt_root, mnt, name, O_CREAT | O_WRONLY, 0700); + if (IS_ERR(file)) { + mntput(mnt); + return ERR_CAST(file); + } + + written = kernel_write(file, data, len, &pos); + if (written != len) { + int err = written; + if (err >= 0) + err = -ENOMEM; + filp_close(file, NULL); + mntput(mnt); + return ERR_PTR(err); + } + + fput(file); + + /* Flush delayed fput so exec can open the file read-only */ + flush_delayed_fput(); + task_work_run(); + return mnt; +} + +/** + * umd_load_blob - Remember a blob of bytes for fork_usermode_driver + * @info: information about usermode driver + * @data: a blob of bytes that can be executed as a file + * @len: The lentgh of the blob + * + */ +int umd_load_blob(struct umd_info *info, const void *data, size_t len) +{ + struct vfsmount *mnt; + + if (WARN_ON_ONCE(info->wd.dentry || info->wd.mnt)) + return -EBUSY; + + mnt = blob_to_mnt(data, len, info->driver_name); + if (IS_ERR(mnt)) + return PTR_ERR(mnt); + + info->wd.mnt = mnt; + info->wd.dentry = mnt->mnt_root; + return 0; +} +EXPORT_SYMBOL_GPL(umd_load_blob); + +/** + * umd_unload_blob - Disassociate @info from a previously loaded blob + * @info: information about usermode driver + * + */ +int umd_unload_blob(struct umd_info *info) +{ + if (WARN_ON_ONCE(!info->wd.mnt || + !info->wd.dentry || + info->wd.mnt->mnt_root != info->wd.dentry)) + return -EINVAL; + + kern_unmount(info->wd.mnt); + info->wd.mnt = NULL; + info->wd.dentry = NULL; + return 0; +} +EXPORT_SYMBOL_GPL(umd_unload_blob); + static int umd_setup(struct subprocess_info *info, struct cred *new) { struct umd_info *umd_info = info->data; @@ -43,6 +130,7 @@ static int umd_setup(struct subprocess_info *info, struct cred *new) return err; } + set_fs_pwd(current->fs, &umd_info->wd); umd_info->pipe_to_umh = to_umh[1]; umd_info->pipe_from_umh = from_umh[0]; umd_info->pid = 
task_pid_nr(current); @@ -62,39 +150,21 @@ static void umd_cleanup(struct subprocess_info *info) } /** - * fork_usermode_blob - fork a blob of bytes as a usermode process - * @data: a blob of bytes that can be do_execv-ed as a file - * @len: length of the blob - * @info: information about usermode process (shouldn't be NULL) + * fork_usermode_driver - fork a usermode driver + * @info: information about usermode driver (shouldn't be NULL) * - * Returns either negative error or zero which indicates success - * in executing a blob of bytes as a usermode process. In such - * case 'struct umd_info *info' is populated with two pipes - * and a pid of the process. The caller is responsible for health - * check of the user process, killing it via pid, and closing the - * pipes when user process is no longer needed. + * Returns either negative error or zero which indicates success in + * executing a usermode driver. In such case 'struct umd_info *info' + * is populated with two pipes and a pid of the process. The caller is + * responsible for health check of the user process, killing it via + * pid, and closing the pipes when user process is no longer needed. */ -int fork_usermode_blob(void *data, size_t len, struct umd_info *info) +int fork_usermode_driver(struct umd_info *info) { struct subprocess_info *sub_info; char **argv = NULL; - struct file *file; - ssize_t written; - loff_t pos = 0; int err; - file = shmem_kernel_file_setup(info->driver_name, len, 0); - if (IS_ERR(file)) - return PTR_ERR(file); - - written = kernel_write(file, data, len, &pos); - if (written != len) { - err = written; - if (err >= 0) - err = -ENOMEM; - goto out; - } - err = -ENOMEM; argv = argv_split(GFP_KERNEL, info->driver_name, NULL); if (!argv) @@ -106,7 +176,6 @@ int fork_usermode_blob(void *data, size_t len, struct umd_info *info) if (!sub_info) goto out; - sub_info->file = file; err = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC); if (!err) { mutex_lock(&umh_list_lock); @@ -116,10 +185,9 @@ int fork_usermode_blob(void *data, size_t len, struct umd_info *info) out: if (argv) argv_free(argv); - fput(file); return err; } -EXPORT_SYMBOL_GPL(fork_usermode_blob); +EXPORT_SYMBOL_GPL(fork_usermode_driver); void __exit_umh(struct task_struct *tsk) { diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c index c0f0990f30b6..28883b00609d 100644 --- a/net/bpfilter/bpfilter_kern.c +++ b/net/bpfilter/bpfilter_kern.c @@ -77,9 +77,7 @@ static int start_umh(void) int err; /* fork usermode process */ - err = fork_usermode_blob(&bpfilter_umh_start, - &bpfilter_umh_end - &bpfilter_umh_start, - &bpfilter_ops.info); + err = fork_usermode_driver(&bpfilter_ops.info); if (err) return err; bpfilter_ops.stop = false; @@ -98,6 +96,12 @@ static int __init load_umh(void) { int err; + err = umd_load_blob(&bpfilter_ops.info, + &bpfilter_umh_start, + &bpfilter_umh_end - &bpfilter_umh_start); + if (err) + return err; + mutex_lock(&bpfilter_ops.lock); if (!bpfilter_ops.stop) { err = -EFAULT; @@ -110,6 +114,8 @@ static int __init load_umh(void) } out: mutex_unlock(&bpfilter_ops.lock); + if (err) + umd_unload_blob(&bpfilter_ops.info); return err; } @@ -122,6 +128,8 @@ static void __exit fini_umh(void) bpfilter_ops.sockopt = NULL; } mutex_unlock(&bpfilter_ops.lock); + + umd_unload_blob(&bpfilter_ops.info); } module_init(load_umh); module_exit(fini_umh); -- cgit v1.2.3 From 55e6074e3fa67e1fb9ec140904db7e6cae6eda4b Mon Sep 17 00:00:00 2001 From: "Eric W. 
Biederman" Date: Thu, 25 Jun 2020 13:52:50 -0500 Subject: umh: Stop calling do_execve_file With the user mode driver code changed to not set subprocess_info.file there are no more users of subproces_info.file. Remove this field from struct subprocess_info and remove the only user in call_usermodehelper_exec_async that would call do_execve_file instead of do_execve if file was set. v1: https://lkml.kernel.org/r/877dvuf0i7.fsf_-_@x220.int.ebiederm.org v2: https://lkml.kernel.org/r/87r1tx4p2a.fsf_-_@x220.int.ebiederm.org Link: https://lkml.kernel.org/r/20200702164140.4468-9-ebiederm@xmission.com Reviewed-by: Greg Kroah-Hartman Acked-by: Alexei Starovoitov Tested-by: Alexei Starovoitov Signed-off-by: "Eric W. Biederman" --- include/linux/umh.h | 1 - kernel/umh.c | 10 +++------- 2 files changed, 3 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/umh.h b/include/linux/umh.h index 73173c4a07e5..244aff638220 100644 --- a/include/linux/umh.h +++ b/include/linux/umh.h @@ -22,7 +22,6 @@ struct subprocess_info { const char *path; char **argv; char **envp; - struct file *file; int wait; int retval; int (*init)(struct subprocess_info *info, struct cred *new); diff --git a/kernel/umh.c b/kernel/umh.c index 3e4e453d45c8..6ca2096298b9 100644 --- a/kernel/umh.c +++ b/kernel/umh.c @@ -98,13 +98,9 @@ static int call_usermodehelper_exec_async(void *data) commit_creds(new); - if (sub_info->file) - retval = do_execve_file(sub_info->file, - sub_info->argv, sub_info->envp); - else - retval = do_execve(getname_kernel(sub_info->path), - (const char __user *const __user *)sub_info->argv, - (const char __user *const __user *)sub_info->envp); + retval = do_execve(getname_kernel(sub_info->path), + (const char __user *const __user *)sub_info->argv, + (const char __user *const __user *)sub_info->envp); out: sub_info->retval = retval; /* -- cgit v1.2.3 From 25cf336de51b51a3e440e1893751f9532095eff0 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 25 Jun 2020 13:56:40 -0500 Subject: exec: Remove do_execve_file Now that the last callser has been removed remove this code from exec. For anyone thinking of resurrecing do_execve_file please note that the code was buggy in several fundamental ways. - It did not ensure the file it was passed was read-only and that deny_write_access had been called on it. Which subtlely breaks invaniants in exec. - The caller of do_execve_file was expected to hold and put a reference to the file, but an extra reference for use by exec was not taken so that when exec put it's reference to the file an underflow occured on the file reference count. - The point of the interface was so that a pathname did not need to exist. Which breaks pathname based LSMs. Tetsuo Handa originally reported these issues[1]. While it was clear that deny_write_access was missing the fundamental incompatibility with the passed in O_RDWR filehandle was not immediately recognized. All of these issues were fixed by modifying the usermode driver code to have a path, so it did not need this hack. Reported-by: Tetsuo Handa [1] https://lore.kernel.org/linux-fsdevel/2a8775b4-1dd5-9d5c-aa42-9872445e0942@i-love.sakura.ne.jp/ v1: https://lkml.kernel.org/r/871rm2f0hi.fsf_-_@x220.int.ebiederm.org v2: https://lkml.kernel.org/r/87lfk54p0m.fsf_-_@x220.int.ebiederm.org Link: https://lkml.kernel.org/r/20200702164140.4468-10-ebiederm@xmission.com Reviewed-by: Greg Kroah-Hartman Acked-by: Alexei Starovoitov Tested-by: Alexei Starovoitov Signed-off-by: "Eric W. 
Biederman" --- fs/exec.c | 38 +++++++++----------------------------- include/linux/binfmts.h | 1 - 2 files changed, 9 insertions(+), 30 deletions(-) (limited to 'include') diff --git a/fs/exec.c b/fs/exec.c index e6e8a9a70327..23dfbb820626 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1818,13 +1818,14 @@ static int exec_binprm(struct linux_binprm *bprm) /* * sys_execve() executes a new program. */ -static int __do_execve_file(int fd, struct filename *filename, - struct user_arg_ptr argv, - struct user_arg_ptr envp, - int flags, struct file *file) +static int do_execveat_common(int fd, struct filename *filename, + struct user_arg_ptr argv, + struct user_arg_ptr envp, + int flags) { char *pathbuf = NULL; struct linux_binprm *bprm; + struct file *file; struct files_struct *displaced; int retval; @@ -1863,8 +1864,7 @@ static int __do_execve_file(int fd, struct filename *filename, check_unsafe_exec(bprm); current->in_execve = 1; - if (!file) - file = do_open_execat(fd, filename, flags); + file = do_open_execat(fd, filename, flags); retval = PTR_ERR(file); if (IS_ERR(file)) goto out_unmark; @@ -1872,9 +1872,7 @@ static int __do_execve_file(int fd, struct filename *filename, sched_exec(); bprm->file = file; - if (!filename) { - bprm->filename = "none"; - } else if (fd == AT_FDCWD || filename->name[0] == '/') { + if (fd == AT_FDCWD || filename->name[0] == '/') { bprm->filename = filename->name; } else { if (filename->name[0] == '\0') @@ -1935,8 +1933,7 @@ static int __do_execve_file(int fd, struct filename *filename, task_numa_free(current, false); free_bprm(bprm); kfree(pathbuf); - if (filename) - putname(filename); + putname(filename); if (displaced) put_files_struct(displaced); return retval; @@ -1967,27 +1964,10 @@ out_files: if (displaced) reset_files_struct(displaced); out_ret: - if (filename) - putname(filename); + putname(filename); return retval; } -static int do_execveat_common(int fd, struct filename *filename, - struct user_arg_ptr argv, - struct user_arg_ptr envp, - int flags) -{ - return __do_execve_file(fd, filename, argv, envp, flags, NULL); -} - -int do_execve_file(struct file *file, void *__argv, void *__envp) -{ - struct user_arg_ptr argv = { .ptr.native = __argv }; - struct user_arg_ptr envp = { .ptr.native = __envp }; - - return __do_execve_file(AT_FDCWD, NULL, argv, envp, 0, file); -} - int do_execve(struct filename *filename, const char __user *const __user *__argv, const char __user *const __user *__envp) diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 4a20b7517dd0..7c27d7b57871 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -141,6 +141,5 @@ extern int do_execveat(int, struct filename *, const char __user * const __user *, const char __user * const __user *, int); -int do_execve_file(struct file *file, void *__argv, void *__envp); #endif /* _LINUX_BINFMTS_H */ -- cgit v1.2.3 From 1c340ead18ee4b4a84357abdef6d4f39ee08328b Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 25 Jun 2020 16:48:26 -0500 Subject: umd: Track user space drivers with struct pid Use struct pid instead of user space pid values that are prone to wrap araound. In addition track the entire thread group instead of just the first thread that is started by exec. There are no multi-threaded user mode drivers today but there is nothing preclucing user drivers from being multi-threaded, so it is just a good idea to track the entire process. Take a reference count on the tgid's in question to make it possible to remove exit_umh in a future change. 
As a struct pid is available directly use kill_pid_info. The prior process signalling code was iffy in using a userspace pid known to be in the initial pid namespace and then looking up it's task in whatever the current pid namespace is. It worked only because kernel threads always run in the initial pid namespace. As the tgid is now refcounted verify the tgid is NULL at the start of fork_usermode_driver to avoid the possibility of silent pid leaks. v1: https://lkml.kernel.org/r/87mu4qdlv2.fsf_-_@x220.int.ebiederm.org v2: https://lkml.kernel.org/r/a70l4oy8.fsf_-_@x220.int.ebiederm.org Link: https://lkml.kernel.org/r/20200702164140.4468-12-ebiederm@xmission.com Reviewed-by: Greg Kroah-Hartman Acked-by: Alexei Starovoitov Tested-by: Alexei Starovoitov Signed-off-by: "Eric W. Biederman" --- include/linux/usermode_driver.h | 2 +- kernel/exit.c | 3 ++- kernel/usermode_driver.c | 15 ++++++++++----- net/bpfilter/bpfilter_kern.c | 13 +++++-------- net/ipv4/bpfilter/sockopt.c | 3 ++- 5 files changed, 20 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/include/linux/usermode_driver.h b/include/linux/usermode_driver.h index 97c919b7147c..45adbffb31d9 100644 --- a/include/linux/usermode_driver.h +++ b/include/linux/usermode_driver.h @@ -25,7 +25,7 @@ struct umd_info { struct list_head list; void (*cleanup)(struct umd_info *info); struct path wd; - pid_t pid; + struct pid *tgid; }; int umd_load_blob(struct umd_info *info, const void *data, size_t len); int umd_unload_blob(struct umd_info *info); diff --git a/kernel/exit.c b/kernel/exit.c index a081deea52ca..d3294b611df1 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -805,7 +805,8 @@ void __noreturn do_exit(long code) exit_task_namespaces(tsk); exit_task_work(tsk); exit_thread(tsk); - exit_umh(tsk); + if (group_dead) + exit_umh(tsk); /* * Flush inherited counters to the parent - before the parent diff --git a/kernel/usermode_driver.c b/kernel/usermode_driver.c index a86798759f83..f77f8d7ce9e3 100644 --- a/kernel/usermode_driver.c +++ b/kernel/usermode_driver.c @@ -133,7 +133,7 @@ static int umd_setup(struct subprocess_info *info, struct cred *new) set_fs_pwd(current->fs, &umd_info->wd); umd_info->pipe_to_umh = to_umh[1]; umd_info->pipe_from_umh = from_umh[0]; - umd_info->pid = task_pid_nr(current); + umd_info->tgid = get_pid(task_tgid(current)); current->flags |= PF_UMH; return 0; } @@ -146,6 +146,8 @@ static void umd_cleanup(struct subprocess_info *info) if (info->retval) { fput(umd_info->pipe_to_umh); fput(umd_info->pipe_from_umh); + put_pid(umd_info->tgid); + umd_info->tgid = NULL; } } @@ -155,9 +157,9 @@ static void umd_cleanup(struct subprocess_info *info) * * Returns either negative error or zero which indicates success in * executing a usermode driver. In such case 'struct umd_info *info' - * is populated with two pipes and a pid of the process. The caller is + * is populated with two pipes and a tgid of the process. The caller is * responsible for health check of the user process, killing it via - * pid, and closing the pipes when user process is no longer needed. + * tgid, and closing the pipes when user process is no longer needed. 
*/ int fork_usermode_driver(struct umd_info *info) { @@ -165,6 +167,9 @@ int fork_usermode_driver(struct umd_info *info) char **argv = NULL; int err; + if (WARN_ON_ONCE(info->tgid)) + return -EBUSY; + err = -ENOMEM; argv = argv_split(GFP_KERNEL, info->driver_name, NULL); if (!argv) @@ -192,11 +197,11 @@ EXPORT_SYMBOL_GPL(fork_usermode_driver); void __exit_umh(struct task_struct *tsk) { struct umd_info *info; - pid_t pid = tsk->pid; + struct pid *tgid = task_tgid(tsk); mutex_lock(&umh_list_lock); list_for_each_entry(info, &umh_list, list) { - if (info->pid == pid) { + if (info->tgid == tgid) { list_del(&info->list); mutex_unlock(&umh_list_lock); goto out; diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c index 28883b00609d..08ea77c2b137 100644 --- a/net/bpfilter/bpfilter_kern.c +++ b/net/bpfilter/bpfilter_kern.c @@ -15,16 +15,13 @@ extern char bpfilter_umh_end; static void shutdown_umh(void) { - struct task_struct *tsk; + struct umd_info *info = &bpfilter_ops.info; + struct pid *tgid = info->tgid; if (bpfilter_ops.stop) return; - tsk = get_pid_task(find_vpid(bpfilter_ops.info.pid), PIDTYPE_PID); - if (tsk) { - send_sig(SIGKILL, tsk, 1); - put_task_struct(tsk); - } + kill_pid(tgid, SIGKILL, 1); } static void __stop_umh(void) @@ -48,7 +45,7 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname, req.cmd = optname; req.addr = (long __force __user)optval; req.len = optlen; - if (!bpfilter_ops.info.pid) + if (!bpfilter_ops.info.tgid) goto out; n = __kernel_write(bpfilter_ops.info.pipe_to_umh, &req, sizeof(req), &pos); @@ -81,7 +78,7 @@ static int start_umh(void) if (err) return err; bpfilter_ops.stop = false; - pr_info("Loaded bpfilter_umh pid %d\n", bpfilter_ops.info.pid); + pr_info("Loaded bpfilter_umh pid %d\n", pid_nr(bpfilter_ops.info.tgid)); /* health check that usermode process started correctly */ if (__bpfilter_process_sockopt(NULL, 0, NULL, 0, 0) != 0) { diff --git a/net/ipv4/bpfilter/sockopt.c b/net/ipv4/bpfilter/sockopt.c index 5050de28333d..56cbc43145f6 100644 --- a/net/ipv4/bpfilter/sockopt.c +++ b/net/ipv4/bpfilter/sockopt.c @@ -18,7 +18,8 @@ static void bpfilter_umh_cleanup(struct umd_info *info) bpfilter_ops.stop = true; fput(info->pipe_to_umh); fput(info->pipe_from_umh); - info->pid = 0; + put_pid(info->tgid); + info->tgid = NULL; mutex_unlock(&bpfilter_ops.lock); } -- cgit v1.2.3 From ff2a91127b374c75ae024b31d22f23ad49d16eb4 Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Sun, 24 May 2020 20:57:00 +0200 Subject: fork: remove do_fork() Now that all architectures have been switched to use _do_fork() and the new struct kernel_clone_args calling convention we can remove the legacy do_fork() helper completely. The calling convention used to be brittle and do_fork() didn't buy us anything. The only calling convention accepted should be based on struct kernel_clone_args going forward. It's cleaner and uniform. 
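As a rough illustration of that convention, here is a sketch of how a clone request is expressed through struct kernel_clone_args and _do_fork(); it is modeled on the kernel_thread() helper of this kernel generation rather than taken from this patch, and the function name is illustrative:

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task.h>

static pid_t example_spawn_kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct kernel_clone_args args = {
		.flags		= ((lower_32_bits(flags) | CLONE_VM |
				    CLONE_UNTRACED) & ~CSIGNAL),
		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
		/* For kernel threads, fn and its argument travel in the stack fields. */
		.stack		= (unsigned long)fn,
		.stack_size	= (unsigned long)arg,
	};

	return _do_fork(&args);
}

Every caller fills in only the fields it cares about, which is what keeps the convention uniform across fork(), clone3() and kernel threads.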
Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Al Viro Cc: "Matthew Wilcox (Oracle)" Cc: "Peter Zijlstra (Intel)" Signed-off-by: Christian Brauner --- include/linux/sched/task.h | 1 - kernel/fork.c | 25 +------------------------ 2 files changed, 1 insertion(+), 25 deletions(-) (limited to 'include') diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index ddce0ea515d1..9f03c44941fb 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -96,7 +96,6 @@ extern void exit_files(struct task_struct *); extern void exit_itimers(struct signal_struct *); extern long _do_fork(struct kernel_clone_args *kargs); -extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); struct task_struct *fork_idle(int); struct mm_struct *copy_init_mm(void); extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); diff --git a/kernel/fork.c b/kernel/fork.c index 9875aeb2ba41..0fd7eb1b38f9 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2493,29 +2493,6 @@ long _do_fork(struct kernel_clone_args *args) return nr; } -#ifndef CONFIG_HAVE_COPY_THREAD_TLS -/* For compatibility with architectures that call do_fork directly rather than - * using the syscall entry points below. */ -long do_fork(unsigned long clone_flags, - unsigned long stack_start, - unsigned long stack_size, - int __user *parent_tidptr, - int __user *child_tidptr) -{ - struct kernel_clone_args args = { - .flags = (lower_32_bits(clone_flags) & ~CSIGNAL), - .pidfd = parent_tidptr, - .child_tid = child_tidptr, - .parent_tid = parent_tidptr, - .exit_signal = (lower_32_bits(clone_flags) & CSIGNAL), - .stack = stack_start, - .stack_size = stack_size, - }; - - return _do_fork(&args); -} -#endif - /* * Create a kernel thread. */ @@ -2923,7 +2900,7 @@ static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp /* * unshare allows a process to 'unshare' part of the process * context which was originally shared using clone. copy_* - * functions used by do_fork() cannot be used here directly + * functions used by _do_fork() cannot be used here directly * because they modify an inactive task_struct that is being * constructed. Here we are modifying the current, active, * task_struct. -- cgit v1.2.3 From 140c8180eb7c7cbda399f64474788b86db72db32 Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Sun, 24 May 2020 23:34:20 +0200 Subject: arch: remove HAVE_COPY_THREAD_TLS All architectures support copy_thread_tls() now, so remove the legacy copy_thread() function and the HAVE_COPY_THREAD_TLS config option. Everyone uses the same process creation calling convention based on copy_thread_tls() and struct kernel_clone_args. This will make it easier to maintain the core process creation code under kernel/, simplifies the callpaths and makes the identical for all architectures. 
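For readers who have not seen the two hooks side by side, the practical difference is only that tls arrives as an ordinary C parameter instead of being extracted from pt_regs; roughly, as per-architecture prototypes of this kernel generation (shown for illustration only):

/* Legacy per-architecture hook (now removed): tls had to be dug out of pt_regs. */
int copy_thread(unsigned long clone_flags, unsigned long stack_start,
		unsigned long stack_size, struct task_struct *p);

/* Unified hook every architecture now implements: tls is a normal argument. */
int copy_thread_tls(unsigned long clone_flags, unsigned long stack_start,
		    unsigned long stack_size, struct task_struct *p,
		    unsigned long tls);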
Cc: linux-arch@vger.kernel.org Acked-by: Thomas Bogendoerfer Acked-by: Greentime Hu Acked-by: Geert Uytterhoeven Reviewed-by: Kees Cook Signed-off-by: Christian Brauner --- arch/Kconfig | 7 ------- arch/alpha/Kconfig | 1 - arch/arc/Kconfig | 1 - arch/arm/Kconfig | 1 - arch/arm64/Kconfig | 1 - arch/c6x/Kconfig | 1 - arch/csky/Kconfig | 1 - arch/h8300/Kconfig | 1 - arch/hexagon/Kconfig | 1 - arch/ia64/Kconfig | 1 - arch/m68k/Kconfig | 1 - arch/microblaze/Kconfig | 1 - arch/mips/Kconfig | 1 - arch/nds32/Kconfig | 1 - arch/nios2/Kconfig | 1 - arch/openrisc/Kconfig | 1 - arch/parisc/Kconfig | 1 - arch/powerpc/Kconfig | 1 - arch/riscv/Kconfig | 1 - arch/s390/Kconfig | 1 - arch/sh/Kconfig | 1 - arch/sparc/Kconfig | 1 - arch/um/Kconfig | 1 - arch/unicore32/Kconfig | 1 - arch/x86/Kconfig | 1 - arch/xtensa/Kconfig | 1 - include/linux/sched/task.h | 15 +-------------- kernel/fork.c | 9 --------- 28 files changed, 1 insertion(+), 55 deletions(-) (limited to 'include') diff --git a/arch/Kconfig b/arch/Kconfig index 8cc35dc556c7..943aac2f3ebe 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -754,13 +754,6 @@ config ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT depends on MMU select ARCH_HAS_ELF_RANDOMIZE -config HAVE_COPY_THREAD_TLS - bool - help - Architecture provides copy_thread_tls to accept tls argument via - normal C parameter passing, rather than extracting the syscall - argument from pt_regs. - config HAVE_STACK_VALIDATION bool help diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index b01515c6b2ed..10862c5a8c76 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -38,7 +38,6 @@ config ALPHA select OLD_SIGSUSPEND select CPU_NO_EFFICIENT_FFS if !ALPHA_EV67 select MMU_GATHER_NO_RANGE - select HAVE_COPY_THREAD_TLS help The Alpha is a 64-bit general-purpose processor designed and marketed by the Digital Equipment Corporation of blessed memory, diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index fddc70029727..1fa0b98ed9ce 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -29,7 +29,6 @@ config ARC select GENERIC_SMP_IDLE_THREAD select HAVE_ARCH_KGDB select HAVE_ARCH_TRACEHOOK - select HAVE_COPY_THREAD_TLS select HAVE_DEBUG_STACKOVERFLOW select HAVE_DEBUG_KMEMLEAK select HAVE_FUTEX_CMPXCHG if FUTEX diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 2ac74904a3ce..445b5ed693f0 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -72,7 +72,6 @@ config ARM select HAVE_ARM_SMCCC if CPU_V7 select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32 select HAVE_CONTEXT_TRACKING - select HAVE_COPY_THREAD_TLS select HAVE_C_RECORDMCOUNT select HAVE_DEBUG_KMEMLEAK if !XIP_KERNEL select HAVE_DMA_CONTIGUOUS if MMU diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index a4a094bedcb2..de93e965727d 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -148,7 +148,6 @@ config ARM64 select HAVE_CMPXCHG_DOUBLE select HAVE_CMPXCHG_LOCAL select HAVE_CONTEXT_TRACKING - select HAVE_COPY_THREAD_TLS select HAVE_DEBUG_BUGVERBOSE select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_CONTIGUOUS diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig index 9cde76a5928e..6444ebfd06a6 100644 --- a/arch/c6x/Kconfig +++ b/arch/c6x/Kconfig @@ -22,7 +22,6 @@ config C6X select GENERIC_CLOCKEVENTS select MODULES_USE_ELF_RELA select MMU_GATHER_NO_RANGE if MMU - select HAVE_COPY_THREAD_TLS config MMU def_bool n diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig index bd31ab12f77d..902f1142d550 100644 --- a/arch/csky/Kconfig +++ b/arch/csky/Kconfig @@ -38,7 +38,6 @@ config CSKY select GX6605S_TIMER if CPU_CK610 select HAVE_ARCH_TRACEHOOK select 
HAVE_ARCH_AUDITSYSCALL - select HAVE_COPY_THREAD_TLS select HAVE_DEBUG_BUGVERBOSE select HAVE_DYNAMIC_FTRACE select HAVE_DYNAMIC_FTRACE_WITH_REGS diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig index de0eb417a0b9..d11666d538fe 100644 --- a/arch/h8300/Kconfig +++ b/arch/h8300/Kconfig @@ -26,7 +26,6 @@ config H8300 select HAVE_ARCH_HASH select CPU_NO_EFFICIENT_FFS select UACCESS_MEMCPY - select HAVE_COPY_THREAD_TLS config CPU_BIG_ENDIAN def_bool y diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index 19bc2f2ee331..667cfc511cf9 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig @@ -31,7 +31,6 @@ config HEXAGON select GENERIC_CLOCKEVENTS_BROADCAST select MODULES_USE_ELF_RELA select GENERIC_CPU_DEVICES - select HAVE_COPY_THREAD_TLS help Qualcomm Hexagon is a processor architecture designed for high performance and low power across a wide variety of applications. diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 1b6034b89a04..1fa2fe2ef053 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -55,7 +55,6 @@ config IA64 select HAVE_ARCH_AUDITSYSCALL select NEED_DMA_MAP_STATE select NEED_SG_DMA_LENGTH - select HAVE_COPY_THREAD_TLS select NUMA if !FLATMEM default y help diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 6ad6cdac74b3..6663f1741798 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -14,7 +14,6 @@ config M68K select HAVE_AOUT if MMU select HAVE_ASM_MODVERSIONS select HAVE_DEBUG_BUGVERBOSE - select HAVE_COPY_THREAD_TLS select GENERIC_IRQ_SHOW select GENERIC_ATOMIC64 select HAVE_UID16 diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index e3a211a41880..d262ac0c8714 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -46,7 +46,6 @@ config MICROBLAZE select CPU_NO_EFFICIENT_FFS select MMU_GATHER_NO_RANGE if MMU select SPARSE_IRQ - select HAVE_COPY_THREAD_TLS # Endianness selection choice diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 6fee1a133e9d..ca92c3ed2dc5 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -51,7 +51,6 @@ config MIPS select HAVE_CBPF_JIT if !64BIT && !CPU_MICROMIPS select HAVE_CONTEXT_TRACKING select HAVE_TIF_NOHZ - select HAVE_COPY_THREAD_TLS select HAVE_C_RECORDMCOUNT select HAVE_DEBUG_KMEMLEAK select HAVE_DEBUG_STACKOVERFLOW diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig index 7b6eaca81cce..e30298e99e1b 100644 --- a/arch/nds32/Kconfig +++ b/arch/nds32/Kconfig @@ -48,7 +48,6 @@ config NDS32 select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FTRACE_MCOUNT_RECORD select HAVE_DYNAMIC_FTRACE - select HAVE_COPY_THREAD_TLS help Andes(nds32) Linux support. 
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig index f9a05957a883..c6645141bb2a 100644 --- a/arch/nios2/Kconfig +++ b/arch/nios2/Kconfig @@ -27,7 +27,6 @@ config NIOS2 select USB_ARCH_HAS_HCD if USB_SUPPORT select CPU_NO_EFFICIENT_FFS select MMU_GATHER_NO_RANGE if MMU - select HAVE_COPY_THREAD_TLS config GENERIC_CSUM def_bool y diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index 8588996165ae..7e94fe37cb2f 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig @@ -16,7 +16,6 @@ config OPENRISC select HANDLE_DOMAIN_IRQ select GPIOLIB select HAVE_ARCH_TRACEHOOK - select HAVE_COPY_THREAD_TLS select SPARSE_IRQ select GENERIC_IRQ_CHIP select GENERIC_IRQ_PROBE diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 8e4c3708773d..2667eeb6c6f1 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -62,7 +62,6 @@ config PARISC select HAVE_FTRACE_MCOUNT_RECORD if HAVE_DYNAMIC_FTRACE select HAVE_KPROBES_ON_FTRACE select HAVE_DYNAMIC_FTRACE_WITH_REGS - select HAVE_COPY_THREAD_TLS help The PA-RISC microprocessor is designed by Hewlett-Packard and used diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 9fa23eb320ff..3b262d87e9c4 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -186,7 +186,6 @@ config PPC select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2) select HAVE_CONTEXT_TRACKING if PPC64 select HAVE_TIF_NOHZ if PPC64 - select HAVE_COPY_THREAD_TLS select HAVE_DEBUG_KMEMLEAK select HAVE_DEBUG_STACKOVERFLOW select HAVE_DYNAMIC_FTRACE diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 128192e14ff2..f6a3a2bea3d8 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -52,7 +52,6 @@ config RISCV select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK select HAVE_ASM_MODVERSIONS - select HAVE_COPY_THREAD_TLS select HAVE_DMA_CONTIGUOUS if MMU select HAVE_EBPF_JIT if MMU select HAVE_FUTEX_CMPXCHG if FUTEX diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index c7d7ede6300c..959969759453 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -136,7 +136,6 @@ config S390 select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES select HAVE_CMPXCHG_DOUBLE select HAVE_CMPXCHG_LOCAL - select HAVE_COPY_THREAD_TLS select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_CONTIGUOUS select HAVE_DYNAMIC_FTRACE diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index e10118d61ce7..9fc2b010e938 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -70,7 +70,6 @@ config SUPERH select ARCH_HIBERNATION_POSSIBLE if MMU select SPARSE_IRQ select HAVE_STACKPROTECTOR - select HAVE_COPY_THREAD_TLS help The SuperH is a RISC processor targeted for use in embedded systems and consumer electronics; it was also used in the Sega Dreamcast diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 66213c0cb557..5bf2dc163540 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -48,7 +48,6 @@ config SPARC select LOCKDEP_SMALL if LOCKDEP select NEED_DMA_MAP_STATE select NEED_SG_DMA_LENGTH - select HAVE_COPY_THREAD_TLS config SPARC32 def_bool !64BIT diff --git a/arch/um/Kconfig b/arch/um/Kconfig index 9318dc6d1a0c..ef69be17ff70 100644 --- a/arch/um/Kconfig +++ b/arch/um/Kconfig @@ -14,7 +14,6 @@ config UML select HAVE_FUTEX_CMPXCHG if FUTEX select HAVE_DEBUG_KMEMLEAK select HAVE_DEBUG_BUGVERBOSE - select HAVE_COPY_THREAD_TLS select GENERIC_IRQ_SHOW select GENERIC_CPU_DEVICES select GENERIC_CLOCKEVENTS diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig index 01451cf500d2..11ba1839d198 100644 --- 
a/arch/unicore32/Kconfig +++ b/arch/unicore32/Kconfig @@ -22,7 +22,6 @@ config UNICORE32 select MODULES_USE_ELF_REL select NEED_DMA_MAP_STATE select MMU_GATHER_NO_RANGE if MMU - select HAVE_COPY_THREAD_TLS help UniCore-32 is 32-bit Instruction Set Architecture, including a series of low-power-consumption RISC chip diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 6a0cc524882d..214b8bf39bbe 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -161,7 +161,6 @@ config X86 select HAVE_CMPXCHG_DOUBLE select HAVE_CMPXCHG_LOCAL select HAVE_CONTEXT_TRACKING if X86_64 - select HAVE_COPY_THREAD_TLS select HAVE_C_RECORDMCOUNT select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_CONTIGUOUS diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 3a9f1e80394a..b71ba910d92f 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -24,7 +24,6 @@ config XTENSA select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL select HAVE_ARCH_TRACEHOOK - select HAVE_COPY_THREAD_TLS select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_CONTIGUOUS select HAVE_EXIT_THREAD diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 9f03c44941fb..77cbe14c3034 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -65,22 +65,9 @@ extern void fork_init(void); extern void release_task(struct task_struct * p); -#ifdef CONFIG_HAVE_COPY_THREAD_TLS extern int copy_thread_tls(unsigned long, unsigned long, unsigned long, struct task_struct *, unsigned long); -#else -extern int copy_thread(unsigned long, unsigned long, unsigned long, - struct task_struct *); - -/* Architectures that haven't opted into copy_thread_tls get the tls argument - * via pt_regs, so ignore the tls argument passed via C. */ -static inline int copy_thread_tls( - unsigned long clone_flags, unsigned long sp, unsigned long arg, - struct task_struct *p, unsigned long tls) -{ - return copy_thread(clone_flags, sp, arg, p); -} -#endif + extern void flush_thread(void); #ifdef CONFIG_HAVE_EXIT_THREAD diff --git a/kernel/fork.c b/kernel/fork.c index 0fd7eb1b38f9..8e52e16a1b5e 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2577,15 +2577,6 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, #ifdef __ARCH_WANT_SYS_CLONE3 -/* - * copy_thread implementations handle CLONE_SETTLS by reading the TLS value from - * the registers containing the syscall arguments for clone. This doesn't work - * with clone3 since the TLS value is passed in clone_args instead. - */ -#ifndef CONFIG_HAVE_COPY_THREAD_TLS -#error clone3 requires copy_thread_tls support in arch -#endif - noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs, struct clone_args __user *uargs, size_t usize) -- cgit v1.2.3 From 714acdbd1c94e7e3ab90f6b6938f1ccb27b662f0 Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Thu, 11 Jun 2020 11:04:15 +0200 Subject: arch: rename copy_thread_tls() back to copy_thread() Now that HAVE_COPY_THREAD_TLS has been removed, rename copy_thread_tls() back simply copy_thread(). It's a simpler name, and doesn't imply that only tls is copied here. This finishes an outstanding chunk of internal process creation work since we've added clone3(). 
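For illustration only (not part of this series): a minimal sketch of what an architecture's copy_thread() looks like after the rename, using the unified prototype from include/linux/sched/task.h shown in the diff below. The body and the arch_set_tls() helper are hypothetical stand-ins; the point is that the TLS value now arrives as an explicit C parameter (clone3() supplies it from clone_args) rather than being fished out of pt_regs.

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p, unsigned long tls)
{
	struct pt_regs *childregs = task_pt_regs(p);

	/* child starts from a copy of the parent's register frame */
	*childregs = *current_pt_regs();

	if (clone_flags & CLONE_SETTLS)
		arch_set_tls(p, tls);	/* hypothetical per-arch helper */

	return 0;
}

The generic caller in kernel/fork.c correspondingly passes args->tls straight through, as the final hunk of this patch shows.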
Cc: linux-arch@vger.kernel.org Acked-by: Thomas Bogendoerfer A Acked-by: Stafford Horne Acked-by: Greentime Hu Acked-by: Geert Uytterhoeven A Reviewed-by: Kees Cook Signed-off-by: Christian Brauner --- arch/alpha/kernel/process.c | 6 +++--- arch/arc/kernel/process.c | 5 +++-- arch/arm/kernel/process.c | 5 ++--- arch/arm64/kernel/process.c | 2 +- arch/c6x/kernel/process.c | 6 +++--- arch/csky/kernel/process.c | 2 +- arch/h8300/kernel/process.c | 5 ++--- arch/hexagon/kernel/process.c | 4 ++-- arch/ia64/kernel/process.c | 7 +++---- arch/m68k/kernel/process.c | 5 ++--- arch/microblaze/kernel/process.c | 4 ++-- arch/mips/kernel/process.c | 5 +++-- arch/nds32/kernel/process.c | 5 ++--- arch/nios2/kernel/process.c | 4 ++-- arch/openrisc/kernel/process.c | 6 +++--- arch/parisc/kernel/process.c | 2 +- arch/powerpc/kernel/process.c | 2 +- arch/riscv/kernel/process.c | 4 ++-- arch/s390/kernel/process.c | 4 ++-- arch/sh/kernel/process_32.c | 4 ++-- arch/sparc/kernel/process.c | 6 +++--- arch/sparc/kernel/process_32.c | 5 ++--- arch/sparc/kernel/process_64.c | 5 ++--- arch/um/kernel/process.c | 2 +- arch/unicore32/kernel/process.c | 5 ++--- arch/x86/kernel/process.c | 4 ++-- arch/x86/kernel/unwind_frame.c | 2 +- arch/xtensa/kernel/process.c | 2 +- include/linux/sched/task.h | 4 ++-- kernel/fork.c | 3 +-- 30 files changed, 59 insertions(+), 66 deletions(-) (limited to 'include') diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index dfdb6b6ba61c..7462a7911002 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c @@ -233,9 +233,9 @@ release_thread(struct task_struct *dead_task) /* * Copy architecture-specific thread state */ -int copy_thread_tls(unsigned long clone_flags, unsigned long usp, - unsigned long kthread_arg, struct task_struct *p, - unsigned long tls) +int copy_thread(unsigned long clone_flags, unsigned long usp, + unsigned long kthread_arg, struct task_struct *p, + unsigned long tls) { extern void ret_from_fork(void); extern void ret_from_kernel_thread(void); diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c index 8c8e5172fecd..105420c23c8b 100644 --- a/arch/arc/kernel/process.c +++ b/arch/arc/kernel/process.c @@ -173,8 +173,9 @@ asmlinkage void ret_from_fork(void); * | user_r25 | * ------------------ <===== END of PAGE */ -int copy_thread_tls(unsigned long clone_flags, unsigned long usp, - unsigned long kthread_arg, struct task_struct *p, unsigned long tls) +int copy_thread(unsigned long clone_flags, unsigned long usp, + unsigned long kthread_arg, struct task_struct *p, + unsigned long tls) { struct pt_regs *c_regs; /* child's pt_regs */ unsigned long *childksp; /* to unwind out of __switch_to() */ diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 58eaa1f60e16..3395be186c7d 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -225,9 +225,8 @@ void release_thread(struct task_struct *dead_task) asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); -int -copy_thread_tls(unsigned long clone_flags, unsigned long stack_start, - unsigned long stk_sz, struct task_struct *p, unsigned long tls) +int copy_thread(unsigned long clone_flags, unsigned long stack_start, + unsigned long stk_sz, struct task_struct *p, unsigned long tls) { struct thread_info *thread = task_thread_info(p); struct pt_regs *childregs = task_pt_regs(p); diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 6089638c7d43..84ec630b8ab5 100644 --- a/arch/arm64/kernel/process.c +++ 
b/arch/arm64/kernel/process.c @@ -375,7 +375,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) asmlinkage void ret_from_fork(void) asm("ret_from_fork"); -int copy_thread_tls(unsigned long clone_flags, unsigned long stack_start, +int copy_thread(unsigned long clone_flags, unsigned long stack_start, unsigned long stk_sz, struct task_struct *p, unsigned long tls) { struct pt_regs *childregs = task_pt_regs(p); diff --git a/arch/c6x/kernel/process.c b/arch/c6x/kernel/process.c index afa3ea9a93aa..9f4fd6a40a10 100644 --- a/arch/c6x/kernel/process.c +++ b/arch/c6x/kernel/process.c @@ -104,9 +104,9 @@ void start_thread(struct pt_regs *regs, unsigned int pc, unsigned long usp) /* * Copy a new thread context in its stack. */ -int copy_thread_tls(unsigned long clone_flags, unsigned long usp, - unsigned long ustk_size, struct task_struct *p, - unsigned long tls) +int copy_thread(unsigned long clone_flags, unsigned long usp, + unsigned long ustk_size, struct task_struct *p, + unsigned long tls) { struct pt_regs *childregs; diff --git a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c index 8b3fad062ab2..28cfeaaf902a 100644 --- a/arch/csky/kernel/process.c +++ b/arch/csky/kernel/process.c @@ -40,7 +40,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk) return sw->r15; } -int copy_thread_tls(unsigned long clone_flags, +int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long kthread_arg, struct task_struct *p, diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c index ae23de4dcf42..83ce3caf7313 100644 --- a/arch/h8300/kernel/process.c +++ b/arch/h8300/kernel/process.c @@ -105,9 +105,8 @@ void flush_thread(void) { } -int copy_thread_tls(unsigned long clone_flags, unsigned long usp, - unsigned long topstk, struct task_struct *p, - unsigned long tls) +int copy_thread(unsigned long clone_flags, unsigned long usp, + unsigned long topstk, struct task_struct *p, unsigned long tls) { struct pt_regs *childregs; diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c index d756f9556dd7..d294e71d11d8 100644 --- a/arch/hexagon/kernel/process.c +++ b/arch/hexagon/kernel/process.c @@ -50,8 +50,8 @@ void arch_cpu_idle(void) /* * Copy architecture-specific thread state */ -int copy_thread_tls(unsigned long clone_flags, unsigned long usp, - unsigned long arg, struct task_struct *p, unsigned long tls) +int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, + struct task_struct *p, unsigned long tls) { struct thread_info *ti = task_thread_info(p); struct hexagon_switch_stack *ss; diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 416dca619da5..9cedb56c3fda 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -311,7 +311,7 @@ ia64_load_extra (struct task_struct *task) * * sys_clone : * _do_fork _do_fork - * copy_thread_tls copy_thread_tls + * copy_thread copy_thread * * This means that the stack layout is as follows: * @@ -333,9 +333,8 @@ ia64_load_extra (struct task_struct *task) * so there is nothing to worry about. 
*/ int -copy_thread_tls(unsigned long clone_flags, unsigned long user_stack_base, - unsigned long user_stack_size, struct task_struct *p, - unsigned long tls) +copy_thread(unsigned long clone_flags, unsigned long user_stack_base, + unsigned long user_stack_size, struct task_struct *p, unsigned long tls) { extern char ia64_ret_from_clone; struct switch_stack *child_stack, *stack; diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c index 0608439ba452..6492a2c54dbc 100644 --- a/arch/m68k/kernel/process.c +++ b/arch/m68k/kernel/process.c @@ -138,9 +138,8 @@ asmlinkage int m68k_clone3(struct pt_regs *regs) return sys_clone3((struct clone_args __user *)regs->d1, regs->d2); } -int copy_thread_tls(unsigned long clone_flags, unsigned long usp, - unsigned long arg, struct task_struct *p, - unsigned long tls) +int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, + struct task_struct *p, unsigned long tls) { struct fork_frame { struct switch_stack sw; diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c index c2ca9c326510..6cabeab9e2ba 100644 --- a/arch/microblaze/kernel/process.c +++ b/arch/microblaze/kernel/process.c @@ -54,8 +54,8 @@ void flush_thread(void) { } -int copy_thread_tls(unsigned long clone_flags, unsigned long usp, - unsigned long arg, struct task_struct *p, unsigned long tls) +int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, + struct task_struct *p, unsigned long tls) { struct pt_regs *childregs = task_pt_regs(p); struct thread_info *ti = task_thread_info(p); diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index ff5320b79100..f5dc316a826a 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -119,8 +119,9 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) /* * Copy architecture-specific thread state */ -int copy_thread_tls(unsigned long clone_flags, unsigned long usp, - unsigned long kthread_arg, struct task_struct *p, unsigned long tls) +int copy_thread(unsigned long clone_flags, unsigned long usp, + unsigned long kthread_arg, struct task_struct *p, + unsigned long tls) { struct thread_info *ti = task_thread_info(p); struct pt_regs *childregs, *regs = current_pt_regs(); diff --git a/arch/nds32/kernel/process.c b/arch/nds32/kernel/process.c index 7dbb1bf64165..e85bbbadc0e7 100644 --- a/arch/nds32/kernel/process.c +++ b/arch/nds32/kernel/process.c @@ -149,9 +149,8 @@ void flush_thread(void) DEFINE_PER_CPU(struct task_struct *, __entry_task); asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); -int copy_thread_tls(unsigned long clone_flags, unsigned long stack_start, - unsigned long stk_sz, struct task_struct *p, - unsigned long tls) +int copy_thread(unsigned long clone_flags, unsigned long stack_start, + unsigned long stk_sz, struct task_struct *p, unsigned long tls) { struct pt_regs *childregs = task_pt_regs(p); diff --git a/arch/nios2/kernel/process.c b/arch/nios2/kernel/process.c index 3dde4d6d8fbe..0a42ab8e4c32 100644 --- a/arch/nios2/kernel/process.c +++ b/arch/nios2/kernel/process.c @@ -100,8 +100,8 @@ void flush_thread(void) { } -int copy_thread_tls(unsigned long clone_flags, unsigned long usp, - unsigned long arg, struct task_struct *p, unsigned long tls) +int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, + struct task_struct *p, unsigned long tls) { struct pt_regs *childregs = task_pt_regs(p); struct pt_regs *regs; diff --git a/arch/openrisc/kernel/process.c 
b/arch/openrisc/kernel/process.c index d7010e72450c..848f74c2c47a 100644 --- a/arch/openrisc/kernel/process.c +++ b/arch/openrisc/kernel/process.c @@ -116,7 +116,7 @@ void release_thread(struct task_struct *dead_task) extern asmlinkage void ret_from_fork(void); /* - * copy_thread_tls + * copy_thread * @clone_flags: flags * @usp: user stack pointer or fn for kernel thread * @arg: arg to fn for kernel thread; always NULL for userspace thread @@ -147,8 +147,8 @@ extern asmlinkage void ret_from_fork(void); */ int -copy_thread_tls(unsigned long clone_flags, unsigned long usp, - unsigned long arg, struct task_struct *p, unsigned long tls) +copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, + struct task_struct *p, unsigned long tls) { struct pt_regs *userregs; struct pt_regs *kregs; diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index b7abb12edd3a..de6299ff1530 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -208,7 +208,7 @@ arch_initcall(parisc_idle_init); * Copy architecture-specific thread state */ int -copy_thread_tls(unsigned long clone_flags, unsigned long usp, +copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long kthread_arg, struct task_struct *p, unsigned long tls) { struct pt_regs *cregs = &(p->thread.regs); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 4650b9bb217f..794b754deec2 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1593,7 +1593,7 @@ static void setup_ksp_vsid(struct task_struct *p, unsigned long sp) /* * Copy architecture-specific thread state */ -int copy_thread_tls(unsigned long clone_flags, unsigned long usp, +int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long kthread_arg, struct task_struct *p, unsigned long tls) { diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index 824d117cf202..31f39442df72 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -101,8 +101,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) return 0; } -int copy_thread_tls(unsigned long clone_flags, unsigned long usp, - unsigned long arg, struct task_struct *p, unsigned long tls) +int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, + struct task_struct *p, unsigned long tls) { struct pt_regs *childregs = task_pt_regs(p); diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index eb6e23ad15a2..b06dec1267d0 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -80,8 +80,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) return 0; } -int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp, - unsigned long arg, struct task_struct *p, unsigned long tls) +int copy_thread(unsigned long clone_flags, unsigned long new_stackp, + unsigned long arg, struct task_struct *p, unsigned long tls) { struct fake_frame { diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index 537a82d80616..b0fefd8f53a6 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c @@ -115,8 +115,8 @@ EXPORT_SYMBOL(dump_fpu); asmlinkage void ret_from_fork(void); asmlinkage void ret_from_kernel_thread(void); -int copy_thread_tls(unsigned long clone_flags, unsigned long usp, - unsigned long arg, struct task_struct *p, unsigned long tls) +int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, + struct 
task_struct *p, unsigned long tls) { struct thread_info *ti = task_thread_info(p); struct pt_regs *childregs; diff --git a/arch/sparc/kernel/process.c b/arch/sparc/kernel/process.c index 8bbe62d77b77..5234b5ccc0b9 100644 --- a/arch/sparc/kernel/process.c +++ b/arch/sparc/kernel/process.c @@ -28,7 +28,7 @@ asmlinkage long sparc_fork(struct pt_regs *regs) ret = _do_fork(&args); /* If we get an error and potentially restart the system - * call, we're screwed because copy_thread_tls() clobbered + * call, we're screwed because copy_thread() clobbered * the parent's %o1. So detect that case and restore it * here. */ @@ -53,7 +53,7 @@ asmlinkage long sparc_vfork(struct pt_regs *regs) ret = _do_fork(&args); /* If we get an error and potentially restart the system - * call, we're screwed because copy_thread_tls() clobbered + * call, we're screwed because copy_thread() clobbered * the parent's %o1. So detect that case and restore it * here. */ @@ -99,7 +99,7 @@ asmlinkage long sparc_clone(struct pt_regs *regs) ret = _do_fork(&args); /* If we get an error and potentially restart the system - * call, we're screwed because copy_thread_tls() clobbered + * call, we're screwed because copy_thread() clobbered * the parent's %o1. So detect that case and restore it * here. */ diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index 3e1f7b639e9a..bd123f1de2e7 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c @@ -273,9 +273,8 @@ clone_stackframe(struct sparc_stackf __user *dst, extern void ret_from_fork(void); extern void ret_from_kernel_thread(void); -int copy_thread_tls(unsigned long clone_flags, unsigned long sp, - unsigned long arg, struct task_struct *p, - unsigned long tls) +int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg, + struct task_struct *p, unsigned long tls) { struct thread_info *ti = task_thread_info(p); struct pt_regs *childregs, *regs = current_pt_regs(); diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 278bf287c4be..04ef19b88632 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -577,9 +577,8 @@ barf: * Parent --> %o0 == childs pid, %o1 == 0 * Child --> %o0 == parents pid, %o1 == 1 */ -int copy_thread_tls(unsigned long clone_flags, unsigned long sp, - unsigned long arg, struct task_struct *p, - unsigned long tls) +int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg, + struct task_struct *p, unsigned long tls) { struct thread_info *t = task_thread_info(p); struct pt_regs *regs = current_pt_regs(); diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index e3a2cf92a373..26b5e243d3fc 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -152,7 +152,7 @@ void fork_handler(void) userspace(¤t->thread.regs.regs, current_thread_info()->aux_fp_regs); } -int copy_thread_tls(unsigned long clone_flags, unsigned long sp, +int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg, struct task_struct * p, unsigned long tls) { void (*handler)(void); diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c index 49a305565a53..d5ae5a971c19 100644 --- a/arch/unicore32/kernel/process.c +++ b/arch/unicore32/kernel/process.c @@ -219,9 +219,8 @@ void release_thread(struct task_struct *dead_task) asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread"); -int copy_thread_tls(unsigned 
long clone_flags, unsigned long stack_start, - unsigned long stk_sz, struct task_struct *p, - unsigned long tls) +int copy_thread(unsigned long clone_flags, unsigned long stack_start, + unsigned long stk_sz, struct task_struct *p, unsigned long tls) { struct thread_info *thread = task_thread_info(p); struct pt_regs *childregs = task_pt_regs(p); diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index f362ce0d5ac0..b35cd50ed0dc 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -121,8 +121,8 @@ static int set_new_tls(struct task_struct *p, unsigned long tls) return do_set_thread_area_64(p, ARCH_SET_FS, tls); } -int copy_thread_tls(unsigned long clone_flags, unsigned long sp, - unsigned long arg, struct task_struct *p, unsigned long tls) +int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg, + struct task_struct *p, unsigned long tls) { struct inactive_task_frame *frame; struct fork_frame *fork_frame; diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c index 722a85f3b2dd..3070fd6561be 100644 --- a/arch/x86/kernel/unwind_frame.c +++ b/arch/x86/kernel/unwind_frame.c @@ -269,7 +269,7 @@ bool unwind_next_frame(struct unwind_state *state) /* * kthreads (other than the boot CPU's idle thread) have some * partial regs at the end of their stack which were placed - * there by copy_thread_tls(). But the regs don't have any + * there by copy_thread(). But the regs don't have any * useful information, so we can skip them. * * This user_mode() check is slightly broader than a PF_KTHREAD diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index b7fe6f443b42..397a7de56377 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c @@ -201,7 +201,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) * involved. Much simpler to just not copy those live frames across. 
*/ -int copy_thread_tls(unsigned long clone_flags, unsigned long usp_thread_fn, +int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn, unsigned long thread_fn_arg, struct task_struct *p, unsigned long tls) { diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 77cbe14c3034..b6253f2ea96a 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -65,8 +65,8 @@ extern void fork_init(void); extern void release_task(struct task_struct * p); -extern int copy_thread_tls(unsigned long, unsigned long, unsigned long, - struct task_struct *, unsigned long); +extern int copy_thread(unsigned long, unsigned long, unsigned long, + struct task_struct *, unsigned long); extern void flush_thread(void); diff --git a/kernel/fork.c b/kernel/fork.c index 8e52e16a1b5e..790841eb0a21 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2104,8 +2104,7 @@ static __latent_entropy struct task_struct *copy_process( retval = copy_io(clone_flags, p); if (retval) goto bad_fork_cleanup_namespaces; - retval = copy_thread_tls(clone_flags, args->stack, args->stack_size, p, - args->tls); + retval = copy_thread(clone_flags, args->stack, args->stack_size, p, args->tls); if (retval) goto bad_fork_cleanup_io; -- cgit v1.2.3 From b1c7b87443c2c12c4fe095114736b8ad6f963f67 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sun, 5 Jul 2020 19:16:22 +0300 Subject: net: dsa: felix: support half-duplex link modes Ping tested: [ 11.808455] mscc_felix 0000:00:00.5 swp0: Link is Up - 1Gbps/Full - flow control rx/tx [ 11.816497] IPv6: ADDRCONF(NETDEV_CHANGE): swp0: link becomes ready [root@LS1028ARDB ~] # ethtool -s swp0 advertise 0x4 [ 18.844591] mscc_felix 0000:00:00.5 swp0: Link is Down [ 22.048337] mscc_felix 0000:00:00.5 swp0: Link is Up - 100Mbps/Half - flow control off [root@LS1028ARDB ~] # ip addr add 192.168.1.1/24 dev swp0 [root@LS1028ARDB ~] # ping 192.168.1.2 PING 192.168.1.2 (192.168.1.2): 56 data bytes (...) ^C--- 192.168.1.2 ping statistics --- 3 packets transmitted, 3 packets received, 0% packet loss round-trip min/avg/max = 0.383/0.611/1.051 ms [root@LS1028ARDB ~] # ethtool -s swp0 advertise 0x10 [ 355.637747] mscc_felix 0000:00:00.5 swp0: Link is Down [ 358.788034] mscc_felix 0000:00:00.5 swp0: Link is Up - 1Gbps/Half - flow control off [root@LS1028ARDB ~] # ping 192.168.1.2 PING 192.168.1.2 (192.168.1.2): 56 data bytes (...) ^C --- 192.168.1.2 ping statistics --- 16 packets transmitted, 16 packets received, 0% packet loss round-trip min/avg/max = 0.301/0.384/1.138 ms Signed-off-by: Vladimir Oltean Reviewed-by: Florian Fainelli Reviewed-by: Russell King Signed-off-by: David S. Miller --- drivers/net/dsa/ocelot/felix.c | 4 +++- drivers/net/dsa/ocelot/felix_vsc9959.c | 23 +++++++++++++---------- include/linux/fsl/enetc_mdio.h | 1 + 3 files changed, 17 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 75020af7f7a4..f54648dff0ec 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -194,13 +194,15 @@ static void felix_phylink_validate(struct dsa_switch *ds, int port, return; } - /* No half-duplex. 
*/ phylink_set_port_modes(mask); phylink_set(mask, Autoneg); phylink_set(mask, Pause); phylink_set(mask, Asym_Pause); + phylink_set(mask, 10baseT_Half); phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); phylink_set(mask, 100baseT_Full); + phylink_set(mask, 1000baseT_Half); phylink_set(mask, 1000baseT_Full); if (state->interface == PHY_INTERFACE_MODE_INTERNAL || diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 9f4c8343652f..94e946b26f90 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -817,12 +817,9 @@ static void vsc9959_pcs_init_sgmii(struct phy_device *pcs, phy_set_bits(pcs, MII_BMCR, BMCR_ANENABLE); } else { + u16 if_mode = ENETC_PCS_IF_MODE_SGMII_EN; int speed; - if (state->duplex == DUPLEX_HALF) { - phydev_err(pcs, "Half duplex not supported\n"); - return; - } switch (state->speed) { case SPEED_1000: speed = ENETC_PCS_SPEED_1000; @@ -841,9 +838,9 @@ static void vsc9959_pcs_init_sgmii(struct phy_device *pcs, return; } - phy_write(pcs, ENETC_PCS_IF_MODE, - ENETC_PCS_IF_MODE_SGMII_EN | - ENETC_PCS_IF_MODE_SGMII_SPEED(speed)); + if_mode |= ENETC_PCS_IF_MODE_SGMII_SPEED(speed); + if (state->duplex == DUPLEX_HALF) + if_mode |= ENETC_PCS_IF_MODE_DUPLEX_HALF; phy_clear_bits(pcs, MII_BMCR, BMCR_ANENABLE); } @@ -870,15 +867,18 @@ static void vsc9959_pcs_init_2500basex(struct phy_device *pcs, unsigned int link_an_mode, const struct phylink_link_state *state) { + u16 if_mode = ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_2500) | + ENETC_PCS_IF_MODE_SGMII_EN; + if (link_an_mode == MLO_AN_INBAND) { phydev_err(pcs, "AN not supported on 3.125GHz SerDes lane\n"); return; } - phy_write(pcs, ENETC_PCS_IF_MODE, - ENETC_PCS_IF_MODE_SGMII_EN | - ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_2500)); + if (state->duplex == DUPLEX_HALF) + if_mode |= ENETC_PCS_IF_MODE_DUPLEX_HALF; + phy_write(pcs, ENETC_PCS_IF_MODE, if_mode); phy_clear_bits(pcs, MII_BMCR, BMCR_ANENABLE); } @@ -919,8 +919,11 @@ static void vsc9959_pcs_init(struct ocelot *ocelot, int port, linkmode_set_bit_array(phy_basic_ports_array, ARRAY_SIZE(phy_basic_ports_array), pcs->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, pcs->supported); linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, pcs->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, pcs->supported); linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, pcs->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, pcs->supported); linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, pcs->supported); if (pcs->interface == PHY_INTERFACE_MODE_2500BASEX || pcs->interface == PHY_INTERFACE_MODE_USXGMII) diff --git a/include/linux/fsl/enetc_mdio.h b/include/linux/fsl/enetc_mdio.h index 4875dd38af7e..2d9203314865 100644 --- a/include/linux/fsl/enetc_mdio.h +++ b/include/linux/fsl/enetc_mdio.h @@ -15,6 +15,7 @@ #define ENETC_PCS_IF_MODE_SGMII_EN BIT(0) #define ENETC_PCS_IF_MODE_USE_SGMII_AN BIT(1) #define ENETC_PCS_IF_MODE_SGMII_SPEED(x) (((x) << 2) & GENMASK(3, 2)) +#define ENETC_PCS_IF_MODE_DUPLEX_HALF BIT(3) /* Not a mistake, the SerDes PLL needs to be set at 3.125 GHz by Reset * Configuration Word (RCW, outside Linux control) for 2.5G SGMII mode. 
The PCS -- cgit v1.2.3 From 821b67fa46390baea0ac5139a60eaa48805261b2 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Thu, 25 Jun 2020 10:59:39 +0100 Subject: firmware: smccc: Add ARCH_SOC_ID support SMCCC v1.2 adds a new optional function SMCCC_ARCH_SOC_ID to obtain a SiP defined SoC identification value. Add support for the same. Also using the SoC bus infrastructure, let us expose the platform specific SoC atrributes under sysfs. There are various ways in which it can be represented in shortened form for efficiency and ease of parsing for userspace. The chosen form is described in the ABI document. Link: https://lore.kernel.org/r/20200625095939.50861-1-sudeep.holla@arm.com Cc: Etienne Carriere Reviewed-by: Steven Price Signed-off-by: Sudeep Holla --- Documentation/ABI/testing/sysfs-devices-soc | 30 ++++++++ drivers/firmware/smccc/Kconfig | 9 +++ drivers/firmware/smccc/Makefile | 1 + drivers/firmware/smccc/soc_id.c | 114 ++++++++++++++++++++++++++++ include/linux/arm-smccc.h | 5 ++ 5 files changed, 159 insertions(+) create mode 100644 drivers/firmware/smccc/soc_id.c (limited to 'include') diff --git a/Documentation/ABI/testing/sysfs-devices-soc b/Documentation/ABI/testing/sysfs-devices-soc index ba3a3fac0ee1..ea999e292f11 100644 --- a/Documentation/ABI/testing/sysfs-devices-soc +++ b/Documentation/ABI/testing/sysfs-devices-soc @@ -26,6 +26,30 @@ Description: Read-only attribute common to all SoCs. Contains SoC family name (e.g. DB8500). + On many of ARM based silicon with SMCCC v1.2+ compliant firmware + this will contain the JEDEC JEP106 manufacturer’s identification + code. The format is "jep106:XXYY" where XX is identity code and + YY is continuation code. + + This manufacturer’s identification code is defined by one + or more eight (8) bit fields, each consisting of seven (7) + data bits plus one (1) odd parity bit. It is a single field, + limiting the possible number of vendors to 126. To expand + the maximum number of identification codes, a continuation + scheme has been defined. + + The specified mechanism is that an identity code of 0x7F + represents the "continuation code" and implies the presence + of an additional identity code field, and this mechanism + may be extended to multiple continuation codes followed + by the manufacturer's identity code. + + For example, ARM has identity code 0x7F 0x7F 0x7F 0x7F 0x3B, + which is code 0x3B on the fifth 'page'. This is shortened + as JEP106 identity code of 0x3B and a continuation code of + 0x4 to represent the four continuation codes preceding the + identity code. + What: /sys/devices/socX/serial_number Date: January 2019 contact: Bjorn Andersson @@ -40,6 +64,12 @@ Description: Read-only attribute supported by most SoCs. In the case of ST-Ericsson's chips this contains the SoC serial number. + On many of ARM based silicon with SMCCC v1.2+ compliant firmware + this will contain the SOC ID appended to the family attribute + to ensure there is no conflict in this namespace across various + vendors. The format is "jep106:XXYY:ZZZZ" where XX is identity + code, YY is continuation code and ZZZZ is the SOC ID. 
+ What: /sys/devices/socX/revision Date: January 2012 contact: Lee Jones diff --git a/drivers/firmware/smccc/Kconfig b/drivers/firmware/smccc/Kconfig index 27b675d76235..15e7466179a6 100644 --- a/drivers/firmware/smccc/Kconfig +++ b/drivers/firmware/smccc/Kconfig @@ -14,3 +14,12 @@ config HAVE_ARM_SMCCC_DISCOVERY to add SMCCC discovery mechanism though the PSCI firmware implementation of PSCI_FEATURES(SMCCC_VERSION) which returns success on firmware compliant to SMCCC v1.1 and above. + +config ARM_SMCCC_SOC_ID + bool "SoC bus device for the ARM SMCCC SOC_ID" + depends on HAVE_ARM_SMCCC_DISCOVERY + default y + select SOC_BUS + help + Include support for the SoC bus on the ARM SMCCC firmware based + platforms providing some sysfs information about the SoC variant. diff --git a/drivers/firmware/smccc/Makefile b/drivers/firmware/smccc/Makefile index 6f369fe3f0b9..72ab84042832 100644 --- a/drivers/firmware/smccc/Makefile +++ b/drivers/firmware/smccc/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 # obj-$(CONFIG_HAVE_ARM_SMCCC_DISCOVERY) += smccc.o +obj-$(CONFIG_ARM_SMCCC_SOC_ID) += soc_id.o diff --git a/drivers/firmware/smccc/soc_id.c b/drivers/firmware/smccc/soc_id.c new file mode 100644 index 000000000000..581aa5e9b077 --- /dev/null +++ b/drivers/firmware/smccc/soc_id.c @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2020 Arm Limited + */ + +#define pr_fmt(fmt) "SMCCC: SOC_ID: " fmt + +#include +#include +#include +#include +#include +#include +#include + +#define SMCCC_SOC_ID_JEP106_BANK_IDX_MASK GENMASK(30, 24) +/* + * As per the SMC Calling Convention specification v1.2 (ARM DEN 0028C) + * Section 7.4 SMCCC_ARCH_SOC_ID bits[23:16] are JEP-106 identification + * code with parity bit for the SiP. We can drop the parity bit. 
+ */ +#define SMCCC_SOC_ID_JEP106_ID_CODE_MASK GENMASK(22, 16) +#define SMCCC_SOC_ID_IMP_DEF_SOC_ID_MASK GENMASK(15, 0) + +#define JEP106_BANK_CONT_CODE(x) \ + (u8)(FIELD_GET(SMCCC_SOC_ID_JEP106_BANK_IDX_MASK, (x))) +#define JEP106_ID_CODE(x) \ + (u8)(FIELD_GET(SMCCC_SOC_ID_JEP106_ID_CODE_MASK, (x))) +#define IMP_DEF_SOC_ID(x) \ + (u16)(FIELD_GET(SMCCC_SOC_ID_IMP_DEF_SOC_ID_MASK, (x))) + +static struct soc_device *soc_dev; +static struct soc_device_attribute *soc_dev_attr; + +static int __init smccc_soc_init(void) +{ + struct arm_smccc_res res; + int soc_id_rev, soc_id_version; + static char soc_id_str[20], soc_id_rev_str[12]; + static char soc_id_jep106_id_str[12]; + + if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2) + return 0; + + if (arm_smccc_1_1_get_conduit() == SMCCC_CONDUIT_NONE) { + pr_err("%s: invalid SMCCC conduit\n", __func__); + return -EOPNOTSUPP; + } + + arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_SOC_ID, &res); + + if (res.a0 == SMCCC_RET_NOT_SUPPORTED) { + pr_info("ARCH_SOC_ID not implemented, skipping ....\n"); + return 0; + } + + if ((int)res.a0 < 0) { + pr_info("ARCH_FEATURES(ARCH_SOC_ID) returned error: %lx\n", + res.a0); + return -EINVAL; + } + + arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_SOC_ID, 0, &res); + if ((int)res.a0 < 0) { + pr_err("ARCH_SOC_ID(0) returned error: %lx\n", res.a0); + return -EINVAL; + } + + soc_id_version = res.a0; + + arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_SOC_ID, 1, &res); + if ((int)res.a0 < 0) { + pr_err("ARCH_SOC_ID(1) returned error: %lx\n", res.a0); + return -EINVAL; + } + + soc_id_rev = res.a0; + + soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); + if (!soc_dev_attr) + return -ENOMEM; + + sprintf(soc_id_rev_str, "0x%08x", soc_id_rev); + sprintf(soc_id_jep106_id_str, "jep106:%02x%02x", + JEP106_BANK_CONT_CODE(soc_id_version), + JEP106_ID_CODE(soc_id_version)); + sprintf(soc_id_str, "%s:%04x", soc_id_jep106_id_str, + IMP_DEF_SOC_ID(soc_id_version)); + + soc_dev_attr->soc_id = soc_id_str; + soc_dev_attr->revision = soc_id_rev_str; + soc_dev_attr->family = soc_id_jep106_id_str; + + soc_dev = soc_device_register(soc_dev_attr); + if (IS_ERR(soc_dev)) { + kfree(soc_dev_attr); + return PTR_ERR(soc_dev); + } + + pr_info("ID = %s Revision = %s\n", soc_dev_attr->soc_id, + soc_dev_attr->revision); + + return 0; +} +module_init(smccc_soc_init); + +static void __exit smccc_soc_exit(void) +{ + if (soc_dev) + soc_device_unregister(soc_dev); + kfree(soc_dev_attr); +} +module_exit(smccc_soc_exit); diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 56d6a5c6e353..8254e11ea857 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -71,6 +71,11 @@ ARM_SMCCC_SMC_32, \ 0, 1) +#define ARM_SMCCC_ARCH_SOC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 2) + #define ARM_SMCCC_ARCH_WORKAROUND_1 \ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ ARM_SMCCC_SMC_32, \ -- cgit v1.2.3 From ea1be1e59b19017e61aa357d524b743ba5602d3c Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Thu, 7 May 2020 20:03:35 +0800 Subject: serial: Remove duplicated macro definition of port type There exists the same macro definition of port type from 0 to 13 in include/uapi/linux/serial.h, remove these duplicated code in include/uapi/linux/serial_core.h which includes the former header. 
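For illustration only (not part of this patch): a minimal userspace sketch, assuming only the include relationship described above. Because the exported serial_core.h pulls in <linux/serial.h>, the PORT_* values 0 to 13 remain visible to callers after the duplicate definitions are dropped.

#include <stdio.h>
#include <linux/serial_core.h>	/* also provides the <linux/serial.h> definitions */

int main(void)
{
	/* PORT_16550A (4) is now defined only in <linux/serial.h> */
	printf("PORT_16550A = %d\n", PORT_16550A);
	return 0;
}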
Acked-by: Jiri Slaby Signed-off-by: Tiezhu Yang Link: https://lore.kernel.org/r/1588853015-28392-1-git-send-email-yangtiezhu@loongson.cn Signed-off-by: Greg Kroah-Hartman --- include/uapi/linux/serial_core.h | 14 -------------- 1 file changed, 14 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h index 8ec3dd742ea4..851b982f8c4b 100644 --- a/include/uapi/linux/serial_core.h +++ b/include/uapi/linux/serial_core.h @@ -26,20 +26,6 @@ /* * The type definitions. These are from Ted Ts'o's serial.h */ -#define PORT_UNKNOWN 0 -#define PORT_8250 1 -#define PORT_16450 2 -#define PORT_16550 3 -#define PORT_16550A 4 -#define PORT_CIRRUS 5 -#define PORT_16650 6 -#define PORT_16650V2 7 -#define PORT_16750 8 -#define PORT_STARTECH 9 -#define PORT_16C950 10 -#define PORT_16654 11 -#define PORT_16850 12 -#define PORT_RSA 13 #define PORT_NS16550A 14 #define PORT_XSCALE 15 #define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */ -- cgit v1.2.3 From 0935ff5f1f0a44f66a13e075ed49f97ad99d2fdc Mon Sep 17 00:00:00 2001 From: Robin Gong Date: Sat, 4 Jul 2020 00:19:35 +0800 Subject: regulator: pca9450: add pca9450 pmic driver Add NXP pca9450 pmic driver. Signed-off-by: Robin Gong Reviewed-by: Frieder Schrempf Link: https://lore.kernel.org/r/1593793178-9737-2-git-send-email-yibin.gong@nxp.com Signed-off-by: Mark Brown --- drivers/regulator/Kconfig | 8 + drivers/regulator/Makefile | 1 + drivers/regulator/pca9450-regulator.c | 843 ++++++++++++++++++++++++++++++++++ include/linux/regulator/pca9450.h | 219 +++++++++ 4 files changed, 1071 insertions(+) create mode 100644 drivers/regulator/pca9450-regulator.c create mode 100644 include/linux/regulator/pca9450.h (limited to 'include') diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index 2c6a8c4bdf06..8f9bef574af2 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -750,6 +750,14 @@ config REGULATOR_PBIAS This driver provides support for OMAP pbias modelled regulators. +config REGULATOR_PCA9450 + tristate "NXP PCA9450A/PCA9450B/PCA9450C regulator driver" + depends on I2C + select REGMAP_I2C + help + Say y here to support the NXP PCA9450A/PCA9450B/PCA9450C PMIC + regulator driver. + config REGULATOR_PCAP tristate "Motorola PCAP2 regulator driver" depends on EZX_PCAP diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index 85bc3ef0be1c..6adfe769a47c 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -96,6 +96,7 @@ obj-$(CONFIG_REGULATOR_QCOM_SMD_RPM) += qcom_smd-regulator.o obj-$(CONFIG_REGULATOR_QCOM_SPMI) += qcom_spmi-regulator.o obj-$(CONFIG_REGULATOR_QCOM_USB_VBUS) += qcom_usb_vbus-regulator.o obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o +obj-$(CONFIG_REGULATOR_PCA9450) += pca9450-regulator.o obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o obj-$(CONFIG_REGULATOR_PV88060) += pv88060-regulator.o obj-$(CONFIG_REGULATOR_PV88080) += pv88080-regulator.o diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c new file mode 100644 index 000000000000..02250459aa90 --- /dev/null +++ b/drivers/regulator/pca9450-regulator.c @@ -0,0 +1,843 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2020 NXP. 
+ * NXP PCA9450 pmic driver + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct pc9450_dvs_config { + unsigned int run_reg; /* dvs0 */ + unsigned int run_mask; + unsigned int standby_reg; /* dvs1 */ + unsigned int standby_mask; +}; + +struct pca9450_regulator_desc { + struct regulator_desc desc; + const struct pc9450_dvs_config dvs; +}; + +struct pca9450 { + struct device *dev; + struct regmap *regmap; + enum pca9450_chip_type type; + unsigned int rcnt; + int irq; +}; + +static const struct regmap_range pca9450_status_range = { + .range_min = PCA9450_REG_INT1, + .range_max = PCA9450_REG_PWRON_STAT, +}; + +static const struct regmap_access_table pca9450_volatile_regs = { + .yes_ranges = &pca9450_status_range, + .n_yes_ranges = 1, +}; + +static const struct regmap_config pca9450_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .volatile_table = &pca9450_volatile_regs, + .max_register = PCA9450_MAX_REGISTER - 1, + .cache_type = REGCACHE_RBTREE, +}; + +/* + * BUCK1/2/3 + * BUCK1RAM[1:0] BUCK1 DVS ramp rate setting + * 00: 25mV/1usec + * 01: 25mV/2usec + * 10: 25mV/4usec + * 11: 25mV/8usec + */ +static int pca9450_dvs_set_ramp_delay(struct regulator_dev *rdev, + int ramp_delay) +{ + int id = rdev_get_id(rdev); + unsigned int ramp_value; + + switch (ramp_delay) { + case 1 ... 3125: + ramp_value = BUCK1_RAMP_3P125MV; + break; + case 3126 ... 6250: + ramp_value = BUCK1_RAMP_6P25MV; + break; + case 6251 ... 12500: + ramp_value = BUCK1_RAMP_12P5MV; + break; + case 12501 ... 25000: + ramp_value = BUCK1_RAMP_25MV; + break; + default: + ramp_value = BUCK1_RAMP_25MV; + } + + return regmap_update_bits(rdev->regmap, PCA9450_REG_BUCK1CTRL + id * 3, + BUCK1_RAMP_MASK, ramp_value << 6); +} + +static struct regulator_ops pca9450_dvs_buck_regulator_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear_range, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_time_sel = regulator_set_voltage_time_sel, + .set_ramp_delay = pca9450_dvs_set_ramp_delay, +}; + +static struct regulator_ops pca9450_buck_regulator_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear_range, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_time_sel = regulator_set_voltage_time_sel, +}; + +static struct regulator_ops pca9450_ldo_regulator_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear_range, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, +}; + +/* + * BUCK1/2/3 + * 0.60 to 2.1875V (12.5mV step) + */ +static const struct linear_range pca9450_dvs_buck_volts[] = { + REGULATOR_LINEAR_RANGE(600000, 0x00, 0x7F, 12500), +}; + +/* + * BUCK4/5/6 + * 0.6V to 3.4V (25mV step) + */ +static const struct linear_range pca9450_buck_volts[] = { + REGULATOR_LINEAR_RANGE(600000, 0x00, 0x70, 25000), + REGULATOR_LINEAR_RANGE(3400000, 0x71, 0x7F, 0), +}; + +/* + * LDO1 + * 1.6 to 3.3V () + */ +static const struct linear_range pca9450_ldo1_volts[] = { + REGULATOR_LINEAR_RANGE(1600000, 0x00, 
0x03, 100000), + REGULATOR_LINEAR_RANGE(3000000, 0x04, 0x07, 100000), +}; + +/* + * LDO2 + * 0.8 to 1.15V (50mV step) + */ +static const struct linear_range pca9450_ldo2_volts[] = { + REGULATOR_LINEAR_RANGE(800000, 0x00, 0x07, 50000), +}; + +/* + * LDO3/4 + * 0.8 to 3.3V (100mV step) + */ +static const struct linear_range pca9450_ldo34_volts[] = { + REGULATOR_LINEAR_RANGE(800000, 0x00, 0x19, 100000), + REGULATOR_LINEAR_RANGE(3300000, 0x1A, 0x1F, 0), +}; + +/* + * LDO5 + * 1.8 to 3.3V (100mV step) + */ +static const struct linear_range pca9450_ldo5_volts[] = { + REGULATOR_LINEAR_RANGE(1800000, 0x00, 0x0F, 100000), +}; + +static int buck_set_dvs(const struct regulator_desc *desc, + struct device_node *np, struct regmap *regmap, + char *prop, unsigned int reg, unsigned int mask) +{ + int ret, i; + uint32_t uv; + + ret = of_property_read_u32(np, prop, &uv); + if (ret == -EINVAL) + return 0; + else if (ret) + return ret; + + for (i = 0; i < desc->n_voltages; i++) { + ret = regulator_desc_list_voltage_linear_range(desc, i); + if (ret < 0) + continue; + if (ret == uv) { + i <<= ffs(desc->vsel_mask) - 1; + ret = regmap_update_bits(regmap, reg, mask, i); + break; + } + } + + return ret; +} + +static int pca9450_set_dvs_levels(struct device_node *np, + const struct regulator_desc *desc, + struct regulator_config *cfg) +{ + struct pca9450_regulator_desc *data = container_of(desc, + struct pca9450_regulator_desc, desc); + const struct pc9450_dvs_config *dvs = &data->dvs; + unsigned int reg, mask; + char *prop; + int i, ret = 0; + + for (i = 0; i < PCA9450_DVS_LEVEL_MAX; i++) { + switch (i) { + case PCA9450_DVS_LEVEL_RUN: + prop = "nxp,dvs-run-voltage"; + reg = dvs->run_reg; + mask = dvs->run_mask; + break; + case PCA9450_DVS_LEVEL_STANDBY: + prop = "nxp,dvs-standby-voltage"; + reg = dvs->standby_reg; + mask = dvs->standby_mask; + break; + default: + return -EINVAL; + } + + ret = buck_set_dvs(desc, np, cfg->regmap, prop, reg, mask); + if (ret) + break; + } + + return ret; +} + +static const struct pca9450_regulator_desc pca9450a_regulators[] = { + { + .desc = { + .name = "buck1", + .of_match = of_match_ptr("BUCK1"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_BUCK1, + .ops = &pca9450_dvs_buck_regulator_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = PCA9450_BUCK1_VOLTAGE_NUM, + .linear_ranges = pca9450_dvs_buck_volts, + .n_linear_ranges = ARRAY_SIZE(pca9450_dvs_buck_volts), + .vsel_reg = PCA9450_REG_BUCK1OUT_DVS0, + .vsel_mask = BUCK1OUT_DVS0_MASK, + .enable_reg = PCA9450_REG_BUCK1CTRL, + .enable_mask = BUCK1_ENMODE_MASK, + .owner = THIS_MODULE, + .of_parse_cb = pca9450_set_dvs_levels, + }, + .dvs = { + .run_reg = PCA9450_REG_BUCK1OUT_DVS0, + .run_mask = BUCK1OUT_DVS0_MASK, + .standby_reg = PCA9450_REG_BUCK1OUT_DVS1, + .standby_mask = BUCK1OUT_DVS1_MASK, + }, + }, + { + .desc = { + .name = "buck2", + .of_match = of_match_ptr("BUCK2"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_BUCK2, + .ops = &pca9450_dvs_buck_regulator_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = PCA9450_BUCK2_VOLTAGE_NUM, + .linear_ranges = pca9450_dvs_buck_volts, + .n_linear_ranges = ARRAY_SIZE(pca9450_dvs_buck_volts), + .vsel_reg = PCA9450_REG_BUCK2OUT_DVS0, + .vsel_mask = BUCK2OUT_DVS0_MASK, + .enable_reg = PCA9450_REG_BUCK2CTRL, + .enable_mask = BUCK1_ENMODE_MASK, + .owner = THIS_MODULE, + .of_parse_cb = pca9450_set_dvs_levels, + }, + .dvs = { + .run_reg = PCA9450_REG_BUCK2OUT_DVS0, + .run_mask = BUCK2OUT_DVS0_MASK, + .standby_reg = PCA9450_REG_BUCK2OUT_DVS1, + .standby_mask = 
BUCK2OUT_DVS1_MASK, + }, + }, + { + .desc = { + .name = "buck3", + .of_match = of_match_ptr("BUCK3"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_BUCK3, + .ops = &pca9450_dvs_buck_regulator_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = PCA9450_BUCK3_VOLTAGE_NUM, + .linear_ranges = pca9450_dvs_buck_volts, + .n_linear_ranges = ARRAY_SIZE(pca9450_dvs_buck_volts), + .vsel_reg = PCA9450_REG_BUCK3OUT_DVS0, + .vsel_mask = BUCK3OUT_DVS0_MASK, + .enable_reg = PCA9450_REG_BUCK3CTRL, + .enable_mask = BUCK3_ENMODE_MASK, + .owner = THIS_MODULE, + .of_parse_cb = pca9450_set_dvs_levels, + }, + .dvs = { + .run_reg = PCA9450_REG_BUCK3OUT_DVS0, + .run_mask = BUCK3OUT_DVS0_MASK, + .standby_reg = PCA9450_REG_BUCK3OUT_DVS1, + .standby_mask = BUCK3OUT_DVS1_MASK, + }, + }, + { + .desc = { + .name = "buck4", + .of_match = of_match_ptr("BUCK4"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_BUCK4, + .ops = &pca9450_buck_regulator_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = PCA9450_BUCK4_VOLTAGE_NUM, + .linear_ranges = pca9450_buck_volts, + .n_linear_ranges = ARRAY_SIZE(pca9450_buck_volts), + .vsel_reg = PCA9450_REG_BUCK4OUT, + .vsel_mask = BUCK4OUT_MASK, + .enable_reg = PCA9450_REG_BUCK4CTRL, + .enable_mask = BUCK4_ENMODE_MASK, + .owner = THIS_MODULE, + }, + }, + { + .desc = { + .name = "buck5", + .of_match = of_match_ptr("BUCK5"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_BUCK5, + .ops = &pca9450_buck_regulator_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = PCA9450_BUCK5_VOLTAGE_NUM, + .linear_ranges = pca9450_buck_volts, + .n_linear_ranges = ARRAY_SIZE(pca9450_buck_volts), + .vsel_reg = PCA9450_REG_BUCK5OUT, + .vsel_mask = BUCK5OUT_MASK, + .enable_reg = PCA9450_REG_BUCK5CTRL, + .enable_mask = BUCK5_ENMODE_MASK, + .owner = THIS_MODULE, + }, + }, + { + .desc = { + .name = "buck6", + .of_match = of_match_ptr("BUCK6"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_BUCK6, + .ops = &pca9450_buck_regulator_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = PCA9450_BUCK6_VOLTAGE_NUM, + .linear_ranges = pca9450_buck_volts, + .n_linear_ranges = ARRAY_SIZE(pca9450_buck_volts), + .vsel_reg = PCA9450_REG_BUCK6OUT, + .vsel_mask = BUCK6OUT_MASK, + .enable_reg = PCA9450_REG_BUCK6CTRL, + .enable_mask = BUCK6_ENMODE_MASK, + .owner = THIS_MODULE, + }, + }, + { + .desc = { + .name = "ldo1", + .of_match = of_match_ptr("LDO1"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_LDO1, + .ops = &pca9450_ldo_regulator_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = PCA9450_LDO1_VOLTAGE_NUM, + .linear_ranges = pca9450_ldo1_volts, + .n_linear_ranges = ARRAY_SIZE(pca9450_ldo1_volts), + .vsel_reg = PCA9450_REG_LDO1CTRL, + .vsel_mask = LDO1OUT_MASK, + .enable_reg = PCA9450_REG_LDO1CTRL, + .enable_mask = LDO1_EN_MASK, + .owner = THIS_MODULE, + }, + }, + { + .desc = { + .name = "ldo2", + .of_match = of_match_ptr("LDO2"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_LDO2, + .ops = &pca9450_ldo_regulator_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = PCA9450_LDO2_VOLTAGE_NUM, + .linear_ranges = pca9450_ldo2_volts, + .n_linear_ranges = ARRAY_SIZE(pca9450_ldo2_volts), + .vsel_reg = PCA9450_REG_LDO2CTRL, + .vsel_mask = LDO2OUT_MASK, + .enable_reg = PCA9450_REG_LDO2CTRL, + .enable_mask = LDO2_EN_MASK, + .owner = THIS_MODULE, + }, + }, + { + .desc = { + .name = "ldo3", + .of_match = of_match_ptr("LDO3"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_LDO3, + .ops = &pca9450_ldo_regulator_ops, + .type = 
REGULATOR_VOLTAGE, + .n_voltages = PCA9450_LDO3_VOLTAGE_NUM, + .linear_ranges = pca9450_ldo34_volts, + .n_linear_ranges = ARRAY_SIZE(pca9450_ldo34_volts), + .vsel_reg = PCA9450_REG_LDO3CTRL, + .vsel_mask = LDO3OUT_MASK, + .enable_reg = PCA9450_REG_LDO3CTRL, + .enable_mask = LDO3_EN_MASK, + .owner = THIS_MODULE, + }, + }, + { + .desc = { + .name = "ldo4", + .of_match = of_match_ptr("LDO4"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_LDO4, + .ops = &pca9450_ldo_regulator_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = PCA9450_LDO4_VOLTAGE_NUM, + .linear_ranges = pca9450_ldo34_volts, + .n_linear_ranges = ARRAY_SIZE(pca9450_ldo34_volts), + .vsel_reg = PCA9450_REG_LDO4CTRL, + .vsel_mask = LDO4OUT_MASK, + .enable_reg = PCA9450_REG_LDO4CTRL, + .enable_mask = LDO4_EN_MASK, + .owner = THIS_MODULE, + }, + }, + { + .desc = { + .name = "ldo5", + .of_match = of_match_ptr("LDO5"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_LDO5, + .ops = &pca9450_ldo_regulator_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = PCA9450_LDO5_VOLTAGE_NUM, + .linear_ranges = pca9450_ldo5_volts, + .n_linear_ranges = ARRAY_SIZE(pca9450_ldo5_volts), + .vsel_reg = PCA9450_REG_LDO5CTRL_H, + .vsel_mask = LDO5HOUT_MASK, + .enable_reg = PCA9450_REG_LDO5CTRL_H, + .enable_mask = LDO5H_EN_MASK, + .owner = THIS_MODULE, + }, + }, +}; + +/* + * Buck3 removed on PCA9450B and connected with Buck1 internal for dual phase + * on PCA9450C as no Buck3. + */ +static const struct pca9450_regulator_desc pca9450bc_regulators[] = { + { + .desc = { + .name = "buck1", + .of_match = of_match_ptr("BUCK1"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_BUCK1, + .ops = &pca9450_dvs_buck_regulator_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = PCA9450_BUCK1_VOLTAGE_NUM, + .linear_ranges = pca9450_dvs_buck_volts, + .n_linear_ranges = ARRAY_SIZE(pca9450_dvs_buck_volts), + .vsel_reg = PCA9450_REG_BUCK1OUT_DVS0, + .vsel_mask = BUCK1OUT_DVS0_MASK, + .enable_reg = PCA9450_REG_BUCK1CTRL, + .enable_mask = BUCK1_ENMODE_MASK, + .owner = THIS_MODULE, + .of_parse_cb = pca9450_set_dvs_levels, + }, + .dvs = { + .run_reg = PCA9450_REG_BUCK1OUT_DVS0, + .run_mask = BUCK1OUT_DVS0_MASK, + .standby_reg = PCA9450_REG_BUCK1OUT_DVS1, + .standby_mask = BUCK1OUT_DVS1_MASK, + }, + }, + { + .desc = { + .name = "buck2", + .of_match = of_match_ptr("BUCK2"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_BUCK2, + .ops = &pca9450_dvs_buck_regulator_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = PCA9450_BUCK2_VOLTAGE_NUM, + .linear_ranges = pca9450_dvs_buck_volts, + .n_linear_ranges = ARRAY_SIZE(pca9450_dvs_buck_volts), + .vsel_reg = PCA9450_REG_BUCK2OUT_DVS0, + .vsel_mask = BUCK2OUT_DVS0_MASK, + .enable_reg = PCA9450_REG_BUCK2CTRL, + .enable_mask = BUCK1_ENMODE_MASK, + .owner = THIS_MODULE, + .of_parse_cb = pca9450_set_dvs_levels, + }, + .dvs = { + .run_reg = PCA9450_REG_BUCK2OUT_DVS0, + .run_mask = BUCK2OUT_DVS0_MASK, + .standby_reg = PCA9450_REG_BUCK2OUT_DVS1, + .standby_mask = BUCK2OUT_DVS1_MASK, + }, + }, + { + .desc = { + .name = "buck4", + .of_match = of_match_ptr("BUCK4"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_BUCK4, + .ops = &pca9450_buck_regulator_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = PCA9450_BUCK4_VOLTAGE_NUM, + .linear_ranges = pca9450_buck_volts, + .n_linear_ranges = ARRAY_SIZE(pca9450_buck_volts), + .vsel_reg = PCA9450_REG_BUCK4OUT, + .vsel_mask = BUCK4OUT_MASK, + .enable_reg = PCA9450_REG_BUCK4CTRL, + .enable_mask = BUCK4_ENMODE_MASK, + .owner = 
THIS_MODULE, + }, + }, + { + .desc = { + .name = "buck5", + .of_match = of_match_ptr("BUCK5"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_BUCK5, + .ops = &pca9450_buck_regulator_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = PCA9450_BUCK5_VOLTAGE_NUM, + .linear_ranges = pca9450_buck_volts, + .n_linear_ranges = ARRAY_SIZE(pca9450_buck_volts), + .vsel_reg = PCA9450_REG_BUCK5OUT, + .vsel_mask = BUCK5OUT_MASK, + .enable_reg = PCA9450_REG_BUCK5CTRL, + .enable_mask = BUCK5_ENMODE_MASK, + .owner = THIS_MODULE, + }, + }, + { + .desc = { + .name = "buck6", + .of_match = of_match_ptr("BUCK6"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_BUCK6, + .ops = &pca9450_buck_regulator_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = PCA9450_BUCK6_VOLTAGE_NUM, + .linear_ranges = pca9450_buck_volts, + .n_linear_ranges = ARRAY_SIZE(pca9450_buck_volts), + .vsel_reg = PCA9450_REG_BUCK6OUT, + .vsel_mask = BUCK6OUT_MASK, + .enable_reg = PCA9450_REG_BUCK6CTRL, + .enable_mask = BUCK6_ENMODE_MASK, + .owner = THIS_MODULE, + }, + }, + { + .desc = { + .name = "ldo1", + .of_match = of_match_ptr("LDO1"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_LDO1, + .ops = &pca9450_ldo_regulator_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = PCA9450_LDO1_VOLTAGE_NUM, + .linear_ranges = pca9450_ldo1_volts, + .n_linear_ranges = ARRAY_SIZE(pca9450_ldo1_volts), + .vsel_reg = PCA9450_REG_LDO1CTRL, + .vsel_mask = LDO1OUT_MASK, + .enable_reg = PCA9450_REG_LDO1CTRL, + .enable_mask = LDO1_EN_MASK, + .owner = THIS_MODULE, + }, + }, + { + .desc = { + .name = "ldo2", + .of_match = of_match_ptr("LDO2"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_LDO2, + .ops = &pca9450_ldo_regulator_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = PCA9450_LDO2_VOLTAGE_NUM, + .linear_ranges = pca9450_ldo2_volts, + .n_linear_ranges = ARRAY_SIZE(pca9450_ldo2_volts), + .vsel_reg = PCA9450_REG_LDO2CTRL, + .vsel_mask = LDO2OUT_MASK, + .enable_reg = PCA9450_REG_LDO2CTRL, + .enable_mask = LDO2_EN_MASK, + .owner = THIS_MODULE, + }, + }, + { + .desc = { + .name = "ldo3", + .of_match = of_match_ptr("LDO3"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_LDO3, + .ops = &pca9450_ldo_regulator_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = PCA9450_LDO3_VOLTAGE_NUM, + .linear_ranges = pca9450_ldo34_volts, + .n_linear_ranges = ARRAY_SIZE(pca9450_ldo34_volts), + .vsel_reg = PCA9450_REG_LDO3CTRL, + .vsel_mask = LDO3OUT_MASK, + .enable_reg = PCA9450_REG_LDO3CTRL, + .enable_mask = LDO3_EN_MASK, + .owner = THIS_MODULE, + }, + }, + { + .desc = { + .name = "ldo4", + .of_match = of_match_ptr("LDO4"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_LDO4, + .ops = &pca9450_ldo_regulator_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = PCA9450_LDO4_VOLTAGE_NUM, + .linear_ranges = pca9450_ldo34_volts, + .n_linear_ranges = ARRAY_SIZE(pca9450_ldo34_volts), + .vsel_reg = PCA9450_REG_LDO4CTRL, + .vsel_mask = LDO4OUT_MASK, + .enable_reg = PCA9450_REG_LDO4CTRL, + .enable_mask = LDO4_EN_MASK, + .owner = THIS_MODULE, + }, + }, + { + .desc = { + .name = "ldo5", + .of_match = of_match_ptr("LDO5"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_LDO5, + .ops = &pca9450_ldo_regulator_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = PCA9450_LDO5_VOLTAGE_NUM, + .linear_ranges = pca9450_ldo5_volts, + .n_linear_ranges = ARRAY_SIZE(pca9450_ldo5_volts), + .vsel_reg = PCA9450_REG_LDO5CTRL_H, + .vsel_mask = LDO5HOUT_MASK, + .enable_reg = PCA9450_REG_LDO5CTRL_H, 
+ .enable_mask = LDO5H_EN_MASK, + .owner = THIS_MODULE, + }, + }, +}; + +static irqreturn_t pca9450_irq_handler(int irq, void *data) +{ + struct pca9450 *pca9450 = data; + struct regmap *regmap = pca9450->regmap; + unsigned int status; + int ret; + + ret = regmap_read(regmap, PCA9450_REG_INT1, &status); + if (ret < 0) { + dev_err(pca9450->dev, + "Failed to read INT1(%d)\n", ret); + return IRQ_NONE; + } + + if (status & IRQ_PWRON) + dev_warn(pca9450->dev, "PWRON interrupt.\n"); + + if (status & IRQ_WDOGB) + dev_warn(pca9450->dev, "WDOGB interrupt.\n"); + + if (status & IRQ_VR_FLT1) + dev_warn(pca9450->dev, "VRFLT1 interrupt.\n"); + + if (status & IRQ_VR_FLT2) + dev_warn(pca9450->dev, "VRFLT2 interrupt.\n"); + + if (status & IRQ_LOWVSYS) + dev_warn(pca9450->dev, "LOWVSYS interrupt.\n"); + + if (status & IRQ_THERM_105) + dev_warn(pca9450->dev, "IRQ_THERM_105 interrupt.\n"); + + if (status & IRQ_THERM_125) + dev_warn(pca9450->dev, "IRQ_THERM_125 interrupt.\n"); + + return IRQ_HANDLED; +} + +static int pca9450_i2c_probe(struct i2c_client *i2c, + const struct i2c_device_id *id) +{ + enum pca9450_chip_type type = (unsigned int)(uintptr_t) + of_device_get_match_data(&i2c->dev); + const struct pca9450_regulator_desc *regulator_desc; + struct regulator_config config = { }; + struct pca9450 *pca9450; + unsigned int device_id, i; + int ret; + + if (!i2c->irq) { + dev_err(&i2c->dev, "No IRQ configured?\n"); + return -EINVAL; + } + + pca9450 = devm_kzalloc(&i2c->dev, sizeof(struct pca9450), GFP_KERNEL); + if (!pca9450) + return -ENOMEM; + + switch (type) { + case PCA9450_TYPE_PCA9450A: + regulator_desc = pca9450a_regulators; + pca9450->rcnt = ARRAY_SIZE(pca9450a_regulators); + break; + case PCA9450_TYPE_PCA9450BC: + regulator_desc = pca9450bc_regulators; + pca9450->rcnt = ARRAY_SIZE(pca9450bc_regulators); + break; + default: + dev_err(&i2c->dev, "Unknown device type"); + return -EINVAL; + } + + pca9450->irq = i2c->irq; + pca9450->type = type; + pca9450->dev = &i2c->dev; + + dev_set_drvdata(&i2c->dev, pca9450); + + pca9450->regmap = devm_regmap_init_i2c(i2c, + &pca9450_regmap_config); + if (IS_ERR(pca9450->regmap)) { + dev_err(&i2c->dev, "regmap initialization failed\n"); + return PTR_ERR(pca9450->regmap); + } + + ret = regmap_read(pca9450->regmap, PCA9450_REG_DEV_ID, &device_id); + if (ret) { + dev_err(&i2c->dev, "Read device id error\n"); + return ret; + } + + /* Check your board and dts for match the right pmic */ + if (((device_id >> 4) != 0x1 && type == PCA9450_TYPE_PCA9450A) || + ((device_id >> 4) != 0x3 && type == PCA9450_TYPE_PCA9450BC)) { + dev_err(&i2c->dev, "Device id(%x) mismatched\n", + device_id >> 4); + return -EINVAL; + } + + for (i = 0; i < pca9450->rcnt; i++) { + const struct regulator_desc *desc; + struct regulator_dev *rdev; + const struct pca9450_regulator_desc *r; + + r = ®ulator_desc[i]; + desc = &r->desc; + + config.regmap = pca9450->regmap; + config.dev = pca9450->dev; + + rdev = devm_regulator_register(pca9450->dev, desc, &config); + if (IS_ERR(rdev)) { + ret = PTR_ERR(rdev); + dev_err(pca9450->dev, + "Failed to register regulator(%s): %d\n", + desc->name, ret); + return ret; + } + } + + ret = devm_request_threaded_irq(pca9450->dev, pca9450->irq, NULL, + pca9450_irq_handler, + (IRQF_TRIGGER_FALLING | IRQF_ONESHOT), + "pca9450-irq", pca9450); + if (ret != 0) { + dev_err(pca9450->dev, "Failed to request IRQ: %d\n", + pca9450->irq); + return ret; + } + /* Unmask all interrupt except PWRON/WDOG/RSVD */ + ret = regmap_update_bits(pca9450->regmap, PCA9450_REG_INT1_MSK, + IRQ_VR_FLT1 | 
IRQ_VR_FLT2 | IRQ_LOWVSYS | + IRQ_THERM_105 | IRQ_THERM_125, + IRQ_PWRON | IRQ_WDOGB | IRQ_RSVD); + if (ret) { + dev_err(&i2c->dev, "Unmask irq error\n"); + return ret; + } + + dev_info(&i2c->dev, "%s probed.\n", + type == PCA9450_TYPE_PCA9450A ? "pca9450a" : "pca9450bc"); + + return 0; +} + +static const struct of_device_id pca9450_of_match[] = { + { + .compatible = "nxp,pca9450a", + .data = (void *)PCA9450_TYPE_PCA9450A, + }, + { + .compatible = "nxp,pca9450b", + .data = (void *)PCA9450_TYPE_PCA9450BC, + }, + { + .compatible = "nxp,pca9450c", + .data = (void *)PCA9450_TYPE_PCA9450BC, + }, + { } +}; +MODULE_DEVICE_TABLE(of, pca9450_of_match); + +static struct i2c_driver pca9450_i2c_driver = { + .driver = { + .name = "nxp-pca9450", + .of_match_table = pca9450_of_match, + }, + .probe = pca9450_i2c_probe, +}; + +static int __init pca9450_i2c_init(void) +{ + return i2c_add_driver(&pca9450_i2c_driver); +} +module_init(pca9450_i2c_init); + +static void __exit pca9450_i2c_exit(void) +{ + i2c_del_driver(&pca9450_i2c_driver); +} +module_exit(pca9450_i2c_exit); + +MODULE_AUTHOR("Robin Gong "); +MODULE_DESCRIPTION("NXP PCA9450 Power Management IC driver"); +MODULE_LICENSE("GPL"); diff --git a/include/linux/regulator/pca9450.h b/include/linux/regulator/pca9450.h new file mode 100644 index 000000000000..1bbd3014f906 --- /dev/null +++ b/include/linux/regulator/pca9450.h @@ -0,0 +1,219 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Copyright 2020 NXP. */ + +#ifndef __LINUX_REG_PCA9450_H__ +#define __LINUX_REG_PCA9450_H__ + +#include + +enum pca9450_chip_type { + PCA9450_TYPE_PCA9450A = 0, + PCA9450_TYPE_PCA9450BC, + PCA9450_TYPE_AMOUNT, +}; + +enum { + PCA9450_BUCK1 = 0, + PCA9450_BUCK2, + PCA9450_BUCK3, + PCA9450_BUCK4, + PCA9450_BUCK5, + PCA9450_BUCK6, + PCA9450_LDO1, + PCA9450_LDO2, + PCA9450_LDO3, + PCA9450_LDO4, + PCA9450_LDO5, + PCA9450_REGULATOR_CNT, +}; + +enum { + PCA9450_DVS_LEVEL_RUN = 0, + PCA9450_DVS_LEVEL_STANDBY, + PCA9450_DVS_LEVEL_MAX, +}; + +#define PCA9450_BUCK1_VOLTAGE_NUM 0x80 +#define PCA9450_BUCK2_VOLTAGE_NUM 0x80 +#define PCA9450_BUCK3_VOLTAGE_NUM 0x80 +#define PCA9450_BUCK4_VOLTAGE_NUM 0x80 + +#define PCA9450_BUCK5_VOLTAGE_NUM 0x80 +#define PCA9450_BUCK6_VOLTAGE_NUM 0x80 + +#define PCA9450_LDO1_VOLTAGE_NUM 0x08 +#define PCA9450_LDO2_VOLTAGE_NUM 0x08 +#define PCA9450_LDO3_VOLTAGE_NUM 0x20 +#define PCA9450_LDO4_VOLTAGE_NUM 0x20 +#define PCA9450_LDO5_VOLTAGE_NUM 0x10 + +enum { + PCA9450_REG_DEV_ID = 0x00, + PCA9450_REG_INT1 = 0x01, + PCA9450_REG_INT1_MSK = 0x02, + PCA9450_REG_STATUS1 = 0x03, + PCA9450_REG_STATUS2 = 0x04, + PCA9450_REG_PWRON_STAT = 0x05, + PCA9450_REG_SWRST = 0x06, + PCA9450_REG_PWRCTRL = 0x07, + PCA9450_REG_RESET_CTRL = 0x08, + PCA9450_REG_CONFIG1 = 0x09, + PCA9450_REG_CONFIG2 = 0x0A, + PCA9450_REG_BUCK123_DVS = 0x0C, + PCA9450_REG_BUCK1OUT_LIMIT = 0x0D, + PCA9450_REG_BUCK2OUT_LIMIT = 0x0E, + PCA9450_REG_BUCK3OUT_LIMIT = 0x0F, + PCA9450_REG_BUCK1CTRL = 0x10, + PCA9450_REG_BUCK1OUT_DVS0 = 0x11, + PCA9450_REG_BUCK1OUT_DVS1 = 0x12, + PCA9450_REG_BUCK2CTRL = 0x13, + PCA9450_REG_BUCK2OUT_DVS0 = 0x14, + PCA9450_REG_BUCK2OUT_DVS1 = 0x15, + PCA9450_REG_BUCK3CTRL = 0x16, + PCA9450_REG_BUCK3OUT_DVS0 = 0x17, + PCA9450_REG_BUCK3OUT_DVS1 = 0x18, + PCA9450_REG_BUCK4CTRL = 0x19, + PCA9450_REG_BUCK4OUT = 0x1A, + PCA9450_REG_BUCK5CTRL = 0x1B, + PCA9450_REG_BUCK5OUT = 0x1C, + PCA9450_REG_BUCK6CTRL = 0x1D, + PCA9450_REG_BUCK6OUT = 0x1E, + PCA9450_REG_LDO_AD_CTRL = 0x20, + PCA9450_REG_LDO1CTRL = 0x21, + PCA9450_REG_LDO2CTRL = 0x22, + PCA9450_REG_LDO3CTRL = 0x23, + 
PCA9450_REG_LDO4CTRL = 0x24, + PCA9450_REG_LDO5CTRL_L = 0x25, + PCA9450_REG_LDO5CTRL_H = 0x26, + PCA9450_REG_LOADSW_CTRL = 0x2A, + PCA9450_REG_VRFLT1_STS = 0x2B, + PCA9450_REG_VRFLT2_STS = 0x2C, + PCA9450_REG_VRFLT1_MASK = 0x2D, + PCA9450_REG_VRFLT2_MASK = 0x2E, + PCA9450_MAX_REGISTER = 0x2F, +}; + +/* PCA9450 BUCK ENMODE bits */ +#define BUCK_ENMODE_OFF 0x00 +#define BUCK_ENMODE_ONREQ 0x01 +#define BUCK_ENMODE_ONREQ_STBYREQ 0x02 +#define BUCK_ENMODE_ON 0x03 + +/* PCA9450_REG_BUCK1_CTRL bits */ +#define BUCK1_RAMP_MASK 0xC0 +#define BUCK1_RAMP_25MV 0x0 +#define BUCK1_RAMP_12P5MV 0x1 +#define BUCK1_RAMP_6P25MV 0x2 +#define BUCK1_RAMP_3P125MV 0x3 +#define BUCK1_DVS_CTRL 0x10 +#define BUCK1_AD 0x08 +#define BUCK1_FPWM 0x04 +#define BUCK1_ENMODE_MASK 0x03 + +/* PCA9450_REG_BUCK2_CTRL bits */ +#define BUCK2_RAMP_MASK 0xC0 +#define BUCK2_RAMP_25MV 0x0 +#define BUCK2_RAMP_12P5MV 0x1 +#define BUCK2_RAMP_6P25MV 0x2 +#define BUCK2_RAMP_3P125MV 0x3 +#define BUCK2_DVS_CTRL 0x10 +#define BUCK2_AD 0x08 +#define BUCK2_FPWM 0x04 +#define BUCK2_ENMODE_MASK 0x03 + +/* PCA9450_REG_BUCK3_CTRL bits */ +#define BUCK3_RAMP_MASK 0xC0 +#define BUCK3_RAMP_25MV 0x0 +#define BUCK3_RAMP_12P5MV 0x1 +#define BUCK3_RAMP_6P25MV 0x2 +#define BUCK3_RAMP_3P125MV 0x3 +#define BUCK3_DVS_CTRL 0x10 +#define BUCK3_AD 0x08 +#define BUCK3_FPWM 0x04 +#define BUCK3_ENMODE_MASK 0x03 + +/* PCA9450_REG_BUCK4_CTRL bits */ +#define BUCK4_AD 0x08 +#define BUCK4_FPWM 0x04 +#define BUCK4_ENMODE_MASK 0x03 + +/* PCA9450_REG_BUCK5_CTRL bits */ +#define BUCK5_AD 0x08 +#define BUCK5_FPWM 0x04 +#define BUCK5_ENMODE_MASK 0x03 + +/* PCA9450_REG_BUCK6_CTRL bits */ +#define BUCK6_AD 0x08 +#define BUCK6_FPWM 0x04 +#define BUCK6_ENMODE_MASK 0x03 + +/* PCA9450_BUCK1OUT_DVS0 bits */ +#define BUCK1OUT_DVS0_MASK 0x7F +#define BUCK1OUT_DVS0_DEFAULT 0x14 + +/* PCA9450_BUCK1OUT_DVS1 bits */ +#define BUCK1OUT_DVS1_MASK 0x7F +#define BUCK1OUT_DVS1_DEFAULT 0x14 + +/* PCA9450_BUCK2OUT_DVS0 bits */ +#define BUCK2OUT_DVS0_MASK 0x7F +#define BUCK2OUT_DVS0_DEFAULT 0x14 + +/* PCA9450_BUCK2OUT_DVS1 bits */ +#define BUCK2OUT_DVS1_MASK 0x7F +#define BUCK2OUT_DVS1_DEFAULT 0x14 + +/* PCA9450_BUCK3OUT_DVS0 bits */ +#define BUCK3OUT_DVS0_MASK 0x7F +#define BUCK3OUT_DVS0_DEFAULT 0x14 + +/* PCA9450_BUCK3OUT_DVS1 bits */ +#define BUCK3OUT_DVS1_MASK 0x7F +#define BUCK3OUT_DVS1_DEFAULT 0x14 + +/* PCA9450_REG_BUCK4OUT bits */ +#define BUCK4OUT_MASK 0x7F +#define BUCK4OUT_DEFAULT 0x6C + +/* PCA9450_REG_BUCK5OUT bits */ +#define BUCK5OUT_MASK 0x7F +#define BUCK5OUT_DEFAULT 0x30 + +/* PCA9450_REG_BUCK6OUT bits */ +#define BUCK6OUT_MASK 0x7F +#define BUCK6OUT_DEFAULT 0x14 + +/* PCA9450_REG_LDO1_VOLT bits */ +#define LDO1_EN_MASK 0xC0 +#define LDO1OUT_MASK 0x07 + +/* PCA9450_REG_LDO2_VOLT bits */ +#define LDO2_EN_MASK 0xC0 +#define LDO2OUT_MASK 0x07 + +/* PCA9450_REG_LDO3_VOLT bits */ +#define LDO3_EN_MASK 0xC0 +#define LDO3OUT_MASK 0x0F + +/* PCA9450_REG_LDO4_VOLT bits */ +#define LDO4_EN_MASK 0xC0 +#define LDO4OUT_MASK 0x0F + +/* PCA9450_REG_LDO5_VOLT bits */ +#define LDO5L_EN_MASK 0xC0 +#define LDO5LOUT_MASK 0x0F + +#define LDO5H_EN_MASK 0xC0 +#define LDO5HOUT_MASK 0x0F + +/* PCA9450_REG_IRQ bits */ +#define IRQ_PWRON 0x80 +#define IRQ_WDOGB 0x40 +#define IRQ_RSVD 0x20 +#define IRQ_VR_FLT1 0x10 +#define IRQ_VR_FLT2 0x08 +#define IRQ_LOWVSYS 0x04 +#define IRQ_THERM_105 0x02 +#define IRQ_THERM_125 0x01 + +#endif /* __LINUX_REG_PCA9450_H__ */ -- cgit v1.2.3 From 5ab903418ad14732131df0af0d63f19b73e377ae Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 6 Jul 2020 18:38:19 +0300 
Subject: net: qed: sanitize BE/LE data processing The current code assumes in many places that both the host and the device operate in little-endian byte order. While this is true on the x86 platform, it doesn't mean we should not care about it. This commit addresses all parts of the code that were pointed out by the sparse checker. All operations on restricted (__be*/__le*) types are now protected with explicit from/to CPU conversions, even if they're no-ops on common setups. I'm sure there are more such places, but finding them implies a deeper code investigation and is a subject for future work. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 11 +- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 27 ++-- drivers/net/ethernet/qlogic/qed/qed_debug.c | 49 +++--- drivers/net/ethernet/qlogic/qed/qed_fcoe.c | 54 ++++--- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 48 +++--- .../net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 93 ++++++----- drivers/net/ethernet/qlogic/qed/qed_int.c | 74 +++++---- drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 43 +++-- drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 132 ++++++++++------ drivers/net/ethernet/qlogic/qed/qed_l2.c | 22 +-- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 9 +- drivers/net/ethernet/qlogic/qed/qed_main.c | 12 +- drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c | 4 +- drivers/net/ethernet/qlogic/qed/qed_rdma.c | 52 +++--- drivers/net/ethernet/qlogic/qed/qed_rdma.h | 2 +- drivers/net/ethernet/qlogic/qed/qed_roce.c | 176 +++++++++------------ drivers/net/ethernet/qlogic/qed/qed_sp.h | 2 +- drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 4 +- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 4 +- include/linux/qed/qed_if.h | 15 +- 20 files changed, 434 insertions(+), 399 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 3a62358b9749..5362dc18b6c2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -73,8 +73,8 @@ union type1_task_context { }; struct src_ent { - u8 opaque[56]; - u64 next; + __u8 opaque[56]; + __be64 next; }; #define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */ @@ -2177,6 +2177,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn, dma_addr_t p_phys; u64 ilt_hw_entry; void *p_virt; + u32 flags1; int rc = 0; switch (elem_type) { @@ -2255,8 +2256,10 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn, elem = (union type1_task_context *)elem_start; tdif_context = &elem->roce_ctx.tdif_context; - SET_FIELD(tdif_context->flags1, - TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf); + flags1 = le32_to_cpu(tdif_context->flags1); + SET_FIELD(flags1, TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf); + tdif_context->flags1 = cpu_to_le32(flags1); + elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn); } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 9f16a3a66007..17d5b649eb36 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -547,7 +547,8 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn, struct dcbx_ets_feature *p_ets, struct qed_dcbx_params *p_params) { - u32 bw_map[2], tsa_map[2], pri_map; + __be32 bw_map[2], tsa_map[2]; + u32 pri_map; int i; p_params->ets_willing = QED_MFW_GET_FIELD(p_ets->flags, @@ -573,11 +574,10 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn, /* 8 bit tsa and bw data corresponding to each of the 8 TC's are * encoded in a type u32 array of size
2. */ - bw_map[0] = be32_to_cpu(p_ets->tc_bw_tbl[0]); - bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]); - tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]); - tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]); + cpu_to_be32_array(bw_map, p_ets->tc_bw_tbl, 2); + cpu_to_be32_array(tsa_map, p_ets->tc_tsa_tbl, 2); pri_map = p_ets->pri_tc_tbl[0]; + for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) { p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i]; p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i]; @@ -1054,7 +1054,7 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn, struct dcbx_ets_feature *p_ets, struct qed_dcbx_params *p_params) { - u8 *bw_map, *tsa_map; + __be32 bw_map[2], tsa_map[2]; u32 val; int i; @@ -1076,22 +1076,21 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn, p_ets->flags &= ~DCBX_ETS_MAX_TCS_MASK; p_ets->flags |= (u32)p_params->max_ets_tc << DCBX_ETS_MAX_TCS_SHIFT; - bw_map = (u8 *)&p_ets->tc_bw_tbl[0]; - tsa_map = (u8 *)&p_ets->tc_tsa_tbl[0]; p_ets->pri_tc_tbl[0] = 0; + for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) { - bw_map[i] = p_params->ets_tc_bw_tbl[i]; - tsa_map[i] = p_params->ets_tc_tsa_tbl[i]; + ((u8 *)bw_map)[i] = p_params->ets_tc_bw_tbl[i]; + ((u8 *)tsa_map)[i] = p_params->ets_tc_tsa_tbl[i]; + /* Copy the priority value to the corresponding 4 bits in the * traffic class table. */ val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4)); p_ets->pri_tc_tbl[0] |= val; } - for (i = 0; i < 2; i++) { - p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]); - p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]); - } + + be32_to_cpu_array(p_ets->tc_bw_tbl, bw_map, 2); + be32_to_cpu_array(p_ets->tc_tsa_tbl, tsa_map, 2); } static void diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index f856bb9a3897..41ab23712bbd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -972,7 +972,7 @@ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn, { struct storm_defs *storm = &s_storm_defs[storm_id]; struct fw_info_location fw_info_location; - u32 addr, i, *dest; + u32 addr, i, size, *dest; memset(&fw_info_location, 0, sizeof(fw_info_location)); memset(fw_info, 0, sizeof(*fw_info)); @@ -985,20 +985,29 @@ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn, sizeof(fw_info_location); dest = (u32 *)&fw_info_location; + size = BYTES_TO_DWORDS(sizeof(fw_info_location)); - for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location)); - i++, addr += BYTES_IN_DWORD) + for (i = 0; i < size; i++, addr += BYTES_IN_DWORD) dest[i] = qed_rd(p_hwfn, p_ptt, addr); + /* qed_rq() fetches data in CPU byteorder. Swap it back to + * the device's to get right structure layout. + */ + cpu_to_le32_array(dest, size); + /* Read FW version info from Storm RAM */ - if (fw_info_location.size > 0 && fw_info_location.size <= - sizeof(*fw_info)) { - addr = fw_info_location.grc_addr; - dest = (u32 *)fw_info; - for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size); - i++, addr += BYTES_IN_DWORD) - dest[i] = qed_rd(p_hwfn, p_ptt, addr); - } + size = le32_to_cpu(fw_info_location.size); + if (!size || size > sizeof(*fw_info)) + return; + + addr = le32_to_cpu(fw_info_location.grc_addr); + dest = (u32 *)fw_info; + size = BYTES_TO_DWORDS(size); + + for (i = 0; i < size; i++, addr += BYTES_IN_DWORD) + dest[i] = qed_rd(p_hwfn, p_ptt, addr); + + cpu_to_le32_array(dest, size); } /* Dumps the specified string to the specified buffer. 
@@ -1123,7 +1132,7 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn, offset += qed_dump_str_param(dump_buf + offset, dump, "fw-image", fw_img_str); offset += qed_dump_num_param(dump_buf + offset, dump, "fw-timestamp", - fw_info.ver.timestamp); + le32_to_cpu(fw_info.ver.timestamp)); return offset; } @@ -4440,9 +4449,11 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, continue; } + addr = le16_to_cpu(asserts->section_ram_line_offset); fw_asserts_section_addr = storm->sem_fast_mem_addr + - SEM_FAST_REG_INT_RAM + - RAM_LINES_TO_BYTES(asserts->section_ram_line_offset); + SEM_FAST_REG_INT_RAM + + RAM_LINES_TO_BYTES(addr); + next_list_idx_addr = fw_asserts_section_addr + DWORDS_TO_BYTES(asserts->list_next_index_dword_offset); next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr); @@ -7650,8 +7661,7 @@ static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer, { struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug]; - u32 len_rounded, i; - __be32 val; + u32 len_rounded; int rc; *num_dumped_bytes = 0; @@ -7670,10 +7680,9 @@ static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer, /* QED_NVM_IMAGE_NVM_META image is not swapped like other images */ if (image_id != QED_NVM_IMAGE_NVM_META) - for (i = 0; i < len_rounded; i += 4) { - val = cpu_to_be32(*(u32 *)(buffer + i)); - *(u32 *)(buffer + i) = val; - } + cpu_to_be32_array((__force __be32 *)buffer, + (const u32 *)buffer, + len_rounded / sizeof(u32)); *num_dumped_bytes = len_rounded; diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c index a10e57bba6b9..b768f0698170 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -95,7 +95,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, struct qed_cxt_info cxt_info; u32 dummy_cid; int rc = 0; - u16 tmp; + __le16 tmp; u8 i; /* Get SPQ entry */ @@ -162,17 +162,13 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, tmp = cpu_to_le16(fcoe_pf_params->cmdq_num_entries); p_data->q_params.cmdq_num_entries = tmp; - tmp = fcoe_pf_params->num_cqs; - p_data->q_params.num_queues = (u8)tmp; + p_data->q_params.num_queues = fcoe_pf_params->num_cqs; - tmp = (u16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS]; - p_data->q_params.queue_relative_offset = (u8)tmp; + tmp = (__force __le16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS]; + p_data->q_params.queue_relative_offset = (__force u8)tmp; for (i = 0; i < fcoe_pf_params->num_cqs; i++) { - u16 igu_sb_id; - - igu_sb_id = qed_get_igu_sb_id(p_hwfn, i); - tmp = cpu_to_le16(igu_sb_id); + tmp = cpu_to_le16(qed_get_igu_sb_id(p_hwfn, i)); p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp; } @@ -185,21 +181,21 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]); p_data->q_params.bdq_pbl_num_entries[BDQ_ID_RQ] = fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_RQ]; - tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ]; - p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp); - tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ]; - p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp); + tmp = cpu_to_le16(fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ]); + p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = tmp; + tmp = cpu_to_le16(fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ]); + p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = tmp; DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_IMM_DATA], fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]); 
p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA] = fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA]; - tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA]; - p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp); - tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA]; - p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp); - tmp = fcoe_pf_params->rq_buffer_size; - p_data->q_params.rq_buffer_size = cpu_to_le16(tmp); + tmp = cpu_to_le16(fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA]); + p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = tmp; + tmp = cpu_to_le16(fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA]); + p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = tmp; + tmp = cpu_to_le16(fcoe_pf_params->rq_buffer_size); + p_data->q_params.rq_buffer_size = tmp; if (fcoe_pf_params->is_target) { SET_FIELD(p_data->q_params.q_validity, @@ -233,7 +229,8 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn, struct fcoe_conn_offload_ramrod_data *p_data; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; - u16 physical_q0, tmp; + u16 physical_q0; + __le16 tmp; int rc; /* Get SPQ entry */ @@ -254,7 +251,7 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn, /* Transmission PQ is the first of the PF */ physical_q0 = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); - p_conn->physical_q0 = cpu_to_le16(physical_q0); + p_conn->physical_q0 = physical_q0; p_data->physical_q0 = cpu_to_le16(physical_q0); p_data->conn_id = cpu_to_le16(p_conn->conn_id); @@ -553,8 +550,8 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn) void qed_fcoe_setup(struct qed_hwfn *p_hwfn) { struct e4_fcoe_task_context *p_task_ctx = NULL; + u32 i, lc; int rc; - u32 i; spin_lock_init(&p_hwfn->p_fcoe_info->lock); for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) { @@ -565,10 +562,15 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn) continue; memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context)); - SET_FIELD(p_task_ctx->timer_context.logical_client_0, - TIMERS_CONTEXT_VALIDLC0, 1); - SET_FIELD(p_task_ctx->timer_context.logical_client_1, - TIMERS_CONTEXT_VALIDLC1, 1); + + lc = 0; + SET_FIELD(lc, TIMERS_CONTEXT_VALIDLC0, 1); + p_task_ctx->timer_context.logical_client_0 = cpu_to_le32(lc); + + lc = 0; + SET_FIELD(lc, TIMERS_CONTEXT_VALIDLC1, 1); + p_task_ctx->timer_context.logical_client_1 = cpu_to_le32(lc); + SET_FIELD(p_task_ctx->tstorm_ag_context.flags0, E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 71809ff97a03..6bb0bbc0013b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -2793,7 +2793,7 @@ struct fw_overlay_buf_hdr { /* init array header: raw */ struct init_array_raw_hdr { - u32 data; + __le32 data; #define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF #define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0 #define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF @@ -2802,7 +2802,7 @@ struct init_array_raw_hdr { /* init array header: standard */ struct init_array_standard_hdr { - u32 data; + __le32 data; #define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF #define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0 #define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF @@ -2811,7 +2811,7 @@ struct init_array_standard_hdr { /* init array header: zipped */ struct init_array_zipped_hdr { - u32 data; + __le32 data; #define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF #define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0 #define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 
0xFFFFFFF @@ -2820,7 +2820,7 @@ struct init_array_zipped_hdr { /* init array header: pattern */ struct init_array_pattern_hdr { - u32 data; + __le32 data; #define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF #define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0 #define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF @@ -2847,48 +2847,48 @@ enum init_array_types { /* init operation: callback */ struct init_callback_op { - u32 op_data; + __le32 op_data; #define INIT_CALLBACK_OP_OP_MASK 0xF #define INIT_CALLBACK_OP_OP_SHIFT 0 #define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF #define INIT_CALLBACK_OP_RESERVED_SHIFT 4 - u16 callback_id; - u16 block_id; + __le16 callback_id; + __le16 block_id; }; /* init operation: delay */ struct init_delay_op { - u32 op_data; + __le32 op_data; #define INIT_DELAY_OP_OP_MASK 0xF #define INIT_DELAY_OP_OP_SHIFT 0 #define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF #define INIT_DELAY_OP_RESERVED_SHIFT 4 - u32 delay; + __le32 delay; }; /* init operation: if_mode */ struct init_if_mode_op { - u32 op_data; + __le32 op_data; #define INIT_IF_MODE_OP_OP_MASK 0xF #define INIT_IF_MODE_OP_OP_SHIFT 0 #define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF #define INIT_IF_MODE_OP_RESERVED1_SHIFT 4 #define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF #define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16 - u16 reserved2; - u16 modes_buf_offset; + __le16 reserved2; + __le16 modes_buf_offset; }; /* init operation: if_phase */ struct init_if_phase_op { - u32 op_data; + __le32 op_data; #define INIT_IF_PHASE_OP_OP_MASK 0xF #define INIT_IF_PHASE_OP_OP_SHIFT 0 #define INIT_IF_PHASE_OP_RESERVED1_MASK 0xFFF #define INIT_IF_PHASE_OP_RESERVED1_SHIFT 4 #define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF #define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16 - u32 phase_data; + __le32 phase_data; #define INIT_IF_PHASE_OP_PHASE_MASK 0xFF #define INIT_IF_PHASE_OP_PHASE_SHIFT 0 #define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF @@ -2907,31 +2907,31 @@ enum init_mode_ops { /* init operation: raw */ struct init_raw_op { - u32 op_data; + __le32 op_data; #define INIT_RAW_OP_OP_MASK 0xF #define INIT_RAW_OP_OP_SHIFT 0 #define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF #define INIT_RAW_OP_PARAM1_SHIFT 4 - u32 param2; + __le32 param2; }; /* init array params */ struct init_op_array_params { - u16 size; - u16 offset; + __le16 size; + __le16 offset; }; /* Write init operation arguments */ union init_write_args { - u32 inline_val; - u32 zeros_count; - u32 array_offset; + __le32 inline_val; + __le32 zeros_count; + __le32 array_offset; struct init_op_array_params runtime; }; /* init operation: write */ struct init_write_op { - u32 data; + __le32 data; #define INIT_WRITE_OP_OP_MASK 0xF #define INIT_WRITE_OP_OP_SHIFT 0 #define INIT_WRITE_OP_SOURCE_MASK 0x7 @@ -2947,7 +2947,7 @@ struct init_write_op { /* init operation: read */ struct init_read_op { - u32 op_data; + __le32 op_data; #define INIT_READ_OP_OP_MASK 0xF #define INIT_READ_OP_OP_SHIFT 0 #define INIT_READ_OP_POLL_TYPE_MASK 0xF @@ -2956,7 +2956,7 @@ struct init_read_op { #define INIT_READ_OP_RESERVED_SHIFT 8 #define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF #define INIT_READ_OP_ADDRESS_SHIFT 9 - u32 expected_val; + __le32 expected_val; }; /* Init operations union */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index 775ef5eaefd4..ea888a2c6ddb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -159,23 +159,22 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { #define 
QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, vp_pq_id, rl_valid, \ rl_id, ext_voq, wrr) \ do { \ - typeof(map) __map; \ + u32 __reg = 0; \ \ - memset(&__map, 0, sizeof(__map)); \ + BUILD_BUG_ON(sizeof((map).reg) != sizeof(__reg)); \ \ - SET_FIELD(__map.reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); \ - SET_FIELD(__map.reg, QM_RF_PQ_MAP_##chip##_RL_VALID, \ + SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_RL_VALID, \ !!(rl_valid)); \ - SET_FIELD(__map.reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, \ - (vp_pq_id)); \ - SET_FIELD(__map.reg, QM_RF_PQ_MAP_##chip##_RL_ID, (rl_id)); \ - SET_FIELD(__map.reg, QM_RF_PQ_MAP_##chip##_VOQ, (ext_voq)); \ - SET_FIELD(__map.reg, QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, \ + SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, (vp_pq_id)); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_RL_ID, (rl_id)); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_VOQ, (ext_voq)); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, \ (wrr)); \ \ STORE_RT_REG((p_hwfn), QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \ - *((u32 *)&__map)); \ - (map) = __map; \ + __reg); \ + (map).reg = cpu_to_le32(__reg); \ } while (0) #define WRITE_PQ_INFO_TO_RAM 1 @@ -1012,9 +1011,10 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, * input. */ static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u32 *p_data, u32 addr, u32 len_in_dwords) + __le32 *p_data, u32 addr, u32 len_in_dwords) { struct qed_dmae_params params = {}; + u32 *data_cpu; int rc; if (!p_data) @@ -1033,8 +1033,13 @@ static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Failed writing to chip using DMAE, using GRC instead\n"); - /* write to registers using GRC */ - ARR_REG_WR(p_hwfn, p_ptt, addr, p_data, len_in_dwords); + + /* Swap to CPU byteorder and write to registers using GRC */ + data_cpu = (__force u32 *)p_data; + le32_to_cpu_array(data_cpu, len_in_dwords); + + ARR_REG_WR(p_hwfn, p_ptt, addr, data_cpu, len_in_dwords); + cpu_to_le32_array(data_cpu, len_in_dwords); } return len_in_dwords; @@ -1235,7 +1240,7 @@ void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id) qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0); /* Zero ramline */ - qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line, + qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, sizeof(ram_line) / REG_SIZE); } @@ -1247,8 +1252,10 @@ void qed_gft_config(struct qed_hwfn *p_hwfn, bool udp, bool ipv4, bool ipv6, enum gft_profile_type profile_type) { - u32 reg_val, cam_line, search_non_ip_as_gft; - struct regpair ram_line = { }; + struct regpair ram_line; + u32 search_non_ip_as_gft; + u32 reg_val, cam_line; + u32 lo = 0, hi = 0; if (!ipv6 && !ipv4) DP_NOTICE(p_hwfn, @@ -1319,43 +1326,46 @@ void qed_gft_config(struct qed_hwfn *p_hwfn, search_non_ip_as_gft = 0; /* Tunnel type */ - SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1); - SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1); + SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1); + SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1); if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) { - SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1); - SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1); - SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); - SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1); - SET_FIELD(ram_line.lo, GFT_RAM_LINE_SRC_PORT, 1); - SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1); + SET_FIELD(hi, 
GFT_RAM_LINE_DST_IP, 1); + SET_FIELD(hi, GFT_RAM_LINE_SRC_IP, 1); + SET_FIELD(hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); + SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1); + SET_FIELD(lo, GFT_RAM_LINE_SRC_PORT, 1); + SET_FIELD(lo, GFT_RAM_LINE_DST_PORT, 1); } else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) { - SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); - SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1); - SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1); + SET_FIELD(hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); + SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1); + SET_FIELD(lo, GFT_RAM_LINE_DST_PORT, 1); } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) { - SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1); - SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1); + SET_FIELD(hi, GFT_RAM_LINE_DST_IP, 1); + SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1); } else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) { - SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1); - SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1); + SET_FIELD(hi, GFT_RAM_LINE_SRC_IP, 1); + SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1); } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) { - SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1); + SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1); /* Allow tunneled traffic without inner IP */ search_non_ip_as_gft = 1; } + ram_line.lo = cpu_to_le32(lo); + ram_line.hi = cpu_to_le32(hi); + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft); - qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line, + qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, sizeof(ram_line) / REG_SIZE); /* Set default profile so that no filter match will happen */ - ram_line.lo = 0xffffffff; - ram_line.hi = 0x3ff; - qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line, + ram_line.lo = cpu_to_le32(0xffffffff); + ram_line.hi = cpu_to_le32(0x3ff); + qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH, sizeof(ram_line) / REG_SIZE); @@ -1373,7 +1383,7 @@ static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid) u8 crc, validation_byte = 0; static u8 crc8_table_valid; /* automatically initialized to 0 */ u32 validation_string = 0; - u32 data_to_crc; + __be32 data_to_crc; if (!crc8_table_valid) { crc8_populate_msb(cdu_crc8_table, 0x07); @@ -1395,10 +1405,9 @@ static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid) validation_string |= (conn_type & 0xF); /* Convert to big-endian and calculate CRC8 */ - data_to_crc = be32_to_cpu(validation_string); - - crc = crc8(cdu_crc8_table, - (u8 *)&data_to_crc, sizeof(data_to_crc), CRC8_INIT_VALUE); + data_to_crc = cpu_to_be32(validation_string); + crc = crc8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc), + CRC8_INIT_VALUE); /* The validation byte [7:0] is composed: * for type A validation diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 0da38c47a8cf..9be40280eaaa 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -1191,16 +1191,15 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn) static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn, void __iomem *igu_addr, u32 ack_cons) { - struct igu_prod_cons_update igu_ack = { 0 }; + u32 igu_ack; - igu_ack.sb_id_and_flags = - ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | - (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | - (IGU_INT_NOP << 
IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | - (IGU_SEG_ACCESS_ATTN << - IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); + igu_ack = ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | + (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | + (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | + (IGU_SEG_ACCESS_ATTN << + IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); - DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags); + DIRECT_REG_WR(igu_addr, igu_ack); /* Both segments (interrupts & acks) are written to same place address; * Need to guarantee all commands will be received (in-order) by HW. @@ -1414,16 +1413,16 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, u8 pf_id, u16 vf_number, u8 vf_valid) { struct qed_dev *cdev = p_hwfn->cdev; - u32 cau_state; + u32 cau_state, params = 0, data = 0; u8 timer_res; memset(p_sb_entry, 0, sizeof(*p_sb_entry)); - SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id); - SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number); - SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid); - SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F); - SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F); + SET_FIELD(params, CAU_SB_ENTRY_PF_NUMBER, pf_id); + SET_FIELD(params, CAU_SB_ENTRY_VF_NUMBER, vf_number); + SET_FIELD(params, CAU_SB_ENTRY_VF_VALID, vf_valid); + SET_FIELD(params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F); + SET_FIELD(params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F); cau_state = CAU_HC_DISABLE_STATE; @@ -1442,7 +1441,8 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, timer_res = 1; else timer_res = 2; - SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res); + + SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES0, timer_res); if (cdev->tx_coalesce_usecs <= 0x7F) timer_res = 0; @@ -1450,10 +1450,13 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, timer_res = 1; else timer_res = 2; - SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res); - SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state); - SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state); + SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES1, timer_res); + p_sb_entry->params = cpu_to_le32(params); + + SET_FIELD(data, CAU_SB_ENTRY_STATE0, cau_state); + SET_FIELD(data, CAU_SB_ENTRY_STATE1, cau_state); + p_sb_entry->data = cpu_to_le32(data); } static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, @@ -1463,31 +1466,27 @@ static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, enum qed_coalescing_fsm coalescing_fsm, u8 timeset) { - struct cau_pi_entry pi_entry; u32 sb_offset, pi_offset; + u32 prod = 0; if (IS_VF(p_hwfn->cdev)) return; - sb_offset = igu_sb_id * PIS_PER_SB_E4; - memset(&pi_entry, 0, sizeof(struct cau_pi_entry)); - - SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset); + SET_FIELD(prod, CAU_PI_ENTRY_PI_TIMESET, timeset); if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE) - SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0); + SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 0); else - SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1); + SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 1); + sb_offset = igu_sb_id * PIS_PER_SB_E4; pi_offset = sb_offset + pi_index; - if (p_hwfn->hw_init_done) { + + if (p_hwfn->hw_init_done) qed_wr(p_hwfn, p_ptt, - CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), - *((u32 *)&(pi_entry))); - } else { - STORE_RT_REG(p_hwfn, - CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset, - *((u32 *)&(pi_entry))); - } + CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), prod); + else + STORE_RT_REG(p_hwfn, CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset, + 
prod); } void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, @@ -2356,6 +2355,7 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 timer_res, u16 sb_id, bool tx) { struct cau_sb_entry sb_entry; + u32 params; int rc; if (!p_hwfn->hw_init_done) { @@ -2371,10 +2371,14 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, return rc; } + params = le32_to_cpu(sb_entry.params); + if (tx) - SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res); + SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES1, timer_res); else - SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res); + SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES0, timer_res); + + sb_entry.params = cpu_to_le32(params); rc = qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry, diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index 8abb31b63e4e..25d2c882d7ac 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -118,7 +118,7 @@ struct qed_iscsi_conn { }; static int qed_iscsi_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, - u16 echo, union event_ring_data *data, + __le16 echo, union event_ring_data *data, u8 fw_return_code) { if (p_hwfn->p_iscsi_info->event_cb) { @@ -270,6 +270,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, dma_addr_t xhq_pbl_addr; dma_addr_t uhq_pbl_addr; u16 physical_q; + __le16 tmp; int rc = 0; u32 dval; u16 wval; @@ -293,12 +294,12 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, /* Transmission PQ is the first of the PF */ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); - p_conn->physical_q0 = cpu_to_le16(physical_q); + p_conn->physical_q0 = physical_q; p_ramrod->iscsi.physical_q0 = cpu_to_le16(physical_q); /* iSCSI Pure-ACK PQ */ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK); - p_conn->physical_q1 = cpu_to_le16(physical_q); + p_conn->physical_q1 = physical_q; p_ramrod->iscsi.physical_q1 = cpu_to_le16(physical_q); p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); @@ -324,14 +325,20 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, p_tcp = &p_ramrod->tcp; p = (u16 *)p_conn->local_mac; - p_tcp->local_mac_addr_hi = swab16(get_unaligned(p)); - p_tcp->local_mac_addr_mid = swab16(get_unaligned(p + 1)); - p_tcp->local_mac_addr_lo = swab16(get_unaligned(p + 2)); + tmp = cpu_to_le16(get_unaligned_be16(p)); + p_tcp->local_mac_addr_hi = tmp; + tmp = cpu_to_le16(get_unaligned_be16(p + 1)); + p_tcp->local_mac_addr_mid = tmp; + tmp = cpu_to_le16(get_unaligned_be16(p + 2)); + p_tcp->local_mac_addr_lo = tmp; p = (u16 *)p_conn->remote_mac; - p_tcp->remote_mac_addr_hi = swab16(get_unaligned(p)); - p_tcp->remote_mac_addr_mid = swab16(get_unaligned(p + 1)); - p_tcp->remote_mac_addr_lo = swab16(get_unaligned(p + 2)); + tmp = cpu_to_le16(get_unaligned_be16(p)); + p_tcp->remote_mac_addr_hi = tmp; + tmp = cpu_to_le16(get_unaligned_be16(p + 1)); + p_tcp->remote_mac_addr_mid = tmp; + tmp = cpu_to_le16(get_unaligned_be16(p + 2)); + p_tcp->remote_mac_addr_lo = tmp; p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id); @@ -390,14 +397,20 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, &((struct iscsi_spe_conn_offload_option2 *)p_ramrod)->tcp; p = (u16 *)p_conn->local_mac; - p_tcp2->local_mac_addr_hi = swab16(get_unaligned(p)); - p_tcp2->local_mac_addr_mid = swab16(get_unaligned(p + 1)); - p_tcp2->local_mac_addr_lo = swab16(get_unaligned(p + 2)); + tmp = cpu_to_le16(get_unaligned_be16(p)); + 
p_tcp2->local_mac_addr_hi = tmp; + tmp = cpu_to_le16(get_unaligned_be16(p + 1)); + p_tcp2->local_mac_addr_mid = tmp; + tmp = cpu_to_le16(get_unaligned_be16(p + 2)); + p_tcp2->local_mac_addr_lo = tmp; p = (u16 *)p_conn->remote_mac; - p_tcp2->remote_mac_addr_hi = swab16(get_unaligned(p)); - p_tcp2->remote_mac_addr_mid = swab16(get_unaligned(p + 1)); - p_tcp2->remote_mac_addr_lo = swab16(get_unaligned(p + 2)); + tmp = cpu_to_le16(get_unaligned_be16(p)); + p_tcp2->remote_mac_addr_hi = tmp; + tmp = cpu_to_le16(get_unaligned_be16(p + 1)); + p_tcp2->remote_mac_addr_mid = tmp; + tmp = cpu_to_le16(get_unaligned_be16(p + 2)); + p_tcp2->remote_mac_addr_lo = tmp; p_tcp2->vlan_id = cpu_to_le16(p_conn->vlan_id); p_tcp2->flags = cpu_to_le16(p_conn->tcp_flags); diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 55e73a842507..512cbef24097 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -60,7 +60,7 @@ struct mpa_v2_hdr { #define QED_IWARP_DEF_KA_INTERVAL (1000) /* 1 sec */ static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, - u16 echo, union event_ring_data *data, + __le16 echo, union event_ring_data *data, u8 fw_return_code); /* Override devinfo with iWARP specific values */ @@ -246,14 +246,14 @@ int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn, SET_FIELD(p_ramrod->flags, IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq); - p_ramrod->pd = qp->pd; - p_ramrod->sq_num_pages = qp->sq_num_pages; - p_ramrod->rq_num_pages = qp->rq_num_pages; + p_ramrod->pd = cpu_to_le16(qp->pd); + p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages); + p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages); p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id); p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid); - p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi); - p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo); + p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi; + p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo; p_ramrod->cq_cid_for_sq = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id); @@ -288,6 +288,7 @@ static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) struct iwarp_modify_qp_ramrod_data *p_ramrod; struct qed_sp_init_data init_data; struct qed_spq_entry *p_ent; + u16 flags, trans_to_state; int rc; /* Get SPQ entry */ @@ -303,12 +304,17 @@ static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) return rc; p_ramrod = &p_ent->ramrod.iwarp_modify_qp; - SET_FIELD(p_ramrod->flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN, - 0x1); + + flags = le16_to_cpu(p_ramrod->flags); + SET_FIELD(flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN, 0x1); + p_ramrod->flags = cpu_to_le16(flags); + if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING) - p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_CLOSING; + trans_to_state = IWARP_MODIFY_QP_STATE_CLOSING; else - p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_ERROR; + trans_to_state = IWARP_MODIFY_QP_STATE_ERROR; + + p_ramrod->transition_to_state = cpu_to_le16(trans_to_state); rc = qed_spq_post(p_hwfn, p_ent, NULL); @@ -621,6 +627,7 @@ qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) dma_addr_t async_output_phys; dma_addr_t in_pdata_phys; u16 physical_q; + u16 flags = 0; u8 tcp_flags; int rc; int i; @@ -673,13 +680,14 @@ qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) tcp->vlan_id = 
cpu_to_le16(ep->cm_info.vlan); tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags; - tcp->flags = 0; - SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN, + + SET_FIELD(flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN, !!(tcp_flags & QED_IWARP_TS_EN)); - SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN, + SET_FIELD(flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN, !!(tcp_flags & QED_IWARP_DA_EN)); + tcp->flags = cpu_to_le16(flags); tcp->ip_version = ep->cm_info.ip_version; for (i = 0; i < 4; i++) { @@ -695,10 +703,10 @@ qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) tcp->tos_or_tc = 0; tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME; - tcp->cwnd = QED_IWARP_DEF_CWND_FACTOR * tcp->mss; + tcp->cwnd = cpu_to_le32(QED_IWARP_DEF_CWND_FACTOR * ep->mss); tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT; - tcp->ka_timeout = QED_IWARP_DEF_KA_TIMEOUT; - tcp->ka_interval = QED_IWARP_DEF_KA_INTERVAL; + tcp->ka_timeout = cpu_to_le32(QED_IWARP_DEF_KA_TIMEOUT); + tcp->ka_interval = cpu_to_le32(QED_IWARP_DEF_KA_INTERVAL); tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale; tcp->connect_mode = ep->connect_mode; @@ -729,6 +737,7 @@ qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) union async_output *async_data; u16 mpa_ord, mpa_ird; u8 mpa_hdr_size = 0; + u16 ulp_data_len; u8 mpa_rev; async_data = &ep->ep_buffer_virt->async_output; @@ -792,8 +801,8 @@ qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) /* Strip mpa v2 hdr from private data before sending to upper layer */ ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size; - ep->cm_info.private_data_len = async_data->mpa_request.ulp_data_len - - mpa_hdr_size; + ulp_data_len = le16_to_cpu(async_data->mpa_request.ulp_data_len); + ep->cm_info.private_data_len = ulp_data_len - mpa_hdr_size; params.event = QED_IWARP_EVENT_MPA_REQUEST; params.cm_info = &ep->cm_info; @@ -817,6 +826,7 @@ qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) dma_addr_t in_pdata_phys; struct qed_rdma_qp *qp; bool reject; + u32 val; int rc; if (!ep) @@ -847,13 +857,15 @@ qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) offsetof(struct qed_iwarp_ep_memory, out_pdata); DMA_REGPAIR_LE(common->outgoing_ulp_buffer.addr, out_pdata_phys); - common->outgoing_ulp_buffer.len = ep->cm_info.private_data_len; + val = ep->cm_info.private_data_len; + common->outgoing_ulp_buffer.len = cpu_to_le16(val); common->crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed; - common->out_rq.ord = ep->cm_info.ord; - common->out_rq.ird = ep->cm_info.ird; + common->out_rq.ord = cpu_to_le32(ep->cm_info.ord); + common->out_rq.ird = cpu_to_le32(ep->cm_info.ird); - p_mpa_ramrod->tcp_cid = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid; + val = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid; + p_mpa_ramrod->tcp_cid = cpu_to_le32(val); in_pdata_phys = ep->ep_buffer_phys + offsetof(struct qed_iwarp_ep_memory, in_pdata); @@ -879,7 +891,7 @@ qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) } iwarp_info = &p_hwfn->p_rdma_info->iwarp; - p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size; + p_mpa_ramrod->rcv_wnd = cpu_to_le16(iwarp_info->rcv_wnd_size); p_mpa_ramrod->mode = ep->mpa_rev; SET_FIELD(p_mpa_ramrod->rtr_pref, IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type); @@ -930,6 +942,7 @@ qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) union async_output *async_data; u16 mpa_ird, mpa_ord; u8 mpa_data_size = 0; + u16 ulp_data_len; if 
(MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) { mpa_v2_params = @@ -941,11 +954,12 @@ qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK); ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK); } - async_data = &ep->ep_buffer_virt->async_output; + async_data = &ep->ep_buffer_virt->async_output; ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size; - ep->cm_info.private_data_len = async_data->mpa_response.ulp_data_len - - mpa_data_size; + + ulp_data_len = le16_to_cpu(async_data->mpa_response.ulp_data_len); + ep->cm_info.private_data_len = ulp_data_len - mpa_data_size; } static void @@ -1822,7 +1836,7 @@ qed_iwarp_mpa_classify(struct qed_hwfn *p_hwfn, goto out; } - mpa_len = ntohs(*((u16 *)(mpa_data))); + mpa_len = ntohs(*(__force __be16 *)mpa_data); fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len); if (fpdu->fpdu_length <= tcp_payload_len) @@ -1844,11 +1858,13 @@ qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff *buf, struct unaligned_opaque_data *pkt_data, u16 tcp_payload_size, u8 placement_offset) { + u16 first_mpa_offset = le16_to_cpu(pkt_data->first_mpa_offset); + fpdu->mpa_buf = buf; fpdu->pkt_hdr = buf->data_phys_addr + placement_offset; fpdu->pkt_hdr_size = pkt_data->tcp_payload_offset; - fpdu->mpa_frag = buf->data_phys_addr + pkt_data->first_mpa_offset; - fpdu->mpa_frag_virt = (u8 *)(buf->data) + pkt_data->first_mpa_offset; + fpdu->mpa_frag = buf->data_phys_addr + first_mpa_offset; + fpdu->mpa_frag_virt = (u8 *)(buf->data) + first_mpa_offset; if (tcp_payload_size == 1) fpdu->incomplete_bytes = QED_IWARP_INVALID_FPDU_LENGTH; @@ -1866,6 +1882,7 @@ qed_iwarp_cp_pkt(struct qed_hwfn *p_hwfn, struct unaligned_opaque_data *pkt_data, struct qed_iwarp_ll2_buff *buf, u16 tcp_payload_size) { + u16 first_mpa_offset = le16_to_cpu(pkt_data->first_mpa_offset); u8 *tmp_buf = p_hwfn->p_rdma_info->iwarp.mpa_intermediate_buf; int rc; @@ -1886,13 +1903,11 @@ qed_iwarp_cp_pkt(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "MPA ALIGN Copying fpdu: [%p, %d] [%p, %d]\n", fpdu->mpa_frag_virt, fpdu->mpa_frag_len, - (u8 *)(buf->data) + pkt_data->first_mpa_offset, - tcp_payload_size); + (u8 *)(buf->data) + first_mpa_offset, tcp_payload_size); memcpy(tmp_buf, fpdu->mpa_frag_virt, fpdu->mpa_frag_len); memcpy(tmp_buf + fpdu->mpa_frag_len, - (u8 *)(buf->data) + pkt_data->first_mpa_offset, - tcp_payload_size); + (u8 *)(buf->data) + first_mpa_offset, tcp_payload_size); rc = qed_iwarp_recycle_pkt(p_hwfn, fpdu, fpdu->mpa_buf); if (rc) @@ -2035,6 +2050,7 @@ qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn, u16 tcp_payload_size, enum qed_iwarp_mpa_pkt_type pkt_type) { struct qed_ll2_tx_pkt_info tx_pkt; + u16 first_mpa_offset; u8 ll2_handle; int rc; @@ -2086,11 +2102,13 @@ qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn, if (!fpdu->incomplete_bytes) goto out; + first_mpa_offset = le16_to_cpu(curr_pkt->first_mpa_offset); + /* Set third fragment to second part of the packet */ rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, ll2_handle, buf->data_phys_addr + - curr_pkt->first_mpa_offset, + first_mpa_offset, fpdu->incomplete_bytes); out: DP_VERBOSE(p_hwfn, @@ -2111,12 +2129,12 @@ qed_iwarp_mpa_get_data(struct qed_hwfn *p_hwfn, { u64 opaque_data; - opaque_data = HILO_64(opaque_data1, opaque_data0); + opaque_data = HILO_64(cpu_to_le32(opaque_data1), + cpu_to_le32(opaque_data0)); *curr_pkt = *((struct unaligned_opaque_data *)&opaque_data); - curr_pkt->first_mpa_offset = curr_pkt->tcp_payload_offset + - 
le16_to_cpu(curr_pkt->first_mpa_offset); - curr_pkt->cid = le32_to_cpu(curr_pkt->cid); + le16_add_cpu(&curr_pkt->first_mpa_offset, + curr_pkt->tcp_payload_offset); } /* This function is called when an unaligned or incomplete MPA packet arrives @@ -2131,18 +2149,22 @@ qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn, struct qed_iwarp_ll2_buff *buf = mpa_buf->ll2_buf; enum qed_iwarp_mpa_pkt_type pkt_type; struct qed_iwarp_fpdu *fpdu; + u16 cid, first_mpa_offset; int rc = -EINVAL; u8 *mpa_data; - fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, curr_pkt->cid & 0xffff); + cid = le32_to_cpu(curr_pkt->cid); + + fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)cid); if (!fpdu) { /* something corrupt with cid, post rx back */ DP_ERR(p_hwfn, "Invalid cid, drop and post back to rx cid=%x\n", - curr_pkt->cid); + cid); goto err; } do { - mpa_data = ((u8 *)(buf->data) + curr_pkt->first_mpa_offset); + first_mpa_offset = le16_to_cpu(curr_pkt->first_mpa_offset); + mpa_data = ((u8 *)(buf->data) + first_mpa_offset); pkt_type = qed_iwarp_mpa_classify(p_hwfn, fpdu, mpa_buf->tcp_payload_len, @@ -2188,7 +2210,8 @@ qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn, } mpa_buf->tcp_payload_len -= fpdu->fpdu_length; - curr_pkt->first_mpa_offset += fpdu->fpdu_length; + le16_add_cpu(&curr_pkt->first_mpa_offset, + fpdu->fpdu_length); break; case QED_IWARP_MPA_PKT_UNALIGNED: qed_iwarp_update_fpdu_length(p_hwfn, fpdu, mpa_data); @@ -2227,7 +2250,9 @@ qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn, } mpa_buf->tcp_payload_len -= fpdu->incomplete_bytes; - curr_pkt->first_mpa_offset += fpdu->incomplete_bytes; + le16_add_cpu(&curr_pkt->first_mpa_offset, + fpdu->incomplete_bytes); + /* The framed PDU was sent - no more incomplete bytes */ fpdu->incomplete_bytes = 0; break; @@ -2278,6 +2303,7 @@ qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) struct qed_iwarp_ll2_mpa_buf *mpa_buf; struct qed_iwarp_info *iwarp_info; struct qed_hwfn *p_hwfn = cxt; + u16 first_mpa_offset; iwarp_info = &p_hwfn->p_rdma_info->iwarp; mpa_buf = list_first_entry(&iwarp_info->mpa_buf_list, @@ -2291,17 +2317,21 @@ qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) qed_iwarp_mpa_get_data(p_hwfn, &mpa_buf->data, data->opaque_data_0, data->opaque_data_1); + first_mpa_offset = le16_to_cpu(mpa_buf->data.first_mpa_offset); + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "LL2 MPA CompRx payload_len:0x%x\tfirst_mpa_offset:0x%x\ttcp_payload_offset:0x%x\tflags:0x%x\tcid:0x%x\n", - data->length.packet_length, mpa_buf->data.first_mpa_offset, + data->length.packet_length, first_mpa_offset, mpa_buf->data.tcp_payload_offset, mpa_buf->data.flags, mpa_buf->data.cid); mpa_buf->ll2_buf = data->cookie; mpa_buf->tcp_payload_len = data->length.packet_length - - mpa_buf->data.first_mpa_offset; - mpa_buf->data.first_mpa_offset += data->u.placement_offset; + first_mpa_offset; + + first_mpa_offset += data->u.placement_offset; + mpa_buf->data.first_mpa_offset = cpu_to_le16(first_mpa_offset); mpa_buf->placement_offset = data->u.placement_offset; list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_pending_list); @@ -2500,14 +2530,16 @@ qed_iwarp_ll2_slowpath(void *cxt, struct unaligned_opaque_data unalign_data; struct qed_hwfn *p_hwfn = cxt; struct qed_iwarp_fpdu *fpdu; + u32 cid; qed_iwarp_mpa_get_data(p_hwfn, &unalign_data, opaque_data_0, opaque_data_1); - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "(0x%x) Flush fpdu\n", - unalign_data.cid); + cid = le32_to_cpu(unalign_data.cid); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "(0x%x) Flush fpdu\n", cid); - fpdu = 
qed_iwarp_get_curr_fpdu(p_hwfn, (u16)unalign_data.cid); + fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)cid); if (fpdu) memset(fpdu, 0, sizeof(*fpdu)); } @@ -3010,7 +3042,7 @@ qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) } static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, - u16 echo, union event_ring_data *data, + __le16 echo, union event_ring_data *data, u8 fw_return_code) { struct qed_rdma_events events = p_hwfn->p_rdma_info->events; diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 41afd15f4991..4c6ac8862744 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -345,8 +345,8 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, struct eth_vport_tpa_param *tpa_param; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; + u16 min_size, rx_mode = 0; u8 abs_vport_id = 0; - u16 rx_mode = 0; int rc; rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); @@ -386,10 +386,12 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, switch (p_params->tpa_mode) { case QED_TPA_MODE_GRO: + min_size = p_params->mtu / 2; + tpa_param->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; - tpa_param->tpa_max_size = (u16)-1; - tpa_param->tpa_min_size_to_cont = p_params->mtu / 2; - tpa_param->tpa_min_size_to_start = p_params->mtu / 2; + tpa_param->tpa_max_size = cpu_to_le16(U16_MAX); + tpa_param->tpa_min_size_to_cont = cpu_to_le16(min_size); + tpa_param->tpa_min_size_to_start = cpu_to_le16(min_size); tpa_param->tpa_ipv4_en_flg = 1; tpa_param->tpa_ipv6_en_flg = 1; tpa_param->tpa_pkt_split_flg = 1; @@ -626,9 +628,9 @@ qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn, tpa->tpa_hdr_data_split_flg = param->tpa_hdr_data_split_flg; tpa->tpa_gro_consistent_flg = param->tpa_gro_consistent_flg; tpa->tpa_max_aggs_num = param->tpa_max_aggs_num; - tpa->tpa_max_size = param->tpa_max_size; - tpa->tpa_min_size_to_start = param->tpa_min_size_to_start; - tpa->tpa_min_size_to_cont = param->tpa_min_size_to_cont; + tpa->tpa_max_size = cpu_to_le16(param->tpa_max_size); + tpa->tpa_min_size_to_start = cpu_to_le16(param->tpa_min_size_to_start); + tpa->tpa_min_size_to_cont = cpu_to_le16(param->tpa_min_size_to_cont); } static void @@ -2090,7 +2092,8 @@ int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn, return rc; } - timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0); + timer_res = GET_FIELD(le32_to_cpu(sb_entry.params), + CAU_SB_ENTRY_TIMER_RES0); address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); @@ -2123,7 +2126,8 @@ int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn, return rc; } - timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1); + timer_res = GET_FIELD(le32_to_cpu(sb_entry.params), + CAU_SB_ENTRY_TIMER_RES1); address = BAR0_MAP_REG_XSDM_RAM + XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index cce6fd27c042..6f4aec339cd4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -1798,6 +1798,7 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, enum core_roce_flavor_type roce_flavor; enum core_tx_dest tx_dest; u16 bd_data = 0, frag_idx; + u16 bitfield1; roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? 
CORE_ROCE : CORE_RROCE; @@ -1829,9 +1830,11 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, pkt->remove_stag = true; } - SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, - cpu_to_le16(pkt->l4_hdr_offset_w)); - SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest); + bitfield1 = le16_to_cpu(start_bd->bitfield1); + SET_FIELD(bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, pkt->l4_hdr_offset_w); + SET_FIELD(bitfield1, CORE_TX_BD_TX_DST, tx_dest); + start_bd->bitfield1 = cpu_to_le16(bitfield1); + bd_data |= pkt->bd_flags; SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1); SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds); diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 236013da9453..4c5f5bd91359 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1962,8 +1962,7 @@ static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev, u32 *crc) { u8 *buf = NULL; - int rc, j; - u32 val; + int rc; /* Allocate a buffer for holding the nvram image */ buf = kzalloc(nvm_image->length, GFP_KERNEL); @@ -1981,15 +1980,14 @@ static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev, /* Convert the buffer into big-endian format (excluding the * closing 4 bytes of CRC). */ - for (j = 0; j < nvm_image->length - 4; j += 4) { - val = cpu_to_be32(*(u32 *)&buf[j]); - *(u32 *)&buf[j] = val; - } + cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf, + DIV_ROUND_UP(nvm_image->length - 4, 4)); /* Calc CRC for the "actual" image buffer, i.e. not including * the last 4 CRC bytes. */ - *crc = (~cpu_to_be32(crc32(0xffffffff, buf, nvm_image->length - 4))); + *crc = ~crc32(~0U, buf, nvm_image->length - 4); + *crc = (__force u32)cpu_to_be32p(crc); out: kfree(buf); diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c index 1dd01e0373ab..3e3192a3ad9b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c @@ -1276,7 +1276,7 @@ int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) */ for (offset = 0; offset < size; offset += sizeof(u32)) { val = qed_rd(p_hwfn, p_ptt, addr + offset); - val = be32_to_cpu(val); + val = be32_to_cpu((__force __be32)val); memcpy(&p_mfw_buf[offset], &val, sizeof(u32)); } @@ -1325,7 +1325,7 @@ int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) */ for (offset = 0; offset < size; offset += sizeof(u32)) { memcpy(&val, &p_mfw_buf[offset], sizeof(u32)); - val = cpu_to_be32(val); + val = (__force u32)cpu_to_be32(val); qed_wr(p_hwfn, p_ptt, addr + offset, val); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 59d916693654..e5648ca2838b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -1106,7 +1106,7 @@ static int qed_rdma_create_cq(void *rdma_cxt, p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages); p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) + params->cnq_id; - p_ramrod->int_timeout = params->int_timeout; + p_ramrod->int_timeout = cpu_to_le16(params->int_timeout); /* toggle the bit for every resize or create cq for a given icid */ toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid); @@ -1206,7 +1206,7 @@ err: dma_free_coherent(&p_hwfn->cdev->pdev->dev, return rc; } -void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac) +void qed_rdma_set_fw_mac(__le16 
*p_fw_mac, const u8 *p_qed_mac) { p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]); p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]); @@ -1495,6 +1495,7 @@ qed_rdma_register_tid(void *rdma_cxt, struct qed_spq_entry *p_ent; enum rdma_tid_type tid_type; u8 fw_return_code; + u16 flags = 0; int rc; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid); @@ -1514,54 +1515,46 @@ qed_rdma_register_tid(void *rdma_cxt, if (p_hwfn->p_rdma_info->last_tid < params->itid) p_hwfn->p_rdma_info->last_tid = params->itid; - p_ramrod = &p_ent->ramrod.rdma_register_tid; - - p_ramrod->flags = 0; - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL, + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL, params->pbl_two_level); - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva); + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, + params->zbva); - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr); + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr); /* Don't initialize D/C field, as it may override other bits. */ if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr)) - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG, + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG, params->page_size_log - 12); - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ, + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ, params->remote_read); - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE, + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE, params->remote_write); - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC, + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC, params->remote_atomic); - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE, + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE, params->local_write); - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read); + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, + params->local_read); - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND, + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND, params->mw_bind); + p_ramrod = &p_ent->ramrod.rdma_register_tid; + p_ramrod->flags = cpu_to_le16(flags); + SET_FIELD(p_ramrod->flags1, RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG, params->pbl_page_size_log - 12); - SET_FIELD(p_ramrod->flags2, - RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr); + SET_FIELD(p_ramrod->flags2, RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, + params->dma_mr); switch (params->tid_type) { case QED_RDMA_TID_REGISTERED_MR: @@ -1579,8 +1572,9 @@ qed_rdma_register_tid(void *rdma_cxt, qed_sp_destroy_request(p_hwfn, p_ent); return rc; } - SET_FIELD(p_ramrod->flags1, - RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type); + + SET_FIELD(p_ramrod->flags1, RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, + tid_type); p_ramrod->itid = cpu_to_le32(params->itid); p_ramrod->key = params->key; diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h index fba43adbb68e..6a1de3a25257 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h @@ -201,7 +201,7 @@ qed_bmap_release_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num); int qed_bmap_test_id(struct qed_hwfn 
*p_hwfn, struct qed_bmap *bmap, u32 id_num); -void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac); +void qed_rdma_set_fw_mac(__le16 *p_fw_mac, const u8 *p_qed_mac); bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index 5433e43a1930..a1423ec0edf7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -38,7 +38,7 @@ static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid); static int qed_roce_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, - u16 echo, union event_ring_data *data, + __le16 echo, union event_ring_data *data, u8 fw_return_code) { struct qed_rdma_events events = p_hwfn->p_rdma_info->events; @@ -54,7 +54,7 @@ static int qed_roce_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, qed_roce_free_real_icid(p_hwfn, icid); } else if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY || fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) { - u16 srq_id = (u16)rdata->async_handle.lo; + u16 srq_id = (u16)le32_to_cpu(rdata->async_handle.lo); events.affiliated_event(events.context, fw_event_code, &srq_id); @@ -217,9 +217,9 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn, struct roce_create_qp_resp_ramrod_data *p_ramrod; u16 regular_latency_queue, low_latency_queue; struct qed_sp_init_data init_data; - enum roce_flavor roce_flavor; struct qed_spq_entry *p_ent; enum protocol_type proto; + u32 flags = 0; int rc; u8 tc; @@ -252,45 +252,34 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn, if (rc) goto err; - p_ramrod = &p_ent->ramrod.roce_create_qp_resp; - - p_ramrod->flags = 0; - - roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, roce_flavor); + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, + qed_roce_mode_to_flavor(qp->roce_mode)); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN, + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN, qp->incoming_rdma_read_en); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN, + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN, qp->incoming_rdma_write_en); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN, + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN, qp->incoming_atomic_en); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN, + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN, qp->e2e_flow_control_en); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq); + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN, + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN, qp->fmr_and_reserved_lkey); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER, + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER, qp->min_rnr_nak_timer); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG, + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG, qed_rdma_is_xrc_qp(qp)); + p_ramrod = &p_ent->ramrod.roce_create_qp_resp; + p_ramrod->flags = cpu_to_le32(flags); p_ramrod->max_ird = qp->max_rd_atomic_resp; p_ramrod->traffic_class = qp->traffic_class_tos; p_ramrod->hop_limit = qp->hop_limit_ttl; @@ -305,10 
+294,10 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn, DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr); DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr); qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid); - p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi); - p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo); - p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi); - p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo); + p_ramrod->qp_handle_for_async.hi = qp->qp_handle_async.hi; + p_ramrod->qp_handle_for_async.lo = qp->qp_handle_async.lo; + p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi; + p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo; p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id); p_ramrod->xrc_domain = cpu_to_le16(qp->xrcd_id); @@ -330,7 +319,7 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn, qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr); qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr); - p_ramrod->udp_src_port = qp->udp_src_port; + p_ramrod->udp_src_port = cpu_to_le16(qp->udp_src_port); p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id); p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id); p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid); @@ -366,9 +355,9 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn, struct roce_create_qp_req_ramrod_data *p_ramrod; u16 regular_latency_queue, low_latency_queue; struct qed_sp_init_data init_data; - enum roce_flavor roce_flavor; struct qed_spq_entry *p_ent; enum protocol_type proto; + u16 flags = 0; int rc; u8 tc; @@ -402,34 +391,29 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn, if (rc) goto err; - p_ramrod = &p_ent->ramrod.roce_create_qp_req; - - p_ramrod->flags = 0; + SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, + qed_roce_mode_to_flavor(qp->roce_mode)); - roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, roce_flavor); - - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN, + SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN, qp->fmr_and_reserved_lkey); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all); + SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, + qp->signal_all); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt); + SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, + qp->retry_cnt); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT, + SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT, qp->rnr_retry_cnt); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG, + SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG, qed_rdma_is_xrc_qp(qp)); - SET_FIELD(p_ramrod->flags2, - ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE, qp->edpm_mode); + p_ramrod = &p_ent->ramrod.roce_create_qp_req; + p_ramrod->flags = cpu_to_le16(flags); + + SET_FIELD(p_ramrod->flags2, ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE, + qp->edpm_mode); p_ramrod->max_ord = qp->max_rd_atomic_req; p_ramrod->traffic_class = qp->traffic_class_tos; @@ -446,10 +430,10 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn, DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr); DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, 
qp->orq_phys_addr); qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid); - p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi); - p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo); - p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi); - p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo); + p_ramrod->qp_handle_for_async.hi = qp->qp_handle_async.hi; + p_ramrod->qp_handle_for_async.lo = qp->qp_handle_async.lo; + p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi; + p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo; p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id); @@ -470,7 +454,7 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn, qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr); qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr); - p_ramrod->udp_src_port = qp->udp_src_port; + p_ramrod->udp_src_port = cpu_to_le16(qp->udp_src_port); p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id); p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue; @@ -502,6 +486,7 @@ static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn, struct roce_modify_qp_resp_ramrod_data *p_ramrod; struct qed_sp_init_data init_data; struct qed_spq_entry *p_ent; + u16 flags = 0; int rc; if (!qp->has_resp) @@ -526,53 +511,43 @@ static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn, return rc; } - p_ramrod = &p_ent->ramrod.roce_modify_qp_resp; - - p_ramrod->flags = 0; + SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, + !!move_to_err); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err); - - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN, + SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN, qp->incoming_rdma_read_en); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN, + SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN, qp->incoming_rdma_write_en); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN, + SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN, qp->incoming_atomic_en); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN, + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN, qp->e2e_flow_control_en); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG, + SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG, GET_FIELD(modify_flags, QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG, + SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG, GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY)); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG, + SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG, GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG, + SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG, GET_FIELD(modify_flags, QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP)); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG, + SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG, GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER)); + p_ramrod = &p_ent->ramrod.roce_modify_qp_resp; + p_ramrod->flags = 
cpu_to_le16(flags); + p_ramrod->fields = 0; SET_FIELD(p_ramrod->fields, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER, @@ -599,6 +574,7 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn, struct roce_modify_qp_req_ramrod_data *p_ramrod; struct qed_sp_init_data init_data; struct qed_spq_entry *p_ent; + u16 flags = 0; int rc; if (!qp->has_req) @@ -623,54 +599,44 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn, return rc; } - p_ramrod = &p_ent->ramrod.roce_modify_qp_req; - - p_ramrod->flags = 0; + SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, + !!move_to_err); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err); + SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, + !!move_to_sqd); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, move_to_sqd); - - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY, + SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY, qp->sqd_async); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG, + SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG, GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY)); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG, + SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG, GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG, + SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG, GET_FIELD(modify_flags, QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ)); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG, + SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG, GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT)); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG, + SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG, GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT)); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG, + SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG, GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT)); + p_ramrod = &p_ent->ramrod.roce_modify_qp_req; + p_ramrod->flags = cpu_to_le16(flags); + p_ramrod->fields = 0; SET_FIELD(p_ramrod->fields, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt); - - SET_FIELD(p_ramrod->fields, - ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT, + SET_FIELD(p_ramrod->fields, ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT, qp->rnr_retry_cnt); p_ramrod->max_ord = qp->max_rd_atomic_req; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index f7f983a8bf44..993f1357b6fc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -155,7 +155,7 @@ struct qed_consq { }; typedef int (*qed_spq_async_comp_cb)(struct qed_hwfn *p_hwfn, u8 opcode, - u16 echo, union event_ring_data *data, + __le16 echo, union event_ring_data *data, u8 fw_return_code); int diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 71ab57bca7c9..8142f5669b26 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -341,9 +341,9 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, outer_tag_config->outer_tag.tci = 
cpu_to_le16(p_hwfn->hw_info.ovlan); if (test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits)) { - outer_tag_config->outer_tag.tpid = ETH_P_8021Q; + outer_tag_config->outer_tag.tpid = cpu_to_le16(ETH_P_8021Q); } else if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) { - outer_tag_config->outer_tag.tpid = ETH_P_8021AD; + outer_tag_config->outer_tag.tpid = cpu_to_le16(ETH_P_8021AD); outer_tag_config->enable_stag_pri_change = 1; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index b4e21e4792b7..aa215eeeb4df 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -4005,7 +4005,7 @@ static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, /* List the physical address of the request so that handler * could later on copy the message from it. */ - p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo; + p_vf->vf_mbx.pending_req = HILO_64(vf_msg->hi, vf_msg->lo); /* Mark the event and schedule the workqueue */ p_vf->vf_mbx.b_pending_msg = true; @@ -4037,7 +4037,7 @@ static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, } } -static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, u16 echo, +static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo, union event_ring_data *data, u8 fw_return_code) { switch (opcode) { diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 5ca081cd2ed9..90e1060da02b 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -1403,16 +1403,15 @@ static inline void qed_sb_ack(struct qed_sb_info *sb_info, enum igu_int_cmd int_cmd, u8 upd_flg) { - struct igu_prod_cons_update igu_ack = { 0 }; + u32 igu_ack; - igu_ack.sb_id_and_flags = - ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | - (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | - (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | - (IGU_SEG_ACCESS_REG << - IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); + igu_ack = ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | + (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | + (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | + (IGU_SEG_ACCESS_REG << + IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); - DIRECT_REG_WR(sb_info->igu_addr, igu_ack.sb_id_and_flags); + DIRECT_REG_WR(sb_info->igu_addr, igu_ack); /* Both segments (interrupts & acks) are written to same place address; * Need to guarantee all commands will be received (in-order) by HW. -- cgit v1.2.3 From b64b74b1d51cce8c9496b6a071c1d59786c2144d Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Mon, 6 Jul 2020 15:03:42 +0300 Subject: RDMA/core: Remove ib_alloc_mr_user function Allocating an MR flow can only be initiated by kernel users, and not from userspace. As a result, the udata parameter is always being passed as NULL. Rename ib_alloc_mr_user function to ib_alloc_mr and remove the udata parameter. 
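For reference, a minimal sketch of how a kernel user calls the renamed helper after this change; the pd and max_sg variables are assumed to come from the caller's own setup and are not part of this patch:

	struct ib_mr *mr;

	/* kernel-only path: no udata argument any more */
	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, max_sg);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* ... map an SG list and post a REG_MR work request ... */

	ib_dereg_mr(mr);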
Link: https://lore.kernel.org/r/20200706120343.10816-3-galpress@amazon.com Signed-off-by: Gal Pressman Reviewed-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/verbs.c | 11 +++++------ include/rdma/ib_verbs.h | 10 ++-------- 2 files changed, 7 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 48d194ec15d9..e98d3ada0951 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -2104,11 +2104,10 @@ int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata) EXPORT_SYMBOL(ib_dereg_mr_user); /** - * ib_alloc_mr_user() - Allocates a memory region + * ib_alloc_mr() - Allocates a memory region * @pd: protection domain associated with the region * @mr_type: memory region type * @max_num_sg: maximum sg entries available for registration. - * @udata: user data or null for kernel objects * * Notes: * Memory registeration page/sg lists must not exceed max_num_sg. @@ -2116,8 +2115,8 @@ EXPORT_SYMBOL(ib_dereg_mr_user); * max_num_sg * used_page_size. * */ -struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type, - u32 max_num_sg, struct ib_udata *udata) +struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, + u32 max_num_sg) { struct ib_mr *mr; @@ -2132,7 +2131,7 @@ struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type, goto out; } - mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg, udata); + mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg, NULL); if (IS_ERR(mr)) goto out; @@ -2151,7 +2150,7 @@ out: trace_mr_alloc(pd, mr_type, max_num_sg, mr); return mr; } -EXPORT_SYMBOL(ib_alloc_mr_user); +EXPORT_SYMBOL(ib_alloc_mr); /** * ib_alloc_mr_integrity() - Allocates an integrity memory region diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index f6b51a709818..084fe8f53ae8 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -4269,14 +4269,8 @@ static inline int ib_dereg_mr(struct ib_mr *mr) return ib_dereg_mr_user(mr, NULL); } -struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type, - u32 max_num_sg, struct ib_udata *udata); - -static inline struct ib_mr *ib_alloc_mr(struct ib_pd *pd, - enum ib_mr_type mr_type, u32 max_num_sg) -{ - return ib_alloc_mr_user(pd, mr_type, max_num_sg, NULL); -} +struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, + u32 max_num_sg); struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd, u32 max_num_data_sg, -- cgit v1.2.3 From 42a3b153966c9cd9a90f6a669d1ffed7fef2d325 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Mon, 6 Jul 2020 15:03:43 +0300 Subject: RDMA: Remove the udata parameter from alloc_mr callback Allocating an MR flow can only be initiated by kernel users, and not from userspace so a udata parameter is redundant. 
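As a rough sketch of what this means for a provider driver (the foo_ names are placeholders, not from this patch), the alloc_mr callback now matches the udata-free prototype:

	static struct ib_mr *foo_alloc_mr(struct ib_pd *pd,
					  enum ib_mr_type mr_type,
					  u32 max_num_sg)
	{
		/* driver-specific MR allocation; no uverbs input is available */
		return ERR_PTR(-EOPNOTSUPP);	/* placeholder body */
	}

	static const struct ib_device_ops foo_dev_ops = {
		.alloc_mr = foo_alloc_mr,
	};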
Link: https://lore.kernel.org/r/20200706120343.10816-4-galpress@amazon.com Signed-off-by: Gal Pressman Reviewed-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/verbs.c | 2 +- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 2 +- drivers/infiniband/hw/bnxt_re/ib_verbs.h | 2 +- drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 2 +- drivers/infiniband/hw/cxgb4/mem.c | 2 +- drivers/infiniband/hw/hns/hns_roce_device.h | 2 +- drivers/infiniband/hw/hns/hns_roce_mr.c | 2 +- drivers/infiniband/hw/i40iw/i40iw_verbs.c | 3 +-- drivers/infiniband/hw/mlx4/mlx4_ib.h | 2 +- drivers/infiniband/hw/mlx4/mr.c | 2 +- drivers/infiniband/hw/mlx5/mlx5_ib.h | 2 +- drivers/infiniband/hw/mlx5/mr.c | 2 +- drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 2 +- drivers/infiniband/hw/ocrdma/ocrdma_verbs.h | 2 +- drivers/infiniband/hw/qedr/verbs.c | 2 +- drivers/infiniband/hw/qedr/verbs.h | 2 +- drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c | 2 +- drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h | 2 +- drivers/infiniband/sw/rdmavt/mr.c | 2 +- drivers/infiniband/sw/rdmavt/mr.h | 2 +- drivers/infiniband/sw/rxe/rxe_verbs.c | 2 +- drivers/infiniband/sw/siw/siw_verbs.c | 2 +- drivers/infiniband/sw/siw/siw_verbs.h | 2 +- include/rdma/ib_verbs.h | 2 +- 24 files changed, 24 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index e98d3ada0951..f10dc00074a7 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -2131,7 +2131,7 @@ struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, goto out; } - mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg, NULL); + mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg); if (IS_ERR(mr)) goto out; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 8b6ad5cddfce..f32c7e85ae05 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -3569,7 +3569,7 @@ int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents, } struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type, - u32 max_num_sg, struct ib_udata *udata) + u32 max_num_sg) { struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); struct bnxt_re_dev *rdev = pd->rdev; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h index e5fbbeba6d28..b4a06b553b04 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h @@ -201,7 +201,7 @@ struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags); int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset); struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type, - u32 max_num_sg, struct ib_udata *udata); + u32 max_num_sg); int bnxt_re_dereg_mr(struct ib_mr *mr, struct ib_udata *udata); struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 27da0705c88a..2b2b009b371a 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -980,7 +980,7 @@ int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len); void c4iw_qp_add_ref(struct ib_qp *qp); void c4iw_qp_rem_ref(struct ib_qp *qp); struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, - u32 
max_num_sg, struct ib_udata *udata); + u32 max_num_sg); int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset); int c4iw_dealloc_mw(struct ib_mw *mw); diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 962dc97a8ff2..ea6fb2c5b1a7 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c @@ -691,7 +691,7 @@ int c4iw_dealloc_mw(struct ib_mw *mw) } struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, - u32 max_num_sg, struct ib_udata *udata) + u32 max_num_sg) { struct c4iw_dev *rhp; struct c4iw_pd *php; diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index a61f0c4d4dbb..5b946b5bd586 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -1191,7 +1191,7 @@ int hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length, u64 virt_addr, int mr_access_flags, struct ib_pd *pd, struct ib_udata *udata); struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, - u32 max_num_sg, struct ib_udata *udata); + u32 max_num_sg); int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset); int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 4c0bbb12770d..1380cdab5701 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -414,7 +414,7 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) } struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, - u32 max_num_sg, struct ib_udata *udata) + u32 max_num_sg) { struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); struct device *dev = hr_dev->dev; diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 19af29a48c55..f9ef3ac2f4cd 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -1543,10 +1543,9 @@ static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr * @pd: ibpd pointer * @mr_type: memory for stag registrion * @max_num_sg: man number of pages - * @udata: user data or NULL for kernel objects */ static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, - u32 max_num_sg, struct ib_udata *udata) + u32 max_num_sg) { struct i40iw_pd *iwpd = to_iwpd(pd); struct i40iw_device *iwdev = to_iwdev(pd->device); diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 6f4ea1067095..38e87a700a2a 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -729,7 +729,7 @@ struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type, struct ib_udata *udata); int mlx4_ib_dealloc_mw(struct ib_mw *mw); struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, - u32 max_num_sg, struct ib_udata *udata); + u32 max_num_sg); int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset); int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index 7e0b205c05eb..6eecedeff2d3 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c @@ -655,7 +655,7 @@ int mlx4_ib_dealloc_mw(struct 
ib_mw *ibmw) } struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, - u32 max_num_sg, struct ib_udata *udata) + u32 max_num_sg) { struct mlx4_ib_dev *dev = to_mdev(pd->device); struct mlx4_ib_mr *mr; diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 2fd199c07dda..93a471191a1d 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -1210,7 +1210,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, struct ib_pd *pd, struct ib_udata *udata); int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata); struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, - u32 max_num_sg, struct ib_udata *udata); + u32 max_num_sg); struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd, u32 max_num_sg, u32 max_num_meta_sg); diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 44683073be0c..3e6f2f9c6655 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -1961,7 +1961,7 @@ err_free: } struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, - u32 max_num_sg, struct ib_udata *udata) + u32 max_num_sg) { return __mlx5_ib_alloc_mr(pd, mr_type, max_num_sg, 0); } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index d11c74390a12..6cdbec13756a 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -2901,7 +2901,7 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags) } struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type, - u32 max_num_sg, struct ib_udata *udata) + u32 max_num_sg) { int status; struct ocrdma_mr *mr; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h index 3a5010881be5..df8e3b923a44 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h @@ -101,7 +101,7 @@ struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *, int acc); struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *, u64 start, u64 length, u64 virt, int acc, struct ib_udata *); struct ib_mr *ocrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, - u32 max_num_sg, struct ib_udata *udata); + u32 max_num_sg); int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset); diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 9b9e80266367..3d7d5617818f 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -3003,7 +3003,7 @@ err0: } struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type, - u32 max_num_sg, struct ib_udata *udata) + u32 max_num_sg) { struct qedr_mr *mr; diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h index 5e02387e068d..39dd6286ba39 100644 --- a/drivers/infiniband/hw/qedr/verbs.h +++ b/drivers/infiniband/hw/qedr/verbs.h @@ -84,7 +84,7 @@ int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset); struct ib_mr *qedr_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, - u32 max_num_sg, struct ib_udata *udata); + u32 max_num_sg); int qedr_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc); int qedr_post_send(struct ib_qp *, const struct ib_send_wr *, const struct ib_send_wr **bad_wr); diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c 
b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c index b039f1f00e05..77a010e68208 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c @@ -202,7 +202,7 @@ err_umem: * @return: ib_mr pointer on success, otherwise returns an errno. */ struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, - u32 max_num_sg, struct ib_udata *udata) + u32 max_num_sg) { struct pvrdma_dev *dev = to_vdev(pd->device); struct pvrdma_user_mr *mr; diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h index 267702226f10..699b20849a7e 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h @@ -406,7 +406,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, struct ib_udata *udata); int pvrdma_dereg_mr(struct ib_mr *mr, struct ib_udata *udata); struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, - u32 max_num_sg, struct ib_udata *udata); + u32 max_num_sg); int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset); int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c index 60864e5ca7cb..2f7c25fea44a 100644 --- a/drivers/infiniband/sw/rdmavt/mr.c +++ b/drivers/infiniband/sw/rdmavt/mr.c @@ -576,7 +576,7 @@ out: * Return: the memory region on success, otherwise return an errno. */ struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, - u32 max_num_sg, struct ib_udata *udata) + u32 max_num_sg) { struct rvt_mr *mr; diff --git a/drivers/infiniband/sw/rdmavt/mr.h b/drivers/infiniband/sw/rdmavt/mr.h index 780fc63af98b..b3aba359401b 100644 --- a/drivers/infiniband/sw/rdmavt/mr.h +++ b/drivers/infiniband/sw/rdmavt/mr.h @@ -71,7 +71,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, struct ib_udata *udata); int rvt_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata); struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, - u32 max_num_sg, struct ib_udata *udata); + u32 max_num_sg); int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset); diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index b8a22af724e8..0472df52d36d 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -975,7 +975,7 @@ static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) } static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type, - u32 max_num_sg, struct ib_udata *udata) + u32 max_num_sg) { struct rxe_dev *rxe = to_rdev(ibpd->device); struct rxe_pd *pd = to_rpd(ibpd); diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c index 987e2ba05dbc..0d509f7a10a6 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.c +++ b/drivers/infiniband/sw/siw/siw_verbs.c @@ -1373,7 +1373,7 @@ err_out: } struct ib_mr *siw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, - u32 max_sge, struct ib_udata *udata) + u32 max_sge) { struct siw_device *sdev = to_siw_dev(pd->device); struct siw_mr *mr = NULL; diff --git a/drivers/infiniband/sw/siw/siw_verbs.h b/drivers/infiniband/sw/siw/siw_verbs.h index 1a731989fad6..9335c48c01de 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.h +++ b/drivers/infiniband/sw/siw/siw_verbs.h @@ -69,7 +69,7 @@ int 
siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags); struct ib_mr *siw_reg_user_mr(struct ib_pd *base_pd, u64 start, u64 len, u64 rnic_va, int rights, struct ib_udata *udata); struct ib_mr *siw_alloc_mr(struct ib_pd *base_pd, enum ib_mr_type mr_type, - u32 max_sge, struct ib_udata *udata); + u32 max_sge); struct ib_mr *siw_get_dma_mr(struct ib_pd *base_pd, int rights); int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle, unsigned int *sg_off); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 084fe8f53ae8..204ec7516ef5 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2478,7 +2478,7 @@ struct ib_device_ops { struct ib_pd *pd, struct ib_udata *udata); int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata); struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type, - u32 max_num_sg, struct ib_udata *udata); + u32 max_num_sg); struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd, u32 max_num_data_sg, u32 max_num_meta_sg); -- cgit v1.2.3 From b73efcb26e2c6d66797c57bed8df13c3718c58cb Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Mon, 6 Jul 2020 15:27:15 +0300 Subject: RDMA/core: Clean ib_alloc_xrcd() and reuse it to allocate XRC domain ib_alloc_xrcd() already does the required initialization, so move the uverbs to call it and save code duplication, while cleaning the function argument lists of that function. Link: https://lore.kernel.org/r/20200706122716.647338-3-leon@kernel.org Signed-off-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/uverbs_cmd.c | 12 +++--------- drivers/infiniband/core/verbs.c | 24 ++++++++++++++++++------ include/rdma/ib_verbs.h | 18 +++--------------- 3 files changed, 24 insertions(+), 30 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index b48b3f6e632d..89ff5d06c5d7 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -614,17 +614,11 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs) } if (!xrcd) { - xrcd = ib_dev->ops.alloc_xrcd(ib_dev, &attrs->driver_udata); + xrcd = ib_alloc_xrcd_user(ib_dev, inode, &attrs->driver_udata); if (IS_ERR(xrcd)) { ret = PTR_ERR(xrcd); goto err; } - - xrcd->inode = inode; - xrcd->device = ib_dev; - atomic_set(&xrcd->usecnt, 0); - mutex_init(&xrcd->tgt_qp_mutex); - INIT_LIST_HEAD(&xrcd->tgt_qp_list); new_xrcd = 1; } @@ -663,7 +657,7 @@ err_copy: } err_dealloc_xrcd: - ib_dealloc_xrcd(xrcd, uverbs_get_cleared_udata(attrs)); + ib_dealloc_xrcd_user(xrcd, uverbs_get_cleared_udata(attrs)); err: uobj_alloc_abort(&obj->uobject, attrs); @@ -701,7 +695,7 @@ int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd, if (inode && !atomic_dec_and_test(&xrcd->usecnt)) return 0; - ret = ib_dealloc_xrcd(xrcd, &attrs->driver_udata); + ret = ib_dealloc_xrcd_user(xrcd, &attrs->driver_udata); if (ib_is_destroy_retryable(ret, why, uobject)) { atomic_inc(&xrcd->usecnt); diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index f10dc00074a7..a9b99c3ff25c 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -2288,17 +2288,24 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) } EXPORT_SYMBOL(ib_detach_mcast); -struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller) +/** + * ib_alloc_xrcd_user - Allocates an XRC domain. 
+ * @device: The device on which to allocate the XRC domain. + * @inode: inode to connect XRCD + * @udata: Valid user data or NULL for kernel object + */ +struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device, + struct inode *inode, struct ib_udata *udata) { struct ib_xrcd *xrcd; if (!device->ops.alloc_xrcd) return ERR_PTR(-EOPNOTSUPP); - xrcd = device->ops.alloc_xrcd(device, NULL); + xrcd = device->ops.alloc_xrcd(device, udata); if (!IS_ERR(xrcd)) { xrcd->device = device; - xrcd->inode = NULL; + xrcd->inode = inode; atomic_set(&xrcd->usecnt, 0); mutex_init(&xrcd->tgt_qp_mutex); INIT_LIST_HEAD(&xrcd->tgt_qp_list); @@ -2306,9 +2313,14 @@ struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller) return xrcd; } -EXPORT_SYMBOL(__ib_alloc_xrcd); +EXPORT_SYMBOL(ib_alloc_xrcd_user); -int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata) +/** + * ib_dealloc_xrcd_user - Deallocates an XRC domain. + * @xrcd: The XRC domain to deallocate. + * @udata: Valid user data or NULL for kernel object + */ +int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata) { struct ib_qp *qp; int ret; @@ -2326,7 +2338,7 @@ int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata) return xrcd->device->ops.dealloc_xrcd(xrcd, udata); } -EXPORT_SYMBOL(ib_dealloc_xrcd); +EXPORT_SYMBOL(ib_dealloc_xrcd_user); /** * ib_create_wq - Creates a WQ associated with the specified protection diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 204ec7516ef5..db6f78c5394f 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -4321,21 +4321,9 @@ int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); */ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); -/** - * ib_alloc_xrcd - Allocates an XRC domain. - * @device: The device on which to allocate the XRC domain. - * @caller: Module name for kernel consumers - */ -struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller); -#define ib_alloc_xrcd(device) \ - __ib_alloc_xrcd((device), KBUILD_MODNAME) - -/** - * ib_dealloc_xrcd - Deallocates an XRC domain. - * @xrcd: The XRC domain to deallocate. - * @udata: Valid user data or NULL for kernel object - */ -int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata); +struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device, + struct inode *inode, struct ib_udata *udata); +int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata); static inline int ib_check_mr_access(int flags) { -- cgit v1.2.3 From 6f3ca6f4f5e05b52bf6851ada21026faa106de76 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Mon, 6 Jul 2020 15:27:16 +0300 Subject: RDMA/core: Optimize XRC target lookup Replace the mutex with read write semaphore and use xarray instead of linked list for XRC target QPs. This will give faster XRC target lookup. In addition, when QP is closed, don't insert it back to the xarray if the destroy command failed. 
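Condensed, the read-side lookup in ib_open_qp() now becomes (this mirrors the hunk below, with error handling trimmed):

	down_read(&xrcd->tgt_qps_rwsem);
	real_qp = xa_load(&xrcd->tgt_qps, qp_open_attr->qp_num);
	if (!real_qp) {
		up_read(&xrcd->tgt_qps_rwsem);
		return ERR_PTR(-EINVAL);
	}
	qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
			  qp_open_attr->qp_context);
	up_read(&xrcd->tgt_qps_rwsem);
	return qp;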
Link: https://lore.kernel.org/r/20200706122716.647338-4-leon@kernel.org Signed-off-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/verbs.c | 57 +++++++++++++++-------------------------- include/rdma/ib_verbs.h | 5 ++-- 2 files changed, 23 insertions(+), 39 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index a9b99c3ff25c..b1b6cc21ca96 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1090,13 +1090,6 @@ static void __ib_shared_qp_event_handler(struct ib_event *event, void *context) spin_unlock_irqrestore(&qp->device->qp_open_list_lock, flags); } -static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp) -{ - mutex_lock(&xrcd->tgt_qp_mutex); - list_add(&qp->xrcd_list, &xrcd->tgt_qp_list); - mutex_unlock(&xrcd->tgt_qp_mutex); -} - static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp, void (*event_handler)(struct ib_event *, void *), void *qp_context) @@ -1139,16 +1132,15 @@ struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, if (qp_open_attr->qp_type != IB_QPT_XRC_TGT) return ERR_PTR(-EINVAL); - qp = ERR_PTR(-EINVAL); - mutex_lock(&xrcd->tgt_qp_mutex); - list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) { - if (real_qp->qp_num == qp_open_attr->qp_num) { - qp = __ib_open_qp(real_qp, qp_open_attr->event_handler, - qp_open_attr->qp_context); - break; - } + down_read(&xrcd->tgt_qps_rwsem); + real_qp = xa_load(&xrcd->tgt_qps, qp_open_attr->qp_num); + if (!real_qp) { + up_read(&xrcd->tgt_qps_rwsem); + return ERR_PTR(-EINVAL); } - mutex_unlock(&xrcd->tgt_qp_mutex); + qp = __ib_open_qp(real_qp, qp_open_attr->event_handler, + qp_open_attr->qp_context); + up_read(&xrcd->tgt_qps_rwsem); return qp; } EXPORT_SYMBOL(ib_open_qp); @@ -1157,6 +1149,7 @@ static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr) { struct ib_qp *real_qp = qp; + int err; qp->event_handler = __ib_shared_qp_event_handler; qp->qp_context = qp; @@ -1172,7 +1165,12 @@ static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp, if (IS_ERR(qp)) return qp; - __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp); + err = xa_err(xa_store(&qp_init_attr->xrcd->tgt_qps, real_qp->qp_num, + real_qp, GFP_KERNEL)); + if (err) { + ib_close_qp(qp); + return ERR_PTR(err); + } return qp; } @@ -1887,21 +1885,18 @@ static int __ib_destroy_shared_qp(struct ib_qp *qp) real_qp = qp->real_qp; xrcd = real_qp->xrcd; - - mutex_lock(&xrcd->tgt_qp_mutex); + down_write(&xrcd->tgt_qps_rwsem); ib_close_qp(qp); if (atomic_read(&real_qp->usecnt) == 0) - list_del(&real_qp->xrcd_list); + xa_erase(&xrcd->tgt_qps, real_qp->qp_num); else real_qp = NULL; - mutex_unlock(&xrcd->tgt_qp_mutex); + up_write(&xrcd->tgt_qps_rwsem); if (real_qp) { ret = ib_destroy_qp(real_qp); if (!ret) atomic_dec(&xrcd->usecnt); - else - __ib_insert_xrcd_qp(xrcd, real_qp); } return 0; @@ -2307,8 +2302,8 @@ struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device, xrcd->device = device; xrcd->inode = inode; atomic_set(&xrcd->usecnt, 0); - mutex_init(&xrcd->tgt_qp_mutex); - INIT_LIST_HEAD(&xrcd->tgt_qp_list); + init_rwsem(&xrcd->tgt_qps_rwsem); + xa_init(&xrcd->tgt_qps); } return xrcd; @@ -2322,20 +2317,10 @@ EXPORT_SYMBOL(ib_alloc_xrcd_user); */ int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata) { - struct ib_qp *qp; - int ret; - if (atomic_read(&xrcd->usecnt)) return -EBUSY; - while (!list_empty(&xrcd->tgt_qp_list)) { - qp = list_entry(xrcd->tgt_qp_list.next, 
struct ib_qp, xrcd_list); - ret = ib_destroy_qp(qp); - if (ret) - return ret; - } - mutex_destroy(&xrcd->tgt_qp_mutex); - + WARN_ON(!xa_empty(&xrcd->tgt_qps)); return xrcd->device->ops.dealloc_xrcd(xrcd, udata); } EXPORT_SYMBOL(ib_dealloc_xrcd_user); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index db6f78c5394f..20c801730fed 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1567,9 +1567,8 @@ struct ib_xrcd { struct ib_device *device; atomic_t usecnt; /* count all exposed resources */ struct inode *inode; - - struct mutex tgt_qp_mutex; - struct list_head tgt_qp_list; + struct rw_semaphore tgt_qps_rwsem; + struct xarray tgt_qps; }; struct ib_ah { -- cgit v1.2.3 From 1c8fb1ea5a1dbd2159e78fa580aaffb001794cfa Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Tue, 30 Jun 2020 12:39:12 +0300 Subject: IB/uverbs: Expose UAPI to query ucontext Expose UAPI to query ucontext, this will let user space application that didn't allocate the ucontext but has access to by owning the matching command FD to retrieve the ucontext information. Link: https://lore.kernel.org/r/20200630093916.332097-4-leon@kernel.org Signed-off-by: Yishai Hadas Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/device.c | 1 + drivers/infiniband/core/uverbs_std_types_device.c | 41 ++++++++++++++++++++++- include/rdma/ib_verbs.h | 4 +++ include/uapi/rdma/ib_user_ioctl_cmds.h | 6 ++++ 4 files changed, 51 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 40cf07129f66..1900c0df3c8a 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -2674,6 +2674,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops) SET_DEVICE_OP(dev_ops, query_port); SET_DEVICE_OP(dev_ops, query_qp); SET_DEVICE_OP(dev_ops, query_srq); + SET_DEVICE_OP(dev_ops, query_ucontext); SET_DEVICE_OP(dev_ops, rdma_netdev_get_params); SET_DEVICE_OP(dev_ops, read_counters); SET_DEVICE_OP(dev_ops, reg_dm_mr); diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c index ae4a59d6f9b1..8e58605a17be 100644 --- a/drivers/infiniband/core/uverbs_std_types_device.c +++ b/drivers/infiniband/core/uverbs_std_types_device.c @@ -229,6 +229,37 @@ static int UVERBS_HANDLER(UVERBS_METHOD_GET_CONTEXT)( return 0; } +static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_CONTEXT)( + struct uverbs_attr_bundle *attrs) +{ + u64 core_support = IB_UVERBS_CORE_SUPPORT_OPTIONAL_MR_ACCESS; + struct ib_ucontext *ucontext; + struct ib_device *ib_dev; + u32 num_comp; + int ret; + + ucontext = ib_uverbs_get_ucontext(attrs); + if (IS_ERR(ucontext)) + return PTR_ERR(ucontext); + ib_dev = ucontext->device; + + if (!ib_dev->ops.query_ucontext) + return -EOPNOTSUPP; + + num_comp = attrs->ufile->device->num_comp_vectors; + ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_CONTEXT_NUM_COMP_VECTORS, + &num_comp, sizeof(num_comp)); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + + ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_CONTEXT_CORE_SUPPORT, + &core_support, sizeof(core_support)); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + + return ucontext->device->ops.query_ucontext(ucontext, attrs); +} + DECLARE_UVERBS_NAMED_METHOD( UVERBS_METHOD_GET_CONTEXT, UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_GET_CONTEXT_NUM_COMP_VECTORS, @@ -237,6 +268,13 @@ DECLARE_UVERBS_NAMED_METHOD( UVERBS_ATTR_TYPE(u64), UA_OPTIONAL), UVERBS_ATTR_UHW()); 
+DECLARE_UVERBS_NAMED_METHOD( + UVERBS_METHOD_QUERY_CONTEXT, + UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_CONTEXT_NUM_COMP_VECTORS, + UVERBS_ATTR_TYPE(u32), UA_OPTIONAL), + UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_CONTEXT_CORE_SUPPORT, + UVERBS_ATTR_TYPE(u64), UA_OPTIONAL)); + DECLARE_UVERBS_NAMED_METHOD( UVERBS_METHOD_INFO_HANDLES, /* Also includes any device specific object ids */ @@ -260,7 +298,8 @@ DECLARE_UVERBS_GLOBAL_METHODS(UVERBS_OBJECT_DEVICE, &UVERBS_METHOD(UVERBS_METHOD_GET_CONTEXT), &UVERBS_METHOD(UVERBS_METHOD_INVOKE_WRITE), &UVERBS_METHOD(UVERBS_METHOD_INFO_HANDLES), - &UVERBS_METHOD(UVERBS_METHOD_QUERY_PORT)); + &UVERBS_METHOD(UVERBS_METHOD_QUERY_PORT), + &UVERBS_METHOD(UVERBS_METHOD_QUERY_CONTEXT)); const struct uapi_definition uverbs_def_obj_device[] = { UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_DEVICE), diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 20c801730fed..6c72bb194148 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2645,6 +2645,10 @@ struct ib_device_ops { */ int (*fill_stat_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr); + /* query driver for its ucontext properties */ + int (*query_ucontext)(struct ib_ucontext *context, + struct uverbs_attr_bundle *attrs); + DECLARE_RDMA_OBJ_SIZE(ib_ah); DECLARE_RDMA_OBJ_SIZE(ib_cq); DECLARE_RDMA_OBJ_SIZE(ib_pd); diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h index 4961d5e858eb..83b6e71ea216 100644 --- a/include/uapi/rdma/ib_user_ioctl_cmds.h +++ b/include/uapi/rdma/ib_user_ioctl_cmds.h @@ -69,6 +69,7 @@ enum uverbs_methods_device { UVERBS_METHOD_INFO_HANDLES, UVERBS_METHOD_QUERY_PORT, UVERBS_METHOD_GET_CONTEXT, + UVERBS_METHOD_QUERY_CONTEXT, }; enum uverbs_attrs_invoke_write_cmd_attr_ids { @@ -87,6 +88,11 @@ enum uverbs_attrs_get_context_attr_ids { UVERBS_ATTR_GET_CONTEXT_CORE_SUPPORT, }; +enum uverbs_attrs_query_context_attr_ids { + UVERBS_ATTR_QUERY_CONTEXT_NUM_COMP_VECTORS, + UVERBS_ATTR_QUERY_CONTEXT_CORE_SUPPORT, +}; + enum uverbs_attrs_create_cq_cmd_attr_ids { UVERBS_ATTR_CREATE_CQ_HANDLE, UVERBS_ATTR_CREATE_CQ_CQE, -- cgit v1.2.3 From 0fb556b2b58d6b8336c94379042bad2a8512af1a Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Tue, 30 Jun 2020 12:39:14 +0300 Subject: RDMA/mlx5: Implement the query ucontext functionality Implement the query ucontext functionality by returning the original ucontext data as part of an extra mlx5 attribute that holds the driver UAPI response. 
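For illustration only, not part of this series: a rough userspace sketch of how the new UVERBS_METHOD_QUERY_CONTEXT could be driven through the raw RDMA_VERBS_IOCTL interface, using the attribute ids added in ib_user_ioctl_cmds.h. Real applications would normally go through rdma-core, and mlx5 additionally expects its mandatory driver-specific MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX output attribute; the function name and parameters below are invented for the example.

/*
 * Hypothetical sketch; error handling and the driver-specific response
 * attribute are omitted. cmd_fd is the ucontext's command FD.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <rdma/rdma_user_ioctl_cmds.h>
#include <rdma/ib_user_ioctl_cmds.h>

static int query_context(int cmd_fd, uint32_t driver_id,
                         uint32_t *num_comp_vectors, uint64_t *core_support)
{
        struct {
                struct ib_uverbs_ioctl_hdr hdr;
                struct ib_uverbs_attr attrs[2];
        } cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.hdr.length = sizeof(cmd);
        cmd.hdr.object_id = UVERBS_OBJECT_DEVICE;
        cmd.hdr.method_id = UVERBS_METHOD_QUERY_CONTEXT;
        cmd.hdr.num_attrs = 2;
        cmd.hdr.driver_id = driver_id;  /* must match the bound driver, e.g. RDMA_DRIVER_MLX5 */

        /* both core attributes are optional output buffers */
        cmd.attrs[0].attr_id = UVERBS_ATTR_QUERY_CONTEXT_NUM_COMP_VECTORS;
        cmd.attrs[0].len = sizeof(*num_comp_vectors);
        cmd.attrs[0].data = (uintptr_t)num_comp_vectors;

        cmd.attrs[1].attr_id = UVERBS_ATTR_QUERY_CONTEXT_CORE_SUPPORT;
        cmd.attrs[1].len = sizeof(*core_support);
        cmd.attrs[1].data = (uintptr_t)core_support;

        return ioctl(cmd_fd, RDMA_VERBS_IOCTL, &cmd);
}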
Link: https://lore.kernel.org/r/20200630093916.332097-6-leon@kernel.org Signed-off-by: Yishai Hadas Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/uverbs_ioctl.c | 1 + drivers/infiniband/hw/mlx5/main.c | 35 ++++++++++++++++++++++++++++++++ include/uapi/rdma/mlx5_user_ioctl_cmds.h | 4 ++++ 3 files changed, 40 insertions(+) (limited to 'include') diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c index 2d882c02387c..ef04a261097f 100644 --- a/drivers/infiniband/core/uverbs_ioctl.c +++ b/drivers/infiniband/core/uverbs_ioctl.c @@ -790,6 +790,7 @@ int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle, } return uverbs_copy_to(bundle, idx, from, size); } +EXPORT_SYMBOL(uverbs_copy_to_struct_or_zero); /* Once called an abort will call through to the type's destroy_hw() */ void uverbs_finalize_uobj_create(const struct uverbs_attr_bundle *bundle, diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 56039fa6c530..dc77388464c9 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1990,6 +1990,29 @@ out_ctx: return err; } +static int mlx5_ib_query_ucontext(struct ib_ucontext *ibcontext, + struct uverbs_attr_bundle *attrs) +{ + struct mlx5_ib_alloc_ucontext_resp uctx_resp = {}; + int ret; + + ret = set_ucontext_resp(ibcontext, &uctx_resp); + if (ret) + return ret; + + uctx_resp.response_length = + min_t(size_t, + uverbs_attr_get_len(attrs, + MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX), + sizeof(uctx_resp)); + + ret = uverbs_copy_to_struct_or_zero(attrs, + MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX, + &uctx_resp, + sizeof(uctx_resp)); + return ret; +} + static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) { struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); @@ -6364,6 +6387,16 @@ ADD_UVERBS_ATTRIBUTES_SIMPLE( UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS, enum mlx5_ib_uapi_flow_action_flags)); +ADD_UVERBS_ATTRIBUTES_SIMPLE( + mlx5_ib_query_context, + UVERBS_OBJECT_DEVICE, + UVERBS_METHOD_QUERY_CONTEXT, + UVERBS_ATTR_PTR_OUT( + MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX, + UVERBS_ATTR_STRUCT(struct mlx5_ib_alloc_ucontext_resp, + dump_fill_mkey), + UA_MANDATORY)); + static const struct uapi_definition mlx5_ib_defs[] = { UAPI_DEF_CHAIN(mlx5_ib_devx_defs), UAPI_DEF_CHAIN(mlx5_ib_flow_defs), @@ -6372,6 +6405,7 @@ static const struct uapi_definition mlx5_ib_defs[] = { UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION, &mlx5_ib_flow_action), UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm), + UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context), UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR, UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)), UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_UAR), @@ -6605,6 +6639,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = { .query_pkey = mlx5_ib_query_pkey, .query_qp = mlx5_ib_query_qp, .query_srq = mlx5_ib_query_srq, + .query_ucontext = mlx5_ib_query_ucontext, .read_counters = mlx5_ib_read_counters, .reg_user_mr = mlx5_ib_reg_user_mr, .req_notify_cq = mlx5_ib_arm_cq, diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h index 8e316ef896b5..496309e8a856 100644 --- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h +++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h @@ -228,6 +228,10 @@ enum mlx5_ib_flow_matcher_methods { MLX5_IB_METHOD_FLOW_MATCHER_DESTROY, }; +enum mlx5_ib_device_query_context_attrs { + 
MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX = (1U << UVERBS_ID_NS_SHIFT), +}; + #define MLX5_IB_DW_MATCH_PARAM 0x80 struct mlx5_ib_match_params { -- cgit v1.2.3 From 05f71ef9797454b9384c740b8acd0d02eca1d1bc Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Tue, 30 Jun 2020 12:39:15 +0300 Subject: RDMA/mlx5: Introduce UAPI to query PD attributes Introduce UAPI to query PD attributes, this can be used to retrieve PD attributes by having the PD handle of the created one and owning the command FD for the ucontxet. Link: https://lore.kernel.org/r/20200630093916.332097-7-leon@kernel.org Signed-off-by: Yishai Hadas Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/Makefile | 3 ++- drivers/infiniband/hw/mlx5/main.c | 1 + drivers/infiniband/hw/mlx5/mlx5_ib.h | 2 ++ drivers/infiniband/hw/mlx5/std_types.c | 45 ++++++++++++++++++++++++++++++++ include/uapi/rdma/mlx5_user_ioctl_cmds.h | 10 +++++++ 5 files changed, 60 insertions(+), 1 deletion(-) create mode 100644 drivers/infiniband/hw/mlx5/std_types.c (limited to 'include') diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile index 8cca61c671f8..9838719aacb9 100644 --- a/drivers/infiniband/hw/mlx5/Makefile +++ b/drivers/infiniband/hw/mlx5/Makefile @@ -23,4 +23,5 @@ mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o mlx5_ib-$(CONFIG_MLX5_ESWITCH) += ib_rep.o mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o \ flow.o \ - qos.o + qos.o \ + std_types.o diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index dc77388464c9..324a98c77ad5 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -6401,6 +6401,7 @@ static const struct uapi_definition mlx5_ib_defs[] = { UAPI_DEF_CHAIN(mlx5_ib_devx_defs), UAPI_DEF_CHAIN(mlx5_ib_flow_defs), UAPI_DEF_CHAIN(mlx5_ib_qos_defs), + UAPI_DEF_CHAIN(mlx5_ib_std_types_defs), UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION, &mlx5_ib_flow_action), diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 74d3507d6f80..27b069ef63d9 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -1384,6 +1384,8 @@ int mlx5_ib_fill_stat_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr); extern const struct uapi_definition mlx5_ib_devx_defs[]; extern const struct uapi_definition mlx5_ib_flow_defs[]; extern const struct uapi_definition mlx5_ib_qos_defs[]; +extern const struct uapi_definition mlx5_ib_std_types_defs[]; + #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user); diff --git a/drivers/infiniband/hw/mlx5/std_types.c b/drivers/infiniband/hw/mlx5/std_types.c new file mode 100644 index 000000000000..16145fda68d0 --- /dev/null +++ b/drivers/infiniband/hw/mlx5/std_types.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* + * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include "mlx5_ib.h" + +#define UVERBS_MODULE_NAME mlx5_ib +#include + +static int UVERBS_HANDLER(MLX5_IB_METHOD_PD_QUERY)( + struct uverbs_attr_bundle *attrs) +{ + struct ib_pd *pd = + uverbs_attr_get_obj(attrs, MLX5_IB_ATTR_QUERY_PD_HANDLE); + struct mlx5_ib_pd *mpd = to_mpd(pd); + + return uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_PD_RESP_PDN, + &mpd->pdn, sizeof(mpd->pdn)); +} + +DECLARE_UVERBS_NAMED_METHOD( + MLX5_IB_METHOD_PD_QUERY, + UVERBS_ATTR_IDR(MLX5_IB_ATTR_QUERY_PD_HANDLE, + UVERBS_OBJECT_PD, + UVERBS_ACCESS_READ, + UA_MANDATORY), + UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_PD_RESP_PDN, + UVERBS_ATTR_TYPE(u32), + UA_MANDATORY)); + +ADD_UVERBS_METHODS(mlx5_ib_pd, + UVERBS_OBJECT_PD, + &UVERBS_METHOD(MLX5_IB_METHOD_PD_QUERY)); + +const struct uapi_definition mlx5_ib_std_types_defs[] = { + UAPI_DEF_CHAIN_OBJ_TREE( + UVERBS_OBJECT_PD, + &mlx5_ib_pd), + {}, +}; diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h index 496309e8a856..b330e6eee626 100644 --- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h +++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h @@ -290,4 +290,14 @@ enum mlx5_ib_create_flow_action_create_packet_reformat_attrs { MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF, }; +enum mlx5_ib_query_pd_attrs { + MLX5_IB_ATTR_QUERY_PD_HANDLE = (1U << UVERBS_ID_NS_SHIFT), + MLX5_IB_ATTR_QUERY_PD_RESP_PDN, +}; + +enum mlx5_ib_pd_methods { + MLX5_IB_METHOD_PD_QUERY = (1U << UVERBS_ID_NS_SHIFT), + +}; + #endif -- cgit v1.2.3 From 6c01e6b218aea09ec9947cbf88a4db97b4dd155c Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Tue, 30 Jun 2020 12:39:16 +0300 Subject: IB/uverbs: Expose UAPI to query MR Expose UAPI to query MR, this will let user space application that didn't allocate the MR but has access to by owning the matching command FD to retrieve its information. Link: https://lore.kernel.org/r/20200630093916.332097-8-leon@kernel.org Signed-off-by: Yishai Hadas Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/uverbs_std_types_mr.c | 52 ++++++++++++++++++++++++++- include/uapi/rdma/ib_user_ioctl_cmds.h | 9 +++++ 2 files changed, 60 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/infiniband/core/uverbs_std_types_mr.c b/drivers/infiniband/core/uverbs_std_types_mr.c index a2722ef8496e..62f58ad56afd 100644 --- a/drivers/infiniband/core/uverbs_std_types_mr.c +++ b/drivers/infiniband/core/uverbs_std_types_mr.c @@ -148,6 +148,36 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)( return ret; } +static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_MR)( + struct uverbs_attr_bundle *attrs) +{ + struct ib_mr *mr = + uverbs_attr_get_obj(attrs, UVERBS_ATTR_QUERY_MR_HANDLE); + int ret; + + ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_MR_RESP_LKEY, &mr->lkey, + sizeof(mr->lkey)); + if (ret) + return ret; + + ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_MR_RESP_RKEY, + &mr->rkey, sizeof(mr->rkey)); + + if (ret) + return ret; + + ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_MR_RESP_LENGTH, + &mr->length, sizeof(mr->length)); + + if (ret) + return ret; + + ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_MR_RESP_IOVA, + &mr->iova, sizeof(mr->iova)); + + return IS_UVERBS_COPY_ERR(ret) ? 
ret : 0; +} + DECLARE_UVERBS_NAMED_METHOD( UVERBS_METHOD_ADVISE_MR, UVERBS_ATTR_IDR(UVERBS_ATTR_ADVISE_MR_PD_HANDLE, @@ -165,6 +195,25 @@ DECLARE_UVERBS_NAMED_METHOD( UA_MANDATORY, UA_ALLOC_AND_COPY)); +DECLARE_UVERBS_NAMED_METHOD( + UVERBS_METHOD_QUERY_MR, + UVERBS_ATTR_IDR(UVERBS_ATTR_QUERY_MR_HANDLE, + UVERBS_OBJECT_MR, + UVERBS_ACCESS_READ, + UA_MANDATORY), + UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_MR_RESP_RKEY, + UVERBS_ATTR_TYPE(u32), + UA_MANDATORY), + UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_MR_RESP_LKEY, + UVERBS_ATTR_TYPE(u32), + UA_MANDATORY), + UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_MR_RESP_LENGTH, + UVERBS_ATTR_TYPE(u64), + UA_MANDATORY), + UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_MR_RESP_IOVA, + UVERBS_ATTR_TYPE(u64), + UA_OPTIONAL)); + DECLARE_UVERBS_NAMED_METHOD( UVERBS_METHOD_DM_MR_REG, UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DM_MR_HANDLE, @@ -206,7 +255,8 @@ DECLARE_UVERBS_NAMED_OBJECT( UVERBS_TYPE_ALLOC_IDR(uverbs_free_mr), &UVERBS_METHOD(UVERBS_METHOD_DM_MR_REG), &UVERBS_METHOD(UVERBS_METHOD_MR_DESTROY), - &UVERBS_METHOD(UVERBS_METHOD_ADVISE_MR)); + &UVERBS_METHOD(UVERBS_METHOD_ADVISE_MR), + &UVERBS_METHOD(UVERBS_METHOD_QUERY_MR)); const struct uapi_definition uverbs_def_obj_mr[] = { UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_MR, diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h index 83b6e71ea216..99dcabf61a71 100644 --- a/include/uapi/rdma/ib_user_ioctl_cmds.h +++ b/include/uapi/rdma/ib_user_ioctl_cmds.h @@ -248,6 +248,7 @@ enum uverbs_methods_mr { UVERBS_METHOD_DM_MR_REG, UVERBS_METHOD_MR_DESTROY, UVERBS_METHOD_ADVISE_MR, + UVERBS_METHOD_QUERY_MR, }; enum uverbs_attrs_mr_destroy_ids { @@ -261,6 +262,14 @@ enum uverbs_attrs_advise_mr_cmd_attr_ids { UVERBS_ATTR_ADVISE_MR_SGE_LIST, }; +enum uverbs_attrs_query_mr_cmd_attr_ids { + UVERBS_ATTR_QUERY_MR_HANDLE, + UVERBS_ATTR_QUERY_MR_RESP_LKEY, + UVERBS_ATTR_QUERY_MR_RESP_RKEY, + UVERBS_ATTR_QUERY_MR_RESP_LENGTH, + UVERBS_ATTR_QUERY_MR_RESP_IOVA, +}; + enum uverbs_attrs_create_counters_cmd_attr_ids { UVERBS_ATTR_CREATE_COUNTERS_HANDLE, }; -- cgit v1.2.3 From 3b023e1b680a56e84c22d43486875a5aa4c78afe Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Tue, 30 Jun 2020 13:18:52 +0300 Subject: RDMA/core: Create and destroy counters in the ib_core Move allocation and destruction of counters under ib_core responsibility Link: https://lore.kernel.org/r/20200630101855.368895-2-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/device.c | 1 + drivers/infiniband/core/uverbs_std_types_counters.c | 17 +++++++++-------- drivers/infiniband/hw/mlx5/main.c | 20 ++++++-------------- include/rdma/ib_verbs.h | 7 ++++--- 4 files changed, 20 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 1900c0df3c8a..0a259f475e89 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -2687,6 +2687,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops) SET_DEVICE_OP(dev_ops, set_vf_link_state); SET_OBJ_SIZE(dev_ops, ib_ah); + SET_OBJ_SIZE(dev_ops, ib_counters); SET_OBJ_SIZE(dev_ops, ib_cq); SET_OBJ_SIZE(dev_ops, ib_pd); SET_OBJ_SIZE(dev_ops, ib_srq); diff --git a/drivers/infiniband/core/uverbs_std_types_counters.c b/drivers/infiniband/core/uverbs_std_types_counters.c index 9f013304e677..c7e7438752bc 100644 --- a/drivers/infiniband/core/uverbs_std_types_counters.c +++ b/drivers/infiniband/core/uverbs_std_types_counters.c @@ 
-46,7 +46,9 @@ static int uverbs_free_counters(struct ib_uobject *uobject, if (ret) return ret; - return counters->device->ops.destroy_counters(counters); + counters->device->ops.destroy_counters(counters); + kfree(counters); + return 0; } static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)( @@ -66,20 +68,19 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)( if (!ib_dev->ops.create_counters) return -EOPNOTSUPP; - counters = ib_dev->ops.create_counters(ib_dev, attrs); - if (IS_ERR(counters)) { - ret = PTR_ERR(counters); - goto err_create_counters; - } + counters = rdma_zalloc_drv_obj(ib_dev, ib_counters); + if (!counters) + return -ENOMEM; counters->device = ib_dev; counters->uobject = uobj; uobj->object = counters; atomic_set(&counters->usecnt, 0); - return 0; + ret = ib_dev->ops.create_counters(counters, attrs); + if (ret) + kfree(counters); -err_create_counters: return ret; } diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 324a98c77ad5..6e6e126c39ef 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -6455,7 +6455,7 @@ err_bound: return ret; } -static int mlx5_ib_destroy_counters(struct ib_counters *counters) +static void mlx5_ib_destroy_counters(struct ib_counters *counters) { struct mlx5_ib_mcounters *mcounters = to_mcounters(counters); @@ -6463,24 +6463,15 @@ static int mlx5_ib_destroy_counters(struct ib_counters *counters) if (mcounters->hw_cntrs_hndl) mlx5_fc_destroy(to_mdev(counters->device)->mdev, mcounters->hw_cntrs_hndl); - - kfree(mcounters); - - return 0; } -static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device, - struct uverbs_attr_bundle *attrs) +static int mlx5_ib_create_counters(struct ib_counters *counters, + struct uverbs_attr_bundle *attrs) { - struct mlx5_ib_mcounters *mcounters; - - mcounters = kzalloc(sizeof(*mcounters), GFP_KERNEL); - if (!mcounters) - return ERR_PTR(-ENOMEM); + struct mlx5_ib_mcounters *mcounters = to_mcounters(counters); mutex_init(&mcounters->mcntrs_mutex); - - return &mcounters->ibcntrs; + return 0; } static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev) @@ -6648,6 +6639,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = { .resize_cq = mlx5_ib_resize_cq, INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah), + INIT_RDMA_OBJ_SIZE(ib_counters, mlx5_ib_mcounters, ibcntrs), INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq), INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd), INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq), diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 6c72bb194148..ecaf299e0789 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2540,9 +2540,9 @@ struct ib_device_ops { struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm, struct ib_dm_mr_attr *attr, struct uverbs_attr_bundle *attrs); - struct ib_counters *(*create_counters)( - struct ib_device *device, struct uverbs_attr_bundle *attrs); - int (*destroy_counters)(struct ib_counters *counters); + int (*create_counters)(struct ib_counters *counters, + struct uverbs_attr_bundle *attrs); + void (*destroy_counters)(struct ib_counters *counters); int (*read_counters)(struct ib_counters *counters, struct ib_counters_read_attr *counters_read_attr, struct uverbs_attr_bundle *attrs); @@ -2650,6 +2650,7 @@ struct ib_device_ops { struct uverbs_attr_bundle *attrs); DECLARE_RDMA_OBJ_SIZE(ib_ah); + DECLARE_RDMA_OBJ_SIZE(ib_counters); DECLARE_RDMA_OBJ_SIZE(ib_cq); DECLARE_RDMA_OBJ_SIZE(ib_pd); DECLARE_RDMA_OBJ_SIZE(ib_srq); -- cgit v1.2.3 From 
28ad5f65c314ffdd5888d6afa61772d3032a332c Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Tue, 30 Jun 2020 13:18:54 +0300 Subject: RDMA: Move XRCD to be under ib_core responsibility Update the code to allocate and free ib_xrcd structure in the ib_core instead of inside drivers. Link: https://lore.kernel.org/r/20200630101855.368895-4-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/device.c | 1 + drivers/infiniband/core/verbs.c | 28 ++++++++++++++++++--------- drivers/infiniband/hw/mlx4/main.c | 37 +++++++++++++++--------------------- drivers/infiniband/hw/mlx5/main.c | 2 ++ drivers/infiniband/hw/mlx5/mlx5_ib.h | 5 ++--- drivers/infiniband/hw/mlx5/qp.c | 32 +++++++------------------------ include/rdma/ib_verbs.h | 6 +++--- 7 files changed, 49 insertions(+), 62 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 0a259f475e89..b2d617e599a1 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -2692,6 +2692,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops) SET_OBJ_SIZE(dev_ops, ib_pd); SET_OBJ_SIZE(dev_ops, ib_srq); SET_OBJ_SIZE(dev_ops, ib_ucontext); + SET_OBJ_SIZE(dev_ops, ib_xrcd); } EXPORT_SYMBOL(ib_set_device_ops); diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index b1b6cc21ca96..a92783105cea 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -2293,20 +2293,28 @@ struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device, struct inode *inode, struct ib_udata *udata) { struct ib_xrcd *xrcd; + int ret; if (!device->ops.alloc_xrcd) return ERR_PTR(-EOPNOTSUPP); - xrcd = device->ops.alloc_xrcd(device, udata); - if (!IS_ERR(xrcd)) { - xrcd->device = device; - xrcd->inode = inode; - atomic_set(&xrcd->usecnt, 0); - init_rwsem(&xrcd->tgt_qps_rwsem); - xa_init(&xrcd->tgt_qps); - } + xrcd = rdma_zalloc_drv_obj(device, ib_xrcd); + if (!xrcd) + return ERR_PTR(-ENOMEM); + xrcd->device = device; + xrcd->inode = inode; + atomic_set(&xrcd->usecnt, 0); + init_rwsem(&xrcd->tgt_qps_rwsem); + xa_init(&xrcd->tgt_qps); + + ret = device->ops.alloc_xrcd(xrcd, udata); + if (ret) + goto err; return xrcd; +err: + kfree(xrcd); + return ERR_PTR(ret); } EXPORT_SYMBOL(ib_alloc_xrcd_user); @@ -2321,7 +2329,9 @@ int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata) return -EBUSY; WARN_ON(!xa_empty(&xrcd->tgt_qps)); - return xrcd->device->ops.dealloc_xrcd(xrcd, udata); + xrcd->device->ops.dealloc_xrcd(xrcd, udata); + kfree(xrcd); + return 0; } EXPORT_SYMBOL(ib_dealloc_xrcd_user); diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 816d28854a8e..5e7910a517da 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -1219,56 +1219,47 @@ static void mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn); } -static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev, - struct ib_udata *udata) +static int mlx4_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata) { - struct mlx4_ib_xrcd *xrcd; + struct mlx4_ib_dev *dev = to_mdev(ibxrcd->device); + struct mlx4_ib_xrcd *xrcd = to_mxrcd(ibxrcd); struct ib_cq_init_attr cq_attr = {}; int err; - if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)) - return ERR_PTR(-ENOSYS); - - xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL); - if (!xrcd) - return 
ERR_PTR(-ENOMEM); + if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)) + return -EOPNOTSUPP; - err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn); + err = mlx4_xrcd_alloc(dev->dev, &xrcd->xrcdn); if (err) - goto err1; + return err; - xrcd->pd = ib_alloc_pd(ibdev, 0); + xrcd->pd = ib_alloc_pd(ibxrcd->device, 0); if (IS_ERR(xrcd->pd)) { err = PTR_ERR(xrcd->pd); goto err2; } cq_attr.cqe = 1; - xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr); + xrcd->cq = ib_create_cq(ibxrcd->device, NULL, NULL, xrcd, &cq_attr); if (IS_ERR(xrcd->cq)) { err = PTR_ERR(xrcd->cq); goto err3; } - return &xrcd->ibxrcd; + return 0; err3: ib_dealloc_pd(xrcd->pd); err2: - mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn); -err1: - kfree(xrcd); - return ERR_PTR(err); + mlx4_xrcd_free(dev->dev, xrcd->xrcdn); + return err; } -static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata) +static void mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata) { ib_destroy_cq(to_mxrcd(xrcd)->cq); ib_dealloc_pd(to_mxrcd(xrcd)->pd); mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn); - kfree(xrcd); - - return 0; } static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid) @@ -2607,6 +2598,8 @@ static const struct ib_device_ops mlx4_ib_dev_mw_ops = { static const struct ib_device_ops mlx4_ib_dev_xrc_ops = { .alloc_xrcd = mlx4_ib_alloc_xrcd, .dealloc_xrcd = mlx4_ib_dealloc_xrcd, + + INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx4_ib_xrcd, ibxrcd), }; static const struct ib_device_ops mlx4_ib_dev_fs_ops = { diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 6e6e126c39ef..2da592054a3f 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -6671,6 +6671,8 @@ static const struct ib_device_ops mlx5_ib_dev_mw_ops = { static const struct ib_device_ops mlx5_ib_dev_xrc_ops = { .alloc_xrcd = mlx5_ib_alloc_xrcd, .dealloc_xrcd = mlx5_ib_dealloc_xrcd, + + INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx5_ib_xrcd, ibxrcd), }; static const struct ib_device_ops mlx5_ib_dev_dm_ops = { diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 27b069ef63d9..88e30ff77c16 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -1224,9 +1224,8 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, const struct ib_wc *in_wc, const struct ib_grh *in_grh, const struct ib_mad *in, struct ib_mad *out, size_t *out_mad_size, u16 *out_mad_pkey_index); -struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev, - struct ib_udata *udata); -int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata); +int mlx5_ib_alloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata); +void mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata); int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset); int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port); int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev, diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 7489b0479e4f..c6a4db9f6a0b 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -4700,41 +4700,23 @@ out: return err; } -struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev, - struct ib_udata *udata) +int mlx5_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata) { - struct mlx5_ib_dev *dev = to_mdev(ibdev); - struct mlx5_ib_xrcd *xrcd; - int err; + struct mlx5_ib_dev *dev = 
to_mdev(ibxrcd->device); + struct mlx5_ib_xrcd *xrcd = to_mxrcd(ibxrcd); if (!MLX5_CAP_GEN(dev->mdev, xrc)) - return ERR_PTR(-ENOSYS); - - xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL); - if (!xrcd) - return ERR_PTR(-ENOMEM); - - err = mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, 0); - if (err) { - kfree(xrcd); - return ERR_PTR(-ENOMEM); - } + return -EOPNOTSUPP; - return &xrcd->ibxrcd; + return mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, 0); } -int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata) +void mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata) { struct mlx5_ib_dev *dev = to_mdev(xrcd->device); u32 xrcdn = to_mxrcd(xrcd)->xrcdn; - int err; - err = mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0); - if (err) - mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn); - - kfree(xrcd); - return 0; + mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0); } static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type) diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index ecaf299e0789..087f001fd020 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2494,9 +2494,8 @@ struct ib_device_ops { int (*dealloc_mw)(struct ib_mw *mw); int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid); int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid); - struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device, - struct ib_udata *udata); - int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata); + int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata); + void (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata); struct ib_flow *(*create_flow)(struct ib_qp *qp, struct ib_flow_attr *flow_attr, int domain, struct ib_udata *udata); @@ -2655,6 +2654,7 @@ struct ib_device_ops { DECLARE_RDMA_OBJ_SIZE(ib_pd); DECLARE_RDMA_OBJ_SIZE(ib_srq); DECLARE_RDMA_OBJ_SIZE(ib_ucontext); + DECLARE_RDMA_OBJ_SIZE(ib_xrcd); }; struct ib_core_device { -- cgit v1.2.3 From 514acd00f957afa9b2a1fd521b2f180a950ee5e3 Mon Sep 17 00:00:00 2001 From: Andrzej Pietrasiewicz Date: Fri, 3 Jul 2020 12:43:54 +0200 Subject: thermal: Make thermal_zone_device_is_enabled() available to core only This function is not needed by drivers. 
Signed-off-by: Andrzej Pietrasiewicz Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20200703104354.19657-4-andrzej.p@collabora.com --- drivers/thermal/thermal_core.c | 1 - drivers/thermal/thermal_core.h | 2 ++ include/linux/thermal.h | 5 ----- 3 files changed, 2 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index a61e91513584..052343c59b0a 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -519,7 +519,6 @@ int thermal_zone_device_is_enabled(struct thermal_zone_device *tz) return mode == THERMAL_DEVICE_ENABLED; } -EXPORT_SYMBOL_GPL(thermal_zone_device_is_enabled); void thermal_zone_device_update(struct thermal_zone_device *tz, enum thermal_notify_event event) diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index c95689586e19..b1464b3a21e2 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -166,4 +166,6 @@ of_thermal_get_trip_points(struct thermal_zone_device *tz) } #endif +int thermal_zone_device_is_enabled(struct thermal_zone_device *tz); + #endif /* __THERMAL_CORE_H__ */ diff --git a/include/linux/thermal.h b/include/linux/thermal.h index b9efaa780d88..108251f23e5c 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -418,7 +418,6 @@ void thermal_cdev_update(struct thermal_cooling_device *); void thermal_notify_framework(struct thermal_zone_device *, int); int thermal_zone_device_enable(struct thermal_zone_device *tz); int thermal_zone_device_disable(struct thermal_zone_device *tz); -int thermal_zone_device_is_enabled(struct thermal_zone_device *tz); #else static inline struct thermal_zone_device *thermal_zone_device_register( const char *type, int trips, int mask, void *devdata, @@ -472,10 +471,6 @@ static inline int thermal_zone_device_enable(struct thermal_zone_device *tz) static inline int thermal_zone_device_disable(struct thermal_zone_device *tz) { return -ENODEV; } - -static inline int -thermal_zone_device_is_enabled(struct thermal_zone_device *tz) -{ return -ENODEV; } #endif /* CONFIG_THERMAL */ #endif /* __THERMAL_H__ */ -- cgit v1.2.3 From fe6a3d6521227e49857d0d6caabbdae3bd4aef89 Mon Sep 17 00:00:00 2001 From: Xu Yilun Date: Tue, 16 Jun 2020 12:08:45 +0800 Subject: fpga: dfl: afu: add interrupt support for port error reporting Error reporting interrupt is very useful to notify users that some errors are detected by the hardware. Once users are notified, they could query hardware logged error states, no need to continuously poll on these states. This patch adds interrupt support for port error reporting sub feature. It follows the common DFL interrupt notification and handling mechanism, implements two ioctl commands below for user to query number of irqs supported, and set/unset interrupt triggers. Ioctls: * DFL_FPGA_PORT_ERR_GET_IRQ_NUM get the number of irqs, which is used to determine whether/how many interrupts error reporting feature supports. * DFL_FPGA_PORT_ERR_SET_IRQ set/unset given eventfds as error interrupt triggers. 
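Not part of the patch, but to make the flow concrete: a minimal userspace sketch that pairs an eventfd with the port error interrupt through the two new ioctls. The /dev/dfl-port.0 path is an assumption and error handling is reduced to the bare minimum; struct dfl_fpga_irq_set and the ioctl names come from the UAPI header changed below.

/* Hypothetical usage sketch for the port error reporting interrupt. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/eventfd.h>
#include <linux/fpga-dfl.h>

int main(void)
{
        struct dfl_fpga_irq_set *irq_set;
        __u32 num_irqs = 0;
        uint64_t count;
        int fd, efd;

        fd = open("/dev/dfl-port.0", O_RDWR);   /* assumed device node */
        if (fd < 0 || ioctl(fd, DFL_FPGA_PORT_ERR_GET_IRQ_NUM, &num_irqs) || !num_irqs)
                return 1;

        efd = eventfd(0, 0);
        irq_set = calloc(1, sizeof(*irq_set) + sizeof(__s32));
        irq_set->argsz = sizeof(*irq_set) + sizeof(__s32);
        irq_set->start = 0;
        irq_set->count = 1;
        irq_set->evtfds[0] = efd;               /* register the trigger */
        if (ioctl(fd, DFL_FPGA_PORT_ERR_SET_IRQ, irq_set))
                return 1;

        /* blocks until the hardware latches a port error */
        read(efd, &count, sizeof(count));
        printf("port error interrupt fired %llu time(s)\n",
               (unsigned long long)count);
        return 0;
}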
Signed-off-by: Luwei Kang Signed-off-by: Wu Hao Signed-off-by: Xu Yilun Reviewed-by: Marcelo Tosatti Acked-by: Wu Hao Signed-off-by: Moritz Fischer --- drivers/fpga/dfl-afu-error.c | 17 +++++++++++++++++ drivers/fpga/dfl-afu-main.c | 4 ++++ include/uapi/linux/fpga-dfl.h | 23 +++++++++++++++++++++++ 3 files changed, 44 insertions(+) (limited to 'include') diff --git a/drivers/fpga/dfl-afu-error.c b/drivers/fpga/dfl-afu-error.c index c1467ae1a6b6..c4691187cca9 100644 --- a/drivers/fpga/dfl-afu-error.c +++ b/drivers/fpga/dfl-afu-error.c @@ -14,6 +14,7 @@ * Mitchel Henry */ +#include #include #include "dfl-afu.h" @@ -219,6 +220,21 @@ static void port_err_uinit(struct platform_device *pdev, afu_port_err_mask(&pdev->dev, true); } +static long +port_err_ioctl(struct platform_device *pdev, struct dfl_feature *feature, + unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case DFL_FPGA_PORT_ERR_GET_IRQ_NUM: + return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg); + case DFL_FPGA_PORT_ERR_SET_IRQ: + return dfl_feature_ioctl_set_irq(pdev, feature, arg); + default: + dev_dbg(&pdev->dev, "%x cmd not handled", cmd); + return -ENODEV; + } +} + const struct dfl_feature_id port_err_id_table[] = { {.id = PORT_FEATURE_ID_ERROR,}, {0,} @@ -227,4 +243,5 @@ const struct dfl_feature_id port_err_id_table[] = { const struct dfl_feature_ops port_err_ops = { .init = port_err_init, .uinit = port_err_uinit, + .ioctl = port_err_ioctl, }; diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c index b0c31789a909..357cd5d9b267 100644 --- a/drivers/fpga/dfl-afu-main.c +++ b/drivers/fpga/dfl-afu-main.c @@ -577,6 +577,7 @@ static int afu_release(struct inode *inode, struct file *filp) { struct platform_device *pdev = filp->private_data; struct dfl_feature_platform_data *pdata; + struct dfl_feature *feature; dev_dbg(&pdev->dev, "Device File Release\n"); @@ -586,6 +587,9 @@ static int afu_release(struct inode *inode, struct file *filp) dfl_feature_dev_use_end(pdata); if (!dfl_feature_dev_use_count(pdata)) { + dfl_fpga_dev_for_each_feature(pdata, feature) + dfl_fpga_set_irq_triggers(feature, 0, + feature->nr_irqs, NULL); __port_reset(pdev); afu_dma_region_destroy(pdata); } diff --git a/include/uapi/linux/fpga-dfl.h b/include/uapi/linux/fpga-dfl.h index 7331350f3067..6c71c9d7e926 100644 --- a/include/uapi/linux/fpga-dfl.h +++ b/include/uapi/linux/fpga-dfl.h @@ -164,6 +164,29 @@ struct dfl_fpga_irq_set { __s32 evtfds[]; }; +/** + * DFL_FPGA_PORT_ERR_GET_IRQ_NUM - _IOR(DFL_FPGA_MAGIC, DFL_PORT_BASE + 5, + * __u32 num_irqs) + * + * Get the number of irqs supported by the fpga port error reporting private + * feature. Currently hardware supports up to 1 irq. + * Return: 0 on success, -errno on failure. + */ +#define DFL_FPGA_PORT_ERR_GET_IRQ_NUM _IOR(DFL_FPGA_MAGIC, \ + DFL_PORT_BASE + 5, __u32) + +/** + * DFL_FPGA_PORT_ERR_SET_IRQ - _IOW(DFL_FPGA_MAGIC, DFL_PORT_BASE + 6, + * struct dfl_fpga_irq_set) + * + * Set fpga port error reporting interrupt trigger if evtfds[n] is valid. + * Unset related interrupt trigger if evtfds[n] is a negative value. + * Return: 0 on success, -errno on failure. 
+ */ +#define DFL_FPGA_PORT_ERR_SET_IRQ _IOW(DFL_FPGA_MAGIC, \ + DFL_PORT_BASE + 6, \ + struct dfl_fpga_irq_set) + /* IOCTLs for FME file descriptor */ /** -- cgit v1.2.3 From d43f20bae5173ba431526040c320c36fdd4f086d Mon Sep 17 00:00:00 2001 From: Xu Yilun Date: Tue, 16 Jun 2020 12:08:46 +0800 Subject: fpga: dfl: fme: add interrupt support for global error reporting Error reporting interrupt is very useful to notify users that some errors are detected by the hardware. Once users are notified, they could query hardware logged error states, no need to continuously poll on these states. This patch adds interrupt support for fme global error reporting sub feature. It follows the common DFL interrupt notification and handling mechanism. And it implements two ioctls below for user to query number of irqs supported, and set/unset interrupt triggers. Ioctls: * DFL_FPGA_FME_ERR_GET_IRQ_NUM get the number of irqs, which is used to determine whether/how many interrupts fme error reporting feature supports. * DFL_FPGA_FME_ERR_SET_IRQ set/unset given eventfds as fme error reporting interrupt triggers. Signed-off-by: Luwei Kang Signed-off-by: Wu Hao Signed-off-by: Xu Yilun Reviewed-by: Marcelo Tosatti Acked-by: Wu Hao Signed-off-by: Moritz Fischer --- drivers/fpga/dfl-fme-error.c | 18 ++++++++++++++++++ drivers/fpga/dfl-fme-main.c | 6 ++++++ include/uapi/linux/fpga-dfl.h | 23 +++++++++++++++++++++++ 3 files changed, 47 insertions(+) (limited to 'include') diff --git a/drivers/fpga/dfl-fme-error.c b/drivers/fpga/dfl-fme-error.c index f897d414b923..51c2892ec06d 100644 --- a/drivers/fpga/dfl-fme-error.c +++ b/drivers/fpga/dfl-fme-error.c @@ -15,6 +15,7 @@ * Mitchel, Henry */ +#include #include #include "dfl.h" @@ -348,6 +349,22 @@ static void fme_global_err_uinit(struct platform_device *pdev, fme_err_mask(&pdev->dev, true); } +static long +fme_global_error_ioctl(struct platform_device *pdev, + struct dfl_feature *feature, + unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case DFL_FPGA_FME_ERR_GET_IRQ_NUM: + return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg); + case DFL_FPGA_FME_ERR_SET_IRQ: + return dfl_feature_ioctl_set_irq(pdev, feature, arg); + default: + dev_dbg(&pdev->dev, "%x cmd not handled", cmd); + return -ENODEV; + } +} + const struct dfl_feature_id fme_global_err_id_table[] = { {.id = FME_FEATURE_ID_GLOBAL_ERR,}, {0,} @@ -356,4 +373,5 @@ const struct dfl_feature_id fme_global_err_id_table[] = { const struct dfl_feature_ops fme_global_err_ops = { .init = fme_global_err_init, .uinit = fme_global_err_uinit, + .ioctl = fme_global_error_ioctl, }; diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c index fc210d4e1863..77ea04d4edbe 100644 --- a/drivers/fpga/dfl-fme-main.c +++ b/drivers/fpga/dfl-fme-main.c @@ -620,11 +620,17 @@ static int fme_release(struct inode *inode, struct file *filp) { struct dfl_feature_platform_data *pdata = filp->private_data; struct platform_device *pdev = pdata->dev; + struct dfl_feature *feature; dev_dbg(&pdev->dev, "Device File Release\n"); mutex_lock(&pdata->lock); dfl_feature_dev_use_end(pdata); + + if (!dfl_feature_dev_use_count(pdata)) + dfl_fpga_dev_for_each_feature(pdata, feature) + dfl_fpga_set_irq_triggers(feature, 0, + feature->nr_irqs, NULL); mutex_unlock(&pdata->lock); return 0; diff --git a/include/uapi/linux/fpga-dfl.h b/include/uapi/linux/fpga-dfl.h index 6c71c9d7e926..b6495ea412ec 100644 --- a/include/uapi/linux/fpga-dfl.h +++ b/include/uapi/linux/fpga-dfl.h @@ -230,4 +230,27 @@ struct dfl_fpga_fme_port_pr { */ #define 
DFL_FPGA_FME_PORT_ASSIGN _IOW(DFL_FPGA_MAGIC, DFL_FME_BASE + 2, int) +/** + * DFL_FPGA_FME_ERR_GET_IRQ_NUM - _IOR(DFL_FPGA_MAGIC, DFL_FME_BASE + 3, + * __u32 num_irqs) + * + * Get the number of irqs supported by the fpga fme error reporting private + * feature. Currently hardware supports up to 1 irq. + * Return: 0 on success, -errno on failure. + */ +#define DFL_FPGA_FME_ERR_GET_IRQ_NUM _IOR(DFL_FPGA_MAGIC, \ + DFL_FME_BASE + 3, __u32) + +/** + * DFL_FPGA_FME_ERR_SET_IRQ - _IOW(DFL_FPGA_MAGIC, DFL_FME_BASE + 4, + * struct dfl_fpga_irq_set) + * + * Set fpga fme error reporting interrupt trigger if evtfds[n] is valid. + * Unset related interrupt trigger if evtfds[n] is a negative value. + * Return: 0 on success, -errno on failure. + */ +#define DFL_FPGA_FME_ERR_SET_IRQ _IOW(DFL_FPGA_MAGIC, \ + DFL_FME_BASE + 4, \ + struct dfl_fpga_irq_set) + #endif /* _UAPI_LINUX_FPGA_DFL_H */ -- cgit v1.2.3 From 09d86150141955ba1b3d7cbef23785f4996e4d6f Mon Sep 17 00:00:00 2001 From: Xu Yilun Date: Tue, 16 Jun 2020 12:08:47 +0800 Subject: fpga: dfl: afu: add AFU interrupt support AFU (Accelerated Function Unit) is dynamic region of the DFL based FPGA, and always defined by users. Some DFL based FPGA cards allow users to implement their own interrupts in AFU. In order to support this, hardware implements a new UINT (AFU Interrupt) private feature with related capability register which describes the number of supported AFU interrupts as well as the local index of the interrupts for software enumeration, and from software side, driver follows the common DFL interrupt notification and handling mechanism, and it implements two ioctls below for user to query number of irqs supported and set/unset interrupt triggers. Ioctls: * DFL_FPGA_PORT_UINT_GET_IRQ_NUM get the number of irqs, which is used to determine how many interrupts UINT feature supports. * DFL_FPGA_PORT_UINT_SET_IRQ set/unset eventfds as AFU interrupt triggers. 
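As with the error-reporting ioctls earlier in the series, a short hypothetical sketch of the userspace side: size the eventfd set from DFL_FPGA_PORT_UINT_GET_IRQ_NUM, arm every AFU interrupt with DFL_FPGA_PORT_UINT_SET_IRQ, and hand the fds to poll(). The function name, allocation strategy and the caller-side poll loop are assumptions, not part of this patch.

/* Hypothetical sketch: one eventfd per AFU user interrupt. */
#include <poll.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/eventfd.h>
#include <linux/fpga-dfl.h>

static int arm_afu_irqs(int port_fd, struct pollfd **pfds, __u32 *n)
{
        struct dfl_fpga_irq_set *set;
        __u32 i;

        if (ioctl(port_fd, DFL_FPGA_PORT_UINT_GET_IRQ_NUM, n) || !*n)
                return -1;

        set = calloc(1, sizeof(*set) + *n * sizeof(__s32));
        *pfds = calloc(*n, sizeof(**pfds));
        if (!set || !*pfds)
                return -1;

        set->argsz = sizeof(*set) + *n * sizeof(__s32);
        set->start = 0;
        set->count = *n;
        for (i = 0; i < *n; i++) {
                set->evtfds[i] = eventfd(0, 0);
                (*pfds)[i].fd = set->evtfds[i];
                (*pfds)[i].events = POLLIN;
        }

        return ioctl(port_fd, DFL_FPGA_PORT_UINT_SET_IRQ, set);
}

The caller would then loop on poll(*pfds, *n, -1) and read() whichever eventfd became readable to learn which AFU interrupt fired.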
Signed-off-by: Luwei Kang Signed-off-by: Wu Hao Signed-off-by: Xu Yilun Reviewed-by: Marcelo Tosatti Acked-by: Wu Hao Signed-off-by: Moritz Fischer --- drivers/fpga/dfl-afu-main.c | 28 ++++++++++++++++++++++++++++ include/uapi/linux/fpga-dfl.h | 23 +++++++++++++++++++++++ 2 files changed, 51 insertions(+) (limited to 'include') diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c index 357cd5d9b267..7c84feea7354 100644 --- a/drivers/fpga/dfl-afu-main.c +++ b/drivers/fpga/dfl-afu-main.c @@ -529,6 +529,30 @@ static const struct dfl_feature_ops port_stp_ops = { .init = port_stp_init, }; +static long +port_uint_ioctl(struct platform_device *pdev, struct dfl_feature *feature, + unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case DFL_FPGA_PORT_UINT_GET_IRQ_NUM: + return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg); + case DFL_FPGA_PORT_UINT_SET_IRQ: + return dfl_feature_ioctl_set_irq(pdev, feature, arg); + default: + dev_dbg(&pdev->dev, "%x cmd not handled", cmd); + return -ENODEV; + } +} + +static const struct dfl_feature_id port_uint_id_table[] = { + {.id = PORT_FEATURE_ID_UINT,}, + {0,} +}; + +static const struct dfl_feature_ops port_uint_ops = { + .ioctl = port_uint_ioctl, +}; + static struct dfl_feature_driver port_feature_drvs[] = { { .id_table = port_hdr_id_table, @@ -546,6 +570,10 @@ static struct dfl_feature_driver port_feature_drvs[] = { .id_table = port_stp_id_table, .ops = &port_stp_ops, }, + { + .id_table = port_uint_id_table, + .ops = &port_uint_ops, + }, { .ops = NULL, } diff --git a/include/uapi/linux/fpga-dfl.h b/include/uapi/linux/fpga-dfl.h index b6495ea412ec..1621b077bf21 100644 --- a/include/uapi/linux/fpga-dfl.h +++ b/include/uapi/linux/fpga-dfl.h @@ -187,6 +187,29 @@ struct dfl_fpga_irq_set { DFL_PORT_BASE + 6, \ struct dfl_fpga_irq_set) +/** + * DFL_FPGA_PORT_UINT_GET_IRQ_NUM - _IOR(DFL_FPGA_MAGIC, DFL_PORT_BASE + 7, + * __u32 num_irqs) + * + * Get the number of irqs supported by the fpga AFU interrupt private + * feature. + * Return: 0 on success, -errno on failure. + */ +#define DFL_FPGA_PORT_UINT_GET_IRQ_NUM _IOR(DFL_FPGA_MAGIC, \ + DFL_PORT_BASE + 7, __u32) + +/** + * DFL_FPGA_PORT_UINT_SET_IRQ - _IOW(DFL_FPGA_MAGIC, DFL_PORT_BASE + 8, + * struct dfl_fpga_irq_set) + * + * Set fpga AFU interrupt trigger if evtfds[n] is valid. + * Unset related interrupt trigger if evtfds[n] is a negative value. + * Return: 0 on success, -errno on failure. + */ +#define DFL_FPGA_PORT_UINT_SET_IRQ _IOW(DFL_FPGA_MAGIC, \ + DFL_PORT_BASE + 8, \ + struct dfl_fpga_irq_set) + /* IOCTLs for FME file descriptor */ /** -- cgit v1.2.3 From 3c5ff0c60f2febb170bc4096d4b8a8390ebb0bad Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 22 Apr 2020 08:58:22 +0100 Subject: KVM: arm64: timers: Rename kvm_timer_sync_hwstate to kvm_timer_sync_user kvm_timer_sync_hwstate() has nothing to do with the timer HW state, but more to do with the state of a userspace interrupt controller. Change the suffix from _hwstate to_user, in keeping with the rest of the code. 
Signed-off-by: Marc Zyngier --- arch/arm64/kvm/arch_timer.c | 2 +- arch/arm64/kvm/arm.c | 4 ++-- include/kvm/arm_arch_timer.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index a1fe0ea3254e..33d85a504720 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -615,7 +615,7 @@ static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu) } } -void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) +void kvm_timer_sync_user(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = vcpu_timer(vcpu); diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index beb0e68cccaa..e52f2b2305b5 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -723,7 +723,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) isb(); /* Ensure work in x_flush_hwstate is committed */ kvm_pmu_sync_hwstate(vcpu); if (static_branch_unlikely(&userspace_irqchip_in_use)) - kvm_timer_sync_hwstate(vcpu); + kvm_timer_sync_user(vcpu); kvm_vgic_sync_hwstate(vcpu); local_irq_enable(); preempt_enable(); @@ -768,7 +768,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) * timer virtual interrupt state. */ if (static_branch_unlikely(&userspace_irqchip_in_use)) - kvm_timer_sync_hwstate(vcpu); + kvm_timer_sync_user(vcpu); kvm_arch_vcpu_ctxsync_fp(vcpu); diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index d120e6c323e7..a821dd1df0cf 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -71,7 +71,7 @@ int kvm_timer_hyp_init(bool); int kvm_timer_enable(struct kvm_vcpu *vcpu); int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu); void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu); -void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu); +void kvm_timer_sync_user(struct kvm_vcpu *vcpu); bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu); void kvm_timer_update_run(struct kvm_vcpu *vcpu); void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu); -- cgit v1.2.3 From 41ce82f63c0624556b94b90551e4f70a0c913879 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 28 Jun 2019 15:23:43 +0100 Subject: KVM: arm64: timers: Move timer registers to the sys_regs file Move the timer gsisters to the sysreg file. This will further help when they are directly changed by a nesting hypervisor in the VNCR page. This requires moving the initialisation of the timer struct so that some of the helpers (such as arch_timer_ctx_index) can work correctly at an early stage. Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 6 ++ arch/arm64/kvm/arch_timer.c | 155 ++++++++++++++++++++++++++++++-------- arch/arm64/kvm/trace_arm.h | 8 +- include/kvm/arm_arch_timer.h | 11 +-- 4 files changed, 136 insertions(+), 44 deletions(-) (limited to 'include') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 91b1adb6789c..e1a32c0707bb 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -189,6 +189,12 @@ enum vcpu_sysreg { SP_EL1, SPSR_EL1, + CNTVOFF_EL2, + CNTV_CVAL_EL0, + CNTV_CTL_EL0, + CNTP_CVAL_EL0, + CNTP_CTL_EL0, + /* 32bit specific registers. 
Keep them at the end of the range */ DACR32_EL2, /* Domain Access Control Register */ IFSR32_EL2, /* Instruction Fault Status Register */ diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index 33d85a504720..32ba6fbc3814 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -51,6 +51,93 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu, struct arch_timer_context *timer, enum kvm_arch_timer_regs treg); +u32 timer_get_ctl(struct arch_timer_context *ctxt) +{ + struct kvm_vcpu *vcpu = ctxt->vcpu; + + switch(arch_timer_ctx_index(ctxt)) { + case TIMER_VTIMER: + return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0); + case TIMER_PTIMER: + return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0); + default: + WARN_ON(1); + return 0; + } +} + +u64 timer_get_cval(struct arch_timer_context *ctxt) +{ + struct kvm_vcpu *vcpu = ctxt->vcpu; + + switch(arch_timer_ctx_index(ctxt)) { + case TIMER_VTIMER: + return __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0); + case TIMER_PTIMER: + return __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0); + default: + WARN_ON(1); + return 0; + } +} + +static u64 timer_get_offset(struct arch_timer_context *ctxt) +{ + struct kvm_vcpu *vcpu = ctxt->vcpu; + + switch(arch_timer_ctx_index(ctxt)) { + case TIMER_VTIMER: + return __vcpu_sys_reg(vcpu, CNTVOFF_EL2); + default: + return 0; + } +} + +static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl) +{ + struct kvm_vcpu *vcpu = ctxt->vcpu; + + switch(arch_timer_ctx_index(ctxt)) { + case TIMER_VTIMER: + __vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl; + break; + case TIMER_PTIMER: + __vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl; + break; + default: + WARN_ON(1); + } +} + +static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval) +{ + struct kvm_vcpu *vcpu = ctxt->vcpu; + + switch(arch_timer_ctx_index(ctxt)) { + case TIMER_VTIMER: + __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval; + break; + case TIMER_PTIMER: + __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval; + break; + default: + WARN_ON(1); + } +} + +static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset) +{ + struct kvm_vcpu *vcpu = ctxt->vcpu; + + switch(arch_timer_ctx_index(ctxt)) { + case TIMER_VTIMER: + __vcpu_sys_reg(vcpu, CNTVOFF_EL2) = offset; + break; + default: + WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt)); + } +} + u64 kvm_phys_timer_read(void) { return timecounter->cc->read(timecounter->cc); @@ -124,8 +211,8 @@ static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx) { u64 cval, now; - cval = timer_ctx->cnt_cval; - now = kvm_phys_timer_read() - timer_ctx->cntvoff; + cval = timer_get_cval(timer_ctx); + now = kvm_phys_timer_read() - timer_get_offset(timer_ctx); if (now < cval) { u64 ns; @@ -144,8 +231,8 @@ static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx) { WARN_ON(timer_ctx && timer_ctx->loaded); return timer_ctx && - !(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) && - (timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_ENABLE); + ((timer_get_ctl(timer_ctx) & + (ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE); } /* @@ -256,8 +343,8 @@ static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx) if (!kvm_timer_irq_can_fire(timer_ctx)) return false; - cval = timer_ctx->cnt_cval; - now = kvm_phys_timer_read() - timer_ctx->cntvoff; + cval = timer_get_cval(timer_ctx); + now = kvm_phys_timer_read() - timer_get_offset(timer_ctx); return cval <= now; } @@ -350,8 +437,8 @@ static void timer_save_state(struct arch_timer_context *ctx) switch (index) { case TIMER_VTIMER: - 
ctx->cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL); - ctx->cnt_cval = read_sysreg_el0(SYS_CNTV_CVAL); + timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL)); + timer_set_cval(ctx, read_sysreg_el0(SYS_CNTV_CVAL)); /* Disable the timer */ write_sysreg_el0(0, SYS_CNTV_CTL); @@ -359,8 +446,8 @@ static void timer_save_state(struct arch_timer_context *ctx) break; case TIMER_PTIMER: - ctx->cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL); - ctx->cnt_cval = read_sysreg_el0(SYS_CNTP_CVAL); + timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL)); + timer_set_cval(ctx, read_sysreg_el0(SYS_CNTP_CVAL)); /* Disable the timer */ write_sysreg_el0(0, SYS_CNTP_CTL); @@ -429,14 +516,14 @@ static void timer_restore_state(struct arch_timer_context *ctx) switch (index) { case TIMER_VTIMER: - write_sysreg_el0(ctx->cnt_cval, SYS_CNTV_CVAL); + write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL); isb(); - write_sysreg_el0(ctx->cnt_ctl, SYS_CNTV_CTL); + write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL); break; case TIMER_PTIMER: - write_sysreg_el0(ctx->cnt_cval, SYS_CNTP_CVAL); + write_sysreg_el0(timer_get_cval(ctx), SYS_CNTP_CVAL); isb(); - write_sysreg_el0(ctx->cnt_ctl, SYS_CNTP_CTL); + write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL); break; case NR_KVM_TIMERS: BUG(); @@ -528,7 +615,7 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu) kvm_timer_vcpu_load_nogic(vcpu); } - set_cntvoff(map.direct_vtimer->cntvoff); + set_cntvoff(timer_get_offset(map.direct_vtimer)); kvm_timer_unblocking(vcpu); @@ -639,8 +726,8 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu) * resets the timer to be disabled and unmasked and is compliant with * the ARMv7 architecture. */ - vcpu_vtimer(vcpu)->cnt_ctl = 0; - vcpu_ptimer(vcpu)->cnt_ctl = 0; + timer_set_ctl(vcpu_vtimer(vcpu), 0); + timer_set_ctl(vcpu_ptimer(vcpu), 0); if (timer->enabled) { kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu)); @@ -668,13 +755,13 @@ static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff) mutex_lock(&kvm->lock); kvm_for_each_vcpu(i, tmp, kvm) - vcpu_vtimer(tmp)->cntvoff = cntvoff; + timer_set_offset(vcpu_vtimer(tmp), cntvoff); /* * When called from the vcpu create path, the CPU being created is not * included in the loop above, so we just set it here as well. */ - vcpu_vtimer(vcpu)->cntvoff = cntvoff; + timer_set_offset(vcpu_vtimer(vcpu), cntvoff); mutex_unlock(&kvm->lock); } @@ -684,9 +771,12 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); + vtimer->vcpu = vcpu; + ptimer->vcpu = vcpu; + /* Synchronize cntvoff across all vtimers of a VM. */ update_vtimer_cntvoff(vcpu, kvm_phys_timer_read()); - ptimer->cntvoff = 0; + timer_set_offset(ptimer, 0); hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); timer->bg_timer.function = kvm_bg_timer_expire; @@ -704,9 +794,6 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) vtimer->host_timer_irq_flags = host_vtimer_irq_flags; ptimer->host_timer_irq_flags = host_ptimer_irq_flags; - - vtimer->vcpu = vcpu; - ptimer->vcpu = vcpu; } static void kvm_timer_init_interrupt(void *info) @@ -756,10 +843,12 @@ static u64 read_timer_ctl(struct arch_timer_context *timer) * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit * regardless of ENABLE bit for our implementation convenience. 
*/ + u32 ctl = timer_get_ctl(timer); + if (!kvm_timer_compute_delta(timer)) - return timer->cnt_ctl | ARCH_TIMER_CTRL_IT_STAT; - else - return timer->cnt_ctl; + ctl |= ARCH_TIMER_CTRL_IT_STAT; + + return ctl; } u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid) @@ -795,8 +884,8 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu, switch (treg) { case TIMER_REG_TVAL: - val = timer->cnt_cval - kvm_phys_timer_read() + timer->cntvoff; - val &= lower_32_bits(val); + val = timer_get_cval(timer) - kvm_phys_timer_read() + timer_get_offset(timer); + val = lower_32_bits(val); break; case TIMER_REG_CTL: @@ -804,11 +893,11 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu, break; case TIMER_REG_CVAL: - val = timer->cnt_cval; + val = timer_get_cval(timer); break; case TIMER_REG_CNT: - val = kvm_phys_timer_read() - timer->cntvoff; + val = kvm_phys_timer_read() - timer_get_offset(timer); break; default: @@ -842,15 +931,15 @@ static void kvm_arm_timer_write(struct kvm_vcpu *vcpu, { switch (treg) { case TIMER_REG_TVAL: - timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + (s32)val; + timer_set_cval(timer, kvm_phys_timer_read() - timer_get_offset(timer) + (s32)val); break; case TIMER_REG_CTL: - timer->cnt_ctl = val & ~ARCH_TIMER_CTRL_IT_STAT; + timer_set_ctl(timer, val & ~ARCH_TIMER_CTRL_IT_STAT); break; case TIMER_REG_CVAL: - timer->cnt_cval = val; + timer_set_cval(timer, val); break; default: diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h index 4c71270cc097..4691053c5ee4 100644 --- a/arch/arm64/kvm/trace_arm.h +++ b/arch/arm64/kvm/trace_arm.h @@ -301,8 +301,8 @@ TRACE_EVENT(kvm_timer_save_state, ), TP_fast_assign( - __entry->ctl = ctx->cnt_ctl; - __entry->cval = ctx->cnt_cval; + __entry->ctl = timer_get_ctl(ctx); + __entry->cval = timer_get_cval(ctx); __entry->timer_idx = arch_timer_ctx_index(ctx); ), @@ -323,8 +323,8 @@ TRACE_EVENT(kvm_timer_restore_state, ), TP_fast_assign( - __entry->ctl = ctx->cnt_ctl; - __entry->cval = ctx->cnt_cval; + __entry->ctl = timer_get_ctl(ctx); + __entry->cval = timer_get_cval(ctx); __entry->timer_idx = arch_timer_ctx_index(ctx); ), diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index a821dd1df0cf..51c19381108c 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -26,16 +26,9 @@ enum kvm_arch_timer_regs { struct arch_timer_context { struct kvm_vcpu *vcpu; - /* Registers: control register, timer value */ - u32 cnt_ctl; - u64 cnt_cval; - /* Timer IRQ */ struct kvm_irq_level irq; - /* Virtual offset */ - u64 cntvoff; - /* Emulated Timer (may be unused) */ struct hrtimer hrtimer; @@ -109,4 +102,8 @@ void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu, enum kvm_arch_timer_regs treg, u64 val); +/* Needed for tracing */ +u32 timer_get_ctl(struct arch_timer_context *ctxt); +u64 timer_get_cval(struct arch_timer_context *ctxt); + #endif -- cgit v1.2.3 From 7af928851508fb25207806f57e287272dd498981 Mon Sep 17 00:00:00 2001 From: Andrew Scull Date: Thu, 18 Jun 2020 15:55:11 +0100 Subject: smccc: Make constants available to assembly Move constants out of the C-only section of the header next to the other constants that are available to assembly. 
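A hypothetical arm64 assembly fragment, not taken from this patch, showing what the move enables: with the paravirtualised time call ids and the SMCCC_RET_* codes outside the #ifndef __ASSEMBLY__ guard, a .S file can reference them directly. The branch label and the choice of HVC as conduit are assumptions for the example.

/* sketch only: probe the PV time feature from assembly */
#include <linux/arm-smccc.h>

        ldr     x0, =ARM_SMCCC_HV_PV_TIME_FEATURES      // function id in x0
        hvc     #0                                      // conduit is platform dependent
        cmp     x0, #SMCCC_RET_SUCCESS                  // 0 on success
        b.ne    pv_time_unsupported                     // hypothetical label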
Signed-off-by: Andrew Scull Reviewed-by: Sudeep Holla Acked-by: Will Deacon Link: https://lore.kernel.org/r/20200618145511.69203-1-ascull@google.com Signed-off-by: Catalin Marinas --- include/linux/arm-smccc.h | 44 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 56d6a5c6e353..efcbde731f03 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -81,6 +81,28 @@ ARM_SMCCC_SMC_32, \ 0, 0x7fff) +/* Paravirtualised time calls (defined by ARM DEN0057A) */ +#define ARM_SMCCC_HV_PV_TIME_FEATURES \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_64, \ + ARM_SMCCC_OWNER_STANDARD_HYP, \ + 0x20) + +#define ARM_SMCCC_HV_PV_TIME_ST \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_64, \ + ARM_SMCCC_OWNER_STANDARD_HYP, \ + 0x21) + +/* + * Return codes defined in ARM DEN 0070A + * ARM DEN 0070A is now merged/consolidated into ARM DEN 0028 C + */ +#define SMCCC_RET_SUCCESS 0 +#define SMCCC_RET_NOT_SUPPORTED -1 +#define SMCCC_RET_NOT_REQUIRED -2 +#define SMCCC_RET_INVALID_PARAMETER -3 + #ifndef __ASSEMBLY__ #include @@ -331,15 +353,6 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, */ #define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__) -/* - * Return codes defined in ARM DEN 0070A - * ARM DEN 0070A is now merged/consolidated into ARM DEN 0028 C - */ -#define SMCCC_RET_SUCCESS 0 -#define SMCCC_RET_NOT_SUPPORTED -1 -#define SMCCC_RET_NOT_REQUIRED -2 -#define SMCCC_RET_INVALID_PARAMETER -3 - /* * Like arm_smccc_1_1* but always returns SMCCC_RET_NOT_SUPPORTED. * Used when the SMCCC conduit is not defined. The empty asm statement @@ -385,18 +398,5 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, method; \ }) -/* Paravirtualised time calls (defined by ARM DEN0057A) */ -#define ARM_SMCCC_HV_PV_TIME_FEATURES \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_SMC_64, \ - ARM_SMCCC_OWNER_STANDARD_HYP, \ - 0x20) - -#define ARM_SMCCC_HV_PV_TIME_ST \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_SMC_64, \ - ARM_SMCCC_OWNER_STANDARD_HYP, \ - 0x21) - #endif /*__ASSEMBLY__*/ #endif /*__LINUX_ARM_SMCCC_H*/ -- cgit v1.2.3 From 65cec1ef250ae7d74047c8a0c3ea83644391a546 Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Thu, 2 Jul 2020 14:35:42 -0500 Subject: ALSA: isa/gus: remove -Wmissing-prototypes warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix W=1 warnings by adding prototypes to header file sound/isa/gus/gus_timer.c:141:6: warning: no previous prototype for ‘snd_gf1_timers_init’ [-Wmissing-prototypes] 141 | void snd_gf1_timers_init(struct snd_gus_card * gus) | ^~~~~~~~~~~~~~~~~~~ sound/isa/gus/gus_timer.c:177:6: warning: no previous prototype for ‘snd_gf1_timers_done’ [-Wmissing-prototypes] 177 | void snd_gf1_timers_done(struct snd_gus_card * gus) | ^~~~~~~~~~~~~~~~~~~ Signed-off-by: Pierre-Louis Bossart Link: https://lore.kernel.org/r/20200702193604.169059-2-pierre-louis.bossart@linux.intel.com Signed-off-by: Takashi Iwai --- include/sound/gus.h | 4 ++++ sound/isa/gus/gus_reset.c | 2 -- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/sound/gus.h b/include/sound/gus.h index 410939ecf3a5..cd8da68cab92 100644 --- a/include/sound/gus.h +++ b/include/sound/gus.h @@ -613,4 +613,8 @@ int snd_gus_dram_write(struct snd_gus_card *gus, char __user *ptr, int 
snd_gus_dram_read(struct snd_gus_card *gus, char __user *ptr, unsigned int addr, unsigned int size, int rom); +/* gus_timer.c */ +void snd_gf1_timers_init(struct snd_gus_card *gus); +void snd_gf1_timers_done(struct snd_gus_card *gus); + #endif /* __SOUND_GUS_H */ diff --git a/sound/isa/gus/gus_reset.c b/sound/isa/gus/gus_reset.c index 07bfcda43827..9a1ab5872c4f 100644 --- a/sound/isa/gus/gus_reset.c +++ b/sound/isa/gus/gus_reset.c @@ -9,8 +9,6 @@ #include #include -extern void snd_gf1_timers_init(struct snd_gus_card * gus); -extern void snd_gf1_timers_done(struct snd_gus_card * gus); extern int snd_gf1_synth_init(struct snd_gus_card * gus); extern void snd_gf1_synth_done(struct snd_gus_card * gus); -- cgit v1.2.3 From 2631ed00b0498810f8d5c2163c6b5270d893687b Mon Sep 17 00:00:00 2001 From: "Peter Zijlstra (Intel)" Date: Thu, 25 Jun 2020 16:03:12 +0800 Subject: tlb: mmu_gather: add tlb_flush_*_range APIs tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end, then set corresponding cleared_*. Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Zhenyu Ye Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20200625080314.230-5-yezhenyu2@huawei.com Signed-off-by: Catalin Marinas --- include/asm-generic/tlb.h | 55 ++++++++++++++++++++++++++++++++++------------- 1 file changed, 40 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 3f1649a8cf55..ef75ec86f865 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -512,6 +512,38 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm } #endif +/* + * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end, + * and set corresponding cleared_*. + */ +static inline void tlb_flush_pte_range(struct mmu_gather *tlb, + unsigned long address, unsigned long size) +{ + __tlb_adjust_range(tlb, address, size); + tlb->cleared_ptes = 1; +} + +static inline void tlb_flush_pmd_range(struct mmu_gather *tlb, + unsigned long address, unsigned long size) +{ + __tlb_adjust_range(tlb, address, size); + tlb->cleared_pmds = 1; +} + +static inline void tlb_flush_pud_range(struct mmu_gather *tlb, + unsigned long address, unsigned long size) +{ + __tlb_adjust_range(tlb, address, size); + tlb->cleared_puds = 1; +} + +static inline void tlb_flush_p4d_range(struct mmu_gather *tlb, + unsigned long address, unsigned long size) +{ + __tlb_adjust_range(tlb, address, size); + tlb->cleared_p4ds = 1; +} + #ifndef __tlb_remove_tlb_entry #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) #endif @@ -525,19 +557,17 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm */ #define tlb_remove_tlb_entry(tlb, ptep, address) \ do { \ - __tlb_adjust_range(tlb, address, PAGE_SIZE); \ - tlb->cleared_ptes = 1; \ + tlb_flush_pte_range(tlb, address, PAGE_SIZE); \ __tlb_remove_tlb_entry(tlb, ptep, address); \ } while (0) #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \ do { \ unsigned long _sz = huge_page_size(h); \ - __tlb_adjust_range(tlb, address, _sz); \ if (_sz == PMD_SIZE) \ - tlb->cleared_pmds = 1; \ + tlb_flush_pmd_range(tlb, address, _sz); \ else if (_sz == PUD_SIZE) \ - tlb->cleared_puds = 1; \ + tlb_flush_pud_range(tlb, address, _sz); \ __tlb_remove_tlb_entry(tlb, ptep, address); \ } while (0) @@ -551,8 +581,7 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \ do { \ - 
__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE); \ - tlb->cleared_pmds = 1; \ + tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE); \ __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \ } while (0) @@ -566,8 +595,7 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm #define tlb_remove_pud_tlb_entry(tlb, pudp, address) \ do { \ - __tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE); \ - tlb->cleared_puds = 1; \ + tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE); \ __tlb_remove_pud_tlb_entry(tlb, pudp, address); \ } while (0) @@ -592,9 +620,8 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm #ifndef pte_free_tlb #define pte_free_tlb(tlb, ptep, address) \ do { \ - __tlb_adjust_range(tlb, address, PAGE_SIZE); \ + tlb_flush_pmd_range(tlb, address, PAGE_SIZE); \ tlb->freed_tables = 1; \ - tlb->cleared_pmds = 1; \ __pte_free_tlb(tlb, ptep, address); \ } while (0) #endif @@ -602,9 +629,8 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm #ifndef pmd_free_tlb #define pmd_free_tlb(tlb, pmdp, address) \ do { \ - __tlb_adjust_range(tlb, address, PAGE_SIZE); \ + tlb_flush_pud_range(tlb, address, PAGE_SIZE); \ tlb->freed_tables = 1; \ - tlb->cleared_puds = 1; \ __pmd_free_tlb(tlb, pmdp, address); \ } while (0) #endif @@ -612,9 +638,8 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm #ifndef pud_free_tlb #define pud_free_tlb(tlb, pudp, address) \ do { \ - __tlb_adjust_range(tlb, address, PAGE_SIZE); \ + tlb_flush_p4d_range(tlb, address, PAGE_SIZE); \ tlb->freed_tables = 1; \ - tlb->cleared_p4ds = 1; \ __pud_free_tlb(tlb, pudp, address); \ } while (0) #endif -- cgit v1.2.3 From 27c90e5e48d008bfda1cf6108eb699697317c67b Mon Sep 17 00:00:00 2001 From: Drew Fustini Date: Wed, 1 Jul 2020 03:33:20 +0200 Subject: ARM: dts: am33xx-l4: change #pinctrl-cells from 1 to 2 Increase #pinctrl-cells to 2 so that mux and conf be kept separate. This requires the AM33XX_PADCONF macro in omap.h to also be modified to keep pin conf and pin mux values separate. 
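For a single pin entry this means the following (symbolic sketch, values not taken from any real board file):

/* Old, #pinctrl-cells = <1>:
 *   AM33XX_PADCONF(pa, dir, mux)  expands to  <offset ((dir) | (mux))>   - one combined value cell
 * New, #pinctrl-cells = <2>:
 *   AM33XX_PADCONF(pa, conf, mux) expands to  <offset (conf) (mux)>      - conf and mux stay separate
 * with offset = OMAP_IOPAD_OFFSET((pa), 0x0800) in both cases.
 */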
Signed-off-by: Drew Fustini Acked-by: Tony Lindgren Acked-by: Haojian Zhuang Link: https://lore.kernel.org/r/20200701013320.130441-3-drew@beagleboard.org Signed-off-by: Linus Walleij --- arch/arm/boot/dts/am33xx-l4.dtsi | 2 +- include/dt-bindings/pinctrl/omap.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi index 7ff11d6bf0f2..dafd6e8b42a1 100644 --- a/arch/arm/boot/dts/am33xx-l4.dtsi +++ b/arch/arm/boot/dts/am33xx-l4.dtsi @@ -278,7 +278,7 @@ am33xx_pinmux: pinmux@800 { compatible = "pinctrl-single"; reg = <0x800 0x238>; - #pinctrl-cells = <1>; + #pinctrl-cells = <2>; pinctrl-single,register-width = <32>; pinctrl-single,function-mask = <0x7f>; }; diff --git a/include/dt-bindings/pinctrl/omap.h b/include/dt-bindings/pinctrl/omap.h index 625718042413..2d2a8c737822 100644 --- a/include/dt-bindings/pinctrl/omap.h +++ b/include/dt-bindings/pinctrl/omap.h @@ -65,7 +65,7 @@ #define DM814X_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0800) (val) #define DM816X_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0800) (val) #define AM33XX_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0800) (val) -#define AM33XX_PADCONF(pa, dir, mux) OMAP_IOPAD_OFFSET((pa), 0x0800) ((dir) | (mux)) +#define AM33XX_PADCONF(pa, conf, mux) OMAP_IOPAD_OFFSET((pa), 0x0800) (conf) (mux) /* * Macros to allow using the offset from the padconf physical address -- cgit v1.2.3 From 1ce50e7d408ef2bdc8ca021363fd46d1b8bfad00 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Mon, 6 Jul 2020 12:55:37 +0200 Subject: thermal: core: genetlink support for events/cmd/sampling Initially the thermal framework had a very simple notification mechanism to send generic netlink messages to the userspace. The notification function was never called from anywhere and the corresponding dead code was removed. It was probably a first attempt to introduce the netlink notification. At LPC2018, the presentation "Linux thermal: User kernel interface", proposed to create the notifications to the userspace via a kfifo. The advantage of the kfifo is the performance. It is usually used from a 1:1 communication channel where a driver captures data and sends it as fast as possible to a userspace process. The drawback is that only one process uses the notification channel exclusively, thus no other process is allowed to use the channel to get temperature or notifications. This patch defines a generic netlink API to discover the current thermal setup and adds event notifications as well as temperature sampling. As any genetlink protocol, it can evolve and the versioning allows to keep the backward compatibility. In order to prevent the user from getting flooded with data on a single channel, there are two multicast channels, one for the temperature sampling when the thermal zone is updated and another one for the events, so the user can get the events only without the thermal zone temperature sampling. Also, a list of commands to discover the thermal setup is added and can be extended when needed. 
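As a rough sketch of how the two channels are meant to be fed (the surrounding function is hypothetical; only the two helpers come from this patch):

/* Hypothetical caller inside the thermal core. */
static void example_tz_update(struct thermal_zone_device *tz, int temp, int trip_id)
{
	/* Temperature samples go to the "sampling" multicast group. */
	thermal_genl_sampling_temp(tz->id, temp);

	/* Trip-point crossings go to the "event" multicast group. */
	thermal_notify_tz_trip_up(tz->id, trip_id);
}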
Reviewed-by: Amit Kucheria Signed-off-by: Daniel Lezcano Acked-by: Zhang Rui Link: https://lore.kernel.org/r/20200706105538.2159-3-daniel.lezcano@linaro.org --- drivers/thermal/Makefile | 2 +- drivers/thermal/thermal_core.h | 18 ++ drivers/thermal/thermal_netlink.c | 648 ++++++++++++++++++++++++++++++++++++++ include/linux/thermal.h | 17 - include/uapi/linux/thermal.h | 89 +++++- 5 files changed, 739 insertions(+), 35 deletions(-) create mode 100644 drivers/thermal/thermal_netlink.c (limited to 'include') diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile index 0c8b84a09b9a..1bbf0805fb04 100644 --- a/drivers/thermal/Makefile +++ b/drivers/thermal/Makefile @@ -5,7 +5,7 @@ obj-$(CONFIG_THERMAL) += thermal_sys.o thermal_sys-y += thermal_core.o thermal_sysfs.o \ - thermal_helpers.o + thermal_helpers.o thermal_netlink.o # interface to/from other layers providing sensors thermal_sys-$(CONFIG_THERMAL_HWMON) += thermal_hwmon.o diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 967b0ba6593e..4d9455ef0c32 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -52,6 +52,24 @@ int for_each_thermal_governor(int (*cb)(struct thermal_governor *, void *), struct thermal_zone_device *thermal_zone_get_by_id(int id); +/* Netlink notification function */ +int thermal_notify_tz_create(int tz_id, const char *name); +int thermal_notify_tz_delete(int tz_id); +int thermal_notify_tz_enable(int tz_id); +int thermal_notify_tz_disable(int tz_id); +int thermal_notify_tz_trip_down(int tz_id, int id); +int thermal_notify_tz_trip_up(int tz_id, int id); +int thermal_notify_tz_trip_delete(int tz_id, int id); +int thermal_notify_tz_trip_add(int tz_id, int id, int type, + int temp, int hyst); +int thermal_notify_tz_trip_change(int tz_id, int id, int type, + int temp, int hyst); +int thermal_notify_cdev_state_update(int cdev_id, int state); +int thermal_notify_cdev_add(int cdev_id, const char *name, int max_state); +int thermal_notify_cdev_delete(int cdev_id); +int thermal_notify_tz_gov_change(int tz_id, const char *name); +int thermal_genl_sampling_temp(int id, int temp); + struct thermal_attr { struct device_attribute attr; char name[THERMAL_NAME_LENGTH]; diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c new file mode 100644 index 000000000000..dd0a3b889674 --- /dev/null +++ b/drivers/thermal/thermal_netlink.c @@ -0,0 +1,648 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2020 Linaro Limited + * + * Author: Daniel Lezcano + * + * Generic netlink for thermal management framework + */ +#include +#include +#include +#include + +#include "thermal_core.h" + +static const struct genl_multicast_group thermal_genl_mcgrps[] = { + { .name = THERMAL_GENL_SAMPLING_GROUP_NAME, }, + { .name = THERMAL_GENL_EVENT_GROUP_NAME, }, +}; + +static const struct nla_policy thermal_genl_policy[THERMAL_GENL_ATTR_MAX + 1] = { + /* Thermal zone */ + [THERMAL_GENL_ATTR_TZ] = { .type = NLA_NESTED }, + [THERMAL_GENL_ATTR_TZ_ID] = { .type = NLA_U32 }, + [THERMAL_GENL_ATTR_TZ_TEMP] = { .type = NLA_U32 }, + [THERMAL_GENL_ATTR_TZ_TRIP] = { .type = NLA_NESTED }, + [THERMAL_GENL_ATTR_TZ_TRIP_ID] = { .type = NLA_U32 }, + [THERMAL_GENL_ATTR_TZ_TRIP_TEMP] = { .type = NLA_U32 }, + [THERMAL_GENL_ATTR_TZ_TRIP_TYPE] = { .type = NLA_U32 }, + [THERMAL_GENL_ATTR_TZ_TRIP_HYST] = { .type = NLA_U32 }, + [THERMAL_GENL_ATTR_TZ_MODE] = { .type = NLA_U32 }, + [THERMAL_GENL_ATTR_TZ_CDEV_WEIGHT] = { .type = NLA_U32 }, + [THERMAL_GENL_ATTR_TZ_NAME] = { .type = 
NLA_STRING, + .len = THERMAL_NAME_LENGTH }, + /* Governor(s) */ + [THERMAL_GENL_ATTR_TZ_GOV] = { .type = NLA_NESTED }, + [THERMAL_GENL_ATTR_TZ_GOV_NAME] = { .type = NLA_STRING, + .len = THERMAL_NAME_LENGTH }, + /* Cooling devices */ + [THERMAL_GENL_ATTR_CDEV] = { .type = NLA_NESTED }, + [THERMAL_GENL_ATTR_CDEV_ID] = { .type = NLA_U32 }, + [THERMAL_GENL_ATTR_CDEV_CUR_STATE] = { .type = NLA_U32 }, + [THERMAL_GENL_ATTR_CDEV_MAX_STATE] = { .type = NLA_U32 }, + [THERMAL_GENL_ATTR_CDEV_NAME] = { .type = NLA_STRING, + .len = THERMAL_NAME_LENGTH }, +}; + +struct param { + struct nlattr **attrs; + struct sk_buff *msg; + const char *name; + int tz_id; + int cdev_id; + int trip_id; + int trip_temp; + int trip_type; + int trip_hyst; + int temp; + int cdev_state; + int cdev_max_state; +}; + +typedef int (*cb_t)(struct param *); + +static struct genl_family thermal_gnl_family; + +/************************** Sampling encoding *******************************/ + +int thermal_genl_sampling_temp(int id, int temp) +{ + struct sk_buff *skb; + void *hdr; + + skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + hdr = genlmsg_put(skb, 0, 0, &thermal_gnl_family, 0, + THERMAL_GENL_SAMPLING_TEMP); + if (!hdr) + return -EMSGSIZE; + + if (nla_put_u32(skb, THERMAL_GENL_ATTR_TZ_ID, id)) + goto out_cancel; + + if (nla_put_u32(skb, THERMAL_GENL_ATTR_TZ_TEMP, temp)) + goto out_cancel; + + genlmsg_end(skb, hdr); + + genlmsg_multicast(&thermal_gnl_family, skb, 0, 0, GFP_KERNEL); + + return 0; +out_cancel: + genlmsg_cancel(skb, hdr); + nlmsg_free(skb); + + return -EMSGSIZE; +} + +/**************************** Event encoding *********************************/ + +static int thermal_genl_event_tz_create(struct param *p) +{ + if (nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_ID, p->tz_id) || + nla_put_string(p->msg, THERMAL_GENL_ATTR_TZ_NAME, p->name)) + return -EMSGSIZE; + + return 0; +} + +static int thermal_genl_event_tz(struct param *p) +{ + if (nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_ID, p->tz_id)) + return -EMSGSIZE; + + return 0; +} + +static int thermal_genl_event_tz_trip_up(struct param *p) +{ + if (nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_ID, p->tz_id) || + nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, p->trip_id)) + return -EMSGSIZE; + + return 0; +} + +static int thermal_genl_event_tz_trip_add(struct param *p) +{ + if (nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_ID, p->tz_id) || + nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, p->trip_id) || + nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_TRIP_TYPE, p->trip_type) || + nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_TRIP_TEMP, p->trip_temp) || + nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_TRIP_HYST, p->trip_hyst)) + return -EMSGSIZE; + + return 0; +} + +static int thermal_genl_event_tz_trip_delete(struct param *p) +{ + if (nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_ID, p->tz_id) || + nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, p->trip_id)) + return -EMSGSIZE; + + return 0; +} + +static int thermal_genl_event_cdev_add(struct param *p) +{ + if (nla_put_string(p->msg, THERMAL_GENL_ATTR_CDEV_NAME, + p->name) || + nla_put_u32(p->msg, THERMAL_GENL_ATTR_CDEV_ID, + p->cdev_id) || + nla_put_u32(p->msg, THERMAL_GENL_ATTR_CDEV_MAX_STATE, + p->cdev_max_state)) + return -EMSGSIZE; + + return 0; +} + +static int thermal_genl_event_cdev_delete(struct param *p) +{ + if (nla_put_u32(p->msg, THERMAL_GENL_ATTR_CDEV_ID, p->cdev_id)) + return -EMSGSIZE; + + return 0; +} + +static int thermal_genl_event_cdev_state_update(struct param *p) +{ + if (nla_put_u32(p->msg, 
THERMAL_GENL_ATTR_CDEV_ID, + p->cdev_id) || + nla_put_u32(p->msg, THERMAL_GENL_ATTR_CDEV_CUR_STATE, + p->cdev_state)) + return -EMSGSIZE; + + return 0; +} + +static int thermal_genl_event_gov_change(struct param *p) +{ + if (nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_ID, p->tz_id) || + nla_put_string(p->msg, THERMAL_GENL_ATTR_GOV_NAME, p->name)) + return -EMSGSIZE; + + return 0; +} + +int thermal_genl_event_tz_delete(struct param *p) + __attribute__((alias("thermal_genl_event_tz"))); + +int thermal_genl_event_tz_enable(struct param *p) + __attribute__((alias("thermal_genl_event_tz"))); + +int thermal_genl_event_tz_disable(struct param *p) + __attribute__((alias("thermal_genl_event_tz"))); + +int thermal_genl_event_tz_trip_down(struct param *p) + __attribute__((alias("thermal_genl_event_tz_trip_up"))); + +int thermal_genl_event_tz_trip_change(struct param *p) + __attribute__((alias("thermal_genl_event_tz_trip_add"))); + +static cb_t event_cb[] = { + [THERMAL_GENL_EVENT_TZ_CREATE] = thermal_genl_event_tz_create, + [THERMAL_GENL_EVENT_TZ_DELETE] = thermal_genl_event_tz_delete, + [THERMAL_GENL_EVENT_TZ_ENABLE] = thermal_genl_event_tz_enable, + [THERMAL_GENL_EVENT_TZ_DISABLE] = thermal_genl_event_tz_disable, + [THERMAL_GENL_EVENT_TZ_TRIP_UP] = thermal_genl_event_tz_trip_up, + [THERMAL_GENL_EVENT_TZ_TRIP_DOWN] = thermal_genl_event_tz_trip_down, + [THERMAL_GENL_EVENT_TZ_TRIP_CHANGE] = thermal_genl_event_tz_trip_change, + [THERMAL_GENL_EVENT_TZ_TRIP_ADD] = thermal_genl_event_tz_trip_add, + [THERMAL_GENL_EVENT_TZ_TRIP_DELETE] = thermal_genl_event_tz_trip_delete, + [THERMAL_GENL_EVENT_CDEV_ADD] = thermal_genl_event_cdev_add, + [THERMAL_GENL_EVENT_CDEV_DELETE] = thermal_genl_event_cdev_delete, + [THERMAL_GENL_EVENT_CDEV_STATE_UPDATE] = thermal_genl_event_cdev_state_update, + [THERMAL_GENL_EVENT_TZ_GOV_CHANGE] = thermal_genl_event_gov_change, +}; + +/* + * Generic netlink event encoding + */ +static int thermal_genl_send_event(enum thermal_genl_event event, + struct param *p) +{ + struct sk_buff *msg; + int ret = -EMSGSIZE; + void *hdr; + + msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + p->msg = msg; + + hdr = genlmsg_put(msg, 0, 0, &thermal_gnl_family, 0, event); + if (!hdr) + goto out_free_msg; + + ret = event_cb[event](p); + if (ret) + goto out_cancel_msg; + + genlmsg_end(msg, hdr); + + genlmsg_multicast(&thermal_gnl_family, msg, 0, 1, GFP_KERNEL); + + return 0; + +out_cancel_msg: + genlmsg_cancel(msg, hdr); +out_free_msg: + nlmsg_free(msg); + + return ret; +} + +int thermal_notify_tz_create(int tz_id, const char *name) +{ + struct param p = { .tz_id = tz_id, .name = name }; + + return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_CREATE, &p); +} + +int thermal_notify_tz_delete(int tz_id) +{ + struct param p = { .tz_id = tz_id }; + + return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_DELETE, &p); +} + +int thermal_notify_tz_enable(int tz_id) +{ + struct param p = { .tz_id = tz_id }; + + return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_ENABLE, &p); +} + +int thermal_notify_tz_disable(int tz_id) +{ + struct param p = { .tz_id = tz_id }; + + return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_DISABLE, &p); +} + +int thermal_notify_tz_trip_down(int tz_id, int trip_id) +{ + struct param p = { .tz_id = tz_id, .trip_id = trip_id }; + + return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_TRIP_DOWN, &p); +} + +int thermal_notify_tz_trip_up(int tz_id, int trip_id) +{ + struct param p = { .tz_id = tz_id, .trip_id = trip_id }; + + return 
thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_TRIP_UP, &p); +} + +int thermal_notify_tz_trip_add(int tz_id, int trip_id, int trip_type, + int trip_temp, int trip_hyst) +{ + struct param p = { .tz_id = tz_id, .trip_id = trip_id, + .trip_type = trip_type, .trip_temp = trip_temp, + .trip_hyst = trip_hyst }; + + return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_TRIP_ADD, &p); +} + +int thermal_notify_tz_trip_delete(int tz_id, int trip_id) +{ + struct param p = { .tz_id = tz_id, .trip_id = trip_id }; + + return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_TRIP_DELETE, &p); +} + +int thermal_notify_tz_trip_change(int tz_id, int trip_id, int trip_type, + int trip_temp, int trip_hyst) +{ + struct param p = { .tz_id = tz_id, .trip_id = trip_id, + .trip_type = trip_type, .trip_temp = trip_temp, + .trip_hyst = trip_hyst }; + + return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_TRIP_CHANGE, &p); +} + +int thermal_notify_cdev_state_update(int cdev_id, int cdev_state) +{ + struct param p = { .cdev_id = cdev_id, .cdev_state = cdev_state }; + + return thermal_genl_send_event(THERMAL_GENL_EVENT_CDEV_STATE_UPDATE, &p); +} + +int thermal_notify_cdev_add(int cdev_id, const char *name, int cdev_max_state) +{ + struct param p = { .cdev_id = cdev_id, .name = name, + .cdev_max_state = cdev_max_state }; + + return thermal_genl_send_event(THERMAL_GENL_EVENT_CDEV_ADD, &p); +} + +int thermal_notify_cdev_delete(int cdev_id) +{ + struct param p = { .cdev_id = cdev_id }; + + return thermal_genl_send_event(THERMAL_GENL_EVENT_CDEV_DELETE, &p); +} + +int thermal_notify_tz_gov_change(int tz_id, const char *name) +{ + struct param p = { .tz_id = tz_id, .name = name }; + + return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_GOV_CHANGE, &p); +} + +/*************************** Command encoding ********************************/ + +static int __thermal_genl_cmd_tz_get_id(struct thermal_zone_device *tz, + void *data) +{ + struct sk_buff *msg = data; + + if (nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_ID, tz->id) || + nla_put_string(msg, THERMAL_GENL_ATTR_TZ_NAME, tz->type)) + return -EMSGSIZE; + + return 0; +} + +static int thermal_genl_cmd_tz_get_id(struct param *p) +{ + struct sk_buff *msg = p->msg; + struct nlattr *start_tz; + int ret; + + start_tz = nla_nest_start(msg, THERMAL_GENL_ATTR_TZ); + if (!start_tz) + return -EMSGSIZE; + + ret = for_each_thermal_zone(__thermal_genl_cmd_tz_get_id, msg); + if (ret) + goto out_cancel_nest; + + nla_nest_end(msg, start_tz); + + return 0; + +out_cancel_nest: + nla_nest_cancel(msg, start_tz); + + return ret; +} + +static int thermal_genl_cmd_tz_get_trip(struct param *p) +{ + struct sk_buff *msg = p->msg; + struct thermal_zone_device *tz; + struct nlattr *start_trip; + int i, id; + + if (!p->attrs[THERMAL_GENL_ATTR_TZ_ID]) + return -EINVAL; + + id = nla_get_u32(p->attrs[THERMAL_GENL_ATTR_TZ_ID]); + + tz = thermal_zone_get_by_id(id); + if (!tz) + return -EINVAL; + + start_trip = nla_nest_start(msg, THERMAL_GENL_ATTR_TZ_TRIP); + if (!start_trip) + return -EMSGSIZE; + + mutex_lock(&tz->lock); + + for (i = 0; i < tz->trips; i++) { + + enum thermal_trip_type type; + int temp, hyst; + + tz->ops->get_trip_type(tz, i, &type); + tz->ops->get_trip_temp(tz, i, &temp); + tz->ops->get_trip_hyst(tz, i, &hyst); + + if (nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, i) || + nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TYPE, type) || + nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TEMP, temp) || + nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_HYST, hyst)) + goto out_cancel_nest; + } + + mutex_unlock(&tz->lock); + + 
nla_nest_end(msg, start_trip); + + return 0; + +out_cancel_nest: + mutex_unlock(&tz->lock); + + return -EMSGSIZE; +} + +static int thermal_genl_cmd_tz_get_temp(struct param *p) +{ + struct sk_buff *msg = p->msg; + struct thermal_zone_device *tz; + int temp, ret, id; + + if (!p->attrs[THERMAL_GENL_ATTR_TZ_ID]) + return -EINVAL; + + id = nla_get_u32(p->attrs[THERMAL_GENL_ATTR_TZ_ID]); + + tz = thermal_zone_get_by_id(id); + if (!tz) + return -EINVAL; + + ret = thermal_zone_get_temp(tz, &temp); + if (ret) + return ret; + + if (nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_ID, id) || + nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TEMP, temp)) + return -EMSGSIZE; + + return 0; +} + +static int thermal_genl_cmd_tz_get_gov(struct param *p) +{ + struct sk_buff *msg = p->msg; + struct thermal_zone_device *tz; + int id, ret = 0; + + if (!p->attrs[THERMAL_GENL_ATTR_TZ_ID]) + return -EINVAL; + + id = nla_get_u32(p->attrs[THERMAL_GENL_ATTR_TZ_ID]); + + tz = thermal_zone_get_by_id(id); + if (!tz) + return -EINVAL; + + mutex_lock(&tz->lock); + + if (nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_ID, id) || + nla_put_string(msg, THERMAL_GENL_ATTR_TZ_GOV_NAME, + tz->governor->name)) + ret = -EMSGSIZE; + + mutex_unlock(&tz->lock); + + return ret; +} + +static int __thermal_genl_cmd_cdev_get(struct thermal_cooling_device *cdev, + void *data) +{ + struct sk_buff *msg = data; + + if (nla_put_u32(msg, THERMAL_GENL_ATTR_CDEV_ID, cdev->id)) + return -EMSGSIZE; + + if (nla_put_string(msg, THERMAL_GENL_ATTR_CDEV_NAME, cdev->type)) + return -EMSGSIZE; + + return 0; +} + +static int thermal_genl_cmd_cdev_get(struct param *p) +{ + struct sk_buff *msg = p->msg; + struct nlattr *start_cdev; + int ret; + + start_cdev = nla_nest_start(msg, THERMAL_GENL_ATTR_CDEV); + if (!start_cdev) + return -EMSGSIZE; + + ret = for_each_thermal_cooling_device(__thermal_genl_cmd_cdev_get, msg); + if (ret) + goto out_cancel_nest; + + nla_nest_end(msg, start_cdev); + + return 0; +out_cancel_nest: + nla_nest_cancel(msg, start_cdev); + + return ret; +} + +static cb_t cmd_cb[] = { + [THERMAL_GENL_CMD_TZ_GET_ID] = thermal_genl_cmd_tz_get_id, + [THERMAL_GENL_CMD_TZ_GET_TRIP] = thermal_genl_cmd_tz_get_trip, + [THERMAL_GENL_CMD_TZ_GET_TEMP] = thermal_genl_cmd_tz_get_temp, + [THERMAL_GENL_CMD_TZ_GET_GOV] = thermal_genl_cmd_tz_get_gov, + [THERMAL_GENL_CMD_CDEV_GET] = thermal_genl_cmd_cdev_get, +}; + +static int thermal_genl_cmd_dumpit(struct sk_buff *skb, + struct netlink_callback *cb) +{ + struct param p = { .msg = skb }; + const struct genl_dumpit_info *info = genl_dumpit_info(cb); + int cmd = info->ops->cmd; + int ret = -EMSGSIZE; + void *hdr; + + hdr = genlmsg_put(skb, 0, 0, &thermal_gnl_family, 0, cmd); + if (!hdr) + return -EMSGSIZE; + + ret = cmd_cb[cmd](&p); + if (ret) + goto out_cancel_msg; + + genlmsg_end(skb, hdr); + + return 0; + +out_cancel_msg: + genlmsg_cancel(skb, hdr); + + return ret; +} + +static int thermal_genl_cmd_doit(struct sk_buff *skb, + struct genl_info *info) +{ + struct param p = { .attrs = info->attrs }; + struct sk_buff *msg; + void *hdr; + int cmd = info->genlhdr->cmd; + int ret = -EMSGSIZE; + + msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + p.msg = msg; + + hdr = genlmsg_put_reply(msg, info, &thermal_gnl_family, 0, cmd); + if (!hdr) + goto out_free_msg; + + ret = cmd_cb[cmd](&p); + if (ret) + goto out_cancel_msg; + + genlmsg_end(msg, hdr); + + return genlmsg_reply(msg, info); + +out_cancel_msg: + genlmsg_cancel(msg, hdr); +out_free_msg: + nlmsg_free(msg); + + return ret; +} + +static const struct genl_ops 
thermal_genl_ops[] = { + { + .cmd = THERMAL_GENL_CMD_TZ_GET_ID, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + .dumpit = thermal_genl_cmd_dumpit, + }, + { + .cmd = THERMAL_GENL_CMD_TZ_GET_TRIP, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + .doit = thermal_genl_cmd_doit, + }, + { + .cmd = THERMAL_GENL_CMD_TZ_GET_TEMP, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + .doit = thermal_genl_cmd_doit, + }, + { + .cmd = THERMAL_GENL_CMD_TZ_GET_GOV, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + .doit = thermal_genl_cmd_doit, + }, + { + .cmd = THERMAL_GENL_CMD_CDEV_GET, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + .dumpit = thermal_genl_cmd_dumpit, + }, +}; + +static struct genl_family thermal_gnl_family __ro_after_init = { + .hdrsize = 0, + .name = THERMAL_GENL_FAMILY_NAME, + .version = THERMAL_GENL_VERSION, + .maxattr = THERMAL_GENL_ATTR_MAX, + .policy = thermal_genl_policy, + .ops = thermal_genl_ops, + .n_ops = ARRAY_SIZE(thermal_genl_ops), + .mcgrps = thermal_genl_mcgrps, + .n_mcgrps = ARRAY_SIZE(thermal_genl_mcgrps), +}; + +static int __init thermal_netlink_init(void) +{ + return genl_register_family(&thermal_gnl_family); +} +core_initcall(thermal_netlink_init); diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 108251f23e5c..42ef807e5d84 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -37,18 +37,6 @@ struct thermal_cooling_device; struct thermal_instance; struct thermal_attr; -enum thermal_device_mode { - THERMAL_DEVICE_DISABLED = 0, - THERMAL_DEVICE_ENABLED, -}; - -enum thermal_trip_type { - THERMAL_TRIP_ACTIVE = 0, - THERMAL_TRIP_PASSIVE, - THERMAL_TRIP_HOT, - THERMAL_TRIP_CRITICAL, -}; - enum thermal_trend { THERMAL_TREND_STABLE, /* temperature is stable */ THERMAL_TREND_RAISING, /* temperature is raising */ @@ -303,11 +291,6 @@ struct thermal_zone_params { int offset; }; -struct thermal_genl_event { - u32 orig; - enum events event; -}; - /** * struct thermal_zone_of_device_ops - scallbacks for handling DT based zones * diff --git a/include/uapi/linux/thermal.h b/include/uapi/linux/thermal.h index 96218378dda8..c105054cbb57 100644 --- a/include/uapi/linux/thermal.h +++ b/include/uapi/linux/thermal.h @@ -4,31 +4,86 @@ #define THERMAL_NAME_LENGTH 20 -/* Adding event notification support elements */ -#define THERMAL_GENL_FAMILY_NAME "thermal_event" -#define THERMAL_GENL_VERSION 0x01 -#define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_grp" - -/* Events supported by Thermal Netlink */ -enum events { - THERMAL_AUX0, - THERMAL_AUX1, - THERMAL_CRITICAL, - THERMAL_DEV_FAULT, +enum thermal_device_mode { + THERMAL_DEVICE_DISABLED = 0, + THERMAL_DEVICE_ENABLED, +}; + +enum thermal_trip_type { + THERMAL_TRIP_ACTIVE = 0, + THERMAL_TRIP_PASSIVE, + THERMAL_TRIP_HOT, + THERMAL_TRIP_CRITICAL, }; -/* attributes of thermal_genl_family */ -enum { +/* Adding event notification support elements */ +#define THERMAL_GENL_FAMILY_NAME "thermal" +#define THERMAL_GENL_VERSION 0x01 +#define THERMAL_GENL_SAMPLING_GROUP_NAME "sampling" +#define THERMAL_GENL_EVENT_GROUP_NAME "event" + +/* Attributes of thermal_genl_family */ +enum thermal_genl_attr { THERMAL_GENL_ATTR_UNSPEC, - THERMAL_GENL_ATTR_EVENT, + THERMAL_GENL_ATTR_TZ, + THERMAL_GENL_ATTR_TZ_ID, + THERMAL_GENL_ATTR_TZ_TEMP, + THERMAL_GENL_ATTR_TZ_TRIP, + THERMAL_GENL_ATTR_TZ_TRIP_ID, + THERMAL_GENL_ATTR_TZ_TRIP_TYPE, + THERMAL_GENL_ATTR_TZ_TRIP_TEMP, + THERMAL_GENL_ATTR_TZ_TRIP_HYST, + THERMAL_GENL_ATTR_TZ_MODE, + 
THERMAL_GENL_ATTR_TZ_NAME, + THERMAL_GENL_ATTR_TZ_CDEV_WEIGHT, + THERMAL_GENL_ATTR_TZ_GOV, + THERMAL_GENL_ATTR_TZ_GOV_NAME, + THERMAL_GENL_ATTR_CDEV, + THERMAL_GENL_ATTR_CDEV_ID, + THERMAL_GENL_ATTR_CDEV_CUR_STATE, + THERMAL_GENL_ATTR_CDEV_MAX_STATE, + THERMAL_GENL_ATTR_CDEV_NAME, + THERMAL_GENL_ATTR_GOV_NAME, + __THERMAL_GENL_ATTR_MAX, }; #define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1) -/* commands supported by the thermal_genl_family */ -enum { +enum thermal_genl_sampling { + THERMAL_GENL_SAMPLING_TEMP, + __THERMAL_GENL_SAMPLING_MAX, +}; +#define THERMAL_GENL_SAMPLING_MAX (__THERMAL_GENL_SAMPLING_MAX - 1) + +/* Events of thermal_genl_family */ +enum thermal_genl_event { + THERMAL_GENL_EVENT_UNSPEC, + THERMAL_GENL_EVENT_TZ_CREATE, /* Thermal zone creation */ + THERMAL_GENL_EVENT_TZ_DELETE, /* Thermal zone deletion */ + THERMAL_GENL_EVENT_TZ_DISABLE, /* Thermal zone disabed */ + THERMAL_GENL_EVENT_TZ_ENABLE, /* Thermal zone enabled */ + THERMAL_GENL_EVENT_TZ_TRIP_UP, /* Trip point crossed the way up */ + THERMAL_GENL_EVENT_TZ_TRIP_DOWN, /* Trip point crossed the way down */ + THERMAL_GENL_EVENT_TZ_TRIP_CHANGE, /* Trip point changed */ + THERMAL_GENL_EVENT_TZ_TRIP_ADD, /* Trip point added */ + THERMAL_GENL_EVENT_TZ_TRIP_DELETE, /* Trip point deleted */ + THERMAL_GENL_EVENT_CDEV_ADD, /* Cdev bound to the thermal zone */ + THERMAL_GENL_EVENT_CDEV_DELETE, /* Cdev unbound */ + THERMAL_GENL_EVENT_CDEV_STATE_UPDATE, /* Cdev state updated */ + THERMAL_GENL_EVENT_TZ_GOV_CHANGE, /* Governor policy changed */ + __THERMAL_GENL_EVENT_MAX, +}; +#define THERMAL_GENL_EVENT_MAX (__THERMAL_GENL_EVENT_MAX - 1) + +/* Commands supported by the thermal_genl_family */ +enum thermal_genl_cmd { THERMAL_GENL_CMD_UNSPEC, - THERMAL_GENL_CMD_EVENT, + THERMAL_GENL_CMD_TZ_GET_ID, /* List of thermal zones id */ + THERMAL_GENL_CMD_TZ_GET_TRIP, /* List of thermal trips */ + THERMAL_GENL_CMD_TZ_GET_TEMP, /* Get the thermal zone temperature */ + THERMAL_GENL_CMD_TZ_GET_GOV, /* Get the thermal zone governor */ + THERMAL_GENL_CMD_TZ_GET_MODE, /* Get the thermal zone mode */ + THERMAL_GENL_CMD_CDEV_GET, /* List of cdev id */ __THERMAL_GENL_CMD_MAX, }; #define THERMAL_GENL_CMD_MAX (__THERMAL_GENL_CMD_MAX - 1) -- cgit v1.2.3 From 49b020c1d236a36a4533e7db6d2604cb57ed4c51 Mon Sep 17 00:00:00 2001 From: Alain Michaud Date: Mon, 29 Jun 2020 16:11:00 +0000 Subject: Bluetooth: Adding a configurable autoconnect timeout This patch adds a configurable LE autoconnect timeout. 
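The timeout is exposed through the default system configuration as parameter 0x001b, carried as a 16-bit value in milliseconds and stored internally in jiffies; a minimal sketch (the function name and value are illustrative):

static void example_set_le_autoconnect_timeout(struct hci_dev *hdev, u16 timeout_ms)
{
	/* mgmt supplies milliseconds; hdev->def_le_autoconnect_timeout holds jiffies. */
	hdev->def_le_autoconnect_timeout = msecs_to_jiffies(timeout_ms);
}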
Signed-off-by: Alain Michaud Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci_core.h | 1 + net/bluetooth/hci_core.c | 1 + net/bluetooth/hci_event.c | 2 +- net/bluetooth/mgmt_config.c | 13 +++++++++++++ 4 files changed, 16 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 836dc997ff94..34ad5b207598 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -376,6 +376,7 @@ struct hci_dev { __u16 def_br_lsto; __u16 def_page_timeout; __u16 def_multi_adv_rotation_duration; + __u16 def_le_autoconnect_timeout; __u16 pkt_type; __u16 esco_type; diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 7959b851cc63..e6bf3d9f9d7a 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3569,6 +3569,7 @@ struct hci_dev *hci_alloc_dev(void) hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES; hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION; + hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT; hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT; diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index e060fc9ebb18..03a0759f2fc2 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -5315,7 +5315,7 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, } conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW, - HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER, + hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER, direct_rpa); if (!IS_ERR(conn)) { /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned diff --git a/net/bluetooth/mgmt_config.c b/net/bluetooth/mgmt_config.c index 8d01a8ff85e9..b30b571f8caf 100644 --- a/net/bluetooth/mgmt_config.c +++ b/net/bluetooth/mgmt_config.c @@ -17,6 +17,12 @@ { cpu_to_le16(hdev->_param_name_) } \ } +#define HDEV_PARAM_U16_JIFFIES_TO_MSECS(_param_code_, _param_name_) \ +{ \ + { cpu_to_le16(_param_code_), sizeof(__u16) }, \ + { cpu_to_le16(jiffies_to_msecs(hdev->_param_name_)) } \ +} + int read_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { @@ -59,6 +65,8 @@ int read_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data, HDEV_PARAM_U16(0x0018, le_conn_max_interval), HDEV_PARAM_U16(0x0019, le_conn_latency), HDEV_PARAM_U16(0x001a, le_supv_timeout), + HDEV_PARAM_U16_JIFFIES_TO_MSECS(0x001b, + def_le_autoconnect_timeout), }; struct mgmt_rp_read_def_system_config *rp = (void *)params; @@ -129,6 +137,7 @@ int set_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data, case 0x0018: case 0x0019: case 0x001a: + case 0x001b: if (len != sizeof(u16)) { bt_dev_warn(hdev, "invalid length %d, exp %zu for type %d", len, sizeof(u16), type); @@ -238,6 +247,10 @@ int set_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data, case 0x0001a: hdev->le_supv_timeout = TLV_GET_LE16(buffer); break; + case 0x0001b: + hdev->def_le_autoconnect_timeout = + msecs_to_jiffies(TLV_GET_LE16(buffer)); + break; default: bt_dev_warn(hdev, "unsupported parameter %u", type); break; -- cgit v1.2.3 From 38fd525a4c61e7ecdc9ad4dcbf7b767d0a007962 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Wed, 1 Jul 2020 07:30:06 -0500 Subject: exit: Factor thread_group_exited out of pidfd_poll Create an independent helper thread_group_exited which returns true when all threads have passed exit_notify in do_exit. 
AKA all of the threads are at least zombies and might be dead or completely gone. Create this helper by taking the logic out of pidfd_poll where it is already tested, and adding a READ_ONCE on the read of task->exit_state. I will be changing the user mode driver code to use this same logic to know when a user mode driver needs to be restarted. Place the new helper thread_group_exited in kernel/exit.c and EXPORT it so it can be used by modules. Link: https://lkml.kernel.org/r/20200702164140.4468-13-ebiederm@xmission.com Acked-by: Christian Brauner Acked-by: Alexei Starovoitov Tested-by: Alexei Starovoitov Signed-off-by: "Eric W. Biederman" --- include/linux/sched/signal.h | 2 ++ kernel/exit.c | 24 ++++++++++++++++++++++++ kernel/fork.c | 6 +----- 3 files changed, 27 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 0ee5e696c5d8..1bad18a1d8ba 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -674,6 +674,8 @@ static inline int thread_group_empty(struct task_struct *p) #define delay_group_leader(p) \ (thread_group_leader(p) && !thread_group_empty(p)) +extern bool thread_group_exited(struct pid *pid); + extern struct sighand_struct *__lock_task_sighand(struct task_struct *task, unsigned long *flags); diff --git a/kernel/exit.c b/kernel/exit.c index d3294b611df1..dee246c0866f 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -1713,6 +1713,30 @@ Efault: } #endif +/** + * thread_group_exited - check that a thread group has exited + * @pid: tgid of thread group to be checked. + * + * Test if the thread group represented by tgid has exited (all + * threads are zombies, dead or completely gone). + * + * Return: true if the thread group has exited. false otherwise. + */ +bool thread_group_exited(struct pid *pid) +{ + struct task_struct *task; + bool exited; + + rcu_read_lock(); + task = pid_task(pid, PIDTYPE_PID); + exited = !task || + (READ_ONCE(task->exit_state) && thread_group_empty(task)); + rcu_read_unlock(); + + return exited; +} +EXPORT_SYMBOL(thread_group_exited); + __weak void abort(void) { BUG(); diff --git a/kernel/fork.c b/kernel/fork.c index 142b23645d82..bf215af7a904 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1787,22 +1787,18 @@ static void pidfd_show_fdinfo(struct seq_file *m, struct file *f) */ static __poll_t pidfd_poll(struct file *file, struct poll_table_struct *pts) { - struct task_struct *task; struct pid *pid = file->private_data; __poll_t poll_flags = 0; poll_wait(file, &pid->wait_pidfd, pts); - rcu_read_lock(); - task = pid_task(pid, PIDTYPE_PID); /* * Inform pollers only when the whole thread group exits. * If the thread group leader exits before all other threads in the * group, then poll(2) should block, similar to the wait(2) family. */ - if (!task || (task->exit_state && thread_group_empty(task))) + if (thread_group_exited(pid)) poll_flags = EPOLLIN | EPOLLRDNORM; - rcu_read_unlock(); return poll_flags; } -- cgit v1.2.3 From e80eb1dc868bc1ed93602389d54b27f170ca770c Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 25 Jun 2020 17:23:22 -0500 Subject: bpfilter: Take advantage of the facilities of struct pid Instead of relying on the exit_umh cleanup callback use the fact a struct pid can be tested to see if a process still exists, and that struct pid has a wait queue that notifies when the process dies. 
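A minimal sketch of the resulting stop path (the function name is illustrative; the helpers and fields are the ones the patch below relies on):

static void example_stop_umh(struct umd_info *info)
{
	if (info->tgid) {
		kill_pid(info->tgid, SIGKILL, 1);
		/* struct pid's wait_pidfd queue wakes us once every thread has exited. */
		wait_event(info->tgid->wait_pidfd,
			   thread_group_exited(info->tgid));
		bpfilter_umh_cleanup(info);	/* fput() the pipes, put_pid(), clear tgid */
	}
}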
v1: https://lkml.kernel.org/r/87h7uydlu9.fsf_-_@x220.int.ebiederm.org v2: https://lkml.kernel.org/r/874kqt4owu.fsf_-_@x220.int.ebiederm.org Link: https://lkml.kernel.org/r/20200702164140.4468-14-ebiederm@xmission.com Reviewed-by: Greg Kroah-Hartman Acked-by: Alexei Starovoitov Tested-by: Alexei Starovoitov Signed-off-by: "Eric W. Biederman" --- include/linux/bpfilter.h | 3 ++- net/bpfilter/bpfilter_kern.c | 15 +++++---------- net/ipv4/bpfilter/sockopt.c | 15 ++++++++------- 3 files changed, 15 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h index ec9972d822e0..9b114c718a76 100644 --- a/include/linux/bpfilter.h +++ b/include/linux/bpfilter.h @@ -10,6 +10,8 @@ int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen); int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen); +void bpfilter_umh_cleanup(struct umd_info *info); + struct bpfilter_umh_ops { struct umd_info info; /* since ip_getsockopt() can run in parallel, serialize access to umh */ @@ -18,7 +20,6 @@ struct bpfilter_umh_ops { char __user *optval, unsigned int optlen, bool is_set); int (*start)(void); - bool stop; }; extern struct bpfilter_umh_ops bpfilter_ops; #endif diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c index 08ea77c2b137..9616fb7defeb 100644 --- a/net/bpfilter/bpfilter_kern.c +++ b/net/bpfilter/bpfilter_kern.c @@ -18,10 +18,11 @@ static void shutdown_umh(void) struct umd_info *info = &bpfilter_ops.info; struct pid *tgid = info->tgid; - if (bpfilter_ops.stop) - return; - - kill_pid(tgid, SIGKILL, 1); + if (tgid) { + kill_pid(tgid, SIGKILL, 1); + wait_event(tgid->wait_pidfd, thread_group_exited(tgid)); + bpfilter_umh_cleanup(info); + } } static void __stop_umh(void) @@ -77,7 +78,6 @@ static int start_umh(void) err = fork_usermode_driver(&bpfilter_ops.info); if (err) return err; - bpfilter_ops.stop = false; pr_info("Loaded bpfilter_umh pid %d\n", pid_nr(bpfilter_ops.info.tgid)); /* health check that usermode process started correctly */ @@ -100,16 +100,11 @@ static int __init load_umh(void) return err; mutex_lock(&bpfilter_ops.lock); - if (!bpfilter_ops.stop) { - err = -EFAULT; - goto out; - } err = start_umh(); if (!err && IS_ENABLED(CONFIG_INET)) { bpfilter_ops.sockopt = &__bpfilter_process_sockopt; bpfilter_ops.start = &start_umh; } -out: mutex_unlock(&bpfilter_ops.lock); if (err) umd_unload_blob(&bpfilter_ops.info); diff --git a/net/ipv4/bpfilter/sockopt.c b/net/ipv4/bpfilter/sockopt.c index 56cbc43145f6..9063c6767d34 100644 --- a/net/ipv4/bpfilter/sockopt.c +++ b/net/ipv4/bpfilter/sockopt.c @@ -12,16 +12,14 @@ struct bpfilter_umh_ops bpfilter_ops; EXPORT_SYMBOL_GPL(bpfilter_ops); -static void bpfilter_umh_cleanup(struct umd_info *info) +void bpfilter_umh_cleanup(struct umd_info *info) { - mutex_lock(&bpfilter_ops.lock); - bpfilter_ops.stop = true; fput(info->pipe_to_umh); fput(info->pipe_from_umh); put_pid(info->tgid); info->tgid = NULL; - mutex_unlock(&bpfilter_ops.lock); } +EXPORT_SYMBOL_GPL(bpfilter_umh_cleanup); static int bpfilter_mbox_request(struct sock *sk, int optname, char __user *optval, @@ -39,7 +37,11 @@ static int bpfilter_mbox_request(struct sock *sk, int optname, goto out; } } - if (bpfilter_ops.stop) { + if (bpfilter_ops.info.tgid && + thread_group_exited(bpfilter_ops.info.tgid)) + bpfilter_umh_cleanup(&bpfilter_ops.info); + + if (!bpfilter_ops.info.tgid) { err = bpfilter_ops.start(); if (err) goto out; @@ -70,9 +72,8 @@ 
int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, static int __init bpfilter_sockopt_init(void) { mutex_init(&bpfilter_ops.lock); - bpfilter_ops.stop = true; + bpfilter_ops.info.tgid = NULL; bpfilter_ops.info.driver_name = "bpfilter_umh"; - bpfilter_ops.info.cleanup = &bpfilter_umh_cleanup; return 0; } -- cgit v1.2.3 From 8c2f52663973e643c617663d826e2b0daa008b38 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 25 Jun 2020 17:40:40 -0500 Subject: umd: Remove exit_umh The bpfilter code no longer uses the umd_info.cleanup callback. This callback is what exit_umh exists to call. So remove exit_umh and all of it's associated booking. v1: https://lkml.kernel.org/r/87bll6dlte.fsf_-_@x220.int.ebiederm.org v2: https://lkml.kernel.org/r/87y2o53abg.fsf_-_@x220.int.ebiederm.org Link: https://lkml.kernel.org/r/20200702164140.4468-15-ebiederm@xmission.com Reviewed-by: Greg Kroah-Hartman Acked-by: Alexei Starovoitov Tested-by: Alexei Starovoitov Signed-off-by: "Eric W. Biederman" --- include/linux/sched.h | 1 - include/linux/usermode_driver.h | 16 ---------------- kernel/exit.c | 3 --- kernel/usermode_driver.c | 28 ---------------------------- 4 files changed, 48 deletions(-) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index 59d1e92bb88e..edb2020875ad 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1511,7 +1511,6 @@ extern struct pid *cad_pid; #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ -#define PF_UMH 0x02000000 /* I'm an Usermodehelper process */ #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */ #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ #define PF_MEMALLOC_NOCMA 0x10000000 /* All allocation request will have _GFP_MOVABLE cleared */ diff --git a/include/linux/usermode_driver.h b/include/linux/usermode_driver.h index 45adbffb31d9..073a9e0ec07d 100644 --- a/include/linux/usermode_driver.h +++ b/include/linux/usermode_driver.h @@ -4,26 +4,10 @@ #include #include -#ifdef CONFIG_BPFILTER -void __exit_umh(struct task_struct *tsk); - -static inline void exit_umh(struct task_struct *tsk) -{ - if (unlikely(tsk->flags & PF_UMH)) - __exit_umh(tsk); -} -#else -static inline void exit_umh(struct task_struct *tsk) -{ -} -#endif - struct umd_info { const char *driver_name; struct file *pipe_to_umh; struct file *pipe_from_umh; - struct list_head list; - void (*cleanup)(struct umd_info *info); struct path wd; struct pid *tgid; }; diff --git a/kernel/exit.c b/kernel/exit.c index dee246c0866f..39226a018ed7 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -63,7 +63,6 @@ #include #include #include -#include #include #include @@ -805,8 +804,6 @@ void __noreturn do_exit(long code) exit_task_namespaces(tsk); exit_task_work(tsk); exit_thread(tsk); - if (group_dead) - exit_umh(tsk); /* * Flush inherited counters to the parent - before the parent diff --git a/kernel/usermode_driver.c b/kernel/usermode_driver.c index f77f8d7ce9e3..cd136f86f799 100644 --- a/kernel/usermode_driver.c +++ b/kernel/usermode_driver.c @@ -9,9 +9,6 @@ #include #include -static LIST_HEAD(umh_list); -static DEFINE_MUTEX(umh_list_lock); - static struct vfsmount *blob_to_mnt(const void *data, size_t len, const char *name) { struct file_system_type *type; @@ -134,7 +131,6 @@ static int umd_setup(struct subprocess_info *info, struct cred *new) 
umd_info->pipe_to_umh = to_umh[1]; umd_info->pipe_from_umh = from_umh[0]; umd_info->tgid = get_pid(task_tgid(current)); - current->flags |= PF_UMH; return 0; } @@ -182,11 +178,6 @@ int fork_usermode_driver(struct umd_info *info) goto out; err = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC); - if (!err) { - mutex_lock(&umh_list_lock); - list_add(&info->list, &umh_list); - mutex_unlock(&umh_list_lock); - } out: if (argv) argv_free(argv); @@ -194,23 +185,4 @@ out: } EXPORT_SYMBOL_GPL(fork_usermode_driver); -void __exit_umh(struct task_struct *tsk) -{ - struct umd_info *info; - struct pid *tgid = task_tgid(tsk); - - mutex_lock(&umh_list_lock); - list_for_each_entry(info, &umh_list, list) { - if (info->tgid == tgid) { - list_del(&info->list); - mutex_unlock(&umh_list_lock); - goto out; - } - } - mutex_unlock(&umh_list_lock); - return; -out: - if (info->cleanup) - info->cleanup(info); -} -- cgit v1.2.3 From 6d4ebd565d15f374aab3d0f16f156bafec3c61b3 Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Tue, 30 Jun 2020 07:57:03 +0300 Subject: iio: core: wrap IIO device into an iio_dev_opaque object There are plenty of bad designs we want to discourage or not have to review manually usually about accessing private (marked as [INTERN]) fields of 'struct iio_dev'. Sometimes users copy drivers that are not always the best examples. A better idea is to hide those fields into the framework. For 'struct iio_dev' this is a 'struct iio_dev_opaque' which wraps a public 'struct iio_dev' object. In the next series, some fields will be moved to this new struct, each with it's own rework. This rework will not be complete-able for a while, as many fields need some drivers to be reworked in order to finalize them (e.g. 'indio_dev->mlock'). But some fields can already be moved, and in time, all of them may get there (in the 'struct iio_dev_opaque' object). Since a lot of drivers also call 'iio_priv()', in order to preserve fast-paths (where this matters), the public iio_dev object will have a 'priv' field that will have the pointer to the private information already computed. The reference returned by this field should be guaranteed to be cacheline aligned. The opaque parts will be moved into the 'include/linux/iio/iio-opaque.h' header. Should the hidden information be required for some debugging or some special needs, it can be made available via this header. Otherwise, only the IIO core files should include this file. 
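A small sketch of the resulting split (driver-side names are made up; the allocation and accessor are the ones this patch reworks):

/* Driver side: the API is unchanged, iio_priv() now simply returns indio_dev->priv. */
struct my_drv_state { int chan; };	/* illustrative private state */

static int example_probe(struct device *parent)
{
	struct iio_dev *indio_dev;
	struct my_drv_state *st;

	indio_dev = iio_device_alloc(parent, sizeof(*st));
	if (!indio_dev)
		return -ENOMEM;

	st = iio_priv(indio_dev);	/* cacheline-aligned area placed after struct iio_dev_opaque */
	st->chan = 0;

	/* Core-only code reaches the wrapper via to_iio_dev_opaque(indio_dev). */
	return 0;
}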
Signed-off-by: Alexandru Ardelean Signed-off-by: Jonathan Cameron --- drivers/iio/industrialio-core.c | 19 +++++++++++++------ include/linux/iio/iio-opaque.h | 17 +++++++++++++++++ include/linux/iio/iio.h | 6 +++++- 3 files changed, 35 insertions(+), 7 deletions(-) create mode 100644 include/linux/iio/iio-opaque.h (limited to 'include') diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 75661661aaba..33e2953cf021 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -25,6 +25,7 @@ #include #include #include +#include #include "iio_core.h" #include "iio_core_trigger.h" #include @@ -1473,6 +1474,8 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev) static void iio_dev_release(struct device *device) { struct iio_dev *indio_dev = dev_to_iio_dev(device); + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); + if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES) iio_device_unregister_trigger_consumer(indio_dev); iio_device_unregister_eventset(indio_dev); @@ -1481,7 +1484,7 @@ static void iio_dev_release(struct device *device) iio_buffer_put(indio_dev->buffer); ida_simple_remove(&iio_ida, indio_dev->id); - kfree(indio_dev); + kfree(iio_dev_opaque); } struct device_type iio_device_type = { @@ -1495,10 +1498,11 @@ struct device_type iio_device_type = { **/ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv) { + struct iio_dev_opaque *iio_dev_opaque; struct iio_dev *dev; size_t alloc_size; - alloc_size = sizeof(struct iio_dev); + alloc_size = sizeof(struct iio_dev_opaque); if (sizeof_priv) { alloc_size = ALIGN(alloc_size, IIO_ALIGN); alloc_size += sizeof_priv; @@ -1506,11 +1510,14 @@ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv) /* ensure 32-byte alignment of whole construct ? */ alloc_size += IIO_ALIGN - 1; - dev = kzalloc(alloc_size, GFP_KERNEL); - if (!dev) + iio_dev_opaque = kzalloc(alloc_size, GFP_KERNEL); + if (!iio_dev_opaque) return NULL; - dev->dev.parent = parent; + dev = &iio_dev_opaque->indio_dev; + dev->priv = (char *)iio_dev_opaque + + ALIGN(sizeof(struct iio_dev_opaque), IIO_ALIGN); + dev->dev.groups = dev->groups; dev->dev.type = &iio_device_type; dev->dev.bus = &iio_bus_type; @@ -1524,7 +1531,7 @@ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv) if (dev->id < 0) { /* cannot use a dev_err as the name isn't available */ pr_err("failed to get device id\n"); - kfree(dev); + kfree(iio_dev_opaque); return NULL; } dev_set_name(&dev->dev, "iio:device%d", dev->id); diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h new file mode 100644 index 000000000000..1375674f14cd --- /dev/null +++ b/include/linux/iio/iio-opaque.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _INDUSTRIAL_IO_OPAQUE_H_ +#define _INDUSTRIAL_IO_OPAQUE_H_ + +/** + * struct iio_dev_opaque - industrial I/O device opaque information + * @indio_dev: public industrial I/O device information + */ +struct iio_dev_opaque { + struct iio_dev indio_dev; +}; + +#define to_iio_dev_opaque(indio_dev) \ + container_of(indio_dev, struct iio_dev_opaque, indio_dev) + +#endif diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 1c1d02107722..c2f6d9ef213a 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -522,6 +522,8 @@ struct iio_buffer_setup_ops { * @flags: [INTERN] file ops related flags including busy flag. * @debugfs_dentry: [INTERN] device specific debugfs dentry. 
* @cached_reg_addr: [INTERN] cached register address for debugfs reads. + * @priv: [DRIVER] reference to driver's private information + * **MUST** be accessed **ONLY** via iio_priv() helper */ struct iio_dev { int id; @@ -571,6 +573,7 @@ struct iio_dev { char read_buf[20]; unsigned int read_buf_len; #endif + void *priv; }; const struct iio_chan_spec @@ -698,9 +701,10 @@ static inline void *iio_device_get_drvdata(const struct iio_dev *indio_dev) #define IIO_ALIGN L1_CACHE_BYTES struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv); +/* The information at the returned address is guaranteed to be cacheline aligned */ static inline void *iio_priv(const struct iio_dev *indio_dev) { - return (char *)indio_dev + ALIGN(sizeof(struct iio_dev), IIO_ALIGN); + return indio_dev->priv; } static inline struct iio_dev *iio_priv_to_dev(void *priv) -- cgit v1.2.3 From 96fb1b67422ed652e3217a140cf9be505041db07 Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Tue, 30 Jun 2020 07:57:05 +0300 Subject: iio: core: move debugfs data on the private iio dev info This change moves all iio_dev debugfs fields to the iio_dev_priv object. It's not the biggest advantage yet (to the whole thing of abstractization) but it's a start. The iio_get_debugfs_dentry() function (which is moved in industrialio-core.c) needs to also be guarded against the CONFIG_DEBUG_FS symbol, when it isn't defined. We do want to keep the inline definition in the iio.h header, so that the compiler can better infer when to compile out debugfs code that is related to the IIO debugfs directory. Signed-off-by: Alexandru Ardelean Signed-off-by: Jonathan Cameron --- drivers/iio/industrialio-core.c | 46 +++++++++++++++++++++++++++++------------ include/linux/iio/iio-opaque.h | 10 +++++++++ include/linux/iio/iio.h | 13 +----------- 3 files changed, 44 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 27005ba4d09c..bb2771974404 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -165,6 +165,19 @@ static const char * const iio_chan_info_postfix[] = { [IIO_CHAN_INFO_THERMOCOUPLE_TYPE] = "thermocouple_type", }; +#if defined(CONFIG_DEBUG_FS) +/** + * There's also a CONFIG_DEBUG_FS guard in include/linux/iio/iio.h for + * iio_get_debugfs_dentry() to make it inline if CONFIG_DEBUG_FS is undefined + */ +struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev) +{ + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); + return iio_dev_opaque->debugfs_dentry; +} +EXPORT_SYMBOL_GPL(iio_get_debugfs_dentry); +#endif + /** * iio_find_channel_from_si() - get channel from its scan index * @indio_dev: device @@ -308,35 +321,37 @@ static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct iio_dev *indio_dev = file->private_data; + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); unsigned val = 0; int ret; if (*ppos > 0) return simple_read_from_buffer(userbuf, count, ppos, - indio_dev->read_buf, - indio_dev->read_buf_len); + iio_dev_opaque->read_buf, + iio_dev_opaque->read_buf_len); ret = indio_dev->info->debugfs_reg_access(indio_dev, - indio_dev->cached_reg_addr, + iio_dev_opaque->cached_reg_addr, 0, &val); if (ret) { dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__); return ret; } - indio_dev->read_buf_len = snprintf(indio_dev->read_buf, - sizeof(indio_dev->read_buf), - "0x%X\n", val); + iio_dev_opaque->read_buf_len = 
snprintf(iio_dev_opaque->read_buf, + sizeof(iio_dev_opaque->read_buf), + "0x%X\n", val); return simple_read_from_buffer(userbuf, count, ppos, - indio_dev->read_buf, - indio_dev->read_buf_len); + iio_dev_opaque->read_buf, + iio_dev_opaque->read_buf_len); } static ssize_t iio_debugfs_write_reg(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { struct iio_dev *indio_dev = file->private_data; + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); unsigned reg, val; char buf[80]; int ret; @@ -351,10 +366,10 @@ static ssize_t iio_debugfs_write_reg(struct file *file, switch (ret) { case 1: - indio_dev->cached_reg_addr = reg; + iio_dev_opaque->cached_reg_addr = reg; break; case 2: - indio_dev->cached_reg_addr = reg; + iio_dev_opaque->cached_reg_addr = reg; ret = indio_dev->info->debugfs_reg_access(indio_dev, reg, val, NULL); if (ret) { @@ -378,23 +393,28 @@ static const struct file_operations iio_debugfs_reg_fops = { static void iio_device_unregister_debugfs(struct iio_dev *indio_dev) { - debugfs_remove_recursive(indio_dev->debugfs_dentry); + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); + debugfs_remove_recursive(iio_dev_opaque->debugfs_dentry); } static void iio_device_register_debugfs(struct iio_dev *indio_dev) { + struct iio_dev_opaque *iio_dev_opaque; + if (indio_dev->info->debugfs_reg_access == NULL) return; if (!iio_debugfs_dentry) return; - indio_dev->debugfs_dentry = + iio_dev_opaque = to_iio_dev_opaque(indio_dev); + + iio_dev_opaque->debugfs_dentry = debugfs_create_dir(dev_name(&indio_dev->dev), iio_debugfs_dentry); debugfs_create_file("direct_reg_access", 0644, - indio_dev->debugfs_dentry, indio_dev, + iio_dev_opaque->debugfs_dentry, indio_dev, &iio_debugfs_reg_fops); } #else diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h index 1375674f14cd..b3f234b4c1e9 100644 --- a/include/linux/iio/iio-opaque.h +++ b/include/linux/iio/iio-opaque.h @@ -6,9 +6,19 @@ /** * struct iio_dev_opaque - industrial I/O device opaque information * @indio_dev: public industrial I/O device information + * @debugfs_dentry: device specific debugfs dentry + * @cached_reg_addr: cached register address for debugfs reads + * @read_buf: read buffer to be used for the initial reg read + * @read_buf_len: data length in @read_buf */ struct iio_dev_opaque { struct iio_dev indio_dev; +#if defined(CONFIG_DEBUG_FS) + struct dentry *debugfs_dentry; + unsigned cached_reg_addr; + char read_buf[20]; + unsigned int read_buf_len; +#endif }; #define to_iio_dev_opaque(indio_dev) \ diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index c2f6d9ef213a..36915282269b 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -520,8 +520,6 @@ struct iio_buffer_setup_ops { * @groups: [INTERN] attribute groups * @groupcounter: [INTERN] index of next attribute group * @flags: [INTERN] file ops related flags including busy flag. - * @debugfs_dentry: [INTERN] device specific debugfs dentry. - * @cached_reg_addr: [INTERN] cached register address for debugfs reads. 
* @priv: [DRIVER] reference to driver's private information * **MUST** be accessed **ONLY** via iio_priv() helper */ @@ -567,12 +565,6 @@ struct iio_dev { int groupcounter; unsigned long flags; -#if defined(CONFIG_DEBUG_FS) - struct dentry *debugfs_dentry; - unsigned cached_reg_addr; - char read_buf[20]; - unsigned int read_buf_len; -#endif void *priv; }; @@ -733,10 +725,7 @@ static inline bool iio_buffer_enabled(struct iio_dev *indio_dev) * @indio_dev: IIO device structure for device **/ #if defined(CONFIG_DEBUG_FS) -static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev) -{ - return indio_dev->debugfs_dentry; -} +struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev); #else static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev) { -- cgit v1.2.3 From 207c2d27a010c0154691833960756b60816fe59d Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Tue, 30 Jun 2020 07:57:06 +0300 Subject: iio: core: move channel list & group to private iio device object This change is fairly straightforward and simple, since the 'channel_attr_list' & 'chan_attr_group' fields are only used in 'industrialio-core.c'. This change moves them to the private IIO device object. Signed-off-by: Alexandru Ardelean Signed-off-by: Jonathan Cameron --- drivers/iio/industrialio-core.c | 46 +++++++++++++++++++++++------------- include/linux/iio/iio-opaque.h | 5 +++++ include/linux/iio/iio.h | 5 ----- 3 files changed, 31 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index bb2771974404..1629559e680e 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -1137,6 +1137,7 @@ static int iio_device_add_info_mask_type(struct iio_dev *indio_dev, enum iio_shared_by shared_by, const long *infomask) { + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); int i, ret, attrcount = 0; for_each_set_bit(i, infomask, sizeof(*infomask)*8) { @@ -1149,7 +1150,7 @@ static int iio_device_add_info_mask_type(struct iio_dev *indio_dev, i, shared_by, &indio_dev->dev, - &indio_dev->channel_attr_list); + &iio_dev_opaque->channel_attr_list); if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE)) continue; else if (ret < 0) @@ -1165,6 +1166,7 @@ static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev, enum iio_shared_by shared_by, const long *infomask) { + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); int i, ret, attrcount = 0; char *avail_postfix; @@ -1184,7 +1186,7 @@ static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev, i, shared_by, &indio_dev->dev, - &indio_dev->channel_attr_list); + &iio_dev_opaque->channel_attr_list); kfree(avail_postfix); if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE)) continue; @@ -1199,6 +1201,7 @@ static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev, static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev, struct iio_chan_spec const *chan) { + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); int ret, attrcount = 0; const struct iio_chan_spec_ext_info *ext_info; @@ -1274,7 +1277,7 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev, i, ext_info->shared, &indio_dev->dev, - &indio_dev->channel_attr_list); + &iio_dev_opaque->channel_attr_list); i++; if (ret == -EBUSY && ext_info->shared) continue; @@ -1409,6 +1412,7 @@ static DEVICE_ATTR(current_timestamp_clock, S_IRUGO | S_IWUSR, static int
iio_device_register_sysfs(struct iio_dev *indio_dev) { + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); int i, ret = 0, attrcount, attrn, attrcount_orig = 0; struct iio_dev_attr *p; struct attribute **attr, *clk = NULL; @@ -1448,47 +1452,49 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev) if (clk) attrcount++; - indio_dev->chan_attr_group.attrs = kcalloc(attrcount + 1, - sizeof(indio_dev->chan_attr_group.attrs[0]), - GFP_KERNEL); - if (indio_dev->chan_attr_group.attrs == NULL) { + iio_dev_opaque->chan_attr_group.attrs = + kcalloc(attrcount + 1, + sizeof(iio_dev_opaque->chan_attr_group.attrs[0]), + GFP_KERNEL); + if (iio_dev_opaque->chan_attr_group.attrs == NULL) { ret = -ENOMEM; goto error_clear_attrs; } /* Copy across original attributes */ if (indio_dev->info->attrs) - memcpy(indio_dev->chan_attr_group.attrs, + memcpy(iio_dev_opaque->chan_attr_group.attrs, indio_dev->info->attrs->attrs, - sizeof(indio_dev->chan_attr_group.attrs[0]) + sizeof(iio_dev_opaque->chan_attr_group.attrs[0]) *attrcount_orig); attrn = attrcount_orig; /* Add all elements from the list. */ - list_for_each_entry(p, &indio_dev->channel_attr_list, l) - indio_dev->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr; + list_for_each_entry(p, &iio_dev_opaque->channel_attr_list, l) + iio_dev_opaque->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr; if (indio_dev->name) - indio_dev->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr; + iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr; if (indio_dev->label) - indio_dev->chan_attr_group.attrs[attrn++] = &dev_attr_label.attr; + iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_label.attr; if (clk) - indio_dev->chan_attr_group.attrs[attrn++] = clk; + iio_dev_opaque->chan_attr_group.attrs[attrn++] = clk; indio_dev->groups[indio_dev->groupcounter++] = - &indio_dev->chan_attr_group; + &iio_dev_opaque->chan_attr_group; return 0; error_clear_attrs: - iio_free_chan_devattr_list(&indio_dev->channel_attr_list); + iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list); return ret; } static void iio_device_unregister_sysfs(struct iio_dev *indio_dev) { + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); - iio_free_chan_devattr_list(&indio_dev->channel_attr_list); - kfree(indio_dev->chan_attr_group.attrs); - indio_dev->chan_attr_group.attrs = NULL; + iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list); + kfree(iio_dev_opaque->chan_attr_group.attrs); + iio_dev_opaque->chan_attr_group.attrs = NULL; } static void iio_dev_release(struct device *device) @@ -1543,7 +1549,7 @@ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv) dev_set_drvdata(&dev->dev, (void *)dev); mutex_init(&dev->mlock); mutex_init(&dev->info_exist_lock); - INIT_LIST_HEAD(&dev->channel_attr_list); + INIT_LIST_HEAD(&iio_dev_opaque->channel_attr_list); dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL); if (dev->id < 0) { diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h index b3f234b4c1e9..9419a05c698d 100644 --- a/include/linux/iio/iio-opaque.h +++ b/include/linux/iio/iio-opaque.h @@ -6,6 +6,9 @@ /** * struct iio_dev_opaque - industrial I/O device opaque information * @indio_dev: public industrial I/O device information + * @channel_attr_list: keep track of automatically created channel + * attributes + * @chan_attr_group: group for all attrs in base directory * @debugfs_dentry: device specific debugfs dentry * @cached_reg_addr: cached register address for debugfs 
reads * @read_buf: read buffer to be used for the initial reg read @@ -13,6 +16,8 @@ */ struct iio_dev_opaque { struct iio_dev indio_dev; + struct list_head channel_attr_list; + struct attribute_group chan_attr_group; #if defined(CONFIG_DEBUG_FS) struct dentry *debugfs_dentry; unsigned cached_reg_addr; diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 36915282269b..61c4bf9c03ec 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -506,9 +506,6 @@ struct iio_buffer_setup_ops { * @pollfunc_event: [DRIVER] function run on events trigger being received * @channels: [DRIVER] channel specification structure table * @num_channels: [DRIVER] number of channels specified in @channels. - * @channel_attr_list: [INTERN] keep track of automatically created channel - * attributes - * @chan_attr_group: [INTERN] group for all attrs in base directory * @name: [DRIVER] name of the device. * @label: [DRIVER] unique name to identify which device this is * @info: [DRIVER] callbacks and constant info from driver @@ -551,8 +548,6 @@ struct iio_dev { struct iio_chan_spec const *channels; int num_channels; - struct list_head channel_attr_list; - struct attribute_group chan_attr_group; const char *name; const char *label; const struct iio_info *info; -- cgit v1.2.3 From 6a8c6b26f7531a85803380911c7c4a05a639afbe Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Tue, 30 Jun 2020 07:57:07 +0300 Subject: iio: core: move iio_dev's buffer_list to the private iio device object This change moves the 'buffer_list' away from the public IIO device object into the private part. Signed-off-by: Alexandru Ardelean Signed-off-by: Jonathan Cameron --- drivers/iio/industrialio-buffer.c | 38 ++++++++++++++++++++++++-------------- drivers/iio/industrialio-core.c | 2 +- include/linux/iio/iio-opaque.h | 2 ++ include/linux/iio/iio.h | 2 -- 4 files changed, 27 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index 329dd4d6757a..2aec8b85f40d 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -19,6 +19,7 @@ #include #include +#include #include "iio_core.h" #include "iio_core_trigger.h" #include @@ -599,8 +600,10 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev, static void iio_buffer_activate(struct iio_dev *indio_dev, struct iio_buffer *buffer) { + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); + iio_buffer_get(buffer); - list_add(&buffer->buffer_list, &indio_dev->buffer_list); + list_add(&buffer->buffer_list, &iio_dev_opaque->buffer_list); } static void iio_buffer_deactivate(struct iio_buffer *buffer) @@ -612,10 +615,11 @@ static void iio_buffer_deactivate(struct iio_buffer *buffer) static void iio_buffer_deactivate_all(struct iio_dev *indio_dev) { + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); struct iio_buffer *buffer, *_buffer; list_for_each_entry_safe(buffer, _buffer, - &indio_dev->buffer_list, buffer_list) + &iio_dev_opaque->buffer_list, buffer_list) iio_buffer_deactivate(buffer); } @@ -688,6 +692,7 @@ static int iio_verify_update(struct iio_dev *indio_dev, struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer, struct iio_device_config *config) { + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); unsigned long *compound_mask; const unsigned long *scan_mask; bool strict_scanmask = false; @@ -710,12 +715,12 @@ static int iio_verify_update(struct iio_dev *indio_dev, * to verify. 
*/ if (remove_buffer && !insert_buffer && - list_is_singular(&indio_dev->buffer_list)) + list_is_singular(&iio_dev_opaque->buffer_list)) return 0; modes = indio_dev->modes; - list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) { + list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { if (buffer == remove_buffer) continue; modes &= buffer->access->modes; @@ -736,7 +741,7 @@ static int iio_verify_update(struct iio_dev *indio_dev, * Keep things simple for now and only allow a single buffer to * be connected in hardware mode. */ - if (insert_buffer && !list_empty(&indio_dev->buffer_list)) + if (insert_buffer && !list_empty(&iio_dev_opaque->buffer_list)) return -EINVAL; config->mode = INDIO_BUFFER_HARDWARE; strict_scanmask = true; @@ -756,7 +761,7 @@ static int iio_verify_update(struct iio_dev *indio_dev, scan_timestamp = false; - list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) { + list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { if (buffer == remove_buffer) continue; bitmap_or(compound_mask, compound_mask, buffer->scan_mask, @@ -902,10 +907,11 @@ error_clear_mux_table: static int iio_update_demux(struct iio_dev *indio_dev) { + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); struct iio_buffer *buffer; int ret; - list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) { + list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { ret = iio_buffer_update_demux(indio_dev, buffer); if (ret < 0) goto error_clear_mux_table; @@ -913,7 +919,7 @@ static int iio_update_demux(struct iio_dev *indio_dev) return 0; error_clear_mux_table: - list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) + list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) iio_buffer_demux_free(buffer); return ret; @@ -922,6 +928,7 @@ error_clear_mux_table: static int iio_enable_buffers(struct iio_dev *indio_dev, struct iio_device_config *config) { + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); struct iio_buffer *buffer; int ret; @@ -958,7 +965,7 @@ static int iio_enable_buffers(struct iio_dev *indio_dev, indio_dev->info->hwfifo_set_watermark(indio_dev, config->watermark); - list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) { + list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { ret = iio_buffer_enable(buffer, indio_dev); if (ret) goto err_disable_buffers; @@ -983,7 +990,7 @@ static int iio_enable_buffers(struct iio_dev *indio_dev, return 0; err_disable_buffers: - list_for_each_entry_continue_reverse(buffer, &indio_dev->buffer_list, + list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list, buffer_list) iio_buffer_disable(buffer, indio_dev); err_run_postdisable: @@ -998,12 +1005,13 @@ err_undo_config: static int iio_disable_buffers(struct iio_dev *indio_dev) { + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); struct iio_buffer *buffer; int ret = 0; int ret2; /* Wind down existing buffers - iff there are any */ - if (list_empty(&indio_dev->buffer_list)) + if (list_empty(&iio_dev_opaque->buffer_list)) return 0; if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) { @@ -1024,7 +1032,7 @@ static int iio_disable_buffers(struct iio_dev *indio_dev) ret = ret2; } - list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) { + list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { ret2 = iio_buffer_disable(buffer, indio_dev); if (ret2 && !ret) ret = ret2; @@ -1047,6 
+1055,7 @@ static int __iio_update_buffers(struct iio_dev *indio_dev, struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer) { + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); struct iio_device_config new_config; int ret; @@ -1071,7 +1080,7 @@ static int __iio_update_buffers(struct iio_dev *indio_dev, iio_buffer_activate(indio_dev, insert_buffer); /* If no buffers in list, we are done */ - if (list_empty(&indio_dev->buffer_list)) + if (list_empty(&iio_dev_opaque->buffer_list)) return 0; ret = iio_enable_buffers(indio_dev, &new_config); @@ -1420,10 +1429,11 @@ static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data) */ int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data) { + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); int ret; struct iio_buffer *buf; - list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) { + list_for_each_entry(buf, &iio_dev_opaque->buffer_list, buffer_list) { ret = iio_push_to_buffer(buf, data); if (ret < 0) return ret; diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 1629559e680e..fab5720a7901 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -1559,7 +1559,7 @@ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv) return NULL; } dev_set_name(&dev->dev, "iio:device%d", dev->id); - INIT_LIST_HEAD(&dev->buffer_list); + INIT_LIST_HEAD(&iio_dev_opaque->buffer_list); return dev; } diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h index 9419a05c698d..af6c69a40169 100644 --- a/include/linux/iio/iio-opaque.h +++ b/include/linux/iio/iio-opaque.h @@ -6,6 +6,7 @@ /** * struct iio_dev_opaque - industrial I/O device opaque information * @indio_dev: public industrial I/O device information + * @buffer_list: list of all buffers currently attached * @channel_attr_list: keep track of automatically created channel * attributes * @chan_attr_group: group for all attrs in base directory @@ -16,6 +17,7 @@ */ struct iio_dev_opaque { struct iio_dev indio_dev; + struct list_head buffer_list; struct list_head channel_attr_list; struct attribute_group chan_attr_group; #if defined(CONFIG_DEBUG_FS) diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 61c4bf9c03ec..a0110b1e29fe 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -490,7 +490,6 @@ struct iio_buffer_setup_ops { * and owner * @event_interface: [INTERN] event chrdevs associated with interrupt lines * @buffer: [DRIVER] any buffer present - * @buffer_list: [INTERN] list of all buffers currently attached * @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux * @mlock: [INTERN] lock used to prevent simultaneous device state * changes @@ -531,7 +530,6 @@ struct iio_dev { struct iio_event_interface *event_interface; struct iio_buffer *buffer; - struct list_head buffer_list; int scan_bytes; struct mutex mlock; -- cgit v1.2.3 From fa83c3baa539a7ec734c7ee65fad499122f427d7 Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Tue, 30 Jun 2020 07:57:08 +0300 Subject: iio: core: move event interface on the opaque struct Same as with other private fields, this moves the event interface reference to the opaque IIO device object, to be invisible to drivers. 
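As an aside (not part of the patch), code that needs one of the relocated fields is expected to follow the same pattern used throughout this series: convert the public iio_dev pointer to its opaque container and dereference the private member there. A minimal sketch, assuming to_iio_dev_opaque() remains the container_of() based helper from iio-opaque.h and using the hypothetical function name example_wakeup() purely for illustration:

static void example_wakeup(struct iio_dev *indio_dev)
{
	/* The private members are only reachable through the opaque object */
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	if (iio_dev_opaque->event_interface == NULL)
		return;

	wake_up(&iio_dev_opaque->event_interface->wait);
}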
Signed-off-by: Alexandru Ardelean Signed-off-by: Jonathan Cameron --- drivers/iio/industrialio-core.c | 5 +-- drivers/iio/industrialio-event.c | 68 +++++++++++++++++++++++----------------- include/linux/iio/iio-opaque.h | 2 ++ include/linux/iio/iio.h | 3 -- 4 files changed, 45 insertions(+), 33 deletions(-) (limited to 'include') diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index fab5720a7901..ef5e23c33c9d 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -211,7 +211,8 @@ EXPORT_SYMBOL(iio_read_const_attr); int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id) { int ret; - const struct iio_event_interface *ev_int = indio_dev->event_interface; + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); + const struct iio_event_interface *ev_int = iio_dev_opaque->event_interface; ret = mutex_lock_interruptible(&indio_dev->mlock); if (ret) @@ -1442,7 +1443,7 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev) attrcount += ret; } - if (indio_dev->event_interface) + if (iio_dev_opaque->event_interface) clk = &dev_attr_current_timestamp_clock.attr; if (indio_dev->name) diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c index 5b17c92d3b50..2ab4d4c44427 100644 --- a/drivers/iio/industrialio-event.c +++ b/drivers/iio/industrialio-event.c @@ -18,6 +18,7 @@ #include #include #include +#include #include "iio_core.h" #include #include @@ -62,7 +63,8 @@ bool iio_event_enabled(const struct iio_event_interface *ev_int) **/ int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp) { - struct iio_event_interface *ev_int = indio_dev->event_interface; + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); + struct iio_event_interface *ev_int = iio_dev_opaque->event_interface; struct iio_event_data ev; int copied; @@ -96,7 +98,8 @@ static __poll_t iio_event_poll(struct file *filep, struct poll_table_struct *wait) { struct iio_dev *indio_dev = filep->private_data; - struct iio_event_interface *ev_int = indio_dev->event_interface; + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); + struct iio_event_interface *ev_int = iio_dev_opaque->event_interface; __poll_t events = 0; if (!indio_dev->info) @@ -116,7 +119,8 @@ static ssize_t iio_event_chrdev_read(struct file *filep, loff_t *f_ps) { struct iio_dev *indio_dev = filep->private_data; - struct iio_event_interface *ev_int = indio_dev->event_interface; + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); + struct iio_event_interface *ev_int = iio_dev_opaque->event_interface; unsigned int copied; int ret; @@ -165,7 +169,8 @@ static ssize_t iio_event_chrdev_read(struct file *filep, static int iio_event_chrdev_release(struct inode *inode, struct file *filep) { struct iio_dev *indio_dev = filep->private_data; - struct iio_event_interface *ev_int = indio_dev->event_interface; + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); + struct iio_event_interface *ev_int = iio_dev_opaque->event_interface; clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags); @@ -184,7 +189,8 @@ static const struct file_operations iio_event_chrdev_fileops = { int iio_event_getfd(struct iio_dev *indio_dev) { - struct iio_event_interface *ev_int = indio_dev->event_interface; + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); + struct iio_event_interface *ev_int = iio_dev_opaque->event_interface; int fd; if (ev_int == NULL) @@ -343,6 +349,7 @@ static int 
iio_device_add_event(struct iio_dev *indio_dev, enum iio_event_type type, enum iio_event_direction dir, enum iio_shared_by shared_by, const unsigned long *mask) { + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); ssize_t (*show)(struct device *, struct device_attribute *, char *); ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t); @@ -376,7 +383,7 @@ static int iio_device_add_event(struct iio_dev *indio_dev, ret = __iio_add_chan_devattr(postfix, chan, show, store, (i << 16) | spec_index, shared_by, &indio_dev->dev, - &indio_dev->event_interface->dev_attr_list); + &iio_dev_opaque->event_interface->dev_attr_list); kfree(postfix); if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE)) @@ -469,6 +476,7 @@ static void iio_setup_ev_int(struct iio_event_interface *ev_int) static const char *iio_event_group_name = "events"; int iio_device_register_eventset(struct iio_dev *indio_dev) { + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); struct iio_dev_attr *p; int ret = 0, attrcount_orig = 0, attrcount, attrn; struct attribute **attr; @@ -477,14 +485,14 @@ int iio_device_register_eventset(struct iio_dev *indio_dev) iio_check_for_dynamic_events(indio_dev))) return 0; - indio_dev->event_interface = + iio_dev_opaque->event_interface = kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL); - if (indio_dev->event_interface == NULL) + if (iio_dev_opaque->event_interface == NULL) return -ENOMEM; - INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list); + INIT_LIST_HEAD(&iio_dev_opaque->event_interface->dev_attr_list); - iio_setup_ev_int(indio_dev->event_interface); + iio_setup_ev_int(iio_dev_opaque->event_interface); if (indio_dev->info->event_attrs != NULL) { attr = indio_dev->info->event_attrs->attrs; while (*attr++ != NULL) @@ -498,35 +506,35 @@ int iio_device_register_eventset(struct iio_dev *indio_dev) attrcount += ret; } - indio_dev->event_interface->group.name = iio_event_group_name; - indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1, - sizeof(indio_dev->event_interface->group.attrs[0]), + iio_dev_opaque->event_interface->group.name = iio_event_group_name; + iio_dev_opaque->event_interface->group.attrs = kcalloc(attrcount + 1, + sizeof(iio_dev_opaque->event_interface->group.attrs[0]), GFP_KERNEL); - if (indio_dev->event_interface->group.attrs == NULL) { + if (iio_dev_opaque->event_interface->group.attrs == NULL) { ret = -ENOMEM; goto error_free_setup_event_lines; } if (indio_dev->info->event_attrs) - memcpy(indio_dev->event_interface->group.attrs, + memcpy(iio_dev_opaque->event_interface->group.attrs, indio_dev->info->event_attrs->attrs, - sizeof(indio_dev->event_interface->group.attrs[0]) + sizeof(iio_dev_opaque->event_interface->group.attrs[0]) *attrcount_orig); attrn = attrcount_orig; /* Add all elements from the list. 
*/ list_for_each_entry(p, - &indio_dev->event_interface->dev_attr_list, + &iio_dev_opaque->event_interface->dev_attr_list, l) - indio_dev->event_interface->group.attrs[attrn++] = + iio_dev_opaque->event_interface->group.attrs[attrn++] = &p->dev_attr.attr; indio_dev->groups[indio_dev->groupcounter++] = - &indio_dev->event_interface->group; + &iio_dev_opaque->event_interface->group; return 0; error_free_setup_event_lines: - iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list); - kfree(indio_dev->event_interface); - indio_dev->event_interface = NULL; + iio_free_chan_devattr_list(&iio_dev_opaque->event_interface->dev_attr_list); + kfree(iio_dev_opaque->event_interface); + iio_dev_opaque->event_interface = NULL; return ret; } @@ -539,16 +547,20 @@ error_free_setup_event_lines: */ void iio_device_wakeup_eventset(struct iio_dev *indio_dev) { - if (indio_dev->event_interface == NULL) + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); + + if (iio_dev_opaque->event_interface == NULL) return; - wake_up(&indio_dev->event_interface->wait); + wake_up(&iio_dev_opaque->event_interface->wait); } void iio_device_unregister_eventset(struct iio_dev *indio_dev) { - if (indio_dev->event_interface == NULL) + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); + + if (iio_dev_opaque->event_interface == NULL) return; - iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list); - kfree(indio_dev->event_interface->group.attrs); - kfree(indio_dev->event_interface); + iio_free_chan_devattr_list(&iio_dev_opaque->event_interface->dev_attr_list); + kfree(iio_dev_opaque->event_interface->group.attrs); + kfree(iio_dev_opaque->event_interface); } diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h index af6c69a40169..f2e94196d31f 100644 --- a/include/linux/iio/iio-opaque.h +++ b/include/linux/iio/iio-opaque.h @@ -6,6 +6,7 @@ /** * struct iio_dev_opaque - industrial I/O device opaque information * @indio_dev: public industrial I/O device information + * @event_interface: event chrdevs associated with interrupt lines * @buffer_list: list of all buffers currently attached * @channel_attr_list: keep track of automatically created channel * attributes @@ -17,6 +18,7 @@ */ struct iio_dev_opaque { struct iio_dev indio_dev; + struct iio_event_interface *event_interface; struct list_head buffer_list; struct list_head channel_attr_list; struct attribute_group chan_attr_group; diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index a0110b1e29fe..30c9c9502478 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -488,7 +488,6 @@ struct iio_buffer_setup_ops { * @currentmode: [DRIVER] current operating mode * @dev: [DRIVER] device structure, should be assigned a parent * and owner - * @event_interface: [INTERN] event chrdevs associated with interrupt lines * @buffer: [DRIVER] any buffer present * @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux * @mlock: [INTERN] lock used to prevent simultaneous device state @@ -527,8 +526,6 @@ struct iio_dev { int currentmode; struct device dev; - struct iio_event_interface *event_interface; - struct iio_buffer *buffer; int scan_bytes; struct mutex mlock; -- cgit v1.2.3 From 3970ed49a46bd0b062870edcc0894b2bd820371d Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Tue, 7 Jul 2020 03:49:35 +0200 Subject: net: phy: Properly define genphy_c45_driver Avoid the W=1 warning that symbol 'genphy_c45_driver' was not declared. Should it be static? 
Declare it on the phy header file. Reviewed-by: Florian Fainelli Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/phy_device.c | 1 - include/linux/phy.h | 3 +++ 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index eb1068a77ce1..98be28567c65 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -227,7 +227,6 @@ static void phy_mdio_device_remove(struct mdio_device *mdiodev) } static struct phy_driver genphy_driver; -extern struct phy_driver genphy_c45_driver; static LIST_HEAD(phy_fixup_list); static DEFINE_MUTEX(phy_fixup_lock); diff --git a/include/linux/phy.h b/include/linux/phy.h index 101a48fa6750..1592c3d0e12f 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1385,6 +1385,9 @@ int genphy_c45_pma_read_abilities(struct phy_device *phydev); int genphy_c45_read_status(struct phy_device *phydev); int genphy_c45_config_aneg(struct phy_device *phydev); +/* Generic C45 PHY driver */ +extern struct phy_driver genphy_c45_driver; + /* The gen10g_* functions are the old Clause 45 stub */ int gen10g_config_aneg(struct phy_device *phydev); -- cgit v1.2.3 From 16d79cd4e23b1964d36c041ab027505ceacbbeeb Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Thu, 2 Jul 2020 18:26:49 +0200 Subject: PCI: Use 'pci_channel_state_t' instead of 'enum pci_channel_state' The method struct pci_error_handlers.error_detected() is defined and documented as taking an 'enum pci_channel_state' for the second argument, but most drivers use 'pci_channel_state_t' instead. This 'pci_channel_state_t' is not a typedef for the enum but a typedef for a bitwise type in order to have better/stricter typechecking. Consolidate everything by using 'pci_channel_state_t' in the method's definition, in the related helpers and in the drivers. Enforce use of 'pci_channel_state_t' by replacing 'enum pci_channel_state' with an anonymous 'enum'. Note: Currently, from a typechecking point of view this patch changes nothing because only the constants defined by the enum are bitwise, not the enum itself (sparse doesn't have the notion of 'bitwise enum'). This may change in some not too far future, hence the patch. 
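As a sketch of the intended end state (hypothetical foo driver, not taken from this patch), a converted error handler simply uses the bitwise typedef in its signature and compares against the same constants as before:

static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
					   pci_channel_state_t state)
{
	/* If the slot is permanently dead, ask the core to disconnect */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Otherwise request a slot reset before resuming I/O */
	return PCI_ERS_RESULT_NEED_RESET;
}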
[bhelgaas: squash in https://lore.kernel.org/r/20200702162651.49526-3-luc.vanoostenryck@gmail.com https://lore.kernel.org/r/20200702162651.49526-4-luc.vanoostenryck@gmail.com] Link: https://lore.kernel.org/r/20200702162651.49526-2-luc.vanoostenryck@gmail.com Signed-off-by: Luc Van Oostenryck Signed-off-by: Bjorn Helgaas --- Documentation/PCI/pci-error-recovery.rst | 8 ++++---- arch/powerpc/kernel/eeh_driver.c | 2 +- drivers/block/rsxx/core.c | 2 +- drivers/dma/ioat/init.c | 2 +- drivers/media/pci/ngene/ngene-cards.c | 2 +- drivers/misc/genwqe/card_base.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- drivers/net/ethernet/intel/ice/ice_main.c | 2 +- drivers/net/ethernet/intel/ixgb/ixgb_main.c | 4 ++-- drivers/net/ethernet/sfc/efx.c | 2 +- drivers/net/ethernet/sfc/falcon/efx.c | 2 +- drivers/pci/pci.h | 2 +- drivers/pci/pcie/err.c | 4 ++-- drivers/pci/pcie/portdrv_pci.c | 2 +- drivers/scsi/aacraid/linit.c | 2 +- drivers/scsi/sym53c8xx_2/sym_glue.c | 2 +- drivers/staging/qlge/qlge_main.c | 2 +- include/linux/pci.h | 4 ++-- 18 files changed, 24 insertions(+), 24 deletions(-) (limited to 'include') diff --git a/Documentation/PCI/pci-error-recovery.rst b/Documentation/PCI/pci-error-recovery.rst index 13beee23cb04..ccd713423133 100644 --- a/Documentation/PCI/pci-error-recovery.rst +++ b/Documentation/PCI/pci-error-recovery.rst @@ -79,7 +79,7 @@ This structure has the form:: struct pci_error_handlers { - int (*error_detected)(struct pci_dev *dev, enum pci_channel_state); + int (*error_detected)(struct pci_dev *dev, pci_channel_state_t); int (*mmio_enabled)(struct pci_dev *dev); int (*slot_reset)(struct pci_dev *dev); void (*resume)(struct pci_dev *dev); @@ -87,11 +87,11 @@ This structure has the form:: The possible channel states are:: - enum pci_channel_state { + typedef enum { pci_channel_io_normal, /* I/O channel is in normal state */ pci_channel_io_frozen, /* I/O to channel is blocked */ pci_channel_io_perm_failure, /* PCI card is dead */ - }; + } pci_channel_state_t; Possible return values are:: @@ -348,7 +348,7 @@ STEP 6: Permanent Failure ------------------------- A "permanent failure" has occurred, and the platform cannot recover the device. The platform will call error_detected() with a -pci_channel_state value of pci_channel_io_perm_failure. +pci_channel_state_t value of pci_channel_io_perm_failure. The device driver should, at this point, assume the worst. 
It should cancel all pending I/O, refuse all new I/O, returning -EIO to diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 7b048cee767c..ab8806d2e03e 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -214,7 +214,7 @@ static void eeh_dev_save_state(struct eeh_dev *edev, void *userdata) pci_save_state(pdev); } -static void eeh_set_channel_state(struct eeh_pe *root, enum pci_channel_state s) +static void eeh_set_channel_state(struct eeh_pe *root, pci_channel_state_t s) { struct eeh_pe *pe; struct eeh_dev *edev, *tmp; diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c index 10f6368117d8..34e937dd6bca 100644 --- a/drivers/block/rsxx/core.c +++ b/drivers/block/rsxx/core.c @@ -625,7 +625,7 @@ static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card) } static pci_ers_result_t rsxx_error_detected(struct pci_dev *dev, - enum pci_channel_state error) + pci_channel_state_t error) { int st; diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index 58d13564f88b..089893f2bbb8 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c @@ -1267,7 +1267,7 @@ static void ioat_resume(struct ioatdma_device *ioat_dma) #define DRV_NAME "ioatdma" static pci_ers_result_t ioat_pcie_error_detected(struct pci_dev *pdev, - enum pci_channel_state error) + pci_channel_state_t error) { dev_dbg(&pdev->dev, "%s: PCIe AER error %d\n", DRV_NAME, error); diff --git a/drivers/media/pci/ngene/ngene-cards.c b/drivers/media/pci/ngene/ngene-cards.c index 6185806a00e0..8bfb3d8ea610 100644 --- a/drivers/media/pci/ngene/ngene-cards.c +++ b/drivers/media/pci/ngene/ngene-cards.c @@ -1186,7 +1186,7 @@ MODULE_DEVICE_TABLE(pci, ngene_id_tbl); /****************************************************************************/ static pci_ers_result_t ngene_error_detected(struct pci_dev *dev, - enum pci_channel_state state) + pci_channel_state_t state) { dev_err(&dev->dev, "PCI error\n"); if (state == pci_channel_io_perm_failure) diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c index 1dc6c7c5cbce..97b8ecc42383 100644 --- a/drivers/misc/genwqe/card_base.c +++ b/drivers/misc/genwqe/card_base.c @@ -1240,7 +1240,7 @@ static void genwqe_remove(struct pci_dev *pci_dev) * error is detected. */ static pci_ers_result_t genwqe_err_error_detected(struct pci_dev *pci_dev, - enum pci_channel_state state) + pci_channel_state_t state) { struct genwqe_dev *cd; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 5d807c8004f8..f0de2d1842b4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -15465,7 +15465,7 @@ unmap: * remediation. **/ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev, - enum pci_channel_state error) + pci_channel_state_t error) { struct i40e_pf *pf = pci_get_drvdata(pdev); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 082825e3cb39..4dd9226a12df 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3586,7 +3586,7 @@ static void ice_remove(struct pci_dev *pdev) * is in progress. Allows the driver to gracefully prepare/handle PCI errors. 
*/ static pci_ers_result_t -ice_pci_err_detected(struct pci_dev *pdev, enum pci_channel_state err) +ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err) { struct ice_pf *pf = pci_get_drvdata(pdev); diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index b64e91ea3465..00db4b5863b1 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -82,7 +82,7 @@ static int ixgb_vlan_rx_kill_vid(struct net_device *netdev, static void ixgb_restore_vlan(struct ixgb_adapter *adapter); static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev, - enum pci_channel_state state); + pci_channel_state_t state); static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev); static void ixgb_io_resume (struct pci_dev *pdev); @@ -2194,7 +2194,7 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter) * a PCI bus error is detected. */ static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev, - enum pci_channel_state state) + pci_channel_state_t state) { struct net_device *netdev = pci_get_drvdata(pdev); struct ixgb_adapter *adapter = netdev_priv(netdev); diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 256807c28ff7..ed627aff7b36 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1519,7 +1519,7 @@ static const struct dev_pm_ops efx_pm_ops = { * Stop the software path and request a slot reset. */ static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev, - enum pci_channel_state state) + pci_channel_state_t state) { pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; struct efx_nic *efx = pci_get_drvdata(pdev); diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index 42bcd34fc508..f8979991970e 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -3118,7 +3118,7 @@ static const struct dev_pm_ops ef4_pm_ops = { * Stop the software path and request a slot reset. 
*/ static pci_ers_result_t ef4_io_error_detected(struct pci_dev *pdev, - enum pci_channel_state state) + pci_channel_state_t state) { pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; struct ef4_nic *efx = pci_get_drvdata(pdev); diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 6d3f75867106..c6c0c455f59f 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -555,7 +555,7 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev) /* PCI error reporting and recovery */ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, - enum pci_channel_state state, + pci_channel_state_t state, pci_ers_result_t (*reset_link)(struct pci_dev *pdev)); bool pcie_wait_for_link(struct pci_dev *pdev, bool active); diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c index 14bb8f54723e..467686ee2d8b 100644 --- a/drivers/pci/pcie/err.c +++ b/drivers/pci/pcie/err.c @@ -46,7 +46,7 @@ static pci_ers_result_t merge_result(enum pci_ers_result orig, } static int report_error_detected(struct pci_dev *dev, - enum pci_channel_state state, + pci_channel_state_t state, enum pci_ers_result *result) { pci_ers_result_t vote; @@ -147,7 +147,7 @@ out: } pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, - enum pci_channel_state state, + pci_channel_state_t state, pci_ers_result_t (*reset_link)(struct pci_dev *pdev)) { pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER; diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 3acf151ae015..3a3ce40ae1ab 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -146,7 +146,7 @@ static void pcie_portdrv_remove(struct pci_dev *dev) } static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev, - enum pci_channel_state error) + pci_channel_state_t error) { /* Root Port has no impact. Always recovers. */ return PCI_ERS_RESULT_CAN_RECOVER; diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index a308e86a97f1..37f65602b0ec 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -2002,7 +2002,7 @@ static void aac_remove_one(struct pci_dev *pdev) } static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev, - enum pci_channel_state error) + pci_channel_state_t error) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct aac_dev *aac = shost_priv(shost); diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index 2ca018ce796f..f455243bdb9b 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c @@ -1743,7 +1743,7 @@ static void sym2_remove(struct pci_dev *pdev) * @state: current state of the PCI slot */ static pci_ers_result_t sym2_io_error_detected(struct pci_dev *pdev, - enum pci_channel_state state) + pci_channel_state_t state) { /* If slot is permanently frozen, turn everything off */ if (state == pci_channel_io_perm_failure) { diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c index 402edaeffe12..ac30aefe49a1 100644 --- a/drivers/staging/qlge/qlge_main.c +++ b/drivers/staging/qlge/qlge_main.c @@ -4678,7 +4678,7 @@ static void ql_eeh_close(struct net_device *ndev) * a PCI bus error is detected. 
*/ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev, - enum pci_channel_state state) + pci_channel_state_t state) { struct net_device *ndev = pci_get_drvdata(pdev); struct ql_adapter *qdev = netdev_priv(ndev); diff --git a/include/linux/pci.h b/include/linux/pci.h index c79d83304e52..adcee9e30bfa 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -179,7 +179,7 @@ static inline const char *pci_power_name(pci_power_t state) */ typedef unsigned int __bitwise pci_channel_state_t; -enum pci_channel_state { +enum { /* I/O channel is in normal state */ pci_channel_io_normal = (__force pci_channel_state_t) 1, @@ -785,7 +785,7 @@ enum pci_ers_result { struct pci_error_handlers { /* PCI bus error detected on this device */ pci_ers_result_t (*error_detected)(struct pci_dev *dev, - enum pci_channel_state error); + pci_channel_state_t error); /* MMIO has been re-enabled, but not DMA */ pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev); -- cgit v1.2.3 From 4895d7808e7030c831f2ef83a3cc6ec0d46c30b1 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sun, 5 Jul 2020 21:27:56 -0700 Subject: net: ethtool: Introduce ethtool_phy_ops In order to decouple ethtool from its PHY library dependency, define an ethtool_phy_ops singleton which can be overridden by the PHY library when it loads with an appropriate set of function pointers. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- include/linux/ethtool.h | 25 +++++++++++++++++++++++++ net/ethtool/common.c | 11 +++++++++++ net/ethtool/common.h | 2 ++ 3 files changed, 38 insertions(+) (limited to 'include') diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 48ad3b6a0150..0c139a93b67a 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -502,5 +502,30 @@ int ethtool_virtdev_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd, u32 *dev_speed, u8 *dev_duplex); +struct netlink_ext_ack; +struct phy_device; +struct phy_tdr_config; + +/** + * struct ethtool_phy_ops - Optional PHY device options + * @start_cable_test - Start a cable test + * @start_cable_test_tdr - Start a Time Domain Reflectometry cable test + * + * All operations are optional (i.e. the function pointer may be set to %NULL) + * and callers must take this into account. Callers must hold the RTNL lock.
+ */ +struct ethtool_phy_ops { + int (*start_cable_test)(struct phy_device *phydev, + struct netlink_ext_ack *extack); + int (*start_cable_test_tdr)(struct phy_device *phydev, + struct netlink_ext_ack *extack, + const struct phy_tdr_config *config); +}; + +/** + * ethtool_set_ethtool_phy_ops - Set the ethtool_phy_ops singleton + * @ops: Ethtool PHY operations to set + */ +void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops); #endif /* _LINUX_ETHTOOL_H */ diff --git a/net/ethtool/common.c b/net/ethtool/common.c index aaecfc916a4d..ce4dbae5a943 100644 --- a/net/ethtool/common.c +++ b/net/ethtool/common.c @@ -2,6 +2,7 @@ #include #include +#include #include "common.h" @@ -373,3 +374,13 @@ int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) return 0; } + +const struct ethtool_phy_ops *ethtool_phy_ops; + +void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops) +{ + rtnl_lock(); + ethtool_phy_ops = ops; + rtnl_unlock(); +} +EXPORT_SYMBOL_GPL(ethtool_set_ethtool_phy_ops); diff --git a/net/ethtool/common.h b/net/ethtool/common.h index a62f68ccc43a..b83bef38368c 100644 --- a/net/ethtool/common.h +++ b/net/ethtool/common.h @@ -37,4 +37,6 @@ bool convert_legacy_settings_to_link_ksettings( int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max); int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info); +extern const struct ethtool_phy_ops *ethtool_phy_ops; + #endif /* _ETHTOOL_COMMON_H */ -- cgit v1.2.3 From f5836749c9c04a10decd2742845ad4870965fdef Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Mon, 6 Jul 2020 16:01:25 -0700 Subject: bpf: Add BPF_CGROUP_INET_SOCK_RELEASE hook Sometimes it's handy to know when the socket gets freed. In particular, we'd like to try to use a smarter allocation of ports for bpf_bind and explore the possibility of limiting the number of SOCK_DGRAM sockets the process can have. Implement BPF_CGROUP_INET_SOCK_RELEASE hook that triggers on inet socket release. It triggers only for userspace sockets (not in-kernel ones) and therefore has the same semantics as the existing BPF_CGROUP_INET_SOCK_CREATE. 
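For illustration only, a minimal program for the new hook mirrors existing BPF_CGROUP_INET_SOCK_CREATE programs; the "cgroup/sock_release" section name below is an assumed libbpf convention rather than something defined by this patch:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/sock_release")	/* assumed section name for BPF_CGROUP_INET_SOCK_RELEASE */
int count_sock_release(struct bpf_sock *ctx)
{
	/* A real program could account the released socket in a map here */
	return 1;
}

char _license[] SEC("license") = "GPL";

Such a program would then be attached to a cgroup with bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_SOCK_RELEASE, 0), exactly like the existing create hook.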
Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200706230128.4073544-2-sdf@google.com --- include/linux/bpf-cgroup.h | 4 ++++ include/uapi/linux/bpf.h | 1 + kernel/bpf/syscall.c | 3 +++ net/core/filter.c | 1 + net/ipv4/af_inet.c | 3 +++ 5 files changed, 12 insertions(+) (limited to 'include') diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index c66c545e161a..2c6f26670acc 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -210,6 +210,9 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \ BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE) +#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) \ + BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_RELEASE) + #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \ BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND) @@ -401,6 +404,7 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; }) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index da9bf35a26f8..548a749aebb3 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -226,6 +226,7 @@ enum bpf_attach_type { BPF_CGROUP_INET4_GETSOCKNAME, BPF_CGROUP_INET6_GETSOCKNAME, BPF_XDP_DEVMAP, + BPF_CGROUP_INET_SOCK_RELEASE, __MAX_BPF_ATTACH_TYPE }; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 8da159936bab..156f51ffada2 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1981,6 +1981,7 @@ bpf_prog_load_check_attach(enum bpf_prog_type prog_type, case BPF_PROG_TYPE_CGROUP_SOCK: switch (expected_attach_type) { case BPF_CGROUP_INET_SOCK_CREATE: + case BPF_CGROUP_INET_SOCK_RELEASE: case BPF_CGROUP_INET4_POST_BIND: case BPF_CGROUP_INET6_POST_BIND: return 0; @@ -2779,6 +2780,7 @@ attach_type_to_prog_type(enum bpf_attach_type attach_type) return BPF_PROG_TYPE_CGROUP_SKB; break; case BPF_CGROUP_INET_SOCK_CREATE: + case BPF_CGROUP_INET_SOCK_RELEASE: case BPF_CGROUP_INET4_POST_BIND: case BPF_CGROUP_INET6_POST_BIND: return BPF_PROG_TYPE_CGROUP_SOCK; @@ -2929,6 +2931,7 @@ static int bpf_prog_query(const union bpf_attr *attr, case BPF_CGROUP_INET_INGRESS: case BPF_CGROUP_INET_EGRESS: case BPF_CGROUP_INET_SOCK_CREATE: + case BPF_CGROUP_INET_SOCK_RELEASE: case BPF_CGROUP_INET4_BIND: case BPF_CGROUP_INET6_BIND: case BPF_CGROUP_INET4_POST_BIND: diff --git a/net/core/filter.c b/net/core/filter.c index c5e696e6c315..ddcc0d6209e1 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -6890,6 +6890,7 @@ static bool __sock_filter_check_attach_type(int off, case offsetof(struct bpf_sock, priority): switch (attach_type) { case BPF_CGROUP_INET_SOCK_CREATE: + case BPF_CGROUP_INET_SOCK_RELEASE: goto full_access; default: return false; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index ea6ed6d487ed..ff141d630bdf 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -411,6 +411,9 @@ int inet_release(struct socket *sock) if (sk) { long timeout; + if (!sk->sk_kern_sock) + BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk); + /* Applications forget to leave groups before 
exiting */ ip_mc_drop_socket(sk); -- cgit v1.2.3 From b79b0a310bca6dbe86d8adc1ca6a88e818bc1f19 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Mon, 29 Jun 2020 20:13:12 +0800 Subject: f2fs: support to trace f2fs_bmap() to show f2fs_bmap()'s result as below: f2fs_bmap: dev = (251,0), ino = 7, lblock:0, pblock:396800 Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/data.c | 14 +++++++++++--- include/trace/events/f2fs.h | 26 ++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 91dc7b598961..c07a50e4d967 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -3713,18 +3713,26 @@ static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block) static sector_t f2fs_bmap(struct address_space *mapping, sector_t block) { struct inode *inode = mapping->host; + struct buffer_head tmp = { + .b_size = i_blocksize(inode), + }; + sector_t blknr = 0; if (f2fs_has_inline_data(inode)) - return 0; + goto out; /* make sure allocating whole blocks */ if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) filemap_write_and_wait(mapping); if (f2fs_compressed_file(inode)) - return f2fs_bmap_compress(inode, block); + blknr = f2fs_bmap_compress(inode, block); - return generic_block_bmap(mapping, block, get_data_block_bmap); + if (!get_data_block_bmap(inode, block, &tmp, 0)) + blknr = tmp.b_blocknr; +out: + trace_f2fs_bmap(inode, block, blknr); + return blknr; } #ifdef CONFIG_MIGRATION diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 8639ab962a71..2e8713a52d76 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -1891,6 +1891,32 @@ TRACE_EVENT(f2fs_iostat, __entry->fs_cdrio, __entry->fs_nrio, __entry->fs_mrio) ); +TRACE_EVENT(f2fs_bmap, + + TP_PROTO(struct inode *inode, sector_t lblock, sector_t pblock), + + TP_ARGS(inode, lblock, pblock), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(sector_t, lblock) + __field(sector_t, pblock) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->lblock = lblock; + __entry->pblock = pblock; + ), + + TP_printk("dev = (%d,%d), ino = %lu, lblock:%lld, pblock:%lld", + show_dev_ino(__entry), + (unsigned long long)__entry->lblock, + (unsigned long long)__entry->pblock) +); + #endif /* _TRACE_F2FS_H */ /* This part must be outside protection */ -- cgit v1.2.3 From dd5a09bd05ae57a5df29dbeea84783081d47bdd1 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Mon, 29 Jun 2020 20:13:13 +0800 Subject: f2fs: support to trace f2fs_fiemap() to show f2fs_fiemap()'s result as below: f2fs_fiemap: dev = (251,0), ino = 7, lblock:0, pblock:1625292800, len:2097152, flags:0, ret:0 Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/data.c | 6 +++++- fs/f2fs/inline.c | 2 ++ include/trace/events/f2fs.h | 37 +++++++++++++++++++++++++++++++++++++ 3 files changed, 44 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index c07a50e4d967..995cf78b23c5 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -1813,6 +1813,7 @@ static int f2fs_xattr_fiemap(struct inode *inode, flags |= FIEMAP_EXTENT_LAST; err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags); + trace_f2fs_fiemap(inode, 0, phys, len, flags, err); if (err || err == 1) return err; } @@ -1836,8 +1837,10 @@ static int f2fs_xattr_fiemap(struct inode *inode, flags = FIEMAP_EXTENT_LAST; } - if (phys) + if (phys) { err = fiemap_fill_next_extent(fieinfo, 0, phys, len, 
flags); + trace_f2fs_fiemap(inode, 0, phys, len, flags, err); + } return (err < 0 ? err : 0); } @@ -1931,6 +1934,7 @@ next: ret = fiemap_fill_next_extent(fieinfo, logical, phys, size, flags); + trace_f2fs_fiemap(inode, logical, phys, size, flags, ret); if (ret) goto out; size = 0; diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index dbade310dc79..def4b8481883 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -12,6 +12,7 @@ #include "f2fs.h" #include "node.h" +#include bool f2fs_may_inline_data(struct inode *inode) { @@ -776,6 +777,7 @@ int f2fs_inline_data_fiemap(struct inode *inode, byteaddr += (char *)inline_data_addr(inode, ipage) - (char *)F2FS_INODE(ipage); err = fiemap_fill_next_extent(fieinfo, start, byteaddr, ilen, flags); + trace_f2fs_fiemap(inode, start, byteaddr, ilen, flags, err); out: f2fs_put_page(ipage, 1); return err; diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 2e8713a52d76..8a1c1311acac 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -1917,6 +1917,43 @@ TRACE_EVENT(f2fs_bmap, (unsigned long long)__entry->pblock) ); +TRACE_EVENT(f2fs_fiemap, + + TP_PROTO(struct inode *inode, sector_t lblock, sector_t pblock, + unsigned long long len, unsigned int flags, int ret), + + TP_ARGS(inode, lblock, pblock, len, flags, ret), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(sector_t, lblock) + __field(sector_t, pblock) + __field(unsigned long long, len) + __field(unsigned int, flags) + __field(int, ret) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->lblock = lblock; + __entry->pblock = pblock; + __entry->len = len; + __entry->flags = flags; + __entry->ret = ret; + ), + + TP_printk("dev = (%d,%d), ino = %lu, lblock:%lld, pblock:%lld, " + "len:%llu, flags:%u, ret:%d", + show_dev_ino(__entry), + (unsigned long long)__entry->lblock, + (unsigned long long)__entry->pblock, + __entry->len, + __entry->flags, + __entry->ret) +); + #endif /* _TRACE_F2FS_H */ /* This part must be outside protection */ -- cgit v1.2.3 From 76c12881a38aaa83e1eb4ce2fada36c3a732bad4 Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Mon, 6 Jul 2020 17:49:11 +0200 Subject: nsproxy: support CLONE_NEWTIME with setns() So far setns() was missing time namespace support. This was partially due to it simply not being implemented but also because vdso_join_timens() could still fail which made switching to multiple namespaces atomically problematic. 
This is now fixed so support CLONE_NEWTIME with setns() Signed-off-by: Christian Brauner Reviewed-by: Andrei Vagin Cc: Thomas Gleixner Cc: Michael Kerrisk Cc: Serge Hallyn Cc: Dmitry Safonov Link: https://lore.kernel.org/r/20200706154912.3248030-4-christian.brauner@ubuntu.com --- include/linux/time_namespace.h | 6 ++++++ kernel/nsproxy.c | 21 +++++++++++++++++++-- kernel/time/namespace.c | 5 +---- 3 files changed, 26 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/time_namespace.h b/include/linux/time_namespace.h index 824d54e057eb..5b6031385db0 100644 --- a/include/linux/time_namespace.h +++ b/include/linux/time_namespace.h @@ -33,6 +33,7 @@ extern struct time_namespace init_time_ns; #ifdef CONFIG_TIME_NS extern int vdso_join_timens(struct task_struct *task, struct time_namespace *ns); +extern void timens_commit(struct task_struct *tsk, struct time_namespace *ns); static inline struct time_namespace *get_time_ns(struct time_namespace *ns) { @@ -96,6 +97,11 @@ static inline int vdso_join_timens(struct task_struct *task, return 0; } +static inline void timens_commit(struct task_struct *tsk, + struct time_namespace *ns) +{ +} + static inline struct time_namespace *get_time_ns(struct time_namespace *ns) { return NULL; diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c index cd356630a311..12dd41b39a7f 100644 --- a/kernel/nsproxy.c +++ b/kernel/nsproxy.c @@ -262,8 +262,8 @@ void exit_task_namespaces(struct task_struct *p) static int check_setns_flags(unsigned long flags) { if (!flags || (flags & ~(CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC | - CLONE_NEWNET | CLONE_NEWUSER | CLONE_NEWPID | - CLONE_NEWCGROUP))) + CLONE_NEWNET | CLONE_NEWTIME | CLONE_NEWUSER | + CLONE_NEWPID | CLONE_NEWCGROUP))) return -EINVAL; #ifndef CONFIG_USER_NS @@ -290,6 +290,10 @@ static int check_setns_flags(unsigned long flags) if (flags & CLONE_NEWNET) return -EINVAL; #endif +#ifndef CONFIG_TIME_NS + if (flags & CLONE_NEWTIME) + return -EINVAL; +#endif return 0; } @@ -464,6 +468,14 @@ static int validate_nsset(struct nsset *nsset, struct pid *pid) } #endif +#ifdef CONFIG_TIME_NS + if (flags & CLONE_NEWTIME) { + ret = validate_ns(nsset, &nsp->time_ns->ns); + if (ret) + goto out; + } +#endif + out: if (pid_ns) put_pid_ns(pid_ns); @@ -507,6 +519,11 @@ static void commit_nsset(struct nsset *nsset) exit_sem(me); #endif +#ifdef CONFIG_TIME_NS + if (flags & CLONE_NEWTIME) + timens_commit(me, nsset->nsproxy->time_ns); +#endif + /* transfer ownership */ switch_task_namespaces(me, nsset->nsproxy); nsset->nsproxy = NULL; diff --git a/kernel/time/namespace.c b/kernel/time/namespace.c index aa7b90aac2a7..afc65e6be33e 100644 --- a/kernel/time/namespace.c +++ b/kernel/time/namespace.c @@ -280,7 +280,7 @@ static void timens_put(struct ns_common *ns) put_time_ns(to_time_ns(ns)); } -static void timens_commit(struct task_struct *tsk, struct time_namespace *ns) +void timens_commit(struct task_struct *tsk, struct time_namespace *ns) { timens_set_vvar_page(tsk, ns); vdso_join_timens(tsk, ns); @@ -298,9 +298,6 @@ static int timens_install(struct nsset *nsset, struct ns_common *new) !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN)) return -EPERM; - - timens_commit(current, ns); - get_time_ns(ns); put_time_ns(nsproxy->time_ns); nsproxy->time_ns = ns; -- cgit v1.2.3 From 217c2a633ebb36f1cc6d249f4ef2e4a809d46818 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Fri, 3 Jul 2020 05:49:22 -0700 Subject: perf/core: Use kmem_cache to allocate the PMU specific data Currently, the PMU specific data task_ctx_data is allocated by 
the function kzalloc() in the perf generic code. When there is no specific alignment requirement for the task_ctx_data, the method works well for now. However, there will be a problem once a specific alignment requirement is introduced in future features, e.g., the Architecture LBR XSAVE feature requires 64-byte alignment. If the specific alignment requirement is not fulfilled, the XSAVE family of instructions will fail to save/restore the xstate to/from the task_ctx_data. The function kzalloc() itself only guarantees a natural alignment. A new method to allocate the task_ctx_data has to be introduced, which has to meet the requirements as below: - must be a generic method that can be used by different architectures, because the allocation of the task_ctx_data is implemented in the perf generic code; - must be an alignment-guaranteed method (the alignment requirement does not change after boot); - must be able to allocate/free a buffer (smaller than a page size) dynamically; - should not cause extra CPU overhead or space overhead. Several options were considered as below: - One option is to allocate a larger buffer for task_ctx_data. E.g., ptr = kmalloc(size + alignment, GFP_KERNEL); ptr &= ~(alignment - 1); This option causes space overhead. - Another option is to allocate the task_ctx_data in the PMU specific code. To do so, several function pointers have to be added. As a result, both the generic structure and the PMU specific structure will become bigger. Besides, extra function calls are added when allocating/freeing the buffer. This option will increase both the space overhead and CPU overhead. - The third option is to use a kmem_cache to allocate a buffer for the task_ctx_data. The kmem_cache can be created with a specific alignment requirement by the PMU at boot time. A new pointer for kmem_cache has to be added in the generic struct pmu, which would be used to dynamically allocate a buffer for the task_ctx_data at run time. Although the new pointer is added to the struct pmu, the existing variable task_ctx_size is not required anymore. The size of the generic structure is kept the same. The third option, which meets all the aforementioned requirements, is used to replace kzalloc() for the PMU specific data allocation. A later patch will remove the kzalloc() method and the related variables. Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/1593780569-62993-17-git-send-email-kan.liang@linux.intel.com --- include/linux/perf_event.h | 5 +++++ kernel/events/core.c | 8 +++++++- 2 files changed, 12 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 46fe5cfb5163..09915ae06d28 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -424,6 +424,11 @@ struct pmu { */ size_t task_ctx_size; + /* + * Kmem cache of PMU specific data + */ + struct kmem_cache *task_ctx_cache; + /* * PMU specific parts of task perf event context (i.e. ctx->task_ctx_data) * can be synchronized using this function. 
See Intel LBR callstack support diff --git a/kernel/events/core.c b/kernel/events/core.c index 75090403f942..30d9b3182369 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1240,12 +1240,18 @@ static void get_ctx(struct perf_event_context *ctx) static void *alloc_task_ctx_data(struct pmu *pmu) { + if (pmu->task_ctx_cache) + return kmem_cache_zalloc(pmu->task_ctx_cache, GFP_KERNEL); + return kzalloc(pmu->task_ctx_size, GFP_KERNEL); } static void free_task_ctx_data(struct pmu *pmu, void *task_ctx_data) { - kfree(task_ctx_data); + if (pmu->task_ctx_cache && task_ctx_data) + kmem_cache_free(pmu->task_ctx_cache, task_ctx_data); + else + kfree(task_ctx_data); } static void free_ctx(struct rcu_head *head) -- cgit v1.2.3 From 5a09928d339f3cf0973991ddc3a2798825c84c99 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Fri, 3 Jul 2020 05:49:24 -0700 Subject: perf/x86: Remove task_ctx_size A new kmem_cache method has replaced the kzalloc() to allocate the PMU specific data. The task_ctx_size is not required anymore. Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/1593780569-62993-19-git-send-email-kan.liang@linux.intel.com --- arch/x86/events/core.c | 1 - arch/x86/events/intel/lbr.c | 1 - include/linux/perf_event.h | 4 ---- kernel/events/core.c | 4 +--- 4 files changed, 1 insertion(+), 9 deletions(-) (limited to 'include') diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index d740c861724c..6b1228ae007d 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -2371,7 +2371,6 @@ static struct pmu pmu = { .event_idx = x86_pmu_event_idx, .sched_task = x86_pmu_sched_task, - .task_ctx_size = sizeof(struct x86_perf_task_context), .swap_task_ctx = x86_pmu_swap_task_ctx, .check_period = x86_pmu_check_period, diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index e784c1d485ca..3ad528996d1c 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -1672,7 +1672,6 @@ void __init intel_pmu_arch_lbr_init(void) size = sizeof(struct x86_perf_task_context_arch_lbr) + lbr_nr * sizeof(struct lbr_entry); - x86_get_pmu()->task_ctx_size = size; x86_get_pmu()->task_ctx_cache = create_lbr_kmem_cache(size, 0); x86_pmu.lbr_from = MSR_ARCH_LBR_FROM_0; diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 09915ae06d28..3b22db08b6fb 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -419,10 +419,6 @@ struct pmu { */ void (*sched_task) (struct perf_event_context *ctx, bool sched_in); - /* - * PMU specific data size - */ - size_t task_ctx_size; /* * Kmem cache of PMU specific data diff --git a/kernel/events/core.c b/kernel/events/core.c index 30d9b3182369..7c436d705fbd 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1243,15 +1243,13 @@ static void *alloc_task_ctx_data(struct pmu *pmu) if (pmu->task_ctx_cache) return kmem_cache_zalloc(pmu->task_ctx_cache, GFP_KERNEL); - return kzalloc(pmu->task_ctx_size, GFP_KERNEL); + return NULL; } static void free_task_ctx_data(struct pmu *pmu, void *task_ctx_data) { if (pmu->task_ctx_cache && task_ctx_data) kmem_cache_free(pmu->task_ctx_cache, task_ctx_data); - else - kfree(task_ctx_data); } static void free_ctx(struct rcu_head *head) -- cgit v1.2.3 From 85c2ce9104eb93517db2037699471c517e81f9b4 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 30 Jun 2020 16:49:05 +0200 Subject: sched, vmlinux.lds: Increase STRUCT_ALIGNMENT to 64 bytes for GCC-4.9 For some mysterious reason GCC-4.9 has a 64 byte section 
alignment for structures; all other GCC versions (and Clang) tested (including 4.8 and 5.0) are fine with the 32-byte alignment. Getting this right is important for the new SCHED_DATA macro that creates an explicitly ordered array of 'struct sched_class' in the linker script and expects pointer arithmetic to work. Fixes: c3a340f7e7ea ("sched: Have sched_class_highest define by vmlinux.lds.h") Reported-by: kernel test robot Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200630144905.GX4817@hirez.programming.kicks-ass.net --- include/asm-generic/vmlinux.lds.h | 18 +++++++++++------- kernel/sched/sched.h | 3 ++- 2 files changed, 13 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 66fb84c3dc7e..3ceb4b7279ec 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -108,6 +108,17 @@ #define SBSS_MAIN .sbss #endif +/* + * GCC 4.5 and later have a 32 bytes section alignment for structures. + * Except GCC 4.9, that feels the need to align on 64 bytes. + */ +#if __GNUC__ == 4 && __GNUC_MINOR__ == 9 +#define STRUCT_ALIGNMENT 64 +#else +#define STRUCT_ALIGNMENT 32 +#endif +#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT) + /* * The order of the sched class addresses are important, as they are * used to determine the order of the priority of each sched class in @@ -123,13 +134,6 @@ *(__stop_sched_class) \ __end_sched_classes = .; -/* - * Align to a 32 byte boundary equal to the - * alignment gcc 4.5 uses for a struct - */ -#define STRUCT_ALIGNMENT 32 -#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT) - /* The actual configuration determine if the init/exit sections * are handled as text/data or they can be discarded (which * often happens at runtime) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 5aa6661ecaf1..9bef2dd01247 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -67,6 +67,7 @@ #include #include +#include #ifdef CONFIG_PARAVIRT # include @@ -1810,7 +1811,7 @@ struct sched_class { #ifdef CONFIG_FAIR_GROUP_SCHED void (*task_change_group)(struct task_struct *p, int type); #endif -} __aligned(32); /* STRUCT_ALIGN(), vmlinux.lds.h */ +} __aligned(STRUCT_ALIGNMENT); /* STRUCT_ALIGN(), vmlinux.lds.h */ static inline void put_prev_task(struct rq *rq, struct task_struct *prev) { -- cgit v1.2.3 From 9d246053a69196c7c27068870e9b4b66ac536f68 Mon Sep 17 00:00:00 2001 From: Phil Auld Date: Mon, 29 Jun 2020 15:23:03 -0400 Subject: sched: Add a tracepoint to track rq->nr_running Add a bare tracepoint trace_sched_update_nr_running_tp which tracks a CPU's rq->nr_running. This is used to accurately trace this data and provide a visualization of scheduler imbalances in, for example, the form of a heat map. The tracepoint is accessed by loading an external kernel module. An example module (forked from Qais' module and including the pelt related tracepoints) can be found at: https://github.com/auldp/tracepoints-helpers.git A script to turn the trace-cmd report output into a heatmap plot can be found at: https://github.com/jirvoz/plot-nr-running The tracepoints are added to add_nr_running() and sub_nr_running() which are in kernel/sched/sched.h. In order to avoid CREATE_TRACE_POINTS in the header, a wrapper call is used and the trace/events/sched.h include is moved before sched.h in kernel/sched/core.c. 
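As a rough illustration of how such an external module can attach to the bare tracepoint, here is a minimal sketch. It is illustrative only, not the helper module linked above; it assumes the register_trace_sched_update_nr_running_tp() stub generated by DECLARE_TRACE() and the exported sched_trace_rq_cpu()/sched_trace_rq_nr_running() accessors, and all function names in it are made up for the example.

#include <linux/module.h>
#include <linux/sched.h>
#include <trace/events/sched.h>

/* Probe signature: void *data first, then the TP_PROTO arguments. */
static void nr_running_probe(void *data, struct rq *rq, int change)
{
	/* struct rq is opaque to modules, so go through the exported accessors. */
	trace_printk("cpu=%d nr_running=%d change=%d\n",
		     sched_trace_rq_cpu(rq),
		     sched_trace_rq_nr_running(rq), change);
}

static int __init nr_running_probe_init(void)
{
	return register_trace_sched_update_nr_running_tp(nr_running_probe, NULL);
}

static void __exit nr_running_probe_exit(void)
{
	unregister_trace_sched_update_nr_running_tp(nr_running_probe, NULL);
	tracepoint_synchronize_unregister();
}

module_init(nr_running_probe_init);
module_exit(nr_running_probe_exit);
MODULE_LICENSE("GPL");

Built as an out-of-tree module, the probe fires on every add_nr_running()/sub_nr_running() transition, and its trace_printk() output can be fed to the plotting script mentioned above.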
Signed-off-by: Phil Auld Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200629192303.GC120228@lorien.usersys.redhat.com --- include/linux/sched.h | 1 + include/trace/events/sched.h | 4 ++++ kernel/sched/core.c | 13 +++++++++---- kernel/sched/fair.c | 8 ++++++-- kernel/sched/pelt.c | 2 -- kernel/sched/sched.h | 10 ++++++++++ 6 files changed, 30 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index 683372943093..12b10ce51a08 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2044,6 +2044,7 @@ const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq); const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq); int sched_trace_rq_cpu(struct rq *rq); +int sched_trace_rq_nr_running(struct rq *rq); const struct cpumask *sched_trace_rd_span(struct root_domain *rd); diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 04f9a4c7b0d9..0d5ff0958d48 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -642,6 +642,10 @@ DECLARE_TRACE(sched_util_est_se_tp, TP_PROTO(struct sched_entity *se), TP_ARGS(se)); +DECLARE_TRACE(sched_update_nr_running_tp, + TP_PROTO(struct rq *rq, int change), + TP_ARGS(rq, change)); + #endif /* _TRACE_SCHED_H */ /* This part must be outside protection */ diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 4cf30e4de653..ff0519551188 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6,6 +6,10 @@ * * Copyright (C) 1991-2002 Linus Torvalds */ +#define CREATE_TRACE_POINTS +#include +#undef CREATE_TRACE_POINTS + #include "sched.h" #include @@ -23,9 +27,6 @@ #include "pelt.h" #include "smp.h" -#define CREATE_TRACE_POINTS -#include - /* * Export tracepoints that act as a bare tracehook (ie: have no trace event * associated with them) to allow external modules to probe them. @@ -38,6 +39,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp); EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp); EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp); EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp); +EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp); DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); @@ -8195,4 +8197,7 @@ const u32 sched_prio_to_wmult[40] = { /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, }; -#undef CREATE_TRACE_POINTS +void call_trace_sched_update_nr_running(struct rq *rq, int count) +{ + trace_sched_update_nr_running_tp(rq, count); +} diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 6fab1d17c575..3213cb247aff 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -22,8 +22,6 @@ */ #include "sched.h" -#include - /* * Targeted preemption latency for CPU-bound tasks: * @@ -11296,3 +11294,9 @@ const struct cpumask *sched_trace_rd_span(struct root_domain *rd) #endif } EXPORT_SYMBOL_GPL(sched_trace_rd_span); + +int sched_trace_rq_nr_running(struct rq *rq) +{ + return rq ? 
rq->nr_running : -1; +} +EXPORT_SYMBOL_GPL(sched_trace_rq_nr_running); diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c index 11bea3b08115..2c613e1cff3a 100644 --- a/kernel/sched/pelt.c +++ b/kernel/sched/pelt.c @@ -28,8 +28,6 @@ #include "sched.h" #include "pelt.h" -#include - /* * Approximate: * val * y^n, where y^32 ~= 0.5 (~1 scheduling period) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index b1432f608061..65b72e0487bf 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -76,6 +76,8 @@ #include "cpupri.h" #include "cpudeadline.h" +#include + #ifdef CONFIG_SCHED_DEBUG # define SCHED_WARN_ON(x) WARN_ONCE(x, #x) #else @@ -97,6 +99,7 @@ extern atomic_long_t calc_load_tasks; extern void calc_global_load_tick(struct rq *this_rq); extern long calc_load_fold_active(struct rq *this_rq, long adjust); +extern void call_trace_sched_update_nr_running(struct rq *rq, int count); /* * Helpers for converting nanosecond timing to jiffy resolution */ @@ -1973,6 +1976,9 @@ static inline void add_nr_running(struct rq *rq, unsigned count) unsigned prev_nr = rq->nr_running; rq->nr_running = prev_nr + count; + if (trace_sched_update_nr_running_tp_enabled()) { + call_trace_sched_update_nr_running(rq, count); + } #ifdef CONFIG_SMP if (prev_nr < 2 && rq->nr_running >= 2) { @@ -1987,6 +1993,10 @@ static inline void add_nr_running(struct rq *rq, unsigned count) static inline void sub_nr_running(struct rq *rq, unsigned count) { rq->nr_running -= count; + if (trace_sched_update_nr_running_tp_enabled()) { + call_trace_sched_update_nr_running(rq, count); + } + /* Check if we still need preemption */ sched_update_tick_dependency(rq); } -- cgit v1.2.3 From 5cc2013bfeee756a1ee6da9bfbe42e52b4695035 Mon Sep 17 00:00:00 2001 From: Michael Walle Date: Mon, 6 Jul 2020 19:53:41 +0200 Subject: regmap-irq: use fwnode instead of device node in add_irq_chip() Convert the argument to the newer fwnode_handle instead of a device tree node. Fortunately, there are no users for now, so this is an easy change. Signed-off-by: Michael Walle Link: https://lore.kernel.org/r/20200706175353.16404-2-michael@walle.cc Signed-off-by: Mark Brown --- drivers/base/regmap/regmap-irq.c | 53 ++++++++++++++++++++++------------------ include/linux/regmap.h | 21 +++++++++------- 2 files changed, 41 insertions(+), 33 deletions(-) (limited to 'include') diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 4340e1d268b6..369a57e6f89d 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -541,9 +541,9 @@ static const struct irq_domain_ops regmap_domain_ops = { }; /** - * regmap_add_irq_chip_np() - Use standard regmap IRQ controller handling + * regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling * - * @np: The device_node where the IRQ domain should be added to. + * @fwnode: The firmware node where the IRQ domain should be added to. * @map: The regmap for the device. * @irq: The IRQ the device uses to signal interrupts. * @irq_flags: The IRQF_ flags to use for the primary interrupt. @@ -557,10 +557,11 @@ static const struct irq_domain_ops regmap_domain_ops = { * register cache. The chip driver is responsible for restoring the * register values used by the IRQ controller over suspend and resume. 
*/ -int regmap_add_irq_chip_np(struct device_node *np, struct regmap *map, int irq, - int irq_flags, int irq_base, - const struct regmap_irq_chip *chip, - struct regmap_irq_chip_data **data) +int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, + struct regmap *map, int irq, + int irq_flags, int irq_base, + const struct regmap_irq_chip *chip, + struct regmap_irq_chip_data **data) { struct regmap_irq_chip_data *d; int i; @@ -771,10 +772,12 @@ int regmap_add_irq_chip_np(struct device_node *np, struct regmap *map, int irq, } if (irq_base) - d->domain = irq_domain_add_legacy(np, chip->num_irqs, irq_base, + d->domain = irq_domain_add_legacy(to_of_node(fwnode), + chip->num_irqs, irq_base, 0, &regmap_domain_ops, d); else - d->domain = irq_domain_add_linear(np, chip->num_irqs, + d->domain = irq_domain_add_linear(to_of_node(fwnode), + chip->num_irqs, &regmap_domain_ops, d); if (!d->domain) { dev_err(map->dev, "Failed to create IRQ domain\n"); @@ -808,7 +811,7 @@ err_alloc: kfree(d); return ret; } -EXPORT_SYMBOL_GPL(regmap_add_irq_chip_np); +EXPORT_SYMBOL_GPL(regmap_add_irq_chip_fwnode); /** * regmap_add_irq_chip() - Use standard regmap IRQ controller handling @@ -822,15 +825,15 @@ EXPORT_SYMBOL_GPL(regmap_add_irq_chip_np); * * Returns 0 on success or an errno on failure. * - * This is the same as regmap_add_irq_chip_np, except that the device + * This is the same as regmap_add_irq_chip_fwnode, except that the firmware * node of the regmap is used. */ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, int irq_base, const struct regmap_irq_chip *chip, struct regmap_irq_chip_data **data) { - return regmap_add_irq_chip_np(map->dev->of_node, map, irq, irq_flags, - irq_base, chip, data); + return regmap_add_irq_chip_fwnode(dev_fwnode(map->dev), map, irq, + irq_flags, irq_base, chip, data); } EXPORT_SYMBOL_GPL(regmap_add_irq_chip); @@ -899,10 +902,10 @@ static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data) } /** - * devm_regmap_add_irq_chip_np() - Resource manager regmap_add_irq_chip_np() + * devm_regmap_add_irq_chip_fwnode() - Resource managed regmap_add_irq_chip_fwnode() * * @dev: The device pointer on which irq_chip belongs to. - * @np: The device_node where the IRQ domain should be added to. + * @fwnode: The firmware node where the IRQ domain should be added to. * @map: The regmap for the device. * @irq: The IRQ the device uses to signal interrupts * @irq_flags: The IRQF_ flags to use for the primary interrupt. @@ -915,11 +918,12 @@ static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data) * The &regmap_irq_chip_data will be automatically released when the device is * unbound. 
*/ -int devm_regmap_add_irq_chip_np(struct device *dev, struct device_node *np, - struct regmap *map, int irq, int irq_flags, - int irq_base, - const struct regmap_irq_chip *chip, - struct regmap_irq_chip_data **data) +int devm_regmap_add_irq_chip_fwnode(struct device *dev, + struct fwnode_handle *fwnode, + struct regmap *map, int irq, + int irq_flags, int irq_base, + const struct regmap_irq_chip *chip, + struct regmap_irq_chip_data **data) { struct regmap_irq_chip_data **ptr, *d; int ret; @@ -929,8 +933,8 @@ int devm_regmap_add_irq_chip_np(struct device *dev, struct device_node *np, if (!ptr) return -ENOMEM; - ret = regmap_add_irq_chip_np(np, map, irq, irq_flags, irq_base, - chip, &d); + ret = regmap_add_irq_chip_fwnode(fwnode, map, irq, irq_flags, irq_base, + chip, &d); if (ret < 0) { devres_free(ptr); return ret; @@ -941,7 +945,7 @@ int devm_regmap_add_irq_chip_np(struct device *dev, struct device_node *np, *data = d; return 0; } -EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_np); +EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_fwnode); /** * devm_regmap_add_irq_chip() - Resource manager regmap_add_irq_chip() @@ -964,8 +968,9 @@ int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq, const struct regmap_irq_chip *chip, struct regmap_irq_chip_data **data) { - return devm_regmap_add_irq_chip_np(dev, map->dev->of_node, map, irq, - irq_flags, irq_base, chip, data); + return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(map->dev), map, + irq, irq_flags, irq_base, chip, + data); } EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip); diff --git a/include/linux/regmap.h b/include/linux/regmap.h index cb666b9c6b6a..e2c22c0f3b9a 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -18,6 +18,7 @@ #include #include #include +#include struct module; struct clk; @@ -1311,21 +1312,23 @@ struct regmap_irq_chip_data; int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, int irq_base, const struct regmap_irq_chip *chip, struct regmap_irq_chip_data **data); -int regmap_add_irq_chip_np(struct device_node *np, struct regmap *map, int irq, - int irq_flags, int irq_base, - const struct regmap_irq_chip *chip, - struct regmap_irq_chip_data **data); +int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, + struct regmap *map, int irq, + int irq_flags, int irq_base, + const struct regmap_irq_chip *chip, + struct regmap_irq_chip_data **data); void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *data); int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq, int irq_flags, int irq_base, const struct regmap_irq_chip *chip, struct regmap_irq_chip_data **data); -int devm_regmap_add_irq_chip_np(struct device *dev, struct device_node *np, - struct regmap *map, int irq, int irq_flags, - int irq_base, - const struct regmap_irq_chip *chip, - struct regmap_irq_chip_data **data); +int devm_regmap_add_irq_chip_fwnode(struct device *dev, + struct fwnode_handle *fwnode, + struct regmap *map, int irq, + int irq_flags, int irq_base, + const struct regmap_irq_chip *chip, + struct regmap_irq_chip_data **data); void devm_regmap_del_irq_chip(struct device *dev, int irq, struct regmap_irq_chip_data *data); -- cgit v1.2.3 From 82394db7383d33641f3f565bd79792fb41b1741f Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Mon, 29 Jun 2020 12:06:37 -0700 Subject: block: add capacity field to zone descriptors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In the zoned storage model, the sectors within a zone are typically 
all writeable. With the introduction of the Zoned Namespace (ZNS) Command Set in the NVM Express organization, the model was extended to have a specific writeable capacity. Extend the zone descriptor data structure with a zone capacity field to indicate to the user how many sectors in a zone are writeable. Introduce backward compatibility in the zone report ioctl by extending the zone report header data structure with a flags field to indicate if the capacity field is available. Reviewed-by: Jens Axboe Reviewed-by: Javier González Reviewed-by: Chaitanya Kulkarni Reviewed-by: Himanshu Madhani Reviewed-by: Martin K. Petersen Reviewed-by: Hannes Reinecke Reviewed-by: Johannes Thumshirn Reviewed-by: Daniel Wagner Signed-off-by: Matias Bjørling Signed-off-by: Christoph Hellwig --- block/blk-zoned.c | 1 + drivers/block/null_blk_zoned.c | 2 ++ drivers/scsi/sd_zbc.c | 1 + include/uapi/linux/blkzoned.h | 15 +++++++++++++-- 4 files changed, 17 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/block/blk-zoned.c b/block/blk-zoned.c index 23831fa8701d..81152a260354 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -312,6 +312,7 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode, return ret; rep.nr_zones = ret; + rep.flags = BLK_ZONE_REP_CAPACITY; if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report))) return -EFAULT; return 0; diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c index cc47606d8ffe..624aac09b005 100644 --- a/drivers/block/null_blk_zoned.c +++ b/drivers/block/null_blk_zoned.c @@ -47,6 +47,7 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q) zone->start = sector; zone->len = dev->zone_size_sects; + zone->capacity = zone->len; zone->wp = zone->start + zone->len; zone->type = BLK_ZONE_TYPE_CONVENTIONAL; zone->cond = BLK_ZONE_COND_NOT_WP; @@ -59,6 +60,7 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q) zone->start = zone->wp = sector; zone->len = dev->zone_size_sects; + zone->capacity = zone->len; zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ; zone->cond = BLK_ZONE_COND_EMPTY; diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 6f7eba66687e..183a20720da9 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -59,6 +59,7 @@ static int sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf, zone.non_seq = 1; zone.len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8])); + zone.capacity = zone.len; zone.start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16])); zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24])); if (zone.type != ZBC_ZONE_TYPE_CONV && diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h index 0cdef67135f0..42c3366cc25f 100644 --- a/include/uapi/linux/blkzoned.h +++ b/include/uapi/linux/blkzoned.h @@ -73,6 +73,15 @@ enum blk_zone_cond { BLK_ZONE_COND_OFFLINE = 0xF, }; +/** + * enum blk_zone_report_flags - Feature flags of reported zone descriptors. + * + * @BLK_ZONE_REP_CAPACITY: Zone descriptor has capacity field. + */ +enum blk_zone_report_flags { + BLK_ZONE_REP_CAPACITY = (1 << 0), +}; + /** * struct blk_zone - Zone descriptor for BLKREPORTZONE ioctl. 
* @@ -99,7 +108,9 @@ struct blk_zone { __u8 cond; /* Zone condition */ __u8 non_seq; /* Non-sequential write resources active */ __u8 reset; /* Reset write pointer recommended */ - __u8 reserved[36]; + __u8 resv[4]; + __u64 capacity; /* Zone capacity in number of sectors */ + __u8 reserved[24]; }; /** @@ -115,7 +126,7 @@ struct blk_zone { struct blk_zone_report { __u64 sector; __u32 nr_zones; - __u8 reserved[4]; + __u32 flags; struct blk_zone zones[0]; }; -- cgit v1.2.3 From 71010c30945425203da8d069a10fa45a05a00f96 Mon Sep 17 00:00:00 2001 From: Niklas Cassel Date: Mon, 29 Jun 2020 12:06:39 -0700 Subject: nvme: implement multiple I/O Command Set support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implements support for multiple I/O Command Sets. NVMe TP 4056 introduces a method to enumerate multiple command sets per namespace. If the command set is exposed, this method for enumeration will be used instead of the traditional method that uses the CC.CSS register command set register for command set identification. For namespaces where the Command Set Identifier is not supported or recognized, the specific namespace will not be created. Reviewed-by: Javier González Reviewed-by: Martin K. Petersen Reviewed-by: Johannes Thumshirn Reviewed-by: Matias Bjørling Reviewed-by: Daniel Wagner Reviewed-by: Himanshu Madhani Reviewed-by: Hannes Reinecke Signed-off-by: Niklas Cassel Signed-off-by: Christoph Hellwig --- drivers/nvme/host/core.c | 53 +++++++++++++++++++++++++++++++++++++++--------- drivers/nvme/host/nvme.h | 1 + include/linux/nvme.h | 19 +++++++++++++++-- 3 files changed, 61 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 96898040a6d5..892291dbee64 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1056,8 +1056,13 @@ static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id) return error; } +static bool nvme_multi_css(struct nvme_ctrl *ctrl) +{ + return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI; +} + static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids, - struct nvme_ns_id_desc *cur) + struct nvme_ns_id_desc *cur, bool *csi_seen) { const char *warn_str = "ctrl returned bogus length:"; void *data = cur; @@ -1087,6 +1092,15 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids, } uuid_copy(&ids->uuid, data + sizeof(*cur)); return NVME_NIDT_UUID_LEN; + case NVME_NIDT_CSI: + if (cur->nidl != NVME_NIDT_CSI_LEN) { + dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n", + warn_str, cur->nidl); + return -1; + } + memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN); + *csi_seen = true; + return NVME_NIDT_CSI_LEN; default: /* Skip unknown types */ return cur->nidl; @@ -1097,10 +1111,9 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, struct nvme_ns_ids *ids) { struct nvme_command c = { }; - int status; + bool csi_seen = false; + int status, pos, len; void *data; - int pos; - int len; c.identify.opcode = nvme_admin_identify; c.identify.nsid = cpu_to_le32(nsid); @@ -1125,7 +1138,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, * device just because of a temporal retry-able error (such * as path of transport errors). 
*/ - if (status > 0 && (status & NVME_SC_DNR)) + if (status > 0 && (status & NVME_SC_DNR) && !nvme_multi_css(ctrl)) status = 0; goto free_data; } @@ -1136,12 +1149,19 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, if (cur->nidl == 0) break; - len = nvme_process_ns_desc(ctrl, ids, cur); + len = nvme_process_ns_desc(ctrl, ids, cur, &csi_seen); if (len < 0) - goto free_data; + break; len += sizeof(*cur); } + + if (nvme_multi_css(ctrl) && !csi_seen) { + dev_warn(ctrl->device, "Command set not reported for nsid:%d\n", + nsid); + status = -EINVAL; + } + free_data: kfree(data); return status; @@ -1798,7 +1818,7 @@ static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid, memcpy(ids->eui64, id->eui64, sizeof(id->eui64)); if (ctrl->vs >= NVME_VS(1, 2, 0)) memcpy(ids->nguid, id->nguid, sizeof(id->nguid)); - if (ctrl->vs >= NVME_VS(1, 3, 0)) + if (ctrl->vs >= NVME_VS(1, 3, 0) || nvme_multi_css(ctrl)) return nvme_identify_ns_descs(ctrl, nsid, ids); return 0; } @@ -1814,7 +1834,8 @@ static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b) { return uuid_equal(&a->uuid, &b->uuid) && memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 && - memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0; + memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 && + a->csi == b->csi; } static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns, @@ -1936,6 +1957,15 @@ static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) if (ns->lba_shift == 0) ns->lba_shift = 9; + switch (ns->head->ids.csi) { + case NVME_CSI_NVM: + break; + default: + dev_warn(ctrl->device, "unknown csi:%d ns:%d\n", + ns->head->ids.csi, ns->head->ns_id); + return -ENODEV; + } + if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && is_power_of_2(ctrl->max_hw_sectors)) iob = ctrl->max_hw_sectors; @@ -2270,7 +2300,10 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl) ctrl->page_size = 1 << page_shift; - ctrl->ctrl_config = NVME_CC_CSS_NVM; + if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI) + ctrl->ctrl_config = NVME_CC_CSS_CSI; + else + ctrl->ctrl_config = NVME_CC_CSS_NVM; ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT; ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE; ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index b5a2e8b7e0be..5573159f714d 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -339,6 +339,7 @@ struct nvme_ns_ids { u8 eui64[8]; u8 nguid[16]; uuid_t uuid; + u8 csi; }; /* diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 5ce51ab4c50e..81ffe5247505 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -132,6 +132,7 @@ enum { #define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff) #define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf) #define NVME_CAP_NSSRC(cap) (((cap) >> 36) & 0x1) +#define NVME_CAP_CSS(cap) (((cap) >> 37) & 0xff) #define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf) #define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf) @@ -162,7 +163,6 @@ enum { enum { NVME_CC_ENABLE = 1 << 0, - NVME_CC_CSS_NVM = 0 << 4, NVME_CC_EN_SHIFT = 0, NVME_CC_CSS_SHIFT = 4, NVME_CC_MPS_SHIFT = 7, @@ -170,6 +170,9 @@ enum { NVME_CC_SHN_SHIFT = 14, NVME_CC_IOSQES_SHIFT = 16, NVME_CC_IOCQES_SHIFT = 20, + NVME_CC_CSS_NVM = 0 << NVME_CC_CSS_SHIFT, + NVME_CC_CSS_CSI = 6 << NVME_CC_CSS_SHIFT, + NVME_CC_CSS_MASK = 7 << NVME_CC_CSS_SHIFT, NVME_CC_AMS_RR = 0 << NVME_CC_AMS_SHIFT, NVME_CC_AMS_WRRU = 1 << NVME_CC_AMS_SHIFT, NVME_CC_AMS_VS = 7 << 
NVME_CC_AMS_SHIFT, @@ -179,6 +182,8 @@ enum { NVME_CC_SHN_MASK = 3 << NVME_CC_SHN_SHIFT, NVME_CC_IOSQES = NVME_NVM_IOSQES << NVME_CC_IOSQES_SHIFT, NVME_CC_IOCQES = NVME_NVM_IOCQES << NVME_CC_IOCQES_SHIFT, + NVME_CAP_CSS_NVM = 1 << 0, + NVME_CAP_CSS_CSI = 1 << 6, NVME_CSTS_RDY = 1 << 0, NVME_CSTS_CFS = 1 << 1, NVME_CSTS_NSSRO = 1 << 4, @@ -374,6 +379,8 @@ enum { NVME_ID_CNS_CTRL = 0x01, NVME_ID_CNS_NS_ACTIVE_LIST = 0x02, NVME_ID_CNS_NS_DESC_LIST = 0x03, + NVME_ID_CNS_CS_NS = 0x05, + NVME_ID_CNS_CS_CTRL = 0x06, NVME_ID_CNS_NS_PRESENT_LIST = 0x10, NVME_ID_CNS_NS_PRESENT = 0x11, NVME_ID_CNS_CTRL_NS_LIST = 0x12, @@ -383,6 +390,10 @@ enum { NVME_ID_CNS_UUID_LIST = 0x17, }; +enum { + NVME_CSI_NVM = 0, +}; + enum { NVME_DIR_IDENTIFY = 0x00, NVME_DIR_STREAMS = 0x01, @@ -435,11 +446,13 @@ struct nvme_ns_id_desc { #define NVME_NIDT_EUI64_LEN 8 #define NVME_NIDT_NGUID_LEN 16 #define NVME_NIDT_UUID_LEN 16 +#define NVME_NIDT_CSI_LEN 1 enum { NVME_NIDT_EUI64 = 0x01, NVME_NIDT_NGUID = 0x02, NVME_NIDT_UUID = 0x03, + NVME_NIDT_CSI = 0x04, }; struct nvme_smart_log { @@ -972,7 +985,9 @@ struct nvme_identify { __u8 cns; __u8 rsvd3; __le16 ctrlid; - __u32 rsvd11[5]; + __u8 rsvd11[3]; + __u8 csi; + __u32 rsvd12[4]; }; #define NVME_IDENTIFY_DATA_SIZE 4096 -- cgit v1.2.3 From be93e87e780253780df9bb6ecc9bc1199b0d94c3 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Mon, 29 Jun 2020 12:06:40 -0700 Subject: nvme: support for multiple Command Sets Supported and Effects log pages MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Commands Supported and Effects log page was extended with a CSI field that enables the host to query the log page for each command set supported. Retrieve this log page for each command set that an attached namespace supports, and save a pointer to that log in the namespace head. Reviewed-by: Matias Bjørling Reviewed-by: Javier González Reviewed-by: Himanshu Madhani Reviewed-by: Martin K. 
Petersen Reviewed-by: Hannes Reinecke Reviewed-by: Johannes Thumshirn Reviewed-by: Daniel Wagner Signed-off-by: Keith Busch Signed-off-by: Christoph Hellwig --- drivers/nvme/host/core.c | 79 ++++++++++++++++++++++++++++++++----------- drivers/nvme/host/hwmon.c | 2 +- drivers/nvme/host/lightnvm.c | 4 +-- drivers/nvme/host/multipath.c | 2 +- drivers/nvme/host/nvme.h | 10 +++++- include/linux/nvme.h | 4 ++- 6 files changed, 76 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 892291dbee64..62b2cdc764da 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1370,8 +1370,8 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects = 0; if (ns) { - if (ctrl->effects) - effects = le32_to_cpu(ctrl->effects->iocs[opcode]); + if (ns->head->effects) + effects = le32_to_cpu(ns->head->effects->iocs[opcode]); if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC)) dev_warn(ctrl->device, "IO command:%02x has unhandled effects:%08x\n", @@ -2851,7 +2851,7 @@ out_unlock: return ret; } -int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, +int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, void *log, size_t size, u64 offset) { struct nvme_command c = { }; @@ -2865,27 +2865,55 @@ int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, c.get_log_page.numdu = cpu_to_le16(dwlen >> 16); c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset)); c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset)); + c.get_log_page.csi = csi; return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size); } -static int nvme_get_effects_log(struct nvme_ctrl *ctrl) +static struct nvme_cel *nvme_find_cel(struct nvme_ctrl *ctrl, u8 csi) { + struct nvme_cel *cel, *ret = NULL; + + spin_lock(&ctrl->lock); + list_for_each_entry(cel, &ctrl->cels, entry) { + if (cel->csi == csi) { + ret = cel; + break; + } + } + spin_unlock(&ctrl->lock); + + return ret; +} + +static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi, + struct nvme_effects_log **log) +{ + struct nvme_cel *cel = nvme_find_cel(ctrl, csi); int ret; - if (!ctrl->effects) - ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL); + if (cel) + goto out; - if (!ctrl->effects) - return 0; + cel = kzalloc(sizeof(*cel), GFP_KERNEL); + if (!cel) + return -ENOMEM; - ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0, - ctrl->effects, sizeof(*ctrl->effects), 0); + ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0, csi, + &cel->log, sizeof(cel->log), 0); if (ret) { - kfree(ctrl->effects); - ctrl->effects = NULL; + kfree(cel); + return ret; } - return ret; + + cel->csi = csi; + + spin_lock(&ctrl->lock); + list_add_tail(&cel->entry, &ctrl->cels); + spin_unlock(&ctrl->lock); +out: + *log = &cel->log; + return 0; } /* @@ -2918,7 +2946,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) } if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) { - ret = nvme_get_effects_log(ctrl); + ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects); if (ret < 0) goto out_free; } @@ -3551,6 +3579,13 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, goto out_cleanup_srcu; } + if (head->ids.csi) { + ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects); + if (ret) + goto out_cleanup_srcu; + } else + head->effects = ctrl->effects; + ret = nvme_mpath_alloc_disk(ctrl, head); if (ret) goto out_cleanup_srcu; @@ -3891,8 +3926,8 @@ static void 
nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl) * raced with us in reading the log page, which could cause us to miss * updates. */ - error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, log, - log_size, 0); + error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, + NVME_CSI_NVM, log, log_size, 0); if (error) dev_warn(ctrl->device, "reading changed ns log failed: %d\n", error); @@ -4036,8 +4071,8 @@ static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) if (!log) return; - if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log, - sizeof(*log), 0)) + if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM, + log, sizeof(*log), 0)) dev_warn(ctrl->device, "Get FW SLOT INFO log error\n"); kfree(log); } @@ -4174,11 +4209,16 @@ static void nvme_free_ctrl(struct device *dev) struct nvme_ctrl *ctrl = container_of(dev, struct nvme_ctrl, ctrl_device); struct nvme_subsystem *subsys = ctrl->subsys; + struct nvme_cel *cel, *next; if (subsys && ctrl->instance != subsys->instance) ida_simple_remove(&nvme_instance_ida, ctrl->instance); - kfree(ctrl->effects); + list_for_each_entry_safe(cel, next, &ctrl->cels, entry) { + list_del(&cel->entry); + kfree(cel); + } + nvme_mpath_uninit(ctrl); __free_page(ctrl->discard_page); @@ -4209,6 +4249,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, spin_lock_init(&ctrl->lock); mutex_init(&ctrl->scan_lock); INIT_LIST_HEAD(&ctrl->namespaces); + INIT_LIST_HEAD(&ctrl->cels); init_rwsem(&ctrl->namespaces_rwsem); ctrl->dev = dev; ctrl->ops = ops; diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c index 2e6477ed420f..23ba8bf678ae 100644 --- a/drivers/nvme/host/hwmon.c +++ b/drivers/nvme/host/hwmon.c @@ -62,7 +62,7 @@ static int nvme_hwmon_get_smart_log(struct nvme_hwmon_data *data) int ret; ret = nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0, - &data->log, sizeof(data->log), 0); + NVME_CSI_NVM, &data->log, sizeof(data->log), 0); return ret <= 0 ? 
ret : -EIO; } diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index 69608755d415..8e562d0f2c30 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -593,8 +593,8 @@ static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev, dev_meta_off = dev_meta; ret = nvme_get_log(ctrl, ns->head->ns_id, - NVME_NVM_LOG_REPORT_CHUNK, 0, dev_meta, len, - offset); + NVME_NVM_LOG_REPORT_CHUNK, 0, NVME_CSI_NVM, + dev_meta, len, offset); if (ret) { dev_err(ctrl->device, "Get REPORT CHUNK log error\n"); break; diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 5a37a595411e..74bad4e3d377 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -527,7 +527,7 @@ static int nvme_read_ana_log(struct nvme_ctrl *ctrl) int error; mutex_lock(&ctrl->ana_lock); - error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0, + error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0, NVME_CSI_NVM, ctrl->ana_log_buf, ctrl->ana_log_size, 0); if (error) { dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 5573159f714d..fe9424c7097f 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -191,6 +191,12 @@ struct nvme_fault_inject { #endif }; +struct nvme_cel { + struct list_head entry; + struct nvme_effects_log log; + u8 csi; +}; + struct nvme_ctrl { bool comp_seen; enum nvme_ctrl_state state; @@ -257,6 +263,7 @@ struct nvme_ctrl { unsigned long quirks; struct nvme_id_power_state psd[32]; struct nvme_effects_log *effects; + struct list_head cels; struct work_struct scan_work; struct work_struct async_event_work; struct delayed_work ka_work; @@ -359,6 +366,7 @@ struct nvme_ns_head { struct kref ref; bool shared; int instance; + struct nvme_effects_log *effects; #ifdef CONFIG_NVME_MULTIPATH struct gendisk *disk; struct bio_list requeue_list; @@ -561,7 +569,7 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl); int nvme_try_sched_reset(struct nvme_ctrl *ctrl); int nvme_delete_ctrl(struct nvme_ctrl *ctrl); -int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, +int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, void *log, size_t size, u64 offset); extern const struct attribute_group *nvme_ns_id_attr_groups[]; diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 81ffe5247505..95cd03e240a1 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -1101,7 +1101,9 @@ struct nvme_get_log_page_command { }; __le64 lpo; }; - __u32 rsvd14[2]; + __u8 rsvd14[3]; + __u8 csi; + __u32 rsvd15; }; struct nvme_directive_cmd { -- cgit v1.2.3 From 240e6ee272c07a2636dfc7d65f5bbb18377c49e5 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Mon, 29 Jun 2020 12:06:41 -0700 Subject: nvme: support for zoned namespaces MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add support for NVM Express Zoned Namespaces (ZNS) Command Set defined in NVM Express TP4053. Zoned namespaces are discovered based on their Command Set Identifier reported in the namespaces Namespace Identification Descriptor list. A successfully discovered Zoned Namespace will be registered with the block layer as a host managed zoned block device with Zone Append command support. A namespace that does not support append is not supported by the driver. Reviewed-by: Martin K. 
Petersen Reviewed-by: Johannes Thumshirn Reviewed-by: Hannes Reinecke Reviewed-by: Sagi Grimberg Reviewed-by: Javier González Reviewed-by: Himanshu Madhani Signed-off-by: Hans Holmberg Signed-off-by: Dmitry Fomichev Signed-off-by: Ajay Joshi Signed-off-by: Aravind Ramesh Signed-off-by: Niklas Cassel Signed-off-by: Matias Bjørling Signed-off-by: Damien Le Moal Signed-off-by: Keith Busch Signed-off-by: Christoph Hellwig --- block/Kconfig | 5 +- drivers/nvme/host/Makefile | 1 + drivers/nvme/host/core.c | 97 ++++++++++++++--- drivers/nvme/host/nvme.h | 39 +++++++ drivers/nvme/host/zns.c | 254 +++++++++++++++++++++++++++++++++++++++++++++ include/linux/nvme.h | 111 ++++++++++++++++++++ 6 files changed, 492 insertions(+), 15 deletions(-) create mode 100644 drivers/nvme/host/zns.c (limited to 'include') diff --git a/block/Kconfig b/block/Kconfig index 9357d7302398..bbad5e8bbffe 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -86,9 +86,10 @@ config BLK_DEV_ZONED select MQ_IOSCHED_DEADLINE help Block layer zoned block device support. This option enables - support for ZAC/ZBC host-managed and host-aware zoned block devices. + support for ZAC/ZBC/ZNS host-managed and host-aware zoned block + devices. - Say yes here if you have a ZAC or ZBC storage device. + Say yes here if you have a ZAC, ZBC, or ZNS storage device. config BLK_DEV_THROTTLING bool "Block layer bio throttling support" diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile index fc7b26be692d..d7f6a87687b8 100644 --- a/drivers/nvme/host/Makefile +++ b/drivers/nvme/host/Makefile @@ -13,6 +13,7 @@ nvme-core-y := core.o nvme-core-$(CONFIG_TRACING) += trace.o nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o nvme-core-$(CONFIG_NVM) += lightnvm.o +nvme-core-$(CONFIG_BLK_DEV_ZONED) += zns.o nvme-core-$(CONFIG_FAULT_INJECTION_DEBUG_FS) += fault_inject.o nvme-core-$(CONFIG_NVME_HWMON) += hwmon.o diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 62b2cdc764da..a8ee10a0cd32 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -89,7 +89,7 @@ static dev_t nvme_chr_devt; static struct class *nvme_class; static struct class *nvme_subsys_class; -static int nvme_revalidate_disk(struct gendisk *disk); +static int _nvme_revalidate_disk(struct gendisk *disk); static void nvme_put_subsystem(struct nvme_subsystem *subsys); static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, unsigned nsid); @@ -287,6 +287,10 @@ void nvme_complete_rq(struct request *req) nvme_retry_req(req); return; } + } else if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && + req_op(req) == REQ_OP_ZONE_APPEND) { + req->__sector = nvme_lba_to_sect(req->q->queuedata, + le64_to_cpu(nvme_req(req)->result.u64)); } nvme_trace_bio_complete(req, status); @@ -673,7 +677,8 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns, } static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, - struct request *req, struct nvme_command *cmnd) + struct request *req, struct nvme_command *cmnd, + enum nvme_opcode op) { struct nvme_ctrl *ctrl = ns->ctrl; u16 control = 0; @@ -687,7 +692,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, if (req->cmd_flags & REQ_RAHEAD) dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH; - cmnd->rw.opcode = (rq_data_dir(req) ? 
nvme_cmd_write : nvme_cmd_read); + cmnd->rw.opcode = op; cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id); cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); @@ -716,6 +721,8 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, case NVME_NS_DPS_PI_TYPE2: control |= NVME_RW_PRINFO_PRCHK_GUARD | NVME_RW_PRINFO_PRCHK_REF; + if (op == nvme_cmd_zone_append) + control |= NVME_RW_APPEND_PIREMAP; cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req)); break; } @@ -756,6 +763,19 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req, case REQ_OP_FLUSH: nvme_setup_flush(ns, cmd); break; + case REQ_OP_ZONE_RESET_ALL: + case REQ_OP_ZONE_RESET: + ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET); + break; + case REQ_OP_ZONE_OPEN: + ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN); + break; + case REQ_OP_ZONE_CLOSE: + ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE); + break; + case REQ_OP_ZONE_FINISH: + ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH); + break; case REQ_OP_WRITE_ZEROES: ret = nvme_setup_write_zeroes(ns, req, cmd); break; @@ -763,8 +783,13 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req, ret = nvme_setup_discard(ns, req, cmd); break; case REQ_OP_READ: + ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read); + break; case REQ_OP_WRITE: - ret = nvme_setup_rw(ns, req, cmd); + ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write); + break; + case REQ_OP_ZONE_APPEND: + ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append); break; default: WARN_ON_ONCE(1); @@ -1398,14 +1423,23 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, return effects; } -static void nvme_update_formats(struct nvme_ctrl *ctrl) +static void nvme_update_formats(struct nvme_ctrl *ctrl, u32 *effects) { struct nvme_ns *ns; down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) - if (ns->disk && nvme_revalidate_disk(ns->disk)) + if (ns->disk && _nvme_revalidate_disk(ns->disk)) nvme_set_queue_dying(ns); + else if (blk_queue_is_zoned(ns->disk->queue)) { + /* + * IO commands are required to fully revalidate a zoned + * device. Force the command effects to trigger rescan + * work so report zones can run in a context with + * unfrozen IO queues. + */ + *effects |= NVME_CMD_EFFECTS_NCC; + } up_read(&ctrl->namespaces_rwsem); } @@ -1417,7 +1451,7 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects) * this command. */ if (effects & NVME_CMD_EFFECTS_LBCC) - nvme_update_formats(ctrl); + nvme_update_formats(ctrl, &effects); if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { nvme_unfreeze(ctrl); nvme_mpath_unfreeze(ctrl->subsys); @@ -1532,7 +1566,7 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns, * Issue ioctl requests on the first available path. Note that unlike normal * block layer requests we will not retry failed request on another controller. 
*/ -static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk, +struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk, struct nvme_ns_head **head, int *srcu_idx) { #ifdef CONFIG_NVME_MULTIPATH @@ -1552,7 +1586,7 @@ static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk, return disk->private_data; } -static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx) +void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx) { if (head) srcu_read_unlock(&head->srcu, idx); @@ -1945,23 +1979,34 @@ static void nvme_update_disk_info(struct gendisk *disk, static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) { + unsigned lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK; struct nvme_ns *ns = disk->private_data; struct nvme_ctrl *ctrl = ns->ctrl; + int ret; u32 iob; /* * If identify namespace failed, use default 512 byte block size so * block layer can use before failing read/write for 0 capacity. */ - ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds; + ns->lba_shift = id->lbaf[lbaf].ds; if (ns->lba_shift == 0) ns->lba_shift = 9; switch (ns->head->ids.csi) { case NVME_CSI_NVM: break; + case NVME_CSI_ZNS: + ret = nvme_update_zone_info(disk, ns, lbaf); + if (ret) { + dev_warn(ctrl->device, + "failed to add zoned namespace:%u ret:%d\n", + ns->head->ns_id, ret); + return ret; + } + break; default: - dev_warn(ctrl->device, "unknown csi:%d ns:%d\n", + dev_warn(ctrl->device, "unknown csi:%u ns:%u\n", ns->head->ids.csi, ns->head->ns_id); return -ENODEV; } @@ -1973,7 +2018,7 @@ static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob)); ns->features = 0; - ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms); + ns->ms = le16_to_cpu(id->lbaf[lbaf].ms); /* the PI implementation requires metadata equal t10 pi tuple size */ if (ns->ms == sizeof(struct t10_pi_tuple)) ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK; @@ -2015,7 +2060,7 @@ static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) return 0; } -static int nvme_revalidate_disk(struct gendisk *disk) +static int _nvme_revalidate_disk(struct gendisk *disk) { struct nvme_ns *ns = disk->private_data; struct nvme_ctrl *ctrl = ns->ctrl; @@ -2063,6 +2108,28 @@ out: return ret; } +static int nvme_revalidate_disk(struct gendisk *disk) +{ + int ret; + + ret = _nvme_revalidate_disk(disk); + if (ret) + return ret; + +#ifdef CONFIG_BLK_DEV_ZONED + if (blk_queue_is_zoned(disk->queue)) { + struct nvme_ns *ns = disk->private_data; + struct nvme_ctrl *ctrl = ns->ctrl; + + ret = blk_revalidate_disk_zones(disk, NULL); + if (!ret) + blk_queue_max_zone_append_sectors(disk->queue, + ctrl->max_zone_append); + } +#endif + return ret; +} + static char nvme_pr_type(enum pr_type type) { switch (type) { @@ -2193,6 +2260,7 @@ static const struct block_device_operations nvme_fops = { .release = nvme_release, .getgeo = nvme_getgeo, .revalidate_disk= nvme_revalidate_disk, + .report_zones = nvme_report_zones, .pr_ops = &nvme_pr_ops, }; @@ -2219,6 +2287,7 @@ const struct block_device_operations nvme_ns_head_ops = { .ioctl = nvme_ioctl, .compat_ioctl = nvme_compat_ioctl, .getgeo = nvme_getgeo, + .report_zones = nvme_report_zones, .pr_ops = &nvme_pr_ops, }; #endif /* CONFIG_NVME_MULTIPATH */ @@ -4446,6 +4515,8 @@ static inline void _nvme_check_size(void) BUILD_BUG_ON(sizeof(struct nvme_command) != 64); BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE); BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 
NVME_IDENTIFY_DATA_SIZE); + BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE); + BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE); BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512); BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index fe9424c7097f..13ca90bcd352 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -238,6 +238,9 @@ struct nvme_ctrl { u32 max_hw_sectors; u32 max_segments; u32 max_integrity_segments; +#ifdef CONFIG_BLK_DEV_ZONED + u32 max_zone_append; +#endif u16 crdt[3]; u16 oncs; u16 oacs; @@ -404,6 +407,9 @@ struct nvme_ns { u16 sgs; u32 sws; u8 pi_type; +#ifdef CONFIG_BLK_DEV_ZONED + u64 zsze; +#endif unsigned long features; unsigned long flags; #define NVME_NS_REMOVING 0 @@ -571,6 +577,9 @@ int nvme_delete_ctrl(struct nvme_ctrl *ctrl); int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, void *log, size_t size, u64 offset); +struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk, + struct nvme_ns_head **head, int *srcu_idx); +void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx); extern const struct attribute_group *nvme_ns_id_attr_groups[]; extern const struct block_device_operations nvme_ns_head_ops; @@ -693,6 +702,36 @@ static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys) } #endif /* CONFIG_NVME_MULTIPATH */ +#ifdef CONFIG_BLK_DEV_ZONED +int nvme_update_zone_info(struct gendisk *disk, struct nvme_ns *ns, + unsigned lbaf); + +int nvme_report_zones(struct gendisk *disk, sector_t sector, + unsigned int nr_zones, report_zones_cb cb, void *data); + +blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req, + struct nvme_command *cmnd, + enum nvme_zone_mgmt_action action); +#else +#define nvme_report_zones NULL + +static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, + struct request *req, struct nvme_command *cmnd, + enum nvme_zone_mgmt_action action) +{ + return BLK_STS_NOTSUPP; +} + +static inline int nvme_update_zone_info(struct gendisk *disk, + struct nvme_ns *ns, + unsigned lbaf) +{ + dev_warn(ns->ctrl->device, + "Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n"); + return -EPROTONOSUPPORT; +} +#endif + #ifdef CONFIG_NVM int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node); void nvme_nvm_unregister(struct nvme_ns *ns); diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c new file mode 100644 index 000000000000..04e5b991c00c --- /dev/null +++ b/drivers/nvme/host/zns.c @@ -0,0 +1,254 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 Western Digital Corporation or its affiliates. 
+ */ + +#include +#include +#include "nvme.h" + +static int nvme_set_max_append(struct nvme_ctrl *ctrl) +{ + struct nvme_command c = { }; + struct nvme_id_ctrl_zns *id; + int status; + + id = kzalloc(sizeof(*id), GFP_KERNEL); + if (!id) + return -ENOMEM; + + c.identify.opcode = nvme_admin_identify; + c.identify.cns = NVME_ID_CNS_CS_CTRL; + c.identify.csi = NVME_CSI_ZNS; + + status = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id)); + if (status) { + kfree(id); + return status; + } + + if (id->zasl) + ctrl->max_zone_append = 1 << (id->zasl + 3); + else + ctrl->max_zone_append = ctrl->max_hw_sectors; + kfree(id); + return 0; +} + +int nvme_update_zone_info(struct gendisk *disk, struct nvme_ns *ns, + unsigned lbaf) +{ + struct nvme_effects_log *log = ns->head->effects; + struct request_queue *q = disk->queue; + struct nvme_command c = { }; + struct nvme_id_ns_zns *id; + int status; + + /* Driver requires zone append support */ + if (!(le32_to_cpu(log->iocs[nvme_cmd_zone_append]) & + NVME_CMD_EFFECTS_CSUPP)) { + dev_warn(ns->ctrl->device, + "append not supported for zoned namespace:%d\n", + ns->head->ns_id); + return -EINVAL; + } + + /* Lazily query controller append limit for the first zoned namespace */ + if (!ns->ctrl->max_zone_append) { + status = nvme_set_max_append(ns->ctrl); + if (status) + return status; + } + + id = kzalloc(sizeof(*id), GFP_KERNEL); + if (!id) + return -ENOMEM; + + c.identify.opcode = nvme_admin_identify; + c.identify.nsid = cpu_to_le32(ns->head->ns_id); + c.identify.cns = NVME_ID_CNS_CS_NS; + c.identify.csi = NVME_CSI_ZNS; + + status = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, id, sizeof(*id)); + if (status) + goto free_data; + + /* + * We currently do not handle devices requiring any of the zoned + * operation characteristics. 
+ */ + if (id->zoc) { + dev_warn(ns->ctrl->device, + "zone operations:%x not supported for namespace:%u\n", + le16_to_cpu(id->zoc), ns->head->ns_id); + status = -EINVAL; + goto free_data; + } + + ns->zsze = nvme_lba_to_sect(ns, le64_to_cpu(id->lbafe[lbaf].zsze)); + if (!is_power_of_2(ns->zsze)) { + dev_warn(ns->ctrl->device, + "invalid zone size:%llu for namespace:%u\n", + ns->zsze, ns->head->ns_id); + status = -EINVAL; + goto free_data; + } + + q->limits.zoned = BLK_ZONED_HM; + blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q); +free_data: + kfree(id); + return status; +} + +static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns, + unsigned int nr_zones, size_t *buflen) +{ + struct request_queue *q = ns->disk->queue; + size_t bufsize; + void *buf; + + const size_t min_bufsize = sizeof(struct nvme_zone_report) + + sizeof(struct nvme_zone_descriptor); + + nr_zones = min_t(unsigned int, nr_zones, + get_capacity(ns->disk) >> ilog2(ns->zsze)); + + bufsize = sizeof(struct nvme_zone_report) + + nr_zones * sizeof(struct nvme_zone_descriptor); + bufsize = min_t(size_t, bufsize, + queue_max_hw_sectors(q) << SECTOR_SHIFT); + bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT); + + while (bufsize >= min_bufsize) { + buf = __vmalloc(bufsize, GFP_KERNEL | __GFP_NORETRY); + if (buf) { + *buflen = bufsize; + return buf; + } + bufsize >>= 1; + } + return NULL; +} + +static int __nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector, + struct nvme_zone_report *report, + size_t buflen) +{ + struct nvme_command c = { }; + int ret; + + c.zmr.opcode = nvme_cmd_zone_mgmt_recv; + c.zmr.nsid = cpu_to_le32(ns->head->ns_id); + c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns, sector)); + c.zmr.numd = cpu_to_le32(nvme_bytes_to_numd(buflen)); + c.zmr.zra = NVME_ZRA_ZONE_REPORT; + c.zmr.zrasf = NVME_ZRASF_ZONE_REPORT_ALL; + c.zmr.pr = NVME_REPORT_ZONE_PARTIAL; + + ret = nvme_submit_sync_cmd(ns->queue, &c, report, buflen); + if (ret) + return ret; + + return le64_to_cpu(report->nr_zones); +} + +static int nvme_zone_parse_entry(struct nvme_ns *ns, + struct nvme_zone_descriptor *entry, + unsigned int idx, report_zones_cb cb, + void *data) +{ + struct blk_zone zone = { }; + + if ((entry->zt & 0xf) != NVME_ZONE_TYPE_SEQWRITE_REQ) { + dev_err(ns->ctrl->device, "invalid zone type %#x\n", + entry->zt); + return -EINVAL; + } + + zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ; + zone.cond = entry->zs >> 4; + zone.len = ns->zsze; + zone.capacity = nvme_lba_to_sect(ns, le64_to_cpu(entry->zcap)); + zone.start = nvme_lba_to_sect(ns, le64_to_cpu(entry->zslba)); + zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp)); + + return cb(&zone, idx, data); +} + +static int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector, + unsigned int nr_zones, report_zones_cb cb, void *data) +{ + struct nvme_zone_report *report; + int ret, zone_idx = 0; + unsigned int nz, i; + size_t buflen; + + report = nvme_zns_alloc_report_buffer(ns, nr_zones, &buflen); + if (!report) + return -ENOMEM; + + sector &= ~(ns->zsze - 1); + while (zone_idx < nr_zones && sector < get_capacity(ns->disk)) { + memset(report, 0, buflen); + ret = __nvme_ns_report_zones(ns, sector, report, buflen); + if (ret < 0) + goto out_free; + + nz = min_t(unsigned int, ret, nr_zones); + if (!nz) + break; + + for (i = 0; i < nz && zone_idx < nr_zones; i++) { + ret = nvme_zone_parse_entry(ns, &report->entries[i], + zone_idx, cb, data); + if (ret) + goto out_free; + zone_idx++; + } + + sector += ns->zsze * nz; + } + + if (zone_idx > 0) + ret = zone_idx; + else + ret = 
-EINVAL; +out_free: + kvfree(report); + return ret; +} + +int nvme_report_zones(struct gendisk *disk, sector_t sector, + unsigned int nr_zones, report_zones_cb cb, void *data) +{ + struct nvme_ns_head *head = NULL; + struct nvme_ns *ns; + int srcu_idx, ret; + + ns = nvme_get_ns_from_disk(disk, &head, &srcu_idx); + if (unlikely(!ns)) + return -EWOULDBLOCK; + + if (ns->head->ids.csi == NVME_CSI_ZNS) + ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data); + else + ret = -EINVAL; + nvme_put_ns_from_disk(head, srcu_idx); + + return ret; +} + +blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req, + struct nvme_command *c, enum nvme_zone_mgmt_action action) +{ + c->zms.opcode = nvme_cmd_zone_mgmt_send; + c->zms.nsid = cpu_to_le32(ns->head->ns_id); + c->zms.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); + c->zms.zsa = action; + + if (req_op(req) == REQ_OP_ZONE_RESET_ALL) + c->zms.select_all = 1; + + return BLK_STS_OK; +} diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 95cd03e240a1..1643005d21e3 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -374,6 +374,30 @@ struct nvme_id_ns { __u8 vs[3712]; }; +struct nvme_zns_lbafe { + __le64 zsze; + __u8 zdes; + __u8 rsvd9[7]; +}; + +struct nvme_id_ns_zns { + __le16 zoc; + __le16 ozcs; + __le32 mar; + __le32 mor; + __le32 rrl; + __le32 frl; + __u8 rsvd20[2796]; + struct nvme_zns_lbafe lbafe[16]; + __u8 rsvd3072[768]; + __u8 vs[256]; +}; + +struct nvme_id_ctrl_zns { + __u8 zasl; + __u8 rsvd1[4095]; +}; + enum { NVME_ID_CNS_NS = 0x00, NVME_ID_CNS_CTRL = 0x01, @@ -392,6 +416,7 @@ enum { enum { NVME_CSI_NVM = 0, + NVME_CSI_ZNS = 2, }; enum { @@ -532,6 +557,27 @@ struct nvme_ana_rsp_hdr { __le16 rsvd10[3]; }; +struct nvme_zone_descriptor { + __u8 zt; + __u8 zs; + __u8 za; + __u8 rsvd3[5]; + __le64 zcap; + __le64 zslba; + __le64 wp; + __u8 rsvd32[32]; +}; + +enum { + NVME_ZONE_TYPE_SEQWRITE_REQ = 0x2, +}; + +struct nvme_zone_report { + __le64 nr_zones; + __u8 resv8[56]; + struct nvme_zone_descriptor entries[]; +}; + enum { NVME_SMART_CRIT_SPARE = 1 << 0, NVME_SMART_CRIT_TEMPERATURE = 1 << 1, @@ -626,6 +672,9 @@ enum nvme_opcode { nvme_cmd_resv_report = 0x0e, nvme_cmd_resv_acquire = 0x11, nvme_cmd_resv_release = 0x15, + nvme_cmd_zone_mgmt_send = 0x79, + nvme_cmd_zone_mgmt_recv = 0x7a, + nvme_cmd_zone_append = 0x7d, }; #define nvme_opcode_name(opcode) { opcode, #opcode } @@ -764,6 +813,7 @@ struct nvme_rw_command { enum { NVME_RW_LR = 1 << 15, NVME_RW_FUA = 1 << 14, + NVME_RW_APPEND_PIREMAP = 1 << 9, NVME_RW_DSM_FREQ_UNSPEC = 0, NVME_RW_DSM_FREQ_TYPICAL = 1, NVME_RW_DSM_FREQ_RARE = 2, @@ -829,6 +879,53 @@ struct nvme_write_zeroes_cmd { __le16 appmask; }; +enum nvme_zone_mgmt_action { + NVME_ZONE_CLOSE = 0x1, + NVME_ZONE_FINISH = 0x2, + NVME_ZONE_OPEN = 0x3, + NVME_ZONE_RESET = 0x4, + NVME_ZONE_OFFLINE = 0x5, + NVME_ZONE_SET_DESC_EXT = 0x10, +}; + +struct nvme_zone_mgmt_send_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le32 cdw2[2]; + __le64 metadata; + union nvme_data_ptr dptr; + __le64 slba; + __le32 cdw12; + __u8 zsa; + __u8 select_all; + __u8 rsvd13[2]; + __le32 cdw14[2]; +}; + +struct nvme_zone_mgmt_recv_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le64 rsvd2[2]; + union nvme_data_ptr dptr; + __le64 slba; + __le32 numd; + __u8 zra; + __u8 zrasf; + __u8 pr; + __u8 rsvd13; + __le32 cdw14[2]; +}; + +enum { + NVME_ZRA_ZONE_REPORT = 0, + NVME_ZRASF_ZONE_REPORT_ALL = 0, + NVME_REPORT_ZONE_PARTIAL = 1, +}; + /* Features */ enum { @@ 
-1300,6 +1397,8 @@ struct nvme_command { struct nvme_format_cmd format; struct nvme_dsm_cmd dsm; struct nvme_write_zeroes_cmd write_zeroes; + struct nvme_zone_mgmt_send_cmd zms; + struct nvme_zone_mgmt_recv_cmd zmr; struct nvme_abort_cmd abort; struct nvme_get_log_page_command get_log_page; struct nvmf_common_command fabrics; @@ -1433,6 +1532,18 @@ enum { NVME_SC_DISCOVERY_RESTART = 0x190, NVME_SC_AUTH_REQUIRED = 0x191, + /* + * I/O Command Set Specific - Zoned commands: + */ + NVME_SC_ZONE_BOUNDARY_ERROR = 0x1b8, + NVME_SC_ZONE_FULL = 0x1b9, + NVME_SC_ZONE_READ_ONLY = 0x1ba, + NVME_SC_ZONE_OFFLINE = 0x1bb, + NVME_SC_ZONE_INVALID_WRITE = 0x1bc, + NVME_SC_ZONE_TOO_MANY_ACTIVE = 0x1bd, + NVME_SC_ZONE_TOO_MANY_OPEN = 0x1be, + NVME_SC_ZONE_INVALID_TRANSITION = 0x1bf, + /* * Media and Data Integrity Errors: */ -- cgit v1.2.3 From 457e7a135cbf0a0b5ed2717c192c0c57112c3b32 Mon Sep 17 00:00:00 2001 From: Satya Tangirala Date: Thu, 2 Jul 2020 01:56:04 +0000 Subject: fs: introduce SB_INLINECRYPT Introduce SB_INLINECRYPT, which is set by filesystems that wish to use blk-crypto for file content en/decryption. This flag maps to the '-o inlinecrypt' mount option which multiple filesystems will implement, and code in fs/crypto/ needs to be able to check for this mount option in a filesystem-independent way. Signed-off-by: Satya Tangirala Reviewed-by: Jaegeuk Kim Reviewed-by: Eric Biggers Reviewed-by: Theodore Ts'o Link: https://lore.kernel.org/r/20200702015607.1215430-2-satyat@google.com Signed-off-by: Eric Biggers --- include/linux/fs.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/fs.h b/include/linux/fs.h index 3f881a892ea7..b5e07fcdd11d 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1380,6 +1380,7 @@ extern int send_sigurg(struct fown_struct *fown); #define SB_NODIRATIME 2048 /* Do not update directory access times */ #define SB_SILENT 32768 #define SB_POSIXACL (1<<16) /* VFS does not apply the umask */ +#define SB_INLINECRYPT (1<<17) /* Use blk-crypto for encrypted files */ #define SB_KERNMOUNT (1<<22) /* this is a kern_mount call */ #define SB_I_VERSION (1<<23) /* Update inode I_version field */ #define SB_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */ -- cgit v1.2.3 From 5fee36095cda45d34555aed3a2e8973b80cd6bf8 Mon Sep 17 00:00:00 2001 From: Satya Tangirala Date: Thu, 2 Jul 2020 01:56:05 +0000 Subject: fscrypt: add inline encryption support Add support for inline encryption to fs/crypto/. With "inline encryption", the block layer handles the decryption/encryption as part of the bio, instead of the filesystem doing the crypto itself via Linux's crypto API. This model is needed in order to take advantage of the inline encryption hardware present on most modern mobile SoCs. To use inline encryption, the filesystem needs to be mounted with '-o inlinecrypt'. Blk-crypto will then be used instead of the traditional filesystem-layer crypto whenever possible to encrypt the contents of any encrypted files in that filesystem. Fscrypt still provides the key and IV to use, and the actual ciphertext on-disk is still the same; therefore it's testable using the existing fscrypt ciphertext verification tests. Note that since blk-crypto has a fallback to Linux's crypto API, and also supports all the encryption modes currently supported by fscrypt, this feature is usable and testable even without actual inline encryption hardware. 
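As a rough, illustrative sketch (not part of this patch), a filesystem that opts in with '-o inlinecrypt' would wire its write path to the new helpers along these lines. my_fs_write_block() and its locals are invented names for illustration only; the fscrypt_*() calls are the interfaces added by this series and the bio calls are the existing block-layer API:

/*
 * Hedged example, not taken from any filesystem: shows where the new
 * fscrypt inline-crypto helpers would sit in a write path.  The function
 * name and arguments are hypothetical.
 */
static int my_fs_write_block(struct inode *inode, struct page *page,
			     pgoff_t lblk, sector_t pblk)
{
	struct bio *bio;
	int err;

	/* GFP_NOFS includes __GFP_DIRECT_RECLAIM, so this cannot fail. */
	bio = bio_alloc(GFP_NOFS, 1);

	/* Attach the key and starting DUN before any data is added. */
	fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);
	bio_set_dev(bio, inode->i_sb->s_bdev);
	bio->bi_iter.bi_sector = pblk << (inode->i_blkbits - SECTOR_SHIFT);
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	bio_add_page(bio, page, i_blocksize(inode), 0);

	/*
	 * If more blocks were to be added, fscrypt_mergeable_bio() would
	 * have to be checked first so that the bio only ever carries
	 * contiguous data unit numbers for a single key; otherwise this
	 * bio must be submitted and a new one started.
	 */
	err = submit_bio_wait(bio);
	bio_put(bio);
	return err;
}

The point of the sketch is the ordering: fscrypt_set_bio_crypt_ctx() must run on a freshly allocated bio before pages are added, and fscrypt_mergeable_bio() gates whether further blocks may share that bio.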
Per-filesystem changes will be needed to set encryption contexts when submitting bios and to implement the 'inlinecrypt' mount option. This patch just adds the common code. Signed-off-by: Satya Tangirala Reviewed-by: Jaegeuk Kim Reviewed-by: Eric Biggers Reviewed-by: Theodore Ts'o Link: https://lore.kernel.org/r/20200702015607.1215430-3-satyat@google.com Co-developed-by: Eric Biggers Signed-off-by: Eric Biggers --- Documentation/filesystems/fscrypt.rst | 3 + fs/crypto/Kconfig | 6 + fs/crypto/Makefile | 1 + fs/crypto/bio.c | 51 +++++ fs/crypto/crypto.c | 2 +- fs/crypto/fname.c | 4 +- fs/crypto/fscrypt_private.h | 115 ++++++++++- fs/crypto/inline_crypt.c | 364 ++++++++++++++++++++++++++++++++++ fs/crypto/keyring.c | 6 +- fs/crypto/keysetup.c | 70 +++++-- fs/crypto/keysetup_v1.c | 16 +- include/linux/fscrypt.h | 82 ++++++++ 12 files changed, 673 insertions(+), 47 deletions(-) create mode 100644 fs/crypto/inline_crypt.c (limited to 'include') diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst index f517af8ec11c..f5d8b0303ddf 100644 --- a/Documentation/filesystems/fscrypt.rst +++ b/Documentation/filesystems/fscrypt.rst @@ -1255,6 +1255,7 @@ f2fs encryption using `kvm-xfstests `_:: kvm-xfstests -c ext4,f2fs -g encrypt + kvm-xfstests -c ext4,f2fs -g encrypt -m inlinecrypt UBIFS encryption can also be tested this way, but it should be done in a separate command, and it takes some time for kvm-xfstests to set up @@ -1276,6 +1277,7 @@ This tests the encrypted I/O paths more thoroughly. To do this with kvm-xfstests, use the "encrypt" filesystem configuration:: kvm-xfstests -c ext4/encrypt,f2fs/encrypt -g auto + kvm-xfstests -c ext4/encrypt,f2fs/encrypt -g auto -m inlinecrypt Because this runs many more tests than "-g encrypt" does, it takes much longer to run; so also consider using `gce-xfstests @@ -1283,3 +1285,4 @@ much longer to run; so also consider using `gce-xfstests instead of kvm-xfstests:: gce-xfstests -c ext4/encrypt,f2fs/encrypt -g auto + gce-xfstests -c ext4/encrypt,f2fs/encrypt -g auto -m inlinecrypt diff --git a/fs/crypto/Kconfig b/fs/crypto/Kconfig index 8046d7c7a3e9..f1f11a6228eb 100644 --- a/fs/crypto/Kconfig +++ b/fs/crypto/Kconfig @@ -24,3 +24,9 @@ config FS_ENCRYPTION_ALGS select CRYPTO_SHA256 select CRYPTO_SHA512 select CRYPTO_XTS + +config FS_ENCRYPTION_INLINE_CRYPT + bool "Enable fscrypt to use inline crypto" + depends on FS_ENCRYPTION && BLK_INLINE_ENCRYPTION + help + Enable fscrypt to use inline encryption hardware if available. diff --git a/fs/crypto/Makefile b/fs/crypto/Makefile index 232e2bb5a337..652c7180ec6d 100644 --- a/fs/crypto/Makefile +++ b/fs/crypto/Makefile @@ -11,3 +11,4 @@ fscrypto-y := crypto.o \ policy.o fscrypto-$(CONFIG_BLOCK) += bio.o +fscrypto-$(CONFIG_FS_ENCRYPTION_INLINE_CRYPT) += inline_crypt.o diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c index 4fa18fff9c4e..b048a0e38516 100644 --- a/fs/crypto/bio.c +++ b/fs/crypto/bio.c @@ -41,6 +41,53 @@ void fscrypt_decrypt_bio(struct bio *bio) } EXPORT_SYMBOL(fscrypt_decrypt_bio); +static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode, + pgoff_t lblk, sector_t pblk, + unsigned int len) +{ + const unsigned int blockbits = inode->i_blkbits; + const unsigned int blocks_per_page = 1 << (PAGE_SHIFT - blockbits); + struct bio *bio; + int ret, err = 0; + int num_pages = 0; + + /* This always succeeds since __GFP_DIRECT_RECLAIM is set. 
*/ + bio = bio_alloc(GFP_NOFS, BIO_MAX_PAGES); + + while (len) { + unsigned int blocks_this_page = min(len, blocks_per_page); + unsigned int bytes_this_page = blocks_this_page << blockbits; + + if (num_pages == 0) { + fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS); + bio_set_dev(bio, inode->i_sb->s_bdev); + bio->bi_iter.bi_sector = + pblk << (blockbits - SECTOR_SHIFT); + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); + } + ret = bio_add_page(bio, ZERO_PAGE(0), bytes_this_page, 0); + if (WARN_ON(ret != bytes_this_page)) { + err = -EIO; + goto out; + } + num_pages++; + len -= blocks_this_page; + lblk += blocks_this_page; + pblk += blocks_this_page; + if (num_pages == BIO_MAX_PAGES || !len || + !fscrypt_mergeable_bio(bio, inode, lblk)) { + err = submit_bio_wait(bio); + if (err) + goto out; + bio_reset(bio); + num_pages = 0; + } + } +out: + bio_put(bio); + return err; +} + /** * fscrypt_zeroout_range() - zero out a range of blocks in an encrypted file * @inode: the file's inode @@ -75,6 +122,10 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, if (len == 0) return 0; + if (fscrypt_inode_uses_inline_crypto(inode)) + return fscrypt_zeroout_range_inline_crypt(inode, lblk, pblk, + len); + BUILD_BUG_ON(ARRAY_SIZE(pages) > BIO_MAX_PAGES); nr_pages = min_t(unsigned int, ARRAY_SIZE(pages), (len + blocks_per_page - 1) >> blocks_per_page_bits); diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index ed015cb66c7c..a52cf32733ab 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -100,7 +100,7 @@ int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw, DECLARE_CRYPTO_WAIT(wait); struct scatterlist dst, src; struct fscrypt_info *ci = inode->i_crypt_info; - struct crypto_skcipher *tfm = ci->ci_ctfm; + struct crypto_skcipher *tfm = ci->ci_enc_key.tfm; int res = 0; if (WARN_ON_ONCE(len <= 0)) diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c index 83ca5f1e7934..d828e3df898b 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c @@ -115,7 +115,7 @@ int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname, struct skcipher_request *req = NULL; DECLARE_CRYPTO_WAIT(wait); const struct fscrypt_info *ci = inode->i_crypt_info; - struct crypto_skcipher *tfm = ci->ci_ctfm; + struct crypto_skcipher *tfm = ci->ci_enc_key.tfm; union fscrypt_iv iv; struct scatterlist sg; int res; @@ -171,7 +171,7 @@ static int fname_decrypt(const struct inode *inode, DECLARE_CRYPTO_WAIT(wait); struct scatterlist src_sg, dst_sg; const struct fscrypt_info *ci = inode->i_crypt_info; - struct crypto_skcipher *tfm = ci->ci_ctfm; + struct crypto_skcipher *tfm = ci->ci_enc_key.tfm; union fscrypt_iv iv; int res; diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index eb7fcd2b7fb8..5bb40d0109c8 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -14,6 +14,7 @@ #include #include #include +#include #define CONST_STRLEN(str) (sizeof(str) - 1) @@ -166,6 +167,20 @@ struct fscrypt_symlink_data { char encrypted_path[1]; } __packed; +/** + * struct fscrypt_prepared_key - a key prepared for actual encryption/decryption + * @tfm: crypto API transform object + * @blk_key: key for blk-crypto + * + * Normally only one of the fields will be non-NULL. 
+ */ +struct fscrypt_prepared_key { + struct crypto_skcipher *tfm; +#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT + struct fscrypt_blk_crypto_key *blk_key; +#endif +}; + /* * fscrypt_info - the "encryption key" for an inode * @@ -175,12 +190,20 @@ struct fscrypt_symlink_data { */ struct fscrypt_info { - /* The actual crypto transform used for encryption and decryption */ - struct crypto_skcipher *ci_ctfm; + /* The key in a form prepared for actual encryption/decryption */ + struct fscrypt_prepared_key ci_enc_key; - /* True if the key should be freed when this fscrypt_info is freed */ + /* True if ci_enc_key should be freed when this fscrypt_info is freed */ bool ci_owns_key; +#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT + /* + * True if this inode will use inline encryption (blk-crypto) instead of + * the traditional filesystem-layer encryption. + */ + bool ci_inlinecrypt; +#endif + /* * Encryption mode used for this inode. It corresponds to either the * contents or filenames encryption mode, depending on the inode type. @@ -205,7 +228,7 @@ struct fscrypt_info { /* * If non-NULL, then encryption is done using the master key directly - * and ci_ctfm will equal ci_direct_key->dk_ctfm. + * and ci_enc_key will equal ci_direct_key->dk_key. */ struct fscrypt_direct_key *ci_direct_key; @@ -260,6 +283,7 @@ union fscrypt_iv { u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE]; }; u8 raw[FSCRYPT_MAX_IV_SIZE]; + __le64 dun[FSCRYPT_MAX_IV_SIZE / sizeof(__le64)]; }; void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, @@ -302,6 +326,75 @@ int fscrypt_hkdf_expand(const struct fscrypt_hkdf *hkdf, u8 context, void fscrypt_destroy_hkdf(struct fscrypt_hkdf *hkdf); +/* inline_crypt.c */ +#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT +int fscrypt_select_encryption_impl(struct fscrypt_info *ci); + +static inline bool +fscrypt_using_inline_encryption(const struct fscrypt_info *ci) +{ + return ci->ci_inlinecrypt; +} + +int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, + const u8 *raw_key, + const struct fscrypt_info *ci); + +void fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key); + +/* + * Check whether the crypto transform or blk-crypto key has been allocated in + * @prep_key, depending on which encryption implementation the file will use. + */ +static inline bool +fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key, + const struct fscrypt_info *ci) +{ + /* + * The READ_ONCE() here pairs with the smp_store_release() in + * fscrypt_prepare_key(). (This only matters for the per-mode keys, + * which are shared by multiple inodes.) 
+ */ + if (fscrypt_using_inline_encryption(ci)) + return READ_ONCE(prep_key->blk_key) != NULL; + return READ_ONCE(prep_key->tfm) != NULL; +} + +#else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ + +static inline int fscrypt_select_encryption_impl(struct fscrypt_info *ci) +{ + return 0; +} + +static inline bool +fscrypt_using_inline_encryption(const struct fscrypt_info *ci) +{ + return false; +} + +static inline int +fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, + const u8 *raw_key, + const struct fscrypt_info *ci) +{ + WARN_ON(1); + return -EOPNOTSUPP; +} + +static inline void +fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key) +{ +} + +static inline bool +fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key, + const struct fscrypt_info *ci) +{ + return READ_ONCE(prep_key->tfm) != NULL; +} +#endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ + /* keyring.c */ /* @@ -395,9 +488,9 @@ struct fscrypt_master_key { * Per-mode encryption keys for the various types of encryption policies * that use them. Allocated and derived on-demand. */ - struct crypto_skcipher *mk_direct_keys[__FSCRYPT_MODE_MAX + 1]; - struct crypto_skcipher *mk_iv_ino_lblk_64_keys[__FSCRYPT_MODE_MAX + 1]; - struct crypto_skcipher *mk_iv_ino_lblk_32_keys[__FSCRYPT_MODE_MAX + 1]; + struct fscrypt_prepared_key mk_direct_keys[__FSCRYPT_MODE_MAX + 1]; + struct fscrypt_prepared_key mk_iv_ino_lblk_64_keys[__FSCRYPT_MODE_MAX + 1]; + struct fscrypt_prepared_key mk_iv_ino_lblk_32_keys[__FSCRYPT_MODE_MAX + 1]; /* Hash key for inode numbers. Initialized only when needed. */ siphash_key_t mk_ino_hash_key; @@ -461,13 +554,15 @@ struct fscrypt_mode { int keysize; int ivsize; int logged_impl_name; + enum blk_crypto_mode_num blk_crypto_mode; }; extern struct fscrypt_mode fscrypt_modes[]; -struct crypto_skcipher *fscrypt_allocate_skcipher(struct fscrypt_mode *mode, - const u8 *raw_key, - const struct inode *inode); +int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key, + const u8 *raw_key, const struct fscrypt_info *ci); + +void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key); int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, const u8 *raw_key); diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c new file mode 100644 index 000000000000..d7aecadf33c1 --- /dev/null +++ b/fs/crypto/inline_crypt.c @@ -0,0 +1,364 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Inline encryption support for fscrypt + * + * Copyright 2019 Google LLC + */ + +/* + * With "inline encryption", the block layer handles the decryption/encryption + * as part of the bio, instead of the filesystem doing the crypto itself via + * crypto API. See Documentation/block/inline-encryption.rst. fscrypt still + * provides the key and IV to use. 
+ */ + +#include +#include +#include +#include + +#include "fscrypt_private.h" + +struct fscrypt_blk_crypto_key { + struct blk_crypto_key base; + int num_devs; + struct request_queue *devs[]; +}; + +static int fscrypt_get_num_devices(struct super_block *sb) +{ + if (sb->s_cop->get_num_devices) + return sb->s_cop->get_num_devices(sb); + return 1; +} + +static void fscrypt_get_devices(struct super_block *sb, int num_devs, + struct request_queue **devs) +{ + if (num_devs == 1) + devs[0] = bdev_get_queue(sb->s_bdev); + else + sb->s_cop->get_devices(sb, devs); +} + +static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_info *ci) +{ + struct super_block *sb = ci->ci_inode->i_sb; + unsigned int flags = fscrypt_policy_flags(&ci->ci_policy); + int ino_bits = 64, lblk_bits = 64; + + if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) + return offsetofend(union fscrypt_iv, nonce); + + if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) + return sizeof(__le64); + + if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) + return sizeof(__le32); + + /* Default case: IVs are just the file logical block number */ + if (sb->s_cop->get_ino_and_lblk_bits) + sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits); + return DIV_ROUND_UP(lblk_bits, 8); +} + +/* Enable inline encryption for this file if supported. */ +int fscrypt_select_encryption_impl(struct fscrypt_info *ci) +{ + const struct inode *inode = ci->ci_inode; + struct super_block *sb = inode->i_sb; + struct blk_crypto_config crypto_cfg; + int num_devs; + struct request_queue **devs; + int i; + + /* The file must need contents encryption, not filenames encryption */ + if (!fscrypt_needs_contents_encryption(inode)) + return 0; + + /* The crypto mode must have a blk-crypto counterpart */ + if (ci->ci_mode->blk_crypto_mode == BLK_ENCRYPTION_MODE_INVALID) + return 0; + + /* The filesystem must be mounted with -o inlinecrypt */ + if (!(sb->s_flags & SB_INLINECRYPT)) + return 0; + + /* + * When a page contains multiple logically contiguous filesystem blocks, + * some filesystem code only calls fscrypt_mergeable_bio() for the first + * block in the page. This is fine for most of fscrypt's IV generation + * strategies, where contiguous blocks imply contiguous IVs. But it + * doesn't work with IV_INO_LBLK_32. For now, simply exclude + * IV_INO_LBLK_32 with blocksize != PAGE_SIZE from inline encryption. + */ + if ((fscrypt_policy_flags(&ci->ci_policy) & + FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) && + sb->s_blocksize != PAGE_SIZE) + return 0; + + /* + * On all the filesystem's devices, blk-crypto must support the crypto + * configuration that the file would use. 
+ */ + crypto_cfg.crypto_mode = ci->ci_mode->blk_crypto_mode; + crypto_cfg.data_unit_size = sb->s_blocksize; + crypto_cfg.dun_bytes = fscrypt_get_dun_bytes(ci); + num_devs = fscrypt_get_num_devices(sb); + devs = kmalloc_array(num_devs, sizeof(*devs), GFP_NOFS); + if (!devs) + return -ENOMEM; + fscrypt_get_devices(sb, num_devs, devs); + + for (i = 0; i < num_devs; i++) { + if (!blk_crypto_config_supported(devs[i], &crypto_cfg)) + goto out_free_devs; + } + + ci->ci_inlinecrypt = true; +out_free_devs: + kfree(devs); + + return 0; +} + +int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, + const u8 *raw_key, + const struct fscrypt_info *ci) +{ + const struct inode *inode = ci->ci_inode; + struct super_block *sb = inode->i_sb; + enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode; + int num_devs = fscrypt_get_num_devices(sb); + int queue_refs = 0; + struct fscrypt_blk_crypto_key *blk_key; + int err; + int i; + unsigned int flags; + + blk_key = kzalloc(struct_size(blk_key, devs, num_devs), GFP_NOFS); + if (!blk_key) + return -ENOMEM; + + blk_key->num_devs = num_devs; + fscrypt_get_devices(sb, num_devs, blk_key->devs); + + err = blk_crypto_init_key(&blk_key->base, raw_key, crypto_mode, + fscrypt_get_dun_bytes(ci), sb->s_blocksize); + if (err) { + fscrypt_err(inode, "error %d initializing blk-crypto key", err); + goto fail; + } + + /* + * We have to start using blk-crypto on all the filesystem's devices. + * We also have to save all the request_queue's for later so that the + * key can be evicted from them. This is needed because some keys + * aren't destroyed until after the filesystem was already unmounted + * (namely, the per-mode keys in struct fscrypt_master_key). + */ + for (i = 0; i < num_devs; i++) { + if (!blk_get_queue(blk_key->devs[i])) { + fscrypt_err(inode, "couldn't get request_queue"); + err = -EAGAIN; + goto fail; + } + queue_refs++; + + flags = memalloc_nofs_save(); + err = blk_crypto_start_using_key(&blk_key->base, + blk_key->devs[i]); + memalloc_nofs_restore(flags); + if (err) { + fscrypt_err(inode, + "error %d starting to use blk-crypto", err); + goto fail; + } + } + /* + * Pairs with READ_ONCE() in fscrypt_is_key_prepared(). (Only matters + * for the per-mode keys, which are shared by multiple inodes.) 
+ */ + smp_store_release(&prep_key->blk_key, blk_key); + return 0; + +fail: + for (i = 0; i < queue_refs; i++) + blk_put_queue(blk_key->devs[i]); + kzfree(blk_key); + return err; +} + +void fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key) +{ + struct fscrypt_blk_crypto_key *blk_key = prep_key->blk_key; + int i; + + if (blk_key) { + for (i = 0; i < blk_key->num_devs; i++) { + blk_crypto_evict_key(blk_key->devs[i], &blk_key->base); + blk_put_queue(blk_key->devs[i]); + } + kzfree(blk_key); + } +} + +bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode) +{ + return inode->i_crypt_info->ci_inlinecrypt; +} +EXPORT_SYMBOL_GPL(__fscrypt_inode_uses_inline_crypto); + +static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num, + u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE]) +{ + union fscrypt_iv iv; + int i; + + fscrypt_generate_iv(&iv, lblk_num, ci); + + BUILD_BUG_ON(FSCRYPT_MAX_IV_SIZE > BLK_CRYPTO_MAX_IV_SIZE); + memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE); + for (i = 0; i < ci->ci_mode->ivsize/sizeof(dun[0]); i++) + dun[i] = le64_to_cpu(iv.dun[i]); +} + +/** + * fscrypt_set_bio_crypt_ctx() - prepare a file contents bio for inline crypto + * @bio: a bio which will eventually be submitted to the file + * @inode: the file's inode + * @first_lblk: the first file logical block number in the I/O + * @gfp_mask: memory allocation flags - these must be a waiting mask so that + * bio_crypt_set_ctx can't fail. + * + * If the contents of the file should be encrypted (or decrypted) with inline + * encryption, then assign the appropriate encryption context to the bio. + * + * Normally the bio should be newly allocated (i.e. no pages added yet), as + * otherwise fscrypt_mergeable_bio() won't work as intended. + * + * The encryption context will be freed automatically when the bio is freed. + */ +void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode, + u64 first_lblk, gfp_t gfp_mask) +{ + const struct fscrypt_info *ci = inode->i_crypt_info; + u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; + + if (!fscrypt_inode_uses_inline_crypto(inode)) + return; + + fscrypt_generate_dun(ci, first_lblk, dun); + bio_crypt_set_ctx(bio, &ci->ci_enc_key.blk_key->base, dun, gfp_mask); +} +EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx); + +/* Extract the inode and logical block number from a buffer_head. */ +static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh, + const struct inode **inode_ret, + u64 *lblk_num_ret) +{ + struct page *page = bh->b_page; + const struct address_space *mapping; + const struct inode *inode; + + /* + * The ext4 journal (jbd2) can submit a buffer_head it directly created + * for a non-pagecache page. fscrypt doesn't care about these. + */ + mapping = page_mapping(page); + if (!mapping) + return false; + inode = mapping->host; + + *inode_ret = inode; + *lblk_num_ret = ((u64)page->index << (PAGE_SHIFT - inode->i_blkbits)) + + (bh_offset(bh) >> inode->i_blkbits); + return true; +} + +/** + * fscrypt_set_bio_crypt_ctx_bh() - prepare a file contents bio for inline + * crypto + * @bio: a bio which will eventually be submitted to the file + * @first_bh: the first buffer_head for which I/O will be submitted + * @gfp_mask: memory allocation flags + * + * Same as fscrypt_set_bio_crypt_ctx(), except this takes a buffer_head instead + * of an inode and block number directly. 
+ */ +void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio, + const struct buffer_head *first_bh, + gfp_t gfp_mask) +{ + const struct inode *inode; + u64 first_lblk; + + if (bh_get_inode_and_lblk_num(first_bh, &inode, &first_lblk)) + fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, gfp_mask); +} +EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh); + +/** + * fscrypt_mergeable_bio() - test whether data can be added to a bio + * @bio: the bio being built up + * @inode: the inode for the next part of the I/O + * @next_lblk: the next file logical block number in the I/O + * + * When building a bio which may contain data which should undergo inline + * encryption (or decryption) via fscrypt, filesystems should call this function + * to ensure that the resulting bio contains only contiguous data unit numbers. + * This will return false if the next part of the I/O cannot be merged with the + * bio because either the encryption key would be different or the encryption + * data unit numbers would be discontiguous. + * + * fscrypt_set_bio_crypt_ctx() must have already been called on the bio. + * + * Return: true iff the I/O is mergeable + */ +bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode, + u64 next_lblk) +{ + const struct bio_crypt_ctx *bc = bio->bi_crypt_context; + u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; + + if (!!bc != fscrypt_inode_uses_inline_crypto(inode)) + return false; + if (!bc) + return true; + + /* + * Comparing the key pointers is good enough, as all I/O for each key + * uses the same pointer. I.e., there's currently no need to support + * merging requests where the keys are the same but the pointers differ. + */ + if (bc->bc_key != &inode->i_crypt_info->ci_enc_key.blk_key->base) + return false; + + fscrypt_generate_dun(inode->i_crypt_info, next_lblk, next_dun); + return bio_crypt_dun_is_contiguous(bc, bio->bi_iter.bi_size, next_dun); +} +EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio); + +/** + * fscrypt_mergeable_bio_bh() - test whether data can be added to a bio + * @bio: the bio being built up + * @next_bh: the next buffer_head for which I/O will be submitted + * + * Same as fscrypt_mergeable_bio(), except this takes a buffer_head instead of + * an inode and block number directly. 
+ * + * Return: true iff the I/O is mergeable + */ +bool fscrypt_mergeable_bio_bh(struct bio *bio, + const struct buffer_head *next_bh) +{ + const struct inode *inode; + u64 next_lblk; + + if (!bh_get_inode_and_lblk_num(next_bh, &inode, &next_lblk)) + return !bio->bi_crypt_context; + + return fscrypt_mergeable_bio(bio, inode, next_lblk); +} +EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh); diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c index e24eb48bfbe1..7f8ac61a20d6 100644 --- a/fs/crypto/keyring.c +++ b/fs/crypto/keyring.c @@ -45,9 +45,9 @@ static void free_master_key(struct fscrypt_master_key *mk) wipe_master_key_secret(&mk->mk_secret); for (i = 0; i <= __FSCRYPT_MODE_MAX; i++) { - crypto_free_skcipher(mk->mk_direct_keys[i]); - crypto_free_skcipher(mk->mk_iv_ino_lblk_64_keys[i]); - crypto_free_skcipher(mk->mk_iv_ino_lblk_32_keys[i]); + fscrypt_destroy_prepared_key(&mk->mk_direct_keys[i]); + fscrypt_destroy_prepared_key(&mk->mk_iv_ino_lblk_64_keys[i]); + fscrypt_destroy_prepared_key(&mk->mk_iv_ino_lblk_32_keys[i]); } key_put(mk->mk_users); diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c index 1129adfa097d..99d3e0d07fc6 100644 --- a/fs/crypto/keysetup.c +++ b/fs/crypto/keysetup.c @@ -19,6 +19,7 @@ struct fscrypt_mode fscrypt_modes[] = { .cipher_str = "xts(aes)", .keysize = 64, .ivsize = 16, + .blk_crypto_mode = BLK_ENCRYPTION_MODE_AES_256_XTS, }, [FSCRYPT_MODE_AES_256_CTS] = { .friendly_name = "AES-256-CTS-CBC", @@ -31,6 +32,7 @@ struct fscrypt_mode fscrypt_modes[] = { .cipher_str = "essiv(cbc(aes),sha256)", .keysize = 16, .ivsize = 16, + .blk_crypto_mode = BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV, }, [FSCRYPT_MODE_AES_128_CTS] = { .friendly_name = "AES-128-CTS-CBC", @@ -43,6 +45,7 @@ struct fscrypt_mode fscrypt_modes[] = { .cipher_str = "adiantum(xchacha12,aes)", .keysize = 32, .ivsize = 32, + .blk_crypto_mode = BLK_ENCRYPTION_MODE_ADIANTUM, }, }; @@ -64,9 +67,9 @@ select_encryption_mode(const union fscrypt_policy *policy, } /* Create a symmetric cipher object for the given encryption mode and key */ -struct crypto_skcipher *fscrypt_allocate_skcipher(struct fscrypt_mode *mode, - const u8 *raw_key, - const struct inode *inode) +static struct crypto_skcipher * +fscrypt_allocate_skcipher(struct fscrypt_mode *mode, const u8 *raw_key, + const struct inode *inode) { struct crypto_skcipher *tfm; int err; @@ -109,30 +112,54 @@ err_free_tfm: return ERR_PTR(err); } -/* Given a per-file encryption key, set up the file's crypto transform object */ -int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, const u8 *raw_key) +/* + * Prepare the crypto transform object or blk-crypto key in @prep_key, given the + * raw key, encryption mode, and flag indicating which encryption implementation + * (fs-layer or blk-crypto) will be used. + */ +int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key, + const u8 *raw_key, const struct fscrypt_info *ci) { struct crypto_skcipher *tfm; + if (fscrypt_using_inline_encryption(ci)) + return fscrypt_prepare_inline_crypt_key(prep_key, raw_key, ci); + tfm = fscrypt_allocate_skcipher(ci->ci_mode, raw_key, ci->ci_inode); if (IS_ERR(tfm)) return PTR_ERR(tfm); + /* + * Pairs with READ_ONCE() in fscrypt_is_key_prepared(). (Only matters + * for the per-mode keys, which are shared by multiple inodes.) + */ + smp_store_release(&prep_key->tfm, tfm); + return 0; +} + +/* Destroy a crypto transform object and/or blk-crypto key. 
*/ +void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key) +{ + crypto_free_skcipher(prep_key->tfm); + fscrypt_destroy_inline_crypt_key(prep_key); +} - ci->ci_ctfm = tfm; +/* Given a per-file encryption key, set up the file's crypto transform object */ +int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, const u8 *raw_key) +{ ci->ci_owns_key = true; - return 0; + return fscrypt_prepare_key(&ci->ci_enc_key, raw_key, ci); } static int setup_per_mode_enc_key(struct fscrypt_info *ci, struct fscrypt_master_key *mk, - struct crypto_skcipher **tfms, + struct fscrypt_prepared_key *keys, u8 hkdf_context, bool include_fs_uuid) { const struct inode *inode = ci->ci_inode; const struct super_block *sb = inode->i_sb; struct fscrypt_mode *mode = ci->ci_mode; const u8 mode_num = mode - fscrypt_modes; - struct crypto_skcipher *tfm; + struct fscrypt_prepared_key *prep_key; u8 mode_key[FSCRYPT_MAX_KEY_SIZE]; u8 hkdf_info[sizeof(mode_num) + sizeof(sb->s_uuid)]; unsigned int hkdf_infolen = 0; @@ -141,16 +168,15 @@ static int setup_per_mode_enc_key(struct fscrypt_info *ci, if (WARN_ON(mode_num > __FSCRYPT_MODE_MAX)) return -EINVAL; - /* pairs with smp_store_release() below */ - tfm = READ_ONCE(tfms[mode_num]); - if (likely(tfm != NULL)) { - ci->ci_ctfm = tfm; + prep_key = &keys[mode_num]; + if (fscrypt_is_key_prepared(prep_key, ci)) { + ci->ci_enc_key = *prep_key; return 0; } mutex_lock(&fscrypt_mode_key_setup_mutex); - if (tfms[mode_num]) + if (fscrypt_is_key_prepared(prep_key, ci)) goto done_unlock; BUILD_BUG_ON(sizeof(mode_num) != 1); @@ -167,16 +193,12 @@ static int setup_per_mode_enc_key(struct fscrypt_info *ci, mode_key, mode->keysize); if (err) goto out_unlock; - tfm = fscrypt_allocate_skcipher(mode, mode_key, inode); + err = fscrypt_prepare_key(prep_key, mode_key, ci); memzero_explicit(mode_key, mode->keysize); - if (IS_ERR(tfm)) { - err = PTR_ERR(tfm); + if (err) goto out_unlock; - } - /* pairs with READ_ONCE() above */ - smp_store_release(&tfms[mode_num], tfm); done_unlock: - ci->ci_ctfm = tfm; + ci->ci_enc_key = *prep_key; err = 0; out_unlock: mutex_unlock(&fscrypt_mode_key_setup_mutex); @@ -310,6 +332,10 @@ static int setup_file_encryption_key(struct fscrypt_info *ci, struct fscrypt_key_specifier mk_spec; int err; + err = fscrypt_select_encryption_impl(ci); + if (err) + return err; + switch (ci->ci_policy.version) { case FSCRYPT_POLICY_V1: mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR; @@ -402,7 +428,7 @@ static void put_crypt_info(struct fscrypt_info *ci) if (ci->ci_direct_key) fscrypt_put_direct_key(ci->ci_direct_key); else if (ci->ci_owns_key) - crypto_free_skcipher(ci->ci_ctfm); + fscrypt_destroy_prepared_key(&ci->ci_enc_key); key = ci->ci_master_key; if (key) { diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c index 801b48c0cd7f..a52686729a67 100644 --- a/fs/crypto/keysetup_v1.c +++ b/fs/crypto/keysetup_v1.c @@ -146,7 +146,7 @@ struct fscrypt_direct_key { struct hlist_node dk_node; refcount_t dk_refcount; const struct fscrypt_mode *dk_mode; - struct crypto_skcipher *dk_ctfm; + struct fscrypt_prepared_key dk_key; u8 dk_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE]; u8 dk_raw[FSCRYPT_MAX_KEY_SIZE]; }; @@ -154,7 +154,7 @@ struct fscrypt_direct_key { static void free_direct_key(struct fscrypt_direct_key *dk) { if (dk) { - crypto_free_skcipher(dk->dk_ctfm); + fscrypt_destroy_prepared_key(&dk->dk_key); kzfree(dk); } } @@ -199,6 +199,8 @@ find_or_insert_direct_key(struct fscrypt_direct_key *to_insert, continue; if (ci->ci_mode != dk->dk_mode) continue; + if 
(!fscrypt_is_key_prepared(&dk->dk_key, ci)) + continue; if (crypto_memneq(raw_key, dk->dk_raw, ci->ci_mode->keysize)) continue; /* using existing tfm with same (descriptor, mode, raw_key) */ @@ -231,13 +233,9 @@ fscrypt_get_direct_key(const struct fscrypt_info *ci, const u8 *raw_key) return ERR_PTR(-ENOMEM); refcount_set(&dk->dk_refcount, 1); dk->dk_mode = ci->ci_mode; - dk->dk_ctfm = fscrypt_allocate_skcipher(ci->ci_mode, raw_key, - ci->ci_inode); - if (IS_ERR(dk->dk_ctfm)) { - err = PTR_ERR(dk->dk_ctfm); - dk->dk_ctfm = NULL; + err = fscrypt_prepare_key(&dk->dk_key, raw_key, ci); + if (err) goto err_free_dk; - } memcpy(dk->dk_descriptor, ci->ci_policy.v1.master_key_descriptor, FSCRYPT_KEY_DESCRIPTOR_SIZE); memcpy(dk->dk_raw, raw_key, ci->ci_mode->keysize); @@ -259,7 +257,7 @@ static int setup_v1_file_key_direct(struct fscrypt_info *ci, if (IS_ERR(dk)) return PTR_ERR(dk); ci->ci_direct_key = dk; - ci->ci_ctfm = dk->dk_ctfm; + ci->ci_enc_key = dk->dk_key; return 0; } diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 2862ca5fea33..bb257411365f 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -69,6 +69,9 @@ struct fscrypt_operations { bool (*has_stable_inodes)(struct super_block *sb); void (*get_ino_and_lblk_bits)(struct super_block *sb, int *ino_bits_ret, int *lblk_bits_ret); + int (*get_num_devices)(struct super_block *sb); + void (*get_devices)(struct super_block *sb, + struct request_queue **devs); }; static inline bool fscrypt_has_encryption_key(const struct inode *inode) @@ -537,6 +540,85 @@ static inline void fscrypt_set_ops(struct super_block *sb, #endif /* !CONFIG_FS_ENCRYPTION */ +/* inline_crypt.c */ +#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT + +bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode); + +void fscrypt_set_bio_crypt_ctx(struct bio *bio, + const struct inode *inode, u64 first_lblk, + gfp_t gfp_mask); + +void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio, + const struct buffer_head *first_bh, + gfp_t gfp_mask); + +bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode, + u64 next_lblk); + +bool fscrypt_mergeable_bio_bh(struct bio *bio, + const struct buffer_head *next_bh); + +#else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ + +static inline bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode) +{ + return false; +} + +static inline void fscrypt_set_bio_crypt_ctx(struct bio *bio, + const struct inode *inode, + u64 first_lblk, gfp_t gfp_mask) { } + +static inline void fscrypt_set_bio_crypt_ctx_bh( + struct bio *bio, + const struct buffer_head *first_bh, + gfp_t gfp_mask) { } + +static inline bool fscrypt_mergeable_bio(struct bio *bio, + const struct inode *inode, + u64 next_lblk) +{ + return true; +} + +static inline bool fscrypt_mergeable_bio_bh(struct bio *bio, + const struct buffer_head *next_bh) +{ + return true; +} +#endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ + +/** + * fscrypt_inode_uses_inline_crypto() - test whether an inode uses inline + * encryption + * @inode: an inode. If encrypted, its key must be set up. + * + * Return: true if the inode requires file contents encryption and if the + * encryption should be done in the block layer via blk-crypto rather + * than in the filesystem layer. 
+ */ +static inline bool fscrypt_inode_uses_inline_crypto(const struct inode *inode) +{ + return fscrypt_needs_contents_encryption(inode) && + __fscrypt_inode_uses_inline_crypto(inode); +} + +/** + * fscrypt_inode_uses_fs_layer_crypto() - test whether an inode uses fs-layer + * encryption + * @inode: an inode. If encrypted, its key must be set up. + * + * Return: true if the inode requires file contents encryption and if the + * encryption should be done in the filesystem layer rather than in the + * block layer via blk-crypto. + */ +static inline bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode) +{ + return fscrypt_needs_contents_encryption(inode) && + !__fscrypt_inode_uses_inline_crypto(inode); +} + /** * fscrypt_require_key() - require an inode's encryption key * @inode: the inode we need the key for -- cgit v1.2.3 From bd36ed1c935144a0e3b788bba659258f666e7b5b Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 8 Jul 2020 09:46:24 -0700 Subject: net: phy: Define PHY statistics ethtool_phy_ops Extend ethtool_phy_ops to include the 3 function pointers necessary for implementing PHY statistics. In a subsequent change we will uninline those functions. Signed-off-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/phy_device.c | 3 +++ include/linux/ethtool.h | 7 +++++++ 2 files changed, 10 insertions(+) (limited to 'include') diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 233334406f0f..7cda95330aea 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -3028,6 +3028,9 @@ static struct phy_driver genphy_driver = { }; static const struct ethtool_phy_ops phy_ethtool_phy_ops = { + .get_sset_count = phy_ethtool_get_sset_count, + .get_strings = phy_ethtool_get_strings, + .get_stats = phy_ethtool_get_stats, .start_cable_test = phy_start_cable_test, .start_cable_test_tdr = phy_start_cable_test_tdr, }; diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 0c139a93b67a..969a80211df6 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -508,6 +508,9 @@ struct phy_tdr_config; /** * struct ethtool_phy_ops - Optional PHY device options + * @get_sset_count: Get number of strings that @get_strings will write. + * @get_strings: Return a set of strings that describe the requested objects + * @get_stats: Return extended statistics about the PHY device. * @start_cable_test - Start a cable test * @start_cable_test_tdr - Start a Time Domain Reflectometry cable test * @@ -515,6 +518,10 @@ struct phy_tdr_config; * and callers must take this into account. Callers must hold the RTNL lock. */ struct ethtool_phy_ops { + int (*get_sset_count)(struct phy_device *dev); + int (*get_strings)(struct phy_device *dev, u8 *data); + int (*get_stats)(struct phy_device *dev, + struct ethtool_stats *stats, u64 *data); int (*start_cable_test)(struct phy_device *phydev, struct netlink_ext_ack *extack); int (*start_cable_test_tdr)(struct phy_device *phydev, -- cgit v1.2.3 From 17809516a03a045565ad6a80e6241754615871ac Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 8 Jul 2020 09:46:25 -0700 Subject: net: phy: Uninline PHY ethtool statistics operations Now that we have moved the PHY ethtool statistics to be dynamically registered, we no longer need to inline those for ethtool. This used to be done to avoid cross symbol referencing and allow ethtool to be decoupled from PHYLIB entirely. Signed-off-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/phy/phy.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/phy.h | 49 ++++--------------------------------------------- net/ethtool/ioctl.c | 23 +++++++++++++++-------- net/ethtool/strset.c | 11 +++++++---- 4 files changed, 74 insertions(+), 57 deletions(-) (limited to 'include') diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 56cfae950472..79b4f35d151e 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -489,6 +489,54 @@ static void phy_abort_cable_test(struct phy_device *phydev) phydev_err(phydev, "Error while aborting cable test"); } +int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data) +{ + if (!phydev->drv) + return -EIO; + + mutex_lock(&phydev->lock); + phydev->drv->get_strings(phydev, data); + mutex_unlock(&phydev->lock); + + return 0; +} +EXPORT_SYMBOL(phy_ethtool_get_strings); + +int phy_ethtool_get_sset_count(struct phy_device *phydev) +{ + int ret; + + if (!phydev->drv) + return -EIO; + + if (phydev->drv->get_sset_count && + phydev->drv->get_strings && + phydev->drv->get_stats) { + mutex_lock(&phydev->lock); + ret = phydev->drv->get_sset_count(phydev); + mutex_unlock(&phydev->lock); + + return ret; + } + + return -EOPNOTSUPP; +} +EXPORT_SYMBOL(phy_ethtool_get_sset_count); + +int phy_ethtool_get_stats(struct phy_device *phydev, + struct ethtool_stats *stats, u64 *data) +{ + if (!phydev->drv) + return -EIO; + + mutex_lock(&phydev->lock); + phydev->drv->get_stats(phydev, stats, data); + mutex_unlock(&phydev->lock); + + return 0; +} +EXPORT_SYMBOL(phy_ethtool_get_stats); + int phy_start_cable_test(struct phy_device *phydev, struct netlink_ext_ack *extack) { diff --git a/include/linux/phy.h b/include/linux/phy.h index 1592c3d0e12f..0403eb799913 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1474,51 +1474,10 @@ int __init mdio_bus_init(void); void mdio_bus_exit(void); #endif -/* Inline function for use within net/core/ethtool.c (built-in) */ -static inline int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data) -{ - if (!phydev->drv) - return -EIO; - - mutex_lock(&phydev->lock); - phydev->drv->get_strings(phydev, data); - mutex_unlock(&phydev->lock); - - return 0; -} - -static inline int phy_ethtool_get_sset_count(struct phy_device *phydev) -{ - int ret; - - if (!phydev->drv) - return -EIO; - - if (phydev->drv->get_sset_count && - phydev->drv->get_strings && - phydev->drv->get_stats) { - mutex_lock(&phydev->lock); - ret = phydev->drv->get_sset_count(phydev); - mutex_unlock(&phydev->lock); - - return ret; - } - - return -EOPNOTSUPP; -} - -static inline int phy_ethtool_get_stats(struct phy_device *phydev, - struct ethtool_stats *stats, u64 *data) -{ - if (!phydev->drv) - return -EIO; - - mutex_lock(&phydev->lock); - phydev->drv->get_stats(phydev, stats, data); - mutex_unlock(&phydev->lock); - - return 0; -} +int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data); +int phy_ethtool_get_sset_count(struct phy_device *phydev); +int phy_ethtool_get_stats(struct phy_device *phydev, + struct ethtool_stats *stats, u64 *data); static inline int phy_package_read(struct phy_device *phydev, u32 regnum) { diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c index 83f22196d64c..441794e0034f 100644 --- a/net/ethtool/ioctl.c +++ b/net/ethtool/ioctl.c @@ -135,6 +135,7 @@ static int ethtool_set_features(struct net_device *dev, void __user *useraddr) static int __ethtool_get_sset_count(struct net_device *dev, int sset) { + const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops; const 
struct ethtool_ops *ops = dev->ethtool_ops; if (sset == ETH_SS_FEATURES) @@ -150,8 +151,9 @@ static int __ethtool_get_sset_count(struct net_device *dev, int sset) return ARRAY_SIZE(phy_tunable_strings); if (sset == ETH_SS_PHY_STATS && dev->phydev && - !ops->get_ethtool_phy_stats) - return phy_ethtool_get_sset_count(dev->phydev); + !ops->get_ethtool_phy_stats && + phy_ops && phy_ops->get_sset_count) + return phy_ops->get_sset_count(dev->phydev); if (sset == ETH_SS_LINK_MODES) return __ETHTOOL_LINK_MODE_MASK_NBITS; @@ -165,6 +167,7 @@ static int __ethtool_get_sset_count(struct net_device *dev, int sset) static void __ethtool_get_strings(struct net_device *dev, u32 stringset, u8 *data) { + const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops; const struct ethtool_ops *ops = dev->ethtool_ops; if (stringset == ETH_SS_FEATURES) @@ -178,8 +181,9 @@ static void __ethtool_get_strings(struct net_device *dev, else if (stringset == ETH_SS_PHY_TUNABLES) memcpy(data, phy_tunable_strings, sizeof(phy_tunable_strings)); else if (stringset == ETH_SS_PHY_STATS && dev->phydev && - !ops->get_ethtool_phy_stats) - phy_ethtool_get_strings(dev->phydev, data); + !ops->get_ethtool_phy_stats && phy_ops && + phy_ops->get_strings) + phy_ops->get_strings(dev->phydev, data); else if (stringset == ETH_SS_LINK_MODES) memcpy(data, link_mode_names, __ETHTOOL_LINK_MODE_MASK_NBITS * ETH_GSTRING_LEN); @@ -1929,6 +1933,7 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr) static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr) { + const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops; const struct ethtool_ops *ops = dev->ethtool_ops; struct phy_device *phydev = dev->phydev; struct ethtool_stats stats; @@ -1938,8 +1943,9 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr) if (!phydev && (!ops->get_ethtool_phy_stats || !ops->get_sset_count)) return -EOPNOTSUPP; - if (dev->phydev && !ops->get_ethtool_phy_stats) - n_stats = phy_ethtool_get_sset_count(dev->phydev); + if (dev->phydev && !ops->get_ethtool_phy_stats && + phy_ops && phy_ops->get_sset_count) + n_stats = phy_ops->get_sset_count(dev->phydev); else n_stats = ops->get_sset_count(dev, ETH_SS_PHY_STATS); if (n_stats < 0) @@ -1958,8 +1964,9 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr) if (!data) return -ENOMEM; - if (dev->phydev && !ops->get_ethtool_phy_stats) { - ret = phy_ethtool_get_stats(dev->phydev, &stats, data); + if (dev->phydev && !ops->get_ethtool_phy_stats && + phy_ops && phy_ops->get_stats) { + ret = phy_ops->get_stats(dev->phydev, &stats, data); if (ret < 0) goto out; } else { diff --git a/net/ethtool/strset.c b/net/ethtool/strset.c index 0eed4e4909ab..773634b6b048 100644 --- a/net/ethtool/strset.c +++ b/net/ethtool/strset.c @@ -209,13 +209,15 @@ static void strset_cleanup_data(struct ethnl_reply_data *reply_base) static int strset_prepare_set(struct strset_info *info, struct net_device *dev, unsigned int id, bool counts_only) { + const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops; const struct ethtool_ops *ops = dev->ethtool_ops; void *strings; int count, ret; if (id == ETH_SS_PHY_STATS && dev->phydev && - !ops->get_ethtool_phy_stats) - ret = phy_ethtool_get_sset_count(dev->phydev); + !ops->get_ethtool_phy_stats && phy_ops && + phy_ops->get_sset_count) + ret = phy_ops->get_sset_count(dev->phydev); else if (ops->get_sset_count && ops->get_strings) ret = ops->get_sset_count(dev, id); else @@ -231,8 +233,9 @@ static int 
strset_prepare_set(struct strset_info *info, struct net_device *dev, if (!strings) return -ENOMEM; if (id == ETH_SS_PHY_STATS && dev->phydev && - !ops->get_ethtool_phy_stats) - phy_ethtool_get_strings(dev->phydev, strings); + !ops->get_ethtool_phy_stats && phy_ops && + phy_ops->get_strings) + phy_ops->get_strings(dev->phydev, strings); else ops->get_strings(dev, id, strings); info->strings = strings; -- cgit v1.2.3 From e8c22266e68f0db2a7e11b0a9f29fd88ec0cfd4a Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Mon, 15 Jun 2020 14:13:34 +0200 Subject: KVM: async_pf: change kvm_setup_async_pf()/kvm_arch_setup_async_pf() return type to bool Unlike normal 'int' functions returning '0' on success, kvm_setup_async_pf()/ kvm_arch_setup_async_pf() return '1' when a job to handle page fault asynchronously was scheduled and '0' otherwise. To avoid the confusion change return type to 'bool'. No functional change intended. Suggested-by: Sean Christopherson Signed-off-by: Vitaly Kuznetsov Message-Id: <20200615121334.91300-1-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/s390/kvm/kvm-s390.c | 20 +++++++++----------- arch/x86/kvm/mmu/mmu.c | 4 ++-- include/linux/kvm_host.h | 4 ++-- virt/kvm/async_pf.c | 16 ++++++++++------ 4 files changed, 23 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index d47c19718615..7fd4fdb165fc 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -3954,33 +3954,31 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu) return true; } -static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu) +static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu) { hva_t hva; struct kvm_arch_async_pf arch; - int rc; if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) - return 0; + return false; if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) != vcpu->arch.pfault_compare) - return 0; + return false; if (psw_extint_disabled(vcpu)) - return 0; + return false; if (kvm_s390_vcpu_has_irq(vcpu, 0)) - return 0; + return false; if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) - return 0; + return false; if (!vcpu->arch.gmap->pfault_enabled) - return 0; + return false; hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr)); hva += current->thread.gmap_addr & ~PAGE_MASK; if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8)) - return 0; + return false; - rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); - return rc; + return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); } static int vcpu_pre_run(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 3ca70554d5f1..a1850120ede0 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4045,8 +4045,8 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr) walk_shadow_page_lockless_end(vcpu); } -static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, - gfn_t gfn) +static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, + gfn_t gfn) { struct kvm_arch_async_pf arch; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 62ec926c78a0..9edc6fc71a89 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -211,8 +211,8 @@ struct kvm_async_pf { void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu); void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu); -int 
kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, - unsigned long hva, struct kvm_arch_async_pf *arch); +bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, + unsigned long hva, struct kvm_arch_async_pf *arch); int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); #endif diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c index 45799606bb3e..390f758d5a27 100644 --- a/virt/kvm/async_pf.c +++ b/virt/kvm/async_pf.c @@ -156,17 +156,21 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu) } } -int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, - unsigned long hva, struct kvm_arch_async_pf *arch) +/* + * Try to schedule a job to handle page fault asynchronously. Returns 'true' on + * success, 'false' on failure (page fault has to be handled synchronously). + */ +bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, + unsigned long hva, struct kvm_arch_async_pf *arch) { struct kvm_async_pf *work; if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU) - return 0; + return false; /* Arch specific code should not do async PF in this case */ if (unlikely(kvm_is_error_hva(hva))) - return 0; + return false; /* * do alloc nowait since if we are going to sleep anyway we @@ -174,7 +178,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, */ work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN); if (!work) - return 0; + return false; work->wakeup_all = false; work->vcpu = vcpu; @@ -193,7 +197,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, schedule_work(&work->work); - return 1; + return true; } int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From 1aa561b1a4c0ae2a9a9b9c21a84b5ca66b4775d8 Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Wed, 3 Jun 2020 16:56:21 -0700 Subject: kvm: x86: Add "last CPU" to some KVM_EXIT information More often than not, a failed VM-entry in an x86 production environment is induced by a defective CPU. To help identify the bad hardware, include the id of the last logical CPU to run a vCPU in the information provided to userspace on a KVM exit for failed VM-entry or for KVM internal errors not associated with emulation. The presence of this additional information is indicated by a new capability, KVM_CAP_LAST_CPU. Signed-off-by: Jim Mattson Reviewed-by: Oliver Upton Reviewed-by: Peter Shier Message-Id: <20200603235623.245638-5-jmattson@google.com> Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/api.rst | 1 + arch/x86/kvm/svm/svm.c | 4 +++- arch/x86/kvm/vmx/vmx.c | 10 ++++++++-- arch/x86/kvm/x86.c | 1 + include/uapi/linux/kvm.h | 2 ++ 5 files changed, 15 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 426f94582b7a..1cfe79b932d6 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -4794,6 +4794,7 @@ hardware_exit_reason. 
/* KVM_EXIT_FAIL_ENTRY */ struct { __u64 hardware_entry_failure_reason; + __u32 cpu; /* if KVM_LAST_CPU */ } fail_entry; If exit_reason is KVM_EXIT_FAIL_ENTRY, the vcpu could not be run due diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 24b7f321874f..8ecd46f2cb1e 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2947,6 +2947,7 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; kvm_run->fail_entry.hardware_entry_failure_reason = svm->vmcb->control.exit_code; + kvm_run->fail_entry.cpu = svm->last_cpu; dump_vmcb(vcpu); return 0; } @@ -2970,8 +2971,9 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON; - vcpu->run->internal.ndata = 1; + vcpu->run->internal.ndata = 2; vcpu->run->internal.data[0] = exit_code; + vcpu->run->internal.data[1] = svm->last_cpu; return 0; } diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 4d8f12c0a5c6..b52bcebfa094 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -4781,10 +4781,11 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu) !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX; - vcpu->run->internal.ndata = 3; + vcpu->run->internal.ndata = 4; vcpu->run->internal.data[0] = vect_info; vcpu->run->internal.data[1] = intr_info; vcpu->run->internal.data[2] = error_code; + vcpu->run->internal.data[3] = vmx->last_cpu; return 0; } @@ -6006,6 +6007,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; vcpu->run->fail_entry.hardware_entry_failure_reason = exit_reason; + vcpu->run->fail_entry.cpu = vmx->last_cpu; return 0; } @@ -6014,6 +6016,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; vcpu->run->fail_entry.hardware_entry_failure_reason = vmcs_read32(VM_INSTRUCTION_ERROR); + vcpu->run->fail_entry.cpu = vmx->last_cpu; return 0; } @@ -6040,6 +6043,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) vcpu->run->internal.data[3] = vmcs_read64(GUEST_PHYSICAL_ADDRESS); } + vcpu->run->internal.data[vcpu->run->internal.ndata++] = + vmx->last_cpu; return 0; } @@ -6095,8 +6100,9 @@ unexpected_vmexit: vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON; - vcpu->run->internal.ndata = 1; + vcpu->run->internal.ndata = 2; vcpu->run->internal.data[0] = exit_reason; + vcpu->run->internal.data[1] = vmx->last_cpu; return 0; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 82f457f0e7e0..1a0fad1018f9 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3510,6 +3510,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_MSR_PLATFORM_INFO: case KVM_CAP_EXCEPTION_PAYLOAD: case KVM_CAP_SET_GUEST_DEBUG: + case KVM_CAP_LAST_CPU: r = 1; break; case KVM_CAP_SYNC_REGS: diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 4fdf30316582..ff9b335620d0 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -289,6 +289,7 @@ struct kvm_run { /* KVM_EXIT_FAIL_ENTRY */ struct { __u64 hardware_entry_failure_reason; + __u32 cpu; } fail_entry; /* KVM_EXIT_EXCEPTION */ struct { @@ 
-1031,6 +1032,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_PPC_SECURE_GUEST 181 #define KVM_CAP_HALT_POLL 182 #define KVM_CAP_ASYNC_PF_INT 183 +#define KVM_CAP_LAST_CPU 184 #ifdef KVM_CAP_IRQ_ROUTING -- cgit v1.2.3 From ecd7274fb4cd2d6c035a52d59ca3d6ec936a07be Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 4 Jun 2020 18:11:15 +0100 Subject: iommu: Remove unused IOMMU_SYS_CACHE_ONLY flag The IOMMU_SYS_CACHE_ONLY flag was never exposed via the DMA API and has no in-tree users. Remove it. Cc: Robin Murphy Cc: "Isaac J. Manjarres" Cc: Joerg Roedel Cc: Rob Clark Reviewed-by: Christoph Hellwig Reviewed-by: Sai Prakash Ranjan Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm.c | 3 --- include/linux/iommu.h | 6 ------ 2 files changed, 9 deletions(-) (limited to 'include') diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 04fbd4bf0ff9..8f175c02f8e3 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -438,9 +438,6 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, else if (prot & IOMMU_CACHE) pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE << ARM_LPAE_PTE_ATTRINDX_SHIFT); - else if (prot & IOMMU_SYS_CACHE_ONLY) - pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE - << ARM_LPAE_PTE_ATTRINDX_SHIFT); } if (prot & IOMMU_CACHE) diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 5f0b7859d2eb..bee1a8fa1fb1 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -31,12 +31,6 @@ * if the IOMMU page table format is equivalent. */ #define IOMMU_PRIV (1 << 5) -/* - * Non-coherent masters can use this page protection flag to set cacheable - * memory attributes for only a transparent outer level of cache, also known as - * the last-level or system cache. - */ -#define IOMMU_SYS_CACHE_ONLY (1 << 6) struct iommu_ops; struct iommu_group; -- cgit v1.2.3 From a564e23f0f99759f453dbefcb9160dec6d99df96 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 8 Jul 2020 14:25:41 +0200 Subject: md: switch to ->check_events for media change notifications md is the last driver using the legacy media_changed method. Switch it over to the (not so) new ->check_events approach, which also removes the need for the ->revalidate_disk method.
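For a driver making the same conversion, the ->check_events contract is: return whichever pending events the driver supports (from the set advertised in disk->events) and clear the internal "changed" state once it has been reported. A minimal sketch of that callback shape, using a hypothetical foo driver whose names are illustrative and not part of this patch:

#include <linux/module.h>
#include <linux/blkdev.h>

/* Hypothetical per-device state; only the media-change flag matters for this sketch. */
struct foo_dev {
	bool media_changed;
};

static unsigned int foo_check_events(struct gendisk *disk, unsigned int clearing)
{
	struct foo_dev *fd = disk->private_data;
	unsigned int events = 0;

	if (fd->media_changed) {
		events |= DISK_EVENT_MEDIA_CHANGE;
		fd->media_changed = false;	/* report once, then clear */
	}
	return events;
}

static const struct block_device_operations foo_fops = {
	.owner		= THIS_MODULE,
	.check_events	= foo_check_events,	/* instead of ->media_changed / ->revalidate_disk */
};

As the md hunk below does with disk->events |= DISK_EVENT_MEDIA_CHANGE, the driver also has to advertise the event class at allocation time so the block layer knows to ask for it.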
Signed-off-by: Christoph Hellwig [axboe: remove unused 'bdops' variable in disk_clear_events()] Signed-off-by: Jens Axboe --- Documentation/filesystems/locking.rst | 4 +--- block/genhd.c | 8 +------- drivers/md/md.c | 19 ++++++++----------- include/linux/blkdev.h | 2 -- 4 files changed, 10 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst index 318605de83f3..17bea12538c3 100644 --- a/Documentation/filesystems/locking.rst +++ b/Documentation/filesystems/locking.rst @@ -467,7 +467,6 @@ prototypes:: int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); int (*direct_access) (struct block_device *, sector_t, void **, unsigned long *); - int (*media_changed) (struct gendisk *); void (*unlock_native_capacity) (struct gendisk *); int (*revalidate_disk) (struct gendisk *); int (*getgeo)(struct block_device *, struct hd_geometry *); @@ -483,14 +482,13 @@ release: yes ioctl: no compat_ioctl: no direct_access: no -media_changed: no unlock_native_capacity: no revalidate_disk: no getgeo: no swap_slot_free_notify: no (see below) ======================= =================== -media_changed, unlock_native_capacity and revalidate_disk are called only from +unlock_native_capacity and revalidate_disk are called only from check_disk_change(). swap_slot_free_notify is called with swap_lock and sometimes the page lock diff --git a/block/genhd.c b/block/genhd.c index 60ae4e1b4d38..c42a49f2f537 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -2056,18 +2056,12 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask) */ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask) { - const struct block_device_operations *bdops = disk->fops; struct disk_events *ev = disk->ev; unsigned int pending; unsigned int clearing = mask; - if (!ev) { - /* for drivers still using the old ->media_changed method */ - if ((mask & DISK_EVENT_MEDIA_CHANGE) && - bdops->media_changed && bdops->media_changed(disk)) - return DISK_EVENT_MEDIA_CHANGE; + if (!ev) return 0; - } disk_block_events(disk); diff --git a/drivers/md/md.c b/drivers/md/md.c index 8bb69c61afe0..77dfe4765c31 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5670,6 +5670,7 @@ static int md_alloc(dev_t dev, char *name) * remove it now. 
*/ disk->flags |= GENHD_FL_EXT_DEVT; + disk->events |= DISK_EVENT_MEDIA_CHANGE; mddev->gendisk = disk; /* As soon as we call add_disk(), another thread could get * through to md_open, so make sure it doesn't get too far @@ -7806,20 +7807,17 @@ static void md_release(struct gendisk *disk, fmode_t mode) mddev_put(mddev); } -static int md_media_changed(struct gendisk *disk) -{ - struct mddev *mddev = disk->private_data; - - return mddev->changed; -} - -static int md_revalidate(struct gendisk *disk) +static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing) { struct mddev *mddev = disk->private_data; + unsigned int ret = 0; + if (mddev->changed) + ret = DISK_EVENT_MEDIA_CHANGE; mddev->changed = 0; - return 0; + return ret; } + static const struct block_device_operations md_fops = { .owner = THIS_MODULE, @@ -7831,8 +7829,7 @@ static const struct block_device_operations md_fops = .compat_ioctl = md_compat_ioctl, #endif .getgeo = md_getgeo, - .media_changed = md_media_changed, - .revalidate_disk= md_revalidate, + .check_events = md_check_events, }; static int md_thread(void *arg) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 408eb66a82fd..71173a1ffa8b 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1781,8 +1781,6 @@ struct block_device_operations { int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); unsigned int (*check_events) (struct gendisk *disk, unsigned int clearing); - /* ->media_changed() is DEPRECATED, use ->check_events() instead */ - int (*media_changed) (struct gendisk *); void (*unlock_native_capacity) (struct gendisk *); int (*revalidate_disk) (struct gendisk *); int (*getgeo)(struct block_device *, struct hd_geometry *); -- cgit v1.2.3 From 8c22eb3a77373c616f141b56f44ef225ee15c96b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 8 Jul 2020 14:25:42 +0200 Subject: cdrom: remove the unused cdrom_media_changed function As well as the ->media_changed method. All these are left over from before the drivers were switched over to the check_events scheme. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- Documentation/cdrom/cdrom-standard.rst | 18 +----------------- drivers/cdrom/cdrom.c | 28 +++++----------------------- include/linux/cdrom.h | 2 -- 3 files changed, 6 insertions(+), 42 deletions(-) (limited to 'include') diff --git a/Documentation/cdrom/cdrom-standard.rst b/Documentation/cdrom/cdrom-standard.rst index dde4f7f7fdbf..2de905810590 100644 --- a/Documentation/cdrom/cdrom-standard.rst +++ b/Documentation/cdrom/cdrom-standard.rst @@ -157,7 +157,6 @@ with the kernel as a block device by registering the following general cdrom_release, /∗ release ∗/ NULL, /∗ fsync ∗/ NULL, /∗ fasync ∗/ - cdrom_media_changed, /∗ media change ∗/ NULL /∗ revalidate ∗/ }; @@ -366,19 +365,6 @@ which may or may not be in the drive). If the drive is not a changer, CDS_DRIVE_NOT_READY /* something is wrong, tray is moving? */ CDS_DISC_OK /* a disc is loaded and everything is fine */ -:: - - int media_changed(struct cdrom_device_info *cdi, int disc_nr) - -This function is very similar to the original function in $struct -file_operations*. It returns 1 if the medium of the device *cdi->dev* -has changed since the last call, and 0 otherwise. The parameter -*disc_nr* identifies a specific slot in a juke-box, it should be -ignored for single-disc drives. 
Note that by `re-routing` this -function through *cdrom_media_changed()*, we can implement separate -queues for the VFS and a new *ioctl()* function that can report device -changes to software (e. g., an auto-mounting daemon). - :: int tray_move(struct cdrom_device_info *cdi, int position) @@ -917,9 +903,7 @@ commands can be identified by the underscores in their names. maximum number of discs in the juke-box found in the *cdrom_dops*. `CDROM_MEDIA_CHANGED` Returns 1 if a disc has been changed since the last call. - Note that calls to *cdrom_media_changed* by the VFS are treated - by an independent queue, so both mechanisms will detect a - media change once. For juke-boxes, an extra argument *arg* + For juke-boxes, an extra argument *arg* specifies the slot for which the information is given. The special value *CDSL_CURRENT* requests that information about the currently selected slot be returned. diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index d82b3b7658bd..0c271b9e3c5b 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -605,7 +605,7 @@ int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi) disk->cdi = cdi; ENSURE(cdo, drive_status, CDC_DRIVE_STATUS); - if (cdo->check_events == NULL && cdo->media_changed == NULL) + if (cdo->check_events == NULL) WARN_ON_ONCE(cdo->capability & (CDC_MEDIA_CHANGED | CDC_SELECT_DISC)); ENSURE(cdo, tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY); ENSURE(cdo, lock_door, CDC_LOCK); @@ -1419,8 +1419,6 @@ static int cdrom_select_disc(struct cdrom_device_info *cdi, int slot) if (cdi->ops->check_events) cdi->ops->check_events(cdi, 0, slot); - else - cdi->ops->media_changed(cdi, slot); if (slot == CDSL_NONE) { /* set media changed bits, on both queues */ @@ -1517,13 +1515,10 @@ int media_changed(struct cdrom_device_info *cdi, int queue) return ret; /* changed since last call? */ - if (cdi->ops->check_events) { - BUG_ON(!queue); /* shouldn't be called from VFS path */ - cdrom_update_events(cdi, DISK_EVENT_MEDIA_CHANGE); - changed = cdi->ioctl_events & DISK_EVENT_MEDIA_CHANGE; - cdi->ioctl_events = 0; - } else - changed = cdi->ops->media_changed(cdi, CDSL_CURRENT); + BUG_ON(!queue); /* shouldn't be called from VFS path */ + cdrom_update_events(cdi, DISK_EVENT_MEDIA_CHANGE); + changed = cdi->ioctl_events & DISK_EVENT_MEDIA_CHANGE; + cdi->ioctl_events = 0; if (changed) { cdi->mc_flags = 0x3; /* set bit on both queues */ @@ -1535,18 +1530,6 @@ int media_changed(struct cdrom_device_info *cdi, int queue) return ret; } -int cdrom_media_changed(struct cdrom_device_info *cdi) -{ - /* This talks to the VFS, which doesn't like errors - just 1 or 0. - * Returning "0" is always safe (media hasn't been changed). Do that - * if the low-level cdrom driver dosn't support media changed. 
*/ - if (cdi == NULL || cdi->ops->media_changed == NULL) - return 0; - if (!CDROM_CAN(CDC_MEDIA_CHANGED)) - return 0; - return media_changed(cdi, 0); -} - /* Requests to the low-level drivers will /always/ be done in the following format convention: @@ -3464,7 +3447,6 @@ EXPORT_SYMBOL(unregister_cdrom); EXPORT_SYMBOL(cdrom_open); EXPORT_SYMBOL(cdrom_release); EXPORT_SYMBOL(cdrom_ioctl); -EXPORT_SYMBOL(cdrom_media_changed); EXPORT_SYMBOL(cdrom_number_of_slots); EXPORT_SYMBOL(cdrom_mode_select); EXPORT_SYMBOL(cdrom_mode_sense); diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h index 8543fa59da72..f48d0a31deae 100644 --- a/include/linux/cdrom.h +++ b/include/linux/cdrom.h @@ -73,7 +73,6 @@ struct cdrom_device_ops { int (*drive_status) (struct cdrom_device_info *, int); unsigned int (*check_events) (struct cdrom_device_info *cdi, unsigned int clearing, int slot); - int (*media_changed) (struct cdrom_device_info *, int); int (*tray_move) (struct cdrom_device_info *, int); int (*lock_door) (struct cdrom_device_info *, int); int (*select_speed) (struct cdrom_device_info *, int); @@ -107,7 +106,6 @@ extern int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg); extern unsigned int cdrom_check_events(struct cdrom_device_info *cdi, unsigned int clearing); -extern int cdrom_media_changed(struct cdrom_device_info *); extern int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi); extern void unregister_cdrom(struct cdrom_device_info *cdi); -- cgit v1.2.3 From 065e0d42a0a728d7f6c2aec7c9f3e5dc7b715394 Mon Sep 17 00:00:00 2001 From: Meir Lichtinger Date: Mon, 6 Jul 2020 20:42:32 -0700 Subject: ethtool: Add support for 100Gbps per lane link modes Define 100G, 200G and 400G link modes using 100Gbps per lane LR, ER and FR are defined as a single link mode because they are using same technology and by design are fully interoperable. EEPROM content indicates if the module is LR, ER, or FR, and the user space ethtool decoder is planned to support decoding these modes in the EEPROM. Signed-off-by: Meir Lichtinger CC: Andrew Lunn Reviewed-by: Aya Levin Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/phy/phy-core.c | 17 ++++++++++++++++- include/uapi/linux/ethtool.h | 15 +++++++++++++++ net/ethtool/common.c | 15 +++++++++++++++ net/ethtool/linkmodes.c | 15 +++++++++++++++ 4 files changed, 61 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index 46bd68e9ecfa..ff8e14b01eeb 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -8,7 +8,7 @@ const char *phy_speed_to_str(int speed) { - BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 75, + BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 90, "Enum ethtool_link_mode_bit_indices and phylib are out of sync. 
" "If a speed or mode has been added please update phy_speed_to_str " "and the PHY settings array.\n"); @@ -78,12 +78,22 @@ static const struct phy_setting settings[] = { PHY_SETTING( 400000, FULL, 400000baseLR8_ER8_FR8_Full ), PHY_SETTING( 400000, FULL, 400000baseDR8_Full ), PHY_SETTING( 400000, FULL, 400000baseSR8_Full ), + PHY_SETTING( 400000, FULL, 400000baseCR4_Full ), + PHY_SETTING( 400000, FULL, 400000baseKR4_Full ), + PHY_SETTING( 400000, FULL, 400000baseLR4_ER4_FR4_Full ), + PHY_SETTING( 400000, FULL, 400000baseDR4_Full ), + PHY_SETTING( 400000, FULL, 400000baseSR4_Full ), /* 200G */ PHY_SETTING( 200000, FULL, 200000baseCR4_Full ), PHY_SETTING( 200000, FULL, 200000baseKR4_Full ), PHY_SETTING( 200000, FULL, 200000baseLR4_ER4_FR4_Full ), PHY_SETTING( 200000, FULL, 200000baseDR4_Full ), PHY_SETTING( 200000, FULL, 200000baseSR4_Full ), + PHY_SETTING( 200000, FULL, 200000baseCR2_Full ), + PHY_SETTING( 200000, FULL, 200000baseKR2_Full ), + PHY_SETTING( 200000, FULL, 200000baseLR2_ER2_FR2_Full ), + PHY_SETTING( 200000, FULL, 200000baseDR2_Full ), + PHY_SETTING( 200000, FULL, 200000baseSR2_Full ), /* 100G */ PHY_SETTING( 100000, FULL, 100000baseCR4_Full ), PHY_SETTING( 100000, FULL, 100000baseKR4_Full ), @@ -94,6 +104,11 @@ static const struct phy_setting settings[] = { PHY_SETTING( 100000, FULL, 100000baseLR2_ER2_FR2_Full ), PHY_SETTING( 100000, FULL, 100000baseDR2_Full ), PHY_SETTING( 100000, FULL, 100000baseSR2_Full ), + PHY_SETTING( 100000, FULL, 100000baseCR_Full ), + PHY_SETTING( 100000, FULL, 100000baseKR_Full ), + PHY_SETTING( 100000, FULL, 100000baseLR_ER_FR_Full ), + PHY_SETTING( 100000, FULL, 100000baseDR_Full ), + PHY_SETTING( 100000, FULL, 100000baseSR_Full ), /* 56G */ PHY_SETTING( 56000, FULL, 56000baseCR4_Full ), PHY_SETTING( 56000, FULL, 56000baseKR4_Full ), diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index d1413538ef30..60856e0f9618 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -1600,6 +1600,21 @@ enum ethtool_link_mode_bit_indices { ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT = 72, ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT = 73, ETHTOOL_LINK_MODE_FEC_LLRS_BIT = 74, + ETHTOOL_LINK_MODE_100000baseKR_Full_BIT = 75, + ETHTOOL_LINK_MODE_100000baseSR_Full_BIT = 76, + ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT = 77, + ETHTOOL_LINK_MODE_100000baseCR_Full_BIT = 78, + ETHTOOL_LINK_MODE_100000baseDR_Full_BIT = 79, + ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT = 80, + ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT = 81, + ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT = 82, + ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT = 83, + ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT = 84, + ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT = 85, + ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT = 86, + ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT = 87, + ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT = 88, + ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT = 89, /* must be last entry */ __ETHTOOL_LINK_MODE_MASK_NBITS }; diff --git a/net/ethtool/common.c b/net/ethtool/common.c index ce4dbae5a943..c54166713797 100644 --- a/net/ethtool/common.c +++ b/net/ethtool/common.c @@ -176,6 +176,21 @@ const char link_mode_names[][ETH_GSTRING_LEN] = { __DEFINE_LINK_MODE_NAME(400000, DR8, Full), __DEFINE_LINK_MODE_NAME(400000, CR8, Full), __DEFINE_SPECIAL_MODE_NAME(FEC_LLRS, "LLRS"), + __DEFINE_LINK_MODE_NAME(100000, KR, Full), + __DEFINE_LINK_MODE_NAME(100000, SR, Full), + __DEFINE_LINK_MODE_NAME(100000, LR_ER_FR, Full), + __DEFINE_LINK_MODE_NAME(100000, DR, Full), + 
__DEFINE_LINK_MODE_NAME(100000, CR, Full), + __DEFINE_LINK_MODE_NAME(200000, KR2, Full), + __DEFINE_LINK_MODE_NAME(200000, SR2, Full), + __DEFINE_LINK_MODE_NAME(200000, LR2_ER2_FR2, Full), + __DEFINE_LINK_MODE_NAME(200000, DR2, Full), + __DEFINE_LINK_MODE_NAME(200000, CR2, Full), + __DEFINE_LINK_MODE_NAME(400000, KR4, Full), + __DEFINE_LINK_MODE_NAME(400000, SR4, Full), + __DEFINE_LINK_MODE_NAME(400000, LR4_ER4_FR4, Full), + __DEFINE_LINK_MODE_NAME(400000, DR4, Full), + __DEFINE_LINK_MODE_NAME(400000, CR4, Full), }; static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS); diff --git a/net/ethtool/linkmodes.c b/net/ethtool/linkmodes.c index fd4f3e58c6f6..317a93129551 100644 --- a/net/ethtool/linkmodes.c +++ b/net/ethtool/linkmodes.c @@ -257,6 +257,21 @@ static const struct link_mode_info link_mode_params[] = { __DEFINE_LINK_MODE_PARAMS(400000, DR8, Full), __DEFINE_LINK_MODE_PARAMS(400000, CR8, Full), __DEFINE_SPECIAL_MODE_PARAMS(FEC_LLRS), + __DEFINE_LINK_MODE_PARAMS(100000, KR, Full), + __DEFINE_LINK_MODE_PARAMS(100000, SR, Full), + __DEFINE_LINK_MODE_PARAMS(100000, LR_ER_FR, Full), + __DEFINE_LINK_MODE_PARAMS(100000, DR, Full), + __DEFINE_LINK_MODE_PARAMS(100000, CR, Full), + __DEFINE_LINK_MODE_PARAMS(200000, KR2, Full), + __DEFINE_LINK_MODE_PARAMS(200000, SR2, Full), + __DEFINE_LINK_MODE_PARAMS(200000, LR2_ER2_FR2, Full), + __DEFINE_LINK_MODE_PARAMS(200000, DR2, Full), + __DEFINE_LINK_MODE_PARAMS(200000, CR2, Full), + __DEFINE_LINK_MODE_PARAMS(400000, KR4, Full), + __DEFINE_LINK_MODE_PARAMS(400000, SR4, Full), + __DEFINE_LINK_MODE_PARAMS(400000, LR4_ER4_FR4, Full), + __DEFINE_LINK_MODE_PARAMS(400000, DR4, Full), + __DEFINE_LINK_MODE_PARAMS(400000, CR4, Full), }; static const struct nla_policy -- cgit v1.2.3 From 12fdafb817c6cde87a4fa0e674e66b0226a0889d Mon Sep 17 00:00:00 2001 From: Meir Lichtinger Date: Mon, 6 Jul 2020 20:42:33 -0700 Subject: net/mlx5: Added support for 100Gbps per lane link modes This patch exposes new link modes using 100Gbps per lane, including 100G, 200G and 400G modes. Signed-off-by: Meir Lichtinger Reviewed-by: Aya Levin Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en/port.c | 3 +++ .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 21 ++++++++++++++++++++- include/linux/mlx5/port.h | 3 +++ 3 files changed, 26 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index 2a8950b3056f..be83db63aca0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c @@ -76,6 +76,9 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = { [MLX5E_100GAUI_2_100GBASE_CR2_KR2] = 100000, [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = 200000, [MLX5E_400GAUI_8] = 400000, + [MLX5E_100GAUI_1_100GBASE_CR_KR] = 100000, + [MLX5E_200GAUI_2_200GBASE_CR2_KR2] = 200000, + [MLX5E_400GAUI_4_400GBASE_CR4_KR4] = 400000, }; static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index ec5658bbe3c5..6183bee7d21b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -194,6 +194,24 @@ void mlx5e_build_ptys2ethtool_map(void) ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GAUI_1_100GBASE_CR_KR, ext, + ETHTOOL_LINK_MODE_100000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT, + ETHTOOL_LINK_MODE_100000baseDR_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_200GAUI_2_200GBASE_CR2_KR2, ext, + ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT, + ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT, + ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT, + ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_400GAUI_4_400GBASE_CR4_KR4, ext, + ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT, + ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT, + ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT); } static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev, @@ -1012,7 +1030,8 @@ static u32 mlx5e_ethtool2ptys_ext_adver_link(const unsigned long *link_modes) unsigned long modes[2]; for (i = 0; i < MLX5E_EXT_LINK_MODES_NUMBER; ++i) { - if (*ptys2ext_ethtool_table[i].advertised == 0) + if (ptys2ext_ethtool_table[i].advertised[0] == 0 && + ptys2ext_ethtool_table[i].advertised[1] == 0) continue; memset(modes, 0, sizeof(modes)); bitmap_and(modes, ptys2ext_ethtool_table[i].advertised, diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index de9a272c9f3d..2d45a6af52a4 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -104,8 +104,11 @@ enum mlx5e_ext_link_mode { MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR = 8, MLX5E_CAUI_4_100GBASE_CR4_KR4 = 9, MLX5E_100GAUI_2_100GBASE_CR2_KR2 = 10, + MLX5E_100GAUI_1_100GBASE_CR_KR = 11, MLX5E_200GAUI_4_200GBASE_CR4_KR4 = 12, + MLX5E_200GAUI_2_200GBASE_CR2_KR2 = 13, MLX5E_400GAUI_8 = 15, + MLX5E_400GAUI_4_400GBASE_CR4_KR4 = 16, MLX5E_EXT_LINK_MODES_NUMBER, }; -- cgit v1.2.3 From efd7fe68f0c6c9649757bf80cbc382fd21e764c9 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 8 Jul 2020 14:25:36 +0200 Subject: net: dsa: tag_rtl4_a: Implement Realtek 4 byte A tag 
This implements the known parts of the Realtek 4 byte tag protocol version 0xA, as found in the RTL8366RB DSA switch. It is designated as protocol version 0xA as a different Realtek 4 byte tag format with protocol version 0x9 is known to exist in the Realtek RTL8306 chips. The tag and switch chip lacks public documentation, so the tag format has been reverse-engineered from packet dumps. As only ingress traffic has been available for analysis an egress tag has not been possible to develop (even using educated guesses about bit fields) so this is as far as it gets. It is not known if the switch even supports egress tagging. Excessive attempts to figure out the egress tag format was made. When nothing else worked, I just tried all bit combinations with 0xannp where a is protocol and p is port. I looped through all values several times trying to get a response from ping, without any positive result. Using just these ingress tags however, the switch functionality is vastly improved and the packets find their way into the destination port without any tricky VLAN configuration. On the D-Link DIR-685 the LAN ports now come up and respond to ping without any command line configuration so this is a real improvement for users. Egress packets need to be restricted to the proper target ports using VLAN, which the RTL8366RB DSA switch driver already sets up. Cc: DENG Qingfang Cc: Mauri Sandberg Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: Linus Walleij Signed-off-by: David S. Miller --- include/net/dsa.h | 2 + net/dsa/Kconfig | 7 +++ net/dsa/Makefile | 1 + net/dsa/tag_rtl4_a.c | 130 +++++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 140 insertions(+) create mode 100644 net/dsa/tag_rtl4_a.c (limited to 'include') diff --git a/include/net/dsa.h b/include/net/dsa.h index 4046ccd1945d..b28c95c76762 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -44,6 +44,7 @@ struct phylink_link_state; #define DSA_TAG_PROTO_KSZ8795_VALUE 14 #define DSA_TAG_PROTO_OCELOT_VALUE 15 #define DSA_TAG_PROTO_AR9331_VALUE 16 +#define DSA_TAG_PROTO_RTL4_A_VALUE 17 enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE, @@ -63,6 +64,7 @@ enum dsa_tag_protocol { DSA_TAG_PROTO_KSZ8795 = DSA_TAG_PROTO_KSZ8795_VALUE, DSA_TAG_PROTO_OCELOT = DSA_TAG_PROTO_OCELOT_VALUE, DSA_TAG_PROTO_AR9331 = DSA_TAG_PROTO_AR9331_VALUE, + DSA_TAG_PROTO_RTL4_A = DSA_TAG_PROTO_RTL4_A_VALUE, }; struct packet_type; diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig index d5bc6ac599ef..1f9b9b11008c 100644 --- a/net/dsa/Kconfig +++ b/net/dsa/Kconfig @@ -86,6 +86,13 @@ config NET_DSA_TAG_KSZ Say Y if you want to enable support for tagging frames for the Microchip 8795/9477/9893 families of switches. +config NET_DSA_TAG_RTL4_A + tristate "Tag driver for Realtek 4 byte protocol A tags" + help + Say Y or M if you want to enable support for tagging frames for the + Realtek switches with 4 byte protocol A tags, sich as found in + the Realtek RTL8366RB. 
+ config NET_DSA_TAG_OCELOT tristate "Tag driver for Ocelot family of switches" select PACKING diff --git a/net/dsa/Makefile b/net/dsa/Makefile index 108486cfdeef..4f47b2025ff5 100644 --- a/net/dsa/Makefile +++ b/net/dsa/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o obj-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o obj-$(CONFIG_NET_DSA_TAG_GSWIP) += tag_gswip.o obj-$(CONFIG_NET_DSA_TAG_KSZ) += tag_ksz.o +obj-$(CONFIG_NET_DSA_TAG_RTL4_A) += tag_rtl4_a.o obj-$(CONFIG_NET_DSA_TAG_LAN9303) += tag_lan9303.o obj-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk.o obj-$(CONFIG_NET_DSA_TAG_OCELOT) += tag_ocelot.o diff --git a/net/dsa/tag_rtl4_a.c b/net/dsa/tag_rtl4_a.c new file mode 100644 index 000000000000..7b63010fa87b --- /dev/null +++ b/net/dsa/tag_rtl4_a.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Handler for Realtek 4 byte DSA switch tags + * Currently only supports protocol "A" found in RTL8366RB + * Copyright (c) 2020 Linus Walleij + * + * This "proprietary tag" header looks like so: + * + * ------------------------------------------------- + * | MAC DA | MAC SA | 0x8899 | 2 bytes tag | Type | + * ------------------------------------------------- + * + * The 2 bytes tag form a 16 bit big endian word. The exact + * meaning has been guessed from packet dumps from ingress + * frames, as no working egress traffic has been available + * we do not know the format of the egress tags or if they + * are even supported. + */ + +#include +#include + +#include "dsa_priv.h" + +#define RTL4_A_HDR_LEN 4 +#define RTL4_A_ETHERTYPE 0x8899 +#define RTL4_A_PROTOCOL_SHIFT 12 +/* + * 0x1 = Realtek Remote Control protocol (RRCP) + * 0x2/0x3 seems to be used for loopback testing + * 0x9 = RTL8306 DSA protocol + * 0xa = RTL8366RB DSA protocol + */ +#define RTL4_A_PROTOCOL_RTL8366RB 0xa + +static struct sk_buff *rtl4a_tag_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + /* + * Just let it pass thru, we don't know if it is possible + * to tag a frame with the 0x8899 ethertype and direct it + * to a specific port, all attempts at reverse-engineering have + * ended up with the frames getting dropped. + * + * The VLAN set-up needs to restrict the frames to the right port. + * + * If you have documentation on the tagging format for RTL8366RB + * (tag type A) then please contribute. + */ + return skb; +} + +static struct sk_buff *rtl4a_tag_rcv(struct sk_buff *skb, + struct net_device *dev, + struct packet_type *pt) +{ + u16 protport; + __be16 *p; + u16 etype; + u8 *tag; + u8 prot; + u8 port; + + if (unlikely(!pskb_may_pull(skb, RTL4_A_HDR_LEN))) + return NULL; + + /* The RTL4 header has its own custom Ethertype 0x8899 and that + * starts right at the beginning of the packet, after the src + * ethernet addr. Apparantly skb->data always points 2 bytes in, + * behind the Ethertype. 
+ */ + tag = skb->data - 2; + p = (__be16 *)tag; + etype = ntohs(*p); + if (etype != RTL4_A_ETHERTYPE) { + /* Not custom, just pass through */ + netdev_dbg(dev, "non-realtek ethertype 0x%04x\n", etype); + return skb; + } + p = (__be16 *)(tag + 2); + protport = ntohs(*p); + /* The 4 upper bits are the protocol */ + prot = (protport >> RTL4_A_PROTOCOL_SHIFT) & 0x0f; + if (prot != RTL4_A_PROTOCOL_RTL8366RB) { + netdev_err(dev, "unknown realtek protocol 0x%01x\n", prot); + return NULL; + } + port = protport & 0xff; + + skb->dev = dsa_master_find_slave(dev, 0, port); + if (!skb->dev) { + netdev_dbg(dev, "could not find slave for port %d\n", port); + return NULL; + } + + /* Remove RTL4 tag and recalculate checksum */ + skb_pull_rcsum(skb, RTL4_A_HDR_LEN); + + /* Move ethernet DA and SA in front of the data */ + memmove(skb->data - ETH_HLEN, + skb->data - ETH_HLEN - RTL4_A_HDR_LEN, + 2 * ETH_ALEN); + + skb->offload_fwd_mark = 1; + + return skb; +} + +static int rtl4a_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto, + int *offset) +{ + *offset = RTL4_A_HDR_LEN; + /* Skip past the tag and fetch the encapsulated Ethertype */ + *proto = ((__be16 *)skb->data)[1]; + + return 0; +} + +static const struct dsa_device_ops rtl4a_netdev_ops = { + .name = "rtl4a", + .proto = DSA_TAG_PROTO_RTL4_A, + .xmit = rtl4a_tag_xmit, + .rcv = rtl4a_tag_rcv, + .flow_dissect = rtl4a_tag_flow_dissect, + .overhead = RTL4_A_HDR_LEN, +}; +module_dsa_tag_driver(rtl4a_netdev_ops); + +MODULE_LICENSE("GPL"); +MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_RTL4_A); -- cgit v1.2.3 From d7481b24b816b8c3955a9eaf01b97e2bd7f61a37 Mon Sep 17 00:00:00 2001 From: Richard Guy Briggs Date: Fri, 3 Jul 2020 12:56:19 -0400 Subject: audit: issue CWD record to accompany LSM_AUDIT_DATA_* records The LSM_AUDIT_DATA_* records for PATH, FILE, IOCTL_OP, DENTRY and INODE are incomplete without the task context of the AUDIT Current Working Directory record. Add it. This record addition can't use audit_dummy_context to determine whether or not to store the record information since the LSM_AUDIT_DATA_* records are initiated by various LSMs independent of any audit rules. context->in_syscall is used to determine if it was called in user context like audit_getname. Please see the upstream issue https://github.com/linux-audit/audit-kernel/issues/96 Adapted from Vladis Dronov's v2 patch. 
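A condensed sketch of the guard described above (the helper name is ours; the real patch splits this between include/linux/audit.h and kernel/auditsc.c): the CWD is only captured when the task has an audit context at all and is currently inside a syscall, mirroring audit_getname().

/* Sketch of the effective audit_getcwd() behaviour this patch introduces. */
static inline void audit_getcwd_sketch(void)
{
	struct audit_context *context = audit_context();

	/* LSM_AUDIT_DATA_* records fire independently of audit rules, so
	 * audit_dummy_context() cannot serve as the gate; require an audit
	 * context and syscall context instead. */
	if (!context || !context->in_syscall)
		return;

	/* Pin the working directory once, as __audit_getname() also does. */
	if (!context->pwd.dentry)
		get_fs_pwd(current->fs, &context->pwd);
}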
Signed-off-by: Richard Guy Briggs Signed-off-by: Paul Moore --- include/linux/audit.h | 9 ++++++++- kernel/auditsc.c | 17 +++++++++++++++-- security/lsm_audit.c | 5 +++++ 3 files changed, 28 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/audit.h b/include/linux/audit.h index b5478c64bc69..523f77494847 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -292,7 +292,7 @@ extern void __audit_syscall_entry(int major, unsigned long a0, unsigned long a1, extern void __audit_syscall_exit(int ret_success, long ret_value); extern struct filename *__audit_reusename(const __user char *uptr); extern void __audit_getname(struct filename *name); - +extern void __audit_getcwd(void); extern void __audit_inode(struct filename *name, const struct dentry *dentry, unsigned int flags); extern void __audit_file(const struct file *); @@ -351,6 +351,11 @@ static inline void audit_getname(struct filename *name) if (unlikely(!audit_dummy_context())) __audit_getname(name); } +static inline void audit_getcwd(void) +{ + if (unlikely(audit_context())) + __audit_getcwd(); +} static inline void audit_inode(struct filename *name, const struct dentry *dentry, unsigned int aflags) { @@ -579,6 +584,8 @@ static inline struct filename *audit_reusename(const __user char *name) } static inline void audit_getname(struct filename *name) { } +static inline void audit_getcwd(void) +{ } static inline void audit_inode(struct filename *name, const struct dentry *dentry, unsigned int aflags) diff --git a/kernel/auditsc.c b/kernel/auditsc.c index eae1a599ffe3..6884b50069d1 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -1891,6 +1891,20 @@ __audit_reusename(const __user char *uptr) return NULL; } +inline void _audit_getcwd(struct audit_context *context) +{ + if (!context->pwd.dentry) + get_fs_pwd(current->fs, &context->pwd); +} + +void __audit_getcwd(void) +{ + struct audit_context *context = audit_context(); + + if (context->in_syscall) + _audit_getcwd(context); +} + /** * __audit_getname - add a name to the list * @name: name to add @@ -1915,8 +1929,7 @@ void __audit_getname(struct filename *name) name->aname = n; name->refcnt++; - if (!context->pwd.dentry) - get_fs_pwd(current->fs, &context->pwd); + _audit_getcwd(context); } static inline int audit_copy_fcaps(struct audit_names *name, diff --git a/security/lsm_audit.c b/security/lsm_audit.c index 2d2bf49016f4..7c555621c2bd 100644 --- a/security/lsm_audit.c +++ b/security/lsm_audit.c @@ -241,6 +241,7 @@ static void dump_common_audit_data(struct audit_buffer *ab, audit_log_untrustedstring(ab, inode->i_sb->s_id); audit_log_format(ab, " ino=%lu", inode->i_ino); } + audit_getcwd(); break; } case LSM_AUDIT_DATA_FILE: { @@ -254,6 +255,7 @@ static void dump_common_audit_data(struct audit_buffer *ab, audit_log_untrustedstring(ab, inode->i_sb->s_id); audit_log_format(ab, " ino=%lu", inode->i_ino); } + audit_getcwd(); break; } case LSM_AUDIT_DATA_IOCTL_OP: { @@ -269,6 +271,7 @@ static void dump_common_audit_data(struct audit_buffer *ab, } audit_log_format(ab, " ioctlcmd=0x%hx", a->u.op->cmd); + audit_getcwd(); break; } case LSM_AUDIT_DATA_DENTRY: { @@ -283,6 +286,7 @@ static void dump_common_audit_data(struct audit_buffer *ab, audit_log_untrustedstring(ab, inode->i_sb->s_id); audit_log_format(ab, " ino=%lu", inode->i_ino); } + audit_getcwd(); break; } case LSM_AUDIT_DATA_INODE: { @@ -300,6 +304,7 @@ static void dump_common_audit_data(struct audit_buffer *ab, audit_log_format(ab, " dev="); audit_log_untrustedstring(ab, inode->i_sb->s_id); 
audit_log_format(ab, " ino=%lu", inode->i_ino); + audit_getcwd(); break; } case LSM_AUDIT_DATA_TASK: { -- cgit v1.2.3 From 492d76b215660be833f12c3fa7cf2faf39434841 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 1 Jul 2020 11:06:20 +0200 Subject: writeback: remove {set,clear}_wb_congested Just merge them into their only callers. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- include/linux/backing-dev-defs.h | 14 ++------------ mm/backing-dev.c | 12 ++++++------ 2 files changed, 8 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index 90a7e844a098..cc5aa1f32b91 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -232,18 +232,8 @@ enum { BLK_RW_SYNC = 1, }; -void clear_wb_congested(struct bdi_writeback_congested *congested, int sync); -void set_wb_congested(struct bdi_writeback_congested *congested, int sync); - -static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync) -{ - clear_wb_congested(bdi->wb.congested, sync); -} - -static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync) -{ - set_wb_congested(bdi->wb.congested, sync); -} +void clear_bdi_congested(struct backing_dev_info *bdi, int sync); +void set_bdi_congested(struct backing_dev_info *bdi, int sync); struct wb_lock_cookie { bool locked; diff --git a/mm/backing-dev.c b/mm/backing-dev.c index d382272bcc31..3ebe5144a102 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -1047,29 +1047,29 @@ static wait_queue_head_t congestion_wqh[2] = { }; static atomic_t nr_wb_congested[2]; -void clear_wb_congested(struct bdi_writeback_congested *congested, int sync) +void clear_bdi_congested(struct backing_dev_info *bdi, int sync) { wait_queue_head_t *wqh = &congestion_wqh[sync]; enum wb_congested_state bit; bit = sync ? WB_sync_congested : WB_async_congested; - if (test_and_clear_bit(bit, &congested->state)) + if (test_and_clear_bit(bit, &bdi->wb.congested->state)) atomic_dec(&nr_wb_congested[sync]); smp_mb__after_atomic(); if (waitqueue_active(wqh)) wake_up(wqh); } -EXPORT_SYMBOL(clear_wb_congested); +EXPORT_SYMBOL(clear_bdi_congested); -void set_wb_congested(struct bdi_writeback_congested *congested, int sync) +void set_bdi_congested(struct backing_dev_info *bdi, int sync) { enum wb_congested_state bit; bit = sync ? WB_sync_congested : WB_async_congested; - if (!test_and_set_bit(bit, &congested->state)) + if (!test_and_set_bit(bit, &bdi->wb.congested->state)) atomic_inc(&nr_wb_congested[sync]); } -EXPORT_SYMBOL(set_wb_congested); +EXPORT_SYMBOL(set_bdi_congested); /** * congestion_wait - wait for a backing_dev to become uncongested -- cgit v1.2.3 From 8c911f3d4c074a17955a1757c9d1d5a9a5209ca5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 1 Jul 2020 11:06:21 +0200 Subject: writeback: remove struct bdi_writeback_congested We never set any congested bits in the group writeback instances of it. And for the simpler bdi-wide case a simple scalar field is all that that is needed. 
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-cgroup.c | 19 +---- drivers/md/dm.c | 2 +- include/linux/backing-dev-defs.h | 25 +------ include/linux/backing-dev.h | 18 +---- include/linux/blk-cgroup.h | 6 -- mm/backing-dev.c | 149 +++------------------------------------ 6 files changed, 14 insertions(+), 205 deletions(-) (limited to 'include') diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 594f1d0b0e5a..e00d0458a9d6 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -95,9 +95,6 @@ static void __blkg_release(struct rcu_head *rcu) css_put(&blkg->blkcg->css); if (blkg->parent) blkg_put(blkg->parent); - - wb_congested_put(blkg->wb_congested); - blkg_free(blkg); } @@ -227,7 +224,6 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct blkcg_gq *new_blkg) { struct blkcg_gq *blkg; - struct bdi_writeback_congested *wb_congested; int i, ret; WARN_ON_ONCE(!rcu_read_lock_held()); @@ -245,31 +241,22 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg, goto err_free_blkg; } - wb_congested = wb_congested_get_create(q->backing_dev_info, - blkcg->css.id, - GFP_NOWAIT | __GFP_NOWARN); - if (!wb_congested) { - ret = -ENOMEM; - goto err_put_css; - } - /* allocate */ if (!new_blkg) { new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN); if (unlikely(!new_blkg)) { ret = -ENOMEM; - goto err_put_congested; + goto err_put_css; } } blkg = new_blkg; - blkg->wb_congested = wb_congested; /* link parent */ if (blkcg_parent(blkcg)) { blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false); if (WARN_ON_ONCE(!blkg->parent)) { ret = -ENODEV; - goto err_put_congested; + goto err_put_css; } blkg_get(blkg->parent); } @@ -306,8 +293,6 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg, blkg_put(blkg); return ERR_PTR(ret); -err_put_congested: - wb_congested_put(wb_congested); err_put_css: css_put(&blkcg->css); err_free_blkg: diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 2d368cafb23e..48bfd41658aa 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1834,7 +1834,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits) * top-level queue for congestion. */ struct backing_dev_info *bdi = md->queue->backing_dev_info; - r = bdi->wb.congested->state & bdi_bits; + r = bdi->wb.congested & bdi_bits; } else { map = dm_get_live_table_fast(md); if (map) diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index cc5aa1f32b91..1cec4521e1fb 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -87,26 +87,6 @@ struct wb_completion { #define DEFINE_WB_COMPLETION(cmpl, bdi) \ struct wb_completion cmpl = WB_COMPLETION_INIT(bdi) -/* - * For cgroup writeback, multiple wb's may map to the same blkcg. Those - * wb's can operate mostly independently but should share the congested - * state. To facilitate such sharing, the congested state is tracked using - * the following struct which is created on demand, indexed by blkcg ID on - * its bdi, and refcounted. - */ -struct bdi_writeback_congested { - unsigned long state; /* WB_[a]sync_congested flags */ - refcount_t refcnt; /* nr of attached wb's and blkg */ - -#ifdef CONFIG_CGROUP_WRITEBACK - struct backing_dev_info *__bdi; /* the associated bdi, set to NULL - * on bdi unregistration. For memcg-wb - * internal use only! 
*/ - int blkcg_id; /* ID of the associated blkcg */ - struct rb_node rb_node; /* on bdi->cgwb_congestion_tree */ -#endif -}; - /* * Each wb (bdi_writeback) can perform writeback operations, is measured * and throttled, independently. Without cgroup writeback, each bdi @@ -140,7 +120,7 @@ struct bdi_writeback { struct percpu_counter stat[NR_WB_STAT_ITEMS]; - struct bdi_writeback_congested *congested; + unsigned long congested; /* WB_[a]sync_congested flags */ unsigned long bw_time_stamp; /* last time write bw is updated */ unsigned long dirtied_stamp; @@ -208,11 +188,8 @@ struct backing_dev_info { struct list_head wb_list; /* list of all wbs */ #ifdef CONFIG_CGROUP_WRITEBACK struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ - struct rb_root cgwb_congested_tree; /* their congested states */ struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */ struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */ -#else - struct bdi_writeback_congested *wb_congested; #endif wait_queue_head_t wb_waitq; diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 6b3504bf7a42..9173d2c22b4a 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -173,7 +173,7 @@ static inline int wb_congested(struct bdi_writeback *wb, int cong_bits) if (bdi->congested_fn) return bdi->congested_fn(bdi->congested_data, cong_bits); - return wb->congested->state & cong_bits; + return wb->congested & cong_bits; } long congestion_wait(int sync, long timeout); @@ -224,9 +224,6 @@ static inline int bdi_sched_wait(void *word) #ifdef CONFIG_CGROUP_WRITEBACK -struct bdi_writeback_congested * -wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp); -void wb_congested_put(struct bdi_writeback_congested *congested); struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi, struct cgroup_subsys_state *memcg_css); struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi, @@ -404,19 +401,6 @@ static inline bool inode_cgwb_enabled(struct inode *inode) return false; } -static inline struct bdi_writeback_congested * -wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp) -{ - refcount_inc(&bdi->wb_congested->refcnt); - return bdi->wb_congested; -} - -static inline void wb_congested_put(struct bdi_writeback_congested *congested) -{ - if (refcount_dec_and_test(&congested->refcnt)) - kfree(congested); -} - static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi) { return &bdi->wb; diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 431b2d18bf40..c8fc9792ac77 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -109,12 +109,6 @@ struct blkcg_gq { struct hlist_node blkcg_node; struct blkcg *blkcg; - /* - * Each blkg gets congested separately and the congestion state is - * propagated to the matching bdi_writeback_congested. 
- */ - struct bdi_writeback_congested *wb_congested; - /* all non-root blkcg_gq's are guaranteed to have access to parent */ struct blkcg_gq *parent; diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 3ebe5144a102..8e8b00627bb2 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -281,7 +281,7 @@ void wb_wakeup_delayed(struct bdi_writeback *wb) #define INIT_BW (100 << (20 - PAGE_SHIFT)) static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi, - int blkcg_id, gfp_t gfp) + gfp_t gfp) { int i, err; @@ -308,15 +308,9 @@ static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi, INIT_DELAYED_WORK(&wb->dwork, wb_workfn); wb->dirty_sleep = jiffies; - wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp); - if (!wb->congested) { - err = -ENOMEM; - goto out_put_bdi; - } - err = fprop_local_init_percpu(&wb->completions, gfp); if (err) - goto out_put_cong; + goto out_put_bdi; for (i = 0; i < NR_WB_STAT_ITEMS; i++) { err = percpu_counter_init(&wb->stat[i], 0, gfp); @@ -330,8 +324,6 @@ out_destroy_stat: while (i--) percpu_counter_destroy(&wb->stat[i]); fprop_local_destroy_percpu(&wb->completions); -out_put_cong: - wb_congested_put(wb->congested); out_put_bdi: if (wb != &bdi->wb) bdi_put(bdi); @@ -374,7 +366,6 @@ static void wb_exit(struct bdi_writeback *wb) percpu_counter_destroy(&wb->stat[i]); fprop_local_destroy_percpu(&wb->completions); - wb_congested_put(wb->congested); if (wb != &wb->bdi->wb) bdi_put(wb->bdi); } @@ -384,99 +375,12 @@ static void wb_exit(struct bdi_writeback *wb) #include /* - * cgwb_lock protects bdi->cgwb_tree, bdi->cgwb_congested_tree, - * blkcg->cgwb_list, and memcg->cgwb_list. bdi->cgwb_tree is also RCU - * protected. + * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, and memcg->cgwb_list. + * bdi->cgwb_tree is also RCU protected. */ static DEFINE_SPINLOCK(cgwb_lock); static struct workqueue_struct *cgwb_release_wq; -/** - * wb_congested_get_create - get or create a wb_congested - * @bdi: associated bdi - * @blkcg_id: ID of the associated blkcg - * @gfp: allocation mask - * - * Look up the wb_congested for @blkcg_id on @bdi. If missing, create one. - * The returned wb_congested has its reference count incremented. Returns - * NULL on failure. 
- */ -struct bdi_writeback_congested * -wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp) -{ - struct bdi_writeback_congested *new_congested = NULL, *congested; - struct rb_node **node, *parent; - unsigned long flags; -retry: - spin_lock_irqsave(&cgwb_lock, flags); - - node = &bdi->cgwb_congested_tree.rb_node; - parent = NULL; - - while (*node != NULL) { - parent = *node; - congested = rb_entry(parent, struct bdi_writeback_congested, - rb_node); - if (congested->blkcg_id < blkcg_id) - node = &parent->rb_left; - else if (congested->blkcg_id > blkcg_id) - node = &parent->rb_right; - else - goto found; - } - - if (new_congested) { - /* !found and storage for new one already allocated, insert */ - congested = new_congested; - rb_link_node(&congested->rb_node, parent, node); - rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree); - spin_unlock_irqrestore(&cgwb_lock, flags); - return congested; - } - - spin_unlock_irqrestore(&cgwb_lock, flags); - - /* allocate storage for new one and retry */ - new_congested = kzalloc(sizeof(*new_congested), gfp); - if (!new_congested) - return NULL; - - refcount_set(&new_congested->refcnt, 1); - new_congested->__bdi = bdi; - new_congested->blkcg_id = blkcg_id; - goto retry; - -found: - refcount_inc(&congested->refcnt); - spin_unlock_irqrestore(&cgwb_lock, flags); - kfree(new_congested); - return congested; -} - -/** - * wb_congested_put - put a wb_congested - * @congested: wb_congested to put - * - * Put @congested and destroy it if the refcnt reaches zero. - */ -void wb_congested_put(struct bdi_writeback_congested *congested) -{ - unsigned long flags; - - if (!refcount_dec_and_lock_irqsave(&congested->refcnt, &cgwb_lock, &flags)) - return; - - /* bdi might already have been destroyed leaving @congested unlinked */ - if (congested->__bdi) { - rb_erase(&congested->rb_node, - &congested->__bdi->cgwb_congested_tree); - congested->__bdi = NULL; - } - - spin_unlock_irqrestore(&cgwb_lock, flags); - kfree(congested); -} - static void cgwb_release_workfn(struct work_struct *work) { struct bdi_writeback *wb = container_of(work, struct bdi_writeback, @@ -558,7 +462,7 @@ static int cgwb_create(struct backing_dev_info *bdi, goto out_put; } - ret = wb_init(wb, bdi, blkcg_css->id, gfp); + ret = wb_init(wb, bdi, gfp); if (ret) goto err_free; @@ -696,11 +600,10 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi) int ret; INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC); - bdi->cgwb_congested_tree = RB_ROOT; mutex_init(&bdi->cgwb_release_mutex); init_rwsem(&bdi->wb_switch_rwsem); - ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL); + ret = wb_init(&bdi->wb, bdi, GFP_KERNEL); if (!ret) { bdi->wb.memcg_css = &root_mem_cgroup->css; bdi->wb.blkcg_css = blkcg_root_css; @@ -769,21 +672,6 @@ void wb_blkcg_offline(struct blkcg *blkcg) spin_unlock_irq(&cgwb_lock); } -static void cgwb_bdi_exit(struct backing_dev_info *bdi) -{ - struct rb_node *rbn; - - spin_lock_irq(&cgwb_lock); - while ((rbn = rb_first(&bdi->cgwb_congested_tree))) { - struct bdi_writeback_congested *congested = - rb_entry(rbn, struct bdi_writeback_congested, rb_node); - - rb_erase(rbn, &bdi->cgwb_congested_tree); - congested->__bdi = NULL; /* mark @congested unlinked */ - } - spin_unlock_irq(&cgwb_lock); -} - static void cgwb_bdi_register(struct backing_dev_info *bdi) { spin_lock_irq(&cgwb_lock); @@ -810,29 +698,11 @@ subsys_initcall(cgwb_init); static int cgwb_bdi_init(struct backing_dev_info *bdi) { - int err; - - bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL); 
- if (!bdi->wb_congested) - return -ENOMEM; - - refcount_set(&bdi->wb_congested->refcnt, 1); - - err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL); - if (err) { - wb_congested_put(bdi->wb_congested); - return err; - } - return 0; + return wb_init(&bdi->wb, bdi, GFP_KERNEL); } static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { } -static void cgwb_bdi_exit(struct backing_dev_info *bdi) -{ - wb_congested_put(bdi->wb_congested); -} - static void cgwb_bdi_register(struct backing_dev_info *bdi) { list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list); @@ -1023,7 +893,6 @@ static void release_bdi(struct kref *ref) bdi_unregister(bdi); WARN_ON_ONCE(bdi->dev); wb_exit(&bdi->wb); - cgwb_bdi_exit(bdi); kfree(bdi); } @@ -1053,7 +922,7 @@ void clear_bdi_congested(struct backing_dev_info *bdi, int sync) enum wb_congested_state bit; bit = sync ? WB_sync_congested : WB_async_congested; - if (test_and_clear_bit(bit, &bdi->wb.congested->state)) + if (test_and_clear_bit(bit, &bdi->wb.congested)) atomic_dec(&nr_wb_congested[sync]); smp_mb__after_atomic(); if (waitqueue_active(wqh)) @@ -1066,7 +935,7 @@ void set_bdi_congested(struct backing_dev_info *bdi, int sync) enum wb_congested_state bit; bit = sync ? WB_sync_congested : WB_async_congested; - if (!test_and_set_bit(bit, &bdi->wb.congested->state)) + if (!test_and_set_bit(bit, &bdi->wb.congested)) atomic_inc(&nr_wb_congested[sync]); } EXPORT_SYMBOL(set_bdi_congested); -- cgit v1.2.3 From 21cf866145047f8bfecb38ec8d2fed64464c074f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 1 Jul 2020 11:06:22 +0200 Subject: writeback: remove bdi->congested_fn Except for pktdvd, the only places setting congested bits are file systems that allocate their own backing_dev_info structures. And pktdvd is a deprecated driver that isn't useful in stack setup either. So remove the dead congested_fn stacking infrastructure. 
Signed-off-by: Christoph Hellwig Acked-by: Song Liu Acked-by: David Sterba [axboe: fixup unused variables in bcache/request.c] Signed-off-by: Jens Axboe --- drivers/block/drbd/drbd_main.c | 59 ---------------------------------------- drivers/md/bcache/request.c | 47 -------------------------------- drivers/md/bcache/super.c | 1 - drivers/md/dm-cache-target.c | 19 ------------- drivers/md/dm-clone-target.c | 15 ---------- drivers/md/dm-era-target.c | 15 ---------- drivers/md/dm-raid.c | 12 -------- drivers/md/dm-table.c | 37 +------------------------ drivers/md/dm-thin.c | 16 ----------- drivers/md/dm.c | 33 ---------------------- drivers/md/dm.h | 1 - drivers/md/md-linear.c | 24 ---------------- drivers/md/md-multipath.c | 23 ---------------- drivers/md/md.c | 23 ---------------- drivers/md/md.h | 4 --- drivers/md/raid0.c | 16 ----------- drivers/md/raid1.c | 31 --------------------- drivers/md/raid10.c | 26 ------------------ drivers/md/raid5.c | 25 ----------------- fs/btrfs/disk-io.c | 23 ---------------- include/linux/backing-dev-defs.h | 4 --- include/linux/backing-dev.h | 4 --- include/linux/device-mapper.h | 11 -------- 23 files changed, 1 insertion(+), 468 deletions(-) (limited to 'include') diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 7c34cc0ad8cc..cb687ccdbd96 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2415,62 +2415,6 @@ static void drbd_cleanup(void) pr_info("module cleanup done.\n"); } -/** - * drbd_congested() - Callback for the flusher thread - * @congested_data: User data - * @bdi_bits: Bits the BDI flusher thread is currently interested in - * - * Returns 1<connection->flags)) { - r |= (1 << WB_async_congested); - /* Without good local data, we would need to read from remote, - * and that would need the worker thread as well, which is - * currently blocked waiting for that usermode helper to - * finish. - */ - if (!get_ldev_if_state(device, D_UP_TO_DATE)) - r |= (1 << WB_sync_congested); - else - put_ldev(device); - r &= bdi_bits; - reason = 'c'; - goto out; - } - - if (get_ldev(device)) { - q = bdev_get_queue(device->ldev->backing_bdev); - r = bdi_congested(q->backing_dev_info, bdi_bits); - put_ldev(device); - if (r) - reason = 'b'; - } - - if (bdi_bits & (1 << WB_async_congested) && - test_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags)) { - r |= (1 << WB_async_congested); - reason = reason == 'b' ? 'a' : 'n'; - } - -out: - device->congestion_reason = reason; - return r; -} - static void drbd_init_workqueue(struct drbd_work_queue* wq) { spin_lock_init(&wq->q_lock); @@ -2825,9 +2769,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig /* we have no partitions. we contain only ourselves. 
*/ device->this_bdev->bd_contains = device->this_bdev; - q->backing_dev_info->congested_fn = drbd_congested; - q->backing_dev_info->congested_data = device; - blk_queue_write_cache(q, true, true); /* Setting the max_hw_sectors to an odd value of 8kibyte here This triggers a max_bio_size message upon first attach or connect */ diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index dd012ebface0..a190bf47076d 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -1228,36 +1228,8 @@ static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode, return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg); } -static int cached_dev_congested(void *data, int bits) -{ - struct bcache_device *d = data; - struct cached_dev *dc = container_of(d, struct cached_dev, disk); - struct request_queue *q = bdev_get_queue(dc->bdev); - int ret = 0; - - if (bdi_congested(q->backing_dev_info, bits)) - return 1; - - if (cached_dev_get(dc)) { - unsigned int i; - struct cache *ca; - - for_each_cache(ca, d->c, i) { - q = bdev_get_queue(ca->bdev); - ret |= bdi_congested(q->backing_dev_info, bits); - } - - cached_dev_put(dc); - } - - return ret; -} - void bch_cached_dev_request_init(struct cached_dev *dc) { - struct gendisk *g = dc->disk.disk; - - g->queue->backing_dev_info->congested_fn = cached_dev_congested; dc->disk.cache_miss = cached_dev_cache_miss; dc->disk.ioctl = cached_dev_ioctl; } @@ -1341,27 +1313,8 @@ static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode, return -ENOTTY; } -static int flash_dev_congested(void *data, int bits) -{ - struct bcache_device *d = data; - struct request_queue *q; - struct cache *ca; - unsigned int i; - int ret = 0; - - for_each_cache(ca, d->c, i) { - q = bdev_get_queue(ca->bdev); - ret |= bdi_congested(q->backing_dev_info, bits); - } - - return ret; -} - void bch_flash_dev_request_init(struct bcache_device *d) { - struct gendisk *g = d->disk; - - g->queue->backing_dev_info->congested_fn = flash_dev_congested; d->cache_miss = flash_dev_cache_miss; d->ioctl = flash_dev_ioctl; } diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index de13f6e91696..9e45faa054b6 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -885,7 +885,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size, return -ENOMEM; d->disk->queue = q; - q->backing_dev_info->congested_data = d; q->limits.max_hw_sectors = UINT_MAX; q->limits.max_sectors = UINT_MAX; q->limits.max_segment_size = UINT_MAX; diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 9eccced92896..96c93802ee4d 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -421,8 +421,6 @@ struct cache { struct rw_semaphore quiesce_lock; - struct dm_target_callbacks callbacks; - /* * origin_blocks entries, discarded if set. 
*/ @@ -2423,20 +2421,6 @@ static void set_cache_size(struct cache *cache, dm_cblock_t size) cache->cache_size = size; } -static int is_congested(struct dm_dev *dev, int bdi_bits) -{ - struct request_queue *q = bdev_get_queue(dev->bdev); - return bdi_congested(q->backing_dev_info, bdi_bits); -} - -static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits) -{ - struct cache *cache = container_of(cb, struct cache, callbacks); - - return is_congested(cache->origin_dev, bdi_bits) || - is_congested(cache->cache_dev, bdi_bits); -} - #define DEFAULT_MIGRATION_THRESHOLD 2048 static int cache_create(struct cache_args *ca, struct cache **result) @@ -2471,9 +2455,6 @@ static int cache_create(struct cache_args *ca, struct cache **result) goto bad; } - cache->callbacks.congested_fn = cache_is_congested; - dm_table_add_target_callbacks(ti->table, &cache->callbacks); - cache->metadata_dev = ca->metadata_dev; cache->origin_dev = ca->origin_dev; cache->cache_dev = ca->cache_dev; diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c index 59ed8a67c2e3..bdb255edc200 100644 --- a/drivers/md/dm-clone-target.c +++ b/drivers/md/dm-clone-target.c @@ -68,7 +68,6 @@ struct hash_table_bucket; struct clone { struct dm_target *ti; - struct dm_target_callbacks callbacks; struct dm_dev *metadata_dev; struct dm_dev *dest_dev; @@ -1518,18 +1517,6 @@ error: DMEMIT("Error"); } -static int clone_is_congested(struct dm_target_callbacks *cb, int bdi_bits) -{ - struct request_queue *dest_q, *source_q; - struct clone *clone = container_of(cb, struct clone, callbacks); - - source_q = bdev_get_queue(clone->source_dev->bdev); - dest_q = bdev_get_queue(clone->dest_dev->bdev); - - return (bdi_congested(dest_q->backing_dev_info, bdi_bits) | - bdi_congested(source_q->backing_dev_info, bdi_bits)); -} - static sector_t get_dev_size(struct dm_dev *dev) { return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT; @@ -1930,8 +1917,6 @@ static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto out_with_mempool; mutex_init(&clone->commit_lock); - clone->callbacks.congested_fn = clone_is_congested; - dm_table_add_target_callbacks(ti->table, &clone->callbacks); /* Enable flushes */ ti->num_flush_bios = 1; diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c index 566ddbdb16a4..b24e3839bb3a 100644 --- a/drivers/md/dm-era-target.c +++ b/drivers/md/dm-era-target.c @@ -1137,7 +1137,6 @@ static int metadata_get_stats(struct era_metadata *md, void *ptr) struct era { struct dm_target *ti; - struct dm_target_callbacks callbacks; struct dm_dev *metadata_dev; struct dm_dev *origin_dev; @@ -1375,18 +1374,6 @@ static void stop_worker(struct era *era) /*---------------------------------------------------------------- * Target methods *--------------------------------------------------------------*/ -static int dev_is_congested(struct dm_dev *dev, int bdi_bits) -{ - struct request_queue *q = bdev_get_queue(dev->bdev); - return bdi_congested(q->backing_dev_info, bdi_bits); -} - -static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits) -{ - struct era *era = container_of(cb, struct era, callbacks); - return dev_is_congested(era->origin_dev, bdi_bits); -} - static void era_destroy(struct era *era) { if (era->md) @@ -1514,8 +1501,6 @@ static int era_ctr(struct dm_target *ti, unsigned argc, char **argv) ti->flush_supported = true; ti->num_discard_bios = 1; - era->callbacks.congested_fn = era_is_congested; - dm_table_add_target_callbacks(ti->table, &era->callbacks); return 0; } 
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 10e8b2fe787b..d9e270957e18 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -242,7 +242,6 @@ struct raid_set { struct mddev md; struct raid_type *raid_type; - struct dm_target_callbacks callbacks; sector_t array_sectors; sector_t dev_sectors; @@ -1705,13 +1704,6 @@ static void do_table_event(struct work_struct *ws) dm_table_event(rs->ti->table); } -static int raid_is_congested(struct dm_target_callbacks *cb, int bits) -{ - struct raid_set *rs = container_of(cb, struct raid_set, callbacks); - - return mddev_congested(&rs->md, bits); -} - /* * Make sure a valid takover (level switch) is being requested on @rs * @@ -3248,9 +3240,6 @@ size_check: goto bad_md_start; } - rs->callbacks.congested_fn = raid_is_congested; - dm_table_add_target_callbacks(ti->table, &rs->callbacks); - /* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */ if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) { r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode); @@ -3310,7 +3299,6 @@ static void raid_dtr(struct dm_target *ti) { struct raid_set *rs = ti->private; - list_del_init(&rs->callbacks.list); md_stop(&rs->md); raid_set_free(rs); } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 8277b959e00b..0ea5b7367179 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -64,8 +64,6 @@ struct dm_table { void *event_context; struct dm_md_mempools *mempools; - - struct list_head target_callbacks; }; /* @@ -190,7 +188,6 @@ int dm_table_create(struct dm_table **result, fmode_t mode, return -ENOMEM; INIT_LIST_HEAD(&t->devices); - INIT_LIST_HEAD(&t->target_callbacks); if (!num_targets) num_targets = KEYS_PER_NODE; @@ -361,7 +358,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, * This upgrades the mode on an already open dm_dev, being * careful to leave things as they were if we fail to reopen the * device and not to touch the existing bdev field in case - * it is accessed concurrently inside dm_table_any_congested(). + * it is accessed concurrently. 
*/ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode, struct mapped_device *md) @@ -2052,38 +2049,6 @@ int dm_table_resume_targets(struct dm_table *t) return 0; } -void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb) -{ - list_add(&cb->list, &t->target_callbacks); -} -EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks); - -int dm_table_any_congested(struct dm_table *t, int bdi_bits) -{ - struct dm_dev_internal *dd; - struct list_head *devices = dm_table_get_devices(t); - struct dm_target_callbacks *cb; - int r = 0; - - list_for_each_entry(dd, devices, list) { - struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev); - char b[BDEVNAME_SIZE]; - - if (likely(q)) - r |= bdi_congested(q->backing_dev_info, bdi_bits); - else - DMWARN_LIMIT("%s: any_congested: nonexistent device %s", - dm_device_name(t->md), - bdevname(dd->dm_dev->bdev, b)); - } - - list_for_each_entry(cb, &t->target_callbacks, list) - if (cb->congested_fn) - r |= cb->congested_fn(cb, bdi_bits); - - return r; -} - struct mapped_device *dm_table_get_md(struct dm_table *t) { return t->md; diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index fe2de2888709..fff4c50df74d 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -326,7 +326,6 @@ struct pool_c { struct pool *pool; struct dm_dev *data_dev; struct dm_dev *metadata_dev; - struct dm_target_callbacks callbacks; dm_block_t low_water_blocks; struct pool_features requested_pf; /* Features requested during table load */ @@ -2796,18 +2795,6 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) } } -static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits) -{ - struct pool_c *pt = container_of(cb, struct pool_c, callbacks); - struct request_queue *q; - - if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE) - return 1; - - q = bdev_get_queue(pt->data_dev->bdev); - return bdi_congested(q->backing_dev_info, bdi_bits); -} - static void requeue_bios(struct pool *pool) { struct thin_c *tc; @@ -3420,9 +3407,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) dm_pool_register_pre_commit_callback(pool->pmd, metadata_pre_commit_callback, pool); - pt->callbacks.congested_fn = pool_is_congested; - dm_table_add_target_callbacks(ti->table, &pt->callbacks); - mutex_unlock(&dm_thin_pool_table.mutex); return 0; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 48bfd41658aa..e2148fcb88bb 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1821,31 +1821,6 @@ static blk_qc_t dm_submit_bio(struct bio *bio) return ret; } -static int dm_any_congested(void *congested_data, int bdi_bits) -{ - int r = bdi_bits; - struct mapped_device *md = congested_data; - struct dm_table *map; - - if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { - if (dm_request_based(md)) { - /* - * With request-based DM we only need to check the - * top-level queue for congestion. - */ - struct backing_dev_info *bdi = md->queue->backing_dev_info; - r = bdi->wb.congested & bdi_bits; - } else { - map = dm_get_live_table_fast(md); - if (map) - r = dm_table_any_congested(map, bdi_bits); - dm_put_live_table_fast(md); - } - } - - return r; -} - /*----------------------------------------------------------------- * An IDR is used to keep track of allocated minor numbers. 
*---------------------------------------------------------------*/ @@ -2284,12 +2259,6 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md) } EXPORT_SYMBOL_GPL(dm_get_queue_limits); -static void dm_init_congested_fn(struct mapped_device *md) -{ - md->queue->backing_dev_info->congested_data = md; - md->queue->backing_dev_info->congested_fn = dm_any_congested; -} - /* * Setup the DM device's queue based on md's type */ @@ -2306,12 +2275,10 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) DMERR("Cannot initialize queue for request-based dm-mq mapped device"); return r; } - dm_init_congested_fn(md); break; case DM_TYPE_BIO_BASED: case DM_TYPE_DAX_BIO_BASED: case DM_TYPE_NVME_BIO_BASED: - dm_init_congested_fn(md); break; case DM_TYPE_NONE: WARN_ON_ONCE(true); diff --git a/drivers/md/dm.h b/drivers/md/dm.h index d7c4f6606b5f..4f5fe664d05a 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -63,7 +63,6 @@ void dm_table_presuspend_targets(struct dm_table *t); void dm_table_presuspend_undo_targets(struct dm_table *t); void dm_table_postsuspend_targets(struct dm_table *t); int dm_table_resume_targets(struct dm_table *t); -int dm_table_any_congested(struct dm_table *t, int bdi_bits); enum dm_queue_mode dm_table_get_type(struct dm_table *t); struct target_type *dm_table_get_immutable_target_type(struct dm_table *t); struct dm_target *dm_table_get_immutable_target(struct dm_table *t); diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c index 8efada3ee16f..c2ae9125c4c3 100644 --- a/drivers/md/md-linear.c +++ b/drivers/md/md-linear.c @@ -46,29 +46,6 @@ static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector) return conf->disks + lo; } -/* - * In linear_congested() conf->raid_disks is used as a copy of - * mddev->raid_disks to iterate conf->disks[], because conf->raid_disks - * and conf->disks[] are created in linear_conf(), they are always - * consitent with each other, but mddev->raid_disks does not. 
- */ -static int linear_congested(struct mddev *mddev, int bits) -{ - struct linear_conf *conf; - int i, ret = 0; - - rcu_read_lock(); - conf = rcu_dereference(mddev->private); - - for (i = 0; i < conf->raid_disks && !ret ; i++) { - struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev); - ret |= bdi_congested(q->backing_dev_info, bits); - } - - rcu_read_unlock(); - return ret; -} - static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks) { struct linear_conf *conf; @@ -322,7 +299,6 @@ static struct md_personality linear_personality = .hot_add_disk = linear_add, .size = linear_size, .quiesce = linear_quiesce, - .congested = linear_congested, }; static int __init linear_init (void) diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c index 277fdfd9ee54..776bbe542db5 100644 --- a/drivers/md/md-multipath.c +++ b/drivers/md/md-multipath.c @@ -151,28 +151,6 @@ static void multipath_status(struct seq_file *seq, struct mddev *mddev) seq_putc(seq, ']'); } -static int multipath_congested(struct mddev *mddev, int bits) -{ - struct mpconf *conf = mddev->private; - int i, ret = 0; - - rcu_read_lock(); - for (i = 0; i < mddev->raid_disks ; i++) { - struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev); - if (rdev && !test_bit(Faulty, &rdev->flags)) { - struct request_queue *q = bdev_get_queue(rdev->bdev); - - ret |= bdi_congested(q->backing_dev_info, bits); - /* Just like multipath_map, we just check the - * first available device - */ - break; - } - } - rcu_read_unlock(); - return ret; -} - /* * Careful, this can execute in IRQ contexts as well! */ @@ -478,7 +456,6 @@ static struct md_personality multipath_personality = .hot_add_disk = multipath_add_disk, .hot_remove_disk= multipath_remove_disk, .size = multipath_size, - .congested = multipath_congested, }; static int __init multipath_init (void) diff --git a/drivers/md/md.c b/drivers/md/md.c index 77dfe4765c31..96b28f6d025c 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -549,26 +549,6 @@ void mddev_resume(struct mddev *mddev) } EXPORT_SYMBOL_GPL(mddev_resume); -int mddev_congested(struct mddev *mddev, int bits) -{ - struct md_personality *pers = mddev->pers; - int ret = 0; - - rcu_read_lock(); - if (mddev->suspended) - ret = 1; - else if (pers && pers->congested) - ret = pers->congested(mddev, bits); - rcu_read_unlock(); - return ret; -} -EXPORT_SYMBOL_GPL(mddev_congested); -static int md_congested(void *data, int bits) -{ - struct mddev *mddev = data; - return mddev_congested(mddev, bits); -} - /* * Generic flush handling for md */ @@ -5965,8 +5945,6 @@ int md_run(struct mddev *mddev) blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue); else blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue); - mddev->queue->backing_dev_info->congested_data = mddev; - mddev->queue->backing_dev_info->congested_fn = md_congested; } if (pers->sync_request) { if (mddev->kobj.sd && @@ -6351,7 +6329,6 @@ static int do_md_stop(struct mddev *mddev, int mode, __md_stop_writes(mddev); __md_stop(mddev); - mddev->queue->backing_dev_info->congested_fn = NULL; /* tell userspace to handle 'inactive' */ sysfs_notify_dirent_safe(mddev->sysfs_state); diff --git a/drivers/md/md.h b/drivers/md/md.h index 612814d07d35..e2f1ad9afc48 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -597,9 +597,6 @@ struct md_personality * array. */ void *(*takeover) (struct mddev *mddev); - /* congested implements bdi.congested_fn(). 
- * Will not be called while array is 'suspended' */ - int (*congested)(struct mddev *mddev, int bits); /* Changes the consistency policy of an active array. */ int (*change_consistency_policy)(struct mddev *mddev, const char *buf); }; @@ -710,7 +707,6 @@ extern void md_done_sync(struct mddev *mddev, int blocks, int ok); extern void md_error(struct mddev *mddev, struct md_rdev *rdev); extern void md_finish_reshape(struct mddev *mddev); -extern int mddev_congested(struct mddev *mddev, int bits); extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio); extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev, sector_t sector, int size, struct page *page); diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index e9e91c8d8afc..f54a449f97aa 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -29,21 +29,6 @@ module_param(default_layout, int, 0644); (1L << MD_HAS_PPL) | \ (1L << MD_HAS_MULTIPLE_PPLS)) -static int raid0_congested(struct mddev *mddev, int bits) -{ - struct r0conf *conf = mddev->private; - struct md_rdev **devlist = conf->devlist; - int raid_disks = conf->strip_zone[0].nb_dev; - int i, ret = 0; - - for (i = 0; i < raid_disks && !ret ; i++) { - struct request_queue *q = bdev_get_queue(devlist[i]->bdev); - - ret |= bdi_congested(q->backing_dev_info, bits); - } - return ret; -} - /* * inform the user of the raid configuration */ @@ -818,7 +803,6 @@ static struct md_personality raid0_personality= .size = raid0_size, .takeover = raid0_takeover, .quiesce = raid0_quiesce, - .congested = raid0_congested, }; static int __init raid0_init (void) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 2aa2649cca66..960d854c07f8 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -786,36 +786,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect return best_disk; } -static int raid1_congested(struct mddev *mddev, int bits) -{ - struct r1conf *conf = mddev->private; - int i, ret = 0; - - if ((bits & (1 << WB_async_congested)) && - conf->pending_count >= max_queued_requests) - return 1; - - rcu_read_lock(); - for (i = 0; i < conf->raid_disks * 2; i++) { - struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); - if (rdev && !test_bit(Faulty, &rdev->flags)) { - struct request_queue *q = bdev_get_queue(rdev->bdev); - - BUG_ON(!q); - - /* Note the '|| 1' - when read_balance prefers - * non-congested targets, it can be removed - */ - if ((bits & (1 << WB_async_congested)) || 1) - ret |= bdi_congested(q->backing_dev_info, bits); - else - ret &= bdi_congested(q->backing_dev_info, bits); - } - } - rcu_read_unlock(); - return ret; -} - static void flush_bio_list(struct r1conf *conf, struct bio *bio) { /* flush any pending bitmap writes to disk before proceeding w/ I/O */ @@ -3396,7 +3366,6 @@ static struct md_personality raid1_personality = .check_reshape = raid1_reshape, .quiesce = raid1_quiesce, .takeover = raid1_takeover, - .congested = raid1_congested, }; static int __init raid_init(void) diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index e45fd56cf584..353288bc4cb7 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -848,31 +848,6 @@ static struct md_rdev *read_balance(struct r10conf *conf, return rdev; } -static int raid10_congested(struct mddev *mddev, int bits) -{ - struct r10conf *conf = mddev->private; - int i, ret = 0; - - if ((bits & (1 << WB_async_congested)) && - conf->pending_count >= max_queued_requests) - return 1; - - rcu_read_lock(); - for (i = 0; - (i < 
conf->geo.raid_disks || i < conf->prev.raid_disks) - && ret == 0; - i++) { - struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); - if (rdev && !test_bit(Faulty, &rdev->flags)) { - struct request_queue *q = bdev_get_queue(rdev->bdev); - - ret |= bdi_congested(q->backing_dev_info, bits); - } - } - rcu_read_unlock(); - return ret; -} - static void flush_pending_writes(struct r10conf *conf) { /* Any writes that have been queued but are awaiting @@ -4929,7 +4904,6 @@ static struct md_personality raid10_personality = .start_reshape = raid10_start_reshape, .finish_reshape = raid10_finish_reshape, .update_reshape_pos = raid10_update_reshape_pos, - .congested = raid10_congested, }; static int __init raid_init(void) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 8dea4398b191..774ea893d47e 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5099,28 +5099,6 @@ static void activate_bit_delay(struct r5conf *conf, } } -static int raid5_congested(struct mddev *mddev, int bits) -{ - struct r5conf *conf = mddev->private; - - /* No difference between reads and writes. Just check - * how busy the stripe_cache is - */ - - if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) - return 1; - - /* Also checks whether there is pressure on r5cache log space */ - if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) - return 1; - if (conf->quiesce) - return 1; - if (atomic_read(&conf->empty_inactive_list_nr)) - return 1; - - return 0; -} - static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) { struct r5conf *conf = mddev->private; @@ -8427,7 +8405,6 @@ static struct md_personality raid6_personality = .finish_reshape = raid5_finish_reshape, .quiesce = raid5_quiesce, .takeover = raid6_takeover, - .congested = raid5_congested, .change_consistency_policy = raid5_change_consistency_policy, }; static struct md_personality raid5_personality = @@ -8452,7 +8429,6 @@ static struct md_personality raid5_personality = .finish_reshape = raid5_finish_reshape, .quiesce = raid5_quiesce, .takeover = raid5_takeover, - .congested = raid5_congested, .change_consistency_policy = raid5_change_consistency_policy, }; @@ -8478,7 +8454,6 @@ static struct md_personality raid4_personality = .finish_reshape = raid5_finish_reshape, .quiesce = raid5_quiesce, .takeover = raid4_takeover, - .congested = raid5_congested, .change_consistency_policy = raid5_change_consistency_policy, }; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 7c6f0bbb54a5..eb5f2506cede 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1616,27 +1616,6 @@ fail: return ERR_PTR(ret); } -static int btrfs_congested_fn(void *congested_data, int bdi_bits) -{ - struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data; - int ret = 0; - struct btrfs_device *device; - struct backing_dev_info *bdi; - - rcu_read_lock(); - list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) { - if (!device->bdev) - continue; - bdi = device->bdev->bd_bdi; - if (bdi_congested(bdi, bdi_bits)) { - ret = 1; - break; - } - } - rcu_read_unlock(); - return ret; -} - /* * called by the kthread helper functions to finally call the bio end_io * functions. 
This is where read checksum verification actually happens @@ -3051,8 +3030,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device goto fail_sb_buffer; } - sb->s_bdi->congested_fn = btrfs_congested_fn; - sb->s_bdi->congested_data = fs_info; sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK; sb->s_bdi->ra_pages = VM_READAHEAD_PAGES; sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super); diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index 1cec4521e1fb..fff9367a6348 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -33,8 +33,6 @@ enum wb_congested_state { WB_sync_congested, /* The sync queue is getting full */ }; -typedef int (congested_fn)(void *, int); - enum wb_stat_item { WB_RECLAIMABLE, WB_WRITEBACK, @@ -170,8 +168,6 @@ struct backing_dev_info { struct list_head bdi_list; unsigned long ra_pages; /* max readahead in PAGE_SIZE units */ unsigned long io_pages; /* max allowed IO size */ - congested_fn *congested_fn; /* Function pointer if device is md/dm */ - void *congested_data; /* Pointer to aux data for congested func */ struct kref refcnt; /* Reference counter for the structure */ unsigned int capabilities; /* Device capabilities */ diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 9173d2c22b4a..0b06b2d26c9a 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -169,10 +169,6 @@ static inline struct backing_dev_info *inode_to_bdi(struct inode *inode) static inline int wb_congested(struct bdi_writeback *wb, int cong_bits) { - struct backing_dev_info *bdi = wb->bdi; - - if (bdi->congested_fn) - return bdi->congested_fn(bdi->congested_data, cong_bits); return wb->congested & cong_bits; } diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 8750f2dc5613..d5306d9c29c4 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -322,12 +322,6 @@ struct dm_target { bool discards_supported:1; }; -/* Each target can link one of these into the table */ -struct dm_target_callbacks { - struct list_head list; - int (*congested_fn) (struct dm_target_callbacks *, int); -}; - void *dm_per_bio_data(struct bio *bio, size_t data_size); struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size); unsigned dm_bio_get_target_bio_nr(const struct bio *bio); @@ -477,11 +471,6 @@ int dm_table_create(struct dm_table **result, fmode_t mode, int dm_table_add_target(struct dm_table *t, const char *type, sector_t start, sector_t len, char *params); -/* - * Target_ctr should call this if it needs to add any callbacks. - */ -void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb); - /* * Target can use this to set the table's type. * Can only ever be called from a target's ctr. -- cgit v1.2.3 From 5abfe5cf0b8358b8ad0da99e4188c2519839d67c Mon Sep 17 00:00:00 2001 From: Rishabh Bhatnagar Date: Tue, 23 Jun 2020 19:23:27 -0700 Subject: remoteproc: qcom: Add per subsystem SSR notification Currently there is a single notification chain which is called whenever any remoteproc shuts down. This leads to all the listeners being notified, and is not an optimal design as kernel drivers might only be interested in listening to notifications from a particular remoteproc. Create a global list of remoteproc notification info data structures. This will hold the name and notifier_list information for a particular remoteproc. 
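As the next paragraph describes, registration then happens by subsystem name and hands back a cookie used for unregistration. A hedged consumer-side sketch is shown below; the "mpss" name, the callback body and the register/unregister wrappers are purely illustrative, and only qcom_register_ssr_notifier() and qcom_unregister_ssr_notifier() come from this patch.

#include <linux/err.h>
#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/remoteproc/qcom_rproc.h>

static int example_ssr_cb(struct notifier_block *nb, unsigned long event,
			  void *data)
{
	struct qcom_ssr_notify_data *notify_data = data;

	pr_info("SSR event %lu from %s\n", event, notify_data->name);
	return NOTIFY_OK;
}

static struct notifier_block example_nb = { .notifier_call = example_ssr_cb };
static void *example_ssr_cookie;

static int example_register(void)
{
	/* Register against one remoteproc by name, keep the cookie. */
	example_ssr_cookie = qcom_register_ssr_notifier("mpss", &example_nb);
	return PTR_ERR_OR_ZERO(example_ssr_cookie);
}

static void example_unregister(void)
{
	qcom_unregister_ssr_notifier(example_ssr_cookie, &example_nb);
}
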
The API to register for notifications will use name argument to retrieve the notification info data structure and the notifier block will be added to that data structure's notification chain. Also move from blocking notifier to srcu notifer based implementation to support dynamic notifier head creation. Reviewed-by: Alex Elder Co-developed-by: Siddharth Gupta Signed-off-by: Siddharth Gupta Signed-off-by: Rishabh Bhatnagar Link: https://lore.kernel.org/r/1592965408-16908-2-git-send-email-rishabhb@codeaurora.org Signed-off-by: Bjorn Andersson --- drivers/remoteproc/qcom_common.c | 90 ++++++++++++++++++++++++++++++----- drivers/remoteproc/qcom_common.h | 5 +- include/linux/remoteproc/qcom_rproc.h | 20 ++++++-- 3 files changed, 95 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/drivers/remoteproc/qcom_common.c b/drivers/remoteproc/qcom_common.c index 9028cea2d81e..48b399af9c83 100644 --- a/drivers/remoteproc/qcom_common.c +++ b/drivers/remoteproc/qcom_common.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -23,7 +24,14 @@ #define to_smd_subdev(d) container_of(d, struct qcom_rproc_subdev, subdev) #define to_ssr_subdev(d) container_of(d, struct qcom_rproc_ssr, subdev) -static BLOCKING_NOTIFIER_HEAD(ssr_notifiers); +struct qcom_ssr_subsystem { + const char *name; + struct srcu_notifier_head notifier_list; + struct list_head list; +}; + +static LIST_HEAD(qcom_ssr_subsystem_list); +static DEFINE_MUTEX(qcom_ssr_subsys_lock); static int glink_subdev_start(struct rproc_subdev *subdev) { @@ -189,37 +197,83 @@ void qcom_remove_smd_subdev(struct rproc *rproc, struct qcom_rproc_subdev *smd) } EXPORT_SYMBOL_GPL(qcom_remove_smd_subdev); +static struct qcom_ssr_subsystem *qcom_ssr_get_subsys(const char *name) +{ + struct qcom_ssr_subsystem *info; + + mutex_lock(&qcom_ssr_subsys_lock); + /* Match in the global qcom_ssr_subsystem_list with name */ + list_for_each_entry(info, &qcom_ssr_subsystem_list, list) + if (!strcmp(info->name, name)) + goto out; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + info = ERR_PTR(-ENOMEM); + goto out; + } + info->name = kstrdup_const(name, GFP_KERNEL); + srcu_init_notifier_head(&info->notifier_list); + + /* Add to global notification list */ + list_add_tail(&info->list, &qcom_ssr_subsystem_list); + +out: + mutex_unlock(&qcom_ssr_subsys_lock); + return info; +} + /** * qcom_register_ssr_notifier() - register SSR notification handler - * @nb: notifier_block to notify for restart notifications + * @name: Subsystem's SSR name + * @nb: notifier_block to be invoked upon subsystem's state change * - * Returns 0 on success, negative errno on failure. + * This registers the @nb notifier block as part the notifier chain for a + * remoteproc associated with @name. The notifier block's callback + * will be invoked when the remote processor's SSR events occur + * (pre/post startup and pre/post shutdown). * - * This register the @notify function as handler for restart notifications. As - * remote processors are stopped this function will be called, with the SSR - * name passed as a parameter. + * Return: a subsystem cookie on success, ERR_PTR on failure. 
*/ -int qcom_register_ssr_notifier(struct notifier_block *nb) +void *qcom_register_ssr_notifier(const char *name, struct notifier_block *nb) { - return blocking_notifier_chain_register(&ssr_notifiers, nb); + struct qcom_ssr_subsystem *info; + + info = qcom_ssr_get_subsys(name); + if (IS_ERR(info)) + return info; + + srcu_notifier_chain_register(&info->notifier_list, nb); + + return &info->notifier_list; } EXPORT_SYMBOL_GPL(qcom_register_ssr_notifier); /** * qcom_unregister_ssr_notifier() - unregister SSR notification handler + * @notify: subsystem cookie returned from qcom_register_ssr_notifier * @nb: notifier_block to unregister + * + * This function will unregister the notifier from the particular notifier + * chain. + * + * Return: 0 on success, %ENOENT otherwise. */ -void qcom_unregister_ssr_notifier(struct notifier_block *nb) +int qcom_unregister_ssr_notifier(void *notify, struct notifier_block *nb) { - blocking_notifier_chain_unregister(&ssr_notifiers, nb); + return srcu_notifier_chain_unregister(notify, nb); } EXPORT_SYMBOL_GPL(qcom_unregister_ssr_notifier); static void ssr_notify_unprepare(struct rproc_subdev *subdev) { struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev); + struct qcom_ssr_notify_data data = { + .name = ssr->info->name, + .crashed = false, + }; - blocking_notifier_call_chain(&ssr_notifiers, 0, (void *)ssr->name); + srcu_notifier_call_chain(&ssr->info->notifier_list, 0, &data); } /** @@ -229,12 +283,21 @@ static void ssr_notify_unprepare(struct rproc_subdev *subdev) * @ssr_name: identifier to use for notifications originating from @rproc * * As the @ssr is registered with the @rproc SSR events will be sent to all - * registered listeners in the system as the remoteproc is shut down. + * registered listeners for the remoteproc when it's SSR events occur + * (pre/post startup and pre/post shutdown). 
*/ void qcom_add_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr, const char *ssr_name) { - ssr->name = ssr_name; + struct qcom_ssr_subsystem *info; + + info = qcom_ssr_get_subsys(ssr_name); + if (IS_ERR(info)) { + dev_err(&rproc->dev, "Failed to add ssr subdevice\n"); + return; + } + + ssr->info = info; ssr->subdev.unprepare = ssr_notify_unprepare; rproc_add_subdev(rproc, &ssr->subdev); @@ -249,6 +312,7 @@ EXPORT_SYMBOL_GPL(qcom_add_ssr_subdev); void qcom_remove_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr) { rproc_remove_subdev(rproc, &ssr->subdev); + ssr->info = NULL; } EXPORT_SYMBOL_GPL(qcom_remove_ssr_subdev); diff --git a/drivers/remoteproc/qcom_common.h b/drivers/remoteproc/qcom_common.h index 34e5188187dc..dfc641c3a98b 100644 --- a/drivers/remoteproc/qcom_common.h +++ b/drivers/remoteproc/qcom_common.h @@ -26,10 +26,11 @@ struct qcom_rproc_subdev { struct qcom_smd_edge *edge; }; +struct qcom_ssr_subsystem; + struct qcom_rproc_ssr { struct rproc_subdev subdev; - - const char *name; + struct qcom_ssr_subsystem *info; }; void qcom_add_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink, diff --git a/include/linux/remoteproc/qcom_rproc.h b/include/linux/remoteproc/qcom_rproc.h index fa8e38681b4b..2a1d6d0249d9 100644 --- a/include/linux/remoteproc/qcom_rproc.h +++ b/include/linux/remoteproc/qcom_rproc.h @@ -5,17 +5,27 @@ struct notifier_block; #if IS_ENABLED(CONFIG_QCOM_RPROC_COMMON) -int qcom_register_ssr_notifier(struct notifier_block *nb); -void qcom_unregister_ssr_notifier(struct notifier_block *nb); +struct qcom_ssr_notify_data { + const char *name; + bool crashed; +}; + +void *qcom_register_ssr_notifier(const char *name, struct notifier_block *nb); +int qcom_unregister_ssr_notifier(void *notify, struct notifier_block *nb); #else -static inline int qcom_register_ssr_notifier(struct notifier_block *nb) +static inline void *qcom_register_ssr_notifier(const char *name, + struct notifier_block *nb) { - return 0; + return NULL; } -static inline void qcom_unregister_ssr_notifier(struct notifier_block *nb) {} +static inline int qcom_unregister_ssr_notifier(void *notify, + struct notifier_block *nb) +{ + return 0; +} #endif -- cgit v1.2.3 From 62495d778439a4e47571293511a785cba754874c Mon Sep 17 00:00:00 2001 From: Rishabh Bhatnagar Date: Tue, 23 Jun 2020 19:23:28 -0700 Subject: remoteproc: qcom: Add notification types to SSR The SSR subdevice only adds callback for the unprepare event. Add callbacks for prepare, start and prepare events. The client driver for a particular remoteproc might be interested in knowing the status of the remoteproc while undergoing SSR, not just when the remoteproc has finished shutting down. 
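Tied to the diff below, a client's notifier callback can then distinguish the four stages. This is only a minimal sketch: the callback name and the printed message are illustrative, while the enum values and struct qcom_ssr_notify_data are the ones introduced by this patch.

#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/remoteproc/qcom_rproc.h>

static int example_ssr_event(struct notifier_block *nb, unsigned long event,
			     void *data)
{
	struct qcom_ssr_notify_data *info = data;

	switch (event) {
	case QCOM_SSR_BEFORE_POWERUP:	/* prepare: about to start */
		break;
	case QCOM_SSR_AFTER_POWERUP:	/* start: remoteproc is running */
		break;
	case QCOM_SSR_BEFORE_SHUTDOWN:	/* stop: crashed or shutting down */
		pr_warn("%s %s\n", info->name,
			info->crashed ? "crashed" : "stopping");
		break;
	case QCOM_SSR_AFTER_SHUTDOWN:	/* unprepare: fully down */
		break;
	}
	return NOTIFY_OK;
}
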
Reviewed-by: Alex Elder Signed-off-by: Siddharth Gupta Signed-off-by: Rishabh Bhatnagar Link: https://lore.kernel.org/r/1592965408-16908-3-git-send-email-rishabhb@codeaurora.org Signed-off-by: Bjorn Andersson --- drivers/remoteproc/qcom_common.c | 44 ++++++++++++++++++++++++++++++++++- include/linux/remoteproc/qcom_rproc.h | 16 +++++++++++++ 2 files changed, 59 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/remoteproc/qcom_common.c b/drivers/remoteproc/qcom_common.c index 48b399af9c83..2f45f0c79914 100644 --- a/drivers/remoteproc/qcom_common.c +++ b/drivers/remoteproc/qcom_common.c @@ -265,6 +265,44 @@ int qcom_unregister_ssr_notifier(void *notify, struct notifier_block *nb) } EXPORT_SYMBOL_GPL(qcom_unregister_ssr_notifier); +static int ssr_notify_prepare(struct rproc_subdev *subdev) +{ + struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev); + struct qcom_ssr_notify_data data = { + .name = ssr->info->name, + .crashed = false, + }; + + srcu_notifier_call_chain(&ssr->info->notifier_list, + QCOM_SSR_BEFORE_POWERUP, &data); + return 0; +} + +static int ssr_notify_start(struct rproc_subdev *subdev) +{ + struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev); + struct qcom_ssr_notify_data data = { + .name = ssr->info->name, + .crashed = false, + }; + + srcu_notifier_call_chain(&ssr->info->notifier_list, + QCOM_SSR_AFTER_POWERUP, &data); + return 0; +} + +static void ssr_notify_stop(struct rproc_subdev *subdev, bool crashed) +{ + struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev); + struct qcom_ssr_notify_data data = { + .name = ssr->info->name, + .crashed = crashed, + }; + + srcu_notifier_call_chain(&ssr->info->notifier_list, + QCOM_SSR_BEFORE_SHUTDOWN, &data); +} + static void ssr_notify_unprepare(struct rproc_subdev *subdev) { struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev); @@ -273,7 +311,8 @@ static void ssr_notify_unprepare(struct rproc_subdev *subdev) .crashed = false, }; - srcu_notifier_call_chain(&ssr->info->notifier_list, 0, &data); + srcu_notifier_call_chain(&ssr->info->notifier_list, + QCOM_SSR_AFTER_SHUTDOWN, &data); } /** @@ -298,6 +337,9 @@ void qcom_add_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr, } ssr->info = info; + ssr->subdev.prepare = ssr_notify_prepare; + ssr->subdev.start = ssr_notify_start; + ssr->subdev.stop = ssr_notify_stop; ssr->subdev.unprepare = ssr_notify_unprepare; rproc_add_subdev(rproc, &ssr->subdev); diff --git a/include/linux/remoteproc/qcom_rproc.h b/include/linux/remoteproc/qcom_rproc.h index 2a1d6d0249d9..647051662174 100644 --- a/include/linux/remoteproc/qcom_rproc.h +++ b/include/linux/remoteproc/qcom_rproc.h @@ -5,6 +5,22 @@ struct notifier_block; #if IS_ENABLED(CONFIG_QCOM_RPROC_COMMON) +/** + * enum qcom_ssr_notify_type - Startup/Shutdown events related to a remoteproc + * processor. 
+ * + * @QCOM_SSR_BEFORE_POWERUP: Remoteproc about to start (prepare stage) + * @QCOM_SSR_AFTER_POWERUP: Remoteproc is running (start stage) + * @QCOM_SSR_BEFORE_SHUTDOWN: Remoteproc crashed or shutting down (stop stage) + * @QCOM_SSR_AFTER_SHUTDOWN: Remoteproc is down (unprepare stage) + */ +enum qcom_ssr_notify_type { + QCOM_SSR_BEFORE_POWERUP, + QCOM_SSR_AFTER_POWERUP, + QCOM_SSR_BEFORE_SHUTDOWN, + QCOM_SSR_AFTER_SHUTDOWN, +}; + struct qcom_ssr_notify_data { const char *name; bool crashed; -- cgit v1.2.3 From 4fbfbdb5726ff15bdce1c371efa1281b28322f64 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 8 Jul 2020 14:49:56 +0200 Subject: USB: serial: inline sysrq dummy function Inline the dummy sysrq character handling when either console support or magic-sysrq support isn't enabled to allow the compiler to eliminate unused code. Reviewed-by: Greg Kroah-Hartman Signed-off-by: Johan Hovold --- drivers/usb/serial/generic.c | 9 ++------- include/linux/usb/serial.h | 9 +++++++++ 2 files changed, 11 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index c5b35252c931..a9b6d103aaf6 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c @@ -571,7 +571,7 @@ int usb_serial_generic_get_icount(struct tty_struct *tty, } EXPORT_SYMBOL_GPL(usb_serial_generic_get_icount); -#ifdef CONFIG_MAGIC_SYSRQ +#if defined(CONFIG_USB_SERIAL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch) { if (port->sysrq) { @@ -584,13 +584,8 @@ int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch) } return 0; } -#else -int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch) -{ - return 0; -} -#endif EXPORT_SYMBOL_GPL(usb_serial_handle_sysrq_char); +#endif int usb_serial_handle_break(struct usb_serial_port *port) { diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 14cac4a1ae8f..be73646706a9 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -365,8 +365,17 @@ extern int usb_serial_generic_submit_read_urbs(struct usb_serial_port *port, extern void usb_serial_generic_process_read_urb(struct urb *urb); extern int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port, void *dest, size_t size); + +#if defined(CONFIG_USB_SERIAL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) extern int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch); +#else +static inline int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch) +{ + return 0; +} +#endif + extern int usb_serial_handle_break(struct usb_serial_port *port); extern void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port, struct tty_struct *tty, -- cgit v1.2.3 From 4b5cf2b8f90faf32bbb735b545510edefce094be Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 8 Jul 2020 14:49:57 +0200 Subject: USB: serial: add sysrq break-handler dummy Add inline sysrq break-handler dummy to allow the compiler to eliminate further code when either console or sysrq support isn't enabled and to clearly mark the two sysrq functions as belonging together. 
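Generalized, this is the usual kernel pattern of pairing the real prototype with a static inline stub so that call sites build unconditionally and the optimizer discards the dead paths. A generic sketch with made-up names (struct foo_port and the config symbols are illustrative only):

struct foo_port;

#if defined(CONFIG_FOO_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
int foo_handle_sysrq_char(struct foo_port *port, unsigned int ch);
#else
static inline int foo_handle_sysrq_char(struct foo_port *port, unsigned int ch)
{
	return 0;	/* constant-folds away at the call site */
}
#endif
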
Reviewed-by: Greg Kroah-Hartman Signed-off-by: Johan Hovold --- drivers/usb/serial/generic.c | 4 ++-- include/linux/usb/serial.h | 6 +++++- 2 files changed, 7 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index a9b6d103aaf6..e60f74f11acc 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c @@ -585,11 +585,10 @@ int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch) return 0; } EXPORT_SYMBOL_GPL(usb_serial_handle_sysrq_char); -#endif int usb_serial_handle_break(struct usb_serial_port *port) { - if (!port->port.console || !IS_ENABLED(CONFIG_MAGIC_SYSRQ)) + if (!port->port.console) return 0; if (!port->sysrq) { @@ -600,6 +599,7 @@ int usb_serial_handle_break(struct usb_serial_port *port) return 0; } EXPORT_SYMBOL_GPL(usb_serial_handle_break); +#endif /** * usb_serial_handle_dcd_change - handle a change of carrier detect state diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index be73646706a9..c4ed4404335e 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -369,14 +369,18 @@ extern int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port, #if defined(CONFIG_USB_SERIAL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) extern int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch); +extern int usb_serial_handle_break(struct usb_serial_port *port); #else static inline int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch) { return 0; } +static inline int usb_serial_handle_break(struct usb_serial_port *port) +{ + return 0; +} #endif -extern int usb_serial_handle_break(struct usb_serial_port *port); extern void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port, struct tty_struct *tty, unsigned int status); -- cgit v1.2.3 From 1cafb03d5d88631c218a072e4116ac92e9782dd0 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 8 Jul 2020 14:49:58 +0200 Subject: USB: serial: drop unnecessary sysrq include There's no need to include sysrq.h in the subsystem header. Reviewed-by: Greg Kroah-Hartman Signed-off-by: Johan Hovold --- include/linux/usb/serial.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index c4ed4404335e..4becca7ae264 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -17,7 +17,6 @@ #include #include #include -#include #include /* The maximum number of ports one device can grab at once */ -- cgit v1.2.3 From 7aab96d6e3c54128f9e335fa8b11a0bd8815e118 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 8 Jul 2020 14:49:59 +0200 Subject: USB: serial: drop extern keyword from function declarations Drop the redundant extern keyword from function declarations in the subsystem header file to improve readability (and make it easier to spot the global variables). 
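For readers less used to C linkage rules: 'extern' is implicit on function declarations, so removing the keyword is purely cosmetic. The two prototypes below, the first being the old form from this header, declare exactly the same thing.

extern int usb_serial_resume(struct usb_interface *intf);	/* old style */
int usb_serial_resume(struct usb_interface *intf);		/* equivalent */
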
Reviewed-by: Greg Kroah-Hartman Signed-off-by: Johan Hovold --- include/linux/usb/serial.h | 81 +++++++++++++++++++++------------------------- 1 file changed, 36 insertions(+), 45 deletions(-) (limited to 'include') diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 4becca7ae264..6d756d03f46f 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -315,19 +315,19 @@ struct usb_serial_driver { #define to_usb_serial_driver(d) \ container_of(d, struct usb_serial_driver, driver) -extern int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[], +int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[], const char *name, const struct usb_device_id *id_table); -extern void usb_serial_deregister_drivers(struct usb_serial_driver *const serial_drivers[]); -extern void usb_serial_port_softint(struct usb_serial_port *port); +void usb_serial_deregister_drivers(struct usb_serial_driver *const serial_drivers[]); +void usb_serial_port_softint(struct usb_serial_port *port); -extern int usb_serial_suspend(struct usb_interface *intf, pm_message_t message); -extern int usb_serial_resume(struct usb_interface *intf); +int usb_serial_suspend(struct usb_interface *intf, pm_message_t message); +int usb_serial_resume(struct usb_interface *intf); /* USB Serial console functions */ #ifdef CONFIG_USB_SERIAL_CONSOLE -extern void usb_serial_console_init(int minor); -extern void usb_serial_console_exit(void); -extern void usb_serial_console_disconnect(struct usb_serial *serial); +void usb_serial_console_init(int minor); +void usb_serial_console_exit(void); +void usb_serial_console_disconnect(struct usb_serial *serial); #else static inline void usb_serial_console_init(int minor) { } static inline void usb_serial_console_exit(void) { } @@ -335,40 +335,32 @@ static inline void usb_serial_console_disconnect(struct usb_serial *serial) {} #endif /* Functions needed by other parts of the usbserial core */ -extern struct usb_serial_port *usb_serial_port_get_by_minor(unsigned int minor); -extern void usb_serial_put(struct usb_serial *serial); -extern int usb_serial_generic_open(struct tty_struct *tty, - struct usb_serial_port *port); -extern int usb_serial_generic_write_start(struct usb_serial_port *port, - gfp_t mem_flags); -extern int usb_serial_generic_write(struct tty_struct *tty, - struct usb_serial_port *port, const unsigned char *buf, int count); -extern void usb_serial_generic_close(struct usb_serial_port *port); -extern int usb_serial_generic_resume(struct usb_serial *serial); -extern int usb_serial_generic_write_room(struct tty_struct *tty); -extern int usb_serial_generic_chars_in_buffer(struct tty_struct *tty); -extern void usb_serial_generic_wait_until_sent(struct tty_struct *tty, - long timeout); -extern void usb_serial_generic_read_bulk_callback(struct urb *urb); -extern void usb_serial_generic_write_bulk_callback(struct urb *urb); -extern void usb_serial_generic_throttle(struct tty_struct *tty); -extern void usb_serial_generic_unthrottle(struct tty_struct *tty); -extern int usb_serial_generic_tiocmiwait(struct tty_struct *tty, - unsigned long arg); -extern int usb_serial_generic_get_icount(struct tty_struct *tty, - struct serial_icounter_struct *icount); -extern int usb_serial_generic_register(void); -extern void usb_serial_generic_deregister(void); -extern int usb_serial_generic_submit_read_urbs(struct usb_serial_port *port, - gfp_t mem_flags); -extern void usb_serial_generic_process_read_urb(struct urb *urb); -extern int 
usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port, - void *dest, size_t size); +struct usb_serial_port *usb_serial_port_get_by_minor(unsigned int minor); +void usb_serial_put(struct usb_serial *serial); +int usb_serial_generic_open(struct tty_struct *tty, struct usb_serial_port *port); +int usb_serial_generic_write_start(struct usb_serial_port *port, gfp_t mem_flags); +int usb_serial_generic_write(struct tty_struct *tty, struct usb_serial_port *port, + const unsigned char *buf, int count); +void usb_serial_generic_close(struct usb_serial_port *port); +int usb_serial_generic_resume(struct usb_serial *serial); +int usb_serial_generic_write_room(struct tty_struct *tty); +int usb_serial_generic_chars_in_buffer(struct tty_struct *tty); +void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout); +void usb_serial_generic_read_bulk_callback(struct urb *urb); +void usb_serial_generic_write_bulk_callback(struct urb *urb); +void usb_serial_generic_throttle(struct tty_struct *tty); +void usb_serial_generic_unthrottle(struct tty_struct *tty); +int usb_serial_generic_tiocmiwait(struct tty_struct *tty, unsigned long arg); +int usb_serial_generic_get_icount(struct tty_struct *tty, struct serial_icounter_struct *icount); +int usb_serial_generic_register(void); +void usb_serial_generic_deregister(void); +int usb_serial_generic_submit_read_urbs(struct usb_serial_port *port, gfp_t mem_flags); +void usb_serial_generic_process_read_urb(struct urb *urb); +int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port, void *dest, size_t size); #if defined(CONFIG_USB_SERIAL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) -extern int usb_serial_handle_sysrq_char(struct usb_serial_port *port, - unsigned int ch); -extern int usb_serial_handle_break(struct usb_serial_port *port); +int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch); +int usb_serial_handle_break(struct usb_serial_port *port); #else static inline int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch) { @@ -380,13 +372,12 @@ static inline int usb_serial_handle_break(struct usb_serial_port *port) } #endif -extern void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port, - struct tty_struct *tty, - unsigned int status); +void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port, + struct tty_struct *tty, unsigned int status); -extern int usb_serial_bus_register(struct usb_serial_driver *device); -extern void usb_serial_bus_deregister(struct usb_serial_driver *device); +int usb_serial_bus_register(struct usb_serial_driver *device); +void usb_serial_bus_deregister(struct usb_serial_driver *device); extern struct bus_type usb_serial_bus_type; extern struct tty_driver *usb_serial_tty_driver; -- cgit v1.2.3 From f7f611f2b1dc69547d425de0daeac548add2c761 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 8 Jul 2020 09:11:49 +0200 Subject: ARM: s3c24xx: leds: Convert to use GPIO descriptors This converts the s3c24xx LED driver to use GPIO descriptors and also modify all board files to account for these changes by registering the appropriate GPIO tables for each board. The driver was using a custom flag to indicate open drain (tristate) but this can be handled by standard descriptor machine tables. The driver was setting non-pull-up for the pin using the custom S3C24xx GPIO API, but this is a custom pin control system used by the S3C24xx and no generic GPIO function, so this has simply been pushed back into the respective board files. 
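The driver-side counterpart of these board-file lookup tables is outside the 'include'-limited listing shown here, so the following is only a hedged sketch: a converted probe obtains the descriptor by device and con_id (NULL, matching the GPIO_LOOKUP() entries) and lets gpiolib apply the active-low and open-drain flags; names other than the gpiod_* helpers are illustrative.

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

static int example_led_probe(struct platform_device *pdev)
{
	struct gpio_desc *gpiod;

	/* NULL con_id matches the GPIO_LOOKUP() entries above; polarity and
	 * open-drain come from the lookup flags, not from driver flags.
	 */
	gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_OUT_LOW);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);

	gpiod_set_value(gpiod, 1);	/* logical "on" */
	return 0;
}
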
Signed-off-by: Linus Walleij Acked-by: Jacek Anaszewski Signed-off-by: Krzysztof Kozlowski --- arch/arm/mach-s3c24xx/common-smdk.c | 67 ++++++++++++++++++++---------- arch/arm/mach-s3c24xx/mach-mini2440.c | 63 ++++++++++++++++++++++++---- arch/arm/mach-s3c24xx/mach-n30.c | 54 +++++++++++++++++++++--- arch/arm/mach-s3c24xx/mach-qt2410.c | 12 +++++- arch/arm/mach-s3c24xx/mach-vr1000.c | 38 +++++++++++++++-- drivers/leds/leds-s3c24xx.c | 36 ++++------------ include/linux/platform_data/leds-s3c24xx.h | 6 --- 7 files changed, 199 insertions(+), 77 deletions(-) (limited to 'include') diff --git a/arch/arm/mach-s3c24xx/common-smdk.c b/arch/arm/mach-s3c24xx/common-smdk.c index 58e30cad386c..75064dfaceb1 100644 --- a/arch/arm/mach-s3c24xx/common-smdk.c +++ b/arch/arm/mach-s3c24xx/common-smdk.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -44,29 +45,53 @@ /* LED devices */ +static struct gpiod_lookup_table smdk_led4_gpio_table = { + .dev_id = "s3c24xx_led.0", + .table = { + GPIO_LOOKUP("GPF", 4, NULL, GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN), + { }, + }, +}; + +static struct gpiod_lookup_table smdk_led5_gpio_table = { + .dev_id = "s3c24xx_led.1", + .table = { + GPIO_LOOKUP("GPF", 5, NULL, GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN), + { }, + }, +}; + +static struct gpiod_lookup_table smdk_led6_gpio_table = { + .dev_id = "s3c24xx_led.2", + .table = { + GPIO_LOOKUP("GPF", 6, NULL, GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN), + { }, + }, +}; + +static struct gpiod_lookup_table smdk_led7_gpio_table = { + .dev_id = "s3c24xx_led.3", + .table = { + GPIO_LOOKUP("GPF", 7, NULL, GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN), + { }, + }, +}; + static struct s3c24xx_led_platdata smdk_pdata_led4 = { - .gpio = S3C2410_GPF(4), - .flags = S3C24XX_LEDF_ACTLOW | S3C24XX_LEDF_TRISTATE, .name = "led4", .def_trigger = "timer", }; static struct s3c24xx_led_platdata smdk_pdata_led5 = { - .gpio = S3C2410_GPF(5), - .flags = S3C24XX_LEDF_ACTLOW | S3C24XX_LEDF_TRISTATE, .name = "led5", .def_trigger = "nand-disk", }; static struct s3c24xx_led_platdata smdk_pdata_led6 = { - .gpio = S3C2410_GPF(6), - .flags = S3C24XX_LEDF_ACTLOW | S3C24XX_LEDF_TRISTATE, .name = "led6", }; static struct s3c24xx_led_platdata smdk_pdata_led7 = { - .gpio = S3C2410_GPF(7), - .flags = S3C24XX_LEDF_ACTLOW | S3C24XX_LEDF_TRISTATE, .name = "led7", }; @@ -179,27 +204,25 @@ static struct platform_device __initdata *smdk_devs[] = { &smdk_led7, }; -static const struct gpio smdk_led_gpios[] = { - { S3C2410_GPF(4), GPIOF_OUT_INIT_HIGH, NULL }, - { S3C2410_GPF(5), GPIOF_OUT_INIT_HIGH, NULL }, - { S3C2410_GPF(6), GPIOF_OUT_INIT_HIGH, NULL }, - { S3C2410_GPF(7), GPIOF_OUT_INIT_HIGH, NULL }, -}; - void __init smdk_machine_init(void) { - /* Configure the LEDs (even if we have no LED support)*/ - - int ret = gpio_request_array(smdk_led_gpios, - ARRAY_SIZE(smdk_led_gpios)); - if (!WARN_ON(ret < 0)) - gpio_free_array(smdk_led_gpios, ARRAY_SIZE(smdk_led_gpios)); - if (machine_is_smdk2443()) smdk_nand_info.twrph0 = 50; s3c_nand_set_platdata(&smdk_nand_info); + /* Disable pull-up on the LED lines */ + s3c_gpio_setpull(S3C2410_GPF(4), S3C_GPIO_PULL_NONE); + s3c_gpio_setpull(S3C2410_GPF(5), S3C_GPIO_PULL_NONE); + s3c_gpio_setpull(S3C2410_GPF(6), S3C_GPIO_PULL_NONE); + s3c_gpio_setpull(S3C2410_GPF(7), S3C_GPIO_PULL_NONE); + + /* Add lookups for the lines */ + gpiod_add_lookup_table(&smdk_led4_gpio_table); + gpiod_add_lookup_table(&smdk_led5_gpio_table); + gpiod_add_lookup_table(&smdk_led6_gpio_table); + gpiod_add_lookup_table(&smdk_led7_gpio_table); + 
platform_add_devices(smdk_devs, ARRAY_SIZE(smdk_devs)); s3c_pm_init(); diff --git a/arch/arm/mach-s3c24xx/mach-mini2440.c b/arch/arm/mach-s3c24xx/mach-mini2440.c index 9035f868fb34..aa0c33109865 100644 --- a/arch/arm/mach-s3c24xx/mach-mini2440.c +++ b/arch/arm/mach-s3c24xx/mach-mini2440.c @@ -402,37 +402,68 @@ static struct platform_device mini2440_button_device = { /* LEDS */ +static struct gpiod_lookup_table mini2440_led1_gpio_table = { + .dev_id = "s3c24xx_led.1", + .table = { + GPIO_LOOKUP("GPB", 5, NULL, GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN), + { }, + }, +}; + +static struct gpiod_lookup_table mini2440_led2_gpio_table = { + .dev_id = "s3c24xx_led.2", + .table = { + GPIO_LOOKUP("GPB", 6, NULL, GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN), + { }, + }, +}; + +static struct gpiod_lookup_table mini2440_led3_gpio_table = { + .dev_id = "s3c24xx_led.3", + .table = { + GPIO_LOOKUP("GPB", 7, NULL, GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN), + { }, + }, +}; + +static struct gpiod_lookup_table mini2440_led4_gpio_table = { + .dev_id = "s3c24xx_led.4", + .table = { + GPIO_LOOKUP("GPB", 8, NULL, GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN), + { }, + }, +}; + +static struct gpiod_lookup_table mini2440_backlight_gpio_table = { + .dev_id = "s3c24xx_led.5", + .table = { + GPIO_LOOKUP("GPG", 4, NULL, GPIO_ACTIVE_HIGH), + { }, + }, +}; + static struct s3c24xx_led_platdata mini2440_led1_pdata = { .name = "led1", - .gpio = S3C2410_GPB(5), - .flags = S3C24XX_LEDF_ACTLOW | S3C24XX_LEDF_TRISTATE, .def_trigger = "heartbeat", }; static struct s3c24xx_led_platdata mini2440_led2_pdata = { .name = "led2", - .gpio = S3C2410_GPB(6), - .flags = S3C24XX_LEDF_ACTLOW | S3C24XX_LEDF_TRISTATE, .def_trigger = "nand-disk", }; static struct s3c24xx_led_platdata mini2440_led3_pdata = { .name = "led3", - .gpio = S3C2410_GPB(7), - .flags = S3C24XX_LEDF_ACTLOW | S3C24XX_LEDF_TRISTATE, .def_trigger = "mmc0", }; static struct s3c24xx_led_platdata mini2440_led4_pdata = { .name = "led4", - .gpio = S3C2410_GPB(8), - .flags = S3C24XX_LEDF_ACTLOW | S3C24XX_LEDF_TRISTATE, .def_trigger = "", }; static struct s3c24xx_led_platdata mini2440_led_backlight_pdata = { .name = "backlight", - .gpio = S3C2410_GPG(4), .def_trigger = "backlight", }; @@ -714,6 +745,20 @@ static void __init mini2440_init(void) i2c_register_board_info(0, mini2440_i2c_devs, ARRAY_SIZE(mini2440_i2c_devs)); + /* Disable pull-up on the LED lines */ + s3c_gpio_setpull(S3C2410_GPB(5), S3C_GPIO_PULL_NONE); + s3c_gpio_setpull(S3C2410_GPB(6), S3C_GPIO_PULL_NONE); + s3c_gpio_setpull(S3C2410_GPB(7), S3C_GPIO_PULL_NONE); + s3c_gpio_setpull(S3C2410_GPB(8), S3C_GPIO_PULL_NONE); + s3c_gpio_setpull(S3C2410_GPG(4), S3C_GPIO_PULL_NONE); + + /* Add lookups for the lines */ + gpiod_add_lookup_table(&mini2440_led1_gpio_table); + gpiod_add_lookup_table(&mini2440_led2_gpio_table); + gpiod_add_lookup_table(&mini2440_led3_gpio_table); + gpiod_add_lookup_table(&mini2440_led4_gpio_table); + gpiod_add_lookup_table(&mini2440_backlight_gpio_table); + platform_add_devices(mini2440_devices, ARRAY_SIZE(mini2440_devices)); if (features.count) /* the optional features */ diff --git a/arch/arm/mach-s3c24xx/mach-n30.c b/arch/arm/mach-s3c24xx/mach-n30.c index d856f23939af..58a64f6d5fd0 100644 --- a/arch/arm/mach-s3c24xx/mach-n30.c +++ b/arch/arm/mach-s3c24xx/mach-n30.c @@ -45,6 +45,7 @@ #include #include +#include #include #include #include @@ -246,17 +247,33 @@ static struct platform_device n35_button_device = { }; /* This is the bluetooth LED on the device. 
*/ + +static struct gpiod_lookup_table n30_blue_led_gpio_table = { + .dev_id = "s3c24xx_led.1", + .table = { + GPIO_LOOKUP("GPG", 6, NULL, GPIO_ACTIVE_HIGH), + { }, + }, +}; + static struct s3c24xx_led_platdata n30_blue_led_pdata = { .name = "blue_led", - .gpio = S3C2410_GPG(6), .def_trigger = "", }; /* This is the blue LED on the device. Originally used to indicate GPS activity * by flashing. */ + +static struct gpiod_lookup_table n35_blue_led_gpio_table = { + .dev_id = "s3c24xx_led.1", + .table = { + GPIO_LOOKUP("GPD", 8, NULL, GPIO_ACTIVE_HIGH), + { }, + }, +}; + static struct s3c24xx_led_platdata n35_blue_led_pdata = { .name = "blue_led", - .gpio = S3C2410_GPD(8), .def_trigger = "", }; @@ -264,17 +281,30 @@ static struct s3c24xx_led_platdata n35_blue_led_pdata = { * red, blinking green or solid green when the battery is low, * charging or full respectively. By driving GPD9 low, it's possible * to force the LED to blink red, so call that warning LED. */ + +static struct gpiod_lookup_table n30_warning_led_gpio_table = { + .dev_id = "s3c24xx_led.2", + .table = { + GPIO_LOOKUP("GPD", 9, NULL, GPIO_ACTIVE_LOW), + { }, + }, +}; + static struct s3c24xx_led_platdata n30_warning_led_pdata = { .name = "warning_led", - .flags = S3C24XX_LEDF_ACTLOW, - .gpio = S3C2410_GPD(9), .def_trigger = "", }; +static struct gpiod_lookup_table n35_warning_led_gpio_table = { + .dev_id = "s3c24xx_led.2", + .table = { + GPIO_LOOKUP("GPD", 9, NULL, GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN), + { }, + }, +}; + static struct s3c24xx_led_platdata n35_warning_led_pdata = { .name = "warning_led", - .flags = S3C24XX_LEDF_ACTLOW | S3C24XX_LEDF_TRISTATE, - .gpio = S3C2410_GPD(9), .def_trigger = "", }; @@ -577,6 +607,12 @@ static void __init n30_init(void) S3C2410_MISCCR_USBSUSPND0 | S3C2410_MISCCR_USBSUSPND1, 0x0); + /* Disable pull-up and add GPIO tables */ + s3c_gpio_setpull(S3C2410_GPG(6), S3C_GPIO_PULL_NONE); + s3c_gpio_setpull(S3C2410_GPD(9), S3C_GPIO_PULL_NONE); + gpiod_add_lookup_table(&n30_blue_led_gpio_table); + gpiod_add_lookup_table(&n30_warning_led_gpio_table); + platform_add_devices(n30_devices, ARRAY_SIZE(n30_devices)); } @@ -594,6 +630,12 @@ static void __init n30_init(void) S3C2410_MISCCR_USBSUSPND1, S3C2410_MISCCR_USBSUSPND0); + /* Disable pull-up and add GPIO tables */ + s3c_gpio_setpull(S3C2410_GPD(8), S3C_GPIO_PULL_NONE); + s3c_gpio_setpull(S3C2410_GPD(9), S3C_GPIO_PULL_NONE); + gpiod_add_lookup_table(&n35_blue_led_gpio_table); + gpiod_add_lookup_table(&n35_warning_led_gpio_table); + platform_add_devices(n35_devices, ARRAY_SIZE(n35_devices)); } } diff --git a/arch/arm/mach-s3c24xx/mach-qt2410.c b/arch/arm/mach-s3c24xx/mach-qt2410.c index 5d48e5b6e738..ff9e3197309b 100644 --- a/arch/arm/mach-s3c24xx/mach-qt2410.c +++ b/arch/arm/mach-s3c24xx/mach-qt2410.c @@ -177,9 +177,15 @@ static struct platform_device qt2410_cs89x0 = { /* LED */ +static struct gpiod_lookup_table qt2410_led_gpio_table = { + .dev_id = "s3c24xx_led.0", + .table = { + GPIO_LOOKUP("GPB", 0, NULL, GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN), + { }, + }, +}; + static struct s3c24xx_led_platdata qt2410_pdata_led = { - .gpio = S3C2410_GPB(0), - .flags = S3C24XX_LEDF_ACTLOW | S3C24XX_LEDF_TRISTATE, .name = "led", .def_trigger = "timer", }; @@ -338,6 +344,8 @@ static void __init qt2410_machine_init(void) s3c_i2c0_set_platdata(NULL); gpiod_add_lookup_table(&qt2410_spi_gpiod_table); + s3c_gpio_setpull(S3C2410_GPB(0), S3C_GPIO_PULL_NONE); + gpiod_add_lookup_table(&qt2410_led_gpio_table); platform_add_devices(qt2410_devices, ARRAY_SIZE(qt2410_devices)); 
s3c_pm_init(); } diff --git a/arch/arm/mach-s3c24xx/mach-vr1000.c b/arch/arm/mach-s3c24xx/mach-vr1000.c index 853e74f9b8b5..6a3fb2becc7c 100644 --- a/arch/arm/mach-s3c24xx/mach-vr1000.c +++ b/arch/arm/mach-s3c24xx/mach-vr1000.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -40,6 +41,7 @@ #include #include +#include #include #include "bast.h" @@ -223,21 +225,42 @@ static struct platform_device vr1000_dm9k1 = { /* LEDS */ +static struct gpiod_lookup_table vr1000_led1_gpio_table = { + .dev_id = "s3c24xx_led.1", + .table = { + GPIO_LOOKUP("GPB", 0, NULL, GPIO_ACTIVE_HIGH), + { }, + }, +}; + +static struct gpiod_lookup_table vr1000_led2_gpio_table = { + .dev_id = "s3c24xx_led.2", + .table = { + GPIO_LOOKUP("GPB", 1, NULL, GPIO_ACTIVE_HIGH), + { }, + }, +}; + +static struct gpiod_lookup_table vr1000_led3_gpio_table = { + .dev_id = "s3c24xx_led.3", + .table = { + GPIO_LOOKUP("GPB", 2, NULL, GPIO_ACTIVE_HIGH), + { }, + }, +}; + static struct s3c24xx_led_platdata vr1000_led1_pdata = { .name = "led1", - .gpio = S3C2410_GPB(0), .def_trigger = "", }; static struct s3c24xx_led_platdata vr1000_led2_pdata = { .name = "led2", - .gpio = S3C2410_GPB(1), .def_trigger = "", }; static struct s3c24xx_led_platdata vr1000_led3_pdata = { .name = "led3", - .gpio = S3C2410_GPB(2), .def_trigger = "", }; @@ -317,6 +340,15 @@ static void __init vr1000_init_time(void) static void __init vr1000_init(void) { s3c_i2c0_set_platdata(NULL); + + /* Disable pull-up on LED lines and register GPIO lookups */ + s3c_gpio_setpull(S3C2410_GPB(0), S3C_GPIO_PULL_NONE); + s3c_gpio_setpull(S3C2410_GPB(1), S3C_GPIO_PULL_NONE); + s3c_gpio_setpull(S3C2410_GPB(2), S3C_GPIO_PULL_NONE); + gpiod_add_lookup_table(&vr1000_led1_gpio_table); + gpiod_add_lookup_table(&vr1000_led2_gpio_table); + gpiod_add_lookup_table(&vr1000_led3_gpio_table); + platform_add_devices(vr1000_devices, ARRAY_SIZE(vr1000_devices)); i2c_register_board_info(0, vr1000_i2c_devs, diff --git a/drivers/leds/leds-s3c24xx.c b/drivers/leds/leds-s3c24xx.c index f8b8d6e313ee..9b5e67664ba3 100644 --- a/drivers/leds/leds-s3c24xx.c +++ b/drivers/leds/leds-s3c24xx.c @@ -11,19 +11,19 @@ #include #include #include -#include +#include #include #include #include #include -#include /* our context */ struct s3c24xx_gpio_led { struct led_classdev cdev; struct s3c24xx_led_platdata *pdata; + struct gpio_desc *gpiod; }; static inline struct s3c24xx_gpio_led *to_gpio(struct led_classdev *led_cdev) @@ -35,20 +35,8 @@ static void s3c24xx_led_set(struct led_classdev *led_cdev, enum led_brightness value) { struct s3c24xx_gpio_led *led = to_gpio(led_cdev); - struct s3c24xx_led_platdata *pd = led->pdata; - int state = (value ? 1 : 0) ^ (pd->flags & S3C24XX_LEDF_ACTLOW); - /* there will be a short delay between setting the output and - * going from output to input when using tristate. 
*/ - - gpio_set_value(pd->gpio, state); - - if (pd->flags & S3C24XX_LEDF_TRISTATE) { - if (value) - gpio_direction_output(pd->gpio, state); - else - gpio_direction_input(pd->gpio); - } + gpiod_set_value(led->gpiod, !!value); } static int s3c24xx_led_probe(struct platform_device *dev) @@ -69,22 +57,12 @@ static int s3c24xx_led_probe(struct platform_device *dev) led->pdata = pdata; - ret = devm_gpio_request(&dev->dev, pdata->gpio, "S3C24XX_LED"); - if (ret < 0) - return ret; - - /* no point in having a pull-up if we are always driving */ - - s3c_gpio_setpull(pdata->gpio, S3C_GPIO_PULL_NONE); - - if (pdata->flags & S3C24XX_LEDF_TRISTATE) - gpio_direction_input(pdata->gpio); - else - gpio_direction_output(pdata->gpio, - pdata->flags & S3C24XX_LEDF_ACTLOW ? 1 : 0); + /* Default to off */ + led->gpiod = devm_gpiod_get(&dev->dev, NULL, GPIOD_OUT_LOW); + if (IS_ERR(led->gpiod)) + return PTR_ERR(led->gpiod); /* register our new led device */ - ret = devm_led_classdev_register(&dev->dev, &led->cdev); if (ret < 0) dev_err(&dev->dev, "led_classdev_register failed\n"); diff --git a/include/linux/platform_data/leds-s3c24xx.h b/include/linux/platform_data/leds-s3c24xx.h index 5bbae85811e2..64f8d14876e0 100644 --- a/include/linux/platform_data/leds-s3c24xx.h +++ b/include/linux/platform_data/leds-s3c24xx.h @@ -10,13 +10,7 @@ #ifndef __LEDS_S3C24XX_H #define __LEDS_S3C24XX_H -#define S3C24XX_LEDF_ACTLOW (1<<0) /* LED is on when GPIO low */ -#define S3C24XX_LEDF_TRISTATE (1<<1) /* tristate to turn off */ - struct s3c24xx_led_platdata { - unsigned int gpio; - unsigned int flags; - char *name; char *def_trigger; }; -- cgit v1.2.3 From 7bc13b5b60e9412a7ddef300ce2c661eecd1fd5d Mon Sep 17 00:00:00 2001 From: Barry Song Date: Sun, 5 Jul 2020 21:18:58 +1200 Subject: crypto: api - permit users to specify numa node of acomp hardware For a Linux server with NUMA, there are possibly multiple (de)compressors which are either local or remote to some NUMA node. Some drivers will automatically use the (de)compressor near the CPU calling acomp_alloc(). However, it is not necessarily correct because users who send acomp_req could be from different NUMA node with the CPU which allocates acomp. Just like kernel has kmalloc() and kmalloc_node(), here crypto can have same support. 
Cc: Seth Jennings Cc: Dan Streetman Cc: Vitaly Wool Cc: Andrew Morton Cc: Jonathan Cameron Signed-off-by: Barry Song Signed-off-by: Herbert Xu --- crypto/acompress.c | 8 ++++++++ crypto/api.c | 24 +++++++++++++++--------- crypto/internal.h | 23 +++++++++++++++++++---- include/crypto/acompress.h | 18 ++++++++++++++++++ include/linux/crypto.h | 2 ++ 5 files changed, 62 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/crypto/acompress.c b/crypto/acompress.c index 84a76723e851..c32c72048a1c 100644 --- a/crypto/acompress.c +++ b/crypto/acompress.c @@ -109,6 +109,14 @@ struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type, } EXPORT_SYMBOL_GPL(crypto_alloc_acomp); +struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type, + u32 mask, int node) +{ + return crypto_alloc_tfm_node(alg_name, &crypto_acomp_type, type, mask, + node); +} +EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node); + struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp) { struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); diff --git a/crypto/api.c b/crypto/api.c index edcf690800d4..5d8fe60b36c1 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -433,8 +433,9 @@ err: } EXPORT_SYMBOL_GPL(crypto_alloc_base); -void *crypto_create_tfm(struct crypto_alg *alg, - const struct crypto_type *frontend) +void *crypto_create_tfm_node(struct crypto_alg *alg, + const struct crypto_type *frontend, + int node) { char *mem; struct crypto_tfm *tfm = NULL; @@ -445,12 +446,13 @@ void *crypto_create_tfm(struct crypto_alg *alg, tfmsize = frontend->tfmsize; total = tfmsize + sizeof(*tfm) + frontend->extsize(alg); - mem = kzalloc(total, GFP_KERNEL); + mem = kzalloc_node(total, GFP_KERNEL, node); if (mem == NULL) goto out_err; tfm = (struct crypto_tfm *)(mem + tfmsize); tfm->__crt_alg = alg; + tfm->node = node; err = frontend->init_tfm(tfm); if (err) @@ -472,7 +474,7 @@ out_err: out: return mem; } -EXPORT_SYMBOL_GPL(crypto_create_tfm); +EXPORT_SYMBOL_GPL(crypto_create_tfm_node); struct crypto_alg *crypto_find_alg(const char *alg_name, const struct crypto_type *frontend, @@ -490,11 +492,13 @@ struct crypto_alg *crypto_find_alg(const char *alg_name, EXPORT_SYMBOL_GPL(crypto_find_alg); /* - * crypto_alloc_tfm - Locate algorithm and allocate transform + * crypto_alloc_tfm_node - Locate algorithm and allocate transform * @alg_name: Name of algorithm * @frontend: Frontend algorithm type * @type: Type of algorithm * @mask: Mask for type comparison + * @node: NUMA node in which users desire to put requests, if node is + * NUMA_NO_NODE, it means users have no special requirement. * * crypto_alloc_tfm() will first attempt to locate an already loaded * algorithm. If that fails and the kernel supports dynamically loadable @@ -509,8 +513,10 @@ EXPORT_SYMBOL_GPL(crypto_find_alg); * * In case of error the return value is an error pointer. 
*/ -void *crypto_alloc_tfm(const char *alg_name, - const struct crypto_type *frontend, u32 type, u32 mask) + +void *crypto_alloc_tfm_node(const char *alg_name, + const struct crypto_type *frontend, u32 type, u32 mask, + int node) { void *tfm; int err; @@ -524,7 +530,7 @@ void *crypto_alloc_tfm(const char *alg_name, goto err; } - tfm = crypto_create_tfm(alg, frontend); + tfm = crypto_create_tfm_node(alg, frontend, node); if (!IS_ERR(tfm)) return tfm; @@ -542,7 +548,7 @@ err: return ERR_PTR(err); } -EXPORT_SYMBOL_GPL(crypto_alloc_tfm); +EXPORT_SYMBOL_GPL(crypto_alloc_tfm_node); /* * crypto_destroy_tfm - Free crypto transform diff --git a/crypto/internal.h b/crypto/internal.h index ff06a3bd1ca1..1b92a5a61852 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -68,13 +68,28 @@ void crypto_remove_final(struct list_head *list); void crypto_shoot_alg(struct crypto_alg *alg); struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, u32 mask); -void *crypto_create_tfm(struct crypto_alg *alg, - const struct crypto_type *frontend); +void *crypto_create_tfm_node(struct crypto_alg *alg, + const struct crypto_type *frontend, int node); + +static inline void *crypto_create_tfm(struct crypto_alg *alg, + const struct crypto_type *frontend) +{ + return crypto_create_tfm_node(alg, frontend, NUMA_NO_NODE); +} + struct crypto_alg *crypto_find_alg(const char *alg_name, const struct crypto_type *frontend, u32 type, u32 mask); -void *crypto_alloc_tfm(const char *alg_name, - const struct crypto_type *frontend, u32 type, u32 mask); + +void *crypto_alloc_tfm_node(const char *alg_name, + const struct crypto_type *frontend, u32 type, u32 mask, + int node); + +static inline void *crypto_alloc_tfm(const char *alg_name, + const struct crypto_type *frontend, u32 type, u32 mask) +{ + return crypto_alloc_tfm_node(alg_name, frontend, type, mask, NUMA_NO_NODE); +} int crypto_probing_notify(unsigned long val, void *v); diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h index 2b4d2b06ccbd..fcde59c65a81 100644 --- a/include/crypto/acompress.h +++ b/include/crypto/acompress.h @@ -106,6 +106,24 @@ struct acomp_alg { */ struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type, u32 mask); +/** + * crypto_alloc_acomp_node() -- allocate ACOMPRESS tfm handle with desired NUMA node + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * compression algorithm e.g. "deflate" + * @type: specifies the type of the algorithm + * @mask: specifies the mask for the algorithm + * @node: specifies the NUMA node the ZIP hardware belongs to + * + * Allocate a handle for a compression algorithm. Drivers should try to use + * (de)compressors on the specified NUMA node. + * The returned struct crypto_acomp is the handle that is required for any + * subsequent API invocation for the compression operations. + * + * Return: allocated handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. 
+ */ +struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type, + u32 mask, int node); static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm) { diff --git a/include/linux/crypto.h b/include/linux/crypto.h index bc5d2d4bfc3d..7cd2d00f0a05 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -594,6 +594,8 @@ int crypto_has_alg(const char *name, u32 type, u32 mask); struct crypto_tfm { u32 crt_flags; + + int node; void (*exit)(struct crypto_tfm *tfm); -- cgit v1.2.3 From 1475ee0ac9a16dd5df23ca8abe1039eb6086eb66 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 6 Jul 2020 20:01:29 +0800 Subject: xfrm: add is_ipip to struct xfrm_input_afinfo This patch is to add a new member is_ipip to struct xfrm_input_afinfo, to allow another group family of callback functions to be registered with is_ipip set. This will be used for doing a callback for struct xfrm(6)_tunnel of ipip/ipv6 tunnels in xfrm_input() by calling xfrm_rcv_cb(), which is needed by ipip/ipv6 tunnels' support in ip(6)_vti and xfrm interface in the next patches. Signed-off-by: Xin Long Signed-off-by: Steffen Klassert --- include/net/xfrm.h | 3 ++- net/xfrm/xfrm_input.c | 24 +++++++++++++----------- 2 files changed, 15 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/net/xfrm.h b/include/net/xfrm.h index e20b2b27ec48..4666bc9e59ab 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -373,7 +373,8 @@ struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family); struct xfrm_input_afinfo { - unsigned int family; + u8 family; + bool is_ipip; int (*callback)(struct sk_buff *skb, u8 protocol, int err); }; diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index bd984ff17c2d..37456d022cfa 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -42,7 +42,7 @@ struct xfrm_trans_cb { #define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0])) static DEFINE_SPINLOCK(xfrm_input_afinfo_lock); -static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[AF_INET6 + 1]; +static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[2][AF_INET6 + 1]; static struct gro_cells gro_cells; static struct net_device xfrm_napi_dev; @@ -53,14 +53,14 @@ int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo) { int err = 0; - if (WARN_ON(afinfo->family >= ARRAY_SIZE(xfrm_input_afinfo))) + if (WARN_ON(afinfo->family > AF_INET6)) return -EAFNOSUPPORT; spin_lock_bh(&xfrm_input_afinfo_lock); - if (unlikely(xfrm_input_afinfo[afinfo->family] != NULL)) + if (unlikely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family])) err = -EEXIST; else - rcu_assign_pointer(xfrm_input_afinfo[afinfo->family], afinfo); + rcu_assign_pointer(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family], afinfo); spin_unlock_bh(&xfrm_input_afinfo_lock); return err; } @@ -71,11 +71,11 @@ int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo) int err = 0; spin_lock_bh(&xfrm_input_afinfo_lock); - if (likely(xfrm_input_afinfo[afinfo->family] != NULL)) { - if (unlikely(xfrm_input_afinfo[afinfo->family] != afinfo)) + if (likely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family])) { + if (unlikely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family] != afinfo)) err = -EINVAL; else - RCU_INIT_POINTER(xfrm_input_afinfo[afinfo->family], NULL); + RCU_INIT_POINTER(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family], NULL); } spin_unlock_bh(&xfrm_input_afinfo_lock); 
synchronize_rcu(); @@ -83,15 +83,15 @@ int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo) } EXPORT_SYMBOL(xfrm_input_unregister_afinfo); -static const struct xfrm_input_afinfo *xfrm_input_get_afinfo(unsigned int family) +static const struct xfrm_input_afinfo *xfrm_input_get_afinfo(u8 family, bool is_ipip) { const struct xfrm_input_afinfo *afinfo; - if (WARN_ON_ONCE(family >= ARRAY_SIZE(xfrm_input_afinfo))) + if (WARN_ON_ONCE(family > AF_INET6)) return NULL; rcu_read_lock(); - afinfo = rcu_dereference(xfrm_input_afinfo[family]); + afinfo = rcu_dereference(xfrm_input_afinfo[is_ipip][family]); if (unlikely(!afinfo)) rcu_read_unlock(); return afinfo; @@ -100,9 +100,11 @@ static const struct xfrm_input_afinfo *xfrm_input_get_afinfo(unsigned int family static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol, int err) { + bool is_ipip = (protocol == IPPROTO_IPIP || protocol == IPPROTO_IPV6); + const struct xfrm_input_afinfo *afinfo; int ret; - const struct xfrm_input_afinfo *afinfo = xfrm_input_get_afinfo(family); + afinfo = xfrm_input_get_afinfo(family, is_ipip); if (!afinfo) return -EAFNOSUPPORT; -- cgit v1.2.3 From 6df2db5d37ba3df8c80d90c15f1e20480be43f75 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 6 Jul 2020 20:01:30 +0800 Subject: tunnel4: add cb_handler to struct xfrm_tunnel This patch is to register a callback function tunnel4_rcv_cb with is_ipip set in a xfrm_input_afinfo object for tunnel4 and tunnel64. It will be called by xfrm_rcv_cb() from xfrm_input() when family is AF_INET and proto is IPPROTO_IPIP or IPPROTO_IPV6. v1->v2: - Fix a sparse warning caused by the missing "__rcu", as Jakub noticed. - Handle the err returned by xfrm_input_register_afinfo() in tunnel4_init/fini(), as Sabrina noticed. v2->v3: - Add "#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL)" to fix the build error when xfrm is disabled, reported by kbuild test robot. Signed-off-by: Xin Long Signed-off-by: Steffen Klassert --- include/net/xfrm.h | 1 + net/ipv4/tunnel4.c | 43 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+) (limited to 'include') diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 4666bc9e59ab..c1ec6294d773 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1416,6 +1416,7 @@ struct xfrm6_protocol { /* XFRM tunnel handlers. */ struct xfrm_tunnel { int (*handler)(struct sk_buff *skb); + int (*cb_handler)(struct sk_buff *skb, int err); int (*err_handler)(struct sk_buff *skb, u32 info); struct xfrm_tunnel __rcu *next; diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c index c4b2ccbeba04..e44aaf41a138 100644 --- a/net/ipv4/tunnel4.c +++ b/net/ipv4/tunnel4.c @@ -110,6 +110,33 @@ drop: return 0; } +#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL) +static int tunnel4_rcv_cb(struct sk_buff *skb, u8 proto, int err) +{ + struct xfrm_tunnel __rcu *head; + struct xfrm_tunnel *handler; + int ret; + + head = (proto == IPPROTO_IPIP) ? 
tunnel4_handlers : tunnel64_handlers; + + for_each_tunnel_rcu(head, handler) { + if (handler->cb_handler) { + ret = handler->cb_handler(skb, err); + if (ret <= 0) + return ret; + } + } + + return 0; +} + +static const struct xfrm_input_afinfo tunnel4_input_afinfo = { + .family = AF_INET, + .is_ipip = true, + .callback = tunnel4_rcv_cb, +}; +#endif + #if IS_ENABLED(CONFIG_IPV6) static int tunnel64_rcv(struct sk_buff *skb) { @@ -230,6 +257,18 @@ static int __init tunnel4_init(void) #endif goto err; } +#endif +#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL) + if (xfrm_input_register_afinfo(&tunnel4_input_afinfo)) { + inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP); +#if IS_ENABLED(CONFIG_IPV6) + inet_del_protocol(&tunnel64_protocol, IPPROTO_IPV6); +#endif +#if IS_ENABLED(CONFIG_MPLS) + inet_del_protocol(&tunnelmpls4_protocol, IPPROTO_MPLS); +#endif + goto err; + } #endif return 0; @@ -240,6 +279,10 @@ err: static void __exit tunnel4_fini(void) { +#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL) + if (xfrm_input_unregister_afinfo(&tunnel4_input_afinfo)) + pr_err("tunnel4 close: can't remove input afinfo\n"); +#endif #if IS_ENABLED(CONFIG_MPLS) if (inet_del_protocol(&tunnelmpls4_protocol, IPPROTO_MPLS)) pr_err("tunnelmpls4 close: can't remove protocol\n"); -- cgit v1.2.3 From 86afc7031826147407e96412668d343e0f1bd6fd Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 6 Jul 2020 20:01:31 +0800 Subject: tunnel6: add tunnel6_input_afinfo for ipip and ipv6 tunnels This patch is to register a callback function tunnel6_rcv_cb with is_ipip set in a xfrm_input_afinfo object for tunnel6 and tunnel46. It will be called by xfrm_rcv_cb() from xfrm_input() when family is AF_INET6 and proto is IPPROTO_IPIP or IPPROTO_IPV6. v1->v2: - Fix a sparse warning caused by the missing "__rcu", as Jakub noticed. - Handle the err returned by xfrm_input_register_afinfo() in tunnel6_init/fini(), as Sabrina noticed. v2->v3: - Add "#if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL)" to fix the build error when xfrm is disabled, reported by kbuild test robot Signed-off-by: Xin Long Signed-off-by: Steffen Klassert --- include/net/xfrm.h | 1 + net/ipv6/tunnel6.c | 41 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+) (limited to 'include') diff --git a/include/net/xfrm.h b/include/net/xfrm.h index c1ec6294d773..83a532dda1bd 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1425,6 +1425,7 @@ struct xfrm_tunnel { struct xfrm6_tunnel { int (*handler)(struct sk_buff *skb); + int (*cb_handler)(struct sk_buff *skb, int err); int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info); struct xfrm6_tunnel __rcu *next; diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c index 06c02ebe6b9b..00e8d8b1c9a7 100644 --- a/net/ipv6/tunnel6.c +++ b/net/ipv6/tunnel6.c @@ -155,6 +155,33 @@ drop: return 0; } +#if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL) +static int tunnel6_rcv_cb(struct sk_buff *skb, u8 proto, int err) +{ + struct xfrm6_tunnel __rcu *head; + struct xfrm6_tunnel *handler; + int ret; + + head = (proto == IPPROTO_IPV6) ? 
tunnel6_handlers : tunnel46_handlers; + + for_each_tunnel_rcu(head, handler) { + if (handler->cb_handler) { + ret = handler->cb_handler(skb, err); + if (ret <= 0) + return ret; + } + } + + return 0; +} + +static const struct xfrm_input_afinfo tunnel6_input_afinfo = { + .family = AF_INET6, + .is_ipip = true, + .callback = tunnel6_rcv_cb, +}; +#endif + static int tunnel46_rcv(struct sk_buff *skb) { struct xfrm6_tunnel *handler; @@ -245,11 +272,25 @@ static int __init tunnel6_init(void) inet6_del_protocol(&tunnel46_protocol, IPPROTO_IPIP); return -EAGAIN; } +#if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL) + if (xfrm_input_register_afinfo(&tunnel6_input_afinfo)) { + pr_err("%s: can't add input afinfo\n", __func__); + inet6_del_protocol(&tunnel6_protocol, IPPROTO_IPV6); + inet6_del_protocol(&tunnel46_protocol, IPPROTO_IPIP); + if (xfrm6_tunnel_mpls_supported()) + inet6_del_protocol(&tunnelmpls6_protocol, IPPROTO_MPLS); + return -EAGAIN; + } +#endif return 0; } static void __exit tunnel6_fini(void) { +#if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL) + if (xfrm_input_unregister_afinfo(&tunnel6_input_afinfo)) + pr_err("%s: can't remove input afinfo\n", __func__); +#endif if (inet6_del_protocol(&tunnel46_protocol, IPPROTO_IPIP)) pr_err("%s: can't remove protocol\n", __func__); if (inet6_del_protocol(&tunnel6_protocol, IPPROTO_IPV6)) -- cgit v1.2.3 From 995decb6c43e1d6e9d6a7d590471f2eea74600f4 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Wed, 8 Jul 2020 16:00:23 +0200 Subject: KVM: x86: take as_id into account when checking PGD OVMF booted guest running on shadow pages crashes on TRIPLE FAULT after enabling paging from SMM. The crash is triggered from mmu_check_root() and is caused by kvm_is_visible_gfn() searching through memslots with as_id = 0 while vCPU may be in a different context (address space). Introduce kvm_vcpu_is_visible_gfn() and use it from mmu_check_root(). 
Signed-off-by: Vitaly Kuznetsov Message-Id: <20200708140023.1476020-1-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 2 +- include/linux/kvm_host.h | 1 + virt/kvm/kvm_main.c | 8 ++++++++ 3 files changed, 10 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 0011b2c97f65..231beb6d9cf7 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3693,7 +3693,7 @@ static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn) { int ret = 0; - if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) { + if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) { kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); ret = 1; } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 9edc6fc71a89..87140e79648b 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -774,6 +774,7 @@ int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); +bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn); unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn); void mark_page_dirty(struct kvm *kvm, gfn_t gfn); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 0a68c9d3d3ab..b528a59b0a84 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1626,6 +1626,14 @@ bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) } EXPORT_SYMBOL_GPL(kvm_is_visible_gfn); +bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) +{ + struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); + + return kvm_is_visible_memslot(memslot); +} +EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn); + unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn) { struct vm_area_struct *vma; -- cgit v1.2.3 From 8d87ae48ced2dffd5e7247d19eb4c88be6f1c6f1 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 8 Jul 2020 16:32:13 -0700 Subject: PM: domains: Fix up terminology with parent/child The genpd infrastructure uses the terms master/slave, but such uses have no external exposures (not even in Documentation/driver-api/pm/*) and are not mandated by nor associated with any external specifications. Change the language used through-out to parent/child. There was one possible exception in the debugfs node "pm_genpd/pm_genpd_summary" but its path has no hits outside of the kernel itself when performing a code search[1], and it seems even this single usage has been non-functional since it was introduced due to a typo in the Python ("apend" instead of correct "append"). Fix the typo while we're at it. Link: https://codesearch.debian.net/ # [1] Signed-off-by: Kees Cook Reviewed-by: Greg Kroah-Hartman Reviewed-by: Kieran Bingham Signed-off-by: Rafael J. Wysocki --- drivers/base/power/domain.c | 194 +++++++++++++++++------------------ drivers/base/power/domain_governor.c | 12 +-- include/linux/pm_domain.h | 12 +-- scripts/gdb/linux/genpd.py | 12 +-- 4 files changed, 115 insertions(+), 115 deletions(-) (limited to 'include') diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 0a01df608849..2cb5e04cf86c 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -263,18 +263,18 @@ static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd, /* * Traverse all sub-domains within the domain. 
This can be * done without any additional locking as the link->performance_state - * field is protected by the master genpd->lock, which is already taken. + * field is protected by the parent genpd->lock, which is already taken. * * Also note that link->performance_state (subdomain's performance state - * requirement to master domain) is different from - * link->slave->performance_state (current performance state requirement + * requirement to parent domain) is different from + * link->child->performance_state (current performance state requirement * of the devices/sub-domains of the subdomain) and so can have a * different value. * * Note that we also take vote from powered-off sub-domains into account * as the same is done for devices right now. */ - list_for_each_entry(link, &genpd->master_links, master_node) { + list_for_each_entry(link, &genpd->parent_links, parent_node) { if (link->performance_state > state) state = link->performance_state; } @@ -285,40 +285,40 @@ static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd, static int _genpd_set_performance_state(struct generic_pm_domain *genpd, unsigned int state, int depth) { - struct generic_pm_domain *master; + struct generic_pm_domain *parent; struct gpd_link *link; - int master_state, ret; + int parent_state, ret; if (state == genpd->performance_state) return 0; - /* Propagate to masters of genpd */ - list_for_each_entry(link, &genpd->slave_links, slave_node) { - master = link->master; + /* Propagate to parents of genpd */ + list_for_each_entry(link, &genpd->child_links, child_node) { + parent = link->parent; - if (!master->set_performance_state) + if (!parent->set_performance_state) continue; - /* Find master's performance state */ + /* Find parent's performance state */ ret = dev_pm_opp_xlate_performance_state(genpd->opp_table, - master->opp_table, + parent->opp_table, state); if (unlikely(ret < 0)) goto err; - master_state = ret; + parent_state = ret; - genpd_lock_nested(master, depth + 1); + genpd_lock_nested(parent, depth + 1); link->prev_performance_state = link->performance_state; - link->performance_state = master_state; - master_state = _genpd_reeval_performance_state(master, - master_state); - ret = _genpd_set_performance_state(master, master_state, depth + 1); + link->performance_state = parent_state; + parent_state = _genpd_reeval_performance_state(parent, + parent_state); + ret = _genpd_set_performance_state(parent, parent_state, depth + 1); if (ret) link->performance_state = link->prev_performance_state; - genpd_unlock(master); + genpd_unlock(parent); if (ret) goto err; @@ -333,26 +333,26 @@ static int _genpd_set_performance_state(struct generic_pm_domain *genpd, err: /* Encountered an error, lets rollback */ - list_for_each_entry_continue_reverse(link, &genpd->slave_links, - slave_node) { - master = link->master; + list_for_each_entry_continue_reverse(link, &genpd->child_links, + child_node) { + parent = link->parent; - if (!master->set_performance_state) + if (!parent->set_performance_state) continue; - genpd_lock_nested(master, depth + 1); + genpd_lock_nested(parent, depth + 1); - master_state = link->prev_performance_state; - link->performance_state = master_state; + parent_state = link->prev_performance_state; + link->performance_state = parent_state; - master_state = _genpd_reeval_performance_state(master, - master_state); - if (_genpd_set_performance_state(master, master_state, depth + 1)) { + parent_state = _genpd_reeval_performance_state(parent, + parent_state); + if 
(_genpd_set_performance_state(parent, parent_state, depth + 1)) { pr_err("%s: Failed to roll back to %d performance state\n", - master->name, master_state); + parent->name, parent_state); } - genpd_unlock(master); + genpd_unlock(parent); } return ret; @@ -552,7 +552,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on, /* * If sd_count > 0 at this point, one of the subdomains hasn't - * managed to call genpd_power_on() for the master yet after + * managed to call genpd_power_on() for the parent yet after * incrementing it. In that case genpd_power_on() will wait * for us to drop the lock, so we can call .power_off() and let * the genpd_power_on() restore power for us (this shouldn't @@ -566,22 +566,22 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on, genpd->status = GPD_STATE_POWER_OFF; genpd_update_accounting(genpd); - list_for_each_entry(link, &genpd->slave_links, slave_node) { - genpd_sd_counter_dec(link->master); - genpd_lock_nested(link->master, depth + 1); - genpd_power_off(link->master, false, depth + 1); - genpd_unlock(link->master); + list_for_each_entry(link, &genpd->child_links, child_node) { + genpd_sd_counter_dec(link->parent); + genpd_lock_nested(link->parent, depth + 1); + genpd_power_off(link->parent, false, depth + 1); + genpd_unlock(link->parent); } return 0; } /** - * genpd_power_on - Restore power to a given PM domain and its masters. + * genpd_power_on - Restore power to a given PM domain and its parents. * @genpd: PM domain to power up. * @depth: nesting count for lockdep. * - * Restore power to @genpd and all of its masters so that it is possible to + * Restore power to @genpd and all of its parents so that it is possible to * resume a device belonging to it. */ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth) @@ -594,20 +594,20 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth) /* * The list is guaranteed not to change while the loop below is being - * executed, unless one of the masters' .power_on() callbacks fiddles + * executed, unless one of the parents' .power_on() callbacks fiddles * with it. */ - list_for_each_entry(link, &genpd->slave_links, slave_node) { - struct generic_pm_domain *master = link->master; + list_for_each_entry(link, &genpd->child_links, child_node) { + struct generic_pm_domain *parent = link->parent; - genpd_sd_counter_inc(master); + genpd_sd_counter_inc(parent); - genpd_lock_nested(master, depth + 1); - ret = genpd_power_on(master, depth + 1); - genpd_unlock(master); + genpd_lock_nested(parent, depth + 1); + ret = genpd_power_on(parent, depth + 1); + genpd_unlock(parent); if (ret) { - genpd_sd_counter_dec(master); + genpd_sd_counter_dec(parent); goto err; } } @@ -623,12 +623,12 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth) err: list_for_each_entry_continue_reverse(link, - &genpd->slave_links, - slave_node) { - genpd_sd_counter_dec(link->master); - genpd_lock_nested(link->master, depth + 1); - genpd_power_off(link->master, false, depth + 1); - genpd_unlock(link->master); + &genpd->child_links, + child_node) { + genpd_sd_counter_dec(link->parent); + genpd_lock_nested(link->parent, depth + 1); + genpd_power_off(link->parent, false, depth + 1); + genpd_unlock(link->parent); } return ret; @@ -932,13 +932,13 @@ late_initcall(genpd_power_off_unused); #ifdef CONFIG_PM_SLEEP /** - * genpd_sync_power_off - Synchronously power off a PM domain and its masters. 
+ * genpd_sync_power_off - Synchronously power off a PM domain and its parents. * @genpd: PM domain to power off, if possible. * @use_lock: use the lock. * @depth: nesting count for lockdep. * * Check if the given PM domain can be powered off (during system suspend or - * hibernation) and do that if so. Also, in that case propagate to its masters. + * hibernation) and do that if so. Also, in that case propagate to its parents. * * This function is only called in "noirq" and "syscore" stages of system power * transitions. The "noirq" callbacks may be executed asynchronously, thus in @@ -963,21 +963,21 @@ static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock, genpd->status = GPD_STATE_POWER_OFF; - list_for_each_entry(link, &genpd->slave_links, slave_node) { - genpd_sd_counter_dec(link->master); + list_for_each_entry(link, &genpd->child_links, child_node) { + genpd_sd_counter_dec(link->parent); if (use_lock) - genpd_lock_nested(link->master, depth + 1); + genpd_lock_nested(link->parent, depth + 1); - genpd_sync_power_off(link->master, use_lock, depth + 1); + genpd_sync_power_off(link->parent, use_lock, depth + 1); if (use_lock) - genpd_unlock(link->master); + genpd_unlock(link->parent); } } /** - * genpd_sync_power_on - Synchronously power on a PM domain and its masters. + * genpd_sync_power_on - Synchronously power on a PM domain and its parents. * @genpd: PM domain to power on. * @use_lock: use the lock. * @depth: nesting count for lockdep. @@ -994,16 +994,16 @@ static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock, if (genpd_status_on(genpd)) return; - list_for_each_entry(link, &genpd->slave_links, slave_node) { - genpd_sd_counter_inc(link->master); + list_for_each_entry(link, &genpd->child_links, child_node) { + genpd_sd_counter_inc(link->parent); if (use_lock) - genpd_lock_nested(link->master, depth + 1); + genpd_lock_nested(link->parent, depth + 1); - genpd_sync_power_on(link->master, use_lock, depth + 1); + genpd_sync_power_on(link->parent, use_lock, depth + 1); if (use_lock) - genpd_unlock(link->master); + genpd_unlock(link->parent); } _genpd_power_on(genpd, false); @@ -1443,12 +1443,12 @@ static void genpd_update_cpumask(struct generic_pm_domain *genpd, if (!genpd_is_cpu_domain(genpd)) return; - list_for_each_entry(link, &genpd->slave_links, slave_node) { - struct generic_pm_domain *master = link->master; + list_for_each_entry(link, &genpd->child_links, child_node) { + struct generic_pm_domain *parent = link->parent; - genpd_lock_nested(master, depth + 1); - genpd_update_cpumask(master, cpu, set, depth + 1); - genpd_unlock(master); + genpd_lock_nested(parent, depth + 1); + genpd_update_cpumask(parent, cpu, set, depth + 1); + genpd_unlock(parent); } if (set) @@ -1636,17 +1636,17 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd, goto out; } - list_for_each_entry(itr, &genpd->master_links, master_node) { - if (itr->slave == subdomain && itr->master == genpd) { + list_for_each_entry(itr, &genpd->parent_links, parent_node) { + if (itr->child == subdomain && itr->parent == genpd) { ret = -EINVAL; goto out; } } - link->master = genpd; - list_add_tail(&link->master_node, &genpd->master_links); - link->slave = subdomain; - list_add_tail(&link->slave_node, &subdomain->slave_links); + link->parent = genpd; + list_add_tail(&link->parent_node, &genpd->parent_links); + link->child = subdomain; + list_add_tail(&link->child_node, &subdomain->child_links); if (genpd_status_on(subdomain)) genpd_sd_counter_inc(genpd); @@ -1660,7 
+1660,7 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd, /** * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. - * @genpd: Master PM domain to add the subdomain to. + * @genpd: Leader PM domain to add the subdomain to. * @subdomain: Subdomain to be added. */ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, @@ -1678,7 +1678,7 @@ EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain); /** * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. - * @genpd: Master PM domain to remove the subdomain from. + * @genpd: Leader PM domain to remove the subdomain from. * @subdomain: Subdomain to be removed. */ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, @@ -1693,19 +1693,19 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, genpd_lock(subdomain); genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING); - if (!list_empty(&subdomain->master_links) || subdomain->device_count) { + if (!list_empty(&subdomain->parent_links) || subdomain->device_count) { pr_warn("%s: unable to remove subdomain %s\n", genpd->name, subdomain->name); ret = -EBUSY; goto out; } - list_for_each_entry_safe(link, l, &genpd->master_links, master_node) { - if (link->slave != subdomain) + list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) { + if (link->child != subdomain) continue; - list_del(&link->master_node); - list_del(&link->slave_node); + list_del(&link->parent_node); + list_del(&link->child_node); kfree(link); if (genpd_status_on(subdomain)) genpd_sd_counter_dec(genpd); @@ -1770,8 +1770,8 @@ int pm_genpd_init(struct generic_pm_domain *genpd, if (IS_ERR_OR_NULL(genpd)) return -EINVAL; - INIT_LIST_HEAD(&genpd->master_links); - INIT_LIST_HEAD(&genpd->slave_links); + INIT_LIST_HEAD(&genpd->parent_links); + INIT_LIST_HEAD(&genpd->child_links); INIT_LIST_HEAD(&genpd->dev_list); genpd_lock_init(genpd); genpd->gov = gov; @@ -1848,15 +1848,15 @@ static int genpd_remove(struct generic_pm_domain *genpd) return -EBUSY; } - if (!list_empty(&genpd->master_links) || genpd->device_count) { + if (!list_empty(&genpd->parent_links) || genpd->device_count) { genpd_unlock(genpd); pr_err("%s: unable to remove %s\n", __func__, genpd->name); return -EBUSY; } - list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) { - list_del(&link->master_node); - list_del(&link->slave_node); + list_for_each_entry_safe(link, l, &genpd->child_links, child_node) { + list_del(&link->parent_node); + list_del(&link->child_node); kfree(link); } @@ -2827,12 +2827,12 @@ static int genpd_summary_one(struct seq_file *s, /* * Modifications on the list require holding locks on both - * master and slave, so we are safe. + * parent and child, so we are safe. * Also genpd->name is immutable. 
*/ - list_for_each_entry(link, &genpd->master_links, master_node) { - seq_printf(s, "%s", link->slave->name); - if (!list_is_last(&link->master_node, &genpd->master_links)) + list_for_each_entry(link, &genpd->parent_links, parent_node) { + seq_printf(s, "%s", link->child->name); + if (!list_is_last(&link->parent_node, &genpd->parent_links)) seq_puts(s, ", "); } @@ -2860,7 +2860,7 @@ static int summary_show(struct seq_file *s, void *data) struct generic_pm_domain *genpd; int ret = 0; - seq_puts(s, "domain status slaves\n"); + seq_puts(s, "domain status children\n"); seq_puts(s, " /device runtime status\n"); seq_puts(s, "----------------------------------------------------------------------\n"); @@ -2915,8 +2915,8 @@ static int sub_domains_show(struct seq_file *s, void *data) if (ret) return -ERESTARTSYS; - list_for_each_entry(link, &genpd->master_links, master_node) - seq_printf(s, "%s\n", link->slave->name); + list_for_each_entry(link, &genpd->parent_links, parent_node) + seq_printf(s, "%s\n", link->child->name); genpd_unlock(genpd); return ret; diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c index daa8c7689f7e..490ed7deb99a 100644 --- a/drivers/base/power/domain_governor.c +++ b/drivers/base/power/domain_governor.c @@ -135,8 +135,8 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd, * * All subdomains have been powered off already at this point. */ - list_for_each_entry(link, &genpd->master_links, master_node) { - struct generic_pm_domain *sd = link->slave; + list_for_each_entry(link, &genpd->parent_links, parent_node) { + struct generic_pm_domain *sd = link->child; s64 sd_max_off_ns = sd->max_off_time_ns; if (sd_max_off_ns < 0) @@ -217,13 +217,13 @@ static bool default_power_down_ok(struct dev_pm_domain *pd) } /* - * We have to invalidate the cached results for the masters, so + * We have to invalidate the cached results for the parents, so * use the observation that default_power_down_ok() is not - * going to be called for any master until this instance + * going to be called for any parent until this instance * returns. 
*/ - list_for_each_entry(link, &genpd->slave_links, slave_node) - link->master->max_off_time_changed = true; + list_for_each_entry(link, &genpd->child_links, child_node) + link->parent->max_off_time_changed = true; genpd->max_off_time_ns = -1; genpd->max_off_time_changed = false; diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 9ec78ee53652..574a1fadb1e5 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -95,8 +95,8 @@ struct generic_pm_domain { struct device dev; struct dev_pm_domain domain; /* PM domain operations */ struct list_head gpd_list_node; /* Node in the global PM domains list */ - struct list_head master_links; /* Links with PM domain as a master */ - struct list_head slave_links; /* Links with PM domain as a slave */ + struct list_head parent_links; /* Links with PM domain as a parent */ + struct list_head child_links;/* Links with PM domain as a child */ struct list_head dev_list; /* List of devices */ struct dev_power_governor *gov; struct work_struct power_off_work; @@ -151,10 +151,10 @@ static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) } struct gpd_link { - struct generic_pm_domain *master; - struct list_head master_node; - struct generic_pm_domain *slave; - struct list_head slave_node; + struct generic_pm_domain *parent; + struct list_head parent_node; + struct generic_pm_domain *child; + struct list_head child_node; /* Sub-domain's per-master domain performance state */ unsigned int performance_state; diff --git a/scripts/gdb/linux/genpd.py b/scripts/gdb/linux/genpd.py index 6ca93bd2949e..39cd1abd8559 100644 --- a/scripts/gdb/linux/genpd.py +++ b/scripts/gdb/linux/genpd.py @@ -49,17 +49,17 @@ Output is similar to /sys/kernel/debug/pm_genpd/pm_genpd_summary''' else: status_string = 'off-{}'.format(genpd['state_idx']) - slave_names = [] + child_names = [] for link in list_for_each_entry( - genpd['master_links'], + genpd['parent_links'], device_link_type.get_type().pointer(), - 'master_node'): - slave_names.apend(link['slave']['name']) + 'parent_node'): + child_names.append(link['child']['name']) gdb.write('%-30s %-15s %s\n' % ( genpd['name'].string(), status_string, - ', '.join(slave_names))) + ', '.join(child_names))) # Print devices in domain for pm_data in list_for_each_entry(genpd['dev_list'], @@ -70,7 +70,7 @@ Output is similar to /sys/kernel/debug/pm_genpd/pm_genpd_summary''' gdb.write(' %-50s %s\n' % (kobj_path, rtpm_status_str(dev))) def invoke(self, arg, from_tty): - gdb.write('domain status slaves\n'); + gdb.write('domain status children\n'); gdb.write(' /device runtime status\n'); gdb.write('----------------------------------------------------------------------\n'); for genpd in list_for_each_entry( -- cgit v1.2.3 From 2aa9c199cf8151c190c7e7ca3ddfcfbb2d85ac36 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 2 Jul 2020 19:35:38 -0700 Subject: KVM: Move x86's version of struct kvm_mmu_memory_cache to common code Move x86's 'struct kvm_mmu_memory_cache' to common code in anticipation of moving the entire x86 implementation code to common KVM and reusing it for arm64 and MIPS. Add a new architecture specific asm/kvm_types.h to control the existence and parameters of the struct. The new header is needed to avoid a chicken-and-egg problem with asm/kvm_host.h as all architectures define instances of the struct in their vCPU structs. Add an asm-generic version of kvm_types.h to avoid having empty files on PPC and s390 in the long term, and for arm64 and mips in the short term. 
Suggested-by: Christoffer Dall Reviewed-by: Ben Gardon Signed-off-by: Sean Christopherson Message-Id: <20200703023545.8771-15-sean.j.christopherson@intel.com> Signed-off-by: Paolo Bonzini --- arch/arm64/include/asm/Kbuild | 1 + arch/mips/include/asm/Kbuild | 1 + arch/powerpc/include/asm/Kbuild | 1 + arch/s390/include/asm/Kbuild | 1 + arch/x86/include/asm/kvm_host.h | 13 ------------- arch/x86/include/asm/kvm_types.h | 7 +++++++ include/asm-generic/kvm_types.h | 5 +++++ include/linux/kvm_types.h | 19 +++++++++++++++++++ 8 files changed, 35 insertions(+), 13 deletions(-) create mode 100644 arch/x86/include/asm/kvm_types.h create mode 100644 include/asm-generic/kvm_types.h (limited to 'include') diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index ff9cbb631212..35a68155cd0e 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 generic-y += early_ioremap.h +generic-y += kvm_types.h generic-y += local64.h generic-y += mcs_spinlock.h generic-y += qrwlock.h diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 8643d313890e..397e6d24d2ab 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -5,6 +5,7 @@ generated-y += syscall_table_64_n32.h generated-y += syscall_table_64_n64.h generated-y += syscall_table_64_o32.h generic-y += export.h +generic-y += kvm_types.h generic-y += local64.h generic-y += mcs_spinlock.h generic-y += parport.h diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild index dadbcf3a0b1e..2d444d09b553 100644 --- a/arch/powerpc/include/asm/Kbuild +++ b/arch/powerpc/include/asm/Kbuild @@ -4,6 +4,7 @@ generated-y += syscall_table_64.h generated-y += syscall_table_c32.h generated-y += syscall_table_spu.h generic-y += export.h +generic-y += kvm_types.h generic-y += local64.h generic-y += mcs_spinlock.h generic-y += vtime.h diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild index 83f6e85de7bc..319efa0e6d02 100644 --- a/arch/s390/include/asm/Kbuild +++ b/arch/s390/include/asm/Kbuild @@ -6,5 +6,6 @@ generated-y += unistd_nr.h generic-y += asm-offsets.h generic-y += export.h +generic-y += kvm_types.h generic-y += local64.h generic-y += mcs_spinlock.h diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 9d41eb5a8453..5aaef036627f 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -193,8 +193,6 @@ struct x86_exception; enum x86_intercept; enum x86_intercept_stage; -#define KVM_NR_MEM_OBJS 40 - #define KVM_NR_DB_REGS 4 #define DR6_BD (1 << 13) @@ -245,17 +243,6 @@ enum x86_intercept_stage; struct kvm_kernel_irq_routing_entry; -/* - * We don't want allocation failures within the mmu code, so we preallocate - * enough memory for a single page fault in a cache. 
- */ -struct kvm_mmu_memory_cache { - int nobjs; - gfp_t gfp_zero; - struct kmem_cache *kmem_cache; - void *objects[KVM_NR_MEM_OBJS]; -}; - /* * the pages used as guest page table on soft mmu are tracked by * kvm_memory_slot.arch.gfn_track which is 16 bits, so the role bits used diff --git a/arch/x86/include/asm/kvm_types.h b/arch/x86/include/asm/kvm_types.h new file mode 100644 index 000000000000..08f1b57d3b62 --- /dev/null +++ b/arch/x86/include/asm/kvm_types.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_KVM_TYPES_H +#define _ASM_X86_KVM_TYPES_H + +#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40 + +#endif /* _ASM_X86_KVM_TYPES_H */ diff --git a/include/asm-generic/kvm_types.h b/include/asm-generic/kvm_types.h new file mode 100644 index 000000000000..2a82daf110f1 --- /dev/null +++ b/include/asm-generic/kvm_types.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_KVM_TYPES_H +#define _ASM_GENERIC_KVM_TYPES_H + +#endif diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index 68e84cf42a3f..a7580f69dda0 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -20,6 +20,8 @@ enum kvm_mr_change; #include +#include + /* * Address types: * @@ -58,4 +60,21 @@ struct gfn_to_pfn_cache { bool dirty; }; +#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE +/* + * Memory caches are used to preallocate memory ahead of various MMU flows, + * e.g. page fault handlers. Gracefully handling allocation failures deep in + * MMU flows is problematic, as is triggering reclaim, I/O, etc... while + * holding MMU locks. Note, these caches act more like prefetch buffers than + * classical caches, i.e. objects are not returned to the cache on being freed. + */ +struct kvm_mmu_memory_cache { + int nobjs; + gfp_t gfp_zero; + struct kmem_cache *kmem_cache; + void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE]; +}; +#endif + + #endif /* __KVM_TYPES_H__ */ -- cgit v1.2.3 From 6926f95accee3f8ceb5f69dbecd880687028ae70 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 2 Jul 2020 19:35:39 -0700 Subject: KVM: Move x86's MMU memory cache helpers to common KVM code Move x86's memory cache helpers to common KVM code so that they can be reused by arm64 and MIPS in future patches. Suggested-by: Christoffer Dall Reviewed-by: Ben Gardon Signed-off-by: Sean Christopherson Message-Id: <20200703023545.8771-16-sean.j.christopherson@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 53 ---------------------------------------------- include/linux/kvm_host.h | 7 ++++++ virt/kvm/kvm_main.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 62 insertions(+), 53 deletions(-) (limited to 'include') diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 942b6a90cb17..fa506aaaf019 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1061,47 +1061,6 @@ static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu) local_irq_enable(); } -static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc, - gfp_t gfp_flags) -{ - gfp_flags |= mc->gfp_zero; - - if (mc->kmem_cache) - return kmem_cache_alloc(mc->kmem_cache, gfp_flags); - else - return (void *)__get_free_page(gfp_flags); -} - -static int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min) -{ - void *obj; - - if (mc->nobjs >= min) - return 0; - while (mc->nobjs < ARRAY_SIZE(mc->objects)) { - obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT); - if (!obj) - return mc->nobjs >= min ? 
0 : -ENOMEM; - mc->objects[mc->nobjs++] = obj; - } - return 0; -} - -static int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc) -{ - return mc->nobjs; -} - -static void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc) -{ - while (mc->nobjs) { - if (mc->kmem_cache) - kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]); - else - free_page((unsigned long)mc->objects[--mc->nobjs]); - } -} - static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect) { int r; @@ -1133,18 +1092,6 @@ static void mmu_free_memory_caches(struct kvm_vcpu *vcpu) kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache); } -static void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) -{ - void *p; - - if (WARN_ON(!mc->nobjs)) - p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT); - else - p = mc->objects[--mc->nobjs]; - BUG_ON(!p); - return p; -} - static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu) { return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 87140e79648b..989afcbe642f 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -817,6 +817,13 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible); void kvm_flush_remote_tlbs(struct kvm *kvm); void kvm_reload_remote_mmus(struct kvm *kvm); +#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE +int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min); +int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc); +void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc); +void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc); +#endif + bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, struct kvm_vcpu *except, unsigned long *vcpu_bitmap, cpumask_var_t tmp); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index b528a59b0a84..2c2c0254c2d8 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -340,6 +340,61 @@ void kvm_reload_remote_mmus(struct kvm *kvm) kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD); } +#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE +static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc, + gfp_t gfp_flags) +{ + gfp_flags |= mc->gfp_zero; + + if (mc->kmem_cache) + return kmem_cache_alloc(mc->kmem_cache, gfp_flags); + else + return (void *)__get_free_page(gfp_flags); +} + +int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min) +{ + void *obj; + + if (mc->nobjs >= min) + return 0; + while (mc->nobjs < ARRAY_SIZE(mc->objects)) { + obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT); + if (!obj) + return mc->nobjs >= min ? 
0 : -ENOMEM; + mc->objects[mc->nobjs++] = obj; + } + return 0; +} + +int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc) +{ + return mc->nobjs; +} + +void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc) +{ + while (mc->nobjs) { + if (mc->kmem_cache) + kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]); + else + free_page((unsigned long)mc->objects[--mc->nobjs]); + } +} + +void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) +{ + void *p; + + if (WARN_ON(!mc->nobjs)) + p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT); + else + p = mc->objects[--mc->nobjs]; + BUG_ON(!p); + return p; +} +#endif + static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) { mutex_init(&vcpu->mutex); -- cgit v1.2.3 From 3f935c75eb52dd968351dba824adf466fb9c9429 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Thu, 9 Jul 2020 15:12:39 +0200 Subject: inet_diag: support for wider protocol numbers After commit bf9765145b85 ("sock: Make sk_protocol a 16-bit value") the current size of 'sdiag_protocol' is not sufficient to represent the possible protocol values. This change introduces a new inet diag request attribute to let user space specify the relevant protocol number using u32 values. The attribute is parsed by inet diag core on get/dump command and the extended protocol value, if available, is preferred to 'sdiag_protocol' to lookup the diag handler. The parse attributed are exposed to all the diag handlers via the cb->data. Note that inet_diag_dump_one_icsk() is left unmodified, as it will not be used by protocol using the extended attribute. Suggested-by: David S. Miller Co-developed-by: Christoph Paasch Signed-off-by: Christoph Paasch Acked-by: Mat Martineau Signed-off-by: Paolo Abeni Signed-off-by: David S. 
Miller --- include/uapi/linux/inet_diag.h | 1 + net/core/sock.c | 1 + net/ipv4/inet_diag.c | 65 +++++++++++++++++++++++++++++++----------- 3 files changed, 50 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h index e6f183ee8417..5ba122c1949a 100644 --- a/include/uapi/linux/inet_diag.h +++ b/include/uapi/linux/inet_diag.h @@ -65,6 +65,7 @@ enum { INET_DIAG_REQ_NONE, INET_DIAG_REQ_BYTECODE, INET_DIAG_REQ_SK_BPF_STORAGES, + INET_DIAG_REQ_PROTOCOL, __INET_DIAG_REQ_MAX, }; diff --git a/net/core/sock.c b/net/core/sock.c index f5b5fdd61c88..de26fe4ea19f 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -3566,6 +3566,7 @@ int sock_load_diag_module(int family, int protocol) #ifdef CONFIG_INET if (family == AF_INET && protocol != IPPROTO_RAW && + protocol < MAX_INET_PROTOS && !rcu_access_pointer(inet_protos[protocol])) return -ENOENT; #endif diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 125f4f8a36b4..4a98dd736270 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -52,6 +52,11 @@ static DEFINE_MUTEX(inet_diag_table_mutex); static const struct inet_diag_handler *inet_diag_lock_handler(int proto) { + if (proto < 0 || proto >= IPPROTO_MAX) { + mutex_lock(&inet_diag_table_mutex); + return ERR_PTR(-ENOENT); + } + if (!inet_diag_table[proto]) sock_load_diag_module(AF_INET, proto); @@ -181,6 +186,28 @@ errout: } EXPORT_SYMBOL_GPL(inet_diag_msg_attrs_fill); +static void inet_diag_parse_attrs(const struct nlmsghdr *nlh, int hdrlen, + struct nlattr **req_nlas) +{ + struct nlattr *nla; + int remaining; + + nlmsg_for_each_attr(nla, nlh, hdrlen, remaining) { + int type = nla_type(nla); + + if (type < __INET_DIAG_REQ_MAX) + req_nlas[type] = nla; + } +} + +static int inet_diag_get_protocol(const struct inet_diag_req_v2 *req, + const struct inet_diag_dump_data *data) +{ + if (data->req_nlas[INET_DIAG_REQ_PROTOCOL]) + return nla_get_u32(data->req_nlas[INET_DIAG_REQ_PROTOCOL]); + return req->sdiag_protocol; +} + #define MAX_DUMP_ALLOC_SIZE (KMALLOC_MAX_SIZE - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, @@ -198,7 +225,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, void *info = NULL; cb_data = cb->data; - handler = inet_diag_table[req->sdiag_protocol]; + handler = inet_diag_table[inet_diag_get_protocol(req, cb_data)]; BUG_ON(!handler); nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, @@ -539,20 +566,25 @@ EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk); static int inet_diag_cmd_exact(int cmd, struct sk_buff *in_skb, const struct nlmsghdr *nlh, + int hdrlen, const struct inet_diag_req_v2 *req) { const struct inet_diag_handler *handler; - int err; + struct inet_diag_dump_data dump_data; + int err, protocol; - handler = inet_diag_lock_handler(req->sdiag_protocol); + memset(&dump_data, 0, sizeof(dump_data)); + inet_diag_parse_attrs(nlh, hdrlen, dump_data.req_nlas); + protocol = inet_diag_get_protocol(req, &dump_data); + + handler = inet_diag_lock_handler(protocol); if (IS_ERR(handler)) { err = PTR_ERR(handler); } else if (cmd == SOCK_DIAG_BY_FAMILY) { - struct inet_diag_dump_data empty_dump_data = {}; struct netlink_callback cb = { .nlh = nlh, .skb = in_skb, - .data = &empty_dump_data, + .data = &dump_data, }; err = handler->dump_one(&cb, req); } else if (cmd == SOCK_DESTROY && handler->destroy) { @@ -1103,13 +1135,16 @@ EXPORT_SYMBOL_GPL(inet_diag_dump_icsk); static int 
__inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, const struct inet_diag_req_v2 *r) { + struct inet_diag_dump_data *cb_data = cb->data; const struct inet_diag_handler *handler; u32 prev_min_dump_alloc; - int err = 0; + int protocol, err = 0; + + protocol = inet_diag_get_protocol(r, cb_data); again: prev_min_dump_alloc = cb->min_dump_alloc; - handler = inet_diag_lock_handler(r->sdiag_protocol); + handler = inet_diag_lock_handler(protocol); if (!IS_ERR(handler)) handler->dump(skb, cb, r); else @@ -1139,19 +1174,13 @@ static int __inet_diag_dump_start(struct netlink_callback *cb, int hdrlen) struct inet_diag_dump_data *cb_data; struct sk_buff *skb = cb->skb; struct nlattr *nla; - int rem, err; + int err; cb_data = kzalloc(sizeof(*cb_data), GFP_KERNEL); if (!cb_data) return -ENOMEM; - nla_for_each_attr(nla, nlmsg_attrdata(nlh, hdrlen), - nlmsg_attrlen(nlh, hdrlen), rem) { - int type = nla_type(nla); - - if (type < __INET_DIAG_REQ_MAX) - cb_data->req_nlas[type] = nla; - } + inet_diag_parse_attrs(nlh, hdrlen, cb_data->req_nlas); nla = cb_data->inet_diag_nla_bc; if (nla) { @@ -1237,7 +1266,8 @@ static int inet_diag_get_exact_compat(struct sk_buff *in_skb, req.idiag_states = rc->idiag_states; req.id = rc->id; - return inet_diag_cmd_exact(SOCK_DIAG_BY_FAMILY, in_skb, nlh, &req); + return inet_diag_cmd_exact(SOCK_DIAG_BY_FAMILY, in_skb, nlh, + sizeof(struct inet_diag_req), &req); } static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh) @@ -1279,7 +1309,8 @@ static int inet_diag_handler_cmd(struct sk_buff *skb, struct nlmsghdr *h) return netlink_dump_start(net->diag_nlsk, skb, h, &c); } - return inet_diag_cmd_exact(h->nlmsg_type, skb, h, nlmsg_data(h)); + return inet_diag_cmd_exact(h->nlmsg_type, skb, h, hdrlen, + nlmsg_data(h)); } static -- cgit v1.2.3 From ac3b45f6095452a9731f8825be1513d326dbfa15 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Thu, 9 Jul 2020 15:12:41 +0200 Subject: mptcp: add MPTCP socket diag interface exposes basic inet socket attribute, plus some MPTCP socket fields comprising PM status and MPTCP-level sequence numbers. Reviewed-by: Mat Martineau Signed-off-by: Paolo Abeni Signed-off-by: David S. 
Miller --- include/uapi/linux/mptcp.h | 17 +++++ net/mptcp/Kconfig | 4 ++ net/mptcp/Makefile | 2 + net/mptcp/mptcp_diag.c | 169 +++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 192 insertions(+) create mode 100644 net/mptcp/mptcp_diag.c (limited to 'include') diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h index 5f2c77082d9e..9762660df741 100644 --- a/include/uapi/linux/mptcp.h +++ b/include/uapi/linux/mptcp.h @@ -86,4 +86,21 @@ enum { __MPTCP_PM_CMD_AFTER_LAST }; +#define MPTCP_INFO_FLAG_FALLBACK _BITUL(0) +#define MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED _BITUL(1) + +struct mptcp_info { + __u8 mptcpi_subflows; + __u8 mptcpi_add_addr_signal; + __u8 mptcpi_add_addr_accepted; + __u8 mptcpi_subflows_max; + __u8 mptcpi_add_addr_signal_max; + __u8 mptcpi_add_addr_accepted_max; + __u32 mptcpi_flags; + __u32 mptcpi_token; + __u64 mptcpi_write_seq; + __u64 mptcpi_snd_una; + __u64 mptcpi_rcv_nxt; +}; + #endif /* _UAPI_MPTCP_H */ diff --git a/net/mptcp/Kconfig b/net/mptcp/Kconfig index af84fce70bb0..698bc3525160 100644 --- a/net/mptcp/Kconfig +++ b/net/mptcp/Kconfig @@ -13,6 +13,10 @@ config MPTCP if MPTCP +config INET_MPTCP_DIAG + depends on INET_DIAG + def_tristate INET_DIAG + config MPTCP_IPV6 bool "MPTCP: IPv6 support for Multipath TCP" select IPV6 diff --git a/net/mptcp/Makefile b/net/mptcp/Makefile index c53f9b845523..2360cbd27d59 100644 --- a/net/mptcp/Makefile +++ b/net/mptcp/Makefile @@ -4,6 +4,8 @@ obj-$(CONFIG_MPTCP) += mptcp.o mptcp-y := protocol.o subflow.o options.o token.o crypto.o ctrl.o pm.o diag.o \ mib.o pm_netlink.o +obj-$(CONFIG_INET_MPTCP_DIAG) += mptcp_diag.o + mptcp_crypto_test-objs := crypto_test.o mptcp_token_test-objs := token_test.o obj-$(CONFIG_MPTCP_KUNIT_TESTS) += mptcp_crypto_test.o mptcp_token_test.o diff --git a/net/mptcp/mptcp_diag.c b/net/mptcp/mptcp_diag.c new file mode 100644 index 000000000000..5f390a97f556 --- /dev/null +++ b/net/mptcp/mptcp_diag.c @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: GPL-2.0 +/* MPTCP socket monitoring support + * + * Copyright (c) 2020 Red Hat + * + * Author: Paolo Abeni + */ + +#include +#include +#include +#include +#include +#include "protocol.h" + +static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, + struct netlink_callback *cb, + const struct inet_diag_req_v2 *req, + struct nlattr *bc, bool net_admin) +{ + if (!inet_diag_bc_sk(bc, sk)) + return 0; + + return inet_sk_diag_fill(sk, inet_csk(sk), skb, cb, req, NLM_F_MULTI, + net_admin); +} + +static int mptcp_diag_dump_one(struct netlink_callback *cb, + const struct inet_diag_req_v2 *req) +{ + struct sk_buff *in_skb = cb->skb; + struct mptcp_sock *msk = NULL; + struct sk_buff *rep; + int err = -ENOENT; + struct net *net; + struct sock *sk; + + net = sock_net(in_skb->sk); + msk = mptcp_token_get_sock(req->id.idiag_cookie[0]); + if (!msk) + goto out_nosk; + + err = -ENOMEM; + sk = (struct sock *)msk; + rep = nlmsg_new(nla_total_size(sizeof(struct inet_diag_msg)) + + inet_diag_msg_attrs_size() + + nla_total_size(sizeof(struct mptcp_info)) + + nla_total_size(sizeof(struct inet_diag_meminfo)) + 64, + GFP_KERNEL); + if (!rep) + goto out; + + err = inet_sk_diag_fill(sk, inet_csk(sk), rep, cb, req, 0, + netlink_net_capable(in_skb, CAP_NET_ADMIN)); + if (err < 0) { + WARN_ON(err == -EMSGSIZE); + kfree_skb(rep); + goto out; + } + err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid, + MSG_DONTWAIT); + if (err > 0) + err = 0; +out: + sock_put(sk); + +out_nosk: + return err; +} + +static void mptcp_diag_dump(struct sk_buff *skb, 
struct netlink_callback *cb, + const struct inet_diag_req_v2 *r) +{ + bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN); + struct net *net = sock_net(skb->sk); + struct inet_diag_dump_data *cb_data; + struct mptcp_sock *msk; + struct nlattr *bc; + + cb_data = cb->data; + bc = cb_data->inet_diag_nla_bc; + + while ((msk = mptcp_token_iter_next(net, &cb->args[0], &cb->args[1])) != + NULL) { + struct inet_sock *inet = (struct inet_sock *)msk; + struct sock *sk = (struct sock *)msk; + int ret = 0; + + if (!(r->idiag_states & (1 << sk->sk_state))) + goto next; + if (r->sdiag_family != AF_UNSPEC && + sk->sk_family != r->sdiag_family) + goto next; + if (r->id.idiag_sport != inet->inet_sport && + r->id.idiag_sport) + goto next; + if (r->id.idiag_dport != inet->inet_dport && + r->id.idiag_dport) + goto next; + + ret = sk_diag_dump(sk, skb, cb, r, bc, net_admin); +next: + sock_put(sk); + if (ret < 0) { + /* will retry on the same position */ + cb->args[1]--; + break; + } + cond_resched(); + } +} + +static void mptcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, + void *_info) +{ + struct mptcp_sock *msk = mptcp_sk(sk); + struct mptcp_info *info = _info; + u32 flags = 0; + bool slow; + u8 val; + + r->idiag_rqueue = sk_rmem_alloc_get(sk); + r->idiag_wqueue = sk_wmem_alloc_get(sk); + if (!info) + return; + + slow = lock_sock_fast(sk); + info->mptcpi_subflows = READ_ONCE(msk->pm.subflows); + info->mptcpi_add_addr_signal = READ_ONCE(msk->pm.add_addr_signaled); + info->mptcpi_add_addr_accepted = READ_ONCE(msk->pm.add_addr_accepted); + info->mptcpi_subflows_max = READ_ONCE(msk->pm.subflows_max); + val = READ_ONCE(msk->pm.add_addr_signal_max); + info->mptcpi_add_addr_signal_max = val; + val = READ_ONCE(msk->pm.add_addr_accept_max); + info->mptcpi_add_addr_accepted_max = val; + if (test_bit(MPTCP_FALLBACK_DONE, &msk->flags)) + flags |= MPTCP_INFO_FLAG_FALLBACK; + if (READ_ONCE(msk->can_ack)) + flags |= MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED; + info->mptcpi_flags = flags; + info->mptcpi_token = READ_ONCE(msk->token); + info->mptcpi_write_seq = READ_ONCE(msk->write_seq); + info->mptcpi_snd_una = atomic64_read(&msk->snd_una); + info->mptcpi_rcv_nxt = READ_ONCE(msk->ack_seq); + unlock_sock_fast(sk, slow); +} + +static const struct inet_diag_handler mptcp_diag_handler = { + .dump = mptcp_diag_dump, + .dump_one = mptcp_diag_dump_one, + .idiag_get_info = mptcp_diag_get_info, + .idiag_type = IPPROTO_MPTCP, + .idiag_info_size = sizeof(struct mptcp_info), +}; + +static int __init mptcp_diag_init(void) +{ + return inet_diag_register(&mptcp_diag_handler); +} + +static void __exit mptcp_diag_exit(void) +{ + inet_diag_unregister(&mptcp_diag_handler); +} + +module_init(mptcp_diag_init); +module_exit(mptcp_diag_exit); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-262 /* AF_INET - IPPROTO_MPTCP */); -- cgit v1.2.3 From 10a429bab4462581bbda3fd7f41d4ec0ddc5e682 Mon Sep 17 00:00:00 2001 From: Danielle Ratson Date: Thu, 9 Jul 2020 16:18:14 +0300 Subject: devlink: Move set attribute of devlink_port_attrs to devlink_port The struct devlink_port_attrs holds the attributes of devlink_port. The 'set' field is not devlink_port's attribute as opposed to most of the others. Move 'set' to be devlink_port's field called 'attrs_set'. Signed-off-by: Danielle Ratson Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- include/net/devlink.h | 4 ++-- net/core/devlink.c | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/net/devlink.h b/include/net/devlink.h index 428f55f8197c..28f8d92c5741 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -65,8 +65,7 @@ struct devlink_port_pci_vf_attrs { }; struct devlink_port_attrs { - u8 set:1, - split:1, + u8 split:1, switch_port:1; enum devlink_port_flavour flavour; struct netdev_phys_item_id switch_id; @@ -90,6 +89,7 @@ struct devlink_port { enum devlink_port_type desired_type; void *type_dev; struct devlink_port_attrs attrs; + u8 attrs_set:1; struct delayed_work type_warn_dw; }; diff --git a/net/core/devlink.c b/net/core/devlink.c index 6ae36808c152..f28ae63cdb6b 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -528,7 +528,7 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg, { struct devlink_port_attrs *attrs = &devlink_port->attrs; - if (!attrs->set) + if (!devlink_port->attrs_set) return 0; if (nla_put_u16(msg, DEVLINK_ATTR_PORT_FLAVOUR, attrs->flavour)) return -EMSGSIZE; @@ -7518,7 +7518,7 @@ static int __devlink_port_attrs_set(struct devlink_port *devlink_port, if (WARN_ON(devlink_port->registered)) return -EEXIST; - attrs->set = true; + devlink_port->attrs_set = true; attrs->flavour = flavour; if (switch_id) { attrs->switch_port = true; @@ -7626,7 +7626,7 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port, struct devlink_port_attrs *attrs = &devlink_port->attrs; int n = 0; - if (!attrs->set) + if (!devlink_port->attrs_set) return -EOPNOTSUPP; switch (attrs->flavour) { -- cgit v1.2.3 From 46737a194945e540e3e2eb1fc870207928a9c2eb Mon Sep 17 00:00:00 2001 From: Danielle Ratson Date: Thu, 9 Jul 2020 16:18:15 +0300 Subject: devlink: Move switch_port attribute of devlink_port_attrs to devlink_port The struct devlink_port_attrs holds the attributes of devlink_port. Similarly to the previous patch, 'switch_port' attribute is another exception. Move 'switch_port' to be devlink_port's field. Signed-off-by: Danielle Ratson Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- include/net/devlink.h | 6 +++--- net/core/devlink.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/net/devlink.h b/include/net/devlink.h index 28f8d92c5741..de4b5dcdb4a5 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -65,8 +65,7 @@ struct devlink_port_pci_vf_attrs { }; struct devlink_port_attrs { - u8 split:1, - switch_port:1; + u8 split:1; enum devlink_port_flavour flavour; struct netdev_phys_item_id switch_id; union { @@ -89,7 +88,8 @@ struct devlink_port { enum devlink_port_type desired_type; void *type_dev; struct devlink_port_attrs attrs; - u8 attrs_set:1; + u8 attrs_set:1, + switch_port:1; struct delayed_work type_warn_dw; }; diff --git a/net/core/devlink.c b/net/core/devlink.c index f28ae63cdb6b..452b2f8a054e 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -7521,13 +7521,13 @@ static int __devlink_port_attrs_set(struct devlink_port *devlink_port, devlink_port->attrs_set = true; attrs->flavour = flavour; if (switch_id) { - attrs->switch_port = true; + devlink_port->switch_port = true; if (WARN_ON(switch_id_len > MAX_PHYS_ITEM_ID_LEN)) switch_id_len = MAX_PHYS_ITEM_ID_LEN; memcpy(attrs->switch_id.id, switch_id, switch_id_len); attrs->switch_id.id_len = switch_id_len; } else { - attrs->switch_port = false; + devlink_port->switch_port = false; } return 0; } @@ -9461,7 +9461,7 @@ int devlink_compat_switch_id_get(struct net_device *dev, * any devlink lock as only permanent values are accessed. */ devlink_port = netdev_to_devlink_port(dev); - if (!devlink_port || !devlink_port->attrs.switch_port) + if (!devlink_port || !devlink_port->switch_port) return -EOPNOTSUPP; memcpy(ppid, &devlink_port->attrs.switch_id, sizeof(*ppid)); -- cgit v1.2.3 From 71ad8d55f8e5ea101069b552422f392655e2ffb6 Mon Sep 17 00:00:00 2001 From: Danielle Ratson Date: Thu, 9 Jul 2020 16:18:16 +0300 Subject: devlink: Replace devlink_port_attrs_set parameters with a struct Currently, devlink_port_attrs_set accepts a long list of parameters, that most of them are devlink port's attributes. Use the devlink_port_attrs struct to replace the relevant parameters. Signed-off-by: Danielle Ratson Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 9 ++-- drivers/net/ethernet/intel/ice/ice_devlink.c | 6 ++- .../net/ethernet/mellanox/mlx5/core/en/devlink.c | 19 ++++---- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 16 +++---- drivers/net/ethernet/mellanox/mlxsw/core.c | 11 +++-- drivers/net/ethernet/netronome/nfp/nfp_devlink.c | 11 +++-- .../net/ethernet/pensando/ionic/ionic_devlink.c | 5 +- drivers/net/netdevsim/dev.c | 10 ++-- include/net/devlink.h | 20 ++++---- net/core/devlink.c | 54 +++++----------------- net/dsa/dsa2.c | 17 ++++--- 11 files changed, 82 insertions(+), 96 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 2bd610fafc58..3a854195d5b0 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -691,6 +691,7 @@ static void bnxt_dl_params_unregister(struct bnxt *bp) int bnxt_dl_register(struct bnxt *bp) { + struct devlink_port_attrs attrs = {}; struct devlink *dl; int rc; @@ -719,9 +720,11 @@ int bnxt_dl_register(struct bnxt *bp) if (!BNXT_PF(bp)) return 0; - devlink_port_attrs_set(&bp->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, - bp->pf.port_id, false, 0, bp->dsn, - sizeof(bp->dsn)); + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + attrs.phys.port_number = bp->pf.port_id; + memcpy(attrs.switch_id.id, bp->dsn, sizeof(bp->dsn)); + attrs.switch_id.id_len = sizeof(bp->dsn); + devlink_port_attrs_set(&bp->dl_port, &attrs); rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id); if (rc) { netdev_err(bp->dev, "devlink_port_register failed\n"); diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index 3ea470e8cfa2..43da2dcb0cbc 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -312,6 +312,7 @@ int ice_devlink_create_port(struct ice_pf *pf) struct devlink *devlink = priv_to_devlink(pf); struct ice_vsi *vsi = ice_get_main_vsi(pf); struct device *dev = ice_pf_to_dev(pf); + struct devlink_port_attrs attrs = {}; int err; if (!vsi) { @@ -319,8 +320,9 @@ int ice_devlink_create_port(struct ice_pf *pf) return -EIO; } - devlink_port_attrs_set(&pf->devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, - pf->hw.pf_id, false, 0, NULL, 0); + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + attrs.phys.port_number = pf->hw.pf_id; + devlink_port_attrs_set(&pf->devlink_port, &attrs); err = devlink_port_register(devlink, &pf->devlink_port, pf->hw.pf_id); if (err) { dev_err(dev, "devlink_port_register failed: %d\n", err); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c index f8b2de4b04be..a69c62d72d16 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c @@ -6,17 +6,16 @@ int mlx5e_devlink_port_register(struct mlx5e_priv *priv) { struct devlink *devlink = priv_to_devlink(priv->mdev); + struct devlink_port_attrs attrs = {}; - if (mlx5_core_is_pf(priv->mdev)) - devlink_port_attrs_set(&priv->dl_port, - DEVLINK_PORT_FLAVOUR_PHYSICAL, - PCI_FUNC(priv->mdev->pdev->devfn), - false, 0, - NULL, 0); - else - devlink_port_attrs_set(&priv->dl_port, - DEVLINK_PORT_FLAVOUR_VIRTUAL, - 0, false, 0, NULL, 0); + if (mlx5_core_is_pf(priv->mdev)) { + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + attrs.phys.port_number = PCI_FUNC(priv->mdev->pdev->devfn); + } else { + attrs.flavour 
= DEVLINK_PORT_FLAVOUR_VIRTUAL; + } + + devlink_port_attrs_set(&priv->dl_port, &attrs); return devlink_port_register(devlink, &priv->dl_port, 1); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index ed2430677b12..0a69f10ac30c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1185,6 +1185,7 @@ static int register_devlink_port(struct mlx5_core_dev *dev, { struct devlink *devlink = priv_to_devlink(dev); struct mlx5_eswitch_rep *rep = rpriv->rep; + struct devlink_port_attrs attrs = {}; struct netdev_phys_item_id ppid = {}; unsigned int dl_port_index = 0; u16 pfnum; @@ -1195,19 +1196,16 @@ static int register_devlink_port(struct mlx5_core_dev *dev, mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid); dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, rep->vport); pfnum = PCI_FUNC(dev->pdev->devfn); - + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + attrs.phys.port_number = pfnum; + memcpy(attrs.switch_id.id, &ppid.id[0], ppid.id_len); + attrs.switch_id.id_len = ppid.id_len; if (rep->vport == MLX5_VPORT_UPLINK) - devlink_port_attrs_set(&rpriv->dl_port, - DEVLINK_PORT_FLAVOUR_PHYSICAL, - pfnum, false, 0, - &ppid.id[0], ppid.id_len); + devlink_port_attrs_set(&rpriv->dl_port, &attrs); else if (rep->vport == MLX5_VPORT_PF) - devlink_port_attrs_pci_pf_set(&rpriv->dl_port, - &ppid.id[0], ppid.id_len, - pfnum); + devlink_port_attrs_pci_pf_set(&rpriv->dl_port, pfnum); else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport)) devlink_port_attrs_pci_vf_set(&rpriv->dl_port, - &ppid.id[0], ppid.id_len, pfnum, rep->vport - 1); return devlink_port_register(devlink, &rpriv->dl_port, dl_port_index); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index e9ccd333f61d..bbe7358d4ea5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -2129,12 +2129,17 @@ static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, struct mlxsw_core_port *mlxsw_core_port = &mlxsw_core->ports[local_port]; struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; + struct devlink_port_attrs attrs = {}; int err; + attrs.split = split; + attrs.flavour = flavour; + attrs.phys.port_number = port_number; + attrs.phys.split_subport_number = split_port_subnumber; + memcpy(attrs.switch_id.id, switch_id, switch_id_len); + attrs.switch_id.id_len = switch_id_len; mlxsw_core_port->local_port = local_port; - devlink_port_attrs_set(devlink_port, flavour, port_number, - split, split_port_subnumber, - switch_id, switch_id_len); + devlink_port_attrs_set(devlink_port, &attrs); err = devlink_port_register(devlink, devlink_port, local_port); if (err) memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port)); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c index 07dbf4d72227..71f4e624b3db 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c @@ -353,6 +353,7 @@ const struct devlink_ops nfp_devlink_ops = { int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port) { + struct devlink_port_attrs attrs = {}; struct nfp_eth_table_port eth_port; struct devlink *devlink; const u8 *serial; @@ -365,10 +366,14 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port) if (ret) return ret; + attrs.split = 
eth_port.is_split; + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + attrs.phys.port_number = eth_port.label_port; + attrs.phys.split_subport_number = eth_port.label_subport; serial_len = nfp_cpp_serial(port->app->cpp, &serial); - devlink_port_attrs_set(&port->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, - eth_port.label_port, eth_port.is_split, - eth_port.label_subport, serial, serial_len); + memcpy(attrs.switch_id.id, serial, serial_len); + attrs.switch_id.id_len = serial_len; + devlink_port_attrs_set(&port->dl_port, &attrs); devlink = priv_to_devlink(app->pf); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c index 2d590e571133..c4f4fd469fe3 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c @@ -69,6 +69,7 @@ void ionic_devlink_free(struct ionic *ionic) int ionic_devlink_register(struct ionic *ionic) { struct devlink *dl = priv_to_devlink(ionic); + struct devlink_port_attrs attrs = {}; int err; err = devlink_register(dl, ionic->dev); @@ -77,8 +78,8 @@ int ionic_devlink_register(struct ionic *ionic) return err; } - devlink_port_attrs_set(&ionic->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, - 0, false, 0, NULL, 0); + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + devlink_port_attrs_set(&ionic->dl_port, &attrs); err = devlink_port_register(dl, &ionic->dl_port, 0); if (err) dev_err(ionic->dev, "devlink_port_register failed: %d\n", err); diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index ec6b6f7818ac..0dc2c66a5d56 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -889,6 +889,7 @@ static const struct devlink_ops nsim_dev_devlink_ops = { static int __nsim_dev_port_add(struct nsim_dev *nsim_dev, unsigned int port_index) { + struct devlink_port_attrs attrs = {}; struct nsim_dev_port *nsim_dev_port; struct devlink_port *devlink_port; int err; @@ -899,10 +900,11 @@ static int __nsim_dev_port_add(struct nsim_dev *nsim_dev, nsim_dev_port->port_index = port_index; devlink_port = &nsim_dev_port->devlink_port; - devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, - port_index + 1, 0, 0, - nsim_dev->switch_id.id, - nsim_dev->switch_id.id_len); + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + attrs.phys.port_number = port_index + 1; + memcpy(attrs.switch_id.id, nsim_dev->switch_id.id, nsim_dev->switch_id.id_len); + attrs.switch_id.id_len = nsim_dev->switch_id.id_len; + devlink_port_attrs_set(devlink_port, &attrs); err = devlink_port_register(priv_to_devlink(nsim_dev), devlink_port, port_index); if (err) diff --git a/include/net/devlink.h b/include/net/devlink.h index de4b5dcdb4a5..8f9db991192d 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -52,7 +52,7 @@ struct devlink_port_phys_attrs { * A physical port which is visible to the user * for a given port flavour. */ - u32 split_subport_number; + u32 split_subport_number; /* If the port is split, this is the number of subport. */ }; struct devlink_port_pci_pf_attrs { @@ -64,6 +64,12 @@ struct devlink_port_pci_vf_attrs { u16 vf; /* Associated PCI VF for of the PCI PF for this port. 
*/ }; +/** + * struct devlink_port_attrs - devlink port object + * @flavour: flavour of the port + * @split: indicates if this is split port + * @switch_id: if the port is part of switch, this is buffer with ID, otherwise this is NULL + */ struct devlink_port_attrs { u8 split:1; enum devlink_port_flavour flavour; @@ -1180,17 +1186,9 @@ void devlink_port_type_ib_set(struct devlink_port *devlink_port, struct ib_device *ibdev); void devlink_port_type_clear(struct devlink_port *devlink_port); void devlink_port_attrs_set(struct devlink_port *devlink_port, - enum devlink_port_flavour flavour, - u32 port_number, bool split, - u32 split_subport_number, - const unsigned char *switch_id, - unsigned char switch_id_len); -void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, - const unsigned char *switch_id, - unsigned char switch_id_len, u16 pf); + struct devlink_port_attrs *devlink_port_attrs); +void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u16 pf); void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, - const unsigned char *switch_id, - unsigned char switch_id_len, u16 pf, u16 vf); int devlink_sb_register(struct devlink *devlink, unsigned int sb_index, u32 size, u16 ingress_pools_count, diff --git a/net/core/devlink.c b/net/core/devlink.c index 452b2f8a054e..266936c38357 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -7510,9 +7510,7 @@ void devlink_port_type_clear(struct devlink_port *devlink_port) EXPORT_SYMBOL_GPL(devlink_port_type_clear); static int __devlink_port_attrs_set(struct devlink_port *devlink_port, - enum devlink_port_flavour flavour, - const unsigned char *switch_id, - unsigned char switch_id_len) + enum devlink_port_flavour flavour) { struct devlink_port_attrs *attrs = &devlink_port->attrs; @@ -7520,12 +7518,10 @@ static int __devlink_port_attrs_set(struct devlink_port *devlink_port, return -EEXIST; devlink_port->attrs_set = true; attrs->flavour = flavour; - if (switch_id) { + if (attrs->switch_id.id_len) { devlink_port->switch_port = true; - if (WARN_ON(switch_id_len > MAX_PHYS_ITEM_ID_LEN)) - switch_id_len = MAX_PHYS_ITEM_ID_LEN; - memcpy(attrs->switch_id.id, switch_id, switch_id_len); - attrs->switch_id.id_len = switch_id_len; + if (WARN_ON(attrs->switch_id.id_len > MAX_PHYS_ITEM_ID_LEN)) + attrs->switch_id.id_len = MAX_PHYS_ITEM_ID_LEN; } else { devlink_port->switch_port = false; } @@ -7536,33 +7532,17 @@ static int __devlink_port_attrs_set(struct devlink_port *devlink_port, * devlink_port_attrs_set - Set port attributes * * @devlink_port: devlink port - * @flavour: flavour of the port - * @port_number: number of the port that is facing user, for example - * the front panel port number - * @split: indicates if this is split port - * @split_subport_number: if the port is split, this is the number - * of subport. 
- * @switch_id: if the port is part of switch, this is buffer with ID, - * otwerwise this is NULL - * @switch_id_len: length of the switch_id buffer + * @attrs: devlink port attrs */ void devlink_port_attrs_set(struct devlink_port *devlink_port, - enum devlink_port_flavour flavour, - u32 port_number, bool split, - u32 split_subport_number, - const unsigned char *switch_id, - unsigned char switch_id_len) + struct devlink_port_attrs *attrs) { - struct devlink_port_attrs *attrs = &devlink_port->attrs; int ret; - ret = __devlink_port_attrs_set(devlink_port, flavour, - switch_id, switch_id_len); + devlink_port->attrs = *attrs; + ret = __devlink_port_attrs_set(devlink_port, attrs->flavour); if (ret) return; - attrs->split = split; - attrs->phys.port_number = port_number; - attrs->phys.split_subport_number = split_subport_number; } EXPORT_SYMBOL_GPL(devlink_port_attrs_set); @@ -7571,20 +7551,14 @@ EXPORT_SYMBOL_GPL(devlink_port_attrs_set); * * @devlink_port: devlink port * @pf: associated PF for the devlink port instance - * @switch_id: if the port is part of switch, this is buffer with ID, - * otherwise this is NULL - * @switch_id_len: length of the switch_id buffer */ -void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, - const unsigned char *switch_id, - unsigned char switch_id_len, u16 pf) +void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u16 pf) { struct devlink_port_attrs *attrs = &devlink_port->attrs; int ret; ret = __devlink_port_attrs_set(devlink_port, - DEVLINK_PORT_FLAVOUR_PCI_PF, - switch_id, switch_id_len); + DEVLINK_PORT_FLAVOUR_PCI_PF); if (ret) return; @@ -7598,21 +7572,15 @@ EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_pf_set); * @devlink_port: devlink port * @pf: associated PF for the devlink port instance * @vf: associated VF of a PF for the devlink port instance - * @switch_id: if the port is part of switch, this is buffer with ID, - * otherwise this is NULL - * @switch_id_len: length of the switch_id buffer */ void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, - const unsigned char *switch_id, - unsigned char switch_id_len, u16 pf, u16 vf) { struct devlink_port_attrs *attrs = &devlink_port->attrs; int ret; ret = __devlink_port_attrs_set(devlink_port, - DEVLINK_PORT_FLAVOUR_PCI_VF, - switch_id, switch_id_len); + DEVLINK_PORT_FLAVOUR_PCI_VF); if (ret) return; attrs->pci_vf.pf = pf; diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 076908fdd29b..e055efff390b 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -261,10 +261,15 @@ static int dsa_port_setup(struct dsa_port *dp) struct devlink_port *dlp = &dp->devlink_port; bool dsa_port_link_registered = false; bool devlink_port_registered = false; + struct devlink_port_attrs attrs = {}; struct devlink *dl = ds->devlink; bool dsa_port_enabled = false; int err = 0; + attrs.phys.port_number = dp->index; + memcpy(attrs.switch_id.id, id, len); + attrs.switch_id.id_len = len; + if (dp->setup) return 0; @@ -274,8 +279,8 @@ static int dsa_port_setup(struct dsa_port *dp) break; case DSA_PORT_TYPE_CPU: memset(dlp, 0, sizeof(*dlp)); - devlink_port_attrs_set(dlp, DEVLINK_PORT_FLAVOUR_CPU, - dp->index, false, 0, id, len); + attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU; + devlink_port_attrs_set(dlp, &attrs); err = devlink_port_register(dl, dlp, dp->index); if (err) break; @@ -294,8 +299,8 @@ static int dsa_port_setup(struct dsa_port *dp) break; case DSA_PORT_TYPE_DSA: memset(dlp, 0, sizeof(*dlp)); - devlink_port_attrs_set(dlp, DEVLINK_PORT_FLAVOUR_DSA, - dp->index, false, 0, id, len); + 
attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA; + devlink_port_attrs_set(dlp, &attrs); err = devlink_port_register(dl, dlp, dp->index); if (err) break; @@ -314,8 +319,8 @@ static int dsa_port_setup(struct dsa_port *dp) break; case DSA_PORT_TYPE_USER: memset(dlp, 0, sizeof(*dlp)); - devlink_port_attrs_set(dlp, DEVLINK_PORT_FLAVOUR_PHYSICAL, - dp->index, false, 0, id, len); + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + devlink_port_attrs_set(dlp, &attrs); err = devlink_port_register(dl, dlp, dp->index); if (err) break; -- cgit v1.2.3 From a21cf0a8330bba60e44ca6c99e1591042f336ff5 Mon Sep 17 00:00:00 2001 From: Danielle Ratson Date: Thu, 9 Jul 2020 16:18:18 +0300 Subject: devlink: Add a new devlink port lanes attribute and pass to netlink Add a new devlink port attribute that indicates the port's number of lanes. Drivers are expected to set it via devlink_port_attrs_set(), before registering the port. The attribute is not passed to user space in case the number of lanes is invalid (0). Signed-off-by: Danielle Ratson Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 1 + include/net/devlink.h | 2 ++ include/uapi/linux/devlink.h | 2 ++ net/core/devlink.c | 4 ++++ 4 files changed, 9 insertions(+) (limited to 'include') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index f44cb1a537f3..6cde196f6b70 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -2134,6 +2134,7 @@ static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, int err; attrs.split = split; + attrs.lanes = lanes; attrs.flavour = flavour; attrs.phys.port_number = port_number; attrs.phys.split_subport_number = split_port_subnumber; diff --git a/include/net/devlink.h b/include/net/devlink.h index 8f9db991192d..91a9f8770d08 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -68,10 +68,12 @@ struct devlink_port_pci_vf_attrs { * struct devlink_port_attrs - devlink port object * @flavour: flavour of the port * @split: indicates if this is split port + * @lanes: maximum number of lanes the port supports. 0 value is not passed to netlink. 
* @switch_id: if the port is part of switch, this is buffer with ID, otherwise this is NULL */ struct devlink_port_attrs { u8 split:1; + u32 lanes; enum devlink_port_flavour flavour; struct netdev_phys_item_id switch_id; union { diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 87c83a82991b..f741ab8d9cf0 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -455,6 +455,8 @@ enum devlink_attr { DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER, /* string */ + DEVLINK_ATTR_PORT_LANES, /* u32 */ + /* add new attributes above here, update the policy in devlink.c */ __DEVLINK_ATTR_MAX, diff --git a/net/core/devlink.c b/net/core/devlink.c index 266936c38357..7f26d1054974 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -530,6 +530,10 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg, if (!devlink_port->attrs_set) return 0; + if (attrs->lanes) { + if (nla_put_u32(msg, DEVLINK_ATTR_PORT_LANES, attrs->lanes)) + return -EMSGSIZE; + } if (nla_put_u16(msg, DEVLINK_ATTR_PORT_FLAVOUR, attrs->flavour)) return -EMSGSIZE; switch (devlink_port->attrs.flavour) { -- cgit v1.2.3 From a0f49b54865273c895be3826d6d59cbc5ad725c2 Mon Sep 17 00:00:00 2001 From: Danielle Ratson Date: Thu, 9 Jul 2020 16:18:20 +0300 Subject: devlink: Add a new devlink port split ability attribute and pass to netlink Add a new attribute that indicates the split ability of devlink port. Drivers are expected to set it via devlink_port_attrs_set(), before registering the port. Signed-off-by: Danielle Ratson Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 1 + drivers/net/ethernet/netronome/nfp/nfp_devlink.c | 1 + include/net/devlink.h | 4 +++- include/uapi/linux/devlink.h | 1 + net/core/devlink.c | 3 +++ 5 files changed, 9 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index f85f5d88d331..8b3791d73c99 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -2135,6 +2135,7 @@ static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, attrs.split = split; attrs.lanes = lanes; + attrs.splittable = splittable; attrs.flavour = flavour; attrs.phys.port_number = port_number; attrs.phys.split_subport_number = split_port_subnumber; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c index 71f4e624b3db..b6a10565309a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c @@ -367,6 +367,7 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port) return ret; attrs.split = eth_port.is_split; + attrs.splittable = !attrs.split; attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; attrs.phys.port_number = eth_port.label_port; attrs.phys.split_subport_number = eth_port.label_subport; diff --git a/include/net/devlink.h b/include/net/devlink.h index 91a9f8770d08..746bed538664 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -68,11 +68,13 @@ struct devlink_port_pci_vf_attrs { * struct devlink_port_attrs - devlink port object * @flavour: flavour of the port * @split: indicates if this is split port + * @splittable: indicates if the port can be split. * @lanes: maximum number of lanes the port supports. 0 value is not passed to netlink. 
* @switch_id: if the port is part of switch, this is buffer with ID, otherwise this is NULL */ struct devlink_port_attrs { - u8 split:1; + u8 split:1, + splittable:1; u32 lanes; enum devlink_port_flavour flavour; struct netdev_phys_item_id switch_id; diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index f741ab8d9cf0..cfef4245ea5a 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -456,6 +456,7 @@ enum devlink_attr { DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER, /* string */ DEVLINK_ATTR_PORT_LANES, /* u32 */ + DEVLINK_ATTR_PORT_SPLITTABLE, /* u8 */ /* add new attributes above here, update the policy in devlink.c */ diff --git a/net/core/devlink.c b/net/core/devlink.c index 7f26d1054974..94c797b74378 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -534,6 +534,8 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg, if (nla_put_u32(msg, DEVLINK_ATTR_PORT_LANES, attrs->lanes)) return -EMSGSIZE; } + if (nla_put_u8(msg, DEVLINK_ATTR_PORT_SPLITTABLE, attrs->splittable)) + return -EMSGSIZE; if (nla_put_u16(msg, DEVLINK_ATTR_PORT_FLAVOUR, attrs->flavour)) return -EMSGSIZE; switch (devlink_port->attrs.flavour) { @@ -7547,6 +7549,7 @@ void devlink_port_attrs_set(struct devlink_port *devlink_port, ret = __devlink_port_attrs_set(devlink_port, attrs->flavour); if (ret) return; + WARN_ON(attrs->splittable && attrs->split); } EXPORT_SYMBOL_GPL(devlink_port_attrs_set); -- cgit v1.2.3 From 60a883d119ab9ef63f830c85bbd2f0e2e2314f4f Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Thu, 9 Jul 2020 08:50:07 +0200 Subject: spi: use kthread_create_worker() helper Use kthread_create_worker() helper to simplify the code. It uses the kthread worker API the right way. It will eventually allow to remove the FIXME in kthread_worker_fn() and add more consistency checks in the future. 
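For reference, the pattern the queue code is converted to looks roughly like the sketch below. The names (demo_ctx, demo_pump, demo_init, demo_exit) are illustrative only and are not part of the patch; the kthread worker calls themselves (kthread_create_worker(), kthread_init_work(), kthread_queue_work(), kthread_destroy_worker()) are the ones the diff switches to.

	#include <linux/kthread.h>
	#include <linux/err.h>

	struct demo_ctx {
		struct kthread_worker *kworker;
		struct kthread_work pump;
	};

	static void demo_pump(struct kthread_work *work)
	{
		/* message pump body runs in the worker's kthread */
	}

	static int demo_init(struct demo_ctx *ctx, const char *name)
	{
		/* creates the worker and its backing task in one step */
		ctx->kworker = kthread_create_worker(0, "%s", name);
		if (IS_ERR(ctx->kworker))
			return PTR_ERR(ctx->kworker);

		kthread_init_work(&ctx->pump, demo_pump);
		kthread_queue_work(ctx->kworker, &ctx->pump);
		return 0;
	}

	static void demo_exit(struct demo_ctx *ctx)
	{
		/*
		 * flushes pending work and stops the task; replaces the
		 * old kthread_flush_worker() + kthread_stop() pair
		 */
		kthread_destroy_worker(ctx->kworker);
	}

With this shape the controller only keeps a struct kthread_worker pointer, so the separate kworker_task pointer can be dropped, as the diff below shows.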
Signed-off-by: Marek Szyprowski Reviewed-by: Petr Mladek Link: https://lore.kernel.org/r/20200709065007.26896-1-m.szyprowski@samsung.com Signed-off-by: Mark Brown --- drivers/spi/spi.c | 26 ++++++++++++-------------- include/linux/spi/spi.h | 6 ++---- 2 files changed, 14 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index d4ba723a30da..1d7bba434225 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -1368,7 +1368,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread) /* If another context is idling the device then defer */ if (ctlr->idling) { - kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); + kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); spin_unlock_irqrestore(&ctlr->queue_lock, flags); return; } @@ -1382,7 +1382,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread) /* Only do teardown in the thread */ if (!in_kthread) { - kthread_queue_work(&ctlr->kworker, + kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); spin_unlock_irqrestore(&ctlr->queue_lock, flags); return; @@ -1618,7 +1618,7 @@ static void spi_set_thread_rt(struct spi_controller *ctlr) dev_info(&ctlr->dev, "will run message pump with realtime priority\n"); - sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, ¶m); + sched_setscheduler(ctlr->kworker->task, SCHED_FIFO, ¶m); } static int spi_init_queue(struct spi_controller *ctlr) @@ -1626,13 +1626,12 @@ static int spi_init_queue(struct spi_controller *ctlr) ctlr->running = false; ctlr->busy = false; - kthread_init_worker(&ctlr->kworker); - ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker, - "%s", dev_name(&ctlr->dev)); - if (IS_ERR(ctlr->kworker_task)) { - dev_err(&ctlr->dev, "failed to create message pump task\n"); - return PTR_ERR(ctlr->kworker_task); + ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev)); + if (IS_ERR(ctlr->kworker)) { + dev_err(&ctlr->dev, "failed to create message pump kworker\n"); + return PTR_ERR(ctlr->kworker); } + kthread_init_work(&ctlr->pump_messages, spi_pump_messages); /* @@ -1716,7 +1715,7 @@ void spi_finalize_current_message(struct spi_controller *ctlr) ctlr->cur_msg = NULL; ctlr->cur_msg_prepared = false; ctlr->fallback = false; - kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); + kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); spin_unlock_irqrestore(&ctlr->queue_lock, flags); trace_spi_message_done(mesg); @@ -1742,7 +1741,7 @@ static int spi_start_queue(struct spi_controller *ctlr) ctlr->cur_msg = NULL; spin_unlock_irqrestore(&ctlr->queue_lock, flags); - kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); + kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); return 0; } @@ -1798,8 +1797,7 @@ static int spi_destroy_queue(struct spi_controller *ctlr) return ret; } - kthread_flush_worker(&ctlr->kworker); - kthread_stop(ctlr->kworker_task); + kthread_destroy_worker(ctlr->kworker); return 0; } @@ -1822,7 +1820,7 @@ static int __spi_queued_transfer(struct spi_device *spi, list_add_tail(&msg->queue, &ctlr->queue); if (!ctlr->busy && need_pump) - kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); + kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); spin_unlock_irqrestore(&ctlr->queue_lock, flags); return 0; diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 0e67a9a3a1d3..5fcf5da13fdb 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -358,8 +358,7 @@ static inline void spi_unregister_driver(struct 
spi_driver *sdrv) * @cleanup: frees controller-specific state * @can_dma: determine whether this controller supports DMA * @queued: whether this controller is providing an internal message queue - * @kworker: thread struct for message pump - * @kworker_task: pointer to task for message pump kworker thread + * @kworker: pointer to thread struct for message pump * @pump_messages: work struct for scheduling work to the message pump * @queue_lock: spinlock to syncronise access to message queue * @queue: message queue @@ -593,8 +592,7 @@ struct spi_controller { * Over time we expect SPI drivers to be phased over to this API. */ bool queued; - struct kthread_worker kworker; - struct task_struct *kworker_task; + struct kthread_worker *kworker; struct kthread_work pump_messages; spinlock_t queue_lock; struct list_head queue; -- cgit v1.2.3 From 2575b2f3ee711f4638e772e07a5146afcc704f30 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Wed, 8 Jul 2020 15:59:30 +0800 Subject: PCI: Move PCI_VENDOR_ID_REDHAT definition to pci_ids.h Instead of duplicating the PCI_VENDOR_ID_REDHAT definition everywhere, move it to include/linux/pci_ids.h. [bhelgaas: also update MDPY_PCI_VENDOR_ID] Link: https://lore.kernel.org/r/1594195170-11119-1-git-send-email-chenhc@lemote.com Signed-off-by: Huacai Chen Signed-off-by: Bjorn Helgaas Acked-by: Gerd Hoffmann --- drivers/gpu/drm/qxl/qxl_dev.h | 2 -- drivers/net/ethernet/rocker/rocker_hw.h | 1 - include/linux/pci_ids.h | 2 ++ samples/vfio-mdev/mdpy-defs.h | 2 +- 4 files changed, 3 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/qxl/qxl_dev.h b/drivers/gpu/drm/qxl/qxl_dev.h index a0ee41632d7e..a7bc31f6d565 100644 --- a/drivers/gpu/drm/qxl/qxl_dev.h +++ b/drivers/gpu/drm/qxl/qxl_dev.h @@ -131,8 +131,6 @@ enum SpiceCursorType { #pragma pack(push, 1) -#define REDHAT_PCI_VENDOR_ID 0x1b36 - /* 0x100-0x11f reserved for spice, 0x1ff used for unstable work */ #define QXL_DEVICE_ID_STABLE 0x0100 diff --git a/drivers/net/ethernet/rocker/rocker_hw.h b/drivers/net/ethernet/rocker/rocker_hw.h index 59f1f8b690d2..62fd84cf3435 100644 --- a/drivers/net/ethernet/rocker/rocker_hw.h +++ b/drivers/net/ethernet/rocker/rocker_hw.h @@ -25,7 +25,6 @@ enum { #define ROCKER_FP_PORTS_MAX 62 -#define PCI_VENDOR_ID_REDHAT 0x1b36 #define PCI_DEVICE_ID_REDHAT_ROCKER 0x0006 #define ROCKER_PCI_BAR0_SIZE 0x2000 diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 0ad57693f392..5c709a1450b1 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2585,6 +2585,8 @@ #define PCI_VENDOR_ID_ASMEDIA 0x1b21 +#define PCI_VENDOR_ID_REDHAT 0x1b36 + #define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS 0x1c36 #define PCI_VENDOR_ID_CIRCUITCO 0x1cc8 diff --git a/samples/vfio-mdev/mdpy-defs.h b/samples/vfio-mdev/mdpy-defs.h index eb26421b6429..961c55ec3ffd 100644 --- a/samples/vfio-mdev/mdpy-defs.h +++ b/samples/vfio-mdev/mdpy-defs.h @@ -9,7 +9,7 @@ */ /* pci ids */ -#define MDPY_PCI_VENDOR_ID 0x1b36 /* redhat */ +#define MDPY_PCI_VENDOR_ID PCI_VENDOR_ID_REDHAT #define MDPY_PCI_DEVICE_ID 0x000f #define MDPY_PCI_SUBVENDOR_ID PCI_SUBVENDOR_ID_REDHAT_QUMRANET #define MDPY_PCI_SUBDEVICE_ID PCI_SUBDEVICE_ID_QEMU -- cgit v1.2.3 From ba1f2b2eaa2a529dba722507c55ff3d761d325dd Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 27 May 2020 15:50:29 +0200 Subject: x86/entry: Fix NMI vs IRQ state tracking While the nmi_enter() users did trace_hardirqs_{off_prepare,on_finish}() there was no matching lockdep_hardirqs_*() calls to complete the picture. 
Introduce idtentry_{enter,exit}_nmi() to enable proper IRQ state tracking across the NMIs. Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Ingo Molnar Link: https://lkml.kernel.org/r/20200623083721.216740948@infradead.org --- arch/x86/entry/common.c | 42 +++++++++++++++++++++++++++++++++++++---- arch/x86/include/asm/idtentry.h | 3 +++ arch/x86/kernel/nmi.c | 9 ++++----- arch/x86/kernel/traps.c | 17 ++++++----------- include/linux/hardirq.h | 28 ++++++++++++++++++--------- 5 files changed, 70 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 0521546022cb..63c607dd6c52 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -592,7 +592,7 @@ SYSCALL_DEFINE0(ni_syscall) * The return value must be fed into the state argument of * idtentry_exit(). */ -idtentry_state_t noinstr idtentry_enter(struct pt_regs *regs) +noinstr idtentry_state_t idtentry_enter(struct pt_regs *regs) { idtentry_state_t ret = { .exit_rcu = false, @@ -687,7 +687,7 @@ static void idtentry_exit_cond_resched(struct pt_regs *regs, bool may_sched) * Counterpart to idtentry_enter(). The return value of the entry * function must be fed into the @state argument. */ -void noinstr idtentry_exit(struct pt_regs *regs, idtentry_state_t state) +noinstr void idtentry_exit(struct pt_regs *regs, idtentry_state_t state) { lockdep_assert_irqs_disabled(); @@ -731,7 +731,7 @@ void noinstr idtentry_exit(struct pt_regs *regs, idtentry_state_t state) * Invokes enter_from_user_mode() to establish the proper context for * NOHZ_FULL. Otherwise scheduling on exit would not be possible. */ -void noinstr idtentry_enter_user(struct pt_regs *regs) +noinstr void idtentry_enter_user(struct pt_regs *regs) { check_user_regs(regs); enter_from_user_mode(); @@ -749,13 +749,47 @@ void noinstr idtentry_enter_user(struct pt_regs *regs) * * Counterpart to idtentry_enter_user(). 
*/ -void noinstr idtentry_exit_user(struct pt_regs *regs) +noinstr void idtentry_exit_user(struct pt_regs *regs) { lockdep_assert_irqs_disabled(); prepare_exit_to_usermode(regs); } +noinstr bool idtentry_enter_nmi(struct pt_regs *regs) +{ + bool irq_state = lockdep_hardirqs_enabled(current); + + __nmi_enter(); + lockdep_hardirqs_off(CALLER_ADDR0); + lockdep_hardirq_enter(); + rcu_nmi_enter(); + + instrumentation_begin(); + trace_hardirqs_off_finish(); + ftrace_nmi_enter(); + instrumentation_end(); + + return irq_state; +} + +noinstr void idtentry_exit_nmi(struct pt_regs *regs, bool restore) +{ + instrumentation_begin(); + ftrace_nmi_exit(); + if (restore) { + trace_hardirqs_on_prepare(); + lockdep_hardirqs_on_prepare(CALLER_ADDR0); + } + instrumentation_end(); + + rcu_nmi_exit(); + lockdep_hardirq_exit(); + if (restore) + lockdep_hardirqs_on(CALLER_ADDR0); + __nmi_exit(); +} + #ifdef CONFIG_XEN_PV #ifndef CONFIG_PREEMPTION /* diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h index 7227225cf45d..2b0497486525 100644 --- a/arch/x86/include/asm/idtentry.h +++ b/arch/x86/include/asm/idtentry.h @@ -20,6 +20,9 @@ typedef struct idtentry_state { idtentry_state_t idtentry_enter(struct pt_regs *regs); void idtentry_exit(struct pt_regs *regs, idtentry_state_t state); +bool idtentry_enter_nmi(struct pt_regs *regs); +void idtentry_exit_nmi(struct pt_regs *regs, bool irq_state); + /** * DECLARE_IDTENTRY - Declare functions for simple IDT entry points * No error code pushed by hardware diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index d7c5e44b26f7..4fc9954a9560 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -330,7 +330,6 @@ static noinstr void default_do_nmi(struct pt_regs *regs) __this_cpu_write(last_nmi_rip, regs->ip); instrumentation_begin(); - trace_hardirqs_off_finish(); handled = nmi_handle(NMI_LOCAL, regs); __this_cpu_add(nmi_stats.normal, handled); @@ -417,8 +416,6 @@ static noinstr void default_do_nmi(struct pt_regs *regs) unknown_nmi_error(reason, regs); out: - if (regs->flags & X86_EFLAGS_IF) - trace_hardirqs_on_prepare(); instrumentation_end(); } @@ -478,6 +475,8 @@ static DEFINE_PER_CPU(unsigned long, nmi_dr7); DEFINE_IDTENTRY_RAW(exc_nmi) { + bool irq_state; + if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id())) return; @@ -491,14 +490,14 @@ nmi_restart: this_cpu_write(nmi_dr7, local_db_save()); - nmi_enter(); + irq_state = idtentry_enter_nmi(regs); inc_irq_stat(__nmi_count); if (!ignore_nmis) default_do_nmi(regs); - nmi_exit(); + idtentry_exit_nmi(regs, irq_state); local_db_restore(this_cpu_read(nmi_dr7)); diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 4627f826fb57..cdd73829e637 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -403,7 +403,7 @@ DEFINE_IDTENTRY_DF(exc_double_fault) } #endif - nmi_enter(); + idtentry_enter_nmi(regs); instrumentation_begin(); notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV); @@ -649,15 +649,12 @@ DEFINE_IDTENTRY_RAW(exc_int3) instrumentation_end(); idtentry_exit_user(regs); } else { - nmi_enter(); + bool irq_state = idtentry_enter_nmi(regs); instrumentation_begin(); - trace_hardirqs_off_finish(); if (!do_int3(regs)) die("int3", regs, 0); - if (regs->flags & X86_EFLAGS_IF) - trace_hardirqs_on_prepare(); instrumentation_end(); - nmi_exit(); + idtentry_exit_nmi(regs, irq_state); } } @@ -865,9 +862,8 @@ out: static __always_inline void exc_debug_kernel(struct pt_regs *regs, unsigned long dr6) { - nmi_enter(); + bool irq_state 
= idtentry_enter_nmi(regs); instrumentation_begin(); - trace_hardirqs_off_finish(); /* * If something gets miswired and we end up here for a user mode @@ -884,10 +880,8 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs, handle_debug(regs, dr6, false); - if (regs->flags & X86_EFLAGS_IF) - trace_hardirqs_on_prepare(); instrumentation_end(); - nmi_exit(); + idtentry_exit_nmi(regs, irq_state); } static __always_inline void exc_debug_user(struct pt_regs *regs, @@ -903,6 +897,7 @@ static __always_inline void exc_debug_user(struct pt_regs *regs, instrumentation_begin(); handle_debug(regs, dr6, true); + instrumentation_end(); idtentry_exit_user(regs); } diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index 03c9fece7d43..754f67ac4326 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -111,32 +111,42 @@ extern void rcu_nmi_exit(void); /* * nmi_enter() can nest up to 15 times; see NMI_BITS. */ -#define nmi_enter() \ +#define __nmi_enter() \ do { \ + lockdep_off(); \ arch_nmi_enter(); \ printk_nmi_enter(); \ - lockdep_off(); \ BUG_ON(in_nmi() == NMI_MASK); \ __preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \ - rcu_nmi_enter(); \ + } while (0) + +#define nmi_enter() \ + do { \ + __nmi_enter(); \ lockdep_hardirq_enter(); \ + rcu_nmi_enter(); \ instrumentation_begin(); \ ftrace_nmi_enter(); \ instrumentation_end(); \ } while (0) +#define __nmi_exit() \ + do { \ + BUG_ON(!in_nmi()); \ + __preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \ + printk_nmi_exit(); \ + arch_nmi_exit(); \ + lockdep_on(); \ + } while (0) + #define nmi_exit() \ do { \ instrumentation_begin(); \ ftrace_nmi_exit(); \ instrumentation_end(); \ - lockdep_hardirq_exit(); \ rcu_nmi_exit(); \ - BUG_ON(!in_nmi()); \ - __preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \ - lockdep_on(); \ - printk_nmi_exit(); \ - arch_nmi_exit(); \ + lockdep_hardirq_exit(); \ + __nmi_exit(); \ } while (0) #endif /* LINUX_HARDIRQ_H */ -- cgit v1.2.3 From a21ee6055c30ce68c4e201c6496f0ed2a1936230 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 25 May 2020 12:22:41 +0200 Subject: lockdep: Change hardirq{s_enabled,_context} to per-cpu variables Currently all IRQ-tracking state is in task_struct, this means that task_struct needs to be defined before we use it. Especially for lockdep_assert_irq*() this can lead to header-hell. Move the hardirq state into per-cpu variables to avoid the task_struct dependency. 
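For illustration only (not part of the patch): a minimal sketch of the per-cpu pattern this change switches to. The per-cpu accessors (DEFINE_PER_CPU, this_cpu_read, this_cpu_inc_return, this_cpu_dec) are the real kernel helpers; the example_* names are made up for the sketch.

#include <linux/percpu.h>

/* One IRQ-state flag and one nesting counter per CPU, with no
 * dependency on task_struct or on current. */
static DEFINE_PER_CPU(int, example_hardirqs_enabled);
static DEFINE_PER_CPU(int, example_hardirq_context);

static inline bool example_irqs_enabled(void)
{
        /* Reads the flag of the CPU we are currently running on. */
        return this_cpu_read(example_hardirqs_enabled);
}

static inline bool example_hardirq_enter(void)
{
        /* this_cpu_inc_return() hands back the new count, so "== 1" means
         * this is the outermost hardirq on this CPU; the real
         * lockdep_hardirq_enter() resets per-task state only in that case. */
        return this_cpu_inc_return(example_hardirq_context) == 1;
}

static inline void example_hardirq_exit(void)
{
        this_cpu_dec(example_hardirq_context);
}

Because the state lives in per-cpu storage, assertions such as lockdep_assert_irqs_enabled() no longer need the task_struct definition, which is what removes the header dependency described above.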
Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Ingo Molnar Link: https://lkml.kernel.org/r/20200623083721.512673481@infradead.org --- include/linux/irqflags.h | 19 ++++++++++++------- include/linux/lockdep.h | 34 ++++++++++++++++++---------------- include/linux/sched.h | 2 -- kernel/fork.c | 4 +--- kernel/locking/lockdep.c | 30 +++++++++++++++--------------- kernel/softirq.c | 6 ++++++ 6 files changed, 52 insertions(+), 43 deletions(-) (limited to 'include') diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index 6384d2813ded..255444fe4609 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -14,6 +14,7 @@ #include #include +#include /* Currently lockdep_softirqs_on/off is used only by lockdep */ #ifdef CONFIG_PROVE_LOCKING @@ -31,18 +32,22 @@ #endif #ifdef CONFIG_TRACE_IRQFLAGS + +DECLARE_PER_CPU(int, hardirqs_enabled); +DECLARE_PER_CPU(int, hardirq_context); + extern void trace_hardirqs_on_prepare(void); extern void trace_hardirqs_off_finish(void); extern void trace_hardirqs_on(void); extern void trace_hardirqs_off(void); -# define lockdep_hardirq_context(p) ((p)->hardirq_context) +# define lockdep_hardirq_context(p) (this_cpu_read(hardirq_context)) # define lockdep_softirq_context(p) ((p)->softirq_context) -# define lockdep_hardirqs_enabled(p) ((p)->hardirqs_enabled) +# define lockdep_hardirqs_enabled(p) (this_cpu_read(hardirqs_enabled)) # define lockdep_softirqs_enabled(p) ((p)->softirqs_enabled) -# define lockdep_hardirq_enter() \ -do { \ - if (!current->hardirq_context++) \ - current->hardirq_threaded = 0; \ +# define lockdep_hardirq_enter() \ +do { \ + if (this_cpu_inc_return(hardirq_context) == 1) \ + current->hardirq_threaded = 0; \ } while (0) # define lockdep_hardirq_threaded() \ do { \ @@ -50,7 +55,7 @@ do { \ } while (0) # define lockdep_hardirq_exit() \ do { \ - current->hardirq_context--; \ + this_cpu_dec(hardirq_context); \ } while (0) # define lockdep_softirq_enter() \ do { \ diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 3b73cf84f77d..be6cb17a8879 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -11,6 +11,7 @@ #define __LINUX_LOCKDEP_H #include +#include struct task_struct; @@ -529,28 +530,29 @@ do { \ lock_release(&(lock)->dep_map, _THIS_IP_); \ } while (0) -#define lockdep_assert_irqs_enabled() do { \ - WARN_ONCE(debug_locks && !current->lockdep_recursion && \ - !current->hardirqs_enabled, \ - "IRQs not enabled as expected\n"); \ - } while (0) +DECLARE_PER_CPU(int, hardirqs_enabled); +DECLARE_PER_CPU(int, hardirq_context); -#define lockdep_assert_irqs_disabled() do { \ - WARN_ONCE(debug_locks && !current->lockdep_recursion && \ - current->hardirqs_enabled, \ - "IRQs not disabled as expected\n"); \ - } while (0) +#define lockdep_assert_irqs_enabled() \ +do { \ + WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirqs_enabled)); \ +} while (0) -#define lockdep_assert_in_irq() do { \ - WARN_ONCE(debug_locks && !current->lockdep_recursion && \ - !current->hardirq_context, \ - "Not in hardirq as expected\n"); \ - } while (0) +#define lockdep_assert_irqs_disabled() \ +do { \ + WARN_ON_ONCE(debug_locks && this_cpu_read(hardirqs_enabled)); \ +} while (0) + +#define lockdep_assert_in_irq() \ +do { \ + WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirq_context)); \ +} while (0) #else # define might_lock(lock) do { } while (0) # define might_lock_read(lock) do { } while (0) # define might_lock_nested(lock, subclass) do { } while (0) + # define lockdep_assert_irqs_enabled() do { } while (0) # define 
lockdep_assert_irqs_disabled() do { } while (0) # define lockdep_assert_in_irq() do { } while (0) @@ -560,7 +562,7 @@ do { \ # define lockdep_assert_RT_in_threaded_ctx() do { \ WARN_ONCE(debug_locks && !current->lockdep_recursion && \ - current->hardirq_context && \ + lockdep_hardirq_context(current) && \ !(current->hardirq_threaded || current->irq_config), \ "Not in threaded context on PREEMPT_RT as expected\n"); \ } while (0) diff --git a/include/linux/sched.h b/include/linux/sched.h index 692e327d7455..3903a9500926 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -990,8 +990,6 @@ struct task_struct { unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; unsigned int hardirq_disable_event; - int hardirqs_enabled; - int hardirq_context; u64 hardirq_chain_key; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; diff --git a/kernel/fork.c b/kernel/fork.c index efc5493203ae..70d9d0a4de2a 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1954,8 +1954,8 @@ static __latent_entropy struct task_struct *copy_process( rt_mutex_init_task(p); + lockdep_assert_irqs_enabled(); #ifdef CONFIG_PROVE_LOCKING - DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); #endif retval = -EAGAIN; @@ -2036,7 +2036,6 @@ static __latent_entropy struct task_struct *copy_process( #endif #ifdef CONFIG_TRACE_IRQFLAGS p->irq_events = 0; - p->hardirqs_enabled = 0; p->hardirq_enable_ip = 0; p->hardirq_enable_event = 0; p->hardirq_disable_ip = _THIS_IP_; @@ -2046,7 +2045,6 @@ static __latent_entropy struct task_struct *copy_process( p->softirq_enable_event = 0; p->softirq_disable_ip = 0; p->softirq_disable_event = 0; - p->hardirq_context = 0; p->softirq_context = 0; #endif diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index d595623c4b34..ab4ffbe0e9e9 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -2062,9 +2062,9 @@ print_bad_irq_dependency(struct task_struct *curr, pr_warn("-----------------------------------------------------\n"); pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n", curr->comm, task_pid_nr(curr), - curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT, + lockdep_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT, curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT, - curr->hardirqs_enabled, + lockdep_hardirqs_enabled(curr), curr->softirqs_enabled); print_lock(next); @@ -3658,7 +3658,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip) if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK)) return; - if (unlikely(current->hardirqs_enabled)) { + if (unlikely(lockdep_hardirqs_enabled(current))) { /* * Neither irq nor preemption are disabled here * so this is racy by nature but losing one hit @@ -3686,7 +3686,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip) * Can't allow enabling interrupts while in an interrupt handler, * that's general bad form and such. Recursion, limited stack etc.. 
*/ - if (DEBUG_LOCKS_WARN_ON(current->hardirq_context)) + if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context(current))) return; current->hardirq_chain_key = current->curr_chain_key; @@ -3724,7 +3724,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip) if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK)) return; - if (curr->hardirqs_enabled) { + if (lockdep_hardirqs_enabled(curr)) { /* * Neither irq nor preemption are disabled here * so this is racy by nature but losing one hit @@ -3751,7 +3751,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip) skip_checks: /* we'll do an OFF -> ON transition: */ - curr->hardirqs_enabled = 1; + this_cpu_write(hardirqs_enabled, 1); curr->hardirq_enable_ip = ip; curr->hardirq_enable_event = ++curr->irq_events; debug_atomic_inc(hardirqs_on_events); @@ -3783,11 +3783,11 @@ void noinstr lockdep_hardirqs_off(unsigned long ip) if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) return; - if (curr->hardirqs_enabled) { + if (lockdep_hardirqs_enabled(curr)) { /* * We have done an ON -> OFF transition: */ - curr->hardirqs_enabled = 0; + this_cpu_write(hardirqs_enabled, 0); curr->hardirq_disable_ip = ip; curr->hardirq_disable_event = ++curr->irq_events; debug_atomic_inc(hardirqs_off_events); @@ -3832,7 +3832,7 @@ void lockdep_softirqs_on(unsigned long ip) * usage bit for all held locks, if hardirqs are * enabled too: */ - if (curr->hardirqs_enabled) + if (lockdep_hardirqs_enabled(curr)) mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ); lockdep_recursion_finish(); } @@ -3881,7 +3881,7 @@ mark_usage(struct task_struct *curr, struct held_lock *hlock, int check) */ if (!hlock->trylock) { if (hlock->read) { - if (curr->hardirq_context) + if (lockdep_hardirq_context(curr)) if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ_READ)) return 0; @@ -3890,7 +3890,7 @@ mark_usage(struct task_struct *curr, struct held_lock *hlock, int check) LOCK_USED_IN_SOFTIRQ_READ)) return 0; } else { - if (curr->hardirq_context) + if (lockdep_hardirq_context(curr)) if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ)) return 0; if (curr->softirq_context) @@ -3928,7 +3928,7 @@ lock_used: static inline unsigned int task_irq_context(struct task_struct *task) { - return LOCK_CHAIN_HARDIRQ_CONTEXT * !!task->hardirq_context + + return LOCK_CHAIN_HARDIRQ_CONTEXT * !!lockdep_hardirq_context(task) + LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context; } @@ -4021,7 +4021,7 @@ static inline short task_wait_context(struct task_struct *curr) * Set appropriate wait type for the context; for IRQs we have to take * into account force_irqthread as that is implied by PREEMPT_RT. */ - if (curr->hardirq_context) { + if (lockdep_hardirq_context(curr)) { /* * Check if force_irqthreads will run us threaded. 
*/ @@ -4864,11 +4864,11 @@ static void check_flags(unsigned long flags) return; if (irqs_disabled_flags(flags)) { - if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) { + if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled(current))) { printk("possible reason: unannotated irqs-off.\n"); } } else { - if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) { + if (DEBUG_LOCKS_WARN_ON(!lockdep_hardirqs_enabled(current))) { printk("possible reason: unannotated irqs-on.\n"); } } diff --git a/kernel/softirq.c b/kernel/softirq.c index c4201b7f42b1..342c53feaa7a 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -107,6 +107,12 @@ static bool ksoftirqd_running(unsigned long pending) * where hardirqs are disabled legitimately: */ #ifdef CONFIG_TRACE_IRQFLAGS + +DEFINE_PER_CPU(int, hardirqs_enabled); +DEFINE_PER_CPU(int, hardirq_context); +EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled); +EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context); + void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) { unsigned long flags; -- cgit v1.2.3 From f9ad4a5f3f20bee022b1bdde94e5ece6dc0b0edc Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 27 May 2020 13:03:26 +0200 Subject: lockdep: Remove lockdep_hardirq{s_enabled,_context}() argument Now that the macros use per-cpu data, we no longer need the argument. Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Ingo Molnar Link: https://lkml.kernel.org/r/20200623083721.571835311@infradead.org --- arch/x86/entry/common.c | 2 +- include/linux/irqflags.h | 8 ++++---- include/linux/lockdep.h | 2 +- kernel/locking/lockdep.c | 30 +++++++++++++++--------------- kernel/softirq.c | 2 +- tools/include/linux/irqflags.h | 4 ++-- 6 files changed, 24 insertions(+), 24 deletions(-) (limited to 'include') diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 63c607dd6c52..4ea640363f5d 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -758,7 +758,7 @@ noinstr void idtentry_exit_user(struct pt_regs *regs) noinstr bool idtentry_enter_nmi(struct pt_regs *regs) { - bool irq_state = lockdep_hardirqs_enabled(current); + bool irq_state = lockdep_hardirqs_enabled(); __nmi_enter(); lockdep_hardirqs_off(CALLER_ADDR0); diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index 255444fe4609..5811ee8a5cd8 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -40,9 +40,9 @@ DECLARE_PER_CPU(int, hardirq_context); extern void trace_hardirqs_off_finish(void); extern void trace_hardirqs_on(void); extern void trace_hardirqs_off(void); -# define lockdep_hardirq_context(p) (this_cpu_read(hardirq_context)) +# define lockdep_hardirq_context() (this_cpu_read(hardirq_context)) # define lockdep_softirq_context(p) ((p)->softirq_context) -# define lockdep_hardirqs_enabled(p) (this_cpu_read(hardirqs_enabled)) +# define lockdep_hardirqs_enabled() (this_cpu_read(hardirqs_enabled)) # define lockdep_softirqs_enabled(p) ((p)->softirqs_enabled) # define lockdep_hardirq_enter() \ do { \ @@ -109,9 +109,9 @@ do { \ # define trace_hardirqs_off_finish() do { } while (0) # define trace_hardirqs_on() do { } while (0) # define trace_hardirqs_off() do { } while (0) -# define lockdep_hardirq_context(p) 0 +# define lockdep_hardirq_context() 0 # define lockdep_softirq_context(p) 0 -# define lockdep_hardirqs_enabled(p) 0 +# define lockdep_hardirqs_enabled() 0 # define lockdep_softirqs_enabled(p) 0 # define lockdep_hardirq_enter() do { } while (0) # define lockdep_hardirq_threaded() do { } while (0) diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 
be6cb17a8879..fd04b9e96091 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -562,7 +562,7 @@ do { \ # define lockdep_assert_RT_in_threaded_ctx() do { \ WARN_ONCE(debug_locks && !current->lockdep_recursion && \ - lockdep_hardirq_context(current) && \ + lockdep_hardirq_context() && \ !(current->hardirq_threaded || current->irq_config), \ "Not in threaded context on PREEMPT_RT as expected\n"); \ } while (0) diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index ab4ffbe0e9e9..c9ea05edce25 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -2062,9 +2062,9 @@ print_bad_irq_dependency(struct task_struct *curr, pr_warn("-----------------------------------------------------\n"); pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n", curr->comm, task_pid_nr(curr), - lockdep_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT, + lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT, curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT, - lockdep_hardirqs_enabled(curr), + lockdep_hardirqs_enabled(), curr->softirqs_enabled); print_lock(next); @@ -3331,9 +3331,9 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this, pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n", curr->comm, task_pid_nr(curr), - lockdep_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT, + lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT, lockdep_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT, - lockdep_hardirqs_enabled(curr), + lockdep_hardirqs_enabled(), lockdep_softirqs_enabled(curr)); print_lock(this); @@ -3658,7 +3658,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip) if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK)) return; - if (unlikely(lockdep_hardirqs_enabled(current))) { + if (unlikely(lockdep_hardirqs_enabled())) { /* * Neither irq nor preemption are disabled here * so this is racy by nature but losing one hit @@ -3686,7 +3686,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip) * Can't allow enabling interrupts while in an interrupt handler, * that's general bad form and such. Recursion, limited stack etc.. 
*/ - if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context(current))) + if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context())) return; current->hardirq_chain_key = current->curr_chain_key; @@ -3724,7 +3724,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip) if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK)) return; - if (lockdep_hardirqs_enabled(curr)) { + if (lockdep_hardirqs_enabled()) { /* * Neither irq nor preemption are disabled here * so this is racy by nature but losing one hit @@ -3783,7 +3783,7 @@ void noinstr lockdep_hardirqs_off(unsigned long ip) if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) return; - if (lockdep_hardirqs_enabled(curr)) { + if (lockdep_hardirqs_enabled()) { /* * We have done an ON -> OFF transition: */ @@ -3832,7 +3832,7 @@ void lockdep_softirqs_on(unsigned long ip) * usage bit for all held locks, if hardirqs are * enabled too: */ - if (lockdep_hardirqs_enabled(curr)) + if (lockdep_hardirqs_enabled()) mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ); lockdep_recursion_finish(); } @@ -3881,7 +3881,7 @@ mark_usage(struct task_struct *curr, struct held_lock *hlock, int check) */ if (!hlock->trylock) { if (hlock->read) { - if (lockdep_hardirq_context(curr)) + if (lockdep_hardirq_context()) if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ_READ)) return 0; @@ -3890,7 +3890,7 @@ mark_usage(struct task_struct *curr, struct held_lock *hlock, int check) LOCK_USED_IN_SOFTIRQ_READ)) return 0; } else { - if (lockdep_hardirq_context(curr)) + if (lockdep_hardirq_context()) if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ)) return 0; if (curr->softirq_context) @@ -3928,7 +3928,7 @@ lock_used: static inline unsigned int task_irq_context(struct task_struct *task) { - return LOCK_CHAIN_HARDIRQ_CONTEXT * !!lockdep_hardirq_context(task) + + return LOCK_CHAIN_HARDIRQ_CONTEXT * !!lockdep_hardirq_context() + LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context; } @@ -4021,7 +4021,7 @@ static inline short task_wait_context(struct task_struct *curr) * Set appropriate wait type for the context; for IRQs we have to take * into account force_irqthread as that is implied by PREEMPT_RT. */ - if (lockdep_hardirq_context(curr)) { + if (lockdep_hardirq_context()) { /* * Check if force_irqthreads will run us threaded. 
*/ @@ -4864,11 +4864,11 @@ static void check_flags(unsigned long flags) return; if (irqs_disabled_flags(flags)) { - if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled(current))) { + if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled())) { printk("possible reason: unannotated irqs-off.\n"); } } else { - if (DEBUG_LOCKS_WARN_ON(!lockdep_hardirqs_enabled(current))) { + if (DEBUG_LOCKS_WARN_ON(!lockdep_hardirqs_enabled())) { printk("possible reason: unannotated irqs-on.\n"); } } diff --git a/kernel/softirq.c b/kernel/softirq.c index 342c53feaa7a..5e9aaa648a74 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -230,7 +230,7 @@ static inline bool lockdep_softirq_start(void) { bool in_hardirq = false; - if (lockdep_hardirq_context(current)) { + if (lockdep_hardirq_context()) { in_hardirq = true; lockdep_hardirq_exit(); } diff --git a/tools/include/linux/irqflags.h b/tools/include/linux/irqflags.h index 67e01bbadbfe..501262aee8ff 100644 --- a/tools/include/linux/irqflags.h +++ b/tools/include/linux/irqflags.h @@ -2,9 +2,9 @@ #ifndef _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_ #define _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_ -# define lockdep_hardirq_context(p) 0 +# define lockdep_hardirq_context() 0 # define lockdep_softirq_context(p) 0 -# define lockdep_hardirqs_enabled(p) 0 +# define lockdep_hardirqs_enabled() 0 # define lockdep_softirqs_enabled(p) 0 # define lockdep_hardirq_enter() do { } while (0) # define lockdep_hardirq_exit() do { } while (0) -- cgit v1.2.3 From 631beddc5466731b048263a4a9d3d67150e72f8d Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Thu, 9 Jul 2020 14:08:55 +0200 Subject: virt: vbox: Add support for the new VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES ioctl Add support for the new VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES ioctl, this is necessary for automatic resizing of the guest resolution to match the VM-window size to work with the new VMSVGA virtual GPU which is now the new default in VirtualBox. BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=1789545 Acked-by: Arnd Bergmann Signed-off-by: Hans de Goede Link: https://lore.kernel.org/r/20200709120858.63928-6-hdegoede@redhat.com Signed-off-by: Greg Kroah-Hartman --- drivers/virt/vboxguest/vboxguest_core.c | 163 +++++++++++++++++++++++++++++++- drivers/virt/vboxguest/vboxguest_core.h | 14 +++ include/uapi/linux/vboxguest.h | 24 +++++ 3 files changed, 200 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c index 15b3cb618c6e..4f1addaa3f6f 100644 --- a/drivers/virt/vboxguest/vboxguest_core.c +++ b/drivers/virt/vboxguest/vboxguest_core.c @@ -679,7 +679,7 @@ static int vbg_set_host_capabilities(struct vbg_dev *gdev, WARN_ON(!mutex_is_locked(&gdev->session_mutex)); - caps = gdev->set_guest_caps_tracker.mask; + caps = gdev->acquired_guest_caps | gdev->set_guest_caps_tracker.mask; if (gdev->guest_caps_host == caps) return 0; @@ -703,6 +703,113 @@ static int vbg_set_host_capabilities(struct vbg_dev *gdev, return vbg_status_code_to_errno(rc); } +/** + * Acquire (get exclusive access) guest capabilities for a session. + * Takes the session mutex. + * Return: 0 or negative errno value. + * @gdev: The Guest extension device. + * @session: The session. + * @flags: Flags (VBGL_IOC_AGC_FLAGS_XXX). + * @or_mask: The capabilities to add. + * @not_mask: The capabilities to remove. + * @session_termination: Set if we're called by the session cleanup code. + * This tweaks the error handling so we perform + * proper session cleanup even if the host + * misbehaves. 
+ */ +static int vbg_acquire_session_capabilities(struct vbg_dev *gdev, + struct vbg_session *session, + u32 or_mask, u32 not_mask, + u32 flags, bool session_termination) +{ + unsigned long irqflags; + bool wakeup = false; + int ret = 0; + + mutex_lock(&gdev->session_mutex); + + if (gdev->set_guest_caps_tracker.mask & or_mask) { + vbg_err("%s error: cannot acquire caps which are currently set\n", + __func__); + ret = -EINVAL; + goto out; + } + + /* + * Mark any caps in the or_mask as now being in acquire-mode. Note + * once caps are in acquire_mode they always stay in this mode. + * This impacts event handling, so we take the event-lock. + */ + spin_lock_irqsave(&gdev->event_spinlock, irqflags); + gdev->acquire_mode_guest_caps |= or_mask; + spin_unlock_irqrestore(&gdev->event_spinlock, irqflags); + + /* If we only have to switch the caps to acquire mode, we're done. */ + if (flags & VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE) + goto out; + + not_mask &= ~or_mask; /* or_mask takes priority over not_mask */ + not_mask &= session->acquired_guest_caps; + or_mask &= ~session->acquired_guest_caps; + + if (or_mask == 0 && not_mask == 0) + goto out; + + if (gdev->acquired_guest_caps & or_mask) { + ret = -EBUSY; + goto out; + } + + gdev->acquired_guest_caps |= or_mask; + gdev->acquired_guest_caps &= ~not_mask; + /* session->acquired_guest_caps impacts event handling, take the lock */ + spin_lock_irqsave(&gdev->event_spinlock, irqflags); + session->acquired_guest_caps |= or_mask; + session->acquired_guest_caps &= ~not_mask; + spin_unlock_irqrestore(&gdev->event_spinlock, irqflags); + + ret = vbg_set_host_capabilities(gdev, session, session_termination); + /* Roll back on failure, unless it's session termination time. */ + if (ret < 0 && !session_termination) { + gdev->acquired_guest_caps &= ~or_mask; + gdev->acquired_guest_caps |= not_mask; + spin_lock_irqsave(&gdev->event_spinlock, irqflags); + session->acquired_guest_caps &= ~or_mask; + session->acquired_guest_caps |= not_mask; + spin_unlock_irqrestore(&gdev->event_spinlock, irqflags); + } + + /* + * If we added a capability, check if that means some other thread in + * our session should be unblocked because there are events pending + * (the result of vbg_get_allowed_event_mask_for_session() may change). + * + * HACK ALERT! When the seamless support capability is added we generate + * a seamless change event so that the ring-3 client can sync with + * the seamless state. + */ + if (ret == 0 && or_mask != 0) { + spin_lock_irqsave(&gdev->event_spinlock, irqflags); + + if (or_mask & VMMDEV_GUEST_SUPPORTS_SEAMLESS) + gdev->pending_events |= + VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST; + + if (gdev->pending_events) + wakeup = true; + + spin_unlock_irqrestore(&gdev->event_spinlock, irqflags); + + if (wakeup) + wake_up(&gdev->event_wq); + } + +out: + mutex_unlock(&gdev->session_mutex); + + return ret; +} + /** * Sets the guest capabilities for a session. Takes the session spinlock. * Return: 0 or negative errno value. @@ -725,6 +832,13 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev, mutex_lock(&gdev->session_mutex); + if (gdev->acquire_mode_guest_caps & or_mask) { + vbg_err("%s error: cannot set caps which are in acquire_mode\n", + __func__); + ret = -EBUSY; + goto out; + } + /* Apply the changes to the session mask. 
*/ previous = session->set_guest_caps; session->set_guest_caps |= or_mask; @@ -962,6 +1076,7 @@ void vbg_core_close_session(struct vbg_session *session) struct vbg_dev *gdev = session->gdev; int i, rc; + vbg_acquire_session_capabilities(gdev, session, 0, U32_MAX, 0, true); vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true); vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true); @@ -1019,6 +1134,25 @@ static int vbg_ioctl_driver_version_info( return 0; } +/* Must be called with the event_lock held */ +static u32 vbg_get_allowed_event_mask_for_session(struct vbg_dev *gdev, + struct vbg_session *session) +{ + u32 acquire_mode_caps = gdev->acquire_mode_guest_caps; + u32 session_acquired_caps = session->acquired_guest_caps; + u32 allowed_events = VMMDEV_EVENT_VALID_EVENT_MASK; + + if ((acquire_mode_caps & VMMDEV_GUEST_SUPPORTS_GRAPHICS) && + !(session_acquired_caps & VMMDEV_GUEST_SUPPORTS_GRAPHICS)) + allowed_events &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST; + + if ((acquire_mode_caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS) && + !(session_acquired_caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)) + allowed_events &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST; + + return allowed_events; +} + static bool vbg_wait_event_cond(struct vbg_dev *gdev, struct vbg_session *session, u32 event_mask) @@ -1030,6 +1164,7 @@ static bool vbg_wait_event_cond(struct vbg_dev *gdev, spin_lock_irqsave(&gdev->event_spinlock, flags); events = gdev->pending_events & event_mask; + events &= vbg_get_allowed_event_mask_for_session(gdev, session); wakeup = events || session->cancel_waiters; spin_unlock_irqrestore(&gdev->event_spinlock, flags); @@ -1044,6 +1179,7 @@ static u32 vbg_consume_events_locked(struct vbg_dev *gdev, { u32 events = gdev->pending_events & event_mask; + events &= vbg_get_allowed_event_mask_for_session(gdev, session); gdev->pending_events &= ~events; return events; } @@ -1445,6 +1581,29 @@ static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev, false); } +static int vbg_ioctl_acquire_guest_capabilities(struct vbg_dev *gdev, + struct vbg_session *session, + struct vbg_ioctl_acquire_guest_caps *caps) +{ + u32 flags, or_mask, not_mask; + + if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), 0)) + return -EINVAL; + + flags = caps->u.in.flags; + or_mask = caps->u.in.or_mask; + not_mask = caps->u.in.not_mask; + + if (flags & ~VBGL_IOC_AGC_FLAGS_VALID_MASK) + return -EINVAL; + + if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK) + return -EINVAL; + + return vbg_acquire_session_capabilities(gdev, session, or_mask, + not_mask, flags, false); +} + static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev, struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps) { @@ -1554,6 +1713,8 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data) return vbg_ioctl_interrupt_all_wait_events(gdev, session, data); case VBG_IOCTL_CHANGE_FILTER_MASK: return vbg_ioctl_change_filter_mask(gdev, session, data); + case VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES: + return vbg_ioctl_acquire_guest_capabilities(gdev, session, data); case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES: return vbg_ioctl_change_guest_capabilities(gdev, session, data); case VBG_IOCTL_CHECK_BALLOON: diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h index dc745a033164..ab4bf64e2cec 100644 --- a/drivers/virt/vboxguest/vboxguest_core.h +++ b/drivers/virt/vboxguest/vboxguest_core.h @@ -117,6 +117,15 @@ struct vbg_dev { */ u32 event_filter_host; + /** + * Guest capabilities 
which have been switched to acquire_mode. + */ + u32 acquire_mode_guest_caps; + /** + * Guest capabilities acquired by vbg_acquire_session_capabilities(). + * Only one session can acquire a capability at a time. + */ + u32 acquired_guest_caps; /** * Usage counters for guest capabilities requested through * vbg_set_session_capabilities(). Indexed by capability bit @@ -164,6 +173,11 @@ struct vbg_session { * host filter. Protected by vbg_gdev.session_mutex. */ u32 event_filter; + /** + * Guest capabilities acquired by vbg_acquire_session_capabilities(). + * Only one session can acquire a capability at a time. + */ + u32 acquired_guest_caps; /** * Guest capabilities set through vbg_set_session_capabilities(). * A capability claimed by any guest session will be reported to the diff --git a/include/uapi/linux/vboxguest.h b/include/uapi/linux/vboxguest.h index f79d7abe27db..15125f6ec60d 100644 --- a/include/uapi/linux/vboxguest.h +++ b/include/uapi/linux/vboxguest.h @@ -257,6 +257,30 @@ VMMDEV_ASSERT_SIZE(vbg_ioctl_change_filter, 24 + 8); _IOWR('V', 12, struct vbg_ioctl_change_filter) +/** VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES data structure. */ +struct vbg_ioctl_acquire_guest_caps { + /** The header. */ + struct vbg_ioctl_hdr hdr; + union { + struct { + /** Flags (VBGL_IOC_AGC_FLAGS_XXX). */ + __u32 flags; + /** Capabilities to set (VMMDEV_GUEST_SUPPORTS_XXX). */ + __u32 or_mask; + /** Capabilities to drop (VMMDEV_GUEST_SUPPORTS_XXX). */ + __u32 not_mask; + } in; + } u; +}; +VMMDEV_ASSERT_SIZE(vbg_ioctl_acquire_guest_caps, 24 + 12); + +#define VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE 0x00000001 +#define VBGL_IOC_AGC_FLAGS_VALID_MASK 0x00000001 + +#define VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES \ + _IOWR('V', 13, struct vbg_ioctl_acquire_guest_caps) + + /** VBG_IOCTL_CHANGE_GUEST_CAPABILITIES data structure. */ struct vbg_ioctl_set_guest_caps { /** The header. */ -- cgit v1.2.3 From 316b0035402f05fe9e9e5334d1ff65dae285cb7c Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Thu, 9 Jul 2020 14:08:56 +0200 Subject: virt: vbox: Add a few new vmmdev request types to the userspace whitelist Upstream VirtualBox has defined and is using a few new request types for vmmdev requests passed through /dev/vboxguest to the hypervisor. Add the defines for these to vbox_vmmdev_types.h and add them to the whitelists of vmmdev requests which userspace is allowed to make. 
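For illustration only (not the driver's code): the userspace whitelist is a switch over the request type, so supporting a new request type is a matter of adding its case label. A minimal self-contained sketch, where only the request values are quoted from the hunk below and everything else (names, function) is hypothetical:

#include <stdbool.h>

/* Request values as defined in vbox_vmmdev_types.h; the EXAMPLE_* names
 * are placeholders for this sketch. */
enum example_vmmdev_request_type {
        EXAMPLE_GET_DISPLAY_CHANGE_REQEX       = 80,
        EXAMPLE_GET_DISPLAY_CHANGE_REQ_MULTI   = 81,    /* newly whitelisted */
        EXAMPLE_VIDEO_UPDATE_MONITOR_POSITIONS = 222,   /* newly whitelisted */
};

static bool example_req_allowed(enum example_vmmdev_request_type req)
{
        switch (req) {
        case EXAMPLE_GET_DISPLAY_CHANGE_REQEX:
        case EXAMPLE_GET_DISPLAY_CHANGE_REQ_MULTI:
        case EXAMPLE_VIDEO_UPDATE_MONITOR_POSITIONS:
                return true;    /* userspace may pass this request through */
        default:
                return false;   /* everything else is rejected */
        }
}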
BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=1789545 Acked-by: Arnd Bergmann Signed-off-by: Hans de Goede Link: https://lore.kernel.org/r/20200709120858.63928-7-hdegoede@redhat.com Signed-off-by: Greg Kroah-Hartman --- drivers/virt/vboxguest/vboxguest_core.c | 2 ++ include/uapi/linux/vbox_vmmdev_types.h | 3 +++ 2 files changed, 5 insertions(+) (limited to 'include') diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c index 4f1addaa3f6f..ffd76b949276 100644 --- a/drivers/virt/vboxguest/vboxguest_core.c +++ b/drivers/virt/vboxguest/vboxguest_core.c @@ -1299,7 +1299,9 @@ static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session, case VMMDEVREQ_VIDEO_ACCEL_ENABLE: case VMMDEVREQ_VIDEO_ACCEL_FLUSH: case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION: + case VMMDEVREQ_VIDEO_UPDATE_MONITOR_POSITIONS: case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX: + case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ_MULTI: case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ: case VMMDEVREQ_GET_VRDPCHANGE_REQ: case VMMDEVREQ_LOG_STRING: diff --git a/include/uapi/linux/vbox_vmmdev_types.h b/include/uapi/linux/vbox_vmmdev_types.h index c27289fd619a..f8a8d6b3c521 100644 --- a/include/uapi/linux/vbox_vmmdev_types.h +++ b/include/uapi/linux/vbox_vmmdev_types.h @@ -63,6 +63,7 @@ enum vmmdev_request_type { VMMDEVREQ_SET_GUEST_CAPABILITIES = 56, VMMDEVREQ_VIDEMODE_SUPPORTED2 = 57, /* since version 3.2.0 */ VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX = 80, /* since version 4.2.4 */ + VMMDEVREQ_GET_DISPLAY_CHANGE_REQ_MULTI = 81, VMMDEVREQ_HGCM_CONNECT = 60, VMMDEVREQ_HGCM_DISCONNECT = 61, VMMDEVREQ_HGCM_CALL32 = 62, @@ -92,6 +93,8 @@ enum vmmdev_request_type { VMMDEVREQ_WRITE_COREDUMP = 218, VMMDEVREQ_GUEST_HEARTBEAT = 219, VMMDEVREQ_HEARTBEAT_CONFIGURE = 220, + VMMDEVREQ_NT_BUG_CHECK = 221, + VMMDEVREQ_VIDEO_UPDATE_MONITOR_POSITIONS = 222, /* Ensure the enum is a 32 bit data-type */ VMMDEVREQ_SIZEHACK = 0x7fffffff }; -- cgit v1.2.3 From 5bc117a27fd044bd5ddeb8ab22c58976bf01b50d Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Thu, 9 Jul 2020 14:08:57 +0200 Subject: virt: vbox: Log unknown ioctl requests as error Every now and then upstream adds new ioctls without notifying us, log unknown ioctl requests as an error to catch these. 
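For illustration only, a minimal sketch of the resulting dispatcher behaviour: an unknown request is now reported with a rate-limited error rather than a debug message, and is still rejected with -ENOTTY. The handler shape and EXAMPLE_KNOWN_REQ are made up; pr_err_ratelimited() is the standard kernel helper the new vbg_err_ratelimited() maps onto.

#include <linux/errno.h>
#include <linux/printk.h>

/* EXAMPLE_KNOWN_REQ stands in for the driver's real ioctl numbers. */
#define EXAMPLE_KNOWN_REQ       0x1000

static long example_core_ioctl(unsigned int req, void *data)
{
        switch (req) {
        case EXAMPLE_KNOWN_REQ:
                return 0;       /* handled */
        default:
                /* Rate-limited so misbehaving userspace cannot flood dmesg,
                 * but still visible as an error, unlike a debug message. */
                pr_err_ratelimited("unknown ioctl req %#08x\n", req);
                return -ENOTTY;
        }
}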
Acked-by: Arnd Bergmann Signed-off-by: Hans de Goede Link: https://lore.kernel.org/r/20200709120858.63928-8-hdegoede@redhat.com Signed-off-by: Greg Kroah-Hartman --- drivers/virt/vboxguest/vboxguest_core.c | 2 +- drivers/virt/vboxguest/vboxguest_utils.c | 1 + include/linux/vbox_utils.h | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c index ffd76b949276..c5dfcd42fd07 100644 --- a/drivers/virt/vboxguest/vboxguest_core.c +++ b/drivers/virt/vboxguest/vboxguest_core.c @@ -1739,7 +1739,7 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data) return vbg_ioctl_log(data); } - vbg_debug("VGDrvCommonIoCtl: Unknown req %#08x\n", req); + vbg_err_ratelimited("Userspace made an unknown ioctl req %#08x\n", req); return -ENOTTY; } diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c index 7396187ee32a..ea05af41ec69 100644 --- a/drivers/virt/vboxguest/vboxguest_utils.c +++ b/drivers/virt/vboxguest/vboxguest_utils.c @@ -59,6 +59,7 @@ EXPORT_SYMBOL(name) VBG_LOG(vbg_info, pr_info); VBG_LOG(vbg_warn, pr_warn); VBG_LOG(vbg_err, pr_err); +VBG_LOG(vbg_err_ratelimited, pr_err_ratelimited); #if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG) VBG_LOG(vbg_debug, pr_debug); #endif diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h index ff56c443180c..db8a7d118093 100644 --- a/include/linux/vbox_utils.h +++ b/include/linux/vbox_utils.h @@ -16,6 +16,7 @@ struct vbg_dev; __printf(1, 2) void vbg_info(const char *fmt, ...); __printf(1, 2) void vbg_warn(const char *fmt, ...); __printf(1, 2) void vbg_err(const char *fmt, ...); +__printf(1, 2) void vbg_err_ratelimited(const char *fmt, ...); /* Only use backdoor logging for non-dynamic debug builds */ #if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG) -- cgit v1.2.3 From 04aaca197f16aff608e19ee98b1e55f535d746be Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Wed, 17 Jun 2020 17:33:13 +0900 Subject: char: raw: do not leak CONFIG_MAX_RAW_DEVS to userspace include/uapi/linux/raw.h leaks CONFIG_MAX_RAW_DEVS to userspace. Userspace programs cannot use MAX_RAW_MINORS since CONFIG_MAX_RAW_DEVS is not available anyway. Remove the MAX_RAW_MINORS definition from the exported header, and use CONFIG_MAX_RAW_DEVS in drivers/char/raw.c While I was here, I converted printk(KERN_WARNING ...) to pr_warn(...) and stretched the warning message. 
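For illustration only, a sketch of the resulting module-parameter handling, with EXAMPLE_DEF_MINORS standing in for the Kconfig-provided CONFIG_MAX_RAW_DEVS (the value 256 is assumed, not taken from the patch):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>

#define EXAMPLE_DEF_MINORS      256     /* stand-in for CONFIG_MAX_RAW_DEVS */

static int max_raw_minors = EXAMPLE_DEF_MINORS;
module_param(max_raw_minors, int, 0);
MODULE_PARM_DESC(max_raw_minors, "Maximum number of raw devices (1-65536)");

static int __init example_raw_init(void)
{
        /* Out-of-range values fall back to the build-time default. */
        if (max_raw_minors < 1 || max_raw_minors > 65536) {
                pr_warn("raw: invalid max_raw_minors (must be between 1 and 65536), using %d\n",
                        EXAMPLE_DEF_MINORS);
                max_raw_minors = EXAMPLE_DEF_MINORS;
        }
        return 0;
}
module_init(example_raw_init);

Userspace keeps struct raw_config_request from the exported header but no longer sees a MAX_RAW_MINORS that silently expanded to an undefined CONFIG_ symbol.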
Signed-off-by: Masahiro Yamada Link: https://lore.kernel.org/r/20200617083313.183184-1-masahiroy@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/char/raw.c | 8 ++++---- include/uapi/linux/raw.h | 2 -- scripts/headers_install.sh | 1 - 3 files changed, 4 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/char/raw.c b/drivers/char/raw.c index 3484e9145aea..380bf518338e 100644 --- a/drivers/char/raw.c +++ b/drivers/char/raw.c @@ -37,7 +37,7 @@ static struct raw_device_data *raw_devices; static DEFINE_MUTEX(raw_mutex); static const struct file_operations raw_ctl_fops; /* forward declaration */ -static int max_raw_minors = MAX_RAW_MINORS; +static int max_raw_minors = CONFIG_MAX_RAW_DEVS; module_param(max_raw_minors, int, 0); MODULE_PARM_DESC(max_raw_minors, "Maximum number of raw devices (1-65536)"); @@ -317,9 +317,9 @@ static int __init raw_init(void) int ret; if (max_raw_minors < 1 || max_raw_minors > 65536) { - printk(KERN_WARNING "raw: invalid max_raw_minors (must be" - " between 1 and 65536), using %d\n", MAX_RAW_MINORS); - max_raw_minors = MAX_RAW_MINORS; + pr_warn("raw: invalid max_raw_minors (must be between 1 and 65536), using %d\n", + CONFIG_MAX_RAW_DEVS); + max_raw_minors = CONFIG_MAX_RAW_DEVS; } raw_devices = vzalloc(array_size(max_raw_minors, diff --git a/include/uapi/linux/raw.h b/include/uapi/linux/raw.h index dc96dda479d6..47874919d0b9 100644 --- a/include/uapi/linux/raw.h +++ b/include/uapi/linux/raw.h @@ -14,6 +14,4 @@ struct raw_config_request __u64 block_minor; }; -#define MAX_RAW_MINORS CONFIG_MAX_RAW_DEVS - #endif /* __LINUX_RAW_H */ diff --git a/scripts/headers_install.sh b/scripts/headers_install.sh index 224f51012b6e..cdd66038818c 100755 --- a/scripts/headers_install.sh +++ b/scripts/headers_install.sh @@ -90,7 +90,6 @@ include/uapi/linux/elfcore.h:CONFIG_BINFMT_ELF_FDPIC include/uapi/linux/eventpoll.h:CONFIG_PM_SLEEP include/uapi/linux/hw_breakpoint.h:CONFIG_HAVE_MIXED_BREAKPOINTS_REGS include/uapi/linux/pktcdvd.h:CONFIG_CDROM_PKTCDVD_WCACHE -include/uapi/linux/raw.h:CONFIG_MAX_RAW_DEVS " for c in $configs -- cgit v1.2.3 From a9f91cebbeb8ea1355e852cce6d40efbcddbfe2b Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Wed, 8 Jul 2020 13:57:09 +0100 Subject: misc: vmw_vmci_defs: Mark 'struct vmci_handle VMCI_ANON_SRC_HANDLE' as __maybe_unused MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit vmw_vmci_defs.h is included by multiple source files. Some of which do not make use of 'struct vmci_handle VMCI_ANON_SRC_HANDLE' rendering it unused. Ensure the compiler knows that this is in fact intentional by marking it as __maybe_unused. 
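For illustration only (not the vmw_vmci code itself), a self-contained sketch of the pattern: in the kernel, __maybe_unused expands to the attribute used below, which tells the compiler that a header-defined constant may legitimately go unreferenced in some translation units.

/* Stand-in for the kernel's __maybe_unused (compiler_attributes.h). */
#define example_maybe_unused __attribute__((__unused__))

struct example_handle {
        unsigned int context;
        unsigned int resource;
};

/* Without the attribute, every file that includes this header but never
 * touches the constant triggers -Wunused-const-variable under W=1. */
static const struct example_handle example_maybe_unused EXAMPLE_ANON_HANDLE = {
        .context  = ~0U,
        .resource = ~0U,
};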
This fixes the following W=1 warnings: In file included from drivers/misc/vmw_vmci/vmci_context.c:8: include/linux/vmw_vmci_defs.h:162:33: warning: ‘VMCI_ANON_SRC_HANDLE’ defined but not used [-Wunused-const-variable=] 162 | static const struct vmci_handle VMCI_ANON_SRC_HANDLE = { | ^~~~~~~~~~~~~~~~~~~~ In file included from drivers/misc/vmw_vmci/vmci_datagram.c:8: include/linux/vmw_vmci_defs.h:162:33: warning: ‘VMCI_ANON_SRC_HANDLE’ defined but not used [-Wunused-const-variable=] 162 | static const struct vmci_handle VMCI_ANON_SRC_HANDLE = { | ^~~~~~~~~~~~~~~~~~~~ Cc: George Zhang Signed-off-by: Lee Jones Acked-by: Arnd Bergmann Link: https://lore.kernel.org/r/20200708125711.3443569-2-lee.jones@linaro.org Signed-off-by: Greg Kroah-Hartman --- include/linux/vmw_vmci_defs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h index fefb5292403b..be0afe6f379b 100644 --- a/include/linux/vmw_vmci_defs.h +++ b/include/linux/vmw_vmci_defs.h @@ -159,7 +159,7 @@ static inline bool vmci_handle_is_invalid(struct vmci_handle h) */ #define VMCI_ANON_SRC_CONTEXT_ID VMCI_INVALID_ID #define VMCI_ANON_SRC_RESOURCE_ID VMCI_INVALID_ID -static const struct vmci_handle VMCI_ANON_SRC_HANDLE = { +static const struct vmci_handle __maybe_unused VMCI_ANON_SRC_HANDLE = { .context = VMCI_ANON_SRC_CONTEXT_ID, .resource = VMCI_ANON_SRC_RESOURCE_ID }; -- cgit v1.2.3 From 849a9366cba92cb5dc9dc1161ef49416a290aae9 Mon Sep 17 00:00:00 2001 From: Ricky Wu Date: Mon, 6 Jul 2020 15:02:59 +0800 Subject: misc: rtsx: Add support new chip rts5228 mmc: rtsx: Add support MMC_CAP2_NO_MMC In order to support new chip rts5228, the definitions of some internal registers and workflow have to be modified. Added rts5228.c rts5228.h for independent functions of the new chip rts5228 Signed-off-by: Ricky Wu Link: https://lore.kernel.org/r/20200706070259.32565-1-ricky_wu@realtek.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/cardreader/Makefile | 2 +- drivers/misc/cardreader/rts5228.c | 740 +++++++++++++++++++++++++++++++++++++ drivers/misc/cardreader/rts5228.h | 168 +++++++++ drivers/misc/cardreader/rtsx_pcr.c | 89 +++-- drivers/misc/cardreader/rtsx_pcr.h | 5 + drivers/mmc/host/rtsx_pci_sdmmc.c | 2 + include/linux/rtsx_pci.h | 11 + 7 files changed, 985 insertions(+), 32 deletions(-) create mode 100644 drivers/misc/cardreader/rts5228.c create mode 100644 drivers/misc/cardreader/rts5228.h (limited to 'include') diff --git a/drivers/misc/cardreader/Makefile b/drivers/misc/cardreader/Makefile index 1f56267ed2f4..895128475d83 100644 --- a/drivers/misc/cardreader/Makefile +++ b/drivers/misc/cardreader/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_MISC_ALCOR_PCI) += alcor_pci.o obj-$(CONFIG_MISC_RTSX_PCI) += rtsx_pci.o -rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o rts5261.o +rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o rts5261.o rts5228.o obj-$(CONFIG_MISC_RTSX_USB) += rtsx_usb.o diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c new file mode 100644 index 000000000000..99aff7cd0a93 --- /dev/null +++ b/drivers/misc/cardreader/rts5228.c @@ -0,0 +1,740 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Driver for Realtek PCI-Express card reader + * + * Copyright(c) 2018-2019 Realtek Semiconductor Corp. All rights reserved. 
+ * + * Author: + * Ricky WU + * Rui FENG + * Wei WANG + */ + +#include +#include +#include + +#include "rts5228.h" +#include "rtsx_pcr.h" + +static u8 rts5228_get_ic_version(struct rtsx_pcr *pcr) +{ + u8 val; + + rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val); + return val & IC_VERSION_MASK; +} + +static void rts5228_fill_driving(struct rtsx_pcr *pcr, u8 voltage) +{ + u8 driving_3v3[4][3] = { + {0x13, 0x13, 0x13}, + {0x96, 0x96, 0x96}, + {0x7F, 0x7F, 0x7F}, + {0x96, 0x96, 0x96}, + }; + u8 driving_1v8[4][3] = { + {0x99, 0x99, 0x99}, + {0xB5, 0xB5, 0xB5}, + {0xE6, 0x7E, 0xFE}, + {0x6B, 0x6B, 0x6B}, + }; + u8 (*driving)[3], drive_sel; + + if (voltage == OUTPUT_3V3) { + driving = driving_3v3; + drive_sel = pcr->sd30_drive_sel_3v3; + } else { + driving = driving_1v8; + drive_sel = pcr->sd30_drive_sel_1v8; + } + + rtsx_pci_write_register(pcr, SD30_CLK_DRIVE_SEL, + 0xFF, driving[drive_sel][0]); + + rtsx_pci_write_register(pcr, SD30_CMD_DRIVE_SEL, + 0xFF, driving[drive_sel][1]); + + rtsx_pci_write_register(pcr, SD30_DAT_DRIVE_SEL, + 0xFF, driving[drive_sel][2]); +} + +static void rtsx5228_fetch_vendor_settings(struct rtsx_pcr *pcr) +{ + u32 reg; + /* 0x724~0x727 */ + rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, ®); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg); + + if (!rtsx_vendor_setting_valid(reg)) { + pcr_dbg(pcr, "skip fetch vendor setting\n"); + return; + } + pcr->sd30_drive_sel_1v8 = rtsx_reg_to_sd30_drive_sel_1v8(reg); + pcr->aspm_en = rtsx_reg_to_aspm(reg); + + /* 0x814~0x817 */ + rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, ®); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg); + + pcr->rtd3_en = rtsx_reg_to_rtd3(reg); + if (rtsx_check_mmc_support(reg)) + pcr->extra_caps |= EXTRA_CAPS_NO_MMC; + pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg); + if (rtsx_reg_check_reverse_socket(reg)) + pcr->flags |= PCR_REVERSE_SOCKET; +} + +static int rts5228_optimize_phy(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_phy_register(pcr, 0x07, 0x8F40); +} + +static void rts5228_force_power_down(struct rtsx_pcr *pcr, u8 pm_state) +{ + /* Set relink_time to 0 */ + rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0); + rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0); + rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3, + RELINK_TIME_MASK, 0); + + if (pm_state == HOST_ENTER_S3) + rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, + D3_DELINK_MODE_EN, D3_DELINK_MODE_EN); + + rtsx_pci_write_register(pcr, FPDCTL, + SSC_POWER_DOWN, SSC_POWER_DOWN); +} + +static int rts5228_enable_auto_blink(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, OLT_LED_CTL, + LED_SHINE_MASK, LED_SHINE_EN); +} + +static int rts5228_disable_auto_blink(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, OLT_LED_CTL, + LED_SHINE_MASK, LED_SHINE_DISABLE); +} + +static int rts5228_turn_on_led(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, GPIO_CTL, + 0x02, 0x02); +} + +static int rts5228_turn_off_led(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, GPIO_CTL, + 0x02, 0x00); +} + +/* SD Pull Control Enable: + * SD_DAT[3:0] ==> pull up + * SD_CD ==> pull up + * SD_WP ==> pull up + * SD_CMD ==> pull up + * SD_CLK ==> pull down + */ +static const u32 rts5228_sd_pull_ctl_enable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9), + 0, +}; + +/* SD Pull Control Disable: + * SD_DAT[3:0] ==> pull down + * SD_CD ==> pull up + * SD_WP ==> pull down + * SD_CMD ==> pull down 
+ * SD_CLK ==> pull down + */ +static const u32 rts5228_sd_pull_ctl_disable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5), + 0, +}; + +static int rts5228_sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr) +{ + rtsx_pci_write_register(pcr, SD_CFG1, SD_MODE_SELECT_MASK + | SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST); + rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ); + rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF, + CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1); + rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0); + + return 0; +} + +static int rts5228_card_power_on(struct rtsx_pcr *pcr, int card) +{ + struct rtsx_cr_option *option = &pcr->option; + + if (option->ocp_en) + rtsx_pci_enable_ocp(pcr); + + rtsx_pci_write_register(pcr, REG_CRC_DUMMY_0, + CFG_SD_POW_AUTO_PD, CFG_SD_POW_AUTO_PD); + + rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1, + RTS5228_LDO1_TUNE_MASK, RTS5228_LDO1_33); + + rtsx_pci_write_register(pcr, RTS5228_LDO1233318_POW_CTL, + RTS5228_LDO1_POWERON_MASK, RTS5228_LDO1_SOFTSTART); + mdelay(2); + rtsx_pci_write_register(pcr, RTS5228_LDO1233318_POW_CTL, + RTS5228_LDO1_POWERON_MASK, RTS5228_LDO1_FULLON); + + + rtsx_pci_write_register(pcr, RTS5228_LDO1233318_POW_CTL, + RTS5228_LDO3318_POWERON, RTS5228_LDO3318_POWERON); + + msleep(20); + + rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN); + + /* Initialize SD_CFG1 register */ + rtsx_pci_write_register(pcr, SD_CFG1, 0xFF, + SD_CLK_DIVIDE_128 | SD_20_MODE | SD_BUS_WIDTH_1BIT); + + rtsx_pci_write_register(pcr, SD_SAMPLE_POINT_CTL, + 0xFF, SD20_RX_POS_EDGE); + rtsx_pci_write_register(pcr, SD_PUSH_POINT_CTL, 0xFF, 0); + rtsx_pci_write_register(pcr, CARD_STOP, SD_STOP | SD_CLR_ERR, + SD_STOP | SD_CLR_ERR); + + /* Reset SD_CFG3 register */ + rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0); + rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG, + SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 | + SD30_CLK_STOP_CFG0, 0); + + if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 || + pcr->extra_caps & EXTRA_CAPS_SD_SDR104) + rts5228_sd_set_sample_push_timing_sd30(pcr); + + return 0; +} + +static int rts5228_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) +{ + int err; + u16 val = 0; + + rtsx_pci_write_register(pcr, RTS5228_CARD_PWR_CTL, + RTS5228_PUPDC, RTS5228_PUPDC); + + switch (voltage) { + case OUTPUT_3V3: + rtsx_pci_read_phy_register(pcr, PHY_TUNE, &val); + val |= PHY_TUNE_SDBUS_33; + err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, val); + if (err < 0) + return err; + + rtsx_pci_write_register(pcr, RTS5228_DV3318_CFG, + RTS5228_DV3318_TUNE_MASK, RTS5228_DV3318_33); + rtsx_pci_write_register(pcr, SD_PAD_CTL, + SD_IO_USING_1V8, 0); + break; + case OUTPUT_1V8: + rtsx_pci_read_phy_register(pcr, PHY_TUNE, &val); + val &= ~PHY_TUNE_SDBUS_33; + err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, val); + if (err < 0) + return err; + + rtsx_pci_write_register(pcr, RTS5228_DV3318_CFG, + RTS5228_DV3318_TUNE_MASK, RTS5228_DV3318_18); + rtsx_pci_write_register(pcr, SD_PAD_CTL, + SD_IO_USING_1V8, SD_IO_USING_1V8); + break; + default: + return -EINVAL; + } + + /* set pad drive */ + rts5228_fill_driving(pcr, voltage); + + return 0; +} + +static void rts5228_stop_cmd(struct rtsx_pcr *pcr) +{ + rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD); + rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA); + rtsx_pci_write_register(pcr, RTS5260_DMA_RST_CTL_0, + RTS5260_DMA_RST | RTS5260_ADMA3_RST, + RTS5260_DMA_RST | RTS5260_ADMA3_RST); + rtsx_pci_write_register(pcr, 
RBCTL, RB_FLUSH, RB_FLUSH); +} + +static void rts5228_card_before_power_off(struct rtsx_pcr *pcr) +{ + rts5228_stop_cmd(pcr); + rts5228_switch_output_voltage(pcr, OUTPUT_3V3); +} + +static void rts5228_enable_ocp(struct rtsx_pcr *pcr) +{ + u8 val = 0; + + val = SD_OCP_INT_EN | SD_DETECT_EN; + rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val); + rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0, + RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN, + RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN); +} + +static void rts5228_disable_ocp(struct rtsx_pcr *pcr) +{ + u8 mask = 0; + + mask = SD_OCP_INT_EN | SD_DETECT_EN; + rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0); + rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0, + RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN, 0); +} + +static int rts5228_card_power_off(struct rtsx_pcr *pcr, int card) +{ + int err = 0; + + rts5228_card_before_power_off(pcr); + err = rtsx_pci_write_register(pcr, RTS5228_LDO1233318_POW_CTL, + RTS5228_LDO_POWERON_MASK, 0); + rtsx_pci_write_register(pcr, REG_CRC_DUMMY_0, CFG_SD_POW_AUTO_PD, 0); + + if (pcr->option.ocp_en) + rtsx_pci_disable_ocp(pcr); + + return err; +} + +static void rts5228_init_ocp(struct rtsx_pcr *pcr) +{ + struct rtsx_cr_option *option = &pcr->option; + + if (option->ocp_en) { + u8 mask, val; + + rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0, + RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN, + RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN); + + rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0, + RTS5228_LDO1_OCP_THD_MASK, option->sd_800mA_ocp_thd); + + rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0, + RTS5228_LDO1_OCP_LMT_THD_MASK, + RTS5228_LDO1_LMT_THD_1500); + + rtsx_pci_read_register(pcr, RTS5228_LDO1_CFG0, &val); + + mask = SD_OCP_GLITCH_MASK; + val = pcr->hw_param.ocp_glitch; + rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val); + + rts5228_enable_ocp(pcr); + + } else { + rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0, + RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN, 0); + } +} + +static void rts5228_clear_ocpstat(struct rtsx_pcr *pcr) +{ + u8 mask = 0; + u8 val = 0; + + mask = SD_OCP_INT_CLR | SD_OC_CLR; + val = SD_OCP_INT_CLR | SD_OC_CLR; + + rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val); + + udelay(1000); + rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0); + +} + +static void rts5228_process_ocp(struct rtsx_pcr *pcr) +{ + if (!pcr->option.ocp_en) + return; + + rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat); + + if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) { + rts5228_clear_ocpstat(pcr); + rts5228_card_power_off(pcr, RTSX_SD_CARD); + rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0); + pcr->ocp_stat = 0; + } + +} + +static void rts5228_init_from_cfg(struct rtsx_pcr *pcr) +{ + u32 lval; + struct rtsx_cr_option *option = &pcr->option; + + rtsx_pci_read_config_dword(pcr, PCR_ASPM_SETTING_REG1, &lval); + + + if (0 == (lval & 0x0F)) + rtsx_pci_enable_oobs_polling(pcr); + else + rtsx_pci_disable_oobs_polling(pcr); + + if (lval & ASPM_L1_1_EN_MASK) + rtsx_set_dev_flag(pcr, ASPM_L1_1_EN); + else + rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN); + + if (lval & ASPM_L1_2_EN_MASK) + rtsx_set_dev_flag(pcr, ASPM_L1_2_EN); + else + rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN); + + if (lval & PM_L1_1_EN_MASK) + rtsx_set_dev_flag(pcr, PM_L1_1_EN); + else + rtsx_clear_dev_flag(pcr, PM_L1_1_EN); + + if (lval & PM_L1_2_EN_MASK) + rtsx_set_dev_flag(pcr, PM_L1_2_EN); + else + rtsx_clear_dev_flag(pcr, PM_L1_2_EN); + + rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0); + if (option->ltr_en) { + u16 val; + + 
pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val); + if (val & PCI_EXP_DEVCTL2_LTR_EN) { + option->ltr_enabled = true; + option->ltr_active = true; + rtsx_set_ltr_latency(pcr, option->ltr_active_latency); + } else { + option->ltr_enabled = false; + } + } + + if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN + | PM_L1_1_EN | PM_L1_2_EN)) + option->force_clkreq_0 = false; + else + option->force_clkreq_0 = true; +} + +static int rts5228_extra_init_hw(struct rtsx_pcr *pcr) +{ + struct rtsx_cr_option *option = &pcr->option; + + rtsx_pci_write_register(pcr, RTS5228_AUTOLOAD_CFG1, + CD_RESUME_EN_MASK, CD_RESUME_EN_MASK); + + rts5228_init_from_cfg(pcr); + + rtsx_pci_write_register(pcr, L1SUB_CONFIG1, + AUX_CLK_ACTIVE_SEL_MASK, MAC_CKSW_DONE); + rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, 0); + + rtsx_pci_write_register(pcr, FUNC_FORCE_CTL, + FUNC_FORCE_UPME_XMT_DBG, FUNC_FORCE_UPME_XMT_DBG); + + rtsx_pci_write_register(pcr, PCLK_CTL, + PCLK_MODE_SEL, PCLK_MODE_SEL); + + rtsx_pci_write_register(pcr, PM_EVENT_DEBUG, PME_DEBUG_0, PME_DEBUG_0); + rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, CLK_PM_EN, CLK_PM_EN); + + /* LED shine disabled, set initial shine cycle period */ + rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x0F, 0x02); + + /* Configure driving */ + rts5228_fill_driving(pcr, OUTPUT_3V3); + + if (pcr->flags & PCR_REVERSE_SOCKET) + rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x30); + else + rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00); + + /* + * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced + * to drive low, and we forcibly request clock. + */ + if (option->force_clkreq_0) + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); + else + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH); + + rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB); + rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00); + rtsx_pci_write_register(pcr, RTS5228_REG_PME_FORCE_CTL, + FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL); + + return 0; +} + +static void rts5228_enable_aspm(struct rtsx_pcr *pcr, bool enable) +{ + u8 mask, val; + + if (pcr->aspm_enabled == enable) + return; + + mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1; + val = FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1; + val |= (pcr->aspm_en & 0x02); + rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val); + pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC, pcr->aspm_en); + pcr->aspm_enabled = enable; +} + +static void rts5228_disable_aspm(struct rtsx_pcr *pcr, bool enable) +{ + u8 mask, val; + + if (pcr->aspm_enabled == enable) + return; + + pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC, 0); + mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1; + val = FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1; + rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val); + rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0); + mdelay(10); + pcr->aspm_enabled = enable; +} + +static void rts5228_set_aspm(struct rtsx_pcr *pcr, bool enable) +{ + if (enable) + rts5228_enable_aspm(pcr, true); + else + rts5228_disable_aspm(pcr, false); +} + +static void rts5228_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active) +{ + struct rtsx_cr_option *option = &pcr->option; + int aspm_L1_1, aspm_L1_2; + u8 val = 0; + + aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN); + aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN); + + if (active) { + /* run, latency: 60us */ + 
if (aspm_L1_1) + val = option->ltr_l1off_snooze_sspwrgate; + } else { + /* l1off, latency: 300us */ + if (aspm_L1_2) + val = option->ltr_l1off_sspwrgate; + } + + rtsx_set_l1off_sub(pcr, val); +} + +static const struct pcr_ops rts5228_pcr_ops = { + .fetch_vendor_settings = rtsx5228_fetch_vendor_settings, + .turn_on_led = rts5228_turn_on_led, + .turn_off_led = rts5228_turn_off_led, + .extra_init_hw = rts5228_extra_init_hw, + .enable_auto_blink = rts5228_enable_auto_blink, + .disable_auto_blink = rts5228_disable_auto_blink, + .card_power_on = rts5228_card_power_on, + .card_power_off = rts5228_card_power_off, + .switch_output_voltage = rts5228_switch_output_voltage, + .force_power_down = rts5228_force_power_down, + .stop_cmd = rts5228_stop_cmd, + .set_aspm = rts5228_set_aspm, + .set_l1off_cfg_sub_d0 = rts5228_set_l1off_cfg_sub_d0, + .enable_ocp = rts5228_enable_ocp, + .disable_ocp = rts5228_disable_ocp, + .init_ocp = rts5228_init_ocp, + .process_ocp = rts5228_process_ocp, + .clear_ocpstat = rts5228_clear_ocpstat, + .optimize_phy = rts5228_optimize_phy, +}; + + +static inline u8 double_ssc_depth(u8 depth) +{ + return ((depth > 1) ? (depth - 1) : depth); +} + +int rts5228_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock, + u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk) +{ + int err, clk; + u16 n; + u8 clk_divider, mcu_cnt, div; + static const u8 depth[] = { + [RTSX_SSC_DEPTH_4M] = RTS5228_SSC_DEPTH_4M, + [RTSX_SSC_DEPTH_2M] = RTS5228_SSC_DEPTH_2M, + [RTSX_SSC_DEPTH_1M] = RTS5228_SSC_DEPTH_1M, + [RTSX_SSC_DEPTH_500K] = RTS5228_SSC_DEPTH_512K, + }; + + if (initial_mode) { + /* We use 250k(around) here, in initial stage */ + clk_divider = SD_CLK_DIVIDE_128; + card_clock = 30000000; + } else { + clk_divider = SD_CLK_DIVIDE_0; + } + err = rtsx_pci_write_register(pcr, SD_CFG1, + SD_CLK_DIVIDE_MASK, clk_divider); + if (err < 0) + return err; + + card_clock /= 1000000; + pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock); + + clk = card_clock; + if (!initial_mode && double_clk) + clk = card_clock * 2; + pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n", + clk, pcr->cur_clock); + + if (clk == pcr->cur_clock) + return 0; + + if (pcr->ops->conv_clk_and_div_n) + n = pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N); + else + n = clk - 4; + if ((clk <= 4) || (n > 396)) + return -EINVAL; + + mcu_cnt = 125/clk + 3; + if (mcu_cnt > 15) + mcu_cnt = 15; + + div = CLK_DIV_1; + while ((n < MIN_DIV_N_PCR - 4) && (div < CLK_DIV_8)) { + if (pcr->ops->conv_clk_and_div_n) { + int dbl_clk = pcr->ops->conv_clk_and_div_n(n, + DIV_N_TO_CLK) * 2; + n = pcr->ops->conv_clk_and_div_n(dbl_clk, + CLK_TO_DIV_N); + } else { + n = (n + 4) * 2 - 4; + } + div++; + } + + n = (n / 2) - 1; + pcr_dbg(pcr, "n = %d, div = %d\n", n, div); + + ssc_depth = depth[ssc_depth]; + if (double_clk) + ssc_depth = double_ssc_depth(ssc_depth); + + if (ssc_depth) { + if (div == CLK_DIV_2) { + if (ssc_depth > 1) + ssc_depth -= 1; + else + ssc_depth = RTS5228_SSC_DEPTH_8M; + } else if (div == CLK_DIV_4) { + if (ssc_depth > 2) + ssc_depth -= 2; + else + ssc_depth = RTS5228_SSC_DEPTH_8M; + } else if (div == CLK_DIV_8) { + if (ssc_depth > 3) + ssc_depth -= 3; + else + ssc_depth = RTS5228_SSC_DEPTH_8M; + } + } else { + ssc_depth = 0; + } + pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth); + + rtsx_pci_init_cmd(pcr); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, + CLK_LOW_FREQ, CLK_LOW_FREQ); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, + 0xFF, (div << 4) | mcu_cnt); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, 
SSC_RSTB, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, + SSC_DEPTH_MASK, ssc_depth); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB); + if (vpclk) { + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, + PHASE_NOT_RESET, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL, + PHASE_NOT_RESET, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, + PHASE_NOT_RESET, PHASE_NOT_RESET); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL, + PHASE_NOT_RESET, PHASE_NOT_RESET); + } + + err = rtsx_pci_send_cmd(pcr, 2000); + if (err < 0) + return err; + + /* Wait SSC clock stable */ + udelay(SSC_CLOCK_STABLE_WAIT); + err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0); + if (err < 0) + return err; + + pcr->cur_clock = clk; + return 0; + +} + +void rts5228_init_params(struct rtsx_pcr *pcr) +{ + struct rtsx_cr_option *option = &pcr->option; + struct rtsx_hw_param *hw_param = &pcr->hw_param; + + pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104; + pcr->num_slots = 1; + pcr->ops = &rts5228_pcr_ops; + + pcr->flags = 0; + pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT; + pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B; + pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B; + pcr->aspm_en = ASPM_L1_EN; + pcr->tx_initial_phase = SET_CLOCK_PHASE(28, 27, 11); + pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5); + + pcr->ic_version = rts5228_get_ic_version(pcr); + pcr->sd_pull_ctl_enable_tbl = rts5228_sd_pull_ctl_enable_tbl; + pcr->sd_pull_ctl_disable_tbl = rts5228_sd_pull_ctl_disable_tbl; + + pcr->reg_pm_ctrl3 = RTS5228_AUTOLOAD_CFG3; + + option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN + | LTR_L1SS_PWR_GATE_EN); + option->ltr_en = true; + + /* init latency of active, idle, L1OFF to 60us, 300us, 3ms */ + option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF; + option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF; + option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF; + option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF; + option->ltr_l1off_sspwrgate = 0x7F; + option->ltr_l1off_snooze_sspwrgate = 0x78; + + option->ocp_en = 1; + hw_param->interrupt_en |= SD_OC_INT_EN; + hw_param->ocp_glitch = SD_OCP_GLITCH_800U; + option->sd_800mA_ocp_thd = RTS5228_LDO1_OCP_THD_930; +} diff --git a/drivers/misc/cardreader/rts5228.h b/drivers/misc/cardreader/rts5228.h new file mode 100644 index 000000000000..6a872246aeed --- /dev/null +++ b/drivers/misc/cardreader/rts5228.h @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Driver for Realtek PCI-Express card reader + * + * Copyright(c) 2018-2019 Realtek Semiconductor Corp. All rights reserved. 
+ * + * Author: + * Ricky WU + * Rui FENG + * Wei WANG + */ +#ifndef RTS5228_H +#define RTS5228_H + + +#define RTS5228_AUTOLOAD_CFG0 0xFF7B +#define RTS5228_AUTOLOAD_CFG1 0xFF7C +#define RTS5228_AUTOLOAD_CFG2 0xFF7D +#define RTS5228_AUTOLOAD_CFG3 0xFF7E +#define RTS5228_AUTOLOAD_CFG4 0xFF7F + +#define RTS5228_REG_VREF 0xFE97 +#define RTS5228_PWD_SUSPND_EN (1 << 4) + +#define RTS5228_PAD_H3L1 0xFF79 +#define PAD_GPIO_H3L1 (1 << 3) + +/* SSC_CTL2 0xFC12 */ +#define RTS5228_SSC_DEPTH_MASK 0x07 +#define RTS5228_SSC_DEPTH_DISALBE 0x00 +#define RTS5228_SSC_DEPTH_8M 0x01 +#define RTS5228_SSC_DEPTH_4M 0x02 +#define RTS5228_SSC_DEPTH_2M 0x03 +#define RTS5228_SSC_DEPTH_1M 0x04 +#define RTS5228_SSC_DEPTH_512K 0x05 +#define RTS5228_SSC_DEPTH_256K 0x06 +#define RTS5228_SSC_DEPTH_128K 0x07 + +/* DMACTL 0xFE2C */ +#define RTS5228_DMA_PACK_SIZE_MASK 0xF0 + +#define RTS5228_REG_LDO12_CFG 0xFF6E +#define RTS5228_LDO12_VO_TUNE_MASK (0x07<<1) +#define RTS5228_LDO12_100 (0x00<<1) +#define RTS5228_LDO12_105 (0x01<<1) +#define RTS5228_LDO12_110 (0x02<<1) +#define RTS5228_LDO12_115 (0x03<<1) +#define RTS5228_LDO12_120 (0x04<<1) +#define RTS5228_LDO12_125 (0x05<<1) +#define RTS5228_LDO12_130 (0x06<<1) +#define RTS5228_LDO12_135 (0x07<<1) +#define RTS5228_REG_PWD_LDO12 (0x01<<0) + +#define RTS5228_REG_LDO12_L12 0xFF6F +#define RTS5228_LDO12_L12_MASK (0x07<<4) +#define RTS5228_LDO12_L12_120 (0x04<<4) + +/* LDO control register */ +#define RTS5228_CARD_PWR_CTL 0xFD50 +#define RTS5228_PUPDC (0x01<<5) + +#define RTS5228_LDO1233318_POW_CTL 0xFF70 +#define RTS5228_LDO3318_POWERON (0x01<<3) +#define RTS5228_LDO1_POWEROFF (0x00<<0) +#define RTS5228_LDO1_SOFTSTART (0x01<<0) +#define RTS5228_LDO1_FULLON (0x03<<0) +#define RTS5228_LDO1_POWERON_MASK (0x03<<0) +#define RTS5228_LDO_POWERON_MASK (0x0F<<0) + +#define RTS5228_DV3318_CFG 0xFF71 +#define RTS5228_DV3318_TUNE_MASK (0x07<<4) +#define RTS5228_DV3318_17 (0x00<<4) +#define RTS5228_DV3318_1V75 (0x01<<4) +#define RTS5228_DV3318_18 (0x02<<4) +#define RTS5228_DV3318_1V85 (0x03<<4) +#define RTS5228_DV3318_19 (0x04<<4) +#define RTS5228_DV3318_33 (0x07<<4) +#define RTS5228_DV3318_SR_MASK (0x03<<2) +#define RTS5228_DV3318_SR_0 (0x00<<2) +#define RTS5228_DV3318_SR_250 (0x01<<2) +#define RTS5228_DV3318_SR_500 (0x02<<2) +#define RTS5228_DV3318_SR_1000 (0x03<<2) + +#define RTS5228_LDO1_CFG0 0xFF72 +#define RTS5228_LDO1_OCP_THD_MASK (0x07<<5) +#define RTS5228_LDO1_OCP_EN (0x01<<4) +#define RTS5228_LDO1_OCP_LMT_THD_MASK (0x03<<2) +#define RTS5228_LDO1_OCP_LMT_EN (0x01<<1) + +#define RTS5228_LDO1_OCP_THD_730 (0x00<<5) +#define RTS5228_LDO1_OCP_THD_780 (0x01<<5) +#define RTS5228_LDO1_OCP_THD_860 (0x02<<5) +#define RTS5228_LDO1_OCP_THD_930 (0x03<<5) +#define RTS5228_LDO1_OCP_THD_1000 (0x04<<5) +#define RTS5228_LDO1_OCP_THD_1070 (0x05<<5) +#define RTS5228_LDO1_OCP_THD_1140 (0x06<<5) +#define RTS5228_LDO1_OCP_THD_1220 (0x07<<5) + +#define RTS5228_LDO1_LMT_THD_450 (0x00<<2) +#define RTS5228_LDO1_LMT_THD_1000 (0x01<<2) +#define RTS5228_LDO1_LMT_THD_1500 (0x02<<2) +#define RTS5228_LDO1_LMT_THD_2000 (0x03<<2) + +#define RTS5228_LDO1_CFG1 0xFF73 +#define RTS5228_LDO1_SR_TIME_MASK (0x03<<6) +#define RTS5228_LDO1_SR_0_0 (0x00<<6) +#define RTS5228_LDO1_SR_0_25 (0x01<<6) +#define RTS5228_LDO1_SR_0_5 (0x02<<6) +#define RTS5228_LDO1_SR_1_0 (0x03<<6) +#define RTS5228_LDO1_TUNE_MASK (0x07<<1) +#define RTS5228_LDO1_18 (0x05<<1) +#define RTS5228_LDO1_33 (0x07<<1) +#define RTS5228_LDO1_PWD_MASK (0x01<<0) + +#define RTS5228_AUXCLK_GAT_CTL 0xFF74 + +#define RTS5228_REG_RREF_CTL_0 0xFF75 +#define 
RTS5228_FORCE_RREF_EXTL (0x01<<7) +#define RTS5228_REG_BG33_MASK (0x07<<0) +#define RTS5228_RREF_12_1V (0x04<<0) +#define RTS5228_RREF_12_3V (0x05<<0) + +#define RTS5228_REG_RREF_CTL_1 0xFF76 + +#define RTS5228_REG_RREF_CTL_2 0xFF77 +#define RTS5228_TEST_INTL_RREF (0x01<<7) +#define RTS5228_DGLCH_TIME_MASK (0x03<<5) +#define RTS5228_DGLCH_TIME_50 (0x00<<5) +#define RTS5228_DGLCH_TIME_75 (0x01<<5) +#define RTS5228_DGLCH_TIME_100 (0x02<<5) +#define RTS5228_DGLCH_TIME_125 (0x03<<5) +#define RTS5228_REG_REXT_TUNE_MASK (0x1F<<0) + +#define RTS5228_REG_PME_FORCE_CTL 0xFF78 +#define FORCE_PM_CONTROL 0x20 +#define FORCE_PM_VALUE 0x10 + + +/* Single LUN, support SD */ +#define DEFAULT_SINGLE 0 +#define SD_LUN 1 + + +/* For Change_FPGA_SSCClock Function */ +#define MULTIPLY_BY_1 0x00 +#define MULTIPLY_BY_2 0x01 +#define MULTIPLY_BY_3 0x02 +#define MULTIPLY_BY_4 0x03 +#define MULTIPLY_BY_5 0x04 +#define MULTIPLY_BY_6 0x05 +#define MULTIPLY_BY_7 0x06 +#define MULTIPLY_BY_8 0x07 +#define MULTIPLY_BY_9 0x08 +#define MULTIPLY_BY_10 0x09 + +#define DIVIDE_BY_2 0x01 +#define DIVIDE_BY_3 0x02 +#define DIVIDE_BY_4 0x03 +#define DIVIDE_BY_5 0x04 +#define DIVIDE_BY_6 0x05 +#define DIVIDE_BY_7 0x06 +#define DIVIDE_BY_8 0x07 +#define DIVIDE_BY_9 0x08 +#define DIVIDE_BY_10 0x09 + +int rts5228_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock, + u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk); + +#endif /* RTS5228_H */ diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c index 0d5928bc1b6d..5ff690d2e9a9 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -23,6 +23,7 @@ #include "rtsx_pcr.h" #include "rts5261.h" +#include "rts5228.h" static bool msi_en = true; module_param(msi_en, bool, S_IRUGO | S_IWUSR); @@ -50,6 +51,7 @@ static const struct pci_device_id rtsx_pci_ids[] = { { PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 }, { PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 }, { PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 }, + { PCI_DEVICE(0x10EC, 0x5228), PCI_CLASS_OTHERS << 16, 0xFF0000 }, { 0, } }; @@ -206,16 +208,10 @@ int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val) int err, i, finished = 0; u8 tmp; - rtsx_pci_init_cmd(pcr); - - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA0, 0xFF, (u8)val); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA1, 0xFF, (u8)(val >> 8)); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x81); - - err = rtsx_pci_send_cmd(pcr, 100); - if (err < 0) - return err; + rtsx_pci_write_register(pcr, PHYDATA0, 0xFF, (u8)val); + rtsx_pci_write_register(pcr, PHYDATA1, 0xFF, (u8)(val >> 8)); + rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr); + rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x81); for (i = 0; i < 100000; i++) { err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp); @@ -247,16 +243,10 @@ int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val) { int err, i, finished = 0; u16 data; - u8 *ptr, tmp; - - rtsx_pci_init_cmd(pcr); + u8 tmp, val1, val2; - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x80); - - err = rtsx_pci_send_cmd(pcr, 100); - if (err < 0) - return err; + rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr); + rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x80); for (i = 0; i < 100000; i++) { err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp); @@ -272,17 +262,9 @@ 
int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val) if (!finished) return -ETIMEDOUT; - rtsx_pci_init_cmd(pcr); - - rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA0, 0, 0); - rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA1, 0, 0); - - err = rtsx_pci_send_cmd(pcr, 100); - if (err < 0) - return err; - - ptr = rtsx_pci_get_cmd_data(pcr); - data = ((u16)ptr[1] << 8) | ptr[0]; + rtsx_pci_read_register(pcr, PHYDATA0, &val1); + rtsx_pci_read_register(pcr, PHYDATA1, &val2); + data = val1 | (val2 << 8); if (val) *val = data; @@ -417,7 +399,7 @@ static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr, if (end) option |= RTSX_SG_END; - if (PCI_PID(pcr) == PID_5261) { + if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5228)) { if (len > 0xFFFF) val = ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16) | (((u64)len >> 16) << 6) | option; @@ -723,6 +705,9 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock, if (PCI_PID(pcr) == PID_5261) return rts5261_pci_switch_clock(pcr, card_clock, ssc_depth, initial_mode, double_clk, vpclk); + if (PCI_PID(pcr) == PID_5228) + return rts5228_pci_switch_clock(pcr, card_clock, + ssc_depth, initial_mode, double_clk, vpclk); if (initial_mode) { /* We use 250k(around) here, in initial stage */ @@ -1202,6 +1187,36 @@ void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr) } } +void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr) +{ + u16 val; + + if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) { + rtsx_pci_read_phy_register(pcr, 0x01, &val); + val |= 1<<9; + rtsx_pci_write_phy_register(pcr, 0x01, val); + } + rtsx_pci_write_register(pcr, REG_CFG_OOBS_OFF_TIMER, 0xFF, 0x32); + rtsx_pci_write_register(pcr, REG_CFG_OOBS_ON_TIMER, 0xFF, 0x05); + rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x83); + rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0xDE); + +} + +void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr) +{ + u16 val; + + if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) { + rtsx_pci_read_phy_register(pcr, 0x01, &val); + val &= ~(1<<9); + rtsx_pci_write_phy_register(pcr, 0x01, val); + } + rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x03); + rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0x00); + +} + int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr) { rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN | @@ -1233,6 +1248,10 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) { int err; + if (PCI_PID(pcr) == PID_5228) + rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1, RTS5228_LDO1_SR_TIME_MASK, + RTS5228_LDO1_SR_0_5); + pcr->pcie_cap = pci_find_capability(pcr->pci, PCI_CAP_ID_EXP); rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr); @@ -1280,6 +1299,9 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) if (PCI_PID(pcr) == PID_5261) rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, RTS5261_SSC_DEPTH_2M); + else if (PCI_PID(pcr) == PID_5228) + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, + RTS5228_SSC_DEPTH_2M); else rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12); @@ -1314,6 +1336,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) case PID_525A: case PID_5260: case PID_5261: + case PID_5228: rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1); break; default: @@ -1401,6 +1424,10 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr) case 0x5261: rts5261_init_params(pcr); break; + + case 0x5228: + rts5228_init_params(pcr); + break; } pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n", diff --git a/drivers/misc/cardreader/rtsx_pcr.h 
b/drivers/misc/cardreader/rtsx_pcr.h index 024cbd998b2a..6b322db8738e 100644 --- a/drivers/misc/cardreader/rtsx_pcr.h +++ b/drivers/misc/cardreader/rtsx_pcr.h @@ -53,6 +53,7 @@ void rts525a_init_params(struct rtsx_pcr *pcr); void rtl8411b_init_params(struct rtsx_pcr *pcr); void rts5260_init_params(struct rtsx_pcr *pcr); void rts5261_init_params(struct rtsx_pcr *pcr); +void rts5228_init_params(struct rtsx_pcr *pcr); static inline u8 map_sd_drive(int idx) { @@ -70,6 +71,8 @@ static inline u8 map_sd_drive(int idx) #define rts5209_vendor_setting1_valid(reg) (!((reg) & 0x80)) #define rts5209_vendor_setting2_valid(reg) ((reg) & 0x80) +#define rtsx_check_mmc_support(reg) ((reg) & 0x10) +#define rtsx_reg_to_rtd3(reg) ((reg) & 0x02) #define rtsx_reg_to_aspm(reg) (((reg) >> 28) & 0x03) #define rtsx_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 26) & 0x03) #define rtsx_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 5) & 0x03) @@ -100,6 +103,8 @@ void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr); void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr); int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val); void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr); +void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr); +void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr); int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr); int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr); diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c index 5a71f6678fd3..bce910de8b4d 100644 --- a/drivers/mmc/host/rtsx_pci_sdmmc.c +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c @@ -1336,6 +1336,8 @@ static void init_extra_caps(struct realtek_pci_sdmmc *host) mmc->caps |= MMC_CAP_1_8V_DDR; if (pcr->extra_caps & EXTRA_CAPS_MMC_8BIT) mmc->caps |= MMC_CAP_8_BIT_DATA; + if (pcr->extra_caps & EXTRA_CAPS_NO_MMC) + mmc->caps2 |= MMC_CAP2_NO_MMC; } static void realtek_init_host(struct realtek_pci_sdmmc *host) diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h index e8780d4e4636..27a6ea82aeea 100644 --- a/include/linux/rtsx_pci.h +++ b/include/linux/rtsx_pci.h @@ -305,6 +305,8 @@ #define SD30_CLK_STOP_CFG0 0x01 #define REG_PRE_RW_MODE 0xFD70 #define EN_INFINITE_MODE 0x01 +#define REG_CRC_DUMMY_0 0xFD71 +#define CFG_SD_POW_AUTO_PD (1<<0) #define SRCTL 0xFC13 @@ -599,6 +601,7 @@ #define ASPM_FORCE_CTL 0xFE57 #define FORCE_ASPM_CTL0 0x10 +#define FORCE_ASPM_CTL1 0x20 #define FORCE_ASPM_VAL_MASK 0x03 #define FORCE_ASPM_L1_EN 0x02 #define FORCE_ASPM_L0_EN 0x01 @@ -667,6 +670,11 @@ #define PM_WAKE_EN 0x01 #define PM_CTRL4 0xFF47 +#define REG_CFG_OOBS_OFF_TIMER 0xFEA6 +#define REG_CFG_OOBS_ON_TIMER 0xFEA7 +#define REG_CFG_VCM_ON_TIMER 0xFEA8 +#define REG_CFG_OOBS_POLLING 0xFEA9 + /* Memory mapping */ #define SRAM_BASE 0xE600 #define RBUF_BASE 0xF400 @@ -1204,6 +1212,7 @@ struct rtsx_pcr { #define EXTRA_CAPS_MMC_HSDDR (1 << 3) #define EXTRA_CAPS_MMC_HS200 (1 << 4) #define EXTRA_CAPS_MMC_8BIT (1 << 5) +#define EXTRA_CAPS_NO_MMC (1 << 7) u32 extra_caps; #define IC_VER_A 0 @@ -1242,6 +1251,7 @@ struct rtsx_pcr { u8 dma_error_count; u8 ocp_stat; u8 ocp_stat2; + u8 rtd3_en; }; #define PID_524A 0x524A @@ -1250,6 +1260,7 @@ struct rtsx_pcr { #define PID_525A 0x525A #define PID_5260 0x5260 #define PID_5261 0x5261 +#define PID_5228 0x5228 #define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid)) #define PCI_VID(pcr) ((pcr)->pci->vendor) -- cgit v1.2.3 From 776499058167d9f41c8eb468e21fe2d241c0b8e6 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 1 Jul 2020 16:18:29 +0200 Subject: mm/memblock: expose only miminal interface 
to add/walk physmem "physmem" in the memblock allocator is somewhat weird: it's not actually used for allocation, it's simply information collected during boot, which describes the unmodified physical memory map at boot time, without any standby/hotplugged memory. It's only used on s390 and is currently the only reason s390 keeps using CONFIG_ARCH_KEEP_MEMBLOCK. Physmem isn't numa aware and current users don't specify any flags. Let's hide it from the user, exposing only for_each_physmem(), and simplify. The interface for physmem is now really minimalistic: - memblock_physmem_add() to add ranges - for_each_physmem() / __next_physmem_range() to walk physmem ranges Don't place it into an __init section and don't discard it without CONFIG_ARCH_KEEP_MEMBLOCK. As we're reusing __next_mem_range(), remove the __meminit notifier to avoid section mismatch warnings once CONFIG_ARCH_KEEP_MEMBLOCK is no longer used with CONFIG_HAVE_MEMBLOCK_PHYS_MAP. While fixing up the documentation, sneak in some related cleanups. We can stop setting CONFIG_ARCH_KEEP_MEMBLOCK for s390 next. Cc: Vasily Gorbik Cc: Christian Borntraeger Cc: Mike Rapoport Cc: Andrew Morton Signed-off-by: David Hildenbrand Reviewed-by: Mike Rapoport Message-Id: <20200701141830.18749-2-david@redhat.com> Signed-off-by: Heiko Carstens --- arch/s390/kernel/crash_dump.c | 6 ++--- include/linux/memblock.h | 28 ++++++++++++++++++--- mm/memblock.c | 57 ++++++++++++++++++++++--------------------- 3 files changed, 55 insertions(+), 36 deletions(-) (limited to 'include') diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c index f96a5857bbfd..c42ce348103c 100644 --- a/arch/s390/kernel/crash_dump.c +++ b/arch/s390/kernel/crash_dump.c @@ -549,8 +549,7 @@ static int get_mem_chunk_cnt(void) int cnt = 0; u64 idx; - for_each_mem_range(idx, &memblock.physmem, &oldmem_type, NUMA_NO_NODE, - MEMBLOCK_NONE, NULL, NULL, NULL) + for_each_physmem_range(idx, &oldmem_type, NULL, NULL) cnt++; return cnt; } @@ -563,8 +562,7 @@ static void loads_init(Elf64_Phdr *phdr, u64 loads_offset) phys_addr_t start, end; u64 idx; - for_each_mem_range(idx, &memblock.physmem, &oldmem_type, NUMA_NO_NODE, - MEMBLOCK_NONE, &start, &end, NULL) { + for_each_physmem_range(idx, &oldmem_type, &start, &end) { phdr->p_filesz = end - start; phdr->p_type = PT_LOAD; phdr->p_offset = start; diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 017fae833d4a..9d925db0d355 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -77,16 +77,12 @@ struct memblock_type { * @current_limit: physical address of the current allocation limit * @memory: usable memory regions * @reserved: reserved memory regions - * @physmem: all physical memory */ struct memblock { bool bottom_up; /* is bottom up direction? */ phys_addr_t current_limit; struct memblock_type memory; struct memblock_type reserved; -#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP - struct memblock_type physmem; -#endif }; extern struct memblock memblock; @@ -145,6 +141,30 @@ void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, void __memblock_free_late(phys_addr_t base, phys_addr_t size); +#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP +static inline void __next_physmem_range(u64 *idx, struct memblock_type *type, + phys_addr_t *out_start, + phys_addr_t *out_end) +{ + extern struct memblock_type physmem; + + __next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type, + out_start, out_end, NULL); +} + +/** + * for_each_physmem_range - iterate through physmem areas not included in type. 
+ * @i: u64 used as loop variable + * @type: ptr to memblock_type which excludes from the iteration, can be %NULL + * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL + * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL + */ +#define for_each_physmem_range(i, type, p_start, p_end) \ + for (i = 0, __next_physmem_range(&i, type, p_start, p_end); \ + i != (u64)ULLONG_MAX; \ + __next_physmem_range(&i, type, p_start, p_end)) +#endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */ + /** * for_each_mem_range - iterate through memblock areas from type_a and not * included in type_b. Or just type_a if type_b is NULL. diff --git a/mm/memblock.c b/mm/memblock.c index 39aceafc57f6..45f198750be9 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -44,19 +44,20 @@ * in the system, for instance when the memory is restricted with * ``mem=`` command line parameter * * ``reserved`` - describes the regions that were allocated - * * ``physmap`` - describes the actual physical memory regardless of - * the possible restrictions; the ``physmap`` type is only available - * on some architectures. + * * ``physmem`` - describes the actual physical memory available during + * boot regardless of the possible restrictions and memory hot(un)plug; + * the ``physmem`` type is only available on some architectures. * * Each region is represented by :c:type:`struct memblock_region` that * defines the region extents, its attributes and NUMA node id on NUMA * systems. Every memory type is described by the :c:type:`struct * memblock_type` which contains an array of memory regions along with - * the allocator metadata. The memory types are nicely wrapped with - * :c:type:`struct memblock`. This structure is statically initialzed - * at build time. The region arrays for the "memory" and "reserved" - * types are initially sized to %INIT_MEMBLOCK_REGIONS and for the - * "physmap" type to %INIT_PHYSMEM_REGIONS. + * the allocator metadata. The "memory" and "reserved" types are nicely + * wrapped with :c:type:`struct memblock`. This structure is statically + * initialized at build time. The region arrays are initially sized to + * %INIT_MEMBLOCK_REGIONS for "memory" and %INIT_MEMBLOCK_RESERVED_REGIONS + * for "reserved". The region array for "physmem" is initially sized to + * %INIT_PHYSMEM_REGIONS. * The memblock_allow_resize() enables automatic resizing of the region * arrays during addition of new regions. This feature should be used * with care so that memory allocated for the region array will not @@ -87,8 +88,8 @@ * function frees all the memory to the buddy page allocator. * * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the - * memblock data structures will be discarded after the system - * initialization completes. + * memblock data structures (except "physmem") will be discarded after the + * system initialization completes. 
*/ #ifndef CONFIG_NEED_MULTIPLE_NODES @@ -104,7 +105,7 @@ unsigned long long max_possible_pfn; static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock; #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP -static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock; +static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS]; #endif struct memblock memblock __initdata_memblock = { @@ -118,17 +119,19 @@ struct memblock memblock __initdata_memblock = { .reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS, .reserved.name = "reserved", -#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP - .physmem.regions = memblock_physmem_init_regions, - .physmem.cnt = 1, /* empty dummy entry */ - .physmem.max = INIT_PHYSMEM_REGIONS, - .physmem.name = "physmem", -#endif - .bottom_up = false, .current_limit = MEMBLOCK_ALLOC_ANYWHERE, }; +#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP +struct memblock_type physmem = { + .regions = memblock_physmem_init_regions, + .cnt = 1, /* empty dummy entry */ + .max = INIT_PHYSMEM_REGIONS, + .name = "physmem", +}; +#endif + int memblock_debug __initdata_memblock; static bool system_has_some_mirror __initdata_memblock = false; static int memblock_can_resize __initdata_memblock; @@ -838,7 +841,7 @@ int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size) memblock_dbg("%s: [%pa-%pa] %pS\n", __func__, &base, &end, (void *)_RET_IP_); - return memblock_add_range(&memblock.physmem, base, size, MAX_NUMNODES, 0); + return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0); } #endif @@ -1019,12 +1022,10 @@ static bool should_skip_region(struct memblock_region *m, int nid, int flags) * As both region arrays are sorted, the function advances the two indices * in lockstep and returns each intersection. */ -void __init_memblock __next_mem_range(u64 *idx, int nid, - enum memblock_flags flags, - struct memblock_type *type_a, - struct memblock_type *type_b, - phys_addr_t *out_start, - phys_addr_t *out_end, int *out_nid) +void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags, + struct memblock_type *type_a, + struct memblock_type *type_b, phys_addr_t *out_start, + phys_addr_t *out_end, int *out_nid) { int idx_a = *idx & 0xffffffff; int idx_b = *idx >> 32; @@ -1924,7 +1925,7 @@ void __init_memblock __memblock_dump_all(void) memblock_dump(&memblock.memory); memblock_dump(&memblock.reserved); #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP - memblock_dump(&memblock.physmem); + memblock_dump(&physmem); #endif } @@ -2064,8 +2065,8 @@ static int __init memblock_init_debugfs(void) debugfs_create_file("reserved", 0444, root, &memblock.reserved, &memblock_debug_fops); #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP - debugfs_create_file("physmem", 0444, root, - &memblock.physmem, &memblock_debug_fops); + debugfs_create_file("physmem", 0444, root, &physmem, + &memblock_debug_fops); #endif return 0; -- cgit v1.2.3 From 287905e68dd29873bcb7986a8290cd1e4cfde600 Mon Sep 17 00:00:00 2001 From: Saravana Kannan Date: Thu, 21 May 2020 12:17:58 -0700 Subject: driver core: Expose device link details in sysfs It's helpful to be able to look at device link details from sysfs. So, expose it in sysfs. Say device-A is supplier of device-B. 
These are the additional files this patch would create: /sys/class/devlink/device-A:device-B/ auto_remove_on consumer/ -> .../device-B/ runtime_pm status supplier/ -> .../device-A/ sync_state_only /sys/devices/.../device-A/ consumer:device-B/ -> /sys/class/devlink/device-A:device-B/ /sys/devices/.../device-B/ supplier:device-A/ -> /sys/class/devlink/device-A:device-B/ That way: To get a list of all the device link in the system: ls /sys/class/devlink/ To get the consumer names and links of a device: ls -d /sys/devices/.../device-X/consumer:* To get the supplier names and links of a device: ls -d /sys/devices/.../device-X/supplier:* Signed-off-by: Saravana Kannan Link: https://lore.kernel.org/r/20200521191800.136035-2-saravanak@google.com Signed-off-by: Greg Kroah-Hartman --- Documentation/ABI/testing/sysfs-class-devlink | 126 ++++++++++++++ Documentation/ABI/testing/sysfs-devices-consumer | 8 + Documentation/ABI/testing/sysfs-devices-supplier | 8 + drivers/base/core.c | 211 ++++++++++++++++++++++- include/linux/device.h | 58 ++++--- 5 files changed, 375 insertions(+), 36 deletions(-) create mode 100644 Documentation/ABI/testing/sysfs-class-devlink create mode 100644 Documentation/ABI/testing/sysfs-devices-consumer create mode 100644 Documentation/ABI/testing/sysfs-devices-supplier (limited to 'include') diff --git a/Documentation/ABI/testing/sysfs-class-devlink b/Documentation/ABI/testing/sysfs-class-devlink new file mode 100644 index 000000000000..3a24973abb83 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-devlink @@ -0,0 +1,126 @@ +What: /sys/class/devlink/.../ +Date: May 2020 +Contact: Saravana Kannan +Description: + Provide a place in sysfs for the device link objects in the + kernel at any given time. The name of a device link directory, + denoted as ... above, is of the form : + where is the supplier device name and is + the consumer device name. + +What: /sys/class/devlink/.../auto_remove_on +Date: May 2020 +Contact: Saravana Kannan +Description: + This file indicates if the device link will ever be + automatically removed by the driver core when the consumer and + supplier devices themselves are still present. + + This will be one of the following strings: + + 'consumer unbind' + 'supplier unbind' + 'never' + + 'consumer unbind' means the device link will be removed when + the consumer's driver is unbound from the consumer device. + + 'supplier unbind' means the device link will be removed when + the supplier's driver is unbound from the supplier device. + + 'never' means the device link will not be automatically removed + when as long as the supplier and consumer devices themselves + are still present. + +What: /sys/class/devlink/.../consumer +Date: May 2020 +Contact: Saravana Kannan +Description: + This file is a symlink to the consumer device's sysfs directory. + +What: /sys/class/devlink/.../runtime_pm +Date: May 2020 +Contact: Saravana Kannan +Description: + This file indicates if the device link has any impact on the + runtime power management behavior of the consumer and supplier + devices. For example: Making sure the supplier doesn't enter + runtime suspend while the consumer is active. + + This will be one of the following strings: + + '0' - Does not affect runtime power management + '1' - Affects runtime power management + +What: /sys/class/devlink/.../status +Date: May 2020 +Contact: Saravana Kannan +Description: + This file indicates the status of the device link. 
The status + of a device link is affected by whether the supplier and + consumer devices have been bound to their corresponding + drivers. The status of a device link also affects the binding + and unbinding of the supplier and consumer devices with their + drivers and also affects whether the software state of the + supplier device is synced with the hardware state of the + supplier device after boot up. + See also: sysfs-devices-state_synced. + + This will be one of the following strings: + + 'not tracked' + 'dormant' + 'available' + 'consumer probing' + 'active' + 'supplier unbinding' + 'unknown' + + 'not tracked' means this device link does not track the status + and has no impact on the binding, unbinding and syncing the + hardware and software device state. + + 'dormant' means the supplier and the consumer devices have not + bound to their driver. + + 'available' means the supplier has bound to its driver and is + available to supply resources to the consumer device. + + 'consumer probing' means the consumer device is currently + trying to bind to its driver. + + 'active' means the supplier and consumer devices have both + bound successfully to their drivers. + + 'supplier unbinding' means the supplier devices is currently in + the process of unbinding from its driver. + + 'unknown' means the state of the device link is not any of the + above. If this is ever the value, there's a bug in the kernel. + +What: /sys/class/devlink/.../supplier +Date: May 2020 +Contact: Saravana Kannan +Description: + This file is a symlink to the supplier device's sysfs directory. + +What: /sys/class/devlink/.../sync_state_only +Date: May 2020 +Contact: Saravana Kannan +Description: + This file indicates if the device link is limited to only + affecting the syncing of the hardware and software state of the + supplier device. + + This will be one of the following strings: + + '0' + '1' - Affects runtime power management + + '0' means the device link can affect other device behaviors + like binding/unbinding, suspend/resume, runtime power + management, etc. + + '1' means the device link will only affect the syncing of + hardware and software state of the supplier device after boot + up and doesn't not affect other behaviors of the devices. diff --git a/Documentation/ABI/testing/sysfs-devices-consumer b/Documentation/ABI/testing/sysfs-devices-consumer new file mode 100644 index 000000000000..1f06d74d1c3c --- /dev/null +++ b/Documentation/ABI/testing/sysfs-devices-consumer @@ -0,0 +1,8 @@ +What: /sys/devices/.../consumer: +Date: May 2020 +Contact: Saravana Kannan +Description: + The /sys/devices/.../consumer: are symlinks to device + links where this device is the supplier. denotes the + name of the consumer in that device link. There can be zero or + more of these symlinks for a given device. diff --git a/Documentation/ABI/testing/sysfs-devices-supplier b/Documentation/ABI/testing/sysfs-devices-supplier new file mode 100644 index 000000000000..a919e0db5e90 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-devices-supplier @@ -0,0 +1,8 @@ +What: /sys/devices/.../supplier: +Date: May 2020 +Contact: Saravana Kannan +Description: + The /sys/devices/.../supplier: are symlinks to device + links where this device is the consumer. denotes the + name of the supplier in that device link. There can be zero or + more of these symlinks for a given device. 
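For illustration, a minimal sketch of how such a link is created in the first place, so the sysfs objects described above exist. The helper function and the device names here are hypothetical; only device_link_add() and the DL_FLAG_* constants are the real driver-core API:

#include <linux/device.h>

/* Hypothetical consumer-side helper: make device-A (supplier) a dependency
 * of device-B (consumer). With this patch applied, a successful call shows
 * up as /sys/class/devlink/device-A:device-B/ plus the consumer:/supplier:
 * symlinks under the two devices.
 */
static int device_b_link_to_supplier(struct device *consumer,
				     struct device *supplier)
{
	struct device_link *link;

	/* Managed link: removed automatically when the consumer unbinds,
	 * and the supplier is kept runtime-resumed while the consumer is
	 * RPM-active.
	 */
	link = device_link_add(consumer, supplier,
			       DL_FLAG_AUTOREMOVE_CONSUMER | DL_FLAG_PM_RUNTIME);
	if (!link)
		return -ENODEV;

	return 0;
}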
diff --git a/drivers/base/core.c b/drivers/base/core.c index 67d39a90b45c..ca6403343515 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -235,6 +235,186 @@ void device_pm_move_to_tail(struct device *dev) device_links_read_unlock(idx); } +#define to_devlink(dev) container_of((dev), struct device_link, link_dev) + +static ssize_t status_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + char *status; + + switch (to_devlink(dev)->status) { + case DL_STATE_NONE: + status = "not tracked"; break; + case DL_STATE_DORMANT: + status = "dormant"; break; + case DL_STATE_AVAILABLE: + status = "available"; break; + case DL_STATE_CONSUMER_PROBE: + status = "consumer probing"; break; + case DL_STATE_ACTIVE: + status = "active"; break; + case DL_STATE_SUPPLIER_UNBIND: + status = "supplier unbinding"; break; + default: + status = "unknown"; break; + } + return sprintf(buf, "%s\n", status); +} +static DEVICE_ATTR_RO(status); + +static ssize_t auto_remove_on_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct device_link *link = to_devlink(dev); + char *str; + + if (link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER) + str = "supplier unbind"; + else if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) + str = "consumer unbind"; + else + str = "never"; + + return sprintf(buf, "%s\n", str); +} +static DEVICE_ATTR_RO(auto_remove_on); + +static ssize_t runtime_pm_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct device_link *link = to_devlink(dev); + + return sprintf(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME)); +} +static DEVICE_ATTR_RO(runtime_pm); + +static ssize_t sync_state_only_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct device_link *link = to_devlink(dev); + + return sprintf(buf, "%d\n", !!(link->flags & DL_FLAG_SYNC_STATE_ONLY)); +} +static DEVICE_ATTR_RO(sync_state_only); + +static struct attribute *devlink_attrs[] = { + &dev_attr_status.attr, + &dev_attr_auto_remove_on.attr, + &dev_attr_runtime_pm.attr, + &dev_attr_sync_state_only.attr, + NULL, +}; +ATTRIBUTE_GROUPS(devlink); + +static void devlink_dev_release(struct device *dev) +{ + kfree(to_devlink(dev)); +} + +static struct class devlink_class = { + .name = "devlink", + .owner = THIS_MODULE, + .dev_groups = devlink_groups, + .dev_release = devlink_dev_release, +}; + +static int devlink_add_symlinks(struct device *dev, + struct class_interface *class_intf) +{ + int ret; + size_t len; + struct device_link *link = to_devlink(dev); + struct device *sup = link->supplier; + struct device *con = link->consumer; + char *buf; + + len = max(strlen(dev_name(sup)), strlen(dev_name(con))); + len += strlen("supplier:") + 1; + buf = kzalloc(len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = sysfs_create_link(&link->link_dev.kobj, &sup->kobj, "supplier"); + if (ret) + goto out; + + ret = sysfs_create_link(&link->link_dev.kobj, &con->kobj, "consumer"); + if (ret) + goto err_con; + + snprintf(buf, len, "consumer:%s", dev_name(con)); + ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf); + if (ret) + goto err_con_dev; + + snprintf(buf, len, "supplier:%s", dev_name(sup)); + ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf); + if (ret) + goto err_sup_dev; + + goto out; + +err_sup_dev: + snprintf(buf, len, "consumer:%s", dev_name(con)); + sysfs_remove_link(&sup->kobj, buf); +err_con_dev: + sysfs_remove_link(&link->link_dev.kobj, "consumer"); +err_con: + sysfs_remove_link(&link->link_dev.kobj, "supplier"); +out: + 
kfree(buf); + return ret; +} + +static void devlink_remove_symlinks(struct device *dev, + struct class_interface *class_intf) +{ + struct device_link *link = to_devlink(dev); + size_t len; + struct device *sup = link->supplier; + struct device *con = link->consumer; + char *buf; + + sysfs_remove_link(&link->link_dev.kobj, "consumer"); + sysfs_remove_link(&link->link_dev.kobj, "supplier"); + + len = max(strlen(dev_name(sup)), strlen(dev_name(con))); + len += strlen("supplier:") + 1; + buf = kzalloc(len, GFP_KERNEL); + if (!buf) { + WARN(1, "Unable to properly free device link symlinks!\n"); + return; + } + + snprintf(buf, len, "supplier:%s", dev_name(sup)); + sysfs_remove_link(&con->kobj, buf); + snprintf(buf, len, "consumer:%s", dev_name(con)); + sysfs_remove_link(&sup->kobj, buf); + kfree(buf); +} + +static struct class_interface devlink_class_intf = { + .class = &devlink_class, + .add_dev = devlink_add_symlinks, + .remove_dev = devlink_remove_symlinks, +}; + +static int __init devlink_class_init(void) +{ + int ret; + + ret = class_register(&devlink_class); + if (ret) + return ret; + + ret = class_interface_register(&devlink_class_intf); + if (ret) + class_unregister(&devlink_class); + + return ret; +} +postcore_initcall(devlink_class_init); + #define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \ DL_FLAG_AUTOREMOVE_SUPPLIER | \ DL_FLAG_AUTOPROBE_CONSUMER | \ @@ -407,13 +587,6 @@ struct device_link *device_link_add(struct device *consumer, refcount_set(&link->rpm_active, 1); - if (flags & DL_FLAG_PM_RUNTIME) { - if (flags & DL_FLAG_RPM_ACTIVE) - refcount_inc(&link->rpm_active); - - pm_runtime_new_link(consumer); - } - get_device(supplier); link->supplier = supplier; INIT_LIST_HEAD(&link->s_node); @@ -423,6 +596,25 @@ struct device_link *device_link_add(struct device *consumer, link->flags = flags; kref_init(&link->kref); + link->link_dev.class = &devlink_class; + device_set_pm_not_required(&link->link_dev); + dev_set_name(&link->link_dev, "%s:%s", + dev_name(supplier), dev_name(consumer)); + if (device_register(&link->link_dev)) { + put_device(consumer); + put_device(supplier); + kfree(link); + link = NULL; + goto out; + } + + if (flags & DL_FLAG_PM_RUNTIME) { + if (flags & DL_FLAG_RPM_ACTIVE) + refcount_inc(&link->rpm_active); + + pm_runtime_new_link(consumer); + } + /* Determine the initial link state. */ if (flags & DL_FLAG_STATELESS) link->status = DL_STATE_NONE; @@ -545,7 +737,7 @@ static void device_link_free(struct device_link *link) put_device(link->consumer); put_device(link->supplier); - kfree(link); + device_unregister(&link->link_dev); } #ifdef CONFIG_SRCU @@ -1159,6 +1351,9 @@ static void device_links_purge(struct device *dev) { struct device_link *link, *ln; + if (dev->class == &devlink_class) + return; + mutex_lock(&wfs_lock); list_del(&dev->links.needs_suppliers); mutex_unlock(&wfs_lock); diff --git a/include/linux/device.h b/include/linux/device.h index 9a62f7f43d55..efad96ea17a0 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -386,34 +386,6 @@ enum device_link_state { #define DL_FLAG_MANAGED BIT(6) #define DL_FLAG_SYNC_STATE_ONLY BIT(7) -/** - * struct device_link - Device link representation. - * @supplier: The device on the supplier end of the link. - * @s_node: Hook to the supplier device's list of links to consumers. - * @consumer: The device on the consumer end of the link. - * @c_node: Hook to the consumer device's list of links to suppliers. - * @status: The state of the link (with respect to the presence of drivers). 
- * @flags: Link flags. - * @rpm_active: Whether or not the consumer device is runtime-PM-active. - * @kref: Count repeated addition of the same link. - * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks. - * @supplier_preactivated: Supplier has been made active before consumer probe. - */ -struct device_link { - struct device *supplier; - struct list_head s_node; - struct device *consumer; - struct list_head c_node; - enum device_link_state status; - u32 flags; - refcount_t rpm_active; - struct kref kref; -#ifdef CONFIG_SRCU - struct rcu_head rcu_head; -#endif - bool supplier_preactivated; /* Owned by consumer probe. */ -}; - /** * enum dl_dev_state - Device driver presence tracking information. * @DL_DEV_NO_DRIVER: There is no driver attached to the device. @@ -624,6 +596,36 @@ struct device { #endif }; +/** + * struct device_link - Device link representation. + * @supplier: The device on the supplier end of the link. + * @s_node: Hook to the supplier device's list of links to consumers. + * @consumer: The device on the consumer end of the link. + * @c_node: Hook to the consumer device's list of links to suppliers. + * @link_dev: device used to expose link details in sysfs + * @status: The state of the link (with respect to the presence of drivers). + * @flags: Link flags. + * @rpm_active: Whether or not the consumer device is runtime-PM-active. + * @kref: Count repeated addition of the same link. + * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks. + * @supplier_preactivated: Supplier has been made active before consumer probe. + */ +struct device_link { + struct device *supplier; + struct list_head s_node; + struct device *consumer; + struct list_head c_node; + struct device link_dev; + enum device_link_state status; + u32 flags; + refcount_t rpm_active; + struct kref kref; +#ifdef CONFIG_SRCU + struct rcu_head rcu_head; +#endif + bool supplier_preactivated; /* Owned by consumer probe. */ +}; + static inline struct device *kobj_to_dev(struct kobject *kobj) { return container_of(kobj, struct device, kobj); -- cgit v1.2.3 From 349b00c9c0a052af6720552919c9812dad9013e7 Mon Sep 17 00:00:00 2001 From: Chao Hao Date: Fri, 3 Jul 2020 12:41:18 +0800 Subject: dt-bindings: mediatek: Add bindings for MT6779 This patch adds the description for the MT6779 IOMMU. MT6779 has two IOMMUs, mm_iommu and apu_iommu, which both use the ARM Short-Descriptor translation format. In addition, mm_iommu and apu_iommu are two independent HW instances, so they need to be configured separately. The MT6779 IOMMU hardware diagram is shown below; it is only a brief overview of the IOMMU and does not go into detail on the smi_larb part. EMI | -------------------------------------- | | MM_IOMMU APU_IOMMU | | SMI_COMMOM----------- APU_BUS | | | SMI_LARB(0~11) | | | | | | | -------------- | | | | | Multimedia engine CCU VPU MDLA EMDA All the connections are hardware fixed; software cannot adjust them. 
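For illustration, the header added below encodes each IOMMU master as MTK_M4U_ID(larb, port) = ((larb) << 5) | (port). A small sketch of that arithmetic follows; the two helper functions are hypothetical and exist only to show the decode, they are not part of the binding:

#define MTK_M4U_ID(larb, port)	(((larb) << 5) | (port))

/* e.g. M4U_PORT_MDP_RDMA0 = MTK_M4U_ID(1, 7) = 0x27 */
static inline unsigned int mtk_m4u_id_to_larb(unsigned int id)
{
	return id >> 5;		/* 0x27 >> 5   -> larb 1 */
}

static inline unsigned int mtk_m4u_id_to_port(unsigned int id)
{
	return id & 0x1f;	/* 0x27 & 0x1f -> port 7 */
}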
Signed-off-by: Chao Hao Reviewed-by: Rob Herring Link: https://lore.kernel.org/r/20200703044127.27438-2-chao.hao@mediatek.com Signed-off-by: Joerg Roedel --- .../devicetree/bindings/iommu/mediatek,iommu.txt | 2 + include/dt-bindings/memory/mt6779-larb-port.h | 206 +++++++++++++++++++++ 2 files changed, 208 insertions(+) create mode 100644 include/dt-bindings/memory/mt6779-larb-port.h (limited to 'include') diff --git a/Documentation/devicetree/bindings/iommu/mediatek,iommu.txt b/Documentation/devicetree/bindings/iommu/mediatek,iommu.txt index ce59a505f5a4..c1ccd8582eb2 100644 --- a/Documentation/devicetree/bindings/iommu/mediatek,iommu.txt +++ b/Documentation/devicetree/bindings/iommu/mediatek,iommu.txt @@ -58,6 +58,7 @@ Required properties: - compatible : must be one of the following string: "mediatek,mt2701-m4u" for mt2701 which uses generation one m4u HW. "mediatek,mt2712-m4u" for mt2712 which uses generation two m4u HW. + "mediatek,mt6779-m4u" for mt6779 which uses generation two m4u HW. "mediatek,mt7623-m4u", "mediatek,mt2701-m4u" for mt7623 which uses generation one m4u HW. "mediatek,mt8173-m4u" for mt8173 which uses generation two m4u HW. @@ -78,6 +79,7 @@ Required properties: Specifies the mtk_m4u_id as defined in dt-binding/memory/mt2701-larb-port.h for mt2701, mt7623 dt-binding/memory/mt2712-larb-port.h for mt2712, + dt-binding/memory/mt6779-larb-port.h for mt6779, dt-binding/memory/mt8173-larb-port.h for mt8173, and dt-binding/memory/mt8183-larb-port.h for mt8183. diff --git a/include/dt-bindings/memory/mt6779-larb-port.h b/include/dt-bindings/memory/mt6779-larb-port.h new file mode 100644 index 000000000000..2ad0899fbf2f --- /dev/null +++ b/include/dt-bindings/memory/mt6779-larb-port.h @@ -0,0 +1,206 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019 MediaTek Inc. 
+ * Author: Chao Hao + */ + +#ifndef _DTS_IOMMU_PORT_MT6779_H_ +#define _DTS_IOMMU_PORT_MT6779_H_ + +#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port)) + +#define M4U_LARB0_ID 0 +#define M4U_LARB1_ID 1 +#define M4U_LARB2_ID 2 +#define M4U_LARB3_ID 3 +#define M4U_LARB4_ID 4 +#define M4U_LARB5_ID 5 +#define M4U_LARB6_ID 6 +#define M4U_LARB7_ID 7 +#define M4U_LARB8_ID 8 +#define M4U_LARB9_ID 9 +#define M4U_LARB10_ID 10 +#define M4U_LARB11_ID 11 + +/* larb0 */ +#define M4U_PORT_DISP_POSTMASK0 MTK_M4U_ID(M4U_LARB0_ID, 0) +#define M4U_PORT_DISP_OVL0_HDR MTK_M4U_ID(M4U_LARB0_ID, 1) +#define M4U_PORT_DISP_OVL1_HDR MTK_M4U_ID(M4U_LARB0_ID, 2) +#define M4U_PORT_DISP_OVL0 MTK_M4U_ID(M4U_LARB0_ID, 3) +#define M4U_PORT_DISP_OVL1 MTK_M4U_ID(M4U_LARB0_ID, 4) +#define M4U_PORT_DISP_PVRIC0 MTK_M4U_ID(M4U_LARB0_ID, 5) +#define M4U_PORT_DISP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 6) +#define M4U_PORT_DISP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 7) +#define M4U_PORT_DISP_FAKE0 MTK_M4U_ID(M4U_LARB0_ID, 8) + +/* larb1 */ +#define M4U_PORT_DISP_OVL0_2L_HDR MTK_M4U_ID(M4U_LARB1_ID, 0) +#define M4U_PORT_DISP_OVL1_2L_HDR MTK_M4U_ID(M4U_LARB1_ID, 1) +#define M4U_PORT_DISP_OVL0_2L MTK_M4U_ID(M4U_LARB1_ID, 2) +#define M4U_PORT_DISP_OVL1_2L MTK_M4U_ID(M4U_LARB1_ID, 3) +#define M4U_PORT_DISP_RDMA1 MTK_M4U_ID(M4U_LARB1_ID, 4) +#define M4U_PORT_MDP_PVRIC0 MTK_M4U_ID(M4U_LARB1_ID, 5) +#define M4U_PORT_MDP_PVRIC1 MTK_M4U_ID(M4U_LARB1_ID, 6) +#define M4U_PORT_MDP_RDMA0 MTK_M4U_ID(M4U_LARB1_ID, 7) +#define M4U_PORT_MDP_RDMA1 MTK_M4U_ID(M4U_LARB1_ID, 8) +#define M4U_PORT_MDP_WROT0_R MTK_M4U_ID(M4U_LARB1_ID, 9) +#define M4U_PORT_MDP_WROT0_W MTK_M4U_ID(M4U_LARB1_ID, 10) +#define M4U_PORT_MDP_WROT1_R MTK_M4U_ID(M4U_LARB1_ID, 11) +#define M4U_PORT_MDP_WROT1_W MTK_M4U_ID(M4U_LARB1_ID, 12) +#define M4U_PORT_DISP_FAKE1 MTK_M4U_ID(M4U_LARB1_ID, 13) + +/* larb2-VDEC */ +#define M4U_PORT_HW_VDEC_MC_EXT MTK_M4U_ID(M4U_LARB2_ID, 0) +#define M4U_PORT_HW_VDEC_UFO_EXT MTK_M4U_ID(M4U_LARB2_ID, 1) +#define M4U_PORT_HW_VDEC_PP_EXT MTK_M4U_ID(M4U_LARB2_ID, 2) +#define M4U_PORT_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(M4U_LARB2_ID, 3) +#define M4U_PORT_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(M4U_LARB2_ID, 4) +#define M4U_PORT_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(M4U_LARB2_ID, 5) +#define M4U_PORT_HW_VDEC_TILE_EXT MTK_M4U_ID(M4U_LARB2_ID, 6) +#define M4U_PORT_HW_VDEC_VLD_EXT MTK_M4U_ID(M4U_LARB2_ID, 7) +#define M4U_PORT_HW_VDEC_VLD2_EXT MTK_M4U_ID(M4U_LARB2_ID, 8) +#define M4U_PORT_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(M4U_LARB2_ID, 9) +#define M4U_PORT_HW_VDEC_UFO_ENC_EXT MTK_M4U_ID(M4U_LARB2_ID, 10) +#define M4U_PORT_HW_VDEC_RG_CTRL_DMA_EXT MTK_M4U_ID(M4U_LARB2_ID, 11) + +/* larb3-VENC */ +#define M4U_PORT_VENC_RCPU MTK_M4U_ID(M4U_LARB3_ID, 0) +#define M4U_PORT_VENC_REC MTK_M4U_ID(M4U_LARB3_ID, 1) +#define M4U_PORT_VENC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 2) +#define M4U_PORT_VENC_SV_COMV MTK_M4U_ID(M4U_LARB3_ID, 3) +#define M4U_PORT_VENC_RD_COMV MTK_M4U_ID(M4U_LARB3_ID, 4) +#define M4U_PORT_VENC_NBM_RDMA MTK_M4U_ID(M4U_LARB3_ID, 5) +#define M4U_PORT_VENC_NBM_RDMA_LITE MTK_M4U_ID(M4U_LARB3_ID, 6) +#define M4U_PORT_JPGENC_Y_RDMA MTK_M4U_ID(M4U_LARB3_ID, 7) +#define M4U_PORT_JPGENC_C_RDMA MTK_M4U_ID(M4U_LARB3_ID, 8) +#define M4U_PORT_JPGENC_Q_TABLE MTK_M4U_ID(M4U_LARB3_ID, 9) +#define M4U_PORT_JPGENC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 10) +#define M4U_PORT_JPGDEC_WDMA MTK_M4U_ID(M4U_LARB3_ID, 11) +#define M4U_PORT_JPGDEC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 12) +#define M4U_PORT_VENC_NBM_WDMA MTK_M4U_ID(M4U_LARB3_ID, 13) +#define M4U_PORT_VENC_NBM_WDMA_LITE MTK_M4U_ID(M4U_LARB3_ID, 14) +#define 
M4U_PORT_VENC_CUR_LUMA MTK_M4U_ID(M4U_LARB3_ID, 15) +#define M4U_PORT_VENC_CUR_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 16) +#define M4U_PORT_VENC_REF_LUMA MTK_M4U_ID(M4U_LARB3_ID, 17) +#define M4U_PORT_VENC_REF_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 18) + +/* larb4-dummy */ + +/* larb5-IMG */ +#define M4U_PORT_IMGI_D1 MTK_M4U_ID(M4U_LARB5_ID, 0) +#define M4U_PORT_IMGBI_D1 MTK_M4U_ID(M4U_LARB5_ID, 1) +#define M4U_PORT_DMGI_D1 MTK_M4U_ID(M4U_LARB5_ID, 2) +#define M4U_PORT_DEPI_D1 MTK_M4U_ID(M4U_LARB5_ID, 3) +#define M4U_PORT_LCEI_D1 MTK_M4U_ID(M4U_LARB5_ID, 4) +#define M4U_PORT_SMTI_D1 MTK_M4U_ID(M4U_LARB5_ID, 5) +#define M4U_PORT_SMTO_D2 MTK_M4U_ID(M4U_LARB5_ID, 6) +#define M4U_PORT_SMTO_D1 MTK_M4U_ID(M4U_LARB5_ID, 7) +#define M4U_PORT_CRZO_D1 MTK_M4U_ID(M4U_LARB5_ID, 8) +#define M4U_PORT_IMG3O_D1 MTK_M4U_ID(M4U_LARB5_ID, 9) +#define M4U_PORT_VIPI_D1 MTK_M4U_ID(M4U_LARB5_ID, 10) +#define M4U_PORT_WPE_RDMA1 MTK_M4U_ID(M4U_LARB5_ID, 11) +#define M4U_PORT_WPE_RDMA0 MTK_M4U_ID(M4U_LARB5_ID, 12) +#define M4U_PORT_WPE_WDMA MTK_M4U_ID(M4U_LARB5_ID, 13) +#define M4U_PORT_TIMGO_D1 MTK_M4U_ID(M4U_LARB5_ID, 14) +#define M4U_PORT_MFB_RDMA0 MTK_M4U_ID(M4U_LARB5_ID, 15) +#define M4U_PORT_MFB_RDMA1 MTK_M4U_ID(M4U_LARB5_ID, 16) +#define M4U_PORT_MFB_RDMA2 MTK_M4U_ID(M4U_LARB5_ID, 17) +#define M4U_PORT_MFB_RDMA3 MTK_M4U_ID(M4U_LARB5_ID, 18) +#define M4U_PORT_MFB_WDMA MTK_M4U_ID(M4U_LARB5_ID, 19) +#define M4U_PORT_RESERVE1 MTK_M4U_ID(M4U_LARB5_ID, 20) +#define M4U_PORT_RESERVE2 MTK_M4U_ID(M4U_LARB5_ID, 21) +#define M4U_PORT_RESERVE3 MTK_M4U_ID(M4U_LARB5_ID, 22) +#define M4U_PORT_RESERVE4 MTK_M4U_ID(M4U_LARB5_ID, 23) +#define M4U_PORT_RESERVE5 MTK_M4U_ID(M4U_LARB5_ID, 24) +#define M4U_PORT_RESERVE6 MTK_M4U_ID(M4U_LARB5_ID, 25) + +/* larb6-IMG-VPU */ +#define M4U_PORT_IMG_IPUO MTK_M4U_ID(M4U_LARB6_ID, 0) +#define M4U_PORT_IMG_IPU3O MTK_M4U_ID(M4U_LARB6_ID, 1) +#define M4U_PORT_IMG_IPUI MTK_M4U_ID(M4U_LARB6_ID, 2) + +/* larb7-DVS */ +#define M4U_PORT_DVS_RDMA MTK_M4U_ID(M4U_LARB7_ID, 0) +#define M4U_PORT_DVS_WDMA MTK_M4U_ID(M4U_LARB7_ID, 1) +#define M4U_PORT_DVP_RDMA MTK_M4U_ID(M4U_LARB7_ID, 2) +#define M4U_PORT_DVP_WDMA MTK_M4U_ID(M4U_LARB7_ID, 3) + +/* larb8-IPESYS */ +#define M4U_PORT_FDVT_RDA MTK_M4U_ID(M4U_LARB8_ID, 0) +#define M4U_PORT_FDVT_RDB MTK_M4U_ID(M4U_LARB8_ID, 1) +#define M4U_PORT_FDVT_WRA MTK_M4U_ID(M4U_LARB8_ID, 2) +#define M4U_PORT_FDVT_WRB MTK_M4U_ID(M4U_LARB8_ID, 3) +#define M4U_PORT_FE_RD0 MTK_M4U_ID(M4U_LARB8_ID, 4) +#define M4U_PORT_FE_RD1 MTK_M4U_ID(M4U_LARB8_ID, 5) +#define M4U_PORT_FE_WR0 MTK_M4U_ID(M4U_LARB8_ID, 6) +#define M4U_PORT_FE_WR1 MTK_M4U_ID(M4U_LARB8_ID, 7) +#define M4U_PORT_RSC_RDMA0 MTK_M4U_ID(M4U_LARB8_ID, 8) +#define M4U_PORT_RSC_WDMA MTK_M4U_ID(M4U_LARB8_ID, 9) + +/* larb9-CAM */ +#define M4U_PORT_CAM_IMGO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 0) +#define M4U_PORT_CAM_RRZO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 1) +#define M4U_PORT_CAM_LSCI_R1_C MTK_M4U_ID(M4U_LARB9_ID, 2) +#define M4U_PORT_CAM_BPCI_R1_C MTK_M4U_ID(M4U_LARB9_ID, 3) +#define M4U_PORT_CAM_YUVO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 4) +#define M4U_PORT_CAM_UFDI_R2_C MTK_M4U_ID(M4U_LARB9_ID, 5) +#define M4U_PORT_CAM_RAWI_R2_C MTK_M4U_ID(M4U_LARB9_ID, 6) +#define M4U_PORT_CAM_RAWI_R5_C MTK_M4U_ID(M4U_LARB9_ID, 7) +#define M4U_PORT_CAM_CAMSV_1 MTK_M4U_ID(M4U_LARB9_ID, 8) +#define M4U_PORT_CAM_CAMSV_2 MTK_M4U_ID(M4U_LARB9_ID, 9) +#define M4U_PORT_CAM_CAMSV_3 MTK_M4U_ID(M4U_LARB9_ID, 10) +#define M4U_PORT_CAM_CAMSV_4 MTK_M4U_ID(M4U_LARB9_ID, 11) +#define M4U_PORT_CAM_CAMSV_5 MTK_M4U_ID(M4U_LARB9_ID, 12) +#define M4U_PORT_CAM_CAMSV_6 
MTK_M4U_ID(M4U_LARB9_ID, 13) +#define M4U_PORT_CAM_AAO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 14) +#define M4U_PORT_CAM_AFO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 15) +#define M4U_PORT_CAM_FLKO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 16) +#define M4U_PORT_CAM_LCESO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 17) +#define M4U_PORT_CAM_CRZO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 18) +#define M4U_PORT_CAM_LTMSO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 19) +#define M4U_PORT_CAM_RSSO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 20) +#define M4U_PORT_CAM_CCUI MTK_M4U_ID(M4U_LARB9_ID, 21) +#define M4U_PORT_CAM_CCUO MTK_M4U_ID(M4U_LARB9_ID, 22) +#define M4U_PORT_CAM_FAKE MTK_M4U_ID(M4U_LARB9_ID, 23) + +/* larb10-CAM_A */ +#define M4U_PORT_CAM_IMGO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 0) +#define M4U_PORT_CAM_RRZO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 1) +#define M4U_PORT_CAM_LSCI_R1_A MTK_M4U_ID(M4U_LARB10_ID, 2) +#define M4U_PORT_CAM_BPCI_R1_A MTK_M4U_ID(M4U_LARB10_ID, 3) +#define M4U_PORT_CAM_YUVO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 4) +#define M4U_PORT_CAM_UFDI_R2_A MTK_M4U_ID(M4U_LARB10_ID, 5) +#define M4U_PORT_CAM_RAWI_R2_A MTK_M4U_ID(M4U_LARB10_ID, 6) +#define M4U_PORT_CAM_RAWI_R5_A MTK_M4U_ID(M4U_LARB10_ID, 7) +#define M4U_PORT_CAM_IMGO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 8) +#define M4U_PORT_CAM_RRZO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 9) +#define M4U_PORT_CAM_LSCI_R1_B MTK_M4U_ID(M4U_LARB10_ID, 10) +#define M4U_PORT_CAM_BPCI_R1_B MTK_M4U_ID(M4U_LARB10_ID, 11) +#define M4U_PORT_CAM_YUVO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 12) +#define M4U_PORT_CAM_UFDI_R2_B MTK_M4U_ID(M4U_LARB10_ID, 13) +#define M4U_PORT_CAM_RAWI_R2_B MTK_M4U_ID(M4U_LARB10_ID, 14) +#define M4U_PORT_CAM_RAWI_R5_B MTK_M4U_ID(M4U_LARB10_ID, 15) +#define M4U_PORT_CAM_CAMSV_0 MTK_M4U_ID(M4U_LARB10_ID, 16) +#define M4U_PORT_CAM_AAO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 17) +#define M4U_PORT_CAM_AFO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 18) +#define M4U_PORT_CAM_FLKO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 19) +#define M4U_PORT_CAM_LCESO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 20) +#define M4U_PORT_CAM_CRZO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 21) +#define M4U_PORT_CAM_AAO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 22) +#define M4U_PORT_CAM_AFO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 23) +#define M4U_PORT_CAM_FLKO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 24) +#define M4U_PORT_CAM_LCESO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 25) +#define M4U_PORT_CAM_CRZO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 26) +#define M4U_PORT_CAM_LTMSO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 27) +#define M4U_PORT_CAM_RSSO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 28) +#define M4U_PORT_CAM_LTMSO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 29) +#define M4U_PORT_CAM_RSSO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 30) + +/* larb11-CAM-VPU */ +#define M4U_PORT_CAM_IPUO MTK_M4U_ID(M4U_LARB11_ID, 0) +#define M4U_PORT_CAM_IPU2O MTK_M4U_ID(M4U_LARB11_ID, 1) +#define M4U_PORT_CAM_IPU3O MTK_M4U_ID(M4U_LARB11_ID, 2) +#define M4U_PORT_CAM_IPUI MTK_M4U_ID(M4U_LARB11_ID, 3) +#define M4U_PORT_CAM_IPU2I MTK_M4U_ID(M4U_LARB11_ID, 4) + +#endif -- cgit v1.2.3 From 52fbf5bdeeef415b28b8e6cdade1e48927927f60 Mon Sep 17 00:00:00 2001 From: Rajat Jain Date: Tue, 7 Jul 2020 15:46:02 -0700 Subject: PCI: Cache ACS capability offset in device Currently the ACS capability is being looked up at a number of places. Read and store it once at enumeration so that it can be used by all later. No functional change intended. 
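For illustration, a rough before/after sketch of the lookup pattern this change enables; the two function names are hypothetical, the rest is the existing PCI core API plus the new pdev->acs_cap field:

#include <linux/pci.h>

/* Before: each caller re-walks the extended capability list. */
static bool acs_sv_enabled_old(struct pci_dev *pdev)
{
	int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
	u16 ctrl;

	if (!pos)
		return false;
	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
	return ctrl & PCI_ACS_SV;
}

/* After: pci_acs_init() cached the offset in pdev->acs_cap at enumeration,
 * so later users simply reuse it.
 */
static bool acs_sv_enabled_new(struct pci_dev *pdev)
{
	u16 ctrl;

	if (!pdev->acs_cap)
		return false;
	pci_read_config_word(pdev, pdev->acs_cap + PCI_ACS_CTRL, &ctrl);
	return ctrl & PCI_ACS_SV;
}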
Link: https://lore.kernel.org/r/20200707224604.3737893-2-rajatja@google.com Signed-off-by: Rajat Jain Signed-off-by: Bjorn Helgaas --- drivers/pci/p2pdma.c | 2 +- drivers/pci/pci.c | 20 ++++++++++++++++---- drivers/pci/pci.h | 2 +- drivers/pci/probe.c | 2 +- drivers/pci/quirks.c | 8 ++++---- include/linux/pci.h | 1 + 6 files changed, 24 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c index e8e444eeb1cd..f29a48f8fa59 100644 --- a/drivers/pci/p2pdma.c +++ b/drivers/pci/p2pdma.c @@ -253,7 +253,7 @@ static int pci_bridge_has_acs_redir(struct pci_dev *pdev) int pos; u16 ctrl; - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS); + pos = pdev->acs_cap; if (!pos) return 0; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index eec625f0e594..73a862782214 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -831,7 +831,7 @@ static void pci_disable_acs_redir(struct pci_dev *dev) if (!pci_dev_specific_disable_acs_redir(dev)) return; - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); + pos = dev->acs_cap; if (!pos) { pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n"); return; @@ -857,7 +857,7 @@ static void pci_std_enable_acs(struct pci_dev *dev) u16 cap; u16 ctrl; - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); + pos = dev->acs_cap; if (!pos) return; @@ -883,7 +883,7 @@ static void pci_std_enable_acs(struct pci_dev *dev) * pci_enable_acs - enable ACS if hardware support it * @dev: the PCI device */ -void pci_enable_acs(struct pci_dev *dev) +static void pci_enable_acs(struct pci_dev *dev) { if (!pci_acs_enable) goto disable_acs_redir; @@ -3362,7 +3362,7 @@ static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags) int pos; u16 cap, ctrl; - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS); + pos = pdev->acs_cap; if (!pos) return false; @@ -3487,6 +3487,18 @@ bool pci_acs_path_enabled(struct pci_dev *start, return true; } +/** + * pci_acs_init - Initialize ACS if hardware supports it + * @dev: the PCI device + */ +void pci_acs_init(struct pci_dev *dev) +{ + dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); + + if (dev->acs_cap) + pci_enable_acs(dev); +} + /** * pci_rebar_find_pos - find position of resize ctrl reg for BAR * @pdev: PCI device diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 6d3f75867106..12fb79fbe29d 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -532,7 +532,7 @@ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, return resource_alignment(res); } -void pci_enable_acs(struct pci_dev *dev); +void pci_acs_init(struct pci_dev *dev); #ifdef CONFIG_PCI_QUIRKS int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags); int pci_dev_specific_enable_acs(struct pci_dev *dev); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 2f66988cea25..6d87066a5ecc 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -2390,7 +2390,7 @@ static void pci_init_capabilities(struct pci_dev *dev) pci_ats_init(dev); /* Address Translation Services */ pci_pri_init(dev); /* Page Request Interface */ pci_pasid_init(dev); /* Process Address Space ID */ - pci_enable_acs(dev); /* Enable ACS P2P upstream forwarding */ + pci_acs_init(dev); /* Access Control Services */ pci_ptm_init(dev); /* Precision Time Measurement */ pci_aer_init(dev); /* Advanced Error Reporting */ pci_dpc_init(dev); /* Downstream Port Containment */ diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 
812bfc32ecb8..b341628e4752 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -4653,7 +4653,7 @@ static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags) if (!pci_quirk_intel_spt_pch_acs_match(dev)) return -ENOTTY; - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); + pos = dev->acs_cap; if (!pos) return -ENOTTY; @@ -4961,7 +4961,7 @@ static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev) if (!pci_quirk_intel_spt_pch_acs_match(dev)) return -ENOTTY; - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); + pos = dev->acs_cap; if (!pos) return -ENOTTY; @@ -4988,7 +4988,7 @@ static int pci_quirk_disable_intel_spt_pch_acs_redir(struct pci_dev *dev) if (!pci_quirk_intel_spt_pch_acs_match(dev)) return -ENOTTY; - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); + pos = dev->acs_cap; if (!pos) return -ENOTTY; @@ -5355,7 +5355,7 @@ int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *l, int timeout) bool found; struct pci_dev *bridge = bus->self; - pos = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ACS); + pos = bridge->acs_cap; /* Disable ACS SV before initial config reads */ if (pos) { diff --git a/include/linux/pci.h b/include/linux/pci.h index c79d83304e52..a26be5332bba 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -486,6 +486,7 @@ struct pci_dev { #ifdef CONFIG_PCI_P2PDMA struct pci_p2pdma *p2pdma; #endif + u16 acs_cap; /* ACS Capability offset */ phys_addr_t rom; /* Physical address if not from BAR */ size_t romlen; /* Length if not from BAR */ char *driver_override; /* Driver name to force a match */ -- cgit v1.2.3 From 99b50be9d8ec9ef319cc7d5de07f4d405fac7764 Mon Sep 17 00:00:00 2001 From: Rajat Jain Date: Tue, 7 Jul 2020 15:46:03 -0700 Subject: PCI: Treat "external-facing" devices themselves as internal "External-facing" devices are internal devices that expose PCIe hierarchies such as Thunderbolt outside the platform [1]. Previously these internal devices were marked as "untrusted" the same as devices downstream from them. Use the ACPI or DT information to identify external-facing devices, but only mark the devices *downstream* from them as "untrusted" [2]. The external-facing device itself is no longer marked as untrusted. 
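A hedged sketch of how a consumer (for example an IOMMU driver) can read the two flags after this change; it is not taken from the patch, and the helper name is made up. Only the untrusted and external_facing bit-fields and the platform properties named in the comment come from the diff below.

    #include <linux/pci.h>

    /* Hypothetical policy helper illustrating the flag split. */
    static bool pci_dev_needs_dma_protection(struct pci_dev *pdev)
    {
    	/*
    	 * Devices downstream of an external-facing port are marked
    	 * untrusted by set_pcie_untrusted(); the external-facing port
    	 * itself remains trusted and only marks the platform boundary
    	 * reported via ACPI "ExternalFacingPort" or the DT
    	 * "external-facing" property.
    	 */
    	return pdev->untrusted;
    }
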
[1] https://docs.microsoft.com/en-us/windows-hardware/drivers/pci/dsd-for-pcie-root-ports#identifying-externally-exposed-pcie-root-ports [2] https://lore.kernel.org/linux-pci/20200610230906.GA1528594@bjorn-Precision-5520/ Link: https://lore.kernel.org/r/20200707224604.3737893-3-rajatja@google.com Signed-off-by: Rajat Jain Signed-off-by: Bjorn Helgaas --- drivers/iommu/intel/iommu.c | 6 +++--- drivers/pci/of.c | 2 +- drivers/pci/pci-acpi.c | 9 ++++----- drivers/pci/probe.c | 2 +- include/linux/pci.h | 6 ++++++ 5 files changed, 15 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 9129663a7406..d23ce26b8833 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -4730,12 +4730,12 @@ const struct attribute_group *intel_iommu_groups[] = { NULL, }; -static inline bool has_untrusted_dev(void) +static inline bool has_external_pci(void) { struct pci_dev *pdev = NULL; for_each_pci_dev(pdev) - if (pdev->untrusted) + if (pdev->external_facing) return true; return false; @@ -4743,7 +4743,7 @@ static inline bool has_untrusted_dev(void) static int __init platform_optin_force_iommu(void) { - if (!dmar_platform_optin() || no_platform_optin || !has_untrusted_dev()) + if (!dmar_platform_optin() || no_platform_optin || !has_external_pci()) return 0; if (no_iommu || dmar_disabled) diff --git a/drivers/pci/of.c b/drivers/pci/of.c index 27839cd2459f..22727fc9558d 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c @@ -42,7 +42,7 @@ void pci_set_bus_of_node(struct pci_bus *bus) } else { node = of_node_get(bus->self->dev.of_node); if (node && of_property_read_bool(node, "external-facing")) - bus->self->untrusted = true; + bus->self->external_facing = true; } bus->dev.of_node = node; diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 7224b1e5f2a8..54520d34e27e 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -1213,7 +1213,7 @@ static void pci_acpi_optimize_delay(struct pci_dev *pdev, ACPI_FREE(obj); } -static void pci_acpi_set_untrusted(struct pci_dev *dev) +static void pci_acpi_set_external_facing(struct pci_dev *dev) { u8 val; @@ -1224,11 +1224,10 @@ static void pci_acpi_set_untrusted(struct pci_dev *dev) /* * These root ports expose PCIe (including DMA) outside of the - * system so make sure we treat them and everything behind as - * untrusted. + * system. Everything downstream from them is external. */ if (val) - dev->untrusted = 1; + dev->external_facing = 1; } static void pci_acpi_setup(struct device *dev) @@ -1240,7 +1239,7 @@ static void pci_acpi_setup(struct device *dev) return; pci_acpi_optimize_delay(pci_dev, adev->handle); - pci_acpi_set_untrusted(pci_dev); + pci_acpi_set_external_facing(pci_dev); pci_acpi_add_edr_notifier(pci_dev); pci_acpi_add_pm_notifier(adev, pci_dev); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 6d87066a5ecc..8c40c00413e7 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1552,7 +1552,7 @@ static void set_pcie_untrusted(struct pci_dev *dev) * untrusted as well. */ parent = pci_upstream_bridge(dev); - if (parent && parent->untrusted) + if (parent && (parent->untrusted || parent->external_facing)) dev->untrusted = true; } diff --git a/include/linux/pci.h b/include/linux/pci.h index a26be5332bba..7a40cd5caed0 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -432,6 +432,12 @@ struct pci_dev { * mappings to make sure they cannot access arbitrary memory. 
*/ unsigned int untrusted:1; + /* + * Info from the platform, e.g., ACPI or device tree, may mark a + * device as "external-facing". An external-facing device is + * itself internal but devices downstream from it are external. + */ + unsigned int external_facing:1; unsigned int broken_intx_masking:1; /* INTx masking can't be used */ unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */ unsigned int irq_managed:1; -- cgit v1.2.3 From 3b50a6e536d2d843857ffe5f923eff7be4222afe Mon Sep 17 00:00:00 2001 From: Ralph Campbell Date: Wed, 1 Jul 2020 15:53:49 -0700 Subject: mm/hmm: provide the page mapping order in hmm_range_fault() hmm_range_fault() returns an array of page frame numbers and flags for how the pages are mapped in the requested process' page tables. The PFN can be used to get the struct page with hmm_pfn_to_page() and the page size order can be determined with compound_order(page). However, if the page is larger than order 0 (PAGE_SIZE), there is no indication that a compound page is mapped by the CPU using a larger page size. Without this information, the caller can't safely use a large device PTE to map the compound page because the CPU might be using smaller PTEs with different read/write permissions. Add a new function hmm_pfn_to_map_order() to return the mapping size order so that callers know the pages are being mapped with consistent permissions and a large device page table mapping can be used if one is available. This will allow devices to optimize mapping the page into HW by avoiding or batching work for huge pages. For instance the dma_map can be done with a high order directly. Link: https://lore.kernel.org/r/20200701225352.9649-3-rcampbell@nvidia.com Signed-off-by: Ralph Campbell Signed-off-by: Jason Gunthorpe --- include/linux/hmm.h | 24 ++++++++++++++++++++++-- mm/hmm.c | 16 +++++++++++++--- 2 files changed, 35 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/hmm.h b/include/linux/hmm.h index f4a09ed223ac..866a0fa104c4 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -37,16 +37,17 @@ * will fail. Must be combined with HMM_PFN_REQ_FAULT. */ enum hmm_pfn_flags { - /* Output flags */ + /* Output fields and flags */ HMM_PFN_VALID = 1UL << (BITS_PER_LONG - 1), HMM_PFN_WRITE = 1UL << (BITS_PER_LONG - 2), HMM_PFN_ERROR = 1UL << (BITS_PER_LONG - 3), + HMM_PFN_ORDER_SHIFT = (BITS_PER_LONG - 8), /* Input flags */ HMM_PFN_REQ_FAULT = HMM_PFN_VALID, HMM_PFN_REQ_WRITE = HMM_PFN_WRITE, - HMM_PFN_FLAGS = HMM_PFN_VALID | HMM_PFN_WRITE | HMM_PFN_ERROR, + HMM_PFN_FLAGS = 0xFFUL << HMM_PFN_ORDER_SHIFT, }; /* @@ -61,6 +62,25 @@ static inline struct page *hmm_pfn_to_page(unsigned long hmm_pfn) return pfn_to_page(hmm_pfn & ~HMM_PFN_FLAGS); } +/* + * hmm_pfn_to_map_order() - return the CPU mapping size order + * + * This is optionally useful to optimize processing of the pfn result + * array. 
It indicates that the page starts at the order aligned VA and is + * 1<<order bytes in size. + */ +static inline unsigned int hmm_pfn_to_map_order(unsigned long hmm_pfn) +{ + return (hmm_pfn >> HMM_PFN_ORDER_SHIFT) & 0x1F; +} + /* * struct hmm_range - track invalidation lock on virtual address range * diff --git a/mm/hmm.c b/mm/hmm.c index e9a545751108..0809baee49d0 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -165,12 +165,19 @@ static int hmm_vma_walk_hole(unsigned long addr, unsigned long end, return hmm_pfns_fill(addr, end, range, 0); } +static inline unsigned long hmm_pfn_flags_order(unsigned long order) +{ + return order << HMM_PFN_ORDER_SHIFT; +} + static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd) { if (pmd_protnone(pmd)) return 0; - return pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID; + return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : + HMM_PFN_VALID) | + hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -389,7 +396,9 @@ static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range, { if (!pud_present(pud)) return 0; - return pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID; + return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : + HMM_PFN_VALID) | + hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT); } static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end, @@ -474,7 +483,8 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask, i = (start - range->start) >> PAGE_SHIFT; pfn_req_flags = range->hmm_pfns[i]; - cpu_flags = pte_to_hmm_pfn_flags(range, entry); + cpu_flags = pte_to_hmm_pfn_flags(range, entry) | + hmm_pfn_flags_order(huge_page_order(hstate_vma(vma))); required_fault = hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags); if (required_fault) { -- cgit v1.2.3 From 7c97f3aded10aa86fc1944341288434117e9c926 Mon Sep 17 00:00:00 2001 From: Mark Zhang Date: Thu, 2 Jul 2020 11:29:31 +0300 Subject: RDMA/counter: Add PID category support in auto mode With the "PID" category, QPs that share a PID are bound to the same counter; if this category is not set, QPs with different PIDs can be bound to the same counter. This is implemented for two reasons: 1. Counters are a limited resource, while there may be dozens of applications, each of which creates several types of QPs, so there may not be enough counters to give every PID its own. 2. The system administrator may need all QPs of the same type, created by all applications, bound to one counter. The counter name and PID only make sense when the "PID" category is configured. This category can also be used in combination with others, e.g. QP type.
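A condensed sketch of the auto-mode rule described above: a QP is bound to an existing counter only if every configured category matches. It mirrors the auto_mode_match() change in the diff below rather than introducing new API; the helper name counter_matches_qp() is invented for illustration.

    #include <linux/sched.h>
    #include <rdma/ib_verbs.h>
    #include <rdma/rdma_counter.h>

    /* Illustrative only: combine the configured category masks. */
    static bool counter_matches_qp(u32 auto_mask, struct rdma_counter *counter,
    			       struct ib_qp *qp)
    {
    	bool match = true;

    	if (auto_mask & RDMA_COUNTER_MASK_QP_TYPE)
    		match &= counter->mode.param.qp_type == qp->qp_type;

    	if (auto_mask & RDMA_COUNTER_MASK_PID)
    		match &= task_pid_nr(counter->res.task) ==
    			 task_pid_nr(qp->res.task);

    	return match;
    }
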
Link: https://lore.kernel.org/r/20200702082933.424537-2-leon@kernel.org Signed-off-by: Mark Zhang Reviewed-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/counters.c | 20 +++++--------------- drivers/infiniband/core/nldev.c | 8 ++++++-- include/uapi/rdma/rdma_netlink.h | 1 + 3 files changed, 12 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c index 2257d7f7810f..40204c6caa5b 100644 --- a/drivers/infiniband/core/counters.c +++ b/drivers/infiniband/core/counters.c @@ -8,7 +8,7 @@ #include "core_priv.h" #include "restrack.h" -#define ALL_AUTO_MODE_MASKS (RDMA_COUNTER_MASK_QP_TYPE) +#define ALL_AUTO_MODE_MASKS (RDMA_COUNTER_MASK_QP_TYPE | RDMA_COUNTER_MASK_PID) static int __counter_set_mode(struct rdma_counter_mode *curr, enum rdma_nl_counter_mode new_mode, @@ -149,23 +149,13 @@ static bool auto_mode_match(struct ib_qp *qp, struct rdma_counter *counter, struct auto_mode_param *param = &counter->mode.param; bool match = true; - /* - * Ensure that counter belongs to the right PID. This operation can - * race with user space which kills the process and leaves QP and - * counters orphans. - * - * It is not a big deal because exitted task will leave both QP and - * counter in the same bucket of zombie process. Just ensure that - * process is still alive before procedding. - * - */ - if (task_pid_nr(counter->res.task) != task_pid_nr(qp->res.task) || - !task_pid_nr(qp->res.task)) - return false; - if (auto_mask & RDMA_COUNTER_MASK_QP_TYPE) match &= (param->qp_type == qp->qp_type); + if (auto_mask & RDMA_COUNTER_MASK_PID) + match &= (task_pid_nr(counter->res.task) == + task_pid_nr(qp->res.task)); + return match; } diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 1051b5622b62..76af7ea2875d 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -711,11 +711,16 @@ static int fill_stat_counter_mode(struct sk_buff *msg, if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, m->mode)) return -EMSGSIZE; - if (m->mode == RDMA_COUNTER_MODE_AUTO) + if (m->mode == RDMA_COUNTER_MODE_AUTO) { if ((m->mask & RDMA_COUNTER_MASK_QP_TYPE) && nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, m->param.qp_type)) return -EMSGSIZE; + if ((m->mask & RDMA_COUNTER_MASK_PID) && + fill_res_name_pid(msg, &counter->res)) + return -EMSGSIZE; + } + return 0; } @@ -855,7 +860,6 @@ static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin, if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, counter->port) || nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, counter->id) || - fill_res_name_pid(msg, &counter->res) || fill_stat_counter_mode(msg, counter) || fill_stat_counter_qps(msg, counter) || fill_stat_counter_hwcounters(msg, counter)) diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h index 3826143d420d..d2f5b8396243 100644 --- a/include/uapi/rdma/rdma_netlink.h +++ b/include/uapi/rdma/rdma_netlink.h @@ -569,5 +569,6 @@ enum rdma_nl_counter_mode { */ enum rdma_nl_counter_mask { RDMA_COUNTER_MASK_QP_TYPE = 1, + RDMA_COUNTER_MASK_PID = 1 << 1, }; #endif /* _UAPI_RDMA_NETLINK_H */ -- cgit v1.2.3 From a2b992c828f7651db369ba8f0eb0818d70232636 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 9 Jul 2020 17:42:44 -0700 Subject: debugfs: make sure we can remove u32_array files cleanly debugfs_create_u32_array() allocates a small structure to wrap the data and size information about the 
array. If users ever try to remove the file this leads to a leak since nothing ever frees this wrapper. That said there are no upstream users of debugfs_create_u32_array() that'd remove a u32 array file (we only have one u32 array user in CMA), so there is no real bug here. Make callers pass a wrapper they allocated. This way the lifetime management of the wrapper is on the caller, and we can avoid the potential leak in debugfs. CC: Chucheng Luo Signed-off-by: Jakub Kicinski Reviewed-by: Greg Kroah-Hartman Signed-off-by: David S. Miller --- Documentation/filesystems/debugfs.rst | 12 ++++++++---- fs/debugfs/file.c | 27 +++++++-------------------- include/linux/debugfs.h | 12 +++++++++--- mm/cma.h | 3 +++ mm/cma_debug.c | 7 ++++--- 5 files changed, 31 insertions(+), 30 deletions(-) (limited to 'include') diff --git a/Documentation/filesystems/debugfs.rst b/Documentation/filesystems/debugfs.rst index 1da7a4b7383d..728ab57a611a 100644 --- a/Documentation/filesystems/debugfs.rst +++ b/Documentation/filesystems/debugfs.rst @@ -185,13 +185,17 @@ byte offsets over a base for the register block. If you want to dump an u32 array in debugfs, you can create file with:: + struct debugfs_u32_array { + u32 *array; + u32 n_elements; + }; + void debugfs_create_u32_array(const char *name, umode_t mode, struct dentry *parent, - u32 *array, u32 elements); + struct debugfs_u32_array *array); -The "array" argument provides data, and the "elements" argument is -the number of elements in the array. Note: Once array is created its -size can not be changed. +The "array" argument wraps a pointer to the array's data and the number +of its elements. Note: Once array is created its size can not be changed. There is a helper function to create device related seq_file:: diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index ae49a55bda00..d0ed71f37511 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c @@ -918,11 +918,6 @@ struct dentry *debugfs_create_blob(const char *name, umode_t mode, } EXPORT_SYMBOL_GPL(debugfs_create_blob); -struct array_data { - void *array; - u32 elements; -}; - static size_t u32_format_array(char *buf, size_t bufsize, u32 *array, int array_size) { @@ -943,8 +938,8 @@ static size_t u32_format_array(char *buf, size_t bufsize, static int u32_array_open(struct inode *inode, struct file *file) { - struct array_data *data = inode->i_private; - int size, elements = data->elements; + struct debugfs_u32_array *data = inode->i_private; + int size, elements = data->n_elements; char *buf; /* @@ -959,7 +954,7 @@ static int u32_array_open(struct inode *inode, struct file *file) buf[size] = 0; file->private_data = buf; - u32_format_array(buf, size, data->array, data->elements); + u32_format_array(buf, size, data->array, data->n_elements); return nonseekable_open(inode, file); } @@ -996,8 +991,7 @@ static const struct file_operations u32_array_fops = { * @parent: a pointer to the parent dentry for this file. This should be a * directory dentry if set. If this parameter is %NULL, then the * file will be created in the root of the debugfs filesystem. - * @array: u32 array that provides data. - * @elements: total number of elements in the array. + * @array: wrapper struct containing data pointer and size of the array. * * This function creates a file in debugfs with the given name that exports * @array as data. If the @mode variable is so set it can be read from. @@ -1005,17 +999,10 @@ static const struct file_operations u32_array_fops = { * Once array is created its size can not be changed. 
*/ void debugfs_create_u32_array(const char *name, umode_t mode, - struct dentry *parent, u32 *array, u32 elements) + struct dentry *parent, + struct debugfs_u32_array *array) { - struct array_data *data = kmalloc(sizeof(*data), GFP_KERNEL); - - if (data == NULL) - return; - - data->array = array; - data->elements = elements; - - debugfs_create_file_unsafe(name, mode, parent, data, &u32_array_fops); + debugfs_create_file_unsafe(name, mode, parent, array, &u32_array_fops); } EXPORT_SYMBOL_GPL(debugfs_create_u32_array); diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 63cb3606dea7..851dd1f9a8a5 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -38,6 +38,11 @@ struct debugfs_regset32 { struct device *dev; /* Optional device for Runtime PM */ }; +struct debugfs_u32_array { + u32 *array; + u32 n_elements; +}; + extern struct dentry *arch_debugfs_dir; #define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ @@ -136,7 +141,8 @@ void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, int nregs, void __iomem *base, char *prefix); void debugfs_create_u32_array(const char *name, umode_t mode, - struct dentry *parent, u32 *array, u32 elements); + struct dentry *parent, + struct debugfs_u32_array *array); struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name, struct dentry *parent, @@ -316,8 +322,8 @@ static inline bool debugfs_initialized(void) } static inline void debugfs_create_u32_array(const char *name, umode_t mode, - struct dentry *parent, u32 *array, - u32 elements) + struct dentry *parent, + struct debugfs_u32_array *array) { } diff --git a/mm/cma.h b/mm/cma.h index 33c0b517733c..6698fa63279b 100644 --- a/mm/cma.h +++ b/mm/cma.h @@ -2,6 +2,8 @@ #ifndef __MM_CMA_H__ #define __MM_CMA_H__ +#include + struct cma { unsigned long base_pfn; unsigned long count; @@ -11,6 +13,7 @@ struct cma { #ifdef CONFIG_CMA_DEBUGFS struct hlist_head mem_head; spinlock_t mem_head_lock; + struct debugfs_u32_array dfs_bitmap; #endif const char *name; }; diff --git a/mm/cma_debug.c b/mm/cma_debug.c index 4e6cbe2f586e..d5bf8aa34fdc 100644 --- a/mm/cma_debug.c +++ b/mm/cma_debug.c @@ -164,7 +164,6 @@ static void cma_debugfs_add_one(struct cma *cma, struct dentry *root_dentry) { struct dentry *tmp; char name[16]; - int u32s; scnprintf(name, sizeof(name), "cma-%s", cma->name); @@ -180,8 +179,10 @@ static void cma_debugfs_add_one(struct cma *cma, struct dentry *root_dentry) debugfs_create_file("used", 0444, tmp, cma, &cma_used_fops); debugfs_create_file("maxchunk", 0444, tmp, cma, &cma_maxchunk_fops); - u32s = DIV_ROUND_UP(cma_bitmap_maxno(cma), BITS_PER_BYTE * sizeof(u32)); - debugfs_create_u32_array("bitmap", 0444, tmp, (u32 *)cma->bitmap, u32s); + cma->dfs_bitmap.array = (u32 *)cma->bitmap; + cma->dfs_bitmap.n_elements = DIV_ROUND_UP(cma_bitmap_maxno(cma), + BITS_PER_BYTE * sizeof(u32)); + debugfs_create_u32_array("bitmap", 0444, tmp, &cma->dfs_bitmap); } static int __init cma_debugfs_init(void) -- cgit v1.2.3 From 84a4160e5a5951357947ad296932b433de3e34a0 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 9 Jul 2020 17:42:45 -0700 Subject: udp_tunnel: re-number the offload tunnel types Make it possible to use tunnel types as flags more easily. There doesn't appear to be any user using the type as an array index, so this should make no difference. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- include/net/udp_tunnel.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index e7312ceb2794..0615e25f041c 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -106,9 +106,9 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock, * call this function to perform Tx offloads on outgoing traffic. */ enum udp_parsable_tunnel_type { - UDP_TUNNEL_TYPE_VXLAN, /* RFC 7348 */ - UDP_TUNNEL_TYPE_GENEVE, /* draft-ietf-nvo3-geneve */ - UDP_TUNNEL_TYPE_VXLAN_GPE, /* draft-ietf-nvo3-vxlan-gpe */ + UDP_TUNNEL_TYPE_VXLAN = BIT(0), /* RFC 7348 */ + UDP_TUNNEL_TYPE_GENEVE = BIT(1), /* draft-ietf-nvo3-geneve */ + UDP_TUNNEL_TYPE_VXLAN_GPE = BIT(2), /* draft-ietf-nvo3-vxlan-gpe */ }; struct udp_tunnel_info { -- cgit v1.2.3 From cc4e3835eff474aa274d6e1d18f69d9d296d3b76 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 9 Jul 2020 17:42:46 -0700 Subject: udp_tunnel: add central NIC RX port offload infrastructure Cater to devices which: (a) may want to sleep in the callbacks; (b) only have IPv4 support; (c) need all the programming to happen while the netdev is up. Drivers attach UDP tunnel offload info struct to their netdevs, where they declare how many UDP ports of various tunnel types they support. Core takes care of tracking which ports to offload. Use a fixed-size array since this matches what almost all drivers do, and avoids a complexity and uncertainty around memory allocations in an atomic context. Make sure that tunnel drivers don't try to replay the ports when new NIC netdev is registered. Automatic replays would mess up reference counting, and will be removed completely once all drivers are converted. v4: - use a #define NULL to avoid build issues with CONFIG_INET=n. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/geneve.c | 6 +- drivers/net/vxlan.c | 6 +- include/linux/netdevice.h | 8 + include/net/udp_tunnel.h | 137 ++++++++ net/ipv4/Makefile | 3 +- net/ipv4/udp_tunnel.c | 224 ------------- net/ipv4/udp_tunnel_core.c | 224 +++++++++++++ net/ipv4/udp_tunnel_nic.c | 821 +++++++++++++++++++++++++++++++++++++++++++++ net/ipv4/udp_tunnel_stub.c | 7 + 9 files changed, 1207 insertions(+), 229 deletions(-) delete mode 100644 net/ipv4/udp_tunnel.c create mode 100644 net/ipv4/udp_tunnel_core.c create mode 100644 net/ipv4/udp_tunnel_nic.c create mode 100644 net/ipv4/udp_tunnel_stub.c (limited to 'include') diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index e3d074008da2..49b00def2eef 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1796,9 +1796,11 @@ static int geneve_netdevice_event(struct notifier_block *unused, event == NETDEV_UDP_TUNNEL_DROP_INFO) { geneve_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO); } else if (event == NETDEV_UNREGISTER) { - geneve_offload_rx_ports(dev, false); + if (!dev->udp_tunnel_nic_info) + geneve_offload_rx_ports(dev, false); } else if (event == NETDEV_REGISTER) { - geneve_offload_rx_ports(dev, true); + if (!dev->udp_tunnel_nic_info) + geneve_offload_rx_ports(dev, true); } return NOTIFY_DONE; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 89d85dcb200e..a43c97b13924 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -4477,10 +4477,12 @@ static int vxlan_netdevice_event(struct notifier_block *unused, struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); if (event == NETDEV_UNREGISTER) { - vxlan_offload_rx_ports(dev, false); + if (!dev->udp_tunnel_nic_info) + vxlan_offload_rx_ports(dev, false); vxlan_handle_lowerdev_unregister(vn, dev); } else if (event == NETDEV_REGISTER) { - vxlan_offload_rx_ports(dev, true); + if (!dev->udp_tunnel_nic_info) + vxlan_offload_rx_ports(dev, true); } else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO || event == NETDEV_UDP_TUNNEL_DROP_INFO) { vxlan_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 39e28e11863c..ac2cd3f49aba 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -65,6 +65,8 @@ struct wpan_dev; struct mpls_dev; /* UDP Tunnel offloads */ struct udp_tunnel_info; +struct udp_tunnel_nic_info; +struct udp_tunnel_nic; struct bpf_prog; struct xdp_buff; @@ -1836,6 +1838,10 @@ enum netdev_priv_flags { * * @macsec_ops: MACsec offloading ops * + * @udp_tunnel_nic_info: static structure describing the UDP tunnel + * offload capabilities of the device + * @udp_tunnel_nic: UDP tunnel offload state + * * FIXME: cleanup struct net_device such that network protocol info * moves out. 
*/ @@ -2134,6 +2140,8 @@ struct net_device { /* MACsec management functions */ const struct macsec_ops *macsec_ops; #endif + const struct udp_tunnel_nic_info *udp_tunnel_nic_info; + struct udp_tunnel_nic *udp_tunnel_nic; }; #define to_net_dev(d) container_of(d, struct net_device, dev) diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index 0615e25f041c..ee34619e4cfa 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -115,6 +115,7 @@ struct udp_tunnel_info { unsigned short type; sa_family_t sa_family; __be16 port; + u8 hw_priv; }; /* Notify network devices of offloadable types */ @@ -181,4 +182,140 @@ static inline void udp_tunnel_encap_enable(struct socket *sock) udp_encap_enable(); } +#define UDP_TUNNEL_NIC_MAX_TABLES 4 + +enum udp_tunnel_nic_info_flags { + /* Device callbacks may sleep */ + UDP_TUNNEL_NIC_INFO_MAY_SLEEP = BIT(0), + /* Device only supports offloads when it's open, all ports + * will be removed before close and re-added after open. + */ + UDP_TUNNEL_NIC_INFO_OPEN_ONLY = BIT(1), + /* Device supports only IPv4 tunnels */ + UDP_TUNNEL_NIC_INFO_IPV4_ONLY = BIT(2), +}; + +/** + * struct udp_tunnel_nic_info - driver UDP tunnel offload information + * @set_port: callback for adding a new port + * @unset_port: callback for removing a port + * @sync_table: callback for syncing the entire port table at once + * @flags: device flags from enum udp_tunnel_nic_info_flags + * @tables: UDP port tables this device has + * @tables.n_entries: number of entries in this table + * @tables.tunnel_types: types of tunnels this table accepts + * + * Drivers are expected to provide either @set_port and @unset_port callbacks + * or the @sync_table callback. Callbacks are invoked with rtnl lock held. + * + * Known limitations: + * - UDP tunnel port notifications are fundamentally best-effort - + * it is likely the driver will both see skbs which use a UDP tunnel port, + * while not being a tunneled skb, and tunnel skbs from other ports - + * drivers should only use these ports for non-critical RX-side offloads, + * e.g. the checksum offload; + * - none of the devices care about the socket family at present, so we don't + * track it. Please extend this code if you care. + */ +struct udp_tunnel_nic_info { + /* one-by-one */ + int (*set_port)(struct net_device *dev, + unsigned int table, unsigned int entry, + struct udp_tunnel_info *ti); + int (*unset_port)(struct net_device *dev, + unsigned int table, unsigned int entry, + struct udp_tunnel_info *ti); + + /* all at once */ + int (*sync_table)(struct net_device *dev, unsigned int table); + + unsigned int flags; + + struct udp_tunnel_nic_table_info { + unsigned int n_entries; + unsigned int tunnel_types; + } tables[UDP_TUNNEL_NIC_MAX_TABLES]; +}; + +/* UDP tunnel module dependencies + * + * Tunnel drivers are expected to have a hard dependency on the udp_tunnel + * module. NIC drivers are not, they just attach their + * struct udp_tunnel_nic_info to the netdev and wait for callbacks to come. + * Loading a tunnel driver will cause the udp_tunnel module to be loaded + * and only then will all the required state structures be allocated. + * Since we want a weak dependency from the drivers and the core to udp_tunnel + * we call things through the following stubs. 
+ */ +struct udp_tunnel_nic_ops { + void (*get_port)(struct net_device *dev, unsigned int table, + unsigned int idx, struct udp_tunnel_info *ti); + void (*set_port_priv)(struct net_device *dev, unsigned int table, + unsigned int idx, u8 priv); + void (*add_port)(struct net_device *dev, struct udp_tunnel_info *ti); + void (*del_port)(struct net_device *dev, struct udp_tunnel_info *ti); + void (*reset_ntf)(struct net_device *dev); +}; + +#ifdef CONFIG_INET +extern const struct udp_tunnel_nic_ops *udp_tunnel_nic_ops; +#else +#define udp_tunnel_nic_ops ((struct udp_tunnel_nic_ops *)NULL) +#endif + +static inline void +udp_tunnel_nic_get_port(struct net_device *dev, unsigned int table, + unsigned int idx, struct udp_tunnel_info *ti) +{ + /* This helper is used from .sync_table, we indicate empty entries + * by zero'ed @ti. Drivers which need to know the details of a port + * when it gets deleted should use the .set_port / .unset_port + * callbacks. + * Zero out here, otherwise !CONFIG_INET causes uninitilized warnings. + */ + memset(ti, 0, sizeof(*ti)); + + if (udp_tunnel_nic_ops) + udp_tunnel_nic_ops->get_port(dev, table, idx, ti); +} + +static inline void +udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table, + unsigned int idx, u8 priv) +{ + if (udp_tunnel_nic_ops) + udp_tunnel_nic_ops->set_port_priv(dev, table, idx, priv); +} + +static inline void +udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti) +{ + if (udp_tunnel_nic_ops) + udp_tunnel_nic_ops->add_port(dev, ti); +} + +static inline void +udp_tunnel_nic_del_port(struct net_device *dev, struct udp_tunnel_info *ti) +{ + if (udp_tunnel_nic_ops) + udp_tunnel_nic_ops->del_port(dev, ti); +} + +/** + * udp_tunnel_nic_reset_ntf() - device-originating reset notification + * @dev: network interface device structure + * + * Called by the driver to inform the core that the entire UDP tunnel port + * state has been lost, usually due to device reset. Core will assume device + * forgot all the ports and issue .set_port and .sync_table callbacks as + * necessary. + * + * This function must be called with rtnl lock held, and will issue all + * the callbacks before returning. 
+ */ +static inline void udp_tunnel_nic_reset_ntf(struct net_device *dev) +{ + if (udp_tunnel_nic_ops) + udp_tunnel_nic_ops->reset_ntf(dev); +} #endif diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile index 9e1a186a3671..5b77a46885b9 100644 --- a/net/ipv4/Makefile +++ b/net/ipv4/Makefile @@ -14,7 +14,7 @@ obj-y := route.o inetpeer.o protocol.o \ udp_offload.o arp.o icmp.o devinet.o af_inet.o igmp.o \ fib_frontend.o fib_semantics.o fib_trie.o fib_notifier.o \ inet_fragment.o ping.o ip_tunnel_core.o gre_offload.o \ - metrics.o netlink.o nexthop.o + metrics.o netlink.o nexthop.o udp_tunnel_stub.o obj-$(CONFIG_BPFILTER) += bpfilter/ @@ -29,6 +29,7 @@ gre-y := gre_demux.o obj-$(CONFIG_NET_FOU) += fou.o obj-$(CONFIG_NET_IPGRE_DEMUX) += gre.o obj-$(CONFIG_NET_IPGRE) += ip_gre.o +udp_tunnel-y := udp_tunnel_core.o udp_tunnel_nic.o obj-$(CONFIG_NET_UDP_TUNNEL) += udp_tunnel.o obj-$(CONFIG_NET_IPVTI) += ip_vti.o obj-$(CONFIG_SYN_COOKIES) += syncookies.o diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c deleted file mode 100644 index 3eecba0874aa..000000000000 --- a/net/ipv4/udp_tunnel.c +++ /dev/null @@ -1,224 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg, - struct socket **sockp) -{ - int err; - struct socket *sock = NULL; - struct sockaddr_in udp_addr; - - err = sock_create_kern(net, AF_INET, SOCK_DGRAM, 0, &sock); - if (err < 0) - goto error; - - if (cfg->bind_ifindex) { - err = sock_bindtoindex(sock->sk, cfg->bind_ifindex, true); - if (err < 0) - goto error; - } - - udp_addr.sin_family = AF_INET; - udp_addr.sin_addr = cfg->local_ip; - udp_addr.sin_port = cfg->local_udp_port; - err = kernel_bind(sock, (struct sockaddr *)&udp_addr, - sizeof(udp_addr)); - if (err < 0) - goto error; - - if (cfg->peer_udp_port) { - udp_addr.sin_family = AF_INET; - udp_addr.sin_addr = cfg->peer_ip; - udp_addr.sin_port = cfg->peer_udp_port; - err = kernel_connect(sock, (struct sockaddr *)&udp_addr, - sizeof(udp_addr), 0); - if (err < 0) - goto error; - } - - sock->sk->sk_no_check_tx = !cfg->use_udp_checksums; - - *sockp = sock; - return 0; - -error: - if (sock) { - kernel_sock_shutdown(sock, SHUT_RDWR); - sock_release(sock); - } - *sockp = NULL; - return err; -} -EXPORT_SYMBOL(udp_sock_create4); - -void setup_udp_tunnel_sock(struct net *net, struct socket *sock, - struct udp_tunnel_sock_cfg *cfg) -{ - struct sock *sk = sock->sk; - - /* Disable multicast loopback */ - inet_sk(sk)->mc_loop = 0; - - /* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */ - inet_inc_convert_csum(sk); - - rcu_assign_sk_user_data(sk, cfg->sk_user_data); - - udp_sk(sk)->encap_type = cfg->encap_type; - udp_sk(sk)->encap_rcv = cfg->encap_rcv; - udp_sk(sk)->encap_err_lookup = cfg->encap_err_lookup; - udp_sk(sk)->encap_destroy = cfg->encap_destroy; - udp_sk(sk)->gro_receive = cfg->gro_receive; - udp_sk(sk)->gro_complete = cfg->gro_complete; - - udp_tunnel_encap_enable(sock); -} -EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock); - -void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock, - unsigned short type) -{ - struct sock *sk = sock->sk; - struct udp_tunnel_info ti; - - if (!dev->netdev_ops->ndo_udp_tunnel_add || - !(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT)) - return; - - ti.type = type; - ti.sa_family = sk->sk_family; - ti.port = inet_sk(sk)->inet_sport; - - dev->netdev_ops->ndo_udp_tunnel_add(dev, &ti); -} 
-EXPORT_SYMBOL_GPL(udp_tunnel_push_rx_port); - -void udp_tunnel_drop_rx_port(struct net_device *dev, struct socket *sock, - unsigned short type) -{ - struct sock *sk = sock->sk; - struct udp_tunnel_info ti; - - if (!dev->netdev_ops->ndo_udp_tunnel_del || - !(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT)) - return; - - ti.type = type; - ti.sa_family = sk->sk_family; - ti.port = inet_sk(sk)->inet_sport; - - dev->netdev_ops->ndo_udp_tunnel_del(dev, &ti); -} -EXPORT_SYMBOL_GPL(udp_tunnel_drop_rx_port); - -/* Notify netdevs that UDP port started listening */ -void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type) -{ - struct sock *sk = sock->sk; - struct net *net = sock_net(sk); - struct udp_tunnel_info ti; - struct net_device *dev; - - ti.type = type; - ti.sa_family = sk->sk_family; - ti.port = inet_sk(sk)->inet_sport; - - rcu_read_lock(); - for_each_netdev_rcu(net, dev) { - if (!dev->netdev_ops->ndo_udp_tunnel_add) - continue; - if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT)) - continue; - dev->netdev_ops->ndo_udp_tunnel_add(dev, &ti); - } - rcu_read_unlock(); -} -EXPORT_SYMBOL_GPL(udp_tunnel_notify_add_rx_port); - -/* Notify netdevs that UDP port is no more listening */ -void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type) -{ - struct sock *sk = sock->sk; - struct net *net = sock_net(sk); - struct udp_tunnel_info ti; - struct net_device *dev; - - ti.type = type; - ti.sa_family = sk->sk_family; - ti.port = inet_sk(sk)->inet_sport; - - rcu_read_lock(); - for_each_netdev_rcu(net, dev) { - if (!dev->netdev_ops->ndo_udp_tunnel_del) - continue; - if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT)) - continue; - dev->netdev_ops->ndo_udp_tunnel_del(dev, &ti); - } - rcu_read_unlock(); -} -EXPORT_SYMBOL_GPL(udp_tunnel_notify_del_rx_port); - -void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, - __be32 src, __be32 dst, __u8 tos, __u8 ttl, - __be16 df, __be16 src_port, __be16 dst_port, - bool xnet, bool nocheck) -{ - struct udphdr *uh; - - __skb_push(skb, sizeof(*uh)); - skb_reset_transport_header(skb); - uh = udp_hdr(skb); - - uh->dest = dst_port; - uh->source = src_port; - uh->len = htons(skb->len); - - memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); - - udp_set_csum(nocheck, skb, src, dst, skb->len); - - iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df, xnet); -} -EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb); - -void udp_tunnel_sock_release(struct socket *sock) -{ - rcu_assign_sk_user_data(sock->sk, NULL); - kernel_sock_shutdown(sock, SHUT_RDWR); - sock_release(sock); -} -EXPORT_SYMBOL_GPL(udp_tunnel_sock_release); - -struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family, - __be16 flags, __be64 tunnel_id, int md_size) -{ - struct metadata_dst *tun_dst; - struct ip_tunnel_info *info; - - if (family == AF_INET) - tun_dst = ip_tun_rx_dst(skb, flags, tunnel_id, md_size); - else - tun_dst = ipv6_tun_rx_dst(skb, flags, tunnel_id, md_size); - if (!tun_dst) - return NULL; - - info = &tun_dst->u.tun_info; - info->key.tp_src = udp_hdr(skb)->source; - info->key.tp_dst = udp_hdr(skb)->dest; - if (udp_hdr(skb)->check) - info->key.tun_flags |= TUNNEL_CSUM; - return tun_dst; -} -EXPORT_SYMBOL_GPL(udp_tun_rx_dst); - -MODULE_LICENSE("GPL"); diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c new file mode 100644 index 000000000000..3eecba0874aa --- /dev/null +++ b/net/ipv4/udp_tunnel_core.c @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include + +int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg, + struct socket **sockp) +{ + int err; + struct socket *sock = NULL; + struct sockaddr_in udp_addr; + + err = sock_create_kern(net, AF_INET, SOCK_DGRAM, 0, &sock); + if (err < 0) + goto error; + + if (cfg->bind_ifindex) { + err = sock_bindtoindex(sock->sk, cfg->bind_ifindex, true); + if (err < 0) + goto error; + } + + udp_addr.sin_family = AF_INET; + udp_addr.sin_addr = cfg->local_ip; + udp_addr.sin_port = cfg->local_udp_port; + err = kernel_bind(sock, (struct sockaddr *)&udp_addr, + sizeof(udp_addr)); + if (err < 0) + goto error; + + if (cfg->peer_udp_port) { + udp_addr.sin_family = AF_INET; + udp_addr.sin_addr = cfg->peer_ip; + udp_addr.sin_port = cfg->peer_udp_port; + err = kernel_connect(sock, (struct sockaddr *)&udp_addr, + sizeof(udp_addr), 0); + if (err < 0) + goto error; + } + + sock->sk->sk_no_check_tx = !cfg->use_udp_checksums; + + *sockp = sock; + return 0; + +error: + if (sock) { + kernel_sock_shutdown(sock, SHUT_RDWR); + sock_release(sock); + } + *sockp = NULL; + return err; +} +EXPORT_SYMBOL(udp_sock_create4); + +void setup_udp_tunnel_sock(struct net *net, struct socket *sock, + struct udp_tunnel_sock_cfg *cfg) +{ + struct sock *sk = sock->sk; + + /* Disable multicast loopback */ + inet_sk(sk)->mc_loop = 0; + + /* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */ + inet_inc_convert_csum(sk); + + rcu_assign_sk_user_data(sk, cfg->sk_user_data); + + udp_sk(sk)->encap_type = cfg->encap_type; + udp_sk(sk)->encap_rcv = cfg->encap_rcv; + udp_sk(sk)->encap_err_lookup = cfg->encap_err_lookup; + udp_sk(sk)->encap_destroy = cfg->encap_destroy; + udp_sk(sk)->gro_receive = cfg->gro_receive; + udp_sk(sk)->gro_complete = cfg->gro_complete; + + udp_tunnel_encap_enable(sock); +} +EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock); + +void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock, + unsigned short type) +{ + struct sock *sk = sock->sk; + struct udp_tunnel_info ti; + + if (!dev->netdev_ops->ndo_udp_tunnel_add || + !(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT)) + return; + + ti.type = type; + ti.sa_family = sk->sk_family; + ti.port = inet_sk(sk)->inet_sport; + + dev->netdev_ops->ndo_udp_tunnel_add(dev, &ti); +} +EXPORT_SYMBOL_GPL(udp_tunnel_push_rx_port); + +void udp_tunnel_drop_rx_port(struct net_device *dev, struct socket *sock, + unsigned short type) +{ + struct sock *sk = sock->sk; + struct udp_tunnel_info ti; + + if (!dev->netdev_ops->ndo_udp_tunnel_del || + !(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT)) + return; + + ti.type = type; + ti.sa_family = sk->sk_family; + ti.port = inet_sk(sk)->inet_sport; + + dev->netdev_ops->ndo_udp_tunnel_del(dev, &ti); +} +EXPORT_SYMBOL_GPL(udp_tunnel_drop_rx_port); + +/* Notify netdevs that UDP port started listening */ +void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type) +{ + struct sock *sk = sock->sk; + struct net *net = sock_net(sk); + struct udp_tunnel_info ti; + struct net_device *dev; + + ti.type = type; + ti.sa_family = sk->sk_family; + ti.port = inet_sk(sk)->inet_sport; + + rcu_read_lock(); + for_each_netdev_rcu(net, dev) { + if (!dev->netdev_ops->ndo_udp_tunnel_add) + continue; + if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT)) + continue; + dev->netdev_ops->ndo_udp_tunnel_add(dev, &ti); + } + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(udp_tunnel_notify_add_rx_port); + +/* Notify netdevs that UDP port is no more listening */ +void 
udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type) +{ + struct sock *sk = sock->sk; + struct net *net = sock_net(sk); + struct udp_tunnel_info ti; + struct net_device *dev; + + ti.type = type; + ti.sa_family = sk->sk_family; + ti.port = inet_sk(sk)->inet_sport; + + rcu_read_lock(); + for_each_netdev_rcu(net, dev) { + if (!dev->netdev_ops->ndo_udp_tunnel_del) + continue; + if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT)) + continue; + dev->netdev_ops->ndo_udp_tunnel_del(dev, &ti); + } + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(udp_tunnel_notify_del_rx_port); + +void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, + __be32 src, __be32 dst, __u8 tos, __u8 ttl, + __be16 df, __be16 src_port, __be16 dst_port, + bool xnet, bool nocheck) +{ + struct udphdr *uh; + + __skb_push(skb, sizeof(*uh)); + skb_reset_transport_header(skb); + uh = udp_hdr(skb); + + uh->dest = dst_port; + uh->source = src_port; + uh->len = htons(skb->len); + + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); + + udp_set_csum(nocheck, skb, src, dst, skb->len); + + iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df, xnet); +} +EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb); + +void udp_tunnel_sock_release(struct socket *sock) +{ + rcu_assign_sk_user_data(sock->sk, NULL); + kernel_sock_shutdown(sock, SHUT_RDWR); + sock_release(sock); +} +EXPORT_SYMBOL_GPL(udp_tunnel_sock_release); + +struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family, + __be16 flags, __be64 tunnel_id, int md_size) +{ + struct metadata_dst *tun_dst; + struct ip_tunnel_info *info; + + if (family == AF_INET) + tun_dst = ip_tun_rx_dst(skb, flags, tunnel_id, md_size); + else + tun_dst = ipv6_tun_rx_dst(skb, flags, tunnel_id, md_size); + if (!tun_dst) + return NULL; + + info = &tun_dst->u.tun_info; + info->key.tp_src = udp_hdr(skb)->source; + info->key.tp_dst = udp_hdr(skb)->dest; + if (udp_hdr(skb)->check) + info->key.tun_flags |= TUNNEL_CSUM; + return tun_dst; +} +EXPORT_SYMBOL_GPL(udp_tun_rx_dst); + +MODULE_LICENSE("GPL"); diff --git a/net/ipv4/udp_tunnel_nic.c b/net/ipv4/udp_tunnel_nic.c new file mode 100644 index 000000000000..056cfe0b770e --- /dev/null +++ b/net/ipv4/udp_tunnel_nic.c @@ -0,0 +1,821 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (c) 2020 Facebook Inc. + +#include +#include +#include +#include +#include + +enum udp_tunnel_nic_table_entry_flags { + UDP_TUNNEL_NIC_ENTRY_ADD = BIT(0), + UDP_TUNNEL_NIC_ENTRY_DEL = BIT(1), + UDP_TUNNEL_NIC_ENTRY_OP_FAIL = BIT(2), + UDP_TUNNEL_NIC_ENTRY_FROZEN = BIT(3), +}; + +struct udp_tunnel_nic_table_entry { + __be16 port; + u8 type; + u8 use_cnt; + u8 flags; + u8 hw_priv; +}; + +/** + * struct udp_tunnel_nic - UDP tunnel port offload state + * @work: async work for talking to hardware from process context + * @dev: netdev pointer + * @need_sync: at least one port start changed + * @need_replay: space was freed, we need a replay of all ports + * @work_pending: @work is currently scheduled + * @n_tables: number of tables under @entries + * @missed: bitmap of tables which overflown + * @entries: table of tables of ports currently offloaded + */ +struct udp_tunnel_nic { + struct work_struct work; + + struct net_device *dev; + + u8 need_sync:1; + u8 need_replay:1; + u8 work_pending:1; + + unsigned int n_tables; + unsigned long missed; + struct udp_tunnel_nic_table_entry **entries; +}; + +/* We ensure all work structs are done using driver state, but not the code. 
+ * We need a workqueue we can flush before module gets removed. + */ +static struct workqueue_struct *udp_tunnel_nic_workqueue; + +static const char *udp_tunnel_nic_tunnel_type_name(unsigned int type) +{ + switch (type) { + case UDP_TUNNEL_TYPE_VXLAN: + return "vxlan"; + case UDP_TUNNEL_TYPE_GENEVE: + return "geneve"; + case UDP_TUNNEL_TYPE_VXLAN_GPE: + return "vxlan-gpe"; + default: + return "unknown"; + } +} + +static bool +udp_tunnel_nic_entry_is_free(struct udp_tunnel_nic_table_entry *entry) +{ + return entry->use_cnt == 0 && !entry->flags; +} + +static bool +udp_tunnel_nic_entry_is_frozen(struct udp_tunnel_nic_table_entry *entry) +{ + return entry->flags & UDP_TUNNEL_NIC_ENTRY_FROZEN; +} + +static void +udp_tunnel_nic_entry_freeze_used(struct udp_tunnel_nic_table_entry *entry) +{ + if (!udp_tunnel_nic_entry_is_free(entry)) + entry->flags |= UDP_TUNNEL_NIC_ENTRY_FROZEN; +} + +static void +udp_tunnel_nic_entry_unfreeze(struct udp_tunnel_nic_table_entry *entry) +{ + entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_FROZEN; +} + +static bool +udp_tunnel_nic_entry_is_queued(struct udp_tunnel_nic_table_entry *entry) +{ + return entry->flags & (UDP_TUNNEL_NIC_ENTRY_ADD | + UDP_TUNNEL_NIC_ENTRY_DEL); +} + +static void +udp_tunnel_nic_entry_queue(struct udp_tunnel_nic *utn, + struct udp_tunnel_nic_table_entry *entry, + unsigned int flag) +{ + entry->flags |= flag; + utn->need_sync = 1; +} + +static void +udp_tunnel_nic_ti_from_entry(struct udp_tunnel_nic_table_entry *entry, + struct udp_tunnel_info *ti) +{ + memset(ti, 0, sizeof(*ti)); + ti->port = entry->port; + ti->type = entry->type; + ti->hw_priv = entry->hw_priv; +} + +static bool +udp_tunnel_nic_is_empty(struct net_device *dev, struct udp_tunnel_nic *utn) +{ + const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; + unsigned int i, j; + + for (i = 0; i < utn->n_tables; i++) + for (j = 0; j < info->tables[i].n_entries; j++) + if (!udp_tunnel_nic_entry_is_free(&utn->entries[i][j])) + return false; + return true; +} + +static bool +udp_tunnel_nic_should_replay(struct net_device *dev, struct udp_tunnel_nic *utn) +{ + const struct udp_tunnel_nic_table_info *table; + unsigned int i, j; + + if (!utn->missed) + return false; + + for (i = 0; i < utn->n_tables; i++) { + table = &dev->udp_tunnel_nic_info->tables[i]; + if (!test_bit(i, &utn->missed)) + continue; + + for (j = 0; j < table->n_entries; j++) + if (udp_tunnel_nic_entry_is_free(&utn->entries[i][j])) + return true; + } + + return false; +} + +static void +__udp_tunnel_nic_get_port(struct net_device *dev, unsigned int table, + unsigned int idx, struct udp_tunnel_info *ti) +{ + struct udp_tunnel_nic_table_entry *entry; + struct udp_tunnel_nic *utn; + + utn = dev->udp_tunnel_nic; + entry = &utn->entries[table][idx]; + + if (entry->use_cnt) + udp_tunnel_nic_ti_from_entry(entry, ti); +} + +static void +__udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table, + unsigned int idx, u8 priv) +{ + dev->udp_tunnel_nic->entries[table][idx].hw_priv = priv; +} + +static void +udp_tunnel_nic_entry_update_done(struct udp_tunnel_nic_table_entry *entry, + int err) +{ + bool dodgy = entry->flags & UDP_TUNNEL_NIC_ENTRY_OP_FAIL; + + WARN_ON_ONCE(entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD && + entry->flags & UDP_TUNNEL_NIC_ENTRY_DEL); + + if (entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD && + (!err || (err == -EEXIST && dodgy))) + entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_ADD; + + if (entry->flags & UDP_TUNNEL_NIC_ENTRY_DEL && + (!err || (err == -ENOENT && dodgy))) + entry->flags &= 
~UDP_TUNNEL_NIC_ENTRY_DEL; + + if (!err) + entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_OP_FAIL; + else + entry->flags |= UDP_TUNNEL_NIC_ENTRY_OP_FAIL; +} + +static void +udp_tunnel_nic_device_sync_one(struct net_device *dev, + struct udp_tunnel_nic *utn, + unsigned int table, unsigned int idx) +{ + struct udp_tunnel_nic_table_entry *entry; + struct udp_tunnel_info ti; + int err; + + entry = &utn->entries[table][idx]; + if (!udp_tunnel_nic_entry_is_queued(entry)) + return; + + udp_tunnel_nic_ti_from_entry(entry, &ti); + if (entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD) + err = dev->udp_tunnel_nic_info->set_port(dev, table, idx, &ti); + else + err = dev->udp_tunnel_nic_info->unset_port(dev, table, idx, + &ti); + udp_tunnel_nic_entry_update_done(entry, err); + + if (err) + netdev_warn(dev, + "UDP tunnel port sync failed port %d type %s: %d\n", + be16_to_cpu(entry->port), + udp_tunnel_nic_tunnel_type_name(entry->type), + err); +} + +static void +udp_tunnel_nic_device_sync_by_port(struct net_device *dev, + struct udp_tunnel_nic *utn) +{ + const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; + unsigned int i, j; + + for (i = 0; i < utn->n_tables; i++) + for (j = 0; j < info->tables[i].n_entries; j++) + udp_tunnel_nic_device_sync_one(dev, utn, i, j); +} + +static void +udp_tunnel_nic_device_sync_by_table(struct net_device *dev, + struct udp_tunnel_nic *utn) +{ + const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; + unsigned int i, j; + int err; + + for (i = 0; i < utn->n_tables; i++) { + /* Find something that needs sync in this table */ + for (j = 0; j < info->tables[i].n_entries; j++) + if (udp_tunnel_nic_entry_is_queued(&utn->entries[i][j])) + break; + if (j == info->tables[i].n_entries) + continue; + + err = info->sync_table(dev, i); + if (err) + netdev_warn(dev, "UDP tunnel port sync failed for table %d: %d\n", + i, err); + + for (j = 0; j < info->tables[i].n_entries; j++) { + struct udp_tunnel_nic_table_entry *entry; + + entry = &utn->entries[i][j]; + if (udp_tunnel_nic_entry_is_queued(entry)) + udp_tunnel_nic_entry_update_done(entry, err); + } + } +} + +static void +__udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn) +{ + if (!utn->need_sync) + return; + + if (dev->udp_tunnel_nic_info->sync_table) + udp_tunnel_nic_device_sync_by_table(dev, utn); + else + udp_tunnel_nic_device_sync_by_port(dev, utn); + + utn->need_sync = 0; + /* Can't replay directly here, in case we come from the tunnel driver's + * notification - trying to replay may deadlock inside tunnel driver. + */ + utn->need_replay = udp_tunnel_nic_should_replay(dev, utn); +} + +static void +udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn) +{ + const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; + bool may_sleep; + + if (!utn->need_sync) + return; + + /* Drivers which sleep in the callback need to update from + * the workqueue, if we come from the tunnel driver's notification. 
+ */ + may_sleep = info->flags & UDP_TUNNEL_NIC_INFO_MAY_SLEEP; + if (!may_sleep) + __udp_tunnel_nic_device_sync(dev, utn); + if (may_sleep || utn->need_replay) { + queue_work(udp_tunnel_nic_workqueue, &utn->work); + utn->work_pending = 1; + } +} + +static bool +udp_tunnel_nic_table_is_capable(const struct udp_tunnel_nic_table_info *table, + struct udp_tunnel_info *ti) +{ + return table->tunnel_types & ti->type; +} + +static bool +udp_tunnel_nic_is_capable(struct net_device *dev, struct udp_tunnel_nic *utn, + struct udp_tunnel_info *ti) +{ + const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; + unsigned int i; + + /* Special case IPv4-only NICs */ + if (info->flags & UDP_TUNNEL_NIC_INFO_IPV4_ONLY && + ti->sa_family != AF_INET) + return false; + + for (i = 0; i < utn->n_tables; i++) + if (udp_tunnel_nic_table_is_capable(&info->tables[i], ti)) + return true; + return false; +} + +static int +udp_tunnel_nic_has_collision(struct net_device *dev, struct udp_tunnel_nic *utn, + struct udp_tunnel_info *ti) +{ + const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; + struct udp_tunnel_nic_table_entry *entry; + unsigned int i, j; + + for (i = 0; i < utn->n_tables; i++) + for (j = 0; j < info->tables[i].n_entries; j++) { + entry = &utn->entries[i][j]; + + if (!udp_tunnel_nic_entry_is_free(entry) && + entry->port == ti->port && + entry->type != ti->type) { + __set_bit(i, &utn->missed); + return true; + } + } + return false; +} + +static void +udp_tunnel_nic_entry_adj(struct udp_tunnel_nic *utn, + unsigned int table, unsigned int idx, int use_cnt_adj) +{ + struct udp_tunnel_nic_table_entry *entry = &utn->entries[table][idx]; + bool dodgy = entry->flags & UDP_TUNNEL_NIC_ENTRY_OP_FAIL; + unsigned int from, to; + + /* If not going from used to unused or vice versa - all done. + * For dodgy entries make sure we try to sync again (queue the entry). + */ + entry->use_cnt += use_cnt_adj; + if (!dodgy && !entry->use_cnt == !(entry->use_cnt - use_cnt_adj)) + return; + + /* Cancel the op before it was sent to the device, if possible, + * otherwise we'd need to take special care to issue commands + * in the same order the ports arrived. + */ + if (use_cnt_adj < 0) { + from = UDP_TUNNEL_NIC_ENTRY_ADD; + to = UDP_TUNNEL_NIC_ENTRY_DEL; + } else { + from = UDP_TUNNEL_NIC_ENTRY_DEL; + to = UDP_TUNNEL_NIC_ENTRY_ADD; + } + + if (entry->flags & from) { + entry->flags &= ~from; + if (!dodgy) + return; + } + + udp_tunnel_nic_entry_queue(utn, entry, to); +} + +static bool +udp_tunnel_nic_entry_try_adj(struct udp_tunnel_nic *utn, + unsigned int table, unsigned int idx, + struct udp_tunnel_info *ti, int use_cnt_adj) +{ + struct udp_tunnel_nic_table_entry *entry = &utn->entries[table][idx]; + + if (udp_tunnel_nic_entry_is_free(entry) || + entry->port != ti->port || + entry->type != ti->type) + return false; + + if (udp_tunnel_nic_entry_is_frozen(entry)) + return true; + + udp_tunnel_nic_entry_adj(utn, table, idx, use_cnt_adj); + return true; +} + +/* Try to find existing matching entry and adjust its use count, instead of + * adding a new one. Returns true if entry was found. In case of delete the + * entry may have gotten removed in the process, in which case it will be + * queued for removal. 
+ */ +static bool +udp_tunnel_nic_try_existing(struct net_device *dev, struct udp_tunnel_nic *utn, + struct udp_tunnel_info *ti, int use_cnt_adj) +{ + const struct udp_tunnel_nic_table_info *table; + unsigned int i, j; + + for (i = 0; i < utn->n_tables; i++) { + table = &dev->udp_tunnel_nic_info->tables[i]; + if (!udp_tunnel_nic_table_is_capable(table, ti)) + continue; + + for (j = 0; j < table->n_entries; j++) + if (udp_tunnel_nic_entry_try_adj(utn, i, j, ti, + use_cnt_adj)) + return true; + } + + return false; +} + +static bool +udp_tunnel_nic_add_existing(struct net_device *dev, struct udp_tunnel_nic *utn, + struct udp_tunnel_info *ti) +{ + return udp_tunnel_nic_try_existing(dev, utn, ti, +1); +} + +static bool +udp_tunnel_nic_del_existing(struct net_device *dev, struct udp_tunnel_nic *utn, + struct udp_tunnel_info *ti) +{ + return udp_tunnel_nic_try_existing(dev, utn, ti, -1); +} + +static bool +udp_tunnel_nic_add_new(struct net_device *dev, struct udp_tunnel_nic *utn, + struct udp_tunnel_info *ti) +{ + const struct udp_tunnel_nic_table_info *table; + unsigned int i, j; + + for (i = 0; i < utn->n_tables; i++) { + table = &dev->udp_tunnel_nic_info->tables[i]; + if (!udp_tunnel_nic_table_is_capable(table, ti)) + continue; + + for (j = 0; j < table->n_entries; j++) { + struct udp_tunnel_nic_table_entry *entry; + + entry = &utn->entries[i][j]; + if (!udp_tunnel_nic_entry_is_free(entry)) + continue; + + entry->port = ti->port; + entry->type = ti->type; + entry->use_cnt = 1; + udp_tunnel_nic_entry_queue(utn, entry, + UDP_TUNNEL_NIC_ENTRY_ADD); + return true; + } + + /* The different table may still fit this port in, but there + * are no devices currently which have multiple tables accepting + * the same tunnel type, and false positives are okay. + */ + __set_bit(i, &utn->missed); + } + + return false; +} + +static void +__udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti) +{ + const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; + struct udp_tunnel_nic *utn; + + utn = dev->udp_tunnel_nic; + if (!utn) + return; + if (!netif_running(dev) && info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY) + return; + + if (!udp_tunnel_nic_is_capable(dev, utn, ti)) + return; + + /* It may happen that a tunnel of one type is removed and different + * tunnel type tries to reuse its port before the device was informed. + * Rely on utn->missed to re-add this port later. 
+ */ + if (udp_tunnel_nic_has_collision(dev, utn, ti)) + return; + + if (!udp_tunnel_nic_add_existing(dev, utn, ti)) + udp_tunnel_nic_add_new(dev, utn, ti); + + udp_tunnel_nic_device_sync(dev, utn); +} + +static void +__udp_tunnel_nic_del_port(struct net_device *dev, struct udp_tunnel_info *ti) +{ + struct udp_tunnel_nic *utn; + + utn = dev->udp_tunnel_nic; + if (!utn) + return; + + if (!udp_tunnel_nic_is_capable(dev, utn, ti)) + return; + + udp_tunnel_nic_del_existing(dev, utn, ti); + + udp_tunnel_nic_device_sync(dev, utn); +} + +static void __udp_tunnel_nic_reset_ntf(struct net_device *dev) +{ + const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; + struct udp_tunnel_nic *utn; + unsigned int i, j; + + ASSERT_RTNL(); + + utn = dev->udp_tunnel_nic; + if (!utn) + return; + + utn->need_sync = false; + for (i = 0; i < utn->n_tables; i++) + for (j = 0; j < info->tables[i].n_entries; j++) { + struct udp_tunnel_nic_table_entry *entry; + + entry = &utn->entries[i][j]; + + entry->flags &= ~(UDP_TUNNEL_NIC_ENTRY_DEL | + UDP_TUNNEL_NIC_ENTRY_OP_FAIL); + /* We don't release rtnl across ops */ + WARN_ON(entry->flags & UDP_TUNNEL_NIC_ENTRY_FROZEN); + if (!entry->use_cnt) + continue; + + udp_tunnel_nic_entry_queue(utn, entry, + UDP_TUNNEL_NIC_ENTRY_ADD); + } + + __udp_tunnel_nic_device_sync(dev, utn); +} + +static const struct udp_tunnel_nic_ops __udp_tunnel_nic_ops = { + .get_port = __udp_tunnel_nic_get_port, + .set_port_priv = __udp_tunnel_nic_set_port_priv, + .add_port = __udp_tunnel_nic_add_port, + .del_port = __udp_tunnel_nic_del_port, + .reset_ntf = __udp_tunnel_nic_reset_ntf, +}; + +static void +udp_tunnel_nic_flush(struct net_device *dev, struct udp_tunnel_nic *utn) +{ + const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; + unsigned int i, j; + + for (i = 0; i < utn->n_tables; i++) + for (j = 0; j < info->tables[i].n_entries; j++) { + int adj_cnt = -utn->entries[i][j].use_cnt; + + if (adj_cnt) + udp_tunnel_nic_entry_adj(utn, i, j, adj_cnt); + } + + __udp_tunnel_nic_device_sync(dev, utn); + + for (i = 0; i < utn->n_tables; i++) + memset(utn->entries[i], 0, array_size(info->tables[i].n_entries, + sizeof(**utn->entries))); + WARN_ON(utn->need_sync); + utn->need_replay = 0; +} + +static void +udp_tunnel_nic_replay(struct net_device *dev, struct udp_tunnel_nic *utn) +{ + const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; + unsigned int i, j; + + /* Freeze all the ports we are already tracking so that the replay + * does not double up the refcount. 
+ */ + for (i = 0; i < utn->n_tables; i++) + for (j = 0; j < info->tables[i].n_entries; j++) + udp_tunnel_nic_entry_freeze_used(&utn->entries[i][j]); + utn->missed = 0; + utn->need_replay = 0; + + udp_tunnel_get_rx_info(dev); + + for (i = 0; i < utn->n_tables; i++) + for (j = 0; j < info->tables[i].n_entries; j++) + udp_tunnel_nic_entry_unfreeze(&utn->entries[i][j]); +} + +static void udp_tunnel_nic_device_sync_work(struct work_struct *work) +{ + struct udp_tunnel_nic *utn = + container_of(work, struct udp_tunnel_nic, work); + + rtnl_lock(); + utn->work_pending = 0; + __udp_tunnel_nic_device_sync(utn->dev, utn); + + if (utn->need_replay) + udp_tunnel_nic_replay(utn->dev, utn); + rtnl_unlock(); +} + +static struct udp_tunnel_nic * +udp_tunnel_nic_alloc(const struct udp_tunnel_nic_info *info, + unsigned int n_tables) +{ + struct udp_tunnel_nic *utn; + unsigned int i; + + utn = kzalloc(sizeof(*utn), GFP_KERNEL); + if (!utn) + return NULL; + utn->n_tables = n_tables; + INIT_WORK(&utn->work, udp_tunnel_nic_device_sync_work); + + utn->entries = kmalloc_array(n_tables, sizeof(void *), GFP_KERNEL); + if (!utn->entries) + goto err_free_utn; + + for (i = 0; i < n_tables; i++) { + utn->entries[i] = kcalloc(info->tables[i].n_entries, + sizeof(*utn->entries[i]), GFP_KERNEL); + if (!utn->entries[i]) + goto err_free_prev_entries; + } + + return utn; + +err_free_prev_entries: + while (i--) + kfree(utn->entries[i]); + kfree(utn->entries); +err_free_utn: + kfree(utn); + return NULL; +} + +static int udp_tunnel_nic_register(struct net_device *dev) +{ + const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; + struct udp_tunnel_nic *utn; + unsigned int n_tables, i; + + BUILD_BUG_ON(sizeof(utn->missed) * BITS_PER_BYTE < + UDP_TUNNEL_NIC_MAX_TABLES); + + if (WARN_ON(!info->set_port != !info->unset_port) || + WARN_ON(!info->set_port == !info->sync_table) || + WARN_ON(!info->tables[0].n_entries)) + return -EINVAL; + + n_tables = 1; + for (i = 1; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) { + if (!info->tables[i].n_entries) + continue; + + n_tables++; + if (WARN_ON(!info->tables[i - 1].n_entries)) + return -EINVAL; + } + + utn = udp_tunnel_nic_alloc(info, n_tables); + if (!utn) + return -ENOMEM; + + utn->dev = dev; + dev_hold(dev); + dev->udp_tunnel_nic = utn; + + if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY)) + udp_tunnel_get_rx_info(dev); + + return 0; +} + +static void +udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn) +{ + unsigned int i; + + /* Flush before we check work, so we don't waste time adding entries + * from the work which we will boot immediately. + */ + udp_tunnel_nic_flush(dev, utn); + + /* Wait for the work to be done using the state, netdev core will + * retry unregister until we give up our reference on this device. 
+ */ + if (utn->work_pending) + return; + + for (i = 0; i < utn->n_tables; i++) + kfree(utn->entries[i]); + kfree(utn->entries); + kfree(utn); + dev->udp_tunnel_nic = NULL; + dev_put(dev); +} + +static int +udp_tunnel_nic_netdevice_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + const struct udp_tunnel_nic_info *info; + struct udp_tunnel_nic *utn; + + info = dev->udp_tunnel_nic_info; + if (!info) + return NOTIFY_DONE; + + if (event == NETDEV_REGISTER) { + int err; + + err = udp_tunnel_nic_register(dev); + if (err) + netdev_WARN(dev, "failed to register for UDP tunnel offloads: %d", err); + return notifier_from_errno(err); + } + /* All other events will need the udp_tunnel_nic state */ + utn = dev->udp_tunnel_nic; + if (!utn) + return NOTIFY_DONE; + + if (event == NETDEV_UNREGISTER) { + udp_tunnel_nic_unregister(dev, utn); + return NOTIFY_OK; + } + + /* All other events only matter if NIC has to be programmed open */ + if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY)) + return NOTIFY_DONE; + + if (event == NETDEV_UP) { + WARN_ON(!udp_tunnel_nic_is_empty(dev, utn)); + udp_tunnel_get_rx_info(dev); + return NOTIFY_OK; + } + if (event == NETDEV_GOING_DOWN) { + udp_tunnel_nic_flush(dev, utn); + return NOTIFY_OK; + } + + return NOTIFY_DONE; +} + +static struct notifier_block udp_tunnel_nic_notifier_block __read_mostly = { + .notifier_call = udp_tunnel_nic_netdevice_event, +}; + +static int __init udp_tunnel_nic_init_module(void) +{ + int err; + + udp_tunnel_nic_workqueue = alloc_workqueue("udp_tunnel_nic", 0, 0); + if (!udp_tunnel_nic_workqueue) + return -ENOMEM; + + rtnl_lock(); + udp_tunnel_nic_ops = &__udp_tunnel_nic_ops; + rtnl_unlock(); + + err = register_netdevice_notifier(&udp_tunnel_nic_notifier_block); + if (err) + goto err_unset_ops; + + return 0; + +err_unset_ops: + rtnl_lock(); + udp_tunnel_nic_ops = NULL; + rtnl_unlock(); + destroy_workqueue(udp_tunnel_nic_workqueue); + return err; +} +late_initcall(udp_tunnel_nic_init_module); + +static void __exit udp_tunnel_nic_cleanup_module(void) +{ + unregister_netdevice_notifier(&udp_tunnel_nic_notifier_block); + + rtnl_lock(); + udp_tunnel_nic_ops = NULL; + rtnl_unlock(); + + destroy_workqueue(udp_tunnel_nic_workqueue); +} +module_exit(udp_tunnel_nic_cleanup_module); + +MODULE_LICENSE("GPL"); diff --git a/net/ipv4/udp_tunnel_stub.c b/net/ipv4/udp_tunnel_stub.c new file mode 100644 index 000000000000..c4b2888f5fef --- /dev/null +++ b/net/ipv4/udp_tunnel_stub.c @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (c) 2020 Facebook Inc. + +#include + +const struct udp_tunnel_nic_ops *udp_tunnel_nic_ops; +EXPORT_SYMBOL_GPL(udp_tunnel_nic_ops); -- cgit v1.2.3 From c7d759eb7b12f91a25f4d3cd03ff5209046ddfc2 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 9 Jul 2020 17:42:47 -0700 Subject: ethtool: add tunnel info interface Add an interface to report offloaded UDP ports via ethtool netlink. Now that core takes care of tracking which UDP tunnel ports the NICs are aware of we can quite easily export this information out to user space. The responsibility of writing the netlink dumps is split between ethtool code and udp_tunnel_nic.c - since udp_tunnel module may not always be loaded, yet we should always report the capabilities of the NIC. 
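As context for the sample dump below: the tables reported through this interface are the ones a NIC driver declares in its struct udp_tunnel_nic_info. A minimal sketch of such a declaration follows; the callback names and table sizes are purely hypothetical, only the field and flag names are taken from the udp_tunnel_nic code above:

static const struct udp_tunnel_nic_info example_udp_tunnels = {
	/* hypothetical driver callbacks that program/unprogram a port */
	.set_port	= example_udp_tunnel_set_port,
	.unset_port	= example_udp_tunnel_unset_port,
	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
	.tables		= {
		{
			.n_entries	= 4,
			.tunnel_types	= UDP_TUNNEL_TYPE_VXLAN,
		},
		{
			.n_entries	= 4,
			.tunnel_types	= UDP_TUNNEL_TYPE_GENEVE |
					  UDP_TUNNEL_TYPE_VXLAN_GPE,
		},
	},
};

A device registered with a table layout like this produces the dump shown next: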
$ ethtool --show-tunnels eth0 Tunnel information for eth0: UDP port table 0: Size: 4 Types: vxlan No entries UDP port table 1: Size: 4 Types: geneve, vxlan-gpe Entries (1): port 1230, vxlan-gpe v4: - back to v2, build fix is now directly in udp_tunnel.h v3: - don't compile ETHTOOL_MSG_TUNNEL_INFO_GET in if CONFIG_INET not set. v2: - fix string set count, - reorder enums in the uAPI, - fix type of ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES to bitset in docs and comments. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- Documentation/networking/ethtool-netlink.rst | 33 ++++ include/net/udp_tunnel.h | 21 +++ include/uapi/linux/ethtool.h | 2 + include/uapi/linux/ethtool_netlink.h | 55 ++++++ net/ethtool/Makefile | 3 +- net/ethtool/common.c | 9 + net/ethtool/common.h | 1 + net/ethtool/netlink.c | 12 ++ net/ethtool/netlink.h | 4 + net/ethtool/strset.c | 5 + net/ethtool/tunnels.c | 259 +++++++++++++++++++++++++++ net/ipv4/udp_tunnel_nic.c | 69 +++++++ 12 files changed, 472 insertions(+), 1 deletion(-) create mode 100644 net/ethtool/tunnels.c (limited to 'include') diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst index 459a0d11cfde..7d75f1e32152 100644 --- a/Documentation/networking/ethtool-netlink.rst +++ b/Documentation/networking/ethtool-netlink.rst @@ -1230,6 +1230,39 @@ used to report the amplitude of the reflection for a given pair. | | | ``ETHTOOL_A_CABLE_AMPLITUDE_mV`` | s16 | Reflection amplitude | +-+-+-----------------------------------------+--------+----------------------+ +TUNNEL_INFO +=========== + +Gets information about the tunnel state NIC is aware of. + +Request contents: + + ===================================== ====== ========================== + ``ETHTOOL_A_TUNNEL_INFO_HEADER`` nested request header + ===================================== ====== ========================== + +Kernel response contents: + + +---------------------------------------------+--------+---------------------+ + | ``ETHTOOL_A_TUNNEL_INFO_HEADER`` | nested | reply header | + +---------------------------------------------+--------+---------------------+ + | ``ETHTOOL_A_TUNNEL_INFO_UDP_PORTS`` | nested | all UDP port tables | + +-+-------------------------------------------+--------+---------------------+ + | | ``ETHTOOL_A_TUNNEL_UDP_TABLE`` | nested | one UDP port table | + +-+-+-----------------------------------------+--------+---------------------+ + | | | ``ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE`` | u32 | max size of the | + | | | | | table | + +-+-+-----------------------------------------+--------+---------------------+ + | | | ``ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES`` | bitset | tunnel types which | + | | | | | table can hold | + +-+-+-----------------------------------------+--------+---------------------+ + | | | ``ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY`` | nested | offloaded UDP port | + +-+-+-+---------------------------------------+--------+---------------------+ + | | | | ``ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT`` | be16 | UDP port | + +-+-+-+---------------------------------------+--------+---------------------+ + | | | | ``ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE`` | u32 | tunnel type | + +-+-+-+---------------------------------------+--------+---------------------+ + Request translation =================== diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index ee34619e4cfa..dd20ce99740c 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -255,6 +255,10 @@ struct udp_tunnel_nic_ops { void (*add_port)(struct net_device *dev, 
struct udp_tunnel_info *ti); void (*del_port)(struct net_device *dev, struct udp_tunnel_info *ti); void (*reset_ntf)(struct net_device *dev); + + size_t (*dump_size)(struct net_device *dev, unsigned int table); + int (*dump_write)(struct net_device *dev, unsigned int table, + struct sk_buff *skb); }; #ifdef CONFIG_INET @@ -318,4 +322,21 @@ static inline void udp_tunnel_nic_reset_ntf(struct net_device *dev) if (udp_tunnel_nic_ops) udp_tunnel_nic_ops->reset_ntf(dev); } + +static inline size_t +udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table) +{ + if (!udp_tunnel_nic_ops) + return 0; + return udp_tunnel_nic_ops->dump_size(dev, table); +} + +static inline int +udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table, + struct sk_buff *skb) +{ + if (!udp_tunnel_nic_ops) + return 0; + return udp_tunnel_nic_ops->dump_write(dev, table, skb); +} #endif diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 60856e0f9618..b4f2d134e713 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -669,6 +669,7 @@ enum ethtool_link_ext_substate_cable_issue { * @ETH_SS_SOF_TIMESTAMPING: SOF_TIMESTAMPING_* flags * @ETH_SS_TS_TX_TYPES: timestamping Tx types * @ETH_SS_TS_RX_FILTERS: timestamping Rx filters + * @ETH_SS_UDP_TUNNEL_TYPES: UDP tunnel types */ enum ethtool_stringset { ETH_SS_TEST = 0, @@ -686,6 +687,7 @@ enum ethtool_stringset { ETH_SS_SOF_TIMESTAMPING, ETH_SS_TS_TX_TYPES, ETH_SS_TS_RX_FILTERS, + ETH_SS_UDP_TUNNEL_TYPES, /* add new constants above here */ ETH_SS_COUNT diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h index c12ce4df4b6b..5dcd24cb33ea 100644 --- a/include/uapi/linux/ethtool_netlink.h +++ b/include/uapi/linux/ethtool_netlink.h @@ -41,6 +41,7 @@ enum { ETHTOOL_MSG_TSINFO_GET, ETHTOOL_MSG_CABLE_TEST_ACT, ETHTOOL_MSG_CABLE_TEST_TDR_ACT, + ETHTOOL_MSG_TUNNEL_INFO_GET, /* add new constants above here */ __ETHTOOL_MSG_USER_CNT, @@ -556,6 +557,60 @@ enum { ETHTOOL_A_CABLE_TEST_TDR_NTF_MAX = __ETHTOOL_A_CABLE_TEST_TDR_NTF_CNT - 1 }; +/* TUNNEL INFO */ + +enum { + ETHTOOL_UDP_TUNNEL_TYPE_VXLAN, + ETHTOOL_UDP_TUNNEL_TYPE_GENEVE, + ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE, + + __ETHTOOL_UDP_TUNNEL_TYPE_CNT +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_ENTRY_UNSPEC, + + ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT, /* be16 */ + ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE, /* u32 */ + + /* add new constants above here */ + __ETHTOOL_A_TUNNEL_UDP_ENTRY_CNT, + ETHTOOL_A_TUNNEL_UDP_ENTRY_MAX = (__ETHTOOL_A_TUNNEL_UDP_ENTRY_CNT - 1) +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_TABLE_UNSPEC, + + ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE, /* u32 */ + ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES, /* bitset */ + ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY, /* nest - _UDP_ENTRY_* */ + + /* add new constants above here */ + __ETHTOOL_A_TUNNEL_UDP_TABLE_CNT, + ETHTOOL_A_TUNNEL_UDP_TABLE_MAX = (__ETHTOOL_A_TUNNEL_UDP_TABLE_CNT - 1) +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_UNSPEC, + + ETHTOOL_A_TUNNEL_UDP_TABLE, /* nest - _UDP_TABLE_* */ + + /* add new constants above here */ + __ETHTOOL_A_TUNNEL_UDP_CNT, + ETHTOOL_A_TUNNEL_UDP_MAX = (__ETHTOOL_A_TUNNEL_UDP_CNT - 1) +}; + +enum { + ETHTOOL_A_TUNNEL_INFO_UNSPEC, + ETHTOOL_A_TUNNEL_INFO_HEADER, /* nest - _A_HEADER_* */ + + ETHTOOL_A_TUNNEL_INFO_UDP_PORTS, /* nest - _UDP_TABLE */ + + /* add new constants above here */ + __ETHTOOL_A_TUNNEL_INFO_CNT, + ETHTOOL_A_TUNNEL_INFO_MAX = (__ETHTOOL_A_TUNNEL_INFO_CNT - 1) +}; + /* generic netlink info */ #define ETHTOOL_GENL_NAME "ethtool" #define ETHTOOL_GENL_VERSION 1 diff --git 
a/net/ethtool/Makefile b/net/ethtool/Makefile index 0c2b94f20499..7a849ff22dad 100644 --- a/net/ethtool/Makefile +++ b/net/ethtool/Makefile @@ -6,4 +6,5 @@ obj-$(CONFIG_ETHTOOL_NETLINK) += ethtool_nl.o ethtool_nl-y := netlink.o bitset.o strset.o linkinfo.o linkmodes.o \ linkstate.o debug.o wol.o features.o privflags.o rings.o \ - channels.o coalesce.o pause.o eee.o tsinfo.o cabletest.o + channels.o coalesce.o pause.o eee.o tsinfo.o cabletest.o \ + tunnels.o diff --git a/net/ethtool/common.c b/net/ethtool/common.c index c54166713797..ed19573fccd7 100644 --- a/net/ethtool/common.c +++ b/net/ethtool/common.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only +#include #include #include #include @@ -272,6 +273,14 @@ const char ts_rx_filter_names[][ETH_GSTRING_LEN] = { }; static_assert(ARRAY_SIZE(ts_rx_filter_names) == __HWTSTAMP_FILTER_CNT); +const char udp_tunnel_type_names[][ETH_GSTRING_LEN] = { + [ETHTOOL_UDP_TUNNEL_TYPE_VXLAN] = "vxlan", + [ETHTOOL_UDP_TUNNEL_TYPE_GENEVE] = "geneve", + [ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE] = "vxlan-gpe", +}; +static_assert(ARRAY_SIZE(udp_tunnel_type_names) == + __ETHTOOL_UDP_TUNNEL_TYPE_CNT); + /* return false if legacy contained non-0 deprecated fields * maxtxpkt/maxrxpkt. rest of ksettings always updated */ diff --git a/net/ethtool/common.h b/net/ethtool/common.h index b83bef38368c..3d9251c95a8b 100644 --- a/net/ethtool/common.h +++ b/net/ethtool/common.h @@ -28,6 +28,7 @@ extern const char wol_mode_names[][ETH_GSTRING_LEN]; extern const char sof_timestamping_names[][ETH_GSTRING_LEN]; extern const char ts_tx_type_names[][ETH_GSTRING_LEN]; extern const char ts_rx_filter_names[][ETH_GSTRING_LEN]; +extern const char udp_tunnel_type_names[][ETH_GSTRING_LEN]; int __ethtool_get_link(struct net_device *dev); diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c index 88fd07f47040..fb9d096faaa4 100644 --- a/net/ethtool/netlink.c +++ b/net/ethtool/netlink.c @@ -181,6 +181,12 @@ err: return NULL; } +void *ethnl_dump_put(struct sk_buff *skb, struct netlink_callback *cb, u8 cmd) +{ + return genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + ðtool_genl_family, 0, cmd); +} + void *ethnl_bcastmsg_put(struct sk_buff *skb, u8 cmd) { return genlmsg_put(skb, 0, ++ethnl_bcast_seq, ðtool_genl_family, 0, @@ -849,6 +855,12 @@ static const struct genl_ops ethtool_genl_ops[] = { .flags = GENL_UNS_ADMIN_PERM, .doit = ethnl_act_cable_test_tdr, }, + { + .cmd = ETHTOOL_MSG_TUNNEL_INFO_GET, + .doit = ethnl_tunnel_info_doit, + .start = ethnl_tunnel_info_start, + .dumpit = ethnl_tunnel_info_dumpit, + }, }; static const struct genl_multicast_group ethtool_nl_mcgrps[] = { diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h index 9a96b6e90dc2..e2085005caac 100644 --- a/net/ethtool/netlink.h +++ b/net/ethtool/netlink.h @@ -19,6 +19,7 @@ int ethnl_fill_reply_header(struct sk_buff *skb, struct net_device *dev, struct sk_buff *ethnl_reply_init(size_t payload, struct net_device *dev, u8 cmd, u16 hdr_attrtype, struct genl_info *info, void **ehdrp); +void *ethnl_dump_put(struct sk_buff *skb, struct netlink_callback *cb, u8 cmd); void *ethnl_bcastmsg_put(struct sk_buff *skb, u8 cmd); int ethnl_multicast(struct sk_buff *skb, struct net_device *dev); @@ -361,5 +362,8 @@ int ethnl_set_pause(struct sk_buff *skb, struct genl_info *info); int ethnl_set_eee(struct sk_buff *skb, struct genl_info *info); int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info); int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info); +int 
ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info); +int ethnl_tunnel_info_start(struct netlink_callback *cb); +int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb); #endif /* _NET_ETHTOOL_NETLINK_H */ diff --git a/net/ethtool/strset.c b/net/ethtool/strset.c index 773634b6b048..82707b662fe4 100644 --- a/net/ethtool/strset.c +++ b/net/ethtool/strset.c @@ -75,6 +75,11 @@ static const struct strset_info info_template[] = { .count = __HWTSTAMP_FILTER_CNT, .strings = ts_rx_filter_names, }, + [ETH_SS_UDP_TUNNEL_TYPES] = { + .per_dev = false, + .count = __ETHTOOL_UDP_TUNNEL_TYPE_CNT, + .strings = udp_tunnel_type_names, + }, }; struct strset_req_info { diff --git a/net/ethtool/tunnels.c b/net/ethtool/tunnels.c new file mode 100644 index 000000000000..6b89255f1231 --- /dev/null +++ b/net/ethtool/tunnels.c @@ -0,0 +1,259 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include + +#include "bitset.h" +#include "common.h" +#include "netlink.h" + +static const struct nla_policy +ethtool_tunnel_info_policy[ETHTOOL_A_TUNNEL_INFO_MAX + 1] = { + [ETHTOOL_A_TUNNEL_INFO_UNSPEC] = { .type = NLA_REJECT }, + [ETHTOOL_A_TUNNEL_INFO_HEADER] = { .type = NLA_NESTED }, +}; + +static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN == ilog2(UDP_TUNNEL_TYPE_VXLAN)); +static_assert(ETHTOOL_UDP_TUNNEL_TYPE_GENEVE == ilog2(UDP_TUNNEL_TYPE_GENEVE)); +static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE == + ilog2(UDP_TUNNEL_TYPE_VXLAN_GPE)); + +static ssize_t +ethnl_tunnel_info_reply_size(const struct ethnl_req_info *req_base, + struct netlink_ext_ack *extack) +{ + bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; + const struct udp_tunnel_nic_info *info; + unsigned int i; + size_t size; + int ret; + + info = req_base->dev->udp_tunnel_nic_info; + if (!info) { + NL_SET_ERR_MSG(extack, + "device does not report tunnel offload info"); + return -EOPNOTSUPP; + } + + size = nla_total_size(0); /* _INFO_UDP_PORTS */ + + for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) { + if (!info->tables[i].n_entries) + return size; + + size += nla_total_size(0); /* _UDP_TABLE */ + size += nla_total_size(sizeof(u32)); /* _UDP_TABLE_SIZE */ + ret = ethnl_bitset32_size(&info->tables[i].tunnel_types, NULL, + __ETHTOOL_UDP_TUNNEL_TYPE_CNT, + udp_tunnel_type_names, compact); + if (ret < 0) + return ret; + size += ret; + + size += udp_tunnel_nic_dump_size(req_base->dev, i); + } + + return size; +} + +static int +ethnl_tunnel_info_fill_reply(const struct ethnl_req_info *req_base, + struct sk_buff *skb) +{ + bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; + const struct udp_tunnel_nic_info *info; + struct nlattr *ports, *table; + unsigned int i; + + info = req_base->dev->udp_tunnel_nic_info; + if (!info) + return -EOPNOTSUPP; + + ports = nla_nest_start(skb, ETHTOOL_A_TUNNEL_INFO_UDP_PORTS); + if (!ports) + return -EMSGSIZE; + + for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) { + if (!info->tables[i].n_entries) + break; + + table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE); + if (!table) + goto err_cancel_ports; + + if (nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE, + info->tables[i].n_entries)) + goto err_cancel_table; + + if (ethnl_put_bitset32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES, + &info->tables[i].tunnel_types, NULL, + __ETHTOOL_UDP_TUNNEL_TYPE_CNT, + udp_tunnel_type_names, compact)) + goto err_cancel_table; + + if (udp_tunnel_nic_dump_write(req_base->dev, i, skb)) + goto err_cancel_table; + + nla_nest_end(skb, table); + } + + nla_nest_end(skb, ports); + + return 0; + 
+err_cancel_table: + nla_nest_cancel(skb, table); +err_cancel_ports: + nla_nest_cancel(skb, ports); + return -EMSGSIZE; +} + +static int +ethnl_tunnel_info_req_parse(struct ethnl_req_info *req_info, + const struct nlmsghdr *nlhdr, struct net *net, + struct netlink_ext_ack *extack, bool require_dev) +{ + struct nlattr *tb[ETHTOOL_A_TUNNEL_INFO_MAX + 1]; + int ret; + + ret = nlmsg_parse(nlhdr, GENL_HDRLEN, tb, ETHTOOL_A_TUNNEL_INFO_MAX, + ethtool_tunnel_info_policy, extack); + if (ret < 0) + return ret; + + return ethnl_parse_header_dev_get(req_info, + tb[ETHTOOL_A_TUNNEL_INFO_HEADER], + net, extack, require_dev); +} + +int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info) +{ + struct ethnl_req_info req_info = {}; + struct sk_buff *rskb; + void *reply_payload; + int reply_len; + int ret; + + ret = ethnl_tunnel_info_req_parse(&req_info, info->nlhdr, + genl_info_net(info), info->extack, + true); + if (ret < 0) + return ret; + + rtnl_lock(); + ret = ethnl_tunnel_info_reply_size(&req_info, info->extack); + if (ret < 0) + goto err_unlock_rtnl; + reply_len = ret + ethnl_reply_header_size(); + + rskb = ethnl_reply_init(reply_len, req_info.dev, + ETHTOOL_MSG_TUNNEL_INFO_GET, + ETHTOOL_A_TUNNEL_INFO_HEADER, + info, &reply_payload); + if (!rskb) { + ret = -ENOMEM; + goto err_unlock_rtnl; + } + + ret = ethnl_tunnel_info_fill_reply(&req_info, rskb); + if (ret) + goto err_free_msg; + rtnl_unlock(); + dev_put(req_info.dev); + genlmsg_end(rskb, reply_payload); + + return genlmsg_reply(rskb, info); + +err_free_msg: + nlmsg_free(rskb); +err_unlock_rtnl: + rtnl_unlock(); + dev_put(req_info.dev); + return ret; +} + +struct ethnl_tunnel_info_dump_ctx { + struct ethnl_req_info req_info; + int pos_hash; + int pos_idx; +}; + +int ethnl_tunnel_info_start(struct netlink_callback *cb) +{ + struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx; + int ret; + + BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx)); + + memset(ctx, 0, sizeof(*ctx)); + + ret = ethnl_tunnel_info_req_parse(&ctx->req_info, cb->nlh, + sock_net(cb->skb->sk), cb->extack, + false); + if (ctx->req_info.dev) { + dev_put(ctx->req_info.dev); + ctx->req_info.dev = NULL; + } + + return ret; +} + +int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb) +{ + struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx; + struct net *net = sock_net(skb->sk); + int s_idx = ctx->pos_idx; + int h, idx = 0; + int ret = 0; + void *ehdr; + + rtnl_lock(); + cb->seq = net->dev_base_seq; + for (h = ctx->pos_hash; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { + struct hlist_head *head; + struct net_device *dev; + + head = &net->dev_index_head[h]; + idx = 0; + hlist_for_each_entry(dev, head, index_hlist) { + if (idx < s_idx) + goto cont; + + ehdr = ethnl_dump_put(skb, cb, + ETHTOOL_MSG_TUNNEL_INFO_GET); + if (!ehdr) { + ret = -EMSGSIZE; + goto out; + } + + ret = ethnl_fill_reply_header(skb, dev, ETHTOOL_A_TUNNEL_INFO_HEADER); + if (ret < 0) { + genlmsg_cancel(skb, ehdr); + goto out; + } + + ctx->req_info.dev = dev; + ret = ethnl_tunnel_info_fill_reply(&ctx->req_info, skb); + ctx->req_info.dev = NULL; + if (ret < 0) { + genlmsg_cancel(skb, ehdr); + if (ret == -EOPNOTSUPP) + goto cont; + goto out; + } + genlmsg_end(skb, ehdr); +cont: + idx++; + } + } +out: + rtnl_unlock(); + + ctx->pos_hash = h; + ctx->pos_idx = idx; + nl_dump_check_consistent(cb, nlmsg_hdr(skb)); + + if (ret == -EMSGSIZE && skb->len) + return skb->len; + return ret; +} diff --git a/net/ipv4/udp_tunnel_nic.c b/net/ipv4/udp_tunnel_nic.c index 056cfe0b770e..f0dbd9905a53 
100644 --- a/net/ipv4/udp_tunnel_nic.c +++ b/net/ipv4/udp_tunnel_nic.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only // Copyright (c) 2020 Facebook Inc. +#include #include #include #include @@ -72,6 +73,12 @@ udp_tunnel_nic_entry_is_free(struct udp_tunnel_nic_table_entry *entry) return entry->use_cnt == 0 && !entry->flags; } +static bool +udp_tunnel_nic_entry_is_present(struct udp_tunnel_nic_table_entry *entry) +{ + return entry->use_cnt && !(entry->flags & ~UDP_TUNNEL_NIC_ENTRY_FROZEN); +} + static bool udp_tunnel_nic_entry_is_frozen(struct udp_tunnel_nic_table_entry *entry) { @@ -564,12 +571,74 @@ static void __udp_tunnel_nic_reset_ntf(struct net_device *dev) __udp_tunnel_nic_device_sync(dev, utn); } +static size_t +__udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table) +{ + const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; + struct udp_tunnel_nic *utn; + unsigned int j; + size_t size; + + utn = dev->udp_tunnel_nic; + if (!utn) + return 0; + + size = 0; + for (j = 0; j < info->tables[table].n_entries; j++) { + if (!udp_tunnel_nic_entry_is_present(&utn->entries[table][j])) + continue; + + size += nla_total_size(0) + /* _TABLE_ENTRY */ + nla_total_size(sizeof(__be16)) + /* _ENTRY_PORT */ + nla_total_size(sizeof(u32)); /* _ENTRY_TYPE */ + } + + return size; +} + +static int +__udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table, + struct sk_buff *skb) +{ + const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; + struct udp_tunnel_nic *utn; + struct nlattr *nest; + unsigned int j; + + utn = dev->udp_tunnel_nic; + if (!utn) + return 0; + + for (j = 0; j < info->tables[table].n_entries; j++) { + if (!udp_tunnel_nic_entry_is_present(&utn->entries[table][j])) + continue; + + nest = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY); + + if (nla_put_be16(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT, + utn->entries[table][j].port) || + nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE, + ilog2(utn->entries[table][j].type))) + goto err_cancel; + + nla_nest_end(skb, nest); + } + + return 0; + +err_cancel: + nla_nest_cancel(skb, nest); + return -EMSGSIZE; +} + static const struct udp_tunnel_nic_ops __udp_tunnel_nic_ops = { .get_port = __udp_tunnel_nic_get_port, .set_port_priv = __udp_tunnel_nic_set_port_priv, .add_port = __udp_tunnel_nic_add_port, .del_port = __udp_tunnel_nic_del_port, .reset_ntf = __udp_tunnel_nic_reset_ntf, + .dump_size = __udp_tunnel_nic_dump_size, + .dump_write = __udp_tunnel_nic_dump_write, }; static void -- cgit v1.2.3 From 3edd68399dc155b80335244c8c2673eaa652931a Mon Sep 17 00:00:00 2001 From: Mohammed Gamal Date: Fri, 10 Jul 2020 17:48:11 +0200 Subject: KVM: x86: Add a capability for GUEST_MAXPHYADDR < HOST_MAXPHYADDR support This patch adds a new capability KVM_CAP_SMALLER_MAXPHYADDR which allows userspace to query if the underlying architecture would support GUEST_MAXPHYADDR < HOST_MAXPHYADDR and hence act accordingly (e.g. qemu can decide if it should warn for -cpu ..,phys-bits=X) The complications in this patch are due to unexpected (but documented) behaviour we see with NPF vmexit handling in AMD processor. 
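As an aside, a userspace VMM can probe for the new capability with the standard KVM_CHECK_EXTENSION ioctl; the following is a minimal sketch, not part of this patch, with error handling kept to a minimum:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);

	if (kvm < 0)
		return 1;
	/* a positive return means GUEST_MAXPHYADDR < HOST_MAXPHYADDR is usable */
	if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_SMALLER_MAXPHYADDR) > 0)
		printf("smaller guest MAXPHYADDR supported\n");
	return 0;
}

Returning to the AMD-specific behaviour: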
If SVM is modified to add guest physical address checks in the NPF and guest #PF paths, we see the followning error multiple times in the 'access' test in kvm-unit-tests: test pte.p pte.36 pde.p: FAIL: pte 2000021 expected 2000001 Dump mapping: address: 0x123400000000 ------L4: 24c3027 ------L3: 24c4027 ------L2: 24c5021 ------L1: 1002000021 This is because the PTE's accessed bit is set by the CPU hardware before the NPF vmexit. This is handled completely by hardware and cannot be fixed in software. Therefore, availability of the new capability depends on a boolean variable allow_smaller_maxphyaddr which is set individually by VMX and SVM init routines. On VMX it's always set to true, on SVM it's only set to true when NPT is not enabled. CC: Tom Lendacky CC: Babu Moger Signed-off-by: Mohammed Gamal Message-Id: <20200710154811.418214-10-mgamal@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 2 +- arch/x86/kvm/svm/svm.c | 15 +++++++++++++++ arch/x86/kvm/vmx/vmx.c | 7 +++++++ arch/x86/kvm/x86.c | 6 ++++++ include/uapi/linux/kvm.h | 2 ++ 5 files changed, 31 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 1df95f10c903..1bab87a444d7 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1263,7 +1263,7 @@ struct kvm_arch_async_pf { }; extern u64 __read_mostly host_efer; - +extern bool __read_mostly allow_smaller_maxphyaddr; extern struct kvm_x86_ops kvm_x86_ops; #define __KVM_HAVE_ARCH_VM_ALLOC diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 2371b1e40f39..783330d0e7b8 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -924,6 +924,21 @@ static __init int svm_hardware_setup(void) svm_set_cpu_caps(); + /* + * It seems that on AMD processors PTE's accessed bit is + * being set by the CPU hardware before the NPF vmexit. + * This is not expected behaviour and our tests fail because + * of it. + * A workaround here is to disable support for + * GUEST_MAXPHYADDR < HOST_MAXPHYADDR if NPT is enabled. 
+ * In this case userspace can know if there is support using + * KVM_CAP_SMALLER_MAXPHYADDR extension and decide how to handle + * it + * If future AMD CPU models change the behaviour described above, + * this variable can be changed accordingly + */ + allow_smaller_maxphyaddr = !npt_enabled; + return 0; err: diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 962a78c7dde5..1bb59ae5016d 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -8309,6 +8309,13 @@ static int __init vmx_init(void) #endif vmx_check_vmcs12_offsets(); + /* + * Intel processors don't have problems with + * GUEST_MAXPHYADDR < HOST_MAXPHYADDR so enable + * it for VMX by default + */ + allow_smaller_maxphyaddr = true; + return 0; } module_init(vmx_init); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 35abe69aad28..95ef62922869 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -187,6 +187,9 @@ static struct kvm_shared_msrs __percpu *shared_msrs; u64 __read_mostly host_efer; EXPORT_SYMBOL_GPL(host_efer); +bool __read_mostly allow_smaller_maxphyaddr; +EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr); + static u64 __read_mostly host_xss; u64 __read_mostly supported_xss; EXPORT_SYMBOL_GPL(supported_xss); @@ -3574,6 +3577,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_HYPERV_ENLIGHTENED_VMCS: r = kvm_x86_ops.nested_ops->enable_evmcs != NULL; break; + case KVM_CAP_SMALLER_MAXPHYADDR: + r = (int) allow_smaller_maxphyaddr; + break; default: break; } diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index ff9b335620d0..2c73dcfb3dbb 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1033,6 +1033,8 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_HALT_POLL 182 #define KVM_CAP_ASYNC_PF_INT 183 #define KVM_CAP_LAST_CPU 184 +#define KVM_CAP_SMALLER_MAXPHYADDR 185 + #ifdef KVM_CAP_IRQ_ROUTING -- cgit v1.2.3 From f4f541660121aeb91a1b462ab3f3c5a86ab7c3dd Mon Sep 17 00:00:00 2001 From: Vladyslav Tarasiuk Date: Fri, 10 Jul 2020 15:25:10 +0300 Subject: devlink: Implement devlink health reporters on per-port basis Add devlink-health reporter support on per-port basis. The main difference existing devlink-health is that port reporters are stored in per-devlink_port lists. Upon creation of such health reporter the reference to a port it belongs to is stored in reporter struct. Fill the port index attribute in devlink-health response to allow devlink userspace utility to distinguish between device and port reporters. Signed-off-by: Vladyslav Tarasiuk Reviewed-by: Moshe Shemesh Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/net/devlink.h | 2 ++ net/core/devlink.c | 94 +++++++++++++++++++++++++++++++++++++++++---------- 2 files changed, 79 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/net/devlink.h b/include/net/devlink.h index 746bed538664..bb1139752405 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -101,6 +101,8 @@ struct devlink_port { u8 attrs_set:1, switch_port:1; struct delayed_work type_warn_dw; + struct list_head reporter_list; + struct mutex reporters_lock; /* Protects reporter_list */ }; struct devlink_sb_pool_info { diff --git a/net/core/devlink.c b/net/core/devlink.c index 4e995de65b36..b4a231ca7135 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -386,19 +386,21 @@ devlink_region_snapshot_get_by_id(struct devlink_region *region, u32 id) return NULL; } -#define DEVLINK_NL_FLAG_NEED_DEVLINK BIT(0) -#define DEVLINK_NL_FLAG_NEED_PORT BIT(1) -#define DEVLINK_NL_FLAG_NEED_SB BIT(2) +#define DEVLINK_NL_FLAG_NEED_DEVLINK BIT(0) +#define DEVLINK_NL_FLAG_NEED_PORT BIT(1) +#define DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT BIT(2) +#define DEVLINK_NL_FLAG_NEED_SB BIT(3) /* The per devlink instance lock is taken by default in the pre-doit * operation, yet several commands do not require this. The global * devlink lock is taken and protects from disruption by user-calls. */ -#define DEVLINK_NL_FLAG_NO_LOCK BIT(3) +#define DEVLINK_NL_FLAG_NO_LOCK BIT(4) static int devlink_nl_pre_doit(const struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info) { + struct devlink_port *devlink_port; struct devlink *devlink; int err; @@ -413,14 +415,17 @@ static int devlink_nl_pre_doit(const struct genl_ops *ops, if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_DEVLINK) { info->user_ptr[0] = devlink; } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) { - struct devlink_port *devlink_port; - devlink_port = devlink_port_get_from_info(devlink, info); if (IS_ERR(devlink_port)) { err = PTR_ERR(devlink_port); goto unlock; } info->user_ptr[0] = devlink_port; + } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT) { + info->user_ptr[0] = devlink; + devlink_port = devlink_port_get_from_info(devlink, info); + if (!IS_ERR(devlink_port)) + info->user_ptr[1] = devlink_port; } if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_SB) { struct devlink_sb *devlink_sb; @@ -5287,6 +5292,7 @@ struct devlink_health_reporter { void *priv; const struct devlink_health_reporter_ops *ops; struct devlink *devlink; + struct devlink_port *devlink_port; struct devlink_fmsg *dump_fmsg; struct mutex dump_lock; /* lock parallel read/write from dump buffers */ u64 graceful_period; @@ -5331,6 +5337,15 @@ devlink_health_reporter_find_by_name(struct devlink *devlink, reporter_name); } +static struct devlink_health_reporter * +devlink_port_health_reporter_find_by_name(struct devlink_port *devlink_port, + const char *reporter_name) +{ + return __devlink_health_reporter_find_by_name(&devlink_port->reporter_list, + &devlink_port->reporters_lock, + reporter_name); +} + static struct devlink_health_reporter * __devlink_health_reporter_create(struct devlink *devlink, const struct devlink_health_reporter_ops *ops, @@ -5443,6 +5458,10 @@ devlink_nl_health_reporter_fill(struct sk_buff *msg, if (devlink_nl_put_handle(msg, devlink)) goto genlmsg_cancel; + if (reporter->devlink_port) { + if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, reporter->devlink_port->index)) + goto genlmsg_cancel; + } reporter_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_HEALTH_REPORTER); if 
(!reporter_attr) @@ -5650,17 +5669,28 @@ devlink_health_reporter_get_from_attrs(struct devlink *devlink, struct nlattr **attrs) { struct devlink_health_reporter *reporter; + struct devlink_port *devlink_port; char *reporter_name; if (!attrs[DEVLINK_ATTR_HEALTH_REPORTER_NAME]) return NULL; reporter_name = nla_data(attrs[DEVLINK_ATTR_HEALTH_REPORTER_NAME]); - mutex_lock(&devlink->reporters_lock); - reporter = devlink_health_reporter_find_by_name(devlink, reporter_name); - if (reporter) - refcount_inc(&reporter->refcount); - mutex_unlock(&devlink->reporters_lock); + devlink_port = devlink_port_get_from_attrs(devlink, attrs); + if (IS_ERR(devlink_port)) { + mutex_lock(&devlink->reporters_lock); + reporter = devlink_health_reporter_find_by_name(devlink, reporter_name); + if (reporter) + refcount_inc(&reporter->refcount); + mutex_unlock(&devlink->reporters_lock); + } else { + mutex_lock(&devlink_port->reporters_lock); + reporter = devlink_port_health_reporter_find_by_name(devlink_port, reporter_name); + if (reporter) + refcount_inc(&reporter->refcount); + mutex_unlock(&devlink_port->reporters_lock); + } + return reporter; } @@ -5748,6 +5778,7 @@ devlink_nl_cmd_health_reporter_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb) { struct devlink_health_reporter *reporter; + struct devlink_port *port; struct devlink *devlink; int start = cb->args[0]; int idx = 0; @@ -5778,6 +5809,31 @@ devlink_nl_cmd_health_reporter_get_dumpit(struct sk_buff *msg, } mutex_unlock(&devlink->reporters_lock); } + + list_for_each_entry(devlink, &devlink_list, list) { + if (!net_eq(devlink_net(devlink), sock_net(msg->sk))) + continue; + list_for_each_entry(port, &devlink->port_list, list) { + mutex_lock(&port->reporters_lock); + list_for_each_entry(reporter, &port->reporter_list, list) { + if (idx < start) { + idx++; + continue; + } + err = devlink_nl_health_reporter_fill(msg, devlink, reporter, + DEVLINK_CMD_HEALTH_REPORTER_GET, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + NLM_F_MULTI); + if (err) { + mutex_unlock(&port->reporters_lock); + goto out; + } + idx++; + } + mutex_unlock(&port->reporters_lock); + } + } out: mutex_unlock(&devlink_mutex); @@ -7157,7 +7213,7 @@ static const struct genl_ops devlink_nl_ops[] = { .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_health_reporter_get_doit, .dumpit = devlink_nl_cmd_health_reporter_get_dumpit, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT | DEVLINK_NL_FLAG_NO_LOCK, /* can be retrieved by unprivileged users */ }, @@ -7166,7 +7222,7 @@ static const struct genl_ops devlink_nl_ops[] = { .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_health_reporter_set_doit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT | DEVLINK_NL_FLAG_NO_LOCK, }, { @@ -7174,7 +7230,7 @@ static const struct genl_ops devlink_nl_ops[] = { .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_health_reporter_recover_doit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT | DEVLINK_NL_FLAG_NO_LOCK, }, { @@ -7182,7 +7238,7 @@ static const struct genl_ops devlink_nl_ops[] = { .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_health_reporter_diagnose_doit, .flags = GENL_ADMIN_PERM, - .internal_flags = 
DEVLINK_NL_FLAG_NEED_DEVLINK | + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT | DEVLINK_NL_FLAG_NO_LOCK, }, { @@ -7191,7 +7247,7 @@ static const struct genl_ops devlink_nl_ops[] = { GENL_DONT_VALIDATE_DUMP_STRICT, .dumpit = devlink_nl_cmd_health_reporter_dump_get_dumpit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT | DEVLINK_NL_FLAG_NO_LOCK, }, { @@ -7199,7 +7255,7 @@ static const struct genl_ops devlink_nl_ops[] = { .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_health_reporter_dump_clear_doit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT | DEVLINK_NL_FLAG_NO_LOCK, }, { @@ -7459,6 +7515,8 @@ int devlink_port_register(struct devlink *devlink, list_add_tail(&devlink_port->list, &devlink->port_list); INIT_LIST_HEAD(&devlink_port->param_list); mutex_unlock(&devlink->lock); + INIT_LIST_HEAD(&devlink_port->reporter_list); + mutex_init(&devlink_port->reporters_lock); INIT_DELAYED_WORK(&devlink_port->type_warn_dw, &devlink_port_type_warn); devlink_port_type_warn_schedule(devlink_port); devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW); @@ -7475,6 +7533,8 @@ void devlink_port_unregister(struct devlink_port *devlink_port) { struct devlink *devlink = devlink_port->devlink; + WARN_ON(!list_empty(&devlink_port->reporter_list)); + mutex_destroy(&devlink_port->reporters_lock); devlink_port_type_warn_cancel(devlink_port); devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL); mutex_lock(&devlink->lock); -- cgit v1.2.3 From 15c724b997a8fe1a677cf11797fb29c0bdecc63f Mon Sep 17 00:00:00 2001 From: Vladyslav Tarasiuk Date: Fri, 10 Jul 2020 15:25:11 +0300 Subject: devlink: Add devlink health port reporters API In order to use new devlink port health reporters infrastructure, add corresponding constructor and destructor functions. Signed-off-by: Vladyslav Tarasiuk Reviewed-by: Moshe Shemesh Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/net/devlink.h | 9 +++++++++ net/core/devlink.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+) (limited to 'include') diff --git a/include/net/devlink.h b/include/net/devlink.h index bb1139752405..913e8679ae35 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1338,9 +1338,18 @@ struct devlink_health_reporter * devlink_health_reporter_create(struct devlink *devlink, const struct devlink_health_reporter_ops *ops, u64 graceful_period, void *priv); + +struct devlink_health_reporter * +devlink_port_health_reporter_create(struct devlink_port *port, + const struct devlink_health_reporter_ops *ops, + u64 graceful_period, void *priv); + void devlink_health_reporter_destroy(struct devlink_health_reporter *reporter); +void +devlink_port_health_reporter_destroy(struct devlink_health_reporter *reporter); + void * devlink_health_reporter_priv(struct devlink_health_reporter *reporter); int devlink_health_report(struct devlink_health_reporter *reporter, diff --git a/net/core/devlink.c b/net/core/devlink.c index b4a231ca7135..20a83aace642 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -5371,6 +5371,42 @@ __devlink_health_reporter_create(struct devlink *devlink, return reporter; } +/** + * devlink_port_health_reporter_create - create devlink health reporter for + * specified port instance + * + * @port: devlink_port which should contain the new reporter + * @ops: ops + * @graceful_period: to avoid recovery loops, in msecs + * @priv: priv + */ +struct devlink_health_reporter * +devlink_port_health_reporter_create(struct devlink_port *port, + const struct devlink_health_reporter_ops *ops, + u64 graceful_period, void *priv) +{ + struct devlink_health_reporter *reporter; + + mutex_lock(&port->reporters_lock); + if (__devlink_health_reporter_find_by_name(&port->reporter_list, + &port->reporters_lock, ops->name)) { + reporter = ERR_PTR(-EEXIST); + goto unlock; + } + + reporter = __devlink_health_reporter_create(port->devlink, ops, + graceful_period, priv); + if (IS_ERR(reporter)) + goto unlock; + + reporter->devlink_port = port; + list_add_tail(&reporter->list, &port->reporter_list); +unlock: + mutex_unlock(&port->reporters_lock); + return reporter; +} +EXPORT_SYMBOL_GPL(devlink_port_health_reporter_create); + /** * devlink_health_reporter_create - create devlink health reporter * @@ -5441,6 +5477,20 @@ devlink_health_reporter_destroy(struct devlink_health_reporter *reporter) } EXPORT_SYMBOL_GPL(devlink_health_reporter_destroy); +/** + * devlink_port_health_reporter_destroy - destroy devlink port health reporter + * + * @reporter: devlink health reporter to destroy + */ +void +devlink_port_health_reporter_destroy(struct devlink_health_reporter *reporter) +{ + mutex_lock(&reporter->devlink_port->reporters_lock); + __devlink_health_reporter_destroy(reporter); + mutex_unlock(&reporter->devlink_port->reporters_lock); +} +EXPORT_SYMBOL_GPL(devlink_port_health_reporter_destroy); + static int devlink_nl_health_reporter_fill(struct sk_buff *msg, struct devlink *devlink, -- cgit v1.2.3 From c818c03b661cd769e035e41673d5543ba2ebda64 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 13 May 2020 14:11:26 -0700 Subject: seccomp: Report number of loaded filters in /proc/$pid/status A common question asked when debugging seccomp filters is "how many filters are attached to your process?" Provide a way to easily answer this question through /proc/$pid/status with a "Seccomp_filters" line. 
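For illustration, the new field appears next to the existing Seccomp mode line; the values shown here are hypothetical:

$ grep Seccomp /proc/$$/status
Seccomp:	2
Seccomp_filters:	3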
Signed-off-by: Kees Cook --- fs/proc/array.c | 2 ++ include/linux/seccomp.h | 2 ++ init/init_task.c | 3 +++ kernel/seccomp.c | 3 +++ 4 files changed, 10 insertions(+) (limited to 'include') diff --git a/fs/proc/array.c b/fs/proc/array.c index 55ecbeb3a721..65ec2029fa80 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -341,6 +341,8 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p) seq_put_decimal_ull(m, "NoNewPrivs:\t", task_no_new_privs(p)); #ifdef CONFIG_SECCOMP seq_put_decimal_ull(m, "\nSeccomp:\t", p->seccomp.mode); + seq_put_decimal_ull(m, "\nSeccomp_filters:\t", + atomic_read(&p->seccomp.filter_count)); #endif seq_puts(m, "\nSpeculation_Store_Bypass:\t"); switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) { diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index 4192369b8418..2ec2720f83cc 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h @@ -13,6 +13,7 @@ #ifdef CONFIG_SECCOMP #include +#include #include struct seccomp_filter; @@ -29,6 +30,7 @@ struct seccomp_filter; */ struct seccomp { int mode; + atomic_t filter_count; struct seccomp_filter *filter; }; diff --git a/init/init_task.c b/init/init_task.c index 15089d15010a..a3eb3847e1f4 100644 --- a/init/init_task.c +++ b/init/init_task.c @@ -204,6 +204,9 @@ struct task_struct init_task #ifdef CONFIG_SECURITY .security = NULL, #endif +#ifdef CONFIG_SECCOMP + .seccomp = { .filter_count = ATOMIC_INIT(0) }, +#endif }; EXPORT_SYMBOL(init_task); diff --git a/kernel/seccomp.c b/kernel/seccomp.c index d653d8426de9..f387e5004c29 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -398,6 +398,8 @@ static inline void seccomp_sync_threads(unsigned long flags) put_seccomp_filter(thread); smp_store_release(&thread->seccomp.filter, caller->seccomp.filter); + atomic_set(&thread->seccomp.filter_count, + atomic_read(&thread->seccomp.filter_count)); /* * Don't let an unprivileged task work around @@ -544,6 +546,7 @@ static long seccomp_attach_filter(unsigned int flags, */ filter->prev = current->seccomp.filter; current->seccomp.filter = filter; + atomic_inc(¤t->seccomp.filter_count); /* Now that the new filter is in place, synchronize to all threads. */ if (flags & SECCOMP_FILTER_FLAG_TSYNC) -- cgit v1.2.3 From 3a15fb6ed92cb32b0a83f406aa4a96f28c9adbc3 Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Sun, 31 May 2020 13:50:29 +0200 Subject: seccomp: release filter after task is fully dead The seccomp filter used to be released in free_task() which is called asynchronously via call_rcu() and assorted mechanisms. Since we need to inform tasks waiting on the seccomp notifier when a filter goes empty we will notify them as soon as a task has been marked fully dead in release_task(). To not split seccomp cleanup into two parts, move filter release out of free_task() and into release_task() after we've unhashed struct task from struct pid, exited signals, and unlinked it from the threadgroups' thread list. We'll put the empty filter notification infrastructure into it in a follow up patch. This also renames put_seccomp_filter() to seccomp_filter_release() which is a more descriptive name of what we're doing here especially once we've added the empty filter notification mechanism in there. We're also NULL-ing the task's filter tree entrypoint which seems cleaner than leaving a dangling pointer in there. Note that this shouldn't need any memory barriers since we're calling this when the task is in release_task() which means it's EXIT_DEAD. 
So it can't modify its seccomp filters anymore. You can also see this from the point where we're calling seccomp_filter_release(). It's after __exit_signal() and at this point, tsk->sighand will already have been NULLed which is required for thread-sync and filter installation alike. Cc: Tycho Andersen Cc: Kees Cook Cc: Matt Denton Cc: Sargun Dhillon Cc: Jann Horn Cc: Chris Palmer Cc: Aleksa Sarai Cc: Robert Sesek Cc: Jeffrey Vander Stoep Cc: Linux Containers Signed-off-by: Christian Brauner Link: https://lore.kernel.org/r/20200531115031.391515-2-christian.brauner@ubuntu.com Signed-off-by: Kees Cook --- include/linux/seccomp.h | 4 ++-- kernel/exit.c | 1 + kernel/fork.c | 1 - kernel/seccomp.c | 62 +++++++++++++++++++++++++++++-------------------- 4 files changed, 40 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index 2ec2720f83cc..babcd6c02d09 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h @@ -84,10 +84,10 @@ static inline int seccomp_mode(struct seccomp *s) #endif /* CONFIG_SECCOMP */ #ifdef CONFIG_SECCOMP_FILTER -extern void put_seccomp_filter(struct task_struct *tsk); +extern void seccomp_filter_release(struct task_struct *tsk); extern void get_seccomp_filter(struct task_struct *tsk); #else /* CONFIG_SECCOMP_FILTER */ -static inline void put_seccomp_filter(struct task_struct *tsk) +static inline void seccomp_filter_release(struct task_struct *tsk) { return; } diff --git a/kernel/exit.c b/kernel/exit.c index 727150f28103..00d77e5ba700 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -217,6 +217,7 @@ repeat: } write_unlock_irq(&tasklist_lock); + seccomp_filter_release(p); proc_flush_pid(thread_pid); put_pid(thread_pid); release_thread(p); diff --git a/kernel/fork.c b/kernel/fork.c index 142b23645d82..c51a9cd824c5 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -473,7 +473,6 @@ void free_task(struct task_struct *tsk) #endif rt_mutex_debug_task_free(tsk); ftrace_graph_exit_task(tsk); - put_seccomp_filter(tsk); arch_release_task_struct(tsk); if (tsk->flags & PF_KTHREAD) free_kthread_struct(tsk); diff --git a/kernel/seccomp.c b/kernel/seccomp.c index d4dd3344e312..0ca6d5243427 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -368,6 +368,42 @@ static inline pid_t seccomp_can_sync_threads(void) return 0; } +static inline void seccomp_filter_free(struct seccomp_filter *filter) +{ + if (filter) { + bpf_prog_destroy(filter->prog); + kfree(filter); + } +} + +static void __put_seccomp_filter(struct seccomp_filter *orig) +{ + /* Clean up single-reference branches iteratively. */ + while (orig && refcount_dec_and_test(&orig->refs)) { + struct seccomp_filter *freeme = orig; + orig = orig->prev; + seccomp_filter_free(freeme); + } +} + +/** + * seccomp_filter_release - Detach the task from its filter tree + * and drop its reference count during + * exit. + * + * This function should only be called when the task is exiting as + * it detaches it from its filter tree. As such, READ_ONCE() and + * barriers are not needed here, as would normally be needed. + */ +void seccomp_filter_release(struct task_struct *tsk) +{ + struct seccomp_filter *orig = tsk->seccomp.filter; + + /* Detach task from its filter tree. */ + tsk->seccomp.filter = NULL; + __put_seccomp_filter(orig); +} + /** * seccomp_sync_threads: sets all threads to use current's filter * @@ -397,7 +433,7 @@ static inline void seccomp_sync_threads(unsigned long flags) * current's path will hold a reference. 
(This also * allows a put before the assignment.) */ - put_seccomp_filter(thread); + __put_seccomp_filter(thread->seccomp.filter); smp_store_release(&thread->seccomp.filter, caller->seccomp.filter); atomic_set(&thread->seccomp.filter_count, @@ -571,30 +607,6 @@ void get_seccomp_filter(struct task_struct *tsk) __get_seccomp_filter(orig); } -static inline void seccomp_filter_free(struct seccomp_filter *filter) -{ - if (filter) { - bpf_prog_destroy(filter->prog); - kfree(filter); - } -} - -static void __put_seccomp_filter(struct seccomp_filter *orig) -{ - /* Clean up single-reference branches iteratively. */ - while (orig && refcount_dec_and_test(&orig->refs)) { - struct seccomp_filter *freeme = orig; - orig = orig->prev; - seccomp_filter_free(freeme); - } -} - -/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */ -void put_seccomp_filter(struct task_struct *tsk) -{ - __put_seccomp_filter(tsk->seccomp.filter); -} - static void seccomp_init_siginfo(kernel_siginfo_t *info, int syscall, int reason) { clear_siginfo(info); -- cgit v1.2.3 From 47e33c05f9f07cac3de833e531bcac9ae052c7ca Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 15 Jun 2020 15:42:46 -0700 Subject: seccomp: Fix ioctl number for SECCOMP_IOCTL_NOTIF_ID_VALID When SECCOMP_IOCTL_NOTIF_ID_VALID was first introduced it had the wrong direction flag set. While this isn't a big deal as nothing currently enforces these bits in the kernel, it should be defined correctly. Fix the define and provide support for the old command until it is no longer needed for backward compatibility. Fixes: 6a21cc50f0c7 ("seccomp: add a return code to trap to userspace") Signed-off-by: Kees Cook --- include/uapi/linux/seccomp.h | 3 ++- kernel/seccomp.c | 9 +++++++++ tools/testing/selftests/seccomp/seccomp_bpf.c | 2 +- 3 files changed, 12 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h index c1735455bc53..965290f7dcc2 100644 --- a/include/uapi/linux/seccomp.h +++ b/include/uapi/linux/seccomp.h @@ -123,5 +123,6 @@ struct seccomp_notif_resp { #define SECCOMP_IOCTL_NOTIF_RECV SECCOMP_IOWR(0, struct seccomp_notif) #define SECCOMP_IOCTL_NOTIF_SEND SECCOMP_IOWR(1, \ struct seccomp_notif_resp) -#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOR(2, __u64) +#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOW(2, __u64) + #endif /* _UAPI_LINUX_SECCOMP_H */ diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 5f0e3f3a7a5d..0ed57e8c49d0 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -44,6 +44,14 @@ #include #include +/* + * When SECCOMP_IOCTL_NOTIF_ID_VALID was first introduced, it had the + * wrong direction flag in the ioctl number. This is the broken one, + * which the kernel needs to keep supporting until all userspaces stop + * using the wrong command number. 
+ */ +#define SECCOMP_IOCTL_NOTIF_ID_VALID_WRONG_DIR SECCOMP_IOR(2, __u64) + enum notify_state { SECCOMP_NOTIFY_INIT, SECCOMP_NOTIFY_SENT, @@ -1236,6 +1244,7 @@ static long seccomp_notify_ioctl(struct file *file, unsigned int cmd, return seccomp_notify_recv(filter, buf); case SECCOMP_IOCTL_NOTIF_SEND: return seccomp_notify_send(filter, buf); + case SECCOMP_IOCTL_NOTIF_ID_VALID_WRONG_DIR: case SECCOMP_IOCTL_NOTIF_ID_VALID: return seccomp_notify_id_valid(filter, buf); default: diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index 43884b6625fc..61f9ac200001 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c @@ -186,7 +186,7 @@ struct seccomp_metadata { #define SECCOMP_IOCTL_NOTIF_RECV SECCOMP_IOWR(0, struct seccomp_notif) #define SECCOMP_IOCTL_NOTIF_SEND SECCOMP_IOWR(1, \ struct seccomp_notif_resp) -#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOR(2, __u64) +#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOW(2, __u64) struct seccomp_notif { __u64 id; -- cgit v1.2.3 From fe4bfff86ec54773df3db79e8112e3b0f820c799 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Fri, 19 Jun 2020 12:20:15 -0700 Subject: seccomp: Use -1 marker for end of mode 1 syscall list The terminator for the mode 1 syscalls list was a 0, but that could be a valid syscall number (e.g. x86_64 __NR_read). By luck, __NR_read was listed first and the loop construct would not test it, so there was no bug. However, this is fragile. Replace the terminator with -1 instead, and make the variable name for mode 1 syscall lists more descriptive. Cc: Andy Lutomirski Cc: Will Drewry Signed-off-by: Kees Cook --- arch/mips/include/asm/seccomp.h | 4 ++-- include/asm-generic/seccomp.h | 2 +- kernel/seccomp.c | 10 +++++----- 3 files changed, 8 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/arch/mips/include/asm/seccomp.h b/arch/mips/include/asm/seccomp.h index e383d7e27b93..aa809589a181 100644 --- a/arch/mips/include/asm/seccomp.h +++ b/arch/mips/include/asm/seccomp.h @@ -9,12 +9,12 @@ static inline const int *get_compat_mode1_syscalls(void) static const int syscalls_O32[] = { __NR_O32_Linux + 3, __NR_O32_Linux + 4, __NR_O32_Linux + 1, __NR_O32_Linux + 193, - 0, /* null terminated */ + -1, /* negative terminated */ }; static const int syscalls_N32[] = { __NR_N32_Linux + 0, __NR_N32_Linux + 1, __NR_N32_Linux + 58, __NR_N32_Linux + 211, - 0, /* null terminated */ + -1, /* negative terminated */ }; if (IS_ENABLED(CONFIG_MIPS32_O32) && test_thread_flag(TIF_32BIT_REGS)) diff --git a/include/asm-generic/seccomp.h b/include/asm-generic/seccomp.h index 1321ac7821d7..6b6f42bc58f9 100644 --- a/include/asm-generic/seccomp.h +++ b/include/asm-generic/seccomp.h @@ -33,7 +33,7 @@ static inline const int *get_compat_mode1_syscalls(void) static const int mode1_syscalls_32[] = { __NR_seccomp_read_32, __NR_seccomp_write_32, __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32, - 0, /* null terminated */ + -1, /* negative terminated */ }; return mode1_syscalls_32; } diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 0ed57e8c49d0..866a432cd746 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -742,20 +742,20 @@ static inline void seccomp_log(unsigned long syscall, long signr, u32 action, */ static const int mode1_syscalls[] = { __NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn, - 0, /* null terminated */ + -1, /* negative terminated */ }; static void __secure_computing_strict(int this_syscall) { - 
const int *syscall_whitelist = mode1_syscalls; + const int *allowed_syscalls = mode1_syscalls; #ifdef CONFIG_COMPAT if (in_compat_syscall()) - syscall_whitelist = get_compat_mode1_syscalls(); + allowed_syscalls = get_compat_mode1_syscalls(); #endif do { - if (*syscall_whitelist == this_syscall) + if (*allowed_syscalls == this_syscall) return; - } while (*++syscall_whitelist); + } while (*++allowed_syscalls != -1); #ifdef SECCOMP_DEBUG dump_stack(); -- cgit v1.2.3 From 6d0efeb14bbe2350a94ba07b403a686d731c5179 Mon Sep 17 00:00:00 2001 From: Ilia Lin Date: Fri, 3 Jul 2020 10:49:41 +0200 Subject: soc: qcom: Separate kryo l2 accessors from PMU driver The driver provides kernel level API for other drivers to access the MSM8996 L2 cache registers. Separating the L2 access code from the PMU driver and making it public to allow other drivers use it. The accesses must be separated with a single spinlock, maintained in this driver. Signed-off-by: Ilia Lin Signed-off-by: Loic Poulain Link: https://lore.kernel.org/r/1593766185-16346-2-git-send-email-loic.poulain@linaro.org Acked-by: Will Deacon Signed-off-by: Stephen Boyd --- drivers/perf/Kconfig | 1 + drivers/perf/qcom_l2_pmu.c | 90 ++++++++++-------------------------- drivers/soc/qcom/Kconfig | 4 ++ drivers/soc/qcom/Makefile | 1 + drivers/soc/qcom/kryo-l2-accessors.c | 57 +++++++++++++++++++++++ include/soc/qcom/kryo-l2-accessors.h | 12 +++++ 6 files changed, 99 insertions(+), 66 deletions(-) create mode 100644 drivers/soc/qcom/kryo-l2-accessors.c create mode 100644 include/soc/qcom/kryo-l2-accessors.h (limited to 'include') diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index a9261cf48293..7305d57d1890 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -82,6 +82,7 @@ config FSL_IMX8_DDR_PMU config QCOM_L2_PMU bool "Qualcomm Technologies L2-cache PMU" depends on ARCH_QCOM && ARM64 && ACPI + select QCOM_KRYO_L2_ACCESSORS help Provides support for the L2 cache performance monitor unit (PMU) in Qualcomm Technologies processors. diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c index 21d6991dbe0b..02ca1fadbedd 100644 --- a/drivers/perf/qcom_l2_pmu.c +++ b/drivers/perf/qcom_l2_pmu.c @@ -23,6 +23,7 @@ #include #include #include +#include #define MAX_L2_CTRS 9 @@ -79,8 +80,6 @@ #define L2_COUNTER_RELOAD BIT_ULL(31) #define L2_CYCLE_COUNTER_RELOAD BIT_ULL(63) -#define L2CPUSRSELR_EL1 sys_reg(3, 3, 15, 0, 6) -#define L2CPUSRDR_EL1 sys_reg(3, 3, 15, 0, 7) #define reg_idx(reg, i) (((i) * IA_L2_REG_OFFSET) + reg##_BASE) @@ -99,48 +98,7 @@ #define L2_EVENT_STREX 0x421 #define L2_EVENT_CLREX 0x422 -static DEFINE_RAW_SPINLOCK(l2_access_lock); -/** - * set_l2_indirect_reg: write value to an L2 register - * @reg: Address of L2 register. - * @value: Value to be written to register. - * - * Use architecturally required barriers for ordering between system register - * accesses - */ -static void set_l2_indirect_reg(u64 reg, u64 val) -{ - unsigned long flags; - - raw_spin_lock_irqsave(&l2_access_lock, flags); - write_sysreg_s(reg, L2CPUSRSELR_EL1); - isb(); - write_sysreg_s(val, L2CPUSRDR_EL1); - isb(); - raw_spin_unlock_irqrestore(&l2_access_lock, flags); -} - -/** - * get_l2_indirect_reg: read an L2 register value - * @reg: Address of L2 register. 
- * - * Use architecturally required barriers for ordering between system register - * accesses - */ -static u64 get_l2_indirect_reg(u64 reg) -{ - u64 val; - unsigned long flags; - - raw_spin_lock_irqsave(&l2_access_lock, flags); - write_sysreg_s(reg, L2CPUSRSELR_EL1); - isb(); - val = read_sysreg_s(L2CPUSRDR_EL1); - raw_spin_unlock_irqrestore(&l2_access_lock, flags); - - return val; -} struct cluster_pmu; @@ -211,28 +169,28 @@ static inline struct cluster_pmu *get_cluster_pmu( static void cluster_pmu_reset(void) { /* Reset all counters */ - set_l2_indirect_reg(L2PMCR, L2PMCR_RESET_ALL); - set_l2_indirect_reg(L2PMCNTENCLR, l2_counter_present_mask); - set_l2_indirect_reg(L2PMINTENCLR, l2_counter_present_mask); - set_l2_indirect_reg(L2PMOVSCLR, l2_counter_present_mask); + kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_RESET_ALL); + kryo_l2_set_indirect_reg(L2PMCNTENCLR, l2_counter_present_mask); + kryo_l2_set_indirect_reg(L2PMINTENCLR, l2_counter_present_mask); + kryo_l2_set_indirect_reg(L2PMOVSCLR, l2_counter_present_mask); } static inline void cluster_pmu_enable(void) { - set_l2_indirect_reg(L2PMCR, L2PMCR_COUNTERS_ENABLE); + kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_COUNTERS_ENABLE); } static inline void cluster_pmu_disable(void) { - set_l2_indirect_reg(L2PMCR, L2PMCR_COUNTERS_DISABLE); + kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_COUNTERS_DISABLE); } static inline void cluster_pmu_counter_set_value(u32 idx, u64 value) { if (idx == l2_cycle_ctr_idx) - set_l2_indirect_reg(L2PMCCNTR, value); + kryo_l2_set_indirect_reg(L2PMCCNTR, value); else - set_l2_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx), value); + kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx), value); } static inline u64 cluster_pmu_counter_get_value(u32 idx) @@ -240,46 +198,46 @@ static inline u64 cluster_pmu_counter_get_value(u32 idx) u64 value; if (idx == l2_cycle_ctr_idx) - value = get_l2_indirect_reg(L2PMCCNTR); + value = kryo_l2_get_indirect_reg(L2PMCCNTR); else - value = get_l2_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx)); + value = kryo_l2_get_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx)); return value; } static inline void cluster_pmu_counter_enable(u32 idx) { - set_l2_indirect_reg(L2PMCNTENSET, idx_to_reg_bit(idx)); + kryo_l2_set_indirect_reg(L2PMCNTENSET, idx_to_reg_bit(idx)); } static inline void cluster_pmu_counter_disable(u32 idx) { - set_l2_indirect_reg(L2PMCNTENCLR, idx_to_reg_bit(idx)); + kryo_l2_set_indirect_reg(L2PMCNTENCLR, idx_to_reg_bit(idx)); } static inline void cluster_pmu_counter_enable_interrupt(u32 idx) { - set_l2_indirect_reg(L2PMINTENSET, idx_to_reg_bit(idx)); + kryo_l2_set_indirect_reg(L2PMINTENSET, idx_to_reg_bit(idx)); } static inline void cluster_pmu_counter_disable_interrupt(u32 idx) { - set_l2_indirect_reg(L2PMINTENCLR, idx_to_reg_bit(idx)); + kryo_l2_set_indirect_reg(L2PMINTENCLR, idx_to_reg_bit(idx)); } static inline void cluster_pmu_set_evccntcr(u32 val) { - set_l2_indirect_reg(L2PMCCNTCR, val); + kryo_l2_set_indirect_reg(L2PMCCNTCR, val); } static inline void cluster_pmu_set_evcntcr(u32 ctr, u32 val) { - set_l2_indirect_reg(reg_idx(IA_L2PMXEVCNTCR, ctr), val); + kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVCNTCR, ctr), val); } static inline void cluster_pmu_set_evtyper(u32 ctr, u32 val) { - set_l2_indirect_reg(reg_idx(IA_L2PMXEVTYPER, ctr), val); + kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVTYPER, ctr), val); } static void cluster_pmu_set_resr(struct cluster_pmu *cluster, @@ -295,11 +253,11 @@ static void cluster_pmu_set_resr(struct cluster_pmu *cluster, spin_lock_irqsave(&cluster->pmu_lock, flags); - 
resr_val = get_l2_indirect_reg(L2PMRESR); + resr_val = kryo_l2_get_indirect_reg(L2PMRESR); resr_val &= ~(L2PMRESR_GROUP_MASK << shift); resr_val |= field; resr_val |= L2PMRESR_EN; - set_l2_indirect_reg(L2PMRESR, resr_val); + kryo_l2_set_indirect_reg(L2PMRESR, resr_val); spin_unlock_irqrestore(&cluster->pmu_lock, flags); } @@ -315,14 +273,14 @@ static inline void cluster_pmu_set_evfilter_sys_mode(u32 ctr) L2PMXEVFILTER_ORGFILTER_IDINDEP | L2PMXEVFILTER_ORGFILTER_ALL; - set_l2_indirect_reg(reg_idx(IA_L2PMXEVFILTER, ctr), val); + kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVFILTER, ctr), val); } static inline u32 cluster_pmu_getreset_ovsr(void) { - u32 result = get_l2_indirect_reg(L2PMOVSSET); + u32 result = kryo_l2_get_indirect_reg(L2PMOVSSET); - set_l2_indirect_reg(L2PMOVSCLR, result); + kryo_l2_set_indirect_reg(L2PMOVSCLR, result); return result; } @@ -767,7 +725,7 @@ static int get_num_counters(void) { int val; - val = get_l2_indirect_reg(L2PMCR); + val = kryo_l2_get_indirect_reg(L2PMCR); /* * Read number of counters from L2PMCR and add 1 diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 07bb261a63d2..cdc4f46d64ef 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -53,6 +53,10 @@ config QCOM_LLCC SDM845. This provides interfaces to clients that use the LLCC. Say yes here to enable LLCC slice driver. +config QCOM_KRYO_L2_ACCESSORS + bool + depends on ARCH_QCOM && ARM64 || COMPILE_TEST + config QCOM_MDT_LOADER tristate select QCOM_SCM diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 7d7e2ecbdce6..93392d9dc7f7 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -24,3 +24,4 @@ obj-$(CONFIG_QCOM_APR) += apr.o obj-$(CONFIG_QCOM_LLCC) += llcc-qcom.o obj-$(CONFIG_QCOM_RPMHPD) += rpmhpd.o obj-$(CONFIG_QCOM_RPMPD) += rpmpd.o +obj-$(CONFIG_QCOM_KRYO_L2_ACCESSORS) += kryo-l2-accessors.o diff --git a/drivers/soc/qcom/kryo-l2-accessors.c b/drivers/soc/qcom/kryo-l2-accessors.c new file mode 100644 index 000000000000..c20cb92077c0 --- /dev/null +++ b/drivers/soc/qcom/kryo-l2-accessors.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include + +#define L2CPUSRSELR_EL1 sys_reg(3, 3, 15, 0, 6) +#define L2CPUSRDR_EL1 sys_reg(3, 3, 15, 0, 7) + +static DEFINE_RAW_SPINLOCK(l2_access_lock); + +/** + * kryo_l2_set_indirect_reg() - write value to an L2 register + * @reg: Address of L2 register. + * @value: Value to be written to register. + * + * Use architecturally required barriers for ordering between system register + * accesses, and system registers with respect to device memory + */ +void kryo_l2_set_indirect_reg(u64 reg, u64 val) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&l2_access_lock, flags); + write_sysreg_s(reg, L2CPUSRSELR_EL1); + isb(); + write_sysreg_s(val, L2CPUSRDR_EL1); + isb(); + raw_spin_unlock_irqrestore(&l2_access_lock, flags); +} +EXPORT_SYMBOL(kryo_l2_set_indirect_reg); + +/** + * kryo_l2_get_indirect_reg() - read an L2 register value + * @reg: Address of L2 register. 
+ * + * Use architecturally required barriers for ordering between system register + * accesses, and system registers with respect to device memory + */ +u64 kryo_l2_get_indirect_reg(u64 reg) +{ + u64 val; + unsigned long flags; + + raw_spin_lock_irqsave(&l2_access_lock, flags); + write_sysreg_s(reg, L2CPUSRSELR_EL1); + isb(); + val = read_sysreg_s(L2CPUSRDR_EL1); + raw_spin_unlock_irqrestore(&l2_access_lock, flags); + + return val; +} +EXPORT_SYMBOL(kryo_l2_get_indirect_reg); diff --git a/include/soc/qcom/kryo-l2-accessors.h b/include/soc/qcom/kryo-l2-accessors.h new file mode 100644 index 000000000000..673c5344afe3 --- /dev/null +++ b/include/soc/qcom/kryo-l2-accessors.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#ifndef __SOC_ARCH_QCOM_KRYO_L2_ACCESSORS_H +#define __SOC_ARCH_QCOM_KRYO_L2_ACCESSORS_H + +void kryo_l2_set_indirect_reg(u64 reg, u64 val); +u64 kryo_l2_get_indirect_reg(u64 reg); + +#endif -- cgit v1.2.3 From 943c8a80830b88b7b203fa3e2d755620d85129d1 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 29 Jun 2020 09:43:52 -0700 Subject: : add stub for of_get_next_parent() to fix qcom build error Fix a (COMPILE_TEST) build error when CONFIG_OF is not set/enabled by adding a stub for of_get_next_parent(). ../drivers/soc/qcom/qcom-geni-se.c:819:11: error: implicit declaration of function 'of_get_next_parent'; did you mean 'of_get_parent'? [-Werror=implicit-function-declaration] ../drivers/soc/qcom/qcom-geni-se.c:819:9: warning: assignment makes pointer from integer without a cast [-Wint-conversion] Fixes: 048eb908a1f2 ("soc: qcom-geni-se: Add interconnect support to fix earlycon crash") Acked-by: Rob Herring Reviewed-by: Bjorn Andersson Signed-off-by: Randy Dunlap Cc: Rob Herring Cc: Frank Rowand Cc: devicetree@vger.kernel.org Cc: Andy Gross Cc: Bjorn Andersson Cc: linux-arm-msm@vger.kernel.org Link: https://lore.kernel.org/r/ce0d7561-ff93-d267-b57a-6505014c728c@infradead.org Signed-off-by: Bjorn Andersson --- include/linux/of.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/linux/of.h b/include/linux/of.h index c669c0a4732f..c98ed245b815 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -630,6 +630,11 @@ static inline struct device_node *of_get_parent(const struct device_node *node) return NULL; } +static inline struct device_node *of_get_next_parent(struct device_node *node) +{ + return NULL; +} + static inline struct device_node *of_get_next_child( const struct device_node *node, struct device_node *prev) { -- cgit v1.2.3 From e7fb524cfccaf649b257d517f437392f50b3931f Mon Sep 17 00:00:00 2001 From: Sivaprakash Murugesan Date: Sun, 5 Jul 2020 14:47:54 +0530 Subject: dt-bindings: clock: qcom: ipq8074: Add missing bindings for PCIe Add missing clock bindings for PCIe port0 of ipq8074. 
Co-developed-by: Selvam Sathappan Periakaruppan Signed-off-by: Selvam Sathappan Periakaruppan Signed-off-by: Sivaprakash Murugesan Link: https://lore.kernel.org/r/1593940680-2363-4-git-send-email-sivaprak@codeaurora.org [sboyd@kernel.org: Clean up commit text subject] Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/qcom,gcc-ipq8074.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/dt-bindings/clock/qcom,gcc-ipq8074.h b/include/dt-bindings/clock/qcom,gcc-ipq8074.h index 4de4811a3540..e3e018565add 100644 --- a/include/dt-bindings/clock/qcom,gcc-ipq8074.h +++ b/include/dt-bindings/clock/qcom,gcc-ipq8074.h @@ -362,5 +362,9 @@ #define GCC_PCIE1_AXI_SLAVE_ARES 128 #define GCC_PCIE1_AHB_ARES 129 #define GCC_PCIE1_AXI_MASTER_STICKY_ARES 130 +#define GCC_PCIE0_AXI_SLAVE_STICKY_ARES 131 +#define GCC_PCIE0_AXI_S_BRIDGE_CLK 132 +#define GCC_PCIE0_RCHNG_CLK_SRC 133 +#define GCC_PCIE0_RCHNG_CLK 134 #endif -- cgit v1.2.3 From b4297844995f380588e6f935a2f98c399129a9b2 Mon Sep 17 00:00:00 2001 From: Konrad Dybcio Date: Wed, 24 Jun 2020 01:00:18 +0200 Subject: clk: qcom: smd: Add support for MSM8992/4 rpm clocks Add rpm smd clocks, PMIC and bus clocks which are required on MSM8992, MSM8994 (and APQ variants) for clients to vote on. Signed-off-by: Konrad Dybcio Link: https://lore.kernel.org/r/20200623230018.303776-1-konradybcio@gmail.com [sboyd@kernel.org: Fixed up binding numbers] Signed-off-by: Stephen Boyd --- .../devicetree/bindings/clock/qcom,rpmcc.txt | 2 + drivers/clk/qcom/clk-smd-rpm.c | 171 +++++++++++++++++++++ include/dt-bindings/clock/qcom,rpmcc.h | 4 + 3 files changed, 177 insertions(+) (limited to 'include') diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt index 8786d19ffe17..b44a0622fb3a 100644 --- a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt +++ b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt @@ -18,6 +18,8 @@ Required properties : "qcom,rpmcc-msm8976", "qcom,rpmcc" "qcom,rpmcc-apq8064", "qcom,rpmcc" "qcom,rpmcc-ipq806x", "qcom,rpmcc" + "qcom,rpmcc-msm8992",·"qcom,rpmcc" + "qcom,rpmcc-msm8994",·"qcom,rpmcc" "qcom,rpmcc-msm8996", "qcom,rpmcc" "qcom,rpmcc-msm8998", "qcom,rpmcc" "qcom,rpmcc-qcs404", "qcom,rpmcc" diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c index 083399affc8e..0e1dfa89489e 100644 --- a/drivers/clk/qcom/clk-smd-rpm.c +++ b/drivers/clk/qcom/clk-smd-rpm.c @@ -623,6 +623,175 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8976 = { .num_clks = ARRAY_SIZE(msm8976_clks), }; +/* msm8992 */ +DEFINE_CLK_SMD_RPM(msm8992, pnoc_clk, pnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8992, ocmemgx_clk, ocmemgx_a_clk, QCOM_SMD_RPM_MEM_CLK, 2); +DEFINE_CLK_SMD_RPM(msm8992, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8992, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2); +DEFINE_CLK_SMD_RPM(msm8992, gfx3d_clk_src, gfx3d_a_clk_src, QCOM_SMD_RPM_MEM_CLK, 1); +DEFINE_CLK_SMD_RPM(msm8992, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8992, bb_clk1, bb_clk1_a, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8992, bb_clk1_pin, bb_clk1_a_pin, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8992, bb_clk2, bb_clk2_a, 2); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8992, bb_clk2_pin, bb_clk2_a_pin, 2); + +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8992, div_clk1, div_clk1_a, 11); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8992, div_clk2, div_clk2_a, 12); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8992, 
div_clk3, div_clk3_a, 13); +DEFINE_CLK_SMD_RPM(msm8992, ipa_clk, ipa_a_clk, QCOM_SMD_RPM_IPA_CLK, 0); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8992, ln_bb_clk, ln_bb_a_clk, 8); +DEFINE_CLK_SMD_RPM(msm8992, mmssnoc_ahb_clk, mmssnoc_ahb_a_clk, + QCOM_SMD_RPM_BUS_CLK, 3); +DEFINE_CLK_SMD_RPM_QDSS(msm8992, qdss_clk, qdss_a_clk, + QCOM_SMD_RPM_MISC_CLK, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8992, rf_clk1, rf_clk1_a, 4); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8992, rf_clk2, rf_clk2_a, 5); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8992, rf_clk1_pin, rf_clk1_a_pin, 4); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8992, rf_clk2_pin, rf_clk2_a_pin, 5); + +DEFINE_CLK_SMD_RPM(msm8992, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8992, ce2_clk, ce2_a_clk, QCOM_SMD_RPM_CE_CLK, 1); + +static struct clk_smd_rpm *msm8992_clks[] = { + [RPM_SMD_PNOC_CLK] = &msm8992_pnoc_clk, + [RPM_SMD_PNOC_A_CLK] = &msm8992_pnoc_a_clk, + [RPM_SMD_OCMEMGX_CLK] = &msm8992_ocmemgx_clk, + [RPM_SMD_OCMEMGX_A_CLK] = &msm8992_ocmemgx_a_clk, + [RPM_SMD_BIMC_CLK] = &msm8992_bimc_clk, + [RPM_SMD_BIMC_A_CLK] = &msm8992_bimc_a_clk, + [RPM_SMD_CNOC_CLK] = &msm8992_cnoc_clk, + [RPM_SMD_CNOC_A_CLK] = &msm8992_cnoc_a_clk, + [RPM_SMD_GFX3D_CLK_SRC] = &msm8992_gfx3d_clk_src, + [RPM_SMD_GFX3D_A_CLK_SRC] = &msm8992_gfx3d_a_clk_src, + [RPM_SMD_SNOC_CLK] = &msm8992_snoc_clk, + [RPM_SMD_SNOC_A_CLK] = &msm8992_snoc_a_clk, + [RPM_SMD_BB_CLK1] = &msm8992_bb_clk1, + [RPM_SMD_BB_CLK1_A] = &msm8992_bb_clk1_a, + [RPM_SMD_BB_CLK1_PIN] = &msm8992_bb_clk1_pin, + [RPM_SMD_BB_CLK1_A_PIN] = &msm8992_bb_clk1_a_pin, + [RPM_SMD_BB_CLK2] = &msm8992_bb_clk2, + [RPM_SMD_BB_CLK2_A] = &msm8992_bb_clk2_a, + [RPM_SMD_BB_CLK2_PIN] = &msm8992_bb_clk2_pin, + [RPM_SMD_BB_CLK2_A_PIN] = &msm8992_bb_clk2_a_pin, + [RPM_SMD_DIV_CLK1] = &msm8992_div_clk1, + [RPM_SMD_DIV_A_CLK1] = &msm8992_div_clk1_a, + [RPM_SMD_DIV_CLK2] = &msm8992_div_clk2, + [RPM_SMD_DIV_A_CLK2] = &msm8992_div_clk2_a, + [RPM_SMD_DIV_CLK3] = &msm8992_div_clk3, + [RPM_SMD_DIV_A_CLK3] = &msm8992_div_clk3_a, + [RPM_SMD_IPA_CLK] = &msm8992_ipa_clk, + [RPM_SMD_IPA_A_CLK] = &msm8992_ipa_a_clk, + [RPM_SMD_LN_BB_CLK] = &msm8992_ln_bb_clk, + [RPM_SMD_LN_BB_A_CLK] = &msm8992_ln_bb_a_clk, + [RPM_SMD_MMSSNOC_AHB_CLK] = &msm8992_mmssnoc_ahb_clk, + [RPM_SMD_MMSSNOC_AHB_A_CLK] = &msm8992_mmssnoc_ahb_a_clk, + [RPM_SMD_QDSS_CLK] = &msm8992_qdss_clk, + [RPM_SMD_QDSS_A_CLK] = &msm8992_qdss_a_clk, + [RPM_SMD_RF_CLK1] = &msm8992_rf_clk1, + [RPM_SMD_RF_CLK1_A] = &msm8992_rf_clk1_a, + [RPM_SMD_RF_CLK2] = &msm8992_rf_clk2, + [RPM_SMD_RF_CLK2_A] = &msm8992_rf_clk2_a, + [RPM_SMD_RF_CLK1_PIN] = &msm8992_rf_clk1_pin, + [RPM_SMD_RF_CLK1_A_PIN] = &msm8992_rf_clk1_a_pin, + [RPM_SMD_RF_CLK2_PIN] = &msm8992_rf_clk2_pin, + [RPM_SMD_RF_CLK2_A_PIN] = &msm8992_rf_clk2_a_pin, + [RPM_SMD_CE1_CLK] = &msm8992_ce1_clk, + [RPM_SMD_CE1_A_CLK] = &msm8992_ce1_a_clk, + [RPM_SMD_CE2_CLK] = &msm8992_ce2_clk, + [RPM_SMD_CE2_A_CLK] = &msm8992_ce2_a_clk, +}; + +static const struct rpm_smd_clk_desc rpm_clk_msm8992 = { + .clks = msm8992_clks, + .num_clks = ARRAY_SIZE(msm8992_clks), +}; + +/* msm8994 */ +DEFINE_CLK_SMD_RPM(msm8994, pnoc_clk, pnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8994, ocmemgx_clk, ocmemgx_a_clk, QCOM_SMD_RPM_MEM_CLK, 2); +DEFINE_CLK_SMD_RPM(msm8994, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8994, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2); +DEFINE_CLK_SMD_RPM(msm8994, gfx3d_clk_src, gfx3d_a_clk_src, QCOM_SMD_RPM_MEM_CLK, 1); +DEFINE_CLK_SMD_RPM(msm8994, snoc_clk, snoc_a_clk, 
QCOM_SMD_RPM_BUS_CLK, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8994, bb_clk1, bb_clk1_a, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8994, bb_clk1_pin, bb_clk1_a_pin, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8994, bb_clk2, bb_clk2_a, 2); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8994, bb_clk2_pin, bb_clk2_a_pin, 2); + +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8994, div_clk1, div_clk1_a, 11); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8994, div_clk2, div_clk2_a, 12); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8994, div_clk3, div_clk3_a, 13); +DEFINE_CLK_SMD_RPM(msm8994, ipa_clk, ipa_a_clk, QCOM_SMD_RPM_IPA_CLK, 0); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8994, ln_bb_clk, ln_bb_a_clk, 8); +DEFINE_CLK_SMD_RPM(msm8994, mmssnoc_ahb_clk, mmssnoc_ahb_a_clk, + QCOM_SMD_RPM_BUS_CLK, 3); +DEFINE_CLK_SMD_RPM_QDSS(msm8994, qdss_clk, qdss_a_clk, + QCOM_SMD_RPM_MISC_CLK, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8994, rf_clk1, rf_clk1_a, 4); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8994, rf_clk2, rf_clk2_a, 5); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8994, rf_clk1_pin, rf_clk1_a_pin, 4); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8994, rf_clk2_pin, rf_clk2_a_pin, 5); + +DEFINE_CLK_SMD_RPM(msm8994, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8994, ce2_clk, ce2_a_clk, QCOM_SMD_RPM_CE_CLK, 1); +DEFINE_CLK_SMD_RPM(msm8994, ce3_clk, ce3_a_clk, QCOM_SMD_RPM_CE_CLK, 2); + +static struct clk_smd_rpm *msm8994_clks[] = { + [RPM_SMD_PNOC_CLK] = &msm8994_pnoc_clk, + [RPM_SMD_PNOC_A_CLK] = &msm8994_pnoc_a_clk, + [RPM_SMD_OCMEMGX_CLK] = &msm8994_ocmemgx_clk, + [RPM_SMD_OCMEMGX_A_CLK] = &msm8994_ocmemgx_a_clk, + [RPM_SMD_BIMC_CLK] = &msm8994_bimc_clk, + [RPM_SMD_BIMC_A_CLK] = &msm8994_bimc_a_clk, + [RPM_SMD_CNOC_CLK] = &msm8994_cnoc_clk, + [RPM_SMD_CNOC_A_CLK] = &msm8994_cnoc_a_clk, + [RPM_SMD_GFX3D_CLK_SRC] = &msm8994_gfx3d_clk_src, + [RPM_SMD_GFX3D_A_CLK_SRC] = &msm8994_gfx3d_a_clk_src, + [RPM_SMD_SNOC_CLK] = &msm8994_snoc_clk, + [RPM_SMD_SNOC_A_CLK] = &msm8994_snoc_a_clk, + [RPM_SMD_BB_CLK1] = &msm8994_bb_clk1, + [RPM_SMD_BB_CLK1_A] = &msm8994_bb_clk1_a, + [RPM_SMD_BB_CLK1_PIN] = &msm8994_bb_clk1_pin, + [RPM_SMD_BB_CLK1_A_PIN] = &msm8994_bb_clk1_a_pin, + [RPM_SMD_BB_CLK2] = &msm8994_bb_clk2, + [RPM_SMD_BB_CLK2_A] = &msm8994_bb_clk2_a, + [RPM_SMD_BB_CLK2_PIN] = &msm8994_bb_clk2_pin, + [RPM_SMD_BB_CLK2_A_PIN] = &msm8994_bb_clk2_a_pin, + [RPM_SMD_DIV_CLK1] = &msm8994_div_clk1, + [RPM_SMD_DIV_A_CLK1] = &msm8994_div_clk1_a, + [RPM_SMD_DIV_CLK2] = &msm8994_div_clk2, + [RPM_SMD_DIV_A_CLK2] = &msm8994_div_clk2_a, + [RPM_SMD_DIV_CLK3] = &msm8994_div_clk3, + [RPM_SMD_DIV_A_CLK3] = &msm8994_div_clk3_a, + [RPM_SMD_IPA_CLK] = &msm8994_ipa_clk, + [RPM_SMD_IPA_A_CLK] = &msm8994_ipa_a_clk, + [RPM_SMD_LN_BB_CLK] = &msm8994_ln_bb_clk, + [RPM_SMD_LN_BB_A_CLK] = &msm8994_ln_bb_a_clk, + [RPM_SMD_MMSSNOC_AHB_CLK] = &msm8994_mmssnoc_ahb_clk, + [RPM_SMD_MMSSNOC_AHB_A_CLK] = &msm8994_mmssnoc_ahb_a_clk, + [RPM_SMD_QDSS_CLK] = &msm8994_qdss_clk, + [RPM_SMD_QDSS_A_CLK] = &msm8994_qdss_a_clk, + [RPM_SMD_RF_CLK1] = &msm8994_rf_clk1, + [RPM_SMD_RF_CLK1_A] = &msm8994_rf_clk1_a, + [RPM_SMD_RF_CLK2] = &msm8994_rf_clk2, + [RPM_SMD_RF_CLK2_A] = &msm8994_rf_clk2_a, + [RPM_SMD_RF_CLK1_PIN] = &msm8994_rf_clk1_pin, + [RPM_SMD_RF_CLK1_A_PIN] = &msm8994_rf_clk1_a_pin, + [RPM_SMD_RF_CLK2_PIN] = &msm8994_rf_clk2_pin, + [RPM_SMD_RF_CLK2_A_PIN] = &msm8994_rf_clk2_a_pin, + [RPM_SMD_CE1_CLK] = &msm8994_ce1_clk, + [RPM_SMD_CE1_A_CLK] = &msm8994_ce1_a_clk, + [RPM_SMD_CE2_CLK] = &msm8994_ce2_clk, + [RPM_SMD_CE2_A_CLK] = &msm8994_ce2_a_clk, + [RPM_SMD_CE3_CLK] = &msm8994_ce3_clk, + 
[RPM_SMD_CE3_A_CLK] = &msm8994_ce3_a_clk, +}; + +static const struct rpm_smd_clk_desc rpm_clk_msm8994 = { + .clks = msm8994_clks, + .num_clks = ARRAY_SIZE(msm8994_clks), +}; + /* msm8996 */ DEFINE_CLK_SMD_RPM(msm8996, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0); DEFINE_CLK_SMD_RPM(msm8996, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1); @@ -895,6 +1064,8 @@ static const struct of_device_id rpm_smd_clk_match_table[] = { { .compatible = "qcom,rpmcc-msm8936", .data = &rpm_clk_msm8936 }, { .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 }, { .compatible = "qcom,rpmcc-msm8976", .data = &rpm_clk_msm8976 }, + { .compatible = "qcom,rpmcc-msm8992", .data = &rpm_clk_msm8992 }, + { .compatible = "qcom,rpmcc-msm8994", .data = &rpm_clk_msm8994 }, { .compatible = "qcom,rpmcc-msm8996", .data = &rpm_clk_msm8996 }, { .compatible = "qcom,rpmcc-msm8998", .data = &rpm_clk_msm8998 }, { .compatible = "qcom,rpmcc-qcs404", .data = &rpm_clk_qcs404 }, diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h index e98ed70d91b3..8aaba7cd9589 100644 --- a/include/dt-bindings/clock/qcom,rpmcc.h +++ b/include/dt-bindings/clock/qcom,rpmcc.h @@ -145,5 +145,9 @@ #define RPM_SMD_LN_BB_CLK2_A_PIN 99 #define RPM_SMD_SYSMMNOC_CLK 100 #define RPM_SMD_SYSMMNOC_A_CLK 101 +#define RPM_SMD_CE2_CLK 102 +#define RPM_SMD_CE2_A_CLK 103 +#define RPM_SMD_CE3_CLK 104 +#define RPM_SMD_CE3_A_CLK 105 #endif -- cgit v1.2.3 From 738c58ccac386bb068cba2446bd9dbabeae09b62 Mon Sep 17 00:00:00 2001 From: Kamel Bouhara Date: Sat, 11 Jul 2020 01:08:08 +0200 Subject: ARM: at91: add atmel tcb capabilities Some atmel socs have extra tcb capabilities that allow using a generic clock source or enabling a quadrature decoder. Signed-off-by: Kamel Bouhara Signed-off-by: Alexandre Belloni Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20200710230813.1005150-5-alexandre.belloni@bootlin.com --- include/soc/at91/atmel_tcb.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/soc/at91/atmel_tcb.h b/include/soc/at91/atmel_tcb.h index c3c7200ce151..1d7071dc0bca 100644 --- a/include/soc/at91/atmel_tcb.h +++ b/include/soc/at91/atmel_tcb.h @@ -36,9 +36,14 @@ struct clk; /** * struct atmel_tcb_config - SoC data for a Timer/Counter Block * @counter_width: size in bits of a timer counter register + * @has_gclk: boolean indicating if a timer counter has a generic clock + * @has_qdec: boolean indicating if a timer counter has a quadrature + * decoder. */ struct atmel_tcb_config { size_t counter_width; + bool has_gclk; + bool has_qdec; }; /** -- cgit v1.2.3 From ac219bf3c9bdf9200767e8c98a56ad42c75e5cd5 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sat, 27 Jun 2020 00:40:11 +0200 Subject: leds: lp55xx: Convert to use GPIO descriptors The LP55xx driver is already using the of_gpio() functions to pick a global GPIO number for "enable" from the device tree and request the line. Simplify it by just using a GPIO descriptor. Make sure to keep the enable GPIO line optional, change the naming from "lp5523_enable" to "LP55xx enable" to reflect that this is used on all LP55xx LED drivers. 
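As a minimal sketch of the descriptor-based pattern this conversion relies on (not part of the patch itself; the "enable" lookup name, the GPIOD_ASIS flag and the 1 ms delays are taken from the diff that follows, while the wrapper function and its name are purely illustrative):

    /* Illustrative only; needs <linux/gpio/consumer.h>, <linux/delay.h>, <linux/err.h> */
    static int lp55xx_example_request_enable(struct device *dev)
    {
            struct gpio_desc *enable_gpiod;

            /* Returns NULL, not an error, when no enable GPIO is described */
            enable_gpiod = devm_gpiod_get_optional(dev, "enable", GPIOD_ASIS);
            if (IS_ERR(enable_gpiod))
                    return PTR_ERR(enable_gpiod);

            if (enable_gpiod) {
                    gpiod_set_consumer_name(enable_gpiod, "LP55xx enable");
                    gpiod_set_value(enable_gpiod, 0);
                    usleep_range(1000, 2000); /* keep enable low at least 1 ms */
                    gpiod_set_value(enable_gpiod, 1);
                    usleep_range(1000, 2000); /* 500 us absolute minimum */
            }

            return 0;
    }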
Cc: Milo Kim Signed-off-by: Linus Walleij Signed-off-by: Pavel Machek --- drivers/leds/leds-lp55xx-common.c | 27 +++++++++++---------------- include/linux/platform_data/leds-lp55xx.h | 6 ++++-- 2 files changed, 15 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c index 44ced02b49f9..1354965ac866 100644 --- a/drivers/leds/leds-lp55xx-common.c +++ b/drivers/leds/leds-lp55xx-common.c @@ -17,8 +17,7 @@ #include #include #include -#include -#include +#include #include "leds-lp55xx-common.h" @@ -395,18 +394,11 @@ int lp55xx_init_device(struct lp55xx_chip *chip) if (!pdata || !cfg) return -EINVAL; - if (gpio_is_valid(pdata->enable_gpio)) { - ret = devm_gpio_request_one(dev, pdata->enable_gpio, - GPIOF_DIR_OUT, "lp5523_enable"); - if (ret < 0) { - dev_err(dev, "could not acquire enable gpio (err=%d)\n", - ret); - goto err; - } - - gpio_set_value(pdata->enable_gpio, 0); + if (pdata->enable_gpiod) { + gpiod_set_consumer_name(pdata->enable_gpiod, "LP55xx enable"); + gpiod_set_value(pdata->enable_gpiod, 0); usleep_range(1000, 2000); /* Keep enable down at least 1ms */ - gpio_set_value(pdata->enable_gpio, 1); + gpiod_set_value(pdata->enable_gpiod, 1); usleep_range(1000, 2000); /* 500us abs min. */ } @@ -447,8 +439,8 @@ void lp55xx_deinit_device(struct lp55xx_chip *chip) if (chip->clk) clk_disable_unprepare(chip->clk); - if (gpio_is_valid(pdata->enable_gpio)) - gpio_set_value(pdata->enable_gpio, 0); + if (pdata->enable_gpiod) + gpiod_set_value(pdata->enable_gpiod, 0); } EXPORT_SYMBOL_GPL(lp55xx_deinit_device); @@ -579,7 +571,10 @@ struct lp55xx_platform_data *lp55xx_of_populate_pdata(struct device *dev, of_property_read_string(np, "label", &pdata->label); of_property_read_u8(np, "clock-mode", &pdata->clock_mode); - pdata->enable_gpio = of_get_named_gpio(np, "enable-gpio", 0); + pdata->enable_gpiod = devm_gpiod_get_optional(dev, "enable", + GPIOD_ASIS); + if (IS_ERR(pdata->enable_gpiod)) + return ERR_CAST(pdata->enable_gpiod); /* LP8501 specific */ of_property_read_u8(np, "pwr-sel", (u8 *)&pdata->pwr_sel); diff --git a/include/linux/platform_data/leds-lp55xx.h b/include/linux/platform_data/leds-lp55xx.h index 96a787100fda..00492d6ff018 100644 --- a/include/linux/platform_data/leds-lp55xx.h +++ b/include/linux/platform_data/leds-lp55xx.h @@ -12,6 +12,8 @@ #ifndef _LEDS_LP55XX_H #define _LEDS_LP55XX_H +#include + /* Clock configuration */ #define LP55XX_CLOCK_AUTO 0 #define LP55XX_CLOCK_INT 1 @@ -49,7 +51,7 @@ enum lp8501_pwr_sel { * @clock_mode : Input clock mode. LP55XX_CLOCK_AUTO or _INT or _EXT * @setup_resources : Platform specific function before enabling the chip * @release_resources : Platform specific function after disabling the chip - * @enable : EN pin control by platform side + * @enable_gpiod : enable GPIO descriptor * @patterns : Predefined pattern data for RGB channels * @num_patterns : Number of patterns * @update_config : Value of CONFIG register @@ -65,7 +67,7 @@ struct lp55xx_platform_data { u8 clock_mode; /* optional enable GPIO */ - int enable_gpio; + struct gpio_desc *enable_gpiod; /* Predefined pattern data */ struct lp55xx_predef_pattern *patterns; -- cgit v1.2.3 From 21249616f02dc3f4c5efc3b3e9ba48e428b0131c Mon Sep 17 00:00:00 2001 From: Kent Gibson Date: Wed, 8 Jul 2020 12:15:57 +0800 Subject: gpio: uapi: fix misplaced comment line The second line of the description for event_type is before the first. Move it to after the first line. 
Signed-off-by: Kent Gibson Signed-off-by: Bartosz Golaszewski --- include/uapi/linux/gpio.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/linux/gpio.h b/include/uapi/linux/gpio.h index 0206383c0383..9c27cecf406f 100644 --- a/include/uapi/linux/gpio.h +++ b/include/uapi/linux/gpio.h @@ -71,8 +71,8 @@ enum { * of a GPIO line * @info: updated line information * @timestamp: estimate of time of status change occurrence, in nanoseconds - * and GPIOLINE_CHANGED_CONFIG * @event_type: one of GPIOLINE_CHANGED_REQUESTED, GPIOLINE_CHANGED_RELEASED + * and GPIOLINE_CHANGED_CONFIG * * Note: struct gpioline_info embedded here has 32-bit alignment on its own, * but it works fine with 64-bit alignment too. With its 72 byte size, we can -- cgit v1.2.3 From a24015fa8b89e4605a9c6581d56b48d991e2b66b Mon Sep 17 00:00:00 2001 From: Anson Huang Date: Fri, 26 Jun 2020 05:14:07 +0800 Subject: firmware: imx: Move i.MX SCU soc driver into imx firmware folder The i.MX SCU soc driver depends on SCU firmware driver, so it has to use platform driver model for proper defer probe operation, since it has no device binding in DT file, a simple platform device is created together inside the platform driver. To make it more clean, we can just move the entire SCU soc driver into imx firmware folder and initialized by i.MX SCU firmware driver. Signed-off-by: Anson Huang Signed-off-by: Shawn Guo --- arch/arm64/configs/defconfig | 1 - drivers/firmware/imx/Makefile | 2 +- drivers/firmware/imx/imx-scu-soc.c | 138 +++++++++++++++++++++++++++++ drivers/firmware/imx/imx-scu.c | 4 + drivers/soc/imx/Kconfig | 9 -- drivers/soc/imx/Makefile | 1 - drivers/soc/imx/soc-imx-scu.c | 172 ------------------------------------- include/linux/firmware/imx/sci.h | 1 + 8 files changed, 144 insertions(+), 184 deletions(-) create mode 100644 drivers/firmware/imx/imx-scu-soc.c delete mode 100644 drivers/soc/imx/soc-imx-scu.c (limited to 'include') diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 883e8bace3ed..c99237ac10a4 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -850,7 +850,6 @@ CONFIG_OWL_PM_DOMAINS=y CONFIG_RASPBERRYPI_POWER=y CONFIG_FSL_DPAA=y CONFIG_FSL_MC_DPIO=y -CONFIG_IMX_SCU_SOC=y CONFIG_QCOM_AOSS_QMP=y CONFIG_QCOM_GENI_SE=y CONFIG_QCOM_RMTFS_MEM=m diff --git a/drivers/firmware/imx/Makefile b/drivers/firmware/imx/Makefile index 17ea3613e142..b76acbade2a0 100644 --- a/drivers/firmware/imx/Makefile +++ b/drivers/firmware/imx/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_IMX_DSP) += imx-dsp.o -obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o imx-scu-irq.o rm.o +obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o imx-scu-irq.o rm.o imx-scu-soc.o obj-$(CONFIG_IMX_SCU_PD) += scu-pd.o diff --git a/drivers/firmware/imx/imx-scu-soc.c b/drivers/firmware/imx/imx-scu-soc.c new file mode 100644 index 000000000000..2f32353de2c9 --- /dev/null +++ b/drivers/firmware/imx/imx-scu-soc.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 NXP. 
+ */ + +#include +#include +#include +#include +#include +#include + +static struct imx_sc_ipc *imx_sc_soc_ipc_handle; + +struct imx_sc_msg_misc_get_soc_id { + struct imx_sc_rpc_msg hdr; + union { + struct { + u32 control; + u16 resource; + } __packed req; + struct { + u32 id; + } resp; + } data; +} __packed __aligned(4); + +struct imx_sc_msg_misc_get_soc_uid { + struct imx_sc_rpc_msg hdr; + u32 uid_low; + u32 uid_high; +} __packed; + +static int imx_scu_soc_uid(u64 *soc_uid) +{ + struct imx_sc_msg_misc_get_soc_uid msg; + struct imx_sc_rpc_msg *hdr = &msg.hdr; + int ret; + + hdr->ver = IMX_SC_RPC_VERSION; + hdr->svc = IMX_SC_RPC_SVC_MISC; + hdr->func = IMX_SC_MISC_FUNC_UNIQUE_ID; + hdr->size = 1; + + ret = imx_scu_call_rpc(imx_sc_soc_ipc_handle, &msg, true); + if (ret) { + pr_err("%s: get soc uid failed, ret %d\n", __func__, ret); + return ret; + } + + *soc_uid = msg.uid_high; + *soc_uid <<= 32; + *soc_uid |= msg.uid_low; + + return 0; +} + +static int imx_scu_soc_id(void) +{ + struct imx_sc_msg_misc_get_soc_id msg; + struct imx_sc_rpc_msg *hdr = &msg.hdr; + int ret; + + hdr->ver = IMX_SC_RPC_VERSION; + hdr->svc = IMX_SC_RPC_SVC_MISC; + hdr->func = IMX_SC_MISC_FUNC_GET_CONTROL; + hdr->size = 3; + + msg.data.req.control = IMX_SC_C_ID; + msg.data.req.resource = IMX_SC_R_SYSTEM; + + ret = imx_scu_call_rpc(imx_sc_soc_ipc_handle, &msg, true); + if (ret) { + pr_err("%s: get soc info failed, ret %d\n", __func__, ret); + return ret; + } + + return msg.data.resp.id; +} + +int imx_scu_soc_init(struct device *dev) +{ + struct soc_device_attribute *soc_dev_attr; + struct soc_device *soc_dev; + int id, ret; + u64 uid = 0; + u32 val; + + ret = imx_scu_get_handle(&imx_sc_soc_ipc_handle); + if (ret) + return ret; + + soc_dev_attr = devm_kzalloc(dev, sizeof(*soc_dev_attr), + GFP_KERNEL); + if (!soc_dev_attr) + return -ENOMEM; + + soc_dev_attr->family = "Freescale i.MX"; + + ret = of_property_read_string(of_root, + "model", + &soc_dev_attr->machine); + if (ret) + return ret; + + id = imx_scu_soc_id(); + if (id < 0) + return -EINVAL; + + ret = imx_scu_soc_uid(&uid); + if (ret < 0) + return -EINVAL; + + /* format soc_id value passed from SCU firmware */ + val = id & 0x1f; + soc_dev_attr->soc_id = devm_kasprintf(dev, GFP_KERNEL, "0x%x", val); + if (!soc_dev_attr->soc_id) + return -ENOMEM; + + /* format revision value passed from SCU firmware */ + val = (id >> 5) & 0xf; + val = (((val >> 2) + 1) << 4) | (val & 0x3); + soc_dev_attr->revision = devm_kasprintf(dev, GFP_KERNEL, "%d.%d", + (val >> 4) & 0xf, val & 0xf); + if (!soc_dev_attr->revision) + return -ENOMEM; + + soc_dev_attr->serial_number = devm_kasprintf(dev, GFP_KERNEL, + "%016llX", uid); + if (!soc_dev_attr->serial_number) + return -ENOMEM; + + soc_dev = soc_device_register(soc_dev_attr); + if (IS_ERR(soc_dev)) + return PTR_ERR(soc_dev); + + return 0; +} diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c index 2ab048222fe9..dca79caccd01 100644 --- a/drivers/firmware/imx/imx-scu.c +++ b/drivers/firmware/imx/imx-scu.c @@ -328,6 +328,10 @@ static int imx_scu_probe(struct platform_device *pdev) imx_sc_ipc_handle = sc_ipc; + ret = imx_scu_soc_init(dev); + if (ret) + dev_warn(dev, "failed to initialize SoC info: %d\n", ret); + ret = imx_scu_enable_general_irq_channel(dev); if (ret) dev_warn(dev, diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig index d515d2cc20ed..d49fa63ed9c9 100644 --- a/drivers/soc/imx/Kconfig +++ b/drivers/soc/imx/Kconfig @@ -8,15 +8,6 @@ config IMX_GPCV2_PM_DOMAINS select PM_GENERIC_DOMAINS default 
y if SOC_IMX7D -config IMX_SCU_SOC - bool "i.MX System Controller Unit SoC info support" - depends on IMX_SCU - select SOC_BUS - help - If you say yes here you get support for the NXP i.MX System - Controller Unit SoC info module, it will provide the SoC info - like SoC family, ID and revision etc. - config SOC_IMX8M bool "i.MX8M SoC family support" depends on ARCH_MXC || COMPILE_TEST diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile index 446143241fe7..078dc918f4f3 100644 --- a/drivers/soc/imx/Makefile +++ b/drivers/soc/imx/Makefile @@ -5,4 +5,3 @@ endif obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o obj-$(CONFIG_SOC_IMX8M) += soc-imx8m.o -obj-$(CONFIG_IMX_SCU_SOC) += soc-imx-scu.o diff --git a/drivers/soc/imx/soc-imx-scu.c b/drivers/soc/imx/soc-imx-scu.c deleted file mode 100644 index 92448ca9a6f8..000000000000 --- a/drivers/soc/imx/soc-imx-scu.c +++ /dev/null @@ -1,172 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright 2019 NXP. - */ - -#include -#include -#include -#include -#include -#include - -#define IMX_SCU_SOC_DRIVER_NAME "imx-scu-soc" - -static struct imx_sc_ipc *soc_ipc_handle; - -struct imx_sc_msg_misc_get_soc_id { - struct imx_sc_rpc_msg hdr; - union { - struct { - u32 control; - u16 resource; - } __packed req; - struct { - u32 id; - } resp; - } data; -} __packed __aligned(4); - -struct imx_sc_msg_misc_get_soc_uid { - struct imx_sc_rpc_msg hdr; - u32 uid_low; - u32 uid_high; -} __packed; - -static int imx_scu_soc_uid(u64 *soc_uid) -{ - struct imx_sc_msg_misc_get_soc_uid msg; - struct imx_sc_rpc_msg *hdr = &msg.hdr; - int ret; - - hdr->ver = IMX_SC_RPC_VERSION; - hdr->svc = IMX_SC_RPC_SVC_MISC; - hdr->func = IMX_SC_MISC_FUNC_UNIQUE_ID; - hdr->size = 1; - - ret = imx_scu_call_rpc(soc_ipc_handle, &msg, true); - if (ret) { - pr_err("%s: get soc uid failed, ret %d\n", __func__, ret); - return ret; - } - - *soc_uid = msg.uid_high; - *soc_uid <<= 32; - *soc_uid |= msg.uid_low; - - return 0; -} - -static int imx_scu_soc_id(void) -{ - struct imx_sc_msg_misc_get_soc_id msg; - struct imx_sc_rpc_msg *hdr = &msg.hdr; - int ret; - - hdr->ver = IMX_SC_RPC_VERSION; - hdr->svc = IMX_SC_RPC_SVC_MISC; - hdr->func = IMX_SC_MISC_FUNC_GET_CONTROL; - hdr->size = 3; - - msg.data.req.control = IMX_SC_C_ID; - msg.data.req.resource = IMX_SC_R_SYSTEM; - - ret = imx_scu_call_rpc(soc_ipc_handle, &msg, true); - if (ret) { - pr_err("%s: get soc info failed, ret %d\n", __func__, ret); - return ret; - } - - return msg.data.resp.id; -} - -static int imx_scu_soc_probe(struct platform_device *pdev) -{ - struct soc_device_attribute *soc_dev_attr; - struct soc_device *soc_dev; - int id, ret; - u64 uid = 0; - u32 val; - - ret = imx_scu_get_handle(&soc_ipc_handle); - if (ret) - return ret; - - soc_dev_attr = devm_kzalloc(&pdev->dev, sizeof(*soc_dev_attr), - GFP_KERNEL); - if (!soc_dev_attr) - return -ENOMEM; - - soc_dev_attr->family = "Freescale i.MX"; - - ret = of_property_read_string(of_root, - "model", - &soc_dev_attr->machine); - if (ret) - return ret; - - id = imx_scu_soc_id(); - if (id < 0) - return -EINVAL; - - ret = imx_scu_soc_uid(&uid); - if (ret < 0) - return -EINVAL; - - /* format soc_id value passed from SCU firmware */ - val = id & 0x1f; - soc_dev_attr->soc_id = devm_kasprintf(&pdev->dev, GFP_KERNEL, "0x%x", val); - if (!soc_dev_attr->soc_id) - return -ENOMEM; - - /* format revision value passed from SCU firmware */ - val = (id >> 5) & 0xf; - val = (((val >> 2) + 1) << 4) | (val & 0x3); - soc_dev_attr->revision = devm_kasprintf(&pdev->dev, 
GFP_KERNEL, "%d.%d", - (val >> 4) & 0xf, val & 0xf); - if (!soc_dev_attr->revision) - return -ENOMEM; - - soc_dev_attr->serial_number = devm_kasprintf(&pdev->dev, GFP_KERNEL, - "%016llX", uid); - if (!soc_dev_attr->serial_number) - return -ENOMEM; - - soc_dev = soc_device_register(soc_dev_attr); - if (IS_ERR(soc_dev)) - return PTR_ERR(soc_dev); - - return 0; -} - -static struct platform_driver imx_scu_soc_driver = { - .driver = { - .name = IMX_SCU_SOC_DRIVER_NAME, - }, - .probe = imx_scu_soc_probe, -}; - -static int __init imx_scu_soc_init(void) -{ - struct platform_device *pdev; - struct device_node *np; - int ret; - - np = of_find_compatible_node(NULL, NULL, "fsl,imx-scu"); - if (!np) - return -ENODEV; - - of_node_put(np); - - ret = platform_driver_register(&imx_scu_soc_driver); - if (ret) - return ret; - - pdev = platform_device_register_simple(IMX_SCU_SOC_DRIVER_NAME, - -1, NULL, 0); - if (IS_ERR(pdev)) - platform_driver_unregister(&imx_scu_soc_driver); - - return PTR_ERR_OR_ZERO(pdev); -} -device_initcall(imx_scu_soc_init); diff --git a/include/linux/firmware/imx/sci.h b/include/linux/firmware/imx/sci.h index 3c459f54a88f..22c76571a294 100644 --- a/include/linux/firmware/imx/sci.h +++ b/include/linux/firmware/imx/sci.h @@ -20,4 +20,5 @@ int imx_scu_enable_general_irq_channel(struct device *dev); int imx_scu_irq_register_notifier(struct notifier_block *nb); int imx_scu_irq_unregister_notifier(struct notifier_block *nb); int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable); +int imx_scu_soc_init(struct device *dev); #endif /* _SC_SCI_H */ -- cgit v1.2.3 From e24779649c840ce1ecb638a30e7c821075630184 Mon Sep 17 00:00:00 2001 From: Marian-Cristian Rotariu Date: Tue, 7 Jul 2020 17:18:04 +0100 Subject: dt-bindings: power: Add r8a774e1 SYSC power domain definitions This patch adds power domain indices for the RZ/G2H (r8a774e1) SoC. Signed-off-by: Marian-Cristian Rotariu Signed-off-by: Lad Prabhakar Link: https://lore.kernel.org/r/1594138692-16816-5-git-send-email-prabhakar.mahadev-lad.rj@bp.renesas.com Signed-off-by: Geert Uytterhoeven --- include/dt-bindings/power/r8a774e1-sysc.h | 36 +++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 include/dt-bindings/power/r8a774e1-sysc.h (limited to 'include') diff --git a/include/dt-bindings/power/r8a774e1-sysc.h b/include/dt-bindings/power/r8a774e1-sysc.h new file mode 100644 index 000000000000..7edb8161db36 --- /dev/null +++ b/include/dt-bindings/power/r8a774e1-sysc.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2020 Renesas Electronics Corp. + */ +#ifndef __DT_BINDINGS_POWER_R8A774E1_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A774E1_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. 
SYSCISR, Interrupt Status Register) + */ + +#define R8A774E1_PD_CA57_CPU0 0 +#define R8A774E1_PD_CA57_CPU1 1 +#define R8A774E1_PD_CA57_CPU2 2 +#define R8A774E1_PD_CA57_CPU3 3 +#define R8A774E1_PD_CA53_CPU0 5 +#define R8A774E1_PD_CA53_CPU1 6 +#define R8A774E1_PD_CA53_CPU2 7 +#define R8A774E1_PD_CA53_CPU3 8 +#define R8A774E1_PD_A3VP 9 +#define R8A774E1_PD_CA57_SCU 12 +#define R8A774E1_PD_A3VC 14 +#define R8A774E1_PD_3DG_A 17 +#define R8A774E1_PD_3DG_B 18 +#define R8A774E1_PD_3DG_C 19 +#define R8A774E1_PD_3DG_D 20 +#define R8A774E1_PD_CA53_SCU 21 +#define R8A774E1_PD_3DG_E 22 +#define R8A774E1_PD_A2VC1 26 + +/* Always-on power area */ +#define R8A774E1_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A774E1_SYSC_H__ */ -- cgit v1.2.3 From ef1c9924287d11660cfc7900aeeeb4732188743e Mon Sep 17 00:00:00 2001 From: Marian-Cristian Rotariu Date: Tue, 7 Jul 2020 17:18:09 +0100 Subject: clk: renesas: Add r8a774e1 CPG Core Clock Definitions Add all RZ/G2H Clock Pulse Generator Core Clock Outputs, as listed in Table 11.2 ("List of Clocks [RZ/G2H]") of the RZ/G2H Hardware User's Manual. Signed-off-by: Marian-Cristian Rotariu Signed-off-by: Lad Prabhakar Link: https://lore.kernel.org/r/1594138692-16816-10-git-send-email-prabhakar.mahadev-lad.rj@bp.renesas.com Signed-off-by: Geert Uytterhoeven --- include/dt-bindings/clock/r8a774e1-cpg-mssr.h | 59 +++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 include/dt-bindings/clock/r8a774e1-cpg-mssr.h (limited to 'include') diff --git a/include/dt-bindings/clock/r8a774e1-cpg-mssr.h b/include/dt-bindings/clock/r8a774e1-cpg-mssr.h new file mode 100644 index 000000000000..b2fc1d1c3c47 --- /dev/null +++ b/include/dt-bindings/clock/r8a774e1-cpg-mssr.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2020 Renesas Electronics Corp. 
+ */ +#ifndef __DT_BINDINGS_CLOCK_R8A774E1_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A774E1_CPG_MSSR_H__ + +#include + +/* R8A774E1 CPG Core Clocks */ +#define R8A774E1_CLK_Z 0 +#define R8A774E1_CLK_Z2 1 +#define R8A774E1_CLK_ZG 2 +#define R8A774E1_CLK_ZTR 3 +#define R8A774E1_CLK_ZTRD2 4 +#define R8A774E1_CLK_ZT 5 +#define R8A774E1_CLK_ZX 6 +#define R8A774E1_CLK_S0D1 7 +#define R8A774E1_CLK_S0D2 8 +#define R8A774E1_CLK_S0D3 9 +#define R8A774E1_CLK_S0D4 10 +#define R8A774E1_CLK_S0D6 11 +#define R8A774E1_CLK_S0D8 12 +#define R8A774E1_CLK_S0D12 13 +#define R8A774E1_CLK_S1D2 14 +#define R8A774E1_CLK_S1D4 15 +#define R8A774E1_CLK_S2D1 16 +#define R8A774E1_CLK_S2D2 17 +#define R8A774E1_CLK_S2D4 18 +#define R8A774E1_CLK_S3D1 19 +#define R8A774E1_CLK_S3D2 20 +#define R8A774E1_CLK_S3D4 21 +#define R8A774E1_CLK_LB 22 +#define R8A774E1_CLK_CL 23 +#define R8A774E1_CLK_ZB3 24 +#define R8A774E1_CLK_ZB3D2 25 +#define R8A774E1_CLK_ZB3D4 26 +#define R8A774E1_CLK_CR 27 +#define R8A774E1_CLK_CRD2 28 +#define R8A774E1_CLK_SD0H 29 +#define R8A774E1_CLK_SD0 30 +#define R8A774E1_CLK_SD1H 31 +#define R8A774E1_CLK_SD1 32 +#define R8A774E1_CLK_SD2H 33 +#define R8A774E1_CLK_SD2 34 +#define R8A774E1_CLK_SD3H 35 +#define R8A774E1_CLK_SD3 36 +#define R8A774E1_CLK_RPC 37 +#define R8A774E1_CLK_RPCD2 38 +#define R8A774E1_CLK_MSO 39 +#define R8A774E1_CLK_HDMI 40 +#define R8A774E1_CLK_CSI0 41 +#define R8A774E1_CLK_CP 42 +#define R8A774E1_CLK_CPEX 43 +#define R8A774E1_CLK_R 44 +#define R8A774E1_CLK_OSC 45 +#define R8A774E1_CLK_CANFD 46 + +#endif /* __DT_BINDINGS_CLOCK_R8A774E1_CPG_MSSR_H__ */ -- cgit v1.2.3 From 02c003cc18dfb6db1001856fccb978a1179fe89a Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Fri, 10 Jul 2020 14:39:17 +0100 Subject: firmware: arm_scmi: Remove zero-length array in SCMI notifications Substitute zero-length array defined in scmi_base_error_report with a flexible length array definition. Link: https://lore.kernel.org/r/20200710133919.39792-1-cristian.marussi@arm.com Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- include/linux/scmi_protocol.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 46d98be92466..7d4348fb7330 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -421,7 +421,7 @@ struct scmi_base_error_report { u32 agent_id; bool fatal; u16 cmd_count; - u64 reports[0]; + u64 reports[]; }; #endif /* _LINUX_SCMI_PROTOCOL_H */ -- cgit v1.2.3 From 72a5eb9d9c319c99c11cfd9cfb486380dd136840 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Fri, 10 Jul 2020 14:39:19 +0100 Subject: firmware: arm_scmi: Remove fixed size fields from reports/scmi_event_header Event reports are used to convey information describing events to the registered user-callbacks: they are necessarily derived from the underlying raw SCMI events' messages but they are not meant to expose or directly mirror any of those messages data layout, which belong to the protocol layer. Using fixed size types for report fields, mirroring messages structure, is at odd with this: get rid of them using more generic, equivalent, typing. Substitute scmi_event_header fixed size fields with generic types too and shuffle around fields definitions to minimize implicit padding while adapting involved functions. 
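Purely as an illustration of the consumer side (not part of this patch; the callback and its registration path are assumed, only the scmi_perf_level_report layout comes from the change below):

    /* Hypothetical notifier callback; 'data' points at the report built by the core */
    static int example_perf_level_notify(struct notifier_block *nb,
                                         unsigned long event, void *data)
    {
            const struct scmi_perf_level_report *r = data;

            pr_info("perf domain %u: level %u (agent %u) at %lld ns\n",
                    r->domain_id, r->performance_level, r->agent_id,
                    ktime_to_ns(r->timestamp));

            return NOTIFY_OK;
    }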
Link: https://lore.kernel.org/r/20200710133919.39792-3-cristian.marussi@arm.com Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/base.c | 2 +- drivers/firmware/arm_scmi/driver.c | 4 +-- drivers/firmware/arm_scmi/notify.c | 15 ++++++----- drivers/firmware/arm_scmi/notify.h | 8 +++--- drivers/firmware/arm_scmi/perf.c | 2 +- drivers/firmware/arm_scmi/power.c | 2 +- drivers/firmware/arm_scmi/reset.c | 2 +- drivers/firmware/arm_scmi/sensors.c | 2 +- include/linux/scmi_protocol.h | 52 ++++++++++++++++++------------------- 9 files changed, 46 insertions(+), 43 deletions(-) (limited to 'include') diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c index 54f378e946f1..9853bd3c4d45 100644 --- a/drivers/firmware/arm_scmi/base.c +++ b/drivers/firmware/arm_scmi/base.c @@ -273,7 +273,7 @@ static int scmi_base_set_notify_enabled(const struct scmi_handle *handle, } static void *scmi_base_fill_custom_report(const struct scmi_handle *handle, - u8 evt_id, u64 timestamp, + u8 evt_id, ktime_t timestamp, const void *payld, size_t payld_sz, void *report, u32 *src_id) { diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 19a4287fc0f7..03ec74242c14 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -205,13 +205,13 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer) static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr) { - u64 ts; struct scmi_xfer *xfer; struct device *dev = cinfo->dev; struct scmi_info *info = handle_to_scmi_info(cinfo->handle); struct scmi_xfers_info *minfo = &info->rx_minfo; + ktime_t ts; - ts = ktime_get_boottime_ns(); + ts = ktime_get_boottime(); xfer = scmi_xfer_get(cinfo->handle, minfo); if (IS_ERR(xfer)) { dev_err(dev, "failed to get free message slot (%ld)\n", diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c index 752415367305..4731daaacd19 100644 --- a/drivers/firmware/arm_scmi/notify.c +++ b/drivers/firmware/arm_scmi/notify.c @@ -80,6 +80,7 @@ #include #include #include +#include #include #include #include @@ -246,18 +247,18 @@ struct events_queue { * struct scmi_event_header - A utility header * @timestamp: The timestamp, in nanoseconds (boottime), which was associated * to this event as soon as it entered the SCMI RX ISR - * @evt_id: Event ID (corresponds to the Event MsgID for this Protocol) * @payld_sz: Effective size of the embedded message payload which follows + * @evt_id: Event ID (corresponds to the Event MsgID for this Protocol) * @payld: A reference to the embedded event payload * * This header is prepended to each received event message payload before * queueing it on the related &struct events_queue. 
*/ struct scmi_event_header { - u64 timestamp; - u8 evt_id; - size_t payld_sz; - u8 payld[]; + ktime_t timestamp; + size_t payld_sz; + unsigned char evt_id; + unsigned char payld[]; }; struct scmi_registered_event; @@ -572,7 +573,7 @@ static void scmi_events_dispatcher(struct work_struct *work) * Return: 0 on Success */ int scmi_notify(const struct scmi_handle *handle, u8 proto_id, u8 evt_id, - const void *buf, size_t len, u64 ts) + const void *buf, size_t len, ktime_t ts) { struct scmi_registered_event *r_evt; struct scmi_event_header eh; @@ -595,7 +596,7 @@ int scmi_notify(const struct scmi_handle *handle, u8 proto_id, u8 evt_id, if (kfifo_avail(&r_evt->proto->equeue.kfifo) < sizeof(eh) + len) { dev_warn(handle->dev, "queue full, dropping proto_id:%d evt_id:%d ts:%lld\n", - proto_id, evt_id, ts); + proto_id, evt_id, ktime_to_ns(ts)); return -ENOMEM; } diff --git a/drivers/firmware/arm_scmi/notify.h b/drivers/firmware/arm_scmi/notify.h index 3791bb7aa79b..3485f20fa70e 100644 --- a/drivers/firmware/arm_scmi/notify.h +++ b/drivers/firmware/arm_scmi/notify.h @@ -10,6 +10,7 @@ #define _SCMI_NOTIFY_H #include +#include #include #define SCMI_PROTO_QUEUE_SZ 4096 @@ -48,8 +49,9 @@ struct scmi_event_ops { int (*set_notify_enabled)(const struct scmi_handle *handle, u8 evt_id, u32 src_id, bool enabled); void *(*fill_custom_report)(const struct scmi_handle *handle, - u8 evt_id, u64 timestamp, const void *payld, - size_t payld_sz, void *report, u32 *src_id); + u8 evt_id, ktime_t timestamp, + const void *payld, size_t payld_sz, + void *report, u32 *src_id); }; int scmi_notification_init(struct scmi_handle *handle); @@ -61,6 +63,6 @@ int scmi_register_protocol_events(const struct scmi_handle *handle, const struct scmi_event *evt, int num_events, int num_sources); int scmi_notify(const struct scmi_handle *handle, u8 proto_id, u8 evt_id, - const void *buf, size_t len, u64 ts); + const void *buf, size_t len, ktime_t ts); #endif /* _SCMI_NOTIFY_H */ diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index 8bcad96e06ca..3e1e87012c95 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c @@ -780,7 +780,7 @@ static int scmi_perf_set_notify_enabled(const struct scmi_handle *handle, } static void *scmi_perf_fill_custom_report(const struct scmi_handle *handle, - u8 evt_id, u64 timestamp, + u8 evt_id, ktime_t timestamp, const void *payld, size_t payld_sz, void *report, u32 *src_id) { diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c index 4f6757980739..46f213644c49 100644 --- a/drivers/firmware/arm_scmi/power.c +++ b/drivers/firmware/arm_scmi/power.c @@ -227,7 +227,7 @@ static int scmi_power_set_notify_enabled(const struct scmi_handle *handle, } static void *scmi_power_fill_custom_report(const struct scmi_handle *handle, - u8 evt_id, u64 timestamp, + u8 evt_id, ktime_t timestamp, const void *payld, size_t payld_sz, void *report, u32 *src_id) { diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c index fb7cb517900b..3691bafca057 100644 --- a/drivers/firmware/arm_scmi/reset.c +++ b/drivers/firmware/arm_scmi/reset.c @@ -240,7 +240,7 @@ static int scmi_reset_set_notify_enabled(const struct scmi_handle *handle, } static void *scmi_reset_fill_custom_report(const struct scmi_handle *handle, - u8 evt_id, u64 timestamp, + u8 evt_id, ktime_t timestamp, const void *payld, size_t payld_sz, void *report, u32 *src_id) { diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c 
index 2120ac4787c9..1af0ad362e82 100644 --- a/drivers/firmware/arm_scmi/sensors.c +++ b/drivers/firmware/arm_scmi/sensors.c @@ -296,7 +296,7 @@ static int scmi_sensor_set_notify_enabled(const struct scmi_handle *handle, } static void *scmi_sensor_fill_custom_report(const struct scmi_handle *handle, - u8 evt_id, u64 timestamp, + u8 evt_id, ktime_t timestamp, const void *payld, size_t payld_sz, void *report, u32 *src_id) { diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 7d4348fb7330..7e5dd7d1e221 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -381,47 +381,47 @@ enum scmi_notification_events { }; struct scmi_power_state_changed_report { - u64 timestamp; - u32 agent_id; - u32 domain_id; - u32 power_state; + ktime_t timestamp; + unsigned int agent_id; + unsigned int domain_id; + unsigned int power_state; }; struct scmi_perf_limits_report { - u64 timestamp; - u32 agent_id; - u32 domain_id; - u32 range_max; - u32 range_min; + ktime_t timestamp; + unsigned int agent_id; + unsigned int domain_id; + unsigned int range_max; + unsigned int range_min; }; struct scmi_perf_level_report { - u64 timestamp; - u32 agent_id; - u32 domain_id; - u32 performance_level; + ktime_t timestamp; + unsigned int agent_id; + unsigned int domain_id; + unsigned int performance_level; }; struct scmi_sensor_trip_point_report { - u64 timestamp; - u32 agent_id; - u32 sensor_id; - u32 trip_point_desc; + ktime_t timestamp; + unsigned int agent_id; + unsigned int sensor_id; + unsigned int trip_point_desc; }; struct scmi_reset_issued_report { - u64 timestamp; - u32 agent_id; - u32 domain_id; - u32 reset_state; + ktime_t timestamp; + unsigned int agent_id; + unsigned int domain_id; + unsigned int reset_state; }; struct scmi_base_error_report { - u64 timestamp; - u32 agent_id; - bool fatal; - u16 cmd_count; - u64 reports[]; + ktime_t timestamp; + unsigned int agent_id; + bool fatal; + unsigned int cmd_count; + unsigned long long reports[]; }; #endif /* _LINUX_SCMI_PROTOCOL_H */ -- cgit v1.2.3 From b52fb259dff8d0da76d38005464edf4201b01d76 Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Fri, 29 May 2020 12:23:41 +0200 Subject: mmc: core: Always allow the card detect uevent to be consumed The approach to allow userspace ~5s to consume the uevent, which is triggered when a new card is inserted/initialized, currently requires the mmc host to support system wakeup. This is unnecessary limiting, especially for an mmc host that relies on a GPIO IRQ for card detect. More precisely, the mmc host may not support system wakeup for its corresponding struct device, while the GPIO IRQ still could be configured as a wakeup IRQ via enable_irq_wake(). To support all various cases, let's simply drop the need for the wakeup support. Instead let's always register a wakeup source and activate it for all card detect IRQs by calling __pm_wakeup_event(). 
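As an illustrative sketch (not part of this patch; the driver and handler names are hypothetical), a host driver using a GPIO card-detect interrupt reaches this path through mmc_detect_change(), which calls _mmc_detect_change() with cd_irq set and, with this change, arms host->ws even when the device itself cannot wake the system:

	/* hypothetical card-detect ISR in a host driver */
	static irqreturn_t my_host_cd_irq(int irq, void *dev_id)
	{
		struct mmc_host *mmc = dev_id;

		/*
		 * Schedules a rescan; the core now also activates host->ws
		 * for ~5s so userspace can consume the uevent, regardless
		 * of device_can_wakeup(mmc_dev(mmc)).
		 */
		mmc_detect_change(mmc, msecs_to_jiffies(200));
		return IRQ_HANDLED;
	}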
Signed-off-by: Ulf Hansson Link: https://lore.kernel.org/r/20200529102341.12529-1-ulf.hansson@linaro.org --- drivers/mmc/core/core.c | 10 +++++----- drivers/mmc/core/host.c | 3 +++ include/linux/mmc/host.h | 1 + 3 files changed, 9 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 8d2b808e9b58..aff3fa937674 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -1455,12 +1455,12 @@ void mmc_detach_bus(struct mmc_host *host) void _mmc_detect_change(struct mmc_host *host, unsigned long delay, bool cd_irq) { /* - * If the device is configured as wakeup, we prevent a new sleep for - * 5 s to give provision for user space to consume the event. + * Prevent system sleep for 5s to allow user space to consume the + * corresponding uevent. This is especially useful, when CD irq is used + * as a system wakeup, but doesn't hurt in other cases. */ - if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) && - device_can_wakeup(mmc_dev(host))) - pm_wakeup_event(mmc_dev(host), 5000); + if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL)) + __pm_wakeup_event(host->ws, 5000); host->detect_change = 1; mmc_schedule_delayed_work(&host->detect, delay); diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index c8768726d925..6141a85749ca 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -36,6 +37,7 @@ static DEFINE_IDA(mmc_host_ida); static void mmc_host_classdev_release(struct device *dev) { struct mmc_host *host = cls_dev_to_mmc_host(dev); + wakeup_source_unregister(host->ws); ida_simple_remove(&mmc_host_ida, host->index); kfree(host); } @@ -400,6 +402,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) host->index = err; dev_set_name(&host->class_dev, "mmc%d", host->index); + host->ws = wakeup_source_register(NULL, dev_name(&host->class_dev)); host->parent = dev; host->class_dev.parent = dev; diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 7149bab555d7..1fa4fa1caef5 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -287,6 +287,7 @@ struct mmc_host { #ifdef CONFIG_PM_SLEEP struct notifier_block pm_notify; #endif + struct wakeup_source *ws; /* Enable consume of uevents */ u32 max_current_330; u32 max_current_300; u32 max_current_180; -- cgit v1.2.3 From 4f7872ae920fccf6a07ca64cc40792bd6be0d0d5 Mon Sep 17 00:00:00 2001 From: Pali Rohár Date: Mon, 29 Jun 2020 09:21:44 +0200 Subject: mmc: sdio: Move SDIO IDs from rsi_sdio driver to common include file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Define appropriate macro names for consistency with other macros. 
Signed-off-by: Pali Rohár Link: https://lore.kernel.org/r/20200629072144.24351-1-pali@kernel.org Signed-off-by: Ulf Hansson --- drivers/net/wireless/rsi/rsi_91x_sdio.c | 8 ++++---- drivers/net/wireless/rsi/rsi_sdio.h | 4 ---- include/linux/mmc/sdio_ids.h | 4 ++++ 3 files changed, 8 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index 5d6143a55187..a04ff75c409f 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -1038,10 +1038,10 @@ static int rsi_probe(struct sdio_func *pfunction, goto fail_free_adapter; } - if (pfunction->device == RSI_SDIO_PID_9113) { + if (pfunction->device == SDIO_DEVICE_ID_RSI_9113) { rsi_dbg(ERR_ZONE, "%s: 9113 module detected\n", __func__); adapter->device_model = RSI_DEV_9113; - } else if (pfunction->device == RSI_SDIO_PID_9116) { + } else if (pfunction->device == SDIO_DEVICE_ID_RSI_9116) { rsi_dbg(ERR_ZONE, "%s: 9116 module detected\n", __func__); adapter->device_model = RSI_DEV_9116; } else { @@ -1526,8 +1526,8 @@ static const struct dev_pm_ops rsi_pm_ops = { #endif static const struct sdio_device_id rsi_dev_table[] = { - { SDIO_DEVICE(RSI_SDIO_VENDOR_ID, RSI_SDIO_PID_9113) }, - { SDIO_DEVICE(RSI_SDIO_VENDOR_ID, RSI_SDIO_PID_9116) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_RSI, SDIO_DEVICE_ID_RSI_9113) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_RSI, SDIO_DEVICE_ID_RSI_9116) }, { /* Blank */}, }; diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h index c5cfb6238f73..9afc1d0d2684 100644 --- a/drivers/net/wireless/rsi/rsi_sdio.h +++ b/drivers/net/wireless/rsi/rsi_sdio.h @@ -28,10 +28,6 @@ #include #include "rsi_main.h" -#define RSI_SDIO_VENDOR_ID 0x041B -#define RSI_SDIO_PID_9113 0x9330 -#define RSI_SDIO_PID_9116 0x9116 - enum sdio_interrupt_type { BUFFER_FULL = 0x0, BUFFER_AVAILABLE = 0x2, diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index 15ed8ce9d394..ab41801c5f51 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -118,6 +118,10 @@ #define SDIO_DEVICE_ID_SIANO_NOVA_A0 0x1100 #define SDIO_DEVICE_ID_SIANO_STELLAR 0x5347 +#define SDIO_VENDOR_ID_RSI 0x041b +#define SDIO_DEVICE_ID_RSI_9113 0x9330 +#define SDIO_DEVICE_ID_RSI_9116 0x9116 + #define SDIO_VENDOR_ID_TI_WL1251 0x104c #define SDIO_DEVICE_ID_TI_WL1251 0x9066 -- cgit v1.2.3 From 2f96126500991f356b9eacdc6611bec78e6253ed Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Wed, 1 Jul 2020 11:23:17 +0100 Subject: arch: arm: mach-omap2: mmc: Move omap_mmc_notify_cover_event() prototype MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When building the kernel with W=1 the build system complains of: drivers/mmc/host/omap.c:854:6: warning: no previous prototype for ‘omap_mmc_notify_cover_event’ [-Wmissing-prototypes] 854 | void omap_mmc_notify_cover_event(struct device *dev, int num, int is_closed) | ^~~~~~~~~~~~~~~~~~~~~~~~~~~ If we move the prototype into a shared headerfile the build system will be satisfied. Rather than create a whole new headerfile just for this purpose, it makes sense to use the already existing mmc-omap.h. 
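For context, a hedged sketch of a caller after the move (the board handler shown here is hypothetical; only the prototype and its new location come from this patch):

	#include <linux/platform_data/mmc-omap.h>

	/* hypothetical board-specific cover-switch handler */
	static void my_board_cover_changed(struct device *mmc_dev, int closed)
	{
		/* slot 0; 'closed' reflects the cover GPIO state */
		omap_mmc_notify_cover_event(mmc_dev, 0, closed);
	}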
Cc: Ulf Hansson Cc: linux-mmc@vger.kernel.org Cc: Tony Lindgren Cc: linux-omap@vger.kernel.org Signed-off-by: Lee Jones Acked-by: Tony Lindgren Link: https://lore.kernel.org/r/20200701102317.235032-1-lee.jones@linaro.org Signed-off-by: Ulf Hansson --- arch/arm/mach-omap2/mmc.h | 4 ---- include/linux/platform_data/mmc-omap.h | 3 +++ 2 files changed, 3 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/arch/arm/mach-omap2/mmc.h b/arch/arm/mach-omap2/mmc.h index 7f4e053c3434..b5533e93cb63 100644 --- a/arch/arm/mach-omap2/mmc.h +++ b/arch/arm/mach-omap2/mmc.h @@ -16,7 +16,3 @@ static inline int omap_msdi_reset(struct omap_hwmod *oh) return 0; } #endif - -/* called from board-specific card detection service routine */ -extern void omap_mmc_notify_cover_event(struct device *dev, int slot, - int is_closed); diff --git a/include/linux/platform_data/mmc-omap.h b/include/linux/platform_data/mmc-omap.h index 9acf0e87aa9b..f0b8947e6b07 100644 --- a/include/linux/platform_data/mmc-omap.h +++ b/include/linux/platform_data/mmc-omap.h @@ -116,3 +116,6 @@ struct omap_mmc_platform_data { } slots[OMAP_MMC_MAX_SLOTS]; }; + +extern void omap_mmc_notify_cover_event(struct device *dev, int slot, + int is_closed); -- cgit v1.2.3 From ec02760b63c21c439d44956e9c66d0af24011f03 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Tue, 7 Jul 2020 20:58:42 +0900 Subject: mmc: core: Add MMC_CAP2_FULL_PWR_CYCLE_IN_SUSPEND The commit 5a36d6bcdf23 ("mmc: core: Add DT-bindings for MMC_CAP2_FULL_PWR_CYCLE") added the "full-pwr-cycle" property which is possible to perform a full power cycle of the card at any time. However, some environment (like r8a77951-salvator-xs) is possible to perform a full power cycle of the card in suspend via firmware (PSCI on arm-trusted-firmware). So, in worst case, since we are not doing a graceful shutdown of the eMMC device (just cut VCCQ while the eMMC is "sleeping") in suspend, it could lead to internal data corruptions. So, add MMC_CAP2_FULL_PWR_CYCLE_IN_SUSPEND to do a graceful shutdown which issues Power Off notification before entering system suspend. 
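A minimal sketch of how the new capability gets set (assumption: a host driver that knows its platform firmware cuts card power during system suspend; this snippet is not taken from the patch). Device tree users instead set the "full-pwr-cycle-in-suspend" property, which mmc_of_parse() maps onto the same flag, as in the hunk below:

	/* in a hypothetical host driver's probe */
	mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE_IN_SUSPEND;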
Signed-off-by: Yoshihiro Shimoda Link: https://lore.kernel.org/r/1594123122-13156-3-git-send-email-yoshihiro.shimoda.uh@renesas.com Signed-off-by: Ulf Hansson --- drivers/mmc/core/host.c | 2 ++ drivers/mmc/core/mmc.c | 3 ++- include/linux/mmc/host.h | 1 + 3 files changed, 5 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 793597556cd7..ce43f7573d80 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -277,6 +277,8 @@ int mmc_of_parse(struct mmc_host *host) host->caps |= MMC_CAP_SDIO_IRQ; if (device_property_read_bool(dev, "full-pwr-cycle")) host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE; + if (device_property_read_bool(dev, "full-pwr-cycle-in-suspend")) + host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE_IN_SUSPEND; if (device_property_read_bool(dev, "keep-power-in-suspend")) host->pm_caps |= MMC_PM_KEEP_POWER; if (device_property_read_bool(dev, "wakeup-source") || diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 4203303f946a..b3fa193de846 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -2038,7 +2038,8 @@ static int _mmc_suspend(struct mmc_host *host, bool is_suspend) goto out; if (mmc_can_poweroff_notify(host->card) && - ((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend)) + ((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend || + (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE_IN_SUSPEND))) err = mmc_poweroff_notify(host->card, notify_type); else if (mmc_can_sleep(host->card)) err = mmc_sleep(host); diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 1fa4fa1caef5..c5b6e97cb21a 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -352,6 +352,7 @@ struct mmc_host { #define MMC_CAP2_BOOTPART_NOACC (1 << 0) /* Boot partition no access */ #define MMC_CAP2_FULL_PWR_CYCLE (1 << 2) /* Can do full power cycle */ +#define MMC_CAP2_FULL_PWR_CYCLE_IN_SUSPEND (1 << 3) /* Can do full power cycle in suspend */ #define MMC_CAP2_HS200_1_8V_SDR (1 << 5) /* can support */ #define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */ #define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \ -- cgit v1.2.3 From f4bf09d5303ab3896604c58363fc705ba649ccb8 Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Sun, 21 Jun 2020 15:33:39 +0300 Subject: iio: core: remove iio_priv_to_dev() helper All users of this helper have been updated to not use it. Remove it now, so that we don't need to move it when creating the iio_dev_opaque structure. 
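Illustrative sketch of the replacement pattern used by converted drivers (the struct and function names here are hypothetical): rather than deriving the iio_dev from the private data, callers keep or pass the iio_dev explicitly:

	struct my_sensor_state {
		struct iio_dev *indio_dev;	/* explicit back-pointer, if needed */
		int last_raw;
	};

	static void my_sensor_handle_event(struct my_sensor_state *st)
	{
		/* was: struct iio_dev *indio_dev = iio_priv_to_dev(st); */
		struct iio_dev *indio_dev = st->indio_dev;

		dev_dbg(&indio_dev->dev, "raw=%d\n", st->last_raw);
	}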
Signed-off-by: Alexandru Ardelean Signed-off-by: Jonathan Cameron --- include/linux/iio/iio.h | 6 ------ 1 file changed, 6 deletions(-) (limited to 'include') diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 30c9c9502478..e2df67a3b9ab 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -689,12 +689,6 @@ static inline void *iio_priv(const struct iio_dev *indio_dev) return indio_dev->priv; } -static inline struct iio_dev *iio_priv_to_dev(void *priv) -{ - return (struct iio_dev *)((char *)priv - - ALIGN(sizeof(struct iio_dev), IIO_ALIGN)); -} - void iio_device_free(struct iio_dev *indio_dev); struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv); struct iio_trigger *devm_iio_trigger_alloc(struct device *dev, -- cgit v1.2.3 From 8cdcd8aeee2819199ec7f68114b77b04c10611d3 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Thu, 25 Jun 2020 22:02:52 +0200 Subject: spi: imx/fsl-lpspi: Convert to GPIO descriptors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This converts the two Freescale i.MX SPI drivers Freescale i.MX (CONFIG_SPI_IMX) and Freescale i.MX LPSPI (CONFIG_SPI_FSL_LPSPI) to use GPIO descriptors handled in the SPI core for GPIO chip selects whether defined in the device tree or a board file. The reason why both are converted at the same time is that they were both using the same platform data and platform device population helpers when using board files intertwining the code so this gives a cleaner cut. The platform device creation was passing a platform data container from each boardfile down to the driver using struct spi_imx_master from , but this was only conveying the number of chipselects and an int * array of the chipselect GPIO numbers. The imx27 and imx31 platforms had code passing the now-unused platform data when creating the platform devices, this has been repurposed to pass around GPIO descriptor tables. The platform data struct that was just passing an array of integers and number of chip selects for the GPIO lines has been removed. The number of chipselects used to be passed from the board file, because this number also limits the number of native chipselects that the platform can use. To deal with this we just augment the i.MX (CONFIG_SPI_IMX) driver to support 3 chipselects if the platform does not define "num-cs" as a device property (such as from the device tree). This covers all the legacy boards as these use <= 3 native chip selects (or GPIO lines, and in that case the number of chip selects is determined by the core from the number of available GPIO lines). Any new boards should use device tree, so this is a reasonable simplification to cover all old boards. The LPSPI driver never assigned the number of chipselects and thus always fall back to the core default of 1 chip select if no GPIOs are defined in the device tree. The Freescale i.MX driver was already partly utilizing the SPI core to obtain the GPIO numbers from the device tree, so this completes the transtion to let the core handle all of it. All board files and the core i.MX boardfile registration code is augmented to account for these changes. This has been compile-tested with the imx_v4_v5_defconfig and the imx_v6_v7_defconfig. 
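For reference, a hedged sketch of the board-file pattern this conversion ends up with (the names are made up; the real tables are in the hunks below): a GPIO chip select is described by a lookup table keyed on the SPI controller device name with the "cs" con_id, indexed by chip-select number, and registered with the GPIO core instead of being passed as platform data:

	static struct gpiod_lookup_table my_board_spi0_cs_gpios = {
		.dev_id = "imx27-cspi.0",	/* SPI controller device name */
		.table = {
			/* chip select 0: bank 3, line 28, active low */
			GPIO_LOOKUP_IDX("imx21-gpio.3", 28, "cs", 0, GPIO_ACTIVE_LOW),
			{ },
		},
	};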
Signed-off-by: Linus Walleij Acked-by: Shawn Guo Cc: Uwe Kleine-König Cc: Robin Gong Cc: Trent Piepho Cc: Clark Wang Cc: Shawn Guo Cc: Sascha Hauer Cc: Pengutronix Kernel Team Cc: Fabio Estevam Cc: NXP Linux Team Link: https://lore.kernel.org/r/20200625200252.207614-1-linus.walleij@linaro.org Signed-off-by: Mark Brown --- arch/arm/mach-imx/devices-imx27.h | 10 +-- arch/arm/mach-imx/devices-imx31.h | 10 +-- arch/arm/mach-imx/devices/devices-common.h | 5 +- arch/arm/mach-imx/devices/platform-spi_imx.c | 9 +-- arch/arm/mach-imx/mach-mx27_3ds.c | 40 ++++++++---- arch/arm/mach-imx/mach-mx31_3ds.c | 13 +--- arch/arm/mach-imx/mach-mx31lilly.c | 14 +---- arch/arm/mach-imx/mach-mx31lite.c | 19 +----- arch/arm/mach-imx/mach-mx31moboard.c | 12 +--- arch/arm/mach-imx/mach-pca100.c | 21 +++++-- arch/arm/mach-imx/mach-pcm037_eet.c | 7 +-- drivers/spi/spi-fsl-lpspi.c | 47 +------------- drivers/spi/spi-imx.c | 92 +++++----------------------- include/linux/platform_data/spi-imx.h | 33 ---------- 14 files changed, 88 insertions(+), 244 deletions(-) delete mode 100644 include/linux/platform_data/spi-imx.h (limited to 'include') diff --git a/arch/arm/mach-imx/devices-imx27.h b/arch/arm/mach-imx/devices-imx27.h index f89f4ae0e1ca..583a1d773d68 100644 --- a/arch/arm/mach-imx/devices-imx27.h +++ b/arch/arm/mach-imx/devices-imx27.h @@ -75,11 +75,11 @@ extern const struct imx_mxc_w1_data imx27_mxc_w1_data; imx_add_mxc_w1(&imx27_mxc_w1_data) extern const struct imx_spi_imx_data imx27_cspi_data[]; -#define imx27_add_cspi(id, pdata) \ - imx_add_spi_imx(&imx27_cspi_data[id], pdata) -#define imx27_add_spi_imx0(pdata) imx27_add_cspi(0, pdata) -#define imx27_add_spi_imx1(pdata) imx27_add_cspi(1, pdata) -#define imx27_add_spi_imx2(pdata) imx27_add_cspi(2, pdata) +#define imx27_add_cspi(id, gtable) \ + imx_add_spi_imx(&imx27_cspi_data[id], gtable) +#define imx27_add_spi_imx0(gtable) imx27_add_cspi(0, gtable) +#define imx27_add_spi_imx1(gtable) imx27_add_cspi(1, gtable) +#define imx27_add_spi_imx2(gtable) imx27_add_cspi(2, gtable) extern const struct imx_pata_imx_data imx27_pata_imx_data; #define imx27_add_pata_imx() \ diff --git a/arch/arm/mach-imx/devices-imx31.h b/arch/arm/mach-imx/devices-imx31.h index 5a4ba35a47ed..f7cc62372532 100644 --- a/arch/arm/mach-imx/devices-imx31.h +++ b/arch/arm/mach-imx/devices-imx31.h @@ -69,11 +69,11 @@ extern const struct imx_mxc_w1_data imx31_mxc_w1_data; imx_add_mxc_w1(&imx31_mxc_w1_data) extern const struct imx_spi_imx_data imx31_cspi_data[]; -#define imx31_add_cspi(id, pdata) \ - imx_add_spi_imx(&imx31_cspi_data[id], pdata) -#define imx31_add_spi_imx0(pdata) imx31_add_cspi(0, pdata) -#define imx31_add_spi_imx1(pdata) imx31_add_cspi(1, pdata) -#define imx31_add_spi_imx2(pdata) imx31_add_cspi(2, pdata) +#define imx31_add_cspi(id, gtable) \ + imx_add_spi_imx(&imx31_cspi_data[id], gtable) +#define imx31_add_spi_imx0(gtable) imx31_add_cspi(0, gtable) +#define imx31_add_spi_imx1(gtable) imx31_add_cspi(1, gtable) +#define imx31_add_spi_imx2(gtable) imx31_add_cspi(2, gtable) extern const struct imx_pata_imx_data imx31_pata_imx_data; #define imx31_add_pata_imx() \ diff --git a/arch/arm/mach-imx/devices/devices-common.h b/arch/arm/mach-imx/devices/devices-common.h index 2a685adec1df..f8f3e4967c31 100644 --- a/arch/arm/mach-imx/devices/devices-common.h +++ b/arch/arm/mach-imx/devices/devices-common.h @@ -6,6 +6,7 @@ #include #include #include +#include #include extern struct device mxc_aips_bus; @@ -276,7 +277,6 @@ struct platform_device *__init imx_add_sdhci_esdhc_imx( const struct 
imx_sdhci_esdhc_imx_data *data, const struct esdhc_platform_data *pdata); -#include struct imx_spi_imx_data { const char *devid; int id; @@ -285,8 +285,7 @@ struct imx_spi_imx_data { int irq; }; struct platform_device *__init imx_add_spi_imx( - const struct imx_spi_imx_data *data, - const struct spi_imx_master *pdata); + const struct imx_spi_imx_data *data, struct gpiod_lookup_table *gtable); struct platform_device *imx_add_imx_dma(char *name, resource_size_t iobase, int irq, int irq_err); diff --git a/arch/arm/mach-imx/devices/platform-spi_imx.c b/arch/arm/mach-imx/devices/platform-spi_imx.c index f2cafa52c187..27747bf628a3 100644 --- a/arch/arm/mach-imx/devices/platform-spi_imx.c +++ b/arch/arm/mach-imx/devices/platform-spi_imx.c @@ -3,6 +3,7 @@ * Copyright (C) 2009-2010 Pengutronix * Uwe Kleine-Koenig */ +#include #include "../hardware.h" #include "devices-common.h" @@ -57,8 +58,7 @@ const struct imx_spi_imx_data imx35_cspi_data[] __initconst = { #endif /* ifdef CONFIG_SOC_IMX35 */ struct platform_device *__init imx_add_spi_imx( - const struct imx_spi_imx_data *data, - const struct spi_imx_master *pdata) + const struct imx_spi_imx_data *data, struct gpiod_lookup_table *gtable) { struct resource res[] = { { @@ -71,7 +71,8 @@ struct platform_device *__init imx_add_spi_imx( .flags = IORESOURCE_IRQ, }, }; - + if (gtable) + gpiod_add_lookup_table(gtable); return imx_add_platform_device(data->devid, data->id, - res, ARRAY_SIZE(res), pdata, sizeof(*pdata)); + res, ARRAY_SIZE(res), NULL, 0); } diff --git a/arch/arm/mach-imx/mach-mx27_3ds.c b/arch/arm/mach-imx/mach-mx27_3ds.c index 1da5f07952ac..2db4475b7f85 100644 --- a/arch/arm/mach-imx/mach-mx27_3ds.c +++ b/arch/arm/mach-imx/mach-mx27_3ds.c @@ -303,18 +303,34 @@ static struct imx_ssi_platform_data mx27_3ds_ssi_pdata = { }; /* SPI */ -static int spi1_chipselect[] = {SPI1_SS0}; - -static const struct spi_imx_master spi1_pdata __initconst = { - .chipselect = spi1_chipselect, - .num_chipselect = ARRAY_SIZE(spi1_chipselect), +static struct gpiod_lookup_table mx27_spi1_gpiod_table = { + .dev_id = "imx27-cspi.0", /* Actual device name for spi1 */ + .table = { + /* + * The i.MX27 has the i.MX21 GPIO controller, the SPI1 CS GPIO + * SPI1_SS0 is numbered IMX_GPIO_NR(4, 28). + * + * This is in "bank 4" which is subtracted by one in the macro + * so this is actually bank 3 on "imx21-gpio.3". + */ + GPIO_LOOKUP_IDX("imx21-gpio.3", 28, "cs", 0, GPIO_ACTIVE_LOW), + { }, + }, }; -static int spi2_chipselect[] = {SPI2_SS0}; - -static const struct spi_imx_master spi2_pdata __initconst = { - .chipselect = spi2_chipselect, - .num_chipselect = ARRAY_SIZE(spi2_chipselect), +static struct gpiod_lookup_table mx27_spi2_gpiod_table = { + .dev_id = "imx27-cspi.1", /* Actual device name for spi2 */ + .table = { + /* + * The i.MX27 has the i.MX21 GPIO controller, the SPI2 CS GPIO + * SPI2_SS0 is numbered IMX_GPIO_NR(4, 21). + * + * This is in "bank 4" which is subtracted by one in the macro + * so this is actually bank 3 on "imx21-gpio.3". 
+ */ + GPIO_LOOKUP_IDX("imx21-gpio.3", 21, "cs", 0, GPIO_ACTIVE_LOW), + { }, + }, }; static struct imx_fb_videomode mx27_3ds_modes[] = { @@ -397,8 +413,8 @@ static void __init mx27pdk_init(void) imx27_add_imx_keypad(&mx27_3ds_keymap_data); imx27_add_imx2_wdt(); - imx27_add_spi_imx1(&spi2_pdata); - imx27_add_spi_imx0(&spi1_pdata); + imx27_add_spi_imx1(&mx27_spi2_gpiod_table); + imx27_add_spi_imx0(&mx27_spi1_gpiod_table); imx27_add_imx_i2c(0, &mx27_3ds_i2c0_data); imx27_add_imx_fb(&mx27_3ds_fb_data); diff --git a/arch/arm/mach-imx/mach-mx31_3ds.c b/arch/arm/mach-imx/mach-mx31_3ds.c index e81386190479..23e63d3b4c6a 100644 --- a/arch/arm/mach-imx/mach-mx31_3ds.c +++ b/arch/arm/mach-imx/mach-mx31_3ds.c @@ -378,15 +378,6 @@ static struct imx_ssi_platform_data mx31_3ds_ssi_pdata = { .flags = IMX_SSI_DMA | IMX_SSI_NET, }; -/* SPI */ -static const struct spi_imx_master spi0_pdata __initconst = { - .num_chipselect = 3, -}; - -static const struct spi_imx_master spi1_pdata __initconst = { - .num_chipselect = 3, -}; - static struct spi_board_info mx31_3ds_spi_devs[] __initdata = { { .modalias = "mc13783", @@ -561,14 +552,14 @@ static void __init mx31_3ds_init(void) imx31_add_imx_uart0(&uart_pdata); imx31_add_mxc_nand(&mx31_3ds_nand_board_info); - imx31_add_spi_imx1(&spi1_pdata); + imx31_add_spi_imx1(NULL); imx31_add_imx_keypad(&mx31_3ds_keymap_data); imx31_add_imx2_wdt(); imx31_add_imx_i2c0(&mx31_3ds_i2c0_data); - imx31_add_spi_imx0(&spi0_pdata); + imx31_add_spi_imx0(NULL); imx31_add_ipu_core(); imx31_add_mx3_sdc_fb(&mx3fb_pdata); diff --git a/arch/arm/mach-imx/mach-mx31lilly.c b/arch/arm/mach-imx/mach-mx31lilly.c index 8f725248299e..4b955ccc92cd 100644 --- a/arch/arm/mach-imx/mach-mx31lilly.c +++ b/arch/arm/mach-imx/mach-mx31lilly.c @@ -215,16 +215,6 @@ static void __init lilly1131_usb_init(void) imx31_add_mxc_ehci_hs(2, &usbh2_pdata); } -/* SPI */ - -static const struct spi_imx_master spi0_pdata __initconst = { - .num_chipselect = 3, -}; - -static const struct spi_imx_master spi1_pdata __initconst = { - .num_chipselect = 3, -}; - static struct mc13xxx_platform_data mc13783_pdata __initdata = { .flags = MC13XXX_USE_RTC | MC13XXX_USE_TOUCHSCREEN, }; @@ -281,8 +271,8 @@ static void __init mx31lilly_board_init(void) mxc_iomux_alloc_pin(MX31_PIN_CSPI2_SS1__SS1, "SPI2_SS1"); mxc_iomux_alloc_pin(MX31_PIN_CSPI2_SS2__SS2, "SPI2_SS2"); - imx31_add_spi_imx0(&spi0_pdata); - imx31_add_spi_imx1(&spi1_pdata); + imx31_add_spi_imx0(NULL); + imx31_add_spi_imx1(NULL); regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); } diff --git a/arch/arm/mach-imx/mach-mx31lite.c b/arch/arm/mach-imx/mach-mx31lite.c index c0055f57c02d..aaccf52f7ac1 100644 --- a/arch/arm/mach-imx/mach-mx31lite.c +++ b/arch/arm/mach-imx/mach-mx31lite.c @@ -73,11 +73,6 @@ static const struct imxuart_platform_data uart_pdata __initconst = { .flags = IMXUART_HAVE_RTSCTS, }; -/* SPI */ -static const struct spi_imx_master spi0_pdata __initconst = { - .num_chipselect = 3, -}; - static const struct mxc_nand_platform_data mx31lite_nand_board_info __initconst = { .width = 1, @@ -111,16 +106,6 @@ static struct platform_device smsc911x_device = { }, }; -/* - * SPI - * - * The MC13783 is the only hard-wired SPI device on the module. 
- */ - -static const struct spi_imx_master spi1_pdata __initconst = { - .num_chipselect = 1, -}; - static struct mc13xxx_platform_data mc13783_pdata __initdata = { .flags = MC13XXX_USE_RTC, }; @@ -246,13 +231,13 @@ static void __init mx31lite_init(void) "mx31lite"); imx31_add_imx_uart0(&uart_pdata); - imx31_add_spi_imx0(&spi0_pdata); + imx31_add_spi_imx0(NULL); /* NOR and NAND flash */ platform_device_register(&physmap_flash_device); imx31_add_mxc_nand(&mx31lite_nand_board_info); - imx31_add_spi_imx1(&spi1_pdata); + imx31_add_spi_imx1(NULL); regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); } diff --git a/arch/arm/mach-imx/mach-mx31moboard.c b/arch/arm/mach-imx/mach-mx31moboard.c index 36f08f45b0ca..96845a4eaf57 100644 --- a/arch/arm/mach-imx/mach-mx31moboard.c +++ b/arch/arm/mach-imx/mach-mx31moboard.c @@ -143,10 +143,6 @@ static const struct imxi2c_platform_data moboard_i2c1_data __initconst = { .bitrate = 100000, }; -static const struct spi_imx_master moboard_spi1_pdata __initconst = { - .num_chipselect = 3, -}; - static struct regulator_consumer_supply sdhc_consumers[] = { { .dev_name = "imx31-mmc.0", @@ -287,10 +283,6 @@ static struct spi_board_info moboard_spi_board_info[] __initdata = { }, }; -static const struct spi_imx_master moboard_spi2_pdata __initconst = { - .num_chipselect = 2, -}; - #define SDHC1_CD IOMUX_TO_GPIO(MX31_PIN_ATA_CS0) #define SDHC1_WP IOMUX_TO_GPIO(MX31_PIN_ATA_CS1) @@ -514,8 +506,8 @@ static void __init mx31moboard_init(void) imx31_add_imx_i2c0(&moboard_i2c0_data); imx31_add_imx_i2c1(&moboard_i2c1_data); - imx31_add_spi_imx1(&moboard_spi1_pdata); - imx31_add_spi_imx2(&moboard_spi2_pdata); + imx31_add_spi_imx1(NULL); + imx31_add_spi_imx2(NULL); mx31moboard_init_cam(); diff --git a/arch/arm/mach-imx/mach-pca100.c b/arch/arm/mach-imx/mach-pca100.c index 2e28e1b5cddf..27a3678e0658 100644 --- a/arch/arm/mach-imx/mach-pca100.c +++ b/arch/arm/mach-imx/mach-pca100.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -188,11 +189,19 @@ static struct spi_board_info pca100_spi_board_info[] __initdata = { }, }; -static int pca100_spi_cs[] = {SPI1_SS0, SPI1_SS1}; - -static const struct spi_imx_master pca100_spi0_data __initconst = { - .chipselect = pca100_spi_cs, - .num_chipselect = ARRAY_SIZE(pca100_spi_cs), +static struct gpiod_lookup_table pca100_spi0_gpiod_table = { + .dev_id = "imx27-cspi.0", /* Actual device name for spi0 */ + .table = { + /* + * The i.MX27 has the i.MX21 GPIO controller, port D is + * bank 3 and thus named "imx21-gpio.3". 
+ * SPI1_SS0 is GPIO_PORTD + 28 + * SPI1_SS1 is GPIO_PORTD + 27 + */ + GPIO_LOOKUP_IDX("imx21-gpio.3", 28, "cs", 0, GPIO_ACTIVE_LOW), + GPIO_LOOKUP_IDX("imx21-gpio.3", 27, "cs", 1, GPIO_ACTIVE_LOW), + { }, + }, }; static void pca100_ac97_warm_reset(struct snd_ac97 *ac97) @@ -362,7 +371,7 @@ static void __init pca100_init(void) mxc_gpio_mode(GPIO_PORTD | 27 | GPIO_GPIO | GPIO_IN); spi_register_board_info(pca100_spi_board_info, ARRAY_SIZE(pca100_spi_board_info)); - imx27_add_spi_imx0(&pca100_spi0_data); + imx27_add_spi_imx0(&pca100_spi0_gpiod_table); imx27_add_imx_fb(&pca100_fb_data); diff --git a/arch/arm/mach-imx/mach-pcm037_eet.c b/arch/arm/mach-imx/mach-pcm037_eet.c index 51f5142920cf..8b0e03a595c1 100644 --- a/arch/arm/mach-imx/mach-pcm037_eet.c +++ b/arch/arm/mach-imx/mach-pcm037_eet.c @@ -52,11 +52,6 @@ static struct spi_board_info pcm037_spi_dev[] = { }, }; -/* Platform Data for MXC CSPI */ -static const struct spi_imx_master pcm037_spi1_pdata __initconst = { - .num_chipselect = 2, -}; - /* GPIO-keys input device */ static struct gpio_keys_button pcm037_gpio_keys[] = { { @@ -163,7 +158,7 @@ int __init pcm037_eet_init_devices(void) /* SPI */ spi_register_board_info(pcm037_spi_dev, ARRAY_SIZE(pcm037_spi_dev)); - imx31_add_spi_imx0(&pcm037_spi1_pdata); + imx31_add_spi_imx0(NULL); imx_add_gpio_keys(&pcm037_gpio_keys_platform_data); diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index 1552b28b9515..38b44446c947 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include @@ -19,11 +18,9 @@ #include #include #include -#include #include #include #include -#include #include #include #include @@ -224,20 +221,6 @@ static int lpspi_unprepare_xfer_hardware(struct spi_controller *controller) return 0; } -static int fsl_lpspi_prepare_message(struct spi_controller *controller, - struct spi_message *msg) -{ - struct fsl_lpspi_data *fsl_lpspi = - spi_controller_get_devdata(controller); - struct spi_device *spi = msg->spi; - int gpio = fsl_lpspi->chipselect[spi->chip_select]; - - if (gpio_is_valid(gpio)) - gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 
0 : 1); - - return 0; -} - static void fsl_lpspi_write_tx_fifo(struct fsl_lpspi_data *fsl_lpspi) { u8 txfifo_cnt; @@ -831,13 +814,10 @@ static int fsl_lpspi_init_rpm(struct fsl_lpspi_data *fsl_lpspi) static int fsl_lpspi_probe(struct platform_device *pdev) { - struct device_node *np = pdev->dev.of_node; struct fsl_lpspi_data *fsl_lpspi; struct spi_controller *controller; - struct spi_imx_master *lpspi_platform_info = - dev_get_platdata(&pdev->dev); struct resource *res; - int i, ret, irq; + int ret, irq; u32 temp; bool is_slave; @@ -867,6 +847,8 @@ static int fsl_lpspi_probe(struct platform_device *pdev) controller->dev.of_node = pdev->dev.of_node; controller->bus_num = pdev->id; controller->slave_abort = fsl_lpspi_slave_abort; + if (!fsl_lpspi->is_slave) + controller->use_gpio_descriptors = true; ret = devm_spi_register_controller(&pdev->dev, controller); if (ret < 0) { @@ -874,29 +856,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev) goto out_controller_put; } - if (!fsl_lpspi->is_slave) { - for (i = 0; i < controller->num_chipselect; i++) { - int cs_gpio = of_get_named_gpio(np, "cs-gpios", i); - - if (!gpio_is_valid(cs_gpio) && lpspi_platform_info) - cs_gpio = lpspi_platform_info->chipselect[i]; - - fsl_lpspi->chipselect[i] = cs_gpio; - if (!gpio_is_valid(cs_gpio)) - continue; - - ret = devm_gpio_request(&pdev->dev, - fsl_lpspi->chipselect[i], - DRIVER_NAME); - if (ret) { - dev_err(&pdev->dev, "can't get cs gpios\n"); - goto out_controller_put; - } - } - controller->cs_gpios = fsl_lpspi->chipselect; - controller->prepare_message = fsl_lpspi_prepare_message; - } - init_completion(&fsl_lpspi->xfer_done); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 2b8d339f1936..fdc25f549378 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -8,7 +8,6 @@ #include #include #include -#include #include #include #include @@ -21,10 +20,9 @@ #include #include #include -#include +#include #include -#include #define DRIVER_NAME "spi_imx" @@ -723,7 +721,7 @@ static int mx31_prepare_transfer(struct spi_imx_data *spi_imx, reg |= MX31_CSPICTRL_POL; if (spi->mode & SPI_CS_HIGH) reg |= MX31_CSPICTRL_SSPOL; - if (!gpio_is_valid(spi->cs_gpio)) + if (!spi->cs_gpiod) reg |= (spi->chip_select) << (is_imx35_cspi(spi_imx) ? 
MX35_CSPICTRL_CS_SHIFT : MX31_CSPICTRL_CS_SHIFT); @@ -824,7 +822,7 @@ static int mx21_prepare_transfer(struct spi_imx_data *spi_imx, reg |= MX21_CSPICTRL_POL; if (spi->mode & SPI_CS_HIGH) reg |= MX21_CSPICTRL_SSPOL; - if (!gpio_is_valid(spi->cs_gpio)) + if (!spi->cs_gpiod) reg |= spi->chip_select << MX21_CSPICTRL_CS_SHIFT; writel(reg, spi_imx->base + MXC_CSPICTRL); @@ -1056,20 +1054,6 @@ static const struct of_device_id spi_imx_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, spi_imx_dt_ids); -static void spi_imx_chipselect(struct spi_device *spi, int is_active) -{ - int active = is_active != BITBANG_CS_INACTIVE; - int dev_is_lowactive = !(spi->mode & SPI_CS_HIGH); - - if (spi->mode & SPI_NO_CS) - return; - - if (!gpio_is_valid(spi->cs_gpio)) - return; - - gpio_set_value(spi->cs_gpio, dev_is_lowactive ^ active); -} - static void spi_imx_set_burst_len(struct spi_imx_data *spi_imx, int n_bits) { u32 ctrl; @@ -1533,15 +1517,6 @@ static int spi_imx_setup(struct spi_device *spi) dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__, spi->mode, spi->bits_per_word, spi->max_speed_hz); - if (spi->mode & SPI_NO_CS) - return 0; - - if (gpio_is_valid(spi->cs_gpio)) - gpio_direction_output(spi->cs_gpio, - spi->mode & SPI_CS_HIGH ? 0 : 1); - - spi_imx_chipselect(spi, BITBANG_CS_INACTIVE); - return 0; } @@ -1599,20 +1574,14 @@ static int spi_imx_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; const struct of_device_id *of_id = of_match_device(spi_imx_dt_ids, &pdev->dev); - struct spi_imx_master *mxc_platform_info = - dev_get_platdata(&pdev->dev); struct spi_master *master; struct spi_imx_data *spi_imx; struct resource *res; - int i, ret, irq, spi_drctl; + int ret, irq, spi_drctl; const struct spi_imx_devtype_data *devtype_data = of_id ? of_id->data : (struct spi_imx_devtype_data *)pdev->id_entry->driver_data; bool slave_mode; - - if (!np && !mxc_platform_info) { - dev_err(&pdev->dev, "can't get the platform data\n"); - return -EINVAL; - } + u32 val; slave_mode = devtype_data->has_slavemode && of_property_read_bool(np, "spi-slave"); @@ -1635,6 +1604,7 @@ static int spi_imx_probe(struct platform_device *pdev) master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32); master->bus_num = np ? -1 : pdev->id; + master->use_gpio_descriptors = true; spi_imx = spi_master_get_devdata(master); spi_imx->bitbang.master = master; @@ -1643,28 +1613,17 @@ static int spi_imx_probe(struct platform_device *pdev) spi_imx->devtype_data = devtype_data; - /* Get number of chip selects, either platform data or OF */ - if (mxc_platform_info) { - master->num_chipselect = mxc_platform_info->num_chipselect; - if (mxc_platform_info->chipselect) { - master->cs_gpios = devm_kcalloc(&master->dev, - master->num_chipselect, sizeof(int), - GFP_KERNEL); - if (!master->cs_gpios) - return -ENOMEM; - - for (i = 0; i < master->num_chipselect; i++) - master->cs_gpios[i] = mxc_platform_info->chipselect[i]; - } - } else { - u32 num_cs; - - if (!of_property_read_u32(np, "num-cs", &num_cs)) - master->num_chipselect = num_cs; - /* If not preset, default value of 1 is used */ - } + /* + * Get number of chip selects from device properties. This can be + * coming from device tree or boardfiles, if it is not defined, + * a default value of 3 chip selects will be used, as all the legacy + * board files have <= 3 chip selects. 
+ */ + if (!device_property_read_u32(&pdev->dev, "num-cs", &val)) + master->num_chipselect = val; + else + master->num_chipselect = 3; - spi_imx->bitbang.chipselect = spi_imx_chipselect; spi_imx->bitbang.setup_transfer = spi_imx_setupxfer; spi_imx->bitbang.txrx_bufs = spi_imx_transfer; spi_imx->bitbang.master->setup = spi_imx_setup; @@ -1749,31 +1708,12 @@ static int spi_imx_probe(struct platform_device *pdev) goto out_clk_put; } - /* Request GPIO CS lines, if any */ - if (!spi_imx->slave_mode && master->cs_gpios) { - for (i = 0; i < master->num_chipselect; i++) { - if (!gpio_is_valid(master->cs_gpios[i])) - continue; - - ret = devm_gpio_request(&pdev->dev, - master->cs_gpios[i], - DRIVER_NAME); - if (ret) { - dev_err(&pdev->dev, "Can't get CS GPIO %i\n", - master->cs_gpios[i]); - goto out_spi_bitbang; - } - } - } - dev_info(&pdev->dev, "probed\n"); clk_disable(spi_imx->clk_ipg); clk_disable(spi_imx->clk_per); return ret; -out_spi_bitbang: - spi_bitbang_stop(&spi_imx->bitbang); out_clk_put: clk_disable_unprepare(spi_imx->clk_ipg); out_put_per: diff --git a/include/linux/platform_data/spi-imx.h b/include/linux/platform_data/spi-imx.h deleted file mode 100644 index 328f670d10bd..000000000000 --- a/include/linux/platform_data/spi-imx.h +++ /dev/null @@ -1,33 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -#ifndef __MACH_SPI_H_ -#define __MACH_SPI_H_ - -/* - * struct spi_imx_master - device.platform_data for SPI controller devices. - * @chipselect: Array of chipselects for this master or NULL. Numbers >= 0 - * mean GPIO pins, -ENOENT means internal CSPI chipselect - * matching the position in the array. E.g., if chipselect[1] = - * -ENOENT then a SPI slave using chip select 1 will use the - * native SS1 line of the CSPI. Omitting the array will use - * all native chip selects. - - * Normally you want to use gpio based chip selects as the CSPI - * module tries to be intelligent about when to assert the - * chipselect: The CSPI module deasserts the chipselect once it - * runs out of input data. The other problem is that it is not - * possible to mix between high active and low active chipselects - * on one single bus using the internal chipselects. - * Unfortunately, on some SoCs, Freescale decided to put some - * chipselects on dedicated pins which are not usable as gpios, - * so we have to support the internal chipselects. - * - * @num_chipselect: If @chipselect is specified, ARRAY_SIZE(chipselect), - * otherwise the number of native chip selects. - */ -struct spi_imx_master { - int *chipselect; - int num_chipselect; -}; - -#endif /* __MACH_SPI_H_*/ -- cgit v1.2.3 From 5a2798ab32ba2952cfe25701ee460bccbd434c75 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sat, 11 Jul 2020 23:53:23 +0200 Subject: bpf: Add BTF_ID_LIST/BTF_ID/BTF_ID_UNUSED macros Adding support to generate .BTF_ids section that will hold BTF ID lists for verifier. Adding macros that will help to define lists of BTF ID values placed in .BTF_ids section. They are initially filled with zeros (during compilation) and resolved later during the linking phase by resolve_btfids tool. Following defines list of one BTF ID value: BTF_ID_LIST(bpf_skb_output_btf_ids) BTF_ID(struct, sk_buff) It also defines following variable to access the list: extern u32 bpf_skb_output_btf_ids[]; The BTF_ID_UNUSED macro defines 4 zero bytes. 
It's used when we want to define 'unused' entry in BTF_ID_LIST, like: BTF_ID_LIST(bpf_skb_output_btf_ids) BTF_ID(struct, sk_buff) BTF_ID_UNUSED BTF_ID(struct, task_struct) Suggested-by: Andrii Nakryiko Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Tested-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200711215329.41165-4-jolsa@kernel.org --- include/asm-generic/vmlinux.lds.h | 4 ++ include/linux/btf_ids.h | 87 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 91 insertions(+) create mode 100644 include/linux/btf_ids.h (limited to 'include') diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index db600ef218d7..0be2ee265931 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -641,6 +641,10 @@ __start_BTF = .; \ *(.BTF) \ __stop_BTF = .; \ + } \ + . = ALIGN(4); \ + .BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) { \ + *(.BTF_ids) \ } #else #define BTF diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h new file mode 100644 index 000000000000..fe019774f8a7 --- /dev/null +++ b/include/linux/btf_ids.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _LINUX_BTF_IDS_H +#define _LINUX_BTF_IDS_H + +#include /* for __PASTE */ + +/* + * Following macros help to define lists of BTF IDs placed + * in .BTF_ids section. They are initially filled with zeros + * (during compilation) and resolved later during the + * linking phase by resolve_btfids tool. + * + * Any change in list layout must be reflected in resolve_btfids + * tool logic. + */ + +#define BTF_IDS_SECTION ".BTF_ids" + +#define ____BTF_ID(symbol) \ +asm( \ +".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ +".local " #symbol " ; \n" \ +".type " #symbol ", @object; \n" \ +".size " #symbol ", 4; \n" \ +#symbol ": \n" \ +".zero 4 \n" \ +".popsection; \n"); + +#define __BTF_ID(symbol) \ + ____BTF_ID(symbol) + +#define __ID(prefix) \ + __PASTE(prefix, __COUNTER__) + +/* + * The BTF_ID defines unique symbol for each ID pointing + * to 4 zero bytes. + */ +#define BTF_ID(prefix, name) \ + __BTF_ID(__ID(__BTF_ID__##prefix##__##name##__)) + +/* + * The BTF_ID_LIST macro defines pure (unsorted) list + * of BTF IDs, with following layout: + * + * BTF_ID_LIST(list1) + * BTF_ID(type1, name1) + * BTF_ID(type2, name2) + * + * list1: + * __BTF_ID__type1__name1__1: + * .zero 4 + * __BTF_ID__type2__name2__2: + * .zero 4 + * + */ +#define __BTF_ID_LIST(name) \ +asm( \ +".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ +".local " #name "; \n" \ +#name ":; \n" \ +".popsection; \n"); \ + +#define BTF_ID_LIST(name) \ +__BTF_ID_LIST(name) \ +extern u32 name[]; + +/* + * The BTF_ID_UNUSED macro defines 4 zero bytes. + * It's used when we want to define 'unused' entry + * in BTF_ID_LIST, like: + * + * BTF_ID_LIST(bpf_skb_output_btf_ids) + * BTF_ID(struct, sk_buff) + * BTF_ID_UNUSED + * BTF_ID(struct, task_struct) + */ + +#define BTF_ID_UNUSED \ +asm( \ +".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ +".zero 4 \n" \ +".popsection; \n"); + + +#endif -- cgit v1.2.3 From d9539752d23283db4692384a634034f451261e29 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 9 Jun 2020 16:11:29 -0700 Subject: net/compat: Add missing sock updates for SCM_RIGHTS Add missed sock updates to compat path via a new helper, which will be used more in coming patches. (The net/core/scm.c code is left as-is here to assist with -stable backports for the compat path.) 
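For context, the path being fixed is exercised when userspace passes a socket over a UNIX domain socket with SCM_RIGHTS; an illustrative userspace sender looks roughly like this ('unix_sock' and 'sockfd_to_send' are assumed to exist, and the snippet is not part of the patch):

	char cbuf[CMSG_SPACE(sizeof(int))];
	char dummy = 'x';
	struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &sockfd_to_send, sizeof(int));
	sendmsg(unix_sock, &msg, 0);

On the receiving side (including the compat path touched here) the kernel installs the new fd and, with this change, also updates the netprio/classid state of the received socket.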
Cc: Christoph Hellwig Cc: Sargun Dhillon Cc: Jakub Kicinski Cc: stable@vger.kernel.org Fixes: 48a87cc26c13 ("net: netprio: fd passed in SCM_RIGHTS datagram not set correctly") Fixes: d84295067fc7 ("net: net_cls: fd passed in SCM_RIGHTS datagram not set correctly") Acked-by: Christian Brauner Signed-off-by: Kees Cook --- include/net/sock.h | 4 ++++ net/compat.c | 1 + net/core/sock.c | 21 +++++++++++++++++++++ 3 files changed, 26 insertions(+) (limited to 'include') diff --git a/include/net/sock.h b/include/net/sock.h index c53cc42b5ab9..2be67f1ee8b1 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -890,6 +890,8 @@ static inline int sk_memalloc_socks(void) { return static_branch_unlikely(&memalloc_socks_key); } + +void __receive_sock(struct file *file); #else static inline int sk_memalloc_socks(void) @@ -897,6 +899,8 @@ static inline int sk_memalloc_socks(void) return 0; } +static inline void __receive_sock(struct file *file) +{ } #endif static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask) diff --git a/net/compat.c b/net/compat.c index 5e3041a2c37d..2937b816107d 100644 --- a/net/compat.c +++ b/net/compat.c @@ -309,6 +309,7 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm) break; } /* Bump the usage count and install the file. */ + __receive_sock(fp[i]); fd_install(new_fd, get_file(fp[i])); } diff --git a/net/core/sock.c b/net/core/sock.c index 6c4acf1f0220..bde394979041 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2840,6 +2840,27 @@ int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct * } EXPORT_SYMBOL(sock_no_mmap); +/* + * When a file is received (via SCM_RIGHTS, etc), we must bump the + * various sock-based usage counts. + */ +void __receive_sock(struct file *file) +{ + struct socket *sock; + int error; + + /* + * The resulting value of "error" is ignored here since we only + * need to take action when the file is a socket and testing + * "sock" for NULL is sufficient. + */ + sock = sock_from_file(file, &error); + if (sock) { + sock_update_netprioidx(&sock->sk->sk_cgrp_data); + sock_update_classid(&sock->sk->sk_cgrp_data); + } +} + ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) { ssize_t res; -- cgit v1.2.3 From c0029de50982c1fb215330a5f9d433cec0cfd8cc Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 9 Jun 2020 16:11:29 -0700 Subject: net/scm: Regularize compat handling of scm_detach_fds() Duplicate the cleanups from commit 2618d530dd8b ("net/scm: cleanup scm_detach_fds") into the compat code. Replace open-coded __receive_sock() with a call to the helper. Move the check added in commit 1f466e1f15cf ("net: cleanly handle kernel vs user buffers for ->msg_control") to before the compat call, even though it should be impossible for an in-kernel call to also be compat. Correct the int "flags" argument to unsigned int to match fd_install() and similar APIs. Regularize any remaining differences, including a whitespace issue, a checkpatch warning, and add the check from commit 6900317f5eff ("net, scm: fix PaX detected msg_controllen overflow in scm_detach_fds") which fixed an overflow unique to 64-bit. To avoid confusion when comparing the compat handler to the native handler, just include the same check in the compat handler. 
Cc: Christoph Hellwig Cc: Sargun Dhillon Cc: Jakub Kicinski Cc: netdev@vger.kernel.org Cc: linux-kernel@vger.kernel.org Acked-by: Christian Brauner Signed-off-by: Kees Cook --- include/net/scm.h | 1 + net/compat.c | 56 +++++++++++++++++++++++++------------------------------ net/core/scm.c | 27 +++++++++++---------------- 3 files changed, 37 insertions(+), 47 deletions(-) (limited to 'include') diff --git a/include/net/scm.h b/include/net/scm.h index 1ce365f4c256..581a94d6c613 100644 --- a/include/net/scm.h +++ b/include/net/scm.h @@ -37,6 +37,7 @@ struct scm_cookie { #endif }; +int __scm_install_fd(struct file *file, int __user *ufd, unsigned int o_flags); void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm); void scm_detach_fds_compat(struct msghdr *msg, struct scm_cookie *scm); int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm); diff --git a/net/compat.c b/net/compat.c index 2937b816107d..27d477fdcaa0 100644 --- a/net/compat.c +++ b/net/compat.c @@ -281,40 +281,31 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat return 0; } -void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm) +static int scm_max_fds_compat(struct msghdr *msg) { - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control; - int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int); - int fdnum = scm->fp->count; - struct file **fp = scm->fp->fp; - int __user *cmfptr; - int err = 0, i; + if (msg->msg_controllen <= sizeof(struct compat_cmsghdr)) + return 0; + return (msg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int); +} - if (fdnum < fdmax) - fdmax = fdnum; +void scm_detach_fds_compat(struct msghdr *msg, struct scm_cookie *scm) +{ + struct compat_cmsghdr __user *cm = + (struct compat_cmsghdr __user *)msg->msg_control; + unsigned int o_flags = (msg->msg_flags & MSG_CMSG_CLOEXEC) ? O_CLOEXEC : 0; + int fdmax = min_t(int, scm_max_fds_compat(msg), scm->fp->count); + int __user *cmsg_data = CMSG_USER_DATA(cm); + int err = 0, i; - for (i = 0, cmfptr = (int __user *) CMSG_COMPAT_DATA(cm); i < fdmax; i++, cmfptr++) { - int new_fd; - err = security_file_receive(fp[i]); + for (i = 0; i < fdmax; i++) { + err = __scm_install_fd(scm->fp->fp[i], cmsg_data + i, o_flags); if (err) break; - err = get_unused_fd_flags(MSG_CMSG_CLOEXEC & kmsg->msg_flags - ? O_CLOEXEC : 0); - if (err < 0) - break; - new_fd = err; - err = put_user(new_fd, cmfptr); - if (err) { - put_unused_fd(new_fd); - break; - } - /* Bump the usage count and install the file. */ - __receive_sock(fp[i]); - fd_install(new_fd, get_file(fp[i])); } if (i > 0) { int cmlen = CMSG_COMPAT_LEN(i * sizeof(int)); + err = put_user(SOL_SOCKET, &cm->cmsg_level); if (!err) err = put_user(SCM_RIGHTS, &cm->cmsg_type); @@ -322,16 +313,19 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm) err = put_user(cmlen, &cm->cmsg_len); if (!err) { cmlen = CMSG_COMPAT_SPACE(i * sizeof(int)); - kmsg->msg_control += cmlen; - kmsg->msg_controllen -= cmlen; + if (msg->msg_controllen < cmlen) + cmlen = msg->msg_controllen; + msg->msg_control += cmlen; + msg->msg_controllen -= cmlen; } } - if (i < fdnum) - kmsg->msg_flags |= MSG_CTRUNC; + + if (i < scm->fp->count || (scm->fp->count && fdmax <= 0)) + msg->msg_flags |= MSG_CTRUNC; /* - * All of the files that fit in the message have had their - * usage counts incremented, so we just free the list. 
+ * All of the files that fit in the message have had their usage counts + * incremented, so we just free the list. */ __scm_destroy(scm); } diff --git a/net/core/scm.c b/net/core/scm.c index 875df1c2989d..44f03213dcab 100644 --- a/net/core/scm.c +++ b/net/core/scm.c @@ -280,9 +280,8 @@ void put_cmsg_scm_timestamping(struct msghdr *msg, struct scm_timestamping_inter } EXPORT_SYMBOL(put_cmsg_scm_timestamping); -static int __scm_install_fd(struct file *file, int __user *ufd, int o_flags) +int __scm_install_fd(struct file *file, int __user *ufd, unsigned int o_flags) { - struct socket *sock; int new_fd; int error; @@ -300,12 +299,8 @@ static int __scm_install_fd(struct file *file, int __user *ufd, int o_flags) return error; } - /* Bump the usage count and install the file. */ - sock = sock_from_file(file, &error); - if (sock) { - sock_update_netprioidx(&sock->sk->sk_cgrp_data); - sock_update_classid(&sock->sk->sk_cgrp_data); - } + /* Bump the sock usage counts, if any. */ + __receive_sock(file); fd_install(new_fd, get_file(file)); return 0; } @@ -319,29 +314,29 @@ static int scm_max_fds(struct msghdr *msg) void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm) { - struct cmsghdr __user *cm - = (__force struct cmsghdr __user*)msg->msg_control; - int o_flags = (msg->msg_flags & MSG_CMSG_CLOEXEC) ? O_CLOEXEC : 0; + struct cmsghdr __user *cm = + (__force struct cmsghdr __user *)msg->msg_control; + unsigned int o_flags = (msg->msg_flags & MSG_CMSG_CLOEXEC) ? O_CLOEXEC : 0; int fdmax = min_t(int, scm_max_fds(msg), scm->fp->count); int __user *cmsg_data = CMSG_USER_DATA(cm); int err = 0, i; + /* no use for FD passing from kernel space callers */ + if (WARN_ON_ONCE(!msg->msg_control_is_user)) + return; + if (msg->msg_flags & MSG_CMSG_COMPAT) { scm_detach_fds_compat(msg, scm); return; } - /* no use for FD passing from kernel space callers */ - if (WARN_ON_ONCE(!msg->msg_control_is_user)) - return; - for (i = 0; i < fdmax; i++) { err = __scm_install_fd(scm->fp->fp[i], cmsg_data + i, o_flags); if (err) break; } - if (i > 0) { + if (i > 0) { int cmlen = CMSG_LEN(i * sizeof(int)); err = put_user(SOL_SOCKET, &cm->cmsg_level); -- cgit v1.2.3 From 6659061045cc93f609e100b128f30581e5f012e9 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 10 Jun 2020 08:20:05 -0700 Subject: fs: Move __scm_install_fd() to __receive_fd() In preparation for users of the "install a received file" logic outside of net/ (pidfd and seccomp), relocate and rename __scm_install_fd() from net/core/scm.c to __receive_fd() in fs/file.c, and provide a wrapper named receive_fd_user(), as future patches will change the interface to __receive_fd(). Additionally add a comment to fd_install() as a counterpoint to how __receive_fd() interacts with fput(). Cc: Alexander Viro Cc: "David S. 
Miller" Cc: Jakub Kicinski Cc: Dmitry Kadashev Cc: Jens Axboe Cc: Arnd Bergmann Cc: Sargun Dhillon Cc: Ido Schimmel Cc: Ioana Ciornei Cc: linux-fsdevel@vger.kernel.org Cc: netdev@vger.kernel.org Reviewed-by: Sargun Dhillon Acked-by: Christian Brauner Signed-off-by: Kees Cook --- fs/file.c | 45 +++++++++++++++++++++++++++++++++++++++++++++ include/linux/file.h | 8 ++++++++ include/net/scm.h | 1 - net/compat.c | 2 +- net/core/scm.c | 27 +-------------------------- 5 files changed, 55 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/fs/file.c b/fs/file.c index abb8b7081d7a..0cd598cab476 100644 --- a/fs/file.c +++ b/fs/file.c @@ -18,6 +18,7 @@ #include #include #include +#include unsigned int sysctl_nr_open __read_mostly = 1024*1024; unsigned int sysctl_nr_open_min = BITS_PER_LONG; @@ -613,6 +614,10 @@ void __fd_install(struct files_struct *files, unsigned int fd, rcu_read_unlock_sched(); } +/* + * This consumes the "file" refcount, so callers should treat it + * as if they had called fput(file). + */ void fd_install(unsigned int fd, struct file *file) { __fd_install(current->files, fd, file); @@ -931,6 +936,46 @@ out_unlock: return err; } +/** + * __receive_fd() - Install received file into file descriptor table + * + * @file: struct file that was received from another process + * @ufd: __user pointer to write new fd number to + * @o_flags: the O_* flags to apply to the new fd entry + * + * Installs a received file into the file descriptor table, with appropriate + * checks and count updates. Writes the fd number to userspace. + * + * This helper handles its own reference counting of the incoming + * struct file. + * + * Returns -ve on error. + */ +int __receive_fd(struct file *file, int __user *ufd, unsigned int o_flags) +{ + int new_fd; + int error; + + error = security_file_receive(file); + if (error) + return error; + + new_fd = get_unused_fd_flags(o_flags); + if (new_fd < 0) + return new_fd; + + error = put_user(new_fd, ufd); + if (error) { + put_unused_fd(new_fd); + return error; + } + + /* Bump the sock usage counts, if any. 
*/ + __receive_sock(file); + fd_install(new_fd, get_file(file)); + return 0; +} + static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags) { int err = -EBADF; diff --git a/include/linux/file.h b/include/linux/file.h index 122f80084a3e..b14ff2ffd0bd 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -91,6 +91,14 @@ extern void put_unused_fd(unsigned int fd); extern void fd_install(unsigned int fd, struct file *file); +extern int __receive_fd(struct file *file, int __user *ufd, + unsigned int o_flags); +static inline int receive_fd_user(struct file *file, int __user *ufd, + unsigned int o_flags) +{ + return __receive_fd(file, ufd, o_flags); +} + extern void flush_delayed_fput(void); extern void __fput_sync(struct file *); diff --git a/include/net/scm.h b/include/net/scm.h index 581a94d6c613..1ce365f4c256 100644 --- a/include/net/scm.h +++ b/include/net/scm.h @@ -37,7 +37,6 @@ struct scm_cookie { #endif }; -int __scm_install_fd(struct file *file, int __user *ufd, unsigned int o_flags); void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm); void scm_detach_fds_compat(struct msghdr *msg, struct scm_cookie *scm); int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm); diff --git a/net/compat.c b/net/compat.c index 27d477fdcaa0..e74cd3dae8b0 100644 --- a/net/compat.c +++ b/net/compat.c @@ -298,7 +298,7 @@ void scm_detach_fds_compat(struct msghdr *msg, struct scm_cookie *scm) int err = 0, i; for (i = 0; i < fdmax; i++) { - err = __scm_install_fd(scm->fp->fp[i], cmsg_data + i, o_flags); + err = receive_fd_user(scm->fp->fp[i], cmsg_data + i, o_flags); if (err) break; } diff --git a/net/core/scm.c b/net/core/scm.c index 44f03213dcab..67c166a7820d 100644 --- a/net/core/scm.c +++ b/net/core/scm.c @@ -280,31 +280,6 @@ void put_cmsg_scm_timestamping(struct msghdr *msg, struct scm_timestamping_inter } EXPORT_SYMBOL(put_cmsg_scm_timestamping); -int __scm_install_fd(struct file *file, int __user *ufd, unsigned int o_flags) -{ - int new_fd; - int error; - - error = security_file_receive(file); - if (error) - return error; - - new_fd = get_unused_fd_flags(o_flags); - if (new_fd < 0) - return new_fd; - - error = put_user(new_fd, ufd); - if (error) { - put_unused_fd(new_fd); - return error; - } - - /* Bump the sock usage counts, if any. */ - __receive_sock(file); - fd_install(new_fd, get_file(file)); - return 0; -} - static int scm_max_fds(struct msghdr *msg) { if (msg->msg_controllen <= sizeof(struct cmsghdr)) @@ -331,7 +306,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm) } for (i = 0; i < fdmax; i++) { - err = __scm_install_fd(scm->fp->fp[i], cmsg_data + i, o_flags); + err = receive_fd_user(scm->fp->fp[i], cmsg_data + i, o_flags); if (err) break; } -- cgit v1.2.3 From deefa7f3505ae2fb6a7cb75f50134b65a1dd1494 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 10 Jun 2020 20:47:45 -0700 Subject: fs: Add receive_fd() wrapper for __receive_fd() For both pidfd and seccomp, the __user pointer is not used. Update __receive_fd() to make writing to ufd optional via a NULL check. However, for the receive_fd_user() wrapper, ufd is NULL checked so an -EFAULT can be returned to avoid changing the SCM_RIGHTS interface behavior. Add new wrapper receive_fd() for pidfd and seccomp that does not use the ufd argument. For the new helper, the allocated fd needs to be returned on success. Update the existing callers to handle it. 
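Illustrative use of the new wrapper by an in-kernel consumer (the caller is hypothetical; pidfd and seccomp are the intended users):

	int fd = receive_fd(file, O_CLOEXEC);

	if (fd < 0)
		return fd;	/* nothing was installed */
	/* success: fd is the newly allocated descriptor number */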
Cc: Alexander Viro Cc: linux-fsdevel@vger.kernel.org Reviewed-by: Sargun Dhillon Acked-by: Christian Brauner Signed-off-by: Kees Cook --- fs/file.c | 17 ++++++++++------- include/linux/file.h | 7 +++++++ net/compat.c | 2 +- net/core/scm.c | 2 +- 4 files changed, 19 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/fs/file.c b/fs/file.c index 0cd598cab476..56d96d5c0c9f 100644 --- a/fs/file.c +++ b/fs/file.c @@ -944,12 +944,13 @@ out_unlock: * @o_flags: the O_* flags to apply to the new fd entry * * Installs a received file into the file descriptor table, with appropriate - * checks and count updates. Writes the fd number to userspace. + * checks and count updates. Optionally writes the fd number to userspace, if + * @ufd is non-NULL. * * This helper handles its own reference counting of the incoming * struct file. * - * Returns -ve on error. + * Returns newly install fd or -ve on error. */ int __receive_fd(struct file *file, int __user *ufd, unsigned int o_flags) { @@ -964,16 +965,18 @@ int __receive_fd(struct file *file, int __user *ufd, unsigned int o_flags) if (new_fd < 0) return new_fd; - error = put_user(new_fd, ufd); - if (error) { - put_unused_fd(new_fd); - return error; + if (ufd) { + error = put_user(new_fd, ufd); + if (error) { + put_unused_fd(new_fd); + return error; + } } /* Bump the sock usage counts, if any. */ __receive_sock(file); fd_install(new_fd, get_file(file)); - return 0; + return new_fd; } static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags) diff --git a/include/linux/file.h b/include/linux/file.h index b14ff2ffd0bd..d9fee9f5c8da 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -9,6 +9,7 @@ #include #include #include +#include struct file; @@ -96,8 +97,14 @@ extern int __receive_fd(struct file *file, int __user *ufd, static inline int receive_fd_user(struct file *file, int __user *ufd, unsigned int o_flags) { + if (ufd == NULL) + return -EFAULT; return __receive_fd(file, ufd, o_flags); } +static inline int receive_fd(struct file *file, unsigned int o_flags) +{ + return __receive_fd(file, NULL, o_flags); +} extern void flush_delayed_fput(void); extern void __fput_sync(struct file *); diff --git a/net/compat.c b/net/compat.c index e74cd3dae8b0..dc7ddbc2b15e 100644 --- a/net/compat.c +++ b/net/compat.c @@ -299,7 +299,7 @@ void scm_detach_fds_compat(struct msghdr *msg, struct scm_cookie *scm) for (i = 0; i < fdmax; i++) { err = receive_fd_user(scm->fp->fp[i], cmsg_data + i, o_flags); - if (err) + if (err < 0) break; } diff --git a/net/core/scm.c b/net/core/scm.c index 67c166a7820d..8156d4fb8a39 100644 --- a/net/core/scm.c +++ b/net/core/scm.c @@ -307,7 +307,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm) for (i = 0; i < fdmax; i++) { err = receive_fd_user(scm->fp->fp[i], cmsg_data + i, o_flags); - if (err) + if (err < 0) break; } -- cgit v1.2.3 From 173817151b15d5a72a9bef1d2df7e6e7f6750f2e Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 10 Jun 2020 08:46:58 -0700 Subject: fs: Expand __receive_fd() to accept existing fd Expand __receive_fd() with support for replace_fd() for the coming seccomp "addfd" ioctl(). Add new wrapper receive_fd_replace() for the new behavior and update existing wrappers to retain old behavior. Thanks to Colin Ian King for pointing out an uninitialized variable exposure in an earlier version of this patch. 
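A hypothetical usage sketch, not taken from the patch: the coming seccomp "addfd" ioctl could choose between replacing a caller-chosen descriptor and allocating a fresh one. addfd_install() is an invented name; the helpers themselves are the ones added below.

        /* Hypothetical sketch only -- not part of this patch. */
        static int addfd_install(struct file *file, int want_fd, unsigned int o_flags)
        {
                if (want_fd >= 0)
                        /* install over an existing, caller-chosen fd number */
                        return receive_fd_replace(want_fd, file, o_flags);

                /* otherwise allocate the next free descriptor */
                return receive_fd(file, o_flags);
        }
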
Cc: Alexander Viro Cc: Dmitry Kadashev Cc: Jens Axboe Cc: Arnd Bergmann Cc: linux-fsdevel@vger.kernel.org Reviewed-by: Sargun Dhillon Acked-by: Christian Brauner Signed-off-by: Kees Cook --- fs/file.c | 25 +++++++++++++++++++------ include/linux/file.h | 10 +++++++--- 2 files changed, 26 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/fs/file.c b/fs/file.c index 56d96d5c0c9f..4fb111735d1d 100644 --- a/fs/file.c +++ b/fs/file.c @@ -939,6 +939,7 @@ out_unlock: /** * __receive_fd() - Install received file into file descriptor table * + * @fd: fd to install into (if negative, a new fd will be allocated) * @file: struct file that was received from another process * @ufd: __user pointer to write new fd number to * @o_flags: the O_* flags to apply to the new fd entry @@ -952,7 +953,7 @@ out_unlock: * * Returns newly install fd or -ve on error. */ -int __receive_fd(struct file *file, int __user *ufd, unsigned int o_flags) +int __receive_fd(int fd, struct file *file, int __user *ufd, unsigned int o_flags) { int new_fd; int error; @@ -961,21 +962,33 @@ int __receive_fd(struct file *file, int __user *ufd, unsigned int o_flags) if (error) return error; - new_fd = get_unused_fd_flags(o_flags); - if (new_fd < 0) - return new_fd; + if (fd < 0) { + new_fd = get_unused_fd_flags(o_flags); + if (new_fd < 0) + return new_fd; + } else { + new_fd = fd; + } if (ufd) { error = put_user(new_fd, ufd); if (error) { - put_unused_fd(new_fd); + if (fd < 0) + put_unused_fd(new_fd); return error; } } + if (fd < 0) { + fd_install(new_fd, get_file(file)); + } else { + error = replace_fd(new_fd, file, o_flags); + if (error) + return error; + } + /* Bump the sock usage counts, if any. */ __receive_sock(file); - fd_install(new_fd, get_file(file)); return new_fd; } diff --git a/include/linux/file.h b/include/linux/file.h index d9fee9f5c8da..225982792fa2 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -92,18 +92,22 @@ extern void put_unused_fd(unsigned int fd); extern void fd_install(unsigned int fd, struct file *file); -extern int __receive_fd(struct file *file, int __user *ufd, +extern int __receive_fd(int fd, struct file *file, int __user *ufd, unsigned int o_flags); static inline int receive_fd_user(struct file *file, int __user *ufd, unsigned int o_flags) { if (ufd == NULL) return -EFAULT; - return __receive_fd(file, ufd, o_flags); + return __receive_fd(-1, file, ufd, o_flags); } static inline int receive_fd(struct file *file, unsigned int o_flags) { - return __receive_fd(file, NULL, o_flags); + return __receive_fd(-1, file, NULL, o_flags); +} +static inline int receive_fd_replace(int fd, struct file *file, unsigned int o_flags) +{ + return __receive_fd(fd, file, NULL, o_flags); } extern void flush_delayed_fput(void); -- cgit v1.2.3 From c1326210477ecc06c53221f0005c64419aba30d6 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Tue, 23 Jun 2020 22:39:20 +0000 Subject: nfs,nfsd: NFSv4.2 extended attribute protocol definitions Add definitions for the new operations, errors and flags as defined in RFC 8276 (File System Extended Attributes in NFSv4). 
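As an illustration only (not part of the patch): the new setxattr options are defined to match the setxattr(2) flags, so translating them is a one-to-one mapping. nfs4_setxattr_flags() is an invented name for this sketch.

        /* Illustrative sketch only -- not part of this patch. */
        static int nfs4_setxattr_flags(enum nfs4_setxattr_options opt)
        {
                switch (opt) {
                case SETXATTR4_CREATE:
                        return XATTR_CREATE;    /* fail if the attribute exists */
                case SETXATTR4_REPLACE:
                        return XATTR_REPLACE;   /* fail if it does not exist */
                case SETXATTR4_EITHER:
                default:
                        return 0;               /* create or replace as needed */
                }
        }
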
Signed-off-by: Frank van der Linden Signed-off-by: Chuck Lever --- include/linux/nfs4.h | 20 ++++++++++++++++++++ include/uapi/linux/nfs4.h | 3 +++ 2 files changed, 23 insertions(+) (limited to 'include') diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 4dba3c948932..e6ca9d1d2e76 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -150,6 +150,12 @@ enum nfs_opnum4 { OP_WRITE_SAME = 70, OP_CLONE = 71, + /* xattr support (RFC8726) */ + OP_GETXATTR = 72, + OP_SETXATTR = 73, + OP_LISTXATTRS = 74, + OP_REMOVEXATTR = 75, + OP_ILLEGAL = 10044, }; @@ -280,6 +286,10 @@ enum nfsstat4 { NFS4ERR_WRONG_LFS = 10092, NFS4ERR_BADLABEL = 10093, NFS4ERR_OFFLOAD_NO_REQS = 10094, + + /* xattr (RFC8276) */ + NFS4ERR_NOXATTR = 10095, + NFS4ERR_XATTR2BIG = 10096, }; static inline bool seqid_mutating_err(u32 err) @@ -452,6 +462,7 @@ enum change_attr_type4 { #define FATTR4_WORD2_CHANGE_ATTR_TYPE (1UL << 15) #define FATTR4_WORD2_SECURITY_LABEL (1UL << 16) #define FATTR4_WORD2_MODE_UMASK (1UL << 17) +#define FATTR4_WORD2_XATTR_SUPPORT (1UL << 18) /* MDS threshold bitmap bits */ #define THRESHOLD_RD (1UL << 0) @@ -700,4 +711,13 @@ struct nl4_server { struct nfs42_netaddr nl4_addr; /* NL4_NETADDR */ } u; }; + +/* + * Options for setxattr. These match the flags for setxattr(2). + */ +enum nfs4_setxattr_options { + SETXATTR4_EITHER = 0, + SETXATTR4_CREATE = 1, + SETXATTR4_REPLACE = 2, +}; #endif diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h index 8572930cf5b0..bf197e99b98f 100644 --- a/include/uapi/linux/nfs4.h +++ b/include/uapi/linux/nfs4.h @@ -33,6 +33,9 @@ #define NFS4_ACCESS_EXTEND 0x0008 #define NFS4_ACCESS_DELETE 0x0010 #define NFS4_ACCESS_EXECUTE 0x0020 +#define NFS4_ACCESS_XAREAD 0x0040 +#define NFS4_ACCESS_XAWRITE 0x0080 +#define NFS4_ACCESS_XALIST 0x0100 #define NFS4_FH_PERSISTENT 0x0000 #define NFS4_FH_NOEXPIRE_WITH_OPEN 0x0001 -- cgit v1.2.3 From 08b5d5014a27e717826999ad20e394a8811aae92 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Tue, 23 Jun 2020 22:39:18 +0000 Subject: xattr: break delegations in {set,remove}xattr set/removexattr on an exported filesystem should break NFS delegations. This is true in general, but also for the upcoming support for RFC 8726 (NFSv4 extended attribute support). Make sure that they do. Additionally, they need to grow a _locked variant, since callers might call this with i_rwsem held (like the NFS server code). Cc: stable@vger.kernel.org # v4.9+ Cc: linux-fsdevel@vger.kernel.org Cc: Al Viro Signed-off-by: Frank van der Linden Signed-off-by: Chuck Lever --- fs/xattr.c | 84 ++++++++++++++++++++++++++++++++++++++++++++++----- include/linux/xattr.h | 2 ++ 2 files changed, 79 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/fs/xattr.c b/fs/xattr.c index 91608d9bfc6a..95f38f57347f 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -204,10 +204,22 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name, return error; } - +/** + * __vfs_setxattr_locked: set an extended attribute while holding the inode + * lock + * + * @dentry - object to perform setxattr on + * @name - xattr name to set + * @value - value to set @name to + * @size - size of @value + * @flags - flags to pass into filesystem operations + * @delegated_inode - on return, will contain an inode pointer that + * a delegation was broken on, NULL if none. 
+ */ int -vfs_setxattr(struct dentry *dentry, const char *name, const void *value, - size_t size, int flags) +__vfs_setxattr_locked(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags, + struct inode **delegated_inode) { struct inode *inode = dentry->d_inode; int error; @@ -216,15 +228,40 @@ vfs_setxattr(struct dentry *dentry, const char *name, const void *value, if (error) return error; - inode_lock(inode); error = security_inode_setxattr(dentry, name, value, size, flags); if (error) goto out; + error = try_break_deleg(inode, delegated_inode); + if (error) + goto out; + error = __vfs_setxattr_noperm(dentry, name, value, size, flags); out: + return error; +} +EXPORT_SYMBOL_GPL(__vfs_setxattr_locked); + +int +vfs_setxattr(struct dentry *dentry, const char *name, const void *value, + size_t size, int flags) +{ + struct inode *inode = dentry->d_inode; + struct inode *delegated_inode = NULL; + int error; + +retry_deleg: + inode_lock(inode); + error = __vfs_setxattr_locked(dentry, name, value, size, flags, + &delegated_inode); inode_unlock(inode); + + if (delegated_inode) { + error = break_deleg_wait(&delegated_inode); + if (!error) + goto retry_deleg; + } return error; } EXPORT_SYMBOL_GPL(vfs_setxattr); @@ -378,8 +415,18 @@ __vfs_removexattr(struct dentry *dentry, const char *name) } EXPORT_SYMBOL(__vfs_removexattr); +/** + * __vfs_removexattr_locked: set an extended attribute while holding the inode + * lock + * + * @dentry - object to perform setxattr on + * @name - name of xattr to remove + * @delegated_inode - on return, will contain an inode pointer that + * a delegation was broken on, NULL if none. + */ int -vfs_removexattr(struct dentry *dentry, const char *name) +__vfs_removexattr_locked(struct dentry *dentry, const char *name, + struct inode **delegated_inode) { struct inode *inode = dentry->d_inode; int error; @@ -388,11 +435,14 @@ vfs_removexattr(struct dentry *dentry, const char *name) if (error) return error; - inode_lock(inode); error = security_inode_removexattr(dentry, name); if (error) goto out; + error = try_break_deleg(inode, delegated_inode); + if (error) + goto out; + error = __vfs_removexattr(dentry, name); if (!error) { @@ -401,12 +451,32 @@ vfs_removexattr(struct dentry *dentry, const char *name) } out: + return error; +} +EXPORT_SYMBOL_GPL(__vfs_removexattr_locked); + +int +vfs_removexattr(struct dentry *dentry, const char *name) +{ + struct inode *inode = dentry->d_inode; + struct inode *delegated_inode = NULL; + int error; + +retry_deleg: + inode_lock(inode); + error = __vfs_removexattr_locked(dentry, name, &delegated_inode); inode_unlock(inode); + + if (delegated_inode) { + error = break_deleg_wait(&delegated_inode); + if (!error) + goto retry_deleg; + } + return error; } EXPORT_SYMBOL_GPL(vfs_removexattr); - /* * Extended attribute SET operations */ diff --git a/include/linux/xattr.h b/include/linux/xattr.h index 47eaa34f8761..a2f3cd02653c 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -51,8 +51,10 @@ ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t); ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size); int __vfs_setxattr(struct dentry *, struct inode *, const char *, const void *, size_t, int); int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int); +int __vfs_setxattr_locked(struct dentry *, const char *, const void *, size_t, int, struct inode **); int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int); int __vfs_removexattr(struct 
dentry *, const char *); +int __vfs_removexattr_locked(struct dentry *, const char *, struct inode **); int vfs_removexattr(struct dentry *, const char *); ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size); -- cgit v1.2.3 From cab8d289c5ad541a5351a651d95c4086b7f84d7c Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Tue, 23 Jun 2020 22:39:19 +0000 Subject: xattr: add a function to check if a namespace is supported Add a function that checks is an extended attribute namespace is supported for an inode, meaning that a handler must be present for either the whole namespace, or at least one synthetic xattr in the namespace. To be used by the nfs server code when being queried for extended attributes support. Cc: linux-fsdevel@vger.kernel.org Cc: Al Viro Signed-off-by: Frank van der Linden Signed-off-by: Chuck Lever --- fs/xattr.c | 27 +++++++++++++++++++++++++++ include/linux/xattr.h | 2 ++ 2 files changed, 29 insertions(+) (limited to 'include') diff --git a/fs/xattr.c b/fs/xattr.c index 95f38f57347f..386b45676d7e 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -134,6 +134,33 @@ xattr_permission(struct inode *inode, const char *name, int mask) return inode_permission(inode, mask); } +/* + * Look for any handler that deals with the specified namespace. + */ +int +xattr_supported_namespace(struct inode *inode, const char *prefix) +{ + const struct xattr_handler **handlers = inode->i_sb->s_xattr; + const struct xattr_handler *handler; + size_t preflen; + + if (!(inode->i_opflags & IOP_XATTR)) { + if (unlikely(is_bad_inode(inode))) + return -EIO; + return -EOPNOTSUPP; + } + + preflen = strlen(prefix); + + for_each_xattr_handler(handlers, handler) { + if (!strncmp(xattr_prefix(handler), prefix, preflen)) + return 0; + } + + return -EOPNOTSUPP; +} +EXPORT_SYMBOL(xattr_supported_namespace); + int __vfs_setxattr(struct dentry *dentry, struct inode *inode, const char *name, const void *value, size_t size, int flags) diff --git a/include/linux/xattr.h b/include/linux/xattr.h index a2f3cd02653c..fac75810d9d3 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -61,6 +61,8 @@ ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_siz ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name, char **xattr_value, size_t size, gfp_t flags); +int xattr_supported_namespace(struct inode *inode, const char *prefix); + static inline const char *xattr_prefix(const struct xattr_handler *handler) { return handler->prefix ?: handler->name; -- cgit v1.2.3 From 23e50fe3a5e6045a573c69d4b0e3d78aa6183323 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Tue, 23 Jun 2020 22:39:26 +0000 Subject: nfsd: implement the xattr functions and en/decode logic Implement the main entry points for the *XATTR operations. Add functions to calculate the reply size for the user extended attribute operations, and implement the XDR encode / decode logic for these operations. Add the user extended attributes operations to nfsd4_ops. 
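An illustrative sketch, not from the patch: RFC 8276 carries user extended attribute names without the local "user." prefix, so the server-side decode path below prepends XATTR_USER_PREFIX before handing the name to the VFS. example_wire_to_local() is an invented helper name.

        /* Illustrative sketch only -- not part of this patch. */
        static int example_wire_to_local(const char *wire_name, char *buf, size_t buflen)
        {
                /* "foo" on the wire becomes "user.foo" locally */
                if (snprintf(buf, buflen, XATTR_USER_PREFIX "%s", wire_name) >= (int)buflen)
                        return -ENAMETOOLONG;
                return 0;
        }
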
Signed-off-by: Frank van der Linden Signed-off-by: Chuck Lever --- fs/nfsd/nfs4proc.c | 120 ++++++++++++++ fs/nfsd/nfs4xdr.c | 450 +++++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/nfs4.h | 2 +- 3 files changed, 571 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 841aad772798..a527da3d8052 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -2097,6 +2097,68 @@ out: } #endif /* CONFIG_NFSD_PNFS */ +static __be32 +nfsd4_getxattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + union nfsd4_op_u *u) +{ + struct nfsd4_getxattr *getxattr = &u->getxattr; + + return nfsd_getxattr(rqstp, &cstate->current_fh, + getxattr->getxa_name, &getxattr->getxa_buf, + &getxattr->getxa_len); +} + +static __be32 +nfsd4_setxattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + union nfsd4_op_u *u) +{ + struct nfsd4_setxattr *setxattr = &u->setxattr; + __be32 ret; + + if (opens_in_grace(SVC_NET(rqstp))) + return nfserr_grace; + + ret = nfsd_setxattr(rqstp, &cstate->current_fh, setxattr->setxa_name, + setxattr->setxa_buf, setxattr->setxa_len, + setxattr->setxa_flags); + + if (!ret) + set_change_info(&setxattr->setxa_cinfo, &cstate->current_fh); + + return ret; +} + +static __be32 +nfsd4_listxattrs(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + union nfsd4_op_u *u) +{ + /* + * Get the entire list, then copy out only the user attributes + * in the encode function. + */ + return nfsd_listxattr(rqstp, &cstate->current_fh, + &u->listxattrs.lsxa_buf, &u->listxattrs.lsxa_len); +} + +static __be32 +nfsd4_removexattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + union nfsd4_op_u *u) +{ + struct nfsd4_removexattr *removexattr = &u->removexattr; + __be32 ret; + + if (opens_in_grace(SVC_NET(rqstp))) + return nfserr_grace; + + ret = nfsd_removexattr(rqstp, &cstate->current_fh, + removexattr->rmxa_name); + + if (!ret) + set_change_info(&removexattr->rmxa_cinfo, &cstate->current_fh); + + return ret; +} + /* * NULL call. 
*/ @@ -2706,6 +2768,42 @@ static inline u32 nfsd4_seek_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) return (op_encode_hdr_size + 3) * sizeof(__be32); } +static inline u32 nfsd4_getxattr_rsize(struct svc_rqst *rqstp, + struct nfsd4_op *op) +{ + u32 maxcount, rlen; + + maxcount = svc_max_payload(rqstp); + rlen = min_t(u32, XATTR_SIZE_MAX, maxcount); + + return (op_encode_hdr_size + 1 + XDR_QUADLEN(rlen)) * sizeof(__be32); +} + +static inline u32 nfsd4_setxattr_rsize(struct svc_rqst *rqstp, + struct nfsd4_op *op) +{ + return (op_encode_hdr_size + op_encode_change_info_maxsz) + * sizeof(__be32); +} +static inline u32 nfsd4_listxattrs_rsize(struct svc_rqst *rqstp, + struct nfsd4_op *op) +{ + u32 maxcount, rlen; + + maxcount = svc_max_payload(rqstp); + rlen = min(op->u.listxattrs.lsxa_maxcount, maxcount); + + return (op_encode_hdr_size + 4 + XDR_QUADLEN(rlen)) * sizeof(__be32); +} + +static inline u32 nfsd4_removexattr_rsize(struct svc_rqst *rqstp, + struct nfsd4_op *op) +{ + return (op_encode_hdr_size + op_encode_change_info_maxsz) + * sizeof(__be32); +} + + static const struct nfsd4_operation nfsd4_ops[] = { [OP_ACCESS] = { .op_func = nfsd4_access, @@ -3087,6 +3185,28 @@ static const struct nfsd4_operation nfsd4_ops[] = { .op_name = "OP_COPY_NOTIFY", .op_rsize_bop = nfsd4_copy_notify_rsize, }, + [OP_GETXATTR] = { + .op_func = nfsd4_getxattr, + .op_name = "OP_GETXATTR", + .op_rsize_bop = nfsd4_getxattr_rsize, + }, + [OP_SETXATTR] = { + .op_func = nfsd4_setxattr, + .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME, + .op_name = "OP_SETXATTR", + .op_rsize_bop = nfsd4_setxattr_rsize, + }, + [OP_LISTXATTRS] = { + .op_func = nfsd4_listxattrs, + .op_name = "OP_LISTXATTRS", + .op_rsize_bop = nfsd4_listxattrs_rsize, + }, + [OP_REMOVEXATTR] = { + .op_func = nfsd4_removexattr, + .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME, + .op_name = "OP_REMOVEXATTR", + .op_rsize_bop = nfsd4_removexattr_rsize, + }, }; /** diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 48806b493eba..8bacc0ceae19 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -41,6 +41,8 @@ #include #include #include +#include +#include #include "idmap.h" #include "acl.h" @@ -1877,6 +1879,208 @@ nfsd4_decode_seek(struct nfsd4_compoundargs *argp, struct nfsd4_seek *seek) DECODE_TAIL; } +/* + * XDR data that is more than PAGE_SIZE in size is normally part of a + * read or write. However, the size of extended attributes is limited + * by the maximum request size, and then further limited by the underlying + * filesystem limits. This can exceed PAGE_SIZE (currently, XATTR_SIZE_MAX + * is 64k). Since there is no kvec- or page-based interface to xattrs, + * and we're not dealing with contiguous pages, we need to do some copying. + */ + +/* + * Decode data into buffer. Uses head and pages constructed by + * svcxdr_construct_vector. + */ +static __be32 +nfsd4_vbuf_from_vector(struct nfsd4_compoundargs *argp, struct kvec *head, + struct page **pages, char **bufp, u32 buflen) +{ + char *tmp, *dp; + u32 len; + + if (buflen <= head->iov_len) { + /* + * We're in luck, the head has enough space. Just return + * the head, no need for copying. 
+ */ + *bufp = head->iov_base; + return 0; + } + + tmp = svcxdr_tmpalloc(argp, buflen); + if (tmp == NULL) + return nfserr_jukebox; + + dp = tmp; + memcpy(dp, head->iov_base, head->iov_len); + buflen -= head->iov_len; + dp += head->iov_len; + + while (buflen > 0) { + len = min_t(u32, buflen, PAGE_SIZE); + memcpy(dp, page_address(*pages), len); + + buflen -= len; + dp += len; + pages++; + } + + *bufp = tmp; + return 0; +} + +/* + * Get a user extended attribute name from the XDR buffer. + * It will not have the "user." prefix, so prepend it. + * Lastly, check for nul characters in the name. + */ +static __be32 +nfsd4_decode_xattr_name(struct nfsd4_compoundargs *argp, char **namep) +{ + DECODE_HEAD; + char *name, *sp, *dp; + u32 namelen, cnt; + + READ_BUF(4); + namelen = be32_to_cpup(p++); + + if (namelen > (XATTR_NAME_MAX - XATTR_USER_PREFIX_LEN)) + return nfserr_nametoolong; + + if (namelen == 0) + goto xdr_error; + + READ_BUF(namelen); + + name = svcxdr_tmpalloc(argp, namelen + XATTR_USER_PREFIX_LEN + 1); + if (!name) + return nfserr_jukebox; + + memcpy(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN); + + /* + * Copy the extended attribute name over while checking for 0 + * characters. + */ + sp = (char *)p; + dp = name + XATTR_USER_PREFIX_LEN; + cnt = namelen; + + while (cnt-- > 0) { + if (*sp == '\0') + goto xdr_error; + *dp++ = *sp++; + } + *dp = '\0'; + + *namep = name; + + DECODE_TAIL; +} + +/* + * A GETXATTR op request comes without a length specifier. We just set the + * maximum length for the reply based on XATTR_SIZE_MAX and the maximum + * channel reply size. nfsd_getxattr will probe the length of the xattr, + * check it against getxa_len, and allocate + return the value. + */ +static __be32 +nfsd4_decode_getxattr(struct nfsd4_compoundargs *argp, + struct nfsd4_getxattr *getxattr) +{ + __be32 status; + u32 maxcount; + + status = nfsd4_decode_xattr_name(argp, &getxattr->getxa_name); + if (status) + return status; + + maxcount = svc_max_payload(argp->rqstp); + maxcount = min_t(u32, XATTR_SIZE_MAX, maxcount); + + getxattr->getxa_len = maxcount; + + return status; +} + +static __be32 +nfsd4_decode_setxattr(struct nfsd4_compoundargs *argp, + struct nfsd4_setxattr *setxattr) +{ + DECODE_HEAD; + u32 flags, maxcount, size; + struct kvec head; + struct page **pagelist; + + READ_BUF(4); + flags = be32_to_cpup(p++); + + if (flags > SETXATTR4_REPLACE) + return nfserr_inval; + setxattr->setxa_flags = flags; + + status = nfsd4_decode_xattr_name(argp, &setxattr->setxa_name); + if (status) + return status; + + maxcount = svc_max_payload(argp->rqstp); + maxcount = min_t(u32, XATTR_SIZE_MAX, maxcount); + + READ_BUF(4); + size = be32_to_cpup(p++); + if (size > maxcount) + return nfserr_xattr2big; + + setxattr->setxa_len = size; + if (size > 0) { + status = svcxdr_construct_vector(argp, &head, &pagelist, size); + if (status) + return status; + + status = nfsd4_vbuf_from_vector(argp, &head, pagelist, + &setxattr->setxa_buf, size); + } + + DECODE_TAIL; +} + +static __be32 +nfsd4_decode_listxattrs(struct nfsd4_compoundargs *argp, + struct nfsd4_listxattrs *listxattrs) +{ + DECODE_HEAD; + u32 maxcount; + + READ_BUF(12); + p = xdr_decode_hyper(p, &listxattrs->lsxa_cookie); + + /* + * If the cookie is too large to have even one user.x attribute + * plus trailing '\0' left in a maximum size buffer, it's invalid. 
+ */ + if (listxattrs->lsxa_cookie >= + (XATTR_LIST_MAX / (XATTR_USER_PREFIX_LEN + 2))) + return nfserr_badcookie; + + maxcount = be32_to_cpup(p++); + if (maxcount < 8) + /* Always need at least 2 words (length and one character) */ + return nfserr_inval; + + maxcount = min(maxcount, svc_max_payload(argp->rqstp)); + listxattrs->lsxa_maxcount = maxcount; + + DECODE_TAIL; +} + +static __be32 +nfsd4_decode_removexattr(struct nfsd4_compoundargs *argp, + struct nfsd4_removexattr *removexattr) +{ + return nfsd4_decode_xattr_name(argp, &removexattr->rmxa_name); +} + static __be32 nfsd4_decode_noop(struct nfsd4_compoundargs *argp, void *p) { @@ -1973,6 +2177,11 @@ static const nfsd4_dec nfsd4_dec_ops[] = { [OP_SEEK] = (nfsd4_dec)nfsd4_decode_seek, [OP_WRITE_SAME] = (nfsd4_dec)nfsd4_decode_notsupp, [OP_CLONE] = (nfsd4_dec)nfsd4_decode_clone, + /* RFC 8276 extended atributes operations */ + [OP_GETXATTR] = (nfsd4_dec)nfsd4_decode_getxattr, + [OP_SETXATTR] = (nfsd4_dec)nfsd4_decode_setxattr, + [OP_LISTXATTRS] = (nfsd4_dec)nfsd4_decode_listxattrs, + [OP_REMOVEXATTR] = (nfsd4_dec)nfsd4_decode_removexattr, }; static inline bool @@ -4458,6 +4667,241 @@ nfsd4_encode_noop(struct nfsd4_compoundres *resp, __be32 nfserr, void *p) return nfserr; } +/* + * Encode kmalloc-ed buffer in to XDR stream. + */ +static int +nfsd4_vbuf_to_stream(struct xdr_stream *xdr, char *buf, u32 buflen) +{ + u32 cplen; + __be32 *p; + + cplen = min_t(unsigned long, buflen, + ((void *)xdr->end - (void *)xdr->p)); + p = xdr_reserve_space(xdr, cplen); + if (!p) + return nfserr_resource; + + memcpy(p, buf, cplen); + buf += cplen; + buflen -= cplen; + + while (buflen) { + cplen = min_t(u32, buflen, PAGE_SIZE); + p = xdr_reserve_space(xdr, cplen); + if (!p) + return nfserr_resource; + + memcpy(p, buf, cplen); + + if (cplen < PAGE_SIZE) { + /* + * We're done, with a length that wasn't page + * aligned, so possibly not word aligned. Pad + * any trailing bytes with 0. + */ + xdr_encode_opaque_fixed(p, NULL, cplen); + break; + } + + buflen -= PAGE_SIZE; + buf += PAGE_SIZE; + } + + return 0; +} + +static __be32 +nfsd4_encode_getxattr(struct nfsd4_compoundres *resp, __be32 nfserr, + struct nfsd4_getxattr *getxattr) +{ + struct xdr_stream *xdr = &resp->xdr; + __be32 *p, err; + + p = xdr_reserve_space(xdr, 4); + if (!p) + return nfserr_resource; + + *p = cpu_to_be32(getxattr->getxa_len); + + if (getxattr->getxa_len == 0) + return 0; + + err = nfsd4_vbuf_to_stream(xdr, getxattr->getxa_buf, + getxattr->getxa_len); + + kvfree(getxattr->getxa_buf); + + return err; +} + +static __be32 +nfsd4_encode_setxattr(struct nfsd4_compoundres *resp, __be32 nfserr, + struct nfsd4_setxattr *setxattr) +{ + struct xdr_stream *xdr = &resp->xdr; + __be32 *p; + + p = xdr_reserve_space(xdr, 20); + if (!p) + return nfserr_resource; + + encode_cinfo(p, &setxattr->setxa_cinfo); + + return 0; +} + +/* + * See if there are cookie values that can be rejected outright. + */ +static __be32 +nfsd4_listxattr_validate_cookie(struct nfsd4_listxattrs *listxattrs, + u32 *offsetp) +{ + u64 cookie = listxattrs->lsxa_cookie; + + /* + * If the cookie is larger than the maximum number we can fit + * in either the buffer we just got back from vfs_listxattr, or, + * XDR-encoded, in the return buffer, it's invalid. 
+ */ + if (cookie > (listxattrs->lsxa_len) / (XATTR_USER_PREFIX_LEN + 2)) + return nfserr_badcookie; + + if (cookie > (listxattrs->lsxa_maxcount / + (XDR_QUADLEN(XATTR_USER_PREFIX_LEN + 2) + 4))) + return nfserr_badcookie; + + *offsetp = (u32)cookie; + return 0; +} + +static __be32 +nfsd4_encode_listxattrs(struct nfsd4_compoundres *resp, __be32 nfserr, + struct nfsd4_listxattrs *listxattrs) +{ + struct xdr_stream *xdr = &resp->xdr; + u32 cookie_offset, count_offset, eof; + u32 left, xdrleft, slen, count; + u32 xdrlen, offset; + u64 cookie; + char *sp; + __be32 status; + __be32 *p; + u32 nuser; + + eof = 1; + + status = nfsd4_listxattr_validate_cookie(listxattrs, &offset); + if (status) + goto out; + + /* + * Reserve space for the cookie and the name array count. Record + * the offsets to save them later. + */ + cookie_offset = xdr->buf->len; + count_offset = cookie_offset + 8; + p = xdr_reserve_space(xdr, 12); + if (!p) { + status = nfserr_resource; + goto out; + } + + count = 0; + left = listxattrs->lsxa_len; + sp = listxattrs->lsxa_buf; + nuser = 0; + + xdrleft = listxattrs->lsxa_maxcount; + + while (left > 0 && xdrleft > 0) { + slen = strlen(sp); + + /* + * Check if this a user. attribute, skip it if not. + */ + if (strncmp(sp, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) + goto contloop; + + slen -= XATTR_USER_PREFIX_LEN; + xdrlen = 4 + ((slen + 3) & ~3); + if (xdrlen > xdrleft) { + if (count == 0) { + /* + * Can't even fit the first attribute name. + */ + status = nfserr_toosmall; + goto out; + } + eof = 0; + goto wreof; + } + + left -= XATTR_USER_PREFIX_LEN; + sp += XATTR_USER_PREFIX_LEN; + if (nuser++ < offset) + goto contloop; + + + p = xdr_reserve_space(xdr, xdrlen); + if (!p) { + status = nfserr_resource; + goto out; + } + + p = xdr_encode_opaque(p, sp, slen); + + xdrleft -= xdrlen; + count++; +contloop: + sp += slen + 1; + left -= slen + 1; + } + + /* + * If there were user attributes to copy, but we didn't copy + * any, the offset was too large (e.g. the cookie was invalid). 
+ */ + if (nuser > 0 && count == 0) { + status = nfserr_badcookie; + goto out; + } + +wreof: + p = xdr_reserve_space(xdr, 4); + if (!p) { + status = nfserr_resource; + goto out; + } + *p = cpu_to_be32(eof); + + cookie = offset + count; + + write_bytes_to_xdr_buf(xdr->buf, cookie_offset, &cookie, 8); + count = htonl(count); + write_bytes_to_xdr_buf(xdr->buf, count_offset, &count, 4); +out: + if (listxattrs->lsxa_len) + kvfree(listxattrs->lsxa_buf); + return status; +} + +static __be32 +nfsd4_encode_removexattr(struct nfsd4_compoundres *resp, __be32 nfserr, + struct nfsd4_removexattr *removexattr) +{ + struct xdr_stream *xdr = &resp->xdr; + __be32 *p; + + p = xdr_reserve_space(xdr, 20); + if (!p) + return nfserr_resource; + + p = encode_cinfo(p, &removexattr->rmxa_cinfo); + return 0; +} + typedef __be32(* nfsd4_enc)(struct nfsd4_compoundres *, __be32, void *); /* @@ -4547,6 +4991,12 @@ static const nfsd4_enc nfsd4_enc_ops[] = { [OP_SEEK] = (nfsd4_enc)nfsd4_encode_seek, [OP_WRITE_SAME] = (nfsd4_enc)nfsd4_encode_noop, [OP_CLONE] = (nfsd4_enc)nfsd4_encode_noop, + + /* RFC 8276 extended atributes operations */ + [OP_GETXATTR] = (nfsd4_enc)nfsd4_encode_getxattr, + [OP_SETXATTR] = (nfsd4_enc)nfsd4_encode_setxattr, + [OP_LISTXATTRS] = (nfsd4_enc)nfsd4_encode_listxattrs, + [OP_REMOVEXATTR] = (nfsd4_enc)nfsd4_encode_removexattr, }; /* diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index e6ca9d1d2e76..33ebe476428e 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -165,7 +165,7 @@ Needs to be updated if more operations are defined in future.*/ #define FIRST_NFS4_OP OP_ACCESS #define LAST_NFS40_OP OP_RELEASE_LOCKOWNER #define LAST_NFS41_OP OP_RECLAIM_COMPLETE -#define LAST_NFS42_OP OP_CLONE +#define LAST_NFS42_OP OP_REMOVEXATTR #define LAST_NFS4_OP LAST_NFS42_OP enum nfsstat4 { -- cgit v1.2.3 From 10b9d99a3dbbf5e9af838d1887a1047875dcafd9 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Sat, 18 Apr 2020 18:30:42 -0400 Subject: SUNRPC: Augment server-side rpcgss tracepoints Add similar tracepoints to those that were recently added on the client side to track failures in the integ and priv unwrap paths. And, let's collect the seqno-specific tracepoints together with a common naming convention. Regarding the gss_check_seq_num() changes: everywhere else treats the GSS sequence number as an unsigned 32-bit integer. As far back as 2.6.12, I couldn't find a compelling reason to do things differently here. As a defensive change it's better to eliminate needless implicit sign conversions. 
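A simplified sketch of the RFC 2203 sequence-window test that the new seqno tracepoints report on; this is illustrative only and omits the parts of the real logic that slide the window forward and record newly seen numbers. seqno_acceptable() is an invented name.

        /* Simplified, illustrative sketch only -- not the patched code. */
        static bool seqno_acceptable(u32 seq_num, u32 sd_max, const unsigned long *sd_win)
        {
                if (seq_num > sd_max)
                        return true;            /* window will slide forward */
                if (seq_num <= sd_max - GSS_SEQ_WIN)
                        return false;           /* too low: seqno_low event */
                /* inside the window: reject duplicates (seqno_seen event) */
                return !test_bit(seq_num % GSS_SEQ_WIN, sd_win);
        }
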
Signed-off-by: Chuck Lever --- include/trace/events/rpcgss.h | 168 ++++++++++++++++++++++++++++++++------ net/sunrpc/auth_gss/svcauth_gss.c | 117 +++++++++++++++++--------- net/sunrpc/auth_gss/trace.c | 3 + 3 files changed, 224 insertions(+), 64 deletions(-) (limited to 'include') diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h index b9b51a4b1db1..ffdbe6f85da8 100644 --- a/include/trace/events/rpcgss.h +++ b/include/trace/events/rpcgss.h @@ -170,55 +170,144 @@ DECLARE_EVENT_CLASS(rpcgss_ctx_class, DEFINE_CTX_EVENT(init); DEFINE_CTX_EVENT(destroy); +DECLARE_EVENT_CLASS(rpcgss_svc_gssapi_class, + TP_PROTO( + const struct svc_rqst *rqstp, + u32 maj_stat + ), + + TP_ARGS(rqstp, maj_stat), + + TP_STRUCT__entry( + __field(u32, xid) + __field(u32, maj_stat) + __string(addr, rqstp->rq_xprt->xpt_remotebuf) + ), + + TP_fast_assign( + __entry->xid = __be32_to_cpu(rqstp->rq_xid); + __entry->maj_stat = maj_stat; + __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf); + ), + + TP_printk("addr=%s xid=0x%08x maj_stat=%s", + __get_str(addr), __entry->xid, + __entry->maj_stat == 0 ? + "GSS_S_COMPLETE" : show_gss_status(__entry->maj_stat)) +); + +#define DEFINE_SVC_GSSAPI_EVENT(name) \ + DEFINE_EVENT(rpcgss_svc_gssapi_class, rpcgss_svc_##name, \ + TP_PROTO( \ + const struct svc_rqst *rqstp, \ + u32 maj_stat \ + ), \ + TP_ARGS(rqstp, maj_stat)) + +DEFINE_SVC_GSSAPI_EVENT(unwrap); +DEFINE_SVC_GSSAPI_EVENT(mic); + +TRACE_EVENT(rpcgss_svc_unwrap_failed, + TP_PROTO( + const struct svc_rqst *rqstp + ), + + TP_ARGS(rqstp), + + TP_STRUCT__entry( + __field(u32, xid) + __string(addr, rqstp->rq_xprt->xpt_remotebuf) + ), + + TP_fast_assign( + __entry->xid = be32_to_cpu(rqstp->rq_xid); + __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf); + ), + + TP_printk("addr=%s xid=0x%08x", __get_str(addr), __entry->xid) +); + +TRACE_EVENT(rpcgss_svc_seqno_bad, + TP_PROTO( + const struct svc_rqst *rqstp, + u32 expected, + u32 received + ), + + TP_ARGS(rqstp, expected, received), + + TP_STRUCT__entry( + __field(u32, expected) + __field(u32, received) + __field(u32, xid) + __string(addr, rqstp->rq_xprt->xpt_remotebuf) + ), + + TP_fast_assign( + __entry->expected = expected; + __entry->received = received; + __entry->xid = __be32_to_cpu(rqstp->rq_xid); + __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf); + ), + + TP_printk("addr=%s xid=0x%08x expected seqno %u, received seqno %u", + __get_str(addr), __entry->xid, + __entry->expected, __entry->received) +); + TRACE_EVENT(rpcgss_svc_accept_upcall, TP_PROTO( - __be32 xid, + const struct svc_rqst *rqstp, u32 major_status, u32 minor_status ), - TP_ARGS(xid, major_status, minor_status), + TP_ARGS(rqstp, major_status, minor_status), TP_STRUCT__entry( - __field(u32, xid) __field(u32, minor_status) __field(unsigned long, major_status) + __field(u32, xid) + __string(addr, rqstp->rq_xprt->xpt_remotebuf) ), TP_fast_assign( - __entry->xid = be32_to_cpu(xid); __entry->minor_status = minor_status; __entry->major_status = major_status; + __entry->xid = be32_to_cpu(rqstp->rq_xid); + __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf); ), - TP_printk("xid=0x%08x major_status=%s (0x%08lx) minor_status=%u", - __entry->xid, __entry->major_status == 0 ? "GSS_S_COMPLETE" : - show_gss_status(__entry->major_status), + TP_printk("addr=%s xid=0x%08x major_status=%s (0x%08lx) minor_status=%u", + __get_str(addr), __entry->xid, + (__entry->major_status == 0) ? 
"GSS_S_COMPLETE" : + show_gss_status(__entry->major_status), __entry->major_status, __entry->minor_status ) ); -TRACE_EVENT(rpcgss_svc_accept, +TRACE_EVENT(rpcgss_svc_authenticate, TP_PROTO( - __be32 xid, - size_t len + const struct svc_rqst *rqstp, + const struct rpc_gss_wire_cred *gc ), - TP_ARGS(xid, len), + TP_ARGS(rqstp, gc), TP_STRUCT__entry( + __field(u32, seqno) __field(u32, xid) - __field(size_t, len) + __string(addr, rqstp->rq_xprt->xpt_remotebuf) ), TP_fast_assign( - __entry->xid = be32_to_cpu(xid); - __entry->len = len; + __entry->xid = be32_to_cpu(rqstp->rq_xid); + __entry->seqno = gc->gc_seq; + __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf); ), - TP_printk("xid=0x%08x len=%zu", - __entry->xid, __entry->len - ) + TP_printk("addr=%s xid=0x%08x seqno=%u", __get_str(addr), + __entry->xid, __entry->seqno) ); @@ -371,11 +460,11 @@ TRACE_EVENT(rpcgss_update_slack, DECLARE_EVENT_CLASS(rpcgss_svc_seqno_class, TP_PROTO( - __be32 xid, + const struct svc_rqst *rqstp, u32 seqno ), - TP_ARGS(xid, seqno), + TP_ARGS(rqstp, seqno), TP_STRUCT__entry( __field(u32, xid) @@ -383,25 +472,52 @@ DECLARE_EVENT_CLASS(rpcgss_svc_seqno_class, ), TP_fast_assign( - __entry->xid = be32_to_cpu(xid); + __entry->xid = be32_to_cpu(rqstp->rq_xid); __entry->seqno = seqno; ), - TP_printk("xid=0x%08x seqno=%u, request discarded", + TP_printk("xid=0x%08x seqno=%u", __entry->xid, __entry->seqno) ); #define DEFINE_SVC_SEQNO_EVENT(name) \ - DEFINE_EVENT(rpcgss_svc_seqno_class, rpcgss_svc_##name, \ + DEFINE_EVENT(rpcgss_svc_seqno_class, rpcgss_svc_seqno_##name, \ TP_PROTO( \ - __be32 xid, \ + const struct svc_rqst *rqstp, \ u32 seqno \ ), \ - TP_ARGS(xid, seqno)) + TP_ARGS(rqstp, seqno)) -DEFINE_SVC_SEQNO_EVENT(large_seqno); -DEFINE_SVC_SEQNO_EVENT(old_seqno); +DEFINE_SVC_SEQNO_EVENT(large); +DEFINE_SVC_SEQNO_EVENT(seen); +TRACE_EVENT(rpcgss_svc_seqno_low, + TP_PROTO( + const struct svc_rqst *rqstp, + u32 seqno, + u32 min, + u32 max + ), + + TP_ARGS(rqstp, seqno, min, max), + + TP_STRUCT__entry( + __field(u32, xid) + __field(u32, seqno) + __field(u32, min) + __field(u32, max) + ), + + TP_fast_assign( + __entry->xid = be32_to_cpu(rqstp->rq_xid); + __entry->seqno = seqno; + __entry->min = min; + __entry->max = max; + ), + + TP_printk("xid=0x%08x seqno=%u window=[%u..%u]", + __entry->xid, __entry->seqno, __entry->min, __entry->max) +); /** ** gssd upcall related trace events diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 46027d0c903f..7d83f54aaaa6 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -332,7 +332,7 @@ static struct rsi *rsi_update(struct cache_detail *cd, struct rsi *new, struct r struct gss_svc_seq_data { /* highest seq number seen so far: */ - int sd_max; + u32 sd_max; /* for i such that sd_max-GSS_SEQ_WIN < i <= sd_max, the i-th bit of * sd_win is nonzero iff sequence number i has been seen already: */ unsigned long sd_win[GSS_SEQ_WIN/BITS_PER_LONG]; @@ -613,16 +613,29 @@ gss_svc_searchbyctx(struct cache_detail *cd, struct xdr_netobj *handle) return found; } -/* Implements sequence number algorithm as specified in RFC 2203. */ -static int -gss_check_seq_num(struct rsc *rsci, int seq_num) +/** + * gss_check_seq_num - GSS sequence number window check + * @rqstp: RPC Call to use when reporting errors + * @rsci: cached GSS context state (updated on return) + * @seq_num: sequence number to check + * + * Implements sequence number algorithm as specified in + * RFC 2203, Section 5.3.3.1. "Context Management". 
+ * + * Return values: + * %true: @rqstp's GSS sequence number is inside the window + * %false: @rqstp's GSS sequence number is outside the window + */ +static bool gss_check_seq_num(const struct svc_rqst *rqstp, struct rsc *rsci, + u32 seq_num) { struct gss_svc_seq_data *sd = &rsci->seqdata; + bool result = false; spin_lock(&sd->sd_lock); if (seq_num > sd->sd_max) { if (seq_num >= sd->sd_max + GSS_SEQ_WIN) { - memset(sd->sd_win,0,sizeof(sd->sd_win)); + memset(sd->sd_win, 0, sizeof(sd->sd_win)); sd->sd_max = seq_num; } else while (sd->sd_max < seq_num) { sd->sd_max++; @@ -631,17 +644,25 @@ gss_check_seq_num(struct rsc *rsci, int seq_num) __set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win); goto ok; } else if (seq_num <= sd->sd_max - GSS_SEQ_WIN) { - goto drop; + goto toolow; } - /* sd_max - GSS_SEQ_WIN < seq_num <= sd_max */ if (__test_and_set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win)) - goto drop; + goto alreadyseen; + ok: + result = true; +out: spin_unlock(&sd->sd_lock); - return 1; -drop: - spin_unlock(&sd->sd_lock); - return 0; + return result; + +toolow: + trace_rpcgss_svc_seqno_low(rqstp, seq_num, + sd->sd_max - GSS_SEQ_WIN, + sd->sd_max); + goto out; +alreadyseen: + trace_rpcgss_svc_seqno_seen(rqstp, seq_num); + goto out; } static inline u32 round_up_to_quad(u32 i) @@ -721,14 +742,12 @@ gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci, } if (gc->gc_seq > MAXSEQ) { - trace_rpcgss_svc_large_seqno(rqstp->rq_xid, gc->gc_seq); + trace_rpcgss_svc_seqno_large(rqstp, gc->gc_seq); *authp = rpcsec_gsserr_ctxproblem; return SVC_DENIED; } - if (!gss_check_seq_num(rsci, gc->gc_seq)) { - trace_rpcgss_svc_old_seqno(rqstp->rq_xid, gc->gc_seq); + if (!gss_check_seq_num(rqstp, rsci, gc->gc_seq)) return SVC_DROP; - } return SVC_OK; } @@ -866,11 +885,13 @@ read_u32_from_xdr_buf(struct xdr_buf *buf, int base, u32 *obj) static int unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx) { + u32 integ_len, rseqno, maj_stat; int stat = -EINVAL; - u32 integ_len, maj_stat; struct xdr_netobj mic; struct xdr_buf integ_buf; + mic.data = NULL; + /* NFS READ normally uses splice to send data in-place. However * the data in cache can change after the reply's MIC is computed * but before the RPC reply is sent. To prevent the client from @@ -885,34 +906,44 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g integ_len = svc_getnl(&buf->head[0]); if (integ_len & 3) - return stat; + goto unwrap_failed; if (integ_len > buf->len) - return stat; - if (xdr_buf_subsegment(buf, &integ_buf, 0, integ_len)) { - WARN_ON_ONCE(1); - return stat; - } + goto unwrap_failed; + if (xdr_buf_subsegment(buf, &integ_buf, 0, integ_len)) + goto unwrap_failed; + /* copy out mic... 
*/ if (read_u32_from_xdr_buf(buf, integ_len, &mic.len)) - return stat; + goto unwrap_failed; if (mic.len > RPC_MAX_AUTH_SIZE) - return stat; + goto unwrap_failed; mic.data = kmalloc(mic.len, GFP_KERNEL); if (!mic.data) - return stat; + goto unwrap_failed; if (read_bytes_from_xdr_buf(buf, integ_len + 4, mic.data, mic.len)) - goto out; + goto unwrap_failed; maj_stat = gss_verify_mic(ctx, &integ_buf, &mic); if (maj_stat != GSS_S_COMPLETE) - goto out; - if (svc_getnl(&buf->head[0]) != seq) - goto out; + goto bad_mic; + rseqno = svc_getnl(&buf->head[0]); + if (rseqno != seq) + goto bad_seqno; /* trim off the mic and padding at the end before returning */ xdr_buf_trim(buf, round_up_to_quad(mic.len) + 4); stat = 0; out: kfree(mic.data); return stat; + +unwrap_failed: + trace_rpcgss_svc_unwrap_failed(rqstp); + goto out; +bad_seqno: + trace_rpcgss_svc_seqno_bad(rqstp, seq, rseqno); + goto out; +bad_mic: + trace_rpcgss_svc_mic(rqstp, maj_stat); + goto out; } static inline int @@ -937,6 +968,7 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs { u32 priv_len, maj_stat; int pad, remaining_len, offset; + u32 rseqno; clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags); @@ -951,7 +983,7 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs * not yet read from the head, so these two values are different: */ remaining_len = total_buf_len(buf); if (priv_len > remaining_len) - return -EINVAL; + goto unwrap_failed; pad = remaining_len - priv_len; buf->len -= pad; fix_priv_head(buf, pad); @@ -972,11 +1004,22 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs fix_priv_head(buf, pad); } if (maj_stat != GSS_S_COMPLETE) - return -EINVAL; + goto bad_unwrap; out_seq: - if (svc_getnl(&buf->head[0]) != seq) - return -EINVAL; + rseqno = svc_getnl(&buf->head[0]); + if (rseqno != seq) + goto bad_seqno; return 0; + +unwrap_failed: + trace_rpcgss_svc_unwrap_failed(rqstp); + return -EINVAL; +bad_seqno: + trace_rpcgss_svc_seqno_bad(rqstp, seq, rseqno); + return -EINVAL; +bad_unwrap: + trace_rpcgss_svc_unwrap(rqstp, maj_stat); + return -EINVAL; } struct gss_svc_data { @@ -1314,8 +1357,7 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp, if (status) goto out; - trace_rpcgss_svc_accept_upcall(rqstp->rq_xid, ud.major_status, - ud.minor_status); + trace_rpcgss_svc_accept_upcall(rqstp, ud.major_status, ud.minor_status); switch (ud.major_status) { case GSS_S_CONTINUE_NEEDED: @@ -1490,8 +1532,6 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp) int ret; struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id); - trace_rpcgss_svc_accept(rqstp->rq_xid, argv->iov_len); - *authp = rpc_autherr_badcred; if (!svcdata) svcdata = kmalloc(sizeof(*svcdata), GFP_KERNEL); @@ -1608,6 +1648,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp) GSS_C_QOP_DEFAULT, gc->gc_svc); ret = SVC_OK; + trace_rpcgss_svc_authenticate(rqstp, gc); goto out; } garbage_args: diff --git a/net/sunrpc/auth_gss/trace.c b/net/sunrpc/auth_gss/trace.c index 49fa583d7f91..d26036a57443 100644 --- a/net/sunrpc/auth_gss/trace.c +++ b/net/sunrpc/auth_gss/trace.c @@ -5,6 +5,9 @@ #include #include +#include +#include +#include #include #include -- cgit v1.2.3 From c65b326b1eb983bca35ed43d0e453d1b15705f10 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Wed, 25 Mar 2020 14:41:46 -0400 Subject: svcrdma: Make svc_rdma_send_error_msg() a global function Prepare for svc_rdma_send_error_msg() to be invoked from another source file. 
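A hypothetical caller sketch, not part of this patch: once the symbol is global, another source file can return an RDMA_ERROR to the client when an incoming Call cannot be parsed. example_reject_call() is an invented name.

        /* Hypothetical sketch only -- not part of this patch. */
        static void example_reject_call(struct svcxprt_rdma *rdma,
                                        struct svc_rdma_send_ctxt *sctxt,
                                        struct svc_rdma_recv_ctxt *rctxt)
        {
                /* @sctxt is released by Send completion or by the error path */
                svc_rdma_send_error_msg(rdma, sctxt, rctxt, -EINVAL);
        }
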
Signed-off-by: Chuck Lever --- include/linux/sunrpc/svc_rdma.h | 4 ++++ net/sunrpc/xprtrdma/svc_rdma_sendto.c | 28 +++++++++++++++++++--------- 2 files changed, 23 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 7ed82625dc0b..1579f7a14ab4 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -195,6 +195,10 @@ extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *sctxt, const struct svc_rdma_recv_ctxt *rctxt, struct xdr_buf *xdr); +extern void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma, + struct svc_rdma_send_ctxt *sctxt, + struct svc_rdma_recv_ctxt *rctxt, + int status); extern int svc_rdma_sendto(struct svc_rqst *); extern int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset, unsigned int length); diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index fb548b548c4b..57041298fe4f 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -804,16 +804,25 @@ static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma, return svc_rdma_send(rdma, &sctxt->sc_send_wr); } -/* Given the client-provided Write and Reply chunks, the server was not - * able to form a complete reply. Return an RDMA_ERROR message so the - * client can retire this RPC transaction. - * - * Remote Invalidation is skipped for simplicity. +/** + * svc_rdma_send_error_msg - Send an RPC/RDMA v1 error response + * @rdma: controlling transport context + * @sctxt: Send context for the response + * @rctxt: Receive context for incoming bad message + * @status: negative errno indicating error that occurred + * + * Given the client-provided Read, Write, and Reply chunks, the + * server was not able to parse the Call or form a complete Reply. + * Return an RDMA_ERROR message so the client can retire the RPC + * transaction. + * + * The caller does not have to release @sctxt. It is released by + * Send completion, or by this function on error. */ -static void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma, - struct svc_rdma_send_ctxt *sctxt, - struct svc_rdma_recv_ctxt *rctxt, - int status) +void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma, + struct svc_rdma_send_ctxt *sctxt, + struct svc_rdma_recv_ctxt *rctxt, + int status) { __be32 *rdma_argp = rctxt->rc_recv_buf; __be32 *p; @@ -852,6 +861,7 @@ static void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma, trace_svcrdma_err_chunk(*rdma_argp); } + /* Remote Invalidation is skipped for simplicity. 
*/ sctxt->sc_send_wr.num_sge = 1; sctxt->sc_send_wr.opcode = IB_WR_SEND; sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len; -- cgit v1.2.3 From 3f8f25c696f9c4e352a4d705ba767af676421564 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 30 Apr 2020 14:17:40 -0400 Subject: svcrdma: Clean up trace_svcrdma_send_failed() tracepoint - Use the _err naming convention instead - Remove display of kernel memory address of the controlling xprt Signed-off-by: Chuck Lever --- include/trace/events/rpcrdma.h | 7 ++----- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 2 +- 2 files changed, 3 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 0f05a6e2b9cb..0eff80dee066 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -1716,7 +1716,7 @@ TRACE_EVENT(svcrdma_send_pullup, TP_printk("len=%u", __entry->len) ); -TRACE_EVENT(svcrdma_send_failed, +TRACE_EVENT(svcrdma_send_err, TP_PROTO( const struct svc_rqst *rqst, int status @@ -1727,19 +1727,16 @@ TRACE_EVENT(svcrdma_send_failed, TP_STRUCT__entry( __field(int, status) __field(u32, xid) - __field(const void *, xprt) __string(addr, rqst->rq_xprt->xpt_remotebuf) ), TP_fast_assign( __entry->status = status; __entry->xid = __be32_to_cpu(rqst->rq_xid); - __entry->xprt = rqst->rq_xprt; __assign_str(addr, rqst->rq_xprt->xpt_remotebuf); ), - TP_printk("xprt=%p addr=%s xid=0x%08x status=%d", - __entry->xprt, __get_str(addr), + TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr), __entry->xid, __entry->status ) ); diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 57041298fe4f..f985f548346a 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -971,7 +971,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) err1: svc_rdma_send_ctxt_put(rdma, sctxt); err0: - trace_svcrdma_send_failed(rqstp, ret); + trace_svcrdma_send_err(rqstp, ret); set_bit(XPT_CLOSE, &xprt->xpt_flags); return -ENOTCONN; } -- cgit v1.2.3 From 0b8dc1b69995cbd81c2c9a2f1730c46cce085f62 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 18 May 2020 11:34:47 -0400 Subject: svcrdma: Remove declarations for functions long removed MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pavane pour une infante défunte. Signed-off-by: Chuck Lever --- include/linux/sunrpc/svc_rdma.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'include') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 1579f7a14ab4..d28ca1b6f2eb 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -204,10 +204,6 @@ extern int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset, unsigned int length); /* svc_rdma_transport.c */ -extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *); -extern void svc_sq_reap(struct svcxprt_rdma *); -extern void svc_rq_reap(struct svcxprt_rdma *); - extern struct svc_xprt_class svc_rdma_class; #ifdef CONFIG_SUNRPC_BACKCHANNEL extern struct svc_xprt_class svc_rdma_bc_class; -- cgit v1.2.3 From 07e9a6325a35fb9655f7b52e2b9dc632da6eef51 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Sat, 28 Mar 2020 13:43:22 -0400 Subject: SUNRPC: Add helpers for decoding list discriminators symbolically Use these helpers in a few spots to demonstrate their use. The remaining open-coded discriminator checks in rpcrdma will be addressed in subsequent patches. 
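A minimal usage sketch, illustrative only: decoding an optional RPC/RDMA item with the new discriminator helpers instead of comparing against xdr_zero by hand. example_decode_discriminator() is an invented name.

        /* Illustrative sketch only -- not part of this patch. */
        static int example_decode_discriminator(struct xdr_stream *xdr, bool *present)
        {
                __be32 *p = xdr_inline_decode(xdr, sizeof(*p));

                if (unlikely(!p))
                        return -EIO;
                /* an absent list or chunk is encoded as a zero discriminator */
                *present = xdr_item_is_present(p);
                return 0;
        }
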
Signed-off-by: Chuck Lever --- include/linux/sunrpc/xdr.h | 26 ++++++++++++++++++++++++++ net/sunrpc/xprtrdma/rpc_rdma.c | 12 ++++++------ net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 17 ++++++++--------- 3 files changed, 40 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 22c207b2425f..5a6a81b7cd9f 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -474,6 +474,32 @@ xdr_stream_encode_uint32_array(struct xdr_stream *xdr, return ret; } +/** + * xdr_item_is_absent - symbolically handle XDR discriminators + * @p: pointer to undecoded discriminator + * + * Return values: + * %true if the following XDR item is absent + * %false if the following XDR item is present + */ +static inline bool xdr_item_is_absent(const __be32 *p) +{ + return *p == xdr_zero; +} + +/** + * xdr_item_is_present - symbolically handle XDR discriminators + * @p: pointer to undecoded discriminator + * + * Return values: + * %true if the following XDR item is present + * %false if the following XDR item is absent + */ +static inline bool xdr_item_is_present(const __be32 *p) +{ + return *p != xdr_zero; +} + /** * xdr_stream_decode_u32 - Decode a 32-bit integer * @xdr: pointer to xdr_stream diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index 935bbef2f7be..feecd1f55f18 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -1133,11 +1133,11 @@ rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep) p = xdr_inline_decode(xdr, 0); /* Chunk lists */ - if (*p++ != xdr_zero) + if (xdr_item_is_present(p++)) return false; - if (*p++ != xdr_zero) + if (xdr_item_is_present(p++)) return false; - if (*p++ != xdr_zero) + if (xdr_item_is_present(p++)) return false; /* RPC header */ @@ -1215,7 +1215,7 @@ static int decode_read_list(struct xdr_stream *xdr) p = xdr_inline_decode(xdr, sizeof(*p)); if (unlikely(!p)) return -EIO; - if (unlikely(*p != xdr_zero)) + if (unlikely(xdr_item_is_present(p))) return -EIO; return 0; } @@ -1234,7 +1234,7 @@ static int decode_write_list(struct xdr_stream *xdr, u32 *length) p = xdr_inline_decode(xdr, sizeof(*p)); if (unlikely(!p)) return -EIO; - if (*p == xdr_zero) + if (xdr_item_is_absent(p)) break; if (!first) return -EIO; @@ -1256,7 +1256,7 @@ static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length) return -EIO; *length = 0; - if (*p != xdr_zero) + if (xdr_item_is_present(p)) if (decode_write_chunk(xdr, length)) return -EIO; return 0; diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index c072ce61b393..5e78067889f3 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -419,7 +419,7 @@ static bool xdr_check_read_list(struct svc_rdma_recv_ctxt *rctxt) len = 0; first = true; - while (*p != xdr_zero) { + while (xdr_item_is_present(p)) { p = xdr_inline_decode(&rctxt->rc_stream, rpcrdma_readseg_maxsz * sizeof(*p)); if (!p) @@ -500,7 +500,7 @@ static bool xdr_check_write_list(struct svc_rdma_recv_ctxt *rctxt) if (!p) return false; rctxt->rc_write_list = p; - while (*p != xdr_zero) { + while (xdr_item_is_present(p)) { if (!xdr_check_write_chunk(rctxt, MAX_BYTES_WRITE_CHUNK)) return false; ++chcount; @@ -532,12 +532,11 @@ static bool xdr_check_reply_chunk(struct svc_rdma_recv_ctxt *rctxt) p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p)); if (!p) return false; - rctxt->rc_reply_chunk = p; - if (*p != xdr_zero) { + rctxt->rc_reply_chunk = 
NULL; + if (xdr_item_is_present(p)) { if (!xdr_check_write_chunk(rctxt, MAX_BYTES_SPECIAL_CHUNK)) return false; - } else { - rctxt->rc_reply_chunk = NULL; + rctxt->rc_reply_chunk = p; } return true; } @@ -568,7 +567,7 @@ static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma, p += rpcrdma_fixed_maxsz; /* Read list */ - while (*p++ != xdr_zero) { + while (xdr_item_is_present(p++)) { p++; /* position */ if (inv_rkey == xdr_zero) inv_rkey = *p; @@ -578,7 +577,7 @@ static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma, } /* Write list */ - while (*p++ != xdr_zero) { + while (xdr_item_is_present(p++)) { segcount = be32_to_cpup(p++); for (i = 0; i < segcount; i++) { if (inv_rkey == xdr_zero) @@ -590,7 +589,7 @@ static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma, } /* Reply chunk */ - if (*p++ != xdr_zero) { + if (xdr_item_is_present(p++)) { segcount = be32_to_cpup(p++); for (i = 0; i < segcount; i++) { if (inv_rkey == xdr_zero) -- cgit v1.2.3 From f60a08697d28b138c73b14c3204947bc8e637197 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Sun, 29 Mar 2020 16:44:13 -0400 Subject: svcrdma: Add common XDR decoders for RDMA and Read segments Clean up: De-duplicate some code. Signed-off-by: Chuck Lever --- include/linux/sunrpc/rpc_rdma.h | 37 ++++++++++++++++++++++++++++++++ net/sunrpc/xprtrdma/frwr_ops.c | 1 - net/sunrpc/xprtrdma/rpc_rdma.c | 5 +---- net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 4 +--- net/sunrpc/xprtrdma/svc_rdma_rw.c | 37 ++++++++++++++------------------ net/sunrpc/xprtrdma/svc_rdma_sendto.c | 5 +---- net/sunrpc/xprtrdma/svc_rdma_transport.c | 1 - 7 files changed, 56 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h index 320c672d84de..db50380f64f4 100644 --- a/include/linux/sunrpc/rpc_rdma.h +++ b/include/linux/sunrpc/rpc_rdma.h @@ -124,4 +124,41 @@ rpcrdma_decode_buffer_size(u8 val) return ((unsigned int)val + 1) << 10; } +/** + * xdr_decode_rdma_segment - Decode contents of an RDMA segment + * @p: Pointer to the undecoded RDMA segment + * @handle: Upon return, the RDMA handle + * @length: Upon return, the RDMA length + * @offset: Upon return, the RDMA offset + * + * Return value: + * Pointer to the XDR item that follows the RDMA segment + */ +static inline __be32 *xdr_decode_rdma_segment(__be32 *p, u32 *handle, + u32 *length, u64 *offset) +{ + *handle = be32_to_cpup(p++); + *length = be32_to_cpup(p++); + return xdr_decode_hyper(p, offset); +} + +/** + * xdr_decode_read_segment - Decode contents of a Read segment + * @p: Pointer to the undecoded Read segment + * @position: Upon return, the segment's position + * @handle: Upon return, the RDMA handle + * @length: Upon return, the RDMA length + * @offset: Upon return, the RDMA offset + * + * Return value: + * Pointer to the XDR item that follows the Read segment + */ +static inline __be32 *xdr_decode_read_segment(__be32 *p, u32 *position, + u32 *handle, u32 *length, + u64 *offset) +{ + *position = be32_to_cpup(p++); + return xdr_decode_rdma_segment(p, handle, length, offset); +} + #endif /* _LINUX_SUNRPC_RPC_RDMA_H */ diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index b647562a26dd..7f94c9a19fd3 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c @@ -40,7 +40,6 @@ * New MRs are created on demand. 
*/ -#include #include #include "xprt_rdma.h" diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index feecd1f55f18..5461f01eeca6 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -1176,10 +1176,7 @@ static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length) if (unlikely(!p)) return -EIO; - handle = be32_to_cpup(p++); - *length = be32_to_cpup(p++); - xdr_decode_hyper(p, &offset); - + xdr_decode_rdma_segment(p, &handle, length, &offset); trace_xprtrdma_decode_seg(handle, *length, offset); return 0; } diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index 5e78067889f3..c0587d3cd389 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -466,9 +466,7 @@ static bool xdr_check_write_chunk(struct svc_rdma_recv_ctxt *rctxt, u32 maxlen) if (!p) return false; - handle = be32_to_cpup(p++); - length = be32_to_cpup(p++); - xdr_decode_hyper(p, &offset); + xdr_decode_rdma_segment(p, &handle, &length, &offset); trace_svcrdma_decode_wseg(handle, length, offset); total += length; diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c index 83806fa94def..2038b1b286dd 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_rw.c +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c @@ -7,6 +7,7 @@ #include +#include #include #include @@ -441,34 +442,32 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info, seg = info->wi_segs + info->wi_seg_no * rpcrdma_segment_maxsz; do { unsigned int write_len; - u32 seg_length, seg_handle; - u64 seg_offset; + u32 handle, length; + u64 offset; if (info->wi_seg_no >= info->wi_nsegs) goto out_overflow; - seg_handle = be32_to_cpup(seg); - seg_length = be32_to_cpup(seg + 1); - xdr_decode_hyper(seg + 2, &seg_offset); - seg_offset += info->wi_seg_off; + xdr_decode_rdma_segment(seg, &handle, &length, &offset); + offset += info->wi_seg_off; - write_len = min(remaining, seg_length - info->wi_seg_off); + write_len = min(remaining, length - info->wi_seg_off); ctxt = svc_rdma_get_rw_ctxt(rdma, (write_len >> PAGE_SHIFT) + 2); if (!ctxt) return -ENOMEM; constructor(info, write_len, ctxt); - ret = svc_rdma_rw_ctx_init(rdma, ctxt, seg_offset, seg_handle, + ret = svc_rdma_rw_ctx_init(rdma, ctxt, offset, handle, DMA_TO_DEVICE); if (ret < 0) return -EIO; - trace_svcrdma_send_wseg(seg_handle, write_len, seg_offset); + trace_svcrdma_send_wseg(handle, write_len, offset); list_add(&ctxt->rw_list, &cc->cc_rwctxts); cc->cc_sqecount += ret; - if (write_len == seg_length - info->wi_seg_off) { + if (write_len == length - info->wi_seg_off) { seg += 4; info->wi_seg_no++; info->wi_seg_off = 0; @@ -689,21 +688,17 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp, ret = -EINVAL; info->ri_chunklen = 0; while (*p++ != xdr_zero && be32_to_cpup(p++) == info->ri_position) { - u32 rs_handle, rs_length; - u64 rs_offset; + u32 handle, length; + u64 offset; - rs_handle = be32_to_cpup(p++); - rs_length = be32_to_cpup(p++); - p = xdr_decode_hyper(p, &rs_offset); - - ret = svc_rdma_build_read_segment(info, rqstp, - rs_handle, rs_length, - rs_offset); + p = xdr_decode_rdma_segment(p, &handle, &length, &offset); + ret = svc_rdma_build_read_segment(info, rqstp, handle, length, + offset); if (ret < 0) break; - trace_svcrdma_send_rseg(rs_handle, rs_length, rs_offset); - info->ri_chunklen += rs_length; + trace_svcrdma_send_rseg(handle, length, offset); + info->ri_chunklen += length; } return ret; diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c 
b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index f985f548346a..a78f1d22e9bb 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -106,7 +106,6 @@ #include #include -#include #include #include "xprt_rdma.h" @@ -375,9 +374,7 @@ static ssize_t svc_rdma_encode_write_segment(__be32 *src, if (!p) return -EMSGSIZE; - handle = be32_to_cpup(src++); - length = be32_to_cpup(src++); - xdr_decode_hyper(src, &offset); + xdr_decode_rdma_segment(src, &handle, &length, &offset); *p++ = cpu_to_be32(handle); if (*remaining < length) { diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index d38be57b00ed..3da7901a49e6 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -55,7 +55,6 @@ #include #include -#include #include #include -- cgit v1.2.3 From 379c3bc6b4eb989ee37c4ce8ab403719e06fe35f Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 7 Apr 2020 15:32:14 -0400 Subject: svcrdma: Add common XDR encoders for RDMA and Read segments Clean up: De-duplicate some code. Signed-off-by: Chuck Lever --- include/linux/sunrpc/rpc_rdma.h | 37 +++++++++++++++++++++++++++++++++++ net/sunrpc/xprtrdma/rpc_rdma.c | 14 +++---------- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 4 +--- 3 files changed, 41 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h index db50380f64f4..4af31bbc8802 100644 --- a/include/linux/sunrpc/rpc_rdma.h +++ b/include/linux/sunrpc/rpc_rdma.h @@ -124,6 +124,43 @@ rpcrdma_decode_buffer_size(u8 val) return ((unsigned int)val + 1) << 10; } +/** + * xdr_encode_rdma_segment - Encode contents of an RDMA segment + * @p: Pointer into a send buffer + * @handle: The RDMA handle to encode + * @length: The RDMA length to encode + * @offset: The RDMA offset to encode + * + * Return value: + * Pointer to the XDR position that follows the encoded RDMA segment + */ +static inline __be32 *xdr_encode_rdma_segment(__be32 *p, u32 handle, + u32 length, u64 offset) +{ + *p++ = cpu_to_be32(handle); + *p++ = cpu_to_be32(length); + return xdr_encode_hyper(p, offset); +} + +/** + * xdr_encode_read_segment - Encode contents of a Read segment + * @p: Pointer into a send buffer + * @position: The position to encode + * @handle: The RDMA handle to encode + * @length: The RDMA length to encode + * @offset: The RDMA offset to encode + * + * Return value: + * Pointer to the XDR position that follows the encoded Read segment + */ +static inline __be32 *xdr_encode_read_segment(__be32 *p, u32 position, + u32 handle, u32 length, + u64 offset) +{ + *p++ = cpu_to_be32(position); + return xdr_encode_rdma_segment(p, handle, length, offset); +} + /** * xdr_decode_rdma_segment - Decode contents of an RDMA segment * @p: Pointer to the undecoded RDMA segment diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index 5461f01eeca6..73ed51893175 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -275,14 +275,6 @@ out: return n; } -static void -xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr) -{ - *iptr++ = cpu_to_be32(mr->mr_handle); - *iptr++ = cpu_to_be32(mr->mr_length); - xdr_encode_hyper(iptr, mr->mr_offset); -} - static int encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr) { @@ -292,7 +284,7 @@ encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr) if (unlikely(!p)) return -EMSGSIZE; - xdr_encode_rdma_segment(p, mr); + 
xdr_encode_rdma_segment(p, mr->mr_handle, mr->mr_length, mr->mr_offset); return 0; } @@ -307,8 +299,8 @@ encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr, return -EMSGSIZE; *p++ = xdr_one; /* Item present */ - *p++ = cpu_to_be32(position); - xdr_encode_rdma_segment(p, mr); + xdr_encode_read_segment(p, position, mr->mr_handle, mr->mr_length, + mr->mr_offset); return 0; } diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index a78f1d22e9bb..38d8f0ee35ec 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -376,7 +376,6 @@ static ssize_t svc_rdma_encode_write_segment(__be32 *src, xdr_decode_rdma_segment(src, &handle, &length, &offset); - *p++ = cpu_to_be32(handle); if (*remaining < length) { /* segment only partly filled */ length = *remaining; @@ -385,8 +384,7 @@ static ssize_t svc_rdma_encode_write_segment(__be32 *src, /* entire segment was consumed */ *remaining -= length; } - *p++ = cpu_to_be32(length); - xdr_encode_hyper(p, offset); + xdr_encode_rdma_segment(p, handle, length, offset); trace_svcrdma_encode_wseg(handle, length, offset); return len; -- cgit v1.2.3 From f7bd657b55e3484cadc37a6439de23d2fd703bd6 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 19 May 2020 09:30:32 -0400 Subject: svcrdma: Introduce infrastructure to support completion IDs The goal is to replace CQE kernel memory addresses in completion- related tracepoints. Each completion ID matches an incoming Send or Receive completion to a Completion Queue and to a previous ib_post_*(). The ID can then be displayed in an error message or recorded in a trace record. Signed-off-by: Chuck Lever --- include/linux/sunrpc/rpc_rdma_cid.h | 24 +++++++++++++++++++++ include/trace/events/rpcrdma.h | 43 +++++++++++++++++++++++++++++++++++++ 2 files changed, 67 insertions(+) create mode 100644 include/linux/sunrpc/rpc_rdma_cid.h (limited to 'include') diff --git a/include/linux/sunrpc/rpc_rdma_cid.h b/include/linux/sunrpc/rpc_rdma_cid.h new file mode 100644 index 000000000000..be24ab2baa6a --- /dev/null +++ b/include/linux/sunrpc/rpc_rdma_cid.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * * Copyright (c) 2020, Oracle and/or its affiliates. + */ + +#ifndef RPC_RDMA_CID_H +#define RPC_RDMA_CID_H + +/* + * The rpc_rdma_cid struct records completion ID information. A + * completion ID matches an incoming Send or Receive completion + * to a Completion Queue and to a previous ib_post_*(). The ID + * can then be displayed in an error message or recorded in a + * trace record. + * + * This struct is shared between the server and client RPC/RDMA + * transport implementations. 
+ */ +struct rpc_rdma_cid { + u32 ci_queue_id; + int ci_completion_id; +}; + +#endif /* RPC_RDMA_CID_H */ diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 0eff80dee066..70ab989aa3b7 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -11,6 +11,7 @@ #define _TRACE_RPCRDMA_H #include +#include #include #include @@ -18,6 +19,48 @@ ** Event classes **/ +DECLARE_EVENT_CLASS(rpcrdma_completion_class, + TP_PROTO( + const struct ib_wc *wc, + const struct rpc_rdma_cid *cid + ), + + TP_ARGS(wc, cid), + + TP_STRUCT__entry( + __field(u32, cq_id) + __field(int, completion_id) + __field(unsigned long, status) + __field(unsigned int, vendor_err) + ), + + TP_fast_assign( + __entry->cq_id = cid->ci_queue_id; + __entry->completion_id = cid->ci_completion_id; + __entry->status = wc->status; + if (wc->status) + __entry->vendor_err = wc->vendor_err; + else + __entry->vendor_err = 0; + ), + + TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)", + __entry->cq_id, __entry->completion_id, + rdma_show_wc_status(__entry->status), + __entry->status, __entry->vendor_err + ) +); + +#define DEFINE_COMPLETION_EVENT(name) \ + DEFINE_EVENT(rpcrdma_completion_class, name, \ + TP_PROTO( \ + const struct ib_wc *wc, \ + const struct rpc_rdma_cid *cid \ + ), \ + TP_ARGS(wc, cid)) + +DEFINE_COMPLETION_EVENT(dummy); + DECLARE_EVENT_CLASS(xprtrdma_reply_event, TP_PROTO( const struct rpcrdma_rep *rep -- cgit v1.2.3 From 9b3bcf8c5c134038e30624db5b57992ae50b80a9 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Wed, 29 Apr 2020 16:22:26 -0400 Subject: svcrdma: Introduce Receive completion IDs Set up a completion ID in each svc_rdma_recv_ctxt. The ID is used to match an incoming Receive completion to a transport and to a previous ib_post_recv(). 
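A minimal, stand-alone sketch of that scheme may help: each posted Receive takes a fresh (queue id, counter) pair, so an error message or trace record can identify the completion without exposing a CQE kernel address. This is illustrative user-space C, not the kernel implementation; only the two field names mirror struct rpc_rdma_cid, and the queue id 42 is a made-up stand-in for the CQ resource id.

/*
 * Sketch only: mimics the (queue id, per-transport counter) pairing
 * that svc_rdma_recv_cid_init() performs in the patch below.
 */
#include <stdatomic.h>
#include <stdio.h>

struct sketch_cid {
	unsigned int ci_queue_id;	/* which completion queue */
	int ci_completion_id;		/* which post on that queue */
};

static atomic_int completion_ids = 0;	/* per-transport counter in the real code */

static void sketch_cid_init(struct sketch_cid *cid, unsigned int cq_id)
{
	cid->ci_queue_id = cq_id;
	/* fetch-and-add returns the old value; +1 matches atomic_inc_return() */
	cid->ci_completion_id = atomic_fetch_add(&completion_ids, 1) + 1;
}

int main(void)
{
	struct sketch_cid cid;

	sketch_cid_init(&cid, 42);	/* 42 stands in for the CQ resource id */
	printf("cq.id=%u cid=%d\n", cid.ci_queue_id, cid.ci_completion_id);
	return 0;
}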
Signed-off-by: Chuck Lever --- include/linux/sunrpc/svc_rdma.h | 4 +++ include/trace/events/rpcrdma.h | 51 +++++++++++++-------------------- net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 15 ++++++++-- 3 files changed, 36 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index d28ca1b6f2eb..c3c1e46f510f 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -46,6 +46,7 @@ #include #include #include +#include #include #include @@ -109,6 +110,8 @@ struct svcxprt_rdma { struct work_struct sc_work; struct llist_head sc_recv_ctxts; + + atomic_t sc_completion_ids; }; /* sc_flags */ #define RDMAXPRT_CONN_PENDING 3 @@ -129,6 +132,7 @@ struct svc_rdma_recv_ctxt { struct list_head rc_list; struct ib_recv_wr rc_recv_wr; struct ib_cqe rc_cqe; + struct rpc_rdma_cid rc_cid; struct ib_sge rc_recv_sge; void *rc_recv_buf; struct xdr_buf rc_arg; diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 70ab989aa3b7..a0330a557e34 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -59,8 +59,6 @@ DECLARE_EVENT_CLASS(rpcrdma_completion_class, ), \ TP_ARGS(wc, cid)) -DEFINE_COMPLETION_EVENT(dummy); - DECLARE_EVENT_CLASS(xprtrdma_reply_event, TP_PROTO( const struct rpcrdma_rep *rep @@ -1849,57 +1847,48 @@ DEFINE_SENDCOMP_EVENT(send); TRACE_EVENT(svcrdma_post_recv, TP_PROTO( - const struct ib_recv_wr *wr, - int status + const struct svc_rdma_recv_ctxt *ctxt ), - TP_ARGS(wr, status), + TP_ARGS(ctxt), TP_STRUCT__entry( - __field(const void *, cqe) - __field(int, status) + __field(u32, cq_id) + __field(int, completion_id) ), TP_fast_assign( - __entry->cqe = wr->wr_cqe; - __entry->status = status; + __entry->cq_id = ctxt->rc_cid.ci_queue_id; + __entry->completion_id = ctxt->rc_cid.ci_completion_id; ), - TP_printk("cqe=%p status=%d", - __entry->cqe, __entry->status + TP_printk("cq.id=%d cid=%d", + __entry->cq_id, __entry->completion_id ) ); -TRACE_EVENT(svcrdma_wc_receive, +DEFINE_COMPLETION_EVENT(svcrdma_wc_receive); + +TRACE_EVENT(svcrdma_rq_post_err, TP_PROTO( - const struct ib_wc *wc + const struct svcxprt_rdma *rdma, + int status ), - TP_ARGS(wc), + TP_ARGS(rdma, status), TP_STRUCT__entry( - __field(const void *, cqe) - __field(u32, byte_len) - __field(unsigned int, status) - __field(u32, vendor_err) + __field(int, status) + __string(addr, rdma->sc_xprt.xpt_remotebuf) ), TP_fast_assign( - __entry->cqe = wc->wr_cqe; - __entry->status = wc->status; - if (wc->status) { - __entry->byte_len = 0; - __entry->vendor_err = wc->vendor_err; - } else { - __entry->byte_len = wc->byte_len; - __entry->vendor_err = 0; - } + __entry->status = status; + __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); ), - TP_printk("cqe=%p byte_len=%u status=%s (%u/0x%x)", - __entry->cqe, __entry->byte_len, - rdma_show_wc_status(__entry->status), - __entry->status, __entry->vendor_err + TP_printk("addr=%s status=%d", + __get_str(addr), __entry->status ) ); diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index c0587d3cd389..e6d7401232d2 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -117,6 +117,13 @@ svc_rdma_next_recv_ctxt(struct list_head *list) rc_list); } +static void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma, + struct rpc_rdma_cid *cid) +{ + cid->ci_queue_id = rdma->sc_rq_cq->res.id; + cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids); +} + static struct 
svc_rdma_recv_ctxt * svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma) { @@ -135,6 +142,8 @@ svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma) if (ib_dma_mapping_error(rdma->sc_pd->device, addr)) goto fail2; + svc_rdma_recv_cid_init(rdma, &ctxt->rc_cid); + ctxt->rc_recv_wr.next = NULL; ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe; ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge; @@ -249,13 +258,14 @@ static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma, int ret; svc_xprt_get(&rdma->sc_xprt); + trace_svcrdma_post_recv(ctxt); ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL); - trace_svcrdma_post_recv(&ctxt->rc_recv_wr, ret); if (ret) goto err_post; return 0; err_post: + trace_svcrdma_rq_post_err(rdma, ret); svc_rdma_recv_ctxt_put(rdma, ctxt); svc_xprt_put(&rdma->sc_xprt); return ret; @@ -309,11 +319,10 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc) struct ib_cqe *cqe = wc->wr_cqe; struct svc_rdma_recv_ctxt *ctxt; - trace_svcrdma_wc_receive(wc); - /* WARNING: Only wc->wr_cqe and wc->status are reliable */ ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe); + trace_svcrdma_wc_receive(wc, &ctxt->rc_cid); if (wc->status != IB_WC_SUCCESS) goto flushed; -- cgit v1.2.3 From 007140ee9b4fc4e59538677799c916890a2f13e2 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Wed, 29 Apr 2020 17:16:31 -0400 Subject: svcrdma: Record Receive completion ID in svc_rdma_decode_rqst When recording a trace event in the Receive path, tie decoding results and errors to an incoming Receive completion. Signed-off-by: Chuck Lever --- include/trace/events/rpcrdma.h | 34 ++++++++++++++++++++++++++------- net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 12 ++++++------ 2 files changed, 33 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index a0330a557e34..df49ae5d447b 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -1369,13 +1369,16 @@ TRACE_DEFINE_ENUM(RDMA_ERROR); TRACE_EVENT(svcrdma_decode_rqst, TP_PROTO( + const struct svc_rdma_recv_ctxt *ctxt, __be32 *p, unsigned int hdrlen ), - TP_ARGS(p, hdrlen), + TP_ARGS(ctxt, p, hdrlen), TP_STRUCT__entry( + __field(u32, cq_id) + __field(int, completion_id) __field(u32, xid) __field(u32, vers) __field(u32, proc) @@ -1384,6 +1387,8 @@ TRACE_EVENT(svcrdma_decode_rqst, ), TP_fast_assign( + __entry->cq_id = ctxt->rc_cid.ci_queue_id; + __entry->completion_id = ctxt->rc_cid.ci_completion_id; __entry->xid = be32_to_cpup(p++); __entry->vers = be32_to_cpup(p++); __entry->credits = be32_to_cpup(p++); @@ -1391,37 +1396,48 @@ TRACE_EVENT(svcrdma_decode_rqst, __entry->hdrlen = hdrlen; ), - TP_printk("xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u", + TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u", + __entry->cq_id, __entry->completion_id, __entry->xid, __entry->vers, __entry->credits, show_rpcrdma_proc(__entry->proc), __entry->hdrlen) ); TRACE_EVENT(svcrdma_decode_short_err, TP_PROTO( + const struct svc_rdma_recv_ctxt *ctxt, unsigned int hdrlen ), - TP_ARGS(hdrlen), + TP_ARGS(ctxt, hdrlen), TP_STRUCT__entry( + __field(u32, cq_id) + __field(int, completion_id) __field(unsigned int, hdrlen) ), TP_fast_assign( + __entry->cq_id = ctxt->rc_cid.ci_queue_id; + __entry->completion_id = ctxt->rc_cid.ci_completion_id; __entry->hdrlen = hdrlen; ), - TP_printk("hdrlen=%u", __entry->hdrlen) + TP_printk("cq.id=%u cid=%d hdrlen=%u", + __entry->cq_id, __entry->completion_id, + __entry->hdrlen) ); 
DECLARE_EVENT_CLASS(svcrdma_badreq_event, TP_PROTO( + const struct svc_rdma_recv_ctxt *ctxt, __be32 *p ), - TP_ARGS(p), + TP_ARGS(ctxt, p), TP_STRUCT__entry( + __field(u32, cq_id) + __field(int, completion_id) __field(u32, xid) __field(u32, vers) __field(u32, proc) @@ -1429,13 +1445,16 @@ DECLARE_EVENT_CLASS(svcrdma_badreq_event, ), TP_fast_assign( + __entry->cq_id = ctxt->rc_cid.ci_queue_id; + __entry->completion_id = ctxt->rc_cid.ci_completion_id; __entry->xid = be32_to_cpup(p++); __entry->vers = be32_to_cpup(p++); __entry->credits = be32_to_cpup(p++); __entry->proc = be32_to_cpup(p); ), - TP_printk("xid=0x%08x vers=%u credits=%u proc=%u", + TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u", + __entry->cq_id, __entry->completion_id, __entry->xid, __entry->vers, __entry->credits, __entry->proc) ); @@ -1443,9 +1462,10 @@ DECLARE_EVENT_CLASS(svcrdma_badreq_event, DEFINE_EVENT(svcrdma_badreq_event, \ svcrdma_decode_##name##_err, \ TP_PROTO( \ + const struct svc_rdma_recv_ctxt *ctxt, \ __be32 *p \ ), \ - TP_ARGS(p)) + TP_ARGS(ctxt, p)) DEFINE_BADREQ_EVENT(badvers); DEFINE_BADREQ_EVENT(drop); diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index e6d7401232d2..d5ec85cb652c 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -667,27 +667,27 @@ static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg, hdr_len = xdr_stream_pos(&rctxt->rc_stream); rq_arg->head[0].iov_len -= hdr_len; rq_arg->len -= hdr_len; - trace_svcrdma_decode_rqst(rdma_argp, hdr_len); + trace_svcrdma_decode_rqst(rctxt, rdma_argp, hdr_len); return hdr_len; out_short: - trace_svcrdma_decode_short_err(rq_arg->len); + trace_svcrdma_decode_short_err(rctxt, rq_arg->len); return -EINVAL; out_version: - trace_svcrdma_decode_badvers_err(rdma_argp); + trace_svcrdma_decode_badvers_err(rctxt, rdma_argp); return -EPROTONOSUPPORT; out_drop: - trace_svcrdma_decode_drop_err(rdma_argp); + trace_svcrdma_decode_drop_err(rctxt, rdma_argp); return 0; out_proc: - trace_svcrdma_decode_badproc_err(rdma_argp); + trace_svcrdma_decode_badproc_err(rctxt, rdma_argp); return -EINVAL; out_inval: - trace_svcrdma_decode_parse_err(rdma_argp); + trace_svcrdma_decode_parse_err(rctxt, rdma_argp); return -EINVAL; } -- cgit v1.2.3 From 3ac56c2fb166fea25974d8c48bb4a72ee298361b Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 30 Apr 2020 13:47:07 -0400 Subject: svcrdma: Introduce Send completion IDs Set up a completion ID in each svc_rdma_send_ctxt. The ID is used to match an incoming Send completion to a transport and to a previous ib_post_send(). 
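The Send side relies on the same trick as the Receive side: the ib_cqe is embedded in the svc_rdma_send_ctxt, so the completion handler can map the reported CQE back to its context with container_of() and only then read the completion ID. A stand-alone sketch of that pointer arithmetic follows; the types are simplified stand-ins, and only the shape of the lookup matches the kernel code.

/*
 * Sketch only: recovering the containing context from the embedded
 * CQE address, as svc_rdma_wc_send() does in the patch below.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sketch_cqe { int unused; };

struct sketch_send_ctxt {
	int sc_cid;			/* stands in for struct rpc_rdma_cid */
	struct sketch_cqe sc_cqe;	/* address handed out as wr_cqe */
};

int main(void)
{
	struct sketch_send_ctxt ctxt = { .sc_cid = 7 };
	struct sketch_cqe *cqe = &ctxt.sc_cqe;	/* what the completion reports */
	struct sketch_send_ctxt *found =
		container_of(cqe, struct sketch_send_ctxt, sc_cqe);

	printf("cid=%d\n", found->sc_cid);	/* prints 7 */
	return 0;
}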
Signed-off-by: Chuck Lever --- include/linux/sunrpc/svc_rdma.h | 2 ++ include/trace/events/rpcrdma.h | 2 +- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 15 ++++++++++++--- 3 files changed, 15 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index c3c1e46f510f..c91e00bc937e 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -151,6 +151,8 @@ struct svc_rdma_recv_ctxt { struct svc_rdma_send_ctxt { struct list_head sc_list; + struct rpc_rdma_cid sc_cid; + struct ib_send_wr sc_send_wr; struct ib_cqe sc_cqe; struct xdr_buf sc_hdrbuf; diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index df49ae5d447b..782a4d826a4b 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -1863,7 +1863,7 @@ TRACE_EVENT(svcrdma_post_send, ) ); -DEFINE_SENDCOMP_EVENT(send); +DEFINE_COMPLETION_EVENT(svcrdma_wc_send); TRACE_EVENT(svcrdma_post_recv, TP_PROTO( diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 38d8f0ee35ec..c720dcf56231 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -122,6 +122,13 @@ svc_rdma_next_send_ctxt(struct list_head *list) sc_list); } +static void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma, + struct rpc_rdma_cid *cid) +{ + cid->ci_queue_id = rdma->sc_sq_cq->res.id; + cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids); +} + static struct svc_rdma_send_ctxt * svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma) { @@ -144,6 +151,8 @@ svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma) if (ib_dma_mapping_error(rdma->sc_pd->device, addr)) goto fail2; + svc_rdma_send_cid_init(rdma, &ctxt->sc_cid); + ctxt->sc_send_wr.next = NULL; ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe; ctxt->sc_send_wr.sg_list = ctxt->sc_sges; @@ -268,14 +277,14 @@ static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc) { struct svcxprt_rdma *rdma = cq->cq_context; struct ib_cqe *cqe = wc->wr_cqe; - struct svc_rdma_send_ctxt *ctxt; + struct svc_rdma_send_ctxt *ctxt = + container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe); - trace_svcrdma_wc_send(wc); + trace_svcrdma_wc_send(wc, &ctxt->sc_cid); atomic_inc(&rdma->sc_sq_avail); wake_up(&rdma->sc_send_wait); - ctxt = container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe); svc_rdma_send_ctxt_put(rdma, ctxt); if (unlikely(wc->status != IB_WC_SUCCESS)) { -- cgit v1.2.3 From 17f70f8dd52be3723250d21093403bb3a9f2162f Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Wed, 29 Apr 2020 11:05:33 -0400 Subject: svcrdma: Record send_ctxt completion ID in trace_svcrdma_post_send() First, refactor: Dereference the svc_rdma_send_ctxt inside svc_rdma_send() instead of at every call site. Then, it can be passed into trace_svcrdma_post_send() to get the proper completion ID. 
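In stand-alone form, the call-shape change looks like the sketch below: callers hand over the whole send context instead of a bare work request, and the function derives the WR itself, which also puts the completion ID within reach of the tracepoint. All names here are hypothetical; only the before/after shape mirrors the patch.

/* Sketch only: pass the container, dereference the member in one place. */
#include <stdio.h>

struct sketch_wr { int num_sge; };
struct sketch_ctxt {
	int sc_cid;			/* completion ID, wanted by the tracepoint */
	struct sketch_wr sc_send_wr;	/* the WR that actually gets posted */
};

/* before: static int do_send(struct sketch_wr *wr) - callers passed &ctxt->sc_send_wr */
static int do_send(struct sketch_ctxt *ctxt)	/* after: callers pass the context */
{
	struct sketch_wr *wr = &ctxt->sc_send_wr;	/* derived inside the callee */

	printf("posting: cid=%d num_sge=%d\n", ctxt->sc_cid, wr->num_sge);
	return 0;					/* stands in for the real post */
}

int main(void)
{
	struct sketch_ctxt ctxt = { .sc_cid = 3, .sc_send_wr = { .num_sge = 1 } };

	return do_send(&ctxt);
}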
Signed-off-by: Chuck Lever --- include/linux/sunrpc/svc_rdma.h | 3 ++- include/trace/events/rpcrdma.h | 18 +++++++++++------- net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 2 +- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 11 ++++++----- 4 files changed, 20 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index c91e00bc937e..9dc3a3b88391 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -196,7 +196,8 @@ extern struct svc_rdma_send_ctxt * svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma); extern void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt); -extern int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr); +extern int svc_rdma_send(struct svcxprt_rdma *rdma, + struct svc_rdma_send_ctxt *ctxt); extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *sctxt, const struct svc_rdma_recv_ctxt *rctxt, diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 782a4d826a4b..aeeba9188ed5 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -1839,27 +1839,31 @@ DECLARE_EVENT_CLASS(svcrdma_sendcomp_event, TRACE_EVENT(svcrdma_post_send, TP_PROTO( - const struct ib_send_wr *wr + const struct svc_rdma_send_ctxt *ctxt ), - TP_ARGS(wr), + TP_ARGS(ctxt), TP_STRUCT__entry( - __field(const void *, cqe) + __field(u32, cq_id) + __field(int, completion_id) __field(unsigned int, num_sge) __field(u32, inv_rkey) ), TP_fast_assign( - __entry->cqe = wr->wr_cqe; + const struct ib_send_wr *wr = &ctxt->sc_send_wr; + + __entry->cq_id = ctxt->sc_cid.ci_queue_id; + __entry->completion_id = ctxt->sc_cid.ci_completion_id; __entry->num_sge = wr->num_sge; __entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ? wr->ex.invalidate_rkey : 0; ), - TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x", - __entry->cqe, __entry->num_sge, - __entry->inv_rkey + TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x", + __entry->cq_id, __entry->completion_id, + __entry->num_sge, __entry->inv_rkey ) ); diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index 1ee73f7cf931..5e7c4ba9e147 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -87,7 +87,7 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma, */ get_page(virt_to_page(rqst->rq_buffer)); ctxt->sc_send_wr.opcode = IB_WR_SEND; - return svc_rdma_send(rdma, &ctxt->sc_send_wr); + return svc_rdma_send(rdma, ctxt); } /* Server-side transport endpoint wants a whole page for its send diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index c720dcf56231..73d46e8cdc16 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -298,13 +298,14 @@ static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc) /** * svc_rdma_send - Post a single Send WR * @rdma: transport on which to post the WR - * @wr: prepared Send WR to post + * @ctxt: send ctxt with a Send WR ready to post * * Returns zero the Send WR was posted successfully. Otherwise, a * negative errno is returned. 
*/ -int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr) +int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt) { + struct ib_send_wr *wr = &ctxt->sc_send_wr; int ret; might_sleep(); @@ -330,7 +331,7 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr) } svc_xprt_get(&rdma->sc_xprt); - trace_svcrdma_post_send(wr); + trace_svcrdma_post_send(ctxt); ret = ib_post_send(rdma->sc_qp, wr, NULL); if (ret) break; @@ -805,7 +806,7 @@ static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma, } else { sctxt->sc_send_wr.opcode = IB_WR_SEND; } - return svc_rdma_send(rdma, &sctxt->sc_send_wr); + return svc_rdma_send(rdma, sctxt); } /** @@ -869,7 +870,7 @@ void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma, sctxt->sc_send_wr.num_sge = 1; sctxt->sc_send_wr.opcode = IB_WR_SEND; sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len; - if (svc_rdma_send(rdma, &sctxt->sc_send_wr)) + if (svc_rdma_send(rdma, sctxt)) goto put_ctxt; return; -- cgit v1.2.3 From 6787f0bea27a24e4c306616565b02234ee558cfb Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Wed, 29 Apr 2020 17:25:36 -0400 Subject: svcrdma: Display chunk completion ID when posting a rw_ctxt Re-use the post_rw tracepoint (safely) to trace cc_info lifetime events, including completion IDs. Signed-off-by: Chuck Lever --- include/trace/events/rpcrdma.h | 56 +++++++++------------------------------ net/sunrpc/xprtrdma/svc_rdma_rw.c | 14 ++++++++-- 2 files changed, 24 insertions(+), 46 deletions(-) (limited to 'include') diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index aeeba9188ed5..abe942225637 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -1802,41 +1802,6 @@ TRACE_EVENT(svcrdma_send_err, ) ); -DECLARE_EVENT_CLASS(svcrdma_sendcomp_event, - TP_PROTO( - const struct ib_wc *wc - ), - - TP_ARGS(wc), - - TP_STRUCT__entry( - __field(const void *, cqe) - __field(unsigned int, status) - __field(unsigned int, vendor_err) - ), - - TP_fast_assign( - __entry->cqe = wc->wr_cqe; - __entry->status = wc->status; - if (wc->status) - __entry->vendor_err = wc->vendor_err; - else - __entry->vendor_err = 0; - ), - - TP_printk("cqe=%p status=%s (%u/0x%x)", - __entry->cqe, rdma_show_wc_status(__entry->status), - __entry->status, __entry->vendor_err - ) -); - -#define DEFINE_SENDCOMP_EVENT(name) \ - DEFINE_EVENT(svcrdma_sendcomp_event, svcrdma_wc_##name, \ - TP_PROTO( \ - const struct ib_wc *wc \ - ), \ - TP_ARGS(wc)) - TRACE_EVENT(svcrdma_post_send, TP_PROTO( const struct svc_rdma_send_ctxt *ctxt @@ -1916,31 +1881,34 @@ TRACE_EVENT(svcrdma_rq_post_err, ) ); -TRACE_EVENT(svcrdma_post_rw, +TRACE_EVENT(svcrdma_post_chunk, TP_PROTO( - const void *cqe, + const struct rpc_rdma_cid *cid, int sqecount ), - TP_ARGS(cqe, sqecount), + TP_ARGS(cid, sqecount), TP_STRUCT__entry( - __field(const void *, cqe) + __field(u32, cq_id) + __field(int, completion_id) __field(int, sqecount) ), TP_fast_assign( - __entry->cqe = cqe; + __entry->cq_id = cid->ci_queue_id; + __entry->completion_id = cid->ci_completion_id; __entry->sqecount = sqecount; ), - TP_printk("cqe=%p sqecount=%d", - __entry->cqe, __entry->sqecount + TP_printk("cq.id=%u cid=%d sqecount=%d", + __entry->cq_id, __entry->completion_id, + __entry->sqecount ) ); -DEFINE_SENDCOMP_EVENT(read); -DEFINE_SENDCOMP_EVENT(write); +DEFINE_COMPLETION_EVENT(svcrdma_wc_read); +DEFINE_COMPLETION_EVENT(svcrdma_wc_write); TRACE_EVENT(svcrdma_qp_error, TP_PROTO( diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c 
b/net/sunrpc/xprtrdma/svc_rdma_rw.c index 2038b1b286dd..c16d10601d65 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_rw.c +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c @@ -145,15 +145,24 @@ static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma, * demand, and not cached. */ struct svc_rdma_chunk_ctxt { + struct rpc_rdma_cid cc_cid; struct ib_cqe cc_cqe; struct svcxprt_rdma *cc_rdma; struct list_head cc_rwctxts; int cc_sqecount; }; +static void svc_rdma_cc_cid_init(struct svcxprt_rdma *rdma, + struct rpc_rdma_cid *cid) +{ + cid->ci_queue_id = rdma->sc_sq_cq->res.id; + cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids); +} + static void svc_rdma_cc_init(struct svcxprt_rdma *rdma, struct svc_rdma_chunk_ctxt *cc) { + svc_rdma_cc_cid_init(rdma, &cc->cc_cid); cc->cc_rdma = rdma; svc_xprt_get(&rdma->sc_xprt); @@ -237,7 +246,7 @@ static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc) struct svc_rdma_write_info *info = container_of(cc, struct svc_rdma_write_info, wi_cc); - trace_svcrdma_wc_write(wc); + trace_svcrdma_wc_write(wc, &cc->cc_cid); atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail); wake_up(&rdma->sc_send_wait); @@ -295,7 +304,7 @@ static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc) struct svc_rdma_read_info *info = container_of(cc, struct svc_rdma_read_info, ri_cc); - trace_svcrdma_wc_read(wc); + trace_svcrdma_wc_read(wc, &cc->cc_cid); atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail); wake_up(&rdma->sc_send_wait); @@ -351,6 +360,7 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc) do { if (atomic_sub_return(cc->cc_sqecount, &rdma->sc_sq_avail) > 0) { + trace_svcrdma_post_chunk(&cc->cc_cid, cc->cc_sqecount); ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr); if (ret) break; -- cgit v1.2.3 From 9a67fcc8f3fd1e294922f28f20003c31d7f6cfeb Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Tue, 23 Jun 2020 22:38:53 +0000 Subject: nfs: add client side only definitions for user xattrs Add client-side only definitions for user extended attributes (RFC8276). These are the access bits as used by the client code, and the CLNT procedure number definition. Signed-off-by: Frank van der Linden Signed-off-by: Trond Myklebust --- include/linux/nfs4.h | 5 +++++ include/linux/nfs_fs.h | 3 +++ 2 files changed, 8 insertions(+) (limited to 'include') diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index e6ca9d1d2e76..db13026ac7d1 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -553,6 +553,11 @@ enum { NFSPROC4_CLNT_LAYOUTERROR, NFSPROC4_CLNT_COPY_NOTIFY, + + NFSPROC4_CLNT_GETXATTR, + NFSPROC4_CLNT_SETXATTR, + NFSPROC4_CLNT_LISTXATTRS, + NFSPROC4_CLNT_REMOVEXATTR, }; /* nfs41 types */ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 6ee9119acc5d..b743988fcbd0 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -212,6 +212,9 @@ struct nfs4_copy_state { #define NFS_ACCESS_EXTEND 0x0008 #define NFS_ACCESS_DELETE 0x0010 #define NFS_ACCESS_EXECUTE 0x0020 +#define NFS_ACCESS_XAREAD 0x0040 +#define NFS_ACCESS_XAWRITE 0x0080 +#define NFS_ACCESS_XALIST 0x0100 /* * Cache validity bit flags -- cgit v1.2.3 From 04a5da690e8f2da23c2ac940f2921e3aa622db82 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Tue, 23 Jun 2020 22:38:54 +0000 Subject: NFSv4.2: define limits and sizes for user xattr handling Set limits for extended attributes (attribute value size and listxattr buffer size), based on the fs-independent limits (XATTR_*_MAX). Define the maximum XDR sizes for the RFC 8276 XATTR operations. 
In the case of operations that carry a larger payload (SETXATTR, GETXATTR, LISTXATTR), these exclude that payload, which is added as separate pages, like other operations do. Define, much like for read and write operations, the maximum overhead sizes for get/set/listxattr, and use them to limit the maximum payload size for those operations, in combination with the channel attributes. Signed-off-by: Frank van der Linden Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 19 ++++++++++-- fs/nfs/nfs42.h | 16 ++++++++++ fs/nfs/nfs42xdr.c | 74 +++++++++++++++++++++++++++++++++++++++++++++++ fs/nfs/nfs4_fs.h | 6 ++++ fs/nfs/nfs4client.c | 31 ++++++++++++++++++++ include/linux/nfs_fs_sb.h | 5 ++++ 6 files changed, 149 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index f1ff3076e4a4..055040bf1a8e 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -50,6 +50,7 @@ #include "nfs.h" #include "netns.h" #include "sysfs.h" +#include "nfs42.h" #define NFSDBG_FACILITY NFSDBG_CLIENT @@ -749,7 +750,7 @@ error: static void nfs_server_set_fsinfo(struct nfs_server *server, struct nfs_fsinfo *fsinfo) { - unsigned long max_rpc_payload; + unsigned long max_rpc_payload, raw_max_rpc_payload; /* Work out a lot of parameters */ if (server->rsize == 0) @@ -762,7 +763,9 @@ static void nfs_server_set_fsinfo(struct nfs_server *server, if (fsinfo->wtmax >= 512 && server->wsize > fsinfo->wtmax) server->wsize = nfs_block_size(fsinfo->wtmax, NULL); - max_rpc_payload = nfs_block_size(rpc_max_payload(server->client), NULL); + raw_max_rpc_payload = rpc_max_payload(server->client); + max_rpc_payload = nfs_block_size(raw_max_rpc_payload, NULL); + if (server->rsize > max_rpc_payload) server->rsize = max_rpc_payload; if (server->rsize > NFS_MAX_FILE_IO_SIZE) @@ -795,6 +798,18 @@ static void nfs_server_set_fsinfo(struct nfs_server *server, server->clone_blksize = fsinfo->clone_blksize; /* We're airborne Set socket buffersize */ rpc_setbufsize(server->client, server->wsize + 100, server->rsize + 100); + +#ifdef CONFIG_NFS_V4_2 + /* + * Defaults until limited by the session parameters. + */ + server->gxasize = min_t(unsigned int, raw_max_rpc_payload, + XATTR_SIZE_MAX); + server->sxasize = min_t(unsigned int, raw_max_rpc_payload, + XATTR_SIZE_MAX); + server->lxasize = min_t(unsigned int, raw_max_rpc_payload, + nfs42_listxattr_xdrsize(XATTR_LIST_MAX)); +#endif } /* diff --git a/fs/nfs/nfs42.h b/fs/nfs/nfs42.h index c891af949886..51de8ddc7d88 100644 --- a/fs/nfs/nfs42.h +++ b/fs/nfs/nfs42.h @@ -6,6 +6,8 @@ #ifndef __LINUX_FS_NFS_NFS4_2_H #define __LINUX_FS_NFS_NFS4_2_H +#include + /* * FIXME: four LAYOUTSTATS calls per compound at most! Do we need to support * more? Need to consider not to pre-alloc too much for a compound. @@ -36,5 +38,19 @@ static inline bool nfs42_files_from_same_server(struct file *in, return nfs4_check_serverowner_major_id(c_in->cl_serverowner, c_out->cl_serverowner); } + +/* + * Maximum XDR buffer size needed for a listxattr buffer of buflen size. + * + * The upper boundary is a buffer with all 1-byte sized attribute names. + * They would be 7 bytes long in the eventual buffer ("user.x\0"), and + * 8 bytes long XDR-encoded. + * + * Include the trailing eof word as well. 
+ */ +static inline u32 nfs42_listxattr_xdrsize(u32 buflen) +{ + return ((buflen / (XATTR_USER_PREFIX_LEN + 2)) * 8) + 4; +} #endif /* CONFIG_NFS_V4_2 */ #endif /* __LINUX_FS_NFS_NFS4_2_H */ diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c index c03f3246d6c5..6712daa9d85b 100644 --- a/fs/nfs/nfs42xdr.c +++ b/fs/nfs/nfs42xdr.c @@ -169,6 +169,80 @@ decode_clone_maxsz + \ decode_getattr_maxsz) +#ifdef CONFIG_NFS_V4_2 +/* Not limited by NFS itself, limited by the generic xattr code */ +#define nfs4_xattr_name_maxsz XDR_QUADLEN(XATTR_NAME_MAX) + +#define encode_getxattr_maxsz (op_encode_hdr_maxsz + 1 + \ + nfs4_xattr_name_maxsz) +#define decode_getxattr_maxsz (op_decode_hdr_maxsz + 1 + 1) +#define encode_setxattr_maxsz (op_encode_hdr_maxsz + \ + 1 + nfs4_xattr_name_maxsz + 1) +#define decode_setxattr_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz) +#define encode_listxattrs_maxsz (op_encode_hdr_maxsz + 2 + 1) +#define decode_listxattrs_maxsz (op_decode_hdr_maxsz + 2 + 1 + 1) +#define encode_removexattr_maxsz (op_encode_hdr_maxsz + 1 + \ + nfs4_xattr_name_maxsz) +#define decode_removexattr_maxsz (op_decode_hdr_maxsz + \ + decode_change_info_maxsz) + +#define NFS4_enc_getxattr_sz (compound_encode_hdr_maxsz + \ + encode_sequence_maxsz + \ + encode_putfh_maxsz + \ + encode_getxattr_maxsz) +#define NFS4_dec_getxattr_sz (compound_decode_hdr_maxsz + \ + decode_sequence_maxsz + \ + decode_putfh_maxsz + \ + decode_getxattr_maxsz) +#define NFS4_enc_setxattr_sz (compound_encode_hdr_maxsz + \ + encode_sequence_maxsz + \ + encode_putfh_maxsz + \ + encode_setxattr_maxsz) +#define NFS4_dec_setxattr_sz (compound_decode_hdr_maxsz + \ + decode_sequence_maxsz + \ + decode_putfh_maxsz + \ + decode_setxattr_maxsz) +#define NFS4_enc_listxattrs_sz (compound_encode_hdr_maxsz + \ + encode_sequence_maxsz + \ + encode_putfh_maxsz + \ + encode_listxattrs_maxsz) +#define NFS4_dec_listxattrs_sz (compound_decode_hdr_maxsz + \ + decode_sequence_maxsz + \ + decode_putfh_maxsz + \ + decode_listxattrs_maxsz) +#define NFS4_enc_removexattr_sz (compound_encode_hdr_maxsz + \ + encode_sequence_maxsz + \ + encode_putfh_maxsz + \ + encode_removexattr_maxsz) +#define NFS4_dec_removexattr_sz (compound_decode_hdr_maxsz + \ + decode_sequence_maxsz + \ + decode_putfh_maxsz + \ + decode_removexattr_maxsz) + +/* + * These values specify the maximum amount of data that is not + * associated with the extended attribute name or extended + * attribute list in the SETXATTR, GETXATTR and LISTXATTR + * respectively. 
+ */ +const u32 nfs42_maxsetxattr_overhead = ((RPC_MAX_HEADER_WITH_AUTH + + compound_encode_hdr_maxsz + + encode_sequence_maxsz + + encode_putfh_maxsz + 1 + + nfs4_xattr_name_maxsz) + * XDR_UNIT); + +const u32 nfs42_maxgetxattr_overhead = ((RPC_MAX_HEADER_WITH_AUTH + + compound_decode_hdr_maxsz + + decode_sequence_maxsz + + decode_putfh_maxsz + 1) * XDR_UNIT); + +const u32 nfs42_maxlistxattrs_overhead = ((RPC_MAX_HEADER_WITH_AUTH + + compound_decode_hdr_maxsz + + decode_sequence_maxsz + + decode_putfh_maxsz + 3) * XDR_UNIT); +#endif + static void encode_fallocate(struct xdr_stream *xdr, const struct nfs42_falloc_args *args) { diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 2b7f6dcd2eb8..526b3e70d57c 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -557,6 +557,12 @@ static inline void nfs4_unregister_sysctl(void) /* nfs4xdr.c */ extern const struct rpc_procinfo nfs4_procedures[]; +#ifdef CONFIG_NFS_V4_2 +extern const u32 nfs42_maxsetxattr_overhead; +extern const u32 nfs42_maxgetxattr_overhead; +extern const u32 nfs42_maxlistxattrs_overhead; +#endif + struct nfs4_mount_data; /* callback_xdr.c */ diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 0bd77cc1f639..c41cbd86612c 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -992,6 +992,36 @@ static void nfs4_session_limit_rwsize(struct nfs_server *server) #endif /* CONFIG_NFS_V4_1 */ } +/* + * Limit xattr sizes using the channel attributes. + */ +static void nfs4_session_limit_xasize(struct nfs_server *server) +{ +#ifdef CONFIG_NFS_V4_2 + struct nfs4_session *sess; + u32 server_gxa_sz; + u32 server_sxa_sz; + u32 server_lxa_sz; + + if (!nfs4_has_session(server->nfs_client)) + return; + + sess = server->nfs_client->cl_session; + + server_gxa_sz = sess->fc_attrs.max_resp_sz - nfs42_maxgetxattr_overhead; + server_sxa_sz = sess->fc_attrs.max_rqst_sz - nfs42_maxsetxattr_overhead; + server_lxa_sz = sess->fc_attrs.max_resp_sz - + nfs42_maxlistxattrs_overhead; + + if (server->gxasize > server_gxa_sz) + server->gxasize = server_gxa_sz; + if (server->sxasize > server_sxa_sz) + server->sxasize = server_sxa_sz; + if (server->lxasize > server_lxa_sz) + server->lxasize = server_lxa_sz; +#endif +} + static int nfs4_server_common_setup(struct nfs_server *server, struct nfs_fh *mntfh, bool auth_probe) { @@ -1039,6 +1069,7 @@ static int nfs4_server_common_setup(struct nfs_server *server, goto out; nfs4_session_limit_rwsize(server); + nfs4_session_limit_xasize(server); if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN) server->namelen = NFS4_MAXNAMLEN; diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 465fa98258a3..128e01acb4ca 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -163,6 +163,11 @@ struct nfs_server { unsigned int dtsize; /* readdir size */ unsigned short port; /* "port=" setting */ unsigned int bsize; /* server block size */ +#ifdef CONFIG_NFS_V4_2 + unsigned int gxasize; /* getxattr size */ + unsigned int sxasize; /* setxattr size */ + unsigned int lxasize; /* listxattr size */ +#endif unsigned int acregmin; /* attr cache timeouts */ unsigned int acregmax; unsigned int acdirmin; -- cgit v1.2.3 From b78ef845c35dbae25e57b598901a65b13d940c81 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Tue, 23 Jun 2020 22:38:55 +0000 Subject: NFSv4.2: query the server for extended attribute support Query the server for extended attribute support, and record it as the NFS_CAP_XATTR flag in the server capabilities. 
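A stand-alone sketch of the capability-flag pattern used here: the boolean decoded from the server's fsinfo becomes one bit in server->caps, which later code can test before issuing any *XATTR operation. The names below are hypothetical; only the (1U << 28) position echoes NFS_CAP_XATTR.

/* Sketch only: record a server capability as a bit and test it later. */
#include <stdio.h>

#define SK_CAP_XATTR	(1U << 28)	/* mirrors the NFS_CAP_XATTR bit */

struct sk_server { unsigned int caps; };

static void sk_set_fsinfo(struct sk_server *server, unsigned int xattr_support)
{
	if (xattr_support)		/* value decoded from the fsinfo attribute */
		server->caps |= SK_CAP_XATTR;
}

int main(void)
{
	struct sk_server srv = { .caps = 0 };

	sk_set_fsinfo(&srv, 1);
	printf("user xattrs supported: %s\n",
	       (srv.caps & SK_CAP_XATTR) ? "yes" : "no");
	return 0;
}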
Signed-off-by: Frank van der Linden Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 3 +++ fs/nfs/nfs4proc.c | 3 ++- fs/nfs/nfs4xdr.c | 25 +++++++++++++++++++++++++ include/linux/nfs_fs_sb.h | 1 + include/linux/nfs_xdr.h | 1 + 5 files changed, 32 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 055040bf1a8e..4b8cc93913f7 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -809,6 +809,9 @@ static void nfs_server_set_fsinfo(struct nfs_server *server, XATTR_SIZE_MAX); server->lxasize = min_t(unsigned int, raw_max_rpc_payload, nfs42_listxattr_xdrsize(XATTR_LIST_MAX)); + + if (fsinfo->xattr_support) + server->caps |= NFS_CAP_XATTR; #endif } diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index e32717fd1169..64e081459327 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -256,6 +256,7 @@ const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE | FATTR4_WORD1_FS_LAYOUT_TYPES, FATTR4_WORD2_LAYOUT_BLKSIZE | FATTR4_WORD2_CLONE_BLKSIZE + | FATTR4_WORD2_XATTR_SUPPORT }; const u32 nfs4_fs_locations_bitmap[3] = { @@ -3740,7 +3741,7 @@ static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL) #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL) -#define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_MODE_UMASK - 1UL) +#define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_XATTR_SUPPORT - 1UL) static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) { diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 47817ef0aadb..9e1b07640e9a 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -4201,6 +4201,26 @@ static int decode_attr_time_modify(struct xdr_stream *xdr, uint32_t *bitmap, str return status; } +static int decode_attr_xattrsupport(struct xdr_stream *xdr, uint32_t *bitmap, + uint32_t *res) +{ + __be32 *p; + + *res = 0; + if (unlikely(bitmap[2] & (FATTR4_WORD2_XATTR_SUPPORT - 1U))) + return -EIO; + if (likely(bitmap[2] & FATTR4_WORD2_XATTR_SUPPORT)) { + p = xdr_inline_decode(xdr, 4); + if (unlikely(!p)) + return -EIO; + *res = be32_to_cpup(p); + bitmap[2] &= ~FATTR4_WORD2_XATTR_SUPPORT; + } + dprintk("%s: XATTR support=%s\n", __func__, + *res == 0 ? 
"false" : "true"); + return 0; +} + static int verify_attr_len(struct xdr_stream *xdr, unsigned int savep, uint32_t attrlen) { unsigned int attrwords = XDR_QUADLEN(attrlen); @@ -4855,6 +4875,11 @@ static int decode_fsinfo(struct xdr_stream *xdr, struct nfs_fsinfo *fsinfo) if (status) goto xdr_error; + status = decode_attr_xattrsupport(xdr, bitmap, + &fsinfo->xattr_support); + if (status) + goto xdr_error; + status = verify_attr_len(xdr, savep, attrlen); xdr_error: dprintk("%s: xdr returned %d!\n", __func__, -status); diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 128e01acb4ca..7eae72a8762e 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -286,5 +286,6 @@ struct nfs_server { #define NFS_CAP_OFFLOAD_CANCEL (1U << 25) #define NFS_CAP_LAYOUTERROR (1U << 26) #define NFS_CAP_COPY_NOTIFY (1U << 27) +#define NFS_CAP_XATTR (1U << 28) #endif diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 5fd0a9ef425f..dee9b1cfa972 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -150,6 +150,7 @@ struct nfs_fsinfo { __u32 layouttype[NFS_MAX_LAYOUT_TYPES]; /* supported pnfs layout driver */ __u32 blksize; /* preferred pnfs io block size */ __u32 clone_blksize; /* granularity of a CLONE operation */ + __u32 xattr_support; /* User xattrs supported */ }; struct nfs_fsstat { -- cgit v1.2.3 From 3e1f02123fba086d32dfd5729e6f4e2b54654acc Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Tue, 23 Jun 2020 22:38:56 +0000 Subject: NFSv4.2: add client side XDR handling for extended attributes Define the argument and response structures that will be used for RFC 8276 extended attribute RPC calls, and implement the necessary functions to encode/decode the extended attribute operations. Signed-off-by: Frank van der Linden Signed-off-by: Trond Myklebust --- fs/nfs/nfs42xdr.c | 368 +++++++++++++++++++++++++++++++++++++++++++++++- fs/nfs/nfs4xdr.c | 6 + include/linux/nfs_xdr.h | 59 +++++++- 3 files changed, 430 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c index 6712daa9d85b..cc50085e151c 100644 --- a/fs/nfs/nfs42xdr.c +++ b/fs/nfs/nfs42xdr.c @@ -169,7 +169,6 @@ decode_clone_maxsz + \ decode_getattr_maxsz) -#ifdef CONFIG_NFS_V4_2 /* Not limited by NFS itself, limited by the generic xattr code */ #define nfs4_xattr_name_maxsz XDR_QUADLEN(XATTR_NAME_MAX) @@ -241,7 +240,6 @@ const u32 nfs42_maxlistxattrs_overhead = ((RPC_MAX_HEADER_WITH_AUTH + compound_decode_hdr_maxsz + decode_sequence_maxsz + decode_putfh_maxsz + 3) * XDR_UNIT); -#endif static void encode_fallocate(struct xdr_stream *xdr, const struct nfs42_falloc_args *args) @@ -407,6 +405,210 @@ static void encode_layouterror(struct xdr_stream *xdr, encode_device_error(xdr, &args->errors[0]); } +static void encode_setxattr(struct xdr_stream *xdr, + const struct nfs42_setxattrargs *arg, + struct compound_hdr *hdr) +{ + __be32 *p; + + BUILD_BUG_ON(XATTR_CREATE != SETXATTR4_CREATE); + BUILD_BUG_ON(XATTR_REPLACE != SETXATTR4_REPLACE); + + encode_op_hdr(xdr, OP_SETXATTR, decode_setxattr_maxsz, hdr); + p = reserve_space(xdr, 4); + *p = cpu_to_be32(arg->xattr_flags); + encode_string(xdr, strlen(arg->xattr_name), arg->xattr_name); + p = reserve_space(xdr, 4); + *p = cpu_to_be32(arg->xattr_len); + if (arg->xattr_len) + xdr_write_pages(xdr, arg->xattr_pages, 0, arg->xattr_len); +} + +static int decode_setxattr(struct xdr_stream *xdr, + struct nfs4_change_info *cinfo) +{ + int status; + + status = decode_op_hdr(xdr, OP_SETXATTR); + if 
(status) + goto out; + status = decode_change_info(xdr, cinfo); +out: + return status; +} + + +static void encode_getxattr(struct xdr_stream *xdr, const char *name, + struct compound_hdr *hdr) +{ + encode_op_hdr(xdr, OP_GETXATTR, decode_getxattr_maxsz, hdr); + encode_string(xdr, strlen(name), name); +} + +static int decode_getxattr(struct xdr_stream *xdr, + struct nfs42_getxattrres *res, + struct rpc_rqst *req) +{ + int status; + __be32 *p; + u32 len, rdlen; + + status = decode_op_hdr(xdr, OP_GETXATTR); + if (status) + return status; + + p = xdr_inline_decode(xdr, 4); + if (unlikely(!p)) + return -EIO; + + len = be32_to_cpup(p); + if (len > req->rq_rcv_buf.page_len) + return -ERANGE; + + res->xattr_len = len; + + if (len > 0) { + rdlen = xdr_read_pages(xdr, len); + if (rdlen < len) + return -EIO; + } + + return 0; +} + +static void encode_removexattr(struct xdr_stream *xdr, const char *name, + struct compound_hdr *hdr) +{ + encode_op_hdr(xdr, OP_REMOVEXATTR, decode_removexattr_maxsz, hdr); + encode_string(xdr, strlen(name), name); +} + + +static int decode_removexattr(struct xdr_stream *xdr, + struct nfs4_change_info *cinfo) +{ + int status; + + status = decode_op_hdr(xdr, OP_REMOVEXATTR); + if (status) + goto out; + + status = decode_change_info(xdr, cinfo); +out: + return status; +} + +static void encode_listxattrs(struct xdr_stream *xdr, + const struct nfs42_listxattrsargs *arg, + struct compound_hdr *hdr) +{ + __be32 *p; + + encode_op_hdr(xdr, OP_LISTXATTRS, decode_listxattrs_maxsz + 1, hdr); + + p = reserve_space(xdr, 12); + if (unlikely(!p)) + return; + + p = xdr_encode_hyper(p, arg->cookie); + /* + * RFC 8276 says to specify the full max length of the LISTXATTRS + * XDR reply. Count is set to the XDR length of the names array + * plus the EOF marker. So, add the cookie and the names count. + */ + *p = cpu_to_be32(arg->count + 8 + 4); +} + +static int decode_listxattrs(struct xdr_stream *xdr, + struct nfs42_listxattrsres *res) +{ + int status; + __be32 *p; + u32 count, len, ulen; + size_t left, copied; + char *buf; + + status = decode_op_hdr(xdr, OP_LISTXATTRS); + if (status) { + /* + * Special case: for LISTXATTRS, NFS4ERR_TOOSMALL + * should be translated to ERANGE. + */ + if (status == -ETOOSMALL) + status = -ERANGE; + goto out; + } + + p = xdr_inline_decode(xdr, 8); + if (unlikely(!p)) + return -EIO; + + xdr_decode_hyper(p, &res->cookie); + + p = xdr_inline_decode(xdr, 4); + if (unlikely(!p)) + return -EIO; + + left = res->xattr_len; + buf = res->xattr_buf; + + count = be32_to_cpup(p); + copied = 0; + + /* + * We have asked for enough room to encode the maximum number + * of possible attribute names, so everything should fit. + * + * But, don't rely on that assumption. Just decode entries + * until they don't fit anymore, just in case the server did + * something odd. 
+ */ + while (count--) { + p = xdr_inline_decode(xdr, 4); + if (unlikely(!p)) + return -EIO; + + len = be32_to_cpup(p); + if (len > (XATTR_NAME_MAX - XATTR_USER_PREFIX_LEN)) { + status = -ERANGE; + goto out; + } + + p = xdr_inline_decode(xdr, len); + if (unlikely(!p)) + return -EIO; + + ulen = len + XATTR_USER_PREFIX_LEN + 1; + if (buf) { + if (ulen > left) { + status = -ERANGE; + goto out; + } + + memcpy(buf, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN); + memcpy(buf + XATTR_USER_PREFIX_LEN, p, len); + + buf[ulen - 1] = 0; + buf += ulen; + left -= ulen; + } + copied += ulen; + } + + p = xdr_inline_decode(xdr, 4); + if (unlikely(!p)) + return -EIO; + + res->eof = be32_to_cpup(p); + res->copied = copied; + +out: + if (status == -ERANGE && res->xattr_len == XATTR_LIST_MAX) + status = -E2BIG; + + return status; +} + /* * Encode ALLOCATE request */ @@ -1062,4 +1264,166 @@ out: return status; } +#ifdef CONFIG_NFS_V4_2 +static void nfs4_xdr_enc_setxattr(struct rpc_rqst *req, struct xdr_stream *xdr, + const void *data) +{ + const struct nfs42_setxattrargs *args = data; + struct compound_hdr hdr = { + .minorversion = nfs4_xdr_minorversion(&args->seq_args), + }; + + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_setxattr(xdr, args, &hdr); + encode_nops(&hdr); +} + +static int nfs4_xdr_dec_setxattr(struct rpc_rqst *req, struct xdr_stream *xdr, + void *data) +{ + struct nfs42_setxattrres *res = data; + struct compound_hdr hdr; + int status; + + status = decode_compound_hdr(xdr, &hdr); + if (status) + goto out; + status = decode_sequence(xdr, &res->seq_res, req); + if (status) + goto out; + status = decode_putfh(xdr); + if (status) + goto out; + + status = decode_setxattr(xdr, &res->cinfo); +out: + return status; +} + +static void nfs4_xdr_enc_getxattr(struct rpc_rqst *req, struct xdr_stream *xdr, + const void *data) +{ + const struct nfs42_getxattrargs *args = data; + struct compound_hdr hdr = { + .minorversion = nfs4_xdr_minorversion(&args->seq_args), + }; + size_t plen; + + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_getxattr(xdr, args->xattr_name, &hdr); + + plen = args->xattr_len ? 
args->xattr_len : XATTR_SIZE_MAX; + + rpc_prepare_reply_pages(req, args->xattr_pages, 0, plen, + hdr.replen); + req->rq_rcv_buf.flags |= XDRBUF_SPARSE_PAGES; + + encode_nops(&hdr); +} + +static int nfs4_xdr_dec_getxattr(struct rpc_rqst *rqstp, + struct xdr_stream *xdr, void *data) +{ + struct nfs42_getxattrres *res = data; + struct compound_hdr hdr; + int status; + + status = decode_compound_hdr(xdr, &hdr); + if (status) + goto out; + status = decode_sequence(xdr, &res->seq_res, rqstp); + if (status) + goto out; + status = decode_putfh(xdr); + if (status) + goto out; + status = decode_getxattr(xdr, res, rqstp); +out: + return status; +} + +static void nfs4_xdr_enc_listxattrs(struct rpc_rqst *req, + struct xdr_stream *xdr, const void *data) +{ + const struct nfs42_listxattrsargs *args = data; + struct compound_hdr hdr = { + .minorversion = nfs4_xdr_minorversion(&args->seq_args), + }; + + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_listxattrs(xdr, args, &hdr); + + rpc_prepare_reply_pages(req, args->xattr_pages, 0, args->count, + hdr.replen); + req->rq_rcv_buf.flags |= XDRBUF_SPARSE_PAGES; + + encode_nops(&hdr); +} + +static int nfs4_xdr_dec_listxattrs(struct rpc_rqst *rqstp, + struct xdr_stream *xdr, void *data) +{ + struct nfs42_listxattrsres *res = data; + struct compound_hdr hdr; + int status; + + xdr_set_scratch_buffer(xdr, page_address(res->scratch), PAGE_SIZE); + + status = decode_compound_hdr(xdr, &hdr); + if (status) + goto out; + status = decode_sequence(xdr, &res->seq_res, rqstp); + if (status) + goto out; + status = decode_putfh(xdr); + if (status) + goto out; + status = decode_listxattrs(xdr, res); +out: + return status; +} + +static void nfs4_xdr_enc_removexattr(struct rpc_rqst *req, + struct xdr_stream *xdr, const void *data) +{ + const struct nfs42_removexattrargs *args = data; + struct compound_hdr hdr = { + .minorversion = nfs4_xdr_minorversion(&args->seq_args), + }; + + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_removexattr(xdr, args->xattr_name, &hdr); + encode_nops(&hdr); +} + +static int nfs4_xdr_dec_removexattr(struct rpc_rqst *req, + struct xdr_stream *xdr, void *data) +{ + struct nfs42_removexattrres *res = data; + struct compound_hdr hdr; + int status; + + status = decode_compound_hdr(xdr, &hdr); + if (status) + goto out; + status = decode_sequence(xdr, &res->seq_res, req); + if (status) + goto out; + status = decode_putfh(xdr); + if (status) + goto out; + + status = decode_removexattr(xdr, &res->cinfo); +out: + return status; +} +#endif #endif /* __LINUX_FS_NFS_NFS4_2XDR_H */ diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 9e1b07640e9a..388ac520b104 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -7481,6 +7481,8 @@ static struct { { NFS4ERR_SYMLINK, -ELOOP }, { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP }, { NFS4ERR_DEADLOCK, -EDEADLK }, + { NFS4ERR_NOXATTR, -ENODATA }, + { NFS4ERR_XATTR2BIG, -E2BIG }, { -1, -EIO } }; @@ -7609,6 +7611,10 @@ const struct rpc_procinfo nfs4_procedures[] = { PROC42(COPY_NOTIFY, enc_copy_notify, dec_copy_notify), PROC(LOOKUPP, enc_lookupp, dec_lookupp), PROC42(LAYOUTERROR, enc_layouterror, dec_layouterror), + PROC42(GETXATTR, enc_getxattr, dec_getxattr), + PROC42(SETXATTR, enc_setxattr, dec_setxattr), + PROC42(LISTXATTRS, enc_listxattrs, dec_listxattrs), + PROC42(REMOVEXATTR, enc_removexattr, dec_removexattr), }; static unsigned int 
nfs_version4_counts[ARRAY_SIZE(nfs4_procedures)]; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index dee9b1cfa972..9408f3252c8e 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1498,7 +1498,64 @@ struct nfs42_seek_res { u32 sr_eof; u64 sr_offset; }; -#endif + +struct nfs42_setxattrargs { + struct nfs4_sequence_args seq_args; + struct nfs_fh *fh; + const char *xattr_name; + u32 xattr_flags; + size_t xattr_len; + struct page **xattr_pages; +}; + +struct nfs42_setxattrres { + struct nfs4_sequence_res seq_res; + struct nfs4_change_info cinfo; +}; + +struct nfs42_getxattrargs { + struct nfs4_sequence_args seq_args; + struct nfs_fh *fh; + const char *xattr_name; + size_t xattr_len; + struct page **xattr_pages; +}; + +struct nfs42_getxattrres { + struct nfs4_sequence_res seq_res; + size_t xattr_len; +}; + +struct nfs42_listxattrsargs { + struct nfs4_sequence_args seq_args; + struct nfs_fh *fh; + u32 count; + u64 cookie; + struct page **xattr_pages; +}; + +struct nfs42_listxattrsres { + struct nfs4_sequence_res seq_res; + struct page *scratch; + void *xattr_buf; + size_t xattr_len; + u64 cookie; + bool eof; + size_t copied; +}; + +struct nfs42_removexattrargs { + struct nfs4_sequence_args seq_args; + struct nfs_fh *fh; + const char *xattr_name; +}; + +struct nfs42_removexattrres { + struct nfs4_sequence_res seq_res; + struct nfs4_change_info cinfo; +}; + +#endif /* CONFIG_NFS_V4_2 */ struct nfs_page; -- cgit v1.2.3 From d2ae4f8b21c111bb795c557588d89dccd005828d Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Tue, 23 Jun 2020 22:38:57 +0000 Subject: nfs: define nfs_access_get_cached function The only consumer of nfs_access_get_cached_rcu and nfs_access_cached calls these static functions in order to first try RCU access, and then locked access. Combine them in to a single function, and call that. Make this function available to the rest of the NFS code. 
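For illustration only, a caller elsewhere in the NFS client would be expected to use the combined helper roughly as follows (the wrapper below is hypothetical; only nfs_access_get_cached() and struct nfs_access_entry come from this patch):

/*
 * Sketch: consult the access cache, trying the lockless RCU lookup first
 * and falling back to the locked rbtree lookup, which is exactly what
 * nfs_access_get_cached() now does internally.
 */
static bool nfs_access_mask_is_cached(struct inode *inode,
				      const struct cred *cred, int mask)
{
	struct nfs_access_entry cache;

	if (nfs_access_get_cached(inode, cred, &cache, true) != 0)
		return false;	/* not cached; caller must send an ACCESS RPC */
	return (cache.mask & mask) == mask;
}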
Signed-off-by: Frank van der Linden Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 20 ++++++++++++++++---- include/linux/nfs_fs.h | 2 ++ 2 files changed, 18 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 5a331da5f55a..f04fc0f7843b 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -2460,7 +2460,7 @@ static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, co return NULL; } -static int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res, bool may_block) +static int nfs_access_get_cached_locked(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res, bool may_block) { struct nfs_inode *nfsi = NFS_I(inode); struct nfs_access_entry *cache; @@ -2533,6 +2533,20 @@ out: return err; } +int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct +nfs_access_entry *res, bool may_block) +{ + int status; + + status = nfs_access_get_cached_rcu(inode, cred, res); + if (status != 0) + status = nfs_access_get_cached_locked(inode, cred, res, + may_block); + + return status; +} +EXPORT_SYMBOL_GPL(nfs_access_get_cached); + static void nfs_access_add_rbtree(struct inode *inode, struct nfs_access_entry *set) { struct nfs_inode *nfsi = NFS_I(inode); @@ -2647,9 +2661,7 @@ static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask) trace_nfs_access_enter(inode); - status = nfs_access_get_cached_rcu(inode, cred, &cache); - if (status != 0) - status = nfs_access_get_cached(inode, cred, &cache, may_block); + status = nfs_access_get_cached(inode, cred, &cache, may_block); if (status == 0) goto out_cached; diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index b743988fcbd0..714b577dce19 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -493,6 +493,8 @@ extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr, struct nfs4_label *label); extern int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags); extern void nfs_access_zap_cache(struct inode *inode); +extern int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res, + bool may_block); /* * linux/fs/nfs/symlink.c -- cgit v1.2.3 From 0f44da51aeef9c974ea744c0d9e24d54eec4e94c Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Tue, 23 Jun 2020 22:39:00 +0000 Subject: nfs: define and use the NFS_INO_INVALID_XATTR flag Define the NFS_INO_INVALID_XATTR flag, to be used for the NFSv4.2 xattr cache, and use it where appropriate. No functional change as yet. 
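As an illustration of how the flag is meant to be consumed (sketch only; the helper below is hypothetical, while NFS_INO_INVALID_XATTR and the cache_validity field are taken from this patch), later xattr caching code can simply test the bit before trusting cached data:

/*
 * Sketch: cached user xattrs may only be served while the inode's
 * NFS_INO_INVALID_XATTR bit is clear; a change attribute update or a
 * cache zap sets the bit and forces a refetch from the server.
 */
static bool nfs_xattr_cache_is_valid(struct inode *inode)
{
	return !(NFS_I(inode)->cache_validity & NFS_INO_INVALID_XATTR);
}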
Signed-off-by: Frank van der Linden Signed-off-by: Trond Myklebust --- fs/nfs/inode.c | 7 ++++++- fs/nfs/nfs4proc.c | 3 ++- fs/nfs/nfstrace.h | 3 ++- include/linux/nfs_fs.h | 1 + 4 files changed, 11 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 0bf1f835de01..629af798dfc9 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -204,7 +204,8 @@ static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags) flags &= ~NFS_INO_INVALID_OTHER; flags &= ~(NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE - | NFS_INO_REVAL_PAGECACHE); + | NFS_INO_REVAL_PAGECACHE + | NFS_INO_INVALID_XATTR); } if (inode->i_mapping->nrpages == 0) @@ -542,6 +543,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st inode->i_gid = fattr->gid; else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP)) nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER); + if (nfs_server_capable(inode, NFS_CAP_XATTR)) + nfs_set_cache_invalid(inode, NFS_INO_INVALID_XATTR); if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED) inode->i_blocks = fattr->du.nfs2.blocks; if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) { @@ -1375,6 +1378,8 @@ static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr) inode_set_iversion_raw(inode, fattr->change_attr); if (S_ISDIR(inode->i_mode)) nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA); + else if (nfs_server_capable(inode, NFS_CAP_XATTR)) + nfs_set_cache_invalid(inode, NFS_INO_INVALID_XATTR); } /* If we have atomic WCC data, we may update some attributes */ ts = inode->i_ctime; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 6540071cb228..0d123fe0a423 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1183,7 +1183,8 @@ nfs4_update_changeattr_locked(struct inode *inode, if (cinfo->before != inode_peek_iversion_raw(inode)) nfsi->cache_validity |= NFS_INO_INVALID_ACCESS | - NFS_INO_INVALID_ACL; + NFS_INO_INVALID_ACL | + NFS_INO_INVALID_XATTR; } inode_set_iversion_raw(inode, cinfo->after); nfsi->read_cache_jiffies = timestamp; diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h index 547cec79899f..5a59dcdce0b2 100644 --- a/fs/nfs/nfstrace.h +++ b/fs/nfs/nfstrace.h @@ -59,7 +59,8 @@ TRACE_DEFINE_ENUM(NFS_INO_INVALID_OTHER); { NFS_INO_INVALID_CTIME, "INVALID_CTIME" }, \ { NFS_INO_INVALID_MTIME, "INVALID_MTIME" }, \ { NFS_INO_INVALID_SIZE, "INVALID_SIZE" }, \ - { NFS_INO_INVALID_OTHER, "INVALID_OTHER" }) + { NFS_INO_INVALID_OTHER, "INVALID_OTHER" }, \ + { NFS_INO_INVALID_XATTR, "INVALID_XATTR" }) TRACE_DEFINE_ENUM(NFS_INO_ADVISE_RDPLUS); TRACE_DEFINE_ENUM(NFS_INO_STALE); diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 714b577dce19..943ee750d68c 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -234,6 +234,7 @@ struct nfs4_copy_state { #define NFS_INO_DATA_INVAL_DEFER \ BIT(13) /* Deferred cache invalidation */ #define NFS_INO_INVALID_BLOCKS BIT(14) /* cached blocks are invalid */ +#define NFS_INO_INVALID_XATTR BIT(15) /* xattrs are invalid */ #define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \ | NFS_INO_INVALID_CTIME \ -- cgit v1.2.3 From 95ad37f90c338e3fd4abf61cecfe02b6f3e080f0 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Tue, 23 Jun 2020 22:39:04 +0000 Subject: NFSv4.2: add client side xattr caching. Implement client side caching for NFSv4.2 extended attributes. The cache is a per-inode hashtable, with name/value entries. There is one special entry for the listxattr cache. NFS inodes have a pointer to a cache structure. 
The cache structure is allocated on demand, freed when the cache is invalidated. Memory shrinkers keep the size in check. Large entries (> PAGE_SIZE) are collected by a separate shrinker, and freed more aggressively than others. Signed-off-by: Frank van der Linden Signed-off-by: Trond Myklebust --- fs/nfs/Makefile | 2 +- fs/nfs/inode.c | 9 +- fs/nfs/nfs42proc.c | 12 + fs/nfs/nfs42xattr.c | 1083 +++++++++++++++++++++++++++++++++++++++++++ fs/nfs/nfs4_fs.h | 22 + fs/nfs/nfs4proc.c | 42 +- fs/nfs/nfs4super.c | 10 + include/linux/nfs_fs.h | 6 + include/uapi/linux/nfs_fs.h | 1 + 9 files changed, 1179 insertions(+), 8 deletions(-) create mode 100644 fs/nfs/nfs42xattr.c (limited to 'include') diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile index 2433c3e03cfa..22d11fdc6deb 100644 --- a/fs/nfs/Makefile +++ b/fs/nfs/Makefile @@ -30,7 +30,7 @@ nfsv4-y := nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o nfs4super.o nfs4file.o nfsv4-$(CONFIG_NFS_USE_LEGACY_DNS) += cache_lib.o nfsv4-$(CONFIG_SYSCTL) += nfs4sysctl.o nfsv4-$(CONFIG_NFS_V4_1) += pnfs.o pnfs_dev.o pnfs_nfs.o -nfsv4-$(CONFIG_NFS_V4_2) += nfs42proc.o +nfsv4-$(CONFIG_NFS_V4_2) += nfs42proc.o nfs42xattr.o obj-$(CONFIG_PNFS_FILE_LAYOUT) += filelayout/ obj-$(CONFIG_PNFS_BLOCK) += blocklayout/ diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 629af798dfc9..10048eed485d 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -193,6 +193,7 @@ bool nfs_check_cache_invalid(struct inode *inode, unsigned long flags) return nfs_check_cache_invalid_not_delegated(inode, flags); } +EXPORT_SYMBOL_GPL(nfs_check_cache_invalid); static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags) { @@ -234,11 +235,13 @@ static void nfs_zap_caches_locked(struct inode *inode) | NFS_INO_INVALID_DATA | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL + | NFS_INO_INVALID_XATTR | NFS_INO_REVAL_PAGECACHE); } else nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL + | NFS_INO_INVALID_XATTR | NFS_INO_REVAL_PAGECACHE); nfs_zap_label_cache_locked(nfsi); } @@ -1897,7 +1900,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) if (!(have_writers || have_delegation)) { invalid |= NFS_INO_INVALID_DATA | NFS_INO_INVALID_ACCESS - | NFS_INO_INVALID_ACL; + | NFS_INO_INVALID_ACL + | NFS_INO_INVALID_XATTR; /* Force revalidate of all attributes */ save_cache_validity |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME @@ -2100,6 +2104,9 @@ struct inode *nfs_alloc_inode(struct super_block *sb) #if IS_ENABLED(CONFIG_NFS_V4) nfsi->nfs4_acl = NULL; #endif /* CONFIG_NFS_V4 */ +#ifdef CONFIG_NFS_V4_2 + nfsi->xattr_cache = NULL; +#endif return &nfsi->vfs_inode; } EXPORT_SYMBOL_GPL(nfs_alloc_inode); diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index 8c2e52bc986a..e200522469af 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -1182,6 +1182,18 @@ static ssize_t _nfs42_proc_getxattr(struct inode *inode, const char *name, if (ret < 0) return ret; + /* + * Normally, the caching is done one layer up, but for successful + * RPCS, always cache the result here, even if the caller was + * just querying the length, or if the reply was too big for + * the caller. This avoids a second RPC in the case of the + * common query-alloc-retrieve cycle for xattrs. + * + * Note that xattr_len is always capped to XATTR_SIZE_MAX. 
+ */ + + nfs4_xattr_cache_add(inode, name, NULL, pages, res.xattr_len); + if (buflen) { if (res.xattr_len > buflen) return -ERANGE; diff --git a/fs/nfs/nfs42xattr.c b/fs/nfs/nfs42xattr.c new file mode 100644 index 000000000000..23fdab977a2a --- /dev/null +++ b/fs/nfs/nfs42xattr.c @@ -0,0 +1,1083 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2019, 2020 Amazon.com, Inc. or its affiliates. All rights reserved. + * + * User extended attribute client side cache functions. + * + * Author: Frank van der Linden + */ +#include +#include +#include +#include +#include + +#include "nfs4_fs.h" +#include "internal.h" + +/* + * User extended attributes client side caching is implemented by having + * a cache structure attached to NFS inodes. This structure is allocated + * when needed, and freed when the cache is zapped. + * + * The cache structure contains as hash table of entries, and a pointer + * to a special-cased entry for the listxattr cache. + * + * Accessing and allocating / freeing the caches is done via reference + * counting. The cache entries use a similar refcounting scheme. + * + * This makes freeing a cache, both from the shrinker and from the + * zap cache path, easy. It also means that, in current use cases, + * the large majority of inodes will not waste any memory, as they + * will never have any user extended attributes assigned to them. + * + * Attribute entries are hashed in to a simple hash table. They are + * also part of an LRU. + * + * There are three shrinkers. + * + * Two shrinkers deal with the cache entries themselves: one for + * large entries (> PAGE_SIZE), and one for smaller entries. The + * shrinker for the larger entries works more aggressively than + * those for the smaller entries. + * + * The other shrinker frees the cache structures themselves. + */ + +/* + * 64 buckets is a good default. There is likely no reasonable + * workload that uses more than even 64 user extended attributes. + * You can certainly add a lot more - but you get what you ask for + * in those circumstances. + */ +#define NFS4_XATTR_HASH_SIZE 64 + +#define NFSDBG_FACILITY NFSDBG_XATTRCACHE + +struct nfs4_xattr_cache; +struct nfs4_xattr_entry; + +struct nfs4_xattr_bucket { + spinlock_t lock; + struct hlist_head hlist; + struct nfs4_xattr_cache *cache; + bool draining; +}; + +struct nfs4_xattr_cache { + struct kref ref; + spinlock_t hash_lock; /* protects hashtable and lru */ + struct nfs4_xattr_bucket buckets[NFS4_XATTR_HASH_SIZE]; + struct list_head lru; + struct list_head dispose; + atomic_long_t nent; + spinlock_t listxattr_lock; + struct inode *inode; + struct nfs4_xattr_entry *listxattr; + struct work_struct work; +}; + +struct nfs4_xattr_entry { + struct kref ref; + struct hlist_node hnode; + struct list_head lru; + struct list_head dispose; + char *xattr_name; + void *xattr_value; + size_t xattr_size; + struct nfs4_xattr_bucket *bucket; + uint32_t flags; +}; + +#define NFS4_XATTR_ENTRY_EXTVAL 0x0001 + +/* + * LRU list of NFS inodes that have xattr caches. + */ +static struct list_lru nfs4_xattr_cache_lru; +static struct list_lru nfs4_xattr_entry_lru; +static struct list_lru nfs4_xattr_large_entry_lru; + +static struct kmem_cache *nfs4_xattr_cache_cachep; + +static struct workqueue_struct *nfs4_xattr_cache_wq; + +/* + * Hashing helper functions. 
+ */ +static void +nfs4_xattr_hash_init(struct nfs4_xattr_cache *cache) +{ + unsigned int i; + + for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) { + INIT_HLIST_HEAD(&cache->buckets[i].hlist); + spin_lock_init(&cache->buckets[i].lock); + cache->buckets[i].cache = cache; + cache->buckets[i].draining = false; + } +} + +/* + * Locking order: + * 1. inode i_lock or bucket lock + * 2. list_lru lock (taken by list_lru_* functions) + */ + +/* + * Wrapper functions to add a cache entry to the right LRU. + */ +static bool +nfs4_xattr_entry_lru_add(struct nfs4_xattr_entry *entry) +{ + struct list_lru *lru; + + lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ? + &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru; + + return list_lru_add(lru, &entry->lru); +} + +static bool +nfs4_xattr_entry_lru_del(struct nfs4_xattr_entry *entry) +{ + struct list_lru *lru; + + lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ? + &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru; + + return list_lru_del(lru, &entry->lru); +} + +/* + * This function allocates cache entries. They are the normal + * extended attribute name/value pairs, but may also be a listxattr + * cache. Those allocations use the same entry so that they can be + * treated as one by the memory shrinker. + * + * xattr cache entries are allocated together with names. If the + * value fits in to one page with the entry structure and the name, + * it will also be part of the same allocation (kmalloc). This is + * expected to be the vast majority of cases. Larger allocations + * have a value pointer that is allocated separately by kvmalloc. + * + * Parameters: + * + * @name: Name of the extended attribute. NULL for listxattr cache + * entry. + * @value: Value of attribute, or listxattr cache. NULL if the + * value is to be copied from pages instead. + * @pages: Pages to copy the value from, if not NULL. Passed in to + * make it easier to copy the value after an RPC, even if + * the value will not be passed up to application (e.g. + * for a 'query' getxattr with NULL buffer). + * @len: Length of the value. Can be 0 for zero-length attribues. + * @value and @pages will be NULL if @len is 0. 
+ */ +static struct nfs4_xattr_entry * +nfs4_xattr_alloc_entry(const char *name, const void *value, + struct page **pages, size_t len) +{ + struct nfs4_xattr_entry *entry; + void *valp; + char *namep; + size_t alloclen, slen; + char *buf; + uint32_t flags; + + BUILD_BUG_ON(sizeof(struct nfs4_xattr_entry) + + XATTR_NAME_MAX + 1 > PAGE_SIZE); + + alloclen = sizeof(struct nfs4_xattr_entry); + if (name != NULL) { + slen = strlen(name) + 1; + alloclen += slen; + } else + slen = 0; + + if (alloclen + len <= PAGE_SIZE) { + alloclen += len; + flags = 0; + } else { + flags = NFS4_XATTR_ENTRY_EXTVAL; + } + + buf = kmalloc(alloclen, GFP_KERNEL_ACCOUNT | GFP_NOFS); + if (buf == NULL) + return NULL; + entry = (struct nfs4_xattr_entry *)buf; + + if (name != NULL) { + namep = buf + sizeof(struct nfs4_xattr_entry); + memcpy(namep, name, slen); + } else { + namep = NULL; + } + + + if (flags & NFS4_XATTR_ENTRY_EXTVAL) { + valp = kvmalloc(len, GFP_KERNEL_ACCOUNT | GFP_NOFS); + if (valp == NULL) { + kfree(buf); + return NULL; + } + } else if (len != 0) { + valp = buf + sizeof(struct nfs4_xattr_entry) + slen; + } else + valp = NULL; + + if (valp != NULL) { + if (value != NULL) + memcpy(valp, value, len); + else + _copy_from_pages(valp, pages, 0, len); + } + + entry->flags = flags; + entry->xattr_value = valp; + kref_init(&entry->ref); + entry->xattr_name = namep; + entry->xattr_size = len; + entry->bucket = NULL; + INIT_LIST_HEAD(&entry->lru); + INIT_LIST_HEAD(&entry->dispose); + INIT_HLIST_NODE(&entry->hnode); + + return entry; +} + +static void +nfs4_xattr_free_entry(struct nfs4_xattr_entry *entry) +{ + if (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) + kvfree(entry->xattr_value); + kfree(entry); +} + +static void +nfs4_xattr_free_entry_cb(struct kref *kref) +{ + struct nfs4_xattr_entry *entry; + + entry = container_of(kref, struct nfs4_xattr_entry, ref); + + if (WARN_ON(!list_empty(&entry->lru))) + return; + + nfs4_xattr_free_entry(entry); +} + +static void +nfs4_xattr_free_cache_cb(struct kref *kref) +{ + struct nfs4_xattr_cache *cache; + int i; + + cache = container_of(kref, struct nfs4_xattr_cache, ref); + + for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) { + if (WARN_ON(!hlist_empty(&cache->buckets[i].hlist))) + return; + cache->buckets[i].draining = false; + } + + cache->listxattr = NULL; + + kmem_cache_free(nfs4_xattr_cache_cachep, cache); + +} + +static struct nfs4_xattr_cache * +nfs4_xattr_alloc_cache(void) +{ + struct nfs4_xattr_cache *cache; + + cache = kmem_cache_alloc(nfs4_xattr_cache_cachep, + GFP_KERNEL_ACCOUNT | GFP_NOFS); + if (cache == NULL) + return NULL; + + kref_init(&cache->ref); + atomic_long_set(&cache->nent, 0); + + return cache; +} + +/* + * Set the listxattr cache, which is a special-cased cache entry. + * The special value ERR_PTR(-ESTALE) is used to indicate that + * the cache is being drained - this prevents a new listxattr + * cache from being added to what is now a stale cache. 
+ */ +static int +nfs4_xattr_set_listcache(struct nfs4_xattr_cache *cache, + struct nfs4_xattr_entry *new) +{ + struct nfs4_xattr_entry *old; + int ret = 1; + + spin_lock(&cache->listxattr_lock); + + old = cache->listxattr; + + if (old == ERR_PTR(-ESTALE)) { + ret = 0; + goto out; + } + + cache->listxattr = new; + if (new != NULL && new != ERR_PTR(-ESTALE)) + nfs4_xattr_entry_lru_add(new); + + if (old != NULL) { + nfs4_xattr_entry_lru_del(old); + kref_put(&old->ref, nfs4_xattr_free_entry_cb); + } +out: + spin_unlock(&cache->listxattr_lock); + + return ret; +} + +/* + * Unlink a cache from its parent inode, clearing out an invalid + * cache. Must be called with i_lock held. + */ +static struct nfs4_xattr_cache * +nfs4_xattr_cache_unlink(struct inode *inode) +{ + struct nfs_inode *nfsi; + struct nfs4_xattr_cache *oldcache; + + nfsi = NFS_I(inode); + + oldcache = nfsi->xattr_cache; + if (oldcache != NULL) { + list_lru_del(&nfs4_xattr_cache_lru, &oldcache->lru); + oldcache->inode = NULL; + } + nfsi->xattr_cache = NULL; + nfsi->cache_validity &= ~NFS_INO_INVALID_XATTR; + + return oldcache; + +} + +/* + * Discard a cache. Usually called by a worker, since walking all + * the entries can take up some cycles that we don't want to waste + * in the I/O path. Can also be called from the shrinker callback. + * + * The cache is dead, it has already been unlinked from its inode, + * and no longer appears on the cache LRU list. + * + * Mark all buckets as draining, so that no new entries are added. This + * could still happen in the unlikely, but possible case that another + * thread had grabbed a reference before it was unlinked from the inode, + * and is still holding it for an add operation. + * + * Remove all entries from the LRU lists, so that there is no longer + * any way to 'find' this cache. Then, remove the entries from the hash + * table. + * + * At that point, the cache will remain empty and can be freed when the final + * reference drops, which is very likely the kref_put at the end of + * this function, or the one called immediately afterwards in the + * shrinker callback. + */ +static void +nfs4_xattr_discard_cache(struct nfs4_xattr_cache *cache) +{ + unsigned int i; + struct nfs4_xattr_entry *entry; + struct nfs4_xattr_bucket *bucket; + struct hlist_node *n; + + nfs4_xattr_set_listcache(cache, ERR_PTR(-ESTALE)); + + for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) { + bucket = &cache->buckets[i]; + + spin_lock(&bucket->lock); + bucket->draining = true; + hlist_for_each_entry_safe(entry, n, &bucket->hlist, hnode) { + nfs4_xattr_entry_lru_del(entry); + hlist_del_init(&entry->hnode); + kref_put(&entry->ref, nfs4_xattr_free_entry_cb); + } + spin_unlock(&bucket->lock); + } + + atomic_long_set(&cache->nent, 0); + + kref_put(&cache->ref, nfs4_xattr_free_cache_cb); +} + +static void +nfs4_xattr_discard_cache_worker(struct work_struct *work) +{ + struct nfs4_xattr_cache *cache = container_of(work, + struct nfs4_xattr_cache, work); + + nfs4_xattr_discard_cache(cache); +} + +static void +nfs4_xattr_reap_cache(struct nfs4_xattr_cache *cache) +{ + queue_work(nfs4_xattr_cache_wq, &cache->work); +} + +/* + * Get a referenced copy of the cache structure. Avoid doing allocs + * while holding i_lock. Which means that we do some optimistic allocation, + * and might have to free the result in rare cases. + * + * This function only checks the NFS_INO_INVALID_XATTR cache validity bit + * and acts accordingly, replacing the cache when needed. 
For the read case + * (!add), this means that the caller must make sure that the cache + * is valid before caling this function. getxattr and listxattr call + * revalidate_inode to do this. The attribute cache timeout (for the + * non-delegated case) is expected to be dealt with in the revalidate + * call. + */ + +static struct nfs4_xattr_cache * +nfs4_xattr_get_cache(struct inode *inode, int add) +{ + struct nfs_inode *nfsi; + struct nfs4_xattr_cache *cache, *oldcache, *newcache; + + nfsi = NFS_I(inode); + + cache = oldcache = NULL; + + spin_lock(&inode->i_lock); + + if (nfsi->cache_validity & NFS_INO_INVALID_XATTR) + oldcache = nfs4_xattr_cache_unlink(inode); + else + cache = nfsi->xattr_cache; + + if (cache != NULL) + kref_get(&cache->ref); + + spin_unlock(&inode->i_lock); + + if (add && cache == NULL) { + newcache = NULL; + + cache = nfs4_xattr_alloc_cache(); + if (cache == NULL) + goto out; + + spin_lock(&inode->i_lock); + if (nfsi->cache_validity & NFS_INO_INVALID_XATTR) { + /* + * The cache was invalidated again. Give up, + * since what we want to enter is now likely + * outdated anyway. + */ + spin_unlock(&inode->i_lock); + kref_put(&cache->ref, nfs4_xattr_free_cache_cb); + cache = NULL; + goto out; + } + + /* + * Check if someone beat us to it. + */ + if (nfsi->xattr_cache != NULL) { + newcache = nfsi->xattr_cache; + kref_get(&newcache->ref); + } else { + kref_get(&cache->ref); + nfsi->xattr_cache = cache; + cache->inode = inode; + list_lru_add(&nfs4_xattr_cache_lru, &cache->lru); + } + + spin_unlock(&inode->i_lock); + + /* + * If there was a race, throw away the cache we just + * allocated, and use the new one allocated by someone + * else. + */ + if (newcache != NULL) { + kref_put(&cache->ref, nfs4_xattr_free_cache_cb); + cache = newcache; + } + } + +out: + /* + * Discarding an old cache is done via a workqueue. 
+ */ + if (oldcache != NULL) + nfs4_xattr_reap_cache(oldcache); + + return cache; +} + +static inline struct nfs4_xattr_bucket * +nfs4_xattr_hash_bucket(struct nfs4_xattr_cache *cache, const char *name) +{ + return &cache->buckets[jhash(name, strlen(name), 0) & + (ARRAY_SIZE(cache->buckets) - 1)]; +} + +static struct nfs4_xattr_entry * +nfs4_xattr_get_entry(struct nfs4_xattr_bucket *bucket, const char *name) +{ + struct nfs4_xattr_entry *entry; + + entry = NULL; + + hlist_for_each_entry(entry, &bucket->hlist, hnode) { + if (!strcmp(entry->xattr_name, name)) + break; + } + + return entry; +} + +static int +nfs4_xattr_hash_add(struct nfs4_xattr_cache *cache, + struct nfs4_xattr_entry *entry) +{ + struct nfs4_xattr_bucket *bucket; + struct nfs4_xattr_entry *oldentry = NULL; + int ret = 1; + + bucket = nfs4_xattr_hash_bucket(cache, entry->xattr_name); + entry->bucket = bucket; + + spin_lock(&bucket->lock); + + if (bucket->draining) { + ret = 0; + goto out; + } + + oldentry = nfs4_xattr_get_entry(bucket, entry->xattr_name); + if (oldentry != NULL) { + hlist_del_init(&oldentry->hnode); + nfs4_xattr_entry_lru_del(oldentry); + } else { + atomic_long_inc(&cache->nent); + } + + hlist_add_head(&entry->hnode, &bucket->hlist); + nfs4_xattr_entry_lru_add(entry); + +out: + spin_unlock(&bucket->lock); + + if (oldentry != NULL) + kref_put(&oldentry->ref, nfs4_xattr_free_entry_cb); + + return ret; +} + +static void +nfs4_xattr_hash_remove(struct nfs4_xattr_cache *cache, const char *name) +{ + struct nfs4_xattr_bucket *bucket; + struct nfs4_xattr_entry *entry; + + bucket = nfs4_xattr_hash_bucket(cache, name); + + spin_lock(&bucket->lock); + + entry = nfs4_xattr_get_entry(bucket, name); + if (entry != NULL) { + hlist_del_init(&entry->hnode); + nfs4_xattr_entry_lru_del(entry); + atomic_long_dec(&cache->nent); + } + + spin_unlock(&bucket->lock); + + if (entry != NULL) + kref_put(&entry->ref, nfs4_xattr_free_entry_cb); +} + +static struct nfs4_xattr_entry * +nfs4_xattr_hash_find(struct nfs4_xattr_cache *cache, const char *name) +{ + struct nfs4_xattr_bucket *bucket; + struct nfs4_xattr_entry *entry; + + bucket = nfs4_xattr_hash_bucket(cache, name); + + spin_lock(&bucket->lock); + + entry = nfs4_xattr_get_entry(bucket, name); + if (entry != NULL) + kref_get(&entry->ref); + + spin_unlock(&bucket->lock); + + return entry; +} + +/* + * Entry point to retrieve an entry from the cache. + */ +ssize_t nfs4_xattr_cache_get(struct inode *inode, const char *name, char *buf, + ssize_t buflen) +{ + struct nfs4_xattr_cache *cache; + struct nfs4_xattr_entry *entry; + ssize_t ret; + + cache = nfs4_xattr_get_cache(inode, 0); + if (cache == NULL) + return -ENOENT; + + ret = 0; + entry = nfs4_xattr_hash_find(cache, name); + + if (entry != NULL) { + dprintk("%s: cache hit '%s', len %lu\n", __func__, + entry->xattr_name, (unsigned long)entry->xattr_size); + if (buflen == 0) { + /* Length probe only */ + ret = entry->xattr_size; + } else if (buflen < entry->xattr_size) + ret = -ERANGE; + else { + memcpy(buf, entry->xattr_value, entry->xattr_size); + ret = entry->xattr_size; + } + kref_put(&entry->ref, nfs4_xattr_free_entry_cb); + } else { + dprintk("%s: cache miss '%s'\n", __func__, name); + ret = -ENOENT; + } + + kref_put(&cache->ref, nfs4_xattr_free_cache_cb); + + return ret; +} + +/* + * Retrieve a cached list of xattrs from the cache. 
+ */ +ssize_t nfs4_xattr_cache_list(struct inode *inode, char *buf, ssize_t buflen) +{ + struct nfs4_xattr_cache *cache; + struct nfs4_xattr_entry *entry; + ssize_t ret; + + cache = nfs4_xattr_get_cache(inode, 0); + if (cache == NULL) + return -ENOENT; + + spin_lock(&cache->listxattr_lock); + + entry = cache->listxattr; + + if (entry != NULL && entry != ERR_PTR(-ESTALE)) { + if (buflen == 0) { + /* Length probe only */ + ret = entry->xattr_size; + } else if (entry->xattr_size > buflen) + ret = -ERANGE; + else { + memcpy(buf, entry->xattr_value, entry->xattr_size); + ret = entry->xattr_size; + } + } else { + ret = -ENOENT; + } + + spin_unlock(&cache->listxattr_lock); + + kref_put(&cache->ref, nfs4_xattr_free_cache_cb); + + return ret; +} + +/* + * Add an xattr to the cache. + * + * This also invalidates the xattr list cache. + */ +void nfs4_xattr_cache_add(struct inode *inode, const char *name, + const char *buf, struct page **pages, ssize_t buflen) +{ + struct nfs4_xattr_cache *cache; + struct nfs4_xattr_entry *entry; + + dprintk("%s: add '%s' len %lu\n", __func__, + name, (unsigned long)buflen); + + cache = nfs4_xattr_get_cache(inode, 1); + if (cache == NULL) + return; + + entry = nfs4_xattr_alloc_entry(name, buf, pages, buflen); + if (entry == NULL) + goto out; + + (void)nfs4_xattr_set_listcache(cache, NULL); + + if (!nfs4_xattr_hash_add(cache, entry)) + kref_put(&entry->ref, nfs4_xattr_free_entry_cb); + +out: + kref_put(&cache->ref, nfs4_xattr_free_cache_cb); +} + + +/* + * Remove an xattr from the cache. + * + * This also invalidates the xattr list cache. + */ +void nfs4_xattr_cache_remove(struct inode *inode, const char *name) +{ + struct nfs4_xattr_cache *cache; + + dprintk("%s: remove '%s'\n", __func__, name); + + cache = nfs4_xattr_get_cache(inode, 0); + if (cache == NULL) + return; + + (void)nfs4_xattr_set_listcache(cache, NULL); + nfs4_xattr_hash_remove(cache, name); + + kref_put(&cache->ref, nfs4_xattr_free_cache_cb); +} + +/* + * Cache listxattr output, replacing any possible old one. + */ +void nfs4_xattr_cache_set_list(struct inode *inode, const char *buf, + ssize_t buflen) +{ + struct nfs4_xattr_cache *cache; + struct nfs4_xattr_entry *entry; + + cache = nfs4_xattr_get_cache(inode, 1); + if (cache == NULL) + return; + + entry = nfs4_xattr_alloc_entry(NULL, buf, NULL, buflen); + if (entry == NULL) + goto out; + + /* + * This is just there to be able to get to bucket->cache, + * which is obviously the same for all buckets, so just + * use bucket 0. + */ + entry->bucket = &cache->buckets[0]; + + if (!nfs4_xattr_set_listcache(cache, entry)) + kref_put(&entry->ref, nfs4_xattr_free_entry_cb); + +out: + kref_put(&cache->ref, nfs4_xattr_free_cache_cb); +} + +/* + * Zap the entire cache. Called when an inode is evicted. + */ +void nfs4_xattr_cache_zap(struct inode *inode) +{ + struct nfs4_xattr_cache *oldcache; + + spin_lock(&inode->i_lock); + oldcache = nfs4_xattr_cache_unlink(inode); + spin_unlock(&inode->i_lock); + + if (oldcache) + nfs4_xattr_discard_cache(oldcache); +} + +/* + * The entry LRU is shrunk more aggressively than the cache LRU, + * by settings @seeks to 1. + * + * Cache structures are freed only when they've become empty, after + * pruning all but one entry. 
+ */ + +static unsigned long nfs4_xattr_cache_count(struct shrinker *shrink, + struct shrink_control *sc); +static unsigned long nfs4_xattr_entry_count(struct shrinker *shrink, + struct shrink_control *sc); +static unsigned long nfs4_xattr_cache_scan(struct shrinker *shrink, + struct shrink_control *sc); +static unsigned long nfs4_xattr_entry_scan(struct shrinker *shrink, + struct shrink_control *sc); + +static struct shrinker nfs4_xattr_cache_shrinker = { + .count_objects = nfs4_xattr_cache_count, + .scan_objects = nfs4_xattr_cache_scan, + .seeks = DEFAULT_SEEKS, + .flags = SHRINKER_MEMCG_AWARE, +}; + +static struct shrinker nfs4_xattr_entry_shrinker = { + .count_objects = nfs4_xattr_entry_count, + .scan_objects = nfs4_xattr_entry_scan, + .seeks = DEFAULT_SEEKS, + .batch = 512, + .flags = SHRINKER_MEMCG_AWARE, +}; + +static struct shrinker nfs4_xattr_large_entry_shrinker = { + .count_objects = nfs4_xattr_entry_count, + .scan_objects = nfs4_xattr_entry_scan, + .seeks = 1, + .batch = 512, + .flags = SHRINKER_MEMCG_AWARE, +}; + +static enum lru_status +cache_lru_isolate(struct list_head *item, + struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) +{ + struct list_head *dispose = arg; + struct inode *inode; + struct nfs4_xattr_cache *cache = container_of(item, + struct nfs4_xattr_cache, lru); + + if (atomic_long_read(&cache->nent) > 1) + return LRU_SKIP; + + /* + * If a cache structure is on the LRU list, we know that + * its inode is valid. Try to lock it to break the link. + * Since we're inverting the lock order here, only try. + */ + inode = cache->inode; + + if (!spin_trylock(&inode->i_lock)) + return LRU_SKIP; + + kref_get(&cache->ref); + + cache->inode = NULL; + NFS_I(inode)->xattr_cache = NULL; + NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_XATTR; + list_lru_isolate(lru, &cache->lru); + + spin_unlock(&inode->i_lock); + + list_add_tail(&cache->dispose, dispose); + return LRU_REMOVED; +} + +static unsigned long +nfs4_xattr_cache_scan(struct shrinker *shrink, struct shrink_control *sc) +{ + LIST_HEAD(dispose); + unsigned long freed; + struct nfs4_xattr_cache *cache; + + freed = list_lru_shrink_walk(&nfs4_xattr_cache_lru, sc, + cache_lru_isolate, &dispose); + while (!list_empty(&dispose)) { + cache = list_first_entry(&dispose, struct nfs4_xattr_cache, + dispose); + list_del_init(&cache->dispose); + nfs4_xattr_discard_cache(cache); + kref_put(&cache->ref, nfs4_xattr_free_cache_cb); + } + + return freed; +} + + +static unsigned long +nfs4_xattr_cache_count(struct shrinker *shrink, struct shrink_control *sc) +{ + unsigned long count; + + count = list_lru_count(&nfs4_xattr_cache_lru); + return vfs_pressure_ratio(count); +} + +static enum lru_status +entry_lru_isolate(struct list_head *item, + struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) +{ + struct list_head *dispose = arg; + struct nfs4_xattr_bucket *bucket; + struct nfs4_xattr_cache *cache; + struct nfs4_xattr_entry *entry = container_of(item, + struct nfs4_xattr_entry, lru); + + bucket = entry->bucket; + cache = bucket->cache; + + /* + * Unhook the entry from its parent (either a cache bucket + * or a cache structure if it's a listxattr buf), so that + * it's no longer found. Then add it to the isolate list, + * to be freed later. + * + * In both cases, we're reverting lock order, so use + * trylock and skip the entry if we can't get the lock. 
+ */ + if (entry->xattr_name != NULL) { + /* Regular cache entry */ + if (!spin_trylock(&bucket->lock)) + return LRU_SKIP; + + kref_get(&entry->ref); + + hlist_del_init(&entry->hnode); + atomic_long_dec(&cache->nent); + list_lru_isolate(lru, &entry->lru); + + spin_unlock(&bucket->lock); + } else { + /* Listxattr cache entry */ + if (!spin_trylock(&cache->listxattr_lock)) + return LRU_SKIP; + + kref_get(&entry->ref); + + cache->listxattr = NULL; + list_lru_isolate(lru, &entry->lru); + + spin_unlock(&cache->listxattr_lock); + } + + list_add_tail(&entry->dispose, dispose); + return LRU_REMOVED; +} + +static unsigned long +nfs4_xattr_entry_scan(struct shrinker *shrink, struct shrink_control *sc) +{ + LIST_HEAD(dispose); + unsigned long freed; + struct nfs4_xattr_entry *entry; + struct list_lru *lru; + + lru = (shrink == &nfs4_xattr_large_entry_shrinker) ? + &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru; + + freed = list_lru_shrink_walk(lru, sc, entry_lru_isolate, &dispose); + + while (!list_empty(&dispose)) { + entry = list_first_entry(&dispose, struct nfs4_xattr_entry, + dispose); + list_del_init(&entry->dispose); + + /* + * Drop two references: the one that we just grabbed + * in entry_lru_isolate, and the one that was set + * when the entry was first allocated. + */ + kref_put(&entry->ref, nfs4_xattr_free_entry_cb); + kref_put(&entry->ref, nfs4_xattr_free_entry_cb); + } + + return freed; +} + +static unsigned long +nfs4_xattr_entry_count(struct shrinker *shrink, struct shrink_control *sc) +{ + unsigned long count; + struct list_lru *lru; + + lru = (shrink == &nfs4_xattr_large_entry_shrinker) ? + &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru; + + count = list_lru_count(lru); + return vfs_pressure_ratio(count); +} + + +static void nfs4_xattr_cache_init_once(void *p) +{ + struct nfs4_xattr_cache *cache = (struct nfs4_xattr_cache *)p; + + spin_lock_init(&cache->listxattr_lock); + atomic_long_set(&cache->nent, 0); + nfs4_xattr_hash_init(cache); + cache->listxattr = NULL; + INIT_WORK(&cache->work, nfs4_xattr_discard_cache_worker); + INIT_LIST_HEAD(&cache->lru); + INIT_LIST_HEAD(&cache->dispose); +} + +int __init nfs4_xattr_cache_init(void) +{ + int ret = 0; + + nfs4_xattr_cache_cachep = kmem_cache_create("nfs4_xattr_cache_cache", + sizeof(struct nfs4_xattr_cache), 0, + (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT), + nfs4_xattr_cache_init_once); + if (nfs4_xattr_cache_cachep == NULL) + return -ENOMEM; + + ret = list_lru_init_memcg(&nfs4_xattr_large_entry_lru, + &nfs4_xattr_large_entry_shrinker); + if (ret) + goto out4; + + ret = list_lru_init_memcg(&nfs4_xattr_entry_lru, + &nfs4_xattr_entry_shrinker); + if (ret) + goto out3; + + ret = list_lru_init_memcg(&nfs4_xattr_cache_lru, + &nfs4_xattr_cache_shrinker); + if (ret) + goto out2; + + nfs4_xattr_cache_wq = alloc_workqueue("nfs4_xattr", WQ_MEM_RECLAIM, 0); + if (nfs4_xattr_cache_wq == NULL) + goto out1; + + ret = register_shrinker(&nfs4_xattr_cache_shrinker); + if (ret) + goto out0; + + ret = register_shrinker(&nfs4_xattr_entry_shrinker); + if (ret) + goto out; + + ret = register_shrinker(&nfs4_xattr_large_entry_shrinker); + if (!ret) + return 0; + + unregister_shrinker(&nfs4_xattr_entry_shrinker); +out: + unregister_shrinker(&nfs4_xattr_cache_shrinker); +out0: + destroy_workqueue(nfs4_xattr_cache_wq); +out1: + list_lru_destroy(&nfs4_xattr_cache_lru); +out2: + list_lru_destroy(&nfs4_xattr_entry_lru); +out3: + list_lru_destroy(&nfs4_xattr_large_entry_lru); +out4: + kmem_cache_destroy(nfs4_xattr_cache_cachep); + + return ret; +} + 
+void nfs4_xattr_cache_exit(void) +{ + unregister_shrinker(&nfs4_xattr_entry_shrinker); + unregister_shrinker(&nfs4_xattr_cache_shrinker); + list_lru_destroy(&nfs4_xattr_entry_lru); + list_lru_destroy(&nfs4_xattr_cache_lru); + kmem_cache_destroy(nfs4_xattr_cache_cachep); + destroy_workqueue(nfs4_xattr_cache_wq); +} diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 2fa9e4ea98d2..7e16a586f3fc 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -626,12 +626,34 @@ static inline bool nfs4_state_match_open_stateid_other(const struct nfs4_state * nfs4_stateid_match_other(&state->open_stateid, stateid); } +/* nfs42xattr.c */ +#ifdef CONFIG_NFS_V4_2 +extern int __init nfs4_xattr_cache_init(void); +extern void nfs4_xattr_cache_exit(void); +extern void nfs4_xattr_cache_add(struct inode *inode, const char *name, + const char *buf, struct page **pages, + ssize_t buflen); +extern void nfs4_xattr_cache_remove(struct inode *inode, const char *name); +extern ssize_t nfs4_xattr_cache_get(struct inode *inode, const char *name, + char *buf, ssize_t buflen); +extern void nfs4_xattr_cache_set_list(struct inode *inode, const char *buf, + ssize_t buflen); +extern ssize_t nfs4_xattr_cache_list(struct inode *inode, char *buf, + ssize_t buflen); +extern void nfs4_xattr_cache_zap(struct inode *inode); #else +static inline void nfs4_xattr_cache_zap(struct inode *inode) +{ +} +#endif /* CONFIG_NFS_V4_2 */ + +#else /* CONFIG_NFS_V4 */ #define nfs4_close_state(a, b) do { } while (0) #define nfs4_close_sync(a, b) do { } while (0) #define nfs4_state_protect(a, b, c, d) do { } while (0) #define nfs4_state_protect_write(a, b, c, d) do { } while (0) + #endif /* CONFIG_NFS_V4 */ #endif /* __LINUX_FS_NFS_NFS4_FS.H */ diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 92a07956f07b..f670ff64b31e 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7448,6 +7448,7 @@ static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler, size_t buflen, int flags) { struct nfs_access_entry cache; + int ret; if (!nfs_server_capable(inode, NFS_CAP_XATTR)) return -EOPNOTSUPP; @@ -7466,10 +7467,17 @@ static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler, return -EACCES; } - if (buf == NULL) - return nfs42_proc_removexattr(inode, key); - else - return nfs42_proc_setxattr(inode, key, buf, buflen, flags); + if (buf == NULL) { + ret = nfs42_proc_removexattr(inode, key); + if (!ret) + nfs4_xattr_cache_remove(inode, key); + } else { + ret = nfs42_proc_setxattr(inode, key, buf, buflen, flags); + if (!ret) + nfs4_xattr_cache_add(inode, key, buf, NULL, buflen); + } + + return ret; } static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler, @@ -7477,6 +7485,7 @@ static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler, const char *key, void *buf, size_t buflen) { struct nfs_access_entry cache; + ssize_t ret; if (!nfs_server_capable(inode, NFS_CAP_XATTR)) return -EOPNOTSUPP; @@ -7486,7 +7495,17 @@ static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler, return -EACCES; } - return nfs42_proc_getxattr(inode, key, buf, buflen); + ret = nfs_revalidate_inode(NFS_SERVER(inode), inode); + if (ret) + return ret; + + ret = nfs4_xattr_cache_get(inode, key, buf, buflen); + if (ret >= 0 || (ret < 0 && ret != -ENOENT)) + return ret; + + ret = nfs42_proc_getxattr(inode, key, buf, buflen); + + return ret; } static ssize_t @@ -7494,7 +7513,7 @@ nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) { u64 cookie; bool eof; - int ret, size; + ssize_t ret, size; 
char *buf; size_t buflen; struct nfs_access_entry cache; @@ -7507,6 +7526,14 @@ nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) return 0; } + ret = nfs_revalidate_inode(NFS_SERVER(inode), inode); + if (ret) + return ret; + + ret = nfs4_xattr_cache_list(inode, list, list_len); + if (ret >= 0 || (ret < 0 && ret != -ENOENT)) + return ret; + cookie = 0; eof = false; buflen = list_len ? list_len : XATTR_LIST_MAX; @@ -7526,6 +7553,9 @@ nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) size += ret; } + if (list_len) + nfs4_xattr_cache_set_list(inode, list, size); + return size; } diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c index 1475f932d7da..0c1ab846b83d 100644 --- a/fs/nfs/nfs4super.c +++ b/fs/nfs/nfs4super.c @@ -69,6 +69,7 @@ static void nfs4_evict_inode(struct inode *inode) pnfs_destroy_layout(NFS_I(inode)); /* First call standard NFS clear_inode() code */ nfs_clear_inode(inode); + nfs4_xattr_cache_zap(inode); } struct nfs_referral_count { @@ -268,6 +269,12 @@ static int __init init_nfs_v4(void) if (err) goto out1; +#ifdef CONFIG_NFS_V4_2 + err = nfs4_xattr_cache_init(); + if (err) + goto out2; +#endif + err = nfs4_register_sysctl(); if (err) goto out2; @@ -288,6 +295,9 @@ static void __exit exit_nfs_v4(void) nfs4_pnfs_v3_ds_connect_unload(); unregister_nfs_version(&nfs_v4); +#ifdef CONFIG_NFS_V4_2 + nfs4_xattr_cache_exit(); +#endif nfs4_unregister_sysctl(); nfs_idmap_quit(); nfs_dns_resolver_destroy(); diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 943ee750d68c..a2c6455ea3fa 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -102,6 +102,8 @@ struct nfs_delegation; struct posix_acl; +struct nfs4_xattr_cache; + /* * nfs fs inode data in memory */ @@ -188,6 +190,10 @@ struct nfs_inode { struct fscache_cookie *fscache; #endif struct inode vfs_inode; + +#ifdef CONFIG_NFS_V4_2 + struct nfs4_xattr_cache *xattr_cache; +#endif }; struct nfs4_copy_state { diff --git a/include/uapi/linux/nfs_fs.h b/include/uapi/linux/nfs_fs.h index 7bcc8cd6831d..3afe3767c55d 100644 --- a/include/uapi/linux/nfs_fs.h +++ b/include/uapi/linux/nfs_fs.h @@ -56,6 +56,7 @@ #define NFSDBG_PNFS 0x1000 #define NFSDBG_PNFS_LD 0x2000 #define NFSDBG_STATE 0x4000 +#define NFSDBG_XATTRCACHE 0x8000 #define NFSDBG_ALL 0xFFFF -- cgit v1.2.3 From 8aa5a33578e9685d06020bd10d1637557423e945 Mon Sep 17 00:00:00 2001 From: Ciara Loftus Date: Wed, 8 Jul 2020 07:28:33 +0000 Subject: xsk: Add new statistics It can be useful for the user to know the reason behind a dropped packet. Introduce new counters which track drops on the receive path caused by: 1. rx ring being full 2. fill ring being empty Also, on the tx path introduce a counter which tracks the number of times we attempt pull from the tx ring when it is empty. Signed-off-by: Ciara Loftus Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200708072835.4427-2-ciara.loftus@intel.com --- include/net/xdp_sock.h | 4 ++++ include/uapi/linux/if_xdp.h | 5 ++++- net/xdp/xsk.c | 36 +++++++++++++++++++++++++++++++----- net/xdp/xsk_buff_pool.c | 1 + net/xdp/xsk_queue.h | 6 ++++++ tools/include/uapi/linux/if_xdp.h | 5 ++++- 6 files changed, 50 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h index 96bfc5f5f24e..c9d87cc40c11 100644 --- a/include/net/xdp_sock.h +++ b/include/net/xdp_sock.h @@ -69,7 +69,11 @@ struct xdp_sock { spinlock_t tx_completion_lock; /* Protects generic receive. 
*/ spinlock_t rx_lock; + + /* Statistics */ u64 rx_dropped; + u64 rx_queue_full; + struct list_head map_list; /* Protects map_list */ spinlock_t map_list_lock; diff --git a/include/uapi/linux/if_xdp.h b/include/uapi/linux/if_xdp.h index be328c59389d..a78a8096f4ce 100644 --- a/include/uapi/linux/if_xdp.h +++ b/include/uapi/linux/if_xdp.h @@ -73,9 +73,12 @@ struct xdp_umem_reg { }; struct xdp_statistics { - __u64 rx_dropped; /* Dropped for reasons other than invalid desc */ + __u64 rx_dropped; /* Dropped for other reasons */ __u64 rx_invalid_descs; /* Dropped due to invalid descriptor */ __u64 tx_invalid_descs; /* Dropped due to invalid descriptor */ + __u64 rx_ring_full; /* Dropped due to rx ring being full */ + __u64 rx_fill_ring_empty_descs; /* Failed to retrieve item from fill ring */ + __u64 tx_ring_empty_descs; /* Failed to retrieve item from tx ring */ }; struct xdp_options { diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 3700266229f6..26e3bba8c204 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -123,7 +123,7 @@ static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len) addr = xp_get_handle(xskb); err = xskq_prod_reserve_desc(xs->rx, addr, len); if (err) { - xs->rx_dropped++; + xs->rx_queue_full++; return err; } @@ -274,8 +274,10 @@ bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc) rcu_read_lock(); list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) { - if (!xskq_cons_peek_desc(xs->tx, desc, umem)) + if (!xskq_cons_peek_desc(xs->tx, desc, umem)) { + xs->tx->queue_empty_descs++; continue; + } /* This is the backpressure mechanism for the Tx path. * Reserve space in the completion queue and only proceed @@ -387,6 +389,8 @@ static int xsk_generic_xmit(struct sock *sk) sent_frame = true; } + xs->tx->queue_empty_descs++; + out: if (sent_frame) sk->sk_write_space(sk); @@ -812,6 +816,12 @@ static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring) ring->desc = offsetof(struct xdp_umem_ring, desc); } +struct xdp_statistics_v1 { + __u64 rx_dropped; + __u64 rx_invalid_descs; + __u64 tx_invalid_descs; +}; + static int xsk_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { @@ -831,19 +841,35 @@ static int xsk_getsockopt(struct socket *sock, int level, int optname, case XDP_STATISTICS: { struct xdp_statistics stats; + bool extra_stats = true; + size_t stats_size; - if (len < sizeof(stats)) + if (len < sizeof(struct xdp_statistics_v1)) { return -EINVAL; + } else if (len < sizeof(stats)) { + extra_stats = false; + stats_size = sizeof(struct xdp_statistics_v1); + } else { + stats_size = sizeof(stats); + } mutex_lock(&xs->mutex); stats.rx_dropped = xs->rx_dropped; + if (extra_stats) { + stats.rx_ring_full = xs->rx_queue_full; + stats.rx_fill_ring_empty_descs = + xs->umem ? 
xskq_nb_queue_empty_descs(xs->umem->fq) : 0; + stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx); + } else { + stats.rx_dropped += xs->rx_queue_full; + } stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx); stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx); mutex_unlock(&xs->mutex); - if (copy_to_user(optval, &stats, sizeof(stats))) + if (copy_to_user(optval, &stats, stats_size)) return -EFAULT; - if (put_user(sizeof(stats), optlen)) + if (put_user(stats_size, optlen)) return -EFAULT; return 0; diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c index 540ed75e4482..89cf3551d3e9 100644 --- a/net/xdp/xsk_buff_pool.c +++ b/net/xdp/xsk_buff_pool.c @@ -235,6 +235,7 @@ static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool) for (;;) { if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) { + pool->fq->queue_empty_descs++; xp_release(xskb); return NULL; } diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h index 5b5d24d2dd37..bf42cfd74b89 100644 --- a/net/xdp/xsk_queue.h +++ b/net/xdp/xsk_queue.h @@ -38,6 +38,7 @@ struct xsk_queue { u32 cached_cons; struct xdp_ring *ring; u64 invalid_descs; + u64 queue_empty_descs; }; /* The structure of the shared state of the rings are the same as the @@ -354,6 +355,11 @@ static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q) return q ? q->invalid_descs : 0; } +static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q) +{ + return q ? q->queue_empty_descs : 0; +} + struct xsk_queue *xskq_create(u32 nentries, bool umem_queue); void xskq_destroy(struct xsk_queue *q_ops); diff --git a/tools/include/uapi/linux/if_xdp.h b/tools/include/uapi/linux/if_xdp.h index be328c59389d..a78a8096f4ce 100644 --- a/tools/include/uapi/linux/if_xdp.h +++ b/tools/include/uapi/linux/if_xdp.h @@ -73,9 +73,12 @@ struct xdp_umem_reg { }; struct xdp_statistics { - __u64 rx_dropped; /* Dropped for reasons other than invalid desc */ + __u64 rx_dropped; /* Dropped for other reasons */ __u64 rx_invalid_descs; /* Dropped due to invalid descriptor */ __u64 tx_invalid_descs; /* Dropped due to invalid descriptor */ + __u64 rx_ring_full; /* Dropped due to rx ring being full */ + __u64 rx_fill_ring_empty_descs; /* Failed to retrieve item from fill ring */ + __u64 tx_ring_empty_descs; /* Failed to retrieve item from tx ring */ }; struct xdp_options { -- cgit v1.2.3 From 0d80cb4612aa32dc0faa17fa3ab6f96f33e2b4a7 Mon Sep 17 00:00:00 2001 From: Ciara Loftus Date: Wed, 8 Jul 2020 07:28:35 +0000 Subject: xsk: Add xdp statistics to xsk_diag Add xdp statistics to the information dumped through the xsk_diag interface Signed-off-by: Ciara Loftus Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200708072835.4427-4-ciara.loftus@intel.com --- include/uapi/linux/xdp_diag.h | 11 +++++++++++ net/xdp/xsk_diag.c | 17 +++++++++++++++++ 2 files changed, 28 insertions(+) (limited to 'include') diff --git a/include/uapi/linux/xdp_diag.h b/include/uapi/linux/xdp_diag.h index 78b2591a7782..66b9973b4f4c 100644 --- a/include/uapi/linux/xdp_diag.h +++ b/include/uapi/linux/xdp_diag.h @@ -30,6 +30,7 @@ struct xdp_diag_msg { #define XDP_SHOW_RING_CFG (1 << 1) #define XDP_SHOW_UMEM (1 << 2) #define XDP_SHOW_MEMINFO (1 << 3) +#define XDP_SHOW_STATS (1 << 4) enum { XDP_DIAG_NONE, @@ -41,6 +42,7 @@ enum { XDP_DIAG_UMEM_FILL_RING, XDP_DIAG_UMEM_COMPLETION_RING, XDP_DIAG_MEMINFO, + XDP_DIAG_STATS, __XDP_DIAG_MAX, }; @@ -69,4 +71,13 @@ struct xdp_diag_umem { __u32 refs; }; +struct xdp_diag_stats { + __u64 n_rx_dropped; + __u64 n_rx_invalid; + __u64 n_rx_full; 
+ __u64 n_fill_ring_empty; + __u64 n_tx_invalid; + __u64 n_tx_ring_empty; +}; + #endif /* _LINUX_XDP_DIAG_H */ diff --git a/net/xdp/xsk_diag.c b/net/xdp/xsk_diag.c index 0163b26aaf63..21e9c2d123ee 100644 --- a/net/xdp/xsk_diag.c +++ b/net/xdp/xsk_diag.c @@ -76,6 +76,19 @@ static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb) return err; } +static int xsk_diag_put_stats(const struct xdp_sock *xs, struct sk_buff *nlskb) +{ + struct xdp_diag_stats du = {}; + + du.n_rx_dropped = xs->rx_dropped; + du.n_rx_invalid = xskq_nb_invalid_descs(xs->rx); + du.n_rx_full = xs->rx_queue_full; + du.n_fill_ring_empty = xs->umem ? xskq_nb_queue_empty_descs(xs->umem->fq) : 0; + du.n_tx_invalid = xskq_nb_invalid_descs(xs->tx); + du.n_tx_ring_empty = xskq_nb_queue_empty_descs(xs->tx); + return nla_put(nlskb, XDP_DIAG_STATS, sizeof(du), &du); +} + static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb, struct xdp_diag_req *req, struct user_namespace *user_ns, @@ -118,6 +131,10 @@ static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb, sock_diag_put_meminfo(sk, nlskb, XDP_DIAG_MEMINFO)) goto out_nlmsg_trim; + if ((req->xdiag_show & XDP_SHOW_STATS) && + xsk_diag_put_stats(xs, nlskb)) + goto out_nlmsg_trim; + mutex_unlock(&xs->mutex); nlmsg_end(nlskb, nlh); return 0; -- cgit v1.2.3 From ed757328c34015be4ec51861a90bb3bcc807ad58 Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Mon, 13 Jul 2020 12:24:18 +0200 Subject: atm: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Signed-off-by: David S. Miller --- drivers/atm/solos-pci.c | 2 +- include/uapi/linux/atmioc.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index c32f7dd9879a..b7646ae55942 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Driver for the Solos PCI ADSL2+ card, designed to support Linux by - * Traverse Technologies -- http://www.traverse.com.au/ + * Traverse Technologies -- https://www.traverse.com.au/ * Xrio Limited -- http://www.xrio.com/ * * Copyright © 2008 Traverse Technologies diff --git a/include/uapi/linux/atmioc.h b/include/uapi/linux/atmioc.h index cd7655e40c77..a9030bcc8d56 100644 --- a/include/uapi/linux/atmioc.h +++ b/include/uapi/linux/atmioc.h @@ -5,7 +5,7 @@ /* - * See http://icawww1.epfl.ch/linux-atm/magic.html for the complete list of + * See https://icawww1.epfl.ch/linux-atm/magic.html for the complete list of * "magic" ioctl numbers. */ -- cgit v1.2.3 From c40f4e50b6cfc7c66f69d12c6b3fbcd954f1ded5 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sat, 11 Jul 2020 00:55:03 +0300 Subject: net: sched: Pass qdisc reference in struct flow_block_offload Previously, shared blocks were only relevant for the pseudo-qdiscs ingress and clsact. Recently, a qevent facility was introduced, which allows to bind blocks to well-defined slots of a qdisc instance. RED in particular got two qevents: early_drop and mark. 
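For illustration, the updated indirect-block callback a driver implements then looks roughly like this (the driver name and helper are hypothetical; the signature matches the one introduced below):

static int foo_indr_setup_tc_cb(struct net_device *dev, struct Qdisc *sch,
				void *cb_priv, enum tc_setup_type type,
				void *type_data, void *data,
				void (*cleanup)(struct flow_block_cb *block_cb))
{
	switch (type) {
	case TC_SETUP_BLOCK:
		/* 'sch' identifies the qdisc instance, for example the RED
		 * qdisc whose qevent block is being offloaded. */
		return foo_setup_indr_block(dev, sch, cb_priv, type_data,
					    data, cleanup);
	default:
		return -EOPNOTSUPP;
	}
}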
Drivers that wish to offload these blocks will be sent the usual notification, and need to know which qdisc it is related to. To that end, extend flow_block_offload with a "sch" pointer, and initialize as appropriate. This prompts changes in the indirect block facility, which now tracks the scheduler in addition to the netdevice. Update signatures of several functions similarly. Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 9 ++++----- drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | 10 +++++----- drivers/net/ethernet/netronome/nfp/flower/main.h | 2 +- drivers/net/ethernet/netronome/nfp/flower/offload.c | 8 ++++---- include/net/flow_offload.h | 9 ++++++--- net/core/flow_offload.c | 12 +++++++----- net/netfilter/nf_flow_table_offload.c | 2 +- net/netfilter/nf_tables_offload.c | 2 +- net/sched/cls_api.c | 16 +++++++++------- 9 files changed, 38 insertions(+), 32 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 0a9a4467d7c7..e82e5cf64d61 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -1888,7 +1888,7 @@ static void bnxt_tc_setup_indr_rel(void *cb_priv) kfree(priv); } -static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp, +static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct Qdisc *sch, struct bnxt *bp, struct flow_block_offload *f, void *data, void (*cleanup)(struct flow_block_cb *block_cb)) { @@ -1911,7 +1911,7 @@ static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp, block_cb = flow_indr_block_cb_alloc(bnxt_tc_setup_indr_block_cb, cb_priv, cb_priv, bnxt_tc_setup_indr_rel, f, - netdev, data, bp, cleanup); + netdev, sch, data, bp, cleanup); if (IS_ERR(block_cb)) { list_del(&cb_priv->list); kfree(cb_priv); @@ -1946,7 +1946,7 @@ static bool bnxt_is_netdev_indr_offload(struct net_device *netdev) return netif_is_vxlan(netdev); } -static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv, +static int bnxt_tc_setup_indr_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv, enum tc_setup_type type, void *type_data, void *data, void (*cleanup)(struct flow_block_cb *block_cb)) @@ -1956,8 +1956,7 @@ static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv, switch (type) { case TC_SETUP_BLOCK: - return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data, data, - cleanup); + return bnxt_tc_setup_indr_block(netdev, sch, cb_priv, type_data, data, cleanup); default: break; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c index ece8f535ce80..f8af109d34cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c @@ -404,7 +404,7 @@ static void mlx5e_rep_indr_block_unbind(void *cb_priv) static LIST_HEAD(mlx5e_block_cb_list); static int -mlx5e_rep_indr_setup_block(struct net_device *netdev, +mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch, struct mlx5e_rep_priv *rpriv, struct flow_block_offload *f, flow_setup_cb_t *setup_cb, @@ -442,7 +442,7 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, block_cb = flow_indr_block_cb_alloc(setup_cb, indr_priv, indr_priv, mlx5e_rep_indr_block_unbind, - f, netdev, data, rpriv, + f, netdev, sch, data, rpriv, cleanup); if (IS_ERR(block_cb)) { list_del(&indr_priv->list); @@ 
-472,18 +472,18 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, } static -int mlx5e_rep_indr_setup_cb(struct net_device *netdev, void *cb_priv, +int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv, enum tc_setup_type type, void *type_data, void *data, void (*cleanup)(struct flow_block_cb *block_cb)) { switch (type) { case TC_SETUP_BLOCK: - return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data, + return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data, mlx5e_rep_indr_setup_tc_cb, data, cleanup); case TC_SETUP_FT: - return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data, + return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data, mlx5e_rep_indr_setup_ft_cb, data, cleanup); default: diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 7f54a620acad..3bf9c1afa45e 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -458,7 +458,7 @@ void nfp_flower_qos_cleanup(struct nfp_app *app); int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev, struct tc_cls_matchall_offload *flow); void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb); -int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv, +int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv, enum tc_setup_type type, void *type_data, void *data, void (*cleanup)(struct flow_block_cb *block_cb)); diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 3af27bb5f4b0..4651fe417b7f 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1646,7 +1646,7 @@ void nfp_flower_setup_indr_tc_release(void *cb_priv) } static int -nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, +nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, struct nfp_app *app, struct flow_block_offload *f, void *data, void (*cleanup)(struct flow_block_cb *block_cb)) { @@ -1680,7 +1680,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, block_cb = flow_indr_block_cb_alloc(nfp_flower_setup_indr_block_cb, cb_priv, cb_priv, nfp_flower_setup_indr_tc_release, - f, netdev, data, app, cleanup); + f, netdev, sch, data, app, cleanup); if (IS_ERR(block_cb)) { list_del(&cb_priv->list); kfree(cb_priv); @@ -1711,7 +1711,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, } int -nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv, +nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv, enum tc_setup_type type, void *type_data, void *data, void (*cleanup)(struct flow_block_cb *block_cb)) @@ -1721,7 +1721,7 @@ nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv, switch (type) { case TC_SETUP_BLOCK: - return nfp_flower_setup_indr_tc_block(netdev, cb_priv, + return nfp_flower_setup_indr_tc_block(netdev, sch, cb_priv, type_data, data, cleanup); default: return -EOPNOTSUPP; diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index de395498440d..9f88a7b730a8 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -444,6 +444,7 @@ struct flow_block_offload { struct list_head cb_list; struct list_head *driver_block_list; struct netlink_ext_ack 
*extack; + struct Qdisc *sch; }; enum tc_setup_type; @@ -455,6 +456,7 @@ struct flow_block_cb; struct flow_block_indr { struct list_head list; struct net_device *dev; + struct Qdisc *sch; enum flow_block_binder_type binder_type; void *data; void *cb_priv; @@ -479,7 +481,8 @@ struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb, void *cb_ident, void *cb_priv, void (*release)(void *cb_priv), struct flow_block_offload *bo, - struct net_device *dev, void *data, + struct net_device *dev, + struct Qdisc *sch, void *data, void *indr_cb_priv, void (*cleanup)(struct flow_block_cb *block_cb)); void flow_block_cb_free(struct flow_block_cb *block_cb); @@ -553,7 +556,7 @@ static inline void flow_block_init(struct flow_block *flow_block) INIT_LIST_HEAD(&flow_block->cb_list); } -typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv, +typedef int flow_indr_block_bind_cb_t(struct net_device *dev, struct Qdisc *sch, void *cb_priv, enum tc_setup_type type, void *type_data, void *data, void (*cleanup)(struct flow_block_cb *block_cb)); @@ -561,7 +564,7 @@ typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv, int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv); void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv, void (*release)(void *cb_priv)); -int flow_indr_dev_setup_offload(struct net_device *dev, +int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch, enum tc_setup_type type, void *data, struct flow_block_offload *bo, void (*cleanup)(struct flow_block_cb *block_cb)); diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c index b739cfab796e..b8cf6ff5f961 100644 --- a/net/core/flow_offload.c +++ b/net/core/flow_offload.c @@ -429,7 +429,7 @@ EXPORT_SYMBOL(flow_indr_dev_unregister); static void flow_block_indr_init(struct flow_block_cb *flow_block, struct flow_block_offload *bo, - struct net_device *dev, void *data, + struct net_device *dev, struct Qdisc *sch, void *data, void *cb_priv, void (*cleanup)(struct flow_block_cb *block_cb)) { @@ -437,6 +437,7 @@ static void flow_block_indr_init(struct flow_block_cb *flow_block, flow_block->indr.data = data; flow_block->indr.cb_priv = cb_priv; flow_block->indr.dev = dev; + flow_block->indr.sch = sch; flow_block->indr.cleanup = cleanup; } @@ -444,7 +445,8 @@ struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb, void *cb_ident, void *cb_priv, void (*release)(void *cb_priv), struct flow_block_offload *bo, - struct net_device *dev, void *data, + struct net_device *dev, + struct Qdisc *sch, void *data, void *indr_cb_priv, void (*cleanup)(struct flow_block_cb *block_cb)) { @@ -454,7 +456,7 @@ struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb, if (IS_ERR(block_cb)) goto out; - flow_block_indr_init(block_cb, bo, dev, data, indr_cb_priv, cleanup); + flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup); list_add(&block_cb->indr.list, &flow_block_indr_list); out: @@ -462,7 +464,7 @@ out: } EXPORT_SYMBOL(flow_indr_block_cb_alloc); -int flow_indr_dev_setup_offload(struct net_device *dev, +int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch, enum tc_setup_type type, void *data, struct flow_block_offload *bo, void (*cleanup)(struct flow_block_cb *block_cb)) @@ -471,7 +473,7 @@ int flow_indr_dev_setup_offload(struct net_device *dev, mutex_lock(&flow_indr_block_lock); list_for_each_entry(this, &flow_block_indr_dev_list, list) - this->cb(dev, this->cb_priv, type, bo, data, cleanup); 
+ this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup); mutex_unlock(&flow_indr_block_lock); diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c index 5fff1e040168..2a6993fa40d7 100644 --- a/net/netfilter/nf_flow_table_offload.c +++ b/net/netfilter/nf_flow_table_offload.c @@ -964,7 +964,7 @@ static int nf_flow_table_indr_offload_cmd(struct flow_block_offload *bo, nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable, extack); - return flow_indr_dev_setup_offload(dev, TC_SETUP_FT, flowtable, bo, + return flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_FT, flowtable, bo, nf_flow_table_indr_cleanup); } diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c index c7cf1cde46de..9ef37c1b7b3b 100644 --- a/net/netfilter/nf_tables_offload.c +++ b/net/netfilter/nf_tables_offload.c @@ -312,7 +312,7 @@ static int nft_indr_block_offload_cmd(struct nft_base_chain *basechain, nft_flow_block_offload_init(&bo, dev_net(dev), cmd, basechain, &extack); - err = flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, basechain, &bo, + err = flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_BLOCK, basechain, &bo, nft_indr_block_cleanup); if (err < 0) return err; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index e617f3e27ec0..322b279154de 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -622,7 +622,7 @@ static int tcf_block_setup(struct tcf_block *block, struct flow_block_offload *bo); static void tcf_block_offload_init(struct flow_block_offload *bo, - struct net_device *dev, + struct net_device *dev, struct Qdisc *sch, enum flow_block_command command, enum flow_block_binder_type binder_type, struct flow_block *flow_block, @@ -634,6 +634,7 @@ static void tcf_block_offload_init(struct flow_block_offload *bo, bo->block = flow_block; bo->block_shared = shared; bo->extack = extack; + bo->sch = sch; INIT_LIST_HEAD(&bo->cb_list); } @@ -644,10 +645,11 @@ static void tc_block_indr_cleanup(struct flow_block_cb *block_cb) { struct tcf_block *block = block_cb->indr.data; struct net_device *dev = block_cb->indr.dev; + struct Qdisc *sch = block_cb->indr.sch; struct netlink_ext_ack extack = {}; struct flow_block_offload bo; - tcf_block_offload_init(&bo, dev, FLOW_BLOCK_UNBIND, + tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND, block_cb->indr.binder_type, &block->flow_block, tcf_block_shared(block), &extack); @@ -666,14 +668,14 @@ static bool tcf_block_offload_in_use(struct tcf_block *block) } static int tcf_block_offload_cmd(struct tcf_block *block, - struct net_device *dev, + struct net_device *dev, struct Qdisc *sch, struct tcf_block_ext_info *ei, enum flow_block_command command, struct netlink_ext_ack *extack) { struct flow_block_offload bo = {}; - tcf_block_offload_init(&bo, dev, command, ei->binder_type, + tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type, &block->flow_block, tcf_block_shared(block), extack); @@ -690,7 +692,7 @@ static int tcf_block_offload_cmd(struct tcf_block *block, return tcf_block_setup(block, &bo); } - flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, block, &bo, + flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo, tc_block_indr_cleanup); tcf_block_setup(block, &bo); @@ -717,7 +719,7 @@ static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q, goto err_unlock; } - err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack); + err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack); if (err == -EOPNOTSUPP) goto 
no_offload_dev_inc; if (err) @@ -744,7 +746,7 @@ static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q, int err; down_write(&block->cb_lock); - err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL); + err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL); if (err == -EOPNOTSUPP) goto no_offload_dev_dec; up_write(&block->cb_lock); -- cgit v1.2.3 From 91c724cfc0cbc049f18c04634ad56080650e93b8 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Mon, 13 Jul 2020 19:57:01 +0300 Subject: net: mscc: ocelot: convert port registers to regmap At the moment, there are some minimal register differences between VSC7514 Ocelot and VSC9959 Felix. To be precise, the PCS1G registers are missing from Felix because it was integrated with an NXP PCS. But with VSC9953 Seville (not yet introduced), the register differences are more pronounced. The MAC registers are located at different offsets within the DEV_GMII target. So we need to refactor the driver to keep a regmap even for per-port registers. The callers of the ocelot_port_readl and ocelot_port_writel were kept unchanged, only the implementation is now more generic. Signed-off-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/ocelot/felix.c | 13 ++--- drivers/net/dsa/ocelot/felix_vsc9959.c | 47 +++++++++++++++++- drivers/net/ethernet/mscc/ocelot.h | 3 +- drivers/net/ethernet/mscc/ocelot_io.c | 16 +++++- drivers/net/ethernet/mscc/ocelot_net.c | 5 +- drivers/net/ethernet/mscc/ocelot_vsc7514.c | 53 ++++++++++++++++++-- include/soc/mscc/ocelot.h | 42 +++++++++++++++- include/soc/mscc/ocelot_dev.h | 78 ------------------------------ 8 files changed, 158 insertions(+), 99 deletions(-) (limited to 'include') diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 75652ed99b24..bf0bd5c7b12c 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -524,7 +524,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) for (port = 0; port < num_phys_ports; port++) { struct ocelot_port *ocelot_port; - void __iomem *port_regs; + struct regmap *target; ocelot_port = devm_kzalloc(ocelot->dev, sizeof(struct ocelot_port), @@ -541,17 +541,18 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) res.start += switch_base; res.end += switch_base; - port_regs = devm_ioremap_resource(ocelot->dev, &res); - if (IS_ERR(port_regs)) { + target = ocelot_regmap_init(ocelot, &res); + if (IS_ERR(target)) { dev_err(ocelot->dev, - "failed to map registers for port %d\n", port); + "Failed to map memory space for port %d\n", + port); kfree(port_phy_modes); - return PTR_ERR(port_regs); + return PTR_ERR(target); } ocelot_port->phy_mode = port_phy_modes[port]; ocelot_port->ocelot = ocelot; - ocelot_port->regs = port_regs; + ocelot_port->target = target; ocelot->ports[port] = ocelot_port; } diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 19614537b1ba..0c54d67a4039 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -329,7 +329,49 @@ static const u32 vsc9959_gcb_regmap[] = { REG(GCB_SOFT_RST, 0x000004), }; -static const u32 *vsc9959_regmap[] = { +static const u32 vsc9959_dev_gmii_regmap[] = { + REG(DEV_CLOCK_CFG, 0x0), + REG(DEV_PORT_MISC, 0x4), + REG(DEV_EVENTS, 0x8), + REG(DEV_EEE_CFG, 0xc), + REG(DEV_RX_PATH_DELAY, 0x10), + REG(DEV_TX_PATH_DELAY, 0x14), + REG(DEV_PTP_PREDICT_CFG, 0x18), + 
REG(DEV_MAC_ENA_CFG, 0x1c), + REG(DEV_MAC_MODE_CFG, 0x20), + REG(DEV_MAC_MAXLEN_CFG, 0x24), + REG(DEV_MAC_TAGS_CFG, 0x28), + REG(DEV_MAC_ADV_CHK_CFG, 0x2c), + REG(DEV_MAC_IFG_CFG, 0x30), + REG(DEV_MAC_HDX_CFG, 0x34), + REG(DEV_MAC_DBG_CFG, 0x38), + REG(DEV_MAC_FC_MAC_LOW_CFG, 0x3c), + REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x40), + REG(DEV_MAC_STICKY, 0x44), + REG_RESERVED(PCS1G_CFG), + REG_RESERVED(PCS1G_MODE_CFG), + REG_RESERVED(PCS1G_SD_CFG), + REG_RESERVED(PCS1G_ANEG_CFG), + REG_RESERVED(PCS1G_ANEG_NP_CFG), + REG_RESERVED(PCS1G_LB_CFG), + REG_RESERVED(PCS1G_DBG_CFG), + REG_RESERVED(PCS1G_CDET_CFG), + REG_RESERVED(PCS1G_ANEG_STATUS), + REG_RESERVED(PCS1G_ANEG_NP_STATUS), + REG_RESERVED(PCS1G_LINK_STATUS), + REG_RESERVED(PCS1G_LINK_DOWN_CNT), + REG_RESERVED(PCS1G_STICKY), + REG_RESERVED(PCS1G_DEBUG_STATUS), + REG_RESERVED(PCS1G_LPI_CFG), + REG_RESERVED(PCS1G_LPI_WAKE_ERROR_CNT), + REG_RESERVED(PCS1G_LPI_STATUS), + REG_RESERVED(PCS1G_TSTPAT_MODE_CFG), + REG_RESERVED(PCS1G_TSTPAT_STATUS), + REG_RESERVED(DEV_PCS_FX100_CFG), + REG_RESERVED(DEV_PCS_FX100_STATUS), +}; + +static const u32 *vsc9959_regmap[TARGET_MAX] = { [ANA] = vsc9959_ana_regmap, [QS] = vsc9959_qs_regmap, [QSYS] = vsc9959_qsys_regmap, @@ -338,10 +380,11 @@ static const u32 *vsc9959_regmap[] = { [S2] = vsc9959_s2_regmap, [PTP] = vsc9959_ptp_regmap, [GCB] = vsc9959_gcb_regmap, + [DEV_GMII] = vsc9959_dev_gmii_regmap, }; /* Addresses are relative to the PCI device's base address */ -static const struct resource vsc9959_target_io_res[] = { +static const struct resource vsc9959_target_io_res[TARGET_MAX] = { [ANA] = { .start = 0x0280000, .end = 0x028ffff, diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index 394362e23c47..814b09dd2c11 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -105,8 +105,7 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg); #define ocelot_field_write(ocelot, reg, val) regmap_field_write((ocelot)->regfields[(reg)], (val)) #define ocelot_field_read(ocelot, reg, val) regmap_field_read((ocelot)->regfields[(reg)], (val)) -int ocelot_probe_port(struct ocelot *ocelot, u8 port, - void __iomem *regs, +int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target, struct phy_device *phy); void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu, diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c index b229b1cb68ef..741f653bc85b 100644 --- a/drivers/net/ethernet/mscc/ocelot_io.c +++ b/drivers/net/ethernet/mscc/ocelot_io.c @@ -49,13 +49,25 @@ EXPORT_SYMBOL(__ocelot_rmw_ix); u32 ocelot_port_readl(struct ocelot_port *port, u32 reg) { - return readl(port->regs + reg); + struct ocelot *ocelot = port->ocelot; + u16 target = reg >> TARGET_OFFSET; + u32 val; + + WARN_ON(!target); + + regmap_read(port->target, ocelot->map[target][reg & REG_MASK], &val); + return val; } EXPORT_SYMBOL(ocelot_port_readl); void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg) { - writel(val, port->regs + reg); + struct ocelot *ocelot = port->ocelot; + u16 target = reg >> TARGET_OFFSET; + + WARN_ON(!target); + + regmap_write(port->target, ocelot->map[target][reg & REG_MASK], val); } EXPORT_SYMBOL(ocelot_port_writel); diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 41a1b5f6df95..0668d23cdbfa 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -1005,8 +1005,7 @@ struct notifier_block 
ocelot_switchdev_blocking_nb __read_mostly = { .notifier_call = ocelot_switchdev_blocking_event, }; -int ocelot_probe_port(struct ocelot *ocelot, u8 port, - void __iomem *regs, +int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target, struct phy_device *phy) { struct ocelot_port_private *priv; @@ -1024,7 +1023,7 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port, priv->chip_port = port; ocelot_port = &priv->port; ocelot_port->ocelot = ocelot; - ocelot_port->regs = regs; + ocelot_port->target = target; ocelot->ports[port] = ocelot_port; dev->netdev_ops = &ocelot_port_netdev_ops; diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index 43716e8dc0ac..63af145e744c 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -263,7 +263,49 @@ static const u32 ocelot_ptp_regmap[] = { REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8), }; -static const u32 *ocelot_regmap[] = { +static const u32 ocelot_dev_gmii_regmap[] = { + REG(DEV_CLOCK_CFG, 0x0), + REG(DEV_PORT_MISC, 0x4), + REG(DEV_EVENTS, 0x8), + REG(DEV_EEE_CFG, 0xc), + REG(DEV_RX_PATH_DELAY, 0x10), + REG(DEV_TX_PATH_DELAY, 0x14), + REG(DEV_PTP_PREDICT_CFG, 0x18), + REG(DEV_MAC_ENA_CFG, 0x1c), + REG(DEV_MAC_MODE_CFG, 0x20), + REG(DEV_MAC_MAXLEN_CFG, 0x24), + REG(DEV_MAC_TAGS_CFG, 0x28), + REG(DEV_MAC_ADV_CHK_CFG, 0x2c), + REG(DEV_MAC_IFG_CFG, 0x30), + REG(DEV_MAC_HDX_CFG, 0x34), + REG(DEV_MAC_DBG_CFG, 0x38), + REG(DEV_MAC_FC_MAC_LOW_CFG, 0x3c), + REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x40), + REG(DEV_MAC_STICKY, 0x44), + REG(PCS1G_CFG, 0x48), + REG(PCS1G_MODE_CFG, 0x4c), + REG(PCS1G_SD_CFG, 0x50), + REG(PCS1G_ANEG_CFG, 0x54), + REG(PCS1G_ANEG_NP_CFG, 0x58), + REG(PCS1G_LB_CFG, 0x5c), + REG(PCS1G_DBG_CFG, 0x60), + REG(PCS1G_CDET_CFG, 0x64), + REG(PCS1G_ANEG_STATUS, 0x68), + REG(PCS1G_ANEG_NP_STATUS, 0x6c), + REG(PCS1G_LINK_STATUS, 0x70), + REG(PCS1G_LINK_DOWN_CNT, 0x74), + REG(PCS1G_STICKY, 0x78), + REG(PCS1G_DEBUG_STATUS, 0x7c), + REG(PCS1G_LPI_CFG, 0x80), + REG(PCS1G_LPI_WAKE_ERROR_CNT, 0x84), + REG(PCS1G_LPI_STATUS, 0x88), + REG(PCS1G_TSTPAT_MODE_CFG, 0x8c), + REG(PCS1G_TSTPAT_STATUS, 0x90), + REG(DEV_PCS_FX100_CFG, 0x94), + REG(DEV_PCS_FX100_STATUS, 0x98), +}; + +static const u32 *ocelot_regmap[TARGET_MAX] = { [ANA] = ocelot_ana_regmap, [QS] = ocelot_qs_regmap, [QSYS] = ocelot_qsys_regmap, @@ -271,6 +313,7 @@ static const u32 *ocelot_regmap[] = { [SYS] = ocelot_sys_regmap, [S2] = ocelot_s2_regmap, [PTP] = ocelot_ptp_regmap, + [DEV_GMII] = ocelot_dev_gmii_regmap, }; static const struct reg_field ocelot_regfields[] = { @@ -948,9 +991,9 @@ static int mscc_ocelot_probe(struct platform_device *pdev) struct device_node *phy_node; phy_interface_t phy_mode; struct phy_device *phy; + struct regmap *target; struct resource *res; struct phy *serdes; - void __iomem *regs; char res_name[8]; u32 port; @@ -961,8 +1004,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name); - regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(regs)) + target = ocelot_regmap_init(ocelot, res); + if (IS_ERR(target)) continue; phy_node = of_parse_phandle(portnp, "phy-handle", 0); @@ -974,7 +1017,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev) if (!phy) continue; - err = ocelot_probe_port(ocelot, port, regs, phy); + err = ocelot_probe_port(ocelot, port, target, phy); if (err) { of_node_put(portnp); goto out_put_ports; diff --git a/include/soc/mscc/ocelot.h 
b/include/soc/mscc/ocelot.h index e050f8121ba2..c2a2d0165ef1 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -126,6 +126,7 @@ enum ocelot_target { HSIO, PTP, GCB, + DEV_GMII, TARGET_MAX, }; @@ -408,6 +409,45 @@ enum ocelot_reg { PTP_CLK_CFG_ADJ_CFG, PTP_CLK_CFG_ADJ_FREQ, GCB_SOFT_RST = GCB << TARGET_OFFSET, + DEV_CLOCK_CFG = DEV_GMII << TARGET_OFFSET, + DEV_PORT_MISC, + DEV_EVENTS, + DEV_EEE_CFG, + DEV_RX_PATH_DELAY, + DEV_TX_PATH_DELAY, + DEV_PTP_PREDICT_CFG, + DEV_MAC_ENA_CFG, + DEV_MAC_MODE_CFG, + DEV_MAC_MAXLEN_CFG, + DEV_MAC_TAGS_CFG, + DEV_MAC_ADV_CHK_CFG, + DEV_MAC_IFG_CFG, + DEV_MAC_HDX_CFG, + DEV_MAC_DBG_CFG, + DEV_MAC_FC_MAC_LOW_CFG, + DEV_MAC_FC_MAC_HIGH_CFG, + DEV_MAC_STICKY, + PCS1G_CFG, + PCS1G_MODE_CFG, + PCS1G_SD_CFG, + PCS1G_ANEG_CFG, + PCS1G_ANEG_NP_CFG, + PCS1G_LB_CFG, + PCS1G_DBG_CFG, + PCS1G_CDET_CFG, + PCS1G_ANEG_STATUS, + PCS1G_ANEG_NP_STATUS, + PCS1G_LINK_STATUS, + PCS1G_LINK_DOWN_CNT, + PCS1G_STICKY, + PCS1G_DEBUG_STATUS, + PCS1G_LPI_CFG, + PCS1G_LPI_WAKE_ERROR_CNT, + PCS1G_LPI_STATUS, + PCS1G_TSTPAT_MODE_CFG, + PCS1G_TSTPAT_STATUS, + DEV_PCS_FX100_CFG, + DEV_PCS_FX100_STATUS, }; enum ocelot_regfield { @@ -494,7 +534,7 @@ struct ocelot_vcap_block { struct ocelot_port { struct ocelot *ocelot; - void __iomem *regs; + struct regmap *target; bool vlan_aware; diff --git a/include/soc/mscc/ocelot_dev.h b/include/soc/mscc/ocelot_dev.h index 7c08437061fc..0c6021f02fee 100644 --- a/include/soc/mscc/ocelot_dev.h +++ b/include/soc/mscc/ocelot_dev.h @@ -8,8 +8,6 @@ #ifndef _MSCC_OCELOT_DEV_H_ #define _MSCC_OCELOT_DEV_H_ -#define DEV_CLOCK_CFG 0x0 - #define DEV_CLOCK_CFG_MAC_TX_RST BIT(7) #define DEV_CLOCK_CFG_MAC_RX_RST BIT(6) #define DEV_CLOCK_CFG_PCS_TX_RST BIT(5) @@ -19,18 +17,12 @@ #define DEV_CLOCK_CFG_LINK_SPEED(x) ((x) & GENMASK(1, 0)) #define DEV_CLOCK_CFG_LINK_SPEED_M GENMASK(1, 0) -#define DEV_PORT_MISC 0x4 - #define DEV_PORT_MISC_FWD_ERROR_ENA BIT(4) #define DEV_PORT_MISC_FWD_PAUSE_ENA BIT(3) #define DEV_PORT_MISC_FWD_CTRL_ENA BIT(2) #define DEV_PORT_MISC_DEV_LOOP_ENA BIT(1) #define DEV_PORT_MISC_HDX_FAST_DIS BIT(0) -#define DEV_EVENTS 0x8 - -#define DEV_EEE_CFG 0xc - #define DEV_EEE_CFG_EEE_ENA BIT(22) #define DEV_EEE_CFG_EEE_TIMER_AGE(x) (((x) << 15) & GENMASK(21, 15)) #define DEV_EEE_CFG_EEE_TIMER_AGE_M GENMASK(21, 15) @@ -43,33 +35,19 @@ #define DEV_EEE_CFG_EEE_TIMER_HOLDOFF_X(x) (((x) & GENMASK(7, 1)) >> 1) #define DEV_EEE_CFG_PORT_LPI BIT(0) -#define DEV_RX_PATH_DELAY 0x10 - -#define DEV_TX_PATH_DELAY 0x14 - -#define DEV_PTP_PREDICT_CFG 0x18 - #define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG(x) (((x) << 4) & GENMASK(11, 4)) #define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG_M GENMASK(11, 4) #define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG_X(x) (((x) & GENMASK(11, 4)) >> 4) #define DEV_PTP_PREDICT_CFG_PTP_PHASE_PREDICT_CFG(x) ((x) & GENMASK(3, 0)) #define DEV_PTP_PREDICT_CFG_PTP_PHASE_PREDICT_CFG_M GENMASK(3, 0) -#define DEV_MAC_ENA_CFG 0x1c - #define DEV_MAC_ENA_CFG_RX_ENA BIT(4) #define DEV_MAC_ENA_CFG_TX_ENA BIT(0) -#define DEV_MAC_MODE_CFG 0x20 - #define DEV_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8) #define DEV_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4) #define DEV_MAC_MODE_CFG_FDX_ENA BIT(0) -#define DEV_MAC_MAXLEN_CFG 0x24 - -#define DEV_MAC_TAGS_CFG 0x28 - #define DEV_MAC_TAGS_CFG_TAG_ID(x) (((x) << 16) & GENMASK(31, 16)) #define DEV_MAC_TAGS_CFG_TAG_ID_M GENMASK(31, 16) #define DEV_MAC_TAGS_CFG_TAG_ID_X(x) (((x) & GENMASK(31, 16)) >> 16) @@ -77,12 +55,8 @@ #define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA BIT(1) #define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA 
BIT(0) -#define DEV_MAC_ADV_CHK_CFG 0x2c - #define DEV_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0) -#define DEV_MAC_IFG_CFG 0x30 - #define DEV_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17) #define DEV_MAC_IFG_CFG_REDUCED_TX_IFG BIT(16) #define DEV_MAC_IFG_CFG_TX_IFG(x) (((x) << 8) & GENMASK(12, 8)) @@ -94,8 +68,6 @@ #define DEV_MAC_IFG_CFG_RX_IFG1(x) ((x) & GENMASK(3, 0)) #define DEV_MAC_IFG_CFG_RX_IFG1_M GENMASK(3, 0) -#define DEV_MAC_HDX_CFG 0x34 - #define DEV_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26) #define DEV_MAC_HDX_CFG_OB_ENA BIT(25) #define DEV_MAC_HDX_CFG_WEXC_DIS BIT(24) @@ -107,17 +79,9 @@ #define DEV_MAC_HDX_CFG_LATE_COL_POS(x) ((x) & GENMASK(6, 0)) #define DEV_MAC_HDX_CFG_LATE_COL_POS_M GENMASK(6, 0) -#define DEV_MAC_DBG_CFG 0x38 - #define DEV_MAC_DBG_CFG_TBI_MODE BIT(4) #define DEV_MAC_DBG_CFG_IFG_CRS_EXT_CHK_ENA BIT(0) -#define DEV_MAC_FC_MAC_LOW_CFG 0x3c - -#define DEV_MAC_FC_MAC_HIGH_CFG 0x40 - -#define DEV_MAC_STICKY 0x44 - #define DEV_MAC_STICKY_RX_IPG_SHRINK_STICKY BIT(9) #define DEV_MAC_STICKY_RX_PREAM_SHRINK_STICKY BIT(8) #define DEV_MAC_STICKY_RX_CARRIER_EXT_STICKY BIT(7) @@ -129,25 +93,17 @@ #define DEV_MAC_STICKY_TX_FRM_LEN_OVR_STICKY BIT(1) #define DEV_MAC_STICKY_TX_ABORT_STICKY BIT(0) -#define PCS1G_CFG 0x48 - #define PCS1G_CFG_LINK_STATUS_TYPE BIT(4) #define PCS1G_CFG_AN_LINK_CTRL_ENA BIT(1) #define PCS1G_CFG_PCS_ENA BIT(0) -#define PCS1G_MODE_CFG 0x4c - #define PCS1G_MODE_CFG_UNIDIR_MODE_ENA BIT(4) #define PCS1G_MODE_CFG_SGMII_MODE_ENA BIT(0) -#define PCS1G_SD_CFG 0x50 - #define PCS1G_SD_CFG_SD_SEL BIT(8) #define PCS1G_SD_CFG_SD_POL BIT(4) #define PCS1G_SD_CFG_SD_ENA BIT(0) -#define PCS1G_ANEG_CFG 0x54 - #define PCS1G_ANEG_CFG_ADV_ABILITY(x) (((x) << 16) & GENMASK(31, 16)) #define PCS1G_ANEG_CFG_ADV_ABILITY_M GENMASK(31, 16) #define PCS1G_ANEG_CFG_ADV_ABILITY_X(x) (((x) & GENMASK(31, 16)) >> 16) @@ -155,29 +111,19 @@ #define PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT BIT(1) #define PCS1G_ANEG_CFG_ANEG_ENA BIT(0) -#define PCS1G_ANEG_NP_CFG 0x58 - #define PCS1G_ANEG_NP_CFG_NP_TX(x) (((x) << 16) & GENMASK(31, 16)) #define PCS1G_ANEG_NP_CFG_NP_TX_M GENMASK(31, 16) #define PCS1G_ANEG_NP_CFG_NP_TX_X(x) (((x) & GENMASK(31, 16)) >> 16) #define PCS1G_ANEG_NP_CFG_NP_LOADED_ONE_SHOT BIT(0) -#define PCS1G_LB_CFG 0x5c - #define PCS1G_LB_CFG_RA_ENA BIT(4) #define PCS1G_LB_CFG_GMII_PHY_LB_ENA BIT(1) #define PCS1G_LB_CFG_TBI_HOST_LB_ENA BIT(0) -#define PCS1G_DBG_CFG 0x60 - #define PCS1G_DBG_CFG_UDLT BIT(0) -#define PCS1G_CDET_CFG 0x64 - #define PCS1G_CDET_CFG_CDET_ENA BIT(0) -#define PCS1G_ANEG_STATUS 0x68 - #define PCS1G_ANEG_STATUS_LP_ADV_ABILITY(x) (((x) << 16) & GENMASK(31, 16)) #define PCS1G_ANEG_STATUS_LP_ADV_ABILITY_M GENMASK(31, 16) #define PCS1G_ANEG_STATUS_LP_ADV_ABILITY_X(x) (((x) & GENMASK(31, 16)) >> 16) @@ -185,10 +131,6 @@ #define PCS1G_ANEG_STATUS_PAGE_RX_STICKY BIT(3) #define PCS1G_ANEG_STATUS_ANEG_COMPLETE BIT(0) -#define PCS1G_ANEG_NP_STATUS 0x6c - -#define PCS1G_LINK_STATUS 0x70 - #define PCS1G_LINK_STATUS_DELAY_VAR(x) (((x) << 12) & GENMASK(15, 12)) #define PCS1G_LINK_STATUS_DELAY_VAR_M GENMASK(15, 12) #define PCS1G_LINK_STATUS_DELAY_VAR_X(x) (((x) & GENMASK(15, 12)) >> 12) @@ -196,17 +138,9 @@ #define PCS1G_LINK_STATUS_LINK_STATUS BIT(4) #define PCS1G_LINK_STATUS_SYNC_STATUS BIT(0) -#define PCS1G_LINK_DOWN_CNT 0x74 - -#define PCS1G_STICKY 0x78 - #define PCS1G_STICKY_LINK_DOWN_STICKY BIT(4) #define PCS1G_STICKY_OUT_OF_SYNC_STICKY BIT(0) -#define PCS1G_DEBUG_STATUS 0x7c - -#define PCS1G_LPI_CFG 0x80 - #define PCS1G_LPI_CFG_QSGMII_MS_SEL BIT(20) #define 
PCS1G_LPI_CFG_RX_LPI_OUT_DIS BIT(17) #define PCS1G_LPI_CFG_LPI_TESTMODE BIT(16) @@ -215,10 +149,6 @@ #define PCS1G_LPI_CFG_LPI_RX_WTIM_X(x) (((x) & GENMASK(5, 4)) >> 4) #define PCS1G_LPI_CFG_TX_ASSERT_LPIDLE BIT(0) -#define PCS1G_LPI_WAKE_ERROR_CNT 0x84 - -#define PCS1G_LPI_STATUS 0x88 - #define PCS1G_LPI_STATUS_RX_LPI_FAIL BIT(16) #define PCS1G_LPI_STATUS_RX_LPI_EVENT_STICKY BIT(12) #define PCS1G_LPI_STATUS_RX_QUIET BIT(9) @@ -227,18 +157,12 @@ #define PCS1G_LPI_STATUS_TX_QUIET BIT(1) #define PCS1G_LPI_STATUS_TX_LPI_MODE BIT(0) -#define PCS1G_TSTPAT_MODE_CFG 0x8c - -#define PCS1G_TSTPAT_STATUS 0x90 - #define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT(x) (((x) << 8) & GENMASK(15, 8)) #define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT_M GENMASK(15, 8) #define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT_X(x) (((x) & GENMASK(15, 8)) >> 8) #define PCS1G_TSTPAT_STATUS_JTP_ERR BIT(4) #define PCS1G_TSTPAT_STATUS_JTP_LOCK BIT(0) -#define DEV_PCS_FX100_CFG 0x94 - #define DEV_PCS_FX100_CFG_SD_SEL BIT(26) #define DEV_PCS_FX100_CFG_SD_POL BIT(25) #define DEV_PCS_FX100_CFG_SD_ENA BIT(24) @@ -259,8 +183,6 @@ #define DEV_PCS_FX100_CFG_FEFGEN_ENA BIT(1) #define DEV_PCS_FX100_CFG_PCS_ENA BIT(0) -#define DEV_PCS_FX100_STATUS 0x98 - #define DEV_PCS_FX100_STATUS_EDGE_POS_PTP(x) (((x) << 8) & GENMASK(11, 8)) #define DEV_PCS_FX100_STATUS_EDGE_POS_PTP_M GENMASK(11, 8) #define DEV_PCS_FX100_STATUS_EDGE_POS_PTP_X(x) (((x) & GENMASK(11, 8)) >> 8) -- cgit v1.2.3 From 2789658fa319f51db43a585e076bb99a3de3c6d1 Mon Sep 17 00:00:00 2001 From: Maxim Kochetkov Date: Mon, 13 Jul 2020 19:57:02 +0300 Subject: soc: mscc: ocelot: add MII registers description Add the register definitions for the MSCC MIIM MDIO controller in preparation for seville_vsc9959.c to create its accessors for the internal MDIO bus. Since we've introduced elements to ocelot_regfields that are not instantiated by felix and ocelot, we need to define the size of the regfields arrays explicitly, otherwise ocelot_regfields_init, which iterates up to REGFIELD_MAX, will fault on the undefined regfield entries (if we're lucky). Signed-off-by: Maxim Kochetkov Signed-off-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/ocelot/felix_vsc9959.c | 2 +- drivers/net/ethernet/mscc/ocelot_vsc7514.c | 2 +- include/soc/mscc/ocelot.h | 5 +++++ 3 files changed, 7 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 0c54d67a4039..b97c12a783eb 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -469,7 +469,7 @@ static const struct resource vsc9959_imdio_res = { .name = "imdio", }; -static const struct reg_field vsc9959_regfields[] = { +static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = { [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 6, 6), [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 5), [ANA_ANEVENTS_FLOOD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 30, 30), diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index 63af145e744c..83c17c689641 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -316,7 +316,7 @@ static const u32 *ocelot_regmap[TARGET_MAX] = { [DEV_GMII] = ocelot_dev_gmii_regmap, }; -static const struct reg_field ocelot_regfields[] = { +static const struct reg_field ocelot_regfields[REGFIELD_MAX] = { [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 11, 11), [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 10), [ANA_ANEVENTS_MSTI_DROP] = REG_FIELD(ANA_ANEVENTS, 27, 27), diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index c2a2d0165ef1..348fa26a349c 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -409,6 +409,9 @@ enum ocelot_reg { PTP_CLK_CFG_ADJ_CFG, PTP_CLK_CFG_ADJ_FREQ, GCB_SOFT_RST = GCB << TARGET_OFFSET, + GCB_MIIM_MII_STATUS, + GCB_MIIM_MII_CMD, + GCB_MIIM_MII_DATA, DEV_CLOCK_CFG = DEV_GMII << TARGET_OFFSET, DEV_PORT_MISC, DEV_EVENTS, @@ -496,6 +499,8 @@ enum ocelot_regfield { SYS_RESET_CFG_MEM_ENA, SYS_RESET_CFG_MEM_INIT, GCB_SOFT_RST_SWC_RST, + GCB_MIIM_MII_STATUS_PENDING, + GCB_MIIM_MII_STATUS_BUSY, REGFIELD_MAX }; -- cgit v1.2.3 From 886e1387c73d895ad0eff53353913081983570c0 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Mon, 13 Jul 2020 19:57:03 +0300 Subject: net: mscc: ocelot: convert QSYS_SWITCH_PORT_MODE and SYS_PORT_MODE to regfields Currently Felix and Ocelot share the same bit layout in these per-port registers, but Seville does not. So we need reg_fields for that. Actually since these are per-port registers, we need to also specify the number of ports, and register size per port, and use the regmap API for multiple ports. There's a more subtle point to be made about the other 2 register fields: - QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG - QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE which we are not writing any longer, for 2 reasons: - Using the previous API (ocelot_write_rix), we were only writing 1 for Felix and Ocelot, which was their hardware-default value, and which there wasn't any intention in changing. - In the case of SCH_NEXT_CFG, in fact Seville does not have this register field at all, and therefore, if we want to have common code we would be required to not write to it. Signed-off-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/ocelot/felix.c | 9 +++----- drivers/net/dsa/ocelot/felix_vsc9959.c | 11 +++++++++ drivers/net/ethernet/mscc/ocelot.c | 36 ++++++++++++------------------ drivers/net/ethernet/mscc/ocelot.h | 6 ----- drivers/net/ethernet/mscc/ocelot_io.c | 2 ++ drivers/net/ethernet/mscc/ocelot_vsc7514.c | 11 +++++++++ include/soc/mscc/ocelot.h | 15 +++++++++++++ include/soc/mscc/ocelot_qsys.h | 13 ----------- include/soc/mscc/ocelot_sys.h | 13 ----------- 9 files changed, 56 insertions(+), 60 deletions(-) (limited to 'include') diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index bf0bd5c7b12c..4b255ed614e4 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -249,8 +249,7 @@ static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port, struct ocelot_port *ocelot_port = ocelot->ports[port]; ocelot_port_writel(ocelot_port, 0, DEV_MAC_ENA_CFG); - ocelot_rmw_rix(ocelot, 0, QSYS_SWITCH_PORT_MODE_PORT_ENA, - QSYS_SWITCH_PORT_MODE, port); + ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0); } static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port, @@ -326,10 +325,8 @@ static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port, ANA_PORT_PORT_CFG, port); /* Core: Enable port for frame transfer */ - ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE | - QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) | - QSYS_SWITCH_PORT_MODE_PORT_ENA, - QSYS_SWITCH_PORT_MODE, port); + ocelot_fields_write(ocelot, port, + QSYS_SWITCH_PORT_MODE_PORT_ENA, 1); if (felix->info->pcs_link_up) felix->info->pcs_link_up(ocelot, port, link_an_mode, interface, diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index b97c12a783eb..efbfbdccb2b6 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -503,6 +503,17 @@ static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = { [ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 10), [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 0, 0), [GCB_SOFT_RST_SWC_RST] = REG_FIELD(GCB_SOFT_RST, 0, 0), + /* Replicated per number of ports (7), register size 4 per port */ + [QSYS_SWITCH_PORT_MODE_PORT_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 14, 14, 7, 4), + [QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 11, 13, 7, 4), + [QSYS_SWITCH_PORT_MODE_YEL_RSRVD] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 10, 10, 7, 4), + [QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 9, 9, 7, 4), + [QSYS_SWITCH_PORT_MODE_TX_PFC_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 1, 8, 7, 4), + [QSYS_SWITCH_PORT_MODE_TX_PFC_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 0, 0, 7, 4), + [SYS_PORT_MODE_DATA_WO_TS] = REG_FIELD_ID(SYS_PORT_MODE, 5, 6, 7, 4), + [SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 3, 4, 7, 4), + [SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 1, 2, 7, 4), + [SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 7, 4), }; static const struct ocelot_stat_layout vsc9959_stats_layout[] = { diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index e815aad8d85e..36986fccedf4 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -389,10 +389,8 @@ void ocelot_adjust_link(struct ocelot *ocelot, int port, ANA_PFC_PFC_CFG, port); /* Core: Enable port for frame transfer */ - ocelot_write_rix(ocelot, 
QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE | - QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) | - QSYS_SWITCH_PORT_MODE_PORT_ENA, - QSYS_SWITCH_PORT_MODE, port); + ocelot_fields_write(ocelot, port, + QSYS_SWITCH_PORT_MODE_PORT_ENA, 1); /* Flow control */ ocelot_write_rix(ocelot, SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) | @@ -423,8 +421,7 @@ void ocelot_port_disable(struct ocelot *ocelot, int port) struct ocelot_port *ocelot_port = ocelot->ports[port]; ocelot_port_writel(ocelot_port, 0, DEV_MAC_ENA_CFG); - ocelot_rmw_rix(ocelot, 0, QSYS_SWITCH_PORT_MODE_PORT_ENA, - QSYS_SWITCH_PORT_MODE, port); + ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0); } EXPORT_SYMBOL(ocelot_port_disable); @@ -1392,27 +1389,22 @@ void ocelot_configure_cpu(struct ocelot *ocelot, int npi, QSYS_EXT_CPU_CFG); /* Enable NPI port */ - ocelot_write_rix(ocelot, - QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE | - QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) | - QSYS_SWITCH_PORT_MODE_PORT_ENA, - QSYS_SWITCH_PORT_MODE, npi); + ocelot_fields_write(ocelot, npi, + QSYS_SWITCH_PORT_MODE_PORT_ENA, 1); /* NPI port Injection/Extraction configuration */ - ocelot_write_rix(ocelot, - SYS_PORT_MODE_INCL_XTR_HDR(extraction) | - SYS_PORT_MODE_INCL_INJ_HDR(injection), - SYS_PORT_MODE, npi); + ocelot_fields_write(ocelot, npi, SYS_PORT_MODE_INCL_XTR_HDR, + extraction); + ocelot_fields_write(ocelot, npi, SYS_PORT_MODE_INCL_INJ_HDR, + injection); } /* Enable CPU port module */ - ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE | - QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) | - QSYS_SWITCH_PORT_MODE_PORT_ENA, - QSYS_SWITCH_PORT_MODE, cpu); + ocelot_fields_write(ocelot, cpu, QSYS_SWITCH_PORT_MODE_PORT_ENA, 1); /* CPU port Injection/Extraction configuration */ - ocelot_write_rix(ocelot, SYS_PORT_MODE_INCL_XTR_HDR(extraction) | - SYS_PORT_MODE_INCL_INJ_HDR(injection), - SYS_PORT_MODE, cpu); + ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_XTR_HDR, + extraction); + ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_INJ_HDR, + injection); /* Configure the CPU port to be VLAN aware */ ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) | diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index 814b09dd2c11..dc29e05103a1 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -102,9 +102,6 @@ void ocelot_port_lag_leave(struct ocelot *ocelot, int port, u32 ocelot_port_readl(struct ocelot_port *port, u32 reg); void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg); -#define ocelot_field_write(ocelot, reg, val) regmap_field_write((ocelot)->regfields[(reg)], (val)) -#define ocelot_field_read(ocelot, reg, val) regmap_field_read((ocelot)->regfields[(reg)], (val)) - int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target, struct phy_device *phy); @@ -116,7 +113,4 @@ extern struct notifier_block ocelot_netdevice_nb; extern struct notifier_block ocelot_switchdev_nb; extern struct notifier_block ocelot_switchdev_blocking_nb; -#define ocelot_field_write(ocelot, reg, val) regmap_field_write((ocelot)->regfields[(reg)], (val)) -#define ocelot_field_read(ocelot, reg, val) regmap_field_read((ocelot)->regfields[(reg)], (val)) - #endif diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c index 741f653bc85b..d22711282183 100644 --- a/drivers/net/ethernet/mscc/ocelot_io.c +++ b/drivers/net/ethernet/mscc/ocelot_io.c @@ -89,6 +89,8 @@ int ocelot_regfields_init(struct ocelot *ocelot, regfield.reg = ocelot->map[target][reg 
& REG_MASK]; regfield.lsb = regfields[i].lsb; regfield.msb = regfields[i].msb; + regfield.id_size = regfields[i].id_size; + regfield.id_offset = regfields[i].id_offset; ocelot->regfields[i] = devm_regmap_field_alloc(ocelot->dev, diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index 83c17c689641..9c6a9d44871d 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -358,6 +358,17 @@ static const struct reg_field ocelot_regfields[REGFIELD_MAX] = { [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 2, 2), [SYS_RESET_CFG_MEM_ENA] = REG_FIELD(SYS_RESET_CFG, 1, 1), [SYS_RESET_CFG_MEM_INIT] = REG_FIELD(SYS_RESET_CFG, 0, 0), + /* Replicated per number of ports (11), register size 4 per port */ + [QSYS_SWITCH_PORT_MODE_PORT_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 14, 14, 11, 4), + [QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 11, 13, 11, 4), + [QSYS_SWITCH_PORT_MODE_YEL_RSRVD] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 10, 10, 11, 4), + [QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 9, 9, 11, 4), + [QSYS_SWITCH_PORT_MODE_TX_PFC_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 1, 8, 11, 4), + [QSYS_SWITCH_PORT_MODE_TX_PFC_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 0, 0, 11, 4), + [SYS_PORT_MODE_DATA_WO_TS] = REG_FIELD_ID(SYS_PORT_MODE, 5, 6, 11, 4), + [SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 3, 4, 11, 4), + [SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 1, 2, 11, 4), + [SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 11, 4), }; static const struct ocelot_stat_layout ocelot_stats_layout[] = { diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 348fa26a349c..19d97585345a 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -490,11 +490,21 @@ enum ocelot_regfield { ANA_TABLES_MACACCESS_B_DOM, ANA_TABLES_MACTINDX_BUCKET, ANA_TABLES_MACTINDX_M_INDEX, + QSYS_SWITCH_PORT_MODE_PORT_ENA, + QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG, + QSYS_SWITCH_PORT_MODE_YEL_RSRVD, + QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE, + QSYS_SWITCH_PORT_MODE_TX_PFC_ENA, + QSYS_SWITCH_PORT_MODE_TX_PFC_MODE, QSYS_TIMED_FRAME_ENTRY_TFRM_VLD, QSYS_TIMED_FRAME_ENTRY_TFRM_FP, QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO, QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL, QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T, + SYS_PORT_MODE_DATA_WO_TS, + SYS_PORT_MODE_INCL_INJ_HDR, + SYS_PORT_MODE_INCL_XTR_HDR, + SYS_PORT_MODE_INCL_HDR_ERR, SYS_RESET_CFG_CORE_ENA, SYS_RESET_CFG_MEM_ENA, SYS_RESET_CFG_MEM_INIT, @@ -638,6 +648,11 @@ struct ocelot_policer { #define ocelot_rmw_rix(ocelot, val, m, reg, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_RSZ * (ri)) #define ocelot_rmw(ocelot, val, m, reg) __ocelot_rmw_ix(ocelot, val, m, reg, 0) +#define ocelot_field_write(ocelot, reg, val) regmap_field_write((ocelot)->regfields[(reg)], (val)) +#define ocelot_field_read(ocelot, reg, val) regmap_field_read((ocelot)->regfields[(reg)], (val)) +#define ocelot_fields_write(ocelot, id, reg, val) regmap_fields_write((ocelot)->regfields[(reg)], (id), (val)) +#define ocelot_fields_read(ocelot, id, reg, val) regmap_fields_read((ocelot)->regfields[(reg)], (id), (val)) + /* I/O */ u32 ocelot_port_readl(struct ocelot_port *port, u32 reg); void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg); diff --git a/include/soc/mscc/ocelot_qsys.h b/include/soc/mscc/ocelot_qsys.h index d8c63aa761be..a814bc2017d8 100644 --- a/include/soc/mscc/ocelot_qsys.h +++ 
b/include/soc/mscc/ocelot_qsys.h @@ -13,19 +13,6 @@ #define QSYS_PORT_MODE_DEQUEUE_DIS BIT(1) #define QSYS_PORT_MODE_DEQUEUE_LATE BIT(0) -#define QSYS_SWITCH_PORT_MODE_RSZ 0x4 - -#define QSYS_SWITCH_PORT_MODE_PORT_ENA BIT(14) -#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(x) (((x) << 11) & GENMASK(13, 11)) -#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG_M GENMASK(13, 11) -#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG_X(x) (((x) & GENMASK(13, 11)) >> 11) -#define QSYS_SWITCH_PORT_MODE_YEL_RSRVD BIT(10) -#define QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE BIT(9) -#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA(x) (((x) << 1) & GENMASK(8, 1)) -#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA_M GENMASK(8, 1) -#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA_X(x) (((x) & GENMASK(8, 1)) >> 1) -#define QSYS_SWITCH_PORT_MODE_TX_PFC_MODE BIT(0) - #define QSYS_STAT_CNT_CFG_TX_GREEN_CNT_MODE BIT(5) #define QSYS_STAT_CNT_CFG_TX_YELLOW_CNT_MODE BIT(4) #define QSYS_STAT_CNT_CFG_DROP_GREEN_CNT_MODE BIT(3) diff --git a/include/soc/mscc/ocelot_sys.h b/include/soc/mscc/ocelot_sys.h index 16f91e172bcb..8a95fc93fde5 100644 --- a/include/soc/mscc/ocelot_sys.h +++ b/include/soc/mscc/ocelot_sys.h @@ -12,19 +12,6 @@ #define SYS_COUNT_TX_OCTETS_RSZ 0x4 -#define SYS_PORT_MODE_RSZ 0x4 - -#define SYS_PORT_MODE_DATA_WO_TS(x) (((x) << 5) & GENMASK(6, 5)) -#define SYS_PORT_MODE_DATA_WO_TS_M GENMASK(6, 5) -#define SYS_PORT_MODE_DATA_WO_TS_X(x) (((x) & GENMASK(6, 5)) >> 5) -#define SYS_PORT_MODE_INCL_INJ_HDR(x) (((x) << 3) & GENMASK(4, 3)) -#define SYS_PORT_MODE_INCL_INJ_HDR_M GENMASK(4, 3) -#define SYS_PORT_MODE_INCL_INJ_HDR_X(x) (((x) & GENMASK(4, 3)) >> 3) -#define SYS_PORT_MODE_INCL_XTR_HDR(x) (((x) << 1) & GENMASK(2, 1)) -#define SYS_PORT_MODE_INCL_XTR_HDR_M GENMASK(2, 1) -#define SYS_PORT_MODE_INCL_XTR_HDR_X(x) (((x) & GENMASK(2, 1)) >> 1) -#define SYS_PORT_MODE_INJ_HDR_ERR BIT(0) - #define SYS_FRONT_PORT_MODE_RSZ 0x4 #define SYS_FRONT_PORT_MODE_HDX_MODE BIT(0) -- cgit v1.2.3 From 67c2404922c2c3f9cc0898aafaa4e3bea2bde084 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Mon, 13 Jul 2020 19:57:04 +0300 Subject: net: dsa: felix: create a template for the DSA tags on xmit With this patch we try to kill 2 birds with 1 stone. First of all, some switches that use tag_ocelot.c don't have the exact same bitfield layout for the DSA tags. The destination ports field is different for Seville VSC9953 for example. So the choices are to either duplicate tag_ocelot.c into a new tag_seville.c (sub-optimal) or somehow take into account a supposed ocelot->dest_ports_offset when packing this field into the DSA injection header (again not ideal). Secondly, tag_ocelot.c already needs to memset a 128-bit area to zero and call some packing() functions of dubious performance in the fastpath. And most of the values it needs to pack are pretty much constant (BYPASS=1, SRC_PORT=CPU, DEST=port index). So it would be good if we could improve that. The proposed solution is to allocate a memory area per port at probe time, initialize that with the statically defined bits as per chip hardware revision, and just perform a simpler memcpy in the fastpath. Other alternatives have been analyzed, such as: - Create a separate tag_seville.c: too much code duplication for just 1 bit field difference. - Create a separate DSA_TAG_PROTO_SEVILLE under tag_ocelot.c, just like tag_brcm.c, which would have a separate .xmit function. Again, too much code duplication for just 1 bit field difference. 
- Allocate the template from the init function of the tag_ocelot.c module, instead of from the driver: couldn't figure out a method of accessing the correct port template corresponding to the correct tagger in the .xmit function. Signed-off-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/ocelot/felix.c | 13 +++++++++++++ drivers/net/dsa/ocelot/felix.h | 1 + drivers/net/dsa/ocelot/felix_vsc9959.c | 20 ++++++++++++++++++++ include/soc/mscc/ocelot.h | 2 ++ net/dsa/tag_ocelot.c | 21 ++++++++------------- 5 files changed, 44 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 4b255ed614e4..b9981d8c4c98 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -522,6 +522,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) for (port = 0; port < num_phys_ports; port++) { struct ocelot_port *ocelot_port; struct regmap *target; + u8 *template; ocelot_port = devm_kzalloc(ocelot->dev, sizeof(struct ocelot_port), @@ -547,10 +548,22 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) return PTR_ERR(target); } + template = devm_kzalloc(ocelot->dev, OCELOT_TAG_LEN, + GFP_KERNEL); + if (!template) { + dev_err(ocelot->dev, + "Failed to allocate memory for DSA tag\n"); + kfree(port_phy_modes); + return -ENOMEM; + } + ocelot_port->phy_mode = port_phy_modes[port]; ocelot_port->ocelot = ocelot; ocelot_port->target = target; + ocelot_port->xmit_template = template; ocelot->ports[port] = ocelot_port; + + felix->info->xmit_template_populate(ocelot, port); } kfree(port_phy_modes); diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h index 00137b64132b..a85631d716b9 100644 --- a/drivers/net/dsa/ocelot/felix.h +++ b/drivers/net/dsa/ocelot/felix.h @@ -43,6 +43,7 @@ struct felix_info { enum tc_setup_type type, void *type_data); void (*port_sched_speed_set)(struct ocelot *ocelot, int port, u32 speed); + void (*xmit_template_populate)(struct ocelot *ocelot, int port); }; extern struct felix_info felix_info_vsc9959; diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index efbfbdccb2b6..d640146acc3d 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -1432,6 +1433,24 @@ static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port, } } +static void vsc9959_xmit_template_populate(struct ocelot *ocelot, int port) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + u8 *template = ocelot_port->xmit_template; + u64 bypass, dest, src; + + /* Set the source port as the CPU port module and not the + * NPI port + */ + src = ocelot->num_phys_ports; + dest = BIT(port); + bypass = true; + + packing(template, &bypass, 127, 127, OCELOT_TAG_LEN, PACK, 0); + packing(template, &dest, 68, 56, OCELOT_TAG_LEN, PACK, 0); + packing(template, &src, 46, 43, OCELOT_TAG_LEN, PACK, 0); +} + struct felix_info felix_info_vsc9959 = { .target_io_res = vsc9959_target_io_res, .port_io_res = vsc9959_port_io_res, @@ -1458,4 +1477,5 @@ struct felix_info felix_info_vsc9959 = { .prevalidate_phy_mode = vsc9959_prevalidate_phy_mode, .port_setup_tc = vsc9959_port_setup_tc, .port_sched_speed_set = vsc9959_sched_speed_set, + .xmit_template_populate = vsc9959_xmit_template_populate, }; diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h 
index 19d97585345a..6cfbace57770 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -564,6 +564,8 @@ struct ocelot_port { u8 ts_id; phy_interface_t phy_mode; + + u8 *xmit_template; }; struct ocelot { diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c index b0c98ee4e13b..42f327c06dca 100644 --- a/net/dsa/tag_ocelot.c +++ b/net/dsa/tag_ocelot.c @@ -137,11 +137,10 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb, struct net_device *netdev) { struct dsa_port *dp = dsa_slave_to_port(netdev); - u64 bypass, dest, src, qos_class, rew_op; struct dsa_switch *ds = dp->ds; - int port = dp->index; struct ocelot *ocelot = ds->priv; - struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct ocelot_port *ocelot_port; + u64 qos_class, rew_op; u8 *injection; if (unlikely(skb_cow_head(skb, OCELOT_TAG_LEN) < 0)) { @@ -149,19 +148,15 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb, return NULL; } - injection = skb_push(skb, OCELOT_TAG_LEN); + ocelot_port = ocelot->ports[dp->index]; - memset(injection, 0, OCELOT_TAG_LEN); + injection = skb_push(skb, OCELOT_TAG_LEN); - /* Set the source port as the CPU port module and not the NPI port */ - src = ocelot->num_phys_ports; - dest = BIT(port); - bypass = true; + memcpy(injection, ocelot_port->xmit_template, OCELOT_TAG_LEN); + /* Fix up the fields which are not statically determined + * in the template + */ qos_class = skb->priority; - - packing(injection, &bypass, 127, 127, OCELOT_TAG_LEN, PACK, 0); - packing(injection, &dest, 68, 56, OCELOT_TAG_LEN, PACK, 0); - packing(injection, &src, 46, 43, OCELOT_TAG_LEN, PACK, 0); packing(injection, &qos_class, 19, 17, OCELOT_TAG_LEN, PACK, 0); if (ocelot->ptp && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { -- cgit v1.2.3 From 541132f0961a4f17b02974902085af964adf5966 Mon Sep 17 00:00:00 2001 From: Maxim Kochetkov Date: Mon, 13 Jul 2020 19:57:07 +0300 Subject: net: mscc: ocelot: convert SYS_PAUSE_CFG register access to regfield Seville has a different bitwise layout than Ocelot and Felix. Signed-off-by: Maxim Kochetkov Signed-off-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/ocelot/felix_vsc9959.c | 3 +++ drivers/net/ethernet/mscc/ocelot.c | 14 ++++++-------- drivers/net/ethernet/mscc/ocelot_vsc7514.c | 3 +++ include/soc/mscc/ocelot.h | 3 +++ include/soc/mscc/ocelot_sys.h | 10 ---------- 5 files changed, 15 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index d640146acc3d..fea482ad92c7 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -515,6 +515,9 @@ static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = { [SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 3, 4, 7, 4), [SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 1, 2, 7, 4), [SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 7, 4), + [SYS_PAUSE_CFG_PAUSE_START] = REG_FIELD_ID(SYS_PAUSE_CFG, 10, 18, 7, 4), + [SYS_PAUSE_CFG_PAUSE_STOP] = REG_FIELD_ID(SYS_PAUSE_CFG, 1, 9, 7, 4), + [SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 7, 4), }; static const struct ocelot_stat_layout vsc9959_stats_layout[] = { diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 2a44305912d2..4d5222fa3397 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -1276,10 +1276,10 @@ void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu) /* Set Pause watermark hysteresis */ pause_start = 6 * maxlen / OCELOT_BUFFER_CELL_SZ; pause_stop = 4 * maxlen / OCELOT_BUFFER_CELL_SZ; - ocelot_rmw_rix(ocelot, SYS_PAUSE_CFG_PAUSE_START(pause_start), - SYS_PAUSE_CFG_PAUSE_START_M, SYS_PAUSE_CFG, port); - ocelot_rmw_rix(ocelot, SYS_PAUSE_CFG_PAUSE_STOP(pause_stop), - SYS_PAUSE_CFG_PAUSE_STOP_M, SYS_PAUSE_CFG, port); + ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_START, + pause_start); + ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_STOP, + pause_stop); /* Tail dropping watermark */ atop_wm = (ocelot->shared_queue_sz - 9 * maxlen) / @@ -1343,8 +1343,7 @@ void ocelot_init_port(struct ocelot *ocelot, int port) ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_LOW_CFG); /* Enable transmission of pause frames */ - ocelot_rmw_rix(ocelot, SYS_PAUSE_CFG_PAUSE_ENA, SYS_PAUSE_CFG_PAUSE_ENA, - SYS_PAUSE_CFG, port); + ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1); /* Drop frames with multicast source address */ ocelot_rmw_gix(ocelot, ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA, @@ -1403,8 +1402,7 @@ void ocelot_configure_cpu(struct ocelot *ocelot, int npi, injection); /* Disable transmission of pause frames */ - ocelot_rmw_rix(ocelot, 0, SYS_PAUSE_CFG_PAUSE_ENA, - SYS_PAUSE_CFG, npi); + ocelot_fields_write(ocelot, npi, SYS_PAUSE_CFG_PAUSE_ENA, 0); } /* Enable CPU port module */ diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index 9c6a9d44871d..e9cbfbed1fc6 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -369,6 +369,9 @@ static const struct reg_field ocelot_regfields[REGFIELD_MAX] = { [SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 3, 4, 11, 4), [SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 1, 2, 11, 4), [SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 11, 4), + [SYS_PAUSE_CFG_PAUSE_START] = REG_FIELD_ID(SYS_PAUSE_CFG, 10, 18, 11, 4), + [SYS_PAUSE_CFG_PAUSE_STOP] = REG_FIELD_ID(SYS_PAUSE_CFG, 1, 9, 11, 4), + [SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 
11, 4), }; static const struct ocelot_stat_layout ocelot_stats_layout[] = { diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 6cfbace57770..71bb92bcfdf7 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -511,6 +511,9 @@ enum ocelot_regfield { GCB_SOFT_RST_SWC_RST, GCB_MIIM_MII_STATUS_PENDING, GCB_MIIM_MII_STATUS_BUSY, + SYS_PAUSE_CFG_PAUSE_START, + SYS_PAUSE_CFG_PAUSE_STOP, + SYS_PAUSE_CFG_PAUSE_ENA, REGFIELD_MAX }; diff --git a/include/soc/mscc/ocelot_sys.h b/include/soc/mscc/ocelot_sys.h index 8a95fc93fde5..79cf40ccdbe6 100644 --- a/include/soc/mscc/ocelot_sys.h +++ b/include/soc/mscc/ocelot_sys.h @@ -43,16 +43,6 @@ #define SYS_TIMESTAMP_OFFSET_TIMESTAMP_OFFSET(x) ((x) & GENMASK(5, 0)) #define SYS_TIMESTAMP_OFFSET_TIMESTAMP_OFFSET_M GENMASK(5, 0) -#define SYS_PAUSE_CFG_RSZ 0x4 - -#define SYS_PAUSE_CFG_PAUSE_START(x) (((x) << 10) & GENMASK(18, 10)) -#define SYS_PAUSE_CFG_PAUSE_START_M GENMASK(18, 10) -#define SYS_PAUSE_CFG_PAUSE_START_X(x) (((x) & GENMASK(18, 10)) >> 10) -#define SYS_PAUSE_CFG_PAUSE_STOP(x) (((x) << 1) & GENMASK(9, 1)) -#define SYS_PAUSE_CFG_PAUSE_STOP_M GENMASK(9, 1) -#define SYS_PAUSE_CFG_PAUSE_STOP_X(x) (((x) & GENMASK(9, 1)) >> 1) -#define SYS_PAUSE_CFG_PAUSE_ENA BIT(0) - #define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START(x) (((x) << 9) & GENMASK(17, 9)) #define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START_M GENMASK(17, 9) #define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START_X(x) (((x) & GENMASK(17, 9)) >> 9) -- cgit v1.2.3 From aa92d836d5c40a7e21e563a272ad177f1bfd44dd Mon Sep 17 00:00:00 2001 From: Maxim Kochetkov Date: Mon, 13 Jul 2020 19:57:08 +0300 Subject: net: mscc: ocelot: extend watermark encoding function The ocelot_wm_encode function deals with setting thresholds for pause frame start and stop. In Ocelot and Felix the register layout is the same, but for Seville, it isn't. The easiest way to accommodate Seville hardware configuration is to introduce a function pointer for setting this up. Signed-off-by: Maxim Kochetkov Signed-off-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/ocelot/felix_vsc9959.c | 13 +++++++++++++ drivers/net/ethernet/mscc/ocelot.c | 16 ++-------------- drivers/net/ethernet/mscc/ocelot_vsc7514.c | 13 +++++++++++++ include/soc/mscc/ocelot.h | 1 + 4 files changed, 29 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index fea482ad92c7..7e8a99455670 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -1149,8 +1149,21 @@ static int vsc9959_prevalidate_phy_mode(struct ocelot *ocelot, int port, } } +/* Watermark encode + * Bit 8: Unit; 0:1, 1:16 + * Bit 7-0: Value to be multiplied with unit + */ +static u16 vsc9959_wm_enc(u16 value) +{ + if (value >= BIT(8)) + return BIT(8) | (value / 16); + + return value; +} + static const struct ocelot_ops vsc9959_ops = { .reset = vsc9959_reset, + .wm_enc = vsc9959_wm_enc, }; static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot) diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 4d5222fa3397..f2d94b026d88 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -309,18 +309,6 @@ static void ocelot_vlan_init(struct ocelot *ocelot) } } -/* Watermark encode - * Bit 8: Unit; 0:1, 1:16 - * Bit 7-0: Value to be multiplied with unit - */ -static u16 ocelot_wm_enc(u16 value) -{ - if (value >= BIT(8)) - return BIT(8) | (value / 16); - - return value; -} - void ocelot_adjust_link(struct ocelot *ocelot, int port, struct phy_device *phydev) { @@ -1284,9 +1272,9 @@ void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu) /* Tail dropping watermark */ atop_wm = (ocelot->shared_queue_sz - 9 * maxlen) / OCELOT_BUFFER_CELL_SZ; - ocelot_write_rix(ocelot, ocelot_wm_enc(9 * maxlen), + ocelot_write_rix(ocelot, ocelot->ops->wm_enc(9 * maxlen), SYS_ATOP, port); - ocelot_write(ocelot, ocelot_wm_enc(atop_wm), SYS_ATOP_TOT_CFG); + ocelot_write(ocelot, ocelot->ops->wm_enc(atop_wm), SYS_ATOP_TOT_CFG); } EXPORT_SYMBOL(ocelot_port_set_maxlen); diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index e9cbfbed1fc6..0ead1ef11c6c 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -739,8 +739,21 @@ static int ocelot_reset(struct ocelot *ocelot) return 0; } +/* Watermark encode + * Bit 8: Unit; 0:1, 1:16 + * Bit 7-0: Value to be multiplied with unit + */ +static u16 ocelot_wm_enc(u16 value) +{ + if (value >= BIT(8)) + return BIT(8) | (value / 16); + + return value; +} + static const struct ocelot_ops ocelot_ops = { .reset = ocelot_reset, + .wm_enc = ocelot_wm_enc, }; static const struct vcap_field vsc7514_vcap_is2_keys[] = { diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 71bb92bcfdf7..da369b12005f 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -541,6 +541,7 @@ struct ocelot; struct ocelot_ops { int (*reset)(struct ocelot *ocelot); + u16 (*wm_enc)(u16 value); }; struct ocelot_vcap_block { -- cgit v1.2.3 From 4a3107f61f1ce2c8ccd4dde8ae655ae3f2996f35 Mon Sep 17 00:00:00 2001 From: Rajendra Nayak Date: Tue, 14 Jul 2020 11:01:49 +0530 Subject: tty: serial: qcom-geni-serial: Drop the icc bw votes in suspend for console When using the geni-serial as console, it's important to be able to hit the lowest possible power state in suspend, even with no_console_suspend.
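As background for the tag values referred to below, here is a minimal sketch of how a consumer retags its interconnect votes around system suspend. It is illustrative only and not part of this patch; it assumes the generic icc_set_tag()/icc_set_bw() helpers from <linux/interconnect.h>, and it assumes the raw values 0x3 and 0x7 correspond to the Qualcomm "active only" and "always" bucket tags. The path pointer and bandwidth figures are hypothetical.

#include <linux/interconnect.h>

/* Illustrative sketch: tag the vote so it is only honoured while the SoC is active */
static int example_enter_sleep(struct icc_path *path, u32 avg_kbps, u32 peak_kbps)
{
	icc_set_tag(path, 0x3);
	/* Re-apply the bandwidth so the request is re-aggregated under the new tag */
	return icc_set_bw(path, avg_kbps, peak_kbps);
}

/* Illustrative sketch: restore the "always" tag so the vote persists across sleep */
static int example_exit_sleep(struct icc_path *path, u32 avg_kbps, u32 peak_kbps)
{
	icc_set_tag(path, 0x7);
	return icc_set_bw(path, avg_kbps, peak_kbps);
}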
The only thing that prevents it today on platforms like the sc7180 is the interconnect BW votes, which we certainly don't need when the system is in suspend. So in the suspend handler mark them as ACTIVE_ONLY (0x3) and on resume switch them back to the ALWAYS tag (0x7) Signed-off-by: Rajendra Nayak Reviewed-by: Akash Asthana Tested-by: Matthias Kaehlcke Acked-by: Greg Kroah-Hartman Link: https://lore.kernel.org/r/1594704709-26072-1-git-send-email-rnayak@codeaurora.org Signed-off-by: Bjorn Andersson --- drivers/soc/qcom/qcom-geni-se.c | 9 +++++++++ drivers/tty/serial/qcom_geni_serial.c | 16 +++++++++++++++- include/linux/qcom-geni-se.h | 1 + 3 files changed, 25 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c index e2a0ba278b6b..355d503b7008 100644 --- a/drivers/soc/qcom/qcom-geni-se.c +++ b/drivers/soc/qcom/qcom-geni-se.c @@ -771,6 +771,15 @@ int geni_icc_set_bw(struct geni_se *se) } EXPORT_SYMBOL(geni_icc_set_bw); +void geni_icc_set_tag(struct geni_se *se, u32 tag) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) + icc_set_tag(se->icc_paths[i].path, tag); +} +EXPORT_SYMBOL(geni_icc_set_tag); + /* To do: Replace this by icc_bulk_enable once it's implemented in ICC core */ int geni_icc_enable(struct geni_se *se) { diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 583d903321b5..07b7b6b05b8b 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -1505,16 +1505,30 @@ static int __maybe_unused qcom_geni_serial_sys_suspend(struct device *dev) struct uart_port *uport = &port->uport; struct qcom_geni_private_data *private_data = uport->private_data; + /* + * This is done so we can hit the lowest possible state in suspend + * even with no_console_suspend + */ + if (uart_console(uport)) { + geni_icc_set_tag(&port->se, 0x3); + geni_icc_set_bw(&port->se); + } return uart_suspend_port(private_data->drv, uport); } static int __maybe_unused qcom_geni_serial_sys_resume(struct device *dev) { + int ret; struct qcom_geni_serial_port *port = dev_get_drvdata(dev); struct uart_port *uport = &port->uport; struct qcom_geni_private_data *private_data = uport->private_data; - return uart_resume_port(private_data->drv, uport); + ret = uart_resume_port(private_data->drv, uport); + if (uart_console(uport)) { + geni_icc_set_tag(&port->se, 0x7); + geni_icc_set_bw(&port->se); + } + return ret; } static const struct dev_pm_ops qcom_geni_serial_pm_ops = { diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h index afa511ef1457..8f385fbe5a0e 100644 --- a/include/linux/qcom-geni-se.h +++ b/include/linux/qcom-geni-se.h @@ -454,6 +454,7 @@ void geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len); int geni_icc_get(struct geni_se *se, const char *icc_ddr); int geni_icc_set_bw(struct geni_se *se); +void geni_icc_set_tag(struct geni_se *se, u32 tag); int geni_icc_enable(struct geni_se *se); -- cgit v1.2.3 From afb0367a80553e795e7ad055299096544454f3f6 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 14 Jul 2020 14:56:25 +0200 Subject: PM: domains: Restore comment indentation for generic_pm_domain.child_links The rename of generic_pm_domain.slave_links to generic_pm_domain.child_links accidentally dropped the TAB to align the member's comment. Re-add the lost TAB to restore indentation. 
Fixes: 8d87ae48ced2dffd ("PM: domains: Fix up terminology with parent/child") Signed-off-by: Geert Uytterhoeven [ rjw: Minor subject edit ] Signed-off-by: Rafael J. Wysocki --- include/linux/pm_domain.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 574a1fadb1e5..ee11502a575b 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -96,7 +96,7 @@ struct generic_pm_domain { struct dev_pm_domain domain; /* PM domain operations */ struct list_head gpd_list_node; /* Node in the global PM domains list */ struct list_head parent_links; /* Links with PM domain as a parent */ - struct list_head child_links;/* Links with PM domain as a child */ + struct list_head child_links; /* Links with PM domain as a child */ struct list_head dev_list; /* List of devices */ struct dev_power_governor *gov; struct work_struct power_off_work; -- cgit v1.2.3 From 5e37b9c137ee5a3a9dc2815ca51f71746c2609a6 Mon Sep 17 00:00:00 2001 From: Jon Hunter Date: Sun, 12 Jul 2020 11:01:17 +0100 Subject: firmware: tegra: Add support for in-band debug Add support for retrieving BPMP debug information via in-band messaging as opposed to using shared memory, which older BPMP firmware used. Note that it is possible to detect at runtime whether the BPMP firmware being used supports the in-band messaging for retrieving the debug information. Therefore, if the BPMP firmware supports the in-band messaging for debug, use this; otherwise fall back to using shared memory. Signed-off-by: Jon Hunter Signed-off-by: Thierry Reding --- drivers/firmware/tegra/bpmp-debugfs.c | 377 +++++++++++++++++++++++++++++++++- include/soc/tegra/bpmp-abi.h | 189 ++++++++++++++++- 2 files changed, 557 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c index cfc06ff4266a..c1bbba9ee93a 100644 --- a/drivers/firmware/tegra/bpmp-debugfs.c +++ b/drivers/firmware/tegra/bpmp-debugfs.c @@ -4,11 +4,14 @@ */ #include #include +#include #include #include #include +static DEFINE_MUTEX(bpmp_debug_lock); + struct seqbuf { char *buf; size_t pos; @@ -96,6 +99,354 @@ static const char *get_filename(struct tegra_bpmp *bpmp, return filename; } +static int mrq_debug_open(struct tegra_bpmp *bpmp, const char *name, + uint32_t *fd, uint32_t *len, bool write) +{ + struct mrq_debug_request req = { + .cmd = cpu_to_le32(write ?
CMD_DEBUG_OPEN_WO : CMD_DEBUG_OPEN_RO), + }; + struct mrq_debug_response resp; + struct tegra_bpmp_message msg = { + .mrq = MRQ_DEBUG, + .tx = { + .data = &req, + .size = sizeof(req), + }, + .rx = { + .data = &resp, + .size = sizeof(resp), + }, + }; + ssize_t sz_name; + int err = 0; + + sz_name = strscpy(req.fop.name, name, sizeof(req.fop.name)); + if (sz_name < 0) { + pr_err("File name too large: %s\n", name); + return -EINVAL; + } + + err = tegra_bpmp_transfer(bpmp, &msg); + if (err < 0) + return err; + else if (msg.rx.ret < 0) + return -EINVAL; + + *len = resp.fop.datalen; + *fd = resp.fop.fd; + + return 0; +} + +static int mrq_debug_close(struct tegra_bpmp *bpmp, uint32_t fd) +{ + struct mrq_debug_request req = { + .cmd = cpu_to_le32(CMD_DEBUG_CLOSE), + .frd = { + .fd = fd, + }, + }; + struct mrq_debug_response resp; + struct tegra_bpmp_message msg = { + .mrq = MRQ_DEBUG, + .tx = { + .data = &req, + .size = sizeof(req), + }, + .rx = { + .data = &resp, + .size = sizeof(resp), + }, + }; + int err = 0; + + err = tegra_bpmp_transfer(bpmp, &msg); + if (err < 0) + return err; + else if (msg.rx.ret < 0) + return -EINVAL; + + return 0; +} + +static int mrq_debug_read(struct tegra_bpmp *bpmp, const char *name, + char *data, size_t sz_data, uint32_t *nbytes) +{ + struct mrq_debug_request req = { + .cmd = cpu_to_le32(CMD_DEBUG_READ), + }; + struct mrq_debug_response resp; + struct tegra_bpmp_message msg = { + .mrq = MRQ_DEBUG, + .tx = { + .data = &req, + .size = sizeof(req), + }, + .rx = { + .data = &resp, + .size = sizeof(resp), + }, + }; + uint32_t fd = 0, len = 0; + int remaining, err; + + mutex_lock(&bpmp_debug_lock); + err = mrq_debug_open(bpmp, name, &fd, &len, 0); + if (err) + goto out; + + if (len > sz_data) { + err = -EFBIG; + goto close; + } + + req.frd.fd = fd; + remaining = len; + + while (remaining > 0) { + err = tegra_bpmp_transfer(bpmp, &msg); + if (err < 0) { + goto close; + } else if (msg.rx.ret < 0) { + err = -EINVAL; + goto close; + } + + if (resp.frd.readlen > remaining) { + pr_err("%s: read data length invalid\n", __func__); + err = -EINVAL; + goto close; + } + + memcpy(data, resp.frd.data, resp.frd.readlen); + data += resp.frd.readlen; + remaining -= resp.frd.readlen; + } + + *nbytes = len; + +close: + err = mrq_debug_close(bpmp, fd); +out: + mutex_unlock(&bpmp_debug_lock); + return err; +} + +static int mrq_debug_write(struct tegra_bpmp *bpmp, const char *name, + uint8_t *data, size_t sz_data) +{ + struct mrq_debug_request req = { + .cmd = cpu_to_le32(CMD_DEBUG_WRITE) + }; + struct mrq_debug_response resp; + struct tegra_bpmp_message msg = { + .mrq = MRQ_DEBUG, + .tx = { + .data = &req, + .size = sizeof(req), + }, + .rx = { + .data = &resp, + .size = sizeof(resp), + }, + }; + uint32_t fd = 0, len = 0; + size_t remaining; + int err; + + mutex_lock(&bpmp_debug_lock); + err = mrq_debug_open(bpmp, name, &fd, &len, 1); + if (err) + goto out; + + if (sz_data > len) { + err = -EINVAL; + goto close; + } + + req.fwr.fd = fd; + remaining = sz_data; + + while (remaining > 0) { + len = min(remaining, sizeof(req.fwr.data)); + memcpy(req.fwr.data, data, len); + req.fwr.datalen = len; + + err = tegra_bpmp_transfer(bpmp, &msg); + if (err < 0) { + goto close; + } else if (msg.rx.ret < 0) { + err = -EINVAL; + goto close; + } + + data += req.fwr.datalen; + remaining -= req.fwr.datalen; + } + +close: + err = mrq_debug_close(bpmp, fd); +out: + mutex_unlock(&bpmp_debug_lock); + return err; +} + +static int bpmp_debug_show(struct seq_file *m, void *p) +{ + struct file *file = m->private; + struct 
inode *inode = file_inode(file); + struct tegra_bpmp *bpmp = inode->i_private; + char *databuf = NULL; + char fnamebuf[256]; + const char *filename; + uint32_t nbytes = 0; + size_t len; + int err; + + len = seq_get_buf(m, &databuf); + if (!databuf) + return -ENOMEM; + + filename = get_filename(bpmp, file, fnamebuf, sizeof(fnamebuf)); + if (!filename) + return -ENOENT; + + err = mrq_debug_read(bpmp, filename, databuf, len, &nbytes); + if (!err) + seq_commit(m, nbytes); + + return err; +} + +static ssize_t bpmp_debug_store(struct file *file, const char __user *buf, + size_t count, loff_t *f_pos) +{ + struct inode *inode = file_inode(file); + struct tegra_bpmp *bpmp = inode->i_private; + char *databuf = NULL; + char fnamebuf[256]; + const char *filename; + ssize_t err; + + filename = get_filename(bpmp, file, fnamebuf, sizeof(fnamebuf)); + if (!filename) + return -ENOENT; + + databuf = kmalloc(count, GFP_KERNEL); + if (!databuf) + return -ENOMEM; + + if (copy_from_user(databuf, buf, count)) { + err = -EFAULT; + goto free_ret; + } + + err = mrq_debug_write(bpmp, filename, databuf, count); + +free_ret: + kfree(databuf); + + return err ?: count; +} + +static int bpmp_debug_open(struct inode *inode, struct file *file) +{ + return single_open_size(file, bpmp_debug_show, file, SZ_256K); +} + +static const struct file_operations bpmp_debug_fops = { + .open = bpmp_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .write = bpmp_debug_store, + .release = single_release, +}; + +static int bpmp_populate_debugfs_inband(struct tegra_bpmp *bpmp, + struct dentry *parent, + char *ppath) +{ + const size_t pathlen = SZ_256; + const size_t bufsize = SZ_16K; + uint32_t dsize, attrs = 0; + struct dentry *dentry; + struct seqbuf seqbuf; + char *buf, *pathbuf; + const char *name; + int err = 0; + + if (!bpmp || !parent || !ppath) + return -EINVAL; + + buf = kmalloc(bufsize, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pathbuf = kzalloc(pathlen, GFP_KERNEL); + if (!pathbuf) { + kfree(buf); + return -ENOMEM; + } + + err = mrq_debug_read(bpmp, ppath, buf, bufsize, &dsize); + if (err) + goto out; + + seqbuf_init(&seqbuf, buf, dsize); + + while (!seqbuf_eof(&seqbuf)) { + err = seqbuf_read_u32(&seqbuf, &attrs); + if (err) + goto out; + + err = seqbuf_read_str(&seqbuf, &name); + if (err < 0) + goto out; + + if (attrs & DEBUGFS_S_ISDIR) { + size_t len; + + dentry = debugfs_create_dir(name, parent); + if (IS_ERR(dentry)) { + err = PTR_ERR(dentry); + goto out; + } + + len = strlen(ppath) + strlen(name) + 1; + if (len >= pathlen) { + err = -EINVAL; + goto out; + } + + strncpy(pathbuf, ppath, pathlen); + strncat(pathbuf, name, strlen(name)); + strcat(pathbuf, "/"); + + err = bpmp_populate_debugfs_inband(bpmp, dentry, + pathbuf); + if (err < 0) + goto out; + } else { + umode_t mode; + + mode = attrs & DEBUGFS_S_IRUSR ? 0400 : 0; + mode |= attrs & DEBUGFS_S_IWUSR ? 
0200 : 0; + dentry = debugfs_create_file(name, mode, parent, bpmp, + &bpmp_debug_fops); + if (!dentry) { + err = -ENOMEM; + goto out; + } + } + } + +out: + kfree(pathbuf); + kfree(buf); + + return err; +} + static int mrq_debugfs_read(struct tegra_bpmp *bpmp, dma_addr_t name, size_t sz_name, dma_addr_t data, size_t sz_data, @@ -354,8 +705,7 @@ static int bpmp_populate_dir(struct tegra_bpmp *bpmp, struct seqbuf *seqbuf, return 0; } -static int bpmp_populate_debugfs_shmem(struct tegra_bpmp *bpmp, - struct dentry *root) +static int bpmp_populate_debugfs_shmem(struct tegra_bpmp *bpmp) { struct seqbuf seqbuf; const size_t sz = SZ_512K; @@ -364,10 +714,6 @@ static int bpmp_populate_debugfs_shmem(struct tegra_bpmp *bpmp, void *virt; int err; - bpmp->debugfs_mirror = debugfs_create_dir("debug", root); - if (!bpmp->debugfs_mirror) - return -ENOMEM; - virt = dma_alloc_coherent(bpmp->dev, sz, &phys, GFP_KERNEL | GFP_DMA32); if (!virt) @@ -392,16 +738,31 @@ free: int tegra_bpmp_init_debugfs(struct tegra_bpmp *bpmp) { struct dentry *root; + bool inband; int err; - if (!tegra_bpmp_mrq_is_supported(bpmp, MRQ_DEBUGFS)) + inband = tegra_bpmp_mrq_is_supported(bpmp, MRQ_DEBUG); + + if (!inband && !tegra_bpmp_mrq_is_supported(bpmp, MRQ_DEBUGFS)) return 0; root = debugfs_create_dir("bpmp", NULL); if (!root) return -ENOMEM; - err = bpmp_populate_debugfs_shmem(bpmp, root); + bpmp->debugfs_mirror = debugfs_create_dir("debug", root); + if (!bpmp->debugfs_mirror) { + err = -ENOMEM; + goto out; + } + + if (inband) + err = bpmp_populate_debugfs_inband(bpmp, bpmp->debugfs_mirror, + "/"); + else + err = bpmp_populate_debugfs_shmem(bpmp); + +out: if (err < 0) debugfs_remove_recursive(root); diff --git a/include/soc/tegra/bpmp-abi.h b/include/soc/tegra/bpmp-abi.h index 8f8e73e5cd45..4896227eef1a 100644 --- a/include/soc/tegra/bpmp-abi.h +++ b/include/soc/tegra/bpmp-abi.h @@ -148,6 +148,7 @@ struct mrq_response { #define MRQ_FMON 72 #define MRQ_EC 73 #define MRQ_FBVOLT_STATUS 74 +#define MRQ_DEBUG 75 /** @} */ @@ -156,7 +157,7 @@ struct mrq_response { * @brief Maximum MRQ code to be sent by CPU software to * BPMP. Subject to change in future */ -#define MAX_CPU_MRQ_ID 74 +#define MAX_CPU_MRQ_ID 75 /** * @addtogroup MRQ_Payloads @@ -532,6 +533,8 @@ struct mrq_module_mail_response { * @def MRQ_DEBUGFS * @brief Interact with BPMP's debugfs file nodes * + * @deprecated use MRQ_DEBUG instead. + * * * Platforms: T186, T194 * * Initiators: Any * * Targets: BPMP @@ -671,6 +674,190 @@ struct mrq_debugfs_response { #define DEBUGFS_S_IWUSR (1 << 7) /** @} */ +/** + * @ingroup MRQ_Codes + * @def MRQ_DEBUG + * @brief Interact with BPMP's debugfs file nodes. Use message payload + * for exchanging data. This is functionally equivalent to + * @ref MRQ_DEBUGFS. But the way in which data is exchanged is different. + * When software running on CPU tries to read a debugfs file, + * the file path and read data will be stored in message payload. + * Since the message payload size is limited, a debugfs file + * transaction might require multiple frames of data exchanged + * between BPMP and CPU until the transaction completes. 
+ * + * * Platforms: T194 + * * Initiators: Any + * * Targets: BPMP + * * Request Payload: @ref mrq_debug_request + * * Response Payload: @ref mrq_debug_response + */ + +/** @ingroup Debugfs */ +enum mrq_debug_commands { + /** @brief Open required file for read operation */ + CMD_DEBUG_OPEN_RO = 0, + /** @brief Open required file for write operation */ + CMD_DEBUG_OPEN_WO = 1, + /** @brief Perform read */ + CMD_DEBUG_READ = 2, + /** @brief Perform write */ + CMD_DEBUG_WRITE = 3, + /** @brief Close file */ + CMD_DEBUG_CLOSE = 4, + /** @brief Not a command */ + CMD_DEBUG_MAX +}; + +/** + * @ingroup Debugfs + * @brief Maximum number of files that can be open at a given time + */ +#define DEBUG_MAX_OPEN_FILES 1 + +/** + * @ingroup Debugfs + * @brief Maximum size of null-terminated file name string in bytes. + * Value is derived from memory available in message payload while + * using @ref cmd_debug_fopen_request + * Value 4 corresponds to size of @ref mrq_debug_commands + * in @ref mrq_debug_request. + * 120 - 4 dbg_cmd(32bit) = 116 + */ +#define DEBUG_FNAME_MAX_SZ (MSG_DATA_MIN_SZ - 4) + +/** + * @ingroup Debugfs + * @brief Parameters for CMD_DEBUG_OPEN command + */ +struct cmd_debug_fopen_request { + /** @brief File name - Null-terminated string with maximum + * length @ref DEBUG_FNAME_MAX_SZ + */ + char name[DEBUG_FNAME_MAX_SZ]; +} __ABI_PACKED; + +/** + * @ingroup Debugfs + * @brief Response data for CMD_DEBUG_OPEN_RO/WO command + */ +struct cmd_debug_fopen_response { + /** @brief Identifier for file access */ + uint32_t fd; + /** @brief Data length. File data size for READ command. + * Maximum allowed length for WRITE command + */ + uint32_t datalen; +} __ABI_PACKED; + +/** + * @ingroup Debugfs + * @brief Parameters for CMD_DEBUG_READ command + */ +struct cmd_debug_fread_request { + /** @brief File access identifier received in response + * to CMD_DEBUG_OPEN_RO request + */ + uint32_t fd; +} __ABI_PACKED; + +/** + * @ingroup Debugfs + * @brief Maximum size of read data in bytes. + * Value is derived from memory available in message payload while + * using @ref cmd_debug_fread_response. + */ +#define DEBUG_READ_MAX_SZ (MSG_DATA_MIN_SZ - 4) + +/** + * @ingroup Debugfs + * @brief Response data for CMD_DEBUG_READ command + */ +struct cmd_debug_fread_response { + /** @brief Size of data provided in this response in bytes */ + uint32_t readlen; + /** @brief File data from seek position */ + char data[DEBUG_READ_MAX_SZ]; +} __ABI_PACKED; + +/** + * @ingroup Debugfs + * @brief Maximum size of write data in bytes. + * Value is derived from memory available in message payload while + * using @ref cmd_debug_fwrite_request. + */ +#define DEBUG_WRITE_MAX_SZ (MSG_DATA_MIN_SZ - 12) + +/** + * @ingroup Debugfs + * @brief Parameters for CMD_DEBUG_WRITE command + */ +struct cmd_debug_fwrite_request { + /** @brief File access identifier received in response + * to CMD_DEBUG_OPEN_RO request + */ + uint32_t fd; + /** @brief Size of write data in bytes */ + uint32_t datalen; + /** @brief Data to be written */ + char data[DEBUG_WRITE_MAX_SZ]; +} __ABI_PACKED; + +/** + * @ingroup Debugfs + * @brief Parameters for CMD_DEBUG_CLOSE command + */ +struct cmd_debug_fclose_request { + /** @brief File access identifier received in response + * to CMD_DEBUG_OPEN_RO request + */ + uint32_t fd; +} __ABI_PACKED; + +/** + * @ingroup Debugfs + * @brief Request with #MRQ_DEBUG. + * + * The sender of an MRQ_DEBUG message uses #cmd to specify a debugfs + * command to execute. 
Legal commands are the values of @ref + * mrq_debug_commands. Each command requires a specific additional + * payload of data. + * + * |command |payload| + * |-------------------|-------| + * |CMD_DEBUG_OPEN_RO |fop | + * |CMD_DEBUG_OPEN_WO |fop | + * |CMD_DEBUG_READ |frd | + * |CMD_DEBUG_WRITE |fwr | + * |CMD_DEBUG_CLOSE |fcl | + */ +struct mrq_debug_request { + /** @brief Sub-command (@ref mrq_debug_commands) */ + uint32_t cmd; + union { + /** @brief Request payload for CMD_DEBUG_OPEN_RO/WO command */ + struct cmd_debug_fopen_request fop; + /** @brief Request payload for CMD_DEBUG_READ command */ + struct cmd_debug_fread_request frd; + /** @brief Request payload for CMD_DEBUG_WRITE command */ + struct cmd_debug_fwrite_request fwr; + /** @brief Request payload for CMD_DEBUG_CLOSE command */ + struct cmd_debug_fclose_request fcl; + } __UNION_ANON; +} __ABI_PACKED; + +/** + * @ingroup Debugfs + */ +struct mrq_debug_response { + union { + /** @brief Response data for CMD_DEBUG_OPEN_RO/WO command */ + struct cmd_debug_fopen_response fop; + /** @brief Response data for CMD_DEBUG_READ command */ + struct cmd_debug_fread_response frd; + } __UNION_ANON; +} __ABI_PACKED; + /** * @ingroup MRQ_Codes * @def MRQ_RESET -- cgit v1.2.3 From 4e87189912bd2167998d82c95bb68f73185069e2 Mon Sep 17 00:00:00 2001 From: Jon Hunter Date: Sun, 12 Jul 2020 11:01:18 +0100 Subject: firmware: tegra: Update BPMP ABI Update the BPMP ABI to align with the latest version. Signed-off-by: Jon Hunter Signed-off-by: Thierry Reding --- include/soc/tegra/bpmp-abi.h | 748 ++++++++++++++++++++++++++----------------- 1 file changed, 460 insertions(+), 288 deletions(-) (limited to 'include') diff --git a/include/soc/tegra/bpmp-abi.h b/include/soc/tegra/bpmp-abi.h index 4896227eef1a..bff99f23860c 100644 --- a/include/soc/tegra/bpmp-abi.h +++ b/include/soc/tegra/bpmp-abi.h @@ -3,28 +3,38 @@ * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
*/ -#ifndef _ABI_BPMP_ABI_H_ -#define _ABI_BPMP_ABI_H_ +#ifndef ABI_BPMP_ABI_H +#define ABI_BPMP_ABI_H -#ifdef LK +#if defined(LK) || defined(BPMP_ABI_HAVE_STDC) +#include #include #endif -#ifndef __ABI_PACKED -#define __ABI_PACKED __attribute__((packed)) +#ifndef BPMP_ABI_PACKED +#ifdef __ABI_PACKED +#define BPMP_ABI_PACKED __ABI_PACKED +#else +#define BPMP_ABI_PACKED __attribute__((packed)) +#endif #endif #ifdef NO_GCC_EXTENSIONS -#define EMPTY char empty; -#define EMPTY_ARRAY 1 +#define BPMP_ABI_EMPTY char empty; +#define BPMP_ABI_EMPTY_ARRAY 1 #else -#define EMPTY -#define EMPTY_ARRAY 0 +#define BPMP_ABI_EMPTY +#define BPMP_ABI_EMPTY_ARRAY 0 #endif -#ifndef __UNION_ANON -#define __UNION_ANON +#ifndef BPMP_UNION_ANON +#ifdef __UNION_ANON +#define BPMP_UNION_ANON __UNION_ANON +#else +#define BPMP_UNION_ANON +#endif #endif + /** * @file */ @@ -73,6 +83,7 @@ struct mrq_request { /** @brief MRQ number of the request */ uint32_t mrq; + /** * @brief Flags providing follow up directions to the receiver * @@ -82,7 +93,7 @@ struct mrq_request { * | 0 | should be 1 | */ uint32_t flags; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup MRQ_Format @@ -98,18 +109,18 @@ struct mrq_response { int32_t err; /** @brief Reserved for future use */ uint32_t flags; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup MRQ_Format * Minimum needed size for an IPC message buffer */ -#define MSG_MIN_SZ 128 +#define MSG_MIN_SZ 128U /** * @ingroup MRQ_Format * Minimum size guaranteed for data in an IPC message buffer */ -#define MSG_DATA_MIN_SZ 120 +#define MSG_DATA_MIN_SZ 120U /** * @ingroup MRQ_Codes @@ -118,37 +129,36 @@ struct mrq_response { * @{ */ -#define MRQ_PING 0 -#define MRQ_QUERY_TAG 1 -#define MRQ_MODULE_LOAD 4 -#define MRQ_MODULE_UNLOAD 5 -#define MRQ_TRACE_MODIFY 7 -#define MRQ_WRITE_TRACE 8 -#define MRQ_THREADED_PING 9 -#define MRQ_MODULE_MAIL 11 -#define MRQ_DEBUGFS 19 -#define MRQ_RESET 20 -#define MRQ_I2C 21 -#define MRQ_CLK 22 -#define MRQ_QUERY_ABI 23 -#define MRQ_PG_READ_STATE 25 -#define MRQ_PG_UPDATE_STATE 26 -#define MRQ_THERMAL 27 -#define MRQ_CPU_VHINT 28 -#define MRQ_ABI_RATCHET 29 -#define MRQ_EMC_DVFS_LATENCY 31 -#define MRQ_TRACE_ITER 64 -#define MRQ_RINGBUF_CONSOLE 65 -#define MRQ_PG 66 -#define MRQ_CPU_NDIV_LIMITS 67 -#define MRQ_STRAP 68 -#define MRQ_UPHY 69 -#define MRQ_CPU_AUTO_CC3 70 -#define MRQ_QUERY_FW_TAG 71 -#define MRQ_FMON 72 -#define MRQ_EC 73 -#define MRQ_FBVOLT_STATUS 74 -#define MRQ_DEBUG 75 +#define MRQ_PING 0U +#define MRQ_QUERY_TAG 1U +#define MRQ_MODULE_LOAD 4U +#define MRQ_MODULE_UNLOAD 5U +#define MRQ_TRACE_MODIFY 7U +#define MRQ_WRITE_TRACE 8U +#define MRQ_THREADED_PING 9U +#define MRQ_MODULE_MAIL 11U +#define MRQ_DEBUGFS 19U +#define MRQ_RESET 20U +#define MRQ_I2C 21U +#define MRQ_CLK 22U +#define MRQ_QUERY_ABI 23U +#define MRQ_PG_READ_STATE 25U +#define MRQ_PG_UPDATE_STATE 26U +#define MRQ_THERMAL 27U +#define MRQ_CPU_VHINT 28U +#define MRQ_ABI_RATCHET 29U +#define MRQ_EMC_DVFS_LATENCY 31U +#define MRQ_TRACE_ITER 64U +#define MRQ_RINGBUF_CONSOLE 65U +#define MRQ_PG 66U +#define MRQ_CPU_NDIV_LIMITS 67U +#define MRQ_STRAP 68U +#define MRQ_UPHY 69U +#define MRQ_CPU_AUTO_CC3 70U +#define MRQ_QUERY_FW_TAG 71U +#define MRQ_FMON 72U +#define MRQ_EC 73U +#define MRQ_DEBUG 75U /** @} */ @@ -157,7 +167,7 @@ struct mrq_response { * @brief Maximum MRQ code to be sent by CPU software to * BPMP. 
Subject to change in future */ -#define MAX_CPU_MRQ_ID 75 +#define MAX_CPU_MRQ_ID 75U /** * @addtogroup MRQ_Payloads @@ -224,7 +234,7 @@ struct mrq_response { struct mrq_ping_request { /** @brief Arbitrarily chosen value */ uint32_t challenge; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup Ping @@ -238,7 +248,7 @@ struct mrq_ping_request { struct mrq_ping_response { /** @brief Response to the MRQ_PING challege */ uint32_t reply; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup MRQ_Codes @@ -265,7 +275,7 @@ struct mrq_ping_response { struct mrq_query_tag_request { /** @brief Base address to store the firmware tag */ uint32_t addr; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** @@ -292,15 +302,15 @@ struct mrq_query_tag_request { struct mrq_query_fw_tag_response { /** @brief Array to store tag information */ uint8_t tag[32]; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup MRQ_Codes * @def MRQ_MODULE_LOAD * @brief Dynamically load a BPMP code module * - * * Platforms: T210, T214, T186 - * @cond (bpmp_t210 || bpmp_t214 || bpmp_t186) + * * Platforms: T210, T210B01, T186 + * @cond (bpmp_t210 || bpmp_t210b01 || bpmp_t186) * * Initiators: CCPLEX * * Targets: BPMP * * Request Payload: @ref mrq_module_load_request @@ -328,11 +338,11 @@ struct mrq_query_fw_tag_response { * */ struct mrq_module_load_request { - /** @brief Base address of the code to load. Treated as (void *) */ - uint32_t phys_addr; /* (void *) */ + /** @brief Base address of the code to load */ + uint32_t phys_addr; /** @brief Size in bytes of code to load */ uint32_t size; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup Module @@ -343,7 +353,7 @@ struct mrq_module_load_request { struct mrq_module_load_response { /** @brief Handle to the loaded module */ uint32_t base; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** @endcond*/ /** @@ -351,8 +361,8 @@ struct mrq_module_load_response { * @def MRQ_MODULE_UNLOAD * @brief Unload a previously loaded code module * - * * Platforms: T210, T214, T186 - * @cond (bpmp_t210 || bpmp_t214 || bpmp_t186) + * * Platforms: T210, T210B01, T186 + * @cond (bpmp_t210 || bpmp_t210b01 || bpmp_t186) * * Initiators: CCPLEX * * Targets: BPMP * * Request Payload: @ref mrq_module_unload_request @@ -371,7 +381,7 @@ struct mrq_module_load_response { struct mrq_module_unload_request { /** @brief Handle of the module to unload */ uint32_t base; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** @endcond*/ /** @@ -379,6 +389,8 @@ struct mrq_module_unload_request { * @def MRQ_TRACE_MODIFY * @brief Modify the set of enabled trace events * + * @deprecated + * * * Platforms: All * * Initiators: CCPLEX * * Targets: BPMP @@ -401,7 +413,7 @@ struct mrq_trace_modify_request { uint32_t clr; /** @brief Bit mask of trace events to enable */ uint32_t set; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup Trace @@ -415,13 +427,15 @@ struct mrq_trace_modify_request { struct mrq_trace_modify_response { /** @brief Bit mask of trace event enable states */ uint32_t mask; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup MRQ_Codes * @def MRQ_WRITE_TRACE * @brief Write trace data to a buffer * + * @deprecated + * * * Platforms: All * * Initiators: CCPLEX * * Targets: BPMP @@ -455,7 +469,7 @@ struct mrq_write_trace_request { uint32_t area; /** @brief Size in bytes of the output buffer */ uint32_t size; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup Trace @@ -472,25 +486,25 @@ struct mrq_write_trace_response { * drained to the outputbuffer. Value is 0 otherwise. 
*/ uint32_t eof; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** @private */ struct mrq_threaded_ping_request { uint32_t challenge; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** @private */ struct mrq_threaded_ping_response { uint32_t reply; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup MRQ_Codes * @def MRQ_MODULE_MAIL * @brief Send a message to a loadable module * - * * Platforms: T210, T214, T186 - * @cond (bpmp_t210 || bpmp_t214 || bpmp_t186) + * * Platforms: T210, T210B01, T186 + * @cond (bpmp_t210 || bpmp_t210b01 || bpmp_t186) * * Initiators: Any * * Targets: BPMP * * Request Payload: @ref mrq_module_mail_request @@ -511,8 +525,8 @@ struct mrq_module_mail_request { * The length of data[ ] is unknown to the BPMP core firmware * but it is limited to the size of an IPC message. */ - uint8_t data[EMPTY_ARRAY]; -} __ABI_PACKED; + uint8_t data[BPMP_ABI_EMPTY_ARRAY]; +} BPMP_ABI_PACKED; /** * @ingroup Module @@ -524,8 +538,8 @@ struct mrq_module_mail_response { * The length of data[ ] is unknown to the BPMP core firmware * but it is limited to the size of an IPC message. */ - uint8_t data[EMPTY_ARRAY]; -} __ABI_PACKED; + uint8_t data[BPMP_ABI_EMPTY_ARRAY]; +} BPMP_ABI_PACKED; /** @endcond */ /** @@ -590,7 +604,7 @@ struct cmd_debugfs_fileop_request { uint32_t dataaddr; /** @brief Length in bytes of data buffer */ uint32_t datalen; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup Debugfs @@ -601,7 +615,7 @@ struct cmd_debugfs_dumpdir_request { uint32_t dataaddr; /** @brief Length in bytes of data buffer */ uint32_t datalen; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup Debugfs @@ -612,7 +626,7 @@ struct cmd_debugfs_fileop_response { uint32_t reserved; /** @brief Number of bytes read from or written to data buffer */ uint32_t nbytes; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup Debugfs @@ -623,7 +637,7 @@ struct cmd_debugfs_dumpdir_response { uint32_t reserved; /** @brief Number of bytes read from or written to data buffer */ uint32_t nbytes; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup Debugfs @@ -646,8 +660,8 @@ struct mrq_debugfs_request { union { struct cmd_debugfs_fileop_request fop; struct cmd_debugfs_dumpdir_request dumpdir; - } __UNION_ANON; -} __ABI_PACKED; + } BPMP_UNION_ANON; +} BPMP_ABI_PACKED; /** * @ingroup Debugfs @@ -662,8 +676,8 @@ struct mrq_debugfs_response { struct cmd_debugfs_fileop_response fop; /** @brief Response data for CMD_DEBUGFS_DUMPDIR command */ struct cmd_debugfs_dumpdir_response dumpdir; - } __UNION_ANON; -} __ABI_PACKED; + } BPMP_UNION_ANON; +} BPMP_ABI_PACKED; /** * @addtogroup Debugfs @@ -735,7 +749,7 @@ struct cmd_debug_fopen_request { * length @ref DEBUG_FNAME_MAX_SZ */ char name[DEBUG_FNAME_MAX_SZ]; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup Debugfs @@ -748,7 +762,7 @@ struct cmd_debug_fopen_response { * Maximum allowed length for WRITE command */ uint32_t datalen; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup Debugfs @@ -759,7 +773,7 @@ struct cmd_debug_fread_request { * to CMD_DEBUG_OPEN_RO request */ uint32_t fd; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup Debugfs @@ -778,7 +792,7 @@ struct cmd_debug_fread_response { uint32_t readlen; /** @brief File data from seek position */ char data[DEBUG_READ_MAX_SZ]; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup Debugfs @@ -801,7 +815,7 @@ struct cmd_debug_fwrite_request { uint32_t datalen; /** @brief Data to be written */ char data[DEBUG_WRITE_MAX_SZ]; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup Debugfs @@ -812,7 +826,7 @@ struct cmd_debug_fclose_request { * 
to CMD_DEBUG_OPEN_RO request */ uint32_t fd; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup Debugfs @@ -843,8 +857,8 @@ struct mrq_debug_request { struct cmd_debug_fwrite_request fwr; /** @brief Request payload for CMD_DEBUG_CLOSE command */ struct cmd_debug_fclose_request fcl; - } __UNION_ANON; -} __ABI_PACKED; + } BPMP_UNION_ANON; +} BPMP_ABI_PACKED; /** * @ingroup Debugfs @@ -855,8 +869,8 @@ struct mrq_debug_response { struct cmd_debug_fopen_response fop; /** @brief Response data for CMD_DEBUG_READ command */ struct cmd_debug_fread_response frd; - } __UNION_ANON; -} __ABI_PACKED; + } BPMP_UNION_ANON; +} BPMP_ABI_PACKED; /** * @ingroup MRQ_Codes @@ -874,14 +888,41 @@ struct mrq_debug_response { */ enum mrq_reset_commands { - /** @brief Assert module reset */ + /** + * @brief Assert module reset + * + * mrq_response::err is 0 if the operation was successful, or @n + * -#BPMP_EINVAL if mrq_reset_request::reset_id is invalid @n + * -#BPMP_EACCES if mrq master is not an owner of target domain reset @n + * -#BPMP_ENOTSUP if target domain h/w state does not allow reset + */ CMD_RESET_ASSERT = 1, - /** @brief Deassert module reset */ + /** + * @brief Deassert module reset + * + * mrq_response::err is 0 if the operation was successful, or @n + * -#BPMP_EINVAL if mrq_reset_request::reset_id is invalid @n + * -#BPMP_EACCES if mrq master is not an owner of target domain reset @n + * -#BPMP_ENOTSUP if target domain h/w state does not allow reset + */ CMD_RESET_DEASSERT = 2, - /** @brief Assert and deassert the module reset */ + /** + * @brief Assert and deassert the module reset + * + * mrq_response::err is 0 if the operation was successful, or @n + * -#BPMP_EINVAL if mrq_reset_request::reset_id is invalid @n + * -#BPMP_EACCES if mrq master is not an owner of target domain reset @n + * -#BPMP_ENOTSUP if target domain h/w state does not allow reset + */ CMD_RESET_MODULE = 3, - /** @brief Get the highest reset ID */ + /** + * @brief Get the highest reset ID + * + * mrq_response::err is 0 if the operation was successful, or @n + * -#BPMP_ENODEV if no reset domains are supported (number of IDs is 0) + */ CMD_RESET_GET_MAX_ID = 4, + /** @brief Not part of ABI and subject to change */ CMD_RESET_MAX, }; @@ -897,7 +938,7 @@ struct mrq_reset_request { uint32_t cmd; /** @brief Id of the reset to affected */ uint32_t reset_id; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @brief Response for MRQ_RESET sub-command CMD_RESET_GET_MAX_ID. 
When @@ -907,7 +948,7 @@ struct mrq_reset_request { struct cmd_reset_get_max_id_response { /** @brief Max reset id */ uint32_t max_id; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @brief Response with MRQ_RESET @@ -926,8 +967,8 @@ struct cmd_reset_get_max_id_response { struct mrq_reset_response { union { struct cmd_reset_get_max_id_response reset_get_max_id; - } __UNION_ANON; -} __ABI_PACKED; + } BPMP_UNION_ANON; +} BPMP_ABI_PACKED; /** @} */ @@ -945,17 +986,17 @@ struct mrq_reset_response { * @addtogroup I2C * @{ */ -#define TEGRA_I2C_IPC_MAX_IN_BUF_SIZE (MSG_DATA_MIN_SZ - 12) -#define TEGRA_I2C_IPC_MAX_OUT_BUF_SIZE (MSG_DATA_MIN_SZ - 4) +#define TEGRA_I2C_IPC_MAX_IN_BUF_SIZE (MSG_DATA_MIN_SZ - 12U) +#define TEGRA_I2C_IPC_MAX_OUT_BUF_SIZE (MSG_DATA_MIN_SZ - 4U) -#define SERIALI2C_TEN 0x0010 -#define SERIALI2C_RD 0x0001 -#define SERIALI2C_STOP 0x8000 -#define SERIALI2C_NOSTART 0x4000 -#define SERIALI2C_REV_DIR_ADDR 0x2000 -#define SERIALI2C_IGNORE_NAK 0x1000 -#define SERIALI2C_NO_RD_ACK 0x0800 -#define SERIALI2C_RECV_LEN 0x0400 +#define SERIALI2C_TEN 0x0010U +#define SERIALI2C_RD 0x0001U +#define SERIALI2C_STOP 0x8000U +#define SERIALI2C_NOSTART 0x4000U +#define SERIALI2C_REV_DIR_ADDR 0x2000U +#define SERIALI2C_IGNORE_NAK 0x1000U +#define SERIALI2C_NO_RD_ACK 0x0800U +#define SERIALI2C_RECV_LEN 0x0400U enum { CMD_I2C_XFER = 1 @@ -985,7 +1026,7 @@ struct serial_i2c_request { uint16_t len; /** @brief For write transactions only, #len bytes of data */ uint8_t data[]; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @brief Trigger one or more i2c transactions @@ -999,7 +1040,7 @@ struct cmd_i2c_xfer_request { /** @brief Serialized packed instances of @ref serial_i2c_request*/ uint8_t data_buf[TEGRA_I2C_IPC_MAX_IN_BUF_SIZE]; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @brief Container for data read from the i2c bus @@ -1013,7 +1054,7 @@ struct cmd_i2c_xfer_response { uint32_t data_size; /** @brief I2c read data */ uint8_t data_buf[TEGRA_I2C_IPC_MAX_OUT_BUF_SIZE]; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @brief Request with #MRQ_I2C @@ -1023,14 +1064,25 @@ struct mrq_i2c_request { uint32_t cmd; /** @brief Parameters of the transfer request */ struct cmd_i2c_xfer_request xfer; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @brief Response to #MRQ_I2C + * + * mrq_response:err is + * 0: Success + * -#BPMP_EBADCMD: if mrq_i2c_request::cmd is other than 1 + * -#BPMP_EINVAL: if cmd_i2c_xfer_request does not contain correctly formatted request + * -#BPMP_ENODEV: if cmd_i2c_xfer_request::bus_id is not supported by BPMP + * -#BPMP_EACCES: if i2c transaction is not allowed due to firewall rules + * -#BPMP_ETIMEDOUT: if i2c transaction times out + * -#BPMP_ENXIO: if i2c slave device does not reply with ACK to the transaction + * -#BPMP_EAGAIN: if ARB_LOST condition is detected by the i2c controller + * -#BPMP_EIO: any other i2c controller error code than NO_ACK or ARB_LOST */ struct mrq_i2c_response { struct cmd_i2c_xfer_response xfer; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** @} */ @@ -1063,90 +1115,105 @@ enum { CMD_CLK_MAX, }; -#define BPMP_CLK_HAS_MUX (1 << 0) -#define BPMP_CLK_HAS_SET_RATE (1 << 1) -#define BPMP_CLK_IS_ROOT (1 << 2) +#define BPMP_CLK_HAS_MUX (1U << 0U) +#define BPMP_CLK_HAS_SET_RATE (1U << 1U) +#define BPMP_CLK_IS_ROOT (1U << 2U) +#define BPMP_CLK_IS_VAR_ROOT (1U << 3U) -#define MRQ_CLK_NAME_MAXLEN 40 -#define MRQ_CLK_MAX_PARENTS 16 +#define MRQ_CLK_NAME_MAXLEN 40U +#define MRQ_CLK_MAX_PARENTS 16U /** @private */ struct cmd_clk_get_rate_request { - EMPTY -} __ABI_PACKED; + BPMP_ABI_EMPTY +} 
BPMP_ABI_PACKED; struct cmd_clk_get_rate_response { int64_t rate; -} __ABI_PACKED; +} BPMP_ABI_PACKED; struct cmd_clk_set_rate_request { int32_t unused; int64_t rate; -} __ABI_PACKED; +} BPMP_ABI_PACKED; struct cmd_clk_set_rate_response { int64_t rate; -} __ABI_PACKED; +} BPMP_ABI_PACKED; struct cmd_clk_round_rate_request { int32_t unused; int64_t rate; -} __ABI_PACKED; +} BPMP_ABI_PACKED; struct cmd_clk_round_rate_response { int64_t rate; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** @private */ struct cmd_clk_get_parent_request { - EMPTY -} __ABI_PACKED; + BPMP_ABI_EMPTY +} BPMP_ABI_PACKED; struct cmd_clk_get_parent_response { uint32_t parent_id; -} __ABI_PACKED; +} BPMP_ABI_PACKED; struct cmd_clk_set_parent_request { uint32_t parent_id; -} __ABI_PACKED; +} BPMP_ABI_PACKED; struct cmd_clk_set_parent_response { uint32_t parent_id; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** @private */ struct cmd_clk_is_enabled_request { - EMPTY -} __ABI_PACKED; + BPMP_ABI_EMPTY +} BPMP_ABI_PACKED; +/** + * @brief Response data to #MRQ_CLK sub-command CMD_CLK_IS_ENABLED + */ struct cmd_clk_is_enabled_response { + /** + * @brief The state of the clock that has been succesfully + * requested with CMD_CLK_ENABLE or CMD_CLK_DISABLE by the + * master invoking the command earlier. + * + * The state may not reflect the physical state of the clock + * if there are some other masters requesting it to be + * enabled. + * + * Value 0 is disabled, all other values indicate enabled. + */ int32_t state; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** @private */ struct cmd_clk_enable_request { - EMPTY -} __ABI_PACKED; + BPMP_ABI_EMPTY +} BPMP_ABI_PACKED; /** @private */ struct cmd_clk_enable_response { - EMPTY -} __ABI_PACKED; + BPMP_ABI_EMPTY +} BPMP_ABI_PACKED; /** @private */ struct cmd_clk_disable_request { - EMPTY -} __ABI_PACKED; + BPMP_ABI_EMPTY +} BPMP_ABI_PACKED; /** @private */ struct cmd_clk_disable_response { - EMPTY -} __ABI_PACKED; + BPMP_ABI_EMPTY +} BPMP_ABI_PACKED; /** @private */ struct cmd_clk_get_all_info_request { - EMPTY -} __ABI_PACKED; + BPMP_ABI_EMPTY +} BPMP_ABI_PACKED; struct cmd_clk_get_all_info_response { uint32_t flags; @@ -1154,25 +1221,25 @@ struct cmd_clk_get_all_info_response { uint32_t parents[MRQ_CLK_MAX_PARENTS]; uint8_t num_parents; uint8_t name[MRQ_CLK_NAME_MAXLEN]; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** @private */ struct cmd_clk_get_max_clk_id_request { - EMPTY -} __ABI_PACKED; + BPMP_ABI_EMPTY +} BPMP_ABI_PACKED; struct cmd_clk_get_max_clk_id_response { uint32_t max_id; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** @private */ struct cmd_clk_get_fmax_at_vmin_request { - EMPTY -} __ABI_PACKED; + BPMP_ABI_EMPTY +} BPMP_ABI_PACKED; struct cmd_clk_get_fmax_at_vmin_response { int64_t rate; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup Clocks @@ -1227,8 +1294,8 @@ struct mrq_clk_request { struct cmd_clk_get_max_clk_id_request clk_get_max_clk_id; /** @private */ struct cmd_clk_get_fmax_at_vmin_request clk_get_fmax_at_vmin; - } __UNION_ANON; -} __ABI_PACKED; + } BPMP_UNION_ANON; +} BPMP_ABI_PACKED; /** * @ingroup Clocks @@ -1269,8 +1336,8 @@ struct mrq_clk_response { struct cmd_clk_get_all_info_response clk_get_all_info; struct cmd_clk_get_max_clk_id_response clk_get_max_clk_id; struct cmd_clk_get_fmax_at_vmin_response clk_get_fmax_at_vmin; - } __UNION_ANON; -} __ABI_PACKED; + } BPMP_UNION_ANON; +} BPMP_ABI_PACKED; /** @} */ @@ -1296,7 +1363,7 @@ struct mrq_clk_response { struct mrq_query_abi_request { /** @brief MRQ code to query */ uint32_t mrq; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * 
@ingroup ABI_info @@ -1308,7 +1375,7 @@ struct mrq_query_abi_request { struct mrq_query_abi_response { /** @brief 0 if queried MRQ is supported. Else, -#BPMP_ENODEV */ int32_t status; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup MRQ_Codes @@ -1333,7 +1400,7 @@ struct mrq_query_abi_response { struct mrq_pg_read_state_request { /** @brief ID of partition */ uint32_t partition_id; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup Powergating @@ -1348,7 +1415,7 @@ struct mrq_pg_read_state_response { * * 1 : on */ uint32_t logic_state; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** @endcond*/ /** @} */ @@ -1398,7 +1465,7 @@ struct mrq_pg_update_state_request { * @ref logic_state == 0x3) */ uint32_t clock_state; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** @endcond*/ /** @@ -1494,25 +1561,38 @@ enum pg_states { struct cmd_pg_query_abi_request { /** @ref mrq_pg_cmd */ uint32_t type; -} __ABI_PACKED; +} BPMP_ABI_PACKED; struct cmd_pg_set_state_request { /** @ref pg_states */ uint32_t state; -} __ABI_PACKED; +} BPMP_ABI_PACKED; +/** + * @brief Response data to #MRQ_PG sub command #CMD_PG_GET_STATE + */ struct cmd_pg_get_state_response { - /** @ref pg_states */ + /** + * @brief The state of the power partition that has been + * succesfuly requested by the master earlier using #MRQ_PG + * command #CMD_PG_SET_STATE. + * + * The state may not reflect the physical state of the power + * partition if there are some other masters requesting it to + * be enabled. + * + * See @ref pg_states for possible values + */ uint32_t state; -} __ABI_PACKED; +} BPMP_ABI_PACKED; struct cmd_pg_get_name_response { uint8_t name[MRQ_PG_NAME_MAXLEN]; -} __ABI_PACKED; +} BPMP_ABI_PACKED; struct cmd_pg_get_max_id_response { uint32_t max_id; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @brief Request with #MRQ_PG @@ -1537,8 +1617,8 @@ struct mrq_pg_request { union { struct cmd_pg_query_abi_request query_abi; struct cmd_pg_set_state_request set_state; - } __UNION_ANON; -} __ABI_PACKED; + } BPMP_UNION_ANON; +} BPMP_ABI_PACKED; /** * @brief Response to MRQ_PG @@ -1560,8 +1640,8 @@ struct mrq_pg_response { struct cmd_pg_get_state_response get_state; struct cmd_pg_get_name_response get_name; struct cmd_pg_get_max_id_response get_max_id; - } __UNION_ANON; -} __ABI_PACKED; + } BPMP_UNION_ANON; +} BPMP_ABI_PACKED; /** @} */ @@ -1650,6 +1730,20 @@ enum mrq_thermal_host_to_bpmp_cmd { */ CMD_THERMAL_GET_NUM_ZONES = 3, + /** + * @brief Get the thermtrip of the specified zone. + * + * Host needs to supply request parameters. + * + * mrq_response::err is + * * 0: Valid zone information returned. + * * -#BPMP_EINVAL: Invalid request parameters. + * * -#BPMP_ENOENT: No driver registered for thermal zone. + * * -#BPMP_ERANGE if thermtrip is invalid or disabled. + * * -#BPMP_EFAULT: Problem reading zone information. + */ + CMD_THERMAL_GET_THERMTRIP = 4, + /** @brief: number of supported host-to-bpmp commands. 
May * increase in future */ @@ -1680,7 +1774,7 @@ enum mrq_thermal_bpmp_to_host_cmd { */ struct cmd_thermal_query_abi_request { uint32_t type; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /* * Host->BPMP request data for request type CMD_THERMAL_GET_TEMP @@ -1689,7 +1783,7 @@ struct cmd_thermal_query_abi_request { */ struct cmd_thermal_get_temp_request { uint32_t zone; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /* * BPMP->Host reply data for request CMD_THERMAL_GET_TEMP @@ -1702,7 +1796,7 @@ struct cmd_thermal_get_temp_request { */ struct cmd_thermal_get_temp_response { int32_t temp; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /* * Host->BPMP request data for request type CMD_THERMAL_SET_TRIP @@ -1717,7 +1811,7 @@ struct cmd_thermal_set_trip_request { int32_t low; int32_t high; uint32_t enabled; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /* * BPMP->Host request data for request type CMD_THERMAL_HOST_TRIP_REACHED @@ -1726,7 +1820,7 @@ struct cmd_thermal_set_trip_request { */ struct cmd_thermal_host_trip_reached_request { uint32_t zone; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /* * BPMP->Host reply data for request type CMD_THERMAL_GET_NUM_ZONES @@ -1736,7 +1830,25 @@ struct cmd_thermal_host_trip_reached_request { */ struct cmd_thermal_get_num_zones_response { uint32_t num; -} __ABI_PACKED; +} BPMP_ABI_PACKED; + +/* + * Host->BPMP request data for request type CMD_THERMAL_GET_THERMTRIP + * + * zone: Number of thermal zone. + */ +struct cmd_thermal_get_thermtrip_request { + uint32_t zone; +} BPMP_ABI_PACKED; + +/* + * BPMP->Host reply data for request CMD_THERMAL_GET_THERMTRIP + * + * thermtrip: HW shutdown temperature in millicelsius. + */ +struct cmd_thermal_get_thermtrip_response { + int32_t thermtrip; +} BPMP_ABI_PACKED; /* * Host->BPMP request data. @@ -1752,8 +1864,9 @@ struct mrq_thermal_host_to_bpmp_request { struct cmd_thermal_query_abi_request query_abi; struct cmd_thermal_get_temp_request get_temp; struct cmd_thermal_set_trip_request set_trip; - } __UNION_ANON; -} __ABI_PACKED; + struct cmd_thermal_get_thermtrip_request get_thermtrip; + } BPMP_UNION_ANON; +} BPMP_ABI_PACKED; /* * BPMP->Host request data. @@ -1765,16 +1878,17 @@ struct mrq_thermal_bpmp_to_host_request { uint32_t type; union { struct cmd_thermal_host_trip_reached_request host_trip_reached; - } __UNION_ANON; -} __ABI_PACKED; + } BPMP_UNION_ANON; +} BPMP_ABI_PACKED; /* * Data in reply to a Host->BPMP request. 
*/ union mrq_thermal_bpmp_to_host_response { struct cmd_thermal_get_temp_response get_temp; + struct cmd_thermal_get_thermtrip_response get_thermtrip; struct cmd_thermal_get_num_zones_response get_num_zones; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** @} */ /** @@ -1806,7 +1920,7 @@ struct mrq_cpu_vhint_request { uint32_t addr; /** @brief ID of the cluster whose data is requested */ uint32_t cluster_id; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @brief Description of the CPU v/f relation @@ -1833,7 +1947,7 @@ struct cpu_vhint_data { uint16_t vindex_div; /** reserved for future use */ uint16_t reserved[328]; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** @endcond */ /** @} */ @@ -1922,11 +2036,11 @@ struct mrq_abi_ratchet_response { * @brief Used by @ref mrq_emc_dvfs_latency_response */ struct emc_dvfs_latency { - /** @brief EMC frequency in kHz */ + /** @brief EMC DVFS node frequency in kHz */ uint32_t freq; /** @brief EMC DVFS latency in nanoseconds */ uint32_t latency; -} __ABI_PACKED; +} BPMP_ABI_PACKED; #define EMC_DVFS_LATENCY_MAX_SIZE 14 /** @@ -1935,9 +2049,9 @@ struct emc_dvfs_latency { struct mrq_emc_dvfs_latency_response { /** @brief The number valid entries in #pairs */ uint32_t num_pairs; - /** @brief EMC information */ + /** @brief EMC DVFS node information */ struct emc_dvfs_latency pairs[EMC_DVFS_LATENCY_MAX_SIZE]; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** @} */ @@ -1962,7 +2076,7 @@ struct mrq_emc_dvfs_latency_response { struct mrq_cpu_ndiv_limits_request { /** @brief Enum cluster_id */ uint32_t cluster_id; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @brief Response to #MRQ_CPU_NDIV_LIMITS @@ -1978,7 +2092,7 @@ struct mrq_cpu_ndiv_limits_response { uint16_t ndiv_max; /** @brief Minimum allowed NDIV value */ uint16_t ndiv_min; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** @} */ /** @endcond */ @@ -2010,7 +2124,7 @@ struct mrq_cpu_ndiv_limits_response { struct mrq_cpu_auto_cc3_request { /** @brief Enum cluster_id (logical cluster id, known to CCPLEX s/w) */ uint32_t cluster_id; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @brief Response to #MRQ_CPU_AUTO_CC3 @@ -2024,7 +2138,7 @@ struct mrq_cpu_auto_cc3_response { * - bit [0] if "1" auto-CC3 is allowed, if "0" auto-CC3 is not allowed */ uint32_t auto_cc3_config; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** @} */ /** @endcond */ @@ -2034,6 +2148,8 @@ struct mrq_cpu_auto_cc3_response { * @def MRQ_TRACE_ITER * @brief Manage the trace iterator * + * @deprecated + * * * Platforms: All * * Initiators: CCPLEX * * Targets: BPMP @@ -2055,7 +2171,7 @@ enum { struct mrq_trace_iter_request { /** @brief TRACE_ITER_INIT or TRACE_ITER_CLEAN */ uint32_t cmd; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** @} */ @@ -2131,12 +2247,12 @@ enum mrq_ringbuf_console_host_to_bpmp_cmd { struct cmd_ringbuf_console_query_abi_req { /** @brief Command identifier to be queried */ uint32_t cmd; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** @private */ struct cmd_ringbuf_console_query_abi_resp { - EMPTY -} __ABI_PACKED; + BPMP_ABI_EMPTY +} BPMP_ABI_PACKED; /** * @ingroup RingbufConsole @@ -2147,7 +2263,7 @@ struct cmd_ringbuf_console_read_req { * @brief Number of bytes requested to be read from the BPMP TX buffer */ uint8_t len; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup RingbufConsole @@ -2158,7 +2274,7 @@ struct cmd_ringbuf_console_read_resp { uint8_t data[MRQ_RINGBUF_CONSOLE_MAX_READ_LEN]; /** @brief Number of bytes in cmd_ringbuf_console_read_resp::data */ uint8_t len; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup RingbufConsole @@ -2169,7 +2285,7 @@ struct 
cmd_ringbuf_console_write_req { uint8_t data[MRQ_RINGBUF_CONSOLE_MAX_WRITE_LEN]; /** @brief Number of bytes in cmd_ringbuf_console_write_req::data */ uint8_t len; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup RingbufConsole @@ -2180,12 +2296,12 @@ struct cmd_ringbuf_console_write_resp { uint32_t space_avail; /** @brief Number of bytes that were written to the BPMP RX buffer */ uint8_t len; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** @private */ struct cmd_ringbuf_console_get_fifo_req { - EMPTY -} __ABI_PACKED; + BPMP_ABI_EMPTY +} BPMP_ABI_PACKED; /** * @ingroup RingbufConsole @@ -2200,7 +2316,7 @@ struct cmd_ringbuf_console_get_fifo_resp { uint64_t bpmp_tx_tail_addr; /** @brief Length of the BPMP TX buffer */ uint32_t bpmp_tx_buf_len; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup RingbufConsole @@ -2220,8 +2336,8 @@ struct mrq_ringbuf_console_host_to_bpmp_request { struct cmd_ringbuf_console_read_req read; struct cmd_ringbuf_console_write_req write; struct cmd_ringbuf_console_get_fifo_req get_fifo; - } __UNION_ANON; -} __ABI_PACKED; + } BPMP_UNION_ANON; +} BPMP_ABI_PACKED; /** * @ingroup RingbufConsole @@ -2234,7 +2350,7 @@ union mrq_ringbuf_console_bpmp_to_host_response { struct cmd_ringbuf_console_read_resp read; struct cmd_ringbuf_console_write_resp write; struct cmd_ringbuf_console_get_fifo_resp get_fifo; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** @} */ /** @@ -2278,7 +2394,7 @@ struct mrq_strap_request { uint32_t id; /** @brief Desired value for strap (if cmd is #STRAP_SET) */ uint32_t value; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @defgroup Strap_Ids Strap Identifiers @@ -2321,28 +2437,28 @@ struct cmd_uphy_margin_control_request { uint32_t y; /** @brief Set number of bit blocks for each margin section */ uint32_t nblks; -} __ABI_PACKED; +} BPMP_ABI_PACKED; struct cmd_uphy_margin_status_response { /** @brief Number of errors observed */ uint32_t status; -} __ABI_PACKED; +} BPMP_ABI_PACKED; struct cmd_uphy_ep_controller_pll_init_request { /** @brief EP controller number, valid: 0, 4, 5 */ uint8_t ep_controller; -} __ABI_PACKED; +} BPMP_ABI_PACKED; struct cmd_uphy_pcie_controller_state_request { /** @brief PCIE controller number, valid: 0, 1, 2, 3, 4 */ uint8_t pcie_controller; uint8_t enable; -} __ABI_PACKED; +} BPMP_ABI_PACKED; struct cmd_uphy_ep_controller_pll_off_request { /** @brief EP controller number, valid: 0, 4, 5 */ uint8_t ep_controller; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup UPHY @@ -2373,8 +2489,8 @@ struct mrq_uphy_request { struct cmd_uphy_ep_controller_pll_init_request ep_ctrlr_pll_init; struct cmd_uphy_pcie_controller_state_request controller_state; struct cmd_uphy_ep_controller_pll_off_request ep_ctrlr_pll_off; - } __UNION_ANON; -} __ABI_PACKED; + } BPMP_UNION_ANON; +} BPMP_ABI_PACKED; /** * @ingroup UPHY @@ -2394,8 +2510,8 @@ struct mrq_uphy_request { struct mrq_uphy_response { union { struct cmd_uphy_margin_status_response uphy_get_margin_status; - } __UNION_ANON; -} __ABI_PACKED; + } BPMP_UNION_ANON; +} BPMP_ABI_PACKED; /** @} */ /** @endcond */ @@ -2445,31 +2561,31 @@ enum { struct cmd_fmon_gear_clamp_request { int32_t unused; int64_t rate; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** @private */ struct cmd_fmon_gear_clamp_response { - EMPTY -} __ABI_PACKED; + BPMP_ABI_EMPTY +} BPMP_ABI_PACKED; /** @private */ struct cmd_fmon_gear_free_request { - EMPTY -} __ABI_PACKED; + BPMP_ABI_EMPTY +} BPMP_ABI_PACKED; /** @private */ struct cmd_fmon_gear_free_response { - EMPTY -} __ABI_PACKED; + BPMP_ABI_EMPTY +} BPMP_ABI_PACKED; /** @private */ struct 
cmd_fmon_gear_get_request { - EMPTY -} __ABI_PACKED; + BPMP_ABI_EMPTY +} BPMP_ABI_PACKED; struct cmd_fmon_gear_get_response { int64_t rate; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * @ingroup FMON @@ -2502,8 +2618,8 @@ struct mrq_fmon_request { struct cmd_fmon_gear_free_request fmon_gear_free; /** @private */ struct cmd_fmon_gear_get_request fmon_gear_get; - } __UNION_ANON; -} __ABI_PACKED; + } BPMP_UNION_ANON; +} BPMP_ABI_PACKED; /** * @ingroup FMON @@ -2527,8 +2643,8 @@ struct mrq_fmon_response { /** @private */ struct cmd_fmon_gear_free_response fmon_gear_free; struct cmd_fmon_gear_get_response fmon_gear_get; - } __UNION_ANON; -} __ABI_PACKED; + } BPMP_UNION_ANON; +} BPMP_ABI_PACKED; /** @} */ /** @endcond */ @@ -2553,13 +2669,27 @@ struct mrq_fmon_response { */ enum { /** + * @cond DEPRECATED * @brief Retrieve specified EC status. * * mrq_response::err is 0 if the operation was successful, or @n * -#BPMP_ENODEV if target EC is not owned by BPMP @n - * -#BPMP_EACCES if target EC power domain is turned off + * -#BPMP_EACCES if target EC power domain is turned off @n + * -#BPMP_EBADCMD if subcommand is not supported + * @endcond */ - CMD_EC_STATUS_GET = 1, + CMD_EC_STATUS_GET = 1, /* deprecated */ + + /** + * @brief Retrieve specified EC extended status (includes error + * counter and user values). + * + * mrq_response::err is 0 if the operation was successful, or @n + * -#BPMP_ENODEV if target EC is not owned by BPMP @n + * -#BPMP_EACCES if target EC power domain is turned off @n + * -#BPMP_EBADCMD if subcommand is not supported + */ + CMD_EC_STATUS_EX_GET = 2, CMD_EC_NUM, }; @@ -2615,13 +2745,13 @@ enum bpmp_ec_err_type { /** @brief SW Correctable error * - * Error descriptor @ref ec_err_simple_desc. + * Error descriptor @ref ec_err_sw_error_desc. */ EC_ERR_TYPE_SW_CORRECTABLE = 16, /** @brief SW Uncorrectable error * - * Error descriptor @ref ec_err_simple_desc. + * Error descriptor @ref ec_err_sw_error_desc. */ EC_ERR_TYPE_SW_UNCORRECTABLE = 17, @@ -2641,9 +2771,9 @@ enum bpmp_ec_err_type { /** @brief Group of registers with parity error. 
*/ enum ec_registers_group { /** @brief Functional registers group */ - EC_ERR_GROUP_FUNC_REG = 0, + EC_ERR_GROUP_FUNC_REG = 0U, /** @brief SCR registers group */ - EC_ERR_GROUP_SCR_REG = 1, + EC_ERR_GROUP_SCR_REG = 1U, }; /** @@ -2652,11 +2782,11 @@ enum ec_registers_group { * @{ */ /** @brief No EC error found flag */ -#define EC_STATUS_FLAG_NO_ERROR 0x0001 +#define EC_STATUS_FLAG_NO_ERROR 0x0001U /** @brief Last EC error found flag */ -#define EC_STATUS_FLAG_LAST_ERROR 0x0002 +#define EC_STATUS_FLAG_LAST_ERROR 0x0002U /** @brief EC latent error flag */ -#define EC_STATUS_FLAG_LATENT_ERROR 0x0004 +#define EC_STATUS_FLAG_LATENT_ERROR 0x0004U /** @} */ /** @@ -2665,9 +2795,9 @@ enum ec_registers_group { * @{ */ /** @brief EC descriptor error resolved flag */ -#define EC_DESC_FLAG_RESOLVED 0x0001 +#define EC_DESC_FLAG_RESOLVED 0x0001U /** @brief EC descriptor failed to retrieve id flag */ -#define EC_DESC_FLAG_NO_ID 0x0002 +#define EC_DESC_FLAG_NO_ID 0x0002U /** @} */ /** @@ -2684,7 +2814,7 @@ struct ec_err_fmon_desc { uint32_t fmon_faults; /** @brief FMON faults access error */ int32_t fmon_access_error; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * |error type | vmon_adc_id values | @@ -2700,7 +2830,7 @@ struct ec_err_vmon_desc { uint32_t vmon_faults; /** @brief VMON faults access error */ int32_t vmon_access_error; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** * |error type | reg_id values | @@ -2714,7 +2844,22 @@ struct ec_err_reg_parity_desc { uint16_t reg_id; /** @brief Register group @ref ec_registers_group */ uint16_t reg_group; -} __ABI_PACKED; +} BPMP_ABI_PACKED; + +/** + * |error type | err_source_id values | + * |--------------------------------- |--------------------------| + * |@ref EC_ERR_TYPE_SW_CORRECTABLE | @ref bpmp_ec_ce_swd_ids | + * |@ref EC_ERR_TYPE_SW_UNCORRECTABLE | @ref bpmp_ec_ue_swd_ids | + */ +struct ec_err_sw_error_desc { + /** @brief Bitmask of @ref bpmp_ec_desc_flags */ + uint16_t desc_flags; + /** @brief Error source id */ + uint16_t err_source_id; + /** @brief Sw error data */ + uint32_t sw_error_data; +} BPMP_ABI_PACKED; /** * |error type | err_source_id values | @@ -2724,34 +2869,36 @@ struct ec_err_reg_parity_desc { * |@ref EC_ERR_TYPE_ECC_DED_INTERNAL |@ref bpmp_ec_ipath_ids | * |@ref EC_ERR_TYPE_COMPARATOR |@ref bpmp_ec_comparator_ids| * |@ref EC_ERR_TYPE_PARITY_SRAM |@ref bpmp_clock_ids | - * |@ref EC_ERR_TYPE_SW_CORRECTABLE |@ref bpmp_ec_misc_ids | - * |@ref EC_ERR_TYPE_SW_UNCORRECTABLE |@ref bpmp_ec_misc_ids | - * |@ref EC_ERR_TYPE_OTHER_HW_CORRECTABLE |@ref bpmp_ec_misc_ids | - * |@ref EC_ERR_TYPE_OTHER_HW_UNCORRECTABLE |@ref bpmp_ec_misc_ids | + * |@ref EC_ERR_TYPE_OTHER_HW_CORRECTABLE |@ref bpmp_ec_misc_hwd_ids | + * |@ref EC_ERR_TYPE_OTHER_HW_UNCORRECTABLE |@ref bpmp_ec_misc_hwd_ids | */ struct ec_err_simple_desc { /** @brief Bitmask of @ref bpmp_ec_desc_flags */ uint16_t desc_flags; /** @brief Error source id. Id space depends on error type. */ uint16_t err_source_id; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** @brief Union of EC error descriptors */ union ec_err_desc { struct ec_err_fmon_desc fmon_desc; struct ec_err_vmon_desc vmon_desc; struct ec_err_reg_parity_desc reg_parity_desc; + struct ec_err_sw_error_desc sw_error_desc; struct ec_err_simple_desc simple_desc; -} __ABI_PACKED; +} BPMP_ABI_PACKED; struct cmd_ec_status_get_request { /** @brief HSM error line number that identifies target EC. 
*/ uint32_t ec_hsm_id; -} __ABI_PACKED; +} BPMP_ABI_PACKED; /** EC status maximum number of descriptors */ -#define EC_ERR_STATUS_DESC_MAX_NUM 4 +#define EC_ERR_STATUS_DESC_MAX_NUM 4U +/** + * @cond DEPRECATED + */ struct cmd_ec_status_get_response { /** @brief Target EC id (the same id received with request). */ uint32_t ec_hsm_id; @@ -2769,7 +2916,33 @@ struct cmd_ec_status_get_response { uint32_t error_desc_num; /** @brief EC error descriptors */ union ec_err_desc error_descs[EC_ERR_STATUS_DESC_MAX_NUM]; -} __ABI_PACKED; +} BPMP_ABI_PACKED; +/** @endcond */ + +struct cmd_ec_status_ex_get_response { + /** @brief Target EC id (the same id received with request). */ + uint32_t ec_hsm_id; + /** + * @brief Bitmask of @ref bpmp_ec_status_flags + * + * If NO_ERROR flag is set, error_ fields should be ignored + */ + uint32_t ec_status_flags; + /** @brief Found EC error index. */ + uint32_t error_idx; + /** @brief Found EC error type @ref bpmp_ec_err_type. */ + uint32_t error_type; + /** @brief Found EC mission error counter value */ + uint32_t error_counter; + /** @brief Found EC mission error user value */ + uint32_t error_uval; + /** @brief Reserved entry */ + uint32_t reserved; + /** @brief Number of returned EC error descriptors */ + uint32_t error_desc_num; + /** @brief EC error descriptors */ + union ec_err_desc error_descs[EC_ERR_STATUS_DESC_MAX_NUM]; +} BPMP_ABI_PACKED; /** * @ingroup EC @@ -2778,9 +2951,15 @@ struct cmd_ec_status_get_response { * Used by the sender of an #MRQ_EC message to access ECs owned * by BPMP. * + * @cond DEPRECATED * |sub-command |payload | * |----------------------------|-----------------------| * |@ref CMD_EC_STATUS_GET |ec_status_get | + * @endcond + * + * |sub-command |payload | + * |----------------------------|-----------------------| + * |@ref CMD_EC_STATUS_EX_GET |ec_status_get | * */ @@ -2790,8 +2969,8 @@ struct mrq_ec_request { union { struct cmd_ec_status_get_request ec_status_get; - } __UNION_ANON; -} __ABI_PACKED; + } BPMP_UNION_ANON; +} BPMP_ABI_PACKED; /** * @ingroup EC @@ -2800,49 +2979,28 @@ struct mrq_ec_request { * Each sub-command supported by @ref mrq_ec_request may return * sub-command-specific data as indicated below. * + * @cond DEPRECATED * |sub-command |payload | * |----------------------------|------------------------| * |@ref CMD_EC_STATUS_GET |ec_status_get | + * @endcond + * + * |sub-command |payload | + * |----------------------------|------------------------| + * |@ref CMD_EC_STATUS_EX_GET |ec_status_ex_get | * */ struct mrq_ec_response { union { + /** + * @cond DEPRECATED + */ struct cmd_ec_status_get_response ec_status_get; - } __UNION_ANON; -} __ABI_PACKED; - -/** @} */ -/** @endcond */ - -/** - * @ingroup MRQ_Codes - * @def MRQ_FBVOLT_STATUS - * @brief Provides status information about voltage state for fuse burning - * - * * Platforms: T194 onwards - * @cond bpmp_t194 - * * Initiators: CCPLEX - * * Target: BPMP - * * Request Payload: None - * * Response Payload: @ref mrq_fbvolt_status_response - * @{ - */ - -/** - * @ingroup Fbvolt_status - * @brief Response to #MRQ_FBVOLT_STATUS - * - * Value of #ready reflects if core voltages are in a suitable state for buring - * fuses. A value of 0x1 indicates that core voltages are ready for burning - * fuses. A value of 0x0 indicates that core voltages are not ready. 
- */ -struct mrq_fbvolt_status_response { - /** @brief Bit [0:0] - ready status, bits [31:1] - reserved */ - uint32_t ready; - /** @brief Reserved */ - uint32_t unused; -} __ABI_PACKED; + /** @endcond */ + struct cmd_ec_status_ex_get_response ec_status_ex_get; + } BPMP_UNION_ANON; +} BPMP_ABI_PACKED; /** @} */ /** @endcond */ @@ -2855,6 +3013,8 @@ struct mrq_fbvolt_status_response { * @{ */ +/** @brief Operation not permitted */ +#define BPMP_EPERM 1 /** @brief No such file or directory */ #define BPMP_ENOENT 2 /** @brief No MRQ handler */ @@ -2863,12 +3023,16 @@ struct mrq_fbvolt_status_response { #define BPMP_EIO 5 /** @brief Bad sub-MRQ command */ #define BPMP_EBADCMD 6 +/** @brief Resource temporarily unavailable */ +#define BPMP_EAGAIN 11 /** @brief Not enough memory */ #define BPMP_ENOMEM 12 /** @brief Permission denied */ #define BPMP_EACCES 13 /** @brief Bad address */ #define BPMP_EFAULT 14 +/** @brief Resource busy */ +#define BPMP_EBUSY 16 /** @brief No such device */ #define BPMP_ENODEV 19 /** @brief Argument is a directory */ @@ -2880,10 +3044,18 @@ struct mrq_fbvolt_status_response { /** @brief Out of range */ #define BPMP_ERANGE 34 /** @brief Function not implemented */ -#define BPMP_ENOSYS 38 +#define BPMP_ENOSYS 38 /** @brief Invalid slot */ #define BPMP_EBADSLT 57 +/** @brief Not supported */ +#define BPMP_ENOTSUP 134 +/** @brief No such device or address */ +#define BPMP_ENXIO 140 /** @} */ +#if defined(BPMP_ABI_CHECKS) +#include "bpmp_abi_checks.h" +#endif + #endif -- cgit v1.2.3 From 4c5e2bba30e49b970a0fd07b43e0b7a3b5fd5ea7 Mon Sep 17 00:00:00 2001 From: Pratyush Yadav Date: Wed, 24 Jun 2020 00:00:14 +0530 Subject: spi: spi-mem: allow specifying whether an op is DTR or not Each phase is given a separate 'dtr' field so mixed protocols like 4S-4D-4D can be supported. Signed-off-by: Pratyush Yadav Reviewed-by: Tudor Ambarus Link: https://lore.kernel.org/r/20200623183030.26591-2-p.yadav@ti.com Signed-off-by: Mark Brown --- drivers/spi/spi-mem.c | 3 +++ include/linux/spi/spi-mem.h | 8 ++++++++ 2 files changed, 11 insertions(+) (limited to 'include') diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c index 9a86cc27fcc0..93e255287ab9 100644 --- a/drivers/spi/spi-mem.c +++ b/drivers/spi/spi-mem.c @@ -156,6 +156,9 @@ bool spi_mem_default_supports_op(struct spi_mem *mem, op->data.dir == SPI_MEM_DATA_OUT)) return false; + if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr) + return false; + return true; } EXPORT_SYMBOL_GPL(spi_mem_default_supports_op); diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index af9ff2f0f1b2..e3dcb956bf61 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h @@ -71,9 +71,11 @@ enum spi_mem_data_dir { * struct spi_mem_op - describes a SPI memory operation * @cmd.buswidth: number of IO lines used to transmit the command * @cmd.opcode: operation opcode + * @cmd.dtr: whether the command opcode should be sent in DTR mode or not * @addr.nbytes: number of address bytes to send. Can be zero if the operation * does not need to send an address * @addr.buswidth: number of IO lines used to transmit the address cycles + * @addr.dtr: whether the address should be sent in DTR mode or not * @addr.val: address value. This value is always sent MSB first on the bus. * Note that only @addr.nbytes are taken into account in this * address value, so users should make sure the value fits in the @@ -81,7 +83,9 @@ enum spi_mem_data_dir { * @dummy.nbytes: number of dummy bytes to send after an opcode or address. 
Can * be zero if the operation does not require dummy bytes * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes + * @dummy.dtr: whether the dummy bytes should be sent in DTR mode or not * @data.buswidth: number of IO lanes used to send/receive the data + * @data.dtr: whether the data should be sent in DTR mode or not * @data.dir: direction of the transfer * @data.nbytes: number of data bytes to send/receive. Can be zero if the * operation does not involve transferring data @@ -91,22 +95,26 @@ enum spi_mem_data_dir { struct spi_mem_op { struct { u8 buswidth; + u8 dtr : 1; u8 opcode; } cmd; struct { u8 nbytes; u8 buswidth; + u8 dtr : 1; u64 val; } addr; struct { u8 nbytes; u8 buswidth; + u8 dtr : 1; } dummy; struct { u8 buswidth; + u8 dtr : 1; enum spi_mem_data_dir dir; unsigned int nbytes; union { -- cgit v1.2.3 From caf72df48be32c39f74287976ae843501ae06949 Mon Sep 17 00:00:00 2001 From: Pratyush Yadav Date: Wed, 24 Jun 2020 00:00:15 +0530 Subject: spi: spi-mem: allow specifying a command's extension In xSPI mode, flashes expect 2-byte opcodes. The second byte is called the "command extension". There can be 3 types of extensions in xSPI: repeat, invert, and hex. When the extension type is "repeat", the same opcode is sent twice. When it is "invert", the second byte is the inverse of the opcode. When it is "hex" an additional opcode byte based is sent with the command whose value can be anything. So, make opcode a 16-bit value and add a 'nbytes', similar to how multiple address widths are handled. Some places use sizeof(op->cmd.opcode). Replace them with op->cmd.nbytes The spi-mxic and spi-zynq-qspi drivers directly use op->cmd.opcode as a buffer. Now that opcode is a 2-byte field, this can result in different behaviour depending on if the machine is little endian or big endian. Extract the opcode in a local 1-byte variable and use that as the buffer instead. Both these drivers would reject multi-byte opcodes in their supports_op() hook anyway, so we only need to worry about single-byte opcodes for now. The above two changes are put in this commit to keep the series bisectable. 
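As an illustration only (the opcode, extension and cycle counts below are made-up values, not taken from this patch), a caller could describe an octal DTR operation with a two-byte command once both changes are in place:

  struct spi_mem_op op = {
          .cmd = {
                  .nbytes = 2,            /* opcode + command extension */
                  .opcode = 0xee11,       /* 0xee with an "invert" extension */
                  .buswidth = 8,
                  .dtr = 1,
          },
          .addr = { .nbytes = 4, .val = 0x0, .buswidth = 8, .dtr = 1 },
          .dummy = { .nbytes = 16, .buswidth = 8, .dtr = 1 },
          .data = {
                  .dir = SPI_MEM_DATA_IN,
                  .nbytes = 256,
                  .buswidth = 8,
                  .dtr = 1,
          },
  };

Controllers relying on spi_mem_default_supports_op() keep rejecting such an op, since that helper now refuses both DTR phases and multi-byte opcodes.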
Signed-off-by: Pratyush Yadav Reviewed-by: Tudor Ambarus Link: https://lore.kernel.org/r/20200623183030.26591-3-p.yadav@ti.com Signed-off-by: Mark Brown --- drivers/spi/spi-mem.c | 13 +++++++------ drivers/spi/spi-mtk-nor.c | 4 ++-- drivers/spi/spi-mxic.c | 3 ++- drivers/spi/spi-zynq-qspi.c | 11 ++++++----- include/linux/spi/spi-mem.h | 6 +++++- 5 files changed, 22 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c index 93e255287ab9..ef53290b7d24 100644 --- a/drivers/spi/spi-mem.c +++ b/drivers/spi/spi-mem.c @@ -159,6 +159,9 @@ bool spi_mem_default_supports_op(struct spi_mem *mem, if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr) return false; + if (op->cmd.nbytes != 1) + return false; + return true; } EXPORT_SYMBOL_GPL(spi_mem_default_supports_op); @@ -173,7 +176,7 @@ static bool spi_mem_buswidth_is_valid(u8 buswidth) static int spi_mem_check_op(const struct spi_mem_op *op) { - if (!op->cmd.buswidth) + if (!op->cmd.buswidth || !op->cmd.nbytes) return -EINVAL; if ((op->addr.nbytes && !op->addr.buswidth) || @@ -309,8 +312,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) return ret; } - tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes + - op->dummy.nbytes; + tmpbufsize = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes; /* * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so @@ -325,7 +327,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) tmpbuf[0] = op->cmd.opcode; xfers[xferpos].tx_buf = tmpbuf; - xfers[xferpos].len = sizeof(op->cmd.opcode); + xfers[xferpos].len = op->cmd.nbytes; xfers[xferpos].tx_nbits = op->cmd.buswidth; spi_message_add_tail(&xfers[xferpos], &msg); xferpos++; @@ -427,8 +429,7 @@ int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) return ctlr->mem_ops->adjust_op_size(mem, op); if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) { - len = sizeof(op->cmd.opcode) + op->addr.nbytes + - op->dummy.nbytes; + len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes; if (len > spi_max_transfer_size(mem->spi)) return -EINVAL; diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c index 7bc302b50396..d5f393871619 100644 --- a/drivers/spi/spi-mtk-nor.c +++ b/drivers/spi/spi-mtk-nor.c @@ -195,7 +195,7 @@ static int mtk_nor_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) } } - len = MTK_NOR_PRG_MAX_SIZE - sizeof(op->cmd.opcode) - op->addr.nbytes - + len = MTK_NOR_PRG_MAX_SIZE - op->cmd.nbytes - op->addr.nbytes - op->dummy.nbytes; if (op->data.nbytes > len) op->data.nbytes = len; @@ -219,7 +219,7 @@ static bool mtk_nor_supports_op(struct spi_mem *mem, (op->dummy.buswidth == 0) && (op->data.buswidth == 1); } - len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes; + len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes; if ((len > MTK_NOR_PRG_MAX_SIZE) || ((op->data.nbytes) && (len == MTK_NOR_PRG_MAX_SIZE))) return false; diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c index 69491f3a515d..8c630acb0110 100644 --- a/drivers/spi/spi-mxic.c +++ b/drivers/spi/spi-mxic.c @@ -356,6 +356,7 @@ static int mxic_spi_mem_exec_op(struct spi_mem *mem, int nio = 1, i, ret; u32 ss_ctrl; u8 addr[8]; + u8 opcode = op->cmd.opcode; ret = mxic_spi_set_freq(mxic, mem->spi->max_speed_hz); if (ret) @@ -393,7 +394,7 @@ static int mxic_spi_mem_exec_op(struct spi_mem *mem, writel(readl(mxic->regs + HC_CFG) | HC_CFG_MAN_CS_ASSERT, mxic->regs + HC_CFG); - ret = mxic_spi_data_xfer(mxic, 
&op->cmd.opcode, NULL, 1); + ret = mxic_spi_data_xfer(mxic, &opcode, NULL, 1); if (ret) goto out; diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c index 17641157354d..bbf3d90561f5 100644 --- a/drivers/spi/spi-zynq-qspi.c +++ b/drivers/spi/spi-zynq-qspi.c @@ -527,20 +527,21 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem, struct zynq_qspi *xqspi = spi_controller_get_devdata(mem->spi->master); int err = 0, i; u8 *tmpbuf; + u8 opcode = op->cmd.opcode; dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n", - op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth, + opcode, op->cmd.buswidth, op->addr.buswidth, op->dummy.buswidth, op->data.buswidth); zynq_qspi_chipselect(mem->spi, true); zynq_qspi_config_op(xqspi, mem->spi); - if (op->cmd.opcode) { + if (op->cmd.nbytes) { reinit_completion(&xqspi->data_completion); - xqspi->txbuf = (u8 *)&op->cmd.opcode; + xqspi->txbuf = &opcode; xqspi->rxbuf = NULL; - xqspi->tx_bytes = sizeof(op->cmd.opcode); - xqspi->rx_bytes = sizeof(op->cmd.opcode); + xqspi->tx_bytes = op->cmd.nbytes; + xqspi->rx_bytes = op->cmd.nbytes; zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true); zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET, ZYNQ_QSPI_IXR_RXTX_MASK); diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index e3dcb956bf61..159463cc659c 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h @@ -17,6 +17,7 @@ { \ .buswidth = __buswidth, \ .opcode = __opcode, \ + .nbytes = 1, \ } #define SPI_MEM_OP_ADDR(__nbytes, __val, __buswidth) \ @@ -69,6 +70,8 @@ enum spi_mem_data_dir { /** * struct spi_mem_op - describes a SPI memory operation + * @cmd.nbytes: number of opcode bytes (only 1 or 2 are valid). The opcode is + * sent MSB-first. * @cmd.buswidth: number of IO lines used to transmit the command * @cmd.opcode: operation opcode * @cmd.dtr: whether the command opcode should be sent in DTR mode or not @@ -94,9 +97,10 @@ enum spi_mem_data_dir { */ struct spi_mem_op { struct { + u8 nbytes; u8 buswidth; u8 dtr : 1; - u8 opcode; + u16 opcode; } cmd; struct { -- cgit v1.2.3 From 079ef53673f2e3b3ee1728800311f20f28eed4f7 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Tue, 14 Jul 2020 12:25:33 +0200 Subject: bpf: Fix build for disabled CONFIG_DEBUG_INFO_BTF option Stephen reported following linker warnings on powerpc build: ld: warning: orphan section `.BTF_ids' from `kernel/trace/bpf_trace.o' being placed in section `.BTF_ids' ld: warning: orphan section `.BTF_ids' from `kernel/bpf/btf.o' being placed in section `.BTF_ids' ld: warning: orphan section `.BTF_ids' from `kernel/bpf/stackmap.o' being placed in section `.BTF_ids' ld: warning: orphan section `.BTF_ids' from `net/core/filter.o' being placed in section `.BTF_ids' ld: warning: orphan section `.BTF_ids' from `kernel/trace/bpf_trace.o' being placed in section `.BTF_ids' It's because we generated .BTF_ids section even when CONFIG_DEBUG_INFO_BTF is not enabled. Fixing this by generating empty btf_id arrays for this case. 
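For reference, a typical user of these macros (the identifier below is only an example) declares a list like:

  BTF_ID_LIST(bpf_skb_output_btf_ids)
  BTF_ID(struct, sk_buff)

With CONFIG_DEBUG_INFO_BTF=y this emits entries into the .BTF_ids section for resolve_btfids to patch; with the option disabled it now degrades to a plain zero-filled 'static u32 bpf_skb_output_btf_ids[5];', so no orphan section is created and code indexing the array still builds and links.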
Reported-by: Stephen Rothwell Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Tested-by: Geert Uytterhoeven Link: https://lore.kernel.org/bpf/20200714102534.299280-1-jolsa@kernel.org --- include/linux/btf_ids.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'include') diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h index fe019774f8a7..b3c73db9587c 100644 --- a/include/linux/btf_ids.h +++ b/include/linux/btf_ids.h @@ -3,6 +3,8 @@ #ifndef _LINUX_BTF_IDS_H #define _LINUX_BTF_IDS_H +#ifdef CONFIG_DEBUG_INFO_BTF + #include /* for __PASTE */ /* @@ -83,5 +85,12 @@ asm( \ ".zero 4 \n" \ ".popsection; \n"); +#else + +#define BTF_ID_LIST(name) static u32 name[5]; +#define BTF_ID(prefix, name) +#define BTF_ID_UNUSED + +#endif /* CONFIG_DEBUG_INFO_BTF */ #endif -- cgit v1.2.3 From 11bb2f7a45909f4f64afe471875672ae1b84a380 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Tue, 14 Jul 2020 12:25:34 +0200 Subject: bpf: Fix cross build for CONFIG_DEBUG_INFO_BTF option Stephen and 0-DAY CI Kernel Test Service reported broken cross build for arm (arm-linux-gnueabi-gcc (GCC) 9.3.0), with following output: /tmp/ccMS5uth.s: Assembler messages: /tmp/ccMS5uth.s:69: Error: unrecognized symbol type "" /tmp/ccMS5uth.s:82: Error: unrecognized symbol type "" Having '@object' for .type diretive is wrong because '@' is comment character for some architectures. Using STT_OBJECT instead that should work everywhere. Also using HOST* variables to build resolve_btfids so it's properly build in crossbuilds (stolen from objtool's Makefile). Reported-by: kernel test robot Reported-by: Stephen Rothwell Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Tested-by: Geert Uytterhoeven Link: https://lore.kernel.org/bpf/20200714102534.299280-2-jolsa@kernel.org --- include/linux/btf_ids.h | 2 +- tools/bpf/resolve_btfids/Makefile | 14 ++++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h index b3c73db9587c..1cdb56950ffe 100644 --- a/include/linux/btf_ids.h +++ b/include/linux/btf_ids.h @@ -23,7 +23,7 @@ asm( \ ".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ ".local " #symbol " ; \n" \ -".type " #symbol ", @object; \n" \ +".type " #symbol ", STT_OBJECT; \n" \ ".size " #symbol ", 4; \n" \ #symbol ": \n" \ ".zero 4 \n" \ diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile index 948378ca73d4..a88cd4426398 100644 --- a/tools/bpf/resolve_btfids/Makefile +++ b/tools/bpf/resolve_btfids/Makefile @@ -16,6 +16,20 @@ else MAKEFLAGS=--no-print-directory endif +# always use the host compiler +ifneq ($(LLVM),) +HOSTAR ?= llvm-ar +HOSTCC ?= clang +HOSTLD ?= ld.lld +else +HOSTAR ?= ar +HOSTCC ?= gcc +HOSTLD ?= ld +endif +AR = $(HOSTAR) +CC = $(HOSTCC) +LD = $(HOSTLD) + OUTPUT ?= $(srctree)/tools/bpf/resolve_btfids/ LIBBPF_SRC := $(srctree)/tools/lib/bpf/ -- cgit v1.2.3 From 59679d9933ab897d197297eaf37e5b3788c052a5 Mon Sep 17 00:00:00 2001 From: He Zhe Date: Mon, 6 Jul 2020 17:52:24 +0800 Subject: freezer: Add unsafe version of freezable_schedule_timeout_interruptible() for NFS commit 0688e64bc600 ("NFS: Allow signal interruption of NFS4ERR_DELAYed operations") introduces nfs4_delay_interruptible which also needs an _unsafe version to avoid the following call trace for the same reason explained in commit 416ad3c9c006 ("freezer: add unsafe versions of freezable helpers for NFS") CPU: 4 PID: 3968 Comm: rm Tainted: G W 5.8.0-rc4 #1 Hardware name: Marvell OcteonTX CN96XX board (DT) Call 
trace: dump_backtrace+0x0/0x1dc show_stack+0x20/0x30 dump_stack+0xdc/0x150 debug_check_no_locks_held+0x98/0xa0 nfs4_delay_interruptible+0xd8/0x120 nfs4_handle_exception+0x130/0x170 nfs4_proc_rmdir+0x8c/0x220 nfs_rmdir+0xa4/0x360 vfs_rmdir.part.0+0x6c/0x1b0 do_rmdir+0x18c/0x210 __arm64_sys_unlinkat+0x64/0x7c el0_svc_common.constprop.0+0x7c/0x110 do_el0_svc+0x24/0xa0 el0_sync_handler+0x13c/0x1b8 el0_sync+0x158/0x180 Signed-off-by: He Zhe Signed-off-by: Rafael J. Wysocki --- fs/nfs/nfs4proc.c | 2 +- include/linux/freezer.h | 14 ++++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index e32717fd1169..15ecfa474e37 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -414,7 +414,7 @@ static int nfs4_delay_interruptible(long *timeout) { might_sleep(); - freezable_schedule_timeout_interruptible(nfs4_update_delay(timeout)); + freezable_schedule_timeout_interruptible_unsafe(nfs4_update_delay(timeout)); if (!signal_pending(current)) return 0; return __fatal_signal_pending(current) ? -EINTR :-ERESTARTSYS; diff --git a/include/linux/freezer.h b/include/linux/freezer.h index 21f5aa0b217f..27828145ca09 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h @@ -207,6 +207,17 @@ static inline long freezable_schedule_timeout_interruptible(long timeout) return __retval; } +/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */ +static inline long freezable_schedule_timeout_interruptible_unsafe(long timeout) +{ + long __retval; + + freezer_do_not_count(); + __retval = schedule_timeout_interruptible(timeout); + freezer_count_unsafe(); + return __retval; +} + /* Like schedule_timeout_killable(), but should not block the freezer. */ static inline long freezable_schedule_timeout_killable(long timeout) { @@ -285,6 +296,9 @@ static inline void set_freezable(void) {} #define freezable_schedule_timeout_interruptible(timeout) \ schedule_timeout_interruptible(timeout) +#define freezable_schedule_timeout_interruptible_unsafe(timeout) \ + schedule_timeout_interruptible(timeout) + #define freezable_schedule_timeout_killable(timeout) \ schedule_timeout_killable(timeout) -- cgit v1.2.3 From cf7c52748f64606f5f9111e7cbdb2ffb281a60af Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Tue, 14 Jul 2020 09:34:47 +0200 Subject: switchdev: mrp: Extend switchdev API for MRP Interconnect Extend switchdev API to add support for MRP interconnect. The HW is notified in the following cases: SWITCHDEV_OBJ_ID_IN_ROLE_MRP: This is used when the interconnect role of the node changes. The supported roles are MIM and MIC. SWITCHDEV_OBJ_ID_IN_STATE_MRP: This is used when the interconnect ring changes it states to open or closed. SWITCHDEV_OBJ_ID_IN_TEST_MRP: This is used to start/stop sending MRP_InTest frames on all MRP ports. This is called only on nodes that have the interconnect role MIM. Signed-off-by: Horatiu Vultur Reviewed-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- include/net/switchdev.h | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) (limited to 'include') diff --git a/include/net/switchdev.h b/include/net/switchdev.h index b8c059b4e06d..ff2246914301 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -76,6 +76,10 @@ enum switchdev_obj_id { SWITCHDEV_OBJ_ID_RING_TEST_MRP, SWITCHDEV_OBJ_ID_RING_ROLE_MRP, SWITCHDEV_OBJ_ID_RING_STATE_MRP, + SWITCHDEV_OBJ_ID_IN_TEST_MRP, + SWITCHDEV_OBJ_ID_IN_ROLE_MRP, + SWITCHDEV_OBJ_ID_IN_STATE_MRP, + #endif }; @@ -155,6 +159,40 @@ struct switchdev_obj_ring_state_mrp { #define SWITCHDEV_OBJ_RING_STATE_MRP(OBJ) \ container_of((OBJ), struct switchdev_obj_ring_state_mrp, obj) +/* SWITCHDEV_OBJ_ID_IN_TEST_MRP */ +struct switchdev_obj_in_test_mrp { + struct switchdev_obj obj; + /* The value is in us and a value of 0 represents to stop */ + u32 interval; + u32 in_id; + u32 period; + u8 max_miss; +}; + +#define SWITCHDEV_OBJ_IN_TEST_MRP(OBJ) \ + container_of((OBJ), struct switchdev_obj_in_test_mrp, obj) + +/* SWICHDEV_OBJ_ID_IN_ROLE_MRP */ +struct switchdev_obj_in_role_mrp { + struct switchdev_obj obj; + struct net_device *i_port; + u32 ring_id; + u16 in_id; + u8 in_role; +}; + +#define SWITCHDEV_OBJ_IN_ROLE_MRP(OBJ) \ + container_of((OBJ), struct switchdev_obj_in_role_mrp, obj) + +struct switchdev_obj_in_state_mrp { + struct switchdev_obj obj; + u32 in_id; + u8 in_state; +}; + +#define SWITCHDEV_OBJ_IN_STATE_MRP(OBJ) \ + container_of((OBJ), struct switchdev_obj_in_state_mrp, obj) + #endif typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj); -- cgit v1.2.3 From 2801758391ba6b0c20e253b956355e1b15ad85a2 Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Tue, 14 Jul 2020 09:34:48 +0200 Subject: bridge: uapi: mrp: Extend MRP attributes for MRP interconnect Extend the existing MRP netlink attributes to allow to configure MRP Interconnect: IFLA_BRIDGE_MRP_IN_ROLE - the parameter type is br_mrp_in_role which contains the interconnect id, the ring id, the interconnect role(MIM or MIC) and the port ifindex that represents the interconnect port. IFLA_BRIDGE_MRP_IN_STATE - the parameter type is br_mrp_in_state which contains the interconnect id and the interconnect state. IFLA_BRIDGE_MRP_IN_TEST - the parameter type is br_mrp_start_in_test which contains the interconnect id, the interval at which to send MRP_InTest frames, how many test frames can be missed before declaring the interconnect ring open and the period which represents for how long to send MRP_InTest frames. Signed-off-by: Horatiu Vultur Acked-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- include/uapi/linux/if_bridge.h | 53 +++++++++++++++++++++++++++++++++++++++++ include/uapi/linux/mrp_bridge.h | 38 +++++++++++++++++++++++++++++ 2 files changed, 91 insertions(+) (limited to 'include') diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index c114c1c2bd53..d840a3e37a37 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -167,6 +167,9 @@ enum { IFLA_BRIDGE_MRP_RING_ROLE, IFLA_BRIDGE_MRP_START_TEST, IFLA_BRIDGE_MRP_INFO, + IFLA_BRIDGE_MRP_IN_ROLE, + IFLA_BRIDGE_MRP_IN_STATE, + IFLA_BRIDGE_MRP_START_IN_TEST, __IFLA_BRIDGE_MRP_MAX, }; @@ -245,6 +248,37 @@ enum { #define IFLA_BRIDGE_MRP_INFO_MAX (__IFLA_BRIDGE_MRP_INFO_MAX - 1) +enum { + IFLA_BRIDGE_MRP_IN_STATE_UNSPEC, + IFLA_BRIDGE_MRP_IN_STATE_IN_ID, + IFLA_BRIDGE_MRP_IN_STATE_STATE, + __IFLA_BRIDGE_MRP_IN_STATE_MAX, +}; + +#define IFLA_BRIDGE_MRP_IN_STATE_MAX (__IFLA_BRIDGE_MRP_IN_STATE_MAX - 1) + +enum { + IFLA_BRIDGE_MRP_IN_ROLE_UNSPEC, + IFLA_BRIDGE_MRP_IN_ROLE_RING_ID, + IFLA_BRIDGE_MRP_IN_ROLE_IN_ID, + IFLA_BRIDGE_MRP_IN_ROLE_ROLE, + IFLA_BRIDGE_MRP_IN_ROLE_I_IFINDEX, + __IFLA_BRIDGE_MRP_IN_ROLE_MAX, +}; + +#define IFLA_BRIDGE_MRP_IN_ROLE_MAX (__IFLA_BRIDGE_MRP_IN_ROLE_MAX - 1) + +enum { + IFLA_BRIDGE_MRP_START_IN_TEST_UNSPEC, + IFLA_BRIDGE_MRP_START_IN_TEST_IN_ID, + IFLA_BRIDGE_MRP_START_IN_TEST_INTERVAL, + IFLA_BRIDGE_MRP_START_IN_TEST_MAX_MISS, + IFLA_BRIDGE_MRP_START_IN_TEST_PERIOD, + __IFLA_BRIDGE_MRP_START_IN_TEST_MAX, +}; + +#define IFLA_BRIDGE_MRP_START_IN_TEST_MAX (__IFLA_BRIDGE_MRP_START_IN_TEST_MAX - 1) + struct br_mrp_instance { __u32 ring_id; __u32 p_ifindex; @@ -270,6 +304,25 @@ struct br_mrp_start_test { __u32 monitor; }; +struct br_mrp_in_state { + __u32 in_state; + __u16 in_id; +}; + +struct br_mrp_in_role { + __u32 ring_id; + __u32 in_role; + __u32 i_ifindex; + __u16 in_id; +}; + +struct br_mrp_start_in_test { + __u32 interval; + __u32 max_miss; + __u32 period; + __u16 in_id; +}; + struct bridge_stp_xstats { __u64 transition_blk; __u64 transition_fwd; diff --git a/include/uapi/linux/mrp_bridge.h b/include/uapi/linux/mrp_bridge.h index bee366540212..6aeb13ef0b1e 100644 --- a/include/uapi/linux/mrp_bridge.h +++ b/include/uapi/linux/mrp_bridge.h @@ -21,11 +21,22 @@ enum br_mrp_ring_role_type { BR_MRP_RING_ROLE_MRA, }; +enum br_mrp_in_role_type { + BR_MRP_IN_ROLE_DISABLED, + BR_MRP_IN_ROLE_MIC, + BR_MRP_IN_ROLE_MIM, +}; + enum br_mrp_ring_state_type { BR_MRP_RING_STATE_OPEN, BR_MRP_RING_STATE_CLOSED, }; +enum br_mrp_in_state_type { + BR_MRP_IN_STATE_OPEN, + BR_MRP_IN_STATE_CLOSED, +}; + enum br_mrp_port_state_type { BR_MRP_PORT_STATE_DISABLED, BR_MRP_PORT_STATE_BLOCKED, @@ -36,6 +47,7 @@ enum br_mrp_port_state_type { enum br_mrp_port_role_type { BR_MRP_PORT_ROLE_PRIMARY, BR_MRP_PORT_ROLE_SECONDARY, + BR_MRP_PORT_ROLE_INTER, }; enum br_mrp_tlv_header_type { @@ -45,6 +57,10 @@ enum br_mrp_tlv_header_type { BR_MRP_TLV_HEADER_RING_TOPO = 0x3, BR_MRP_TLV_HEADER_RING_LINK_DOWN = 0x4, BR_MRP_TLV_HEADER_RING_LINK_UP = 0x5, + BR_MRP_TLV_HEADER_IN_TEST = 0x6, + BR_MRP_TLV_HEADER_IN_TOPO = 0x7, + BR_MRP_TLV_HEADER_IN_LINK_DOWN = 0x8, + BR_MRP_TLV_HEADER_IN_LINK_UP = 0x9, BR_MRP_TLV_HEADER_OPTION = 0x7f, }; @@ -118,4 +134,26 @@ struct br_mrp_oui_hdr { __u8 oui[MRP_OUI_LENGTH]; }; +struct br_mrp_in_test_hdr { + __be16 id; + __u8 sa[ETH_ALEN]; + __be16 port_role; + __be16 state; + __be16 transitions; + __be32 timestamp; +}; + +struct br_mrp_in_topo_hdr { + __u8 sa[ETH_ALEN]; + __be16 id; + __be16 interval; +}; + +struct br_mrp_in_link_hdr { + __u8 
sa[ETH_ALEN]; + __be16 port_role; + __be16 id; + __be16 interval; +}; + #endif -- cgit v1.2.3 From 43364ef1a12a4236b7956b076649ddd080764cd1 Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Tue, 14 Jul 2020 09:34:49 +0200 Subject: bridge: mrp: Extend bridge interface This patch adds a new flag(BR_MRP_LOST_IN_CONT) to the net bridge ports. This bit will be set when the port lost the continuity of MRP_InTest frames. Signed-off-by: Horatiu Vultur Acked-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- include/linux/if_bridge.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index b3a8d3054af0..6479a38e52fa 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -49,6 +49,7 @@ struct br_ip_list { #define BR_ISOLATED BIT(16) #define BR_MRP_AWARE BIT(17) #define BR_MRP_LOST_CONT BIT(18) +#define BR_MRP_LOST_IN_CONT BIT(19) #define BR_DEFAULT_AGEING_TIME (300 * HZ) -- cgit v1.2.3 From 559139cb0405d38816e5e725adee9000db993235 Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Tue, 14 Jul 2020 09:34:56 +0200 Subject: bridge: uapi: mrp: Extend MRP_INFO attributes for interconnect status Extend the existing MRP_INFO to return status of MRP interconnect. In case there is no MRP interconnect on the node then the role will be disabled so the other attributes can be ignored. Signed-off-by: Horatiu Vultur Acked-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- include/uapi/linux/if_bridge.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index d840a3e37a37..c1227aecd38f 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -243,6 +243,11 @@ enum { IFLA_BRIDGE_MRP_INFO_TEST_INTERVAL, IFLA_BRIDGE_MRP_INFO_TEST_MAX_MISS, IFLA_BRIDGE_MRP_INFO_TEST_MONITOR, + IFLA_BRIDGE_MRP_INFO_I_IFINDEX, + IFLA_BRIDGE_MRP_INFO_IN_STATE, + IFLA_BRIDGE_MRP_INFO_IN_ROLE, + IFLA_BRIDGE_MRP_INFO_IN_TEST_INTERVAL, + IFLA_BRIDGE_MRP_INFO_IN_TEST_MAX_MISS, __IFLA_BRIDGE_MRP_INFO_MAX, }; -- cgit v1.2.3 From ffb3adba64801f70c472303c9e386eb5eaec193d Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Tue, 14 Jul 2020 09:34:58 +0200 Subject: net: bridge: Add port attribute IFLA_BRPORT_MRP_IN_OPEN This patch adds a new port attribute, IFLA_BRPORT_MRP_IN_OPEN, which allows to notify the userspace when the node lost the contiuity of MRP_InTest frames. Signed-off-by: Horatiu Vultur Acked-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- include/uapi/linux/if_link.h | 1 + net/bridge/br_netlink.c | 3 +++ tools/include/uapi/linux/if_link.h | 1 + 3 files changed, 5 insertions(+) (limited to 'include') diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index cc185a007ade..26842ffd0501 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -344,6 +344,7 @@ enum { IFLA_BRPORT_ISOLATED, IFLA_BRPORT_BACKUP_PORT, IFLA_BRPORT_MRP_RING_OPEN, + IFLA_BRPORT_MRP_IN_OPEN, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index c532fa65c983..147d52596e17 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -152,6 +152,7 @@ static inline size_t br_port_info_size(void) #endif + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_GROUP_FWD_MASK */ + nla_total_size(sizeof(u8)) /* IFLA_BRPORT_MRP_RING_OPEN */ + + nla_total_size(sizeof(u8)) /* IFLA_BRPORT_MRP_IN_OPEN */ + 0; } @@ -216,6 +217,8 @@ static int br_port_fill_attrs(struct sk_buff *skb, !!(p->flags & BR_NEIGH_SUPPRESS)) || nla_put_u8(skb, IFLA_BRPORT_MRP_RING_OPEN, !!(p->flags & BR_MRP_LOST_CONT)) || + nla_put_u8(skb, IFLA_BRPORT_MRP_IN_OPEN, + !!(p->flags & BR_MRP_LOST_IN_CONT)) || nla_put_u8(skb, IFLA_BRPORT_ISOLATED, !!(p->flags & BR_ISOLATED))) return -EMSGSIZE; diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h index cafedbbfefbe..781e482dc499 100644 --- a/tools/include/uapi/linux/if_link.h +++ b/tools/include/uapi/linux/if_link.h @@ -344,6 +344,7 @@ enum { IFLA_BRPORT_ISOLATED, IFLA_BRPORT_BACKUP_PORT, IFLA_BRPORT_MRP_RING_OPEN, + IFLA_BRPORT_MRP_IN_OPEN, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) -- cgit v1.2.3 From 7cf97b12545503992020796c74bd84078eb39299 Mon Sep 17 00:00:00 2001 From: Sargun Dhillon Date: Tue, 2 Jun 2020 18:10:43 -0700 Subject: seccomp: Introduce addfd ioctl to seccomp user notifier The current SECCOMP_RET_USER_NOTIF API allows for syscall supervision over an fd. It is often used in settings where a supervising task emulates syscalls on behalf of a supervised task in userspace, either to further restrict the supervisee's syscall abilities or to circumvent kernel enforced restrictions the supervisor deems safe to lift (e.g. actually performing a mount(2) for an unprivileged container). While SECCOMP_RET_USER_NOTIF allows for the interception of any syscall, only a certain subset of syscalls could be correctly emulated. Over the last few development cycles, the set of syscalls which can't be emulated has been reduced due to the addition of pidfd_getfd(2). With this we are now able to, for example, intercept syscalls that require the supervisor to operate on file descriptors of the supervisee such as connect(2). However, syscalls that cause new file descriptors to be installed can not currently be correctly emulated since there is no way for the supervisor to inject file descriptors into the supervisee. This patch adds a new addfd ioctl to remove this restriction by allowing the supervisor to install file descriptors into the intercepted task. By implementing this feature via seccomp the supervisor effectively instructs the supervisee to install a set of file descriptors into its own file descriptor table during the intercepted syscall. This way it is possible to intercept syscalls such as open() or accept(), and install (or replace, like dup2(2)) the supervisor's resulting fd into the supervisee. 
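A minimal sketch of the supervisor side (names such as listener_fd, req and local_fd are placeholders, not part of this patch):

  #include <fcntl.h>
  #include <sys/ioctl.h>
  #include <linux/seccomp.h>

  /* Inject local_fd into the supervisee while servicing notification req. */
  static int inject_fd(int listener_fd, const struct seccomp_notif *req,
                       int local_fd)
  {
          struct seccomp_notif_addfd addfd = {
                  .id = req->id,                  /* notification being handled */
                  .flags = 0,                     /* kernel picks the remote fd number */
                  .srcfd = local_fd,              /* fd valid in the supervisor */
                  .newfd = 0,
                  .newfd_flags = O_CLOEXEC,       /* only O_CLOEXEC is allowed */
          };

          /* On success the return value is the fd number in the supervisee. */
          return ioctl(listener_fd, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
  }

The ioctl must be issued after the notification has been received and before it is replied to, otherwise it fails with EINPROGRESS.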
One replacement use-case would be to redirect the stdout and stderr of a supervisee into log file descriptors opened by the supervisor. The ioctl handling is based on the discussions[1] of how Extensible Arguments should interact with ioctls. Instead of building size into the addfd structure, make it a function of the ioctl command (which is how sizes are normally passed to ioctls). To support forward and backward compatibility, just mask out the direction and size, and match everything. The size (and any future direction) checks are done along with copy_struct_from_user() logic. As a note, the seccomp_notif_addfd structure is laid out based on 8-byte alignment without requiring packing as there have been packing issues with uapi highlighted before[2][3]. Although we could overload the newfd field and use -1 to indicate that it is not to be used, doing so requires changing the size of the fd field, and introduces struct packing complexity. [1]: https://lore.kernel.org/lkml/87o8w9bcaf.fsf@mid.deneb.enyo.de/ [2]: https://lore.kernel.org/lkml/a328b91d-fd8f-4f27-b3c2-91a9c45f18c0@rasmusvillemoes.dk/ [3]: https://lore.kernel.org/lkml/20200612104629.GA15814@ircssh-2.c.rugged-nimbus-611.internal Cc: Christoph Hellwig Cc: Christian Brauner Cc: Tycho Andersen Cc: Jann Horn Cc: Robert Sesek Cc: Chris Palmer Cc: Al Viro Cc: linux-fsdevel@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: linux-api@vger.kernel.org Suggested-by: Matt Denton Link: https://lore.kernel.org/r/20200603011044.7972-4-sargun@sargun.me Signed-off-by: Sargun Dhillon Reviewed-by: Will Drewry Co-developed-by: Kees Cook Signed-off-by: Kees Cook --- include/linux/seccomp.h | 4 + include/uapi/linux/seccomp.h | 22 ++++++ kernel/seccomp.c | 175 ++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 199 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index babcd6c02d09..881c90b6aa25 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h @@ -10,6 +10,10 @@ SECCOMP_FILTER_FLAG_NEW_LISTENER | \ SECCOMP_FILTER_FLAG_TSYNC_ESRCH) +/* sizeof() the first published struct seccomp_notif_addfd */ +#define SECCOMP_NOTIFY_ADDFD_SIZE_VER0 24 +#define SECCOMP_NOTIFY_ADDFD_SIZE_LATEST SECCOMP_NOTIFY_ADDFD_SIZE_VER0 + #ifdef CONFIG_SECCOMP #include diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h index 965290f7dcc2..6ba18b82a02e 100644 --- a/include/uapi/linux/seccomp.h +++ b/include/uapi/linux/seccomp.h @@ -113,6 +113,25 @@ struct seccomp_notif_resp { __u32 flags; }; +/* valid flags for seccomp_notif_addfd */ +#define SECCOMP_ADDFD_FLAG_SETFD (1UL << 0) /* Specify remote fd */ + +/** + * struct seccomp_notif_addfd + * @id: The ID of the seccomp notification + * @flags: SECCOMP_ADDFD_FLAG_* + * @srcfd: The local fd number + * @newfd: Optional remote FD number if SETFD option is set, otherwise 0. + * @newfd_flags: The O_* flags the remote FD should have applied + */ +struct seccomp_notif_addfd { + __u64 id; + __u32 flags; + __u32 srcfd; + __u32 newfd; + __u32 newfd_flags; +}; + #define SECCOMP_IOC_MAGIC '!' 
#define SECCOMP_IO(nr) _IO(SECCOMP_IOC_MAGIC, nr) #define SECCOMP_IOR(nr, type) _IOR(SECCOMP_IOC_MAGIC, nr, type) @@ -124,5 +143,8 @@ struct seccomp_notif_resp { #define SECCOMP_IOCTL_NOTIF_SEND SECCOMP_IOWR(1, \ struct seccomp_notif_resp) #define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOW(2, __u64) +/* On success, the return value is the remote process's added fd number */ +#define SECCOMP_IOCTL_NOTIF_ADDFD SECCOMP_IOW(3, \ + struct seccomp_notif_addfd) #endif /* _UAPI_LINUX_SECCOMP_H */ diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 866a432cd746..3ee59ce0a323 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -87,10 +87,42 @@ struct seccomp_knotif { long val; u32 flags; - /* Signals when this has entered SECCOMP_NOTIFY_REPLIED */ + /* + * Signals when this has changed states, such as the listener + * dying, a new seccomp addfd message, or changing to REPLIED + */ struct completion ready; struct list_head list; + + /* outstanding addfd requests */ + struct list_head addfd; +}; + +/** + * struct seccomp_kaddfd - container for seccomp_addfd ioctl messages + * + * @file: A reference to the file to install in the other task + * @fd: The fd number to install it at. If the fd number is -1, it means the + * installing process should allocate the fd as normal. + * @flags: The flags for the new file descriptor. At the moment, only O_CLOEXEC + * is allowed. + * @ret: The return value of the installing process. It is set to the fd num + * upon success (>= 0). + * @completion: Indicates that the installing process has completed fd + * installation, or gone away (either due to successful + * reply, or signal) + * + */ +struct seccomp_kaddfd { + struct file *file; + int fd; + unsigned int flags; + + /* To only be set on reply */ + int ret; + struct completion completion; + struct list_head list; }; /** @@ -793,6 +825,17 @@ static u64 seccomp_next_notify_id(struct seccomp_filter *filter) return filter->notif->next_id++; } +static void seccomp_handle_addfd(struct seccomp_kaddfd *addfd) +{ + /* + * Remove the notification, and reset the list pointers, indicating + * that it has been handled. + */ + list_del_init(&addfd->list); + addfd->ret = receive_fd_replace(addfd->fd, addfd->file, addfd->flags); + complete(&addfd->completion); +} + static int seccomp_do_user_notification(int this_syscall, struct seccomp_filter *match, const struct seccomp_data *sd) @@ -801,6 +844,7 @@ static int seccomp_do_user_notification(int this_syscall, u32 flags = 0; long ret = 0; struct seccomp_knotif n = {}; + struct seccomp_kaddfd *addfd, *tmp; mutex_lock(&match->notify_lock); err = -ENOSYS; @@ -813,6 +857,7 @@ static int seccomp_do_user_notification(int this_syscall, n.id = seccomp_next_notify_id(match); init_completion(&n.ready); list_add(&n.list, &match->notif->notifications); + INIT_LIST_HEAD(&n.addfd); up(&match->notif->request); wake_up_poll(&match->wqh, EPOLLIN | EPOLLRDNORM); @@ -821,17 +866,34 @@ static int seccomp_do_user_notification(int this_syscall, /* * This is where we wait for a reply from userspace. 
*/ +wait: err = wait_for_completion_interruptible(&n.ready); mutex_lock(&match->notify_lock); if (err == 0) { + /* Check if we were woken up by a addfd message */ + addfd = list_first_entry_or_null(&n.addfd, + struct seccomp_kaddfd, list); + if (addfd && n.state != SECCOMP_NOTIFY_REPLIED) { + seccomp_handle_addfd(addfd); + mutex_unlock(&match->notify_lock); + goto wait; + } ret = n.val; err = n.error; flags = n.flags; } + /* If there were any pending addfd calls, clear them out */ + list_for_each_entry_safe(addfd, tmp, &n.addfd, list) { + /* The process went away before we got a chance to handle it */ + addfd->ret = -ESRCH; + list_del_init(&addfd->list); + complete(&addfd->completion); + } + /* * Note that it's possible the listener died in between the time when - * we were notified of a respons (or a signal) and when we were able to + * we were notified of a response (or a signal) and when we were able to * re-acquire the lock, so only delete from the list if the * notification actually exists. * @@ -1069,6 +1131,11 @@ static int seccomp_notify_release(struct inode *inode, struct file *file) knotif->error = -ENOSYS; knotif->val = 0; + /* + * We do not need to wake up any pending addfd messages, as + * the notifier will do that for us, as this just looks + * like a standard reply. + */ complete(&knotif->ready); } @@ -1233,12 +1300,109 @@ static long seccomp_notify_id_valid(struct seccomp_filter *filter, return ret; } +static long seccomp_notify_addfd(struct seccomp_filter *filter, + struct seccomp_notif_addfd __user *uaddfd, + unsigned int size) +{ + struct seccomp_notif_addfd addfd; + struct seccomp_knotif *knotif; + struct seccomp_kaddfd kaddfd; + int ret; + + BUILD_BUG_ON(sizeof(addfd) < SECCOMP_NOTIFY_ADDFD_SIZE_VER0); + BUILD_BUG_ON(sizeof(addfd) != SECCOMP_NOTIFY_ADDFD_SIZE_LATEST); + + if (size < SECCOMP_NOTIFY_ADDFD_SIZE_VER0 || size >= PAGE_SIZE) + return -EINVAL; + + ret = copy_struct_from_user(&addfd, sizeof(addfd), uaddfd, size); + if (ret) + return ret; + + if (addfd.newfd_flags & ~O_CLOEXEC) + return -EINVAL; + + if (addfd.flags & ~SECCOMP_ADDFD_FLAG_SETFD) + return -EINVAL; + + if (addfd.newfd && !(addfd.flags & SECCOMP_ADDFD_FLAG_SETFD)) + return -EINVAL; + + kaddfd.file = fget(addfd.srcfd); + if (!kaddfd.file) + return -EBADF; + + kaddfd.flags = addfd.newfd_flags; + kaddfd.fd = (addfd.flags & SECCOMP_ADDFD_FLAG_SETFD) ? + addfd.newfd : -1; + init_completion(&kaddfd.completion); + + ret = mutex_lock_interruptible(&filter->notify_lock); + if (ret < 0) + goto out; + + knotif = find_notification(filter, addfd.id); + if (!knotif) { + ret = -ENOENT; + goto out_unlock; + } + + /* + * We do not want to allow for FD injection to occur before the + * notification has been picked up by a userspace handler, or after + * the notification has been replied to. + */ + if (knotif->state != SECCOMP_NOTIFY_SENT) { + ret = -EINPROGRESS; + goto out_unlock; + } + + list_add(&kaddfd.list, &knotif->addfd); + complete(&knotif->ready); + mutex_unlock(&filter->notify_lock); + + /* Now we wait for it to be processed or be interrupted */ + ret = wait_for_completion_interruptible(&kaddfd.completion); + if (ret == 0) { + /* + * We had a successful completion. The other side has already + * removed us from the addfd queue, and + * wait_for_completion_interruptible has a memory barrier upon + * success that lets us read this value directly without + * locking. 
+ */ + ret = kaddfd.ret; + goto out; + } + + mutex_lock(&filter->notify_lock); + /* + * Even though we were woken up by a signal and not a successful + * completion, a completion may have happened in the mean time. + * + * We need to check again if the addfd request has been handled, + * and if not, we will remove it from the queue. + */ + if (list_empty(&kaddfd.list)) + ret = kaddfd.ret; + else + list_del(&kaddfd.list); + +out_unlock: + mutex_unlock(&filter->notify_lock); +out: + fput(kaddfd.file); + + return ret; +} + static long seccomp_notify_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct seccomp_filter *filter = file->private_data; void __user *buf = (void __user *)arg; + /* Fixed-size ioctls */ switch (cmd) { case SECCOMP_IOCTL_NOTIF_RECV: return seccomp_notify_recv(filter, buf); @@ -1247,6 +1411,13 @@ static long seccomp_notify_ioctl(struct file *file, unsigned int cmd, case SECCOMP_IOCTL_NOTIF_ID_VALID_WRONG_DIR: case SECCOMP_IOCTL_NOTIF_ID_VALID: return seccomp_notify_id_valid(filter, buf); + } + + /* Extensible Argument ioctls */ +#define EA_IOCTL(cmd) ((cmd) & ~(IOC_INOUT | IOCSIZE_MASK)) + switch (EA_IOCTL(cmd)) { + case EA_IOCTL(SECCOMP_IOCTL_NOTIF_ADDFD): + return seccomp_notify_addfd(filter, buf, _IOC_SIZE(cmd)); default: return -EINVAL; } -- cgit v1.2.3 From 87db7579ebd5ded337056eb765542eb2608f16e3 Mon Sep 17 00:00:00 2001 From: Philippe Bergheaud Date: Fri, 19 Jun 2020 16:04:39 +0200 Subject: ocxl: control via sysfs whether the FPGA is reloaded on a link reset Some opencapi FPGA images allow to control if the FPGA should be reloaded on the next adapter reset. If it is supported, the image specifies it through a Vendor Specific DVSEC in the config space of function 0. Signed-off-by: Philippe Bergheaud Signed-off-by: Frederic Barrat Reviewed-by: Andrew Donnellan Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200619140439.153962-1-fbarrat@linux.ibm.com --- Documentation/ABI/testing/sysfs-class-ocxl | 11 ++++ drivers/misc/ocxl/config.c | 81 ++++++++++++++++++++++++++++-- drivers/misc/ocxl/ocxl_internal.h | 6 +++ drivers/misc/ocxl/sysfs.c | 35 +++++++++++++ include/misc/ocxl-config.h | 1 + 5 files changed, 129 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/Documentation/ABI/testing/sysfs-class-ocxl b/Documentation/ABI/testing/sysfs-class-ocxl index b5b1fa197592..ae1276efa45a 100644 --- a/Documentation/ABI/testing/sysfs-class-ocxl +++ b/Documentation/ABI/testing/sysfs-class-ocxl @@ -33,3 +33,14 @@ Date: January 2018 Contact: linuxppc-dev@lists.ozlabs.org Description: read/write Give access the global mmio area for the AFU + +What: /sys/class/ocxl//reload_on_reset +Date: February 2020 +Contact: linuxppc-dev@lists.ozlabs.org +Description: read/write + Control whether the FPGA is reloaded on a link reset. Enabled + through a vendor-specific logic block on the FPGA. 
+ 0 Do not reload FPGA image from flash + 1 Reload FPGA image from flash + unavailable + The device does not support this capability diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c index c8e19bfb5ef9..42f7a1298775 100644 --- a/drivers/misc/ocxl/config.c +++ b/drivers/misc/ocxl/config.c @@ -71,6 +71,20 @@ static int find_dvsec_afu_ctrl(struct pci_dev *dev, u8 afu_idx) return 0; } +/** + * get_function_0() - Find a related PCI device (function 0) + * @device: PCI device to match + * + * Returns a pointer to the related device, or null if not found + */ +static struct pci_dev *get_function_0(struct pci_dev *dev) +{ + unsigned int devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0); + + return pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus), + dev->bus->number, devfn); +} + static void read_pasid(struct pci_dev *dev, struct ocxl_fn_config *fn) { u16 val; @@ -159,14 +173,15 @@ static int read_dvsec_afu_info(struct pci_dev *dev, struct ocxl_fn_config *fn) static int read_dvsec_vendor(struct pci_dev *dev) { int pos; - u32 cfg, tlx, dlx; + u32 cfg, tlx, dlx, reset_reload; /* - * vendor specific DVSEC is optional + * vendor specific DVSEC, for IBM images only. Some older + * images may not have it * - * It's currently only used on function 0 to specify the - * version of some logic blocks. Some older images may not - * even have it so we ignore any errors + * It's only used on function 0 to specify the version of some + * logic blocks and to give access to special registers to + * enable host-based flashing. */ if (PCI_FUNC(dev->devfn) != 0) return 0; @@ -178,11 +193,67 @@ static int read_dvsec_vendor(struct pci_dev *dev) pci_read_config_dword(dev, pos + OCXL_DVSEC_VENDOR_CFG_VERS, &cfg); pci_read_config_dword(dev, pos + OCXL_DVSEC_VENDOR_TLX_VERS, &tlx); pci_read_config_dword(dev, pos + OCXL_DVSEC_VENDOR_DLX_VERS, &dlx); + pci_read_config_dword(dev, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD, + &reset_reload); dev_dbg(&dev->dev, "Vendor specific DVSEC:\n"); dev_dbg(&dev->dev, " CFG version = 0x%x\n", cfg); dev_dbg(&dev->dev, " TLX version = 0x%x\n", tlx); dev_dbg(&dev->dev, " DLX version = 0x%x\n", dlx); + dev_dbg(&dev->dev, " ResetReload = 0x%x\n", reset_reload); + return 0; +} + +static int get_dvsec_vendor0(struct pci_dev *dev, struct pci_dev **dev0, + int *out_pos) +{ + int pos; + + if (PCI_FUNC(dev->devfn) != 0) { + dev = get_function_0(dev); + if (!dev) + return -1; + } + pos = find_dvsec(dev, OCXL_DVSEC_VENDOR_ID); + if (!pos) + return -1; + *dev0 = dev; + *out_pos = pos; + return 0; +} + +int ocxl_config_get_reset_reload(struct pci_dev *dev, int *val) +{ + struct pci_dev *dev0; + u32 reset_reload; + int pos; + + if (get_dvsec_vendor0(dev, &dev0, &pos)) + return -1; + + pci_read_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD, + &reset_reload); + *val = !!(reset_reload & BIT(0)); + return 0; +} + +int ocxl_config_set_reset_reload(struct pci_dev *dev, int val) +{ + struct pci_dev *dev0; + u32 reset_reload; + int pos; + + if (get_dvsec_vendor0(dev, &dev0, &pos)) + return -1; + + pci_read_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD, + &reset_reload); + if (val) + reset_reload |= BIT(0); + else + reset_reload &= ~BIT(0); + pci_write_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD, + reset_reload); return 0; } diff --git a/drivers/misc/ocxl/ocxl_internal.h b/drivers/misc/ocxl/ocxl_internal.h index 345bf843a38e..af9a84aeee6f 100644 --- a/drivers/misc/ocxl/ocxl_internal.h +++ b/drivers/misc/ocxl/ocxl_internal.h @@ -112,6 +112,12 @@ void 
ocxl_actag_afu_free(struct ocxl_fn *fn, u32 start, u32 size); */ int ocxl_config_get_pasid_info(struct pci_dev *dev, int *count); +/* + * Control whether the FPGA is reloaded on a link reset + */ +int ocxl_config_get_reset_reload(struct pci_dev *dev, int *val); +int ocxl_config_set_reset_reload(struct pci_dev *dev, int val); + /* * Check if an AFU index is valid for the given function. * diff --git a/drivers/misc/ocxl/sysfs.c b/drivers/misc/ocxl/sysfs.c index 58f1ba264206..25c78df8055d 100644 --- a/drivers/misc/ocxl/sysfs.c +++ b/drivers/misc/ocxl/sysfs.c @@ -51,11 +51,46 @@ static ssize_t contexts_show(struct device *device, afu->pasid_count, afu->pasid_max); } +static ssize_t reload_on_reset_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct ocxl_afu *afu = to_afu(device); + struct ocxl_fn *fn = afu->fn; + struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent); + int val; + + if (ocxl_config_get_reset_reload(pci_dev, &val)) + return scnprintf(buf, PAGE_SIZE, "unavailable\n"); + + return scnprintf(buf, PAGE_SIZE, "%d\n", val); +} + +static ssize_t reload_on_reset_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ocxl_afu *afu = to_afu(device); + struct ocxl_fn *fn = afu->fn; + struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent); + int rc, val; + + rc = kstrtoint(buf, 0, &val); + if (rc || (val != 0 && val != 1)) + return -EINVAL; + + if (ocxl_config_set_reset_reload(pci_dev, val)) + return -ENODEV; + + return count; +} + static struct device_attribute afu_attrs[] = { __ATTR_RO(global_mmio_size), __ATTR_RO(pp_mmio_size), __ATTR_RO(afu_version), __ATTR_RO(contexts), + __ATTR_RW(reload_on_reset), }; static ssize_t global_mmio_read(struct file *filp, struct kobject *kobj, diff --git a/include/misc/ocxl-config.h b/include/misc/ocxl-config.h index 3526fa996a22..ccfd3b463517 100644 --- a/include/misc/ocxl-config.h +++ b/include/misc/ocxl-config.h @@ -41,5 +41,6 @@ #define OCXL_DVSEC_VENDOR_CFG_VERS 0x0C #define OCXL_DVSEC_VENDOR_TLX_VERS 0x10 #define OCXL_DVSEC_VENDOR_DLX_VERS 0x20 +#define OCXL_DVSEC_VENDOR_RESET_RELOAD 0x38 #endif /* _OCXL_CONFIG_H_ */ -- cgit v1.2.3 From 4550569bd779f25398503ad5556f8dc7c1f216c2 Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Wed, 1 Jul 2020 02:43:53 +0800 Subject: soundwire: stream: add helper to startup/shutdown streams To handle streams at the dailink level, expose two helpers that will be called from machine drivers. Reviewed-by: Ranjani Sridharan Reviewed-by: Guennadi Liakhovetski Reviewed-by: Kai Vehmanen Signed-off-by: Pierre-Louis Bossart Signed-off-by: Bard Liao Link: https://lore.kernel.org/r/20200630184356.24939-3-yung-chuan.liao@linux.intel.com Signed-off-by: Vinod Koul --- Documentation/driver-api/soundwire/stream.rst | 11 ++- drivers/soundwire/stream.c | 98 +++++++++++++++++++++++++++ include/linux/soundwire/sdw.h | 2 + 3 files changed, 110 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/Documentation/driver-api/soundwire/stream.rst b/Documentation/driver-api/soundwire/stream.rst index 1b386076402c..8858cea7bfe0 100644 --- a/Documentation/driver-api/soundwire/stream.rst +++ b/Documentation/driver-api/soundwire/stream.rst @@ -293,6 +293,10 @@ per stream. 
From ASoC DPCM framework, this stream state maybe linked to int sdw_alloc_stream(char * stream_name); +The SoundWire core provides a sdw_startup_stream() helper function, +typically called during a dailink .startup() callback, which performs +stream allocation and sets the stream pointer for all DAIs +connected to a stream. SDW_STREAM_CONFIGURED ~~~~~~~~~~~~~~~~~~~~~ @@ -509,7 +513,12 @@ In .shutdown() the data structure maintaining stream state are freed up. void sdw_release_stream(struct sdw_stream_runtime * stream); -Not Supported +The SoundWire core provides a sdw_shutdown_stream() helper function, +typically called during a dailink .shutdown() callback, which clears +the stream pointer for all DAIS connected to a stream and releases the +memory allocated for the stream. + + Not Supported ============= 1. A single port with multiple channels supported cannot be used between two diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c index a9a72574b34a..6bc2ff29c202 100644 --- a/drivers/soundwire/stream.c +++ b/drivers/soundwire/stream.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "bus.h" /* @@ -1826,3 +1827,100 @@ state_err: return ret; } EXPORT_SYMBOL(sdw_deprepare_stream); + +static int set_stream(struct snd_pcm_substream *substream, + struct sdw_stream_runtime *sdw_stream) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_dai *dai; + int ret = 0; + int i; + + /* Set stream pointer on all DAIs */ + for_each_rtd_dais(rtd, i, dai) { + ret = snd_soc_dai_set_sdw_stream(dai, sdw_stream, substream->stream); + if (ret < 0) { + dev_err(rtd->dev, "failed to set stream pointer on dai %s", dai->name); + break; + } + } + + return ret; +} + +/** + * sdw_startup_stream() - Startup SoundWire stream + * + * @stream: Soundwire stream + * + * Documentation/driver-api/soundwire/stream.rst explains this API in detail + */ +int sdw_startup_stream(void *sdw_substream) +{ + struct snd_pcm_substream *substream = sdw_substream; + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct sdw_stream_runtime *sdw_stream; + char *name; + int ret; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + name = kasprintf(GFP_KERNEL, "%s-Playback", substream->name); + else + name = kasprintf(GFP_KERNEL, "%s-Capture", substream->name); + + if (!name) + return -ENOMEM; + + sdw_stream = sdw_alloc_stream(name); + if (!sdw_stream) { + dev_err(rtd->dev, "alloc stream failed for substream DAI %s", substream->name); + ret = -ENOMEM; + goto error; + } + + ret = set_stream(substream, sdw_stream); + if (ret < 0) + goto release_stream; + return 0; + +release_stream: + sdw_release_stream(sdw_stream); + set_stream(substream, NULL); +error: + kfree(name); + return ret; +} +EXPORT_SYMBOL(sdw_startup_stream); + +/** + * sdw_shutdown_stream() - Shutdown SoundWire stream + * + * @stream: Soundwire stream + * + * Documentation/driver-api/soundwire/stream.rst explains this API in detail + */ +void sdw_shutdown_stream(void *sdw_substream) +{ + struct snd_pcm_substream *substream = sdw_substream; + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct sdw_stream_runtime *sdw_stream; + struct snd_soc_dai *dai; + + /* Find stream from first CPU DAI */ + dai = asoc_rtd_to_cpu(rtd, 0); + + sdw_stream = snd_soc_dai_get_sdw_stream(dai, substream->stream); + + if (!sdw_stream) { + dev_err(rtd->dev, "no stream found for DAI %s", dai->name); + return; + } + + /* release memory */ + kfree(sdw_stream->name); + sdw_release_stream(sdw_stream); + + /* clear DAI 
data */ + set_stream(substream, NULL); +} +EXPORT_SYMBOL(sdw_shutdown_stream); diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index 64c9314cb903..4057adf7f049 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -955,10 +955,12 @@ int sdw_stream_remove_master(struct sdw_bus *bus, struct sdw_stream_runtime *stream); int sdw_stream_remove_slave(struct sdw_slave *slave, struct sdw_stream_runtime *stream); +int sdw_startup_stream(void *sdw_substream); int sdw_prepare_stream(struct sdw_stream_runtime *stream); int sdw_enable_stream(struct sdw_stream_runtime *stream); int sdw_disable_stream(struct sdw_stream_runtime *stream); int sdw_deprepare_stream(struct sdw_stream_runtime *stream); +void sdw_shutdown_stream(void *sdw_substream); int sdw_bus_prep_clk_stop(struct sdw_bus *bus); int sdw_bus_clk_stop(struct sdw_bus *bus); int sdw_bus_exit_clk_stop(struct sdw_bus *bus); -- cgit v1.2.3 From 58eeba0bdb52afe5c18ce2a760ca9fe2901943e9 Mon Sep 17 00:00:00 2001 From: Vadim Pasternak Date: Tue, 14 Jul 2020 15:01:53 +0300 Subject: lib/string_helpers: Introduce string_upper() and string_lower() helpers Provide the helpers for string conversions to upper and lower cases. Signed-off-by: Vadim Pasternak Signed-off-by: Andy Shevchenko --- include/linux/string_helpers.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'include') diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h index c28955132234..86f150c2a6b6 100644 --- a/include/linux/string_helpers.h +++ b/include/linux/string_helpers.h @@ -2,6 +2,7 @@ #ifndef _LINUX_STRING_HELPERS_H_ #define _LINUX_STRING_HELPERS_H_ +#include #include struct file; @@ -75,6 +76,20 @@ static inline int string_escape_str_any_np(const char *src, char *dst, return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, only); } +static inline void string_upper(char *dst, const char *src) +{ + do { + *dst++ = toupper(*src); + } while (*src++); +} + +static inline void string_lower(char *dst, const char *src) +{ + do { + *dst++ = tolower(*src); + } while (*src++); +} + char *kstrdup_quotable(const char *src, gfp_t gfp); char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp); char *kstrdup_quotable_file(struct file *file, gfp_t gfp); -- cgit v1.2.3 From 13e52e63a4462e2bd0ef249fb535a599ea158725 Mon Sep 17 00:00:00 2001 From: Vadim Pasternak Date: Tue, 14 Jul 2020 15:01:58 +0300 Subject: platform_data/mlxreg: Add support for complex attributes Add new field 'regnum' to the structure 'mlxreg_core_data' to specify the number of registers occupied by multi-register attribute. 
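As an illustration of how the new field is meant to be consumed (this example is not part of the patch), a platform description could group one logical attribute over consecutive registers as below; the label, register offset and mask are invented, and only the .regnum usage reflects the new capability:

	/* Hypothetical platform data entry: a read-only attribute whose value
	 * spans two consecutive registers. Offset, mask and label are
	 * illustrative only. */
	static struct mlxreg_core_data example_io_data[] = {
		{
			.label = "fan_dir",
			.reg = 0x2e,		/* first register of the attribute */
			.mask = GENMASK(7, 0),
			.regnum = 2,		/* attribute occupies two registers */
			.mode = 0444,
		},
	};

A driver walking such a table would be expected to read .regnum registers starting at .reg and combine them before exposing the value through sysfs.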
Signed-off-by: Vadim Pasternak Signed-off-by: Andy Shevchenko --- include/linux/platform_data/mlxreg.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h index b8da8aef2446..a2adc3ad45f2 100644 --- a/include/linux/platform_data/mlxreg.h +++ b/include/linux/platform_data/mlxreg.h @@ -80,6 +80,7 @@ struct mlxreg_hotplug_device { * @hpdev - hotplug device data; * @health_cntr: dynamic device health indication counter; * @attached: true if device has been attached after good health indication; + * @regnum: number of registers occupied by multi-register attribute; */ struct mlxreg_core_data { char label[MLXREG_CORE_LABEL_MAX_SIZE]; @@ -92,6 +93,7 @@ struct mlxreg_core_data { struct mlxreg_hotplug_device hpdev; u8 health_cntr; bool attached; + u8 regnum; }; /** -- cgit v1.2.3 From 17727a3b4879324818ea6f2ebc3f68432173ce24 Mon Sep 17 00:00:00 2001 From: Vadim Pasternak Date: Tue, 14 Jul 2020 15:02:02 +0300 Subject: platform_data/mlxreg: Add presence register field for FAN devices Add new field 'reg_prsnt' to the structure 'mlxreg_core_data' to provide the number FAN drawers equpped within the system. The purpose is to allow mapping between FAN drawers and FAN rotors (tachometer), since FAN drawer can be eqipped with a few rotors. Signed-off-by: Vadim Pasternak Signed-off-by: Andy Shevchenko --- include/linux/platform_data/mlxreg.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h index a2adc3ad45f2..9cffa9a64ab3 100644 --- a/include/linux/platform_data/mlxreg.h +++ b/include/linux/platform_data/mlxreg.h @@ -75,6 +75,7 @@ struct mlxreg_hotplug_device { * @mask: attribute access mask; * @bit: attribute effective bit; * @capability: attribute capability register; + * @reg_prsnt: attribute presence register; * @mode: access mode; * @np - pointer to node platform associated with attribute; * @hpdev - hotplug device data; @@ -88,6 +89,7 @@ struct mlxreg_core_data { u32 mask; u32 bit; u32 capability; + u32 reg_prsnt; umode_t mode; struct device_node *np; struct mlxreg_hotplug_device hpdev; -- cgit v1.2.3 From a8209dd42a60dca0fd15bc73d19fc4009e704c17 Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Wed, 15 Jul 2020 05:37:43 +0800 Subject: soundwire: sdw.h: fix PRBS/Static_1 swapped definitions Table 110 "Port Data Modes" of the SoundWire 1.2 specification lists PRBS as b01 and Static_1 as b11. The existing headers swapped the two values, fix. Signed-off-by: Pierre-Louis Bossart Reviewed-by: Guennadi Liakhovetski Reviewed-by: Rander Wang Signed-off-by: Bard Liao Link: https://lore.kernel.org/r/20200714213744.24674-2-yung-chuan.liao@linux.intel.com Signed-off-by: Vinod Koul --- include/linux/soundwire/sdw.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index 4057adf7f049..6452bac957b3 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -152,19 +152,19 @@ enum sdw_data_direction { * * @SDW_PORT_DATA_MODE_NORMAL: Normal data mode where audio data is received * and transmitted. + * @SDW_PORT_DATA_MODE_PRBS: Test mode which uses a PRBS generator to produce + * a pseudo random data pattern that is transferred + * @SDW_PORT_DATA_MODE_STATIC_0: Simple test mode which uses static value of + * logic 0. 
The encoding will result in no signal transitions * @SDW_PORT_DATA_MODE_STATIC_1: Simple test mode which uses static value of * logic 1. The encoding will result in signal transitions at every bitslot * owned by this Port - * @SDW_PORT_DATA_MODE_STATIC_0: Simple test mode which uses static value of - * logic 0. The encoding will result in no signal transitions - * @SDW_PORT_DATA_MODE_PRBS: Test mode which uses a PRBS generator to produce - * a pseudo random data pattern that is transferred */ enum sdw_port_data_mode { SDW_PORT_DATA_MODE_NORMAL = 0, - SDW_PORT_DATA_MODE_STATIC_1 = 1, + SDW_PORT_DATA_MODE_PRBS = 1, SDW_PORT_DATA_MODE_STATIC_0 = 2, - SDW_PORT_DATA_MODE_PRBS = 3, + SDW_PORT_DATA_MODE_STATIC_1 = 3, }; /* -- cgit v1.2.3 From 9256686898881b0694fe6af2c3283b69677838de Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Wed, 15 Jul 2020 05:37:44 +0800 Subject: soundwire: sdw.h: fix indentation Not sure how this went undetected for years. Signed-off-by: Pierre-Louis Bossart Reviewed-by: Guennadi Liakhovetski Reviewed-by: Rander Wang Signed-off-by: Bard Liao Link: https://lore.kernel.org/r/20200714213744.24674-3-yung-chuan.liao@linux.intel.com Signed-off-by: Vinod Koul --- include/linux/soundwire/sdw.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index 6452bac957b3..76052f12c9f7 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -88,10 +88,10 @@ enum sdw_slave_status { * @SDW_CLK_POST_DEPREPARE: post clock stop de-prepare */ enum sdw_clk_stop_type { - SDW_CLK_PRE_PREPARE = 0, - SDW_CLK_POST_PREPARE, - SDW_CLK_PRE_DEPREPARE, - SDW_CLK_POST_DEPREPARE, + SDW_CLK_PRE_PREPARE = 0, + SDW_CLK_POST_PREPARE, + SDW_CLK_PRE_DEPREPARE, + SDW_CLK_POST_DEPREPARE, }; /** -- cgit v1.2.3 From 71d734103edfa2b4c6657578a3082ee0e51d767e Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 8 Jul 2020 14:11:36 +0300 Subject: fsnotify: Rearrange fast path to minimise overhead when there is no watcher The fsnotify paths are trivial to hit even when there are no watchers and they are surprisingly expensive. For example, every successful vfs_write() hits fsnotify_modify which calls both fsnotify_parent and fsnotify unless FMODE_NONOTIFY is set which is an internal flag invisible to userspace. As it stands, fsnotify_parent is a guaranteed functional call even if there are no watchers and fsnotify() does a substantial amount of unnecessary work before it checks if there are any watchers. A perf profile showed that applying mnt->mnt_fsnotify_mask in fnotify() was almost half of the total samples taken in that function during a test. This patch rearranges the fast paths to reduce the amount of work done when there are no watchers. The test motivating this was "perf bench sched messaging --pipe". Despite the fact the pipes are anonymous, fsnotify is still called a lot and the overhead is noticeable even though it's completely pointless. It's likely the overhead is negligible for real IO so this is an extreme example. This is a comparison of hackbench using processes and pipes on a 1-socket machine with 8 CPU threads without fanotify watchers. 
5.7.0 5.7.0 vanilla fastfsnotify-v1r1 Amean 1 0.4837 ( 0.00%) 0.4630 * 4.27%* Amean 3 1.5447 ( 0.00%) 1.4557 ( 5.76%) Amean 5 2.6037 ( 0.00%) 2.4363 ( 6.43%) Amean 7 3.5987 ( 0.00%) 3.4757 ( 3.42%) Amean 12 5.8267 ( 0.00%) 5.6983 ( 2.20%) Amean 18 8.4400 ( 0.00%) 8.1327 ( 3.64%) Amean 24 11.0187 ( 0.00%) 10.0290 * 8.98%* Amean 30 13.1013 ( 0.00%) 12.8510 ( 1.91%) Amean 32 13.9190 ( 0.00%) 13.2410 ( 4.87%) 5.7.0 5.7.0 vanilla fastfsnotify-v1r1 Duration User 157.05 152.79 Duration System 1279.98 1219.32 Duration Elapsed 182.81 174.52 This is showing that the latencies are improved by roughly 2-9%. The variability is not shown but some of these results are within the noise as this workload heavily overloads the machine. That said, the system CPU usage is reduced by quite a bit so it makes sense to avoid the overhead even if it is a bit tricky to detect at times. A perf profile of just 1 group of tasks showed that 5.14% of samples taken were in either fsnotify() or fsnotify_parent(). With the patch, 2.8% of samples were in fsnotify, mostly function entry and the initial check for watchers. The check for watchers is complicated enough that inlining it may be controversial. [Amir] Slightly simplify with mnt_or_sb_mask => marks_mask Link: https://lore.kernel.org/r/20200708111156.24659-1-amir73il@gmail.com Signed-off-by: Mel Gorman Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- fs/notify/fsnotify.c | 27 +++++++++++++++------------ include/linux/fsnotify.h | 10 ++++++++++ include/linux/fsnotify_backend.h | 4 ++-- 3 files changed, 27 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index 72d332ce8e12..d59a58d10b84 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -143,7 +143,7 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode) } /* Notify this dentry's parent about a child's events. 
*/ -int fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, +int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, int data_type) { struct dentry *parent; @@ -174,7 +174,7 @@ int fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, return ret; } -EXPORT_SYMBOL_GPL(fsnotify_parent); +EXPORT_SYMBOL_GPL(__fsnotify_parent); static int send_to_group(struct inode *to_tell, __u32 mask, const void *data, @@ -315,17 +315,11 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, struct fsnotify_iter_info iter_info = {}; struct super_block *sb = to_tell->i_sb; struct mount *mnt = NULL; - __u32 mnt_or_sb_mask = sb->s_fsnotify_mask; int ret = 0; - __u32 test_mask = (mask & ALL_FSNOTIFY_EVENTS); + __u32 test_mask, marks_mask; - if (path) { + if (path) mnt = real_mount(path->mnt); - mnt_or_sb_mask |= mnt->mnt_fsnotify_mask; - } - /* An event "on child" is not intended for a mount/sb mark */ - if (mask & FS_EVENT_ON_CHILD) - mnt_or_sb_mask = 0; /* * Optimization: srcu_read_lock() has a memory barrier which can @@ -337,13 +331,22 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, if (!to_tell->i_fsnotify_marks && !sb->s_fsnotify_marks && (!mnt || !mnt->mnt_fsnotify_marks)) return 0; + + /* An event "on child" is not intended for a mount/sb mark */ + marks_mask = to_tell->i_fsnotify_mask; + if (!(mask & FS_EVENT_ON_CHILD)) { + marks_mask |= sb->s_fsnotify_mask; + if (mnt) + marks_mask |= mnt->mnt_fsnotify_mask; + } + /* * if this is a modify event we may need to clear the ignored masks * otherwise return if neither the inode nor the vfsmount/sb care about * this type of event. */ - if (!(mask & FS_MODIFY) && - !(test_mask & (to_tell->i_fsnotify_mask | mnt_or_sb_mask))) + test_mask = (mask & ALL_FSNOTIFY_EVENTS); + if (!(mask & FS_MODIFY) && !(test_mask & marks_mask)) return 0; iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu); diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 5ab28f6c7d26..508f6bb0b06b 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -44,6 +44,16 @@ static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry, fsnotify_name(dir, mask, d_inode(dentry), &dentry->d_name, 0); } +/* Notify this dentry's parent about a child's events. */ +static inline int fsnotify_parent(struct dentry *dentry, __u32 mask, + const void *data, int data_type) +{ + if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED)) + return 0; + + return __fsnotify_parent(dentry, mask, data, data_type); +} + /* * Simple wrappers to consolidate calls fsnotify_parent()/fsnotify() when * an event is on a file/dentry. 
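To see where the savings come from, consider a write-path hook built on these helpers. The following is a simplified, hypothetical caller (the real hooks live in include/linux/fsnotify.h and are not reproduced verbatim here); with no watchers present it now costs one dentry flag test plus the early mark checks in fsnotify():

	/* Hypothetical caller sketch, not part of the patch. */
	static inline void example_notify_modify(struct file *file)
	{
		const struct path *path = &file->f_path;

		if (file->f_mode & FMODE_NONOTIFY)	/* internal, never-notify files */
			return;

		/* Inline wrapper: returns immediately unless the parent dentry
		 * is flagged as watched (DCACHE_FSNOTIFY_PARENT_WATCHED). */
		fsnotify_parent(path->dentry, FS_MODIFY, path, FSNOTIFY_EVENT_PATH);

		/* Child notification: fsnotify() returns early when no inode, sb
		 * or mount marks exist, and with this patch defers the mask
		 * computation until after that check. */
		fsnotify(file_inode(file), FS_MODIFY, path, FSNOTIFY_EVENT_PATH,
			 NULL, 0);
	}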
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index f0c506405b54..1626fa7d10ff 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -379,7 +379,7 @@ struct fsnotify_mark { /* main fsnotify call to send events */ extern int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_type, const struct qstr *name, u32 cookie); -extern int fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, +extern int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, int data_type); extern void __fsnotify_inode_delete(struct inode *inode); extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt); @@ -541,7 +541,7 @@ static inline int fsnotify(struct inode *to_tell, __u32 mask, const void *data, return 0; } -static inline int fsnotify_parent(struct dentry *dentry, __u32 mask, +static inline int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, int data_type) { return 0; -- cgit v1.2.3 From 17c7b8b1cadc0ae95ee2a9fa18022dd0c901af5f Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 15 Jul 2020 10:18:46 +0800 Subject: cipso: Remove unused inline functions They are not used any more since commit b1edeb102397 ("netlabel: Replace protocol/NetLabel linking with refrerence counts") Signed-off-by: YueHaibing Acked-by: Paul Moore Signed-off-by: David S. Miller --- include/net/cipso_ipv4.h | 12 ------------ 1 file changed, 12 deletions(-) (limited to 'include') diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h index 428b6725b248..53dd7d988a2d 100644 --- a/include/net/cipso_ipv4.h +++ b/include/net/cipso_ipv4.h @@ -150,18 +150,6 @@ static inline int cipso_v4_doi_walk(u32 *skip_cnt, { return 0; } - -static inline int cipso_v4_doi_domhsh_add(struct cipso_v4_doi *doi_def, - const char *domain) -{ - return -ENOSYS; -} - -static inline int cipso_v4_doi_domhsh_remove(struct cipso_v4_doi *doi_def, - const char *domain) -{ - return 0; -} #endif /* CONFIG_NETLABEL */ /* -- cgit v1.2.3 From 054848d21bc19992f2c3540244bd6defbc833aa2 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 15 Jul 2020 10:31:19 +0800 Subject: net: flow: Remove unused inline function It is not used since commit 09c7570480f7 ("xfrm: remove flow cache") Signed-off-by: YueHaibing Signed-off-by: David S. 
Miller --- include/net/flow.h | 18 ------------------ 1 file changed, 18 deletions(-) (limited to 'include') diff --git a/include/net/flow.h b/include/net/flow.h index a50fb77a0b27..929d3ca614d0 100644 --- a/include/net/flow.h +++ b/include/net/flow.h @@ -204,24 +204,6 @@ static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn) return container_of(fldn, struct flowi, u.dn); } -typedef unsigned long flow_compare_t; - -static inline unsigned int flow_key_size(u16 family) -{ - switch (family) { - case AF_INET: - BUILD_BUG_ON(sizeof(struct flowi4) % sizeof(flow_compare_t)); - return sizeof(struct flowi4) / sizeof(flow_compare_t); - case AF_INET6: - BUILD_BUG_ON(sizeof(struct flowi6) % sizeof(flow_compare_t)); - return sizeof(struct flowi6) / sizeof(flow_compare_t); - case AF_DECnet: - BUILD_BUG_ON(sizeof(struct flowidn) % sizeof(flow_compare_t)); - return sizeof(struct flowidn) / sizeof(flow_compare_t); - } - return 0; -} - __u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys); #endif -- cgit v1.2.3 From 0d40efabe3e383dd7e347debd85a0d4754402efb Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 15 Jul 2020 10:36:13 +0800 Subject: mptcp: Remove unused inline function mptcp_rcv_synsent() commit 263e1201a2c3 ("mptcp: consolidate synack processing.") left behind this, remove it. Signed-off-by: YueHaibing Acked-by: Paolo Abeni Signed-off-by: David S. Miller --- include/net/mptcp.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'include') diff --git a/include/net/mptcp.h b/include/net/mptcp.h index 46d0487d2b22..02158c257bd4 100644 --- a/include/net/mptcp.h +++ b/include/net/mptcp.h @@ -164,10 +164,6 @@ static inline bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb, return false; } -static inline void mptcp_rcv_synsent(struct sock *sk) -{ -} - static inline bool mptcp_synack_options(const struct request_sock *req, unsigned int *size, struct mptcp_out_options *opts) -- cgit v1.2.3 From 8635764bcf0f109933b593d79ac2247b1e863d0a Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 15 Jul 2020 10:52:03 +0800 Subject: netpoll: Remove unused inline function netpoll_netdev_init() commit d565b0a1a9b6 ("net: Add Generic Receive Offload infrastructure") left behind this, remove it. Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- include/linux/netpoll.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'include') diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index f47af135bd56..e6a2d72e0dc7 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -102,9 +102,6 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi) static inline void netpoll_poll_unlock(void *have) { } -static inline void netpoll_netdev_init(struct net_device *dev) -{ -} static inline bool netpoll_tx_running(struct net_device *dev) { return false; -- cgit v1.2.3 From f7d7ad42a9dc2d63cab6a79fe31e6732a30dacf5 Mon Sep 17 00:00:00 2001 From: Sumit Semwal Date: Mon, 22 Jun 2020 18:11:07 +0530 Subject: regulator: Allow regulators to verify enabled during enable() Some regulators might need to verify that they have indeed been enabled after the enable() call is made and enable_time delay has passed. This is implemented by repeatedly checking is_enabled() upto poll_enabled_time, waiting for the already calculated enable delay in each iteration. 
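A driver opting in would pair the new field with its existing enable timing in the regulator_desc. A minimal, hypothetical descriptor might look like the following (device name, register offset, timings and the ops structure are invented for illustration; only poll_enabled_time is new):

	/* Hypothetical descriptor using the new poll_enabled_time field. */
	static const struct regulator_desc example_ldo_desc = {
		.name			= "example-ldo",
		.ops			= &example_ldo_ops,	/* provides get_status()/is_enabled() */
		.type			= REGULATOR_VOLTAGE,
		.owner			= THIS_MODULE,
		.enable_reg		= 0x10,
		.enable_mask		= BIT(0),
		.enable_time		= 500,	/* worst-case ramp time, in uS */
		.poll_enabled_time	= 100,	/* re-check status every 100 uS */
	};

With such a descriptor, _regulator_do_enable() waits in poll_enabled_time steps, re-reading the state via get_status() (or is_enabled() when get_status() is not provided), and fails with -ETIMEDOUT if the regulator has not come up once the calculated enable delay has elapsed.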
Signed-off-by: Sumit Semwal Link: https://lore.kernel.org/r/20200622124110.20971-2-sumit.semwal@linaro.org Signed-off-by: Mark Brown --- drivers/regulator/core.c | 63 +++++++++++++++++++++++++++++++++++++++- include/linux/regulator/driver.h | 5 ++++ 2 files changed, 67 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 03154f5b939f..538a2779986a 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -2347,6 +2347,37 @@ static void _regulator_enable_delay(unsigned int delay) udelay(us); } +/** + * _regulator_check_status_enabled + * + * A helper function to check if the regulator status can be interpreted + * as 'regulator is enabled'. + * @rdev: the regulator device to check + * + * Return: + * * 1 - if status shows regulator is in enabled state + * * 0 - if not enabled state + * * Error Value - as received from ops->get_status() + */ +static inline int _regulator_check_status_enabled(struct regulator_dev *rdev) +{ + int ret = rdev->desc->ops->get_status(rdev); + + if (ret < 0) { + rdev_info(rdev, "get_status returned error: %d\n", ret); + return ret; + } + + switch (ret) { + case REGULATOR_STATUS_OFF: + case REGULATOR_STATUS_ERROR: + case REGULATOR_STATUS_UNDEFINED: + return 0; + default: + return 1; + } +} + static int _regulator_do_enable(struct regulator_dev *rdev) { int ret, delay; @@ -2407,7 +2438,37 @@ static int _regulator_do_enable(struct regulator_dev *rdev) * together. */ trace_regulator_enable_delay(rdev_get_name(rdev)); - _regulator_enable_delay(delay); + /* If poll_enabled_time is set, poll upto the delay calculated + * above, delaying poll_enabled_time uS to check if the regulator + * actually got enabled. + * If the regulator isn't enabled after enable_delay has + * expired, return -ETIMEDOUT. + */ + if (rdev->desc->poll_enabled_time) { + unsigned int time_remaining = delay; + + while (time_remaining > 0) { + _regulator_enable_delay(rdev->desc->poll_enabled_time); + + if (rdev->desc->ops->get_status) { + ret = _regulator_check_status_enabled(rdev); + if (ret < 0) + return ret; + else if (ret) + break; + } else if (rdev->desc->ops->is_enabled(rdev)) + break; + + time_remaining -= rdev->desc->poll_enabled_time; + } + + if (time_remaining <= 0) { + rdev_err(rdev, "Enabled check timed out\n"); + return -ETIMEDOUT; + } + } else { + _regulator_enable_delay(delay); + } trace_regulator_enable_complete(rdev_get_name(rdev)); diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 7eb9fea8e482..436df3ba0b2a 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -305,6 +305,9 @@ enum regulator_type { * @enable_time: Time taken for initial enable of regulator (in uS). * @off_on_delay: guard time (in uS), before re-enabling a regulator * + * @poll_enabled_time: The polling interval (in uS) to use while checking that + * the regulator was actually enabled. Max upto enable_time. + * * @of_map_mode: Maps a hardware mode defined in a DeviceTree to a standard mode */ struct regulator_desc { @@ -372,6 +375,8 @@ struct regulator_desc { unsigned int off_on_delay; + unsigned int poll_enabled_time; + unsigned int (*of_map_mode)(unsigned int mode); }; -- cgit v1.2.3 From ded071f475cb5b67fda412f88fd5fd4d9c27916c Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 14 Jul 2020 21:56:58 -0700 Subject: usb: linux/usb.h: drop duplicated word in comment Drop the doubled word "the" in a comment. 
Signed-off-by: Randy Dunlap Cc: Greg Kroah-Hartman Cc: linux-usb@vger.kernel.org Link: https://lore.kernel.org/r/20200715045701.22949-1-rdunlap@infradead.org Signed-off-by: Greg Kroah-Hartman --- include/linux/usb.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/usb.h b/include/linux/usb.h index c28fc391444a..20c555db4621 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -341,7 +341,7 @@ struct usb_interface_cache { * @interface: array of pointers to usb_interface structures, one for each * interface in the configuration. The number of interfaces is stored * in desc.bNumInterfaces. These pointers are valid only while the - * the configuration is active. + * configuration is active. * @intf_cache: array of pointers to usb_interface_cache structures, one * for each interface in the configuration. These structures exist * for the entire life of the device. -- cgit v1.2.3 From c76ae34b5af22497b660f6baeed182869c24e411 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 14 Jul 2020 21:57:00 -0700 Subject: usb: linux/usb/pd_vdo.h: drop duplicated word in comment Drop the doubled word "all" in a comment. Signed-off-by: Randy Dunlap Cc: Greg Kroah-Hartman Cc: linux-usb@vger.kernel.org Link: https://lore.kernel.org/r/20200715045701.22949-3-rdunlap@infradead.org Signed-off-by: Greg Kroah-Hartman --- include/linux/usb/pd_vdo.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/usb/pd_vdo.h b/include/linux/usb/pd_vdo.h index 35b8e15efaa0..68bdc4e2f5a9 100644 --- a/include/linux/usb/pd_vdo.h +++ b/include/linux/usb/pd_vdo.h @@ -249,7 +249,7 @@ * SVDM Discover SVIDs request -> response * * Request is properly formatted VDM Header with discover SVIDs command. - * Response is a set of SVIDs of all all supported SVIDs with all zero's to + * Response is a set of SVIDs of all supported SVIDs with all zero's to * mark the end of SVIDs. If more than 12 SVIDs are supported command SHOULD be * repeated. */ -- cgit v1.2.3 From 4e28335f2b442662148f9803f3e7e053e6074815 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 14 Jul 2020 21:57:01 -0700 Subject: usb: linux/usb/serial.h: drop duplicated word in comment Drop the doubled word "set" in a comment. Signed-off-by: Randy Dunlap Cc: Greg Kroah-Hartman Cc: linux-usb@vger.kernel.org Link: https://lore.kernel.org/r/20200715045701.22949-4-rdunlap@infradead.org Signed-off-by: Greg Kroah-Hartman --- include/linux/usb/serial.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 14cac4a1ae8f..315cfc6f99a9 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -213,7 +213,7 @@ struct usb_serial_endpoints { * Return 0 to continue on with the initialization sequence. Anything * else will abort it. * @attach: pointer to the driver's attach function. - * This will be called when the struct usb_serial structure is fully set + * This will be called when the struct usb_serial structure is fully * set up. Do any local initialization of the device, or any private * memory structure allocation at this point in time. * @disconnect: pointer to the driver's disconnect function. 
This will be -- cgit v1.2.3 From e4dfa8029925f6200f6ab2db5a25d86fd082e9cf Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 14 Jul 2020 21:56:59 -0700 Subject: usb: linux/usb/gadget.h: fix duplicated word in comment Change the doubled word "in" to "be in" in a comment. Signed-off-by: Randy Dunlap Cc: Greg Kroah-Hartman Cc: linux-usb@vger.kernel.org Link: https://lore.kernel.org/r/20200715045701.22949-2-rdunlap@infradead.org Signed-off-by: Greg Kroah-Hartman --- include/linux/usb/gadget.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 298b334e2951..52ce1f6b8f83 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -731,7 +731,7 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver); * it will first disconnect(). The driver is also requested * to unbind() and clean up any device state, before this procedure * finally returns. It's expected that the unbind() functions - * will in in exit sections, so may not be linked in some kernels. + * will be in exit sections, so may not be linked in some kernels. */ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver); -- cgit v1.2.3 From c738fbabb0ff62d0f9a9572e56e65d05a1b34c6a Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Wed, 8 Jul 2020 14:11:37 +0300 Subject: fsnotify: fold fsnotify() call into fsnotify_parent() All (two) callers of fsnotify_parent() also call fsnotify() to notify the child inode. Move the second fsnotify() call into fsnotify_parent(). This will allow more flexibility in making decisions about which of the two event falvors should be sent. Using 'goto notify_child' in the inline helper seems a bit strange, but it mimics the code in __fsnotify_parent() for clarity and the goto pattern will become less strage after following patches are applied. Link: https://lore.kernel.org/r/20200708111156.24659-2-amir73il@gmail.com Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- fs/notify/fsnotify.c | 27 ++++++++++++++++++--------- include/linux/fsnotify.h | 33 +++++++++++++-------------------- 2 files changed, 31 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index d59a58d10b84..30628a72ca01 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -142,16 +142,20 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode) spin_unlock(&inode->i_lock); } -/* Notify this dentry's parent about a child's events. */ +/* + * Notify this dentry's parent about a child's events with child name info + * if parent is watching. + * Notify also the child without name info if child inode is watching. + */ int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, - int data_type) + int data_type) { struct dentry *parent; struct inode *p_inode; int ret = 0; if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED)) - return 0; + goto notify_child; parent = dget_parent(dentry); p_inode = parent->d_inode; @@ -161,18 +165,23 @@ int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, } else if (p_inode->i_fsnotify_mask & mask & ALL_FSNOTIFY_EVENTS) { struct name_snapshot name; - /* we are notifying a parent so come up with the new mask which - * specifies these are events which came from a child. */ - mask |= FS_EVENT_ON_CHILD; - + /* + * We are notifying a parent, so set a flag in mask to inform + * backend that event has information about a child entry. 
+ */ take_dentry_name_snapshot(&name, dentry); - ret = fsnotify(p_inode, mask, data, data_type, &name.name, 0); + ret = fsnotify(p_inode, mask | FS_EVENT_ON_CHILD, data, + data_type, &name.name, 0); release_dentry_name_snapshot(&name); } dput(parent); - return ret; + if (ret) + return ret; + +notify_child: + return fsnotify(d_inode(dentry), mask, data, data_type, NULL, 0); } EXPORT_SYMBOL_GPL(__fsnotify_parent); diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 508f6bb0b06b..316c9b820517 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -48,44 +48,37 @@ static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry, static inline int fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, int data_type) { + struct inode *inode = d_inode(dentry); + + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED)) - return 0; + goto notify_child; return __fsnotify_parent(dentry, mask, data, data_type); + +notify_child: + return fsnotify(inode, mask, data, data_type, NULL, 0); } /* - * Simple wrappers to consolidate calls fsnotify_parent()/fsnotify() when - * an event is on a file/dentry. + * Simple wrappers to consolidate calls to fsnotify_parent() when an event + * is on a file/dentry. */ static inline void fsnotify_dentry(struct dentry *dentry, __u32 mask) { - struct inode *inode = d_inode(dentry); - - if (S_ISDIR(inode->i_mode)) - mask |= FS_ISDIR; - - fsnotify_parent(dentry, mask, inode, FSNOTIFY_EVENT_INODE); - fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); + fsnotify_parent(dentry, mask, d_inode(dentry), FSNOTIFY_EVENT_INODE); } static inline int fsnotify_file(struct file *file, __u32 mask) { const struct path *path = &file->f_path; - struct inode *inode = file_inode(file); - int ret; if (file->f_mode & FMODE_NONOTIFY) return 0; - if (S_ISDIR(inode->i_mode)) - mask |= FS_ISDIR; - - ret = fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH); - if (ret) - return ret; - - return fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); + return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH); } /* Simple call site for access decisions */ -- cgit v1.2.3 From cbcf47adc8aadbcaa741391ccfd96f764b50be7e Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Wed, 8 Jul 2020 14:11:38 +0300 Subject: fsnotify: return non const from fsnotify_data_inode() Return non const inode pointer from fsnotify_data_inode(). None of the fsnotify hooks pass const inode pointer as data and callers often need to cast to a non const pointer. 
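As a hypothetical example of what the non-const return type allows (not taken from an existing backend), a caller can now feed the result straight into APIs that require a mutable inode, without the cast the old prototype forced:

	/* Illustrative helper only; ihold() needs a non-const struct inode *. */
	static struct inode *example_grab_event_inode(const void *data, int data_type)
	{
		struct inode *inode = fsnotify_data_inode(data, data_type);

		if (inode)
			ihold(inode);
		return inode;
	}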
Link: https://lore.kernel.org/r/20200708111156.24659-3-amir73il@gmail.com Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- fs/notify/fanotify/fanotify.c | 2 +- include/linux/fsnotify_backend.h | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index 85eda539b35f..d9fc83dd994a 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -341,7 +341,7 @@ static struct inode *fanotify_fid_inode(struct inode *to_tell, u32 event_mask, if (event_mask & ALL_FSNOTIFY_DIRENT_EVENTS) return to_tell; - return (struct inode *)fsnotify_data_inode(data, data_type); + return fsnotify_data_inode(data, data_type); } struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group, diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 1626fa7d10ff..97300f3b8ff0 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -220,12 +220,11 @@ enum fsnotify_data_type { FSNOTIFY_EVENT_INODE, }; -static inline const struct inode *fsnotify_data_inode(const void *data, - int data_type) +static inline struct inode *fsnotify_data_inode(const void *data, int data_type) { switch (data_type) { case FSNOTIFY_EVENT_INODE: - return data; + return (struct inode *)data; case FSNOTIFY_EVENT_PATH: return d_inode(((const struct path *)data)->dentry); default: -- cgit v1.2.3 From 5c7f8ffe741daae7f8d811a2037b2693f02c90c5 Mon Sep 17 00:00:00 2001 From: Dan Murphy Date: Mon, 13 Jul 2020 10:45:31 -0500 Subject: dt: bindings: Add multicolor class dt bindings documention MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add DT bindings for the LEDs multicolor class framework. Add multicolor ID to the color ID list for device tree bindings. CC: Rob Herring Reviewed-by: Rob Herring Acked-by: Pavel Machek Acked-by: Jacek Anaszewski Signed-off-by: Dan Murphy Reviewed-by: Marek Behún Signed-off-by: Pavel Machek --- .../bindings/leds/leds-class-multicolor.yaml | 37 ++++++++++++++++++++++ include/dt-bindings/leds/common.h | 3 +- 2 files changed, 39 insertions(+), 1 deletion(-) create mode 100644 Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml (limited to 'include') diff --git a/Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml b/Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml new file mode 100644 index 000000000000..b55e1f1308a4 --- /dev/null +++ b/Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/leds/leds-class-multicolor.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Common properties for the multicolor LED class. + +maintainers: + - Dan Murphy + +description: | + Bindings for multi color LEDs show how to describe current outputs of + either integrated multi-color LED elements (like RGB, RGBW, RGBWA-UV + etc.) or standalone LEDs, to achieve logically grouped multi-color LED + modules. This is achieved by adding multi-led nodes layer to the + monochrome LED bindings. + The nodes and properties defined in this document are unique to the multicolor + LED class. Common LED nodes and properties are inherited from the common.txt + within this documentation directory. + +patternProperties: + "^multi-led@([0-9a-f])$": + type: object + description: Represents the LEDs that are to be grouped. 
+ properties: + color: + const: 8 # LED_COLOR_ID_MULTI + description: | + For multicolor LED support this property should be defined as + LED_COLOR_ID_MULTI which can be found in include/linux/leds/common.h. + + $ref: "common.yaml#" + + required: + - color +... diff --git a/include/dt-bindings/leds/common.h b/include/dt-bindings/leds/common.h index 0ce7dfc00dcb..a463ce6a8794 100644 --- a/include/dt-bindings/leds/common.h +++ b/include/dt-bindings/leds/common.h @@ -30,7 +30,8 @@ #define LED_COLOR_ID_VIOLET 5 #define LED_COLOR_ID_YELLOW 6 #define LED_COLOR_ID_IR 7 -#define LED_COLOR_ID_MAX 8 +#define LED_COLOR_ID_MULTI 8 +#define LED_COLOR_ID_MAX 9 /* Standard LED functions */ /* Keyboard LEDs, usually it would be input4::capslock etc. */ -- cgit v1.2.3 From e721eb0616f62e766882b80fd3433b80635abd5f Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Mon, 13 Jul 2020 08:46:18 +0100 Subject: scsi: scsi_transport_fc: Match HBA Attribute Length with HBAAPI V2.0 definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit According to 'include/scsi/scsi_transport_fc.h': "Attributes are based on HBAAPI V2.0 definitions" ... so it seems sane to match the 'HBA Attribute Length' to them. If we don't, the compiler complains that the copied data will be truncated. Fixes the following W=1 kernel build warning(s): In file included from include/linux/bitmap.h:9, from include/linux/cpumask.h:12, from include/linux/smp.h:13, from include/linux/percpu.h:7, from include/scsi/libfc.h:13, from drivers/scsi/libfc/fc_elsct.c:17: In function ‘strncpy’, inlined from ‘fc_ct_ms_fill.constprop’ at include/scsi/fc_encode.h:263:3: include/linux/string.h:297:30: warning: ‘__builtin_strncpy’ output may be truncated copying 64 bytes from a string of length 79 [-Wstringop-truncation] 297 | #define __underlying_strncpy __builtin_strncpy | ^ include/linux/string.h:307:9: note: in expansion of macro ‘__underlying_strncpy’ 307 | return __underlying_strncpy(p, q, size); | ^~~~~~~~~~~~~~~~~~~~ In function ‘strncpy’, inlined from ‘fc_ct_ms_fill.constprop’ at include/scsi/fc_encode.h:275:3: include/linux/string.h:297:30: warning: ‘__builtin_strncpy’ output may be truncated copying 64 bytes from a string of length 79 [-Wstringop-truncation] 297 | #define __underlying_strncpy __builtin_strncpy | ^ include/linux/string.h:307:9: note: in expansion of macro ‘__underlying_strncpy’ 307 | return __underlying_strncpy(p, q, size); | ^~~~~~~~~~~~~~~~~~~~ Link: https://lore.kernel.org/r/20200713074645.126138-3-lee.jones@linaro.org Reviewed-by: Hannes Reinecke Signed-off-by: Lee Jones Signed-off-by: Martin K. 
Petersen --- include/scsi/fc/fc_ms.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/scsi/fc/fc_ms.h b/include/scsi/fc/fc_ms.h index 800d53dc9470..9e273fed0a85 100644 --- a/include/scsi/fc/fc_ms.h +++ b/include/scsi/fc/fc_ms.h @@ -63,8 +63,8 @@ enum fc_fdmi_hba_attr_type { * HBA Attribute Length */ #define FC_FDMI_HBA_ATTR_NODENAME_LEN 8 -#define FC_FDMI_HBA_ATTR_MANUFACTURER_LEN 64 -#define FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN 64 +#define FC_FDMI_HBA_ATTR_MANUFACTURER_LEN 80 +#define FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN 80 #define FC_FDMI_HBA_ATTR_MODEL_LEN 256 #define FC_FDMI_HBA_ATTR_MODELDESCR_LEN 256 #define FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN 256 -- cgit v1.2.3 From e15864f8ea05b24071b07300459ae7e511d0b938 Mon Sep 17 00:00:00 2001 From: Niklas Cassel Date: Tue, 14 Jul 2020 23:18:23 +0200 Subject: block: add max_open_zones to blk-sysfs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a new max_open_zones definition in the sysfs documentation. This definition will be common for all devices utilizing the zoned block device support in the kernel. Export max open zones according to this new definition for NVMe Zoned Namespace devices, ZAC ATA devices (which are treated as SCSI devices by the kernel), and ZBC SCSI devices. Add the new max_open_zones member to struct request_queue, rather than as a queue limit, since this property cannot be split across stacking drivers. Signed-off-by: Niklas Cassel Reviewed-by: Javier González Reviewed-by: Damien Le Moal Reviewed-by: Johannes Thumshirn Reviewed-by: Martin K. Petersen Signed-off-by: Jens Axboe --- Documentation/ABI/testing/sysfs-block | 9 +++++++++ Documentation/block/queue-sysfs.rst | 7 +++++++ block/blk-sysfs.c | 15 +++++++++++++++ drivers/nvme/host/zns.c | 1 + drivers/scsi/sd_zbc.c | 4 ++++ include/linux/blkdev.h | 25 +++++++++++++++++++++++++ 6 files changed, 61 insertions(+) (limited to 'include') diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block index ed8c14f161ee..f151d9cf90de 100644 --- a/Documentation/ABI/testing/sysfs-block +++ b/Documentation/ABI/testing/sysfs-block @@ -273,6 +273,15 @@ Description: device ("host-aware" or "host-managed" zone model). For regular block devices, the value is always 0. +What: /sys/block//queue/max_open_zones +Date: July 2020 +Contact: Niklas Cassel +Description: + For zoned block devices (zoned attribute indicating + "host-managed" or "host-aware"), the sum of zones belonging to + any of the zone states: EXPLICIT OPEN or IMPLICIT OPEN, + is limited by this value. If this value is 0, there is no limit. + What: /sys/block//queue/chunk_sectors Date: September 2016 Contact: Hannes Reinecke diff --git a/Documentation/block/queue-sysfs.rst b/Documentation/block/queue-sysfs.rst index 6a8513af9201..f01cf8530ae4 100644 --- a/Documentation/block/queue-sysfs.rst +++ b/Documentation/block/queue-sysfs.rst @@ -117,6 +117,13 @@ Maximum number of elements in a DMA scatter/gather list with integrity data that will be submitted by the block layer core to the associated block driver. +max_open_zones (RO) +------------------- +For zoned block devices (zoned attribute indicating "host-managed" or +"host-aware"), the sum of zones belonging to any of the zone states: +EXPLICIT OPEN or IMPLICIT OPEN, is limited by this value. +If this value is 0, there is no limit. 
+ max_sectors_kb (RW) ------------------- This is the maximum number of kilobytes that the block layer will allow diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index be67952e7be2..414f04579d77 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -306,6 +306,11 @@ static ssize_t queue_nr_zones_show(struct request_queue *q, char *page) return queue_var_show(blk_queue_nr_zones(q), page); } +static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page) +{ + return queue_var_show(queue_max_open_zones(q), page); +} + static ssize_t queue_nomerges_show(struct request_queue *q, char *page) { return queue_var_show((blk_queue_nomerges(q) << 1) | @@ -668,6 +673,11 @@ static struct queue_sysfs_entry queue_nr_zones_entry = { .show = queue_nr_zones_show, }; +static struct queue_sysfs_entry queue_max_open_zones_entry = { + .attr = {.name = "max_open_zones", .mode = 0444 }, + .show = queue_max_open_zones_show, +}; + static struct queue_sysfs_entry queue_nomerges_entry = { .attr = {.name = "nomerges", .mode = 0644 }, .show = queue_nomerges_show, @@ -766,6 +776,7 @@ static struct attribute *queue_attrs[] = { &queue_nonrot_entry.attr, &queue_zoned_entry.attr, &queue_nr_zones_entry.attr, + &queue_max_open_zones_entry.attr, &queue_nomerges_entry.attr, &queue_rq_affinity_entry.attr, &queue_iostats_entry.attr, @@ -793,6 +804,10 @@ static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr, (!q->mq_ops || !q->mq_ops->timeout)) return 0; + if (attr == &queue_max_open_zones_entry.attr && + !blk_queue_is_zoned(q)) + return 0; + return attr->mode; } diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c index 04e5b991c00c..3d80b9cf6bfc 100644 --- a/drivers/nvme/host/zns.c +++ b/drivers/nvme/host/zns.c @@ -96,6 +96,7 @@ int nvme_update_zone_info(struct gendisk *disk, struct nvme_ns *ns, q->limits.zoned = BLK_ZONED_HM; blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q); + blk_queue_max_open_zones(q, le32_to_cpu(id->mor) + 1); free_data: kfree(id); return status; diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 183a20720da9..aa3564139b40 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -717,6 +717,10 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf) /* The drive satisfies the kernel restrictions: set it up */ blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q); blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE); + if (sdkp->zones_max_open == U32_MAX) + blk_queue_max_open_zones(q, 0); + else + blk_queue_max_open_zones(q, sdkp->zones_max_open); nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks); /* READ16/WRITE16 is mandatory for ZBC disks */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index de7adc59b993..c8beb8bbdb08 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -513,6 +513,7 @@ struct request_queue { unsigned int nr_zones; unsigned long *conv_zones_bitmap; unsigned long *seq_zones_wlock; + unsigned int max_open_zones; #endif /* CONFIG_BLK_DEV_ZONED */ /* @@ -722,6 +723,17 @@ static inline bool blk_queue_zone_is_seq(struct request_queue *q, return true; return !test_bit(blk_queue_zone_no(q, sector), q->conv_zones_bitmap); } + +static inline void blk_queue_max_open_zones(struct request_queue *q, + unsigned int max_open_zones) +{ + q->max_open_zones = max_open_zones; +} + +static inline unsigned int queue_max_open_zones(const struct request_queue *q) +{ + return q->max_open_zones; +} #else /* CONFIG_BLK_DEV_ZONED */ static inline unsigned int 
blk_queue_nr_zones(struct request_queue *q) { @@ -737,6 +749,10 @@ static inline unsigned int blk_queue_zone_no(struct request_queue *q, { return 0; } +static inline unsigned int queue_max_open_zones(const struct request_queue *q) +{ + return 0; +} #endif /* CONFIG_BLK_DEV_ZONED */ static inline bool rq_is_sync(struct request *rq) @@ -1519,6 +1535,15 @@ static inline sector_t bdev_zone_sectors(struct block_device *bdev) return 0; } +static inline unsigned int bdev_max_open_zones(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q) + return queue_max_open_zones(q); + return 0; +} + static inline int queue_dma_alignment(const struct request_queue *q) { return q ? q->dma_alignment : 511; -- cgit v1.2.3 From 659bf827ba8f1183b714341d8a1d4b1e446178d9 Mon Sep 17 00:00:00 2001 From: Niklas Cassel Date: Tue, 14 Jul 2020 23:18:24 +0200 Subject: block: add max_active_zones to blk-sysfs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a new max_active zones definition in the sysfs documentation. This definition will be common for all devices utilizing the zoned block device support in the kernel. Export max_active_zones according to this new definition for NVMe Zoned Namespace devices, ZAC ATA devices (which are treated as SCSI devices by the kernel), and ZBC SCSI devices. Add the new max_active_zones member to struct request_queue, rather than as a queue limit, since this property cannot be split across stacking drivers. For SCSI devices, even though max active zones is not part of the ZBC/ZAC spec, export max_active_zones as 0, signifying "no limit". Signed-off-by: Niklas Cassel Reviewed-by: Javier González Reviewed-by: Damien Le Moal Reviewed-by: Johannes Thumshirn Reviewed-by: Martin K. Petersen Signed-off-by: Jens Axboe --- Documentation/ABI/testing/sysfs-block | 9 +++++++++ Documentation/block/queue-sysfs.rst | 7 +++++++ block/blk-sysfs.c | 14 +++++++++++++- drivers/nvme/host/zns.c | 1 + drivers/scsi/sd_zbc.c | 1 + include/linux/blkdev.h | 25 +++++++++++++++++++++++++ 6 files changed, 56 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block index f151d9cf90de..2322eb748b38 100644 --- a/Documentation/ABI/testing/sysfs-block +++ b/Documentation/ABI/testing/sysfs-block @@ -273,6 +273,15 @@ Description: device ("host-aware" or "host-managed" zone model). For regular block devices, the value is always 0. +What: /sys/block//queue/max_active_zones +Date: July 2020 +Contact: Niklas Cassel +Description: + For zoned block devices (zoned attribute indicating + "host-managed" or "host-aware"), the sum of zones belonging to + any of the zone states: EXPLICIT OPEN, IMPLICIT OPEN or CLOSED, + is limited by this value. If this value is 0, there is no limit. + What: /sys/block//queue/max_open_zones Date: July 2020 Contact: Niklas Cassel diff --git a/Documentation/block/queue-sysfs.rst b/Documentation/block/queue-sysfs.rst index f01cf8530ae4..f261a5c84170 100644 --- a/Documentation/block/queue-sysfs.rst +++ b/Documentation/block/queue-sysfs.rst @@ -117,6 +117,13 @@ Maximum number of elements in a DMA scatter/gather list with integrity data that will be submitted by the block layer core to the associated block driver. 
+max_active_zones (RO) +--------------------- +For zoned block devices (zoned attribute indicating "host-managed" or +"host-aware"), the sum of zones belonging to any of the zone states: +EXPLICIT OPEN, IMPLICIT OPEN or CLOSED, is limited by this value. +If this value is 0, there is no limit. + max_open_zones (RO) ------------------- For zoned block devices (zoned attribute indicating "host-managed" or diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 414f04579d77..7dda709f3ccb 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -311,6 +311,11 @@ static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page) return queue_var_show(queue_max_open_zones(q), page); } +static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page) +{ + return queue_var_show(queue_max_active_zones(q), page); +} + static ssize_t queue_nomerges_show(struct request_queue *q, char *page) { return queue_var_show((blk_queue_nomerges(q) << 1) | @@ -678,6 +683,11 @@ static struct queue_sysfs_entry queue_max_open_zones_entry = { .show = queue_max_open_zones_show, }; +static struct queue_sysfs_entry queue_max_active_zones_entry = { + .attr = {.name = "max_active_zones", .mode = 0444 }, + .show = queue_max_active_zones_show, +}; + static struct queue_sysfs_entry queue_nomerges_entry = { .attr = {.name = "nomerges", .mode = 0644 }, .show = queue_nomerges_show, @@ -777,6 +787,7 @@ static struct attribute *queue_attrs[] = { &queue_zoned_entry.attr, &queue_nr_zones_entry.attr, &queue_max_open_zones_entry.attr, + &queue_max_active_zones_entry.attr, &queue_nomerges_entry.attr, &queue_rq_affinity_entry.attr, &queue_iostats_entry.attr, @@ -804,7 +815,8 @@ static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr, (!q->mq_ops || !q->mq_ops->timeout)) return 0; - if (attr == &queue_max_open_zones_entry.attr && + if ((attr == &queue_max_open_zones_entry.attr || + attr == &queue_max_active_zones_entry.attr) && !blk_queue_is_zoned(q)) return 0; diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c index 3d80b9cf6bfc..57cfd78731fb 100644 --- a/drivers/nvme/host/zns.c +++ b/drivers/nvme/host/zns.c @@ -97,6 +97,7 @@ int nvme_update_zone_info(struct gendisk *disk, struct nvme_ns *ns, q->limits.zoned = BLK_ZONED_HM; blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q); blk_queue_max_open_zones(q, le32_to_cpu(id->mor) + 1); + blk_queue_max_active_zones(q, le32_to_cpu(id->mar) + 1); free_data: kfree(id); return status; diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index aa3564139b40..d8b2c49d645b 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -721,6 +721,7 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf) blk_queue_max_open_zones(q, 0); else blk_queue_max_open_zones(q, sdkp->zones_max_open); + blk_queue_max_active_zones(q, 0); nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks); /* READ16/WRITE16 is mandatory for ZBC disks */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index c8beb8bbdb08..285b59cfc064 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -514,6 +514,7 @@ struct request_queue { unsigned long *conv_zones_bitmap; unsigned long *seq_zones_wlock; unsigned int max_open_zones; + unsigned int max_active_zones; #endif /* CONFIG_BLK_DEV_ZONED */ /* @@ -734,6 +735,17 @@ static inline unsigned int queue_max_open_zones(const struct request_queue *q) { return q->max_open_zones; } + +static inline void blk_queue_max_active_zones(struct request_queue *q, + unsigned int 
max_active_zones) +{ + q->max_active_zones = max_active_zones; +} + +static inline unsigned int queue_max_active_zones(const struct request_queue *q) +{ + return q->max_active_zones; +} #else /* CONFIG_BLK_DEV_ZONED */ static inline unsigned int blk_queue_nr_zones(struct request_queue *q) { @@ -753,6 +765,10 @@ static inline unsigned int queue_max_open_zones(const struct request_queue *q) { return 0; } +static inline unsigned int queue_max_active_zones(const struct request_queue *q) +{ + return 0; +} #endif /* CONFIG_BLK_DEV_ZONED */ static inline bool rq_is_sync(struct request *rq) @@ -1544,6 +1560,15 @@ static inline unsigned int bdev_max_open_zones(struct block_device *bdev) return 0; } +static inline unsigned int bdev_max_active_zones(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q) + return queue_max_active_zones(q); + return 0; +} + static inline int queue_dma_alignment(const struct request_queue *q) { return q ? q->dma_alignment : 511; -- cgit v1.2.3 From 3e79f082ebfc130360bcee23e4dd74729dcafdf4 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Wed, 1 Jul 2020 12:52:32 +0530 Subject: libnvdimm/nvdimm/flush: Allow architecture to override the flush barrier Architectures like ppc64 provide persistent memory specific barriers that will ensure that all stores for which the modifications are written to persistent storage by preceding dcbfps and dcbstps instructions have updated persistent storage before any data access or data transfer caused by subsequent instructions is initiated. This is in addition to the ordering done by wmb() Update nvdimm core such that architecture can use barriers other than wmb to ensure all previous writes are architecturally visible for the platform buffer flush. Signed-off-by: Aneesh Kumar K.V Reviewed-by: Dan Williams Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200701072235.223558-5-aneesh.kumar@linux.ibm.com --- Documentation/memory-barriers.txt | 14 ++++++++++++++ drivers/md/dm-writecache.c | 2 +- drivers/nvdimm/region_devs.c | 8 ++++---- include/asm-generic/barrier.h | 10 ++++++++++ 4 files changed, 29 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt index eaabc3134294..ff07cd3b2f82 100644 --- a/Documentation/memory-barriers.txt +++ b/Documentation/memory-barriers.txt @@ -1935,6 +1935,20 @@ There are some more advanced barrier functions: relaxed I/O accessors and the Documentation/DMA-API.txt file for more information on consistent memory. + (*) pmem_wmb(); + + This is for use with persistent memory to ensure that stores for which + modifications are written to persistent storage reached a platform + durability domain. + + For example, after a non-temporal write to pmem region, we use pmem_wmb() + to ensure that stores have reached a platform durability domain. This ensures + that stores have updated persistent storage before any data access or + data transfer caused by subsequent instructions is initiated. This is + in addition to the ordering done by wmb(). + + For load from persistent memory, existing read memory barriers are sufficient + to ensure read ordering. 
=============================== IMPLICIT KERNEL MEMORY BARRIERS diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 74f3c506f084..00534fa4a384 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -536,7 +536,7 @@ static void ssd_commit_superblock(struct dm_writecache *wc) static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios) { if (WC_MODE_PMEM(wc)) - wmb(); + pmem_wmb(); else ssd_commit_flushed(wc, wait_for_ios); } diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 4502f9c4708d..c3237c2b03a6 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -1206,13 +1206,13 @@ int generic_nvdimm_flush(struct nd_region *nd_region) idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8)); /* - * The first wmb() is needed to 'sfence' all previous writes - * such that they are architecturally visible for the platform - * buffer flush. Note that we've already arranged for pmem + * The pmem_wmb() is needed to 'sfence' all + * previous writes such that they are architecturally visible for + * the platform buffer flush. Note that we've already arranged for pmem * writes to avoid the cache via memcpy_flushcache(). The final * wmb() ensures ordering for the NVDIMM flush write. */ - wmb(); + pmem_wmb(); for (i = 0; i < nd_region->ndr_mappings; i++) if (ndrd_get_flush_wpq(ndrd, i, 0)) writeq(1, ndrd_get_flush_wpq(ndrd, i, idx)); diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h index 2eacaf7d62f6..b589bb216ee5 100644 --- a/include/asm-generic/barrier.h +++ b/include/asm-generic/barrier.h @@ -257,5 +257,15 @@ do { \ }) #endif +/* + * pmem_wmb() ensures that all stores for which the modification + * are written to persistent storage by preceding instructions have + * updated persistent storage before any data access or data transfer + * caused by subsequent instructions is initiated. + */ +#ifndef pmem_wmb +#define pmem_wmb() wmb() +#endif + #endif /* !__ASSEMBLY__ */ #endif /* __ASM_GENERIC_BARRIER_H */ -- cgit v1.2.3 From 1a8f0886a6008c98a926bdeca49f2ef33015a491 Mon Sep 17 00:00:00 2001 From: Kajol Jain Date: Thu, 9 Jul 2020 10:48:35 +0530 Subject: powerpc/perf/hv-24x7: Add cpu hotplug support This patch adds cpu hotplug functions to the hv_24x7 pmu. A new cpuhp_state enum value, "CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE", is added. The online callback function updates the cpumask only if it is empty, as the primary intention of adding hotplug support is to designate a CPU to make the HCALL to collect the counter data. The offline function tests and clears the corresponding cpu in the cpumask and updates the cpumask to any other active cpu. Signed-off-by: Kajol Jain Reviewed-by: Gautham R. Shenoy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709051836.723765-2-kjain@linux.ibm.com --- arch/powerpc/perf/hv-24x7.c | 46 +++++++++++++++++++++++++++++++++++++++++++++ include/linux/cpuhotplug.h | 1 + 2 files changed, 47 insertions(+) (limited to 'include') diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index db213eb7cb02..93b4700dcf8c 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -31,6 +31,8 @@ static int interface_version; /* Whether we have to aggregate result data for some domains. 
*/ static bool aggregate_result_elements; +static cpumask_t hv_24x7_cpumask; + static bool domain_is_valid(unsigned domain) { switch (domain) { @@ -1641,6 +1643,45 @@ static struct pmu h_24x7_pmu = { .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; +static int ppc_hv_24x7_cpu_online(unsigned int cpu) +{ + if (cpumask_empty(&hv_24x7_cpumask)) + cpumask_set_cpu(cpu, &hv_24x7_cpumask); + + return 0; +} + +static int ppc_hv_24x7_cpu_offline(unsigned int cpu) +{ + int target; + + /* Check if exiting cpu is used for collecting 24x7 events */ + if (!cpumask_test_and_clear_cpu(cpu, &hv_24x7_cpumask)) + return 0; + + /* Find a new cpu to collect 24x7 events */ + target = cpumask_last(cpu_active_mask); + + if (target < 0 || target >= nr_cpu_ids) { + pr_err("hv_24x7: CPU hotplug init failed\n"); + return -1; + } + + /* Migrate 24x7 events to the new target */ + cpumask_set_cpu(target, &hv_24x7_cpumask); + perf_pmu_migrate_context(&h_24x7_pmu, cpu, target); + + return 0; +} + +static int hv_24x7_cpu_hotplug_init(void) +{ + return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE, + "perf/powerpc/hv_24x7:online", + ppc_hv_24x7_cpu_online, + ppc_hv_24x7_cpu_offline); +} + static int hv_24x7_init(void) { int r; @@ -1685,6 +1726,11 @@ static int hv_24x7_init(void) if (r) return r; + /* init cpuhotplug */ + r = hv_24x7_cpu_hotplug_init(); + if (r) + return r; + r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1); if (r) return r; diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 191772d4a4d7..a2710e654b64 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -181,6 +181,7 @@ enum cpuhp_state { CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE, CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE, CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE, + CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE, CPUHP_AP_WATCHDOG_ONLINE, CPUHP_AP_WORKQUEUE_ONLINE, CPUHP_AP_RCUTREE_ONLINE, -- cgit v1.2.3 From 0d80b76184ac9f2dfb939e39ad1f961fc006b99d Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 09:42:38 -0700 Subject: net: qed: drop duplicate words in comments Drop doubled word "the" in two comments. Signed-off-by: Randy Dunlap Cc: "David S. Miller" Cc: netdev@vger.kernel.org Signed-off-by: Jakub Kicinski --- include/linux/qed/qed_chain.h | 2 +- include/linux/qed/qed_if.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 92cdc79e5019..7071dc92b4e2 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -130,7 +130,7 @@ struct qed_chain { } pbl_sp; /* Address of first page of the chain - the address is required - * for fastpath operation [consume/produce] but only for the the SINGLE + * for fastpath operation [consume/produce] but only for the SINGLE * flavour which isn't considered fastpath [== SPQ]. 
*/ void *p_virt_addr; diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 90e1060da02b..8a6e3ad436d1 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -498,7 +498,7 @@ struct qed_fcoe_pf_params { u8 bdq_pbl_num_entries[2]; }; -/* Most of the the parameters below are described in the FW iSCSI / TCP HSI */ +/* Most of the parameters below are described in the FW iSCSI / TCP HSI */ struct qed_iscsi_pf_params { u64 glbl_q_params_addr; u64 bdq_pbl_base_addr[3]; -- cgit v1.2.3 From 2ff17117e60572d6974c766d6b1a225ec7d68795 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 09:42:39 -0700 Subject: net: skbuff.h: drop duplicate words in comments Drop doubled words in several comments. Signed-off-by: Randy Dunlap Cc: "David S. Miller" Cc: netdev@vger.kernel.org Signed-off-by: Jakub Kicinski --- include/linux/skbuff.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 0c0377fc00c2..6a82d4a8229e 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1328,7 +1328,7 @@ void skb_flow_dissect_meta(const struct sk_buff *skb, void *target_container); /* Gets a skb connection tracking info, ctinfo map should be a - * a map of mapsize to translate enum ip_conntrack_info states + * map of mapsize to translate enum ip_conntrack_info states * to user states. */ void @@ -3812,7 +3812,7 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb) * must call this function to return the skb back to the stack with a * timestamp. * - * @skb: clone of the the original outgoing packet + * @skb: clone of the original outgoing packet * @hwtstamps: hardware time stamps * */ -- cgit v1.2.3 From 158e89639166461e225a855bce6e3ee6cd8bb1c0 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 09:42:40 -0700 Subject: net: wimax: fix duplicate words in comments Drop doubled words in two comments. Fix a spello/typo. Signed-off-by: Randy Dunlap Cc: "David S. Miller" Cc: netdev@vger.kernel.org Signed-off-by: Jakub Kicinski --- include/linux/wimax/debug.h | 4 ++-- include/net/wimax.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/wimax/debug.h b/include/linux/wimax/debug.h index 4dd2c1cea6a9..cdae052bcdcd 100644 --- a/include/linux/wimax/debug.h +++ b/include/linux/wimax/debug.h @@ -184,8 +184,8 @@ do { \ /* - * CPP sintatic sugar to generate A_B like symbol names when one of - * the arguments is a a preprocessor #define. + * CPP syntactic sugar to generate A_B like symbol names when one of + * the arguments is a preprocessor #define. */ #define __D_PASTE__(varname, modulename) varname##_##modulename #define __D_PASTE(varname, modulename) (__D_PASTE__(varname, modulename)) diff --git a/include/net/wimax.h b/include/net/wimax.h index 24ba7e89c26c..f6e31d2f47aa 100644 --- a/include/net/wimax.h +++ b/include/net/wimax.h @@ -28,7 +28,7 @@ * * USAGE * - * Embed a `struct wimax_dev` at the beginning of the the device's + * Embed a `struct wimax_dev` at the beginning of the device's * private structure, initialize and register it. For details, see * `struct wimax_dev`s documentation. * -- cgit v1.2.3 From cee50c2a028432dadacdf55950c6c6a7875e8172 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 09:42:41 -0700 Subject: net: 9p: drop duplicate word in comment Drop doubled word "not" in a comment. Signed-off-by: Randy Dunlap Cc: "David S. 
Miller" Cc: netdev@vger.kernel.org Signed-off-by: Jakub Kicinski --- include/net/9p/transport.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h index 98a2be2de04a..3eb4261b2958 100644 --- a/include/net/9p/transport.h +++ b/include/net/9p/transport.h @@ -25,7 +25,7 @@ * @request: member function to issue a request to the transport * @cancel: member function to cancel a request (if it hasn't been sent) * @cancelled: member function to notify that a cancelled request will not - * not receive a reply + * receive a reply * * This is the basic API for a transport module which is registered by the * transport module with the 9P core network module and used by the client -- cgit v1.2.3 From c201324b54553aebb32845193680f21eb493c6e5 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 09:42:42 -0700 Subject: net: caif: drop duplicate words in comments Drop doubled words "or" and "the" in several comments. Signed-off-by: Randy Dunlap Cc: "David S. Miller" Cc: netdev@vger.kernel.org Signed-off-by: Jakub Kicinski --- include/net/caif/caif_layer.h | 4 ++-- include/uapi/linux/caif/caif_socket.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/net/caif/caif_layer.h b/include/net/caif/caif_layer.h index 064094101cb5..51f7bb42a936 100644 --- a/include/net/caif/caif_layer.h +++ b/include/net/caif/caif_layer.h @@ -156,7 +156,7 @@ struct cflayer { * CAIF packets upwards in the stack. * Packet handling rules: * - The CAIF packet (cfpkt) ownership is passed to the - * called receive function. This means that the the + * called receive function. This means that the * packet cannot be accessed after passing it to the * above layer using up->receive(). * @@ -184,7 +184,7 @@ struct cflayer { * CAIF packet downwards in the stack. * Packet handling rules: * - The CAIF packet (cfpkt) ownership is passed to the - * transmit function. This means that the the packet + * transmit function. This means that the packet * cannot be accessed after passing it to the below * layer using dn->transmit(). * diff --git a/include/uapi/linux/caif/caif_socket.h b/include/uapi/linux/caif/caif_socket.h index 10ec1d1cf68e..d9970bbaa156 100644 --- a/include/uapi/linux/caif/caif_socket.h +++ b/include/uapi/linux/caif/caif_socket.h @@ -169,7 +169,7 @@ struct sockaddr_caif { * @CAIFSO_LINK_SELECT: Selector used if multiple CAIF Link layers are * available. Either a high bandwidth * link can be selected (CAIF_LINK_HIGH_BANDW) or - * or a low latency link (CAIF_LINK_LOW_LATENCY). + * a low latency link (CAIF_LINK_LOW_LATENCY). * This option is of type __u32. * Alternatively SO_BINDTODEVICE can be used. * -- cgit v1.2.3 From ab88d64a90951a95c4e08971b02fcb781d2067f0 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 09:42:43 -0700 Subject: net: dsa.h: drop duplicate word in comment Drop doubled word "to" in a comment. Signed-off-by: Randy Dunlap Cc: "David S. Miller" Cc: netdev@vger.kernel.org Signed-off-by: Jakub Kicinski --- include/net/dsa.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/dsa.h b/include/net/dsa.h index b28c95c76762..6fa418ff1175 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -612,7 +612,7 @@ struct dsa_switch_ops { * MTU change functionality. Switches can also adjust their MRU through * this method. By MTU, one understands the SDU (L2 payload) length. 
* If the switch needs to account for the DSA tag on the CPU port, this - * method needs to to do so privately. + * method needs to do so privately. */ int (*port_change_mtu)(struct dsa_switch *ds, int port, int new_mtu); -- cgit v1.2.3 From 4b48b0a3aa0df49eb6d80a86c2b016bf6c3eebf9 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 09:42:44 -0700 Subject: net: ip6_fib.h: drop duplicate word in comment Drop doubled word "the" in a comment. Signed-off-by: Randy Dunlap Cc: "David S. Miller" Cc: netdev@vger.kernel.org Signed-off-by: Jakub Kicinski --- include/net/ip6_fib.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index cc8356fd927f..ac5ff3c3afb1 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -166,7 +166,7 @@ struct fib6_info { struct fib6_node __rcu *fib6_node; /* Multipath routes: - * siblings is a list of fib6_info that have the the same metric/weight, + * siblings is a list of fib6_info that have the same metric/weight, * destination, but not the same gateway. nsiblings is just a cache * to speed up lookup. */ -- cgit v1.2.3 From d86f9868bdb40fc11c1e8c176ae11fb897b9d5f4 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 09:42:45 -0700 Subject: net: sctp: drop duplicate words in comments Drop doubled words in several comments. Signed-off-by: Randy Dunlap Cc: "David S. Miller" Cc: netdev@vger.kernel.org Signed-off-by: Jakub Kicinski --- include/net/sctp/sctp.h | 2 +- include/net/sctp/structs.h | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index e3bd198b00ae..4fc747b778eb 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -291,7 +291,7 @@ atomic_dec(&sctp_dbg_objcnt_## name) #define SCTP_DBG_OBJCNT(name) \ atomic_t sctp_dbg_objcnt_## name = ATOMIC_INIT(0) -/* Macro to help create new entries in in the global array of +/* Macro to help create new entries in the global array of * objcnt counters. */ #define SCTP_DBG_OBJCNT_ENTRY(name) \ diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index fb42c90348d3..9bbb2f60db92 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1398,7 +1398,7 @@ struct sctp_stream_priorities { struct list_head prio_sched; /* List of streams scheduled */ struct list_head active; - /* The next stream stream in line */ + /* The next stream in line */ struct sctp_stream_out_ext *next; __u16 prio; }; @@ -1460,7 +1460,7 @@ struct sctp_stream { struct { /* List of streams scheduled */ struct list_head rr_list; - /* The next stream stream in line */ + /* The next stream in line */ struct sctp_stream_out_ext *rr_next; }; }; @@ -1770,7 +1770,7 @@ struct sctp_association { int max_burst; /* This is the max_retrans value for the association. This value will - * be initialized initialized from system defaults, but can be + * be initialized from system defaults, but can be * modified by the SCTP_ASSOCINFO socket option. */ int max_retrans; -- cgit v1.2.3 From 59632b220f2d61df274ed3a14a204e941051fdad Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 09:42:46 -0700 Subject: net: ipv6: drop duplicate word in comment Drop the doubled word "by" in a comment. Signed-off-by: Randy Dunlap Cc: "David S. 
Miller" Cc: netdev@vger.kernel.org Signed-off-by: Jakub Kicinski --- include/linux/ipv6.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 2cb445a8fc9e..8d8f877e7f81 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -223,7 +223,7 @@ struct ipv6_pinfo { /* * Packed in 16bits. - * Omit one shift by by putting the signed field at MSB. + * Omit one shift by putting the signed field at MSB. */ #if defined(__BIG_ENDIAN_BITFIELD) __s16 hop_limit:9; -- cgit v1.2.3 From 1dcb6c36a5ebac46099b6363ccf8f4e7563d51e2 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Tue, 14 Jul 2020 21:28:32 -0700 Subject: net/mlx5: Support setting access rights of dma addresses mlx5_fill_page_frag_array() is used to populate dma addresses to resources that require it, such as QPs, RQs etc. When the resource is used, PA list permissions are ignored. For resources that use MTT list, the user is required to provide the access rights. Subsequent patches use resources that require MTT lists, so modify API and implementation to support that. Signed-off-by: Eli Cohen Reviewed-by: Parav Pandit Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 11 +++++++++-- include/linux/mlx5/driver.h | 1 + include/linux/mlx5/mlx5_ifc.h | 6 ++++++ 3 files changed, 16 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c index 42198e64a7f4..8db4b5f0f963 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c @@ -299,11 +299,18 @@ void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas) } EXPORT_SYMBOL_GPL(mlx5_fill_page_array); -void mlx5_fill_page_frag_array(struct mlx5_frag_buf *buf, __be64 *pas) +void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm) { int i; + WARN_ON(perm & 0xfc); for (i = 0; i < buf->npages; i++) - pas[i] = cpu_to_be64(buf->frags[i].map); + pas[i] = cpu_to_be64(buf->frags[i].map | perm); +} +EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array_perm); + +void mlx5_fill_page_frag_array(struct mlx5_frag_buf *buf, __be64 *pas) +{ + mlx5_fill_page_frag_array_perm(buf, pas, 0); } EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 13c0e4556eda..f2557d7e1355 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -971,6 +971,7 @@ void mlx5_register_debugfs(void); void mlx5_unregister_debugfs(void); void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas); +void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm); void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas); int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, unsigned int *irqn); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 3786888cb1ba..5890e5c9da77 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -10653,4 +10653,10 @@ struct mlx5_ifc_tls_progress_params_bits { u8 hw_offset_record_number[0x18]; }; +enum { + MLX5_MTT_PERM_READ = 1 << 0, + MLX5_MTT_PERM_WRITE = 1 << 1, + MLX5_MTT_PERM_RW = MLX5_MTT_PERM_READ | MLX5_MTT_PERM_WRITE, +}; + #endif /* MLX5_IFC_H */ -- cgit v1.2.3 From 2a913f23447ce7ef2a4dcaaa230ff43116cf5249 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Tue, 14 Jul 2020 21:28:33 -0700 Subject: 
net/mlx5: Add VDPA interface type to supported enumerations VDPA is a new interface that will be added in subsequent patches. It uses mlx5 core devices and resources. Add an interface type for it. Signed-off-by: Eli Cohen Reviewed-by: Parav Pandit Signed-off-by: Saeed Mahameed --- include/linux/mlx5/driver.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index f2557d7e1355..5ecc48831ae8 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -1054,6 +1054,7 @@ enum { enum { MLX5_INTERFACE_PROTOCOL_IB = 0, MLX5_INTERFACE_PROTOCOL_ETH = 1, + MLX5_INTERFACE_PROTOCOL_VDPA = 2, }; struct mlx5_interface { -- cgit v1.2.3 From 8a06a79b0aa811eee6d56b3cfc738c5d08b0dc74 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Tue, 14 Jul 2020 21:28:34 -0700 Subject: net/mlx5: Add interface changes required for VDPA Rename mlx5_ifc_device_virtio_emulation_cap_bits to mlx5_ifc_virtio_emulation_cap_bits to match names produced by the tools producing these auto generated files. In addition missing capabilities that will be required by VDPA implementation. Signed-off-by: Eli Cohen Reviewed-by: Parav Pandit Signed-off-by: Saeed Mahameed --- include/linux/mlx5/device.h | 4 +- include/linux/mlx5/mlx5_ifc.h | 112 ++++++++++++++++++++++++++++++++++++------ 2 files changed, 100 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 57db125e5802..2aacf9a8ee4d 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1361,11 +1361,11 @@ enum mlx5_qcam_feature_groups { MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap) #define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\ - MLX5_GET(device_virtio_emulation_cap, \ + MLX5_GET(virtio_emulation_cap, \ (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap) #define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\ - MLX5_GET64(device_virtio_emulation_cap, \ + MLX5_GET64(virtio_emulation_cap, \ (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap) #define MLX5_CAP_IPSEC(mdev, cap)\ diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 5890e5c9da77..435ab47d5362 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -93,6 +93,7 @@ enum { enum { MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b, + MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d, MLX5_OBJ_TYPE_MKEY = 0xff01, MLX5_OBJ_TYPE_QP = 0xff02, MLX5_OBJ_TYPE_PSV = 0xff03, @@ -981,17 +982,40 @@ struct mlx5_ifc_device_event_cap_bits { u8 user_unaffiliated_events[4][0x40]; }; -struct mlx5_ifc_device_virtio_emulation_cap_bits { - u8 reserved_at_0[0x20]; +struct mlx5_ifc_virtio_emulation_cap_bits { + u8 desc_tunnel_offload_type[0x1]; + u8 eth_frame_offload_type[0x1]; + u8 virtio_version_1_0[0x1]; + u8 device_features_bits_mask[0xd]; + u8 event_mode[0x8]; + u8 virtio_queue_type[0x8]; - u8 reserved_at_20[0x13]; + u8 max_tunnel_desc[0x10]; + u8 reserved_at_30[0x3]; u8 log_doorbell_stride[0x5]; u8 reserved_at_38[0x3]; u8 log_doorbell_bar_size[0x5]; u8 doorbell_bar_offset[0x40]; - u8 reserved_at_80[0x780]; + u8 max_emulated_devices[0x8]; + u8 max_num_virtio_queues[0x18]; + + u8 reserved_at_a0[0x60]; + + u8 umem_1_buffer_param_a[0x20]; + + u8 umem_1_buffer_param_b[0x20]; + + u8 umem_2_buffer_param_a[0x20]; + + u8 umem_2_buffer_param_b[0x20]; + + u8 umem_3_buffer_param_a[0x20]; + + u8 umem_3_buffer_param_b[0x20]; + + u8 reserved_at_1c0[0x640]; }; enum { @@ -1216,7 +1240,9 @@ struct 
mlx5_ifc_cmd_hca_cap_bits { u8 max_sgl_for_optimized_performance[0x8]; u8 log_max_cq_sz[0x8]; - u8 reserved_at_d0[0xb]; + u8 reserved_at_d0[0x9]; + u8 virtio_net_device_emualtion_manager[0x1]; + u8 virtio_blk_device_emualtion_manager[0x1]; u8 log_max_cq[0x5]; u8 log_max_eq_sz[0x8]; @@ -2952,7 +2978,7 @@ union mlx5_ifc_hca_cap_union_bits { struct mlx5_ifc_fpga_cap_bits fpga_cap; struct mlx5_ifc_tls_cap_bits tls_cap; struct mlx5_ifc_device_mem_cap_bits device_mem_cap; - struct mlx5_ifc_device_virtio_emulation_cap_bits virtio_emulation_cap; + struct mlx5_ifc_virtio_emulation_cap_bits virtio_emulation_cap; u8 reserved_at_0[0x8000]; }; @@ -3298,15 +3324,18 @@ struct mlx5_ifc_scheduling_context_bits { }; struct mlx5_ifc_rqtc_bits { - u8 reserved_at_0[0xa0]; + u8 reserved_at_0[0xa0]; - u8 reserved_at_a0[0x10]; - u8 rqt_max_size[0x10]; + u8 reserved_at_a0[0x5]; + u8 list_q_type[0x3]; + u8 reserved_at_a8[0x8]; + u8 rqt_max_size[0x10]; - u8 reserved_at_c0[0x10]; - u8 rqt_actual_size[0x10]; + u8 rq_vhca_id_format[0x1]; + u8 reserved_at_c1[0xf]; + u8 rqt_actual_size[0x10]; - u8 reserved_at_e0[0x6a0]; + u8 reserved_at_e0[0x6a0]; struct mlx5_ifc_rq_num_bits rq_num[]; }; @@ -7084,7 +7113,7 @@ struct mlx5_ifc_destroy_mkey_out_bits { struct mlx5_ifc_destroy_mkey_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7782,7 +7811,7 @@ struct mlx5_ifc_create_mkey_out_bits { struct mlx5_ifc_create_mkey_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -10312,6 +10341,40 @@ struct mlx5_ifc_create_umem_in_bits { struct mlx5_ifc_umem_bits umem; }; +struct mlx5_ifc_create_umem_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x8]; + u8 umem_id[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_destroy_umem_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 umem_id[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_destroy_umem_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + struct mlx5_ifc_create_uctx_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; @@ -10324,6 +10387,18 @@ struct mlx5_ifc_create_uctx_in_bits { struct mlx5_ifc_uctx_bits uctx; }; +struct mlx5_ifc_create_uctx_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x10]; + u8 uid[0x10]; + + u8 reserved_at_60[0x20]; +}; + struct mlx5_ifc_destroy_uctx_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; @@ -10337,6 +10412,15 @@ struct mlx5_ifc_destroy_uctx_in_bits { u8 reserved_at_60[0x20]; }; +struct mlx5_ifc_destroy_uctx_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + struct mlx5_ifc_create_sw_icm_in_bits { struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; struct mlx5_ifc_sw_icm_bits sw_icm; -- cgit v1.2.3 From 339ddaa626995bc6218972ca241471f3717cc5f4 Mon Sep 17 00:00:00 2001 From: Patrick Steinhardt Date: Wed, 15 Jul 2020 19:43:33 +0200 Subject: Bluetooth: Fix update of connection state in `hci_encrypt_cfm` Starting with the upgrade to v5.8-rc3, I've noticed I wasn't able to connect to my Bluetooth headset properly anymore. While connecting to the device would eventually succeed, bluetoothd seemed to be confused about the current connection state where the state was flapping hence and forth. 
Bisecting this issue led to commit 3ca44c16b0dc (Bluetooth: Consolidate encryption handling in hci_encrypt_cfm, 2020-05-19), which refactored `hci_encrypt_cfm` to also handle updating the connection state. The commit in question changed the code to call `hci_connect_cfm` inside `hci_encrypt_cfm` and to change the connection state. But with the conversion, we now only update the connection state if a status was set already. In fact, the reverse should be true: the connection state should be updated if no status is set. So let's fix the issue by reversing the condition. Fixes: 3ca44c16b0dc ("Bluetooth: Consolidate encryption handling in hci_encrypt_cfm") Signed-off-by: Patrick Steinhardt Acked-by: Luiz Augusto von Dentz Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci_core.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 34ad5b207598..bee1b4778ccc 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1467,7 +1467,7 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status) __u8 encrypt; if (conn->state == BT_CONFIG) { - if (status) + if (!status) conn->state = BT_CONNECTED; hci_connect_cfm(conn, status); -- cgit v1.2.3 From a5e6f964bb2c613933de58a35ddfa306128ba004 Mon Sep 17 00:00:00 2001 From: Misono Tomohiro Date: Fri, 10 Jul 2020 17:00:03 +0900 Subject: rtc: cleanup obsolete comment about struct rtc_class_ops Commit ea369ea6d828 ("rtc: remove .open() and .release()") removes the open/release callbacks from struct rtc_class_ops. Also commit 80d4bb515b78 ("RTC: Cleanup rtc_class_ops->irq_set_state") and commit 696160fec162 ("RTC: Cleanup rtc_class_ops->irq_set_freq()") remove the irq callbacks. So, just remove related comments so that readers will not be confused. Signed-off-by: Misono Tomohiro Signed-off-by: Alexandre Belloni Link: https://lore.kernel.org/r/20200710080003.7986-1-misono.tomohiro@jp.fujitsu.com --- include/linux/rtc.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'include') diff --git a/include/linux/rtc.h b/include/linux/rtc.h index bba3db3f7efa..22d1575e4991 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -55,10 +55,6 @@ extern struct class *rtc_class; * * The (current) exceptions are mostly filesystem hooks: * - the proc() hook for procfs - * - non-ioctl() chardev hooks: open(), release() - * - * REVISIT those periodic irq calls *do* have ops_lock when they're - * issued through ioctl() ... */ struct rtc_class_ops { int (*ioctl)(struct device *, unsigned int, unsigned long); -- cgit v1.2.3 From 06cc2afbbdf9a9e8df3e2f8db724997dd6e1b4ac Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 8 Jul 2020 12:41:13 +1000 Subject: crypto: lib/chacha20poly1305 - Add missing function declaration This patch adds a declaration for chacha20poly1305_selftest to silence a sparse warning. 
Signed-off-by: Herbert Xu --- include/crypto/chacha20poly1305.h | 2 ++ lib/crypto/chacha20poly1305.c | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/crypto/chacha20poly1305.h b/include/crypto/chacha20poly1305.h index 234ee28078ef..d2ac3ff7dc1e 100644 --- a/include/crypto/chacha20poly1305.h +++ b/include/crypto/chacha20poly1305.h @@ -45,4 +45,6 @@ bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src, size_t src_len const u64 nonce, const u8 key[CHACHA20POLY1305_KEY_SIZE]); +bool chacha20poly1305_selftest(void); + #endif /* __CHACHA20POLY1305_H */ diff --git a/lib/crypto/chacha20poly1305.c b/lib/crypto/chacha20poly1305.c index ad0699ce702f..431e04280332 100644 --- a/lib/crypto/chacha20poly1305.c +++ b/lib/crypto/chacha20poly1305.c @@ -21,8 +21,6 @@ #define CHACHA_KEY_WORDS (CHACHA_KEY_SIZE / sizeof(u32)) -bool __init chacha20poly1305_selftest(void); - static void chacha_load_key(u32 *k, const u8 *in) { k[0] = get_unaligned_le32(in); -- cgit v1.2.3 From e79a31715193686e92dadb4caedfbb1f5de3659c Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 8 Jul 2020 12:11:18 +0300 Subject: crypto: x86/chacha-sse3 - use unaligned loads for state array Due to the fact that the x86 port does not support allocating objects on the stack with an alignment that exceeds 8 bytes, we have a rather ugly hack in the x86 code for ChaCha to ensure that the state array is aligned to 16 bytes, allowing the SSE3 implementation of the algorithm to use aligned loads. Given that the performance benefit of using of aligned loads appears to be limited (~0.25% for 1k blocks using tcrypt on a Corei7-8650U), and the fact that this hack has leaked into generic ChaCha code, let's just remove it. Cc: Martin Willi Cc: Herbert Xu Cc: Eric Biggers Signed-off-by: Ard Biesheuvel Reviewed-by: Martin Willi Reviewed-by: Eric Biggers Signed-off-by: Herbert Xu --- arch/x86/crypto/chacha-ssse3-x86_64.S | 16 ++++++++-------- arch/x86/crypto/chacha_glue.c | 17 ++--------------- include/crypto/chacha.h | 4 ---- 3 files changed, 10 insertions(+), 27 deletions(-) (limited to 'include') diff --git a/arch/x86/crypto/chacha-ssse3-x86_64.S b/arch/x86/crypto/chacha-ssse3-x86_64.S index a38ab2512a6f..ca1788bfee16 100644 --- a/arch/x86/crypto/chacha-ssse3-x86_64.S +++ b/arch/x86/crypto/chacha-ssse3-x86_64.S @@ -120,10 +120,10 @@ SYM_FUNC_START(chacha_block_xor_ssse3) FRAME_BEGIN # x0..3 = s0..3 - movdqa 0x00(%rdi),%xmm0 - movdqa 0x10(%rdi),%xmm1 - movdqa 0x20(%rdi),%xmm2 - movdqa 0x30(%rdi),%xmm3 + movdqu 0x00(%rdi),%xmm0 + movdqu 0x10(%rdi),%xmm1 + movdqu 0x20(%rdi),%xmm2 + movdqu 0x30(%rdi),%xmm3 movdqa %xmm0,%xmm8 movdqa %xmm1,%xmm9 movdqa %xmm2,%xmm10 @@ -205,10 +205,10 @@ SYM_FUNC_START(hchacha_block_ssse3) # %edx: nrounds FRAME_BEGIN - movdqa 0x00(%rdi),%xmm0 - movdqa 0x10(%rdi),%xmm1 - movdqa 0x20(%rdi),%xmm2 - movdqa 0x30(%rdi),%xmm3 + movdqu 0x00(%rdi),%xmm0 + movdqu 0x10(%rdi),%xmm1 + movdqu 0x20(%rdi),%xmm2 + movdqu 0x30(%rdi),%xmm3 mov %edx,%r8d call chacha_permute diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c index 22250091cdbe..e67a59130025 100644 --- a/arch/x86/crypto/chacha_glue.c +++ b/arch/x86/crypto/chacha_glue.c @@ -14,8 +14,6 @@ #include #include -#define CHACHA_STATE_ALIGN 16 - asmlinkage void chacha_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src, unsigned int len, int nrounds); asmlinkage void chacha_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src, @@ -124,8 +122,6 @@ static void chacha_dosimd(u32 *state, u8 *dst, const u8 
*src, void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds) { - state = PTR_ALIGN(state, CHACHA_STATE_ALIGN); - if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable()) { hchacha_block_generic(state, stream, nrounds); } else { @@ -138,8 +134,6 @@ EXPORT_SYMBOL(hchacha_block_arch); void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv) { - state = PTR_ALIGN(state, CHACHA_STATE_ALIGN); - chacha_init_generic(state, key, iv); } EXPORT_SYMBOL(chacha_init_arch); @@ -147,8 +141,6 @@ EXPORT_SYMBOL(chacha_init_arch); void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes, int nrounds) { - state = PTR_ALIGN(state, CHACHA_STATE_ALIGN); - if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable() || bytes <= CHACHA_BLOCK_SIZE) return chacha_crypt_generic(state, dst, src, bytes, nrounds); @@ -170,15 +162,12 @@ EXPORT_SYMBOL(chacha_crypt_arch); static int chacha_simd_stream_xor(struct skcipher_request *req, const struct chacha_ctx *ctx, const u8 *iv) { - u32 *state, state_buf[16 + 2] __aligned(8); + u32 state[CHACHA_STATE_WORDS] __aligned(8); struct skcipher_walk walk; int err; err = skcipher_walk_virt(&walk, req, false); - BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16); - state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN); - chacha_init_generic(state, ctx->key, iv); while (walk.nbytes > 0) { @@ -217,12 +206,10 @@ static int xchacha_simd(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); - u32 *state, state_buf[16 + 2] __aligned(8); + u32 state[CHACHA_STATE_WORDS] __aligned(8); struct chacha_ctx subctx; u8 real_iv[16]; - BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16); - state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN); chacha_init_generic(state, ctx->key, req->iv); if (req->cryptlen > CHACHA_BLOCK_SIZE && crypto_simd_usable()) { diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h index 2676f4fbd4c1..3a1c72fdb7cf 100644 --- a/include/crypto/chacha.h +++ b/include/crypto/chacha.h @@ -25,11 +25,7 @@ #define CHACHA_BLOCK_SIZE 64 #define CHACHAPOLY_IV_SIZE 12 -#ifdef CONFIG_X86_64 -#define CHACHA_STATE_WORDS ((CHACHA_BLOCK_SIZE + 12) / sizeof(u32)) -#else #define CHACHA_STATE_WORDS (CHACHA_BLOCK_SIZE / sizeof(u32)) -#endif /* 192-bit nonce, then 64-bit stream position */ #define XCHACHA_IV_SIZE 32 -- cgit v1.2.3 From 9ea9c58b40a441a0babef8c615acedcfb3733919 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 8 Jul 2020 09:39:40 -0700 Subject: crypto: lib/sha256 - add sha256() function Add a function sha256() which computes a SHA-256 digest in one step, combining sha256_init() + sha256_update() + sha256_final(). This is similar to how we also have blake2s(). 
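An illustrative use of the new one-shot helper (the buffer names data and data_len are placeholders, not taken from this patch):

	u8 digest[SHA256_DIGEST_SIZE];

	/* equivalent to sha256_init() + sha256_update() + sha256_final() */
	sha256(data, data_len, digest);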
Reviewed-by: Ard Biesheuvel Tested-by: Hans de Goede Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- include/crypto/sha.h | 1 + lib/crypto/sha256.c | 10 ++++++++++ 2 files changed, 11 insertions(+) (limited to 'include') diff --git a/include/crypto/sha.h b/include/crypto/sha.h index 10753ff71d46..4ff3da816630 100644 --- a/include/crypto/sha.h +++ b/include/crypto/sha.h @@ -147,6 +147,7 @@ static inline void sha256_init(struct sha256_state *sctx) } void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len); void sha256_final(struct sha256_state *sctx, u8 *out); +void sha256(const u8 *data, unsigned int len, u8 *out); static inline void sha224_init(struct sha256_state *sctx) { diff --git a/lib/crypto/sha256.c b/lib/crypto/sha256.c index 2e621697c5c3..2321f6cb322f 100644 --- a/lib/crypto/sha256.c +++ b/lib/crypto/sha256.c @@ -280,4 +280,14 @@ void sha224_final(struct sha256_state *sctx, u8 *out) } EXPORT_SYMBOL(sha224_final); +void sha256(const u8 *data, unsigned int len, u8 *out) +{ + struct sha256_state sctx; + + sha256_init(&sctx); + sha256_update(&sctx, data, len); + sha256_final(&sctx, out); +} +EXPORT_SYMBOL(sha256); + MODULE_LICENSE("GPL"); -- cgit v1.2.3 From e72b48c5e7fe0c9fabeb23385b6e6f02f0a78d37 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 9 Jul 2020 23:20:36 -0700 Subject: crypto: geniv - remove unneeded arguments from aead_geniv_alloc() The type and mask arguments to aead_geniv_alloc() are always 0, so remove them. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/echainiv.c | 2 +- crypto/geniv.c | 7 ++++--- crypto/seqiv.c | 2 +- include/crypto/internal/geniv.h | 2 +- 4 files changed, 7 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/crypto/echainiv.c b/crypto/echainiv.c index 4a2f02baba14..69686668625e 100644 --- a/crypto/echainiv.c +++ b/crypto/echainiv.c @@ -115,7 +115,7 @@ static int echainiv_aead_create(struct crypto_template *tmpl, struct aead_instance *inst; int err; - inst = aead_geniv_alloc(tmpl, tb, 0, 0); + inst = aead_geniv_alloc(tmpl, tb); if (IS_ERR(inst)) return PTR_ERR(inst); diff --git a/crypto/geniv.c b/crypto/geniv.c index 6a90c52d49ad..07496c8af0ab 100644 --- a/crypto/geniv.c +++ b/crypto/geniv.c @@ -39,7 +39,7 @@ static void aead_geniv_free(struct aead_instance *inst) } struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, - struct rtattr **tb, u32 type, u32 mask) + struct rtattr **tb) { struct crypto_aead_spawn *spawn; struct crypto_attr_type *algt; @@ -47,6 +47,7 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, struct aead_alg *alg; unsigned int ivsize; unsigned int maxauthsize; + u32 mask; int err; algt = crypto_get_attr_type(tb); @@ -63,10 +64,10 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, spawn = aead_instance_ctx(inst); /* Ignore async algorithms if necessary. 
*/ - mask |= crypto_requires_sync(algt->type, algt->mask); + mask = crypto_requires_sync(algt->type, algt->mask); err = crypto_grab_aead(spawn, aead_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), type, mask); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; diff --git a/crypto/seqiv.c b/crypto/seqiv.c index f124b9b54e15..e48f875a7aac 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c @@ -138,7 +138,7 @@ static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb) struct aead_instance *inst; int err; - inst = aead_geniv_alloc(tmpl, tb, 0, 0); + inst = aead_geniv_alloc(tmpl, tb); if (IS_ERR(inst)) return PTR_ERR(inst); diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h index 229d37681a9d..7fd7126f593a 100644 --- a/include/crypto/internal/geniv.h +++ b/include/crypto/internal/geniv.h @@ -20,7 +20,7 @@ struct aead_geniv_ctx { }; struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, - struct rtattr **tb, u32 type, u32 mask); + struct rtattr **tb); int aead_init_geniv(struct crypto_aead *tfm); void aead_exit_geniv(struct crypto_aead *tfm); -- cgit v1.2.3 From 7bcb2c99f8ed032cfb3f5596b4dccac6b1f501df Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 9 Jul 2020 23:20:38 -0700 Subject: crypto: algapi - use common mechanism for inheriting flags The flag CRYPTO_ALG_ASYNC is "inherited" in the sense that when a template is instantiated, the template will have CRYPTO_ALG_ASYNC set if any of the algorithms it uses has CRYPTO_ALG_ASYNC set. We'd like to add a second flag (CRYPTO_ALG_ALLOCATES_MEMORY) that gets "inherited" in the same way. This is difficult because the handling of CRYPTO_ALG_ASYNC is hardcoded everywhere. Address this by: - Add CRYPTO_ALG_INHERITED_FLAGS, which contains the set of flags that have these inheritance semantics. - Add crypto_algt_inherited_mask(), for use by template ->create() methods. It returns any of these flags that the user asked to be unset and thus must be passed in the 'mask' to crypto_grab_*(). - Also modify crypto_check_attr_type() to handle computing the 'mask' so that most templates can just use this. - Make crypto_grab_*() propagate these flags to the template instance being created so that templates don't have to do this themselves. Make crypto/simd.c propagate these flags too, since it "wraps" another algorithm, similar to a template. Based on a patch by Mikulas Patocka (https://lore.kernel.org/r/alpine.LRH.2.02.2006301414580.30526@file01.intranet.prod.int.rdu2.redhat.com). 
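As a sketch of the resulting pattern (mirroring the template conversions in the diff below; spawn, inst and tb come from the surrounding ->create() context):

	u32 mask;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return err;

	/* mask now restricts the inherited flags of any inner algorithm */
	err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
				   crypto_attr_alg_name(tb[1]), 0, mask);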
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/adiantum.c | 14 +++-------- crypto/algapi.c | 21 ++++++++++++++++- crypto/authenc.c | 14 +++-------- crypto/authencesn.c | 14 +++-------- crypto/ccm.c | 33 ++++++++------------------ crypto/chacha20poly1305.c | 14 +++-------- crypto/cmac.c | 5 ++-- crypto/cryptd.c | 59 ++++++++++++++++++++++++----------------------- crypto/ctr.c | 19 ++++----------- crypto/cts.c | 13 +++-------- crypto/essiv.c | 11 ++++++--- crypto/gcm.c | 40 ++++++++------------------------ crypto/geniv.c | 14 +++-------- crypto/hmac.c | 5 ++-- crypto/lrw.c | 13 +++-------- crypto/pcrypt.c | 14 ++++------- crypto/rsa-pkcs1pad.c | 13 +++-------- crypto/simd.c | 6 +++-- crypto/skcipher.c | 15 ++++-------- crypto/vmac.c | 5 ++-- crypto/xcbc.c | 5 ++-- crypto/xts.c | 17 ++++---------- include/crypto/algapi.h | 23 ++++++++++++------ 23 files changed, 153 insertions(+), 234 deletions(-) (limited to 'include') diff --git a/crypto/adiantum.c b/crypto/adiantum.c index cf2b9f4103dd..7fbdc3270984 100644 --- a/crypto/adiantum.c +++ b/crypto/adiantum.c @@ -490,7 +490,6 @@ static bool adiantum_supported_algorithms(struct skcipher_alg *streamcipher_alg, static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; const char *nhpoly1305_name; struct skcipher_instance *inst; @@ -500,14 +499,9 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) struct shash_alg *hash_alg; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); if (!inst) @@ -565,8 +559,6 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = streamcipher_alg->base.cra_flags & - CRYPTO_ALG_ASYNC; inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE; inst->alg.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx); inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask | diff --git a/crypto/algapi.c b/crypto/algapi.c index 92abdf675992..fdabf2675b63 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -690,6 +690,8 @@ int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst, spawn->mask = mask; spawn->next = inst->spawns; inst->spawns = spawn; + inst->alg.cra_flags |= + (alg->cra_flags & CRYPTO_ALG_INHERITED_FLAGS); err = 0; } up_write(&crypto_alg_sem); @@ -816,7 +818,23 @@ struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb) } EXPORT_SYMBOL_GPL(crypto_get_attr_type); -int crypto_check_attr_type(struct rtattr **tb, u32 type) +/** + * crypto_check_attr_type() - check algorithm type and compute inherited mask + * @tb: the template parameters + * @type: the algorithm type the template would be instantiated as + * @mask_ret: (output) the mask that should be passed to crypto_grab_*() + * to restrict the flags of any inner algorithms + * + * Validate that the algorithm type the user requested is compatible with the + * one the template would actually be instantiated as. 
E.g., if the user is + * doing crypto_alloc_shash("cbc(aes)", ...), this would return an error because + * the "cbc" template creates an "skcipher" algorithm, not an "shash" algorithm. + * + * Also compute the mask to use to restrict the flags of any inner algorithms. + * + * Return: 0 on success; -errno on failure + */ +int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret) { struct crypto_attr_type *algt; @@ -827,6 +845,7 @@ int crypto_check_attr_type(struct rtattr **tb, u32 type) if ((algt->type ^ type) & algt->mask) return -EINVAL; + *mask_ret = crypto_algt_inherited_mask(algt); return 0; } EXPORT_SYMBOL_GPL(crypto_check_attr_type); diff --git a/crypto/authenc.c b/crypto/authenc.c index 775e7138fd10..670bf1a01d00 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -372,7 +372,6 @@ static void crypto_authenc_free(struct aead_instance *inst) static int crypto_authenc_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct authenc_instance_ctx *ctx; @@ -381,14 +380,9 @@ static int crypto_authenc_create(struct crypto_template *tmpl, struct skcipher_alg *enc; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -423,8 +417,6 @@ static int crypto_authenc_create(struct crypto_template *tmpl, enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = (auth_base->cra_flags | - enc->base.cra_flags) & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = enc->base.cra_priority * 10 + auth_base->cra_priority; inst->alg.base.cra_blocksize = enc->base.cra_blocksize; diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 149b70df2a91..b60e61b1904c 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -390,7 +390,6 @@ static void crypto_authenc_esn_free(struct aead_instance *inst) static int crypto_authenc_esn_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct authenc_esn_instance_ctx *ctx; @@ -399,14 +398,9 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, struct skcipher_alg *enc; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -437,8 +431,6 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = (auth_base->cra_flags | - enc->base.cra_flags) & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = enc->base.cra_priority * 10 + auth_base->cra_priority; inst->alg.base.cra_blocksize = enc->base.cra_blocksize; diff --git a/crypto/ccm.c b/crypto/ccm.c index d1fb01bbc814..494d70901186 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -447,7 +447,6 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, const char *ctr_name, const char *mac_name) { - struct crypto_attr_type *algt; u32 
mask; struct aead_instance *inst; struct ccm_instance_ctx *ictx; @@ -455,14 +454,9 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, struct hash_alg_common *mac; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); if (!inst) @@ -470,7 +464,7 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, ictx = aead_instance_ctx(inst); err = crypto_grab_ahash(&ictx->mac, aead_crypto_instance(inst), - mac_name, 0, CRYPTO_ALG_ASYNC); + mac_name, 0, mask | CRYPTO_ALG_ASYNC); if (err) goto err_free_inst; mac = crypto_spawn_ahash_alg(&ictx->mac); @@ -507,7 +501,6 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, mac->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = (mac->base.cra_priority + ctr->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; @@ -712,21 +705,15 @@ static void crypto_rfc4309_free(struct aead_instance *inst) static int crypto_rfc4309_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct crypto_aead_spawn *spawn; struct aead_alg *alg; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) @@ -759,7 +746,6 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl, CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; @@ -878,9 +864,10 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb) struct shash_instance *inst; struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; + u32 mask; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; @@ -890,7 +877,7 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = shash_instance_ctx(inst); err = crypto_grab_cipher(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; alg = crypto_spawn_cipher_alg(spawn); diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index ccaea5cb66d1..97bbb135e9a6 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -555,7 +555,6 @@ static void chachapoly_free(struct aead_instance *inst) static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, const char *name, unsigned int ivsize) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct chachapoly_instance_ctx *ctx; @@ -566,14 +565,9 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, if (ivsize > CHACHAPOLY_IV_SIZE) return 
-EINVAL; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -613,8 +607,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, poly->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = (chacha->base.cra_flags | - poly->base.cra_flags) & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = (chacha->base.cra_priority + poly->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; diff --git a/crypto/cmac.c b/crypto/cmac.c index 143a6544c873..df36be1efb81 100644 --- a/crypto/cmac.c +++ b/crypto/cmac.c @@ -225,9 +225,10 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; unsigned long alignmask; + u32 mask; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; @@ -237,7 +238,7 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = shash_instance_ctx(inst); err = crypto_grab_cipher(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; alg = crypto_spawn_cipher_alg(spawn); diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 283212262adb..a1bea0f4baa8 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -191,17 +191,20 @@ static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm) return ictx->queue; } -static inline void cryptd_check_internal(struct rtattr **tb, u32 *type, - u32 *mask) +static void cryptd_type_and_mask(struct crypto_attr_type *algt, + u32 *type, u32 *mask) { - struct crypto_attr_type *algt; + /* + * cryptd is allowed to wrap internal algorithms, but in that case the + * resulting cryptd instance will be marked as internal as well. + */ + *type = algt->type & CRYPTO_ALG_INTERNAL; + *mask = algt->mask & CRYPTO_ALG_INTERNAL; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return; + /* No point in cryptd wrapping an algorithm that's already async. 
*/ + *mask |= CRYPTO_ALG_ASYNC; - *type |= algt->type & CRYPTO_ALG_INTERNAL; - *mask |= algt->mask & CRYPTO_ALG_INTERNAL; + *mask |= crypto_algt_inherited_mask(algt); } static int cryptd_init_instance(struct crypto_instance *inst, @@ -364,6 +367,7 @@ static void cryptd_skcipher_free(struct skcipher_instance *inst) static int cryptd_create_skcipher(struct crypto_template *tmpl, struct rtattr **tb, + struct crypto_attr_type *algt, struct cryptd_queue *queue) { struct skcipherd_instance_ctx *ctx; @@ -373,10 +377,7 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl, u32 mask; int err; - type = 0; - mask = CRYPTO_ALG_ASYNC; - - cryptd_check_internal(tb, &type, &mask); + cryptd_type_and_mask(algt, &type, &mask); inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -395,9 +396,8 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl, if (err) goto err_free_inst; - inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC | - (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); - + inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC | + (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg); inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg); @@ -633,16 +633,17 @@ static void cryptd_hash_free(struct ahash_instance *inst) } static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, + struct crypto_attr_type *algt, struct cryptd_queue *queue) { struct hashd_instance_ctx *ctx; struct ahash_instance *inst; struct shash_alg *alg; - u32 type = 0; - u32 mask = 0; + u32 type; + u32 mask; int err; - cryptd_check_internal(tb, &type, &mask); + cryptd_type_and_mask(algt, &type, &mask); inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -661,10 +662,9 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, if (err) goto err_free_inst; - inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC | - (alg->base.cra_flags & (CRYPTO_ALG_INTERNAL | + inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC | + (alg->base.cra_flags & (CRYPTO_ALG_INTERNAL| CRYPTO_ALG_OPTIONAL_KEY)); - inst->alg.halg.digestsize = alg->digestsize; inst->alg.halg.statesize = alg->statesize; inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx); @@ -820,16 +820,17 @@ static void cryptd_aead_free(struct aead_instance *inst) static int cryptd_create_aead(struct crypto_template *tmpl, struct rtattr **tb, + struct crypto_attr_type *algt, struct cryptd_queue *queue) { struct aead_instance_ctx *ctx; struct aead_instance *inst; struct aead_alg *alg; - u32 type = 0; - u32 mask = CRYPTO_ALG_ASYNC; + u32 type; + u32 mask; int err; - cryptd_check_internal(tb, &type, &mask); + cryptd_type_and_mask(algt, &type, &mask); inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -848,8 +849,8 @@ static int cryptd_create_aead(struct crypto_template *tmpl, if (err) goto err_free_inst; - inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC | - (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); + inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC | + (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx); inst->alg.ivsize = crypto_aead_alg_ivsize(alg); @@ -884,11 +885,11 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_SKCIPHER: - return cryptd_create_skcipher(tmpl, tb, &queue); + return cryptd_create_skcipher(tmpl, tb, 
algt, &queue); case CRYPTO_ALG_TYPE_HASH: - return cryptd_create_hash(tmpl, tb, &queue); + return cryptd_create_hash(tmpl, tb, algt, &queue); case CRYPTO_ALG_TYPE_AEAD: - return cryptd_create_aead(tmpl, tb, &queue); + return cryptd_create_aead(tmpl, tb, algt, &queue); } return -EINVAL; diff --git a/crypto/ctr.c b/crypto/ctr.c index 31ac4ae598e1..ae8d88c715d6 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -256,29 +256,22 @@ static void crypto_rfc3686_free(struct skcipher_instance *inst) static int crypto_rfc3686_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; struct skcipher_instance *inst; struct skcipher_alg *alg; struct crypto_skcipher_spawn *spawn; u32 mask; - int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; + mask |= crypto_requires_off(crypto_get_attr_type(tb), + CRYPTO_ALG_NEED_FALLBACK); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) return -ENOMEM; - mask = crypto_requires_sync(algt->type, algt->mask) | - crypto_requires_off(algt->type, algt->mask, - CRYPTO_ALG_NEED_FALLBACK); - spawn = skcipher_instance_ctx(inst); err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst), @@ -310,8 +303,6 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl, inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; - inst->alg.ivsize = CTR_RFC3686_IV_SIZE; inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) + diff --git a/crypto/cts.c b/crypto/cts.c index 5e005c4f0221..3766d47ebcc0 100644 --- a/crypto/cts.c +++ b/crypto/cts.c @@ -325,19 +325,13 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_skcipher_spawn *spawn; struct skcipher_instance *inst; - struct crypto_attr_type *algt; struct skcipher_alg *alg; u32 mask; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) @@ -364,7 +358,6 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) if (err) goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = alg->base.cra_blocksize; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; diff --git a/crypto/essiv.c b/crypto/essiv.c index a7f45dbc4ee2..d012be23d496 100644 --- a/crypto/essiv.c +++ b/crypto/essiv.c @@ -466,7 +466,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) return PTR_ERR(shash_name); type = algt->type & algt->mask; - mask = crypto_requires_sync(algt->type, algt->mask); + mask = crypto_algt_inherited_mask(algt); switch (type) { case CRYPTO_ALG_TYPE_SKCIPHER: @@ -525,7 +525,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) /* Synchronous hash, e.g., "sha256" */ _hash_alg = crypto_alg_mod_lookup(shash_name, CRYPTO_ALG_TYPE_SHASH, - 
CRYPTO_ALG_TYPE_MASK); + CRYPTO_ALG_TYPE_MASK | mask); if (IS_ERR(_hash_alg)) { err = PTR_ERR(_hash_alg); goto out_drop_skcipher; @@ -557,7 +557,12 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto out_free_hash; - base->cra_flags = block_base->cra_flags & CRYPTO_ALG_ASYNC; + /* + * hash_alg wasn't gotten via crypto_grab*(), so we need to inherit its + * flags manually. + */ + base->cra_flags |= (hash_alg->base.cra_flags & + CRYPTO_ALG_INHERITED_FLAGS); base->cra_blocksize = block_base->cra_blocksize; base->cra_ctxsize = sizeof(struct essiv_tfm_ctx); base->cra_alignmask = block_base->cra_alignmask; diff --git a/crypto/gcm.c b/crypto/gcm.c index 0103d28c541e..3a36a9533c96 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -578,7 +578,6 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, const char *ctr_name, const char *ghash_name) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct gcm_instance_ctx *ctx; @@ -586,14 +585,9 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, struct hash_alg_common *ghash; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -635,8 +629,6 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = (ghash->base.cra_flags | - ctr->base.cra_flags) & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = (ghash->base.cra_priority + ctr->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; @@ -835,21 +827,15 @@ static void crypto_rfc4106_free(struct aead_instance *inst) static int crypto_rfc4106_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct crypto_aead_spawn *spawn; struct aead_alg *alg; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) @@ -882,7 +868,6 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl, CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; @@ -1057,21 +1042,15 @@ static void crypto_rfc4543_free(struct aead_instance *inst) static int crypto_rfc4543_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct aead_alg *alg; struct crypto_rfc4543_instance_ctx *ctx; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + 
sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -1104,7 +1083,6 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl, CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; diff --git a/crypto/geniv.c b/crypto/geniv.c index 07496c8af0ab..bee4621b4f12 100644 --- a/crypto/geniv.c +++ b/crypto/geniv.c @@ -42,7 +42,6 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_aead_spawn *spawn; - struct crypto_attr_type *algt; struct aead_instance *inst; struct aead_alg *alg; unsigned int ivsize; @@ -50,12 +49,9 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, u32 mask; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return ERR_CAST(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return ERR_PTR(-EINVAL); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return ERR_PTR(err); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) @@ -63,9 +59,6 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, spawn = aead_instance_ctx(inst); - /* Ignore async algorithms if necessary. */ - mask = crypto_requires_sync(algt->type, algt->mask); - err = crypto_grab_aead(spawn, aead_crypto_instance(inst), crypto_attr_alg_name(tb[1]), 0, mask); if (err) @@ -90,7 +83,6 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = alg->base.cra_blocksize; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; diff --git a/crypto/hmac.c b/crypto/hmac.c index e38bfb948278..25856aa7ccbf 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -168,11 +168,12 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) struct crypto_shash_spawn *spawn; struct crypto_alg *alg; struct shash_alg *salg; + u32 mask; int err; int ds; int ss; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; @@ -182,7 +183,7 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = shash_instance_ctx(inst); err = crypto_grab_shash(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; salg = crypto_spawn_shash_alg(spawn); diff --git a/crypto/lrw.c b/crypto/lrw.c index 5b07a7c09296..a709c801ee45 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -297,21 +297,15 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_skcipher_spawn *spawn; struct skcipher_instance *inst; - struct crypto_attr_type *algt; struct skcipher_alg *alg; const char *cipher_name; char ecb_name[CRYPTO_MAX_ALG_NAME]; u32 mask; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; cipher_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(cipher_name)) @@ -379,7 +373,6 @@ static int create(struct 
crypto_template *tmpl, struct rtattr **tb) } else goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE; inst->alg.base.cra_alignmask = alg->base.cra_alignmask | diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index 8bddc65cd509..cbc383a1a3fe 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -226,18 +226,14 @@ static int pcrypt_init_instance(struct crypto_instance *inst, } static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, - u32 type, u32 mask) + struct crypto_attr_type *algt) { struct pcrypt_instance_ctx *ctx; - struct crypto_attr_type *algt; struct aead_instance *inst; struct aead_alg *alg; + u32 mask = crypto_algt_inherited_mask(algt); int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) return -ENOMEM; @@ -254,7 +250,7 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, goto err_free_inst; err = crypto_grab_aead(&ctx->spawn, aead_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; @@ -263,7 +259,7 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, if (err) goto err_free_inst; - inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC; + inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC; inst->alg.ivsize = crypto_aead_alg_ivsize(alg); inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); @@ -298,7 +294,7 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb) switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_AEAD: - return pcrypt_create_aead(tmpl, tb, algt->type, algt->mask); + return pcrypt_create_aead(tmpl, tb, algt); } return -EINVAL; diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index d31031de51bc..4983b2b4a223 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -596,7 +596,6 @@ static void pkcs1pad_free(struct akcipher_instance *inst) static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct akcipher_instance *inst; struct pkcs1pad_inst_ctx *ctx; @@ -604,14 +603,9 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) const char *hash_name; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AKCIPHER) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AKCIPHER, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -658,7 +652,6 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) goto err_free_inst; } - inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = rsa_alg->base.cra_priority; inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx); diff --git a/crypto/simd.c b/crypto/simd.c index 56885af49c24..edaa479a1ec5 100644 --- a/crypto/simd.c +++ b/crypto/simd.c @@ -171,7 +171,8 @@ struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname, drvname) >= CRYPTO_MAX_ALG_NAME) goto out_free_salg; - alg->base.cra_flags = CRYPTO_ALG_ASYNC; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | + (ialg->base.cra_flags & 
CRYPTO_ALG_INHERITED_FLAGS); alg->base.cra_priority = ialg->base.cra_priority; alg->base.cra_blocksize = ialg->base.cra_blocksize; alg->base.cra_alignmask = ialg->base.cra_alignmask; @@ -417,7 +418,8 @@ struct simd_aead_alg *simd_aead_create_compat(const char *algname, drvname) >= CRYPTO_MAX_ALG_NAME) goto out_free_salg; - alg->base.cra_flags = CRYPTO_ALG_ASYNC; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | + (ialg->base.cra_flags & CRYPTO_ALG_INHERITED_FLAGS); alg->base.cra_priority = ialg->base.cra_priority; alg->base.cra_blocksize = ialg->base.cra_blocksize; alg->base.cra_alignmask = ialg->base.cra_alignmask; diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 7221def7b9a7..3b93a74ad124 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -934,22 +934,17 @@ static void skcipher_free_instance_simple(struct skcipher_instance *inst) struct skcipher_instance *skcipher_alloc_instance_simple( struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct skcipher_instance *inst; struct crypto_cipher_spawn *spawn; struct crypto_alg *cipher_alg; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return ERR_CAST(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return ERR_PTR(-EINVAL); - - mask = crypto_requires_off(algt->type, algt->mask, - CRYPTO_ALG_NEED_FALLBACK); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return ERR_PTR(err); + mask |= crypto_requires_off(crypto_get_attr_type(tb), + CRYPTO_ALG_NEED_FALLBACK); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) diff --git a/crypto/vmac.c b/crypto/vmac.c index 2d906830df96..9b565d1040d6 100644 --- a/crypto/vmac.c +++ b/crypto/vmac.c @@ -620,9 +620,10 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) struct shash_instance *inst; struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; + u32 mask; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; @@ -632,7 +633,7 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = shash_instance_ctx(inst); err = crypto_grab_cipher(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; alg = crypto_spawn_cipher_alg(spawn); diff --git a/crypto/xcbc.c b/crypto/xcbc.c index 598ec88abf0f..af3b7eb5d7c7 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c @@ -191,9 +191,10 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; unsigned long alignmask; + u32 mask; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; @@ -203,7 +204,7 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = shash_instance_ctx(inst); err = crypto_grab_cipher(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; alg = crypto_spawn_cipher_alg(spawn); diff --git a/crypto/xts.c b/crypto/xts.c index 3565f3b863a6..35a30610569b 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -331,19 +331,17 @@ static void crypto_xts_free(struct skcipher_instance *inst) static int create(struct crypto_template *tmpl, struct rtattr **tb) { struct skcipher_instance *inst; - struct 
crypto_attr_type *algt; struct xts_instance_ctx *ctx; struct skcipher_alg *alg; const char *cipher_name; u32 mask; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; + mask |= crypto_requires_off(crypto_get_attr_type(tb), + CRYPTO_ALG_NEED_FALLBACK); cipher_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(cipher_name)) @@ -355,10 +353,6 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) ctx = skcipher_instance_ctx(inst); - mask = crypto_requires_off(algt->type, algt->mask, - CRYPTO_ALG_NEED_FALLBACK | - CRYPTO_ALG_ASYNC); - err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst), cipher_name, 0, mask); if (err == -ENOENT) { @@ -415,7 +409,6 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) } else goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE; inst->alg.base.cra_alignmask = alg->base.cra_alignmask | diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 00a9cf98debe..da64c37482b4 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -116,7 +116,7 @@ struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, void *crypto_spawn_tfm2(struct crypto_spawn *spawn); struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb); -int crypto_check_attr_type(struct rtattr **tb, u32 type); +int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret); const char *crypto_attr_alg_name(struct rtattr *rta); int crypto_attr_u32(struct rtattr *rta, u32 *num); int crypto_inst_setname(struct crypto_instance *inst, const char *name, @@ -235,18 +235,27 @@ static inline struct crypto_async_request *crypto_get_backlog( container_of(queue->backlog, struct crypto_async_request, list); } -static inline int crypto_requires_off(u32 type, u32 mask, u32 off) +static inline u32 crypto_requires_off(struct crypto_attr_type *algt, u32 off) { - return (type ^ off) & mask & off; + return (algt->type ^ off) & algt->mask & off; } /* - * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms. - * Otherwise returns zero. + * When an algorithm uses another algorithm (e.g., if it's an instance of a + * template), these are the flags that should always be set on the "outer" + * algorithm if any "inner" algorithm has them set. */ -static inline int crypto_requires_sync(u32 type, u32 mask) +#define CRYPTO_ALG_INHERITED_FLAGS CRYPTO_ALG_ASYNC + +/* + * Given the type and mask that specify the flags restrictions on a template + * instance being created, return the mask that should be passed to + * crypto_grab_*() (along with type=0) to honor any request the user made to + * have any of the CRYPTO_ALG_INHERITED_FLAGS clear. 
+ */ +static inline u32 crypto_algt_inherited_mask(struct crypto_attr_type *algt) { - return crypto_requires_off(type, mask, CRYPTO_ALG_ASYNC); + return crypto_requires_off(algt, CRYPTO_ALG_INHERITED_FLAGS); } noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size); -- cgit v1.2.3 From 2eb27c11937ee9984c04b75d213a737291c5f58c Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 9 Jul 2020 23:20:39 -0700 Subject: crypto: algapi - add NEED_FALLBACK to INHERITED_FLAGS CRYPTO_ALG_NEED_FALLBACK is handled inconsistently. When it's requested to be clear, some templates propagate that request to child algorithms, while others don't. It's apparently desired for NEED_FALLBACK to be propagated, to avoid deadlocks where a module tries to load itself while it's being initialized, and to avoid unnecessarily complex fallback chains where we have e.g. cbc-aes-$driver falling back to cbc(aes-$driver) where aes-$driver itself falls back to aes-generic, instead of cbc-aes-$driver simply falling back to cbc(aes-generic). There have been a number of fixes to this effect: commit 89027579bc6c ("crypto: xts - Propagate NEED_FALLBACK bit") commit d2c2a85cfe82 ("crypto: ctr - Propagate NEED_FALLBACK bit") commit e6c2e65c70a6 ("crypto: cbc - Propagate NEED_FALLBACK bit") But it seems that other templates can have the same problems too. To avoid this whack-a-mole, just add NEED_FALLBACK to INHERITED_FLAGS so that it's always inherited. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ctr.c | 2 -- crypto/skcipher.c | 2 -- crypto/xts.c | 2 -- include/crypto/algapi.h | 3 ++- include/linux/crypto.h | 4 ++-- 5 files changed, 4 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/crypto/ctr.c b/crypto/ctr.c index ae8d88c715d6..c39fcffba27f 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -265,8 +265,6 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl, err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); if (err) return err; - mask |= crypto_requires_off(crypto_get_attr_type(tb), - CRYPTO_ALG_NEED_FALLBACK); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 3b93a74ad124..467af525848a 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -943,8 +943,6 @@ struct skcipher_instance *skcipher_alloc_instance_simple( err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); if (err) return ERR_PTR(err); - mask |= crypto_requires_off(crypto_get_attr_type(tb), - CRYPTO_ALG_NEED_FALLBACK); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) diff --git a/crypto/xts.c b/crypto/xts.c index 35a30610569b..9a7adab6c3e1 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -340,8 +340,6 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); if (err) return err; - mask |= crypto_requires_off(crypto_get_attr_type(tb), - CRYPTO_ALG_NEED_FALLBACK); cipher_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(cipher_name)) diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index da64c37482b4..22cf4d80959f 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -245,7 +245,8 @@ static inline u32 crypto_requires_off(struct crypto_attr_type *algt, u32 off) * template), these are the flags that should always be set on the "outer" * algorithm if any "inner" algorithm has them set. 
*/ -#define CRYPTO_ALG_INHERITED_FLAGS CRYPTO_ALG_ASYNC +#define CRYPTO_ALG_INHERITED_FLAGS \ + (CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK) /* * Given the type and mask that specify the flags restrictions on a template diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 7cd2d00f0a05..f73f0b51e1cd 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -60,8 +60,8 @@ #define CRYPTO_ALG_ASYNC 0x00000080 /* - * Set this bit if and only if the algorithm requires another algorithm of - * the same type to handle corner cases. + * Set if the algorithm (or an algorithm which it uses) requires another + * algorithm of the same type to handle corner cases. */ #define CRYPTO_ALG_NEED_FALLBACK 0x00000100 -- cgit v1.2.3 From fbb6cda44190d72aa5199d728797aabc6d2ed816 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 9 Jul 2020 23:20:40 -0700 Subject: crypto: algapi - introduce the flag CRYPTO_ALG_ALLOCATES_MEMORY Introduce a new algorithm flag CRYPTO_ALG_ALLOCATES_MEMORY. If this flag is set, then the driver allocates memory in its request routine. Such drivers are not suitable for disk encryption because GFP_ATOMIC allocation can fail anytime (causing random I/O errors) and GFP_KERNEL allocation can recurse into the block layer, causing a deadlock. For now, this flag is only implemented for some algorithm types. We also assume some usage constraints for it to be meaningful, since there are lots of edge cases the crypto API allows (e.g., misaligned or fragmented scatterlists) that mean that nearly any crypto algorithm can allocate memory in some case. See the comment for details. Also add this flag to CRYPTO_ALG_INHERITED_FLAGS so that when a template is instantiated, this flag is set on the template instance if it is set on any algorithm the instance uses. Based on a patch by Mikulas Patocka (https://lore.kernel.org/r/alpine.LRH.2.02.2006301414580.30526@file01.intranet.prod.int.rdu2.redhat.com). Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- include/crypto/algapi.h | 3 ++- include/linux/crypto.h | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 22cf4d80959f..143d884d65c7 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -246,7 +246,8 @@ static inline u32 crypto_requires_off(struct crypto_attr_type *algt, u32 off) * algorithm if any "inner" algorithm has them set. */ #define CRYPTO_ALG_INHERITED_FLAGS \ - (CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK) + (CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK | \ + CRYPTO_ALG_ALLOCATES_MEMORY) /* * Given the type and mask that specify the flags restrictions on a template diff --git a/include/linux/crypto.h b/include/linux/crypto.h index f73f0b51e1cd..ef90e07c9635 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -100,6 +100,38 @@ */ #define CRYPTO_NOLOAD 0x00008000 +/* + * The algorithm may allocate memory during request processing, i.e. during + * encryption, decryption, or hashing. Users can request an algorithm with this + * flag unset if they can't handle memory allocation failures. + * + * This flag is currently only implemented for algorithms of type "skcipher", + * "aead", "ahash", "shash", and "cipher". Algorithms of other types might not + * have this flag set even if they allocate memory. + * + * In some edge cases, algorithms can allocate memory regardless of this flag. 
+ * To avoid these cases, users must obey the following usage constraints: + * skcipher: + * - The IV buffer and all scatterlist elements must be aligned to the + * algorithm's alignmask. + * - If the data were to be divided into chunks of size + * crypto_skcipher_walksize() (with any remainder going at the end), no + * chunk can cross a page boundary or a scatterlist element boundary. + * aead: + * - The IV buffer and all scatterlist elements must be aligned to the + * algorithm's alignmask. + * - The first scatterlist element must contain all the associated data, + * and its pages must be !PageHighMem. + * - If the plaintext/ciphertext were to be divided into chunks of size + * crypto_aead_walksize() (with the remainder going at the end), no chunk + * can cross a page boundary or a scatterlist element boundary. + * ahash: + * - The result buffer must be aligned to the algorithm's alignmask. + * - crypto_ahash_finup() must not be used unless the algorithm implements + * ->finup() natively. + */ +#define CRYPTO_ALG_ALLOCATES_MEMORY 0x00010000 + /* * Transform masks and values (for crt_flags). */ -- cgit v1.2.3 From c04011fe8cbd80af1be6e12b53193bf3846750d7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 14 Jul 2020 08:47:43 +0200 Subject: fs: add a vfs_fchown helper Add a helper for struct file based chown operations. To be used by the initramfs code soon. Signed-off-by: Christoph Hellwig Acked-by: Linus Torvalds --- fs/open.c | 29 +++++++++++++++++------------ include/linux/fs.h | 2 ++ 2 files changed, 19 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/fs/open.c b/fs/open.c index 6cd48a61cda3..103c66309bee 100644 --- a/fs/open.c +++ b/fs/open.c @@ -740,23 +740,28 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group AT_SYMLINK_NOFOLLOW); } +int vfs_fchown(struct file *file, uid_t user, gid_t group) +{ + int error; + + error = mnt_want_write_file(file); + if (error) + return error; + audit_file(file); + error = chown_common(&file->f_path, user, group); + mnt_drop_write_file(file); + return error; +} + int ksys_fchown(unsigned int fd, uid_t user, gid_t group) { struct fd f = fdget(fd); int error = -EBADF; - if (!f.file) - goto out; - - error = mnt_want_write_file(f.file); - if (error) - goto out_fput; - audit_file(f.file); - error = chown_common(&f.file->f_path, user, group); - mnt_drop_write_file(f.file); -out_fput: - fdput(f); -out: + if (f.file) { + error = vfs_fchown(f.file, user, group); + fdput(f); + } return error; } diff --git a/include/linux/fs.h b/include/linux/fs.h index f5abba86107d..0ddd64ca0b45 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1744,6 +1744,8 @@ int vfs_mkobj(struct dentry *, umode_t, int (*f)(struct dentry *, umode_t, void *), void *); +int vfs_fchown(struct file *file, uid_t user, gid_t group); + extern long vfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); #ifdef CONFIG_COMPAT -- cgit v1.2.3 From 9e96c8c0e94eea2f69a9705f5d0f51928ea26c17 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 14 Jul 2020 08:55:05 +0200 Subject: fs: add a vfs_fchmod helper Add a helper for struct file based chmode operations. To be used by the initramfs code soon. 
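[Editorial illustration, not part of either patch: a minimal sketch of how an in-kernel caller that already holds a struct file -- such as the initramfs population code these helpers are aimed at -- might use vfs_fchown() and vfs_fchmod() instead of the fd-based ksys_*() calls. The wrapper name and its error handling are assumptions; only the vfs_fchown()/vfs_fchmod() signatures come from the patches.]

#include <linux/fs.h>
#include <linux/init.h>

/* Hypothetical helper, for illustration only. */
static int __init example_set_file_attrs(struct file *file, uid_t uid,
					 gid_t gid, umode_t mode)
{
	int err;

	/* vfs_fchown() takes and drops mnt write access internally. */
	err = vfs_fchown(file, uid, gid);
	if (err)
		return err;

	/* vfs_fchmod() audits the file and applies chmod_common() on file->f_path. */
	return vfs_fchmod(file, mode);
}

Callers that only have a file descriptor keep using ksys_fchown()/ksys_fchmod(), which after these patches are thin wrappers around the new helpers.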
Signed-off-by: Christoph Hellwig Acked-by: Linus Torvalds --- fs/open.c | 9 +++++++-- include/linux/fs.h | 1 + 2 files changed, 8 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/fs/open.c b/fs/open.c index 103c66309bee..75166f071d28 100644 --- a/fs/open.c +++ b/fs/open.c @@ -602,14 +602,19 @@ out_unlock: return error; } +int vfs_fchmod(struct file *file, umode_t mode) +{ + audit_file(file); + return chmod_common(&file->f_path, mode); +} + int ksys_fchmod(unsigned int fd, umode_t mode) { struct fd f = fdget(fd); int err = -EBADF; if (f.file) { - audit_file(f.file); - err = chmod_common(&f.file->f_path, mode); + err = vfs_fchmod(f.file, mode); fdput(f); } return err; diff --git a/include/linux/fs.h b/include/linux/fs.h index 0ddd64ca0b45..635086726f20 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1745,6 +1745,7 @@ int vfs_mkobj(struct dentry *, umode_t, void *); int vfs_fchown(struct file *file, uid_t user, gid_t group); +int vfs_fchmod(struct file *file, umode_t mode); extern long vfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); -- cgit v1.2.3 From 4f5b246b37e024955c0fcca0c7f5952089052d1d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 7 Jun 2020 16:18:59 +0200 Subject: md: move the early init autodetect code to drivers/md/ Just like the NFS and CIFS root code this better lives with the driver it is tightly integrated with. Signed-off-by: Christoph Hellwig Acked-by: Song Liu Acked-by: Linus Torvalds --- drivers/md/Makefile | 3 + drivers/md/md-autodetect.c | 315 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/raid/detect.h | 8 ++ init/Makefile | 1 - init/do_mounts.c | 1 + init/do_mounts.h | 10 -- init/do_mounts_md.c | 304 ------------------------------------------ 7 files changed, 327 insertions(+), 315 deletions(-) create mode 100644 drivers/md/md-autodetect.c delete mode 100644 init/do_mounts_md.c (limited to 'include') diff --git a/drivers/md/Makefile b/drivers/md/Makefile index 31840f95cd40..6d3e234dc46a 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile @@ -43,6 +43,9 @@ obj-$(CONFIG_MD_FAULTY) += faulty.o obj-$(CONFIG_MD_CLUSTER) += md-cluster.o obj-$(CONFIG_BCACHE) += bcache/ obj-$(CONFIG_BLK_DEV_MD) += md-mod.o +ifeq ($(CONFIG_BLK_DEV_MD),y) +obj-y += md-autodetect.o +endif obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o obj-$(CONFIG_BLK_DEV_DM_BUILTIN) += dm-builtin.o obj-$(CONFIG_DM_UNSTRIPED) += dm-unstripe.o diff --git a/drivers/md/md-autodetect.c b/drivers/md/md-autodetect.c new file mode 100644 index 000000000000..fe806f7b9759 --- /dev/null +++ b/drivers/md/md-autodetect.c @@ -0,0 +1,315 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * When md (and any require personalities) are compiled into the kernel + * (not a module), arrays can be assembles are boot time using with AUTODETECT + * where specially marked partitions are registered with md_autodetect_dev(), + * and with MD_BOOT where devices to be collected are given on the boot line + * with md=..... + * The code for that is here. 
+ */ + +#ifdef CONFIG_MD_AUTODETECT +static int __initdata raid_noautodetect; +#else +static int __initdata raid_noautodetect=1; +#endif +static int __initdata raid_autopart; + +static struct { + int minor; + int partitioned; + int level; + int chunk; + char *device_names; +} md_setup_args[256] __initdata; + +static int md_setup_ents __initdata; + +/* + * Parse the command-line parameters given our kernel, but do not + * actually try to invoke the MD device now; that is handled by + * md_setup_drive after the low-level disk drivers have initialised. + * + * 27/11/1999: Fixed to work correctly with the 2.3 kernel (which + * assigns the task of parsing integer arguments to the + * invoked program now). Added ability to initialise all + * the MD devices (by specifying multiple "md=" lines) + * instead of just one. -- KTK + * 18May2000: Added support for persistent-superblock arrays: + * md=n,0,factor,fault,device-list uses RAID0 for device n + * md=n,-1,factor,fault,device-list uses LINEAR for device n + * md=n,device-list reads a RAID superblock from the devices + * elements in device-list are read by name_to_kdev_t so can be + * a hex number or something like /dev/hda1 /dev/sdb + * 2001-06-03: Dave Cinege + * Shifted name_to_kdev_t() and related operations to md_set_drive() + * for later execution. Rewrote section to make devfs compatible. + */ +static int __init md_setup(char *str) +{ + int minor, level, factor, fault, partitioned = 0; + char *pername = ""; + char *str1; + int ent; + + if (*str == 'd') { + partitioned = 1; + str++; + } + if (get_option(&str, &minor) != 2) { /* MD Number */ + printk(KERN_WARNING "md: Too few arguments supplied to md=.\n"); + return 0; + } + str1 = str; + for (ent=0 ; ent< md_setup_ents ; ent++) + if (md_setup_args[ent].minor == minor && + md_setup_args[ent].partitioned == partitioned) { + printk(KERN_WARNING "md: md=%s%d, Specified more than once. " + "Replacing previous definition.\n", partitioned?"d":"", minor); + break; + } + if (ent >= ARRAY_SIZE(md_setup_args)) { + printk(KERN_WARNING "md: md=%s%d - too many md initialisations\n", partitioned?"d":"", minor); + return 0; + } + if (ent >= md_setup_ents) + md_setup_ents++; + switch (get_option(&str, &level)) { /* RAID level */ + case 2: /* could be 0 or -1.. 
*/ + if (level == 0 || level == LEVEL_LINEAR) { + if (get_option(&str, &factor) != 2 || /* Chunk Size */ + get_option(&str, &fault) != 2) { + printk(KERN_WARNING "md: Too few arguments supplied to md=.\n"); + return 0; + } + md_setup_args[ent].level = level; + md_setup_args[ent].chunk = 1 << (factor+12); + if (level == LEVEL_LINEAR) + pername = "linear"; + else + pername = "raid0"; + break; + } + /* FALL THROUGH */ + case 1: /* the first device is numeric */ + str = str1; + /* FALL THROUGH */ + case 0: + md_setup_args[ent].level = LEVEL_NONE; + pername="super-block"; + } + + printk(KERN_INFO "md: Will configure md%d (%s) from %s, below.\n", + minor, pername, str); + md_setup_args[ent].device_names = str; + md_setup_args[ent].partitioned = partitioned; + md_setup_args[ent].minor = minor; + + return 1; +} + +static inline int create_dev(char *name, dev_t dev) +{ + ksys_unlink(name); + return ksys_mknod(name, S_IFBLK|0600, new_encode_dev(dev)); +} + +static void __init md_setup_drive(void) +{ + int minor, i, ent, partitioned; + dev_t dev; + dev_t devices[MD_SB_DISKS+1]; + + for (ent = 0; ent < md_setup_ents ; ent++) { + int fd; + int err = 0; + char *devname; + mdu_disk_info_t dinfo; + char name[16]; + + minor = md_setup_args[ent].minor; + partitioned = md_setup_args[ent].partitioned; + devname = md_setup_args[ent].device_names; + + sprintf(name, "/dev/md%s%d", partitioned?"_d":"", minor); + if (partitioned) + dev = MKDEV(mdp_major, minor << MdpMinorShift); + else + dev = MKDEV(MD_MAJOR, minor); + create_dev(name, dev); + for (i = 0; i < MD_SB_DISKS && devname != NULL; i++) { + struct kstat stat; + char *p; + char comp_name[64]; + + p = strchr(devname, ','); + if (p) + *p++ = 0; + + dev = name_to_dev_t(devname); + if (strncmp(devname, "/dev/", 5) == 0) + devname += 5; + snprintf(comp_name, 63, "/dev/%s", devname); + if (vfs_stat(comp_name, &stat) == 0 && + S_ISBLK(stat.mode)) + dev = new_decode_dev(stat.rdev); + if (!dev) { + printk(KERN_WARNING "md: Unknown device name: %s\n", devname); + break; + } + + devices[i] = dev; + + devname = p; + } + devices[i] = 0; + + if (!i) + continue; + + printk(KERN_INFO "md: Loading md%s%d: %s\n", + partitioned ? "_d" : "", minor, + md_setup_args[ent].device_names); + + fd = ksys_open(name, 0, 0); + if (fd < 0) { + printk(KERN_ERR "md: open failed - cannot start " + "array %s\n", name); + continue; + } + if (ksys_ioctl(fd, SET_ARRAY_INFO, 0) == -EBUSY) { + printk(KERN_WARNING + "md: Ignoring md=%d, already autodetected. (Use raid=noautodetect)\n", + minor); + ksys_close(fd); + continue; + } + + if (md_setup_args[ent].level != LEVEL_NONE) { + /* non-persistent */ + mdu_array_info_t ainfo; + ainfo.level = md_setup_args[ent].level; + ainfo.size = 0; + ainfo.nr_disks =0; + ainfo.raid_disks =0; + while (devices[ainfo.raid_disks]) + ainfo.raid_disks++; + ainfo.md_minor =minor; + ainfo.not_persistent = 1; + + ainfo.state = (1 << MD_SB_CLEAN); + ainfo.layout = 0; + ainfo.chunk_size = md_setup_args[ent].chunk; + err = ksys_ioctl(fd, SET_ARRAY_INFO, (long)&ainfo); + for (i = 0; !err && i <= MD_SB_DISKS; i++) { + dev = devices[i]; + if (!dev) + break; + dinfo.number = i; + dinfo.raid_disk = i; + dinfo.state = (1<= 0) { + ksys_ioctl(fd, RAID_AUTORUN, raid_autopart); + ksys_close(fd); + } +} + +void __init md_run_setup(void) +{ + create_dev("/dev/md0", MKDEV(MD_MAJOR, 0)); + + if (raid_noautodetect) + printk(KERN_INFO "md: Skipping autodetection of RAID arrays. 
(raid=autodetect will force)\n"); + else + autodetect_raid(); + md_setup_drive(); +} diff --git a/include/linux/raid/detect.h b/include/linux/raid/detect.h index 37dd3f40cd31..1f029a71c3ef 100644 --- a/include/linux/raid/detect.h +++ b/include/linux/raid/detect.h @@ -1,3 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ void md_autodetect_dev(dev_t dev); + +#ifdef CONFIG_BLK_DEV_MD +void md_run_setup(void); +#else +static inline void md_run_setup(void) +{ +} +#endif diff --git a/init/Makefile b/init/Makefile index 57499b1ff471..6bc37f64b361 100644 --- a/init/Makefile +++ b/init/Makefile @@ -18,7 +18,6 @@ obj-y += init_task.o mounts-y := do_mounts.o mounts-$(CONFIG_BLK_DEV_RAM) += do_mounts_rd.o mounts-$(CONFIG_BLK_DEV_INITRD) += do_mounts_initrd.o -mounts-$(CONFIG_BLK_DEV_MD) += do_mounts_md.o # dependencies on generated files need to be listed explicitly $(obj)/version.o: include/generated/compile.h diff --git a/init/do_mounts.c b/init/do_mounts.c index 29d326b6c29d..1a4dfa17fb28 100644 --- a/init/do_mounts.c +++ b/init/do_mounts.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include "do_mounts.h" diff --git a/init/do_mounts.h b/init/do_mounts.h index 7513d1c14d13..50d6c8941e15 100644 --- a/init/do_mounts.h +++ b/init/do_mounts.h @@ -41,13 +41,3 @@ bool __init initrd_load(void); static inline bool initrd_load(void) { return false; } #endif - -#ifdef CONFIG_BLK_DEV_MD - -void md_run_setup(void); - -#else - -static inline void md_run_setup(void) {} - -#endif diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c deleted file mode 100644 index 359363e85ccd..000000000000 --- a/init/do_mounts_md.c +++ /dev/null @@ -1,304 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include -#include -#include - -#include "do_mounts.h" - -/* - * When md (and any require personalities) are compiled into the kernel - * (not a module), arrays can be assembles are boot time using with AUTODETECT - * where specially marked partitions are registered with md_autodetect_dev(), - * and with MD_BOOT where devices to be collected are given on the boot line - * with md=..... - * The code for that is here. - */ - -#ifdef CONFIG_MD_AUTODETECT -static int __initdata raid_noautodetect; -#else -static int __initdata raid_noautodetect=1; -#endif -static int __initdata raid_autopart; - -static struct { - int minor; - int partitioned; - int level; - int chunk; - char *device_names; -} md_setup_args[256] __initdata; - -static int md_setup_ents __initdata; - -/* - * Parse the command-line parameters given our kernel, but do not - * actually try to invoke the MD device now; that is handled by - * md_setup_drive after the low-level disk drivers have initialised. - * - * 27/11/1999: Fixed to work correctly with the 2.3 kernel (which - * assigns the task of parsing integer arguments to the - * invoked program now). Added ability to initialise all - * the MD devices (by specifying multiple "md=" lines) - * instead of just one. -- KTK - * 18May2000: Added support for persistent-superblock arrays: - * md=n,0,factor,fault,device-list uses RAID0 for device n - * md=n,-1,factor,fault,device-list uses LINEAR for device n - * md=n,device-list reads a RAID superblock from the devices - * elements in device-list are read by name_to_kdev_t so can be - * a hex number or something like /dev/hda1 /dev/sdb - * 2001-06-03: Dave Cinege - * Shifted name_to_kdev_t() and related operations to md_set_drive() - * for later execution. Rewrote section to make devfs compatible. 
- */ -static int __init md_setup(char *str) -{ - int minor, level, factor, fault, partitioned = 0; - char *pername = ""; - char *str1; - int ent; - - if (*str == 'd') { - partitioned = 1; - str++; - } - if (get_option(&str, &minor) != 2) { /* MD Number */ - printk(KERN_WARNING "md: Too few arguments supplied to md=.\n"); - return 0; - } - str1 = str; - for (ent=0 ; ent< md_setup_ents ; ent++) - if (md_setup_args[ent].minor == minor && - md_setup_args[ent].partitioned == partitioned) { - printk(KERN_WARNING "md: md=%s%d, Specified more than once. " - "Replacing previous definition.\n", partitioned?"d":"", minor); - break; - } - if (ent >= ARRAY_SIZE(md_setup_args)) { - printk(KERN_WARNING "md: md=%s%d - too many md initialisations\n", partitioned?"d":"", minor); - return 0; - } - if (ent >= md_setup_ents) - md_setup_ents++; - switch (get_option(&str, &level)) { /* RAID level */ - case 2: /* could be 0 or -1.. */ - if (level == 0 || level == LEVEL_LINEAR) { - if (get_option(&str, &factor) != 2 || /* Chunk Size */ - get_option(&str, &fault) != 2) { - printk(KERN_WARNING "md: Too few arguments supplied to md=.\n"); - return 0; - } - md_setup_args[ent].level = level; - md_setup_args[ent].chunk = 1 << (factor+12); - if (level == LEVEL_LINEAR) - pername = "linear"; - else - pername = "raid0"; - break; - } - /* FALL THROUGH */ - case 1: /* the first device is numeric */ - str = str1; - /* FALL THROUGH */ - case 0: - md_setup_args[ent].level = LEVEL_NONE; - pername="super-block"; - } - - printk(KERN_INFO "md: Will configure md%d (%s) from %s, below.\n", - minor, pername, str); - md_setup_args[ent].device_names = str; - md_setup_args[ent].partitioned = partitioned; - md_setup_args[ent].minor = minor; - - return 1; -} - -static void __init md_setup_drive(void) -{ - int minor, i, ent, partitioned; - dev_t dev; - dev_t devices[MD_SB_DISKS+1]; - - for (ent = 0; ent < md_setup_ents ; ent++) { - int fd; - int err = 0; - char *devname; - mdu_disk_info_t dinfo; - char name[16]; - - minor = md_setup_args[ent].minor; - partitioned = md_setup_args[ent].partitioned; - devname = md_setup_args[ent].device_names; - - sprintf(name, "/dev/md%s%d", partitioned?"_d":"", minor); - if (partitioned) - dev = MKDEV(mdp_major, minor << MdpMinorShift); - else - dev = MKDEV(MD_MAJOR, minor); - create_dev(name, dev); - for (i = 0; i < MD_SB_DISKS && devname != NULL; i++) { - struct kstat stat; - char *p; - char comp_name[64]; - - p = strchr(devname, ','); - if (p) - *p++ = 0; - - dev = name_to_dev_t(devname); - if (strncmp(devname, "/dev/", 5) == 0) - devname += 5; - snprintf(comp_name, 63, "/dev/%s", devname); - if (vfs_stat(comp_name, &stat) == 0 && - S_ISBLK(stat.mode)) - dev = new_decode_dev(stat.rdev); - if (!dev) { - printk(KERN_WARNING "md: Unknown device name: %s\n", devname); - break; - } - - devices[i] = dev; - - devname = p; - } - devices[i] = 0; - - if (!i) - continue; - - printk(KERN_INFO "md: Loading md%s%d: %s\n", - partitioned ? "_d" : "", minor, - md_setup_args[ent].device_names); - - fd = ksys_open(name, 0, 0); - if (fd < 0) { - printk(KERN_ERR "md: open failed - cannot start " - "array %s\n", name); - continue; - } - if (ksys_ioctl(fd, SET_ARRAY_INFO, 0) == -EBUSY) { - printk(KERN_WARNING - "md: Ignoring md=%d, already autodetected. 
(Use raid=noautodetect)\n", - minor); - ksys_close(fd); - continue; - } - - if (md_setup_args[ent].level != LEVEL_NONE) { - /* non-persistent */ - mdu_array_info_t ainfo; - ainfo.level = md_setup_args[ent].level; - ainfo.size = 0; - ainfo.nr_disks =0; - ainfo.raid_disks =0; - while (devices[ainfo.raid_disks]) - ainfo.raid_disks++; - ainfo.md_minor =minor; - ainfo.not_persistent = 1; - - ainfo.state = (1 << MD_SB_CLEAN); - ainfo.layout = 0; - ainfo.chunk_size = md_setup_args[ent].chunk; - err = ksys_ioctl(fd, SET_ARRAY_INFO, (long)&ainfo); - for (i = 0; !err && i <= MD_SB_DISKS; i++) { - dev = devices[i]; - if (!dev) - break; - dinfo.number = i; - dinfo.raid_disk = i; - dinfo.state = (1<= 0) { - ksys_ioctl(fd, RAID_AUTORUN, raid_autopart); - ksys_close(fd); - } -} - -void __init md_run_setup(void) -{ - create_dev("/dev/md0", MKDEV(MD_MAJOR, 0)); - - if (raid_noautodetect) - printk(KERN_INFO "md: Skipping autodetection of RAID arrays. (raid=autodetect will force)\n"); - else - autodetect_raid(); - md_setup_drive(); -} -- cgit v1.2.3 From 1a6a050620e496abf42749a2e1d3882645cc053f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 7 Jun 2020 16:33:01 +0200 Subject: md: remove the kernel version of md_u.h mdp_major can just move to drivers/md/md.h. Signed-off-by: Christoph Hellwig Acked-by: Song Liu Acked-by: Linus Torvalds --- drivers/md/md.h | 1 + include/linux/raid/md_u.h | 13 ------------- 2 files changed, 1 insertion(+), 13 deletions(-) delete mode 100644 include/linux/raid/md_u.h (limited to 'include') diff --git a/drivers/md/md.h b/drivers/md/md.h index 37315a3f28e9..6f8fff77ce10 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -801,6 +801,7 @@ static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio mddev->queue->limits.max_write_zeroes_sectors = 0; } +extern int mdp_major; void md_autostart_arrays(int part); #endif /* _MD_MD_H */ diff --git a/include/linux/raid/md_u.h b/include/linux/raid/md_u.h deleted file mode 100644 index 8dfec085a20e..000000000000 --- a/include/linux/raid/md_u.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - md_u.h : user <=> kernel API between Linux raidtools and RAID drivers - Copyright (C) 1998 Ingo Molnar - -*/ -#ifndef _MD_U_H -#define _MD_U_H - -#include - -extern int mdp_major; -#endif -- cgit v1.2.3 From d3fa60d7bfdc9a0ff1a524bdef96b3db1fd62022 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 8 Jul 2020 09:45:11 +0200 Subject: dma-mapping: move the remaining DMA API calls out of line For a long time the DMA API has been implemented inline in dma-mapping.h, but the function bodies can be quite large. Move them all out of line. This also removes all the dma_direct_* exports as those are just implementation details and should never be used by drivers directly. 
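[Editorial illustration, not part of this patch: driver-facing usage is unchanged by the move; a streaming mapping still goes through the documented dma_* entry points, which now resolve to the exported out-of-line functions in kernel/dma/mapping.c instead of expanding inline at every call site. The device, buffer and transfer direction in this sketch are assumptions.]

#include <linux/dma-mapping.h>

/* Hypothetical driver path: map a kernel buffer for a device-bound transfer. */
static int example_dma_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr;

	/* dma_map_single() ultimately lands in the now out-of-line dma_map_page_attrs(). */
	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... program the hardware with 'addr' and wait for completion ... */

	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
	return 0;
}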
Signed-off-by: Christoph Hellwig Tested-by: Alexey Kardashevskiy Reviewed-by: Alexey Kardashevskiy --- include/linux/dma-direct.h | 58 +++++++++++ include/linux/dma-mapping.h | 247 ++++---------------------------------------- kernel/dma/direct.c | 9 -- kernel/dma/mapping.c | 164 +++++++++++++++++++++++++++++ 4 files changed, 244 insertions(+), 234 deletions(-) (limited to 'include') diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h index ab2e20cba951..2b045c509146 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h @@ -87,4 +87,62 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma, unsigned long attrs); int dma_direct_supported(struct device *dev, u64 mask); bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr); +dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, enum dma_data_direction dir, + unsigned long attrs); +int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, + enum dma_data_direction dir, unsigned long attrs); +dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir, unsigned long attrs); + +#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ + defined(CONFIG_SWIOTLB) +void dma_direct_sync_single_for_device(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir); +void dma_direct_sync_sg_for_device(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir); +#else +static inline void dma_direct_sync_single_for_device(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ +} +static inline void dma_direct_sync_sg_for_device(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir) +{ +} +#endif + +#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ + defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \ + defined(CONFIG_SWIOTLB) +void dma_direct_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs); +void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs); +void dma_direct_sync_single_for_cpu(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir); +void dma_direct_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir); +#else +static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ +} +static inline void dma_direct_unmap_sg(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir, + unsigned long attrs) +{ +} +static inline void dma_direct_sync_single_for_cpu(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ +} +static inline void dma_direct_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir) +{ +} +#endif + +size_t dma_direct_max_mapping_size(struct device *dev); + #endif /* _LINUX_DMA_DIRECT_H */ diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index a33ed3954ed4..bd0a6f5ee445 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -188,73 +188,6 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma, } #endif /* CONFIG_DMA_DECLARE_COHERENT */ -static inline bool dma_is_direct(const struct 
dma_map_ops *ops) -{ - return likely(!ops); -} - -/* - * All the dma_direct_* declarations are here just for the indirect call bypass, - * and must not be used directly drivers! - */ -dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction dir, - unsigned long attrs); -int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, - enum dma_data_direction dir, unsigned long attrs); -dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr, - size_t size, enum dma_data_direction dir, unsigned long attrs); - -#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ - defined(CONFIG_SWIOTLB) -void dma_direct_sync_single_for_device(struct device *dev, - dma_addr_t addr, size_t size, enum dma_data_direction dir); -void dma_direct_sync_sg_for_device(struct device *dev, - struct scatterlist *sgl, int nents, enum dma_data_direction dir); -#else -static inline void dma_direct_sync_single_for_device(struct device *dev, - dma_addr_t addr, size_t size, enum dma_data_direction dir) -{ -} -static inline void dma_direct_sync_sg_for_device(struct device *dev, - struct scatterlist *sgl, int nents, enum dma_data_direction dir) -{ -} -#endif - -#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ - defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \ - defined(CONFIG_SWIOTLB) -void dma_direct_unmap_page(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir, unsigned long attrs); -void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction dir, unsigned long attrs); -void dma_direct_sync_single_for_cpu(struct device *dev, - dma_addr_t addr, size_t size, enum dma_data_direction dir); -void dma_direct_sync_sg_for_cpu(struct device *dev, - struct scatterlist *sgl, int nents, enum dma_data_direction dir); -#else -static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir, unsigned long attrs) -{ -} -static inline void dma_direct_unmap_sg(struct device *dev, - struct scatterlist *sgl, int nents, enum dma_data_direction dir, - unsigned long attrs) -{ -} -static inline void dma_direct_sync_single_for_cpu(struct device *dev, - dma_addr_t addr, size_t size, enum dma_data_direction dir) -{ -} -static inline void dma_direct_sync_sg_for_cpu(struct device *dev, - struct scatterlist *sgl, int nents, enum dma_data_direction dir) -{ -} -#endif - -size_t dma_direct_max_mapping_size(struct device *dev); - #ifdef CONFIG_HAS_DMA #include @@ -271,164 +204,6 @@ static inline void set_dma_ops(struct device *dev, dev->dma_ops = dma_ops; } -static inline dma_addr_t dma_map_page_attrs(struct device *dev, - struct page *page, size_t offset, size_t size, - enum dma_data_direction dir, unsigned long attrs) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - dma_addr_t addr; - - BUG_ON(!valid_dma_direction(dir)); - if (dma_is_direct(ops)) - addr = dma_direct_map_page(dev, page, offset, size, dir, attrs); - else - addr = ops->map_page(dev, page, offset, size, dir, attrs); - debug_dma_map_page(dev, page, offset, size, dir, addr); - - return addr; -} - -static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir, unsigned long attrs) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - if (dma_is_direct(ops)) - dma_direct_unmap_page(dev, addr, size, dir, attrs); - else if (ops->unmap_page) - 
ops->unmap_page(dev, addr, size, dir, attrs); - debug_dma_unmap_page(dev, addr, size, dir); -} - -/* - * dma_maps_sg_attrs returns 0 on error and > 0 on success. - * It should never return a value < 0. - */ -static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, - unsigned long attrs) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - int ents; - - BUG_ON(!valid_dma_direction(dir)); - if (dma_is_direct(ops)) - ents = dma_direct_map_sg(dev, sg, nents, dir, attrs); - else - ents = ops->map_sg(dev, sg, nents, dir, attrs); - BUG_ON(ents < 0); - debug_dma_map_sg(dev, sg, nents, ents, dir); - - return ents; -} - -static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, - unsigned long attrs) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - debug_dma_unmap_sg(dev, sg, nents, dir); - if (dma_is_direct(ops)) - dma_direct_unmap_sg(dev, sg, nents, dir, attrs); - else if (ops->unmap_sg) - ops->unmap_sg(dev, sg, nents, dir, attrs); -} - -static inline dma_addr_t dma_map_resource(struct device *dev, - phys_addr_t phys_addr, - size_t size, - enum dma_data_direction dir, - unsigned long attrs) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - dma_addr_t addr = DMA_MAPPING_ERROR; - - BUG_ON(!valid_dma_direction(dir)); - - /* Don't allow RAM to be mapped */ - if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr)))) - return DMA_MAPPING_ERROR; - - if (dma_is_direct(ops)) - addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs); - else if (ops->map_resource) - addr = ops->map_resource(dev, phys_addr, size, dir, attrs); - - debug_dma_map_resource(dev, phys_addr, size, dir, addr); - return addr; -} - -static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir, - unsigned long attrs) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - if (!dma_is_direct(ops) && ops->unmap_resource) - ops->unmap_resource(dev, addr, size, dir, attrs); - debug_dma_unmap_resource(dev, addr, size, dir); -} - -static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, - size_t size, - enum dma_data_direction dir) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - if (dma_is_direct(ops)) - dma_direct_sync_single_for_cpu(dev, addr, size, dir); - else if (ops->sync_single_for_cpu) - ops->sync_single_for_cpu(dev, addr, size, dir); - debug_dma_sync_single_for_cpu(dev, addr, size, dir); -} - -static inline void dma_sync_single_for_device(struct device *dev, - dma_addr_t addr, size_t size, - enum dma_data_direction dir) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - if (dma_is_direct(ops)) - dma_direct_sync_single_for_device(dev, addr, size, dir); - else if (ops->sync_single_for_device) - ops->sync_single_for_device(dev, addr, size, dir); - debug_dma_sync_single_for_device(dev, addr, size, dir); -} - -static inline void -dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - if (dma_is_direct(ops)) - dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir); - else if (ops->sync_sg_for_cpu) - ops->sync_sg_for_cpu(dev, sg, nelems, dir); - debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir); -} - -static 
inline void -dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - if (dma_is_direct(ops)) - dma_direct_sync_sg_for_device(dev, sg, nelems, dir); - else if (ops->sync_sg_for_device) - ops->sync_sg_for_device(dev, sg, nelems, dir); - debug_dma_sync_sg_for_device(dev, sg, nelems, dir); - -} static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { @@ -439,6 +214,28 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) return 0; } +dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, + size_t offset, size_t size, enum dma_data_direction dir, + unsigned long attrs); +void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir, unsigned long attrs); +int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, unsigned long attrs); +void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + unsigned long attrs); +dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr, + size_t size, enum dma_data_direction dir, unsigned long attrs); +void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir, unsigned long attrs); +void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir); +void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir); +void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir); +void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir); void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs); void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 67f060b86a73..e545d14fccee 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -315,7 +315,6 @@ void dma_direct_sync_single_for_device(struct device *dev, if (!dev_is_dma_coherent(dev)) arch_sync_dma_for_device(paddr, size, dir); } -EXPORT_SYMBOL(dma_direct_sync_single_for_device); void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir) @@ -335,7 +334,6 @@ void dma_direct_sync_sg_for_device(struct device *dev, dir); } } -EXPORT_SYMBOL(dma_direct_sync_sg_for_device); #endif #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ @@ -354,7 +352,6 @@ void dma_direct_sync_single_for_cpu(struct device *dev, if (unlikely(is_swiotlb_buffer(paddr))) swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU); } -EXPORT_SYMBOL(dma_direct_sync_single_for_cpu); void dma_direct_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir) @@ -376,7 +373,6 @@ void dma_direct_sync_sg_for_cpu(struct device *dev, if (!dev_is_dma_coherent(dev)) arch_sync_dma_for_cpu_all(); } -EXPORT_SYMBOL(dma_direct_sync_sg_for_cpu); void dma_direct_unmap_page(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs) @@ -389,7 +385,6 @@ void dma_direct_unmap_page(struct device *dev, dma_addr_t addr, if (unlikely(is_swiotlb_buffer(phys))) swiotlb_tbl_unmap_single(dev, 
phys, size, size, dir, attrs); } -EXPORT_SYMBOL(dma_direct_unmap_page); void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir, unsigned long attrs) @@ -401,7 +396,6 @@ void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir, attrs); } -EXPORT_SYMBOL(dma_direct_unmap_sg); #endif dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, @@ -428,7 +422,6 @@ dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, arch_sync_dma_for_device(phys, size, dir); return dma_addr; } -EXPORT_SYMBOL(dma_direct_map_page); int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir, unsigned long attrs) @@ -450,7 +443,6 @@ out_unmap: dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); return 0; } -EXPORT_SYMBOL(dma_direct_map_sg); dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr, size_t size, enum dma_data_direction dir, unsigned long attrs) @@ -467,7 +459,6 @@ dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr, return dma_addr; } -EXPORT_SYMBOL(dma_direct_map_resource); int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size, diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index a8c18c9a796f..b53953024512 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -105,6 +105,170 @@ void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, } EXPORT_SYMBOL(dmam_alloc_attrs); +static inline bool dma_is_direct(const struct dma_map_ops *ops) +{ + return likely(!ops); +} + +dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, + size_t offset, size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + dma_addr_t addr; + + BUG_ON(!valid_dma_direction(dir)); + if (dma_is_direct(ops)) + addr = dma_direct_map_page(dev, page, offset, size, dir, attrs); + else + addr = ops->map_page(dev, page, offset, size, dir, attrs); + debug_dma_map_page(dev, page, offset, size, dir, addr); + + return addr; +} +EXPORT_SYMBOL(dma_map_page_attrs); + +void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (dma_is_direct(ops)) + dma_direct_unmap_page(dev, addr, size, dir, attrs); + else if (ops->unmap_page) + ops->unmap_page(dev, addr, size, dir, attrs); + debug_dma_unmap_page(dev, addr, size, dir); +} +EXPORT_SYMBOL(dma_unmap_page_attrs); + +/* + * dma_maps_sg_attrs returns 0 on error and > 0 on success. + * It should never return a value < 0. 
+ */ +int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + int ents; + + BUG_ON(!valid_dma_direction(dir)); + if (dma_is_direct(ops)) + ents = dma_direct_map_sg(dev, sg, nents, dir, attrs); + else + ents = ops->map_sg(dev, sg, nents, dir, attrs); + BUG_ON(ents < 0); + debug_dma_map_sg(dev, sg, nents, ents, dir); + + return ents; +} +EXPORT_SYMBOL(dma_map_sg_attrs); + +void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + debug_dma_unmap_sg(dev, sg, nents, dir); + if (dma_is_direct(ops)) + dma_direct_unmap_sg(dev, sg, nents, dir, attrs); + else if (ops->unmap_sg) + ops->unmap_sg(dev, sg, nents, dir, attrs); +} +EXPORT_SYMBOL(dma_unmap_sg_attrs); + +dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + dma_addr_t addr = DMA_MAPPING_ERROR; + + BUG_ON(!valid_dma_direction(dir)); + + /* Don't allow RAM to be mapped */ + if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr)))) + return DMA_MAPPING_ERROR; + + if (dma_is_direct(ops)) + addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs); + else if (ops->map_resource) + addr = ops->map_resource(dev, phys_addr, size, dir, attrs); + + debug_dma_map_resource(dev, phys_addr, size, dir, addr); + return addr; +} +EXPORT_SYMBOL(dma_map_resource); + +void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (!dma_is_direct(ops) && ops->unmap_resource) + ops->unmap_resource(dev, addr, size, dir, attrs); + debug_dma_unmap_resource(dev, addr, size, dir); +} +EXPORT_SYMBOL(dma_unmap_resource); + +void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (dma_is_direct(ops)) + dma_direct_sync_single_for_cpu(dev, addr, size, dir); + else if (ops->sync_single_for_cpu) + ops->sync_single_for_cpu(dev, addr, size, dir); + debug_dma_sync_single_for_cpu(dev, addr, size, dir); +} +EXPORT_SYMBOL(dma_sync_single_for_cpu); + +void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (dma_is_direct(ops)) + dma_direct_sync_single_for_device(dev, addr, size, dir); + else if (ops->sync_single_for_device) + ops->sync_single_for_device(dev, addr, size, dir); + debug_dma_sync_single_for_device(dev, addr, size, dir); +} +EXPORT_SYMBOL(dma_sync_single_for_device); + +void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (dma_is_direct(ops)) + dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir); + else if (ops->sync_sg_for_cpu) + ops->sync_sg_for_cpu(dev, sg, nelems, dir); + debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir); +} +EXPORT_SYMBOL(dma_sync_sg_for_cpu); + +void dma_sync_sg_for_device(struct device *dev, struct 
scatterlist *sg, + int nelems, enum dma_data_direction dir) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (dma_is_direct(ops)) + dma_direct_sync_sg_for_device(dev, sg, nelems, dir); + else if (ops->sync_sg_for_device) + ops->sync_sg_for_device(dev, sg, nelems, dir); + debug_dma_sync_sg_for_device(dev, sg, nelems, dir); +} +EXPORT_SYMBOL(dma_sync_sg_for_device); + /* * Create scatter-list for the already allocated DMA buffer. */ -- cgit v1.2.3 From b4174173005972f8f6497883d08d87e0aba1b604 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 8 Jul 2020 09:47:08 +0200 Subject: dma-mapping: inline the fast path dma-direct calls Inline the single page map/unmap/sync dma-direct calls into the now out of line generic wrappers. This restores the behavior of a single function call that we had before moving the generic calls out of line. Besides the dma-mapping callers there are just a few callers in IOMMU drivers that have a bypass mode, and more of those are going to be switched to the generic bypass soon. Signed-off-by: Christoph Hellwig Tested-by: Alexey Kardashevskiy Reviewed-by: Alexey Kardashevskiy --- include/linux/dma-direct.h | 92 ++++++++++++++++++++++++++++++++++------------ kernel/dma/direct.c | 65 -------------------------------- 2 files changed, 69 insertions(+), 88 deletions(-) (limited to 'include') diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h index 2b045c509146..5a3ce2a24794 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h @@ -1,10 +1,16 @@ /* SPDX-License-Identifier: GPL-2.0 */ +/* + * Internals of the DMA direct mapping implementation. Only for use by the + * DMA mapping code and IOMMU drivers. + */ #ifndef _LINUX_DMA_DIRECT_H #define _LINUX_DMA_DIRECT_H 1 #include +#include #include /* for min_low_pfn */ #include +#include extern unsigned int zone_dma_bits; @@ -87,25 +93,17 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma, unsigned long attrs); int dma_direct_supported(struct device *dev, u64 mask); bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr); -dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction dir, - unsigned long attrs); int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir, unsigned long attrs); dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr, size_t size, enum dma_data_direction dir, unsigned long attrs); +size_t dma_direct_max_mapping_size(struct device *dev); #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ defined(CONFIG_SWIOTLB) -void dma_direct_sync_single_for_device(struct device *dev, - dma_addr_t addr, size_t size, enum dma_data_direction dir); -void dma_direct_sync_sg_for_device(struct device *dev, - struct scatterlist *sgl, int nents, enum dma_data_direction dir); +void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir); #else -static inline void dma_direct_sync_single_for_device(struct device *dev, - dma_addr_t addr, size_t size, enum dma_data_direction dir) -{ -} static inline void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir) { @@ -115,34 +113,82 @@ static inline void dma_direct_sync_sg_for_device(struct device *dev, #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \ 
defined(CONFIG_SWIOTLB) -void dma_direct_unmap_page(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir, unsigned long attrs); void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir, unsigned long attrs); -void dma_direct_sync_single_for_cpu(struct device *dev, - dma_addr_t addr, size_t size, enum dma_data_direction dir); void dma_direct_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir); #else -static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir, unsigned long attrs) -{ -} static inline void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir, unsigned long attrs) { } +static inline void dma_direct_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir) +{ +} +#endif + +static inline void dma_direct_sync_single_for_device(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ + phys_addr_t paddr = dma_to_phys(dev, addr); + + if (unlikely(is_swiotlb_buffer(paddr))) + swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE); + + if (!dev_is_dma_coherent(dev)) + arch_sync_dma_for_device(paddr, size, dir); +} + static inline void dma_direct_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { + phys_addr_t paddr = dma_to_phys(dev, addr); + + if (!dev_is_dma_coherent(dev)) { + arch_sync_dma_for_cpu(paddr, size, dir); + arch_sync_dma_for_cpu_all(); + } + + if (unlikely(is_swiotlb_buffer(paddr))) + swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU); } -static inline void dma_direct_sync_sg_for_cpu(struct device *dev, - struct scatterlist *sgl, int nents, enum dma_data_direction dir) + +static inline dma_addr_t dma_direct_map_page(struct device *dev, + struct page *page, unsigned long offset, size_t size, + enum dma_data_direction dir, unsigned long attrs) { + phys_addr_t phys = page_to_phys(page) + offset; + dma_addr_t dma_addr = phys_to_dma(dev, phys); + + if (unlikely(swiotlb_force == SWIOTLB_FORCE)) + return swiotlb_map(dev, phys, size, dir, attrs); + + if (unlikely(!dma_capable(dev, dma_addr, size, true))) { + if (swiotlb_force != SWIOTLB_NO_FORCE) + return swiotlb_map(dev, phys, size, dir, attrs); + + dev_WARN_ONCE(dev, 1, + "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n", + &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit); + return DMA_MAPPING_ERROR; + } + + if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + arch_sync_dma_for_device(phys, size, dir); + return dma_addr; } -#endif -size_t dma_direct_max_mapping_size(struct device *dev); +static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + phys_addr_t phys = dma_to_phys(dev, addr); + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + dma_direct_sync_single_for_cpu(dev, addr, size, dir); + + if (unlikely(is_swiotlb_buffer(phys))) + swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs); +} #endif /* _LINUX_DMA_DIRECT_H */ diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index e545d14fccee..bb0041e99659 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -10,11 +10,9 @@ #include #include #include -#include #include #include #include -#include /* * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use 
it @@ -304,18 +302,6 @@ void dma_direct_free(struct device *dev, size_t size, #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ defined(CONFIG_SWIOTLB) -void dma_direct_sync_single_for_device(struct device *dev, - dma_addr_t addr, size_t size, enum dma_data_direction dir) -{ - phys_addr_t paddr = dma_to_phys(dev, addr); - - if (unlikely(is_swiotlb_buffer(paddr))) - swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE); - - if (!dev_is_dma_coherent(dev)) - arch_sync_dma_for_device(paddr, size, dir); -} - void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir) { @@ -339,20 +325,6 @@ void dma_direct_sync_sg_for_device(struct device *dev, #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \ defined(CONFIG_SWIOTLB) -void dma_direct_sync_single_for_cpu(struct device *dev, - dma_addr_t addr, size_t size, enum dma_data_direction dir) -{ - phys_addr_t paddr = dma_to_phys(dev, addr); - - if (!dev_is_dma_coherent(dev)) { - arch_sync_dma_for_cpu(paddr, size, dir); - arch_sync_dma_for_cpu_all(); - } - - if (unlikely(is_swiotlb_buffer(paddr))) - swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU); -} - void dma_direct_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir) { @@ -374,18 +346,6 @@ void dma_direct_sync_sg_for_cpu(struct device *dev, arch_sync_dma_for_cpu_all(); } -void dma_direct_unmap_page(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir, unsigned long attrs) -{ - phys_addr_t phys = dma_to_phys(dev, addr); - - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - dma_direct_sync_single_for_cpu(dev, addr, size, dir); - - if (unlikely(is_swiotlb_buffer(phys))) - swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs); -} - void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir, unsigned long attrs) { @@ -398,31 +358,6 @@ void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, } #endif -dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction dir, - unsigned long attrs) -{ - phys_addr_t phys = page_to_phys(page) + offset; - dma_addr_t dma_addr = phys_to_dma(dev, phys); - - if (unlikely(swiotlb_force == SWIOTLB_FORCE)) - return swiotlb_map(dev, phys, size, dir, attrs); - - if (unlikely(!dma_capable(dev, dma_addr, size, true))) { - if (swiotlb_force != SWIOTLB_NO_FORCE) - return swiotlb_map(dev, phys, size, dir, attrs); - - dev_WARN_ONCE(dev, 1, - "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n", - &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit); - return DMA_MAPPING_ERROR; - } - - if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - arch_sync_dma_for_device(phys, size, dir); - return dma_addr; -} - int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir, unsigned long attrs) { -- cgit v1.2.3 From daa5cdc3fd08407048538585b2433601d4089a82 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Tue, 14 Jul 2020 15:56:35 +0200 Subject: net: Refactor xdp_convert_buff_to_frame Move the guts of xdp_convert_buff_to_frame to a new helper, xdp_update_frame_from_buff so it can be reused removing code duplication Suggested-by: Jesper Dangaard Brouer Co-developed-by: Lorenzo Bianconi Signed-off-by: Lorenzo Bianconi Signed-off-by: David Ahern Signed-off-by: Daniel Borkmann Acked-by: Jesper 
Dangaard Brouer Link: https://lore.kernel.org/bpf/90a68c283d7ebeb48924934c9b7ac79492300472.1594734381.git.lorenzo@kernel.org --- include/net/xdp.h | 35 ++++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/include/net/xdp.h b/include/net/xdp.h index 609f819ed08b..5b383c450858 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -121,39 +121,48 @@ void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp) xdp->frame_sz = frame->frame_sz; } -/* Convert xdp_buff to xdp_frame */ static inline -struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp) +int xdp_update_frame_from_buff(struct xdp_buff *xdp, + struct xdp_frame *xdp_frame) { - struct xdp_frame *xdp_frame; - int metasize; - int headroom; - - if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) - return xdp_convert_zc_to_xdp_frame(xdp); + int metasize, headroom; /* Assure headroom is available for storing info */ headroom = xdp->data - xdp->data_hard_start; metasize = xdp->data - xdp->data_meta; metasize = metasize > 0 ? metasize : 0; if (unlikely((headroom - metasize) < sizeof(*xdp_frame))) - return NULL; + return -ENOSPC; /* Catch if driver didn't reserve tailroom for skb_shared_info */ if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) { XDP_WARN("Driver BUG: missing reserved tailroom"); - return NULL; + return -ENOSPC; } - /* Store info in top of packet */ - xdp_frame = xdp->data_hard_start; - xdp_frame->data = xdp->data; xdp_frame->len = xdp->data_end - xdp->data; xdp_frame->headroom = headroom - sizeof(*xdp_frame); xdp_frame->metasize = metasize; xdp_frame->frame_sz = xdp->frame_sz; + return 0; +} + +/* Convert xdp_buff to xdp_frame */ +static inline +struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp) +{ + struct xdp_frame *xdp_frame; + + if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) + return xdp_convert_zc_to_xdp_frame(xdp); + + /* Store info in top of packet */ + xdp_frame = xdp->data_hard_start; + if (unlikely(xdp_update_frame_from_buff(xdp, xdp_frame) < 0)) + return NULL; + /* rxq only valid until napi_schedule ends, convert to xdp_mem_info */ xdp_frame->mem = xdp->rxq->mem; -- cgit v1.2.3 From 644bfe51fa49c22244d24e896cd3fe3ee2f2cfd1 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 14 Jul 2020 15:56:37 +0200 Subject: cpumap: Formalize map value as a named struct As it has been already done for devmap, introduce 'struct bpf_cpumap_val' to formalize the expected values that can be passed in for a CPUMAP. Update cpumap code to use the struct. Signed-off-by: Lorenzo Bianconi Signed-off-by: Daniel Borkmann Acked-by: Jesper Dangaard Brouer Link: https://lore.kernel.org/bpf/754f950674665dae6139c061d28c1d982aaf4170.1594734381.git.lorenzo@kernel.org --- include/uapi/linux/bpf.h | 9 +++++++++ kernel/bpf/cpumap.c | 28 +++++++++++++++------------- tools/include/uapi/linux/bpf.h | 9 +++++++++ 3 files changed, 33 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 5e386389913a..109623527358 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3849,6 +3849,15 @@ struct bpf_devmap_val { } bpf_prog; }; +/* CPUMAP map-value layout + * + * The struct data-layout of map-value is a configuration interface. + * New members can only be added to the end of this structure. 
+ */ +struct bpf_cpumap_val { + __u32 qsize; /* queue size to remote target CPU */ +}; + enum sk_action { SK_DROP = 0, SK_PASS, diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index 323c91c4fab0..ff48dc00e8d0 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -52,7 +52,6 @@ struct xdp_bulk_queue { struct bpf_cpu_map_entry { u32 cpu; /* kthread CPU and map index */ int map_id; /* Back reference to map */ - u32 qsize; /* Queue size placeholder for map lookup */ /* XDP can run multiple RX-ring queues, need __percpu enqueue store */ struct xdp_bulk_queue __percpu *bulkq; @@ -62,10 +61,13 @@ struct bpf_cpu_map_entry { /* Queue with potential multi-producers, and single-consumer kthread */ struct ptr_ring *queue; struct task_struct *kthread; - struct work_struct kthread_stop_wq; + + struct bpf_cpumap_val value; atomic_t refcnt; /* Control when this struct can be free'ed */ struct rcu_head rcu; + + struct work_struct kthread_stop_wq; }; struct bpf_cpu_map { @@ -307,8 +309,8 @@ static int cpu_map_kthread_run(void *data) return 0; } -static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu, - int map_id) +static struct bpf_cpu_map_entry * +__cpu_map_entry_alloc(struct bpf_cpumap_val *value, u32 cpu, int map_id) { gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; struct bpf_cpu_map_entry *rcpu; @@ -338,13 +340,13 @@ static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu, if (!rcpu->queue) goto free_bulkq; - err = ptr_ring_init(rcpu->queue, qsize, gfp); + err = ptr_ring_init(rcpu->queue, value->qsize, gfp); if (err) goto free_queue; rcpu->cpu = cpu; rcpu->map_id = map_id; - rcpu->qsize = qsize; + rcpu->value.qsize = value->qsize; /* Setup kthread */ rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa, @@ -437,12 +439,12 @@ static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value, u64 map_flags) { struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map); + struct bpf_cpumap_val cpumap_value = {}; struct bpf_cpu_map_entry *rcpu; - /* Array index key correspond to CPU number */ u32 key_cpu = *(u32 *)key; - /* Value is the queue size */ - u32 qsize = *(u32 *)value; + + memcpy(&cpumap_value, value, map->value_size); if (unlikely(map_flags > BPF_EXIST)) return -EINVAL; @@ -450,18 +452,18 @@ static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value, return -E2BIG; if (unlikely(map_flags == BPF_NOEXIST)) return -EEXIST; - if (unlikely(qsize > 16384)) /* sanity limit on qsize */ + if (unlikely(cpumap_value.qsize > 16384)) /* sanity limit on qsize */ return -EOVERFLOW; /* Make sure CPU is a valid possible cpu */ if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu)) return -ENODEV; - if (qsize == 0) { + if (cpumap_value.qsize == 0) { rcpu = NULL; /* Same as deleting */ } else { /* Updating qsize cause re-allocation of bpf_cpu_map_entry */ - rcpu = __cpu_map_entry_alloc(qsize, key_cpu, map->id); + rcpu = __cpu_map_entry_alloc(&cpumap_value, key_cpu, map->id); if (!rcpu) return -ENOMEM; rcpu->cmap = cmap; @@ -523,7 +525,7 @@ static void *cpu_map_lookup_elem(struct bpf_map *map, void *key) struct bpf_cpu_map_entry *rcpu = __cpu_map_lookup_elem(map, *(u32 *)key); - return rcpu ? &rcpu->qsize : NULL; + return rcpu ? 
&rcpu->value : NULL; } static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 5e386389913a..109623527358 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -3849,6 +3849,15 @@ struct bpf_devmap_val { } bpf_prog; }; +/* CPUMAP map-value layout + * + * The struct data-layout of map-value is a configuration interface. + * New members can only be added to the end of this structure. + */ +struct bpf_cpumap_val { + __u32 qsize; /* queue size to remote target CPU */ +}; + enum sk_action { SK_DROP = 0, SK_PASS, -- cgit v1.2.3 From 9216477449f33cdbc9c9a99d49f500b7fbb81702 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 14 Jul 2020 15:56:38 +0200 Subject: bpf: cpumap: Add the possibility to attach an eBPF program to cpumap Introduce the capability to attach an eBPF program to cpumap entries. The idea behind this feature is to add the possibility to define on which CPU run the eBPF program if the underlying hw does not support RSS. Current supported verdicts are XDP_DROP and XDP_PASS. This patch has been tested on Marvell ESPRESSObin using xdp_redirect_cpu sample available in the kernel tree to identify possible performance regressions. Results show there are no observable differences in packet-per-second: $./xdp_redirect_cpu --progname xdp_cpu_map0 --dev eth0 --cpu 1 rx: 354.8 Kpps rx: 356.0 Kpps rx: 356.8 Kpps rx: 356.3 Kpps rx: 356.6 Kpps rx: 356.6 Kpps rx: 356.7 Kpps rx: 355.8 Kpps rx: 356.8 Kpps rx: 356.8 Kpps Co-developed-by: Jesper Dangaard Brouer Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Lorenzo Bianconi Signed-off-by: Daniel Borkmann Acked-by: Jesper Dangaard Brouer Link: https://lore.kernel.org/bpf/5c9febdf903d810b3415732e5cd98491d7d9067a.1594734381.git.lorenzo@kernel.org --- include/linux/bpf.h | 6 ++ include/net/xdp.h | 5 ++ include/trace/events/xdp.h | 14 +++-- include/uapi/linux/bpf.h | 5 ++ kernel/bpf/cpumap.c | 121 ++++++++++++++++++++++++++++++++++++----- net/core/dev.c | 9 +++ tools/include/uapi/linux/bpf.h | 5 ++ 7 files changed, 148 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index c67c88ad35f8..54ad426dbea1 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1272,6 +1272,7 @@ struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key); void __cpu_map_flush(void); int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, struct net_device *dev_rx); +bool cpu_map_prog_allowed(struct bpf_map *map); /* Return map's numa specified by userspace */ static inline int bpf_map_attr_numa_node(const union bpf_attr *attr) @@ -1432,6 +1433,11 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, return 0; } +static inline bool cpu_map_prog_allowed(struct bpf_map *map) +{ + return false; +} + static inline struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type) { diff --git a/include/net/xdp.h b/include/net/xdp.h index 5b383c450858..83b9e0142b52 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -98,6 +98,11 @@ struct xdp_frame { struct net_device *dev_rx; /* used by cpumap */ }; +struct xdp_cpumap_stats { + unsigned int pass; + unsigned int drop; +}; + /* Clear kernel pointers in xdp_frame */ static inline void xdp_scrub_frame(struct xdp_frame *frame) { diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h index b73d3e141323..e2c99f5bee39 100644 --- 
a/include/trace/events/xdp.h +++ b/include/trace/events/xdp.h @@ -177,9 +177,9 @@ DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map_err, TRACE_EVENT(xdp_cpumap_kthread, TP_PROTO(int map_id, unsigned int processed, unsigned int drops, - int sched), + int sched, struct xdp_cpumap_stats *xdp_stats), - TP_ARGS(map_id, processed, drops, sched), + TP_ARGS(map_id, processed, drops, sched, xdp_stats), TP_STRUCT__entry( __field(int, map_id) @@ -188,6 +188,8 @@ TRACE_EVENT(xdp_cpumap_kthread, __field(unsigned int, drops) __field(unsigned int, processed) __field(int, sched) + __field(unsigned int, xdp_pass) + __field(unsigned int, xdp_drop) ), TP_fast_assign( @@ -197,16 +199,20 @@ TRACE_EVENT(xdp_cpumap_kthread, __entry->drops = drops; __entry->processed = processed; __entry->sched = sched; + __entry->xdp_pass = xdp_stats->pass; + __entry->xdp_drop = xdp_stats->drop; ), TP_printk("kthread" " cpu=%d map_id=%d action=%s" " processed=%u drops=%u" - " sched=%d", + " sched=%d" + " xdp_pass=%u xdp_drop=%u", __entry->cpu, __entry->map_id, __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB), __entry->processed, __entry->drops, - __entry->sched) + __entry->sched, + __entry->xdp_pass, __entry->xdp_drop) ); TRACE_EVENT(xdp_cpumap_enqueue, diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 109623527358..c010b57fce3f 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -227,6 +227,7 @@ enum bpf_attach_type { BPF_CGROUP_INET6_GETSOCKNAME, BPF_XDP_DEVMAP, BPF_CGROUP_INET_SOCK_RELEASE, + BPF_XDP_CPUMAP, __MAX_BPF_ATTACH_TYPE }; @@ -3856,6 +3857,10 @@ struct bpf_devmap_val { */ struct bpf_cpumap_val { __u32 qsize; /* queue size to remote target CPU */ + union { + int fd; /* prog fd on map write */ + __u32 id; /* prog id on map read */ + } bpf_prog; }; enum sk_action { diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index ff48dc00e8d0..b3a8aea81ee5 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -63,6 +63,7 @@ struct bpf_cpu_map_entry { struct task_struct *kthread; struct bpf_cpumap_val value; + struct bpf_prog *prog; atomic_t refcnt; /* Control when this struct can be free'ed */ struct rcu_head rcu; @@ -82,6 +83,7 @@ static int bq_flush_to_queue(struct xdp_bulk_queue *bq); static struct bpf_map *cpu_map_alloc(union bpf_attr *attr) { + u32 value_size = attr->value_size; struct bpf_cpu_map *cmap; int err = -ENOMEM; u64 cost; @@ -92,7 +94,9 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr) /* check sanity of attributes */ if (attr->max_entries == 0 || attr->key_size != 4 || - attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE) + (value_size != offsetofend(struct bpf_cpumap_val, qsize) && + value_size != offsetofend(struct bpf_cpumap_val, bpf_prog.fd)) || + attr->map_flags & ~BPF_F_NUMA_NODE) return ERR_PTR(-EINVAL); cmap = kzalloc(sizeof(*cmap), GFP_USER); @@ -214,6 +218,8 @@ static void __cpu_map_ring_cleanup(struct ptr_ring *ring) static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu) { if (atomic_dec_and_test(&rcpu->refcnt)) { + if (rcpu->prog) + bpf_prog_put(rcpu->prog); /* The queue should be empty at this point */ __cpu_map_ring_cleanup(rcpu->queue); ptr_ring_cleanup(rcpu->queue, NULL); @@ -222,6 +228,62 @@ static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu) } } +static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu, + void **frames, int n, + struct xdp_cpumap_stats *stats) +{ + struct xdp_rxq_info rxq; + struct xdp_buff xdp; + int i, nframes = 0; + + if (!rcpu->prog) + return n; + + rcu_read_lock(); + + 
xdp_set_return_frame_no_direct(); + xdp.rxq = &rxq; + + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + u32 act; + int err; + + rxq.dev = xdpf->dev_rx; + rxq.mem = xdpf->mem; + /* TODO: report queue_index to xdp_rxq_info */ + + xdp_convert_frame_to_buff(xdpf, &xdp); + + act = bpf_prog_run_xdp(rcpu->prog, &xdp); + switch (act) { + case XDP_PASS: + err = xdp_update_frame_from_buff(&xdp, xdpf); + if (err < 0) { + xdp_return_frame(xdpf); + stats->drop++; + } else { + frames[nframes++] = xdpf; + stats->pass++; + } + break; + default: + bpf_warn_invalid_xdp_action(act); + /* fallthrough */ + case XDP_DROP: + xdp_return_frame(xdpf); + stats->drop++; + break; + } + } + + xdp_clear_return_frame_no_direct(); + + rcu_read_unlock(); + + return nframes; +} + #define CPUMAP_BATCH 8 static int cpu_map_kthread_run(void *data) @@ -236,11 +298,12 @@ static int cpu_map_kthread_run(void *data) * kthread_stop signal until queue is empty. */ while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) { + struct xdp_cpumap_stats stats = {}; /* zero stats */ + gfp_t gfp = __GFP_ZERO | GFP_ATOMIC; unsigned int drops = 0, sched = 0; void *frames[CPUMAP_BATCH]; void *skbs[CPUMAP_BATCH]; - gfp_t gfp = __GFP_ZERO | GFP_ATOMIC; - int i, n, m; + int i, n, m, nframes; /* Release CPU reschedule checks */ if (__ptr_ring_empty(rcpu->queue)) { @@ -261,8 +324,8 @@ static int cpu_map_kthread_run(void *data) * kthread CPU pinned. Lockless access to ptr_ring * consume side valid as no-resize allowed of queue. */ - n = __ptr_ring_consume_batched(rcpu->queue, frames, CPUMAP_BATCH); - + n = __ptr_ring_consume_batched(rcpu->queue, frames, + CPUMAP_BATCH); for (i = 0; i < n; i++) { void *f = frames[i]; struct page *page = virt_to_page(f); @@ -274,15 +337,19 @@ static int cpu_map_kthread_run(void *data) prefetchw(page); } - m = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, n, skbs); - if (unlikely(m == 0)) { - for (i = 0; i < n; i++) - skbs[i] = NULL; /* effect: xdp_return_frame */ - drops = n; + /* Support running another XDP prog on this CPU */ + nframes = cpu_map_bpf_prog_run_xdp(rcpu, frames, n, &stats); + if (nframes) { + m = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, skbs); + if (unlikely(m == 0)) { + for (i = 0; i < nframes; i++) + skbs[i] = NULL; /* effect: xdp_return_frame */ + drops += nframes; + } } local_bh_disable(); - for (i = 0; i < n; i++) { + for (i = 0; i < nframes; i++) { struct xdp_frame *xdpf = frames[i]; struct sk_buff *skb = skbs[i]; int ret; @@ -299,7 +366,7 @@ static int cpu_map_kthread_run(void *data) drops++; } /* Feedback loop via tracepoint */ - trace_xdp_cpumap_kthread(rcpu->map_id, n, drops, sched); + trace_xdp_cpumap_kthread(rcpu->map_id, n, drops, sched, &stats); local_bh_enable(); /* resched point, may call do_softirq() */ } @@ -309,13 +376,38 @@ static int cpu_map_kthread_run(void *data) return 0; } +bool cpu_map_prog_allowed(struct bpf_map *map) +{ + return map->map_type == BPF_MAP_TYPE_CPUMAP && + map->value_size != offsetofend(struct bpf_cpumap_val, qsize); +} + +static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, int fd) +{ + struct bpf_prog *prog; + + prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP); + if (IS_ERR(prog)) + return PTR_ERR(prog); + + if (prog->expected_attach_type != BPF_XDP_CPUMAP) { + bpf_prog_put(prog); + return -EINVAL; + } + + rcpu->value.bpf_prog.id = prog->aux->id; + rcpu->prog = prog; + + return 0; +} + static struct bpf_cpu_map_entry * __cpu_map_entry_alloc(struct bpf_cpumap_val *value, u32 cpu, int map_id) { + int numa, 
err, i, fd = value->bpf_prog.fd; gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; struct bpf_cpu_map_entry *rcpu; struct xdp_bulk_queue *bq; - int numa, err, i; /* Have map->numa_node, but choose node of redirect target CPU */ numa = cpu_to_node(cpu); @@ -357,6 +449,9 @@ __cpu_map_entry_alloc(struct bpf_cpumap_val *value, u32 cpu, int map_id) get_cpu_map_entry(rcpu); /* 1-refcnt for being in cmap->cpu_map[] */ get_cpu_map_entry(rcpu); /* 1-refcnt for kthread */ + if (fd > 0 && __cpu_map_load_bpf_program(rcpu, fd)) + goto free_ptr_ring; + /* Make sure kthread runs on a single CPU */ kthread_bind(rcpu->kthread, cpu); wake_up_process(rcpu->kthread); diff --git a/net/core/dev.c b/net/core/dev.c index b61075828358..b820527f0a8d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5448,6 +5448,8 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp) for (i = 0; i < new->aux->used_map_cnt; i++) { if (dev_map_can_have_prog(new->aux->used_maps[i])) return -EINVAL; + if (cpu_map_prog_allowed(new->aux->used_maps[i])) + return -EINVAL; } } @@ -8875,6 +8877,13 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, return -EINVAL; } + if (prog->expected_attach_type == BPF_XDP_CPUMAP) { + NL_SET_ERR_MSG(extack, + "BPF_XDP_CPUMAP programs can not be attached to a device"); + bpf_prog_put(prog); + return -EINVAL; + } + /* prog->aux->id may be 0 for orphaned device-bound progs */ if (prog->aux->id && prog->aux->id == prog_id) { bpf_prog_put(prog); diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 109623527358..c010b57fce3f 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -227,6 +227,7 @@ enum bpf_attach_type { BPF_CGROUP_INET6_GETSOCKNAME, BPF_XDP_DEVMAP, BPF_CGROUP_INET_SOCK_RELEASE, + BPF_XDP_CPUMAP, __MAX_BPF_ATTACH_TYPE }; @@ -3856,6 +3857,10 @@ struct bpf_devmap_val { */ struct bpf_cpumap_val { __u32 qsize; /* queue size to remote target CPU */ + union { + int fd; /* prog fd on map write */ + __u32 id; /* prog id on map read */ + } bpf_prog; }; enum sk_action { -- cgit v1.2.3 From 28b1520ebf81ced970141640d90279ac7b9f1f9a Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 14 Jul 2020 15:56:39 +0200 Subject: bpf: cpumap: Implement XDP_REDIRECT for eBPF programs attached to map entries Introduce XDP_REDIRECT support for eBPF programs attached to cpumap entries. This patch has been tested on Marvell ESPRESSObin using a modified version of xdp_redirect_cpu sample in order to attach a XDP program to CPUMAP entries to perform a redirect on the mvneta interface. 
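As a hedged illustration (not part of this patch), a minimal XDP program intended to run from a CPUMAP entry and redirect every frame out through a DEVMAP might look like the sketch below. The BTF map definition style and the bpf_redirect_map() helper are standard; the section name and the map name are assumptions about how a userspace loader would mark the BPF_XDP_CPUMAP attach type, not something taken from this series.

// SPDX-License-Identifier: GPL-2.0
/*
 * Illustrative sketch only: an XDP program meant for a CPUMAP entry
 * that redirects every frame to slot 0 of a DEVMAP ("tx_port" is a
 * made-up name).  The "xdp_cpumap/" section prefix is an assumption
 * about the loader, not taken from the patch.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
	__uint(max_entries, 1);
} tx_port SEC(".maps");

SEC("xdp_cpumap/redirect")
int cpumap_redirect(struct xdp_md *ctx)
{
	/* Runs on the remote CPU chosen by the CPUMAP entry. */
	return bpf_redirect_map(&tx_port, 0, 0);
}

char _license[] SEC("license") = "GPL";

A program of roughly this shape is what gets attached to the CPUMAP entry for cpu1 in the scenario quoted next.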
In particular the following scenario has been tested: rq (cpu0) --> mvneta - XDP_REDIRECT (cpu0) --> CPUMAP - XDP_REDIRECT (cpu1) --> mvneta $./xdp_redirect_cpu -p xdp_cpu_map0 -d eth0 -c 1 -e xdp_redirect \ -f xdp_redirect_kern.o -m tx_port -r eth0 tx: 285.2 Kpps rx: 285.2 Kpps Attaching a simple XDP program on eth0 to perform XDP_TX gives comparable results: tx: 288.4 Kpps rx: 288.4 Kpps Co-developed-by: Jesper Dangaard Brouer Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Lorenzo Bianconi Signed-off-by: Daniel Borkmann Acked-by: Jesper Dangaard Brouer Link: https://lore.kernel.org/bpf/2cf8373a731867af302b00c4ff16c122630c4980.1594734381.git.lorenzo@kernel.org --- include/net/xdp.h | 1 + include/trace/events/xdp.h | 6 ++++-- kernel/bpf/cpumap.c | 17 +++++++++++++++-- 3 files changed, 20 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/net/xdp.h b/include/net/xdp.h index 83b9e0142b52..5be0d4d65b94 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -99,6 +99,7 @@ struct xdp_frame { }; struct xdp_cpumap_stats { + unsigned int redirect; unsigned int pass; unsigned int drop; }; diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h index e2c99f5bee39..cd24e8a59529 100644 --- a/include/trace/events/xdp.h +++ b/include/trace/events/xdp.h @@ -190,6 +190,7 @@ TRACE_EVENT(xdp_cpumap_kthread, __field(int, sched) __field(unsigned int, xdp_pass) __field(unsigned int, xdp_drop) + __field(unsigned int, xdp_redirect) ), TP_fast_assign( @@ -201,18 +202,19 @@ TRACE_EVENT(xdp_cpumap_kthread, __entry->sched = sched; __entry->xdp_pass = xdp_stats->pass; __entry->xdp_drop = xdp_stats->drop; + __entry->xdp_redirect = xdp_stats->redirect; ), TP_printk("kthread" " cpu=%d map_id=%d action=%s" " processed=%u drops=%u" " sched=%d" - " xdp_pass=%u xdp_drop=%u", + " xdp_pass=%u xdp_drop=%u xdp_redirect=%u", __entry->cpu, __entry->map_id, __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB), __entry->processed, __entry->drops, __entry->sched, - __entry->xdp_pass, __entry->xdp_drop) + __entry->xdp_pass, __entry->xdp_drop, __entry->xdp_redirect) ); TRACE_EVENT(xdp_cpumap_enqueue, diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index b3a8aea81ee5..4c95d0615ca2 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -239,7 +239,7 @@ static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu, if (!rcpu->prog) return n; - rcu_read_lock(); + rcu_read_lock_bh(); xdp_set_return_frame_no_direct(); xdp.rxq = &rxq; @@ -267,6 +267,16 @@ static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu, stats->pass++; } break; + case XDP_REDIRECT: + err = xdp_do_redirect(xdpf->dev_rx, &xdp, + rcpu->prog); + if (unlikely(err)) { + xdp_return_frame(xdpf); + stats->drop++; + } else { + stats->redirect++; + } + break; default: bpf_warn_invalid_xdp_action(act); /* fallthrough */ @@ -277,9 +287,12 @@ static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu, } } + if (stats->redirect) + xdp_do_flush_map(); + xdp_clear_return_frame_no_direct(); - rcu_read_unlock(); + rcu_read_unlock_bh(); /* resched point, may call do_softirq() */ return nframes; } -- cgit v1.2.3 From ecbe6bc0003bfd5bf8581cb679cae0eb944432cb Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 16 Jul 2020 16:33:09 +0200 Subject: block: use bd_prepare_to_claim directly in the loop driver The arcane magic in bd_start_claiming is only needed to be able to claim a block_device that hasn't been fully set up. 
Switch the loop driver that claims from the ioctl path with a fully set up struct block_device to just use the much simpler bd_prepare_to_claim directly. Signed-off-by: Christoph Hellwig Acked-by: Tejun Heo Signed-off-by: Jens Axboe --- drivers/block/loop.c | 7 +++---- fs/block_dev.c | 9 +++++---- include/linux/blkdev.h | 3 ++- 3 files changed, 10 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/drivers/block/loop.c b/drivers/block/loop.c index a943207705dd..d18160146226 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1090,11 +1090,10 @@ static int loop_configure(struct loop_device *lo, fmode_t mode, * here to avoid changing device under exclusive owner. */ if (!(mode & FMODE_EXCL)) { - claimed_bdev = bd_start_claiming(bdev, loop_configure); - if (IS_ERR(claimed_bdev)) { - error = PTR_ERR(claimed_bdev); + claimed_bdev = bdev->bd_contains; + error = bd_prepare_to_claim(bdev, claimed_bdev, loop_configure); + if (error) goto out_putf; - } } error = mutex_lock_killable(&loop_ctl_mutex); diff --git a/fs/block_dev.c b/fs/block_dev.c index b7b2ee4b288a..ee80bd81af74 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -1027,8 +1027,8 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole, * RETURNS: * 0 if @bdev can be claimed, -EBUSY otherwise. */ -static int bd_prepare_to_claim(struct block_device *bdev, - struct block_device *whole, void *holder) +int bd_prepare_to_claim(struct block_device *bdev, struct block_device *whole, + void *holder) { retry: spin_lock(&bdev_lock); @@ -1055,6 +1055,7 @@ retry: spin_unlock(&bdev_lock); return 0; } +EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */ static struct gendisk *bdev_get_gendisk(struct block_device *bdev, int *partno) { @@ -1100,7 +1101,8 @@ static struct gendisk *bdev_get_gendisk(struct block_device *bdev, int *partno) * Pointer to the block device containing @bdev on success, ERR_PTR() * value on failure. */ -struct block_device *bd_start_claiming(struct block_device *bdev, void *holder) +static struct block_device *bd_start_claiming(struct block_device *bdev, + void *holder) { struct gendisk *disk; struct block_device *whole; @@ -1141,7 +1143,6 @@ struct block_device *bd_start_claiming(struct block_device *bdev, void *holder) return whole; } -EXPORT_SYMBOL(bd_start_claiming); static void bd_clear_claiming(struct block_device *whole, void *holder) { diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 71173a1ffa8b..06995b96e946 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1919,7 +1919,8 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder); struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, void *holder); struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder); -struct block_device *bd_start_claiming(struct block_device *bdev, void *holder); +int bd_prepare_to_claim(struct block_device *bdev, struct block_device *whole, + void *holder); void bd_abort_claiming(struct block_device *bdev, struct block_device *whole, void *holder); void blkdev_put(struct block_device *bdev, fmode_t mode); -- cgit v1.2.3 From bfdfa51702dec67e9fcd52568b4cf3c7f799db8b Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 18:29:11 -0700 Subject: bpf: Drop duplicated words in uapi helper comments Drop doubled words "will" and "attach". 
Signed-off-by: Randy Dunlap Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/6b9f71ae-4f8e-0259-2c5d-187ddaefe6eb@infradead.org --- include/uapi/linux/bpf.h | 6 +++--- tools/include/uapi/linux/bpf.h | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index c010b57fce3f..7ac3992dacfe 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -2420,7 +2420,7 @@ union bpf_attr { * Look for an IPv6 socket. * * If the *netns* is a negative signed 32-bit integer, then the - * socket lookup table in the netns associated with the *ctx* will + * socket lookup table in the netns associated with the *ctx* * will be used. For the TC hooks, this is the netns of the device * in the skb. For socket hooks, this is the netns of the socket. * If *netns* is any other signed 32-bit value greater than or @@ -2457,7 +2457,7 @@ union bpf_attr { * Look for an IPv6 socket. * * If the *netns* is a negative signed 32-bit integer, then the - * socket lookup table in the netns associated with the *ctx* will + * socket lookup table in the netns associated with the *ctx* * will be used. For the TC hooks, this is the netns of the device * in the skb. For socket hooks, this is the netns of the socket. * If *netns* is any other signed 32-bit value greater than or @@ -4000,7 +4000,7 @@ struct bpf_link_info { /* User bpf_sock_addr struct to access socket fields and sockaddr struct passed * by user and intended to be used by socket (e.g. to bind to, depends on - * attach attach type). + * attach type). */ struct bpf_sock_addr { __u32 user_family; /* Allows 4-byte read, but no write. */ diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index c010b57fce3f..7ac3992dacfe 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -2420,7 +2420,7 @@ union bpf_attr { * Look for an IPv6 socket. * * If the *netns* is a negative signed 32-bit integer, then the - * socket lookup table in the netns associated with the *ctx* will + * socket lookup table in the netns associated with the *ctx* * will be used. For the TC hooks, this is the netns of the device * in the skb. For socket hooks, this is the netns of the socket. * If *netns* is any other signed 32-bit value greater than or @@ -2457,7 +2457,7 @@ union bpf_attr { * Look for an IPv6 socket. * * If the *netns* is a negative signed 32-bit integer, then the - * socket lookup table in the netns associated with the *ctx* will + * socket lookup table in the netns associated with the *ctx* * will be used. For the TC hooks, this is the netns of the device * in the skb. For socket hooks, this is the netns of the socket. * If *netns* is any other signed 32-bit value greater than or @@ -4000,7 +4000,7 @@ struct bpf_link_info { /* User bpf_sock_addr struct to access socket fields and sockaddr struct passed * by user and intended to be used by socket (e.g. to bind to, depends on - * attach attach type). + * attach type). */ struct bpf_sock_addr { __u32 user_family; /* Allows 4-byte read, but no write. */ -- cgit v1.2.3 From bbe4f4245271bd0f21bf826996c0c5d87a3529c9 Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Tue, 7 Jul 2020 09:30:59 +0300 Subject: RDMA/qedr: Add EDPM mode type for user-fw compatibility In older FW versions the completion flag was treated as the ack flag in edpm messages. 
commit ff937b916eb6 ("qed: Add EDPM mode type for user-fw compatibility") exposed the FW option of setting which mode the QP is in by adding a flag to the qedr <-> qed API. This patch adds the qedr <-> libqedr interface so that the libqedr can set the flag appropriately and qedr can pass it down to FW. Flag is added for backward compatibility with libqedr. For older libs, this flag didn't exist and therefore set to zero. Fixes: ac1b36e55a51 ("qedr: Add support for user context verbs") Link: https://lore.kernel.org/r/20200707063100.3811-2-michal.kalderon@marvell.com Signed-off-by: Yuval Bason Signed-off-by: Michal Kalderon Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/qedr/qedr.h | 1 + drivers/infiniband/hw/qedr/verbs.c | 11 ++++++++--- include/uapi/rdma/qedr-abi.h | 4 ++-- 3 files changed, 11 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index aa332027da86..460292179b32 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -235,6 +235,7 @@ struct qedr_ucontext { u32 dpi_size; u16 dpi; bool db_rec; + u8 edpm_mode; }; union db_prod32 { diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 42273aa0b5e1..5008149ea116 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -275,7 +275,8 @@ int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata) DP_ERR(dev, "Problem copying data from user space\n"); return -EFAULT; } - + ctx->edpm_mode = !!(ureq.context_flags & + QEDR_ALLOC_UCTX_EDPM_MODE); ctx->db_rec = !!(ureq.context_flags & QEDR_ALLOC_UCTX_DB_REC); } @@ -316,7 +317,8 @@ int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata) uresp.dpm_flags = QEDR_DPM_TYPE_IWARP_LEGACY; else uresp.dpm_flags = QEDR_DPM_TYPE_ROCE_ENHANCED | - QEDR_DPM_TYPE_ROCE_LEGACY; + QEDR_DPM_TYPE_ROCE_LEGACY | + QEDR_DPM_TYPE_ROCE_EDPM_MODE; uresp.dpm_flags |= QEDR_DPM_SIZES_SET; uresp.ldpm_limit_size = QEDR_LDPM_MAX_SIZE; @@ -1750,7 +1752,7 @@ static int qedr_create_user_qp(struct qedr_dev *dev, struct qed_rdma_create_qp_out_params out_params; struct qedr_pd *pd = get_qedr_pd(ibpd); struct qedr_create_qp_uresp uresp; - struct qedr_ucontext *ctx = NULL; + struct qedr_ucontext *ctx = pd ? pd->uctx : NULL; struct qedr_create_qp_ureq ureq; int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1); int rc = -EINVAL; @@ -1788,6 +1790,9 @@ static int qedr_create_user_qp(struct qedr_dev *dev, in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa; } + if (ctx) + SET_FIELD(in_params.flags, QED_ROCE_EDPM_MODE, ctx->edpm_mode); + qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx, &in_params, &out_params); diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h index a0b83c9d4498..b261c9fca07b 100644 --- a/include/uapi/rdma/qedr-abi.h +++ b/include/uapi/rdma/qedr-abi.h @@ -39,7 +39,7 @@ /* user kernel communication data structures. 
*/ enum qedr_alloc_ucontext_flags { - QEDR_ALLOC_UCTX_RESERVED = 1 << 0, + QEDR_ALLOC_UCTX_EDPM_MODE = 1 << 0, QEDR_ALLOC_UCTX_DB_REC = 1 << 1 }; @@ -56,7 +56,7 @@ enum qedr_rdma_dpm_type { QEDR_DPM_TYPE_ROCE_ENHANCED = 1 << 0, QEDR_DPM_TYPE_ROCE_LEGACY = 1 << 1, QEDR_DPM_TYPE_IWARP_LEGACY = 1 << 2, - QEDR_DPM_TYPE_RESERVED = 1 << 3, + QEDR_DPM_TYPE_ROCE_EDPM_MODE = 1 << 3, QEDR_DPM_SIZES_SET = 1 << 4, }; -- cgit v1.2.3 From eb7f84e379daad69b4c92538baeaf93bbf493c14 Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Tue, 7 Jul 2020 09:31:00 +0300 Subject: RDMA/qedr: Add EDPM max size to alloc ucontext response User space should receive the maximum edpm size from kernel driver, similar to other edpm/ldpm related limits. Add an additional parameter to the alloc_ucontext_resp structure for the edpm maximum size. In addition, pass an indication from user-space to kernel (and not just kernel to user) that the DPM sizes are supported. This is for supporting backward-forward compatibility between driver and lib for everything related to DPM transaction and limit sizes. This should have been part of commit mentioned in Fixes tag. Link: https://lore.kernel.org/r/20200707063100.3811-3-michal.kalderon@marvell.com Fixes: 93a3d05f9d68 ("RDMA/qedr: Add kernel capability flags for dpm enabled mode") Signed-off-by: Ariel Elior Signed-off-by: Michal Kalderon Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/qedr/verbs.c | 9 ++++++--- include/uapi/rdma/qedr-abi.h | 6 +++++- 2 files changed, 11 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 5008149ea116..fcf2eaa3b459 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -320,9 +320,12 @@ int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata) QEDR_DPM_TYPE_ROCE_LEGACY | QEDR_DPM_TYPE_ROCE_EDPM_MODE; - uresp.dpm_flags |= QEDR_DPM_SIZES_SET; - uresp.ldpm_limit_size = QEDR_LDPM_MAX_SIZE; - uresp.edpm_trans_size = QEDR_EDPM_TRANS_SIZE; + if (ureq.context_flags & QEDR_SUPPORT_DPM_SIZES) { + uresp.dpm_flags |= QEDR_DPM_SIZES_SET; + uresp.ldpm_limit_size = QEDR_LDPM_MAX_SIZE; + uresp.edpm_trans_size = QEDR_EDPM_TRANS_SIZE; + uresp.edpm_limit_size = QEDR_EDPM_MAX_SIZE; + } uresp.wids_enabled = 1; uresp.wid_count = oparams.wid_count; diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h index b261c9fca07b..bf7333b2b5d7 100644 --- a/include/uapi/rdma/qedr-abi.h +++ b/include/uapi/rdma/qedr-abi.h @@ -40,7 +40,8 @@ /* user kernel communication data structures. 
*/ enum qedr_alloc_ucontext_flags { QEDR_ALLOC_UCTX_EDPM_MODE = 1 << 0, - QEDR_ALLOC_UCTX_DB_REC = 1 << 1 + QEDR_ALLOC_UCTX_DB_REC = 1 << 1, + QEDR_SUPPORT_DPM_SIZES = 1 << 2, }; struct qedr_alloc_ucontext_req { @@ -50,6 +51,7 @@ struct qedr_alloc_ucontext_req { #define QEDR_LDPM_MAX_SIZE (8192) #define QEDR_EDPM_TRANS_SIZE (64) +#define QEDR_EDPM_MAX_SIZE (ROCE_REQ_MAX_INLINE_DATA_SIZE) enum qedr_rdma_dpm_type { QEDR_DPM_TYPE_NONE = 0, @@ -77,6 +79,8 @@ struct qedr_alloc_ucontext_resp { __u16 ldpm_limit_size; __u8 edpm_trans_size; __u8 reserved; + __u16 edpm_limit_size; + __u8 padding[6]; }; struct qedr_alloc_pd_ureq { -- cgit v1.2.3 From aecfd220b223043475fa515aa563249f683fcd04 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 3 Jun 2020 13:28:45 -0700 Subject: x86/mm/numa: Remove uninitialized_var() usage Using uninitialized_var() is dangerous as it papers over real bugs[1] (or can in the future), and suppresses unrelated compiler warnings (e.g. "unused variable"). If the compiler thinks it is uninitialized, either simply initialize the variable or make compiler changes. As a precursor to removing[2] this[3] macro[4], refactor code to avoid its need. The original reason for its use here was to work around the #ifdef being the only place the variable was used. This is better expressed using IS_ENABLED() and a new code block where the variable can be used unconditionally. [1] https://lore.kernel.org/lkml/20200603174714.192027-1-glider@google.com/ [2] https://lore.kernel.org/lkml/CA+55aFw+Vbj0i=1TGqCR5vQkCzWJ0QxK6CernOU6eedsudAixw@mail.gmail.com/ [3] https://lore.kernel.org/lkml/CA+55aFwgbgqhbp1fkxvRKEpzyR5J8n1vKT1VZdz9knmPuXhOeg@mail.gmail.com/ [4] https://lore.kernel.org/lkml/CA+55aFz2500WfbKXAx8s67wrm9=yVJu65TpLgN_ybYNv0VEOKA@mail.gmail.com/ Fixes: 1e01979c8f50 ("x86, numa: Implement pfn -> nid mapping granularity check") Signed-off-by: Kees Cook --- arch/x86/mm/numa.c | 18 +++++++++--------- include/linux/page-flags-layout.h | 4 +++- 2 files changed, 12 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 8ee952038c80..b05f45e5e8e2 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -543,7 +543,6 @@ static void __init numa_clear_kernel_node_hotplug(void) static int __init numa_register_memblks(struct numa_meminfo *mi) { - unsigned long uninitialized_var(pfn_align); int i, nid; /* Account for nodes with cpus and no memory */ @@ -571,15 +570,16 @@ static int __init numa_register_memblks(struct numa_meminfo *mi) * If sections array is gonna be used for pfn -> nid mapping, check * whether its granularity is fine enough. 
*/ -#ifdef NODE_NOT_IN_PAGE_FLAGS - pfn_align = node_map_pfn_alignment(); - if (pfn_align && pfn_align < PAGES_PER_SECTION) { - printk(KERN_WARNING "Node alignment %LuMB < min %LuMB, rejecting NUMA config\n", - PFN_PHYS(pfn_align) >> 20, - PFN_PHYS(PAGES_PER_SECTION) >> 20); - return -EINVAL; + if (IS_ENABLED(NODE_NOT_IN_PAGE_FLAGS)) { + unsigned long pfn_align = node_map_pfn_alignment(); + + if (pfn_align && pfn_align < PAGES_PER_SECTION) { + pr_warn("Node alignment %LuMB < min %LuMB, rejecting NUMA config\n", + PFN_PHYS(pfn_align) >> 20, + PFN_PHYS(PAGES_PER_SECTION) >> 20); + return -EINVAL; + } } -#endif if (!numa_meminfo_cover_memory(mi)) return -EINVAL; diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h index 71283739ffd2..e200eef6a7fd 100644 --- a/include/linux/page-flags-layout.h +++ b/include/linux/page-flags-layout.h @@ -98,9 +98,11 @@ /* * We are going to use the flags for the page to node mapping if its in * there. This includes the case where there is no node, so it is implicit. + * Note that this #define MUST have a value so that it can be tested with + * the IS_ENABLED() macro. */ #if !(NODES_WIDTH > 0 || NODES_SHIFT == 0) -#define NODE_NOT_IN_PAGE_FLAGS +#define NODE_NOT_IN_PAGE_FLAGS 1 #endif #if defined(CONFIG_NUMA_BALANCING) && LAST_CPUPID_WIDTH == 0 -- cgit v1.2.3 From 3f649ab728cda8038259d8f14492fe400fbab911 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 3 Jun 2020 13:09:38 -0700 Subject: treewide: Remove uninitialized_var() usage Using uninitialized_var() is dangerous as it papers over real bugs[1] (or can in the future), and suppresses unrelated compiler warnings (e.g. "unused variable"). If the compiler thinks it is uninitialized, either simply initialize the variable or make compiler changes. In preparation for removing[2] the[3] macro[4], remove all remaining needless uses with the following script: git grep '\buninitialized_var\b' | cut -d: -f1 | sort -u | \ xargs perl -pi -e \ 's/\buninitialized_var\(([^\)]+)\)/\1/g; s:\s*/\* (GCC be quiet|to make compiler happy) \*/$::g;' drivers/video/fbdev/riva/riva_hw.c was manually tweaked to avoid pathological white-space. No outstanding warnings were found building allmodconfig with GCC 9.3.0 for x86_64, i386, arm64, arm, powerpc, powerpc64le, s390x, mips, sparc64, alpha, and m68k. 
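For context only (a sketch, not part of the patch): the macro being removed simply told the compiler to treat the variable as already initialized, so dropping it restores the ability of -Wmaybe-uninitialized to flag genuinely broken control flow. The helper name and values below are hypothetical, and the macro definition shown is roughly what the GCC flavour in the kernel expanded to.

/*
 * Illustrative sketch only -- not taken from the tree; pick_value_*()
 * and its arguments are hypothetical.  The GCC flavour of the macro
 * was roughly:
 */
#define uninitialized_var(x) x = x

/* Before: the self-assignment hides -Wmaybe-uninitialized (and real bugs). */
static int pick_value_old(int have_hint, int hint)
{
	int uninitialized_var(val);	/* expands to: int val = val; */

	if (have_hint)
		val = hint;
	return have_hint ? val : 0;	/* val is only read when it was set */
}

/* After: a plain declaration, so the compiler can warn again if the
 * control flow ever stops guaranteeing that val is written before use.
 */
static int pick_value_new(int have_hint, int hint)
{
	int val;

	if (have_hint)
		val = hint;
	return have_hint ? val : 0;
}

Where the flow is too subtle for the compiler to follow, the alternative taken by some call sites in this series is an explicit initialization rather than the macro.
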
[1] https://lore.kernel.org/lkml/20200603174714.192027-1-glider@google.com/ [2] https://lore.kernel.org/lkml/CA+55aFw+Vbj0i=1TGqCR5vQkCzWJ0QxK6CernOU6eedsudAixw@mail.gmail.com/ [3] https://lore.kernel.org/lkml/CA+55aFwgbgqhbp1fkxvRKEpzyR5J8n1vKT1VZdz9knmPuXhOeg@mail.gmail.com/ [4] https://lore.kernel.org/lkml/CA+55aFz2500WfbKXAx8s67wrm9=yVJu65TpLgN_ybYNv0VEOKA@mail.gmail.com/ Reviewed-by: Leon Romanovsky # drivers/infiniband and mlx4/mlx5 Acked-by: Jason Gunthorpe # IB Acked-by: Kalle Valo # wireless drivers Reviewed-by: Chao Yu # erofs Signed-off-by: Kees Cook --- arch/arm/mach-sa1100/assabet.c | 2 +- arch/arm/mm/alignment.c | 2 +- arch/ia64/kernel/process.c | 2 +- arch/ia64/mm/discontig.c | 2 +- arch/ia64/mm/tlb.c | 2 +- arch/mips/lib/dump_tlb.c | 2 +- arch/mips/mm/init.c | 2 +- arch/mips/mm/tlb-r4k.c | 6 +++--- arch/powerpc/kvm/book3s_64_mmu_radix.c | 2 +- arch/powerpc/kvm/powerpc.c | 2 +- arch/powerpc/platforms/52xx/mpc52xx_pic.c | 2 +- arch/s390/kernel/smp.c | 2 +- arch/x86/kernel/quirks.c | 10 +++++----- arch/x86/kvm/mmu/mmu.c | 2 +- arch/x86/kvm/mmu/paging_tmpl.h | 2 +- arch/x86/kvm/x86.c | 2 +- block/blk-merge.c | 2 +- drivers/acpi/acpi_pad.c | 2 +- drivers/ata/libata-scsi.c | 2 +- drivers/atm/zatm.c | 2 +- drivers/block/drbd/drbd_nl.c | 6 +++--- drivers/block/rbd.c | 2 +- drivers/clk/clk-gate.c | 2 +- drivers/firewire/ohci.c | 14 +++++++------- drivers/gpu/drm/bridge/sil-sii8620.c | 2 +- drivers/gpu/drm/drm_edid.c | 2 +- drivers/gpu/drm/exynos/exynos_drm_dsi.c | 6 +++--- drivers/gpu/drm/i915/display/intel_fbc.c | 2 +- drivers/gpu/drm/i915/gt/intel_lrc.c | 2 +- drivers/gpu/drm/i915/intel_uncore.c | 2 +- drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c | 4 ++-- drivers/i2c/busses/i2c-rk3x.c | 2 +- drivers/ide/ide-acpi.c | 2 +- drivers/ide/ide-atapi.c | 2 +- drivers/ide/ide-io-std.c | 4 ++-- drivers/ide/ide-io.c | 8 ++++---- drivers/ide/ide-sysfs.c | 2 +- drivers/ide/umc8672.c | 2 +- drivers/idle/intel_idle.c | 2 +- drivers/infiniband/core/uverbs_cmd.c | 4 ++-- drivers/infiniband/hw/cxgb4/cm.c | 2 +- drivers/infiniband/hw/cxgb4/cq.c | 2 +- drivers/infiniband/hw/mlx4/qp.c | 6 +++--- drivers/infiniband/hw/mlx5/cq.c | 6 +++--- drivers/infiniband/hw/mlx5/devx.c | 2 +- drivers/infiniband/hw/mlx5/wr.c | 2 +- drivers/infiniband/hw/mthca/mthca_qp.c | 10 +++++----- drivers/infiniband/sw/siw/siw_qp_rx.c | 2 +- drivers/input/serio/serio_raw.c | 2 +- drivers/iommu/intel/iommu.c | 2 +- drivers/md/dm-io.c | 2 +- drivers/md/dm-ioctl.c | 2 +- drivers/md/dm-snap-persistent.c | 2 +- drivers/md/dm-table.c | 2 +- drivers/md/dm-writecache.c | 2 +- drivers/md/raid5.c | 2 +- drivers/media/dvb-frontends/rtl2832.c | 2 +- drivers/media/tuners/qt1010.c | 4 ++-- drivers/media/usb/gspca/vicam.c | 2 +- drivers/media/usb/uvc/uvc_video.c | 8 ++++---- drivers/memstick/host/jmb38x_ms.c | 2 +- drivers/memstick/host/tifm_ms.c | 2 +- drivers/mmc/host/sdhci.c | 2 +- drivers/mtd/nand/raw/nand_ecc.c | 2 +- drivers/mtd/nand/raw/s3c2410.c | 2 +- drivers/mtd/parsers/afs.c | 4 ++-- drivers/mtd/ubi/eba.c | 2 +- drivers/net/can/janz-ican3.c | 2 +- drivers/net/ethernet/broadcom/bnx2.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 4 ++-- drivers/net/ethernet/neterion/s2io.c | 2 +- drivers/net/ethernet/qlogic/qla3xxx.c | 2 +- drivers/net/ethernet/sun/cassini.c | 2 +- drivers/net/ethernet/sun/niu.c | 6 +++--- drivers/net/wan/z85230.c | 2 +- drivers/net/wireless/ath/ath10k/core.c | 2 +- drivers/net/wireless/ath/ath6kl/init.c | 2 +- drivers/net/wireless/ath/ath9k/init.c | 2 +- 
drivers/net/wireless/broadcom/b43/debugfs.c | 2 +- drivers/net/wireless/broadcom/b43/dma.c | 2 +- drivers/net/wireless/broadcom/b43/lo.c | 2 +- drivers/net/wireless/broadcom/b43/phy_n.c | 2 +- drivers/net/wireless/broadcom/b43/xmit.c | 12 ++++++------ drivers/net/wireless/broadcom/b43legacy/debugfs.c | 2 +- drivers/net/wireless/broadcom/b43legacy/main.c | 2 +- drivers/net/wireless/intel/iwlegacy/3945.c | 2 +- drivers/net/wireless/intel/iwlegacy/4965-mac.c | 2 +- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c | 4 ++-- drivers/pci/pcie/aer.c | 2 +- drivers/platform/x86/hdaps.c | 4 ++-- drivers/scsi/dc395x.c | 2 +- drivers/scsi/pm8001/pm8001_hwi.c | 2 +- drivers/scsi/pm8001/pm80xx_hwi.c | 2 +- drivers/ssb/driver_chipcommon.c | 4 ++-- drivers/tty/cyclades.c | 2 +- drivers/tty/isicom.c | 2 +- drivers/usb/musb/cppi_dma.c | 2 +- drivers/usb/storage/sddr55.c | 4 ++-- drivers/vhost/net.c | 6 +++--- drivers/video/fbdev/matrox/matroxfb_maven.c | 6 +++--- drivers/video/fbdev/pm3fb.c | 6 +++--- drivers/video/fbdev/riva/riva_hw.c | 3 +-- drivers/virtio/virtio_ring.c | 6 +++--- fs/afs/dir.c | 2 +- fs/afs/security.c | 2 +- fs/dlm/netlink.c | 2 +- fs/erofs/data.c | 4 ++-- fs/erofs/zdata.c | 2 +- fs/fat/dir.c | 2 +- fs/fuse/control.c | 4 ++-- fs/fuse/cuse.c | 2 +- fs/fuse/file.c | 2 +- fs/gfs2/aops.c | 2 +- fs/gfs2/bmap.c | 2 +- fs/gfs2/lops.c | 2 +- fs/hfsplus/unicode.c | 2 +- fs/isofs/namei.c | 4 ++-- fs/jffs2/erase.c | 2 +- fs/nfsd/nfsctl.c | 2 +- fs/ocfs2/alloc.c | 4 ++-- fs/ocfs2/dir.c | 14 +++++++------- fs/ocfs2/extent_map.c | 4 ++-- fs/ocfs2/namei.c | 2 +- fs/ocfs2/refcounttree.c | 2 +- fs/ocfs2/xattr.c | 2 +- fs/omfs/file.c | 2 +- fs/overlayfs/copy_up.c | 2 +- fs/ubifs/commit.c | 6 +++--- fs/ubifs/dir.c | 2 +- fs/ubifs/file.c | 4 ++-- fs/ubifs/journal.c | 4 ++-- fs/ubifs/lpt.c | 2 +- fs/ubifs/tnc.c | 6 +++--- fs/ubifs/tnc_misc.c | 4 ++-- fs/udf/balloc.c | 2 +- fs/xfs/xfs_bmap_util.c | 2 +- include/net/flow_offload.h | 2 +- kernel/async.c | 4 ++-- kernel/audit.c | 2 +- kernel/debug/kdb/kdb_io.c | 2 +- kernel/dma/debug.c | 2 +- kernel/events/core.c | 2 +- kernel/events/uprobes.c | 2 +- kernel/exit.c | 2 +- kernel/futex.c | 14 +++++++------- kernel/locking/lockdep.c | 16 ++++++++-------- kernel/trace/ring_buffer.c | 2 +- lib/radix-tree.c | 2 +- lib/test_lockup.c | 2 +- mm/frontswap.c | 2 +- mm/ksm.c | 2 +- mm/memcontrol.c | 2 +- mm/memory.c | 2 +- mm/mempolicy.c | 4 ++-- mm/page_alloc.c | 2 +- mm/percpu.c | 2 +- mm/slub.c | 4 ++-- mm/swap.c | 4 ++-- net/dccp/options.c | 2 +- net/ipv4/netfilter/nf_socket_ipv4.c | 6 +++--- net/ipv6/ip6_flowlabel.c | 2 +- net/ipv6/netfilter/nf_socket_ipv6.c | 2 +- net/netfilter/nf_conntrack_ftp.c | 2 +- net/netfilter/nfnetlink_log.c | 2 +- net/netfilter/nfnetlink_queue.c | 4 ++-- net/sched/cls_flow.c | 2 +- net/sched/sch_cake.c | 2 +- net/sched/sch_cbq.c | 2 +- net/sched/sch_fq_codel.c | 2 +- net/sched/sch_fq_pie.c | 2 +- net/sched/sch_hfsc.c | 2 +- net/sched/sch_htb.c | 2 +- net/sched/sch_sfq.c | 2 +- net/sunrpc/svcsock.c | 4 ++-- net/sunrpc/xprtsock.c | 10 +++++----- net/tls/tls_sw.c | 2 +- sound/core/control_compat.c | 2 +- sound/isa/sb/sb16_csp.c | 2 +- sound/usb/endpoint.c | 2 +- 179 files changed, 278 insertions(+), 279 deletions(-) (limited to 'include') diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c index aa265ede5730..2012fa8c28cf 100644 --- a/arch/arm/mach-sa1100/assabet.c +++ b/arch/arm/mach-sa1100/assabet.c @@ -652,7 +652,7 @@ static void __init map_sa1100_gpio_regs( void ) */ static void __init get_assabet_scr(void) { - 
unsigned long uninitialized_var(scr), i; + unsigned long scr, i; GPDR |= 0x3fc; /* Configure GPIO 9:2 as outputs */ GPSR = 0x3fc; /* Write 0xFF to GPIO 9:2 */ diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 81a627e6e1c5..f4bfc1cac91a 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -799,7 +799,7 @@ static int alignment_get_thumb(struct pt_regs *regs, u16 *ip, u16 *inst) static int do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { - union offset_union uninitialized_var(offset); + union offset_union offset; unsigned long instrptr; int (*handler)(unsigned long addr, u32 instr, struct pt_regs *regs); unsigned int type; diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 96dfb9e4b16f..da55b41ae33e 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -445,7 +445,7 @@ static void do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *arg) { unsigned long mask, sp, nat_bits = 0, ar_rnat, urbs_end, cfm; - unsigned long uninitialized_var(ip); /* GCC be quiet */ + unsigned long ip; elf_greg_t *dst = arg; struct pt_regs *pt; char nat; diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index dd8284bcbf16..da810ca234da 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -180,7 +180,7 @@ static void *per_cpu_node_setup(void *cpu_data, int node) void __init setup_per_cpu_areas(void) { struct pcpu_alloc_info *ai; - struct pcpu_group_info *uninitialized_var(gi); + struct pcpu_group_info *gi; unsigned int *cpu_map; void *base; unsigned long base_offset; diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c index 72cc568bc841..71c19918e387 100644 --- a/arch/ia64/mm/tlb.c +++ b/arch/ia64/mm/tlb.c @@ -369,7 +369,7 @@ EXPORT_SYMBOL(flush_tlb_range); void ia64_tlb_init(void) { - ia64_ptce_info_t uninitialized_var(ptce_info); /* GCC be quiet */ + ia64_ptce_info_t ptce_info; u64 tr_pgbits; long status; pal_vm_info_1_u_t vm_info_1; diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c index 5a418ba5e75f..4256423632c4 100644 --- a/arch/mips/lib/dump_tlb.c +++ b/arch/mips/lib/dump_tlb.c @@ -79,7 +79,7 @@ static void dump_tlb(int first, int last) unsigned int pagemask, guestctl1 = 0, c0, c1, i; unsigned long asidmask = cpu_asid_mask(¤t_cpu_data); int asidwidth = DIV_ROUND_UP(ilog2(asidmask) + 1, 4); - unsigned long uninitialized_var(s_mmid); + unsigned long s_mmid; #ifdef CONFIG_32BIT bool xpa = cpu_has_xpa && (read_c0_pagegrain() & PG_ELPA); int pwidth = xpa ? 11 : 8; diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 336b58173dc7..6c7bbfe35ba3 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -83,7 +83,7 @@ void setup_zero_pages(void) static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot) { enum fixed_addresses idx; - unsigned int uninitialized_var(old_mmid); + unsigned int old_mmid; unsigned long vaddr, flags, entrylo; unsigned long old_ctx; pte_t pte; diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index 6677dcb72580..38e2894d5fa3 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -119,7 +119,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, if (size <= (current_cpu_data.tlbsizeftlbsets ? 
current_cpu_data.tlbsize / 8 : current_cpu_data.tlbsize / 2)) { - unsigned long old_entryhi, uninitialized_var(old_mmid); + unsigned long old_entryhi, old_mmid; int newpid = cpu_asid(cpu, mm); old_entryhi = read_c0_entryhi(); @@ -213,7 +213,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) int cpu = smp_processor_id(); if (cpu_context(cpu, vma->vm_mm) != 0) { - unsigned long uninitialized_var(old_mmid); + unsigned long old_mmid; unsigned long flags, old_entryhi; int idx; @@ -382,7 +382,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, #ifdef CONFIG_XPA panic("Broken for XPA kernels"); #else - unsigned int uninitialized_var(old_mmid); + unsigned int old_mmid; unsigned long flags; unsigned long wired; unsigned long old_pagemask; diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index 6a73714759ba..777aa5625d5f 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -33,7 +33,7 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid, gva_t eaddr, void *to, void *from, unsigned long n) { - int uninitialized_var(old_pid), old_lpid; + int old_pid, old_lpid; unsigned long quadrant, ret = n; bool is_load = !!to; diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index dd7d141e33e8..aaa7b62f2f82 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -1110,7 +1110,7 @@ static inline u32 dp_to_sp(u64 fprd) static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; - u64 uninitialized_var(gpr); + u64 gpr; if (run->mmio.len > sizeof(gpr)) { printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c index fc98912f42cf..76a8102bdb98 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c @@ -340,7 +340,7 @@ static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq, { int l1irq; int l2irq; - struct irq_chip *uninitialized_var(irqchip); + struct irq_chip *irqchip; void *hndlr; int type; u32 reg; diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index e6be63ff162a..c2181471ac63 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -146,7 +146,7 @@ static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm) static inline int pcpu_stopped(struct pcpu *pcpu) { - u32 uninitialized_var(status); + u32 status; if (__pcpu_sigp(pcpu->address, SIGP_SENSE, 0, &status) != SIGP_CC_STATUS_STORED) diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index 896d74cb5081..1b10717c9321 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c @@ -95,7 +95,7 @@ static void ich_force_hpet_resume(void) static void ich_force_enable_hpet(struct pci_dev *dev) { u32 val; - u32 uninitialized_var(rcba); + u32 rcba; int err = 0; if (hpet_address || force_hpet_address) @@ -185,7 +185,7 @@ static void hpet_print_force_info(void) static void old_ich_force_hpet_resume(void) { u32 val; - u32 uninitialized_var(gen_cntl); + u32 gen_cntl; if (!force_hpet_address || !cached_dev) return; @@ -207,7 +207,7 @@ static void old_ich_force_hpet_resume(void) static void old_ich_force_enable_hpet(struct pci_dev *dev) { u32 val; - u32 uninitialized_var(gen_cntl); + u32 gen_cntl; if (hpet_address || force_hpet_address) return; @@ -298,7 +298,7 @@ static void vt8237_force_hpet_resume(void) static void 
vt8237_force_enable_hpet(struct pci_dev *dev) { - u32 uninitialized_var(val); + u32 val; if (hpet_address || force_hpet_address) return; @@ -429,7 +429,7 @@ static void nvidia_force_hpet_resume(void) static void nvidia_force_enable_hpet(struct pci_dev *dev) { - u32 uninitialized_var(val); + u32 val; if (hpet_address || force_hpet_address) return; diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 76817d13c86e..deafcced65d2 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1986,7 +1986,7 @@ static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, unsigned long data) { u64 *sptep; - struct rmap_iterator uninitialized_var(iter); + struct rmap_iterator iter; int young = 0; for_each_rmap_spte(rmap_head, &iter, sptep) diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index bd70ece1ef8b..275564a0ebdb 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -314,7 +314,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker, { int ret; pt_element_t pte; - pt_element_t __user *uninitialized_var(ptep_user); + pt_element_t __user *ptep_user; gfn_t table_gfn; u64 pt_access, pte_access; unsigned index, accessed_dirty, pte_pkey; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3b92db412335..598d5be960c9 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9927,7 +9927,7 @@ void kvm_arch_sync_events(struct kvm *kvm) int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size) { int i, r; - unsigned long hva, uninitialized_var(old_npages); + unsigned long hva, old_npages; struct kvm_memslots *slots = kvm_memslots(kvm); struct kvm_memory_slot *slot; diff --git a/block/blk-merge.c b/block/blk-merge.c index f0b0bae075a0..006402edef6b 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -473,7 +473,7 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio, struct scatterlist *sglist, struct scatterlist **sg) { - struct bio_vec uninitialized_var(bvec), bvprv = { NULL }; + struct bio_vec bvec, bvprv = { NULL }; struct bvec_iter iter; int nsegs = 0; bool new_bio = false; diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index e7dc0133f817..6cc4c92d9ff9 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -88,7 +88,7 @@ static void round_robin_cpu(unsigned int tsk_index) cpumask_var_t tmp; int cpu; unsigned long min_weight = -1; - unsigned long uninitialized_var(preferred_cpu); + unsigned long preferred_cpu; if (!alloc_cpumask_var(&tmp, GFP_KERNEL)) return; diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 46336084b1a9..ec233208585b 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -93,7 +93,7 @@ static ssize_t ata_scsi_park_show(struct device *device, struct ata_link *link; struct ata_device *dev; unsigned long now; - unsigned int uninitialized_var(msecs); + unsigned int msecs; int rc = 0; ap = ata_shost_to_port(sdev->host); diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c index 57f97b95a453..165eebe06e39 100644 --- a/drivers/atm/zatm.c +++ b/drivers/atm/zatm.c @@ -940,7 +940,7 @@ static int open_tx_first(struct atm_vcc *vcc) vcc->qos.txtp.max_pcr >= ATM_OC3_PCR); if (unlimited && zatm_dev->ubr != -1) zatm_vcc->shaper = zatm_dev->ubr; else { - int uninitialized_var(pcr); + int pcr; if (unlimited) vcc->qos.txtp.max_sdu = ATM_MAX_AAL5_PDU; if ((zatm_vcc->shaper = alloc_shaper(vcc->dev,&pcr, diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c 
index da4a3ebe04ef..c0017cc51ecc 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -3423,7 +3423,7 @@ int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb) { struct nlattr *resource_filter; struct drbd_resource *resource; - struct drbd_device *uninitialized_var(device); + struct drbd_device *device; int minor, err, retcode; struct drbd_genlmsghdr *dh; struct device_info device_info; @@ -3512,7 +3512,7 @@ int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb) { struct nlattr *resource_filter; struct drbd_resource *resource = NULL, *next_resource; - struct drbd_connection *uninitialized_var(connection); + struct drbd_connection *connection; int err = 0, retcode; struct drbd_genlmsghdr *dh; struct connection_info connection_info; @@ -3674,7 +3674,7 @@ int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb) { struct nlattr *resource_filter; struct drbd_resource *resource; - struct drbd_device *uninitialized_var(device); + struct drbd_device *device; struct drbd_peer_device *peer_device = NULL; int minor, err, retcode; struct drbd_genlmsghdr *dh; diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 4f61e9209461..d9c0e7d154f9 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1993,7 +1993,7 @@ static int rbd_object_map_update_finish(struct rbd_obj_request *obj_req, struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; struct ceph_osd_data *osd_data; u64 objno; - u8 state, new_state, uninitialized_var(current_state); + u8 state, new_state, current_state; bool has_current_state; void *p; diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c index 2ca1f2ac38a6..070dc47e95a1 100644 --- a/drivers/clk/clk-gate.c +++ b/drivers/clk/clk-gate.c @@ -56,7 +56,7 @@ static void clk_gate_endisable(struct clk_hw *hw, int enable) { struct clk_gate *gate = to_clk_gate(hw); int set = gate->flags & CLK_GATE_SET_TO_DISABLE ? 
1 : 0; - unsigned long uninitialized_var(flags); + unsigned long flags; u32 reg; set ^= enable; diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 54fdc39cd0bc..7dde21b18b04 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -1099,7 +1099,7 @@ static void context_tasklet(unsigned long data) static int context_add_buffer(struct context *ctx) { struct descriptor_buffer *desc; - dma_addr_t uninitialized_var(bus_addr); + dma_addr_t bus_addr; int offset; /* @@ -1289,7 +1289,7 @@ static int at_context_queue_packet(struct context *ctx, struct fw_packet *packet) { struct fw_ohci *ohci = ctx->ohci; - dma_addr_t d_bus, uninitialized_var(payload_bus); + dma_addr_t d_bus, payload_bus; struct driver_data *driver_data; struct descriptor *d, *last; __le32 *header; @@ -2445,7 +2445,7 @@ static int ohci_set_config_rom(struct fw_card *card, { struct fw_ohci *ohci; __be32 *next_config_rom; - dma_addr_t uninitialized_var(next_config_rom_bus); + dma_addr_t next_config_rom_bus; ohci = fw_ohci(card); @@ -2933,10 +2933,10 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, int type, int channel, size_t header_size) { struct fw_ohci *ohci = fw_ohci(card); - struct iso_context *uninitialized_var(ctx); - descriptor_callback_t uninitialized_var(callback); - u64 *uninitialized_var(channels); - u32 *uninitialized_var(mask), uninitialized_var(regs); + struct iso_context *ctx; + descriptor_callback_t callback; + u64 *channels; + u32 *mask, regs; int index, ret = -EBUSY; spin_lock_irq(&ohci->lock); diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c index 92acd336aa89..6cd8e012de5d 100644 --- a/drivers/gpu/drm/bridge/sil-sii8620.c +++ b/drivers/gpu/drm/bridge/sil-sii8620.c @@ -986,7 +986,7 @@ static void sii8620_set_auto_zone(struct sii8620 *ctx) static void sii8620_stop_video(struct sii8620 *ctx) { - u8 uninitialized_var(val); + u8 val; sii8620_write_seq_static(ctx, REG_TPI_INTR_EN, 0, diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index fed653f13c26..b98fa573e706 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -3051,7 +3051,7 @@ static int drm_cvt_modes(struct drm_connector *connector, const u8 empty[3] = { 0, 0, 0 }; for (i = 0; i < 4; i++) { - int uninitialized_var(width), height; + int width, height; cvt = &(timing->data.other_data.data.cvt[i]); if (!memcmp(cvt->code, empty, 3)) diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index ee96a95fb6be..7a6f6df5e954 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -547,9 +547,9 @@ static unsigned long exynos_dsi_pll_find_pms(struct exynos_dsi *dsi, unsigned long best_freq = 0; u32 min_delta = 0xffffffff; u8 p_min, p_max; - u8 _p, uninitialized_var(best_p); - u16 _m, uninitialized_var(best_m); - u8 _s, uninitialized_var(best_s); + u8 _p, best_p; + u16 _m, best_m; + u8 _s, best_s; p_min = DIV_ROUND_UP(fin, (12 * MHZ)); p_max = fin / (6 * MHZ); diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 1c26673acb2d..c3a83c06c1b5 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -474,7 +474,7 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, unsigned int size, unsigned int fb_cpp) { struct intel_fbc *fbc = &dev_priv->fbc; - struct drm_mm_node *uninitialized_var(compressed_llb); + struct drm_mm_node *compressed_llb; 
int ret; drm_WARN_ON(&dev_priv->drm, diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 7c3d8ef4a47c..234bf45c290b 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1103,7 +1103,7 @@ static struct i915_request * __unwind_incomplete_requests(struct intel_engine_cs *engine) { struct i915_request *rq, *rn, *active = NULL; - struct list_head *uninitialized_var(pl); + struct list_head *pl; int prio = I915_PRIORITY_INVALID; lockdep_assert_held(&engine->active.lock); diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index a61cb8ca4d50..c8fd2bcb17ee 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1991,7 +1991,7 @@ int __intel_wait_for_register_fw(struct intel_uncore *uncore, unsigned int slow_timeout_ms, u32 *out_value) { - u32 uninitialized_var(reg_value); + u32 reg_value; #define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value) int ret; diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c index 3feff0c45b3f..542dcf7eddd6 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c @@ -517,8 +517,8 @@ dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode, unsigned long best_freq = 0; unsigned long fvco_min, fvco_max, fin, fout; unsigned int min_prediv, max_prediv; - unsigned int _prediv, uninitialized_var(best_prediv); - unsigned long _fbdiv, uninitialized_var(best_fbdiv); + unsigned int _prediv, best_prediv; + unsigned long _fbdiv, best_fbdiv; unsigned long min_delta = ULONG_MAX; dsi->format = format; diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index bc698240c4aa..15324bfbc6cb 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c @@ -415,7 +415,7 @@ static void rk3x_i2c_handle_read(struct rk3x_i2c *i2c, unsigned int ipd) { unsigned int i; unsigned int len = i2c->msg->len - i2c->processed; - u32 uninitialized_var(val); + u32 val; u8 byte; /* we only care for MBRF here. */ diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c index 7d4e5c08f133..05e18d658141 100644 --- a/drivers/ide/ide-acpi.c +++ b/drivers/ide/ide-acpi.c @@ -180,7 +180,7 @@ err: static acpi_handle ide_acpi_hwif_get_handle(ide_hwif_t *hwif) { struct device *dev = hwif->gendev.parent; - acpi_handle uninitialized_var(dev_handle); + acpi_handle dev_handle; u64 pcidevfn; acpi_handle chan_handle; int err; diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c index 80bc3bf82f4d..2162bc80f09e 100644 --- a/drivers/ide/ide-atapi.c +++ b/drivers/ide/ide-atapi.c @@ -609,7 +609,7 @@ static int ide_delayed_transfer_pc(ide_drive_t *drive) static ide_startstop_t ide_transfer_pc(ide_drive_t *drive) { - struct ide_atapi_pc *uninitialized_var(pc); + struct ide_atapi_pc *pc; ide_hwif_t *hwif = drive->hwif; struct request *rq = hwif->rq; ide_expiry_t *expiry; diff --git a/drivers/ide/ide-io-std.c b/drivers/ide/ide-io-std.c index 18c20a7aa0ce..94bdcf1ea186 100644 --- a/drivers/ide/ide-io-std.c +++ b/drivers/ide/ide-io-std.c @@ -173,7 +173,7 @@ void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 
1 : 0; if (io_32bit) { - unsigned long uninitialized_var(flags); + unsigned long flags; if ((io_32bit & 2) && !mmio) { local_irq_save(flags); @@ -217,7 +217,7 @@ void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0; if (io_32bit) { - unsigned long uninitialized_var(flags); + unsigned long flags; if ((io_32bit & 2) && !mmio) { local_irq_save(flags); diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index c31f1d2b3b07..1a53c7a75224 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -622,12 +622,12 @@ static int drive_is_ready(ide_drive_t *drive) void ide_timer_expiry (struct timer_list *t) { ide_hwif_t *hwif = from_timer(hwif, t, timer); - ide_drive_t *uninitialized_var(drive); + ide_drive_t *drive; ide_handler_t *handler; unsigned long flags; int wait = -1; int plug_device = 0; - struct request *uninitialized_var(rq_in_flight); + struct request *rq_in_flight; spin_lock_irqsave(&hwif->lock, flags); @@ -780,13 +780,13 @@ irqreturn_t ide_intr (int irq, void *dev_id) { ide_hwif_t *hwif = (ide_hwif_t *)dev_id; struct ide_host *host = hwif->host; - ide_drive_t *uninitialized_var(drive); + ide_drive_t *drive; ide_handler_t *handler; unsigned long flags; ide_startstop_t startstop; irqreturn_t irq_ret = IRQ_NONE; int plug_device = 0; - struct request *uninitialized_var(rq_in_flight); + struct request *rq_in_flight; if (host->host_flags & IDE_HFLAG_SERIALIZE) { if (hwif != host->cur_port) diff --git a/drivers/ide/ide-sysfs.c b/drivers/ide/ide-sysfs.c index b9dfeb2e8bd6..c08a8a0916e2 100644 --- a/drivers/ide/ide-sysfs.c +++ b/drivers/ide/ide-sysfs.c @@ -131,7 +131,7 @@ static struct device_attribute *ide_port_attrs[] = { int ide_sysfs_register_port(ide_hwif_t *hwif) { - int i, uninitialized_var(rc); + int i, rc; for (i = 0; ide_port_attrs[i]; i++) { rc = device_create_file(hwif->portdev, ide_port_attrs[i]); diff --git a/drivers/ide/umc8672.c b/drivers/ide/umc8672.c index 870e235e30af..cf996f788292 100644 --- a/drivers/ide/umc8672.c +++ b/drivers/ide/umc8672.c @@ -108,7 +108,7 @@ static void umc_set_speeds(u8 speeds[]) static void umc_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { ide_hwif_t *mate = hwif->mate; - unsigned long uninitialized_var(flags); + unsigned long flags; const u8 pio = drive->pio_mode - XFER_PIO_0; printk("%s: setting umc8672 to PIO mode%d (speed %d)\n", diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index f4495841bf68..aae53e650638 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -132,7 +132,7 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state = &drv->states[index]; unsigned long eax = flg2MWAIT(state->flags); unsigned long ecx = 1; /* break on interrupt flag */ - bool uninitialized_var(tick); + bool tick; int cpu = smp_processor_id(); /* diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index b48b3f6e632d..76e7ec0f0775 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -1584,7 +1584,7 @@ static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs) struct ib_uverbs_create_qp_resp resp; struct ib_uqp_object *obj; struct ib_xrcd *xrcd; - struct ib_uobject *uninitialized_var(xrcd_uobj); + struct ib_uobject *xrcd_uobj; struct ib_qp *qp; struct ib_qp_open_attr attr = {}; int ret; @@ -3406,7 +3406,7 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs, struct ib_usrq_object *obj; struct ib_pd *pd; struct 
ib_srq *srq; - struct ib_uobject *uninitialized_var(xrcd_uobj); + struct ib_uobject *xrcd_uobj; struct ib_srq_init_attr attr; int ret; struct ib_device *ib_dev; diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 30e08bcc9afb..77bc02a9228e 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -3282,7 +3282,7 @@ static int get_lladdr(struct net_device *dev, struct in6_addr *addr, static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id) { - struct in6_addr uninitialized_var(addr); + struct in6_addr addr; struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr; diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index b1bb61c65f4f..352b8af1998a 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -754,7 +754,7 @@ skip_cqe: static int __c4iw_poll_cq_one(struct c4iw_cq *chp, struct c4iw_qp *qhp, struct ib_wc *wc, struct c4iw_srq *srq) { - struct t4_cqe uninitialized_var(cqe); + struct t4_cqe cqe; struct t4_wq *wq = qhp ? &qhp->wq : NULL; u32 credit = 0; u8 cqe_flushed; diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index cf51e3cbd969..f9ca6e000a81 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -3541,11 +3541,11 @@ static int _mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, int nreq; int err = 0; unsigned ind; - int uninitialized_var(size); - unsigned uninitialized_var(seglen); + int size; + unsigned seglen; __be32 dummy; __be32 *lso_wqe; - __be32 uninitialized_var(lso_hdr_sz); + __be32 lso_hdr_sz; __be32 blh; int i; struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 0c18cb6a2f14..0133ebb8d740 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -925,8 +925,8 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct mlx5_ib_dev *dev = to_mdev(ibdev); struct mlx5_ib_cq *cq = to_mcq(ibcq); u32 out[MLX5_ST_SZ_DW(create_cq_out)]; - int uninitialized_var(index); - int uninitialized_var(inlen); + int index; + int inlen; u32 *cqb = NULL; void *cqc; int cqe_size; @@ -1246,7 +1246,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) __be64 *pas; int page_shift; int inlen; - int uninitialized_var(cqe_size); + int cqe_size; unsigned long flags; if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) { diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index 9454a66c12cc..655ea9c984e1 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -2536,7 +2536,7 @@ static ssize_t devx_async_event_read(struct file *filp, char __user *buf, { struct devx_async_event_file *ev_file = filp->private_data; struct devx_event_subscription *event_sub; - struct devx_async_event_data *uninitialized_var(event); + struct devx_async_event_data *event; int ret = 0; size_t eventsz; bool omit_data; diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c index 2c6df1c43b55..bc35dbe4855b 100644 --- a/drivers/infiniband/hw/mlx5/wr.c +++ b/drivers/infiniband/hw/mlx5/wr.c @@ -1249,7 +1249,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, struct mlx5_wqe_xrc_seg *xrc; struct mlx5_bf *bf; void *cur_edge; - int uninitialized_var(size); + int 
size; unsigned long flags; unsigned int idx; int err = 0; diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index d04c245359eb..c6e95d0d760a 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c @@ -1639,8 +1639,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, * without initializing f0 and size0, and they are in fact * never used uninitialized. */ - int uninitialized_var(size0); - u32 uninitialized_var(f0); + int size0; + u32 f0; int ind; u8 op0 = 0; @@ -1835,7 +1835,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr, * without initializing size0, and it is in fact never used * uninitialized. */ - int uninitialized_var(size0); + int size0; int ind; void *wqe; void *prev_wqe; @@ -1943,8 +1943,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, * without initializing f0 and size0, and they are in fact * never used uninitialized. */ - int uninitialized_var(size0); - u32 uninitialized_var(f0); + int size0; + u32 f0; int ind; u8 op0 = 0; diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c index 7271d705f4b0..857be5a7d0bd 100644 --- a/drivers/infiniband/sw/siw/siw_qp_rx.c +++ b/drivers/infiniband/sw/siw/siw_qp_rx.c @@ -333,7 +333,7 @@ static struct siw_wqe *siw_rqe_get(struct siw_qp *qp) struct siw_srq *srq; struct siw_wqe *wqe = NULL; bool srq_event = false; - unsigned long uninitialized_var(flags); + unsigned long flags; srq = qp->srq; if (srq) { diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c index e9647ebff187..1e4770094415 100644 --- a/drivers/input/serio/serio_raw.c +++ b/drivers/input/serio/serio_raw.c @@ -159,7 +159,7 @@ static ssize_t serio_raw_read(struct file *file, char __user *buffer, { struct serio_raw_client *client = file->private_data; struct serio_raw *serio_raw = client->serio_raw; - char uninitialized_var(c); + char c; ssize_t read = 0; int error; diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index d759e7234e98..c29fd0991857 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -2236,7 +2236,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, unsigned long nr_pages, int prot) { struct dma_pte *first_pte = NULL, *pte = NULL; - phys_addr_t uninitialized_var(pteval); + phys_addr_t pteval; unsigned long sg_res = 0; unsigned int largepage_lvl = 0; unsigned long lvl_pages = 0; diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 81ffc59d05c9..4312007d2d34 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -306,7 +306,7 @@ static void do_region(int op, int op_flags, unsigned region, struct request_queue *q = bdev_get_queue(where->bdev); unsigned short logical_block_size = queue_logical_block_size(q); sector_t num_sectors; - unsigned int uninitialized_var(special_cmd_max_sectors); + unsigned int special_cmd_max_sectors; /* * Reject unsupported discard and write same requests. 
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 489935d5f22d..056d891a32a9 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1844,7 +1844,7 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *us int ioctl_flags; int param_flags; unsigned int cmd; - struct dm_ioctl *uninitialized_var(param); + struct dm_ioctl *param; ioctl_fn fn = NULL; size_t input_param_size; struct dm_ioctl param_kernel; diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 963d3774c93e..247089c2be25 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -613,7 +613,7 @@ static int persistent_read_metadata(struct dm_exception_store *store, chunk_t old, chunk_t new), void *callback_context) { - int r, uninitialized_var(new_snapshot); + int r, new_snapshot; struct pstore *ps = get_info(store); /* diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 8277b959e00b..89d7cda07640 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -642,7 +642,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table, */ unsigned short remaining = 0; - struct dm_target *uninitialized_var(ti); + struct dm_target *ti; struct queue_limits ti_limits; unsigned i; diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 30505d70f423..ec83edff25bf 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -1752,7 +1752,7 @@ static void writecache_writeback(struct work_struct *work) { struct dm_writecache *wc = container_of(work, struct dm_writecache, writeback_work); struct blk_plug plug; - struct wc_entry *f, *uninitialized_var(g), *e = NULL; + struct wc_entry *f, *g, *e = NULL; struct rb_node *node, *next_node; struct list_head skipped; struct writeback_list wbl; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index ab8067f9ce8c..401b366af076 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2601,7 +2601,7 @@ static void raid5_end_write_request(struct bio *bi) struct stripe_head *sh = bi->bi_private; struct r5conf *conf = sh->raid_conf; int disks = sh->disks, i; - struct md_rdev *uninitialized_var(rdev); + struct md_rdev *rdev; sector_t first_bad; int bad_sectors; int replacement = 0; diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c index 6ec277421390..e5bffaaeed38 100644 --- a/drivers/media/dvb-frontends/rtl2832.c +++ b/drivers/media/dvb-frontends/rtl2832.c @@ -640,7 +640,7 @@ static int rtl2832_read_status(struct dvb_frontend *fe, enum fe_status *status) struct i2c_client *client = dev->client; struct dtv_frontend_properties *c = &fe->dtv_property_cache; int ret; - u32 uninitialized_var(tmp); + u32 tmp; u8 u8tmp, buf[2]; u16 u16tmp; diff --git a/drivers/media/tuners/qt1010.c b/drivers/media/tuners/qt1010.c index 85bbdd4ecdbb..e48faf942830 100644 --- a/drivers/media/tuners/qt1010.c +++ b/drivers/media/tuners/qt1010.c @@ -215,7 +215,7 @@ static int qt1010_set_params(struct dvb_frontend *fe) static int qt1010_init_meas1(struct qt1010_priv *priv, u8 oper, u8 reg, u8 reg_init_val, u8 *retval) { - u8 i, val1, uninitialized_var(val2); + u8 i, val1, val2; int err; qt1010_i2c_oper_t i2c_data[] = { @@ -250,7 +250,7 @@ static int qt1010_init_meas1(struct qt1010_priv *priv, static int qt1010_init_meas2(struct qt1010_priv *priv, u8 reg_init_val, u8 *retval) { - u8 i, uninitialized_var(val); + u8 i, val; int err; qt1010_i2c_oper_t i2c_data[] = { { QT1010_WR, 0x07, reg_init_val }, diff --git 
a/drivers/media/usb/gspca/vicam.c b/drivers/media/usb/gspca/vicam.c index 179b2ec3df57..d98343fd33fe 100644 --- a/drivers/media/usb/gspca/vicam.c +++ b/drivers/media/usb/gspca/vicam.c @@ -225,7 +225,7 @@ static int sd_init(struct gspca_dev *gspca_dev) { int ret; const struct ihex_binrec *rec; - const struct firmware *uninitialized_var(fw); + const struct firmware *fw; u8 *firmware_buf; ret = request_ihex_firmware(&fw, VICAM_FIRMWARE, diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c index 8fa77a81dd7f..a65d5353a441 100644 --- a/drivers/media/usb/uvc/uvc_video.c +++ b/drivers/media/usb/uvc/uvc_video.c @@ -765,9 +765,9 @@ static void uvc_video_stats_decode(struct uvc_streaming *stream, unsigned int header_size; bool has_pts = false; bool has_scr = false; - u16 uninitialized_var(scr_sof); - u32 uninitialized_var(scr_stc); - u32 uninitialized_var(pts); + u16 scr_sof; + u32 scr_stc; + u32 pts; if (stream->stats.stream.nb_frames == 0 && stream->stats.frame.nb_packets == 0) @@ -1828,7 +1828,7 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream, struct usb_host_endpoint *best_ep = NULL; unsigned int best_psize = UINT_MAX; unsigned int bandwidth; - unsigned int uninitialized_var(altsetting); + unsigned int altsetting; int intfnum = stream->intfnum; /* Isochronous endpoint, select the alternate setting. */ diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c index 0a9c5ddf2f59..383091517ed7 100644 --- a/drivers/memstick/host/jmb38x_ms.c +++ b/drivers/memstick/host/jmb38x_ms.c @@ -314,7 +314,7 @@ static int jmb38x_ms_transfer_data(struct jmb38x_ms_host *host) } while (length) { - unsigned int uninitialized_var(p_off); + unsigned int p_off; if (host->req->long_data) { pg = nth_page(sg_page(&host->req->sg), diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c index 5b966b54d6e9..fc35c7404429 100644 --- a/drivers/memstick/host/tifm_ms.c +++ b/drivers/memstick/host/tifm_ms.c @@ -198,7 +198,7 @@ static unsigned int tifm_ms_transfer_data(struct tifm_ms *host) host->block_pos); while (length) { - unsigned int uninitialized_var(p_off); + unsigned int p_off; if (host->req->long_data) { pg = nth_page(sg_page(&host->req->sg), diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 37b1158c1c0c..1ee866a38794 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -493,7 +493,7 @@ static void sdhci_read_block_pio(struct sdhci_host *host) { unsigned long flags; size_t blksize, len, chunk; - u32 uninitialized_var(scratch); + u32 scratch; u8 *buf; DBG("PIO reading\n"); diff --git a/drivers/mtd/nand/raw/nand_ecc.c b/drivers/mtd/nand/raw/nand_ecc.c index 09fdced659f5..b6a46b1b7781 100644 --- a/drivers/mtd/nand/raw/nand_ecc.c +++ b/drivers/mtd/nand/raw/nand_ecc.c @@ -131,7 +131,7 @@ void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize, /* rp0..rp15..rp17 are the various accumulated parities (per byte) */ uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7; uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15, rp16; - uint32_t uninitialized_var(rp17); /* to make compiler happy */ + uint32_t rp17; uint32_t par; /* the cumulative parity for all data */ uint32_t tmppar; /* the cumulative parity for this iteration; for rp12, rp14 and rp16 at the end of the diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c index f86dff311464..d0dd0c446e4d 100644 --- a/drivers/mtd/nand/raw/s3c2410.c +++ b/drivers/mtd/nand/raw/s3c2410.c @@ -291,7 +291,7 @@ static int 
s3c2410_nand_setrate(struct s3c2410_nand_info *info) int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4; int tacls, twrph0, twrph1; unsigned long clkrate = clk_get_rate(info->clk); - unsigned long uninitialized_var(set), cfg, uninitialized_var(mask); + unsigned long set, cfg, mask; unsigned long flags; /* calculate the timing information for the controller */ diff --git a/drivers/mtd/parsers/afs.c b/drivers/mtd/parsers/afs.c index 752b6cf005f7..980e332bdac4 100644 --- a/drivers/mtd/parsers/afs.c +++ b/drivers/mtd/parsers/afs.c @@ -126,8 +126,8 @@ static int afs_parse_v1_partition(struct mtd_info *mtd, * Static checks cannot see that we bail out if we have an error * reading the footer. */ - u_int uninitialized_var(iis_ptr); - u_int uninitialized_var(img_ptr); + u_int iis_ptr; + u_int img_ptr; u_int ptr; size_t sz; int ret; diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index 5133e1be5331..0edecfdbd01f 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c @@ -599,7 +599,7 @@ int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, int err, pnum, scrub = 0, vol_id = vol->vol_id; struct ubi_vid_io_buf *vidb; struct ubi_vid_hdr *vid_hdr; - uint32_t uninitialized_var(crc); + uint32_t crc; err = leb_read_lock(ubi, vol_id, lnum); if (err) diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index a761092e6ac9..f929db893957 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -1451,7 +1451,7 @@ static int ican3_napi(struct napi_struct *napi, int budget) /* process all communication messages */ while (true) { - struct ican3_msg uninitialized_var(msg); + struct ican3_msg msg; ret = ican3_recv_msg(mod, &msg); if (ret) break; diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index e1c236cab2a7..c8cc14eadbb4 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -1455,7 +1455,7 @@ bnx2_test_and_disable_2g5(struct bnx2 *bp) static void bnx2_enable_forced_2g5(struct bnx2 *bp) { - u32 uninitialized_var(bmcr); + u32 bmcr; int err; if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) @@ -1499,7 +1499,7 @@ bnx2_enable_forced_2g5(struct bnx2 *bp) static void bnx2_disable_forced_2g5(struct bnx2 *bp) { - u32 uninitialized_var(bmcr); + u32 bmcr; int err; if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 5ddd18639a1e..c410a0ce35c8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -529,8 +529,8 @@ static int req_pages_handler(struct notifier_block *nb, int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) { - u16 uninitialized_var(func_id); - s32 uninitialized_var(npages); + u16 func_id; + s32 npages; int err; err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot); diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 67e62603fe3b..15b8b1bf8163 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -7276,7 +7276,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) int ring_no = ring_data->ring_no; u16 l3_csum, l4_csum; unsigned long long err = rxdp->Control_1 & RXD_T_CODE; - struct lro *uninitialized_var(lro); + struct lro *lro; u8 err_mask; struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; diff --git 
a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 0fade19e00d4..0d0e38debbc2 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -3769,7 +3769,7 @@ static int ql3xxx_probe(struct pci_dev *pdev, struct net_device *ndev = NULL; struct ql3_adapter *qdev = NULL; static int cards_found; - int uninitialized_var(pci_using_dac), err; + int pci_using_dac, err; err = pci_enable_device(pdev); if (err) { diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index debd3c3fa6fb..015fdb851cdb 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -2271,7 +2271,7 @@ static int cas_rx_ringN(struct cas *cp, int ring, int budget) drops = 0; while (1) { struct cas_rx_comp *rxc = rxcs + entry; - struct sk_buff *uninitialized_var(skb); + struct sk_buff *skb; int type, len; u64 words[4]; int i, dring; diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 9a5004f674c7..1b697e4cd7dc 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -429,7 +429,7 @@ static int serdes_init_niu_1g_serdes(struct niu *np) struct niu_link_config *lp = &np->link_config; u16 pll_cfg, pll_sts; int max_retry = 100; - u64 uninitialized_var(sig), mask, val; + u64 sig, mask, val; u32 tx_cfg, rx_cfg; unsigned long i; int err; @@ -526,7 +526,7 @@ static int serdes_init_niu_10g_serdes(struct niu *np) struct niu_link_config *lp = &np->link_config; u32 tx_cfg, rx_cfg, pll_cfg, pll_sts; int max_retry = 100; - u64 uninitialized_var(sig), mask, val; + u64 sig, mask, val; unsigned long i; int err; @@ -714,7 +714,7 @@ static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val) static int esr_reset(struct niu *np) { - u32 uninitialized_var(reset); + u32 reset; int err; err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c index 7ad3d24195ba..138930c66ad2 100644 --- a/drivers/net/wan/z85230.c +++ b/drivers/net/wan/z85230.c @@ -702,7 +702,7 @@ EXPORT_SYMBOL(z8530_nop); irqreturn_t z8530_interrupt(int irq, void *dev_id) { struct z8530_dev *dev=dev_id; - u8 uninitialized_var(intr); + u8 intr; static volatile int locker=0; int work=0; struct z8530_irqhandler *irqs; diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 22b6937ac225..340ce327ac14 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -2240,7 +2240,7 @@ static int ath10k_init_uart(struct ath10k *ar) static int ath10k_init_hw_params(struct ath10k *ar) { - const struct ath10k_hw_params *uninitialized_var(hw_params); + const struct ath10k_hw_params *hw_params; int i; for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) { diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c index aa1c71a76ef7..811fad6d60c0 100644 --- a/drivers/net/wireless/ath/ath6kl/init.c +++ b/drivers/net/wireless/ath/ath6kl/init.c @@ -1575,7 +1575,7 @@ static int ath6kl_init_upload(struct ath6kl *ar) int ath6kl_init_hw_params(struct ath6kl *ar) { - const struct ath6kl_hw *uninitialized_var(hw); + const struct ath6kl_hw *hw; int i; for (i = 0; i < ARRAY_SIZE(hw_list); i++) { diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 289a2444d534..4d72cd7daaa2 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -230,7 +230,7 @@ static 
unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 cl struct ath_hw *ah = hw_priv; struct ath_common *common = ath9k_hw_common(ah); struct ath_softc *sc = (struct ath_softc *) common->priv; - unsigned long uninitialized_var(flags); + unsigned long flags; u32 val; if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) { diff --git a/drivers/net/wireless/broadcom/b43/debugfs.c b/drivers/net/wireless/broadcom/b43/debugfs.c index dc1819ca52ac..89a25aefb327 100644 --- a/drivers/net/wireless/broadcom/b43/debugfs.c +++ b/drivers/net/wireless/broadcom/b43/debugfs.c @@ -493,7 +493,7 @@ static ssize_t b43_debugfs_read(struct file *file, char __user *userbuf, struct b43_wldev *dev; struct b43_debugfs_fops *dfops; struct b43_dfs_file *dfile; - ssize_t uninitialized_var(ret); + ssize_t ret; char *buf; const size_t bufsize = 1024 * 16; /* 16 kiB buffer */ const size_t buforder = get_order(bufsize); diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c index 9733c64bf978..ca671fc13116 100644 --- a/drivers/net/wireless/broadcom/b43/dma.c +++ b/drivers/net/wireless/broadcom/b43/dma.c @@ -37,7 +37,7 @@ static u32 b43_dma_address(struct b43_dma *dma, dma_addr_t dmaaddr, enum b43_addrtype addrtype) { - u32 uninitialized_var(addr); + u32 addr; switch (addrtype) { case B43_DMA_ADDR_LOW: diff --git a/drivers/net/wireless/broadcom/b43/lo.c b/drivers/net/wireless/broadcom/b43/lo.c index 5d97cf06eceb..338b6545a1e7 100644 --- a/drivers/net/wireless/broadcom/b43/lo.c +++ b/drivers/net/wireless/broadcom/b43/lo.c @@ -729,7 +729,7 @@ struct b43_lo_calib *b43_calibrate_lo_setting(struct b43_wldev *dev, }; int max_rx_gain; struct b43_lo_calib *cal; - struct lo_g_saved_values uninitialized_var(saved_regs); + struct lo_g_saved_values saved_regs; /* Values from the "TXCTL Register and Value Table" */ u16 txctl_reg; u16 txctl_value; diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c index 46db91846007..39de18d3ce91 100644 --- a/drivers/net/wireless/broadcom/b43/phy_n.c +++ b/drivers/net/wireless/broadcom/b43/phy_n.c @@ -5643,7 +5643,7 @@ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev, u8 rfctl[2]; u8 afectl_core; u16 tmp[6]; - u16 uninitialized_var(cur_hpf1), uninitialized_var(cur_hpf2), cur_lna; + u16 cur_hpf1, cur_hpf2, cur_lna; u32 real, imag; enum nl80211_band band; diff --git a/drivers/net/wireless/broadcom/b43/xmit.c b/drivers/net/wireless/broadcom/b43/xmit.c index 55babc6d1091..7651b1bdb592 100644 --- a/drivers/net/wireless/broadcom/b43/xmit.c +++ b/drivers/net/wireless/broadcom/b43/xmit.c @@ -422,10 +422,10 @@ int b43_generate_txhdr(struct b43_wldev *dev, if ((rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) || (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)) { unsigned int len; - struct ieee80211_hdr *uninitialized_var(hdr); + struct ieee80211_hdr *hdr; int rts_rate, rts_rate_fb; int rts_rate_ofdm, rts_rate_fb_ofdm; - struct b43_plcp_hdr6 *uninitialized_var(plcp); + struct b43_plcp_hdr6 *plcp; struct ieee80211_rate *rts_cts_rate; rts_cts_rate = ieee80211_get_rts_cts_rate(dev->wl->hw, info); @@ -436,7 +436,7 @@ int b43_generate_txhdr(struct b43_wldev *dev, rts_rate_fb_ofdm = b43_is_ofdm_rate(rts_rate_fb); if (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { - struct ieee80211_cts *uninitialized_var(cts); + struct ieee80211_cts *cts; switch (dev->fw.hdr_format) { case B43_FW_HDR_598: @@ -458,7 +458,7 @@ int b43_generate_txhdr(struct b43_wldev *dev, mac_ctl |= B43_TXH_MAC_SENDCTS; len = 
sizeof(struct ieee80211_cts); } else { - struct ieee80211_rts *uninitialized_var(rts); + struct ieee80211_rts *rts; switch (dev->fw.hdr_format) { case B43_FW_HDR_598: @@ -637,8 +637,8 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr) const struct b43_rxhdr_fw4 *rxhdr = _rxhdr; __le16 fctl; u16 phystat0, phystat3; - u16 uninitialized_var(chanstat), uninitialized_var(mactime); - u32 uninitialized_var(macstat); + u16 chanstat, mactime; + u32 macstat; u16 chanid; int padding, rate_idx; diff --git a/drivers/net/wireless/broadcom/b43legacy/debugfs.c b/drivers/net/wireless/broadcom/b43legacy/debugfs.c index fa133dfb2ecb..e7e4293c01f2 100644 --- a/drivers/net/wireless/broadcom/b43legacy/debugfs.c +++ b/drivers/net/wireless/broadcom/b43legacy/debugfs.c @@ -190,7 +190,7 @@ static ssize_t b43legacy_debugfs_read(struct file *file, char __user *userbuf, struct b43legacy_wldev *dev; struct b43legacy_debugfs_fops *dfops; struct b43legacy_dfs_file *dfile; - ssize_t uninitialized_var(ret); + ssize_t ret; char *buf; const size_t bufsize = 1024 * 16; /* 16 KiB buffer */ const size_t buforder = get_order(bufsize); diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c index 5208a39fd6f7..220c11d34c23 100644 --- a/drivers/net/wireless/broadcom/b43legacy/main.c +++ b/drivers/net/wireless/broadcom/b43legacy/main.c @@ -2580,7 +2580,7 @@ static void b43legacy_put_phy_into_reset(struct b43legacy_wldev *dev) static int b43legacy_switch_phymode(struct b43legacy_wl *wl, unsigned int new_mode) { - struct b43legacy_wldev *uninitialized_var(up_dev); + struct b43legacy_wldev *up_dev; struct b43legacy_wldev *down_dev; int err; bool gmode = false; diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c index 2ac494f5ae22..fd63eba47ba2 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945.c +++ b/drivers/net/wireless/intel/iwlegacy/3945.c @@ -2100,7 +2100,7 @@ il3945_txpower_set_from_eeprom(struct il_priv *il) /* set tx power value for all OFDM rates */ for (rate_idx = 0; rate_idx < IL_OFDM_RATES; rate_idx++) { - s32 uninitialized_var(power_idx); + s32 power_idx; int rc; /* use channel group's clip-power table, diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index da6d4202611c..a159d1d18c2c 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -2769,7 +2769,7 @@ il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb) struct ieee80211_tx_info *info; struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; u32 status = le32_to_cpu(tx_resp->u.status); - int uninitialized_var(tid); + int tid; int sta_id; int freed; u8 *qc = NULL; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c index 5b071b70bc08..0ae9cfc65272 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c @@ -618,8 +618,8 @@ static void _rtl92cu_init_chipn_two_out_ep_priority(struct ieee80211_hw *hw, u8 queue_sel) { u16 beq, bkq, viq, voq, mgtq, hiq; - u16 uninitialized_var(valuehi); - u16 uninitialized_var(valuelow); + u16 valuehi; + u16 valuelow; switch (queue_sel) { case (TX_SELE_HQ | TX_SELE_LQ): diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c index 3acf56683915..14af4c97c626 100644 --- a/drivers/pci/pcie/aer.c +++ b/drivers/pci/pcie/aer.c @@ -1138,7 +1138,7 @@ static 
irqreturn_t aer_isr(int irq, void *context) { struct pcie_device *dev = (struct pcie_device *)context; struct aer_rpc *rpc = get_service_data(dev); - struct aer_err_source uninitialized_var(e_src); + struct aer_err_source e_src; if (kfifo_is_empty(&rpc->aer_fifo)) return IRQ_NONE; diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c index 04c4da6692d7..a72270932ec3 100644 --- a/drivers/platform/x86/hdaps.c +++ b/drivers/platform/x86/hdaps.c @@ -365,7 +365,7 @@ static ssize_t hdaps_variance_show(struct device *dev, static ssize_t hdaps_temp1_show(struct device *dev, struct device_attribute *attr, char *buf) { - u8 uninitialized_var(temp); + u8 temp; int ret; ret = hdaps_readb_one(HDAPS_PORT_TEMP1, &temp); @@ -378,7 +378,7 @@ static ssize_t hdaps_temp1_show(struct device *dev, static ssize_t hdaps_temp2_show(struct device *dev, struct device_attribute *attr, char *buf) { - u8 uninitialized_var(temp); + u8 temp; int ret; ret = hdaps_readb_one(HDAPS_PORT_TEMP2, &temp); diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index e95f5b3bef4d..37c6cc374079 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c @@ -4126,7 +4126,7 @@ static int adapter_sg_tables_alloc(struct AdapterCtlBlk *acb) const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN; int srb_idx = 0; unsigned i = 0; - struct SGentry *uninitialized_var(ptr); + struct SGentry *ptr; for (i = 0; i < DC395x_MAX_SRB_CNT; i++) acb->srb_array[i].segment_x = NULL; diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index fb9848e1d481..0b4499210b95 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -4202,7 +4202,7 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec) { struct outbound_queue_table *circularQ; void *pMsg1 = NULL; - u8 uninitialized_var(bc); + u8 bc; u32 ret = MPI_IO_STATUS_FAIL; unsigned long flags; diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index 4d205ebaee87..05c944a3bdca 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -4182,7 +4182,7 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec) { struct outbound_queue_table *circularQ; void *pMsg1 = NULL; - u8 uninitialized_var(bc); + u8 bc; u32 ret = MPI_IO_STATUS_FAIL; unsigned long flags; u32 regval; diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c index 3861cb659cb9..6c647ba4ba0b 100644 --- a/drivers/ssb/driver_chipcommon.c +++ b/drivers/ssb/driver_chipcommon.c @@ -119,7 +119,7 @@ void ssb_chipco_set_clockmode(struct ssb_chipcommon *cc, static enum ssb_clksrc chipco_pctl_get_slowclksrc(struct ssb_chipcommon *cc) { struct ssb_bus *bus = cc->dev->bus; - u32 uninitialized_var(tmp); + u32 tmp; if (cc->dev->id.revision < 6) { if (bus->bustype == SSB_BUSTYPE_SSB || @@ -149,7 +149,7 @@ static enum ssb_clksrc chipco_pctl_get_slowclksrc(struct ssb_chipcommon *cc) /* Get maximum or minimum (depending on get_max flag) slowclock frequency. 
*/ static int chipco_pctl_clockfreqlimit(struct ssb_chipcommon *cc, int get_max) { - int uninitialized_var(limit); + int limit; enum ssb_clksrc clocksrc; int divisor = 1; u32 tmp; diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c index a6aabfd6e2da..097266342e5e 100644 --- a/drivers/tty/cyclades.c +++ b/drivers/tty/cyclades.c @@ -3643,7 +3643,7 @@ static int cy_pci_probe(struct pci_dev *pdev, struct cyclades_card *card; void __iomem *addr0 = NULL, *addr2 = NULL; char *card_name = NULL; - u32 uninitialized_var(mailbox); + u32 mailbox; unsigned int device_id, nchan = 0, card_no, i, j; unsigned char plx_ver; int retval, irq; diff --git a/drivers/tty/isicom.c b/drivers/tty/isicom.c index fc38f96475bf..3b2f9fb01aa0 100644 --- a/drivers/tty/isicom.c +++ b/drivers/tty/isicom.c @@ -1514,7 +1514,7 @@ static unsigned int card_count; static int isicom_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - unsigned int uninitialized_var(signature), index; + unsigned int signature, index; int retval = -EPERM; struct isi_board *board = NULL; diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c index b4d6d9bb3239..c545b27ea568 100644 --- a/drivers/usb/musb/cppi_dma.c +++ b/drivers/usb/musb/cppi_dma.c @@ -1146,7 +1146,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id) struct musb_hw_ep *hw_ep = NULL; u32 rx, tx; int i, index; - unsigned long uninitialized_var(flags); + unsigned long flags; cppi = container_of(musb->dma_controller, struct cppi, controller); if (cppi->irq) diff --git a/drivers/usb/storage/sddr55.c b/drivers/usb/storage/sddr55.c index ba955d65eb0e..c8a988d2cfdd 100644 --- a/drivers/usb/storage/sddr55.c +++ b/drivers/usb/storage/sddr55.c @@ -554,8 +554,8 @@ static int sddr55_reset(struct us_data *us) static unsigned long sddr55_get_capacity(struct us_data *us) { - unsigned char uninitialized_var(manufacturerID); - unsigned char uninitialized_var(deviceID); + unsigned char manufacturerID; + unsigned char deviceID; int result; struct sddr55_card_info *info = (struct sddr55_card_info *)us->extra; diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index e992decfec53..eea902b83afe 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -862,7 +862,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock) struct tun_msg_ctl ctl; size_t len, total_len = 0; int err; - struct vhost_net_ubuf_ref *uninitialized_var(ubufs); + struct vhost_net_ubuf_ref *ubufs; bool zcopy_used; int sent_pkts = 0; @@ -1042,7 +1042,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq, /* len is always initialized before use since we are always called with * datalen > 0. 
*/ - u32 uninitialized_var(len); + u32 len; while (datalen > 0 && headcount < quota) { if (unlikely(seg >= UIO_MAXIOV)) { @@ -1099,7 +1099,7 @@ static void handle_rx(struct vhost_net *net) { struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX]; struct vhost_virtqueue *vq = &nvq->vq; - unsigned uninitialized_var(in), log; + unsigned in, log; struct vhost_log *vq_log; struct msghdr msg = { .msg_name = NULL, diff --git a/drivers/video/fbdev/matrox/matroxfb_maven.c b/drivers/video/fbdev/matrox/matroxfb_maven.c index eda893b7a2e9..9a98c4a6ba33 100644 --- a/drivers/video/fbdev/matrox/matroxfb_maven.c +++ b/drivers/video/fbdev/matrox/matroxfb_maven.c @@ -300,7 +300,7 @@ static int matroxfb_mavenclock(const struct matrox_pll_ctl *ctl, unsigned int* in, unsigned int* feed, unsigned int* post, unsigned int* htotal2) { unsigned int fvco; - unsigned int uninitialized_var(p); + unsigned int p; fvco = matroxfb_PLL_mavenclock(&maven1000_pll, ctl, htotal, vtotal, in, feed, &p, htotal2); if (!fvco) @@ -732,8 +732,8 @@ static int maven_find_exact_clocks(unsigned int ht, unsigned int vt, for (x = 0; x < 8; x++) { unsigned int c; - unsigned int uninitialized_var(a), uninitialized_var(b), - uninitialized_var(h2); + unsigned int a, b, + h2; unsigned int h = ht + 2 + x; if (!matroxfb_mavenclock((m->mode == MATROXFB_OUTPUT_MODE_PAL) ? &maven_PAL : &maven_NTSC, h, vt, &a, &b, &c, &h2)) { diff --git a/drivers/video/fbdev/pm3fb.c b/drivers/video/fbdev/pm3fb.c index 7497bd36334c..a8faf46adeb1 100644 --- a/drivers/video/fbdev/pm3fb.c +++ b/drivers/video/fbdev/pm3fb.c @@ -821,9 +821,9 @@ static void pm3fb_write_mode(struct fb_info *info) wmb(); { - unsigned char uninitialized_var(m); /* ClkPreScale */ - unsigned char uninitialized_var(n); /* ClkFeedBackScale */ - unsigned char uninitialized_var(p); /* ClkPostScale */ + unsigned char m; /* ClkPreScale */ + unsigned char n; /* ClkFeedBackScale */ + unsigned char p; /* ClkPostScale */ unsigned long pixclock = PICOS2KHZ(info->var.pixclock); (void)pm3fb_calculate_clock(pixclock, &m, &n, &p); diff --git a/drivers/video/fbdev/riva/riva_hw.c b/drivers/video/fbdev/riva/riva_hw.c index 08c9ee46978e..4168ac464565 100644 --- a/drivers/video/fbdev/riva/riva_hw.c +++ b/drivers/video/fbdev/riva/riva_hw.c @@ -1245,8 +1245,7 @@ int CalcStateExt ) { int pixelDepth; - int uninitialized_var(VClk),uninitialized_var(m), - uninitialized_var(n), uninitialized_var(p); + int VClk, m, n, p; /* * Save mode parameters. 
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 58b96baa8d48..a2de775801af 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -424,7 +424,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, struct vring_virtqueue *vq = to_vvq(_vq); struct scatterlist *sg; struct vring_desc *desc; - unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx; + unsigned int i, n, avail, descs_used, prev, err_idx; int head; bool indirect; @@ -1101,8 +1101,8 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq, struct vring_packed_desc *desc; struct scatterlist *sg; unsigned int i, n, c, descs_used, err_idx; - __le16 uninitialized_var(head_flags), flags; - u16 head, id, uninitialized_var(prev), curr, avail_used_flags; + __le16 head_flags, flags; + u16 head, id, prev, curr, avail_used_flags; START_USE(vq); diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 96757f3abd74..1d2e61e0ab04 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -1053,7 +1053,7 @@ static int afs_d_revalidate_rcu(struct dentry *dentry) static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) { struct afs_vnode *vnode, *dir; - struct afs_fid uninitialized_var(fid); + struct afs_fid fid; struct dentry *parent; struct inode *inode; struct key *key; diff --git a/fs/afs/security.c b/fs/afs/security.c index 90d852704328..9cf3102f370c 100644 --- a/fs/afs/security.c +++ b/fs/afs/security.c @@ -399,7 +399,7 @@ int afs_check_permit(struct afs_vnode *vnode, struct key *key, int afs_permission(struct inode *inode, int mask) { struct afs_vnode *vnode = AFS_FS_I(inode); - afs_access_t uninitialized_var(access); + afs_access_t access; struct key *key; int ret = 0; diff --git a/fs/dlm/netlink.c b/fs/dlm/netlink.c index e7f550327d5d..e338c407cb75 100644 --- a/fs/dlm/netlink.c +++ b/fs/dlm/netlink.c @@ -113,7 +113,7 @@ static void fill_data(struct dlm_lock_data *data, struct dlm_lkb *lkb) void dlm_timeout_warn(struct dlm_lkb *lkb) { - struct sk_buff *uninitialized_var(send_skb); + struct sk_buff *send_skb; struct dlm_lock_data *data; size_t size; int rv; diff --git a/fs/erofs/data.c b/fs/erofs/data.c index 64b56c7df023..d0542151e8c4 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -265,7 +265,7 @@ submit_bio_out: */ static int erofs_raw_access_readpage(struct file *file, struct page *page) { - erofs_off_t uninitialized_var(last_block); + erofs_off_t last_block; struct bio *bio; trace_erofs_readpage(page, true); @@ -282,7 +282,7 @@ static int erofs_raw_access_readpage(struct file *file, struct page *page) static void erofs_raw_access_readahead(struct readahead_control *rac) { - erofs_off_t uninitialized_var(last_block); + erofs_off_t last_block; struct bio *bio = NULL; struct page *page; diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index be50a4d9d273..24a26aaf847f 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -1161,7 +1161,7 @@ static void z_erofs_submit_queue(struct super_block *sb, struct z_erofs_decompressqueue *q[NR_JOBQUEUES]; void *bi_private; /* since bio will be NULL, no need to initialize last_index */ - pgoff_t uninitialized_var(last_index); + pgoff_t last_index; unsigned int nr_bios = 0; struct bio *bio = NULL; diff --git a/fs/fat/dir.c b/fs/fat/dir.c index b4ddf48fa444..c4a274285858 100644 --- a/fs/fat/dir.c +++ b/fs/fat/dir.c @@ -1284,7 +1284,7 @@ int fat_add_entries(struct inode *dir, void *slots, int nr_slots, struct super_block *sb = dir->i_sb; struct msdos_sb_info *sbi = MSDOS_SB(sb); struct buffer_head *bh, *prev, 
*bhs[3]; /* 32*slots (672bytes) */ - struct msdos_dir_entry *uninitialized_var(de); + struct msdos_dir_entry *de; int err, free_slots, i, nr_bhs; loff_t pos, i_pos; diff --git a/fs/fuse/control.c b/fs/fuse/control.c index c23f6f243ad4..a1303ad303ba 100644 --- a/fs/fuse/control.c +++ b/fs/fuse/control.c @@ -120,7 +120,7 @@ static ssize_t fuse_conn_max_background_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - unsigned uninitialized_var(val); + unsigned val; ssize_t ret; ret = fuse_conn_limit_write(file, buf, count, ppos, &val, @@ -162,7 +162,7 @@ static ssize_t fuse_conn_congestion_threshold_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - unsigned uninitialized_var(val); + unsigned val; struct fuse_conn *fc; ssize_t ret; diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c index 030f094910c3..2cc17816d7b1 100644 --- a/fs/fuse/cuse.c +++ b/fs/fuse/cuse.c @@ -270,7 +270,7 @@ static int cuse_parse_one(char **pp, char *end, char **keyp, char **valp) static int cuse_parse_devinfo(char *p, size_t len, struct cuse_devinfo *devinfo) { char *end = p + len; - char *uninitialized_var(key), *uninitialized_var(val); + char *key, *val; int rc; while (true) { diff --git a/fs/fuse/file.c b/fs/fuse/file.c index e573b0cd2737..32301fe626ba 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -2963,7 +2963,7 @@ static void fuse_register_polled_file(struct fuse_conn *fc, { spin_lock(&fc->lock); if (RB_EMPTY_NODE(&ff->polled_node)) { - struct rb_node **link, *uninitialized_var(parent); + struct rb_node **link, *parent; link = fuse_find_polled_node(fc, ff->kh, &parent); BUG_ON(*link); diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 72c9560f4467..baa17c781870 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -335,7 +335,7 @@ static int gfs2_write_cache_jdata(struct address_space *mapping, int done = 0; struct pagevec pvec; int nr_pages; - pgoff_t uninitialized_var(writeback_index); + pgoff_t writeback_index; pgoff_t index; pgoff_t end; pgoff_t done_index; diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 6306eaae378b..8dfe09f52cbc 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -1761,7 +1761,7 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length) u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift; __u16 start_list[GFS2_MAX_META_HEIGHT]; __u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL; - unsigned int start_aligned, uninitialized_var(end_aligned); + unsigned int start_aligned, end_aligned; unsigned int strip_h = ip->i_height - 1; u32 btotal = 0; int ret, state; diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index cb2a11b458c6..ed1da4323967 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c @@ -419,7 +419,7 @@ static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd, struct page *page) { struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); - struct gfs2_log_header_host uninitialized_var(lh); + struct gfs2_log_header_host lh; void *kaddr = kmap_atomic(page); unsigned int offset; bool ret = false; diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c index c8d1b2be7854..73342c925a4b 100644 --- a/fs/hfsplus/unicode.c +++ b/fs/hfsplus/unicode.c @@ -398,7 +398,7 @@ int hfsplus_hash_dentry(const struct dentry *dentry, struct qstr *str) astr = str->name; len = str->len; while (len > 0) { - int uninitialized_var(dsize); + int dsize; size = asc2unichar(sb, astr, len, &c); astr += size; len -= size; diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c index cac468f04820..402769881c32 100644 --- a/fs/isofs/namei.c +++ b/fs/isofs/namei.c @@ 
-152,8 +152,8 @@ isofs_find_entry(struct inode *dir, struct dentry *dentry, struct dentry *isofs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { int found; - unsigned long uninitialized_var(block); - unsigned long uninitialized_var(offset); + unsigned long block; + unsigned long offset; struct inode *inode; struct page *page; diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c index 83b8f06b4a64..7e9abdb89712 100644 --- a/fs/jffs2/erase.c +++ b/fs/jffs2/erase.c @@ -401,7 +401,7 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb { size_t retlen; int ret; - uint32_t uninitialized_var(bad_offset); + uint32_t bad_offset; switch (jffs2_block_check_erase(c, jeb, &bad_offset)) { case -EAGAIN: goto refile; diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index b68e96681522..bb1804bab1e1 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -351,7 +351,7 @@ static ssize_t write_unlock_fs(struct file *file, char *buf, size_t size) static ssize_t write_filehandle(struct file *file, char *buf, size_t size) { char *dname, *path; - int uninitialized_var(maxsize); + int maxsize; char *mesg = buf; int len; struct auth_domain *dom; diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 2f834add165b..4c1b90442d6f 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -4707,7 +4707,7 @@ int ocfs2_insert_extent(handle_t *handle, struct ocfs2_alloc_context *meta_ac) { int status; - int uninitialized_var(free_records); + int free_records; struct buffer_head *last_eb_bh = NULL; struct ocfs2_insert_type insert = {0, }; struct ocfs2_extent_rec rec; @@ -7051,7 +7051,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, int need_free = 0; u32 bit_off, num; handle_t *handle; - u64 uninitialized_var(block); + u64 block; struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index 5761060d2ba8..bdfba9db558a 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c @@ -848,9 +848,9 @@ static int ocfs2_dx_dir_lookup(struct inode *inode, u64 *ret_phys_blkno) { int ret = 0; - unsigned int cend, uninitialized_var(clen); - u32 uninitialized_var(cpos); - u64 uninitialized_var(blkno); + unsigned int cend, clen; + u32 cpos; + u64 blkno; u32 name_hash = hinfo->major_hash; ret = ocfs2_dx_dir_lookup_rec(inode, el, name_hash, &cpos, &blkno, @@ -894,7 +894,7 @@ static int ocfs2_dx_dir_search(const char *name, int namelen, struct ocfs2_dir_lookup_result *res) { int ret, i, found; - u64 uninitialized_var(phys); + u64 phys; struct buffer_head *dx_leaf_bh = NULL; struct ocfs2_dx_leaf *dx_leaf; struct ocfs2_dx_entry *dx_entry = NULL; @@ -4393,9 +4393,9 @@ out: int ocfs2_dx_dir_truncate(struct inode *dir, struct buffer_head *di_bh) { int ret; - unsigned int uninitialized_var(clen); - u32 major_hash = UINT_MAX, p_cpos, uninitialized_var(cpos); - u64 uninitialized_var(blkno); + unsigned int clen; + u32 major_hash = UINT_MAX, p_cpos, cpos; + u64 blkno; struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); struct buffer_head *dx_root_bh = NULL; struct ocfs2_dx_root_block *dx_root; diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c index a94852af5510..7b93e9c766f6 100644 --- a/fs/ocfs2/extent_map.c +++ b/fs/ocfs2/extent_map.c @@ -403,7 +403,7 @@ static int ocfs2_get_clusters_nocache(struct inode *inode, { int i, ret, tree_height, len; struct ocfs2_dinode *di; - struct ocfs2_extent_block *uninitialized_var(eb); + struct ocfs2_extent_block 
*eb; struct ocfs2_extent_list *el; struct ocfs2_extent_rec *rec; struct buffer_head *eb_bh = NULL; @@ -599,7 +599,7 @@ int ocfs2_get_clusters(struct inode *inode, u32 v_cluster, unsigned int *extent_flags) { int ret; - unsigned int uninitialized_var(hole_len), flags = 0; + unsigned int hole_len, flags = 0; struct buffer_head *di_bh = NULL; struct ocfs2_extent_rec rec; diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 5381020aaa9a..c46bf7f581a1 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -2498,7 +2498,7 @@ int ocfs2_create_inode_in_orphan(struct inode *dir, struct buffer_head *new_di_bh = NULL; struct ocfs2_alloc_context *inode_ac = NULL; struct ocfs2_dir_lookup_result orphan_insert = { NULL, }; - u64 uninitialized_var(di_blkno), suballoc_loc; + u64 di_blkno, suballoc_loc; u16 suballoc_bit; status = ocfs2_inode_lock(dir, &parent_di_bh, 1); diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index cfb77f70c888..3b397fa9c9e8 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -1063,7 +1063,7 @@ static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci, struct buffer_head **ret_bh) { int ret = 0, i, found; - u32 low_cpos, uninitialized_var(cpos_end); + u32 low_cpos, cpos_end; struct ocfs2_extent_list *el; struct ocfs2_extent_rec *rec = NULL; struct ocfs2_extent_block *eb = NULL; diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 90c830e3758e..9ccd19d8f7b1 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -1211,7 +1211,7 @@ static int ocfs2_xattr_block_get(struct inode *inode, struct ocfs2_xattr_value_root *xv; size_t size; int ret = -ENODATA, name_offset, name_len, i; - int uninitialized_var(block_off); + int block_off; xs->bucket = ocfs2_xattr_bucket_new(inode); if (!xs->bucket) { diff --git a/fs/omfs/file.c b/fs/omfs/file.c index d7b5f09d298c..2c7b70ee1388 100644 --- a/fs/omfs/file.c +++ b/fs/omfs/file.c @@ -220,7 +220,7 @@ static int omfs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh; sector_t next, offset; int ret; - u64 uninitialized_var(new_block); + u64 new_block; u32 max_extents; int extent_count; struct omfs_extent *oe; diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index 79dd052c7dbf..8e1c308524a2 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c @@ -787,7 +787,7 @@ static int ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c) struct path upperpath, datapath; int err; char *capability = NULL; - ssize_t uninitialized_var(cap_size); + ssize_t cap_size; ovl_path_upper(c->dentry, &upperpath); if (WARN_ON(upperpath.dentry == NULL)) diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c index ad292c5a43a9..b5cdac9b0368 100644 --- a/fs/ubifs/commit.c +++ b/fs/ubifs/commit.c @@ -552,11 +552,11 @@ out: */ int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot) { - int lnum, offs, len, err = 0, uninitialized_var(last_level), child_cnt; + int lnum, offs, len, err = 0, last_level, child_cnt; int first = 1, iip; struct ubifs_debug_info *d = c->dbg; - union ubifs_key uninitialized_var(lower_key), upper_key, l_key, u_key; - unsigned long long uninitialized_var(last_sqnum); + union ubifs_key lower_key, upper_key, l_key, u_key; + unsigned long long last_sqnum; struct ubifs_idx_node *idx; struct list_head list; struct idx_node *i; diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index ef85ec167a84..9d042942d8b2 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -1260,7 +1260,7 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry, struct ubifs_budget_req 
ino_req = { .dirtied_ino = 1, .dirtied_ino_d = ALIGN(old_inode_ui->data_len, 8) }; struct timespec64 time; - unsigned int uninitialized_var(saved_nlink); + unsigned int saved_nlink; struct fscrypt_name old_nm, new_nm; /* diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 49fe062ce45e..b77d1637bbbc 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -222,7 +222,7 @@ static int write_begin_slow(struct address_space *mapping, struct ubifs_info *c = inode->i_sb->s_fs_info; pgoff_t index = pos >> PAGE_SHIFT; struct ubifs_budget_req req = { .new_page = 1 }; - int uninitialized_var(err), appending = !!(pos + len > inode->i_size); + int err, appending = !!(pos + len > inode->i_size); struct page *page; dbg_gen("ino %lu, pos %llu, len %u, i_size %lld", @@ -426,7 +426,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping, struct ubifs_info *c = inode->i_sb->s_fs_info; struct ubifs_inode *ui = ubifs_inode(inode); pgoff_t index = pos >> PAGE_SHIFT; - int uninitialized_var(err), appending = !!(pos + len > inode->i_size); + int err, appending = !!(pos + len > inode->i_size); int skipped_read = 0; struct page *page; diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c index e5ec1afe1c66..2e6264318bd9 100644 --- a/fs/ubifs/journal.c +++ b/fs/ubifs/journal.c @@ -1222,7 +1222,7 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir, int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ; int last_reference = !!(new_inode && new_inode->i_nlink == 0); int move = (old_dir != new_dir); - struct ubifs_inode *uninitialized_var(new_ui); + struct ubifs_inode *new_ui; u8 hash_old_dir[UBIFS_HASH_ARR_SZ]; u8 hash_new_dir[UBIFS_HASH_ARR_SZ]; u8 hash_new_inode[UBIFS_HASH_ARR_SZ]; @@ -1507,7 +1507,7 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode, union ubifs_key key, to_key; struct ubifs_ino_node *ino; struct ubifs_trun_node *trun; - struct ubifs_data_node *uninitialized_var(dn); + struct ubifs_data_node *dn; int err, dlen, len, lnum, offs, bit, sz, sync = IS_SYNC(inode); struct ubifs_inode *ui = ubifs_inode(inode); ino_t inum = inode->i_ino; diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c index e21abf250951..6e0a153b7194 100644 --- a/fs/ubifs/lpt.c +++ b/fs/ubifs/lpt.c @@ -275,7 +275,7 @@ uint32_t ubifs_unpack_bits(const struct ubifs_info *c, uint8_t **addr, int *pos, const int k = 32 - nrbits; uint8_t *p = *addr; int b = *pos; - uint32_t uninitialized_var(val); + uint32_t val; const int bytes = (nrbits + b + 7) >> 3; ubifs_assert(c, nrbits > 0); diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index e8e7b0e9532e..f609f6cdde70 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c @@ -892,7 +892,7 @@ static int fallible_resolve_collision(struct ubifs_info *c, int adding) { struct ubifs_znode *o_znode = NULL, *znode = *zn; - int uninitialized_var(o_n), err, cmp, unsure = 0, nn = *n; + int o_n, err, cmp, unsure = 0, nn = *n; cmp = fallible_matches_name(c, &znode->zbranch[nn], nm); if (unlikely(cmp < 0)) @@ -1514,8 +1514,8 @@ out: */ int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu) { - int n, err = 0, lnum = -1, uninitialized_var(offs); - int uninitialized_var(len); + int n, err = 0, lnum = -1, offs; + int len; unsigned int block = key_block(c, &bu->key); struct ubifs_znode *znode; diff --git a/fs/ubifs/tnc_misc.c b/fs/ubifs/tnc_misc.c index 49cb34c3f324..ccaf94ea5be3 100644 --- a/fs/ubifs/tnc_misc.c +++ b/fs/ubifs/tnc_misc.c @@ -126,8 +126,8 @@ int ubifs_search_zbranch(const struct ubifs_info *c, const struct ubifs_znode *znode, const 
union ubifs_key *key, int *n) { - int beg = 0, end = znode->child_cnt, uninitialized_var(mid); - int uninitialized_var(cmp); + int beg = 0, end = znode->child_cnt, mid; + int cmp; const struct ubifs_zbranch *zbr = &znode->zbranch[0]; ubifs_assert(c, end > beg); diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c index 02f03fadb75b..8e597db4d971 100644 --- a/fs/udf/balloc.c +++ b/fs/udf/balloc.c @@ -564,7 +564,7 @@ static udf_pblk_t udf_table_new_block(struct super_block *sb, udf_pblk_t newblock = 0; uint32_t adsize; uint32_t elen, goal_elen = 0; - struct kernel_lb_addr eloc, uninitialized_var(goal_eloc); + struct kernel_lb_addr eloc, goal_eloc; struct extent_position epos, goal_epos; int8_t etype; struct udf_inode_info *iinfo = UDF_I(table); diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index f37f5cc4b19f..30525861c596 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c @@ -126,7 +126,7 @@ xfs_bmap_rtalloc( * pick an extent that will space things out in the rt area. */ if (ap->eof && ap->offset == 0) { - xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */ + xfs_rtblock_t rtx; /* realtime extent no */ error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx); if (error) diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 6315324b9dc2..56e4afb2e729 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -305,7 +305,7 @@ flow_action_mixed_hw_stats_check(const struct flow_action *action, struct netlink_ext_ack *extack) { const struct flow_action_entry *action_entry; - u8 uninitialized_var(last_hw_stats); + u8 last_hw_stats; int i; if (flow_offload_has_one_action(action)) diff --git a/kernel/async.c b/kernel/async.c index 4f9c1d614016..33258e6e20f8 100644 --- a/kernel/async.c +++ b/kernel/async.c @@ -111,7 +111,7 @@ static void async_run_entry_fn(struct work_struct *work) struct async_entry *entry = container_of(work, struct async_entry, work); unsigned long flags; - ktime_t uninitialized_var(calltime), delta, rettime; + ktime_t calltime, delta, rettime; /* 1) run (and print duration) */ if (initcall_debug && system_state < SYSTEM_RUNNING) { @@ -287,7 +287,7 @@ EXPORT_SYMBOL_GPL(async_synchronize_full_domain); */ void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain) { - ktime_t uninitialized_var(starttime), delta, endtime; + ktime_t starttime, delta, endtime; if (initcall_debug && system_state < SYSTEM_RUNNING) { pr_debug("async_waiting @ %i\n", task_pid_nr(current)); diff --git a/kernel/audit.c b/kernel/audit.c index 8c201f414226..ec38479f9228 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -1800,7 +1800,7 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, { struct audit_buffer *ab; struct timespec64 t; - unsigned int uninitialized_var(serial); + unsigned int serial; if (audit_initialized != AUDIT_INITIALIZED) return NULL; diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c index 683a799618ad..9d847ab851db 100644 --- a/kernel/debug/kdb/kdb_io.c +++ b/kernel/debug/kdb/kdb_io.c @@ -591,7 +591,7 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap) int this_cpu, old_cpu; char *cp, *cp2, *cphold = NULL, replaced_byte = ' '; char *moreprompt = "more> "; - unsigned long uninitialized_var(flags); + unsigned long flags; /* Serialize kdb_printf if multiple cpus try to write at once. 
* But if any cpu goes recursive in kdb, just print the output, diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c index 36c962a86bf2..d628ab09d97b 100644 --- a/kernel/dma/debug.c +++ b/kernel/dma/debug.c @@ -882,7 +882,7 @@ static int device_dma_allocations(struct device *dev, struct dma_debug_entry **o static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; - struct dma_debug_entry *uninitialized_var(entry); + struct dma_debug_entry *entry; int count; if (dma_debug_disabled()) diff --git a/kernel/events/core.c b/kernel/events/core.c index 856d98c36f56..851fc5e0e24b 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -11483,7 +11483,7 @@ SYSCALL_DEFINE5(perf_event_open, struct perf_event *group_leader = NULL, *output_event = NULL; struct perf_event *event, *sibling; struct perf_event_attr attr; - struct perf_event_context *ctx, *uninitialized_var(gctx); + struct perf_event_context *ctx, *gctx; struct file *event_file = NULL; struct fd group = {NULL, 0}; struct task_struct *task = NULL; diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index bb0862873dba..e84eb52b646b 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -2189,7 +2189,7 @@ static void handle_swbp(struct pt_regs *regs) { struct uprobe *uprobe; unsigned long bp_vaddr; - int uninitialized_var(is_swbp); + int is_swbp; bp_vaddr = uprobe_get_swbp_addr(regs); if (bp_vaddr == get_trampoline_vaddr()) diff --git a/kernel/exit.c b/kernel/exit.c index 727150f28103..7bcd571618dd 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -93,7 +93,7 @@ static void __exit_signal(struct task_struct *tsk) struct signal_struct *sig = tsk->signal; bool group_dead = thread_group_leader(tsk); struct sighand_struct *sighand; - struct tty_struct *uninitialized_var(tty); + struct tty_struct *tty; u64 utime, stime; sighand = rcu_dereference_check(tsk->sighand, diff --git a/kernel/futex.c b/kernel/futex.c index e646661f6282..05e88562de68 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -1326,7 +1326,7 @@ static int lookup_pi_state(u32 __user *uaddr, u32 uval, static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval) { int err; - u32 uninitialized_var(curval); + u32 curval; if (unlikely(should_fail_futex(true))) return -EFAULT; @@ -1496,7 +1496,7 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q) */ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state) { - u32 uninitialized_var(curval), newval; + u32 curval, newval; struct task_struct *new_owner; bool postunlock = false; DEFINE_WAKE_Q(wake_q); @@ -2370,7 +2370,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, struct task_struct *argowner) { struct futex_pi_state *pi_state = q->pi_state; - u32 uval, uninitialized_var(curval), newval; + u32 uval, curval, newval; struct task_struct *oldowner, *newowner; u32 newtid; int ret, err = 0; @@ -2996,7 +2996,7 @@ uaddr_faulted: */ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) { - u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current); + u32 curval, uval, vpid = task_pid_vnr(current); union futex_key key = FUTEX_KEY_INIT; struct futex_hash_bucket *hb; struct futex_q *top_waiter; @@ -3479,7 +3479,7 @@ err_unlock: static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, bool pi, bool pending_op) { - u32 uval, uninitialized_var(nval), mval; + u32 uval, nval, mval; int err; /* Futex address must be 32bit aligned */ 
@@ -3609,7 +3609,7 @@ static void exit_robust_list(struct task_struct *curr) struct robust_list_head __user *head = curr->robust_list; struct robust_list __user *entry, *next_entry, *pending; unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; - unsigned int uninitialized_var(next_pi); + unsigned int next_pi; unsigned long futex_offset; int rc; @@ -3909,7 +3909,7 @@ static void compat_exit_robust_list(struct task_struct *curr) struct compat_robust_list_head __user *head = curr->compat_robust_list; struct robust_list __user *entry, *next_entry, *pending; unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; - unsigned int uninitialized_var(next_pi); + unsigned int next_pi; compat_uptr_t uentry, next_uentry, upending; compat_long_t futex_offset; int rc; diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 29a8de4c50b9..84ed1d1d5013 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -1723,7 +1723,7 @@ static int noop_count(struct lock_list *entry, void *data) static unsigned long __lockdep_count_forward_deps(struct lock_list *this) { unsigned long count = 0; - struct lock_list *uninitialized_var(target_entry); + struct lock_list *target_entry; __bfs_forwards(this, (void *)&count, noop_count, &target_entry); @@ -1749,7 +1749,7 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class) static unsigned long __lockdep_count_backward_deps(struct lock_list *this) { unsigned long count = 0; - struct lock_list *uninitialized_var(target_entry); + struct lock_list *target_entry; __bfs_backwards(this, (void *)&count, noop_count, &target_entry); @@ -1804,7 +1804,7 @@ check_noncircular(struct held_lock *src, struct held_lock *target, struct lock_trace **const trace) { int ret; - struct lock_list *uninitialized_var(target_entry); + struct lock_list *target_entry; struct lock_list src_entry = { .class = hlock_class(src), .parent = NULL, @@ -1842,7 +1842,7 @@ static noinline int check_redundant(struct held_lock *src, struct held_lock *target) { int ret; - struct lock_list *uninitialized_var(target_entry); + struct lock_list *target_entry; struct lock_list src_entry = { .class = hlock_class(src), .parent = NULL, @@ -2244,8 +2244,8 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev, { unsigned long usage_mask = 0, forward_mask, backward_mask; enum lock_usage_bit forward_bit = 0, backward_bit = 0; - struct lock_list *uninitialized_var(target_entry1); - struct lock_list *uninitialized_var(target_entry); + struct lock_list *target_entry1; + struct lock_list *target_entry; struct lock_list this, that; int ret; @@ -3438,7 +3438,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this, { int ret; struct lock_list root; - struct lock_list *uninitialized_var(target_entry); + struct lock_list *target_entry; root.parent = NULL; root.class = hlock_class(this); @@ -3465,7 +3465,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this, { int ret; struct lock_list root; - struct lock_list *uninitialized_var(target_entry); + struct lock_list *target_entry; root.parent = NULL; root.class = hlock_class(this); diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 00867ff82412..f15471ce969e 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -577,7 +577,7 @@ static void rb_wake_up_waiters(struct irq_work *work) */ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full) { - struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer); + struct ring_buffer_per_cpu 
*cpu_buffer; DEFINE_WAIT(wait); struct rb_irq_work *work; int ret = 0; diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 34e406fe561f..8e4a3a4397f2 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -1029,7 +1029,7 @@ void *radix_tree_tag_clear(struct radix_tree_root *root, { struct radix_tree_node *node, *parent; unsigned long maxindex; - int uninitialized_var(offset); + int offset; radix_tree_load_root(root, &node, &maxindex); if (index > maxindex) diff --git a/lib/test_lockup.c b/lib/test_lockup.c index bd7c7ff39f6b..ff26f36d729f 100644 --- a/lib/test_lockup.c +++ b/lib/test_lockup.c @@ -168,7 +168,7 @@ static int master_cpu; static void test_lock(bool master, bool verbose) { - u64 uninitialized_var(wait_start); + u64 wait_start; if (measure_lock_wait) wait_start = local_clock(); diff --git a/mm/frontswap.c b/mm/frontswap.c index bfa3a339253e..9d977b1fc016 100644 --- a/mm/frontswap.c +++ b/mm/frontswap.c @@ -446,7 +446,7 @@ static int __frontswap_shrink(unsigned long target_pages, void frontswap_shrink(unsigned long target_pages) { unsigned long pages_to_unuse = 0; - int uninitialized_var(type), ret; + int type, ret; /* * we don't want to hold swap_lock while doing a very diff --git a/mm/ksm.c b/mm/ksm.c index 4102034cd55a..5fb176d497ea 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2387,7 +2387,7 @@ next_mm: static void ksm_do_scan(unsigned int scan_npages) { struct rmap_item *rmap_item; - struct page *uninitialized_var(page); + struct page *page; while (scan_npages-- && likely(!freezing(current))) { cond_resched(); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 19622328e4b5..c888d9b5c745 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1004,7 +1004,7 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, struct mem_cgroup *prev, struct mem_cgroup_reclaim_cookie *reclaim) { - struct mem_cgroup_reclaim_iter *uninitialized_var(iter); + struct mem_cgroup_reclaim_iter *iter; struct cgroup_subsys_state *css = NULL; struct mem_cgroup *memcg = NULL; struct mem_cgroup *pos = NULL; diff --git a/mm/memory.c b/mm/memory.c index 87ec87cdc1ff..5b5887b0e7f4 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2205,7 +2205,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, { pte_t *pte; int err = 0; - spinlock_t *uninitialized_var(ptl); + spinlock_t *ptl; if (create) { pte = (mm == &init_mm) ? 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 381320671677..b9e85d467352 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1234,7 +1234,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, static struct page *new_page(struct page *page, unsigned long start) { struct vm_area_struct *vma; - unsigned long uninitialized_var(address); + unsigned long address; vma = find_vma(current->mm, start); while (vma) { @@ -1629,7 +1629,7 @@ static int kernel_get_mempolicy(int __user *policy, unsigned long flags) { int err; - int uninitialized_var(pval); + int pval; nodemask_t nodes; addr = untagged_addr(addr); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 48eb0f1410d4..b52a3a2a5edd 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -961,7 +961,7 @@ static inline void __free_one_page(struct page *page, int migratetype, bool report) { struct capture_control *capc = task_capc(zone); - unsigned long uninitialized_var(buddy_pfn); + unsigned long buddy_pfn; unsigned long combined_pfn; unsigned int max_order; struct page *buddy; diff --git a/mm/percpu.c b/mm/percpu.c index 696367b18222..b626766160ce 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -2513,7 +2513,7 @@ static struct pcpu_alloc_info * __init pcpu_build_alloc_info( const size_t static_size = __per_cpu_end - __per_cpu_start; int nr_groups = 1, nr_units = 0; size_t size_sum, min_unit_size, alloc_size; - int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */ + int upa, max_upa, best_upa; /* units_per_alloc */ int last_allocs, group, unit; unsigned int cpu, tcpu; struct pcpu_alloc_info *ai; diff --git a/mm/slub.c b/mm/slub.c index ef303070d175..f226d66408ee 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1218,7 +1218,7 @@ static noinline int free_debug_processing( struct kmem_cache_node *n = get_node(s, page_to_nid(page)); void *object = head; int cnt = 0; - unsigned long uninitialized_var(flags); + unsigned long flags; int ret = 0; spin_lock_irqsave(&n->list_lock, flags); @@ -2901,7 +2901,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, struct page new; unsigned long counters; struct kmem_cache_node *n = NULL; - unsigned long uninitialized_var(flags); + unsigned long flags; stat(s, FREE_SLOWPATH); diff --git a/mm/swap.c b/mm/swap.c index a82efc33411f..de257c0a89b1 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -830,8 +830,8 @@ void release_pages(struct page **pages, int nr) LIST_HEAD(pages_to_free); struct pglist_data *locked_pgdat = NULL; struct lruvec *lruvec; - unsigned long uninitialized_var(flags); - unsigned int uninitialized_var(lock_batch); + unsigned long flags; + unsigned int lock_batch; for (i = 0; i < nr; i++) { struct page *page = pages[i]; diff --git a/net/dccp/options.c b/net/dccp/options.c index 3b42f5c6a63d..9fed0ae21e63 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c @@ -56,7 +56,7 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq, (dh->dccph_doff * 4); struct dccp_options_received *opt_recv = &dp->dccps_options_received; unsigned char opt, len; - unsigned char *uninitialized_var(value); + unsigned char *value; u32 elapsed_time; __be32 opt_val; int rc; diff --git a/net/ipv4/netfilter/nf_socket_ipv4.c b/net/ipv4/netfilter/nf_socket_ipv4.c index c94445b44d8c..2d42e4c35a20 100644 --- a/net/ipv4/netfilter/nf_socket_ipv4.c +++ b/net/ipv4/netfilter/nf_socket_ipv4.c @@ -84,11 +84,11 @@ nf_socket_get_sock_v4(struct net *net, struct sk_buff *skb, const int doff, struct sock *nf_sk_lookup_slow_v4(struct net *net, const struct sk_buff *skb, const struct 
net_device *indev) { - __be32 uninitialized_var(daddr), uninitialized_var(saddr); - __be16 uninitialized_var(dport), uninitialized_var(sport); + __be32 daddr, saddr; + __be16 dport, sport; const struct iphdr *iph = ip_hdr(skb); struct sk_buff *data_skb = NULL; - u8 uninitialized_var(protocol); + u8 protocol; #if IS_ENABLED(CONFIG_NF_CONNTRACK) enum ip_conntrack_info ctinfo; struct nf_conn const *ct; diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index ce4fbba4acce..73bb047e6037 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -535,7 +535,7 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq, int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen) { - int uninitialized_var(err); + int err; struct net *net = sock_net(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct in6_flowlabel_req freq; diff --git a/net/ipv6/netfilter/nf_socket_ipv6.c b/net/ipv6/netfilter/nf_socket_ipv6.c index b9df879c48d3..6fd54744cbc3 100644 --- a/net/ipv6/netfilter/nf_socket_ipv6.c +++ b/net/ipv6/netfilter/nf_socket_ipv6.c @@ -97,7 +97,7 @@ nf_socket_get_sock_v6(struct net *net, struct sk_buff *skb, int doff, struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb, const struct net_device *indev) { - __be16 uninitialized_var(dport), uninitialized_var(sport); + __be16 dport, sport; const struct in6_addr *daddr = NULL, *saddr = NULL; struct ipv6hdr *iph = ipv6_hdr(skb); struct sk_buff *data_skb = NULL; diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c index 9eca90414bb7..b22801f97bce 100644 --- a/net/netfilter/nf_conntrack_ftp.c +++ b/net/netfilter/nf_conntrack_ftp.c @@ -382,7 +382,7 @@ static int help(struct sk_buff *skb, int ret; u32 seq; int dir = CTINFO2DIR(ctinfo); - unsigned int uninitialized_var(matchlen), uninitialized_var(matchoff); + unsigned int matchlen, matchoff; struct nf_ct_ftp_master *ct_ftp_info = nfct_help_data(ct); struct nf_conntrack_expect *exp; union nf_inet_addr *daddr; diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 0ba020ca38e6..f02992419850 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -689,7 +689,7 @@ nfulnl_log_packet(struct net *net, struct nfnl_log_net *log = nfnl_log_pernet(net); const struct nfnl_ct_hook *nfnl_ct = NULL; struct nf_conn *ct = NULL; - enum ip_conntrack_info uninitialized_var(ctinfo); + enum ip_conntrack_info ctinfo; if (li_user && li_user->type == NF_LOG_TYPE_ULOG) li = li_user; diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 3243a31f6e82..dadfc06245a3 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -388,7 +388,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, struct net_device *indev; struct net_device *outdev; struct nf_conn *ct = NULL; - enum ip_conntrack_info uninitialized_var(ctinfo); + enum ip_conntrack_info ctinfo; struct nfnl_ct_hook *nfnl_ct; bool csum_verify; char *secdata = NULL; @@ -1168,7 +1168,7 @@ static int nfqnl_recv_verdict(struct net *net, struct sock *ctnl, struct nfqnl_instance *queue; unsigned int verdict; struct nf_queue_entry *entry; - enum ip_conntrack_info uninitialized_var(ctinfo); + enum ip_conntrack_info ctinfo; struct nfnl_ct_hook *nfnl_ct; struct nf_conn *ct = NULL; struct nfnl_queue_net *q = nfnl_queue_pernet(net); diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index 80ae7b9fa90a..354c3cc90741 100644 --- a/net/sched/cls_flow.c 
+++ b/net/sched/cls_flow.c @@ -225,7 +225,7 @@ static u32 flow_get_skgid(const struct sk_buff *skb) static u32 flow_get_vlan_tag(const struct sk_buff *skb) { - u16 uninitialized_var(tag); + u16 tag; if (vlan_get_tag(skb, &tag) < 0) return 0; diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index ca813697728e..4f823a8223d8 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -1692,7 +1692,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, { struct cake_sched_data *q = qdisc_priv(sch); int len = qdisc_pkt_len(skb); - int uninitialized_var(ret); + int ret; struct sk_buff *ack = NULL; ktime_t now = ktime_get(); struct cake_tin_data *b; diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 39b427dc7512..ce4519358106 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -360,7 +360,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct cbq_sched_data *q = qdisc_priv(sch); - int uninitialized_var(ret); + int ret; struct cbq_class *cl = cbq_classify(skb, sch, &ret); #ifdef CONFIG_NET_CLS_ACT diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 459a784056c0..985d5208f563 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -187,7 +187,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct fq_codel_sched_data *q = qdisc_priv(sch); unsigned int idx, prev_backlog, prev_qlen; struct fq_codel_flow *flow; - int uninitialized_var(ret); + int ret; unsigned int pkt_len; bool memory_limited; diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c index fb760cee824e..4d307f17e084 100644 --- a/net/sched/sch_fq_pie.c +++ b/net/sched/sch_fq_pie.c @@ -130,7 +130,7 @@ static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, { struct fq_pie_sched_data *q = qdisc_priv(sch); struct fq_pie_flow *sel_flow; - int uninitialized_var(ret); + int ret; u8 memory_limited = false; u8 enqueue = false; u32 pkt_len; diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 433f2190960f..92ad4115e473 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1533,7 +1533,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb); struct hfsc_class *cl; - int uninitialized_var(err); + int err; bool first; cl = hfsc_classify(skb, sch, &err); diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 8184c87da8be..6feab225b4ba 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -579,7 +579,7 @@ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl) static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { - int uninitialized_var(ret); + int ret; unsigned int len = qdisc_pkt_len(skb); struct htb_sched *q = qdisc_priv(sch); struct htb_class *cl = htb_classify(skb, sch, &ret); diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 5a6def5e4e6d..15f400dcb400 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -349,7 +349,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) unsigned int hash, dropped; sfq_index x, qlen; struct sfq_slot *slot; - int uninitialized_var(ret); + int ret; struct sk_buff *head; int delta; diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 5c4ec9386f81..b97947e9ca45 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -563,7 +563,7 @@ static int svc_udp_sendto(struct svc_rqst *rqstp) .msg_control = cmh, .msg_controllen = sizeof(buffer), 
}; - unsigned int uninitialized_var(sent); + unsigned int sent; int err; svc_udp_release_rqst(rqstp); @@ -1080,7 +1080,7 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp) struct msghdr msg = { .msg_flags = 0, }; - unsigned int uninitialized_var(sent); + unsigned int sent; int err; svc_tcp_release_rqst(rqstp); diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 914508ea9b84..c57aef829403 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -496,8 +496,8 @@ xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg, int flags, struct rpc_rqst *req) { struct xdr_buf *buf = &req->rq_private_buf; - size_t want, uninitialized_var(read); - ssize_t uninitialized_var(ret); + size_t want, read; + ssize_t ret; xs_read_header(transport, buf); @@ -844,7 +844,7 @@ static int xs_local_send_request(struct rpc_rqst *req) struct msghdr msg = { .msg_flags = XS_SENDMSG_FLAGS, }; - unsigned int uninitialized_var(sent); + unsigned int sent; int status; /* Close the stream if the previous transmission was incomplete */ @@ -915,7 +915,7 @@ static int xs_udp_send_request(struct rpc_rqst *req) .msg_namelen = xprt->addrlen, .msg_flags = XS_SENDMSG_FLAGS, }; - unsigned int uninitialized_var(sent); + unsigned int sent; int status; xs_pktdump("packet data:", @@ -999,7 +999,7 @@ static int xs_tcp_send_request(struct rpc_rqst *req) .msg_flags = XS_SENDMSG_FLAGS, }; bool vm_wait = false; - unsigned int uninitialized_var(sent); + unsigned int sent; int status; /* Close the stream if the previous transmission was incomplete */ diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 24f64bc0de18..710bd44eaa49 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -670,7 +670,7 @@ static int tls_push_record(struct sock *sk, int flags, struct tls_prot_info *prot = &tls_ctx->prot_info; struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); struct tls_rec *rec = ctx->open_rec, *tmp = NULL; - u32 i, split_point, uninitialized_var(orig_end); + u32 i, split_point, orig_end; struct sk_msg *msg_pl, *msg_en; struct aead_request *req; bool split; diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c index d55be1db1a8a..02df1d7db9a1 100644 --- a/sound/core/control_compat.c +++ b/sound/core/control_compat.c @@ -223,7 +223,7 @@ static int copy_ctl_value_from_user(struct snd_card *card, { struct snd_ctl_elem_value32 __user *data32 = userdata; int i, type, size; - int uninitialized_var(count); + int count; unsigned int indirect; if (copy_from_user(&data->id, &data32->id, sizeof(data->id))) diff --git a/sound/isa/sb/sb16_csp.c b/sound/isa/sb/sb16_csp.c index 4ad0ff0c4508..270af863e198 100644 --- a/sound/isa/sb/sb16_csp.c +++ b/sound/isa/sb/sb16_csp.c @@ -102,7 +102,7 @@ static void info_read(struct snd_info_entry *entry, struct snd_info_buffer *buff int snd_sb_csp_new(struct snd_sb *chip, int device, struct snd_hwdep ** rhwdep) { struct snd_sb_csp *p; - int uninitialized_var(version); + int version; int err; struct snd_hwdep *hw; diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c index 9bea7d3f99f8..9468a2de8c9b 100644 --- a/sound/usb/endpoint.c +++ b/sound/usb/endpoint.c @@ -335,7 +335,7 @@ static void queue_pending_output_urbs(struct snd_usb_endpoint *ep) while (test_bit(EP_FLAG_RUNNING, &ep->flags)) { unsigned long flags; - struct snd_usb_packet_info *uninitialized_var(packet); + struct snd_usb_packet_info *packet; struct snd_urb_ctx *ctx = NULL; int err, i; -- cgit v1.2.3 From 63a0895d960aa3d3653ef0ecad5bd8579388f14b Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 3 Jun 
2020 13:09:38 -0700 Subject: compiler: Remove uninitialized_var() macro Using uninitialized_var() is dangerous as it papers over real bugs[1] (or can in the future), and suppresses unrelated compiler warnings (e.g. "unused variable"). If the compiler thinks it is uninitialized, either simply initialize the variable or make compiler changes. As recommended[2] by[3] Linus[4], remove the macro. With the recent change to disable -Wmaybe-uninitialized in v5.7 in commit 78a5255ffb6a ("Stop the ad-hoc games with -Wno-maybe-initialized"), this is likely the best time to make this treewide change. [1] https://lore.kernel.org/lkml/20200603174714.192027-1-glider@google.com/ [2] https://lore.kernel.org/lkml/CA+55aFw+Vbj0i=1TGqCR5vQkCzWJ0QxK6CernOU6eedsudAixw@mail.gmail.com/ [3] https://lore.kernel.org/lkml/CA+55aFwgbgqhbp1fkxvRKEpzyR5J8n1vKT1VZdz9knmPuXhOeg@mail.gmail.com/ [4] https://lore.kernel.org/lkml/CA+55aFz2500WfbKXAx8s67wrm9=yVJu65TpLgN_ybYNv0VEOKA@mail.gmail.com/ Suggested-by: Linus Torvalds Reviewed-by: Bart van Assche Reviewed-by: Miguel Ojeda Tested-by: Nathan Chancellor Tested-by: Sedat Dilek Signed-off-by: Kees Cook --- include/linux/compiler-clang.h | 2 -- include/linux/compiler-gcc.h | 6 ------ tools/include/linux/compiler.h | 2 -- tools/virtio/linux/kernel.h | 2 -- 4 files changed, 12 deletions(-) (limited to 'include') diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 5e55302e3bf6..8a072d00e688 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -5,8 +5,6 @@ /* Compiler specific definitions for Clang compiler */ -#define uninitialized_var(x) x = *(&(x)) - /* same as gcc, this was present in clang-2.6 so we can assume it works * with any version that can compile the kernel */ diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 1c74464c80c6..4099ac8b7f10 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -59,12 +59,6 @@ (typeof(ptr)) (__ptr + (off)); \ }) -/* - * A trick to suppress uninitialized variable warning without generating any - * code - */ -#define uninitialized_var(x) x = x - #ifdef CONFIG_RETPOLINE #define __noretpoline __attribute__((__indirect_branch__("keep"))) #endif diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h index 9f9002734e19..2f2f4082225e 100644 --- a/tools/include/linux/compiler.h +++ b/tools/include/linux/compiler.h @@ -111,8 +111,6 @@ # define noinline #endif -#define uninitialized_var(x) x = *(&(x)) - #include /* diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h index caab980211a6..315e85cabeda 100644 --- a/tools/virtio/linux/kernel.h +++ b/tools/virtio/linux/kernel.h @@ -110,8 +110,6 @@ static inline void free_page(unsigned long addr) const typeof( ((type *)0)->member ) *__mptr = (ptr); \ (type *)( (char *)__mptr - offsetof(type,member) );}) -#define uninitialized_var(x) x = x - # ifndef likely # define likely(x) (__builtin_expect(!!(x), 1)) # endif -- cgit v1.2.3 From cc1d0cd817bad5dab50732a6668a87ff3eac3b5c Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Wed, 15 Jul 2020 20:45:50 +0300 Subject: ALSA: hda: export snd_hda_codec_cleanup_for_unbind() To avoid duplicated code for cleanup, and match the already exported snd_hda_codec_pcm_new(), also export snd_hda_codec_cleanup_for_unbind(). 
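For reference on the uninitialized_var() removal and the treewide hunks above: the macro expanded to a self-assignment, which marked the variable as "initialized" without emitting any code, so a store that was genuinely missing on some path was never reported either. A minimal standalone sketch of the old annotation next to the now-preferred explicit initialization (ordinary user-space C, not a kernel file; lookup() and its values are invented for illustration):

#include <stdio.h>

/* The gcc variant removed above; the clang/tools variant was x = *(&(x)). */
#define uninitialized_var(x) x = x

static int lookup(int key, int *val)
{
	if (key < 0)
		return -1;	/* error path: *val is left untouched */
	*val = key * 2;
	return 0;
}

int main(void)
{
	int uninitialized_var(old_style);	/* old: expands to old_style = old_style, warning silenced */
	int new_style = 0;			/* new: initialize explicitly, or prove init on every path */

	if (lookup(3, &old_style) == 0 && lookup(4, &new_style) == 0)
		printf("%d %d\n", old_style, new_style);
	return 0;
}

The self-assignment is why -Wmaybe-uninitialized stayed silent even when a caller used the value after an error return; removing the macro either surfaces that warning or forces the explicit initialization shown for new_style.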
Signed-off-by: Kai Vehmanen Reviewed-by: Pierre-Louis Bossart Reviewed-by: Ranjani Sridharan Reviewed-by: Takashi Iwai Link: https://lore.kernel.org/r/20200715174551.3730165-2-kai.vehmanen@linux.intel.com Signed-off-by: Mark Brown --- include/sound/hda_codec.h | 2 ++ sound/pci/hda/hda_codec.c | 1 + 2 files changed, 3 insertions(+) (limited to 'include') diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h index d16a4229209b..e378ed7f4824 100644 --- a/include/sound/hda_codec.h +++ b/include/sound/hda_codec.h @@ -415,6 +415,8 @@ __printf(2, 3) struct hda_pcm *snd_hda_codec_pcm_new(struct hda_codec *codec, const char *fmt, ...); +void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec); + static inline void snd_hda_codec_pcm_get(struct hda_pcm *pcm) { kref_get(&pcm->kref); diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 7e3ae4534df9..b4331355602c 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -792,6 +792,7 @@ void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec) remove_conn_list(codec); snd_hdac_regmap_exit(&codec->core); } +EXPORT_SYMBOL_GPL(snd_hda_codec_cleanup_for_unbind); static unsigned int hda_set_power_state(struct hda_codec *codec, unsigned int power_state); -- cgit v1.2.3 From 5be542e945cb39a2457aa2cfe8b84aac95ef0f2d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 16 Jul 2020 16:36:50 +1000 Subject: lockdep: Move list.h inclusion into lockdep.h Currently lockdep_types.h includes list.h without actually using any of its macros or functions. All it needs are the type definitions which were moved into types.h long ago. This potentially causes inclusion loops because both are included by many core header files. This patch moves the list.h inclusion into lockdep.h. Note that we could probably remove it completely but that could potentially result in compile failures should any end users not include list.h directly and also be unlucky enough to not get list.h via some other header file. Reported-by: Petr Mladek Signed-off-by: Herbert Xu Signed-off-by: Peter Zijlstra (Intel) Tested-by: Petr Mladek Link: https://lkml.kernel.org/r/20200716063649.GA23065@gondor.apana.org.au --- include/linux/lockdep.h | 1 + include/linux/lockdep_types.h | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index fd04b9e96091..7aafba0ddcf9 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -22,6 +22,7 @@ extern int lock_stat; #ifdef CONFIG_LOCKDEP #include +#include #include #include diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h index 7b9350624577..bb35b449f533 100644 --- a/include/linux/lockdep_types.h +++ b/include/linux/lockdep_types.h @@ -32,8 +32,6 @@ enum lockdep_wait_type { #ifdef CONFIG_LOCKDEP -#include - /* * We'd rather not expose kernel/lockdep_states.h this wide, but we do need * the total number of states... :-( -- cgit v1.2.3 From a9232dc5607dbada801f2fe83ea307cda762969a Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Sat, 11 Jul 2020 17:59:54 +0300 Subject: rwsem: fix commas in initialisation Leading comma prevents arbitrary reordering of initialisation clauses. The whole point of C99 initialisation is to allow any such reordering. 
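The ordering point above is easiest to see in a small example. A hedged sketch of why trailing commas compose better when initializer fragments come from conditional macros (the struct and macros here are invented; only the leading- versus trailing-comma pattern mirrors the rwsem initializers in the hunk below):

#include <stdio.h>

struct foo {
	int count;
	int owner;
	int magic;
};

/* Old style: the leading comma means this fragment can never be the
 * first clause, so the clauses cannot be freely reordered. */
#define FOO_DEBUG_INIT_OLD	, .magic = 42

/* New style: trailing comma, so the fragment can sit anywhere or be empty. */
#define FOO_DEBUG_INIT_NEW	.magic = 42,

#define FOO_INITIALIZER				\
	{ FOO_DEBUG_INIT_NEW			\
	  .count = 1,				\
	  .owner = 0 }

int main(void)
{
	struct foo f = FOO_INITIALIZER;

	printf("%d %d %d\n", f.count, f.owner, f.magic);
	return 0;
}

Placing FOO_DEBUG_INIT_OLD first would expand to { , .magic = 42, ... }, which is a syntax error; with trailing commas every fragment, including an empty one, can go in any position, which is exactly the reordering freedom the message refers to.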
Signed-off-by: Alexey Dobriyan Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200711145954.GA1178171@localhost.localdomain --- include/linux/rwsem.h | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 7e5b2a4eb560..25e3fde85617 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -60,39 +60,39 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem) } #define RWSEM_UNLOCKED_VALUE 0L -#define __RWSEM_INIT_COUNT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE) +#define __RWSEM_COUNT_INIT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE) /* Common initializer macros and functions */ #ifdef CONFIG_DEBUG_LOCK_ALLOC # define __RWSEM_DEP_MAP_INIT(lockname) \ - , .dep_map = { \ + .dep_map = { \ .name = #lockname, \ .wait_type_inner = LD_WAIT_SLEEP, \ - } + }, #else # define __RWSEM_DEP_MAP_INIT(lockname) #endif #ifdef CONFIG_DEBUG_RWSEMS -# define __DEBUG_RWSEM_INITIALIZER(lockname) , .magic = &lockname +# define __RWSEM_DEBUG_INIT(lockname) .magic = &lockname, #else -# define __DEBUG_RWSEM_INITIALIZER(lockname) +# define __RWSEM_DEBUG_INIT(lockname) #endif #ifdef CONFIG_RWSEM_SPIN_ON_OWNER -#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED +#define __RWSEM_OPT_INIT(lockname) .osq = OSQ_LOCK_UNLOCKED, #else #define __RWSEM_OPT_INIT(lockname) #endif #define __RWSEM_INITIALIZER(name) \ - { __RWSEM_INIT_COUNT(name), \ + { __RWSEM_COUNT_INIT(name), \ .owner = ATOMIC_LONG_INIT(0), \ - .wait_list = LIST_HEAD_INIT((name).wait_list), \ - .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \ __RWSEM_OPT_INIT(name) \ - __DEBUG_RWSEM_INITIALIZER(name) \ + .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),\ + .wait_list = LIST_HEAD_INIT((name).wait_list), \ + __RWSEM_DEBUG_INIT(name) \ __RWSEM_DEP_MAP_INIT(name) } #define DECLARE_RWSEM(name) \ -- cgit v1.2.3 From 350d993510115e3d9e78f1b3359bff7b68e88418 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 9 Jul 2020 10:55:41 +0900 Subject: ASoC: soc-dai.c: add .no_capture_mute support snd_soc_dai_digital_mute() is internally using both mute_stream() (1) or digital_mute() (2), but the difference between these 2 are only handling "direction". We can merge digital_mute() into mute_stream int snd_soc_dai_digital_mute(xxx, int direction) { ... else if (dai->driver->ops->mute_stream) (1) return dai->driver->ops->mute_stream(xxx, direction); else if (direction == SNDRV_PCM_STREAM_PLAYBACK && dai->driver->ops->digital_mute) (2) return dai->driver->ops->digital_mute(xxx); ... } To prepare merging mute_stream()/digital_mute(), this patch adds .no_capture_mute support to emulate .digital_mute(). 
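For illustration, a minimal sketch (hypothetical driver names, not part of this patch) of a codec DAI that can only mute its playback path and therefore sets the new flag so the core skips it for the capture direction:

#include <linux/device.h>
#include <sound/soc-dai.h>

static int example_mute_stream(struct snd_soc_dai *dai, int mute, int direction)
{
	/* with .no_capture_mute set below, only called for playback */
	dev_dbg(dai->dev, "%smute playback\n", mute ? "" : "un");
	return 0;
}

static const struct snd_soc_dai_ops example_dai_ops = {
	.mute_stream	 = example_mute_stream,
	.no_capture_mute = 1,
};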
Signed-off-by: Kuninori Morimoto Link: https://lore.kernel.org/r/87eeplxxj7.wl-kuninori.morimoto.gx@renesas.com Signed-off-by: Mark Brown --- include/sound/soc-dai.h | 3 +++ sound/soc/soc-dai.c | 8 +++++++- 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index 212257e84fac..e0e061b8e9bd 100644 --- a/include/sound/soc-dai.h +++ b/include/sound/soc-dai.h @@ -280,6 +280,9 @@ struct snd_soc_dai_ops { */ snd_pcm_sframes_t (*delay)(struct snd_pcm_substream *, struct snd_soc_dai *); + + /* bit field */ + unsigned int no_capture_mute:1; }; struct snd_soc_cdai_ops { diff --git a/sound/soc/soc-dai.c b/sound/soc/soc-dai.c index b05e18b63a1c..458d2ea44329 100644 --- a/sound/soc/soc-dai.c +++ b/sound/soc/soc-dai.c @@ -298,8 +298,14 @@ int snd_soc_dai_digital_mute(struct snd_soc_dai *dai, int mute, { int ret = -ENOTSUPP; + /* + * ignore if direction was CAPTURE + * and it had .no_capture_mute flag + */ if (dai->driver->ops && - dai->driver->ops->mute_stream) + dai->driver->ops->mute_stream && + (direction == SNDRV_PCM_STREAM_PLAYBACK || + !dai->driver->ops->no_capture_mute)) ret = dai->driver->ops->mute_stream(dai, mute, direction); else if (direction == SNDRV_PCM_STREAM_PLAYBACK && dai->driver->ops && -- cgit v1.2.3 From d789710fb248df0c2279a785c7b9beb313629c0a Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 9 Jul 2020 10:55:45 +0900 Subject: ASoC: hdmi-codec: merge .digital_mute() into .mute_stream() snd_soc_dai_digital_mute() is internally using both mute_stream() (1) or digital_mute() (2), but the difference between these 2 are only handling direction. We can merge digital_mute() into mute_stream int snd_soc_dai_digital_mute(xxx, int direction) { ... else if (dai->driver->ops->mute_stream) (1) return dai->driver->ops->mute_stream(xxx, direction); else if (direction == SNDRV_PCM_STREAM_PLAYBACK && dai->driver->ops->digital_mute) (2) return dai->driver->ops->digital_mute(xxx); ... } For hdmi-codec, we need to update struct hdmi_codec_ops, and all its users in the same time. 
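For illustration, the shape each hdmi_codec_ops user ends up with after this conversion (the real drivers are updated in the hunks below); the "foo" names are placeholders:

#include <linux/device.h>
#include <sound/hdmi-codec.h>

static int foo_mute_stream(struct device *dev, void *data,
			   bool enable, int direction)
{
	/* body of the old foo_digital_mute(); direction may be ignored */
	return 0;
}

static const struct hdmi_codec_ops foo_audio_codec_ops = {
	.mute_stream	 = foo_mute_stream,
	/* keep the old digital_mute semantics: playback-only mute */
	.no_capture_mute = 1,
};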
Signed-off-by: Kuninori Morimoto Reviewed-by: Peter Ujfalusi Link: https://lore.kernel.org/r/87d055xxj2.wl-kuninori.morimoto.gx@renesas.com Signed-off-by: Mark Brown --- drivers/gpu/drm/bridge/sii902x.c | 7 ++++--- drivers/gpu/drm/exynos/exynos_hdmi.c | 6 ++++-- drivers/gpu/drm/i2c/tda998x_drv.c | 7 ++++--- drivers/gpu/drm/mediatek/mtk_hdmi.c | 6 ++++-- drivers/gpu/drm/rockchip/cdn-dp-core.c | 7 ++++--- drivers/gpu/drm/sti/sti_hdmi.c | 6 ++++-- drivers/gpu/drm/zte/zx_hdmi.c | 7 ++++--- include/sound/hdmi-codec.h | 6 +++++- sound/soc/codecs/hdmi-codec.c | 21 +++++++++++++++------ 9 files changed, 48 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c index 6dad025f8da7..c751baf3d064 100644 --- a/drivers/gpu/drm/bridge/sii902x.c +++ b/drivers/gpu/drm/bridge/sii902x.c @@ -672,8 +672,8 @@ static void sii902x_audio_shutdown(struct device *dev, void *data) clk_disable_unprepare(sii902x->audio.mclk); } -static int sii902x_audio_digital_mute(struct device *dev, - void *data, bool enable) +static int sii902x_audio_mute(struct device *dev, void *data, + bool enable, int direction) { struct sii902x *sii902x = dev_get_drvdata(dev); @@ -724,9 +724,10 @@ static int sii902x_audio_get_dai_id(struct snd_soc_component *component, static const struct hdmi_codec_ops sii902x_audio_codec_ops = { .hw_params = sii902x_audio_hw_params, .audio_shutdown = sii902x_audio_shutdown, - .digital_mute = sii902x_audio_digital_mute, + .mute_stream = sii902x_audio_mute, .get_eld = sii902x_audio_get_eld, .get_dai_id = sii902x_audio_get_dai_id, + .no_capture_mute = 1, }; static int sii902x_audio_codec_init(struct sii902x *sii902x, diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 95dd399aa9cc..68d7b1ce1b7c 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -1604,7 +1604,8 @@ static int hdmi_audio_hw_params(struct device *dev, void *data, return 0; } -static int hdmi_audio_digital_mute(struct device *dev, void *data, bool mute) +static int hdmi_audio_mute(struct device *dev, void *data, + bool mute, int direction) { struct hdmi_context *hdata = dev_get_drvdata(dev); @@ -1634,8 +1635,9 @@ static int hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, static const struct hdmi_codec_ops audio_codec_ops = { .hw_params = hdmi_audio_hw_params, .audio_shutdown = hdmi_audio_shutdown, - .digital_mute = hdmi_audio_digital_mute, + .mute_stream = hdmi_audio_mute, .get_eld = hdmi_audio_get_eld, + .no_capture_mute = 1, }; static int hdmi_register_audio_device(struct hdmi_context *hdata) diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index 9517f522dcb9..3010a4536da3 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -1133,8 +1133,8 @@ static void tda998x_audio_shutdown(struct device *dev, void *data) mutex_unlock(&priv->audio_mutex); } -static int tda998x_audio_digital_mute(struct device *dev, void *data, - bool enable) +static int tda998x_audio_mute_stream(struct device *dev, void *data, + bool enable, int direction) { struct tda998x_priv *priv = dev_get_drvdata(dev); @@ -1162,8 +1162,9 @@ static int tda998x_audio_get_eld(struct device *dev, void *data, static const struct hdmi_codec_ops audio_codec_ops = { .hw_params = tda998x_audio_hw_params, .audio_shutdown = tda998x_audio_shutdown, - .digital_mute = tda998x_audio_digital_mute, + .mute_stream = tda998x_audio_mute_stream, .get_eld = 
tda998x_audio_get_eld, + .no_capture_mute = 1, }; static int tda998x_audio_codec_init(struct tda998x_priv *priv, diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 5feb760617cb..37b4420a0b22 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1647,7 +1647,8 @@ static void mtk_hdmi_audio_shutdown(struct device *dev, void *data) } static int -mtk_hdmi_audio_digital_mute(struct device *dev, void *data, bool enable) +mtk_hdmi_audio_mute(struct device *dev, void *data, + bool enable, int direction) { struct mtk_hdmi *hdmi = dev_get_drvdata(dev); @@ -1692,9 +1693,10 @@ static const struct hdmi_codec_ops mtk_hdmi_audio_codec_ops = { .hw_params = mtk_hdmi_audio_hw_params, .audio_startup = mtk_hdmi_audio_startup, .audio_shutdown = mtk_hdmi_audio_shutdown, - .digital_mute = mtk_hdmi_audio_digital_mute, + .mute_stream = mtk_hdmi_audio_mute, .get_eld = mtk_hdmi_audio_get_eld, .hook_plugged_cb = mtk_hdmi_audio_hook_plugged_cb, + .no_capture_mute = 1, }; static int mtk_hdmi_register_audio_driver(struct device *dev) diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index c634b95b50f7..a4a45daf93f2 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -817,8 +817,8 @@ out: mutex_unlock(&dp->lock); } -static int cdn_dp_audio_digital_mute(struct device *dev, void *data, - bool enable) +static int cdn_dp_audio_mute_stream(struct device *dev, void *data, + bool enable, int direction) { struct cdn_dp_device *dp = dev_get_drvdata(dev); int ret; @@ -849,8 +849,9 @@ static int cdn_dp_audio_get_eld(struct device *dev, void *data, static const struct hdmi_codec_ops audio_codec_ops = { .hw_params = cdn_dp_audio_hw_params, .audio_shutdown = cdn_dp_audio_shutdown, - .digital_mute = cdn_dp_audio_digital_mute, + .mute_stream = cdn_dp_audio_mute_stream, .get_eld = cdn_dp_audio_get_eld, + .no_capture_mute = 1, }; static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp, diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index 5b15c4974e6b..008f07923bbc 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -1191,7 +1191,8 @@ static int hdmi_audio_hw_params(struct device *dev, return 0; } -static int hdmi_audio_digital_mute(struct device *dev, void *data, bool enable) +static int hdmi_audio_mute(struct device *dev, void *data, + bool enable, int direction) { struct sti_hdmi *hdmi = dev_get_drvdata(dev); @@ -1219,8 +1220,9 @@ static int hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size static const struct hdmi_codec_ops audio_codec_ops = { .hw_params = hdmi_audio_hw_params, .audio_shutdown = hdmi_audio_shutdown, - .digital_mute = hdmi_audio_digital_mute, + .mute_stream = hdmi_audio_mute, .get_eld = hdmi_audio_get_eld, + .no_capture_mute = 1, }; static int sti_hdmi_register_audio_driver(struct device *dev, diff --git a/drivers/gpu/drm/zte/zx_hdmi.c b/drivers/gpu/drm/zte/zx_hdmi.c index 76a16d997a23..cd79ca0a92a9 100644 --- a/drivers/gpu/drm/zte/zx_hdmi.c +++ b/drivers/gpu/drm/zte/zx_hdmi.c @@ -439,8 +439,8 @@ static int zx_hdmi_audio_hw_params(struct device *dev, return zx_hdmi_infoframe_trans(hdmi, &frame, FSEL_AUDIO); } -static int zx_hdmi_audio_digital_mute(struct device *dev, void *data, - bool enable) +static int zx_hdmi_audio_mute(struct device *dev, void *data, + bool enable, int direction) { struct zx_hdmi *hdmi = dev_get_drvdata(dev); @@ -468,8 +468,9 @@ static const 
struct hdmi_codec_ops zx_hdmi_codec_ops = { .audio_startup = zx_hdmi_audio_startup, .hw_params = zx_hdmi_audio_hw_params, .audio_shutdown = zx_hdmi_audio_shutdown, - .digital_mute = zx_hdmi_audio_digital_mute, + .mute_stream = zx_hdmi_audio_mute, .get_eld = zx_hdmi_audio_get_eld, + .no_capture_mute = 1, }; static struct hdmi_codec_pdata zx_hdmi_codec_pdata = { diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h index 83b17682e01c..17eebd34835a 100644 --- a/include/sound/hdmi-codec.h +++ b/include/sound/hdmi-codec.h @@ -76,7 +76,8 @@ struct hdmi_codec_ops { * Mute/unmute HDMI audio stream. * Optional */ - int (*digital_mute)(struct device *dev, void *data, bool enable); + int (*mute_stream)(struct device *dev, void *data, + bool enable, int direction); /* * Provides EDID-Like-Data from connected HDMI device. @@ -99,6 +100,9 @@ struct hdmi_codec_ops { int (*hook_plugged_cb)(struct device *dev, void *data, hdmi_codec_plugged_cb fn, struct device *codec_dev); + + /* bit field */ + unsigned int no_capture_mute:1; }; /* HDMI codec initalization data */ diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c index 926ab447a96b..bc760a81e217 100644 --- a/sound/soc/codecs/hdmi-codec.c +++ b/sound/soc/codecs/hdmi-codec.c @@ -558,13 +558,22 @@ static int hdmi_codec_i2s_set_fmt(struct snd_soc_dai *dai, return 0; } -static int hdmi_codec_digital_mute(struct snd_soc_dai *dai, int mute) +static int hdmi_codec_mute(struct snd_soc_dai *dai, int mute, int direction) { struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai); - if (hcp->hcd.ops->digital_mute) - return hcp->hcd.ops->digital_mute(dai->dev->parent, - hcp->hcd.data, mute); + /* + * ignore if direction was CAPTURE + * and it had .no_capture_mute flag + * see + * snd_soc_dai_digital_mute() + */ + if (hcp->hcd.ops->mute_stream && + (direction == SNDRV_PCM_STREAM_PLAYBACK || + !hcp->hcd.ops->no_capture_mute)) + return hcp->hcd.ops->mute_stream(dai->dev->parent, + hcp->hcd.data, + mute, direction); return -ENOTSUPP; } @@ -574,14 +583,14 @@ static const struct snd_soc_dai_ops hdmi_codec_i2s_dai_ops = { .shutdown = hdmi_codec_shutdown, .hw_params = hdmi_codec_hw_params, .set_fmt = hdmi_codec_i2s_set_fmt, - .digital_mute = hdmi_codec_digital_mute, + .mute_stream = hdmi_codec_mute, }; static const struct snd_soc_dai_ops hdmi_codec_spdif_dai_ops = { .startup = hdmi_codec_startup, .shutdown = hdmi_codec_shutdown, .hw_params = hdmi_codec_hw_params, - .digital_mute = hdmi_codec_digital_mute, + .mute_stream = hdmi_codec_mute, }; #define HDMI_RATES (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |\ -- cgit v1.2.3 From 764aafdb985b182bce0c91503e9233cb97a2f0d7 Mon Sep 17 00:00:00 2001 From: Shengjiu Wang Date: Wed, 15 Jul 2020 22:09:37 +0800 Subject: ASoC: simple-card-utils: Support configure pin_name for asoc_simple_init_jack Currently the pin_name is fixed in asoc_simple_init_jack, but some driver may use a different pin_name. So add a new parameter in asoc_simple_init_jack for configuring pin_name. If this parameter is NULL, then the default pin_name is used. 
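For illustration, a usage sketch with a hypothetical caller (assuming the usual hp_jack/mic_jack members of asoc_simple_priv): passing NULL keeps the historical pin names, while a non-NULL string overrides them:

#include <sound/simple_card_utils.h>

static int example_init_jacks(struct asoc_simple_priv *priv,
			      struct snd_soc_card *card)
{
	int ret;

	/* NULL pin: behaves as before, pin name stays "Headphones" */
	ret = asoc_simple_init_jack(card, &priv->hp_jack, 1, "", NULL);
	if (ret)
		return ret;

	/* override the default "Mic Jack" with a board-specific pin */
	return asoc_simple_init_jack(card, &priv->mic_jack, 0, "",
				     "Headset Mic");
}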
Signed-off-by: Shengjiu Wang Acked-by: Nicolin Chen Link: https://lore.kernel.org/r/1594822179-1849-2-git-send-email-shengjiu.wang@nxp.com Signed-off-by: Mark Brown --- include/sound/simple_card_utils.h | 6 +++--- sound/soc/generic/simple-card-utils.c | 7 ++++--- 2 files changed, 7 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h index bbdd1542d6f1..86a1e956991e 100644 --- a/include/sound/simple_card_utils.h +++ b/include/sound/simple_card_utils.h @@ -12,9 +12,9 @@ #include #define asoc_simple_init_hp(card, sjack, prefix) \ - asoc_simple_init_jack(card, sjack, 1, prefix) + asoc_simple_init_jack(card, sjack, 1, prefix, NULL) #define asoc_simple_init_mic(card, sjack, prefix) \ - asoc_simple_init_jack(card, sjack, 0, prefix) + asoc_simple_init_jack(card, sjack, 0, prefix, NULL) struct asoc_simple_dai { const char *name; @@ -131,7 +131,7 @@ int asoc_simple_parse_pin_switches(struct snd_soc_card *card, int asoc_simple_init_jack(struct snd_soc_card *card, struct asoc_simple_jack *sjack, - int is_hp, char *prefix); + int is_hp, char *prefix, char *pin); int asoc_simple_init_priv(struct asoc_simple_priv *priv, struct link_info *li); diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c index 8c54dc6710fe..b408cb5ed644 100644 --- a/sound/soc/generic/simple-card-utils.c +++ b/sound/soc/generic/simple-card-utils.c @@ -540,7 +540,8 @@ EXPORT_SYMBOL_GPL(asoc_simple_parse_pin_switches); int asoc_simple_init_jack(struct snd_soc_card *card, struct asoc_simple_jack *sjack, - int is_hp, char *prefix) + int is_hp, char *prefix, + char *pin) { struct device *dev = card->dev; enum of_gpio_flags flags; @@ -557,12 +558,12 @@ int asoc_simple_init_jack(struct snd_soc_card *card, if (is_hp) { snprintf(prop, sizeof(prop), "%shp-det-gpio", prefix); - pin_name = "Headphones"; + pin_name = pin ? pin : "Headphones"; gpio_name = "Headphone detection"; mask = SND_JACK_HEADPHONE; } else { snprintf(prop, sizeof(prop), "%smic-det-gpio", prefix); - pin_name = "Mic Jack"; + pin_name = pin ? pin : "Mic Jack"; gpio_name = "Mic detection"; mask = SND_JACK_MICROPHONE; } -- cgit v1.2.3 From 6611561a7a7ef925294353a4c2124bdb66eb831c Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 18:30:12 -0700 Subject: regmap: fix duplicated word in Change doubled word "be" to "to be". Signed-off-by: Randy Dunlap Cc: Mark Brown Link: https://lore.kernel.org/r/7ef41bfc-de3e-073a-8746-0b3fdf7628c0@infradead.org Signed-off-by: Mark Brown --- include/linux/regmap.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/regmap.h b/include/linux/regmap.h index e3817c097791..1970ed59d49f 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -275,7 +275,7 @@ typedef void (*regmap_unlock)(void *); * readable if it belongs to one of the ranges specified * by rd_noinc_table). * @disable_locking: This regmap is either protected by external means or - * is guaranteed not be be accessed from multiple threads. + * is guaranteed not to be accessed from multiple threads. * Don't use any locking mechanisms. * @lock: Optional lock callback (overrides regmap's default lock * function, based on spinlock or mutex). 
-- cgit v1.2.3 From a98bcaa92d3d7a7753e23b3363d90ffdb82e8edb Mon Sep 17 00:00:00 2001 From: Colton Lewis Date: Wed, 15 Jul 2020 19:15:00 +0000 Subject: regulator: Correct kernel-doc inconsistency Silence documentation build warning by correcting kernel-doc comments. ./include/linux/regulator/machine.h:196: warning: Function parameter or member 'max_uV_step' not described in 'regulation_constraints' ./include/linux/regulator/driver.h:206: warning: Function parameter or member 'resume' not described in 'regulator_ops' Signed-off-by: Colton Lewis Link: https://lore.kernel.org/r/20200715191438.29312-1-colton.w.lewis@protonmail.com Signed-off-by: Mark Brown --- include/linux/regulator/driver.h | 2 +- include/linux/regulator/machine.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 436df3ba0b2a..8539f34ae42b 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -117,7 +117,7 @@ enum regulator_status { * suspended. * @set_suspend_mode: Set the operating mode for the regulator when the * system is suspended. - * + * @resume: Resume operation of suspended regulator. * @set_pull_down: Configure the regulator to pull down when the regulator * is disabled. * diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index a84cc8879c3e..8a56f033b6cd 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -101,6 +101,7 @@ struct regulator_state { * @system_load: Load that isn't captured by any consumer requests. * * @max_spread: Max possible spread between coupled regulators + * @max_uV_step: Max possible step change in voltage * @valid_modes_mask: Mask of modes which may be configured by consumers. * @valid_ops_mask: Operations which may be performed by consumers. * -- cgit v1.2.3 From 9a6ad1ad71fbc5a52617e016a3608d71b91f62e8 Mon Sep 17 00:00:00 2001 From: Raed Salem Date: Mon, 18 Nov 2019 14:30:20 +0200 Subject: net/mlx5: Accel, Add core IPsec support for the Connect-X family This to set the base for downstream patches to support the new IPsec implementation of the Connect-X family. Following modifications made: - Remove accel layer dependency from MLX5_FPGA_IPSEC. - Introduce accel_ipsec_ops, each IPsec device will have to support these ops. 
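For illustration (not part of this patch): with the ops table in place, the later ConnectX-family backend only needs to supply its own table at init time; connectx_ipsec_ops() below is a hypothetical name, and the real selection lives in mlx5_accel_ipsec_init():

/* needs the driver-internal accel/ipsec.h and fpga/ipsec.h headers */

/* hypothetical future ConnectX-native backend */
const struct mlx5_accel_ipsec_ops *connectx_ipsec_ops(struct mlx5_core_dev *mdev);

static const struct mlx5_accel_ipsec_ops *
example_pick_ipsec_ops(struct mlx5_core_dev *mdev)
{
	const struct mlx5_accel_ipsec_ops *ops;

	ops = mlx5_fpga_ipsec_ops(mdev);	/* Innova (FPGA) devices */
	if (!ops)
		ops = connectx_ipsec_ops(mdev);	/* hypothetical ConnectX path */

	return ops;	/* NULL: no IPsec offload on this device */
}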
Signed-off-by: Raed Salem Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/accel/ipsec.c | 103 +++++++++++++++------ .../net/ethernet/mellanox/mlx5/core/accel/ipsec.h | 45 +++++---- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 4 +- .../net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 51 ++++++---- .../net/ethernet/mellanox/mlx5/core/fpga/ipsec.h | 37 ++------ drivers/net/ethernet/mellanox/mlx5/core/main.c | 9 +- include/linux/mlx5/accel.h | 6 +- include/linux/mlx5/driver.h | 3 + 8 files changed, 154 insertions(+), 104 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c index 8a4985d8cbfe..628c8887f086 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c @@ -31,37 +31,83 @@ * */ -#ifdef CONFIG_MLX5_FPGA_IPSEC - #include #include "accel/ipsec.h" #include "mlx5_core.h" #include "fpga/ipsec.h" +void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) +{ + const struct mlx5_accel_ipsec_ops *ipsec_ops = mlx5_fpga_ipsec_ops(mdev); + int err = 0; + + if (!ipsec_ops || !ipsec_ops->init) { + mlx5_core_dbg(mdev, "IPsec ops is not supported\n"); + return; + } + + err = ipsec_ops->init(mdev); + if (err) { + mlx5_core_warn_once(mdev, "Failed to start IPsec device, err = %d\n", err); + return; + } + + mdev->ipsec_ops = ipsec_ops; +} + +void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) +{ + const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; + + if (!ipsec_ops || !ipsec_ops->cleanup) + return; + + ipsec_ops->cleanup(mdev); +} + u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) { - return mlx5_fpga_ipsec_device_caps(mdev); + const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; + + if (!ipsec_ops || !ipsec_ops->device_caps) + return 0; + + return ipsec_ops->device_caps(mdev); } EXPORT_SYMBOL_GPL(mlx5_accel_ipsec_device_caps); unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev) { - return mlx5_fpga_ipsec_counters_count(mdev); + const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; + + if (!ipsec_ops || !ipsec_ops->counters_count) + return -EOPNOTSUPP; + + return ipsec_ops->counters_count(mdev); } int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, unsigned int count) { - return mlx5_fpga_ipsec_counters_read(mdev, counters, count); + const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; + + if (!ipsec_ops || !ipsec_ops->counters_read) + return -EOPNOTSUPP; + + return ipsec_ops->counters_read(mdev, counters, count); } void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, struct mlx5_accel_esp_xfrm *xfrm, u32 *sa_handle) { + const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; __be32 saddr[4] = {}, daddr[4] = {}; + if (!ipsec_ops || !ipsec_ops->create_hw_context) + return ERR_PTR(-EOPNOTSUPP); + if (!xfrm->attrs.is_ipv6) { saddr[3] = xfrm->attrs.saddr.a4; daddr[3] = xfrm->attrs.daddr.a4; @@ -70,29 +116,18 @@ void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr)); } - return mlx5_fpga_ipsec_create_sa_ctx(mdev, xfrm, saddr, - daddr, xfrm->attrs.spi, - xfrm->attrs.is_ipv6, sa_handle); + return ipsec_ops->create_hw_context(mdev, xfrm, saddr, daddr, xfrm->attrs.spi, + xfrm->attrs.is_ipv6, sa_handle); } -void mlx5_accel_esp_free_hw_context(void *context) +void mlx5_accel_esp_free_hw_context(struct 
mlx5_core_dev *mdev, void *context) { - mlx5_fpga_ipsec_delete_sa_ctx(context); -} + const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; -int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) -{ - return mlx5_fpga_ipsec_init(mdev); -} - -void mlx5_accel_ipsec_build_fs_cmds(void) -{ - mlx5_fpga_ipsec_build_fs_cmds(); -} + if (!ipsec_ops || !ipsec_ops->free_hw_context) + return; -void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) -{ - mlx5_fpga_ipsec_cleanup(mdev); + ipsec_ops->free_hw_context(context); } struct mlx5_accel_esp_xfrm * @@ -100,9 +135,13 @@ mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, const struct mlx5_accel_esp_xfrm_attrs *attrs, u32 flags) { + const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; struct mlx5_accel_esp_xfrm *xfrm; - xfrm = mlx5_fpga_esp_create_xfrm(mdev, attrs, flags); + if (!ipsec_ops || !ipsec_ops->esp_create_xfrm) + return ERR_PTR(-EOPNOTSUPP); + + xfrm = ipsec_ops->esp_create_xfrm(mdev, attrs, flags); if (IS_ERR(xfrm)) return xfrm; @@ -113,15 +152,23 @@ EXPORT_SYMBOL_GPL(mlx5_accel_esp_create_xfrm); void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) { - mlx5_fpga_esp_destroy_xfrm(xfrm); + const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops; + + if (!ipsec_ops || !ipsec_ops->esp_destroy_xfrm) + return; + + ipsec_ops->esp_destroy_xfrm(xfrm); } EXPORT_SYMBOL_GPL(mlx5_accel_esp_destroy_xfrm); int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, const struct mlx5_accel_esp_xfrm_attrs *attrs) { - return mlx5_fpga_esp_modify_xfrm(xfrm, attrs); + const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops; + + if (!ipsec_ops || !ipsec_ops->esp_modify_xfrm) + return -EOPNOTSUPP; + + return ipsec_ops->esp_modify_xfrm(xfrm, attrs); } EXPORT_SYMBOL_GPL(mlx5_accel_esp_modify_xfrm); - -#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h index e89747674712..fbb9c5415d53 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h @@ -37,7 +37,7 @@ #include #include -#ifdef CONFIG_MLX5_FPGA_IPSEC +#ifdef CONFIG_MLX5_ACCEL #define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \ MLX5_ACCEL_IPSEC_CAP_DEVICE) @@ -49,12 +49,30 @@ int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, struct mlx5_accel_esp_xfrm *xfrm, u32 *sa_handle); -void mlx5_accel_esp_free_hw_context(void *context); +void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context); -int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev); -void mlx5_accel_ipsec_build_fs_cmds(void); +void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev); void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev); +struct mlx5_accel_ipsec_ops { + u32 (*device_caps)(struct mlx5_core_dev *mdev); + unsigned int (*counters_count)(struct mlx5_core_dev *mdev); + int (*counters_read)(struct mlx5_core_dev *mdev, u64 *counters, unsigned int count); + void* (*create_hw_context)(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm *xfrm, + const __be32 saddr[4], const __be32 daddr[4], + const __be32 spi, bool is_ipv6, u32 *sa_handle); + void (*free_hw_context)(void *context); + int (*init)(struct mlx5_core_dev *mdev); + void (*cleanup)(struct mlx5_core_dev *mdev); + struct mlx5_accel_esp_xfrm* (*esp_create_xfrm)(struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *attrs, 
+ u32 flags); + int (*esp_modify_xfrm)(struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs); + void (*esp_destroy_xfrm)(struct mlx5_accel_esp_xfrm *xfrm); +}; + #else #define MLX5_IPSEC_DEV(mdev) false @@ -67,23 +85,12 @@ mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, return NULL; } -static inline void mlx5_accel_esp_free_hw_context(void *context) -{ -} - -static inline int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) -{ - return 0; -} +static inline void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context) {} -static inline void mlx5_accel_ipsec_build_fs_cmds(void) -{ -} +static inline void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) {} -static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) -{ -} +static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) {} -#endif +#endif /* CONFIG_MLX5_ACCEL */ #endif /* __MLX5_ACCEL_IPSEC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index bc55c82b55ba..8d797cd56e26 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -342,7 +342,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x) goto out; err_hw_ctx: - mlx5_accel_esp_free_hw_context(sa_entry->hw_context); + mlx5_accel_esp_free_hw_context(priv->mdev, sa_entry->hw_context); err_xfrm: mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm); err_sa_entry: @@ -372,7 +372,7 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x) if (sa_entry->hw_context) { flush_workqueue(sa_entry->ipsec->wq); - mlx5_accel_esp_free_hw_context(sa_entry->hw_context); + mlx5_accel_esp_free_hw_context(sa_entry->xfrm->mdev, sa_entry->hw_context); mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index b463787d6ca1..cc67366495b0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c @@ -359,7 +359,7 @@ u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) return ret; } -unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev) +static unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev) { struct mlx5_fpga_device *fdev = mdev->fpga; @@ -370,8 +370,8 @@ unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev) number_of_ipsec_counters); } -int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, - unsigned int counters_count) +static int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, + unsigned int counters_count) { struct mlx5_fpga_device *fdev = mdev->fpga; unsigned int i; @@ -665,12 +665,10 @@ static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev, return true; } -void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev, - struct mlx5_accel_esp_xfrm *accel_xfrm, - const __be32 saddr[4], - const __be32 daddr[4], - const __be32 spi, bool is_ipv6, - u32 *sa_handle) +static void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm *accel_xfrm, + const __be32 saddr[4], const __be32 daddr[4], + const __be32 spi, bool is_ipv6, u32 *sa_handle) { struct mlx5_fpga_ipsec_sa_ctx *sa_ctx; struct mlx5_fpga_esp_xfrm *fpga_xfrm = @@ -862,7 +860,7 @@ mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx) 
mutex_unlock(&fipsec->sa_hash_lock); } -void mlx5_fpga_ipsec_delete_sa_ctx(void *context) +static void mlx5_fpga_ipsec_delete_sa_ctx(void *context) { struct mlx5_fpga_esp_xfrm *fpga_xfrm = ((struct mlx5_fpga_ipsec_sa_ctx *)context)->fpga_xfrm; @@ -1264,7 +1262,7 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flo } } -int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev) +static int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev) { struct mlx5_fpga_conn_attr init_attr = {0}; struct mlx5_fpga_device *fdev = mdev->fpga; @@ -1346,7 +1344,7 @@ static void destroy_rules_rb(struct rb_root *root) } } -void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev) +static void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev) { struct mlx5_fpga_device *fdev = mdev->fpga; @@ -1451,7 +1449,7 @@ mlx5_fpga_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev, return 0; } -struct mlx5_accel_esp_xfrm * +static struct mlx5_accel_esp_xfrm * mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev, const struct mlx5_accel_esp_xfrm_attrs *attrs, u32 flags) @@ -1479,7 +1477,7 @@ mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev, return &fpga_xfrm->accel_xfrm; } -void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) +static void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) { struct mlx5_fpga_esp_xfrm *fpga_xfrm = container_of(xfrm, struct mlx5_fpga_esp_xfrm, @@ -1488,8 +1486,8 @@ void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) kfree(fpga_xfrm); } -int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, - const struct mlx5_accel_esp_xfrm_attrs *attrs) +static int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs) { struct mlx5_core_dev *mdev = xfrm->mdev; struct mlx5_fpga_device *fdev = mdev->fpga; @@ -1560,3 +1558,24 @@ change_sw_xfrm_attrs: mutex_unlock(&fpga_xfrm->lock); return err; } + +static const struct mlx5_accel_ipsec_ops fpga_ipsec_ops = { + .device_caps = mlx5_fpga_ipsec_device_caps, + .counters_count = mlx5_fpga_ipsec_counters_count, + .counters_read = mlx5_fpga_ipsec_counters_read, + .create_hw_context = mlx5_fpga_ipsec_create_sa_ctx, + .free_hw_context = mlx5_fpga_ipsec_delete_sa_ctx, + .init = mlx5_fpga_ipsec_init, + .cleanup = mlx5_fpga_ipsec_cleanup, + .esp_create_xfrm = mlx5_fpga_esp_create_xfrm, + .esp_modify_xfrm = mlx5_fpga_esp_modify_xfrm, + .esp_destroy_xfrm = mlx5_fpga_esp_destroy_xfrm, +}; + +const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev) +{ + if (!mlx5_fpga_is_ipsec_device(mdev)) + return NULL; + + return &fpga_ipsec_ops; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h index 9ba637f0f0f2..db88eb4c49e3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h @@ -38,44 +38,23 @@ #include "fs_cmd.h" #ifdef CONFIG_MLX5_FPGA_IPSEC +const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev); u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev); -unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev); -int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, - unsigned int counters_count); - -void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev, - struct mlx5_accel_esp_xfrm *accel_xfrm, - const __be32 saddr[4], - const __be32 daddr[4], - const __be32 spi, bool is_ipv6, - u32 *sa_handle); -void 
mlx5_fpga_ipsec_delete_sa_ctx(void *context); - -int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev); -void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev); -void mlx5_fpga_ipsec_build_fs_cmds(void); - -struct mlx5_accel_esp_xfrm * -mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev, - const struct mlx5_accel_esp_xfrm_attrs *attrs, - u32 flags); -void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm); -int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, - const struct mlx5_accel_esp_xfrm_attrs *attrs); - const struct mlx5_flow_cmds * mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type); +void mlx5_fpga_ipsec_build_fs_cmds(void); #else -static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) -{ - return 0; -} - +static inline +const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev) +{ return NULL; } +static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; } static inline const struct mlx5_flow_cmds * mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type) { return mlx5_fs_cmd_get_default(type); } +static inline void mlx5_fpga_ipsec_build_fs_cmds(void) {}; + #endif /* CONFIG_MLX5_FPGA_IPSEC */ #endif /* __MLX5_FPGA_IPSEC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 8b658908f044..e32d46c33701 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1089,11 +1089,7 @@ static int mlx5_load(struct mlx5_core_dev *dev) goto err_fpga_start; } - err = mlx5_accel_ipsec_init(dev); - if (err) { - mlx5_core_err(dev, "IPSec device start failed %d\n", err); - goto err_ipsec_start; - } + mlx5_accel_ipsec_init(dev); err = mlx5_accel_tls_init(dev); if (err) { @@ -1135,7 +1131,6 @@ err_fs: mlx5_accel_tls_cleanup(dev); err_tls_start: mlx5_accel_ipsec_cleanup(dev); -err_ipsec_start: mlx5_fpga_device_stop(dev); err_fpga_start: mlx5_rsc_dump_cleanup(dev); @@ -1628,7 +1623,7 @@ static int __init init(void) get_random_bytes(&sw_owner_id, sizeof(sw_owner_id)); mlx5_core_verify_params(); - mlx5_accel_ipsec_build_fs_cmds(); + mlx5_fpga_ipsec_build_fs_cmds(); mlx5_register_debugfs(); err = pci_register_driver(&mlx5_core_driver); diff --git a/include/linux/mlx5/accel.h b/include/linux/mlx5/accel.h index 96ebaa94a92e..dacf69516002 100644 --- a/include/linux/mlx5/accel.h +++ b/include/linux/mlx5/accel.h @@ -126,7 +126,7 @@ enum mlx5_accel_ipsec_cap { MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN = 1 << 7, }; -#ifdef CONFIG_MLX5_FPGA_IPSEC +#ifdef CONFIG_MLX5_ACCEL u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev); @@ -152,5 +152,5 @@ static inline int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, const struct mlx5_accel_esp_xfrm_attrs *attrs) { return -EOPNOTSUPP; } -#endif -#endif +#endif /* CONFIG_MLX5_ACCEL */ +#endif /* __MLX5_ACCEL_H__ */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 1e6ca716635a..6a97ad601991 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -707,6 +707,9 @@ struct mlx5_core_dev { } roce; #ifdef CONFIG_MLX5_FPGA struct mlx5_fpga_device *fpga; +#endif +#ifdef CONFIG_MLX5_ACCEL + const struct mlx5_accel_ipsec_ops *ipsec_ops; #endif struct mlx5_clock clock; struct mlx5_ib_clock_info *clock_info; -- cgit v1.2.3 From 78fb6122fa2b6b55fafee1b32cd94913ad72f8a4 Mon Sep 17 00:00:00 2001 From: Huy Nguyen Date: Wed, 8 Apr 2020 20:09:05 -0500 Subject: net/mlx5: Add IPsec 
related Flow steering entry's fields Add FTE actions IPsec ENCRYPT/DECRYPT Add ipsec_obj_id field in FTE Add new action field MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME Signed-off-by: Huy Nguyen Reviewed-by: Raed Salem Signed-off-by: Saeed Mahameed --- include/linux/mlx5/fs.h | 5 ++++- include/linux/mlx5/mlx5_ifc.h | 12 ++++++++++-- 2 files changed, 14 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 6c5aa0a21425..92d991d93757 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -207,7 +207,10 @@ struct mlx5_flow_act { u32 action; struct mlx5_modify_hdr *modify_hdr; struct mlx5_pkt_reformat *pkt_reformat; - uintptr_t esp_id; + union { + u32 ipsec_obj_id; + uintptr_t esp_id; + }; u32 flags; struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH]; struct ib_counters *counters; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 791766e15d5c..9e64710bc54f 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -416,7 +416,11 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 table_miss_action_domain[0x1]; u8 termination_table[0x1]; u8 reformat_and_fwd_to_table[0x1]; - u8 reserved_at_1a[0x6]; + u8 reserved_at_1a[0x2]; + u8 ipsec_encrypt[0x1]; + u8 ipsec_decrypt[0x1]; + u8 reserved_at_1e[0x2]; + u8 termination_table_raw_traffic[0x1]; u8 reserved_at_21[0x1]; u8 log_max_ft_size[0x6]; @@ -2965,6 +2969,8 @@ enum { MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100, MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 = 0x400, MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800, + MLX5_FLOW_CONTEXT_ACTION_IPSEC_DECRYPT = 0x1000, + MLX5_FLOW_CONTEXT_ACTION_IPSEC_ENCRYPT = 0x2000, }; enum { @@ -3006,7 +3012,8 @@ struct mlx5_ifc_flow_context_bits { struct mlx5_ifc_vlan_bits push_vlan_2; - u8 reserved_at_120[0xe0]; + u8 ipsec_obj_id[0x20]; + u8 reserved_at_140[0xc0]; struct mlx5_ifc_fte_match_param_bits match_value; @@ -5752,6 +5759,7 @@ enum { MLX5_ACTION_IN_FIELD_METADATA_REG_C_7 = 0x58, MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM = 0x59, MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM = 0x5B, + MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME = 0x5D, }; struct mlx5_ifc_alloc_modify_header_context_out_bits { -- cgit v1.2.3 From 55f656cdb851bae32980d83d2494201e79d93b63 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Tue, 14 Jul 2020 20:03:07 +0300 Subject: net: sched: Do not drop root lock in tcf_qevent_handle() Mirred currently does not mix well with blocks executed after the qdisc root lock is taken. This includes classification blocks (such as in PRIO, ETS, DRR qdiscs) and qevents. The locking caused by the packet mirrored by mirred can cause deadlocks: either when the thread of execution attempts to take the lock a second time, or when two threads end up waiting on each other's locks. The qevent patchset attempted to not introduce further badness of this sort, and dropped the lock before executing the qevent block. However this lead to too little locking and races between qdisc configuration and packet enqueue in the RED qdisc. Before the deadlock issues are solved in a way that can be applied across many qdiscs reasonably easily, do for qevents what is done for the classification blocks and just keep holding the root lock. 
Signed-off-by: Petr Machata Signed-off-by: Jakub Kicinski --- include/net/pkt_cls.h | 4 ++-- net/sched/cls_api.c | 8 +------- net/sched/sch_red.c | 6 +++--- 3 files changed, 6 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 690a7f49c8f9..d4d461236351 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -568,7 +568,7 @@ void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch); int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr, struct netlink_ext_ack *extack); struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb, - spinlock_t *root_lock, struct sk_buff **to_free, int *ret); + struct sk_buff **to_free, int *ret); int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe); #else static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch, @@ -591,7 +591,7 @@ static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlatt static inline struct sk_buff * tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb, - spinlock_t *root_lock, struct sk_buff **to_free, int *ret) + struct sk_buff **to_free, int *ret) { return skb; } diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 322b279154de..b2b7440c2ae7 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -3822,7 +3822,7 @@ int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index EXPORT_SYMBOL(tcf_qevent_validate_change); struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb, - spinlock_t *root_lock, struct sk_buff **to_free, int *ret) + struct sk_buff **to_free, int *ret) { struct tcf_result cl_res; struct tcf_proto *fl; @@ -3832,9 +3832,6 @@ struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, stru fl = rcu_dereference_bh(qe->filter_chain); - if (root_lock) - spin_unlock(root_lock); - switch (tcf_classify(skb, fl, &cl_res, false)) { case TC_ACT_SHOT: qdisc_qstats_drop(sch); @@ -3853,9 +3850,6 @@ struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, stru return NULL; } - if (root_lock) - spin_lock(root_lock); - return skb; } EXPORT_SYMBOL(tcf_qevent_handle); diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index de2be4d04ed6..a79602f7fab8 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -94,7 +94,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_ if (INET_ECN_set_ce(skb)) { q->stats.prob_mark++; - skb = tcf_qevent_handle(&q->qe_mark, sch, skb, root_lock, to_free, &ret); + skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret); if (!skb) return NET_XMIT_CN | ret; } else if (!red_use_nodrop(q)) { @@ -114,7 +114,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_ if (INET_ECN_set_ce(skb)) { q->stats.forced_mark++; - skb = tcf_qevent_handle(&q->qe_mark, sch, skb, root_lock, to_free, &ret); + skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret); if (!skb) return NET_XMIT_CN | ret; } else if (!red_use_nodrop(q)) { @@ -137,7 +137,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_ return ret; congestion_drop: - skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, root_lock, to_free, &ret); + skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret); if (!skb) return NET_XMIT_CN | ret; -- cgit v1.2.3 From 
ac5c66f261b7174d0d9aaeb2bf9f8c2c2dbad0bd Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Tue, 14 Jul 2020 20:03:08 +0300 Subject: Revert "net: sched: Pass root lock to Qdisc_ops.enqueue" This reverts commit aebe4426ccaa4838f36ea805cdf7d76503e65117. Signed-off-by: Petr Machata Signed-off-by: Jakub Kicinski --- include/net/sch_generic.h | 6 ++---- net/core/dev.c | 4 ++-- net/sched/sch_atm.c | 4 ++-- net/sched/sch_blackhole.c | 2 +- net/sched/sch_cake.c | 2 +- net/sched/sch_cbq.c | 4 ++-- net/sched/sch_cbs.c | 18 +++++++++--------- net/sched/sch_choke.c | 2 +- net/sched/sch_codel.c | 2 +- net/sched/sch_drr.c | 4 ++-- net/sched/sch_dsmark.c | 4 ++-- net/sched/sch_etf.c | 2 +- net/sched/sch_ets.c | 4 ++-- net/sched/sch_fifo.c | 6 +++--- net/sched/sch_fq.c | 2 +- net/sched/sch_fq_codel.c | 2 +- net/sched/sch_fq_pie.c | 2 +- net/sched/sch_generic.c | 4 ++-- net/sched/sch_gred.c | 2 +- net/sched/sch_hfsc.c | 6 +++--- net/sched/sch_hhf.c | 2 +- net/sched/sch_htb.c | 4 ++-- net/sched/sch_multiq.c | 4 ++-- net/sched/sch_netem.c | 8 ++++---- net/sched/sch_pie.c | 2 +- net/sched/sch_plug.c | 2 +- net/sched/sch_prio.c | 6 +++--- net/sched/sch_qfq.c | 4 ++-- net/sched/sch_red.c | 4 ++-- net/sched/sch_sfb.c | 4 ++-- net/sched/sch_sfq.c | 2 +- net/sched/sch_skbprio.c | 2 +- net/sched/sch_taprio.c | 4 ++-- net/sched/sch_tbf.c | 10 +++++----- net/sched/sch_teql.c | 4 ++-- 35 files changed, 71 insertions(+), 73 deletions(-) (limited to 'include') diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index fceb3d63c925..c510b03b9751 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -57,7 +57,6 @@ struct qdisc_skb_head { struct Qdisc { int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch, - spinlock_t *root_lock, struct sk_buff **to_free); struct sk_buff * (*dequeue)(struct Qdisc *sch); unsigned int flags; @@ -242,7 +241,6 @@ struct Qdisc_ops { int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch, - spinlock_t *root_lock, struct sk_buff **to_free); struct sk_buff * (*dequeue)(struct Qdisc *); struct sk_buff * (*peek)(struct Qdisc *); @@ -790,11 +788,11 @@ static inline void qdisc_calculate_pkt_len(struct sk_buff *skb, #endif } -static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { qdisc_calculate_pkt_len(skb, sch); - return sch->enqueue(skb, sch, root_lock, to_free); + return sch->enqueue(skb, sch, to_free); } static inline void _bstats_update(struct gnet_stats_basic_packed *bstats, diff --git a/net/core/dev.c b/net/core/dev.c index b61075828358..062a00fdca9b 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3749,7 +3749,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, qdisc_calculate_pkt_len(skb, q); if (q->flags & TCQ_F_NOLOCK) { - rc = q->enqueue(skb, q, NULL, &to_free) & NET_XMIT_MASK; + rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK; qdisc_run(q); if (unlikely(to_free)) @@ -3792,7 +3792,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, qdisc_run_end(q); rc = NET_XMIT_SUCCESS; } else { - rc = q->enqueue(skb, q, root_lock, &to_free) & NET_XMIT_MASK; + rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK; if (qdisc_run_begin(q)) { if (unlikely(contended)) { spin_unlock(&q->busylock); diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index 1d5e422d9be2..1c281cc81f57 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -374,7 +374,7 @@ static struct tcf_block 
*atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl, /* --------------------------- Qdisc operations ---------------------------- */ -static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct atm_qdisc_data *p = qdisc_priv(sch); @@ -432,7 +432,7 @@ done: #endif } - ret = qdisc_enqueue(skb, flow->q, root_lock, to_free); + ret = qdisc_enqueue(skb, flow->q, to_free); if (ret != NET_XMIT_SUCCESS) { drop: __maybe_unused if (net_xmit_drop_count(ret)) { diff --git a/net/sched/sch_blackhole.c b/net/sched/sch_blackhole.c index 187644657c4f..a7f7667ae984 100644 --- a/net/sched/sch_blackhole.c +++ b/net/sched/sch_blackhole.c @@ -13,7 +13,7 @@ #include #include -static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { qdisc_drop(skb, sch, to_free); diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index b3cdcd86cbfd..561d20c9adca 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -1687,7 +1687,7 @@ hash: static void cake_reconfigure(struct Qdisc *sch); -static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct cake_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 9e16fdf47fe7..b2130df933a7 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -356,7 +356,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl) } static int -cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct cbq_sched_data *q = qdisc_priv(sch); @@ -373,7 +373,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, return ret; } - ret = qdisc_enqueue(skb, cl->q, root_lock, to_free); + ret = qdisc_enqueue(skb, cl->q, to_free); if (ret == NET_XMIT_SUCCESS) { sch->q.qlen++; cbq_mark_toplevel(q, cl); diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c index 7af15ebe07f7..2eaac2ff380f 100644 --- a/net/sched/sch_cbs.c +++ b/net/sched/sch_cbs.c @@ -77,7 +77,7 @@ struct cbs_sched_data { s64 sendslope; /* in bytes/s */ s64 idleslope; /* in bytes/s */ struct qdisc_watchdog watchdog; - int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, + int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free); struct sk_buff *(*dequeue)(struct Qdisc *sch); struct Qdisc *qdisc; @@ -85,13 +85,13 @@ struct cbs_sched_data { }; static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch, - struct Qdisc *child, spinlock_t *root_lock, + struct Qdisc *child, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb); int err; - err = child->ops->enqueue(skb, child, root_lock, to_free); + err = child->ops->enqueue(skb, child, to_free); if (err != NET_XMIT_SUCCESS) return err; @@ -101,16 +101,16 @@ static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch, return NET_XMIT_SUCCESS; } -static int cbs_enqueue_offload(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int cbs_enqueue_offload(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct cbs_sched_data *q = qdisc_priv(sch); struct Qdisc *qdisc = q->qdisc; - return cbs_child_enqueue(skb, sch, 
qdisc, root_lock, to_free); + return cbs_child_enqueue(skb, sch, qdisc, to_free); } -static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct cbs_sched_data *q = qdisc_priv(sch); @@ -124,15 +124,15 @@ static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch, spinlock_t * q->last = ktime_get_ns(); } - return cbs_child_enqueue(skb, sch, qdisc, root_lock, to_free); + return cbs_child_enqueue(skb, sch, qdisc, to_free); } -static int cbs_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int cbs_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct cbs_sched_data *q = qdisc_priv(sch); - return q->enqueue(skb, sch, root_lock, to_free); + return q->enqueue(skb, sch, to_free); } /* timediff is in ns, slope is in bytes/s */ diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c index baf3faee31aa..bd618b00d319 100644 --- a/net/sched/sch_choke.c +++ b/net/sched/sch_choke.c @@ -210,7 +210,7 @@ static bool choke_match_random(const struct choke_sched_data *q, return choke_match_flow(oskb, nskb); } -static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct choke_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c index 1d94837abdd8..30169b3adbbb 100644 --- a/net/sched/sch_codel.c +++ b/net/sched/sch_codel.c @@ -108,7 +108,7 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch) return skb; } -static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct codel_sched_data *q; diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index ad14df2ecf3a..dde564670ad8 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c @@ -337,7 +337,7 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch, return NULL; } -static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb); @@ -355,7 +355,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_ } first = !cl->qdisc->q.qlen; - err = qdisc_enqueue(skb, cl->qdisc, root_lock, to_free); + err = qdisc_enqueue(skb, cl->qdisc, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { if (net_xmit_drop_count(err)) { cl->qstats.drops++; diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 76a9c4f277f2..2b88710994d7 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -198,7 +198,7 @@ static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl, /* --------------------------- Qdisc operations ---------------------------- */ -static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb); @@ -267,7 +267,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *ro } } - err = qdisc_enqueue(skb, p->q, root_lock, to_free); + err = qdisc_enqueue(skb, p->q, to_free); if (err != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(err)) 
qdisc_qstats_drop(sch); diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c index 7a7c50a68115..c48f91075b5c 100644 --- a/net/sched/sch_etf.c +++ b/net/sched/sch_etf.c @@ -160,7 +160,7 @@ static void report_sock_error(struct sk_buff *skb, u32 err, u8 code) } static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch, - spinlock_t *root_lock, struct sk_buff **to_free) + struct sk_buff **to_free) { struct etf_sched_data *q = qdisc_priv(sch); struct rb_node **p = &q->head.rb_root.rb_node, *parent = NULL; diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c index 1af86f30b18e..c1e84d1eeaba 100644 --- a/net/sched/sch_ets.c +++ b/net/sched/sch_ets.c @@ -415,7 +415,7 @@ static struct ets_class *ets_classify(struct sk_buff *skb, struct Qdisc *sch, return &q->classes[band]; } -static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb); @@ -433,7 +433,7 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t } first = !cl->qdisc->q.qlen; - err = qdisc_enqueue(skb, cl->qdisc, root_lock, to_free); + err = qdisc_enqueue(skb, cl->qdisc, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { if (net_xmit_drop_count(err)) { cl->qstats.drops++; diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c index b4da5b624ad8..a579a4131d22 100644 --- a/net/sched/sch_fifo.c +++ b/net/sched/sch_fifo.c @@ -16,7 +16,7 @@ /* 1 band FIFO pseudo-"scheduler" */ -static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit)) @@ -25,7 +25,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *roo return qdisc_drop(skb, sch, to_free); } -static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { if (likely(sch->q.qlen < sch->limit)) @@ -34,7 +34,7 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *roo return qdisc_drop(skb, sch, to_free); } -static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { unsigned int prev_backlog; diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index a90d745c41e0..2fb76fc0cc31 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -439,7 +439,7 @@ static bool fq_packet_beyond_horizon(const struct sk_buff *skb, return unlikely((s64)skb->tstamp > (s64)(q->ktime_cache + q->horizon)); } -static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct fq_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index bca016ffc069..3106653c17f3 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -181,7 +181,7 @@ static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets, return idx; } -static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct fq_codel_sched_data 
*q = qdisc_priv(sch); diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c index 741840b9994f..f98c74018805 100644 --- a/net/sched/sch_fq_pie.c +++ b/net/sched/sch_fq_pie.c @@ -125,7 +125,7 @@ static inline void flow_queue_add(struct fq_pie_flow *flow, skb->next = NULL; } -static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct fq_pie_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 715cde1df9e4..265a61d011df 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -520,7 +520,7 @@ EXPORT_SYMBOL(netif_carrier_off); cheaper. */ -static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, spinlock_t *root_lock, +static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, struct sk_buff **to_free) { __qdisc_drop(skb, to_free); @@ -614,7 +614,7 @@ static inline struct skb_array *band2list(struct pfifo_fast_priv *priv, return &priv->q[band]; } -static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, spinlock_t *root_lock, +static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, struct sk_buff **to_free) { int band = prio2band[skb->priority & TC_PRIO_MAX]; diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index 7d67c6cd6605..8599c6f31b05 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c @@ -161,7 +161,7 @@ static bool gred_per_vq_red_flags_used(struct gred_sched *table) return false; } -static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct gred_sched_data *q = NULL; diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 13ba8648bb63..0f5f121404f3 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1528,8 +1528,8 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb) return -1; } -static int hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, - struct sk_buff **to_free) +static int +hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb); struct hfsc_class *cl; @@ -1545,7 +1545,7 @@ static int hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root } first = !cl->qdisc->q.qlen; - err = qdisc_enqueue(skb, cl->qdisc, root_lock, to_free); + err = qdisc_enqueue(skb, cl->qdisc, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { if (net_xmit_drop_count(err)) { cl->qstats.drops++; diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c index ddc6bf1d85d0..420ede875322 100644 --- a/net/sched/sch_hhf.c +++ b/net/sched/sch_hhf.c @@ -368,7 +368,7 @@ static unsigned int hhf_drop(struct Qdisc *sch, struct sk_buff **to_free) return bucket - q->buckets; } -static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct hhf_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index b07f29059f47..ba37defaca7a 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -576,7 +576,7 @@ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl) cl->prio_activity = 0; } -static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int htb_enqueue(struct sk_buff *skb, struct Qdisc 
*sch, struct sk_buff **to_free) { int uninitialized_var(ret); @@ -599,7 +599,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_ __qdisc_drop(skb, to_free); return ret; #endif - } else if ((ret = qdisc_enqueue(skb, cl->leaf.q, root_lock, + } else if ((ret = qdisc_enqueue(skb, cl->leaf.q, to_free)) != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(ret)) { qdisc_qstats_drop(sch); diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index 56bdc4bcdc63..5c27b4270b90 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c @@ -57,7 +57,7 @@ multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) } static int -multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct Qdisc *qdisc; @@ -74,7 +74,7 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, } #endif - ret = qdisc_enqueue(skb, qdisc, root_lock, to_free); + ret = qdisc_enqueue(skb, qdisc, to_free); if (ret == NET_XMIT_SUCCESS) { sch->q.qlen++; return NET_XMIT_SUCCESS; diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 8fb17483a34f..84f82771cdf5 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -431,7 +431,7 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch, * NET_XMIT_DROP: queue length didn't change. * NET_XMIT_SUCCESS: one skb was queued. */ -static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct netem_sched_data *q = qdisc_priv(sch); @@ -480,7 +480,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *roo u32 dupsave = q->duplicate; /* prevent duplicating a dup... 
*/ q->duplicate = 0; - rootq->enqueue(skb2, rootq, root_lock, to_free); + rootq->enqueue(skb2, rootq, to_free); q->duplicate = dupsave; rc_drop = NET_XMIT_SUCCESS; } @@ -604,7 +604,7 @@ finish_segs: skb_mark_not_on_list(segs); qdisc_skb_cb(segs)->pkt_len = segs->len; last_len = segs->len; - rc = qdisc_enqueue(segs, sch, root_lock, to_free); + rc = qdisc_enqueue(segs, sch, to_free); if (rc != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(rc)) qdisc_qstats_drop(sch); @@ -720,7 +720,7 @@ deliver: struct sk_buff *to_free = NULL; int err; - err = qdisc_enqueue(skb, q->qdisc, NULL, &to_free); + err = qdisc_enqueue(skb, q->qdisc, &to_free); kfree_skb_list(to_free); if (err != NET_XMIT_SUCCESS && net_xmit_drop_count(err)) { diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c index b305313b64e3..c65077f0c0f3 100644 --- a/net/sched/sch_pie.c +++ b/net/sched/sch_pie.c @@ -82,7 +82,7 @@ bool pie_drop_early(struct Qdisc *sch, struct pie_params *params, } EXPORT_SYMBOL_GPL(pie_drop_early); -static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct pie_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c index e5f8b4769b4d..cbc2ebca4548 100644 --- a/net/sched/sch_plug.c +++ b/net/sched/sch_plug.c @@ -84,7 +84,7 @@ struct plug_sched_data { u32 pkts_to_release; }; -static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct plug_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 46b7ce81c6e3..3eabb871a1d5 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -65,8 +65,8 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) return q->queues[band]; } -static int prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, - struct sk_buff **to_free) +static int +prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb); struct Qdisc *qdisc; @@ -83,7 +83,7 @@ static int prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root } #endif - ret = qdisc_enqueue(skb, qdisc, root_lock, to_free); + ret = qdisc_enqueue(skb, qdisc, to_free); if (ret == NET_XMIT_SUCCESS) { sch->qstats.backlog += len; sch->q.qlen++; diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 3cfe6262eb00..6335230a971e 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -1194,7 +1194,7 @@ static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q) return agg; } -static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb), gso_segs; @@ -1225,7 +1225,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_ gso_segs = skb_is_gso(skb) ? 
skb_shinfo(skb)->gso_segs : 1; first = !cl->qdisc->q.qlen; - err = qdisc_enqueue(skb, cl->qdisc, root_lock, to_free); + err = qdisc_enqueue(skb, cl->qdisc, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { pr_debug("qfq_enqueue: enqueue failed %d\n", err); if (net_xmit_drop_count(err)) { diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index a79602f7fab8..4cc0ad0b1189 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -67,7 +67,7 @@ static int red_use_nodrop(struct red_sched_data *q) return q->flags & TC_RED_NODROP; } -static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct red_sched_data *q = qdisc_priv(sch); @@ -126,7 +126,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_ break; } - ret = qdisc_enqueue(skb, child, root_lock, to_free); + ret = qdisc_enqueue(skb, child, to_free); if (likely(ret == NET_XMIT_SUCCESS)) { qdisc_qstats_backlog_inc(sch, skb); sch->q.qlen++; diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c index 356f6d1d30db..da047a37a3bf 100644 --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c @@ -276,7 +276,7 @@ static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl, return false; } -static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { @@ -399,7 +399,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_ } enqueue: - ret = qdisc_enqueue(skb, child, root_lock, to_free); + ret = qdisc_enqueue(skb, child, to_free); if (likely(ret == NET_XMIT_SUCCESS)) { qdisc_qstats_backlog_inc(sch, skb); sch->q.qlen++; diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index a1314510dc69..cae5dbbadc1c 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -343,7 +343,7 @@ static int sfq_headdrop(const struct sfq_sched_data *q) } static int -sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) +sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct sfq_sched_data *q = qdisc_priv(sch); unsigned int hash, dropped; diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c index f75f237c4436..7a5e4c454715 100644 --- a/net/sched/sch_skbprio.c +++ b/net/sched/sch_skbprio.c @@ -65,7 +65,7 @@ static u16 calc_new_low_prio(const struct skbprio_sched_data *q) return SKBPRIO_MAX_PRIORITY - 1; } -static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { const unsigned int max_priority = SKBPRIO_MAX_PRIORITY - 1; diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index daef2ff60a98..e981992634dd 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -410,7 +410,7 @@ done: return txtime; } -static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct taprio_sched *q = qdisc_priv(sch); @@ -435,7 +435,7 @@ static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *ro qdisc_qstats_backlog_inc(sch, skb); sch->q.qlen++; - return qdisc_enqueue(skb, child, root_lock, to_free); + return qdisc_enqueue(skb, child, to_free); } static struct sk_buff *taprio_peek_soft(struct Qdisc 
*sch) diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index c3eb5cdb83a8..78e79029dc63 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -187,7 +187,7 @@ static int tbf_offload_dump(struct Qdisc *sch) /* GSO packet is too big, segment it so that tbf can transmit * each segment in time */ -static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct tbf_sched_data *q = qdisc_priv(sch); @@ -206,7 +206,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_ skb_mark_not_on_list(segs); qdisc_skb_cb(segs)->pkt_len = segs->len; len += segs->len; - ret = qdisc_enqueue(segs, q->qdisc, root_lock, to_free); + ret = qdisc_enqueue(segs, q->qdisc, to_free); if (ret != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(ret)) qdisc_qstats_drop(sch); @@ -221,7 +221,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_ return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP; } -static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct tbf_sched_data *q = qdisc_priv(sch); @@ -231,10 +231,10 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_ if (qdisc_pkt_len(skb) > q->max_size) { if (skb_is_gso(skb) && skb_gso_validate_mac_len(skb, q->max_size)) - return tbf_segment(skb, sch, root_lock, to_free); + return tbf_segment(skb, sch, to_free); return qdisc_drop(skb, sch, to_free); } - ret = qdisc_enqueue(skb, q->qdisc, root_lock, to_free); + ret = qdisc_enqueue(skb, q->qdisc, to_free); if (ret != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(ret)) qdisc_qstats_drop(sch); diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index b586eec2eaeb..2f1f0a378408 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c @@ -72,8 +72,8 @@ struct teql_sched_data { /* "teql*" qdisc routines */ -static int teql_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, - struct sk_buff **to_free) +static int +teql_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct net_device *dev = qdisc_dev(sch); struct teql_sched_data *q = qdisc_priv(sch); -- cgit v1.2.3 From 2ae3de10abfe0be40c9d93ebc2f429b969abf008 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 18:30:48 -0700 Subject: spi: fix duplicated word in Change doubled word "as" to "as a". Change "Return: Return:" in kernel-doc notation to have only one "Return:". Signed-off-by: Randy Dunlap Cc: Mark Brown Cc: linux-spi@vger.kernel.org Link: https://lore.kernel.org/r/40354d64-be71-3952-a980-63a76a278145@infradead.org Signed-off-by: Mark Brown --- include/linux/spi/spi.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 5fcf5da13fdb..f8b721fcd5c6 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -971,7 +971,7 @@ struct spi_transfer { * each represented by a struct spi_transfer. The sequence is "atomic" * in the sense that no other spi_message may use that SPI bus until that * sequence completes. On some systems, many such sequences can execute as - * as single programmed DMA transfer. On all systems, these messages are + * a single programmed DMA transfer. 
On all systems, these messages are * queued, and might complete after transactions to other devices. Messages * sent to a given spi_device are always executed in FIFO order. * @@ -1234,7 +1234,7 @@ extern int spi_bus_unlock(struct spi_controller *ctlr); * * For more specific semantics see spi_sync(). * - * Return: Return: zero on success, else a negative error code. + * Return: zero on success, else a negative error code. */ static inline int spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers, -- cgit v1.2.3 From e2e5c55eed8023ecfbf4c9b623ef7dec343d1845 Mon Sep 17 00:00:00 2001 From: Mathieu Poirier Date: Tue, 14 Jul 2020 13:50:27 -0600 Subject: remoteproc: Add new RPROC_DETACHED state Add a new RPROC_DETACHED state to take into account scenarios where the remoteproc core needs to attach to a remote processor that is booted by another entity. Signed-off-by: Mathieu Poirier Reviewed-by: Bjorn Andersson Tested-by: Arnaud Pouliquen Link: https://lore.kernel.org/r/20200714195035.1426873-2-mathieu.poirier@linaro.org Signed-off-by: Bjorn Andersson --- drivers/remoteproc/remoteproc_sysfs.c | 1 + include/linux/remoteproc.h | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c index 52b871327b55..264759713934 100644 --- a/drivers/remoteproc/remoteproc_sysfs.c +++ b/drivers/remoteproc/remoteproc_sysfs.c @@ -72,6 +72,7 @@ static const char * const rproc_state_string[] = { [RPROC_RUNNING] = "running", [RPROC_CRASHED] = "crashed", [RPROC_DELETED] = "deleted", + [RPROC_DETACHED] = "detached", [RPROC_LAST] = "invalid", }; diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index e7b7bab8b235..21182ad2d059 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -400,6 +400,8 @@ struct rproc_ops { * @RPROC_RUNNING: device is up and running * @RPROC_CRASHED: device has crashed; need to start recovery * @RPROC_DELETED: device is deleted + * @RPROC_DETACHED: device has been booted by another entity and waiting + * for the core to attach to it * @RPROC_LAST: just keep this one at the end * * Please note that the values of these states are used as indices @@ -414,7 +416,8 @@ enum rproc_state { RPROC_RUNNING = 2, RPROC_CRASHED = 3, RPROC_DELETED = 4, - RPROC_LAST = 5, + RPROC_DETACHED = 5, + RPROC_LAST = 6, }; /** -- cgit v1.2.3 From a6a4f2857524007848f7957af432cddb4d43b593 Mon Sep 17 00:00:00 2001 From: Mathieu Poirier Date: Tue, 14 Jul 2020 13:50:28 -0600 Subject: remoteproc: Add new attach() remoteproc operation Add an new attach() operation in order to properly deal with scenarios where the remoteproc core needs to attach to a remote processor that has been booted by another entity. 
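As a rough illustration only (a hypothetical driver sketch, not part of this patch; the my_*-prefixed names are invented, only the rproc_ops fields come from the interface added below), a platform driver for an already-running remote processor might wire the new callback like this:

static int my_rproc_attach(struct rproc *rproc)
{
	/* The remote CPU is already up: only set up IPC, no firmware load. */
	return my_ipc_init(rproc->priv);	/* hypothetical helper */
}

static const struct rproc_ops my_rproc_ops = {
	.attach = my_rproc_attach,	/* used instead of .start when attaching */
	.start	= my_rproc_start,
	.stop	= my_rproc_stop,
	.kick	= my_rproc_kick,
};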
Signed-off-by: Mathieu Poirier Reviewed-by: Bjorn Andersson Tested-by: Arnaud Pouliquen Link: https://lore.kernel.org/r/20200714195035.1426873-3-mathieu.poirier@linaro.org Signed-off-by: Bjorn Andersson --- drivers/remoteproc/remoteproc_internal.h | 8 ++++++++ include/linux/remoteproc.h | 2 ++ 2 files changed, 10 insertions(+) (limited to 'include') diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h index 4ba7cb59d3e8..fc710866f8ce 100644 --- a/drivers/remoteproc/remoteproc_internal.h +++ b/drivers/remoteproc/remoteproc_internal.h @@ -79,6 +79,14 @@ static inline int rproc_unprepare_device(struct rproc *rproc) return 0; } +static inline int rproc_attach_device(struct rproc *rproc) +{ + if (rproc->ops->attach) + return rproc->ops->attach(rproc); + + return 0; +} + static inline int rproc_fw_sanity_check(struct rproc *rproc, const struct firmware *fw) { diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 21182ad2d059..bf6a310ba870 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -359,6 +359,7 @@ enum rsc_handling_status { * @unprepare: unprepare device after stop * @start: power on the device and boot it * @stop: power off the device + * @attach: attach to a device that is already powered up * @kick: kick a virtqueue (virtqueue id given as a parameter) * @da_to_va: optional platform hook to perform address translations * @parse_fw: parse firmware to extract information (e.g. resource table) @@ -379,6 +380,7 @@ struct rproc_ops { int (*unprepare)(struct rproc *rproc); int (*start)(struct rproc *rproc); int (*stop)(struct rproc *rproc); + int (*attach)(struct rproc *rproc); void (*kick)(struct rproc *rproc, int vqid); void * (*da_to_va)(struct rproc *rproc, u64 da, size_t len); int (*parse_fw)(struct rproc *rproc, const struct firmware *fw); -- cgit v1.2.3 From 4a4dca1941fedc1b02635ff0b4ed51b9857d0382 Mon Sep 17 00:00:00 2001 From: Mathieu Poirier Date: Tue, 14 Jul 2020 13:50:35 -0600 Subject: remoteproc: Properly handle firmware name when attaching This patch prevents the firmware image name from being displayed when the remoteproc core is attaching to a remote processor. This is needed since there is no guarantee about the nature of the firmware image that is loaded by the external entity. Signed-off-by: Mathieu Poirier Reviewed-by: Arnaud Pouliquen Reviewed-by: Bjorn Andersson Tested-by: Arnaud Pouliquen Link: https://lore.kernel.org/r/20200714195035.1426873-10-mathieu.poirier@linaro.org Signed-off-by: Bjorn Andersson --- drivers/remoteproc/remoteproc_core.c | 18 ++++++++++++++++++ drivers/remoteproc/remoteproc_sysfs.c | 16 ++++++++++++++-- include/linux/remoteproc.h | 2 ++ 3 files changed, 34 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 565778ccb39c..1b52004655ab 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -1627,6 +1627,14 @@ static int rproc_stop(struct rproc *rproc, bool crashed) rproc->state = RPROC_OFFLINE; + /* + * The remote processor has been stopped and is now offline, which means + * that the next time it is brought back online the remoteproc core will + * be responsible to load its firmware. As such it is no longer + * autonomous. 
+ */ + rproc->autonomous = false; + dev_info(dev, "stopped remote processor %s\n", rproc->name); return 0; @@ -2145,6 +2153,16 @@ int rproc_add(struct rproc *rproc) /* create debugfs entries */ rproc_create_debug_dir(rproc); + /* + * Remind ourselves the remote processor has been attached to rather + * than booted by the remoteproc core. This is important because the + * RPROC_DETACHED state will be lost as soon as the remote processor + * has been attached to. Used in firmware_show() and reset in + * rproc_stop(). + */ + if (rproc->state == RPROC_DETACHED) + rproc->autonomous = true; + /* if rproc is marked always-on, request it to boot */ if (rproc->auto_boot) { ret = rproc_trigger_auto_boot(rproc); diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c index 264759713934..eea514cec50e 100644 --- a/drivers/remoteproc/remoteproc_sysfs.c +++ b/drivers/remoteproc/remoteproc_sysfs.c @@ -15,8 +15,20 @@ static ssize_t firmware_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rproc *rproc = to_rproc(dev); - - return sprintf(buf, "%s\n", rproc->firmware); + const char *firmware = rproc->firmware; + + /* + * If the remote processor has been started by an external + * entity we have no idea of what image it is running. As such + * simply display a generic string rather then rproc->firmware. + * + * Here we rely on the autonomous flag because a remote processor + * may have been attached to and currently in a running state. + */ + if (rproc->autonomous) + firmware = "unknown"; + + return sprintf(buf, "%s\n", firmware); } /* Change firmware name via sysfs */ diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index bf6a310ba870..cf5e31556780 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -491,6 +491,7 @@ struct rproc_dump_segment { * @table_sz: size of @cached_table * @has_iommu: flag to indicate if remote processor is behind an MMU * @auto_boot: flag to indicate if remote processor should be auto-started + * @autonomous: true if an external entity has booted the remote processor * @dump_segments: list of segments in the firmware * @nb_vdev: number of vdev currently handled by rproc */ @@ -524,6 +525,7 @@ struct rproc { size_t table_sz; bool has_iommu; bool auto_boot; + bool autonomous; struct list_head dump_segments; int nb_vdev; u8 elf_class; -- cgit v1.2.3 From 303d6f62eb8f30516eed01c3faff188f4780de67 Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Mon, 13 Jul 2020 12:14:09 +0200 Subject: arm64: dts: ti: k3-*: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. 
Klimov Signed-off-by: Tero Kristo --- arch/arm64/boot/dts/ti/Makefile | 2 +- arch/arm64/boot/dts/ti/k3-am65-main.dtsi | 2 +- arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi | 2 +- arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi | 2 +- arch/arm64/boot/dts/ti/k3-am65.dtsi | 2 +- arch/arm64/boot/dts/ti/k3-am654-base-board.dts | 2 +- arch/arm64/boot/dts/ti/k3-am654.dtsi | 2 +- arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts | 2 +- arch/arm64/boot/dts/ti/k3-j721e-main.dtsi | 2 +- arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi | 2 +- arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi | 2 +- arch/arm64/boot/dts/ti/k3-j721e.dtsi | 2 +- include/dt-bindings/pinctrl/k3.h | 2 +- 13 files changed, 13 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/arch/arm64/boot/dts/ti/Makefile b/arch/arm64/boot/dts/ti/Makefile index b397945fdf73..05c0bebf65d4 100644 --- a/arch/arm64/boot/dts/ti/Makefile +++ b/arch/arm64/boot/dts/ti/Makefile @@ -3,7 +3,7 @@ # Make file to build device tree binaries for boards based on # Texas Instruments Inc processors # -# Copyright (C) 2016-2018 Texas Instruments Incorporated - http://www.ti.com/ +# Copyright (C) 2016-2018 Texas Instruments Incorporated - https://www.ti.com/ # dtb-$(CONFIG_ARCH_K3_AM6_SOC) += k3-am654-base-board.dtb diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi index dfb429bed56d..5b15fe24429c 100644 --- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi @@ -2,7 +2,7 @@ /* * Device Tree Source for AM6 SoC Family Main Domain peripherals * - * Copyright (C) 2016-2018 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2016-2018 Texas Instruments Incorporated - https://www.ti.com/ */ #include diff --git a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi index ae5f813d0cac..8c1abcfe0860 100644 --- a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi @@ -2,7 +2,7 @@ /* * Device Tree Source for AM6 SoC Family MCU Domain peripherals * - * Copyright (C) 2016-2018 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2016-2018 Texas Instruments Incorporated - https://www.ti.com/ */ &cbass_mcu { diff --git a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi index f7b9bc562e00..5f55b9e82cf1 100644 --- a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi @@ -2,7 +2,7 @@ /* * Device Tree Source for AM6 SoC Family Wakeup Domain peripherals * - * Copyright (C) 2016-2018 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2016-2018 Texas Instruments Incorporated - https://www.ti.com/ */ &cbass_wakeup { diff --git a/arch/arm64/boot/dts/ti/k3-am65.dtsi b/arch/arm64/boot/dts/ti/k3-am65.dtsi index 5be75e430965..27c0406b10ba 100644 --- a/arch/arm64/boot/dts/ti/k3-am65.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65.dtsi @@ -2,7 +2,7 @@ /* * Device Tree Source for AM6 SoC Family * - * Copyright (C) 2016-2018 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2016-2018 Texas Instruments Incorporated - https://www.ti.com/ */ #include diff --git a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts index b40153b4b400..611e66207010 100644 --- a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts +++ b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (C) 2016-2018 Texas Instruments 
Incorporated - http://www.ti.com/ + * Copyright (C) 2016-2018 Texas Instruments Incorporated - https://www.ti.com/ */ /dts-v1/; diff --git a/arch/arm64/boot/dts/ti/k3-am654.dtsi b/arch/arm64/boot/dts/ti/k3-am654.dtsi index b221abf43ac2..f0a6541b8042 100644 --- a/arch/arm64/boot/dts/ti/k3-am654.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am654.dtsi @@ -2,7 +2,7 @@ /* * Device Tree Source for AM6 SoC family in Quad core configuration * - * Copyright (C) 2016-2018 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2016-2018 Texas Instruments Incorporated - https://www.ti.com/ */ #include "k3-am65.dtsi" diff --git a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts index 165907fe1b0f..861065a41c79 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts +++ b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/ */ /dts-v1/; diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi index 96c929da639d..bdeda701af7c 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi @@ -2,7 +2,7 @@ /* * Device Tree Source for J721E SoC Family Main Domain peripherals * - * Copyright (C) 2016-2019 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2016-2019 Texas Instruments Incorporated - https://www.ti.com/ */ &cbass_main { diff --git a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi index e468cc1f8e4d..30a735bcd0c8 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi @@ -2,7 +2,7 @@ /* * Device Tree Source for J721E SoC Family MCU/WAKEUP Domain peripherals * - * Copyright (C) 2016-2019 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2016-2019 Texas Instruments Incorporated - https://www.ti.com/ */ &cbass_mcu_wakeup { diff --git a/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi index 7680109ca60a..8fa3361e5e45 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/ */ /dts-v1/; diff --git a/arch/arm64/boot/dts/ti/k3-j721e.dtsi b/arch/arm64/boot/dts/ti/k3-j721e.dtsi index 2f9a56d9b114..d035b61e0e16 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721e.dtsi @@ -2,7 +2,7 @@ /* * Device Tree Source for J721E SoC Family * - * Copyright (C) 2016-2019 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2016-2019 Texas Instruments Incorporated - https://www.ti.com/ */ #include diff --git a/include/dt-bindings/pinctrl/k3.h b/include/dt-bindings/pinctrl/k3.h index 499de6216581..b0eea7cc6e23 100644 --- a/include/dt-bindings/pinctrl/k3.h +++ b/include/dt-bindings/pinctrl/k3.h @@ -3,7 +3,7 @@ * This header provides constants for pinctrl bindings for TI's K3 SoC * family. 
* - * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com/ */ #ifndef _DT_BINDINGS_PINCTRL_TI_K3_H #define _DT_BINDINGS_PINCTRL_TI_K3_H -- cgit v1.2.3 From b766e3b0d5f698266ba353821fee09a3862d51d4 Mon Sep 17 00:00:00 2001 From: Kishon Vijay Abraham I Date: Mon, 29 Jun 2020 15:52:51 +0300 Subject: arm64: dts: ti: k3-j721e-main: Add system controller node and SERDES lane mux The system controller node manages the CTRL_MMR0 region. Add serdes_ln_ctrl node which is used for controlling the SERDES lane mux. Signed-off-by: Kishon Vijay Abraham I Signed-off-by: Roger Quadros Acked-by: Rob Herring Signed-off-by: Tero Kristo --- arch/arm64/boot/dts/ti/k3-j721e-main.dtsi | 27 ++++++++++++++++ include/dt-bindings/mux/mux-j721e-wiz.h | 53 +++++++++++++++++++++++++++++++ 2 files changed, 80 insertions(+) create mode 100644 include/dt-bindings/mux/mux-j721e-wiz.h (limited to 'include') diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi index 4b4a30ddcf34..93f83a2d0935 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi @@ -5,6 +5,8 @@ * Copyright (C) 2016-2019 Texas Instruments Incorporated - https://www.ti.com/ */ #include +#include +#include &cbass_main { msmc_ram: sram@70000000 { @@ -19,6 +21,31 @@ }; }; + scm_conf: scm-conf@100000 { + compatible = "ti,j721e-system-controller", "syscon", "simple-mfd"; + reg = <0 0x00100000 0 0x1c000>; /* excludes pinctrl region */ + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x0 0x00100000 0x1c000>; + + serdes_ln_ctrl: serdes-ln-ctrl@4080 { + compatible = "mmio-mux"; + reg = <0x00004080 0x50>; + #mux-control-cells = <1>; + mux-reg-masks = <0x4080 0x3>, <0x4084 0x3>, /* SERDES0 lane0/1 select */ + <0x4090 0x3>, <0x4094 0x3>, /* SERDES1 lane0/1 select */ + <0x40a0 0x3>, <0x40a4 0x3>, /* SERDES2 lane0/1 select */ + <0x40b0 0x3>, <0x40b4 0x3>, /* SERDES3 lane0/1 select */ + <0x40c0 0x3>, <0x40c4 0x3>, <0x40c8 0x3>, <0x40cc 0x3>; + /* SERDES4 lane0/1/2/3 select */ + idle-states = , , + , , + , , + , , + , , , ; + }; + }; + gic500: interrupt-controller@1800000 { compatible = "arm,gic-v3"; #address-cells = <2>; diff --git a/include/dt-bindings/mux/mux-j721e-wiz.h b/include/dt-bindings/mux/mux-j721e-wiz.h new file mode 100644 index 000000000000..fd1c4ea9fc7f --- /dev/null +++ b/include/dt-bindings/mux/mux-j721e-wiz.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for J721E WIZ. 
+ */ + +#ifndef _DT_BINDINGS_J721E_WIZ +#define _DT_BINDINGS_J721E_WIZ + +#define SERDES0_LANE0_QSGMII_LANE1 0x0 +#define SERDES0_LANE0_PCIE0_LANE0 0x1 +#define SERDES0_LANE0_USB3_0_SWAP 0x2 + +#define SERDES0_LANE1_QSGMII_LANE2 0x0 +#define SERDES0_LANE1_PCIE0_LANE1 0x1 +#define SERDES0_LANE1_USB3_0 0x2 + +#define SERDES1_LANE0_QSGMII_LANE3 0x0 +#define SERDES1_LANE0_PCIE1_LANE0 0x1 +#define SERDES1_LANE0_USB3_1_SWAP 0x2 +#define SERDES1_LANE0_SGMII_LANE0 0x3 + +#define SERDES1_LANE1_QSGMII_LANE4 0x0 +#define SERDES1_LANE1_PCIE1_LANE1 0x1 +#define SERDES1_LANE1_USB3_1 0x2 +#define SERDES1_LANE1_SGMII_LANE1 0x3 + +#define SERDES2_LANE0_PCIE2_LANE0 0x1 +#define SERDES2_LANE0_SGMII_LANE0 0x3 +#define SERDES2_LANE0_USB3_1_SWAP 0x2 + +#define SERDES2_LANE1_PCIE2_LANE1 0x1 +#define SERDES2_LANE1_USB3_1 0x2 +#define SERDES2_LANE1_SGMII_LANE1 0x3 + +#define SERDES3_LANE0_PCIE3_LANE0 0x1 +#define SERDES3_LANE0_USB3_0_SWAP 0x2 + +#define SERDES3_LANE1_PCIE3_LANE1 0x1 +#define SERDES3_LANE1_USB3_0 0x2 + +#define SERDES4_LANE0_EDP_LANE0 0x0 +#define SERDES4_LANE0_QSGMII_LANE5 0x2 + +#define SERDES4_LANE1_EDP_LANE1 0x0 +#define SERDES4_LANE1_QSGMII_LANE6 0x2 + +#define SERDES4_LANE2_EDP_LANE2 0x0 +#define SERDES4_LANE2_QSGMII_LANE7 0x2 + +#define SERDES4_LANE3_EDP_LANE3 0x0 +#define SERDES4_LANE3_QSGMII_LANE8 0x2 + +#endif /* _DT_BINDINGS_J721E_WIZ */ -- cgit v1.2.3 From ecdef9f459ad24bf987267df6c25967819016707 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Fri, 17 Jul 2020 10:42:29 +0800 Subject: block: change REQ_OP_ZONE_RESET and REQ_OP_ZONE_RESET_ALL to be odd numbers Currently REQ_OP_ZONE_RESET and REQ_OP_ZONE_RESET_ALL are defined as the even numbers 6 and 8, so such zone reset bios are treated as READ bios by bio_data_dir(), which is obviously misleading. The macro bio_data_dir() is defined in include/linux/bio.h as, 55 #define bio_data_dir(bio) \ 56 (op_is_write(bio_op(bio)) ? WRITE : READ) And op_is_write() is defined in include/linux/blk_types.h as, 397 static inline bool op_is_write(unsigned int op) 398 { 399 return (op & 1); 400 } The convention of op_is_write() is that an op code which transfers or alters data should be an odd number and be treated as a write op. bio_data_dir() treats the bio direction as READ if op_is_write() reports false, and as WRITE if it reports true. Because REQ_OP_ZONE_RESET and REQ_OP_ZONE_RESET_ALL are even numbers, bio_data_dir() reports them as READ bios even though they don't transfer data, which is misleading and might be wrong. These two commands reset the write pointers of the affected zones, and all content after the reset write pointer becomes invalid and inaccessible, so they are clearly not READ bios by any means. This patch changes REQ_OP_ZONE_RESET from 6 to 15, and changes REQ_OP_ZONE_RESET_ALL from 8 to 17. Now bios with these two op codes are treated as WRITE by bio_data_dir(). Although they don't transfer data, we keep them consistent with REQ_OP_DISCARD and REQ_OP_WRITE_ZEROES with the intuition that they change on-media content and should be WRITE requests. 
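For illustration only, a minimal stand-alone C sketch (not from the kernel tree; it merely mirrors the op_is_write()/bio_data_dir() convention quoted above) showing why the move to odd op code values makes zone reset bios classify as WRITE:

#include <stdio.h>
#include <stdbool.h>

/* Mirrors the kernel convention: odd op codes are treated as writes. */
#define REQ_OP_READ		0
#define REQ_OP_WRITE		1
#define REQ_OP_ZONE_RESET	15	/* was 6, previously classified as READ */
#define REQ_OP_ZONE_RESET_ALL	17	/* was 8, previously classified as READ */

static bool op_is_write(unsigned int op)
{
	return op & 1;
}

int main(void)
{
	const unsigned int ops[] = { REQ_OP_READ, REQ_OP_WRITE,
				     REQ_OP_ZONE_RESET, REQ_OP_ZONE_RESET_ALL };

	for (unsigned int i = 0; i < 4; i++)
		printf("op %u -> %s\n", ops[i],
		       op_is_write(ops[i]) ? "WRITE" : "READ");
	return 0;
}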
Signed-off-by: Coly Li Reviewed-by: Damien Le Moal Reviewed-by: Chaitanya Kulkarni Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Jens Axboe Cc: Johannes Thumshirn Cc: Keith Busch Cc: Shaun Tancheff Signed-off-by: Jens Axboe --- include/linux/blk_types.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 07facaf62b72..4ecf4fed171f 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -327,12 +327,8 @@ enum req_opf { REQ_OP_DISCARD = 3, /* securely erase sectors */ REQ_OP_SECURE_ERASE = 5, - /* reset a zone write pointer */ - REQ_OP_ZONE_RESET = 6, /* write the same sector many times */ REQ_OP_WRITE_SAME = 7, - /* reset all the zone present on the device */ - REQ_OP_ZONE_RESET_ALL = 8, /* write the zero filled sector many times */ REQ_OP_WRITE_ZEROES = 9, /* Open a zone */ @@ -343,6 +339,10 @@ enum req_opf { REQ_OP_ZONE_FINISH = 12, /* write data at the current zone write pointer */ REQ_OP_ZONE_APPEND = 13, + /* reset a zone write pointer */ + REQ_OP_ZONE_RESET = 15, + /* reset all the zone present on the device */ + REQ_OP_ZONE_RESET_ALL = 17, /* SCSI passthrough using struct scsi_request */ REQ_OP_SCSI_IN = 32, -- cgit v1.2.3 From ca00e66c1bc875aef7d84ec16418e08a14d0cda9 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Fri, 17 Jul 2020 15:22:26 +0900 Subject: ASoC: soc-dai.h: don't use discriminatory terms for comment soc-dai is using discriminatory terms for comment. This patch renames "slave" to "secondary", thus we can keep M/S initials. Signed-off-by: Kuninori Morimoto Link: https://lore.kernel.org/r/875zam3bmk.wl-kuninori.morimoto.gx@renesas.com Signed-off-by: Mark Brown --- include/sound/soc-dai.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index e0e061b8e9bd..05775f7b0bbc 100644 --- a/include/sound/soc-dai.h +++ b/include/sound/soc-dai.h @@ -76,12 +76,12 @@ struct snd_compr_stream; * * This is wrt the codec, the inverse is true for the interface * i.e. if the codec is clk and FRM master then the interface is - * clk and frame slave. + * clk and frame secondary. */ #define SND_SOC_DAIFMT_CBM_CFM (1 << 12) /* codec clk & FRM master */ -#define SND_SOC_DAIFMT_CBS_CFM (2 << 12) /* codec clk slave & FRM master */ -#define SND_SOC_DAIFMT_CBM_CFS (3 << 12) /* codec clk master & frame slave */ -#define SND_SOC_DAIFMT_CBS_CFS (4 << 12) /* codec clk & FRM slave */ +#define SND_SOC_DAIFMT_CBS_CFM (2 << 12) /* codec clk secondary & FRM master */ +#define SND_SOC_DAIFMT_CBM_CFS (3 << 12) /* codec clk master & frame secondary */ +#define SND_SOC_DAIFMT_CBS_CFS (4 << 12) /* codec clk & FRM secondary */ #define SND_SOC_DAIFMT_FORMAT_MASK 0x000f #define SND_SOC_DAIFMT_CLOCK_MASK 0x00f0 -- cgit v1.2.3 From 46011d431befb2a699727c6973d9ae71907f19c8 Mon Sep 17 00:00:00 2001 From: Sandipan Patra Date: Fri, 26 Jun 2020 10:44:01 +0200 Subject: soc/tegra: fuse: Add Tegra186 and Tegra194 SoC IDs SoC IDs for these generations had never been defined. Do so now. 
Signed-off-by: Sandipan Patra Reviewed-by: Jon Hunter Signed-off-by: Thierry Reding --- include/soc/tegra/fuse.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/soc/tegra/fuse.h b/include/soc/tegra/fuse.h index 252ea20fe4c1..1097feca41ed 100644 --- a/include/soc/tegra/fuse.h +++ b/include/soc/tegra/fuse.h @@ -12,6 +12,8 @@ #define TEGRA124 0x40 #define TEGRA132 0x13 #define TEGRA210 0x21 +#define TEGRA186 0x18 +#define TEGRA194 0x19 #define TEGRA_FUSE_SKU_CALIB_0 0xf0 #define TEGRA30_FUSE_SATA_CALIB 0x124 -- cgit v1.2.3 From ab91e7a6da7eeb8aa54843748652c186daee43eb Mon Sep 17 00:00:00 2001 From: He Zhe Date: Mon, 6 Jul 2020 17:52:24 +0800 Subject: freezer: Add unsafe versions of freezable_schedule_timeout_interruptible for NFS commit 0688e64bc600 ("NFS: Allow signal interruption of NFS4ERR_DELAYed operations") introduces nfs4_delay_interruptible which also needs an _unsafe version to avoid the following call trace for the same reason explained in commit 416ad3c9c006 ("freezer: add unsafe versions of freezable helpers for NFS") CPU: 4 PID: 3968 Comm: rm Tainted: G W 5.8.0-rc4 #1 Hardware name: Marvell OcteonTX CN96XX board (DT) Call trace: dump_backtrace+0x0/0x1dc show_stack+0x20/0x30 dump_stack+0xdc/0x150 debug_check_no_locks_held+0x98/0xa0 nfs4_delay_interruptible+0xd8/0x120 nfs4_handle_exception+0x130/0x170 nfs4_proc_rmdir+0x8c/0x220 nfs_rmdir+0xa4/0x360 vfs_rmdir.part.0+0x6c/0x1b0 do_rmdir+0x18c/0x210 __arm64_sys_unlinkat+0x64/0x7c el0_svc_common.constprop.0+0x7c/0x110 do_el0_svc+0x24/0xa0 el0_sync_handler+0x13c/0x1b8 el0_sync+0x158/0x180 Signed-off-by: He Zhe Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 2 +- include/linux/freezer.h | 14 ++++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index f670ff64b31e..113e0d6dd3d3 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -416,7 +416,7 @@ static int nfs4_delay_interruptible(long *timeout) { might_sleep(); - freezable_schedule_timeout_interruptible(nfs4_update_delay(timeout)); + freezable_schedule_timeout_interruptible_unsafe(nfs4_update_delay(timeout)); if (!signal_pending(current)) return 0; return __fatal_signal_pending(current) ? -EINTR :-ERESTARTSYS; diff --git a/include/linux/freezer.h b/include/linux/freezer.h index 21f5aa0b217f..27828145ca09 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h @@ -207,6 +207,17 @@ static inline long freezable_schedule_timeout_interruptible(long timeout) return __retval; } +/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */ +static inline long freezable_schedule_timeout_interruptible_unsafe(long timeout) +{ + long __retval; + + freezer_do_not_count(); + __retval = schedule_timeout_interruptible(timeout); + freezer_count_unsafe(); + return __retval; +} + /* Like schedule_timeout_killable(), but should not block the freezer. 
*/ static inline long freezable_schedule_timeout_killable(long timeout) { @@ -285,6 +296,9 @@ static inline void set_freezable(void) {} #define freezable_schedule_timeout_interruptible(timeout) \ schedule_timeout_interruptible(timeout) +#define freezable_schedule_timeout_interruptible_unsafe(timeout) \ + schedule_timeout_interruptible(timeout) + #define freezable_schedule_timeout_killable(timeout) \ schedule_timeout_killable(timeout) -- cgit v1.2.3 From e506addeff844237d60545ef4f6141de21471caf Mon Sep 17 00:00:00 2001 From: Miguel Rodríguez Pérez Date: Wed, 15 Jul 2020 20:40:57 +0200 Subject: net: cdc_ether: export usbnet_cdc_update_filter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This makes the function available to other drivers, like cdc_ncm. Signed-off-by: Miguel Rodríguez Pérez Acked-by: Oliver Neukum Signed-off-by: Bjørn Mork Signed-off-by: David S. Miller --- drivers/net/usb/cdc_ether.c | 3 ++- include/linux/usb/usbnet.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 2afe258e3648..8c1d61c2cbac 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -63,7 +63,7 @@ static const u8 mbm_guid[16] = { 0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a, }; -static void usbnet_cdc_update_filter(struct usbnet *dev) +void usbnet_cdc_update_filter(struct usbnet *dev) { struct net_device *net = dev->net; @@ -90,6 +90,7 @@ static void usbnet_cdc_update_filter(struct usbnet *dev) USB_CTRL_SET_TIMEOUT ); } +EXPORT_SYMBOL_GPL(usbnet_cdc_update_filter); /* probes control interface, claims data interface, collects the bulk * endpoints, activates data interface (if needed), maybe sets MTU. diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index b0bff3083278..3a856963a363 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -207,6 +207,7 @@ struct cdc_state { struct usb_interface *data; }; +extern void usbnet_cdc_update_filter(struct usbnet *dev); extern int usbnet_generic_cdc_bind(struct usbnet *, struct usb_interface *); extern int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf); extern int usbnet_cdc_bind(struct usbnet *, struct usb_interface *); -- cgit v1.2.3 From 1ea2b748b5eb4f9b6c8f66d098f1277afb724336 Mon Sep 17 00:00:00 2001 From: Bjørn Mork Date: Wed, 15 Jul 2020 20:40:58 +0200 Subject: net: usbnet: export usbnet_set_rx_mode() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This function can be reused by other usbnet minidrivers. Signed-off-by: Bjørn Mork Signed-off-by: David S. 
Miller --- drivers/net/usb/usbnet.c | 3 ++- include/linux/usb/usbnet.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 5ec97def3513..e45935a5856a 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1108,12 +1108,13 @@ static void __handle_link_change(struct usbnet *dev) clear_bit(EVENT_LINK_CHANGE, &dev->flags); } -static void usbnet_set_rx_mode(struct net_device *net) +void usbnet_set_rx_mode(struct net_device *net) { struct usbnet *dev = netdev_priv(net); usbnet_defer_kevent(dev, EVENT_SET_RX_MODE); } +EXPORT_SYMBOL_GPL(usbnet_set_rx_mode); static void __handle_set_rx_mode(struct usbnet *dev) { diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 3a856963a363..2e4f7721fc4e 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -274,6 +274,7 @@ extern int usbnet_set_link_ksettings(struct net_device *net, extern u32 usbnet_get_link(struct net_device *net); extern u32 usbnet_get_msglevel(struct net_device *); extern void usbnet_set_msglevel(struct net_device *, u32); +extern void usbnet_set_rx_mode(struct net_device *net); extern void usbnet_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); extern int usbnet_nway_reset(struct net_device *net); -- cgit v1.2.3 From e3a5a1e8b6548f5d37328e2d3571edc5c9e6d7c0 Mon Sep 17 00:00:00 2001 From: Priyaranjan Jha Date: Thu, 16 Jul 2020 12:12:35 -0700 Subject: tcp: add SNMP counter for no. of duplicate segments reported by DSACK There are two existing SNMP counters, TCPDSACKRecv and TCPDSACKOfoRecv, which are incremented depending on whether the DSACKed range is below the cumulative ACK sequence number or not. Unfortunately, these both implicitly assume each DSACK covers only one segment. This makes these counters unusable for estimating spurious retransmit rates, or real/non-spurious loss rate. This patch introduces a new SNMP counter, TCPDSACKRecvSegs, which tracks the estimated number of duplicate segments based on: (DSACKed sequence range) / MSS. This counter is usable for estimating spurious retransmit rates, or real/non-spurious loss rate. Signed-off-by: Priyaranjan Jha Signed-off-by: Neal Cardwell Signed-off-by: Yuchung Cheng Signed-off-by: Soheil Hassas Yeganeh Signed-off-by: David S. 
Miller --- include/uapi/linux/snmp.h | 1 + net/ipv4/proc.c | 1 + net/ipv4/tcp_input.c | 1 + 3 files changed, 3 insertions(+) (limited to 'include') diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index 7d91f4debc48..cee9f8e6fce3 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -287,6 +287,7 @@ enum LINUX_MIB_TCPFASTOPENPASSIVEALTKEY, /* TCPFastOpenPassiveAltKey */ LINUX_MIB_TCPTIMEOUTREHASH, /* TCPTimeoutRehash */ LINUX_MIB_TCPDUPLICATEDATAREHASH, /* TCPDuplicateDataRehash */ + LINUX_MIB_TCPDSACKRECVSEGS, /* TCPDSACKRecvSegs */ __LINUX_MIB_MAX }; diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 75545a829a2b..1074df726ec0 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -292,6 +292,7 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPFastOpenPassiveAltKey", LINUX_MIB_TCPFASTOPENPASSIVEALTKEY), SNMP_MIB_ITEM("TcpTimeoutRehash", LINUX_MIB_TCPTIMEOUTREHASH), SNMP_MIB_ITEM("TcpDuplicateDataRehash", LINUX_MIB_TCPDUPLICATEDATAREHASH), + SNMP_MIB_ITEM("TCPDSACKRecvSegs", LINUX_MIB_TCPDSACKRECVSEGS), SNMP_MIB_SENTINEL }; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 5d6bbcb1e570..82906deb7874 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1153,6 +1153,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, } dup_segs = tcp_dsack_seen(tp, start_seq_0, end_seq_0, state); + NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECVSEGS, dup_segs); /* D-SACK for already forgotten data... Do dumb counting. */ if (tp->undo_marker && tp->undo_retrans > 0 && -- cgit v1.2.3 From d9473cbfb0c5cbb279dfdeaec780934729537d27 Mon Sep 17 00:00:00 2001 From: Mathieu Poirier Date: Tue, 14 Jul 2020 14:04:41 -0600 Subject: remoteproc: Make function rproc_resource_cleanup() public Make function rproc_resource_cleanup() public so that it can be used by platform drivers when allocating resources to be used by a detached remote processor. Acked-by: Arnaud Pouliquen Signed-off-by: Mathieu Poirier Link: https://lore.kernel.org/r/20200714200445.1427257-8-mathieu.poirier@linaro.org Signed-off-by: Bjorn Andersson --- drivers/remoteproc/remoteproc_core.c | 3 ++- include/linux/remoteproc.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 1b52004655ab..6fa9f75754b3 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -1274,7 +1274,7 @@ static void rproc_coredump_cleanup(struct rproc *rproc) * This function will free all resources acquired for @rproc, and it * is called whenever @rproc either shuts down or fails to boot. 
*/ -static void rproc_resource_cleanup(struct rproc *rproc) +void rproc_resource_cleanup(struct rproc *rproc) { struct rproc_mem_entry *entry, *tmp; struct rproc_debug_trace *trace, *ttmp; @@ -1318,6 +1318,7 @@ static void rproc_resource_cleanup(struct rproc *rproc) rproc_coredump_cleanup(rproc); } +EXPORT_SYMBOL(rproc_resource_cleanup); static int rproc_start(struct rproc *rproc, const struct firmware *fw) { diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index cf5e31556780..7c0567029f7c 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -610,6 +610,7 @@ void rproc_put(struct rproc *rproc); int rproc_add(struct rproc *rproc); int rproc_del(struct rproc *rproc); void rproc_free(struct rproc *rproc); +void rproc_resource_cleanup(struct rproc *rproc); struct rproc *devm_rproc_alloc(struct device *dev, const char *name, const struct rproc_ops *ops, -- cgit v1.2.3 From ef45fe470e1e5410db4af87abc5d5055427945ac Mon Sep 17 00:00:00 2001 From: Boris Burkov Date: Mon, 1 Jun 2020 13:12:05 -0700 Subject: blk-cgroup: show global disk stats in root cgroup io.stat In order to improve consistency and usability in cgroup stat accounting, we would like to support the root cgroup's io.stat. Since the root cgroup has processes doing io even if the system has no explicitly created cgroups, we need to be careful to avoid overhead in that case. For that reason, the rstat algorithms don't handle the root cgroup, so just turning the file on wouldn't give correct statistics. To get around this, we simulate flushing the iostat struct by filling it out directly from global disk stats. The result is a root cgroup io.stat file consistent with both /proc/diskstats and io.stat. Note that in order to collect the disk stats, we needed to iterate over devices. To facilitate that, we had to change the linkage of a disk_type to external so that it can be used from blk-cgroup.c to iterate over disks. Suggested-by: Tejun Heo Signed-off-by: Boris Burkov Acked-by: Tejun Heo Signed-off-by: Jens Axboe --- Documentation/admin-guide/cgroup-v2.rst | 3 +- block/blk-cgroup.c | 57 +++++++++++++++++++++++++++++++-- block/genhd.c | 4 +-- include/linux/genhd.h | 1 + 4 files changed, 58 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index ce3e05e41724..2d7c5907e0ce 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1483,8 +1483,7 @@ IO Interface Files ~~~~~~~~~~~~~~~~~~ io.stat - A read-only nested-keyed file which exists on non-root - cgroups. + A read-only nested-keyed file. Lines are keyed by $MAJ:$MIN device numbers and not ordered. The following nested keys are defined. diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 696d28151c9a..619a79b51068 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -782,12 +782,66 @@ static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu) rcu_read_unlock(); } +/* + * The rstat algorithms intentionally don't handle the root cgroup to avoid + * incurring overhead when no cgroups are defined. For that reason, + * cgroup_rstat_flush in blkcg_print_stat does not actually fill out the + * iostat in the root cgroup's blkcg_gq. + * + * However, we would like to re-use the printing code between the root and + * non-root cgroups to the extent possible. For that reason, we simulate + * flushing the root cgroup's stats by explicitly filling in the iostat + * with disk level statistics. 
+ */ +static void blkcg_fill_root_iostats(void) +{ + struct class_dev_iter iter; + struct device *dev; + + class_dev_iter_init(&iter, &block_class, NULL, &disk_type); + while ((dev = class_dev_iter_next(&iter))) { + struct gendisk *disk = dev_to_disk(dev); + struct hd_struct *part = disk_get_part(disk, 0); + struct blkcg_gq *blkg = blk_queue_root_blkg(disk->queue); + struct blkg_iostat tmp; + int cpu; + + memset(&tmp, 0, sizeof(tmp)); + for_each_possible_cpu(cpu) { + struct disk_stats *cpu_dkstats; + + cpu_dkstats = per_cpu_ptr(part->dkstats, cpu); + tmp.ios[BLKG_IOSTAT_READ] += + cpu_dkstats->ios[STAT_READ]; + tmp.ios[BLKG_IOSTAT_WRITE] += + cpu_dkstats->ios[STAT_WRITE]; + tmp.ios[BLKG_IOSTAT_DISCARD] += + cpu_dkstats->ios[STAT_DISCARD]; + // convert sectors to bytes + tmp.bytes[BLKG_IOSTAT_READ] += + cpu_dkstats->sectors[STAT_READ] << 9; + tmp.bytes[BLKG_IOSTAT_WRITE] += + cpu_dkstats->sectors[STAT_WRITE] << 9; + tmp.bytes[BLKG_IOSTAT_DISCARD] += + cpu_dkstats->sectors[STAT_DISCARD] << 9; + + u64_stats_update_begin(&blkg->iostat.sync); + blkg_iostat_set(&blkg->iostat.cur, &tmp); + u64_stats_update_end(&blkg->iostat.sync); + } + } +} + static int blkcg_print_stat(struct seq_file *sf, void *v) { struct blkcg *blkcg = css_to_blkcg(seq_css(sf)); struct blkcg_gq *blkg; - cgroup_rstat_flush(blkcg->css.cgroup); + if (!seq_css(sf)->parent) + blkcg_fill_root_iostats(); + else + cgroup_rstat_flush(blkcg->css.cgroup); + rcu_read_lock(); hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) { @@ -876,7 +930,6 @@ static int blkcg_print_stat(struct seq_file *sf, void *v) static struct cftype blkcg_files[] = { { .name = "stat", - .flags = CFTYPE_NOT_ON_ROOT, .seq_show = blkcg_print_stat, }, { } /* terminate */ diff --git a/block/genhd.c b/block/genhd.c index c42a49f2f537..8b1e9f48957c 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -38,8 +38,6 @@ static struct kobject *block_depr; static DEFINE_SPINLOCK(ext_devt_lock); static DEFINE_IDR(ext_devt_idr); -static const struct device_type disk_type; - static void disk_check_events(struct disk_events *ev, unsigned int *clearing_ptr); static void disk_alloc_events(struct gendisk *disk); @@ -1587,7 +1585,7 @@ static char *block_devnode(struct device *dev, umode_t *mode, return NULL; } -static const struct device_type disk_type = { +const struct device_type disk_type = { .name = "disk", .groups = disk_attr_groups, .release = disk_release, diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 31a54072ffd6..4ab853461dff 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -24,6 +24,7 @@ #define disk_to_dev(disk) (&(disk)->part0.__dev) #define part_to_dev(part) (&((part)->__dev)) +extern const struct device_type disk_type; extern struct device_type part_type; extern struct class block_class; -- cgit v1.2.3 From ce3aa9cc5109363099b7c4ac82e2c9768afcaf31 Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Fri, 17 Jul 2020 12:35:22 +0200 Subject: bpf, netns: Handle multiple link attachments Extend the BPF netns link callbacks to rebuild (grow/shrink) or update the prog_array at given position when link gets attached/updated/released. This let's us lift the limit of having just one link attached for the new attach type introduced by subsequent patch. No functional changes intended. 
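As a rough usage sketch only (the real call sites are in the kernel/bpf/net_namespace.c hunks below; run_array, idx and new_prog are assumed to be provided by the caller):

/* Update in place: swap the program backing the link at position idx. */
err = bpf_prog_array_update_at(run_array, idx, new_prog);
if (err)	/* -EINVAL for a negative index, -ENOENT if idx is out of range */
	goto out_unlock;

/* Release without reallocating: neutralize the slot with the dummy no-op program. */
WARN_ON(bpf_prog_array_delete_safe_at(run_array, idx));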
Signed-off-by: Jakub Sitnicki Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200717103536.397595-2-jakub@cloudflare.com --- include/linux/bpf.h | 3 ++ kernel/bpf/core.c | 55 ++++++++++++++++++++++++++++ kernel/bpf/net_namespace.c | 90 +++++++++++++++++++++++++++++++++++++++++----- 3 files changed, 139 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 54ad426dbea1..c8c9eabcd106 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -928,6 +928,9 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs, void bpf_prog_array_delete_safe(struct bpf_prog_array *progs, struct bpf_prog *old_prog); +int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index); +int bpf_prog_array_update_at(struct bpf_prog_array *array, int index, + struct bpf_prog *prog); int bpf_prog_array_copy_info(struct bpf_prog_array *array, u32 *prog_ids, u32 request_cnt, u32 *prog_cnt); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 9df4cc9a2907..7be02e555ab9 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1958,6 +1958,61 @@ void bpf_prog_array_delete_safe(struct bpf_prog_array *array, } } +/** + * bpf_prog_array_delete_safe_at() - Replaces the program at the given + * index into the program array with + * a dummy no-op program. + * @array: a bpf_prog_array + * @index: the index of the program to replace + * + * Skips over dummy programs, by not counting them, when calculating + * the the position of the program to replace. + * + * Return: + * * 0 - Success + * * -EINVAL - Invalid index value. Must be a non-negative integer. + * * -ENOENT - Index out of range + */ +int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index) +{ + return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog); +} + +/** + * bpf_prog_array_update_at() - Updates the program at the given index + * into the program array. + * @array: a bpf_prog_array + * @index: the index of the program to update + * @prog: the program to insert into the array + * + * Skips over dummy programs, by not counting them, when calculating + * the position of the program to update. + * + * Return: + * * 0 - Success + * * -EINVAL - Invalid index value. Must be a non-negative integer. 
+ * * -ENOENT - Index out of range + */ +int bpf_prog_array_update_at(struct bpf_prog_array *array, int index, + struct bpf_prog *prog) +{ + struct bpf_prog_array_item *item; + + if (unlikely(index < 0)) + return -EINVAL; + + for (item = array->items; item->prog; item++) { + if (item->prog == &dummy_bpf_prog.prog) + continue; + if (!index) { + WRITE_ONCE(item->prog, prog); + return 0; + } + index--; + } + return -ENOENT; +} + int bpf_prog_array_copy(struct bpf_prog_array *old_array, struct bpf_prog *exclude_prog, struct bpf_prog *include_prog, diff --git a/kernel/bpf/net_namespace.c b/kernel/bpf/net_namespace.c index 310241ca7991..e9c8e26ac8f2 100644 --- a/kernel/bpf/net_namespace.c +++ b/kernel/bpf/net_namespace.c @@ -36,12 +36,50 @@ static void netns_bpf_run_array_detach(struct net *net, bpf_prog_array_free(run_array); } +static int link_index(struct net *net, enum netns_bpf_attach_type type, + struct bpf_netns_link *link) +{ + struct bpf_netns_link *pos; + int i = 0; + + list_for_each_entry(pos, &net->bpf.links[type], node) { + if (pos == link) + return i; + i++; + } + return -ENOENT; +} + +static int link_count(struct net *net, enum netns_bpf_attach_type type) +{ + struct list_head *pos; + int i = 0; + + list_for_each(pos, &net->bpf.links[type]) + i++; + return i; +} + +static void fill_prog_array(struct net *net, enum netns_bpf_attach_type type, + struct bpf_prog_array *prog_array) +{ + struct bpf_netns_link *pos; + unsigned int i = 0; + + list_for_each_entry(pos, &net->bpf.links[type], node) { + prog_array->items[i].prog = pos->link.prog; + i++; + } +} + static void bpf_netns_link_release(struct bpf_link *link) { struct bpf_netns_link *net_link = container_of(link, struct bpf_netns_link, link); enum netns_bpf_attach_type type = net_link->netns_type; + struct bpf_prog_array *old_array, *new_array; struct net *net; + int cnt, idx; mutex_lock(&netns_bpf_mutex); @@ -53,9 +91,27 @@ static void bpf_netns_link_release(struct bpf_link *link) if (!net) goto out_unlock; - netns_bpf_run_array_detach(net, type); + /* Remember link position in case of safe delete */ + idx = link_index(net, type, net_link); list_del(&net_link->node); + cnt = link_count(net, type); + if (!cnt) { + netns_bpf_run_array_detach(net, type); + goto out_unlock; + } + + old_array = rcu_dereference_protected(net->bpf.run_array[type], + lockdep_is_held(&netns_bpf_mutex)); + new_array = bpf_prog_array_alloc(cnt, GFP_KERNEL); + if (!new_array) { + WARN_ON(bpf_prog_array_delete_safe_at(old_array, idx)); + goto out_unlock; + } + fill_prog_array(net, type, new_array); + rcu_assign_pointer(net->bpf.run_array[type], new_array); + bpf_prog_array_free(old_array); + out_unlock: mutex_unlock(&netns_bpf_mutex); } @@ -77,7 +133,7 @@ static int bpf_netns_link_update_prog(struct bpf_link *link, enum netns_bpf_attach_type type = net_link->netns_type; struct bpf_prog_array *run_array; struct net *net; - int ret = 0; + int idx, ret; if (old_prog && old_prog != link->prog) return -EPERM; @@ -95,7 +151,10 @@ static int bpf_netns_link_update_prog(struct bpf_link *link, run_array = rcu_dereference_protected(net->bpf.run_array[type], lockdep_is_held(&netns_bpf_mutex)); - WRITE_ONCE(run_array->items[0].prog, new_prog); + idx = link_index(net, type, net_link); + ret = bpf_prog_array_update_at(run_array, idx, new_prog); + if (ret) + goto out_unlock; old_prog = xchg(&link->prog, new_prog); bpf_prog_put(old_prog); @@ -309,18 +368,28 @@ int netns_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype) return ret; } +static int 
netns_bpf_max_progs(enum netns_bpf_attach_type type) +{ + switch (type) { + case NETNS_BPF_FLOW_DISSECTOR: + return 1; + default: + return 0; + } +} + static int netns_bpf_link_attach(struct net *net, struct bpf_link *link, enum netns_bpf_attach_type type) { struct bpf_netns_link *net_link = container_of(link, struct bpf_netns_link, link); struct bpf_prog_array *run_array; - int err; + int cnt, err; mutex_lock(&netns_bpf_mutex); - /* Allow attaching only one prog or link for now */ - if (!list_empty(&net->bpf.links[type])) { + cnt = link_count(net, type); + if (cnt >= netns_bpf_max_progs(type)) { err = -E2BIG; goto out_unlock; } @@ -341,16 +410,19 @@ static int netns_bpf_link_attach(struct net *net, struct bpf_link *link, if (err) goto out_unlock; - run_array = bpf_prog_array_alloc(1, GFP_KERNEL); + run_array = bpf_prog_array_alloc(cnt + 1, GFP_KERNEL); if (!run_array) { err = -ENOMEM; goto out_unlock; } - run_array->items[0].prog = link->prog; - rcu_assign_pointer(net->bpf.run_array[type], run_array); list_add_tail(&net_link->node, &net->bpf.links[type]); + fill_prog_array(net, type, run_array); + run_array = rcu_replace_pointer(net->bpf.run_array[type], run_array, + lockdep_is_held(&netns_bpf_mutex)); + bpf_prog_array_free(run_array); + out_unlock: mutex_unlock(&netns_bpf_mutex); return err; -- cgit v1.2.3 From e9ddbb7707ff5891616240026062b8c1e29864ca Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Fri, 17 Jul 2020 12:35:23 +0200 Subject: bpf: Introduce SK_LOOKUP program type with a dedicated attach point Add a new program type BPF_PROG_TYPE_SK_LOOKUP with a dedicated attach type BPF_SK_LOOKUP. The new program kind is to be invoked by the transport layer when looking up a listening socket for a new connection request for connection oriented protocols, or when looking up an unconnected socket for a packet for connection-less protocols. When called, SK_LOOKUP BPF program can select a socket that will receive the packet. This serves as a mechanism to overcome the limits of what bind() API allows to express. Two use-cases driving this work are: (1) steer packets destined to an IP range, on fixed port to a socket 192.0.2.0/24, port 80 -> NGINX socket (2) steer packets destined to an IP address, on any port to a socket 198.51.100.1, any port -> L7 proxy socket In its run-time context program receives information about the packet that triggered the socket lookup. Namely IP version, L4 protocol identifier, and address 4-tuple. Context can be further extended to include ingress interface identifier. To select a socket BPF program fetches it from a map holding socket references, like SOCKMAP or SOCKHASH, and calls bpf_sk_assign(ctx, sk, ...) helper to record the selection. Transport layer then uses the selected socket as a result of socket lookup. In its basic form, SK_LOOKUP acts as a filter and hence must return either SK_PASS or SK_DROP. If the program returns with SK_PASS, transport should look for a socket to receive the packet, or use the one selected by the program if available, while SK_DROP informs the transport layer that the lookup should fail. This patch only enables the user to attach an SK_LOOKUP program to a network namespace. Subsequent patches hook it up to run on local delivery path in ipv4 and ipv6 stacks. 
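To make the new program type concrete, below is a minimal BPF-C sketch of use-case (1) above: steer TCP connections destined to 192.0.2.0/24, port 80, to a single listening socket held in a SOCKMAP. It follows the pattern described in the message (fetch the socket from a map, call bpf_sk_assign(), return SK_PASS or SK_DROP). The map name, program name and ELF section name are invented for the example, and numeric AF_INET/IPPROTO_TCP values are used to keep it self-contained; it is a sketch, not part of the patch.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} nginx_sock SEC(".maps");

SEC("sk_lookup")
int steer_http(struct bpf_sk_lookup *ctx)
{
	__u32 key = 0;
	struct bpf_sock *sk;
	int err;

	if (ctx->family != 2 /* AF_INET */ ||
	    ctx->protocol != 6 /* IPPROTO_TCP */)
		return SK_PASS;

	/* local_ip4 is network byte order, local_port is host byte order */
	if ((ctx->local_ip4 & bpf_htonl(0xffffff00)) != bpf_htonl(0xc0000200))
		return SK_PASS;		/* not 192.0.2.0/24 */
	if (ctx->local_port != 80)
		return SK_PASS;

	sk = bpf_map_lookup_elem(&nginx_sock, &key);
	if (!sk)
		return SK_DROP;

	err = bpf_sk_assign(ctx, sk, 0);
	bpf_sk_release(sk);
	return err ? SK_DROP : SK_PASS;
}

char _license[] SEC("license") = "GPL";

Returning SK_PASS without a selection leaves the regular hashtable-based lookup in charge, matching the fall-through behaviour described above.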
Suggested-by: Marek Majkowski Signed-off-by: Jakub Sitnicki Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200717103536.397595-3-jakub@cloudflare.com --- include/linux/bpf-netns.h | 3 + include/linux/bpf.h | 1 + include/linux/bpf_types.h | 2 + include/linux/filter.h | 17 +++++ include/uapi/linux/bpf.h | 77 +++++++++++++++++++ kernel/bpf/net_namespace.c | 5 ++ kernel/bpf/syscall.c | 9 +++ kernel/bpf/verifier.c | 13 +++- net/core/filter.c | 180 +++++++++++++++++++++++++++++++++++++++++++++ scripts/bpf_helpers_doc.py | 9 ++- 10 files changed, 312 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/bpf-netns.h b/include/linux/bpf-netns.h index 47d5b0c708c9..722f799c1a2e 100644 --- a/include/linux/bpf-netns.h +++ b/include/linux/bpf-netns.h @@ -8,6 +8,7 @@ enum netns_bpf_attach_type { NETNS_BPF_INVALID = -1, NETNS_BPF_FLOW_DISSECTOR = 0, + NETNS_BPF_SK_LOOKUP, MAX_NETNS_BPF_ATTACH_TYPE }; @@ -17,6 +18,8 @@ to_netns_bpf_attach_type(enum bpf_attach_type attach_type) switch (attach_type) { case BPF_FLOW_DISSECTOR: return NETNS_BPF_FLOW_DISSECTOR; + case BPF_SK_LOOKUP: + return NETNS_BPF_SK_LOOKUP; default: return NETNS_BPF_INVALID; } diff --git a/include/linux/bpf.h b/include/linux/bpf.h index c8c9eabcd106..adb16bdc5f0a 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -249,6 +249,7 @@ enum bpf_arg_type { ARG_PTR_TO_INT, /* pointer to int */ ARG_PTR_TO_LONG, /* pointer to long */ ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */ + ARG_PTR_TO_SOCKET_OR_NULL, /* pointer to bpf_sock (fullsock) or NULL */ ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */ ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */ ARG_PTR_TO_ALLOC_MEM_OR_NULL, /* pointer to dynamically allocated memory or NULL */ diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index a18ae82a298a..a52a5688418e 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -64,6 +64,8 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2, #ifdef CONFIG_INET BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport, struct sk_reuseport_md, struct sk_reuseport_kern) +BPF_PROG_TYPE(BPF_PROG_TYPE_SK_LOOKUP, sk_lookup, + struct bpf_sk_lookup, struct bpf_sk_lookup_kern) #endif #if defined(CONFIG_BPF_JIT) BPF_PROG_TYPE(BPF_PROG_TYPE_STRUCT_OPS, bpf_struct_ops, diff --git a/include/linux/filter.h b/include/linux/filter.h index 0b0144752d78..fa1ea12ad2cd 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1278,4 +1278,21 @@ struct bpf_sockopt_kern { s32 retval; }; +struct bpf_sk_lookup_kern { + u16 family; + u16 protocol; + struct { + __be32 saddr; + __be32 daddr; + } v4; + struct { + const struct in6_addr *saddr; + const struct in6_addr *daddr; + } v6; + __be16 sport; + u16 dport; + struct sock *selected_sk; + bool no_reuseport; +}; + #endif /* __LINUX_FILTER_H__ */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 7ac3992dacfe..54d0c886e3ba 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -189,6 +189,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_STRUCT_OPS, BPF_PROG_TYPE_EXT, BPF_PROG_TYPE_LSM, + BPF_PROG_TYPE_SK_LOOKUP, }; enum bpf_attach_type { @@ -228,6 +229,7 @@ enum bpf_attach_type { BPF_XDP_DEVMAP, BPF_CGROUP_INET_SOCK_RELEASE, BPF_XDP_CPUMAP, + BPF_SK_LOOKUP, __MAX_BPF_ATTACH_TYPE }; @@ -3069,6 +3071,10 @@ union bpf_attr { * * long bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags) * Description + * Helper is overloaded depending on BPF program type. 
This + * description applies to **BPF_PROG_TYPE_SCHED_CLS** and + * **BPF_PROG_TYPE_SCHED_ACT** programs. + * * Assign the *sk* to the *skb*. When combined with appropriate * routing configuration to receive the packet towards the socket, * will cause *skb* to be delivered to the specified socket. @@ -3094,6 +3100,56 @@ union bpf_attr { * **-ESOCKTNOSUPPORT** if the socket type is not supported * (reuseport). * + * long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags) + * Description + * Helper is overloaded depending on BPF program type. This + * description applies to **BPF_PROG_TYPE_SK_LOOKUP** programs. + * + * Select the *sk* as a result of a socket lookup. + * + * For the operation to succeed passed socket must be compatible + * with the packet description provided by the *ctx* object. + * + * L4 protocol (**IPPROTO_TCP** or **IPPROTO_UDP**) must + * be an exact match. While IP family (**AF_INET** or + * **AF_INET6**) must be compatible, that is IPv6 sockets + * that are not v6-only can be selected for IPv4 packets. + * + * Only TCP listeners and UDP unconnected sockets can be + * selected. *sk* can also be NULL to reset any previous + * selection. + * + * *flags* argument can combination of following values: + * + * * **BPF_SK_LOOKUP_F_REPLACE** to override the previous + * socket selection, potentially done by a BPF program + * that ran before us. + * + * * **BPF_SK_LOOKUP_F_NO_REUSEPORT** to skip + * load-balancing within reuseport group for the socket + * being selected. + * + * On success *ctx->sk* will point to the selected socket. + * + * Return + * 0 on success, or a negative errno in case of failure. + * + * * **-EAFNOSUPPORT** if socket family (*sk->family*) is + * not compatible with packet family (*ctx->family*). + * + * * **-EEXIST** if socket has been already selected, + * potentially by another program, and + * **BPF_SK_LOOKUP_F_REPLACE** flag was not specified. + * + * * **-EINVAL** if unsupported flags were specified. + * + * * **-EPROTOTYPE** if socket L4 protocol + * (*sk->protocol*) doesn't match packet protocol + * (*ctx->protocol*). + * + * * **-ESOCKTNOSUPPORT** if socket is not in allowed + * state (TCP listening or UDP unconnected). + * * u64 bpf_ktime_get_boot_ns(void) * Description * Return the time elapsed since system boot, in nanoseconds. @@ -3607,6 +3663,12 @@ enum { BPF_RINGBUF_HDR_SZ = 8, }; +/* BPF_FUNC_sk_assign flags in bpf_sk_lookup context. */ +enum { + BPF_SK_LOOKUP_F_REPLACE = (1ULL << 0), + BPF_SK_LOOKUP_F_NO_REUSEPORT = (1ULL << 1), +}; + /* Mode for BPF_FUNC_skb_adjust_room helper. */ enum bpf_adj_room_mode { BPF_ADJ_ROOM_NET, @@ -4349,4 +4411,19 @@ struct bpf_pidns_info { __u32 pid; __u32 tgid; }; + +/* User accessible data for SK_LOOKUP programs. Add new fields at the end. 
*/ +struct bpf_sk_lookup { + __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */ + + __u32 family; /* Protocol family (AF_INET, AF_INET6) */ + __u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */ + __u32 remote_ip4; /* Network byte order */ + __u32 remote_ip6[4]; /* Network byte order */ + __u32 remote_port; /* Network byte order */ + __u32 local_ip4; /* Network byte order */ + __u32 local_ip6[4]; /* Network byte order */ + __u32 local_port; /* Host byte order */ +}; + #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/kernel/bpf/net_namespace.c b/kernel/bpf/net_namespace.c index e9c8e26ac8f2..38b368bccda2 100644 --- a/kernel/bpf/net_namespace.c +++ b/kernel/bpf/net_namespace.c @@ -373,6 +373,8 @@ static int netns_bpf_max_progs(enum netns_bpf_attach_type type) switch (type) { case NETNS_BPF_FLOW_DISSECTOR: return 1; + case NETNS_BPF_SK_LOOKUP: + return 64; default: return 0; } @@ -403,6 +405,9 @@ static int netns_bpf_link_attach(struct net *net, struct bpf_link *link, case NETNS_BPF_FLOW_DISSECTOR: err = flow_dissector_bpf_prog_attach_check(net, link->prog); break; + case NETNS_BPF_SK_LOOKUP: + err = 0; /* nothing to check */ + break; default: err = -EINVAL; break; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 7ea9dfbebd8c..d07417d17712 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -2022,6 +2022,10 @@ bpf_prog_load_check_attach(enum bpf_prog_type prog_type, default: return -EINVAL; } + case BPF_PROG_TYPE_SK_LOOKUP: + if (expected_attach_type == BPF_SK_LOOKUP) + return 0; + return -EINVAL; case BPF_PROG_TYPE_EXT: if (expected_attach_type) return -EINVAL; @@ -2756,6 +2760,7 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, case BPF_PROG_TYPE_CGROUP_SOCK: case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: case BPF_PROG_TYPE_CGROUP_SOCKOPT: + case BPF_PROG_TYPE_SK_LOOKUP: return attach_type == prog->expected_attach_type ? 
0 : -EINVAL; case BPF_PROG_TYPE_CGROUP_SKB: if (!capable(CAP_NET_ADMIN)) @@ -2817,6 +2822,8 @@ attach_type_to_prog_type(enum bpf_attach_type attach_type) return BPF_PROG_TYPE_CGROUP_SOCKOPT; case BPF_TRACE_ITER: return BPF_PROG_TYPE_TRACING; + case BPF_SK_LOOKUP: + return BPF_PROG_TYPE_SK_LOOKUP; default: return BPF_PROG_TYPE_UNSPEC; } @@ -2953,6 +2960,7 @@ static int bpf_prog_query(const union bpf_attr *attr, case BPF_LIRC_MODE2: return lirc_prog_query(attr, uattr); case BPF_FLOW_DISSECTOR: + case BPF_SK_LOOKUP: return netns_bpf_prog_query(attr, uattr); default: return -EINVAL; @@ -3891,6 +3899,7 @@ static int link_create(union bpf_attr *attr) ret = tracing_bpf_link_attach(attr, prog); break; case BPF_PROG_TYPE_FLOW_DISSECTOR: + case BPF_PROG_TYPE_SK_LOOKUP: ret = netns_bpf_link_create(attr, prog); break; default: diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 3c1efc9d08fd..9a6703bc3f36 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3878,10 +3878,14 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg, } meta->ref_obj_id = reg->ref_obj_id; } - } else if (arg_type == ARG_PTR_TO_SOCKET) { + } else if (arg_type == ARG_PTR_TO_SOCKET || + arg_type == ARG_PTR_TO_SOCKET_OR_NULL) { expected_type = PTR_TO_SOCKET; - if (type != expected_type) - goto err_type; + if (!(register_is_null(reg) && + arg_type == ARG_PTR_TO_SOCKET_OR_NULL)) { + if (type != expected_type) + goto err_type; + } } else if (arg_type == ARG_PTR_TO_BTF_ID) { expected_type = PTR_TO_BTF_ID; if (type != expected_type) @@ -7354,6 +7358,9 @@ static int check_return_code(struct bpf_verifier_env *env) return -ENOTSUPP; } break; + case BPF_PROG_TYPE_SK_LOOKUP: + range = tnum_range(SK_DROP, SK_PASS); + break; case BPF_PROG_TYPE_EXT: /* freplace program can return anything as its return value * depends on the to-be-replaced kernel func or bpf program. 
diff --git a/net/core/filter.c b/net/core/filter.c index bdd2382e655d..d099436b3ff5 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -9229,6 +9229,186 @@ const struct bpf_verifier_ops sk_reuseport_verifier_ops = { const struct bpf_prog_ops sk_reuseport_prog_ops = { }; + +BPF_CALL_3(bpf_sk_lookup_assign, struct bpf_sk_lookup_kern *, ctx, + struct sock *, sk, u64, flags) +{ + if (unlikely(flags & ~(BPF_SK_LOOKUP_F_REPLACE | + BPF_SK_LOOKUP_F_NO_REUSEPORT))) + return -EINVAL; + if (unlikely(sk && sk_is_refcounted(sk))) + return -ESOCKTNOSUPPORT; /* reject non-RCU freed sockets */ + if (unlikely(sk && sk->sk_state == TCP_ESTABLISHED)) + return -ESOCKTNOSUPPORT; /* reject connected sockets */ + + /* Check if socket is suitable for packet L3/L4 protocol */ + if (sk && sk->sk_protocol != ctx->protocol) + return -EPROTOTYPE; + if (sk && sk->sk_family != ctx->family && + (sk->sk_family == AF_INET || ipv6_only_sock(sk))) + return -EAFNOSUPPORT; + + if (ctx->selected_sk && !(flags & BPF_SK_LOOKUP_F_REPLACE)) + return -EEXIST; + + /* Select socket as lookup result */ + ctx->selected_sk = sk; + ctx->no_reuseport = flags & BPF_SK_LOOKUP_F_NO_REUSEPORT; + return 0; +} + +static const struct bpf_func_proto bpf_sk_lookup_assign_proto = { + .func = bpf_sk_lookup_assign, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_PTR_TO_SOCKET_OR_NULL, + .arg3_type = ARG_ANYTHING, +}; + +static const struct bpf_func_proto * +sk_lookup_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) +{ + switch (func_id) { + case BPF_FUNC_perf_event_output: + return &bpf_event_output_data_proto; + case BPF_FUNC_sk_assign: + return &bpf_sk_lookup_assign_proto; + case BPF_FUNC_sk_release: + return &bpf_sk_release_proto; + default: + return bpf_base_func_proto(func_id); + } +} + +static bool sk_lookup_is_valid_access(int off, int size, + enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info) +{ + if (off < 0 || off >= sizeof(struct bpf_sk_lookup)) + return false; + if (off % size != 0) + return false; + if (type != BPF_READ) + return false; + + switch (off) { + case offsetof(struct bpf_sk_lookup, sk): + info->reg_type = PTR_TO_SOCKET_OR_NULL; + return size == sizeof(__u64); + + case bpf_ctx_range(struct bpf_sk_lookup, family): + case bpf_ctx_range(struct bpf_sk_lookup, protocol): + case bpf_ctx_range(struct bpf_sk_lookup, remote_ip4): + case bpf_ctx_range(struct bpf_sk_lookup, local_ip4): + case bpf_ctx_range_till(struct bpf_sk_lookup, remote_ip6[0], remote_ip6[3]): + case bpf_ctx_range_till(struct bpf_sk_lookup, local_ip6[0], local_ip6[3]): + case bpf_ctx_range(struct bpf_sk_lookup, remote_port): + case bpf_ctx_range(struct bpf_sk_lookup, local_port): + bpf_ctx_record_field_size(info, sizeof(__u32)); + return bpf_ctx_narrow_access_ok(off, size, sizeof(__u32)); + + default: + return false; + } +} + +static u32 sk_lookup_convert_ctx_access(enum bpf_access_type type, + const struct bpf_insn *si, + struct bpf_insn *insn_buf, + struct bpf_prog *prog, + u32 *target_size) +{ + struct bpf_insn *insn = insn_buf; + + switch (si->off) { + case offsetof(struct bpf_sk_lookup, sk): + *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg, + offsetof(struct bpf_sk_lookup_kern, selected_sk)); + break; + + case offsetof(struct bpf_sk_lookup, family): + *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, + bpf_target_off(struct bpf_sk_lookup_kern, + family, 2, target_size)); + break; + + case offsetof(struct bpf_sk_lookup, 
protocol): + *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, + bpf_target_off(struct bpf_sk_lookup_kern, + protocol, 2, target_size)); + break; + + case offsetof(struct bpf_sk_lookup, remote_ip4): + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, + bpf_target_off(struct bpf_sk_lookup_kern, + v4.saddr, 4, target_size)); + break; + + case offsetof(struct bpf_sk_lookup, local_ip4): + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, + bpf_target_off(struct bpf_sk_lookup_kern, + v4.daddr, 4, target_size)); + break; + + case bpf_ctx_range_till(struct bpf_sk_lookup, + remote_ip6[0], remote_ip6[3]): { +#if IS_ENABLED(CONFIG_IPV6) + int off = si->off; + + off -= offsetof(struct bpf_sk_lookup, remote_ip6[0]); + off += bpf_target_off(struct in6_addr, s6_addr32[0], 4, target_size); + *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg, + offsetof(struct bpf_sk_lookup_kern, v6.saddr)); + *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, off); +#else + *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); +#endif + break; + } + case bpf_ctx_range_till(struct bpf_sk_lookup, + local_ip6[0], local_ip6[3]): { +#if IS_ENABLED(CONFIG_IPV6) + int off = si->off; + + off -= offsetof(struct bpf_sk_lookup, local_ip6[0]); + off += bpf_target_off(struct in6_addr, s6_addr32[0], 4, target_size); + *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg, + offsetof(struct bpf_sk_lookup_kern, v6.daddr)); + *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, off); +#else + *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); +#endif + break; + } + case offsetof(struct bpf_sk_lookup, remote_port): + *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, + bpf_target_off(struct bpf_sk_lookup_kern, + sport, 2, target_size)); + break; + + case offsetof(struct bpf_sk_lookup, local_port): + *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, + bpf_target_off(struct bpf_sk_lookup_kern, + dport, 2, target_size)); + break; + } + + return insn - insn_buf; +} + +const struct bpf_prog_ops sk_lookup_prog_ops = { +}; + +const struct bpf_verifier_ops sk_lookup_verifier_ops = { + .get_func_proto = sk_lookup_func_proto, + .is_valid_access = sk_lookup_is_valid_access, + .convert_ctx_access = sk_lookup_convert_ctx_access, +}; + #endif /* CONFIG_INET */ DEFINE_BPF_DISPATCHER(xdp) diff --git a/scripts/bpf_helpers_doc.py b/scripts/bpf_helpers_doc.py index 6843376733df..5bfa448b4704 100755 --- a/scripts/bpf_helpers_doc.py +++ b/scripts/bpf_helpers_doc.py @@ -404,6 +404,7 @@ class PrinterHelpers(Printer): type_fwds = [ 'struct bpf_fib_lookup', + 'struct bpf_sk_lookup', 'struct bpf_perf_event_data', 'struct bpf_perf_event_value', 'struct bpf_pidns_info', @@ -450,6 +451,7 @@ class PrinterHelpers(Printer): 'struct bpf_perf_event_data', 'struct bpf_perf_event_value', 'struct bpf_pidns_info', + 'struct bpf_sk_lookup', 'struct bpf_sock', 'struct bpf_sock_addr', 'struct bpf_sock_ops', @@ -487,6 +489,11 @@ class PrinterHelpers(Printer): 'struct sk_msg_buff': 'struct sk_msg_md', 'struct xdp_buff': 'struct xdp_md', } + # Helpers overloaded for different context types. 
+ overloaded_helpers = [ + 'bpf_get_socket_cookie', + 'bpf_sk_assign', + ] def print_header(self): header = '''\ @@ -543,7 +550,7 @@ class PrinterHelpers(Printer): for i, a in enumerate(proto['args']): t = a['type'] n = a['name'] - if proto['name'] == 'bpf_get_socket_cookie' and i == 0: + if proto['name'] in self.overloaded_helpers and i == 0: t = 'void' n = 'ctx' one_arg = '{}{}'.format(comma, self.map_type(t)) -- cgit v1.2.3 From 1559b4aa1db443096af493c7d621dc156054babe Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Fri, 17 Jul 2020 12:35:25 +0200 Subject: inet: Run SK_LOOKUP BPF program on socket lookup Run a BPF program before looking up a listening socket on the receive path. Program selects a listening socket to yield as result of socket lookup by calling bpf_sk_assign() helper and returning SK_PASS code. Program can revert its decision by assigning a NULL socket with bpf_sk_assign(). Alternatively, BPF program can also fail the lookup by returning with SK_DROP, or let the lookup continue as usual with SK_PASS on return, when no socket has been selected with bpf_sk_assign(). This lets the user match packets with listening sockets freely at the last possible point on the receive path, where we know that packets are destined for local delivery after undergoing policing, filtering, and routing. With BPF code selecting the socket, directing packets destined to an IP range or to a port range to a single socket becomes possible. In case multiple programs are attached, they are run in series in the order in which they were attached. The end result is determined from return codes of all the programs according to following rules: 1. If any program returned SK_PASS and selected a valid socket, the socket is used as result of socket lookup. 2. If more than one program returned SK_PASS and selected a socket, last selection takes effect. 3. If any program returned SK_DROP, and no program returned SK_PASS and selected a socket, socket lookup fails with -ECONNREFUSED. 4. If all programs returned SK_PASS and none of them selected a socket, socket lookup continues to htable-based lookup. Suggested-by: Marek Majkowski Signed-off-by: Jakub Sitnicki Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200717103536.397595-5-jakub@cloudflare.com --- include/linux/filter.h | 91 ++++++++++++++++++++++++++++++++++++++++++++++ kernel/bpf/net_namespace.c | 32 +++++++++++++++- net/core/filter.c | 3 ++ net/ipv4/inet_hashtables.c | 31 ++++++++++++++++ 4 files changed, 156 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/filter.h b/include/linux/filter.h index fa1ea12ad2cd..c4f54c216347 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1295,4 +1295,95 @@ struct bpf_sk_lookup_kern { bool no_reuseport; }; +extern struct static_key_false bpf_sk_lookup_enabled; + +/* Runners for BPF_SK_LOOKUP programs to invoke on socket lookup. + * + * Allowed return values for a BPF SK_LOOKUP program are SK_PASS and + * SK_DROP. Their meaning is as follows: + * + * SK_PASS && ctx.selected_sk != NULL: use selected_sk as lookup result + * SK_PASS && ctx.selected_sk == NULL: continue to htable-based socket lookup + * SK_DROP : terminate lookup with -ECONNREFUSED + * + * This macro aggregates return values and selected sockets from + * multiple BPF programs according to following rules in order: + * + * 1. If any program returned SK_PASS and a non-NULL ctx.selected_sk, + * macro result is SK_PASS and last ctx.selected_sk is used. + * 2. 
If any program returned SK_DROP return value, + * macro result is SK_DROP. + * 3. Otherwise result is SK_PASS and ctx.selected_sk is NULL. + * + * Caller must ensure that the prog array is non-NULL, and that the + * array as well as the programs it contains remain valid. + */ +#define BPF_PROG_SK_LOOKUP_RUN_ARRAY(array, ctx, func) \ + ({ \ + struct bpf_sk_lookup_kern *_ctx = &(ctx); \ + struct bpf_prog_array_item *_item; \ + struct sock *_selected_sk = NULL; \ + bool _no_reuseport = false; \ + struct bpf_prog *_prog; \ + bool _all_pass = true; \ + u32 _ret; \ + \ + migrate_disable(); \ + _item = &(array)->items[0]; \ + while ((_prog = READ_ONCE(_item->prog))) { \ + /* restore most recent selection */ \ + _ctx->selected_sk = _selected_sk; \ + _ctx->no_reuseport = _no_reuseport; \ + \ + _ret = func(_prog, _ctx); \ + if (_ret == SK_PASS && _ctx->selected_sk) { \ + /* remember last non-NULL socket */ \ + _selected_sk = _ctx->selected_sk; \ + _no_reuseport = _ctx->no_reuseport; \ + } else if (_ret == SK_DROP && _all_pass) { \ + _all_pass = false; \ + } \ + _item++; \ + } \ + _ctx->selected_sk = _selected_sk; \ + _ctx->no_reuseport = _no_reuseport; \ + migrate_enable(); \ + _all_pass || _selected_sk ? SK_PASS : SK_DROP; \ + }) + +static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol, + const __be32 saddr, const __be16 sport, + const __be32 daddr, const u16 dport, + struct sock **psk) +{ + struct bpf_prog_array *run_array; + struct sock *selected_sk = NULL; + bool no_reuseport = false; + + rcu_read_lock(); + run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]); + if (run_array) { + struct bpf_sk_lookup_kern ctx = { + .family = AF_INET, + .protocol = protocol, + .v4.saddr = saddr, + .v4.daddr = daddr, + .sport = sport, + .dport = dport, + }; + u32 act; + + act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN); + if (act == SK_PASS) { + selected_sk = ctx.selected_sk; + no_reuseport = ctx.no_reuseport; + } else { + selected_sk = ERR_PTR(-ECONNREFUSED); + } + } + rcu_read_unlock(); + *psk = selected_sk; + return no_reuseport; +} + #endif /* __LINUX_FILTER_H__ */ diff --git a/kernel/bpf/net_namespace.c b/kernel/bpf/net_namespace.c index 38b368bccda2..4e1bcaa2c3cb 100644 --- a/kernel/bpf/net_namespace.c +++ b/kernel/bpf/net_namespace.c @@ -25,6 +25,28 @@ struct bpf_netns_link { /* Protects updates to netns_bpf */ DEFINE_MUTEX(netns_bpf_mutex); +static void netns_bpf_attach_type_unneed(enum netns_bpf_attach_type type) +{ + switch (type) { + case NETNS_BPF_SK_LOOKUP: + static_branch_dec(&bpf_sk_lookup_enabled); + break; + default: + break; + } +} + +static void netns_bpf_attach_type_need(enum netns_bpf_attach_type type) +{ + switch (type) { + case NETNS_BPF_SK_LOOKUP: + static_branch_inc(&bpf_sk_lookup_enabled); + break; + default: + break; + } +} + /* Must be called with netns_bpf_mutex held. 
*/ static void netns_bpf_run_array_detach(struct net *net, enum netns_bpf_attach_type type) @@ -91,6 +113,9 @@ static void bpf_netns_link_release(struct bpf_link *link) if (!net) goto out_unlock; + /* Mark attach point as unused */ + netns_bpf_attach_type_unneed(type); + /* Remember link position in case of safe delete */ idx = link_index(net, type, net_link); list_del(&net_link->node); @@ -428,6 +453,9 @@ static int netns_bpf_link_attach(struct net *net, struct bpf_link *link, lockdep_is_held(&netns_bpf_mutex)); bpf_prog_array_free(run_array); + /* Mark attach point as used */ + netns_bpf_attach_type_need(type); + out_unlock: mutex_unlock(&netns_bpf_mutex); return err; @@ -503,8 +531,10 @@ static void __net_exit netns_bpf_pernet_pre_exit(struct net *net) mutex_lock(&netns_bpf_mutex); for (type = 0; type < MAX_NETNS_BPF_ATTACH_TYPE; type++) { netns_bpf_run_array_detach(net, type); - list_for_each_entry(net_link, &net->bpf.links[type], node) + list_for_each_entry(net_link, &net->bpf.links[type], node) { net_link->net = NULL; /* auto-detach link */ + netns_bpf_attach_type_unneed(type); + } if (net->bpf.progs[type]) bpf_prog_put(net->bpf.progs[type]); } diff --git a/net/core/filter.c b/net/core/filter.c index d099436b3ff5..2bd129b5ae74 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -9230,6 +9230,9 @@ const struct bpf_verifier_ops sk_reuseport_verifier_ops = { const struct bpf_prog_ops sk_reuseport_prog_ops = { }; +DEFINE_STATIC_KEY_FALSE(bpf_sk_lookup_enabled); +EXPORT_SYMBOL(bpf_sk_lookup_enabled); + BPF_CALL_3(bpf_sk_lookup_assign, struct bpf_sk_lookup_kern *, ctx, struct sock *, sk, u64, flags) { diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index ab64834837c8..4eb4cd8d20dd 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -299,6 +299,29 @@ static struct sock *inet_lhash2_lookup(struct net *net, return result; } +static inline struct sock *inet_lookup_run_bpf(struct net *net, + struct inet_hashinfo *hashinfo, + struct sk_buff *skb, int doff, + __be32 saddr, __be16 sport, + __be32 daddr, u16 hnum) +{ + struct sock *sk, *reuse_sk; + bool no_reuseport; + + if (hashinfo != &tcp_hashinfo) + return NULL; /* only TCP is supported */ + + no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_TCP, + saddr, sport, daddr, hnum, &sk); + if (no_reuseport || IS_ERR_OR_NULL(sk)) + return sk; + + reuse_sk = lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum); + if (reuse_sk) + sk = reuse_sk; + return sk; +} + struct sock *__inet_lookup_listener(struct net *net, struct inet_hashinfo *hashinfo, struct sk_buff *skb, int doff, @@ -310,6 +333,14 @@ struct sock *__inet_lookup_listener(struct net *net, struct sock *result = NULL; unsigned int hash2; + /* Lookup redirect from BPF */ + if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { + result = inet_lookup_run_bpf(net, hashinfo, skb, doff, + saddr, sport, daddr, hnum); + if (result) + goto done; + } + hash2 = ipv4_portaddr_hash(net, daddr, hnum); ilb2 = inet_lhash2_bucket(hashinfo, hash2); -- cgit v1.2.3 From 1122702f02678597c4f1c7d316365ef502aafe08 Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Fri, 17 Jul 2020 12:35:27 +0200 Subject: inet6: Run SK_LOOKUP BPF program on socket lookup Following ipv4 stack changes, run a BPF program attached to netns before looking up a listening socket. Program can return a listening socket to use as result of socket lookup, fail the lookup, or take no action. 
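Since several attached programs run in series and their verdicts are combined by the rules listed in the previous patch, a companion program can veto the lookup without selecting anything. The sketch below is illustrative only (the program name and the port range are invented); it shows the SK_DROP side of those rules and how a later program can respect a selection already made by an earlier one.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sk_lookup")
int refuse_low_ports(struct bpf_sk_lookup *ctx)
{
	/* A program that ran earlier may already have selected a socket. */
	if (ctx->sk)
		return SK_PASS;

	/* No selection so far: refuse connections to ports below 1024.
	 * A single SK_DROP with no socket selected anywhere in the chain
	 * fails the lookup with -ECONNREFUSED; otherwise the htable-based
	 * lookup proceeds as usual.
	 */
	if (ctx->local_port < 1024)
		return SK_DROP;
	return SK_PASS;
}

char _license[] SEC("license") = "GPL";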
Suggested-by: Marek Majkowski Signed-off-by: Jakub Sitnicki Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200717103536.397595-7-jakub@cloudflare.com --- include/linux/filter.h | 39 +++++++++++++++++++++++++++++++++++++++ net/ipv6/inet6_hashtables.c | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 74 insertions(+) (limited to 'include') diff --git a/include/linux/filter.h b/include/linux/filter.h index c4f54c216347..8252572db918 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1386,4 +1386,43 @@ static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol, return no_reuseport; } +#if IS_ENABLED(CONFIG_IPV6) +static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol, + const struct in6_addr *saddr, + const __be16 sport, + const struct in6_addr *daddr, + const u16 dport, + struct sock **psk) +{ + struct bpf_prog_array *run_array; + struct sock *selected_sk = NULL; + bool no_reuseport = false; + + rcu_read_lock(); + run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]); + if (run_array) { + struct bpf_sk_lookup_kern ctx = { + .family = AF_INET6, + .protocol = protocol, + .v6.saddr = saddr, + .v6.daddr = daddr, + .sport = sport, + .dport = dport, + }; + u32 act; + + act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN); + if (act == SK_PASS) { + selected_sk = ctx.selected_sk; + no_reuseport = ctx.no_reuseport; + } else { + selected_sk = ERR_PTR(-ECONNREFUSED); + } + } + rcu_read_unlock(); + *psk = selected_sk; + return no_reuseport; +} +#endif /* IS_ENABLED(CONFIG_IPV6) */ + #endif /* __LINUX_FILTER_H__ */ diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 03942eef8ab6..2d3add9e6116 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -21,6 +21,8 @@ #include #include +extern struct inet_hashinfo tcp_hashinfo; + u32 inet6_ehashfn(const struct net *net, const struct in6_addr *laddr, const u16 lport, const struct in6_addr *faddr, const __be16 fport) @@ -159,6 +161,31 @@ static struct sock *inet6_lhash2_lookup(struct net *net, return result; } +static inline struct sock *inet6_lookup_run_bpf(struct net *net, + struct inet_hashinfo *hashinfo, + struct sk_buff *skb, int doff, + const struct in6_addr *saddr, + const __be16 sport, + const struct in6_addr *daddr, + const u16 hnum) +{ + struct sock *sk, *reuse_sk; + bool no_reuseport; + + if (hashinfo != &tcp_hashinfo) + return NULL; /* only TCP is supported */ + + no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_TCP, + saddr, sport, daddr, hnum, &sk); + if (no_reuseport || IS_ERR_OR_NULL(sk)) + return sk; + + reuse_sk = lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum); + if (reuse_sk) + sk = reuse_sk; + return sk; +} + struct sock *inet6_lookup_listener(struct net *net, struct inet_hashinfo *hashinfo, struct sk_buff *skb, int doff, @@ -170,6 +197,14 @@ struct sock *inet6_lookup_listener(struct net *net, struct sock *result = NULL; unsigned int hash2; + /* Lookup redirect from BPF */ + if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { + result = inet6_lookup_run_bpf(net, hashinfo, skb, doff, + saddr, sport, daddr, hnum); + if (result) + goto done; + } + hash2 = ipv6_portaddr_hash(net, daddr, hnum); ilb2 = inet_lhash2_bucket(hashinfo, hash2); -- cgit v1.2.3 From 2be90e914c12bdea70b0bd297d5715ee66eb46d0 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 17 Jul 2020 19:51:18 -0700 Subject: dmaengine: linux/dmaengine.h: drop duplicated word in a comment Drop the doubled word "has" in a 
comment. Signed-off-by: Randy Dunlap Cc: Vinod Koul Cc: dmaengine@vger.kernel.org Link: https://lore.kernel.org/r/06e64046-ebf1-15db-dbaf-73698de3b493@infradead.org Signed-off-by: Vinod Koul --- include/linux/dmaengine.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 9f9a13a2c01f..883e1e087de5 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -164,7 +164,7 @@ struct dma_interleaved_template { * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of * this transaction * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client - * acknowledges receipt, i.e. has has a chance to establish any dependency + * acknowledges receipt, i.e. has a chance to establish any dependency * chains * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P -- cgit v1.2.3 From e8bf419e0ab2d7401dccf737cd944191164a6028 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Tue, 30 Jun 2020 12:28:28 +0200 Subject: include/media/v4l2-subdev.h: fix typo Typo in comment block: v4l2_subdev_get_try_crop -> v4l2_subdev_get_try_compose Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/media/v4l2-subdev.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index f7fe78a6f65a..2607ea85096a 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -959,7 +959,7 @@ static inline struct v4l2_rect } /** - * v4l2_subdev_get_try_crop - ancillary routine to call + * v4l2_subdev_get_try_compose - ancillary routine to call * &struct v4l2_subdev_pad_config->try_compose * * @sd: pointer to &struct v4l2_subdev -- cgit v1.2.3 From bb79974cdf953dc49ef653c59fda4daf1acc32b9 Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Thu, 9 Jul 2020 09:28:14 +0200 Subject: media: davinci: replace http references with https TI DAVINCI SERIES MEDIA DRIVER: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. 
Klimov Reviewed-by: Kieran Bingham Reviewed-by: Lad Prabhakar Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/platform/davinci/vpbe_display.c | 2 +- drivers/media/platform/davinci/vpif.c | 2 +- drivers/media/platform/davinci/vpif.h | 2 +- drivers/media/platform/davinci/vpif_display.c | 2 +- drivers/media/platform/davinci/vpif_display.h | 2 +- include/media/davinci/vpbe_display.h | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c index 7ab13eb7527d..d19bad997f30 100644 --- a/drivers/media/platform/davinci/vpbe_display.c +++ b/drivers/media/platform/davinci/vpbe_display.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2010 Texas Instruments Incorporated - https://www.ti.com/ */ #include #include diff --git a/drivers/media/platform/davinci/vpif.c b/drivers/media/platform/davinci/vpif.c index f28c1b27eac6..5e67994e62cc 100644 --- a/drivers/media/platform/davinci/vpif.c +++ b/drivers/media/platform/davinci/vpif.c @@ -5,7 +5,7 @@ * The hardware supports SDTV, HDTV formats, raw data capture. * Currently, the driver supports NTSC and PAL standards. * - * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2009 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/drivers/media/platform/davinci/vpif.h b/drivers/media/platform/davinci/vpif.h index 2466c7c77deb..c6d1d890478a 100644 --- a/drivers/media/platform/davinci/vpif.h +++ b/drivers/media/platform/davinci/vpif.h @@ -1,7 +1,7 @@ /* * VPIF header file * - * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2009 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c index 7d55fd45240e..46afc029138f 100644 --- a/drivers/media/platform/davinci/vpif_display.c +++ b/drivers/media/platform/davinci/vpif_display.c @@ -2,7 +2,7 @@ * vpif-display - VPIF display driver * Display driver for TI DaVinci VPIF * - * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2009 Texas Instruments Incorporated - https://www.ti.com/ * Copyright (C) 2014 Lad, Prabhakar * * This program is free software; you can redistribute it and/or diff --git a/drivers/media/platform/davinci/vpif_display.h b/drivers/media/platform/davinci/vpif_display.h index af2765fdcea8..f731a65eefd6 100644 --- a/drivers/media/platform/davinci/vpif_display.h +++ b/drivers/media/platform/davinci/vpif_display.h @@ -1,7 +1,7 @@ /* * VPIF display header file * - * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2009 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/include/media/davinci/vpbe_display.h b/include/media/davinci/vpbe_display.h index 56d05a855140..6d2a93740130 100644 --- a/include/media/davinci/vpbe_display.h +++ b/include/media/davinci/vpbe_display.h @@ 
-1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2010 Texas Instruments Incorporated - https://www.ti.com/ */ #ifndef VPBE_DISPLAY_H #define VPBE_DISPLAY_H -- cgit v1.2.3 From b3ab1c6058fad8cd5726f24e9ed9053e43bb2af4 Mon Sep 17 00:00:00 2001 From: Ezequiel Garcia Date: Wed, 24 Jun 2020 21:28:00 +0200 Subject: media: Add V4L2_TYPE_IS_CAPTURE helper It's all too easy to get confused by the V4L2_TYPE_IS_OUTPUT macro, when it's used as !V4L2_TYPE_IS_OUTPUT. Reduce the risk of confusion with macro to explicitly check for the CAPTURE queue type case. This change does not affect functionality, and it's only intended to make the code more readable. Suggested-by: Nicolas Dufresne Signed-off-by: Ezequiel Garcia Signed-off-by: Hans Verkuil [hverkuil-cisco@xs4all.nl: checkpatch: align with parenthesis] Signed-off-by: Mauro Carvalho Chehab --- drivers/media/common/videobuf2/videobuf2-v4l2.c | 4 ++-- drivers/media/platform/exynos-gsc/gsc-core.c | 2 +- drivers/media/platform/exynos-gsc/gsc-m2m.c | 2 +- drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c | 2 +- drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c | 7 +++---- drivers/media/platform/rcar_jpu.c | 2 +- drivers/media/platform/sti/hva/hva-v4l2.c | 2 +- drivers/media/platform/ti-vpe/vpe.c | 2 +- drivers/media/test-drivers/vicodec/vicodec-core.c | 6 +++--- drivers/media/v4l2-core/v4l2-mem2mem.c | 6 +++--- drivers/staging/media/hantro/hantro_v4l2.c | 2 +- drivers/staging/media/rkvdec/rkvdec.c | 2 +- include/uapi/linux/videodev2.h | 2 ++ 13 files changed, 21 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c index 57aa183bd198..30caad27281e 100644 --- a/drivers/media/common/videobuf2/videobuf2-v4l2.c +++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c @@ -97,7 +97,7 @@ static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b) unsigned int bytesused; unsigned int plane; - if (!V4L2_TYPE_IS_OUTPUT(b->type)) + if (V4L2_TYPE_IS_CAPTURE(b->type)) return 0; if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) { @@ -311,7 +311,7 @@ static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b /* Zero flags that we handle */ vbuf->flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS; - if (!vb->vb2_queue->copy_timestamp || !V4L2_TYPE_IS_OUTPUT(b->type)) { + if (!vb->vb2_queue->copy_timestamp || V4L2_TYPE_IS_CAPTURE(b->type)) { /* * Non-COPY timestamps and non-OUTPUT queues will get * their timestamp and timestamp source flags from the diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c index f6650b45bc3d..9f41c2e7097a 100644 --- a/drivers/media/platform/exynos-gsc/gsc-core.c +++ b/drivers/media/platform/exynos-gsc/gsc-core.c @@ -577,7 +577,7 @@ int gsc_try_selection(struct gsc_ctx *ctx, struct v4l2_selection *s) v4l_bound_align_image(&tmp_w, min_w, max_w, mod_x, &tmp_h, min_h, max_h, mod_y, 0); - if (!V4L2_TYPE_IS_OUTPUT(s->type) && + if (V4L2_TYPE_IS_CAPTURE(s->type) && (ctx->gsc_ctrls.rotate->val == 90 || ctx->gsc_ctrls.rotate->val == 270)) gsc_check_crop_change(tmp_h, tmp_w, diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c index e2c162635f72..27a3c92c73bc 100644 --- a/drivers/media/platform/exynos-gsc/gsc-m2m.c +++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c @@ -255,7 +255,7 @@ static int gsc_m2m_buf_prepare(struct vb2_buffer 
*vb) if (IS_ERR(frame)) return PTR_ERR(frame); - if (!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) { + if (V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type)) { for (i = 0; i < frame->fmt->num_planes; i++) vb2_set_plane_payload(vb, i, frame->payload[i]); } diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c index f82a81a3bdee..61fed1e35a00 100644 --- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c +++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c @@ -731,7 +731,7 @@ static void mtk_jpeg_stop_streaming(struct vb2_queue *q) * subsampling. Update capture queue when the stream is off. */ if (ctx->state == MTK_JPEG_SOURCE_CHANGE && - !V4L2_TYPE_IS_OUTPUT(q->type)) { + V4L2_TYPE_IS_CAPTURE(q->type)) { struct mtk_jpeg_src_buf *src_buf; vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c index bb9caaf513bc..724c7333b6e5 100644 --- a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c +++ b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c @@ -193,7 +193,7 @@ static const struct mtk_mdp_fmt *mtk_mdp_try_fmt_mplane(struct mtk_mdp_ctx *ctx, pix_mp->field = V4L2_FIELD_NONE; pix_mp->pixelformat = fmt->pixelformat; - if (!V4L2_TYPE_IS_OUTPUT(f->type)) { + if (V4L2_TYPE_IS_CAPTURE(f->type)) { pix_mp->colorspace = ctx->colorspace; pix_mp->xfer_func = ctx->xfer_func; pix_mp->ycbcr_enc = ctx->ycbcr_enc; @@ -327,9 +327,8 @@ static int mtk_mdp_try_crop(struct mtk_mdp_ctx *ctx, u32 type, mtk_mdp_bound_align_image(&new_w, min_w, max_w, align_w, &new_h, min_h, max_h, align_h); - if (!V4L2_TYPE_IS_OUTPUT(type) && - (ctx->ctrls.rotate->val == 90 || - ctx->ctrls.rotate->val == 270)) + if (V4L2_TYPE_IS_CAPTURE(type) && + (ctx->ctrls.rotate->val == 90 || ctx->ctrls.rotate->val == 270)) mtk_mdp_check_crop_change(new_h, new_w, &r->width, &r->height); else diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c index 5250a14324e9..9b99ff368698 100644 --- a/drivers/media/platform/rcar_jpu.c +++ b/drivers/media/platform/rcar_jpu.c @@ -1066,7 +1066,7 @@ static int jpu_buf_prepare(struct vb2_buffer *vb) } /* decoder capture queue */ - if (!ctx->encoder && !V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) + if (!ctx->encoder && V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type)) vb2_set_plane_payload(vb, i, size); } diff --git a/drivers/media/platform/sti/hva/hva-v4l2.c b/drivers/media/platform/sti/hva/hva-v4l2.c index 197b99d8fd9c..bb34d6997d99 100644 --- a/drivers/media/platform/sti/hva/hva-v4l2.c +++ b/drivers/media/platform/sti/hva/hva-v4l2.c @@ -1087,7 +1087,7 @@ static void hva_stop_streaming(struct vb2_queue *vq) if ((V4L2_TYPE_IS_OUTPUT(vq->type) && vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q)) || - (!V4L2_TYPE_IS_OUTPUT(vq->type) && + (V4L2_TYPE_IS_CAPTURE(vq->type) && vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))) { dev_dbg(dev, "%s %s out=%d cap=%d\n", ctx->name, to_type_str(vq->type), diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c index cff2fcd6d812..346f8212791c 100644 --- a/drivers/media/platform/ti-vpe/vpe.c +++ b/drivers/media/platform/ti-vpe/vpe.c @@ -1576,7 +1576,7 @@ static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f) *f = q_data->format; - if (!V4L2_TYPE_IS_OUTPUT(f->type)) { + if (V4L2_TYPE_IS_CAPTURE(f->type)) { struct vpe_q_data *s_q_data; struct v4l2_pix_format_mplane *spix; diff --git a/drivers/media/test-drivers/vicodec/vicodec-core.c 
b/drivers/media/test-drivers/vicodec/vicodec-core.c index e879290727ef..8941d73f6611 100644 --- a/drivers/media/test-drivers/vicodec/vicodec-core.c +++ b/drivers/media/test-drivers/vicodec/vicodec-core.c @@ -1442,7 +1442,7 @@ static void vicodec_buf_queue(struct vb2_buffer *vb) .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION, }; - if (!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type) && + if (V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type) && vb2_is_streaming(vb->vb2_queue) && v4l2_m2m_dst_buf_is_last(ctx->fh.m2m_ctx)) { unsigned int i; @@ -1479,7 +1479,7 @@ static void vicodec_buf_queue(struct vb2_buffer *vb) * in the compressed stream */ if (ctx->is_stateless || ctx->is_enc || - !V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) { + V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type)) { v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf); return; } @@ -1574,7 +1574,7 @@ static int vicodec_start_streaming(struct vb2_queue *q, state->gop_cnt = 0; if ((V4L2_TYPE_IS_OUTPUT(q->type) && !ctx->is_enc) || - (!V4L2_TYPE_IS_OUTPUT(q->type) && ctx->is_enc)) + (V4L2_TYPE_IS_CAPTURE(q->type) && ctx->is_enc)) return 0; if (info->id == V4L2_PIX_FMT_FWHT || diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c index 62ac9424c92a..95a8f2dc5341 100644 --- a/drivers/media/v4l2-core/v4l2-mem2mem.c +++ b/drivers/media/v4l2-core/v4l2-mem2mem.c @@ -556,7 +556,7 @@ int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, ret = vb2_querybuf(vq, buf); /* Adjust MMAP memory offsets for the CAPTURE queue */ - if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) { + if (buf->memory == V4L2_MEMORY_MMAP && V4L2_TYPE_IS_CAPTURE(vq->type)) { if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) { for (i = 0; i < buf->length; ++i) buf->m.planes[i].m.mem_offset @@ -712,7 +712,7 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, int ret; vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); - if (!V4L2_TYPE_IS_OUTPUT(vq->type) && + if (V4L2_TYPE_IS_CAPTURE(vq->type) && (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) { dprintk("%s: requests cannot be used with capture buffers\n", __func__); @@ -729,7 +729,7 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, * buffer as DONE with LAST flag since it won't be queued on the * device. 
*/ - if (!V4L2_TYPE_IS_OUTPUT(vq->type) && + if (V4L2_TYPE_IS_CAPTURE(vq->type) && vb2_is_streaming(vq) && !vb2_start_streaming_called(vq) && (v4l2_m2m_has_stopped(m2m_ctx) || v4l2_m2m_dst_buf_is_last(m2m_ctx))) v4l2_m2m_force_last_buf_done(m2m_ctx, vq); diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c index f28a94e2fa93..63859e8a0923 100644 --- a/drivers/staging/media/hantro/hantro_v4l2.c +++ b/drivers/staging/media/hantro/hantro_v4l2.c @@ -237,7 +237,7 @@ static int hantro_try_fmt(const struct hantro_ctx *ctx, enum v4l2_buf_type type) { const struct hantro_fmt *fmt, *vpu_fmt; - bool capture = !V4L2_TYPE_IS_OUTPUT(type); + bool capture = V4L2_TYPE_IS_CAPTURE(type); bool coded; coded = capture == hantro_is_encoder_ctx(ctx); diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c index 225eeca73356..fd68671f0286 100644 --- a/drivers/staging/media/rkvdec/rkvdec.c +++ b/drivers/staging/media/rkvdec/rkvdec.c @@ -489,7 +489,7 @@ static int rkvdec_start_streaming(struct vb2_queue *q, unsigned int count) const struct rkvdec_coded_fmt_desc *desc; int ret; - if (!V4L2_TYPE_IS_OUTPUT(q->type)) + if (V4L2_TYPE_IS_CAPTURE(q->type)) return 0; desc = ctx->coded_fmt_desc; diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 303805438814..c7b70ff53bc1 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -171,6 +171,8 @@ enum v4l2_buf_type { || (type) == V4L2_BUF_TYPE_SDR_OUTPUT \ || (type) == V4L2_BUF_TYPE_META_OUTPUT) +#define V4L2_TYPE_IS_CAPTURE(type) (!V4L2_TYPE_IS_OUTPUT(type)) + enum v4l2_tuner_type { V4L2_TUNER_RADIO = 1, V4L2_TUNER_ANALOG_TV = 2, -- cgit v1.2.3 From 2f9237d4f6df49b74c51cdac555b0a9979d0c237 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 8 Jul 2020 09:30:00 +0200 Subject: dma-mapping: make support for dma ops optional Avoid the overhead of the dma ops support for tiny builds that only use the direct mapping. 
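Most of the hunks below are architectures and drivers gaining "select DMA_OPS"; the part that matters for the direct-mapping fast path is in the include/linux/device.h and include/linux/dma-mapping.h changes listed in the diffstat, which compile out the dma_map_ops indirection when the option is disabled. A rough sketch of that stubbing pattern, illustrative rather than the verbatim hunk:

#ifdef CONFIG_DMA_OPS
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev->bus);
}
#else /* !CONFIG_DMA_OPS */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return NULL;	/* callers fall through to the direct mapping */
}
#endif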
Signed-off-by: Christoph Hellwig Tested-by: Alexey Kardashevskiy Reviewed-by: Alexey Kardashevskiy --- arch/alpha/Kconfig | 1 + arch/arm/Kconfig | 1 + arch/ia64/Kconfig | 1 + arch/mips/Kconfig | 1 + arch/parisc/Kconfig | 1 + arch/powerpc/Kconfig | 1 + arch/s390/Kconfig | 1 + arch/sparc/Kconfig | 1 + arch/x86/Kconfig | 1 + drivers/infiniband/core/device.c | 6 +++++- drivers/iommu/Kconfig | 2 ++ drivers/macintosh/macio_asic.c | 4 ++-- drivers/misc/mic/Kconfig | 4 ++++ drivers/vdpa/Kconfig | 1 + drivers/xen/Kconfig | 1 + include/linux/device.h | 3 ++- include/linux/dma-mapping.h | 12 +++++++++++- kernel/dma/Kconfig | 4 ++++ kernel/dma/Makefile | 3 ++- 19 files changed, 43 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 10862c5a8c76..9c5f06e8eb9b 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -7,6 +7,7 @@ config ALPHA select ARCH_NO_PREEMPT select ARCH_NO_SG_CHAIN select ARCH_USE_CMPXCHG_LOCKREF + select DMA_OPS if PCI select FORCE_PCI if !ALPHA_JENSEN select PCI_DOMAINS if PCI select PCI_SYSCALL if PCI diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 2ac74904a3ce..bee35b0187e4 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -41,6 +41,7 @@ config ARM select CPU_PM if SUSPEND || CPU_IDLE select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS select DMA_DECLARE_COHERENT + select DMA_OPS select DMA_REMAP if MMU select EDAC_SUPPORT select EDAC_ATOMIC_SCRUB diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 1fa2fe2ef053..5b4ec80bf586 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -192,6 +192,7 @@ config IA64_SGI_UV config IA64_HP_SBA_IOMMU bool "HP SBA IOMMU support" + select DMA_OPS default y help Say Y here to add support for the SBA IOMMU found on HP zx1 and diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 6fee1a133e9d..8a458105e445 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -367,6 +367,7 @@ config MACH_JAZZ select ARC_PROMLIB select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO + select DMA_OPS select FW_ARC select FW_ARC32 select ARCH_MAY_HAVE_PC_FDC diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 8e4c3708773d..38c1eafc1f1a 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -14,6 +14,7 @@ config PARISC select ARCH_HAS_UBSAN_SANITIZE_ALL select ARCH_NO_SG_CHAIN select ARCH_SUPPORTS_MEMORY_FAILURE + select DMA_OPS select RTC_CLASS select RTC_DRV_GENERIC select INIT_ALL_POSSIBLE diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 9fa23eb320ff..e9b091d35872 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -151,6 +151,7 @@ config PPC select BUILDTIME_TABLE_SORT select CLONE_BACKWARDS select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN + select DMA_OPS if PPC64 select DYNAMIC_FTRACE if FUNCTION_TRACER select EDAC_ATOMIC_SCRUB select EDAC_SUPPORT diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index c7d7ede6300c..687fe23f61cc 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -113,6 +113,7 @@ config S390 select ARCH_WANT_IPC_PARSE_VERSION select BUILDTIME_TABLE_SORT select CLONE_BACKWARDS2 + select DMA_OPS if PCI select DYNAMIC_FTRACE if FUNCTION_TRACER select GENERIC_CLOCKEVENTS select GENERIC_CPU_AUTOPROBE diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 5bf2dc163540..5db1faaaee31 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -15,6 +15,7 @@ config SPARC default y select ARCH_MIGHT_HAVE_PC_PARPORT if SPARC64 && PCI select ARCH_MIGHT_HAVE_PC_SERIO + select DMA_OPS 
select OF select OF_PROMTREE select HAVE_ASM_MODVERSIONS diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 883da0abf779..96ab92754158 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -909,6 +909,7 @@ config DMI config GART_IOMMU bool "Old AMD GART IOMMU support" + select DMA_OPS select IOMMU_HELPER select SWIOTLB depends on X86_64 && PCI && AMD_NB diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 905a2beaf885..2927a9d16eaa 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -1183,6 +1183,8 @@ static void setup_dma_device(struct ib_device *device) struct device *parent = device->dev.parent; WARN_ON_ONCE(device->dma_device); + +#ifdef CONFIG_DMA_OPS if (device->dev.dma_ops) { /* * The caller provided custom DMA operations. Copy the @@ -1203,7 +1205,9 @@ static void setup_dma_device(struct ib_device *device) else WARN_ON_ONCE(true); } - } else { + } else +#endif /* CONFIG_DMA_OPS */ + { /* * The caller did not provide custom DMA operations. Use the * DMA mapping operations of the parent device. diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index b0f308cb7f7c..b622af72448f 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -97,6 +97,7 @@ config OF_IOMMU # IOMMU-agnostic DMA-mapping layer config IOMMU_DMA bool + select DMA_OPS select IOMMU_API select IOMMU_IOVA select IRQ_MSI_IOMMU @@ -183,6 +184,7 @@ config DMAR_TABLE config INTEL_IOMMU bool "Support for Intel IOMMU using DMA Remapping Devices" depends on PCI_MSI && ACPI && (X86 || IA64) + select DMA_OPS select IOMMU_API select IOMMU_IOVA select NEED_DMA_MAP_STATE diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c index 92d142d2b75f..49af60bdac92 100644 --- a/drivers/macintosh/macio_asic.c +++ b/drivers/macintosh/macio_asic.c @@ -382,7 +382,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip, dma_set_max_seg_size(&dev->ofdev.dev, 65536); dma_set_seg_boundary(&dev->ofdev.dev, 0xffffffff); -#ifdef CONFIG_PCI +#if defined(CONFIG_PCI) && defined(CONFIG_DMA_OPS) /* Set the DMA ops to the ones from the PCI device, this could be * fishy if we didn't know that on PowerMac it's always direct ops * or iommu ops that will work fine @@ -391,7 +391,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip, */ dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata; dev->ofdev.dev.dma_ops = chip->lbus.pdev->dev.dma_ops; -#endif /* CONFIG_PCI */ +#endif /* CONFIG_PCI && CONFIG_DMA_OPS */ #ifdef DEBUG printk("preparing mdev @%p, ofdev @%p, dev @%p, kobj @%p\n", diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig index 8f201d019f5a..b9bb086785db 100644 --- a/drivers/misc/mic/Kconfig +++ b/drivers/misc/mic/Kconfig @@ -4,6 +4,7 @@ menu "Intel MIC & related support" config INTEL_MIC_BUS tristate "Intel MIC Bus Driver" depends on 64BIT && PCI && X86 + select DMA_OPS help This option is selected by any driver which registers a device or driver on the MIC Bus, such as CONFIG_INTEL_MIC_HOST, @@ -19,6 +20,7 @@ config INTEL_MIC_BUS config SCIF_BUS tristate "SCIF Bus Driver" depends on 64BIT && PCI && X86 + select DMA_OPS help This option is selected by any driver which registers a device or driver on the SCIF Bus, such as CONFIG_INTEL_MIC_HOST @@ -33,6 +35,7 @@ config SCIF_BUS config VOP_BUS tristate "VOP Bus Driver" + select DMA_OPS help This option is selected by any driver which registers a device or driver on the VOP Bus, such as CONFIG_INTEL_MIC_HOST @@ -49,6 +52,7 @@ 
config INTEL_MIC_HOST tristate "Intel MIC Host Driver" depends on 64BIT && PCI && X86 depends on INTEL_MIC_BUS && SCIF_BUS && MIC_COSM && VOP_BUS + select DMA_OPS help This enables Host Driver support for the Intel Many Integrated Core (MIC) family of PCIe form factor coprocessor devices that diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig index 3e1ceb8e9f2b..d93a69b12f81 100644 --- a/drivers/vdpa/Kconfig +++ b/drivers/vdpa/Kconfig @@ -11,6 +11,7 @@ if VDPA config VDPA_SIM tristate "vDPA device simulator" depends on RUNTIME_TESTING_MENU && HAS_DMA + select DMA_OPS select VHOST_RING default n help diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index 727f11eb46b2..1d339ef92422 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -179,6 +179,7 @@ config XEN_GRANT_DMA_ALLOC config SWIOTLB_XEN def_bool y + select DMA_OPS select SWIOTLB config XEN_PCIDEV_BACKEND diff --git a/include/linux/device.h b/include/linux/device.h index 15460a5ac024..4c4af98321eb 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -568,8 +568,9 @@ struct device { #ifdef CONFIG_GENERIC_MSI_IRQ struct list_head msi_list; #endif - +#ifdef CONFIG_DMA_OPS const struct dma_map_ops *dma_ops; +#endif u64 *dma_mask; /* dma mask (if dma'able device) */ u64 coherent_dma_mask;/* Like dma_mask, but for alloc_coherent mappings as diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index bd0a6f5ee445..39da883c8619 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -191,6 +191,7 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma, #ifdef CONFIG_HAS_DMA #include +#ifdef CONFIG_DMA_OPS static inline const struct dma_map_ops *get_dma_ops(struct device *dev) { if (dev->dma_ops) @@ -203,7 +204,16 @@ static inline void set_dma_ops(struct device *dev, { dev->dma_ops = dma_ops; } - +#else /* CONFIG_DMA_OPS */ +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) +{ + return NULL; +} +static inline void set_dma_ops(struct device *dev, + const struct dma_map_ops *dma_ops) +{ +} +#endif /* CONFIG_DMA_OPS */ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig index 1da3f44f2565..5cfb2428593a 100644 --- a/kernel/dma/Kconfig +++ b/kernel/dma/Kconfig @@ -5,6 +5,9 @@ config HAS_DMA depends on !NO_DMA default y +config DMA_OPS + bool + config NEED_SG_DMA_LENGTH bool @@ -60,6 +63,7 @@ config DMA_NONCOHERENT_CACHE_SYNC config DMA_VIRT_OPS bool depends on HAS_DMA + select DMA_OPS config SWIOTLB bool diff --git a/kernel/dma/Makefile b/kernel/dma/Makefile index 370f63344e9c..32c7c1942bbd 100644 --- a/kernel/dma/Makefile +++ b/kernel/dma/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_HAS_DMA) += mapping.o direct.o dummy.o +obj-$(CONFIG_HAS_DMA) += mapping.o direct.o +obj-$(CONFIG_DMA_OPS) += dummy.o obj-$(CONFIG_DMA_CMA) += contiguous.o obj-$(CONFIG_DMA_DECLARE_COHERENT) += coherent.o obj-$(CONFIG_DMA_VIRT_OPS) += virt.o -- cgit v1.2.3 From d35834c64820c7ef397f8a244061d4450720540e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 23 Mar 2020 18:19:30 +0100 Subject: dma-mapping: add a dma_ops_bypass flag to struct device Several IOMMU drivers have a bypass mode where they can use a direct mapping if the devices DMA mask is large enough. Add generic support to the core dma-mapping code to do that to switch those drivers to a common solution. 
Signed-off-by: Christoph Hellwig Tested-by: Alexey Kardashevskiy Reviewed-by: Alexey Kardashevskiy --- include/linux/device.h | 8 ++++++ kernel/dma/Kconfig | 8 ++++++ kernel/dma/mapping.c | 74 +++++++++++++++++++++++++++++++++++--------------- 3 files changed, 68 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/include/linux/device.h b/include/linux/device.h index 4c4af98321eb..1f71acf37f78 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -523,6 +523,11 @@ struct dev_links_info { * sync_state() callback. * @dma_coherent: this particular device is dma coherent, even if the * architecture supports non-coherent devices. + * @dma_ops_bypass: If set to %true then the dma_ops are bypassed for the + * streaming DMA operations (->map_* / ->unmap_* / ->sync_*), + * and optionall (if the coherent mask is large enough) also + * for dma allocations. This flag is managed by the dma ops + * instance from ->dma_supported. * * At the lowest level, every device in a Linux system is represented by an * instance of struct device. The device structure contains the information @@ -623,6 +628,9 @@ struct device { defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) bool dma_coherent:1; #endif +#ifdef CONFIG_DMA_OPS_BYPASS + bool dma_ops_bypass : 1; +#endif }; static inline struct device *kobj_to_dev(struct kobject *kobj) diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig index 5cfb2428593a..f4770fcfa62b 100644 --- a/kernel/dma/Kconfig +++ b/kernel/dma/Kconfig @@ -8,6 +8,14 @@ config HAS_DMA config DMA_OPS bool +# +# IOMMU drivers that can bypass the IOMMU code and optionally use the direct +# mapping fast path should select this option and set the dma_ops_bypass +# flag in struct device where applicable +# +config DMA_OPS_BYPASS + bool + config NEED_SG_DMA_LENGTH bool diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index b53953024512..0d129421e75f 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -105,9 +105,35 @@ void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, } EXPORT_SYMBOL(dmam_alloc_attrs); -static inline bool dma_is_direct(const struct dma_map_ops *ops) +static bool dma_go_direct(struct device *dev, dma_addr_t mask, + const struct dma_map_ops *ops) { - return likely(!ops); + if (likely(!ops)) + return true; +#ifdef CONFIG_DMA_OPS_BYPASS + if (dev->dma_ops_bypass) + return min_not_zero(mask, dev->bus_dma_limit) >= + dma_direct_get_required_mask(dev); +#endif + return false; +} + + +/* + * Check if the devices uses a direct mapping for streaming DMA operations. + * This allows IOMMU drivers to set a bypass mode if the DMA mask is large + * enough. 
+ */ +static inline bool dma_alloc_direct(struct device *dev, + const struct dma_map_ops *ops) +{ + return dma_go_direct(dev, dev->coherent_dma_mask, ops); +} + +static inline bool dma_map_direct(struct device *dev, + const struct dma_map_ops *ops) +{ + return dma_go_direct(dev, *dev->dma_mask, ops); } dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, @@ -118,7 +144,7 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, dma_addr_t addr; BUG_ON(!valid_dma_direction(dir)); - if (dma_is_direct(ops)) + if (dma_map_direct(dev, ops)) addr = dma_direct_map_page(dev, page, offset, size, dir, attrs); else addr = ops->map_page(dev, page, offset, size, dir, attrs); @@ -134,7 +160,7 @@ void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size, const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); - if (dma_is_direct(ops)) + if (dma_map_direct(dev, ops)) dma_direct_unmap_page(dev, addr, size, dir, attrs); else if (ops->unmap_page) ops->unmap_page(dev, addr, size, dir, attrs); @@ -153,7 +179,7 @@ int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, int ents; BUG_ON(!valid_dma_direction(dir)); - if (dma_is_direct(ops)) + if (dma_map_direct(dev, ops)) ents = dma_direct_map_sg(dev, sg, nents, dir, attrs); else ents = ops->map_sg(dev, sg, nents, dir, attrs); @@ -172,7 +198,7 @@ void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, BUG_ON(!valid_dma_direction(dir)); debug_dma_unmap_sg(dev, sg, nents, dir); - if (dma_is_direct(ops)) + if (dma_map_direct(dev, ops)) dma_direct_unmap_sg(dev, sg, nents, dir, attrs); else if (ops->unmap_sg) ops->unmap_sg(dev, sg, nents, dir, attrs); @@ -191,7 +217,7 @@ dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr, if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr)))) return DMA_MAPPING_ERROR; - if (dma_is_direct(ops)) + if (dma_map_direct(dev, ops)) addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs); else if (ops->map_resource) addr = ops->map_resource(dev, phys_addr, size, dir, attrs); @@ -207,7 +233,7 @@ void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size, const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); - if (!dma_is_direct(ops) && ops->unmap_resource) + if (!dma_map_direct(dev, ops) && ops->unmap_resource) ops->unmap_resource(dev, addr, size, dir, attrs); debug_dma_unmap_resource(dev, addr, size, dir); } @@ -219,7 +245,7 @@ void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); - if (dma_is_direct(ops)) + if (dma_map_direct(dev, ops)) dma_direct_sync_single_for_cpu(dev, addr, size, dir); else if (ops->sync_single_for_cpu) ops->sync_single_for_cpu(dev, addr, size, dir); @@ -233,7 +259,7 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); - if (dma_is_direct(ops)) + if (dma_map_direct(dev, ops)) dma_direct_sync_single_for_device(dev, addr, size, dir); else if (ops->sync_single_for_device) ops->sync_single_for_device(dev, addr, size, dir); @@ -247,7 +273,7 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); - if (dma_is_direct(ops)) + if (dma_map_direct(dev, ops)) dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir); else if (ops->sync_sg_for_cpu) 
ops->sync_sg_for_cpu(dev, sg, nelems, dir); @@ -261,7 +287,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); - if (dma_is_direct(ops)) + if (dma_map_direct(dev, ops)) dma_direct_sync_sg_for_device(dev, sg, nelems, dir); else if (ops->sync_sg_for_device) ops->sync_sg_for_device(dev, sg, nelems, dir); @@ -302,7 +328,7 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, { const struct dma_map_ops *ops = get_dma_ops(dev); - if (dma_is_direct(ops)) + if (dma_alloc_direct(dev, ops)) return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs); if (!ops->get_sgtable) @@ -372,7 +398,7 @@ bool dma_can_mmap(struct device *dev) { const struct dma_map_ops *ops = get_dma_ops(dev); - if (dma_is_direct(ops)) + if (dma_alloc_direct(dev, ops)) return dma_direct_can_mmap(dev); return ops->mmap != NULL; } @@ -397,7 +423,7 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, { const struct dma_map_ops *ops = get_dma_ops(dev); - if (dma_is_direct(ops)) + if (dma_alloc_direct(dev, ops)) return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); if (!ops->mmap) @@ -410,7 +436,7 @@ u64 dma_get_required_mask(struct device *dev) { const struct dma_map_ops *ops = get_dma_ops(dev); - if (dma_is_direct(ops)) + if (dma_alloc_direct(dev, ops)) return dma_direct_get_required_mask(dev); if (ops->get_required_mask) return ops->get_required_mask(dev); @@ -441,7 +467,7 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, /* let the implementation decide on the zone to allocate from: */ flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); - if (dma_is_direct(ops)) + if (dma_alloc_direct(dev, ops)) cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs); else if (ops->alloc) cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs); @@ -473,7 +499,7 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, return; debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); - if (dma_is_direct(ops)) + if (dma_alloc_direct(dev, ops)) dma_direct_free(dev, size, cpu_addr, dma_handle, attrs); else if (ops->free) ops->free(dev, size, cpu_addr, dma_handle, attrs); @@ -484,7 +510,11 @@ int dma_supported(struct device *dev, u64 mask) { const struct dma_map_ops *ops = get_dma_ops(dev); - if (dma_is_direct(ops)) + /* + * ->dma_supported sets the bypass flag, so we must always call + * into the method here unless the device is truly direct mapped. 
+ */ + if (!ops) return dma_direct_supported(dev, mask); if (!ops->dma_supported) return 1; @@ -540,7 +570,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size, BUG_ON(!valid_dma_direction(dir)); - if (dma_is_direct(ops)) + if (dma_alloc_direct(dev, ops)) arch_dma_cache_sync(dev, vaddr, size, dir); else if (ops->cache_sync) ops->cache_sync(dev, vaddr, size, dir); @@ -552,7 +582,7 @@ size_t dma_max_mapping_size(struct device *dev) const struct dma_map_ops *ops = get_dma_ops(dev); size_t size = SIZE_MAX; - if (dma_is_direct(ops)) + if (dma_map_direct(dev, ops)) size = dma_direct_max_mapping_size(dev); else if (ops && ops->max_mapping_size) size = ops->max_mapping_size(dev); @@ -565,7 +595,7 @@ bool dma_need_sync(struct device *dev, dma_addr_t dma_addr) { const struct dma_map_ops *ops = get_dma_ops(dev); - if (dma_is_direct(ops)) + if (dma_map_direct(dev, ops)) return dma_direct_need_sync(dev, dma_addr); return ops->sync_single_for_cpu || ops->sync_single_for_device; } -- cgit v1.2.3 From 9dc75e79c0dec0eb9363793efbd5f16d134169dc Mon Sep 17 00:00:00 2001 From: Kaaira Gupta Date: Sun, 5 Jul 2020 15:53:04 +0200 Subject: media: tpg: change char argument to const char Change the argument of type char * to const char * for function tpg_gen_text(). This function should take in a const char * as opposed to char * as it does not make changes to the text. This issue was found while passing the order of colors of tpg generated test image (which is a const char *) to this function. Signed-off-by: Kaaira Gupta Reviewed-by: Helen Koike Reviewed-by: Kieran Bingham Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/common/v4l2-tpg/v4l2-tpg-core.c | 10 +++++----- include/media/tpg/v4l2-tpg.h | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c index 50f1e0b28b25..dde22a4cbd6c 100644 --- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c +++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c @@ -1927,34 +1927,34 @@ typedef struct { u16 __; u8 _; } __packed x24; static noinline void tpg_print_str_2(const struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2], unsigned p, unsigned first, unsigned div, unsigned step, - int y, int x, char *text, unsigned len) + int y, int x, const char *text, unsigned len) { PRINTSTR(u8); } static noinline void tpg_print_str_4(const struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2], unsigned p, unsigned first, unsigned div, unsigned step, - int y, int x, char *text, unsigned len) + int y, int x, const char *text, unsigned len) { PRINTSTR(u16); } static noinline void tpg_print_str_6(const struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2], unsigned p, unsigned first, unsigned div, unsigned step, - int y, int x, char *text, unsigned len) + int y, int x, const char *text, unsigned len) { PRINTSTR(x24); } static noinline void tpg_print_str_8(const struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2], unsigned p, unsigned first, unsigned div, unsigned step, - int y, int x, char *text, unsigned len) + int y, int x, const char *text, unsigned len) { PRINTSTR(u32); } void tpg_gen_text(const struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2], - int y, int x, char *text) + int y, int x, const char *text) { unsigned step = V4L2_FIELD_HAS_T_OR_B(tpg->field) ? 
2 : 1; unsigned div = step; diff --git a/include/media/tpg/v4l2-tpg.h b/include/media/tpg/v4l2-tpg.h index eb191e85d363..9749ed409856 100644 --- a/include/media/tpg/v4l2-tpg.h +++ b/include/media/tpg/v4l2-tpg.h @@ -241,7 +241,7 @@ void tpg_log_status(struct tpg_data *tpg); void tpg_set_font(const u8 *f); void tpg_gen_text(const struct tpg_data *tpg, - u8 *basep[TPG_MAX_PLANES][2], int y, int x, char *text); + u8 *basep[TPG_MAX_PLANES][2], int y, int x, const char *text); void tpg_calc_text_basep(struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2], unsigned p, u8 *vbuf); unsigned tpg_g_interleaved_plane(const struct tpg_data *tpg, unsigned buf_line); -- cgit v1.2.3 From 7a785081a16968a28adce223c572111d2d57a83e Mon Sep 17 00:00:00 2001 From: Kaaira Gupta Date: Sun, 5 Jul 2020 15:53:05 +0200 Subject: media: tpg: Add function to return colors' order of test image Currently there is no method to know the correct order of the colors for a test image generated by tpg. Write a function that returns a string of colors' order given a tpg. It returns a NULL pointer in case of test patterns which do not have a well defined colors' order. Hence add a NULL check for text in tpg_gen_text(). [hverkuil: white -> White (for consistency)] Signed-off-by: Kaaira Gupta Reviewed-by: Kieran Bingham Reviewed-by: Helen Koike Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- drivers/media/common/v4l2-tpg/v4l2-tpg-core.c | 30 +++++++++++++++++++++++++-- include/media/tpg/v4l2-tpg.h | 1 + 2 files changed, 29 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c index dde22a4cbd6c..630a75e0eeb1 100644 --- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c +++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c @@ -1959,12 +1959,14 @@ void tpg_gen_text(const struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2], unsigned step = V4L2_FIELD_HAS_T_OR_B(tpg->field) ? 2 : 1; unsigned div = step; unsigned first = 0; - unsigned len = strlen(text); + unsigned len; unsigned p; - if (font8x16 == NULL || basep == NULL) + if (font8x16 == NULL || basep == NULL || text == NULL) return; + len = strlen(text); + /* Checks if it is possible to show string */ if (y + 16 >= tpg->compose.height || x + 8 >= tpg->compose.width) return; @@ -2006,6 +2008,30 @@ void tpg_gen_text(const struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2], } EXPORT_SYMBOL_GPL(tpg_gen_text); +const char *tpg_g_color_order(const struct tpg_data *tpg) +{ + switch (tpg->pattern) { + case TPG_PAT_75_COLORBAR: + case TPG_PAT_100_COLORBAR: + case TPG_PAT_CSC_COLORBAR: + case TPG_PAT_100_HCOLORBAR: + return "White, yellow, cyan, green, magenta, red, blue, black"; + case TPG_PAT_BLACK: + return "Black"; + case TPG_PAT_WHITE: + return "White"; + case TPG_PAT_RED: + return "Red"; + case TPG_PAT_GREEN: + return "Green"; + case TPG_PAT_BLUE: + return "Blue"; + default: + return NULL; + } +} +EXPORT_SYMBOL_GPL(tpg_g_color_order); + void tpg_update_mv_step(struct tpg_data *tpg) { int factor = tpg->mv_hor_mode > TPG_MOVE_NONE ? 
-1 : 1; diff --git a/include/media/tpg/v4l2-tpg.h b/include/media/tpg/v4l2-tpg.h index 9749ed409856..0b0ddb87380e 100644 --- a/include/media/tpg/v4l2-tpg.h +++ b/include/media/tpg/v4l2-tpg.h @@ -252,6 +252,7 @@ void tpg_fillbuffer(struct tpg_data *tpg, v4l2_std_id std, bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc); void tpg_s_crop_compose(struct tpg_data *tpg, const struct v4l2_rect *crop, const struct v4l2_rect *compose); +const char *tpg_g_color_order(const struct tpg_data *tpg); static inline void tpg_s_pattern(struct tpg_data *tpg, enum tpg_pattern pattern) { -- cgit v1.2.3 From 216964fa6a63c095417cc4001adc240c0e1f1bfb Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 00:05:49 +0200 Subject: media: media-device.h: drop duplicated word in comment Delete the doubled word "the" in a comment. Signed-off-by: Randy Dunlap Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/media/media-device.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/media/media-device.h b/include/media/media-device.h index fa0895430720..1345e6da688a 100644 --- a/include/media/media-device.h +++ b/include/media/media-device.h @@ -128,7 +128,7 @@ struct media_device_ops { * * Use-case: find tuner entity connected to the decoder * entity and check if it is available, and activate the - * the link between them from @enable_source and deactivate + * link between them from @enable_source and deactivate * from @disable_source. * * .. note:: -- cgit v1.2.3 From f040e0fd29e4a09855c8cff8c6d22b4745a19796 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 00:05:50 +0200 Subject: media: media-devnode.h: drop duplicated word in comment Delete the doubled word "the" in a comment. Signed-off-by: Randy Dunlap Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/media/media-devnode.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/media/media-devnode.h b/include/media/media-devnode.h index 6393842c6b21..d27c1c646c28 100644 --- a/include/media/media-devnode.h +++ b/include/media/media-devnode.h @@ -39,7 +39,7 @@ struct media_device; * @poll: pointer to the function that implements poll() syscall * @ioctl: pointer to the function that implements ioctl() syscall * @compat_ioctl: pointer to the function that will handle 32 bits userspace - * calls to the the ioctl() syscall on a Kernel compiled with 64 bits. + * calls to the ioctl() syscall on a Kernel compiled with 64 bits. * @open: pointer to the function that implements open() syscall * @release: pointer to the function that will release the resources allocated * by the @open function. -- cgit v1.2.3 From ffb189716081ff7dafb0978d31a290adff966d81 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 00:05:51 +0200 Subject: media: dvbdev.h: fix duplicated word in comment Change the doubled word "the" in a comment to "to the". Signed-off-by: Randy Dunlap Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/media/dvbdev.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/media/dvbdev.h b/include/media/dvbdev.h index 551325858de3..1f7b008387f8 100644 --- a/include/media/dvbdev.h +++ b/include/media/dvbdev.h @@ -385,7 +385,7 @@ struct i2c_client; * with dvb_module_probe() should use dvb_module_release() to unbind. 
* * Return: - * On success, return an &struct i2c_client, pointing the the bound + * On success, return an &struct i2c_client, pointing to the bound * I2C device. %NULL otherwise. * * .. note:: -- cgit v1.2.3 From 91bbbf24c4d1d2b9365c077730bbbaf2ef3f9056 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 00:05:52 +0200 Subject: media: media-entity.h: drop duplicated word in comment Delete the doubled word "flag" in a comment. Signed-off-by: Randy Dunlap Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/media/media-entity.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/media/media-entity.h b/include/media/media-entity.h index cde80ad029b7..cbdfcb79d0d0 100644 --- a/include/media/media-entity.h +++ b/include/media/media-entity.h @@ -803,7 +803,7 @@ int __media_entity_setup_link(struct media_link *link, u32 flags); * @flags: the requested new link flags * * The only configurable property is the %MEDIA_LNK_FL_ENABLED link flag - * flag to enable/disable a link. Links marked with the + * to enable/disable a link. Links marked with the * %MEDIA_LNK_FL_IMMUTABLE link flag can not be enabled or disabled. * * When a link is enabled or disabled, the media framework calls the -- cgit v1.2.3 From f2f475e4c5bb5343e35c0973096a391a09677d1a Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 00:05:53 +0200 Subject: media: v4l2-subdev.h: drop duplicated word in comment Delete the doubled words "the" in comments. Signed-off-by: Randy Dunlap Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/media/v4l2-subdev.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index 2607ea85096a..1b83a6c46c1a 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -566,9 +566,9 @@ struct v4l2_subdev_ir_parameters { * * @rx_read: Reads received codes or pulse width data. * The semantics are similar to a non-blocking read() call. - * @rx_g_parameters: Get the current operating parameters and state of the + * @rx_g_parameters: Get the current operating parameters and state of * the IR receiver. - * @rx_s_parameters: Set the current operating parameters and state of the + * @rx_s_parameters: Set the current operating parameters and state of * the IR receiver. It is recommended to call * [rt]x_g_parameters first to fill out the current state, and only change * the fields that need to be changed. Upon return, the actual device @@ -582,9 +582,9 @@ struct v4l2_subdev_ir_parameters { * * @tx_write: Writes codes or pulse width data for transmission. * The semantics are similar to a non-blocking write() call. - * @tx_g_parameters: Get the current operating parameters and state of the + * @tx_g_parameters: Get the current operating parameters and state of * the IR transmitter. - * @tx_s_parameters: Set the current operating parameters and state of the + * @tx_s_parameters: Set the current operating parameters and state of * the IR transmitter. It is recommended to call * [rt]x_g_parameters first to fill out the current state, and only change * the fields that need to be changed. 
Upon return, the actual device -- cgit v1.2.3 From 35694afc92646ac24d7f3ef34a7387876d998fe7 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 1 Jul 2020 08:21:37 +0200 Subject: media: device property: Add a function to test is a fwnode is a graph endpoint MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Drivers may need to test if a fwnode is a graph endpoint. To avoid hand-written solutions that wouldn't work for all fwnode types, add a new fwnode_graph_is_endpoint() function for this purpose. We don't need to wire it up to different backends for OF and ACPI for now, as the implementation can simply be based on checkout the presence of a remote-endpoint property. Signed-off-by: Laurent Pinchart Reviewed-by: Niklas Söderlund Reviewed-by: Lad Prabhakar Reviewed-by: Kieran Bingham Signed-off-by: Sakari Ailus Signed-off-by: Mauro Carvalho Chehab --- include/linux/property.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/linux/property.h b/include/linux/property.h index 10d03572f52e..9f805c442819 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -389,6 +389,11 @@ struct fwnode_handle * fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port, u32 endpoint); +static inline bool fwnode_graph_is_endpoint(struct fwnode_handle *fwnode) +{ + return fwnode_property_present(fwnode, "remote-endpoint"); +} + /* * Fwnode lookup flags * -- cgit v1.2.3 From 8ecbde62c063d7d499144d3491496383a16de7f2 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Fri, 17 Jul 2020 10:08:51 +0200 Subject: media: v4l2-subdev.h: keep * together with the type Having the '*' in the next line separated from the type makes it hard to see that these functions return a pointer to that type. Instead, keep it next to the type name so it is clear that it is a pointer to that type. Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/media/v4l2-subdev.h | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index 1b83a6c46c1a..b855721879b8 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -930,10 +930,10 @@ struct v4l2_subdev_fh { * @cfg: pointer to &struct v4l2_subdev_pad_config array. * @pad: index of the pad in the @cfg array. */ -static inline struct v4l2_mbus_framefmt -*v4l2_subdev_get_try_format(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - unsigned int pad) +static inline struct v4l2_mbus_framefmt * +v4l2_subdev_get_try_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad) { if (WARN_ON(pad >= sd->entity.num_pads)) pad = 0; @@ -948,10 +948,10 @@ static inline struct v4l2_mbus_framefmt * @cfg: pointer to &struct v4l2_subdev_pad_config array. * @pad: index of the pad in the @cfg array. */ -static inline struct v4l2_rect -*v4l2_subdev_get_try_crop(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - unsigned int pad) +static inline struct v4l2_rect * +v4l2_subdev_get_try_crop(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad) { if (WARN_ON(pad >= sd->entity.num_pads)) pad = 0; @@ -966,15 +966,16 @@ static inline struct v4l2_rect * @cfg: pointer to &struct v4l2_subdev_pad_config array. * @pad: index of the pad in the @cfg array. 
*/ -static inline struct v4l2_rect -*v4l2_subdev_get_try_compose(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - unsigned int pad) +static inline struct v4l2_rect * +v4l2_subdev_get_try_compose(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad) { if (WARN_ON(pad >= sd->entity.num_pads)) pad = 0; return &cfg[pad].try_compose; } + #endif extern const struct v4l2_file_operations v4l2_subdev_fops; -- cgit v1.2.3 From 8f2a4a9d5ff5202d0b3e3a144ebb9b67aabadd29 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Fri, 17 Jul 2020 10:10:07 +0200 Subject: media: dvbdev.h: keep * together with the type Having the '*' in the next line separated from the type makes it hard to see that these functions return a pointer to that type. Instead, keep it next to the type name so it is clear that it is a pointer to that type. Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/media/dvbdev.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/media/dvbdev.h b/include/media/dvbdev.h index 1f7b008387f8..e547cbeee431 100644 --- a/include/media/dvbdev.h +++ b/include/media/dvbdev.h @@ -293,8 +293,8 @@ static inline void dvb_register_media_controller(struct dvb_adapter *adap, * * @adap: pointer to &struct dvb_adapter */ -static inline struct media_device -*dvb_get_media_controller(struct dvb_adapter *adap) +static inline struct media_device * +dvb_get_media_controller(struct dvb_adapter *adap) { return adap->mdev; } -- cgit v1.2.3 From 124ea650d3072b005457faed69909221c2905a1f Mon Sep 17 00:00:00 2001 From: Adrian Reber Date: Sun, 19 Jul 2020 12:04:11 +0200 Subject: capabilities: Introduce CAP_CHECKPOINT_RESTORE This patch introduces CAP_CHECKPOINT_RESTORE, a new capability facilitating checkpoint/restore for non-root users. Over the last years, The CRIU (Checkpoint/Restore In Userspace) team has been asked numerous times if it is possible to checkpoint/restore a process as non-root. The answer usually was: 'almost'. The main blocker to restore a process as non-root was to control the PID of the restored process. This feature available via the clone3 system call, or via /proc/sys/kernel/ns_last_pid is unfortunately guarded by CAP_SYS_ADMIN. In the past two years, requests for non-root checkpoint/restore have increased due to the following use cases: * Checkpoint/Restore in an HPC environment in combination with a resource manager distributing jobs where users are always running as non-root. There is a desire to provide a way to checkpoint and restore long running jobs. * Container migration as non-root * We have been in contact with JVM developers who are integrating CRIU into a Java VM to decrease the startup time. These checkpoint/restore applications are not meant to be running with CAP_SYS_ADMIN. We have seen the following workarounds: * Use a setuid wrapper around CRIU: See https://github.com/FredHutch/slurm-examples/blob/master/checkpointer/lib/checkpointer/checkpointer-suid.c * Use a setuid helper that writes to ns_last_pid. Unfortunately, this helper delegation technique is impossible to use with clone3, and is thus prone to races. 
See https://github.com/twosigma/set_ns_last_pid * Cycle through PIDs with fork() until the desired PID is reached: This has been demonstrated to work with cycling rates of 100,000 PIDs/s See https://github.com/twosigma/set_ns_last_pid * Patch out the CAP_SYS_ADMIN check from the kernel * Run the desired application in a new user and PID namespace to provide a local CAP_SYS_ADMIN for controlling PIDs. This technique has limited use in typical container environments (e.g., Kubernetes) as /proc is typically protected with read-only layers (e.g., /proc/sys) for hardening purposes. Read-only layers prevent additional /proc mounts (due to proc's SB_I_USERNS_VISIBLE property), making the use of new PID namespaces limited as certain applications need access to /proc matching their PID namespace. The introduced capability allows to: * Control PIDs when the current user is CAP_CHECKPOINT_RESTORE capable for the corresponding PID namespace via ns_last_pid/clone3. * Open files in /proc/pid/map_files when the current user is CAP_CHECKPOINT_RESTORE capable in the root namespace, useful for recovering files that are unreachable via the file system such as deleted files, or memfd files. See corresponding selftest for an example with clone3(). Signed-off-by: Adrian Reber Signed-off-by: Nicolas Viennot Reviewed-by: Serge Hallyn Acked-by: Christian Brauner Link: https://lore.kernel.org/r/20200719100418.2112740-2-areber@redhat.com Signed-off-by: Christian Brauner --- include/linux/capability.h | 6 ++++++ include/uapi/linux/capability.h | 9 ++++++++- security/selinux/include/classmap.h | 5 +++-- 3 files changed, 17 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/capability.h b/include/linux/capability.h index b4345b38a6be..1e7fe311cabe 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -261,6 +261,12 @@ static inline bool bpf_capable(void) return capable(CAP_BPF) || capable(CAP_SYS_ADMIN); } +static inline bool checkpoint_restore_ns_capable(struct user_namespace *ns) +{ + return ns_capable(ns, CAP_CHECKPOINT_RESTORE) || + ns_capable(ns, CAP_SYS_ADMIN); +} + /* audit system wants to get cap info from files as well */ extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h index 48ff0757ae5e..395dd0df8d08 100644 --- a/include/uapi/linux/capability.h +++ b/include/uapi/linux/capability.h @@ -408,7 +408,14 @@ struct vfs_ns_cap_data { */ #define CAP_BPF 39 -#define CAP_LAST_CAP CAP_BPF + +/* Allow checkpoint/restore related operations */ +/* Allow PID selection during clone3() */ +/* Allow writing to ns_last_pid */ + +#define CAP_CHECKPOINT_RESTORE 40 + +#define CAP_LAST_CAP CAP_CHECKPOINT_RESTORE #define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP) diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h index 98e1513b608a..40cebde62856 100644 --- a/security/selinux/include/classmap.h +++ b/security/selinux/include/classmap.h @@ -27,9 +27,10 @@ "audit_control", "setfcap" #define COMMON_CAP2_PERMS "mac_override", "mac_admin", "syslog", \ - "wake_alarm", "block_suspend", "audit_read", "perfmon", "bpf" + "wake_alarm", "block_suspend", "audit_read", "perfmon", "bpf", \ + "checkpoint_restore" -#if CAP_LAST_CAP > CAP_BPF +#if CAP_LAST_CAP > CAP_CHECKPOINT_RESTORE #error New capability defined, please update COMMON_CAP2_PERMS. 
#endif -- cgit v1.2.3 From 7ed33ea6b4faf0d63eafeef0b1a9b977b66ac88c Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Sun, 19 Jul 2020 17:17:05 +0200 Subject: ALSA: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Link: https://lore.kernel.org/r/20200719151705.59624-1-grandmaster@al2klimov.de Signed-off-by: Takashi Iwai --- Documentation/sound/alsa-configuration.rst | 6 +++--- Documentation/sound/cards/audigy-mixer.rst | 2 +- Documentation/sound/cards/sb-live-mixer.rst | 2 +- Documentation/sound/hd-audio/notes.rst | 6 +++--- include/sound/hdmi-codec.h | 2 +- include/sound/omap-hdmi-audio.h | 2 +- sound/sparc/dbri.c | 2 +- sound/usb/mixer_maps.c | 2 +- 8 files changed, 12 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/Documentation/sound/alsa-configuration.rst b/Documentation/sound/alsa-configuration.rst index 72f97d4b01a7..c755b1c5e16f 100644 --- a/Documentation/sound/alsa-configuration.rst +++ b/Documentation/sound/alsa-configuration.rst @@ -309,7 +309,7 @@ pcifix This module supports all ADB PCM channels, ac97 mixer, SPDIF, hardware EQ, mpu401, gameport. A3D and wavetable support are still in development. Development and reverse engineering work is being coordinated at -http://savannah.nongnu.org/projects/openvortex/ +https://savannah.nongnu.org/projects/openvortex/ SPDIF output has a copy of the AC97 codec output, unless you use the ``spdif`` pcm device, which allows raw data passthru. The hardware EQ hardware and SPDIF is only present in the Vortex2 and @@ -1575,7 +1575,7 @@ See Documentation/sound/cards/multisound.sh for important information about this driver. Note that it has been discontinued, but the Voyetra Turtle Beach knowledge base entry for it is still available at -http://www.turtlebeach.com +https://www.turtlebeach.com Module snd-msnd-pinnacle ------------------------ @@ -2703,4 +2703,4 @@ Kernel Bugzilla ALSA Developers ML mailto:alsa-devel@alsa-project.org alsa-info.sh script - http://www.alsa-project.org/alsa-info.sh + https://www.alsa-project.org/alsa-info.sh diff --git a/Documentation/sound/cards/audigy-mixer.rst b/Documentation/sound/cards/audigy-mixer.rst index 86213234435f..998f76e19cdd 100644 --- a/Documentation/sound/cards/audigy-mixer.rst +++ b/Documentation/sound/cards/audigy-mixer.rst @@ -331,7 +331,7 @@ WO 9901953 (A1) Execution and Audio Data Sequencing (Jan. 14, 1999) -US Patents (http://www.uspto.gov/) +US Patents (https://www.uspto.gov/) ---------------------------------- US 5925841 diff --git a/Documentation/sound/cards/sb-live-mixer.rst b/Documentation/sound/cards/sb-live-mixer.rst index bcb62fc99bbb..eccb0f0ffd0f 100644 --- a/Documentation/sound/cards/sb-live-mixer.rst +++ b/Documentation/sound/cards/sb-live-mixer.rst @@ -336,7 +336,7 @@ WO 9901953 (A1) Execution and Audio Data Sequencing (Jan. 
14, 1999) -US Patents (http://www.uspto.gov/) +US Patents (https://www.uspto.gov/) ---------------------------------- US 5925841 diff --git a/Documentation/sound/hd-audio/notes.rst b/Documentation/sound/hd-audio/notes.rst index 0f3109d9abc8..cf4d7158af78 100644 --- a/Documentation/sound/hd-audio/notes.rst +++ b/Documentation/sound/hd-audio/notes.rst @@ -42,7 +42,7 @@ If you are interested in the deep debugging of HD-audio, read the HD-audio specification at first. The specification is found on Intel's web page, for example: -* http://www.intel.com/standards/hdaudio/ +* https://www.intel.com/standards/hdaudio/ HD-Audio Controller @@ -728,7 +728,7 @@ version can be found on git repository: The script can be fetched directly from the following URL, too: -* http://www.alsa-project.org/alsa-info.sh +* https://www.alsa-project.org/alsa-info.sh Run this script as root, and it will gather the important information such as the module lists, module parameters, proc file contents @@ -818,7 +818,7 @@ proc-compatible output. The hda-analyzer: -* http://git.alsa-project.org/?p=alsa.git;a=tree;f=hda-analyzer +* https://git.alsa-project.org/?p=alsa.git;a=tree;f=hda-analyzer is a part of alsa.git repository in alsa-project.org: diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h index 83b17682e01c..cc0b29bbcde3 100644 --- a/include/sound/hdmi-codec.h +++ b/include/sound/hdmi-codec.h @@ -2,7 +2,7 @@ /* * hdmi-codec.h - HDMI Codec driver API * - * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com * * Author: Jyri Sarha */ diff --git a/include/sound/omap-hdmi-audio.h b/include/sound/omap-hdmi-audio.h index 16c007b651f4..e5f82044a404 100644 --- a/include/sound/omap-hdmi-audio.h +++ b/include/sound/omap-hdmi-audio.h @@ -2,7 +2,7 @@ /* * hdmi-audio.c -- OMAP4+ DSS HDMI audio support library * - * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com * * Author: Jyri Sarha */ diff --git a/sound/sparc/dbri.c b/sound/sparc/dbri.c index 17b79d45d17f..913adc8568d5 100644 --- a/sound/sparc/dbri.c +++ b/sound/sparc/dbri.c @@ -22,7 +22,7 @@ * - Data sheet of the T7903, a newer but very similar ISA bus equivalent * available from the Lucent (formerly AT&T microelectronics) home * page. - * - http://www.freesoft.org/Linux/DBRI/ + * - https://www.freesoft.org/Linux/DBRI/ * - MMCODEC: Crystal Semiconductor CS4215 16 bit Multimedia Audio Codec * Interfaces: CHI, Audio In & Out, 2 bits parallel * Documentation: from the Crystal Semiconductor home page. diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c index 9af7aa93f6fa..2ec484655201 100644 --- a/sound/usb/mixer_maps.c +++ b/sound/usb/mixer_maps.c @@ -233,7 +233,7 @@ static const struct usbmix_name_map maya44_map[] = { }; /* Section "justlink_map" below added by James Courtier-Dutton - * sourced from Maplin Electronics (http://www.maplin.co.uk), part number A56AK + * sourced from Maplin Electronics (https://www.maplin.co.uk), part number A56AK * Part has 2 connectors that act as a single output. (TOSLINK Optical for digital out, and 3.5mm Jack for Analogue out.) 
* The USB Mixer publishes a Microphone and extra Volume controls for it, but none exist on the device, * so this map removes all unwanted sliders from alsamixer -- cgit v1.2.3 From a4872f7a4bc16d6ed831833b4f1ec16df642c367 Mon Sep 17 00:00:00 2001 From: Tim Harvey Date: Tue, 9 Jun 2020 07:57:20 -0700 Subject: hwmon: (gsc) add 16bit pre-scaled voltage mode add a 16-bit pre-scaled voltage mode to adc and clarify that existing pre-scaled mode is 24bit. Signed-off-by: Tim Harvey Reviewed-by: Guenter Roeck Link: https://lore.kernel.org/r/1591714640-10332-3-git-send-email-tharvey@gateworks.com Signed-off-by: Guenter Roeck --- drivers/hwmon/gsc-hwmon.c | 8 +++++--- include/linux/platform_data/gsc_hwmon.h | 3 ++- 2 files changed, 7 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/hwmon/gsc-hwmon.c b/drivers/hwmon/gsc-hwmon.c index 2137bc65829d..3dfe2ca2f8c8 100644 --- a/drivers/hwmon/gsc-hwmon.c +++ b/drivers/hwmon/gsc-hwmon.c @@ -159,7 +159,7 @@ gsc_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, return -EOPNOTSUPP; } - sz = (ch->mode == mode_voltage) ? 3 : 2; + sz = (ch->mode == mode_voltage_24bit) ? 3 : 2; ret = regmap_bulk_read(hwmon->regmap, ch->reg, buf, sz); if (ret) return ret; @@ -186,7 +186,8 @@ gsc_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, /* adjust by uV offset */ tmp += ch->mvoffset; break; - case mode_voltage: + case mode_voltage_24bit: + case mode_voltage_16bit: /* no adjustment needed */ break; } @@ -336,7 +337,8 @@ static int gsc_hwmon_probe(struct platform_device *pdev) HWMON_T_LABEL; i_temp++; break; - case mode_voltage: + case mode_voltage_24bit: + case mode_voltage_16bit: case mode_voltage_raw: if (i_in == GSC_HWMON_MAX_IN_CH) { dev_err(gsc->dev, "too many input channels\n"); diff --git a/include/linux/platform_data/gsc_hwmon.h b/include/linux/platform_data/gsc_hwmon.h index ec1611aff863..37a8f554da00 100644 --- a/include/linux/platform_data/gsc_hwmon.h +++ b/include/linux/platform_data/gsc_hwmon.h @@ -4,8 +4,9 @@ enum gsc_hwmon_mode { mode_temperature, - mode_voltage, + mode_voltage_24bit, mode_voltage_raw, + mode_voltage_16bit, mode_max, }; -- cgit v1.2.3 From c4471ad9a50d5548e66ae4511acfb1dc23a48744 Mon Sep 17 00:00:00 2001 From: Michael Walle Date: Mon, 20 Jul 2020 00:03:33 +0200 Subject: net: phy: add USXGMII link partner ability constants The constants are taken from the USXGMII Singleport Copper Interface specification. The naming are based on the SGMII ones, but with an MDIO_ prefix. Signed-off-by: Michael Walle Reviewed-by: Russell King Signed-off-by: David S. 
Miller --- include/uapi/linux/mdio.h | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'include') diff --git a/include/uapi/linux/mdio.h b/include/uapi/linux/mdio.h index 4bcb41c71b8c..3f302e2523b2 100644 --- a/include/uapi/linux/mdio.h +++ b/include/uapi/linux/mdio.h @@ -324,4 +324,30 @@ static inline __u16 mdio_phy_id_c45(int prtad, int devad) return MDIO_PHY_ID_C45 | (prtad << 5) | devad; } +/* UsxgmiiChannelInfo[15:0] for USXGMII in-band auto-negotiation.*/ +#define MDIO_USXGMII_EEE_CLK_STP 0x0080 /* EEE clock stop supported */ +#define MDIO_USXGMII_EEE 0x0100 /* EEE supported */ +#define MDIO_USXGMII_SPD_MASK 0x0e00 /* USXGMII speed mask */ +#define MDIO_USXGMII_FULL_DUPLEX 0x1000 /* USXGMII full duplex */ +#define MDIO_USXGMII_DPX_SPD_MASK 0x1e00 /* USXGMII duplex and speed bits */ +#define MDIO_USXGMII_10 0x0000 /* 10Mbps */ +#define MDIO_USXGMII_10HALF 0x0000 /* 10Mbps half-duplex */ +#define MDIO_USXGMII_10FULL 0x1000 /* 10Mbps full-duplex */ +#define MDIO_USXGMII_100 0x0200 /* 100Mbps */ +#define MDIO_USXGMII_100HALF 0x0200 /* 100Mbps half-duplex */ +#define MDIO_USXGMII_100FULL 0x1200 /* 100Mbps full-duplex */ +#define MDIO_USXGMII_1000 0x0400 /* 1000Mbps */ +#define MDIO_USXGMII_1000HALF 0x0400 /* 1000Mbps half-duplex */ +#define MDIO_USXGMII_1000FULL 0x1400 /* 1000Mbps full-duplex */ +#define MDIO_USXGMII_10G 0x0600 /* 10Gbps */ +#define MDIO_USXGMII_10GHALF 0x0600 /* 10Gbps half-duplex */ +#define MDIO_USXGMII_10GFULL 0x1600 /* 10Gbps full-duplex */ +#define MDIO_USXGMII_2500 0x0800 /* 2500Mbps */ +#define MDIO_USXGMII_2500HALF 0x0800 /* 2500Mbps half-duplex */ +#define MDIO_USXGMII_2500FULL 0x1800 /* 2500Mbps full-duplex */ +#define MDIO_USXGMII_5000 0x0a00 /* 5000Mbps */ +#define MDIO_USXGMII_5000HALF 0x0a00 /* 5000Mbps half-duplex */ +#define MDIO_USXGMII_5000FULL 0x1a00 /* 5000Mbps full-duplex */ +#define MDIO_USXGMII_LINK 0x8000 /* PHY link with copper-side partner */ + #endif /* _UAPI__LINUX_MDIO_H__ */ -- cgit v1.2.3 From a06d30ae7af492497ffbca6abf1621d508b8fcaa Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:10 +0200 Subject: net/atm: remove the atmdev_ops {get, set}sockopt methods All implementations of these two methods are dummies that always return -EINVAL. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- drivers/atm/eni.c | 17 ----------------- drivers/atm/firestream.c | 2 -- drivers/atm/fore200e.c | 27 --------------------------- drivers/atm/horizon.c | 40 ---------------------------------------- drivers/atm/iphase.c | 16 ---------------- drivers/atm/lanai.c | 2 -- drivers/atm/solos-pci.c | 2 -- drivers/atm/zatm.c | 16 ---------------- include/linux/atmdev.h | 9 --------- net/atm/common.c | 14 ++------------ 10 files changed, 2 insertions(+), 143 deletions(-) (limited to 'include') diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index 17d47ad03ab7..b3d8e00e7671 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -2027,21 +2027,6 @@ static int eni_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) return dev->phy->ioctl(dev,cmd,arg); } - -static int eni_getsockopt(struct atm_vcc *vcc,int level,int optname, - void __user *optval,int optlen) -{ - return -EINVAL; -} - - -static int eni_setsockopt(struct atm_vcc *vcc,int level,int optname, - void __user *optval,unsigned int optlen) -{ - return -EINVAL; -} - - static int eni_send(struct atm_vcc *vcc,struct sk_buff *skb) { enum enq_res res; @@ -2215,8 +2200,6 @@ static const struct atmdev_ops ops = { .open = eni_open, .close = eni_close, .ioctl = eni_ioctl, - .getsockopt = eni_getsockopt, - .setsockopt = eni_setsockopt, .send = eni_send, .phy_put = eni_phy_put, .phy_get = eni_phy_get, diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index cc87004d5e2d..2ca9ec802734 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -1277,8 +1277,6 @@ static const struct atmdev_ops ops = { .send = fs_send, .owner = THIS_MODULE, /* ioctl: fs_ioctl, */ - /* getsockopt: fs_getsockopt, */ - /* setsockopt: fs_setsockopt, */ /* change_qos: fs_change_qos, */ /* For now implement these internally here... 
*/ diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c index f4ad7ce25ae8..a81bc49c14ac 100644 --- a/drivers/atm/fore200e.c +++ b/drivers/atm/fore200e.c @@ -1710,31 +1710,6 @@ fore200e_getstats(struct fore200e* fore200e) return 0; } - -static int -fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen) -{ - /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */ - - DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n", - vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen); - - return -EINVAL; -} - - -static int -fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, unsigned int optlen) -{ - /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */ - - DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n", - vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen); - - return -EINVAL; -} - - #if 0 /* currently unused */ static int fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs) @@ -3026,8 +3001,6 @@ static const struct atmdev_ops fore200e_ops = { .open = fore200e_open, .close = fore200e_close, .ioctl = fore200e_ioctl, - .getsockopt = fore200e_getsockopt, - .setsockopt = fore200e_setsockopt, .send = fore200e_send, .change_qos = fore200e_change_qos, .proc_read = fore200e_proc_read, diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c index e5da51f907a2..4f2951cbe69c 100644 --- a/drivers/atm/horizon.c +++ b/drivers/atm/horizon.c @@ -2527,46 +2527,6 @@ static void hrz_close (struct atm_vcc * atm_vcc) { clear_bit(ATM_VF_ADDR,&atm_vcc->flags); } -#if 0 -static int hrz_getsockopt (struct atm_vcc * atm_vcc, int level, int optname, - void *optval, int optlen) { - hrz_dev * dev = HRZ_DEV(atm_vcc->dev); - PRINTD (DBG_FLOW|DBG_VCC, "hrz_getsockopt"); - switch (level) { - case SOL_SOCKET: - switch (optname) { -// case SO_BCTXOPT: -// break; -// case SO_BCRXOPT: -// break; - default: - return -ENOPROTOOPT; - }; - break; - } - return -EINVAL; -} - -static int hrz_setsockopt (struct atm_vcc * atm_vcc, int level, int optname, - void *optval, unsigned int optlen) { - hrz_dev * dev = HRZ_DEV(atm_vcc->dev); - PRINTD (DBG_FLOW|DBG_VCC, "hrz_setsockopt"); - switch (level) { - case SOL_SOCKET: - switch (optname) { -// case SO_BCTXOPT: -// break; -// case SO_BCRXOPT: -// break; - default: - return -ENOPROTOOPT; - }; - break; - } - return -EINVAL; -} -#endif - #if 0 static int hrz_ioctl (struct atm_dev * atm_dev, unsigned int cmd, void *arg) { hrz_dev * dev = HRZ_DEV(atm_dev); diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index 8c7a996d1f16..eef637fd90b3 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -2880,20 +2880,6 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg) return 0; } -static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname, - void __user *optval, int optlen) -{ - IF_EVENT(printk(">ia_getsockopt\n");) - return -EINVAL; -} - -static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname, - void __user *optval, unsigned int optlen) -{ - IF_EVENT(printk(">ia_setsockopt\n");) - return -EINVAL; -} - static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) { IADEV *iadev; struct dle *wr_ptr; @@ -3164,8 +3150,6 @@ static const struct atmdev_ops ops = { .open = ia_open, .close = ia_close, .ioctl = ia_ioctl, - .getsockopt = ia_getsockopt, - .setsockopt = ia_setsockopt, .send = ia_send, .phy_put = ia_phy_put, .phy_get = ia_phy_get, 
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c index 645a6bc1df88..986c1313694c 100644 --- a/drivers/atm/lanai.c +++ b/drivers/atm/lanai.c @@ -2537,8 +2537,6 @@ static const struct atmdev_ops ops = { .dev_close = lanai_dev_close, .open = lanai_open, .close = lanai_close, - .getsockopt = NULL, - .setsockopt = NULL, .send = lanai_send, .phy_put = NULL, .phy_get = NULL, diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index b7646ae55942..94fbc3abe60e 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -1179,8 +1179,6 @@ static const struct atmdev_ops fpga_ops = { .open = popen, .close = pclose, .ioctl = NULL, - .getsockopt = NULL, - .setsockopt = NULL, .send = psend, .send_oam = NULL, .phy_put = NULL, diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c index 57f97b95a453..2788b985edbe 100644 --- a/drivers/atm/zatm.c +++ b/drivers/atm/zatm.c @@ -1515,20 +1515,6 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) } } - -static int zatm_getsockopt(struct atm_vcc *vcc,int level,int optname, - void __user *optval,int optlen) -{ - return -EINVAL; -} - - -static int zatm_setsockopt(struct atm_vcc *vcc,int level,int optname, - void __user *optval,unsigned int optlen) -{ - return -EINVAL; -} - static int zatm_send(struct atm_vcc *vcc,struct sk_buff *skb) { int error; @@ -1582,8 +1568,6 @@ static const struct atmdev_ops ops = { .open = zatm_open, .close = zatm_close, .ioctl = zatm_ioctl, - .getsockopt = zatm_getsockopt, - .setsockopt = zatm_setsockopt, .send = zatm_send, .phy_put = zatm_phy_put, .phy_get = zatm_phy_get, diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h index 8124815eb121..5d5ff2203fa2 100644 --- a/include/linux/atmdev.h +++ b/include/linux/atmdev.h @@ -176,11 +176,6 @@ struct atm_dev { #define ATM_OF_IMMED 1 /* Attempt immediate delivery */ #define ATM_OF_INRATE 2 /* Attempt in-rate delivery */ - -/* - * ioctl, getsockopt, and setsockopt are optional and can be set to NULL. - */ - struct atmdev_ops { /* only send is required */ void (*dev_close)(struct atm_dev *dev); int (*open)(struct atm_vcc *vcc); @@ -190,10 +185,6 @@ struct atmdev_ops { /* only send is required */ int (*compat_ioctl)(struct atm_dev *dev,unsigned int cmd, void __user *arg); #endif - int (*getsockopt)(struct atm_vcc *vcc,int level,int optname, - void __user *optval,int optlen); - int (*setsockopt)(struct atm_vcc *vcc,int level,int optname, - void __user *optval,unsigned int optlen); int (*send)(struct atm_vcc *vcc,struct sk_buff *skb); int (*send_oam)(struct atm_vcc *vcc,void *cell,int flags); void (*phy_put)(struct atm_dev *dev,unsigned char value, diff --git a/net/atm/common.c b/net/atm/common.c index 8575f5d52087..9b28f1fb3c69 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -782,13 +782,8 @@ int vcc_setsockopt(struct socket *sock, int level, int optname, vcc->atm_options &= ~ATM_ATMOPT_CLP; return 0; default: - if (level == SOL_SOCKET) - return -EINVAL; - break; - } - if (!vcc->dev || !vcc->dev->ops->setsockopt) return -EINVAL; - return vcc->dev->ops->setsockopt(vcc, level, optname, optval, optlen); + } } int vcc_getsockopt(struct socket *sock, int level, int optname, @@ -826,13 +821,8 @@ int vcc_getsockopt(struct socket *sock, int level, int optname, return copy_to_user(optval, &pvc, sizeof(pvc)) ? 
-EFAULT : 0; } default: - if (level == SOL_SOCKET) - return -EINVAL; - break; - } - if (!vcc->dev || !vcc->dev->ops->getsockopt) return -EINVAL; - return vcc->dev->ops->getsockopt(vcc, level, optname, optval, len); + } } int register_atmdevice_notifier(struct notifier_block *nb) -- cgit v1.2.3 From 4d295e54611509854a12c26f95a6f4430731d614 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:13 +0200 Subject: net: simplify cBPF setsockopt compat handling Add a helper that copies either a native or compat bpf_fprog from userspace after verifying the length, and remove the compat setsockopt handlers that now aren't required. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- include/linux/filter.h | 4 ++-- include/net/compat.h | 1 - net/compat.c | 45 +-------------------------------------------- net/core/filter.c | 23 +++++++++++++++++++++++ net/core/sock.c | 30 ++++++++++-------------------- net/packet/af_packet.c | 33 ++++----------------------------- 6 files changed, 40 insertions(+), 96 deletions(-) (limited to 'include') diff --git a/include/linux/filter.h b/include/linux/filter.h index 0b0144752d78..4d049c8e1fbe 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -502,13 +502,11 @@ static inline bool insn_is_zext(const struct bpf_insn *insn) offsetof(TYPE, MEMBER); \ }) -#ifdef CONFIG_COMPAT /* A struct sock_filter is architecture independent. */ struct compat_sock_fprog { u16 len; compat_uptr_t filter; /* struct sock_filter * */ }; -#endif struct sock_fprog_kern { u16 len; @@ -1278,4 +1276,6 @@ struct bpf_sockopt_kern { s32 retval; }; +int copy_bpf_fprog_from_user(struct sock_fprog *dst, void __user *src, int len); + #endif /* __LINUX_FILTER_H__ */ diff --git a/include/net/compat.h b/include/net/compat.h index f241666117d8..745db0d605b6 100644 --- a/include/net/compat.h +++ b/include/net/compat.h @@ -61,7 +61,6 @@ int __get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg, compat_size_t *len); int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *, struct sockaddr __user **, struct iovec **); -struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval); int put_cmsg_compat(struct msghdr*, int, int, int, void *); int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *, diff --git a/net/compat.c b/net/compat.c index 5e3041a2c37d..3e6c2c5ff260 100644 --- a/net/compat.c +++ b/net/compat.c @@ -335,49 +335,6 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm) __scm_destroy(scm); } -/* allocate a 64-bit sock_fprog on the user stack for duration of syscall. 
*/ -struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval) -{ - struct compat_sock_fprog __user *fprog32 = (struct compat_sock_fprog __user *)optval; - struct sock_fprog __user *kfprog = compat_alloc_user_space(sizeof(struct sock_fprog)); - struct compat_sock_fprog f32; - struct sock_fprog f; - - if (copy_from_user(&f32, fprog32, sizeof(*fprog32))) - return NULL; - memset(&f, 0, sizeof(f)); - f.len = f32.len; - f.filter = compat_ptr(f32.filter); - if (copy_to_user(kfprog, &f, sizeof(struct sock_fprog))) - return NULL; - - return kfprog; -} -EXPORT_SYMBOL_GPL(get_compat_bpf_fprog); - -static int do_set_attach_filter(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) -{ - struct sock_fprog __user *kfprog; - - kfprog = get_compat_bpf_fprog(optval); - if (!kfprog) - return -EFAULT; - - return sock_setsockopt(sock, level, optname, (char __user *)kfprog, - sizeof(struct sock_fprog)); -} - -static int compat_sock_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) -{ - if (optname == SO_ATTACH_FILTER || - optname == SO_ATTACH_REUSEPORT_CBPF) - return do_set_attach_filter(sock, level, optname, - optval, optlen); - return sock_setsockopt(sock, level, optname, optval, optlen); -} - static int __compat_sys_setsockopt(int fd, int level, int optname, char __user *optval, unsigned int optlen) { @@ -396,7 +353,7 @@ static int __compat_sys_setsockopt(int fd, int level, int optname, } if (level == SOL_SOCKET) - err = compat_sock_setsockopt(sock, level, + err = sock_setsockopt(sock, level, optname, optval, optlen); else if (sock->ops->compat_setsockopt) err = sock->ops->compat_setsockopt(sock, level, diff --git a/net/core/filter.c b/net/core/filter.c index bdd2382e655d..2bf6624796d8 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -77,6 +77,29 @@ #include #include +int copy_bpf_fprog_from_user(struct sock_fprog *dst, void __user *src, int len) +{ + if (in_compat_syscall()) { + struct compat_sock_fprog f32; + + if (len != sizeof(f32)) + return -EINVAL; + if (copy_from_user(&f32, src, sizeof(f32))) + return -EFAULT; + memset(dst, 0, sizeof(*dst)); + dst->len = f32.len; + dst->filter = compat_ptr(f32.filter); + } else { + if (len != sizeof(*dst)) + return -EINVAL; + if (copy_from_user(dst, src, sizeof(*dst))) + return -EFAULT; + } + + return 0; +} +EXPORT_SYMBOL_GPL(copy_bpf_fprog_from_user); + /** * sk_filter_trim_cap - run a packet through a socket filter * @sk: sock associated with &sk_buff diff --git a/net/core/sock.c b/net/core/sock.c index 11d6f77dd562..e085df794825 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1059,19 +1059,14 @@ set_sndbuf: ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen, optname == SO_SNDTIMEO_OLD); break; - case SO_ATTACH_FILTER: - ret = -EINVAL; - if (optlen == sizeof(struct sock_fprog)) { - struct sock_fprog fprog; - - ret = -EFAULT; - if (copy_from_user(&fprog, optval, sizeof(fprog))) - break; + case SO_ATTACH_FILTER: { + struct sock_fprog fprog; + ret = copy_bpf_fprog_from_user(&fprog, optval, optlen); + if (!ret) ret = sk_attach_filter(&fprog, sk); - } break; - + } case SO_ATTACH_BPF: ret = -EINVAL; if (optlen == sizeof(u32)) { @@ -1085,19 +1080,14 @@ set_sndbuf: } break; - case SO_ATTACH_REUSEPORT_CBPF: - ret = -EINVAL; - if (optlen == sizeof(struct sock_fprog)) { - struct sock_fprog fprog; - - ret = -EFAULT; - if (copy_from_user(&fprog, optval, sizeof(fprog))) - break; + case SO_ATTACH_REUSEPORT_CBPF: { + struct sock_fprog fprog; + ret = 
copy_bpf_fprog_from_user(&fprog, optval, optlen); + if (!ret) ret = sk_reuseport_attach_filter(&fprog, sk); - } break; - + } case SO_ATTACH_REUSEPORT_EBPF: ret = -EINVAL; if (optlen == sizeof(u32)) { diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 781fee93b7d5..35aee9e98053 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1545,10 +1545,10 @@ static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data, if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) return -EPERM; - if (len != sizeof(fprog)) - return -EINVAL; - if (copy_from_user(&fprog, data, len)) - return -EFAULT; + + ret = copy_bpf_fprog_from_user(&fprog, data, len); + if (ret) + return ret; ret = bpf_prog_create_from_user(&new, &fprog, NULL, false); if (ret) @@ -4040,28 +4040,6 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, return 0; } - -#ifdef CONFIG_COMPAT -static int compat_packet_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) -{ - struct packet_sock *po = pkt_sk(sock->sk); - - if (level != SOL_PACKET) - return -ENOPROTOOPT; - - if (optname == PACKET_FANOUT_DATA && - po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) { - optval = (char __user *)get_compat_bpf_fprog(optval); - if (!optval) - return -EFAULT; - optlen = sizeof(struct sock_fprog); - } - - return packet_setsockopt(sock, level, optname, optval, optlen); -} -#endif - static int packet_notifier(struct notifier_block *this, unsigned long msg, void *ptr) { @@ -4549,9 +4527,6 @@ static const struct proto_ops packet_ops = { .shutdown = sock_no_shutdown, .setsockopt = packet_setsockopt, .getsockopt = packet_getsockopt, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_packet_setsockopt, -#endif .sendmsg = packet_sendmsg, .recvmsg = packet_recvmsg, .mmap = packet_mmap, -- cgit v1.2.3 From 8c918ffbbad49454ed26c53eb1b90bf98bb5e394 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:14 +0200 Subject: net: remove compat_sock_common_{get,set}sockopt Add the compat handling to sock_common_{get,set}sockopt instead, keyed off in_compat_syscall(). This allows removing the now unused ->compat_{get,set}sockopt methods from struct proto_ops. Signed-off-by: Christoph Hellwig Acked-by: Matthieu Baerts Acked-by: Stefan Schmidt Signed-off-by: David S.
Miller --- include/linux/net.h | 6 ------ include/net/sock.h | 4 ---- net/core/sock.c | 30 ++++++------------------------ net/dccp/ipv4.c | 4 ---- net/dccp/ipv6.c | 2 -- net/ieee802154/socket.c | 8 -------- net/ipv4/af_inet.c | 6 ------ net/ipv6/af_inet6.c | 4 ---- net/ipv6/ipv6_sockglue.c | 12 ++---------- net/ipv6/raw.c | 2 -- net/l2tp/l2tp_ip.c | 4 ---- net/l2tp/l2tp_ip6.c | 2 -- net/mptcp/protocol.c | 6 ------ net/phonet/socket.c | 8 -------- net/sctp/ipv6.c | 2 -- net/sctp/protocol.c | 4 ---- 16 files changed, 8 insertions(+), 96 deletions(-) (limited to 'include') diff --git a/include/linux/net.h b/include/linux/net.h index 016a9c5faa34..858ff1d98154 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -165,12 +165,6 @@ struct proto_ops { int optname, char __user *optval, unsigned int optlen); int (*getsockopt)(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen); -#ifdef CONFIG_COMPAT - int (*compat_setsockopt)(struct socket *sock, int level, - int optname, char __user *optval, unsigned int optlen); - int (*compat_getsockopt)(struct socket *sock, int level, - int optname, char __user *optval, int __user *optlen); -#endif void (*show_fdinfo)(struct seq_file *m, struct socket *sock); int (*sendmsg) (struct socket *sock, struct msghdr *m, size_t total_len); diff --git a/include/net/sock.h b/include/net/sock.h index 4bf884165148..1fd7cf5fc751 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1744,10 +1744,6 @@ int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags); int sock_common_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen); -int compat_sock_common_getsockopt(struct socket *sock, int level, - int optname, char __user *optval, int __user *optlen); -int compat_sock_common_setsockopt(struct socket *sock, int level, - int optname, char __user *optval, unsigned int optlen); void sk_common_release(struct sock *sk); diff --git a/net/core/sock.c b/net/core/sock.c index e085df794825..018404d17626 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -3199,23 +3199,14 @@ int sock_common_getsockopt(struct socket *sock, int level, int optname, { struct sock *sk = sock->sk; - return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); -} -EXPORT_SYMBOL(sock_common_getsockopt); - #ifdef CONFIG_COMPAT -int compat_sock_common_getsockopt(struct socket *sock, int level, int optname, - char __user *optval, int __user *optlen) -{ - struct sock *sk = sock->sk; - - if (sk->sk_prot->compat_getsockopt != NULL) + if (in_compat_syscall() && sk->sk_prot->compat_getsockopt) return sk->sk_prot->compat_getsockopt(sk, level, optname, optval, optlen); +#endif return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); } -EXPORT_SYMBOL(compat_sock_common_getsockopt); -#endif +EXPORT_SYMBOL(sock_common_getsockopt); int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) @@ -3240,23 +3231,14 @@ int sock_common_setsockopt(struct socket *sock, int level, int optname, { struct sock *sk = sock->sk; - return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen); -} -EXPORT_SYMBOL(sock_common_setsockopt); - #ifdef CONFIG_COMPAT -int compat_sock_common_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) -{ - struct sock *sk = sock->sk; - - if (sk->sk_prot->compat_setsockopt != NULL) + if (in_compat_syscall() && sk->sk_prot->compat_setsockopt) return sk->sk_prot->compat_setsockopt(sk,
level, optname, optval, optlen); +#endif return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen); } -EXPORT_SYMBOL(compat_sock_common_setsockopt); -#endif +EXPORT_SYMBOL(sock_common_setsockopt); void sk_common_release(struct sock *sk) { diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index a7e989919c53..316cc5ac0da7 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -999,10 +999,6 @@ static const struct proto_ops inet_dccp_ops = { .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, -#endif }; static struct inet_protosw dccp_v4_protosw = { diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 650187d68851..b50f85a72cd5 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -1083,8 +1083,6 @@ static const struct proto_ops inet6_dccp_ops = { .sendpage = sock_no_sendpage, #ifdef CONFIG_COMPAT .compat_ioctl = inet6_compat_ioctl, - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, #endif }; diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index d93d4531aa9b..94ae9662133e 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -423,10 +423,6 @@ static const struct proto_ops ieee802154_raw_ops = { .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, -#endif }; /* DGRAM Sockets (802.15.4 dataframes) */ @@ -986,10 +982,6 @@ static const struct proto_ops ieee802154_dgram_ops = { .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, -#endif }; /* Create a socket. 
Initialise the socket, blank the addresses diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index ff141d630bdf..4307503a6f0b 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1043,8 +1043,6 @@ const struct proto_ops inet_stream_ops = { .sendpage_locked = tcp_sendpage_locked, .peek_len = tcp_peek_len, #ifdef CONFIG_COMPAT - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, .compat_ioctl = inet_compat_ioctl, #endif .set_rcvlowat = tcp_set_rcvlowat, @@ -1073,8 +1071,6 @@ const struct proto_ops inet_dgram_ops = { .sendpage = inet_sendpage, .set_peek_off = sk_set_peek_off, #ifdef CONFIG_COMPAT - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, .compat_ioctl = inet_compat_ioctl, #endif }; @@ -1105,8 +1101,6 @@ static const struct proto_ops inet_sockraw_ops = { .mmap = sock_no_mmap, .sendpage = inet_sendpage, #ifdef CONFIG_COMPAT - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, .compat_ioctl = inet_compat_ioctl, #endif }; diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index b304b882e031..0306509ab063 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -688,8 +688,6 @@ const struct proto_ops inet6_stream_ops = { .peek_len = tcp_peek_len, #ifdef CONFIG_COMPAT .compat_ioctl = inet6_compat_ioctl, - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, #endif .set_rcvlowat = tcp_set_rcvlowat, }; @@ -717,8 +715,6 @@ const struct proto_ops inet6_dgram_ops = { .set_peek_off = sk_set_peek_off, #ifdef CONFIG_COMPAT .compat_ioctl = inet6_compat_ioctl, - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, #endif }; diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 20576e87a5f7..6ab44ec2c369 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -914,12 +914,8 @@ int compat_ipv6_setsockopt(struct sock *sk, int level, int optname, { int err; - if (level == SOL_IP && sk->sk_type != SOCK_RAW) { - if (udp_prot.compat_setsockopt != NULL) - return udp_prot.compat_setsockopt(sk, level, optname, - optval, optlen); + if (level == SOL_IP && sk->sk_type != SOCK_RAW) return udp_prot.setsockopt(sk, level, optname, optval, optlen); - } if (level != SOL_IPV6) return -ENOPROTOOPT; @@ -1480,12 +1476,8 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, { int err; - if (level == SOL_IP && sk->sk_type != SOCK_RAW) { - if (udp_prot.compat_getsockopt != NULL) - return udp_prot.compat_getsockopt(sk, level, optname, - optval, optlen); + if (level == SOL_IP && sk->sk_type != SOCK_RAW) return udp_prot.getsockopt(sk, level, optname, optval, optlen); - } if (level != SOL_IPV6) return -ENOPROTOOPT; diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 8ef5a7b30524..e23c6b461758 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -1378,8 +1378,6 @@ const struct proto_ops inet6_sockraw_ops = { .sendpage = sock_no_sendpage, #ifdef CONFIG_COMPAT .compat_ioctl = inet6_compat_ioctl, - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, #endif }; diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 955662a6dee7..f8d7412cfb3d 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -638,10 +638,6 @@ static const struct proto_ops l2tp_ip_ops = { .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, .sendpage = 
sock_no_sendpage, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, -#endif }; static struct inet_protosw l2tp_ip_protosw = { diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 526ed2c24dd5..2cdc0b7a7a43 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -773,8 +773,6 @@ static const struct proto_ops l2tp_ip6_ops = { .sendpage = sock_no_sendpage, #ifdef CONFIG_COMPAT .compat_ioctl = inet6_compat_ioctl, - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, #endif }; diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index dbe43e0cd734..f0b0b503c262 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -2166,10 +2166,6 @@ static const struct proto_ops mptcp_stream_ops = { .recvmsg = inet_recvmsg, .mmap = sock_no_mmap, .sendpage = inet_sendpage, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, -#endif }; static struct inet_protosw mptcp_protosw = { @@ -2222,8 +2218,6 @@ static const struct proto_ops mptcp_v6_stream_ops = { .sendpage = inet_sendpage, #ifdef CONFIG_COMPAT .compat_ioctl = inet6_compat_ioctl, - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, #endif }; diff --git a/net/phonet/socket.c b/net/phonet/socket.c index 76d499f6af9a..87c60f83c180 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c @@ -441,10 +441,6 @@ const struct proto_ops phonet_dgram_ops = { .shutdown = sock_no_shutdown, .setsockopt = sock_no_setsockopt, .getsockopt = sock_no_getsockopt, -#ifdef CONFIG_COMPAT - .compat_setsockopt = sock_no_setsockopt, - .compat_getsockopt = sock_no_getsockopt, -#endif .sendmsg = pn_socket_sendmsg, .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, @@ -466,10 +462,6 @@ const struct proto_ops phonet_stream_ops = { .shutdown = sock_no_shutdown, .setsockopt = sock_common_setsockopt, .getsockopt = sock_common_getsockopt, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, -#endif .sendmsg = pn_socket_sendmsg, .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index ccfa0ab3e7f4..ebda31b7747d 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -1033,8 +1033,6 @@ static const struct proto_ops inet6_seqpacket_ops = { .mmap = sock_no_mmap, #ifdef CONFIG_COMPAT .compat_ioctl = inet6_compat_ioctl, - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, #endif }; diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index cde29f3c7fb3..8d25cc464efd 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -1036,10 +1036,6 @@ static const struct proto_ops inet_seqpacket_ops = { .recvmsg = inet_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, -#endif }; /* Registration with AF_INET family. */ -- cgit v1.2.3 From 55db9c0e853421fa71cac5e6855898601f78a1f5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:15 +0200 Subject: net: remove compat_sys_{get,set}sockopt Now that the ->compat_{get,set}sockopt proto_ops methods are gone there is no good reason left to keep the compat syscalls separate. 
This fixes the odd use of unsigned int for the compat_setsockopt optlen and the missing sock_use_custom_sol_socket. It would also easily allow running the eBPF hooks for the compat syscalls, but such a large change in behavior does not belong into a consolidation patch like this one. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- arch/arm64/include/asm/unistd32.h | 4 +- arch/mips/kernel/syscalls/syscall_n32.tbl | 4 +- arch/mips/kernel/syscalls/syscall_o32.tbl | 4 +- arch/parisc/kernel/syscalls/syscall.tbl | 4 +- arch/powerpc/kernel/syscalls/syscall.tbl | 4 +- arch/s390/kernel/syscalls/syscall.tbl | 4 +- arch/sparc/kernel/sys32.S | 12 ++-- arch/sparc/kernel/syscalls/syscall.tbl | 4 +- arch/x86/entry/syscall_x32.c | 7 ++ arch/x86/entry/syscalls/syscall_32.tbl | 4 +- arch/x86/entry/syscalls/syscall_64.tbl | 4 +- include/linux/compat.h | 4 -- include/linux/syscalls.h | 4 ++ include/uapi/asm-generic/unistd.h | 4 +- net/compat.c | 79 +--------------------- net/socket.c | 25 ++++--- tools/include/uapi/asm-generic/unistd.h | 4 +- tools/perf/arch/powerpc/entry/syscalls/syscall.tbl | 4 +- tools/perf/arch/s390/entry/syscalls/syscall.tbl | 4 +- tools/perf/arch/x86/entry/syscalls/syscall_64.tbl | 4 +- 20 files changed, 62 insertions(+), 125 deletions(-) (limited to 'include') diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 6d95d0c8bf2f..166e36903110 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -599,9 +599,9 @@ __SYSCALL(__NR_recvfrom, compat_sys_recvfrom) #define __NR_shutdown 293 __SYSCALL(__NR_shutdown, sys_shutdown) #define __NR_setsockopt 294 -__SYSCALL(__NR_setsockopt, compat_sys_setsockopt) +__SYSCALL(__NR_setsockopt, sys_setsockopt) #define __NR_getsockopt 295 -__SYSCALL(__NR_getsockopt, compat_sys_getsockopt) +__SYSCALL(__NR_getsockopt, sys_getsockopt) #define __NR_sendmsg 296 __SYSCALL(__NR_sendmsg, compat_sys_sendmsg) #define __NR_recvmsg 297 diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl index f777141f5256..8488b0d0a99e 100644 --- a/arch/mips/kernel/syscalls/syscall_n32.tbl +++ b/arch/mips/kernel/syscalls/syscall_n32.tbl @@ -60,8 +60,8 @@ 50 n32 getsockname sys_getsockname 51 n32 getpeername sys_getpeername 52 n32 socketpair sys_socketpair -53 n32 setsockopt compat_sys_setsockopt -54 n32 getsockopt compat_sys_getsockopt +53 n32 setsockopt sys_setsockopt +54 n32 getsockopt sys_getsockopt 55 n32 clone __sys_clone 56 n32 fork __sys_fork 57 n32 execve compat_sys_execve diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl index 13280625d312..b20522f813f9 100644 --- a/arch/mips/kernel/syscalls/syscall_o32.tbl +++ b/arch/mips/kernel/syscalls/syscall_o32.tbl @@ -184,7 +184,7 @@ 170 o32 connect sys_connect 171 o32 getpeername sys_getpeername 172 o32 getsockname sys_getsockname -173 o32 getsockopt sys_getsockopt compat_sys_getsockopt +173 o32 getsockopt sys_getsockopt sys_getsockopt 174 o32 listen sys_listen 175 o32 recv sys_recv compat_sys_recv 176 o32 recvfrom sys_recvfrom compat_sys_recvfrom @@ -192,7 +192,7 @@ 178 o32 send sys_send 179 o32 sendmsg sys_sendmsg compat_sys_sendmsg 180 o32 sendto sys_sendto -181 o32 setsockopt sys_setsockopt compat_sys_setsockopt +181 o32 setsockopt sys_setsockopt sys_setsockopt 182 o32 shutdown sys_shutdown 183 o32 socket sys_socket 184 o32 socketpair sys_socketpair diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl index 
5a758fa6ec52..3494e4fa1a17 100644 --- a/arch/parisc/kernel/syscalls/syscall.tbl +++ b/arch/parisc/kernel/syscalls/syscall.tbl @@ -198,8 +198,8 @@ 178 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo 179 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend 180 common chown sys_chown -181 common setsockopt sys_setsockopt compat_sys_setsockopt -182 common getsockopt sys_getsockopt compat_sys_getsockopt +181 common setsockopt sys_setsockopt sys_setsockopt +182 common getsockopt sys_getsockopt sys_getsockopt 183 common sendmsg sys_sendmsg compat_sys_sendmsg 184 common recvmsg sys_recvmsg compat_sys_recvmsg 185 common semop sys_semop diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl index f833a3190822..94eb5b27ef65 100644 --- a/arch/powerpc/kernel/syscalls/syscall.tbl +++ b/arch/powerpc/kernel/syscalls/syscall.tbl @@ -433,8 +433,8 @@ 336 common recv sys_recv compat_sys_recv 337 common recvfrom sys_recvfrom compat_sys_recvfrom 338 common shutdown sys_shutdown -339 common setsockopt sys_setsockopt compat_sys_setsockopt -340 common getsockopt sys_getsockopt compat_sys_getsockopt +339 common setsockopt sys_setsockopt sys_setsockopt +340 common getsockopt sys_getsockopt sys_getsockopt 341 common sendmsg sys_sendmsg compat_sys_sendmsg 342 common recvmsg sys_recvmsg compat_sys_recvmsg 343 32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32 diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl index bfdcb7633957..0d63c71fc544 100644 --- a/arch/s390/kernel/syscalls/syscall.tbl +++ b/arch/s390/kernel/syscalls/syscall.tbl @@ -372,8 +372,8 @@ 362 common connect sys_connect sys_connect 363 common listen sys_listen sys_listen 364 common accept4 sys_accept4 sys_accept4 -365 common getsockopt sys_getsockopt compat_sys_getsockopt -366 common setsockopt sys_setsockopt compat_sys_setsockopt +365 common getsockopt sys_getsockopt sys_getsockopt +366 common setsockopt sys_setsockopt sys_setsockopt 367 common getsockname sys_getsockname sys_getsockname 368 common getpeername sys_getpeername sys_getpeername 369 common sendto sys_sendto sys_sendto diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S index 489ffab918a8..a45f0f31fe51 100644 --- a/arch/sparc/kernel/sys32.S +++ b/arch/sparc/kernel/sys32.S @@ -157,22 +157,22 @@ do_sys_shutdown: /* sys_shutdown(int, int) */ nop nop nop -do_sys_setsockopt: /* compat_sys_setsockopt(int, int, int, char *, int) */ +do_sys_setsockopt: /* sys_setsockopt(int, int, int, char *, int) */ 47: ldswa [%o1 + 0x0] %asi, %o0 - sethi %hi(compat_sys_setsockopt), %g1 + sethi %hi(sys_setsockopt), %g1 48: ldswa [%o1 + 0x8] %asi, %o2 49: lduwa [%o1 + 0xc] %asi, %o3 50: ldswa [%o1 + 0x10] %asi, %o4 - jmpl %g1 + %lo(compat_sys_setsockopt), %g0 + jmpl %g1 + %lo(sys_setsockopt), %g0 51: ldswa [%o1 + 0x4] %asi, %o1 nop -do_sys_getsockopt: /* compat_sys_getsockopt(int, int, int, u32, u32) */ +do_sys_getsockopt: /* sys_getsockopt(int, int, int, u32, u32) */ 52: ldswa [%o1 + 0x0] %asi, %o0 - sethi %hi(compat_sys_getsockopt), %g1 + sethi %hi(sys_getsockopt), %g1 53: ldswa [%o1 + 0x8] %asi, %o2 54: lduwa [%o1 + 0xc] %asi, %o3 55: lduwa [%o1 + 0x10] %asi, %o4 - jmpl %g1 + %lo(compat_sys_getsockopt), %g0 + jmpl %g1 + %lo(sys_getsockopt), %g0 56: ldswa [%o1 + 0x4] %asi, %o1 nop do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */ diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl index 
8004a276cb74..c59b37965add 100644 --- a/arch/sparc/kernel/syscalls/syscall.tbl +++ b/arch/sparc/kernel/syscalls/syscall.tbl @@ -147,7 +147,7 @@ 115 32 getgroups32 sys_getgroups 116 common gettimeofday sys_gettimeofday compat_sys_gettimeofday 117 common getrusage sys_getrusage compat_sys_getrusage -118 common getsockopt sys_getsockopt compat_sys_getsockopt +118 common getsockopt sys_getsockopt sys_getsockopt 119 common getcwd sys_getcwd 120 common readv sys_readv compat_sys_readv 121 common writev sys_writev compat_sys_writev @@ -425,7 +425,7 @@ 352 common userfaultfd sys_userfaultfd 353 common bind sys_bind 354 common listen sys_listen -355 common setsockopt sys_setsockopt compat_sys_setsockopt +355 common setsockopt sys_setsockopt sys_setsockopt 356 common mlock2 sys_mlock2 357 common copy_file_range sys_copy_file_range 358 common preadv2 sys_preadv2 compat_sys_preadv2 diff --git a/arch/x86/entry/syscall_x32.c b/arch/x86/entry/syscall_x32.c index 3d8d70d3896c..1583831f61a9 100644 --- a/arch/x86/entry/syscall_x32.c +++ b/arch/x86/entry/syscall_x32.c @@ -8,6 +8,13 @@ #include #include +/* + * Reuse the 64-bit entry points for the x32 versions that occupy different + * slots in the syscall table. + */ +#define __x32_sys_getsockopt __x64_sys_getsockopt +#define __x32_sys_setsockopt __x64_sys_setsockopt + #define __SYSCALL_64(nr, sym) #define __SYSCALL_X32(nr, sym) extern long __x32_##sym(const struct pt_regs *); diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index d8f8a1a69ed1..43742a69dba1 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl @@ -376,8 +376,8 @@ 362 i386 connect sys_connect 363 i386 listen sys_listen 364 i386 accept4 sys_accept4 -365 i386 getsockopt sys_getsockopt compat_sys_getsockopt -366 i386 setsockopt sys_setsockopt compat_sys_setsockopt +365 i386 getsockopt sys_getsockopt sys_getsockopt +366 i386 setsockopt sys_setsockopt sys_setsockopt 367 i386 getsockname sys_getsockname 368 i386 getpeername sys_getpeername 369 i386 sendto sys_sendto diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl index 78847b32e137..e008d638e641 100644 --- a/arch/x86/entry/syscalls/syscall_64.tbl +++ b/arch/x86/entry/syscalls/syscall_64.tbl @@ -396,8 +396,8 @@ 538 x32 sendmmsg compat_sys_sendmmsg 539 x32 process_vm_readv compat_sys_process_vm_readv 540 x32 process_vm_writev compat_sys_process_vm_writev -541 x32 setsockopt compat_sys_setsockopt -542 x32 getsockopt compat_sys_getsockopt +541 x32 setsockopt sys_setsockopt +542 x32 getsockopt sys_getsockopt 543 x32 io_setup compat_sys_io_setup 544 x32 io_submit compat_sys_io_submit 545 x32 execveat compat_sys_execveat diff --git a/include/linux/compat.h b/include/linux/compat.h index e90100c0de72..c4255d8a4a8a 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -737,10 +737,6 @@ asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg); asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, compat_size_t len, unsigned flags, struct sockaddr __user *addr, int __user *addrlen); -asmlinkage long compat_sys_setsockopt(int fd, int level, int optname, - char __user *optval, unsigned int optlen); -asmlinkage long compat_sys_getsockopt(int fd, int level, int optname, - char __user *optval, int __user *optlen); asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags); asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, diff 
--git a/include/linux/syscalls.h b/include/linux/syscalls.h index b951a87da987..aa46825c6f9d 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1424,4 +1424,8 @@ long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems, unsigned int nsops, const struct old_timespec32 __user *timeout); +int __sys_getsockopt(int fd, int level, int optname, char __user *optval, + int __user *optlen); +int __sys_setsockopt(int fd, int level, int optname, char __user *optval, + int optlen); #endif diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index f4a01305d9a6..c8c189a5f0a6 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -606,9 +606,9 @@ __SYSCALL(__NR_sendto, sys_sendto) #define __NR_recvfrom 207 __SC_COMP(__NR_recvfrom, sys_recvfrom, compat_sys_recvfrom) #define __NR_setsockopt 208 -__SC_COMP(__NR_setsockopt, sys_setsockopt, compat_sys_setsockopt) +__SC_COMP(__NR_setsockopt, sys_setsockopt, sys_setsockopt) #define __NR_getsockopt 209 -__SC_COMP(__NR_getsockopt, sys_getsockopt, compat_sys_getsockopt) +__SC_COMP(__NR_getsockopt, sys_getsockopt, sys_getsockopt) #define __NR_shutdown 210 __SYSCALL(__NR_shutdown, sys_shutdown) #define __NR_sendmsg 211 diff --git a/net/compat.c b/net/compat.c index 3e6c2c5ff260..091875bd6210 100644 --- a/net/compat.c +++ b/net/compat.c @@ -335,77 +335,6 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm) __scm_destroy(scm); } -static int __compat_sys_setsockopt(int fd, int level, int optname, - char __user *optval, unsigned int optlen) -{ - int err; - struct socket *sock; - - if (optlen > INT_MAX) - return -EINVAL; - - sock = sockfd_lookup(fd, &err); - if (sock) { - err = security_socket_setsockopt(sock, level, optname); - if (err) { - sockfd_put(sock); - return err; - } - - if (level == SOL_SOCKET) - err = sock_setsockopt(sock, level, - optname, optval, optlen); - else if (sock->ops->compat_setsockopt) - err = sock->ops->compat_setsockopt(sock, level, - optname, optval, optlen); - else - err = sock->ops->setsockopt(sock, level, - optname, optval, optlen); - sockfd_put(sock); - } - return err; -} - -COMPAT_SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname, - char __user *, optval, unsigned int, optlen) -{ - return __compat_sys_setsockopt(fd, level, optname, optval, optlen); -} - -static int __compat_sys_getsockopt(int fd, int level, int optname, - char __user *optval, - int __user *optlen) -{ - int err; - struct socket *sock = sockfd_lookup(fd, &err); - - if (sock) { - err = security_socket_getsockopt(sock, level, optname); - if (err) { - sockfd_put(sock); - return err; - } - - if (level == SOL_SOCKET) - err = sock_getsockopt(sock, level, - optname, optval, optlen); - else if (sock->ops->compat_getsockopt) - err = sock->ops->compat_getsockopt(sock, level, - optname, optval, optlen); - else - err = sock->ops->getsockopt(sock, level, - optname, optval, optlen); - sockfd_put(sock); - } - return err; -} - -COMPAT_SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname, - char __user *, optval, int __user *, optlen) -{ - return __compat_sys_getsockopt(fd, level, optname, optval, optlen); -} - /* Argument list sizes for compat_sys_socketcall */ #define AL(x) ((x) * sizeof(u32)) static unsigned char nas[21] = { @@ -565,13 +494,11 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args) ret = __sys_shutdown(a0, a1); break; case SYS_SETSOCKOPT: - ret = __compat_sys_setsockopt(a0, a1, a[2], - compat_ptr(a[3]), a[4]); + ret = 
__sys_setsockopt(a0, a1, a[2], compat_ptr(a[3]), a[4]); break; case SYS_GETSOCKOPT: - ret = __compat_sys_getsockopt(a0, a1, a[2], - compat_ptr(a[3]), - compat_ptr(a[4])); + ret = __sys_getsockopt(a0, a1, a[2], compat_ptr(a[3]), + compat_ptr(a[4])); break; case SYS_SENDMSG: ret = __compat_sys_sendmsg(a0, compat_ptr(a1), a[2]); diff --git a/net/socket.c b/net/socket.c index b79376b17b45..dec345982abb 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2094,9 +2094,8 @@ static bool sock_use_custom_sol_socket(const struct socket *sock) * Set a socket option. Because we don't know the option lengths we have * to pass the user mode parameter for the protocols to sort out. */ - -static int __sys_setsockopt(int fd, int level, int optname, - char __user *optval, int optlen) +int __sys_setsockopt(int fd, int level, int optname, char __user *optval, + int optlen) { mm_segment_t oldfs = get_fs(); char *kernel_optval = NULL; @@ -2114,8 +2113,10 @@ static int __sys_setsockopt(int fd, int level, int optname, if (err) goto out_put; - err = BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock->sk, &level, &optname, - optval, &optlen, &kernel_optval); + if (!in_compat_syscall()) + err = BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock->sk, &level, &optname, + optval, &optlen, + &kernel_optval); if (err < 0) goto out_put; if (err > 0) { @@ -2154,9 +2155,8 @@ SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname, * Get a socket option. Because we don't know the option lengths we have * to pass a user mode parameter for the protocols to sort out. */ - -static int __sys_getsockopt(int fd, int level, int optname, - char __user *optval, int __user *optlen) +int __sys_getsockopt(int fd, int level, int optname, char __user *optval, + int __user *optlen) { int err, fput_needed; struct socket *sock; @@ -2170,7 +2170,8 @@ static int __sys_getsockopt(int fd, int level, int optname, if (err) goto out_put; - max_optlen = BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen); + if (!in_compat_syscall()) + max_optlen = BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen); if (level == SOL_SOCKET) err = sock_getsockopt(sock, level, optname, optval, optlen); @@ -2178,8 +2179,10 @@ static int __sys_getsockopt(int fd, int level, int optname, err = sock->ops->getsockopt(sock, level, optname, optval, optlen); - err = BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock->sk, level, optname, optval, - optlen, max_optlen, err); + if (!in_compat_syscall()) + err = BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock->sk, level, optname, + optval, optlen, max_optlen, + err); out_put: fput_light(sock->file, fput_needed); return err; diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h index f4a01305d9a6..c8c189a5f0a6 100644 --- a/tools/include/uapi/asm-generic/unistd.h +++ b/tools/include/uapi/asm-generic/unistd.h @@ -606,9 +606,9 @@ __SYSCALL(__NR_sendto, sys_sendto) #define __NR_recvfrom 207 __SC_COMP(__NR_recvfrom, sys_recvfrom, compat_sys_recvfrom) #define __NR_setsockopt 208 -__SC_COMP(__NR_setsockopt, sys_setsockopt, compat_sys_setsockopt) +__SC_COMP(__NR_setsockopt, sys_setsockopt, sys_setsockopt) #define __NR_getsockopt 209 -__SC_COMP(__NR_getsockopt, sys_getsockopt, compat_sys_getsockopt) +__SC_COMP(__NR_getsockopt, sys_getsockopt, sys_getsockopt) #define __NR_shutdown 210 __SYSCALL(__NR_shutdown, sys_shutdown) #define __NR_sendmsg 211 diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl index 35b61bfc1b1a..b190f2eb2611 100644 --- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl +++ 
b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl @@ -427,8 +427,8 @@ 336 common recv sys_recv compat_sys_recv 337 common recvfrom sys_recvfrom compat_sys_recvfrom 338 common shutdown sys_shutdown -339 common setsockopt sys_setsockopt compat_sys_setsockopt -340 common getsockopt sys_getsockopt compat_sys_getsockopt +339 common setsockopt sys_setsockopt sys_setsockopt +340 common getsockopt sys_getsockopt sys_getsockopt 341 common sendmsg sys_sendmsg compat_sys_sendmsg 342 common recvmsg sys_recvmsg compat_sys_recvmsg 343 32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32 diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl index b38d48464368..56ae24b6e4be 100644 --- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl +++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl @@ -372,8 +372,8 @@ 362 common connect sys_connect compat_sys_connect 363 common listen sys_listen sys_listen 364 common accept4 sys_accept4 compat_sys_accept4 -365 common getsockopt sys_getsockopt compat_sys_getsockopt -366 common setsockopt sys_setsockopt compat_sys_setsockopt +365 common getsockopt sys_getsockopt sys_getsockopt +366 common setsockopt sys_setsockopt sys_setsockopt 367 common getsockname sys_getsockname compat_sys_getsockname 368 common getpeername sys_getpeername compat_sys_getpeername 369 common sendto sys_sendto compat_sys_sendto diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl index 78847b32e137..e008d638e641 100644 --- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl +++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl @@ -396,8 +396,8 @@ 538 x32 sendmmsg compat_sys_sendmmsg 539 x32 process_vm_readv compat_sys_process_vm_readv 540 x32 process_vm_writev compat_sys_process_vm_writev -541 x32 setsockopt compat_sys_setsockopt -542 x32 getsockopt compat_sys_getsockopt +541 x32 setsockopt sys_setsockopt +542 x32 getsockopt sys_getsockopt 543 x32 io_setup compat_sys_io_setup 544 x32 io_submit compat_sys_io_submit 545 x32 execveat compat_sys_execveat -- cgit v1.2.3 From 77d4df41d53e5c2af14db26f20fe50da52e382ba Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:20 +0200 Subject: netfilter: remove the compat_{get,set} methods All instances handle compat sockopts via in_compat_syscall() now, so remove the compat_{get,set} methods as well as the compat_nf_{get,set}sockopt wrappers. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- include/linux/netfilter.h | 14 -------------- net/ipv4/ip_sockglue.c | 5 ++--- net/ipv6/ipv6_sockglue.c | 5 ++--- net/netfilter/nf_sockopt.c | 42 ------------------------------------------ 4 files changed, 4 insertions(+), 62 deletions(-) (limited to 'include') diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index eb312e7ca36e..711b4d4486f0 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -164,17 +164,9 @@ struct nf_sockopt_ops { int set_optmin; int set_optmax; int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len); -#ifdef CONFIG_COMPAT - int (*compat_set)(struct sock *sk, int optval, - void __user *user, unsigned int len); -#endif int get_optmin; int get_optmax; int (*get)(struct sock *sk, int optval, void __user *user, int *len); -#ifdef CONFIG_COMPAT - int (*compat_get)(struct sock *sk, int optval, - void __user *user, int *len); -#endif /* Use the module struct to lock set/get code in place */ struct module *owner; }; @@ -350,12 +342,6 @@ int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, unsigned int len); int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, int *len); -#ifdef CONFIG_COMPAT -int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, - char __user *opt, unsigned int len); -int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, - char __user *opt, int *len); -#endif struct flowi; struct nf_queue_entry; diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 84ec3703c909..95f4248c6fc5 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -1446,8 +1446,7 @@ mc_msf_out: optname != IP_IPSEC_POLICY && optname != IP_XFRM_POLICY && !ip_mroute_opt(optname)) - err = compat_nf_setsockopt(sk, PF_INET, optname, optval, - optlen); + err = nf_setsockopt(sk, PF_INET, optname, optval, optlen); #endif return err; } @@ -1821,7 +1820,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname, if (get_user(len, optlen)) return -EFAULT; - err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len); + err = nf_getsockopt(sk, PF_INET, optname, optval, &len); if (err >= 0) err = put_user(len, optlen); return err; diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 6ab44ec2c369..6adfbdcb7979 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -1030,8 +1030,7 @@ mc_msf_out: /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY && optname != IPV6_XFRM_POLICY) - err = compat_nf_setsockopt(sk, PF_INET6, optname, optval, - optlen); + err = nf_setsockopt(sk, PF_INET6, optname, optval, optlen); #endif return err; } @@ -1531,7 +1530,7 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, if (get_user(len, optlen)) return -EFAULT; - err = compat_nf_getsockopt(sk, PF_INET6, optname, optval, &len); + err = nf_getsockopt(sk, PF_INET6, optname, optval, &len); if (err >= 0) err = put_user(len, optlen); } diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c index 46cb3786e0ec..02870993d335 100644 --- a/net/netfilter/nf_sockopt.c +++ b/net/netfilter/nf_sockopt.c @@ -122,45 +122,3 @@ int nf_getsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt, return nf_sockopt(sk, pf, val, opt, len, 1); } EXPORT_SYMBOL(nf_getsockopt); - -#ifdef CONFIG_COMPAT -static int compat_nf_sockopt(struct sock *sk, u_int8_t pf, int val, - char __user *opt, int *len, int get) -{ - struct 
nf_sockopt_ops *ops; - int ret; - - ops = nf_sockopt_find(sk, pf, val, get); - if (IS_ERR(ops)) - return PTR_ERR(ops); - - if (get) { - if (ops->compat_get) - ret = ops->compat_get(sk, val, opt, len); - else - ret = ops->get(sk, val, opt, len); - } else { - if (ops->compat_set) - ret = ops->compat_set(sk, val, opt, *len); - else - ret = ops->set(sk, val, opt, *len); - } - - module_put(ops->owner); - return ret; -} - -int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, - int val, char __user *opt, unsigned int len) -{ - return compat_nf_sockopt(sk, pf, val, opt, &len, 0); -} -EXPORT_SYMBOL(compat_nf_setsockopt); - -int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, - int val, char __user *opt, int *len) -{ - return compat_nf_sockopt(sk, pf, val, opt, len, 1); -} -EXPORT_SYMBOL(compat_nf_getsockopt); -#endif -- cgit v1.2.3 From c34bc10d2535719ddf77d44ee849f6c7589583ba Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:21 +0200 Subject: netfilter: remove the compat argument to xt_copy_counters_from_user Lift the in_compat_syscall() from the callers instead. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- include/linux/netfilter/x_tables.h | 2 +- net/ipv4/netfilter/arp_tables.c | 3 +-- net/ipv4/netfilter/ip_tables.c | 3 +-- net/ipv6/netfilter/ip6_tables.c | 3 +-- net/netfilter/x_tables.c | 9 ++++----- 5 files changed, 8 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 5da88451853b..b8b943ee7b8b 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -302,7 +302,7 @@ int xt_data_to_user(void __user *dst, const void *src, int usersize, int size, int aligned_size); void *xt_copy_counters_from_user(const void __user *user, unsigned int len, - struct xt_counters_info *info, bool compat); + struct xt_counters_info *info); struct xt_counters *xt_counters_alloc(unsigned int counters); struct xt_table *xt_register_table(struct net *net, diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 15807fb4a65f..2c8a4dad39d7 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -1008,8 +1008,7 @@ static int do_add_counters(struct net *net, const void __user *user, struct arpt_entry *iter; unsigned int addend; - paddc = xt_copy_counters_from_user(user, len, &tmp, - in_compat_syscall()); + paddc = xt_copy_counters_from_user(user, len, &tmp); if (IS_ERR(paddc)) return PTR_ERR(paddc); diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index fbfad38f3979..161901dd1cae 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -1163,8 +1163,7 @@ do_add_counters(struct net *net, const void __user *user, struct ipt_entry *iter; unsigned int addend; - paddc = xt_copy_counters_from_user(user, len, &tmp, - in_compat_syscall()); + paddc = xt_copy_counters_from_user(user, len, &tmp); if (IS_ERR(paddc)) return PTR_ERR(paddc); diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 96c48e91e6c7..fd1f8f931231 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1179,8 +1179,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len) struct ip6t_entry *iter; unsigned int addend; - paddc = xt_copy_counters_from_user(user, len, &tmp, - in_compat_syscall()); + paddc = xt_copy_counters_from_user(user, len, &tmp); if (IS_ERR(paddc)) return PTR_ERR(paddc); 
t = xt_find_table_lock(net, AF_INET6, tmp.name); diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 99a468be4a59..32bab45af7e4 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -1033,15 +1033,14 @@ EXPORT_SYMBOL_GPL(xt_check_target); * @user: src pointer to userspace memory * @len: alleged size of userspace memory * @info: where to store the xt_counters_info metadata - * @compat: true if we setsockopt call is done by 32bit task on 64bit kernel * * Copies counter meta data from @user and stores it in @info. * * vmallocs memory to hold the counters, then copies the counter data * from @user to the new memory and returns a pointer to it. * - * If @compat is true, @info gets converted automatically to the 64bit - * representation. + * If called from a compat syscall, @info gets converted automatically to the + * 64bit representation. * * The metadata associated with the counters is stored in @info. * @@ -1049,13 +1048,13 @@ EXPORT_SYMBOL_GPL(xt_check_target); * If IS_ERR is false, caller has to vfree the pointer. */ void *xt_copy_counters_from_user(const void __user *user, unsigned int len, - struct xt_counters_info *info, bool compat) + struct xt_counters_info *info) { void *mem; u64 size; #ifdef CONFIG_COMPAT - if (compat) { + if (in_compat_syscall()) { /* structures only differ in size due to alignment */ struct compat_xt_counters_info compat_tmp; -- cgit v1.2.3 From b6238c04c0e5dbe7ae4ea48e96e004905b120a04 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:26 +0200 Subject: net/ipv4: remove compat_ip_{get,set}sockopt Handle the few cases that need special treatment in-line using in_compat_syscall(). Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- include/net/ip.h | 4 - net/dccp/ipv4.c | 4 - net/ipv4/ip_sockglue.c | 214 ++++++++++++++----------------------------------- net/ipv4/raw.c | 22 ----- net/ipv4/tcp_ipv4.c | 4 - net/ipv4/udp.c | 24 ------ net/ipv4/udp_impl.h | 6 -- net/ipv4/udplite.c | 4 - net/l2tp/l2tp_ip.c | 4 - net/sctp/protocol.c | 4 - 10 files changed, 61 insertions(+), 229 deletions(-) (limited to 'include') diff --git a/include/net/ip.h b/include/net/ip.h index 862c9545833a..3d34acc95ca8 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -727,10 +727,6 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); -int compat_ip_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen); -int compat_ip_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen); int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *)); diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 316cc5ac0da7..b91373eb1c79 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -913,10 +913,6 @@ static const struct inet_connection_sock_af_ops dccp_ipv4_af_ops = { .getsockopt = ip_getsockopt, .addr2sockaddr = inet_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in), -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_ip_setsockopt, - .compat_getsockopt = compat_ip_getsockopt, -#endif }; static int dccp_v4_init_sock(struct sock *sk) diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 73bb88fbe546..86b3b9a7cea3 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -679,20 +679,48 @@ Eaddrnotavail: return -EADDRNOTAVAIL; } +static int 
copy_group_source_from_user(struct group_source_req *greqs, + void __user *optval, int optlen) +{ + if (in_compat_syscall()) { + struct compat_group_source_req gr32; + + if (optlen != sizeof(gr32)) + return -EINVAL; + if (copy_from_user(&gr32, optval, sizeof(gr32))) + return -EFAULT; + greqs->gsr_interface = gr32.gsr_interface; + greqs->gsr_group = gr32.gsr_group; + greqs->gsr_source = gr32.gsr_source; + } else { + if (optlen != sizeof(*greqs)) + return -EINVAL; + if (copy_from_user(greqs, optval, sizeof(*greqs))) + return -EFAULT; + } + + return 0; +} + static int do_mcast_group_source(struct sock *sk, int optname, - struct group_source_req *greqs) + void __user *optval, int optlen) { + struct group_source_req greqs; struct ip_mreq_source mreqs; struct sockaddr_in *psin; int omode, add, err; - if (greqs->gsr_group.ss_family != AF_INET || - greqs->gsr_source.ss_family != AF_INET) + err = copy_group_source_from_user(&greqs, optval, optlen); + if (err) + return err; + + if (greqs.gsr_group.ss_family != AF_INET || + greqs.gsr_source.ss_family != AF_INET) return -EADDRNOTAVAIL; - psin = (struct sockaddr_in *)&greqs->gsr_group; + psin = (struct sockaddr_in *)&greqs.gsr_group; mreqs.imr_multiaddr = psin->sin_addr.s_addr; - psin = (struct sockaddr_in *)&greqs->gsr_source; + psin = (struct sockaddr_in *)&greqs.gsr_source; mreqs.imr_sourceaddr = psin->sin_addr.s_addr; mreqs.imr_interface = 0; /* use index for mc_source */ @@ -705,21 +733,21 @@ static int do_mcast_group_source(struct sock *sk, int optname, } else if (optname == MCAST_JOIN_SOURCE_GROUP) { struct ip_mreqn mreq; - psin = (struct sockaddr_in *)&greqs->gsr_group; + psin = (struct sockaddr_in *)&greqs.gsr_group; mreq.imr_multiaddr = psin->sin_addr; mreq.imr_address.s_addr = 0; - mreq.imr_ifindex = greqs->gsr_interface; + mreq.imr_ifindex = greqs.gsr_interface; err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE); if (err && err != -EADDRINUSE) return err; - greqs->gsr_interface = mreq.imr_ifindex; + greqs.gsr_interface = mreq.imr_ifindex; omode = MCAST_INCLUDE; add = 1; } else /* MCAST_LEAVE_SOURCE_GROUP */ { omode = MCAST_INCLUDE; add = 0; } - return ip_mc_source(add, omode, sk, &mreqs, greqs->gsr_interface); + return ip_mc_source(add, omode, sk, &mreqs, greqs.gsr_interface); } static int ip_set_mcast_msfilter(struct sock *sk, void __user *optval, @@ -754,7 +782,6 @@ out_free_gsf: return err; } -#ifdef CONFIG_COMPAT static int compat_ip_set_mcast_msfilter(struct sock *sk, void __user *optval, int optlen) { @@ -788,23 +815,16 @@ static int compat_ip_set_mcast_msfilter(struct sock *sk, void __user *optval, if (offsetof(struct compat_group_filter, gf_slist[n]) > optlen) goto out_free_gsf; - rtnl_lock(); - lock_sock(sk); - /* numsrc >= (4G-140)/128 overflow in 32 bits */ err = -ENOBUFS; if (n > sock_net(sk)->ipv4.sysctl_igmp_max_msf) - goto out_unlock; + goto out_free_gsf; err = set_mcast_msfilter(sk, gf32->gf_interface, n, gf32->gf_fmode, &gf32->gf_group, gf32->gf_slist); -out_unlock: - release_sock(sk); - rtnl_unlock(); out_free_gsf: kfree(p); return err; } -#endif static int ip_mcast_join_leave(struct sock *sk, int optname, void __user *optval, int optlen) @@ -828,14 +848,12 @@ static int ip_mcast_join_leave(struct sock *sk, int optname, return ip_mc_leave_group(sk, &mreq); } -#ifdef CONFIG_COMPAT static int compat_ip_mcast_join_leave(struct sock *sk, int optname, void __user *optval, int optlen) { struct compat_group_req greq; struct ip_mreqn mreq = { }; struct sockaddr_in *psin; - int err; if (optlen < sizeof(struct compat_group_req)) 
return -EINVAL; @@ -848,17 +866,10 @@ static int compat_ip_mcast_join_leave(struct sock *sk, int optname, mreq.imr_multiaddr = psin->sin_addr; mreq.imr_ifindex = greq.gr_interface; - rtnl_lock(); - lock_sock(sk); if (optname == MCAST_JOIN_GROUP) - err = ip_mc_join_group(sk, &mreq); - else - err = ip_mc_leave_group(sk, &mreq); - release_sock(sk); - rtnl_unlock(); - return err; + return ip_mc_join_group(sk, &mreq); + return ip_mc_leave_group(sk, &mreq); } -#endif static int do_ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) @@ -1265,26 +1276,23 @@ static int do_ip_setsockopt(struct sock *sk, int level, } case MCAST_JOIN_GROUP: case MCAST_LEAVE_GROUP: - err = ip_mcast_join_leave(sk, optname, optval, optlen); + if (in_compat_syscall()) + err = compat_ip_mcast_join_leave(sk, optname, optval, + optlen); + else + err = ip_mcast_join_leave(sk, optname, optval, optlen); break; case MCAST_JOIN_SOURCE_GROUP: case MCAST_LEAVE_SOURCE_GROUP: case MCAST_BLOCK_SOURCE: case MCAST_UNBLOCK_SOURCE: - { - struct group_source_req greqs; - - if (optlen != sizeof(struct group_source_req)) - goto e_inval; - if (copy_from_user(&greqs, optval, sizeof(greqs))) { - err = -EFAULT; - break; - } - err = do_mcast_group_source(sk, optname, &greqs); + err = do_mcast_group_source(sk, optname, optval, optlen); break; - } case MCAST_MSFILTER: - err = ip_set_mcast_msfilter(sk, optval, optlen); + if (in_compat_syscall()) + err = compat_ip_set_mcast_msfilter(sk, optval, optlen); + else + err = ip_set_mcast_msfilter(sk, optval, optlen); break; case IP_MULTICAST_ALL: if (optlen < 1) @@ -1410,62 +1418,6 @@ int ip_setsockopt(struct sock *sk, int level, } EXPORT_SYMBOL(ip_setsockopt); -#ifdef CONFIG_COMPAT -int compat_ip_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) -{ - int err; - - if (level != SOL_IP) - return -ENOPROTOOPT; - - switch (optname) { - case MCAST_JOIN_GROUP: - case MCAST_LEAVE_GROUP: - return compat_ip_mcast_join_leave(sk, optname, optval, optlen); - case MCAST_JOIN_SOURCE_GROUP: - case MCAST_LEAVE_SOURCE_GROUP: - case MCAST_BLOCK_SOURCE: - case MCAST_UNBLOCK_SOURCE: - { - struct compat_group_source_req __user *gsr32 = (void __user *)optval; - struct group_source_req greqs; - - if (optlen != sizeof(struct compat_group_source_req)) - return -EINVAL; - - if (get_user(greqs.gsr_interface, &gsr32->gsr_interface) || - copy_from_user(&greqs.gsr_group, &gsr32->gsr_group, - sizeof(greqs.gsr_group)) || - copy_from_user(&greqs.gsr_source, &gsr32->gsr_source, - sizeof(greqs.gsr_source))) - return -EFAULT; - - rtnl_lock(); - lock_sock(sk); - err = do_mcast_group_source(sk, optname, &greqs); - release_sock(sk); - rtnl_unlock(); - return err; - } - case MCAST_MSFILTER: - return compat_ip_set_mcast_msfilter(sk, optval, optlen); - } - - err = do_ip_setsockopt(sk, level, optname, optval, optlen); -#ifdef CONFIG_NETFILTER - /* we need to exclude all possible ENOPROTOOPTs except default case */ - if (err == -ENOPROTOOPT && optname != IP_HDRINCL && - optname != IP_IPSEC_POLICY && - optname != IP_XFRM_POLICY && - !ip_mroute_opt(optname)) - err = nf_setsockopt(sk, PF_INET, optname, optval, optlen); -#endif - return err; -} -EXPORT_SYMBOL(compat_ip_setsockopt); -#endif - /* * Get the options. Note for future reference. The GET of IP options gets * the _received_ ones. The set sets the _sent_ ones. 
@@ -1507,22 +1459,18 @@ static int ip_get_mcast_msfilter(struct sock *sk, void __user *optval, return 0; } -#ifdef CONFIG_COMPAT static int compat_ip_get_mcast_msfilter(struct sock *sk, void __user *optval, - int __user *optlen) + int __user *optlen, int len) { const int size0 = offsetof(struct compat_group_filter, gf_slist); struct compat_group_filter __user *p = optval; struct compat_group_filter gf32; struct group_filter gf; - int len, err; int num; + int err; - if (get_user(len, optlen)) - return -EFAULT; if (len < size0) return -EINVAL; - if (copy_from_user(&gf32, p, size0)) return -EFAULT; @@ -1531,11 +1479,7 @@ static int compat_ip_get_mcast_msfilter(struct sock *sk, void __user *optval, num = gf.gf_numsrc = gf32.gf_numsrc; gf.gf_group = gf32.gf_group; - rtnl_lock(); - lock_sock(sk); err = ip_mc_gsfget(sk, &gf, p->gf_slist); - release_sock(sk); - rtnl_unlock(); if (err) return err; if (gf.gf_numsrc < num) @@ -1547,10 +1491,9 @@ static int compat_ip_get_mcast_msfilter(struct sock *sk, void __user *optval, return -EFAULT; return 0; } -#endif static int do_ip_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen, unsigned int flags) + char __user *optval, int __user *optlen) { struct inet_sock *inet = inet_sk(sk); bool needs_rtnl = getsockopt_needs_rtnl(optname); @@ -1707,7 +1650,11 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, goto out; } case MCAST_MSFILTER: - err = ip_get_mcast_msfilter(sk, optval, optlen, len); + if (in_compat_syscall()) + err = compat_ip_get_mcast_msfilter(sk, optval, optlen, + len); + else + err = ip_get_mcast_msfilter(sk, optval, optlen, len); goto out; case IP_MULTICAST_ALL: val = inet->mc_all; @@ -1724,7 +1671,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, msg.msg_control_is_user = true; msg.msg_control_user = optval; msg.msg_controllen = len; - msg.msg_flags = flags; + msg.msg_flags = in_compat_syscall() ? 
MSG_CMSG_COMPAT : 0; if (inet->cmsg_flags & IP_CMSG_PKTINFO) { struct in_pktinfo info; @@ -1788,45 +1735,7 @@ int ip_getsockopt(struct sock *sk, int level, { int err; - err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0); -#if IS_ENABLED(CONFIG_BPFILTER_UMH) - if (optname >= BPFILTER_IPT_SO_GET_INFO && - optname < BPFILTER_IPT_GET_MAX) - err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen); -#endif -#ifdef CONFIG_NETFILTER - /* we need to exclude all possible ENOPROTOOPTs except default case */ - if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS && - !ip_mroute_opt(optname)) { - int len; - - if (get_user(len, optlen)) - return -EFAULT; - - err = nf_getsockopt(sk, PF_INET, optname, optval, &len); - if (err >= 0) - err = put_user(len, optlen); - return err; - } -#endif - return err; -} -EXPORT_SYMBOL(ip_getsockopt); - -#ifdef CONFIG_COMPAT -int compat_ip_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen) -{ - int err; - - if (optname == MCAST_MSFILTER) { - if (level != SOL_IP) - return -EOPNOTSUPP; - return compat_ip_get_mcast_msfilter(sk, optval, optlen); - } - - err = do_ip_getsockopt(sk, level, optname, optval, optlen, - MSG_CMSG_COMPAT); + err = do_ip_getsockopt(sk, level, optname, optval, optlen); #if IS_ENABLED(CONFIG_BPFILTER_UMH) if (optname >= BPFILTER_IPT_SO_GET_INFO && @@ -1850,5 +1759,4 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname, #endif return err; } -EXPORT_SYMBOL(compat_ip_getsockopt); -#endif +EXPORT_SYMBOL(ip_getsockopt); diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 47665919048f..2a57d633b31e 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -857,16 +857,6 @@ static int raw_setsockopt(struct sock *sk, int level, int optname, return do_raw_setsockopt(sk, level, optname, optval, optlen); } -#ifdef CONFIG_COMPAT -static int compat_raw_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) -{ - if (level != SOL_RAW) - return compat_ip_setsockopt(sk, level, optname, optval, optlen); - return do_raw_setsockopt(sk, level, optname, optval, optlen); -} -#endif - static int do_raw_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { @@ -887,16 +877,6 @@ static int raw_getsockopt(struct sock *sk, int level, int optname, return do_raw_getsockopt(sk, level, optname, optval, optlen); } -#ifdef CONFIG_COMPAT -static int compat_raw_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen) -{ - if (level != SOL_RAW) - return compat_ip_getsockopt(sk, level, optname, optval, optlen); - return do_raw_getsockopt(sk, level, optname, optval, optlen); -} -#endif - static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg) { switch (cmd) { @@ -980,8 +960,6 @@ struct proto raw_prot = { .usersize = sizeof_field(struct raw_sock, filter), .h.raw_hash = &raw_v4_hashinfo, #ifdef CONFIG_COMPAT - .compat_setsockopt = compat_raw_setsockopt, - .compat_getsockopt = compat_raw_getsockopt, .compat_ioctl = compat_raw_ioctl, #endif .diag_destroy = raw_abort, diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 116c11a0aaed..e5b7ef9a2887 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2134,10 +2134,6 @@ const struct inet_connection_sock_af_ops ipv4_specific = { .getsockopt = ip_getsockopt, .addr2sockaddr = inet_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in), -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_ip_setsockopt, - .compat_getsockopt = 
compat_ip_getsockopt, -#endif .mtu_reduced = tcp_v4_mtu_reduced, }; EXPORT_SYMBOL(ipv4_specific); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 073d346f515c..d4be4471c424 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2656,17 +2656,6 @@ int udp_setsockopt(struct sock *sk, int level, int optname, return ip_setsockopt(sk, level, optname, optval, optlen); } -#ifdef CONFIG_COMPAT -int compat_udp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) -{ - if (level == SOL_UDP || level == SOL_UDPLITE) - return udp_lib_setsockopt(sk, level, optname, optval, optlen, - udp_push_pending_frames); - return compat_ip_setsockopt(sk, level, optname, optval, optlen); -} -#endif - int udp_lib_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { @@ -2732,15 +2721,6 @@ int udp_getsockopt(struct sock *sk, int level, int optname, return ip_getsockopt(sk, level, optname, optval, optlen); } -#ifdef CONFIG_COMPAT -int compat_udp_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen) -{ - if (level == SOL_UDP || level == SOL_UDPLITE) - return udp_lib_getsockopt(sk, level, optname, optval, optlen); - return compat_ip_getsockopt(sk, level, optname, optval, optlen); -} -#endif /** * udp_poll - wait for a UDP event. * @file: - file struct @@ -2812,10 +2792,6 @@ struct proto udp_prot = { .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min), .obj_size = sizeof(struct udp_sock), .h.udp_table = &udp_table, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_udp_setsockopt, - .compat_getsockopt = compat_udp_getsockopt, -#endif .diag_destroy = udp_abort, }; EXPORT_SYMBOL(udp_prot); diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h index 6b2fa77eeb1c..ab313702c87f 100644 --- a/net/ipv4/udp_impl.h +++ b/net/ipv4/udp_impl.h @@ -17,12 +17,6 @@ int udp_setsockopt(struct sock *sk, int level, int optname, int udp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); -#ifdef CONFIG_COMPAT -int compat_udp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen); -int compat_udp_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen); -#endif int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len); int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c index 5936d66d1ce2..bd8773b49e72 100644 --- a/net/ipv4/udplite.c +++ b/net/ipv4/udplite.c @@ -56,10 +56,6 @@ struct proto udplite_prot = { .sysctl_mem = sysctl_udp_mem, .obj_size = sizeof(struct udp_sock), .h.udp_table = &udplite_table, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_udp_setsockopt, - .compat_getsockopt = compat_udp_getsockopt, -#endif }; EXPORT_SYMBOL(udplite_prot); diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index f8d7412cfb3d..2a3fd31fb589 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -612,10 +612,6 @@ static struct proto l2tp_ip_prot = { .hash = l2tp_ip_hash, .unhash = l2tp_ip_unhash, .obj_size = sizeof(struct l2tp_ip_sock), -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_ip_setsockopt, - .compat_getsockopt = compat_ip_getsockopt, -#endif }; static const struct proto_ops l2tp_ip_ops = { diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 8d25cc464efd..7ecaf7d575c0 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -1089,10 
+1089,6 @@ static struct sctp_af sctp_af_inet = { .net_header_len = sizeof(struct iphdr), .sockaddr_len = sizeof(struct sockaddr_in), .ip_options_len = sctp_v4_ip_options_len, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_ip_setsockopt, - .compat_getsockopt = compat_ip_getsockopt, -#endif }; struct sctp_pf *sctp_get_pf_specific(sa_family_t family) -- cgit v1.2.3 From 3021ad529950d07e0408d65d0f1df00454c1d223 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:30 +0200 Subject: net/ipv6: remove compat_ipv6_{get,set}sockopt Handle the few cases that need special treatment in-line using in_compat_syscall(). This also removes all the now unused compat_{get,set}sockopt methods. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- include/net/inet_connection_sock.h | 13 --- include/net/ipv6.h | 4 - include/net/sctp/structs.h | 10 -- include/net/sock.h | 8 -- include/net/tcp.h | 4 - net/core/sock.c | 10 -- net/dccp/dccp.h | 6 -- net/dccp/ipv4.c | 4 - net/dccp/ipv6.c | 12 --- net/dccp/proto.c | 26 ------ net/ipv4/inet_connection_sock.c | 28 ------ net/ipv4/tcp.c | 24 ----- net/ipv4/tcp_ipv4.c | 4 - net/ipv6/ipv6_sockglue.c | 183 +++++++++++-------------------------- net/ipv6/raw.c | 50 ---------- net/ipv6/tcp_ipv6.c | 12 --- net/ipv6/udp.c | 25 ----- net/ipv6/udp_impl.h | 6 -- net/ipv6/udplite.c | 4 - net/l2tp/l2tp_ip6.c | 4 - net/sctp/ipv6.c | 4 - 21 files changed, 51 insertions(+), 390 deletions(-) (limited to 'include') diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index e5b388f5fa20..157c60cca0ca 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -48,14 +48,6 @@ struct inet_connection_sock_af_ops { char __user *optval, unsigned int optlen); int (*getsockopt)(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); -#ifdef CONFIG_COMPAT - int (*compat_setsockopt)(struct sock *sk, - int level, int optname, - char __user *optval, unsigned int optlen); - int (*compat_getsockopt)(struct sock *sk, - int level, int optname, - char __user *optval, int __user *optlen); -#endif void (*addr2sockaddr)(struct sock *sk, struct sockaddr *); void (*mtu_reduced)(struct sock *sk); }; @@ -311,11 +303,6 @@ void inet_csk_listen_stop(struct sock *sk); void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr); -int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen); -int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen); - struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu); #define TCP_PINGPONG_THRESH 3 diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 5e65bf2fd32d..262fc88dbd7e 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -1088,10 +1088,6 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); int ipv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); -int compat_ipv6_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen); -int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen); int __ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len); diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 9bbb2f60db92..233bbf7df5d6 100644 --- a/include/net/sctp/structs.h +++ 
b/include/net/sctp/structs.h @@ -438,16 +438,6 @@ struct sctp_af { int optname, char __user *optval, int __user *optlen); - int (*compat_setsockopt) (struct sock *sk, - int level, - int optname, - char __user *optval, - unsigned int optlen); - int (*compat_getsockopt) (struct sock *sk, - int level, - int optname, - char __user *optval, - int __user *optlen); void (*get_dst) (struct sctp_transport *t, union sctp_addr *saddr, struct flowi *fl, diff --git a/include/net/sock.h b/include/net/sock.h index 1fd7cf5fc751..3bd8bc578bf3 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1147,14 +1147,6 @@ struct proto { int __user *option); void (*keepalive)(struct sock *sk, int valbool); #ifdef CONFIG_COMPAT - int (*compat_setsockopt)(struct sock *sk, - int level, - int optname, char __user *optval, - unsigned int optlen); - int (*compat_getsockopt)(struct sock *sk, - int level, - int optname, char __user *optval, - int __user *option); int (*compat_ioctl)(struct sock *sk, unsigned int cmd, unsigned long arg); #endif diff --git a/include/net/tcp.h b/include/net/tcp.h index d62e24533518..9f7f7c0c1104 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -401,10 +401,6 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); -int compat_tcp_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen); -int compat_tcp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen); void tcp_set_keepalive(struct sock *sk, int val); void tcp_syn_ack_timeout(const struct request_sock *req); int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, diff --git a/net/core/sock.c b/net/core/sock.c index 018404d17626..48655d5c4cf3 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -3199,11 +3199,6 @@ int sock_common_getsockopt(struct socket *sock, int level, int optname, { struct sock *sk = sock->sk; -#ifdef CONFIG_COMPAT - if (in_compat_syscall() && sk->sk_prot->compat_getsockopt) - return sk->sk_prot->compat_getsockopt(sk, level, optname, - optval, optlen); -#endif return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); } EXPORT_SYMBOL(sock_common_getsockopt); @@ -3231,11 +3226,6 @@ int sock_common_setsockopt(struct socket *sock, int level, int optname, { struct sock *sk = sock->sk; -#ifdef CONFIG_COMPAT - if (in_compat_syscall() && sk->sk_prot->compat_setsockopt) - return sk->sk_prot->compat_setsockopt(sk, level, optname, - optval, optlen); -#endif return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen); } EXPORT_SYMBOL(sock_common_setsockopt); diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index 7dce4f6c7025..434eea91b767 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -296,12 +296,6 @@ int dccp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); int dccp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); -#ifdef CONFIG_COMPAT -int compat_dccp_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen); -int compat_dccp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen); -#endif int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg); int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t 
len, int nonblock, diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index b91373eb1c79..9c28c8251125 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -959,10 +959,6 @@ static struct proto dccp_v4_prot = { .rsk_prot = &dccp_request_sock_ops, .twsk_prot = &dccp_timewait_sock_ops, .h.hashinfo = &dccp_hashinfo, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_dccp_setsockopt, - .compat_getsockopt = compat_dccp_getsockopt, -#endif }; static const struct net_protocol dccp_v4_protocol = { diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index b50f85a72cd5..ef4ab28cfde0 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -970,10 +970,6 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = { .getsockopt = ipv6_getsockopt, .addr2sockaddr = inet6_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in6), -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_ipv6_setsockopt, - .compat_getsockopt = compat_ipv6_getsockopt, -#endif }; /* @@ -990,10 +986,6 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = { .getsockopt = ipv6_getsockopt, .addr2sockaddr = inet6_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in6), -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_ipv6_setsockopt, - .compat_getsockopt = compat_ipv6_getsockopt, -#endif }; /* NOTE: A lot of things set to zero explicitly by call to @@ -1049,10 +1041,6 @@ static struct proto dccp_v6_prot = { .rsk_prot = &dccp6_request_sock_ops, .twsk_prot = &dccp6_timewait_sock_ops, .h.hashinfo = &dccp_hashinfo, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_dccp_setsockopt, - .compat_getsockopt = compat_dccp_getsockopt, -#endif }; static const struct inet6_protocol dccp_v6_protocol = { diff --git a/net/dccp/proto.c b/net/dccp/proto.c index c13b6609474b..fd92d3fe321f 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -575,19 +575,6 @@ int dccp_setsockopt(struct sock *sk, int level, int optname, EXPORT_SYMBOL_GPL(dccp_setsockopt); -#ifdef CONFIG_COMPAT -int compat_dccp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) -{ - if (level != SOL_DCCP) - return inet_csk_compat_setsockopt(sk, level, optname, - optval, optlen); - return do_dccp_setsockopt(sk, level, optname, optval, optlen); -} - -EXPORT_SYMBOL_GPL(compat_dccp_setsockopt); -#endif - static int dccp_getsockopt_service(struct sock *sk, int len, __be32 __user *optval, int __user *optlen) @@ -696,19 +683,6 @@ int dccp_getsockopt(struct sock *sk, int level, int optname, EXPORT_SYMBOL_GPL(dccp_getsockopt); -#ifdef CONFIG_COMPAT -int compat_dccp_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen) -{ - if (level != SOL_DCCP) - return inet_csk_compat_getsockopt(sk, level, optname, - optval, optlen); - return do_dccp_getsockopt(sk, level, optname, optval, optlen); -} - -EXPORT_SYMBOL_GPL(compat_dccp_getsockopt); -#endif - static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb) { struct cmsghdr *cmsg; diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 22b0e7336360..d1a3913eebe0 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -1057,34 +1057,6 @@ void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr) } EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr); -#ifdef CONFIG_COMPAT -int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen) -{ - const struct inet_connection_sock *icsk = inet_csk(sk); - - if 
(icsk->icsk_af_ops->compat_getsockopt) - return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname, - optval, optlen); - return icsk->icsk_af_ops->getsockopt(sk, level, optname, - optval, optlen); -} -EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt); - -int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) -{ - const struct inet_connection_sock *icsk = inet_csk(sk); - - if (icsk->icsk_af_ops->compat_setsockopt) - return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname, - optval, optlen); - return icsk->icsk_af_ops->setsockopt(sk, level, optname, - optval, optlen); -} -EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt); -#endif - static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl) { const struct inet_sock *inet = inet_sk(sk); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 254b6a4cc95b..58ede3d62b2e 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3337,18 +3337,6 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, } EXPORT_SYMBOL(tcp_setsockopt); -#ifdef CONFIG_COMPAT -int compat_tcp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) -{ - if (level != SOL_TCP) - return inet_csk_compat_setsockopt(sk, level, optname, - optval, optlen); - return do_tcp_setsockopt(sk, level, optname, optval, optlen); -} -EXPORT_SYMBOL(compat_tcp_setsockopt); -#endif - static void tcp_get_info_chrono_stats(const struct tcp_sock *tp, struct tcp_info *info) { @@ -3896,18 +3884,6 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, } EXPORT_SYMBOL(tcp_getsockopt); -#ifdef CONFIG_COMPAT -int compat_tcp_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen) -{ - if (level != SOL_TCP) - return inet_csk_compat_getsockopt(sk, level, optname, - optval, optlen); - return do_tcp_getsockopt(sk, level, optname, optval, optlen); -} -EXPORT_SYMBOL(compat_tcp_getsockopt); -#endif - #ifdef CONFIG_TCP_MD5SIG static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool); static DEFINE_MUTEX(tcp_md5sig_mutex); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index e5b7ef9a2887..cd81b6e04efb 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2769,10 +2769,6 @@ struct proto tcp_prot = { .rsk_prot = &tcp_request_sock_ops, .h.hashinfo = &tcp_hashinfo, .no_autobind = true, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_tcp_setsockopt, - .compat_getsockopt = compat_tcp_getsockopt, -#endif .diag_destroy = tcp_abort, }; EXPORT_SYMBOL(tcp_prot); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 1ea0cd12beae..add8f7912299 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -136,13 +136,42 @@ static bool setsockopt_needs_rtnl(int optname) return false; } +static int copy_group_source_from_user(struct group_source_req *greqs, + void __user *optval, int optlen) +{ + if (in_compat_syscall()) { + struct compat_group_source_req gr32; + + if (optlen < sizeof(gr32)) + return -EINVAL; + if (copy_from_user(&gr32, optval, sizeof(gr32))) + return -EFAULT; + greqs->gsr_interface = gr32.gsr_interface; + greqs->gsr_group = gr32.gsr_group; + greqs->gsr_source = gr32.gsr_source; + } else { + if (optlen < sizeof(*greqs)) + return -EINVAL; + if (copy_from_user(greqs, optval, sizeof(*greqs))) + return -EFAULT; + } + + return 0; +} + static int do_ipv6_mcast_group_source(struct sock *sk, int optname, - struct group_source_req *greqs) + void __user 
*optval, int optlen) { + struct group_source_req greqs; int omode, add; + int ret; + + ret = copy_group_source_from_user(&greqs, optval, optlen); + if (ret) + return ret; - if (greqs->gsr_group.ss_family != AF_INET6 || - greqs->gsr_source.ss_family != AF_INET6) + if (greqs.gsr_group.ss_family != AF_INET6 || + greqs.gsr_source.ss_family != AF_INET6) return -EADDRNOTAVAIL; if (optname == MCAST_BLOCK_SOURCE) { @@ -155,8 +184,8 @@ static int do_ipv6_mcast_group_source(struct sock *sk, int optname, struct sockaddr_in6 *psin6; int retv; - psin6 = (struct sockaddr_in6 *)&greqs->gsr_group; - retv = ipv6_sock_mc_join_ssm(sk, greqs->gsr_interface, + psin6 = (struct sockaddr_in6 *)&greqs.gsr_group; + retv = ipv6_sock_mc_join_ssm(sk, greqs.gsr_interface, &psin6->sin6_addr, MCAST_INCLUDE); /* prior join w/ different source is ok */ @@ -168,7 +197,7 @@ static int do_ipv6_mcast_group_source(struct sock *sk, int optname, omode = MCAST_INCLUDE; add = 0; } - return ip6_mc_source(add, omode, sk, greqs); + return ip6_mc_source(add, omode, sk, &greqs); } static int ipv6_set_mcast_msfilter(struct sock *sk, void __user *optval, @@ -202,7 +231,6 @@ out_free_gsf: return ret; } -#ifdef CONFIG_COMPAT static int compat_ipv6_set_mcast_msfilter(struct sock *sk, void __user *optval, int optlen) { @@ -236,21 +264,16 @@ static int compat_ipv6_set_mcast_msfilter(struct sock *sk, void __user *optval, if (offsetof(struct compat_group_filter, gf_slist[n]) > optlen) goto out_free_p; - rtnl_lock(); - lock_sock(sk); ret = ip6_mc_msfilter(sk, &(struct group_filter){ .gf_interface = gf32->gf_interface, .gf_group = gf32->gf_group, .gf_fmode = gf32->gf_fmode, .gf_numsrc = gf32->gf_numsrc}, gf32->gf_slist); - release_sock(sk); - rtnl_unlock(); out_free_p: kfree(p); return ret; } -#endif static int ipv6_mcast_join_leave(struct sock *sk, int optname, void __user *optval, int optlen) @@ -272,13 +295,11 @@ static int ipv6_mcast_join_leave(struct sock *sk, int optname, return ipv6_sock_mc_drop(sk, greq.gr_interface, &psin6->sin6_addr); } -#ifdef CONFIG_COMPAT static int compat_ipv6_mcast_join_leave(struct sock *sk, int optname, void __user *optval, int optlen) { struct compat_group_req gr32; struct sockaddr_in6 *psin6; - int err; if (optlen < sizeof(gr32)) return -EINVAL; @@ -287,20 +308,12 @@ static int compat_ipv6_mcast_join_leave(struct sock *sk, int optname, if (gr32.gr_group.ss_family != AF_INET6) return -EADDRNOTAVAIL; - rtnl_lock(); - lock_sock(sk); psin6 = (struct sockaddr_in6 *)&gr32.gr_group; if (optname == MCAST_JOIN_GROUP) - err = ipv6_sock_mc_join(sk, gr32.gr_interface, + return ipv6_sock_mc_join(sk, gr32.gr_interface, &psin6->sin6_addr); - else - err = ipv6_sock_mc_drop(sk, gr32.gr_interface, - &psin6->sin6_addr); - release_sock(sk); - rtnl_unlock(); - return err; + return ipv6_sock_mc_drop(sk, gr32.gr_interface, &psin6->sin6_addr); } -#endif static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) @@ -853,26 +866,25 @@ done: case MCAST_JOIN_GROUP: case MCAST_LEAVE_GROUP: - retv = ipv6_mcast_join_leave(sk, optname, optval, optlen); + if (in_compat_syscall()) + retv = compat_ipv6_mcast_join_leave(sk, optname, optval, + optlen); + else + retv = ipv6_mcast_join_leave(sk, optname, optval, + optlen); break; case MCAST_JOIN_SOURCE_GROUP: case MCAST_LEAVE_SOURCE_GROUP: case MCAST_BLOCK_SOURCE: case MCAST_UNBLOCK_SOURCE: - { - struct group_source_req greqs; - - if (optlen < sizeof(struct group_source_req)) - goto e_inval; - if (copy_from_user(&greqs, optval, sizeof(greqs))) { - 
retv = -EFAULT; - break; - } - retv = do_ipv6_mcast_group_source(sk, optname, &greqs); + retv = do_ipv6_mcast_group_source(sk, optname, optval, optlen); break; - } case MCAST_MSFILTER: - retv = ipv6_set_mcast_msfilter(sk, optval, optlen); + if (in_compat_syscall()) + retv = compat_ipv6_set_mcast_msfilter(sk, optval, + optlen); + else + retv = ipv6_set_mcast_msfilter(sk, optval, optlen); break; case IPV6_ROUTER_ALERT: if (optlen < sizeof(int)) @@ -989,64 +1001,6 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname, } EXPORT_SYMBOL(ipv6_setsockopt); -#ifdef CONFIG_COMPAT -int compat_ipv6_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) -{ - int err; - - if (level == SOL_IP && sk->sk_type != SOCK_RAW) - return udp_prot.setsockopt(sk, level, optname, optval, optlen); - - if (level != SOL_IPV6) - return -ENOPROTOOPT; - - switch (optname) { - case MCAST_JOIN_GROUP: - case MCAST_LEAVE_GROUP: - return compat_ipv6_mcast_join_leave(sk, optname, optval, - optlen); - case MCAST_JOIN_SOURCE_GROUP: - case MCAST_LEAVE_SOURCE_GROUP: - case MCAST_BLOCK_SOURCE: - case MCAST_UNBLOCK_SOURCE: - { - struct compat_group_source_req __user *gsr32 = (void __user *)optval; - struct group_source_req greqs; - - if (optlen < sizeof(struct compat_group_source_req)) - return -EINVAL; - - if (get_user(greqs.gsr_interface, &gsr32->gsr_interface) || - copy_from_user(&greqs.gsr_group, &gsr32->gsr_group, - sizeof(greqs.gsr_group)) || - copy_from_user(&greqs.gsr_source, &gsr32->gsr_source, - sizeof(greqs.gsr_source))) - return -EFAULT; - - rtnl_lock(); - lock_sock(sk); - err = do_ipv6_mcast_group_source(sk, optname, &greqs); - release_sock(sk); - rtnl_unlock(); - return err; - } - case MCAST_MSFILTER: - return compat_ipv6_set_mcast_msfilter(sk, optval, optlen); - } - - err = do_ipv6_setsockopt(sk, level, optname, optval, optlen); -#ifdef CONFIG_NETFILTER - /* we need to exclude all possible ENOPROTOOPTs except default case */ - if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY && - optname != IPV6_XFRM_POLICY) - err = nf_setsockopt(sk, PF_INET6, optname, optval, optlen); -#endif - return err; -} -EXPORT_SYMBOL(compat_ipv6_setsockopt); -#endif - static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt, int optname, char __user *optval, int len) { @@ -1110,7 +1064,6 @@ static int ipv6_get_msfilter(struct sock *sk, void __user *optval, return err; } -#ifdef CONFIG_COMPAT static int compat_ipv6_get_msfilter(struct sock *sk, void __user *optval, int __user *optlen) { @@ -1150,7 +1103,6 @@ static int compat_ipv6_get_msfilter(struct sock *sk, void __user *optval, return -EFAULT; return 0; } -#endif static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen, unsigned int flags) @@ -1175,6 +1127,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, val = sk->sk_family; break; case MCAST_MSFILTER: + if (in_compat_syscall()) + return compat_ipv6_get_msfilter(sk, optval, optlen); return ipv6_get_msfilter(sk, optval, optlen, len); case IPV6_2292PKTOPTIONS: { @@ -1523,38 +1477,3 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname, return err; } EXPORT_SYMBOL(ipv6_getsockopt); - -#ifdef CONFIG_COMPAT -int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen) -{ - int err; - - if (level == SOL_IP && sk->sk_type != SOCK_RAW) - return udp_prot.getsockopt(sk, level, optname, optval, optlen); - - if (level != SOL_IPV6) - return 
-ENOPROTOOPT; - - if (optname == MCAST_MSFILTER) - return compat_ipv6_get_msfilter(sk, optval, optlen); - - err = do_ipv6_getsockopt(sk, level, optname, optval, optlen, - MSG_CMSG_COMPAT); -#ifdef CONFIG_NETFILTER - /* we need to exclude all possible ENOPROTOOPTs except default case */ - if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) { - int len; - - if (get_user(len, optlen)) - return -EFAULT; - - err = nf_getsockopt(sk, PF_INET6, optname, optval, &len); - if (err >= 0) - err = put_user(len, optlen); - } -#endif - return err; -} -EXPORT_SYMBOL(compat_ipv6_getsockopt); -#endif diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index e23c6b461758..594e01ad670a 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -1084,30 +1084,6 @@ static int rawv6_setsockopt(struct sock *sk, int level, int optname, return do_rawv6_setsockopt(sk, level, optname, optval, optlen); } -#ifdef CONFIG_COMPAT -static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) -{ - switch (level) { - case SOL_RAW: - break; - case SOL_ICMPV6: - if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6) - return -EOPNOTSUPP; - return rawv6_seticmpfilter(sk, level, optname, optval, optlen); - case SOL_IPV6: - if (optname == IPV6_CHECKSUM || - optname == IPV6_HDRINCL) - break; - fallthrough; - default: - return compat_ipv6_setsockopt(sk, level, optname, - optval, optlen); - } - return do_rawv6_setsockopt(sk, level, optname, optval, optlen); -} -#endif - static int do_rawv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { @@ -1169,30 +1145,6 @@ static int rawv6_getsockopt(struct sock *sk, int level, int optname, return do_rawv6_getsockopt(sk, level, optname, optval, optlen); } -#ifdef CONFIG_COMPAT -static int compat_rawv6_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen) -{ - switch (level) { - case SOL_RAW: - break; - case SOL_ICMPV6: - if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6) - return -EOPNOTSUPP; - return rawv6_geticmpfilter(sk, level, optname, optval, optlen); - case SOL_IPV6: - if (optname == IPV6_CHECKSUM || - optname == IPV6_HDRINCL) - break; - fallthrough; - default: - return compat_ipv6_getsockopt(sk, level, optname, - optval, optlen); - } - return do_rawv6_getsockopt(sk, level, optname, optval, optlen); -} -#endif - static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg) { switch (cmd) { @@ -1297,8 +1249,6 @@ struct proto rawv6_prot = { .usersize = sizeof_field(struct raw6_sock, filter), .h.raw_hash = &raw_v6_hashinfo, #ifdef CONFIG_COMPAT - .compat_setsockopt = compat_rawv6_setsockopt, - .compat_getsockopt = compat_rawv6_getsockopt, .compat_ioctl = compat_rawv6_ioctl, #endif .diag_destroy = raw_abort, diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 4502db706f75..c34b7834fd84 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1831,10 +1831,6 @@ const struct inet_connection_sock_af_ops ipv6_specific = { .getsockopt = ipv6_getsockopt, .addr2sockaddr = inet6_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in6), -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_ipv6_setsockopt, - .compat_getsockopt = compat_ipv6_getsockopt, -#endif .mtu_reduced = tcp_v6_mtu_reduced, }; @@ -1861,10 +1857,6 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = { .getsockopt = ipv6_getsockopt, .addr2sockaddr = inet6_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in6), -#ifdef CONFIG_COMPAT - .compat_setsockopt = 
compat_ipv6_setsockopt, - .compat_getsockopt = compat_ipv6_getsockopt, -#endif .mtu_reduced = tcp_v4_mtu_reduced, }; @@ -2122,10 +2114,6 @@ struct proto tcpv6_prot = { .rsk_prot = &tcp6_request_sock_ops, .h.hashinfo = &tcp_hashinfo, .no_autobind = true, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_tcp_setsockopt, - .compat_getsockopt = compat_tcp_getsockopt, -#endif .diag_destroy = tcp_abort, }; EXPORT_SYMBOL_GPL(tcpv6_prot); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 38c0d9350c6b..5aff0856a05b 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1570,17 +1570,6 @@ int udpv6_setsockopt(struct sock *sk, int level, int optname, return ipv6_setsockopt(sk, level, optname, optval, optlen); } -#ifdef CONFIG_COMPAT -int compat_udpv6_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) -{ - if (level == SOL_UDP || level == SOL_UDPLITE) - return udp_lib_setsockopt(sk, level, optname, optval, optlen, - udp_v6_push_pending_frames); - return compat_ipv6_setsockopt(sk, level, optname, optval, optlen); -} -#endif - int udpv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { @@ -1589,16 +1578,6 @@ int udpv6_getsockopt(struct sock *sk, int level, int optname, return ipv6_getsockopt(sk, level, optname, optval, optlen); } -#ifdef CONFIG_COMPAT -int compat_udpv6_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen) -{ - if (level == SOL_UDP || level == SOL_UDPLITE) - return udp_lib_getsockopt(sk, level, optname, optval, optlen); - return compat_ipv6_getsockopt(sk, level, optname, optval, optlen); -} -#endif - /* thinking of making this const? Don't. * early_demux can change based on sysctl. */ @@ -1681,10 +1660,6 @@ struct proto udpv6_prot = { .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min), .obj_size = sizeof(struct udp6_sock), .h.udp_table = &udp_table, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_udpv6_setsockopt, - .compat_getsockopt = compat_udpv6_getsockopt, -#endif .diag_destroy = udp_abort, }; diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h index 20e324b6f358..30dfb6f1b762 100644 --- a/net/ipv6/udp_impl.h +++ b/net/ipv6/udp_impl.h @@ -19,12 +19,6 @@ int udpv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); int udpv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); -#ifdef CONFIG_COMPAT -int compat_udpv6_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen); -int compat_udpv6_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen); -#endif int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len); diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c index bf7a7acd39b1..fbb700d3f437 100644 --- a/net/ipv6/udplite.c +++ b/net/ipv6/udplite.c @@ -52,10 +52,6 @@ struct proto udplitev6_prot = { .sysctl_mem = sysctl_udp_mem, .obj_size = sizeof(struct udp6_sock), .h.udp_table = &udplite_table, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_udpv6_setsockopt, - .compat_getsockopt = compat_udpv6_getsockopt, -#endif }; static struct inet_protosw udplite6_protosw = { diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 2cdc0b7a7a43..4799bec87b33 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -745,10 +745,6 @@ static struct proto 
l2tp_ip6_prot = { .hash = l2tp_ip6_hash, .unhash = l2tp_ip6_unhash, .obj_size = sizeof(struct l2tp_ip6_sock), -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_ipv6_setsockopt, - .compat_getsockopt = compat_ipv6_getsockopt, -#endif }; static const struct proto_ops l2tp_ip6_ops = { diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index ebda31b7747d..aea2a982984d 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -1087,10 +1087,6 @@ static struct sctp_af sctp_af_inet6 = { .net_header_len = sizeof(struct ipv6hdr), .sockaddr_len = sizeof(struct sockaddr_in6), .ip_options_len = sctp_v6_ip_options_len, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_ipv6_setsockopt, - .compat_getsockopt = compat_ipv6_getsockopt, -#endif }; static struct sctp_pf sctp_pf_inet6 = { -- cgit v1.2.3 From a44d9e72100f7044ac46e4e6dc475f5b4097830f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:31 +0200 Subject: net: make ->{get,set}sockopt in proto_ops optional Just check for a NULL method instead of wiring up sock_no_{get,set}sockopt. Signed-off-by: Christoph Hellwig Acked-by: Marc Kleine-Budde Signed-off-by: David S. Miller --- crypto/af_alg.c | 1 - crypto/algif_aead.c | 4 ---- crypto/algif_hash.c | 4 ---- crypto/algif_rng.c | 2 -- crypto/algif_skcipher.c | 4 ---- drivers/isdn/mISDN/socket.c | 2 -- drivers/net/ppp/pppoe.c | 2 -- drivers/net/ppp/pptp.c | 2 -- include/net/sock.h | 2 -- net/appletalk/ddp.c | 2 -- net/bluetooth/bnep/sock.c | 2 -- net/bluetooth/cmtp/sock.c | 2 -- net/bluetooth/hidp/sock.c | 2 -- net/caif/caif_socket.c | 2 -- net/can/bcm.c | 2 -- net/core/sock.c | 14 -------------- net/key/af_key.c | 2 -- net/nfc/llcp_sock.c | 2 -- net/nfc/rawsock.c | 4 ---- net/packet/af_packet.c | 2 -- net/phonet/socket.c | 2 -- net/qrtr/qrtr.c | 2 -- net/smc/af_smc.c | 9 +++++++-- net/socket.c | 4 ++++ net/unix/af_unix.c | 6 ------ net/vmw_vsock/af_vsock.c | 2 -- 26 files changed, 11 insertions(+), 73 deletions(-) (limited to 'include') diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 28fc323e3fe3..29f71428520b 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -335,7 +335,6 @@ static const struct proto_ops alg_proto_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, .sendmsg = sock_no_sendmsg, diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 0ae000a61c7f..527d09a69462 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -361,11 +361,9 @@ static struct proto_ops algif_aead_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, .accept = sock_no_accept, - .setsockopt = sock_no_setsockopt, .release = af_alg_release, .sendmsg = aead_sendmsg, @@ -454,11 +452,9 @@ static struct proto_ops algif_aead_ops_nokey = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, .accept = sock_no_accept, - .setsockopt = sock_no_setsockopt, .release = af_alg_release, .sendmsg = aead_sendmsg_nokey, diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index e71727c25a7d..50f7b22f1b48 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -279,10 +279,8 @@ static struct proto_ops algif_hash_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, 
.bind = sock_no_bind, - .setsockopt = sock_no_setsockopt, .release = af_alg_release, .sendmsg = hash_sendmsg, @@ -383,10 +381,8 @@ static struct proto_ops algif_hash_ops_nokey = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, - .setsockopt = sock_no_setsockopt, .release = af_alg_release, .sendmsg = hash_sendmsg_nokey, diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c index 087c0ad09d38..6300e0566dc5 100644 --- a/crypto/algif_rng.c +++ b/crypto/algif_rng.c @@ -101,11 +101,9 @@ static struct proto_ops algif_rng_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, .accept = sock_no_accept, - .setsockopt = sock_no_setsockopt, .sendmsg = sock_no_sendmsg, .sendpage = sock_no_sendpage, diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index ec5567c87a6d..c487887f4671 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -188,11 +188,9 @@ static struct proto_ops algif_skcipher_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, .accept = sock_no_accept, - .setsockopt = sock_no_setsockopt, .release = af_alg_release, .sendmsg = skcipher_sendmsg, @@ -281,11 +279,9 @@ static struct proto_ops algif_skcipher_ops_nokey = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, .accept = sock_no_accept, - .setsockopt = sock_no_setsockopt, .release = af_alg_release, .sendmsg = skcipher_sendmsg_nokey, diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c index dff4132b3702..1b2b91479107 100644 --- a/drivers/isdn/mISDN/socket.c +++ b/drivers/isdn/mISDN/socket.c @@ -738,8 +738,6 @@ static const struct proto_ops base_sock_ops = { .recvmsg = sock_no_recvmsg, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index beedaad08255..d7f50b835050 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -1110,8 +1110,6 @@ static const struct proto_ops pppoe_ops = { .poll = datagram_poll, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = pppoe_sendmsg, .recvmsg = pppoe_recvmsg, .mmap = sock_no_mmap, diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index acccb747aeda..ee5058445d06 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -618,8 +618,6 @@ static const struct proto_ops pptp_ops = { .getname = pptp_getname, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = sock_no_sendmsg, .recvmsg = sock_no_recvmsg, .mmap = sock_no_mmap, diff --git a/include/net/sock.h b/include/net/sock.h index 3bd8bc578bf3..62e18fc8ac9f 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1714,8 +1714,6 @@ int sock_no_getname(struct socket *, struct sockaddr *, int); int sock_no_ioctl(struct socket *, unsigned int, unsigned long); int sock_no_listen(struct socket *, int); int sock_no_shutdown(struct socket 
*, int); -int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *); -int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int); int sock_no_sendmsg(struct socket *, struct msghdr *, size_t); int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len); int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int); diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index 15787e8c0629..1d48708c5a2e 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -1917,8 +1917,6 @@ static const struct proto_ops atalk_dgram_ops = { #endif .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = atalk_sendmsg, .recvmsg = atalk_recvmsg, .mmap = sock_no_mmap, diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c index cfd83c5521ae..d515571b2afb 100644 --- a/net/bluetooth/bnep/sock.c +++ b/net/bluetooth/bnep/sock.c @@ -182,8 +182,6 @@ static const struct proto_ops bnep_sock_ops = { .recvmsg = sock_no_recvmsg, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c index defdd4871919..96d49d9fae96 100644 --- a/net/bluetooth/cmtp/sock.c +++ b/net/bluetooth/cmtp/sock.c @@ -185,8 +185,6 @@ static const struct proto_ops cmtp_sock_ops = { .recvmsg = sock_no_recvmsg, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c index 03be6a4baef3..595fb3c9d6c3 100644 --- a/net/bluetooth/hidp/sock.c +++ b/net/bluetooth/hidp/sock.c @@ -233,8 +233,6 @@ static const struct proto_ops hidp_sock_ops = { .recvmsg = sock_no_recvmsg, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index ef14da50a981..b94ecd931002 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -981,7 +981,6 @@ static const struct proto_ops caif_seqpacket_ops = { .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = caif_seqpkt_sendmsg, .recvmsg = caif_seqpkt_recvmsg, .mmap = sock_no_mmap, @@ -1002,7 +1001,6 @@ static const struct proto_ops caif_stream_ops = { .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = caif_stream_sendmsg, .recvmsg = caif_stream_recvmsg, .mmap = sock_no_mmap, diff --git a/net/can/bcm.c b/net/can/bcm.c index c96fa0f33db3..d14ea12affb1 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -1648,8 +1648,6 @@ static const struct proto_ops bcm_ops = { .gettstamp = sock_gettstamp, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = bcm_sendmsg, .recvmsg = bcm_recvmsg, .mmap = sock_no_mmap, diff --git a/net/core/sock.c b/net/core/sock.c index 48655d5c4cf3..d828bfe1c47d 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2783,20 +2783,6 @@ int 
sock_no_shutdown(struct socket *sock, int how) } EXPORT_SYMBOL(sock_no_shutdown); -int sock_no_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) -{ - return -EOPNOTSUPP; -} -EXPORT_SYMBOL(sock_no_setsockopt); - -int sock_no_getsockopt(struct socket *sock, int level, int optname, - char __user *optval, int __user *optlen) -{ - return -EOPNOTSUPP; -} -EXPORT_SYMBOL(sock_no_getsockopt); - int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len) { return -EOPNOTSUPP; diff --git a/net/key/af_key.c b/net/key/af_key.c index b67ed3a8486c..f13626c1a985 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -3734,8 +3734,6 @@ static const struct proto_ops pfkey_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index 28604414dec1..6da1e2334bb6 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c @@ -921,8 +921,6 @@ static const struct proto_ops llcp_rawsock_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = sock_no_sendmsg, .recvmsg = llcp_sock_recvmsg, .mmap = sock_no_mmap, diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c index ba5ffd3badd3..b2061b6746ea 100644 --- a/net/nfc/rawsock.c +++ b/net/nfc/rawsock.c @@ -276,8 +276,6 @@ static const struct proto_ops rawsock_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = rawsock_sendmsg, .recvmsg = rawsock_recvmsg, .mmap = sock_no_mmap, @@ -296,8 +294,6 @@ static const struct proto_ops rawsock_raw_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = sock_no_sendmsg, .recvmsg = rawsock_recvmsg, .mmap = sock_no_mmap, diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 35aee9e98053..c240fb5de3f0 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -4503,8 +4503,6 @@ static const struct proto_ops packet_ops_spkt = { .gettstamp = sock_gettstamp, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = packet_sendmsg_spkt, .recvmsg = packet_recvmsg, .mmap = sock_no_mmap, diff --git a/net/phonet/socket.c b/net/phonet/socket.c index 87c60f83c180..2599235d592e 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c @@ -439,8 +439,6 @@ const struct proto_ops phonet_dgram_ops = { .ioctl = pn_socket_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = pn_socket_sendmsg, .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index 24a8c3c6da0d..0cb4adfc6641 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -1208,8 +1208,6 @@ static const struct proto_ops qrtr_proto_ops = { .gettstamp = sock_gettstamp, .poll = datagram_poll, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .release = qrtr_release, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 903321543838..9711c9e0e515 
100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -1742,8 +1742,11 @@ static int smc_setsockopt(struct socket *sock, int level, int optname, /* generic setsockopts reaching us here always apply to the * CLC socket */ - rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname, - optval, optlen); + if (unlikely(!smc->clcsock->ops->setsockopt)) + rc = -EOPNOTSUPP; + else + rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname, + optval, optlen); if (smc->clcsock->sk->sk_err) { sk->sk_err = smc->clcsock->sk->sk_err; sk->sk_error_report(sk); @@ -1808,6 +1811,8 @@ static int smc_getsockopt(struct socket *sock, int level, int optname, smc = smc_sk(sock->sk); /* socket options apply to the CLC socket */ + if (unlikely(!smc->clcsock->ops->getsockopt)) + return -EOPNOTSUPP; return smc->clcsock->ops->getsockopt(smc->clcsock, level, optname, optval, optlen); } diff --git a/net/socket.c b/net/socket.c index dec345982abb..93846568c2fb 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2131,6 +2131,8 @@ int __sys_setsockopt(int fd, int level, int optname, char __user *optval, if (level == SOL_SOCKET && !sock_use_custom_sol_socket(sock)) err = sock_setsockopt(sock, level, optname, optval, optlen); + else if (unlikely(!sock->ops->setsockopt)) + err = -EOPNOTSUPP; else err = sock->ops->setsockopt(sock, level, optname, optval, optlen); @@ -2175,6 +2177,8 @@ int __sys_getsockopt(int fd, int level, int optname, char __user *optval, if (level == SOL_SOCKET) err = sock_getsockopt(sock, level, optname, optval, optlen); + else if (unlikely(!sock->ops->getsockopt)) + err = -EOPNOTSUPP; else err = sock->ops->getsockopt(sock, level, optname, optval, optlen); diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 3385a7a0b231..181ea6fb56a6 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -714,8 +714,6 @@ static const struct proto_ops unix_stream_ops = { #endif .listen = unix_listen, .shutdown = unix_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = unix_stream_sendmsg, .recvmsg = unix_stream_recvmsg, .mmap = sock_no_mmap, @@ -741,8 +739,6 @@ static const struct proto_ops unix_dgram_ops = { #endif .listen = sock_no_listen, .shutdown = unix_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = unix_dgram_sendmsg, .recvmsg = unix_dgram_recvmsg, .mmap = sock_no_mmap, @@ -767,8 +763,6 @@ static const struct proto_ops unix_seqpacket_ops = { #endif .listen = unix_listen, .shutdown = unix_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = unix_seqpacket_sendmsg, .recvmsg = unix_seqpacket_recvmsg, .mmap = sock_no_mmap, diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 626bf9044418..df204c6761c4 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -1202,8 +1202,6 @@ static const struct proto_ops vsock_dgram_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = vsock_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = vsock_dgram_sendmsg, .recvmsg = vsock_dgram_recvmsg, .mmap = sock_no_mmap, -- cgit v1.2.3 From 2f0bc54ba9a85032f2402faf0fccab513848300a Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 17 Jul 2020 00:16:29 +0200 Subject: xdp: introduce xdp_get_shared_info_from_{buff, frame} utility routines Introduce xdp_get_shared_info_from_{buff,frame} utility routines to get skb_shared_info from xdp buffer/frame pointer. 
xdp_get_shared_info_from_{buff,frame} will be used to implement xdp multi-buffer support Signed-off-by: Lorenzo Bianconi Signed-off-by: David S. Miller --- include/net/xdp.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'include') diff --git a/include/net/xdp.h b/include/net/xdp.h index 609f819ed08b..d3005bef812f 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -85,6 +85,12 @@ struct xdp_buff { ((xdp)->data_hard_start + (xdp)->frame_sz - \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +static inline struct skb_shared_info * +xdp_get_shared_info_from_buff(struct xdp_buff *xdp) +{ + return (struct skb_shared_info *)xdp_data_hard_end(xdp); +} + struct xdp_frame { void *data; u16 len; @@ -98,6 +104,15 @@ struct xdp_frame { struct net_device *dev_rx; /* used by cpumap */ }; +static inline struct skb_shared_info * +xdp_get_shared_info_from_frame(struct xdp_frame *frame) +{ + void *data_hard_start = frame->data - frame->headroom - sizeof(*frame); + + return (struct skb_shared_info *)(data_hard_start + frame->frame_sz - + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); +} + /* Clear kernel pointers in xdp_frame */ static inline void xdp_scrub_frame(struct xdp_frame *frame) { -- cgit v1.2.3 From eba75c587e811d3249c8bd50d22bb2266ccd3c0f Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Fri, 10 Jul 2020 09:29:02 -0400 Subject: icmp: support rfc 4884 Add setsockopt SOL_IP/IP_RECVERR_4884 to return the offset to an extension struct if present. ICMP messages may include an extension structure after the original datagram. RFC 4884 standardized this behavior. It stores the offset in words to the extension header in u8 icmphdr.un.reserved[1]. The field is valid only for ICMP types destination unreachable, time exceeded and parameter problem, if length is at least 128 bytes and entire packet does not exceed 576 bytes. Return the offset to the start of the extension struct when reading an ICMP error from the error queue, if it matches the above constraints. Do not return the raw u8 field. Return the offset from the start of the user buffer, in bytes. The kernel does not return the network and transport headers, so subtract those. Also validate the headers. Return the offset regardless of validation, as an invalid extension must still not be misinterpreted as part of the original datagram. Note that !invalid does not imply valid. If the extension version does not match, no validation can take place, for instance. For backward compatibility, make this optional, set by setsockopt SOL_IP/IP_RECVERR_RFC4884. For API example and feature test, see github.com/wdebruij/kerneltools/blob/master/tests/recv_icmp_v2.c For forward compatibility, reserve only setsockopt value 1, leaving other bits for additional icmp extensions. Changes v1->v2: - convert word offset to byte offset from start of user buffer - return in ee_data as u8 may be insufficient - define extension struct and object header structs - return len only if constraints met - if returning len, also validate Signed-off-by: Willem de Bruijn Signed-off-by: David S. 
Miller --- include/linux/icmp.h | 4 +++ include/net/inet_sock.h | 1 + include/uapi/linux/errqueue.h | 14 ++++++++- include/uapi/linux/icmp.h | 22 ++++++++++++++ include/uapi/linux/in.h | 1 + net/ipv4/icmp.c | 71 +++++++++++++++++++++++++++++++++++++++++++ net/ipv4/ip_sockglue.c | 12 ++++++++ 7 files changed, 124 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/icmp.h b/include/linux/icmp.h index 81ca84ce3119..8fc38a34cb20 100644 --- a/include/linux/icmp.h +++ b/include/linux/icmp.h @@ -15,6 +15,7 @@ #include #include +#include static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb) { @@ -35,4 +36,7 @@ static inline bool icmp_is_err(int type) return false; } +void ip_icmp_error_rfc4884(const struct sk_buff *skb, + struct sock_ee_data_rfc4884 *out); + #endif /* _LINUX_ICMP_H */ diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index a7ce00af6c44..a3702d1d4875 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -225,6 +225,7 @@ struct inet_sock { mc_all:1, nodefrag:1; __u8 bind_address_no_port:1, + recverr_rfc4884:1, defer_connect:1; /* Indicates that fastopen_connect is set * and cookie exists so we defer connect * until first data frame is written diff --git a/include/uapi/linux/errqueue.h b/include/uapi/linux/errqueue.h index ca5cb3e3c6df..3c70e8ac14b8 100644 --- a/include/uapi/linux/errqueue.h +++ b/include/uapi/linux/errqueue.h @@ -5,6 +5,13 @@ #include #include +/* RFC 4884: return offset to extension struct + validation */ +struct sock_ee_data_rfc4884 { + __u16 len; + __u8 flags; + __u8 reserved; +}; + struct sock_extended_err { __u32 ee_errno; __u8 ee_origin; @@ -12,7 +19,10 @@ struct sock_extended_err { __u8 ee_code; __u8 ee_pad; __u32 ee_info; - __u32 ee_data; + union { + __u32 ee_data; + struct sock_ee_data_rfc4884 ee_rfc4884; + }; }; #define SO_EE_ORIGIN_NONE 0 @@ -31,6 +41,8 @@ struct sock_extended_err { #define SO_EE_CODE_TXTIME_INVALID_PARAM 1 #define SO_EE_CODE_TXTIME_MISSED 2 +#define SO_EE_RFC4884_FLAG_INVALID 1 + /** * struct scm_timestamping - timestamps exposed through cmsg * diff --git a/include/uapi/linux/icmp.h b/include/uapi/linux/icmp.h index 5589eeb791ca..fb169a50895e 100644 --- a/include/uapi/linux/icmp.h +++ b/include/uapi/linux/icmp.h @@ -19,6 +19,7 @@ #define _UAPI_LINUX_ICMP_H #include +#include #define ICMP_ECHOREPLY 0 /* Echo Reply */ #define ICMP_DEST_UNREACH 3 /* Destination Unreachable */ @@ -95,5 +96,26 @@ struct icmp_filter { __u32 data; }; +/* RFC 4884 extension struct: one per message */ +struct icmp_ext_hdr { +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 reserved1:4, + version:4; +#elif defined(__BIG_ENDIAN_BITFIELD) + __u8 version:4, + reserved1:4; +#else +#error "Please fix " +#endif + __u8 reserved2; + __sum16 checksum; +}; + +/* RFC 4884 extension object header: one for each object */ +struct icmp_extobj_hdr { + __be16 length; + __u8 class_num; + __u8 class_type; +}; #endif /* _UAPI_LINUX_ICMP_H */ diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h index 8533bf07450f..3d0d8231dc19 100644 --- a/include/uapi/linux/in.h +++ b/include/uapi/linux/in.h @@ -123,6 +123,7 @@ struct in_addr { #define IP_CHECKSUM 23 #define IP_BIND_ADDRESS_NO_PORT 24 #define IP_RECVFRAGSIZE 25 +#define IP_RECVERR_RFC4884 26 /* IP_MTU_DISCOVER values */ #define IP_PMTUDISC_DONT 0 /* Never send DF frames */ diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index e30515f89802..793aebf07c2a 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -1116,6 +1116,77 @@ error: goto drop; } +static bool 
ip_icmp_error_rfc4884_validate(const struct sk_buff *skb, int off) +{ + struct icmp_extobj_hdr *objh, _objh; + struct icmp_ext_hdr *exth, _exth; + u16 olen; + + exth = skb_header_pointer(skb, off, sizeof(_exth), &_exth); + if (!exth) + return false; + if (exth->version != 2) + return true; + + if (exth->checksum && + csum_fold(skb_checksum(skb, off, skb->len - off, 0))) + return false; + + off += sizeof(_exth); + while (off < skb->len) { + objh = skb_header_pointer(skb, off, sizeof(_objh), &_objh); + if (!objh) + return false; + + olen = ntohs(objh->length); + if (olen < sizeof(_objh)) + return false; + + off += olen; + if (off > skb->len) + return false; + } + + return true; +} + +void ip_icmp_error_rfc4884(const struct sk_buff *skb, + struct sock_ee_data_rfc4884 *out) +{ + int hlen, off; + + switch (icmp_hdr(skb)->type) { + case ICMP_DEST_UNREACH: + case ICMP_TIME_EXCEEDED: + case ICMP_PARAMETERPROB: + break; + default: + return; + } + + /* outer headers up to inner iph. skb->data is at inner payload */ + hlen = -skb_transport_offset(skb) - sizeof(struct icmphdr); + + /* per rfc 791: maximum packet length of 576 bytes */ + if (hlen + skb->len > 576) + return; + + /* per rfc 4884: minimal datagram length of 128 bytes */ + off = icmp_hdr(skb)->un.reserved[1] * sizeof(u32); + if (off < 128) + return; + + /* kernel has stripped headers: return payload offset in bytes */ + off -= hlen; + if (off + sizeof(struct icmp_ext_hdr) > skb->len) + return; + + out->len = off; + + if (!ip_icmp_error_rfc4884_validate(skb, off)) + out->flags |= SO_EE_RFC4884_FLAG_INVALID; +} + int icmp_err(struct sk_buff *skb, u32 info) { struct iphdr *iph = (struct iphdr *)skb->data; diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 86b3b9a7cea3..a5ea02d7a183 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -411,6 +411,9 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, serr->port = port; if (skb_pull(skb, payload - skb->data)) { + if (inet_sk(sk)->recverr_rfc4884) + ip_icmp_error_rfc4884(skb, &serr->ee.ee_rfc4884); + skb_reset_transport_header(skb); if (sock_queue_err_skb(sk, skb) == 0) return; @@ -904,6 +907,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, case IP_RECVORIGDSTADDR: case IP_CHECKSUM: case IP_RECVFRAGSIZE: + case IP_RECVERR_RFC4884: if (optlen >= sizeof(int)) { if (get_user(val, (int __user *) optval)) return -EFAULT; @@ -1063,6 +1067,11 @@ static int do_ip_setsockopt(struct sock *sk, int level, if (!val) skb_queue_purge(&sk->sk_error_queue); break; + case IP_RECVERR_RFC4884: + if (val < 0 || val > 1) + goto e_inval; + inet->recverr_rfc4884 = !!val; + break; case IP_MULTICAST_TTL: if (sk->sk_type == SOCK_STREAM) goto e_inval; @@ -1611,6 +1620,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, case IP_RECVERR: val = inet->recverr; break; + case IP_RECVERR_RFC4884: + val = inet->recverr_rfc4884; + break; case IP_MULTICAST_TTL: val = inet->mc_ttl; break; -- cgit v1.2.3 From f65b71aa25a65e13cf3d10445a48c63d3eeb942e Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Fri, 17 Jul 2020 01:45:29 +0300 Subject: ptp: add ability to configure duty cycle for periodic output There are external event timestampers (PHCs with support for PTP_EXTTS_REQUEST) that timestamp both event edges. When those edges are very close (such as in the case of a short pulse), there is a chance that the collected timestamp might be of the rising, or of the falling edge, we never know. 
There are also PHCs capable of generating periodic output with a configurable duty cycle. This is good news, because we can space the rising and falling edge out enough in time, that the risks to overrun the 1-entry timestamp FIFO of the extts PHC are lower (example: the perout PHC can be configured for a period of 1 second, and an "on" time of 0.5 seconds, resulting in a duty cycle of 50%). A flag is introduced for signaling that an on time is present in the perout request structure, for preserving compatibility. Logically speaking, the duty cycle cannot exceed 100% and the PTP core checks for this. PHC drivers that don't support this flag emit a periodic output of an unspecified duty cycle, same as before. The duty cycle is encoded as an "on" time, similar to the "start" and "period" times, and reuses the reserved space while preserving overall binary layout. Pahole reported before: struct ptp_perout_request { struct ptp_clock_time start; /* 0 16 */ struct ptp_clock_time period; /* 16 16 */ unsigned int index; /* 32 4 */ unsigned int flags; /* 36 4 */ unsigned int rsv[4]; /* 40 16 */ /* size: 56, cachelines: 1, members: 5 */ /* last cacheline: 56 bytes */ }; And now: struct ptp_perout_request { struct ptp_clock_time start; /* 0 16 */ struct ptp_clock_time period; /* 16 16 */ unsigned int index; /* 32 4 */ unsigned int flags; /* 36 4 */ union { struct ptp_clock_time on; /* 40 16 */ unsigned int rsv[4]; /* 40 16 */ }; /* 40 16 */ /* size: 56, cachelines: 1, members: 5 */ /* last cacheline: 56 bytes */ }; Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/ptp/ptp_chardev.c | 33 +++++++++++++++++++++++++++------ include/uapi/linux/ptp_clock.h | 17 ++++++++++++++--- 2 files changed, 41 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c index 375cd6e4aade..e0e6f85966e1 100644 --- a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c @@ -191,12 +191,33 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) err = -EFAULT; break; } - if (((req.perout.flags & ~PTP_PEROUT_VALID_FLAGS) || - req.perout.rsv[0] || req.perout.rsv[1] || - req.perout.rsv[2] || req.perout.rsv[3]) && - cmd == PTP_PEROUT_REQUEST2) { - err = -EINVAL; - break; + if (cmd == PTP_PEROUT_REQUEST2) { + struct ptp_perout_request *perout = &req.perout; + + if (perout->flags & ~PTP_PEROUT_VALID_FLAGS) { + err = -EINVAL; + break; + } + /* + * The "on" field has undefined meaning if + * PTP_PEROUT_DUTY_CYCLE isn't set, we must still treat + * it as reserved, which must be set to zero. + */ + if (!(perout->flags & PTP_PEROUT_DUTY_CYCLE) && + (perout->rsv[0] || perout->rsv[1] || + perout->rsv[2] || perout->rsv[3])) { + err = -EINVAL; + break; + } + if (perout->flags & PTP_PEROUT_DUTY_CYCLE) { + /* The duty cycle must be subunitary. 
*/ + if (perout->on.sec > perout->period.sec || + (perout->on.sec == perout->period.sec && + perout->on.nsec > perout->period.nsec)) { + err = -ERANGE; + break; + } + } } else if (cmd == PTP_PEROUT_REQUEST) { req.perout.flags &= PTP_PEROUT_V1_VALID_FLAGS; req.perout.rsv[0] = 0; diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h index ff070aa64278..1d2841155f7d 100644 --- a/include/uapi/linux/ptp_clock.h +++ b/include/uapi/linux/ptp_clock.h @@ -53,12 +53,14 @@ /* * Bits of the ptp_perout_request.flags field: */ -#define PTP_PEROUT_ONE_SHOT (1<<0) +#define PTP_PEROUT_ONE_SHOT (1<<0) +#define PTP_PEROUT_DUTY_CYCLE (1<<1) /* * flag fields valid for the new PTP_PEROUT_REQUEST2 ioctl. */ -#define PTP_PEROUT_VALID_FLAGS (PTP_PEROUT_ONE_SHOT) +#define PTP_PEROUT_VALID_FLAGS (PTP_PEROUT_ONE_SHOT | \ + PTP_PEROUT_DUTY_CYCLE) /* * No flags are valid for the original PTP_PEROUT_REQUEST ioctl @@ -105,7 +107,16 @@ struct ptp_perout_request { struct ptp_clock_time period; /* Desired period, zero means disable. */ unsigned int index; /* Which channel to configure. */ unsigned int flags; - unsigned int rsv[4]; /* Reserved for future use. */ + union { + /* + * The "on" time of the signal. + * Must be lower than the period. + * Valid only if (flags & PTP_PEROUT_DUTY_CYCLE) is set. + */ + struct ptp_clock_time on; + /* Reserved for future use. */ + unsigned int rsv[4]; + }; }; #define PTP_MAX_SAMPLES 25 /* Maximum allowed offset measurement samples. */ -- cgit v1.2.3 From b6bd41363a1ca39282496803cc32f7515ed917fe Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Fri, 17 Jul 2020 01:45:30 +0300 Subject: ptp: introduce a phase offset in the periodic output request Some PHCs like the ocelot/felix switch cannot emit generic periodic output, but just PPS (pulse per second) signals, which: - don't start from arbitrary absolute times, but are rather phase-aligned to the beginning of [the closest next] second. - have an optional phase offset relative to that beginning of the second. For those, it was initially established that they should reject any other absolute time for the PTP_PEROUT_REQUEST than 0.000000000 [1]. But when it actually came to writing an application [2] that makes use of this functionality, we realized that we can't really deal generically with PHCs that support absolute start time, and with PHCs that don't, without an explicit interface. Namely, in an ideal world, PHC drivers would ensure that the "perout.start" value written to hardware will result in a functional output. This means that if the PTP time has become in the past of this PHC's current time, it should be automatically fast-forwarded by the driver into a close enough future time that is known to work (note: this is necessary only if the hardware doesn't do this fast-forward by itself). But we don't really know what is the status for PHC drivers in use today, so in the general sense, user space would be risking to have a non-functional periodic output if it simply asked for a start time of 0.000000000. So let's introduce a flag for this type of reduced-functionality hardware, named PTP_PEROUT_PHASE. The start time is just "soon", the only thing we know for sure about this signal is that its rising edge events, Rn, occur at: Rn = perout.phase + n * perout.period The "phase" in the periodic output structure is simply an alias to the "start" time, since both cannot logically be specified at the same time. Therefore, the binary layout of the structure is not affected. 
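A minimal user-space sketch combining the two new perout flags (the device path /dev/ptp0, channel 0 and the chosen times are assumptions, and the PHC driver must accept PTP_PEROUT_PHASE and PTP_PEROUT_DUTY_CYCLE): rising edges occur 100 ms past an unspecified second boundary and the signal stays high for half of the 1 s period.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_perout_request req;
	int fd = open("/dev/ptp0", O_RDWR);	/* assumed PHC character device */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.index = 0;					/* output channel, assumed */
	req.flags = PTP_PEROUT_PHASE | PTP_PEROUT_DUTY_CYCLE;
	req.phase.sec = 0;				/* offset from the period start... */
	req.phase.nsec = 100 * 1000 * 1000;		/* ...of 100 ms */
	req.period.sec = 1;				/* 1 s period */
	req.period.nsec = 0;
	req.on.sec = 0;					/* high for 500 ms -> 50% duty cycle */
	req.on.nsec = 500 * 1000 * 1000;

	if (ioctl(fd, PTP_PEROUT_REQUEST2, &req))
		perror("PTP_PEROUT_REQUEST2");

	return 0;
}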
[1]: https://patchwork.ozlabs.org/project/netdev/patch/20200320103726.32559-7-yangbo.lu@nxp.com/ [2]: https://www.mail-archive.com/linuxptp-devel@lists.sourceforge.net/msg04142.html Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- include/uapi/linux/ptp_clock.h | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h index 1d2841155f7d..1d108d597f66 100644 --- a/include/uapi/linux/ptp_clock.h +++ b/include/uapi/linux/ptp_clock.h @@ -55,12 +55,14 @@ */ #define PTP_PEROUT_ONE_SHOT (1<<0) #define PTP_PEROUT_DUTY_CYCLE (1<<1) +#define PTP_PEROUT_PHASE (1<<2) /* * flag fields valid for the new PTP_PEROUT_REQUEST2 ioctl. */ #define PTP_PEROUT_VALID_FLAGS (PTP_PEROUT_ONE_SHOT | \ - PTP_PEROUT_DUTY_CYCLE) + PTP_PEROUT_DUTY_CYCLE | \ + PTP_PEROUT_PHASE) /* * No flags are valid for the original PTP_PEROUT_REQUEST ioctl @@ -103,7 +105,20 @@ struct ptp_extts_request { }; struct ptp_perout_request { - struct ptp_clock_time start; /* Absolute start time. */ + union { + /* + * Absolute start time. + * Valid only if (flags & PTP_PEROUT_PHASE) is unset. + */ + struct ptp_clock_time start; + /* + * Phase offset. The signal should start toggling at an + * unspecified integer multiple of the period, plus this value. + * The start time should be "as soon as possible". + * Valid only if (flags & PTP_PEROUT_PHASE) is set. + */ + struct ptp_clock_time phase; + }; struct ptp_clock_time period; /* Desired period, zero means disable. */ unsigned int index; /* Which channel to configure. */ unsigned int flags; -- cgit v1.2.3 From 9ab0cb309e7950a1649bffade985e7ccc7aaf675 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Fri, 17 Jul 2020 17:45:17 +0200 Subject: ALSA: Replace the word "slave" in vmaster API Follow the recent inclusive terminology guidelines and replace the word "slave" in vmaster API. I chose the word "follower" at this time since it seems fitting for the purpose. Note that the word "master" is kept in API, since it refers rather to audio master volume control. Also, while we're at it, a typo in comments is corrected, too. 
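A minimal driver-side sketch of the renamed API (the card pointer, TLV data and the follower control names below are placeholders): create the virtual master, then look up each follower control and attach it with snd_ctl_add_follower().

#include <linux/string.h>
#include <sound/core.h>
#include <sound/control.h>

static int example_add_vmaster(struct snd_card *card,
			       const unsigned int *db_scale_tlv)
{
	static const char * const follower_names[] = {
		"Front Playback Volume",	/* assumed existing mixer controls */
		"Surround Playback Volume",
		NULL
	};
	struct snd_kcontrol *vmaster, *follower;
	struct snd_ctl_elem_id id;
	const char * const *name;
	int err;

	vmaster = snd_ctl_make_virtual_master("Master Playback Volume",
					      db_scale_tlv);
	if (!vmaster)
		return -ENOMEM;
	err = snd_ctl_add(card, vmaster);
	if (err < 0)
		return err;

	for (name = follower_names; *name; name++) {
		memset(&id, 0, sizeof(id));
		id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
		strscpy(id.name, *name, sizeof(id.name));
		follower = snd_ctl_find_id(card, &id);
		if (!follower)
			continue;		/* control not present on this card */
		err = snd_ctl_add_follower(vmaster, follower);
		if (err < 0)
			return err;
	}
	return 0;
}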
Link: https://lore.kernel.org/r/20200717154517.27599-1-tiwai@suse.de Signed-off-by: Takashi Iwai --- include/sound/control.h | 45 +++---- include/sound/hda_codec.h | 2 +- sound/core/vmaster.c | 263 ++++++++++++++++++++-------------------- sound/pci/ac97/ac97_patch.c | 20 +-- sound/pci/ca0106/ca0106_mixer.c | 18 +-- sound/pci/hda/hda_codec.c | 96 +++++++-------- sound/pci/hda/hda_generic.c | 20 +-- sound/pci/hda/hda_generic.h | 2 +- sound/pci/hda/hda_local.h | 10 +- sound/pci/hda/patch_ca0132.c | 10 +- sound/pci/hda/patch_sigmatel.c | 2 +- sound/pci/ice1712/juli.c | 20 +-- sound/pci/ice1712/quartet.c | 14 +-- sound/ppc/awacs.c | 12 +- sound/usb/6fire/control.c | 2 +- 15 files changed, 269 insertions(+), 267 deletions(-) (limited to 'include') diff --git a/include/sound/control.h b/include/sound/control.h index aeaed2a05bae..e128cff10dfa 100644 --- a/include/sound/control.h +++ b/include/sound/control.h @@ -188,20 +188,21 @@ int snd_ctl_enum_info(struct snd_ctl_elem_info *info, unsigned int channels, */ struct snd_kcontrol *snd_ctl_make_virtual_master(char *name, const unsigned int *tlv); -int _snd_ctl_add_slave(struct snd_kcontrol *master, struct snd_kcontrol *slave, - unsigned int flags); -/* optional flags for slave */ -#define SND_CTL_SLAVE_NEED_UPDATE (1 << 0) +int _snd_ctl_add_follower(struct snd_kcontrol *master, + struct snd_kcontrol *follower, + unsigned int flags); +/* optional flags for follower */ +#define SND_CTL_FOLLOWER_NEED_UPDATE (1 << 0) /** - * snd_ctl_add_slave - Add a virtual slave control + * snd_ctl_add_follower - Add a virtual follower control * @master: vmaster element - * @slave: slave element to add + * @follower: follower element to add * - * Add a virtual slave control to the given master element created via + * Add a virtual follower control to the given master element created via * snd_ctl_create_virtual_master() beforehand. * - * All slaves must be the same type (returning the same information + * All followers must be the same type (returning the same information * via info callback). The function doesn't check it, so it's your * responsibility. * @@ -213,18 +214,18 @@ int _snd_ctl_add_slave(struct snd_kcontrol *master, struct snd_kcontrol *slave, * Return: Zero if successful or a negative error code. */ static inline int -snd_ctl_add_slave(struct snd_kcontrol *master, struct snd_kcontrol *slave) +snd_ctl_add_follower(struct snd_kcontrol *master, struct snd_kcontrol *follower) { - return _snd_ctl_add_slave(master, slave, 0); + return _snd_ctl_add_follower(master, follower, 0); } /** - * snd_ctl_add_slave_uncached - Add a virtual slave control + * snd_ctl_add_follower_uncached - Add a virtual follower control * @master: vmaster element - * @slave: slave element to add + * @follower: follower element to add * - * Add a virtual slave control to the given master. - * Unlike snd_ctl_add_slave(), the element added via this function + * Add a virtual follower control to the given master. + * Unlike snd_ctl_add_follower(), the element added via this function * is supposed to have volatile values, and get callback is called * at each time queried from the master. * @@ -235,10 +236,10 @@ snd_ctl_add_slave(struct snd_kcontrol *master, struct snd_kcontrol *slave) * Return: Zero if successful or a negative error code. 
*/ static inline int -snd_ctl_add_slave_uncached(struct snd_kcontrol *master, - struct snd_kcontrol *slave) +snd_ctl_add_follower_uncached(struct snd_kcontrol *master, + struct snd_kcontrol *follower) { - return _snd_ctl_add_slave(master, slave, SND_CTL_SLAVE_NEED_UPDATE); + return _snd_ctl_add_follower(master, follower, SND_CTL_FOLLOWER_NEED_UPDATE); } int snd_ctl_add_vmaster_hook(struct snd_kcontrol *kctl, @@ -246,11 +247,11 @@ int snd_ctl_add_vmaster_hook(struct snd_kcontrol *kctl, void *private_data); void snd_ctl_sync_vmaster(struct snd_kcontrol *kctl, bool hook_only); #define snd_ctl_sync_vmaster_hook(kctl) snd_ctl_sync_vmaster(kctl, true) -int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl, - int (*func)(struct snd_kcontrol *vslave, - struct snd_kcontrol *slave, - void *arg), - void *arg); +int snd_ctl_apply_vmaster_followers(struct snd_kcontrol *kctl, + int (*func)(struct snd_kcontrol *vfollower, + struct snd_kcontrol *follower, + void *arg), + void *arg); /* * Helper functions for jack-detection controls diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h index d16a4229209b..f4cc364d837f 100644 --- a/include/sound/hda_codec.h +++ b/include/sound/hda_codec.h @@ -208,7 +208,7 @@ struct hda_codec { struct mutex control_mutex; struct snd_array spdif_out; unsigned int spdif_in_enable; /* SPDIF input enable? */ - const hda_nid_t *slave_dig_outs; /* optional digital out slave widgets */ + const hda_nid_t *follower_dig_outs; /* optional digital out follower widgets */ struct snd_array init_pins; /* initial (BIOS) pin configurations */ struct snd_array driver_pins; /* pin configs set by codec parser */ struct snd_array cvt_setups; /* audio convert setups */ diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c index ab54d79654c9..ab36f9898711 100644 --- a/sound/core/vmaster.c +++ b/sound/core/vmaster.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Virtual master and slave controls + * Virtual master and follower controls * * Copyright (c) 2008 by Takashi Iwai */ @@ -21,15 +21,15 @@ struct link_ctl_info { }; /* - * link master - this contains a list of slave controls that are + * link master - this contains a list of follower controls that are * identical types, i.e. info returns the same value type and value * ranges, but may have different number of counts. * * The master control is so far only mono volume/switch for simplicity. - * The same value will be applied to all slaves. + * The same value will be applied to all followers. */ struct link_master { - struct list_head slaves; + struct list_head followers; struct link_ctl_info info; int val; /* the master value */ unsigned int tlv[4]; @@ -38,23 +38,23 @@ struct link_master { }; /* - * link slave - this contains a slave control element + * link follower - this contains a follower control element * - * It fakes the control callbacsk with additional attenuation by the - * master control. A slave may have either one or two channels. + * It fakes the control callbacks with additional attenuation by the + * master control. A follower may have either one or two channels. 
*/ -struct link_slave { +struct link_follower { struct list_head list; struct link_master *master; struct link_ctl_info info; int vals[2]; /* current values */ unsigned int flags; struct snd_kcontrol *kctl; /* original kcontrol pointer */ - struct snd_kcontrol slave; /* the copy of original control entry */ + struct snd_kcontrol follower; /* the copy of original control entry */ }; -static int slave_update(struct link_slave *slave) +static int follower_update(struct link_follower *follower) { struct snd_ctl_elem_value *uctl; int err, ch; @@ -62,68 +62,68 @@ static int slave_update(struct link_slave *slave) uctl = kzalloc(sizeof(*uctl), GFP_KERNEL); if (!uctl) return -ENOMEM; - uctl->id = slave->slave.id; - err = slave->slave.get(&slave->slave, uctl); + uctl->id = follower->follower.id; + err = follower->follower.get(&follower->follower, uctl); if (err < 0) goto error; - for (ch = 0; ch < slave->info.count; ch++) - slave->vals[ch] = uctl->value.integer.value[ch]; + for (ch = 0; ch < follower->info.count; ch++) + follower->vals[ch] = uctl->value.integer.value[ch]; error: kfree(uctl); return err < 0 ? err : 0; } -/* get the slave ctl info and save the initial values */ -static int slave_init(struct link_slave *slave) +/* get the follower ctl info and save the initial values */ +static int follower_init(struct link_follower *follower) { struct snd_ctl_elem_info *uinfo; int err; - if (slave->info.count) { + if (follower->info.count) { /* already initialized */ - if (slave->flags & SND_CTL_SLAVE_NEED_UPDATE) - return slave_update(slave); + if (follower->flags & SND_CTL_FOLLOWER_NEED_UPDATE) + return follower_update(follower); return 0; } uinfo = kmalloc(sizeof(*uinfo), GFP_KERNEL); if (!uinfo) return -ENOMEM; - uinfo->id = slave->slave.id; - err = slave->slave.info(&slave->slave, uinfo); + uinfo->id = follower->follower.id; + err = follower->follower.info(&follower->follower, uinfo); if (err < 0) { kfree(uinfo); return err; } - slave->info.type = uinfo->type; - slave->info.count = uinfo->count; - if (slave->info.count > 2 || - (slave->info.type != SNDRV_CTL_ELEM_TYPE_INTEGER && - slave->info.type != SNDRV_CTL_ELEM_TYPE_BOOLEAN)) { - pr_err("ALSA: vmaster: invalid slave element\n"); + follower->info.type = uinfo->type; + follower->info.count = uinfo->count; + if (follower->info.count > 2 || + (follower->info.type != SNDRV_CTL_ELEM_TYPE_INTEGER && + follower->info.type != SNDRV_CTL_ELEM_TYPE_BOOLEAN)) { + pr_err("ALSA: vmaster: invalid follower element\n"); kfree(uinfo); return -EINVAL; } - slave->info.min_val = uinfo->value.integer.min; - slave->info.max_val = uinfo->value.integer.max; + follower->info.min_val = uinfo->value.integer.min; + follower->info.max_val = uinfo->value.integer.max; kfree(uinfo); - return slave_update(slave); + return follower_update(follower); } /* initialize master volume */ static int master_init(struct link_master *master) { - struct link_slave *slave; + struct link_follower *follower; if (master->info.count) return 0; /* already initialized */ - list_for_each_entry(slave, &master->slaves, list) { - int err = slave_init(slave); + list_for_each_entry(follower, &master->followers, list) { + int err = follower_init(follower); if (err < 0) return err; - master->info = slave->info; + master->info = follower->info; master->info.count = 1; /* always mono */ /* set full volume as default (= no attenuation) */ master->val = master->info.max_val; @@ -134,113 +134,113 @@ static int master_init(struct link_master *master) return -ENOENT; } -static int slave_get_val(struct 
link_slave *slave, - struct snd_ctl_elem_value *ucontrol) +static int follower_get_val(struct link_follower *follower, + struct snd_ctl_elem_value *ucontrol) { int err, ch; - err = slave_init(slave); + err = follower_init(follower); if (err < 0) return err; - for (ch = 0; ch < slave->info.count; ch++) - ucontrol->value.integer.value[ch] = slave->vals[ch]; + for (ch = 0; ch < follower->info.count; ch++) + ucontrol->value.integer.value[ch] = follower->vals[ch]; return 0; } -static int slave_put_val(struct link_slave *slave, - struct snd_ctl_elem_value *ucontrol) +static int follower_put_val(struct link_follower *follower, + struct snd_ctl_elem_value *ucontrol) { int err, ch, vol; - err = master_init(slave->master); + err = master_init(follower->master); if (err < 0) return err; - switch (slave->info.type) { + switch (follower->info.type) { case SNDRV_CTL_ELEM_TYPE_BOOLEAN: - for (ch = 0; ch < slave->info.count; ch++) + for (ch = 0; ch < follower->info.count; ch++) ucontrol->value.integer.value[ch] &= - !!slave->master->val; + !!follower->master->val; break; case SNDRV_CTL_ELEM_TYPE_INTEGER: - for (ch = 0; ch < slave->info.count; ch++) { + for (ch = 0; ch < follower->info.count; ch++) { /* max master volume is supposed to be 0 dB */ vol = ucontrol->value.integer.value[ch]; - vol += slave->master->val - slave->master->info.max_val; - if (vol < slave->info.min_val) - vol = slave->info.min_val; - else if (vol > slave->info.max_val) - vol = slave->info.max_val; + vol += follower->master->val - follower->master->info.max_val; + if (vol < follower->info.min_val) + vol = follower->info.min_val; + else if (vol > follower->info.max_val) + vol = follower->info.max_val; ucontrol->value.integer.value[ch] = vol; } break; } - return slave->slave.put(&slave->slave, ucontrol); + return follower->follower.put(&follower->follower, ucontrol); } /* - * ctl callbacks for slaves + * ctl callbacks for followers */ -static int slave_info(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_info *uinfo) +static int follower_info(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_info *uinfo) { - struct link_slave *slave = snd_kcontrol_chip(kcontrol); - return slave->slave.info(&slave->slave, uinfo); + struct link_follower *follower = snd_kcontrol_chip(kcontrol); + return follower->follower.info(&follower->follower, uinfo); } -static int slave_get(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) +static int follower_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) { - struct link_slave *slave = snd_kcontrol_chip(kcontrol); - return slave_get_val(slave, ucontrol); + struct link_follower *follower = snd_kcontrol_chip(kcontrol); + return follower_get_val(follower, ucontrol); } -static int slave_put(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) +static int follower_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) { - struct link_slave *slave = snd_kcontrol_chip(kcontrol); + struct link_follower *follower = snd_kcontrol_chip(kcontrol); int err, ch, changed = 0; - err = slave_init(slave); + err = follower_init(follower); if (err < 0) return err; - for (ch = 0; ch < slave->info.count; ch++) { - if (slave->vals[ch] != ucontrol->value.integer.value[ch]) { + for (ch = 0; ch < follower->info.count; ch++) { + if (follower->vals[ch] != ucontrol->value.integer.value[ch]) { changed = 1; - slave->vals[ch] = ucontrol->value.integer.value[ch]; + follower->vals[ch] = ucontrol->value.integer.value[ch]; } } if (!changed) return 0; - err = 
slave_put_val(slave, ucontrol); + err = follower_put_val(follower, ucontrol); if (err < 0) return err; return 1; } -static int slave_tlv_cmd(struct snd_kcontrol *kcontrol, - int op_flag, unsigned int size, - unsigned int __user *tlv) +static int follower_tlv_cmd(struct snd_kcontrol *kcontrol, + int op_flag, unsigned int size, + unsigned int __user *tlv) { - struct link_slave *slave = snd_kcontrol_chip(kcontrol); + struct link_follower *follower = snd_kcontrol_chip(kcontrol); /* FIXME: this assumes that the max volume is 0 dB */ - return slave->slave.tlv.c(&slave->slave, op_flag, size, tlv); + return follower->follower.tlv.c(&follower->follower, op_flag, size, tlv); } -static void slave_free(struct snd_kcontrol *kcontrol) +static void follower_free(struct snd_kcontrol *kcontrol) { - struct link_slave *slave = snd_kcontrol_chip(kcontrol); - if (slave->slave.private_free) - slave->slave.private_free(&slave->slave); - if (slave->master) - list_del(&slave->list); - kfree(slave); + struct link_follower *follower = snd_kcontrol_chip(kcontrol); + if (follower->follower.private_free) + follower->follower.private_free(&follower->follower); + if (follower->master) + list_del(&follower->list); + kfree(follower); } /* - * Add a slave control to the group with the given master control + * Add a follower control to the group with the given master control * - * All slaves must be the same type (returning the same information + * All followers must be the same type (returning the same information * via info callback). The function doesn't check it, so it's your * responsibility. * @@ -249,35 +249,36 @@ static void slave_free(struct snd_kcontrol *kcontrol) * - logarithmic volume control (dB level), no linear volume * - master can only attenuate the volume, no gain */ -int _snd_ctl_add_slave(struct snd_kcontrol *master, struct snd_kcontrol *slave, - unsigned int flags) +int _snd_ctl_add_follower(struct snd_kcontrol *master, + struct snd_kcontrol *follower, + unsigned int flags) { struct link_master *master_link = snd_kcontrol_chip(master); - struct link_slave *srec; + struct link_follower *srec; - srec = kzalloc(struct_size(srec, slave.vd, slave->count), + srec = kzalloc(struct_size(srec, follower.vd, follower->count), GFP_KERNEL); if (!srec) return -ENOMEM; - srec->kctl = slave; - srec->slave = *slave; - memcpy(srec->slave.vd, slave->vd, slave->count * sizeof(*slave->vd)); + srec->kctl = follower; + srec->follower = *follower; + memcpy(srec->follower.vd, follower->vd, follower->count * sizeof(*follower->vd)); srec->master = master_link; srec->flags = flags; /* override callbacks */ - slave->info = slave_info; - slave->get = slave_get; - slave->put = slave_put; - if (slave->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) - slave->tlv.c = slave_tlv_cmd; - slave->private_data = srec; - slave->private_free = slave_free; - - list_add_tail(&srec->list, &master_link->slaves); + follower->info = follower_info; + follower->get = follower_get; + follower->put = follower_put; + if (follower->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) + follower->tlv.c = follower_tlv_cmd; + follower->private_data = srec; + follower->private_free = follower_free; + + list_add_tail(&srec->list, &master_link->followers); return 0; } -EXPORT_SYMBOL(_snd_ctl_add_slave); +EXPORT_SYMBOL(_snd_ctl_add_follower); /* * ctl callbacks for master controls @@ -309,20 +310,20 @@ static int master_get(struct snd_kcontrol *kcontrol, return 0; } -static int sync_slaves(struct link_master *master, int old_val, int new_val) +static int 
sync_followers(struct link_master *master, int old_val, int new_val) { - struct link_slave *slave; + struct link_follower *follower; struct snd_ctl_elem_value *uval; uval = kmalloc(sizeof(*uval), GFP_KERNEL); if (!uval) return -ENOMEM; - list_for_each_entry(slave, &master->slaves, list) { + list_for_each_entry(follower, &master->followers, list) { master->val = old_val; - uval->id = slave->slave.id; - slave_get_val(slave, uval); + uval->id = follower->follower.id; + follower_get_val(follower, uval); master->val = new_val; - slave_put_val(slave, uval); + follower_put_val(follower, uval); } kfree(uval); return 0; @@ -344,7 +345,7 @@ static int master_put(struct snd_kcontrol *kcontrol, if (new_val == old_val) return 0; - err = sync_slaves(master, old_val, new_val); + err = sync_followers(master, old_val, new_val); if (err < 0) return err; if (master->hook && !first_init) @@ -355,17 +356,17 @@ static int master_put(struct snd_kcontrol *kcontrol, static void master_free(struct snd_kcontrol *kcontrol) { struct link_master *master = snd_kcontrol_chip(kcontrol); - struct link_slave *slave, *n; + struct link_follower *follower, *n; - /* free all slave links and retore the original slave kctls */ - list_for_each_entry_safe(slave, n, &master->slaves, list) { - struct snd_kcontrol *sctl = slave->kctl; + /* free all follower links and retore the original follower kctls */ + list_for_each_entry_safe(follower, n, &master->followers, list) { + struct snd_kcontrol *sctl = follower->kctl; struct list_head olist = sctl->list; - memcpy(sctl, &slave->slave, sizeof(*sctl)); - memcpy(sctl->vd, slave->slave.vd, + memcpy(sctl, &follower->follower, sizeof(*sctl)); + memcpy(sctl->vd, follower->follower.vd, sctl->count * sizeof(*sctl->vd)); sctl->list = olist; /* keep the current linked-list */ - kfree(slave); + kfree(follower); } kfree(master); } @@ -378,8 +379,8 @@ static void master_free(struct snd_kcontrol *kcontrol) * * Creates a virtual master control with the given name string. * - * After creating a vmaster element, you can add the slave controls - * via snd_ctl_add_slave() or snd_ctl_add_slave_uncached(). + * After creating a vmaster element, you can add the follower controls + * via snd_ctl_add_follower() or snd_ctl_add_follower_uncached(). * * The optional argument @tlv can be used to specify the TLV information * for dB scale of the master control. It should be a single element @@ -403,7 +404,7 @@ struct snd_kcontrol *snd_ctl_make_virtual_master(char *name, master = kzalloc(sizeof(*master), GFP_KERNEL); if (!master) return NULL; - INIT_LIST_HEAD(&master->slaves); + INIT_LIST_HEAD(&master->followers); kctl = snd_ctl_new1(&knew, master); if (!kctl) { @@ -455,11 +456,11 @@ int snd_ctl_add_vmaster_hook(struct snd_kcontrol *kcontrol, EXPORT_SYMBOL_GPL(snd_ctl_add_vmaster_hook); /** - * snd_ctl_sync_vmaster - Sync the vmaster slaves and hook + * snd_ctl_sync_vmaster - Sync the vmaster followers and hook * @kcontrol: vmaster kctl element * @hook_only: sync only the hook * - * Forcibly call the put callback of each slave and call the hook function + * Forcibly call the put callback of each follower and call the hook function * to synchronize with the current value of the given vmaster element. * NOP when NULL is passed to @kcontrol. 
*/ @@ -476,7 +477,7 @@ void snd_ctl_sync_vmaster(struct snd_kcontrol *kcontrol, bool hook_only) if (err < 0) return; first_init = err; - err = sync_slaves(master, master->val, master->val); + err = sync_followers(master, master->val, master->val); if (err < 0) return; } @@ -487,34 +488,34 @@ void snd_ctl_sync_vmaster(struct snd_kcontrol *kcontrol, bool hook_only) EXPORT_SYMBOL_GPL(snd_ctl_sync_vmaster); /** - * snd_ctl_apply_vmaster_slaves - Apply function to each vmaster slave + * snd_ctl_apply_vmaster_followers - Apply function to each vmaster follower * @kctl: vmaster kctl element * @func: function to apply * @arg: optional function argument * - * Apply the function @func to each slave kctl of the given vmaster kctl. + * Apply the function @func to each follower kctl of the given vmaster kctl. * Returns 0 if successful, or a negative error code. */ -int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl, - int (*func)(struct snd_kcontrol *vslave, - struct snd_kcontrol *slave, - void *arg), - void *arg) +int snd_ctl_apply_vmaster_followers(struct snd_kcontrol *kctl, + int (*func)(struct snd_kcontrol *vfollower, + struct snd_kcontrol *follower, + void *arg), + void *arg) { struct link_master *master; - struct link_slave *slave; + struct link_follower *follower; int err; master = snd_kcontrol_chip(kctl); err = master_init(master); if (err < 0) return err; - list_for_each_entry(slave, &master->slaves, list) { - err = func(slave->kctl, &slave->slave, arg); + list_for_each_entry(follower, &master->followers, list) { + err = func(follower->kctl, &follower->follower, arg); if (err < 0) return err; } return 0; } -EXPORT_SYMBOL_GPL(snd_ctl_apply_vmaster_slaves); +EXPORT_SYMBOL_GPL(snd_ctl_apply_vmaster_followers); diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c index 40967d9cc631..1627a74baf3c 100644 --- a/sound/pci/ac97/ac97_patch.c +++ b/sound/pci/ac97/ac97_patch.c @@ -19,7 +19,7 @@ static struct snd_kcontrol *snd_ac97_find_mixer_ctl(struct snd_ac97 *ac97, const char *name); static int snd_ac97_add_vmaster(struct snd_ac97 *ac97, char *name, const unsigned int *tlv, - const char * const *slaves); + const char * const *followers); /* * Chip specific initialization @@ -3373,7 +3373,7 @@ AC97_SINGLE("Downmix LFE and Center to Front", 0x5a, 12, 1, 0), AC97_SINGLE("Downmix Surround to Front", 0x5a, 11, 1, 0), }; -static const char * const slave_vols_vt1616[] = { +static const char * const follower_vols_vt1616[] = { "Front Playback Volume", "Surround Playback Volume", "Center Playback Volume", @@ -3381,7 +3381,7 @@ static const char * const slave_vols_vt1616[] = { NULL }; -static const char * const slave_sws_vt1616[] = { +static const char * const follower_sws_vt1616[] = { "Front Playback Switch", "Surround Playback Switch", "Center Playback Switch", @@ -3400,10 +3400,10 @@ static struct snd_kcontrol *snd_ac97_find_mixer_ctl(struct snd_ac97 *ac97, return snd_ctl_find_id(ac97->bus->card, &id); } -/* create a virtual master control and add slaves */ +/* create a virtual master control and add followers */ static int snd_ac97_add_vmaster(struct snd_ac97 *ac97, char *name, const unsigned int *tlv, - const char * const *slaves) + const char * const *followers) { struct snd_kcontrol *kctl; const char * const *s; @@ -3416,16 +3416,16 @@ static int snd_ac97_add_vmaster(struct snd_ac97 *ac97, char *name, if (err < 0) return err; - for (s = slaves; *s; s++) { + for (s = followers; *s; s++) { struct snd_kcontrol *sctl; sctl = snd_ac97_find_mixer_ctl(ac97, *s); if (!sctl) { 
dev_dbg(ac97->bus->card->dev, - "Cannot find slave %s, skipped\n", *s); + "Cannot find follower %s, skipped\n", *s); continue; } - err = snd_ctl_add_slave(kctl, sctl); + err = snd_ctl_add_follower(kctl, sctl); if (err < 0) return err; } @@ -3451,12 +3451,12 @@ static int patch_vt1616_specific(struct snd_ac97 * ac97) snd_ac97_rename_vol_ctl(ac97, "Master Playback", "Front Playback"); err = snd_ac97_add_vmaster(ac97, "Master Playback Volume", - kctl->tlv.p, slave_vols_vt1616); + kctl->tlv.p, follower_vols_vt1616); if (err < 0) return err; err = snd_ac97_add_vmaster(ac97, "Master Playback Switch", - NULL, slave_sws_vt1616); + NULL, follower_sws_vt1616); if (err < 0) return err; diff --git a/sound/pci/ca0106/ca0106_mixer.c b/sound/pci/ca0106/ca0106_mixer.c index 3b8ec673dc0a..c852c6a75b91 100644 --- a/sound/pci/ca0106/ca0106_mixer.c +++ b/sound/pci/ca0106/ca0106_mixer.c @@ -739,7 +739,7 @@ static int rename_ctl(struct snd_card *card, const char *src, const char *dst) static DECLARE_TLV_DB_SCALE(snd_ca0106_master_db_scale, -6375, 25, 1); -static const char * const slave_vols[] = { +static const char * const follower_vols[] = { "Analog Front Playback Volume", "Analog Rear Playback Volume", "Analog Center/LFE Playback Volume", @@ -752,7 +752,7 @@ static const char * const slave_vols[] = { NULL }; -static const char * const slave_sws[] = { +static const char * const follower_sws[] = { "Analog Front Playback Switch", "Analog Rear Playback Switch", "Analog Center/LFE Playback Switch", @@ -761,13 +761,13 @@ static const char * const slave_sws[] = { NULL }; -static void add_slaves(struct snd_card *card, - struct snd_kcontrol *master, const char * const *list) +static void add_followers(struct snd_card *card, + struct snd_kcontrol *master, const char * const *list) { for (; *list; list++) { - struct snd_kcontrol *slave = ctl_find(card, *list); - if (slave) - snd_ctl_add_slave(master, slave); + struct snd_kcontrol *follower = ctl_find(card, *list); + if (follower) + snd_ctl_add_follower(master, follower); } } @@ -852,7 +852,7 @@ int snd_ca0106_mixer(struct snd_ca0106 *emu) err = snd_ctl_add(card, vmaster); if (err < 0) return err; - add_slaves(card, vmaster, slave_vols); + add_followers(card, vmaster, follower_vols); if (emu->details->spi_dac) { vmaster = snd_ctl_make_virtual_master("Master Playback Switch", @@ -862,7 +862,7 @@ int snd_ca0106_mixer(struct snd_ca0106 *emu) err = snd_ctl_add(card, vmaster); if (err < 0) return err; - add_slaves(card, vmaster, slave_sws); + add_followers(card, vmaster, follower_sws); } strcpy(card->mixername, "CA0106"); diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 7e3ae4534df9..40f3c175954d 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -785,7 +785,7 @@ void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec) snd_array_free(&codec->spdif_out); snd_array_free(&codec->verbs); codec->preset = NULL; - codec->slave_dig_outs = NULL; + codec->follower_dig_outs = NULL; codec->spdif_status_reset = 0; snd_array_free(&codec->mixers); snd_array_free(&codec->nids); @@ -1806,11 +1806,11 @@ int snd_hda_codec_reset(struct hda_codec *codec) return 0; } -typedef int (*map_slave_func_t)(struct hda_codec *, void *, struct snd_kcontrol *); +typedef int (*map_follower_func_t)(struct hda_codec *, void *, struct snd_kcontrol *); -/* apply the function to all matching slave ctls in the mixer list */ -static int map_slaves(struct hda_codec *codec, const char * const *slaves, - const char *suffix, map_slave_func_t func, void *data) +/* 
apply the function to all matching follower ctls in the mixer list */ +static int map_followers(struct hda_codec *codec, const char * const *followers, + const char *suffix, map_follower_func_t func, void *data) { struct hda_nid_item *items; const char * const *s; @@ -1821,7 +1821,7 @@ static int map_slaves(struct hda_codec *codec, const char * const *slaves, struct snd_kcontrol *sctl = items[i].kctl; if (!sctl || sctl->id.iface != SNDRV_CTL_ELEM_IFACE_MIXER) continue; - for (s = slaves; *s; s++) { + for (s = followers; *s; s++) { char tmpname[sizeof(sctl->id.name)]; const char *name = *s; if (suffix) { @@ -1840,8 +1840,8 @@ static int map_slaves(struct hda_codec *codec, const char * const *slaves, return 0; } -static int check_slave_present(struct hda_codec *codec, - void *data, struct snd_kcontrol *sctl) +static int check_follower_present(struct hda_codec *codec, + void *data, struct snd_kcontrol *sctl) { return 1; } @@ -1860,17 +1860,17 @@ static int put_kctl_with_value(struct snd_kcontrol *kctl, int val) return 0; } -struct slave_init_arg { +struct follower_init_arg { struct hda_codec *codec; int step; }; -/* initialize the slave volume with 0dB via snd_ctl_apply_vmaster_slaves() */ -static int init_slave_0dB(struct snd_kcontrol *slave, - struct snd_kcontrol *kctl, - void *_arg) +/* initialize the follower volume with 0dB via snd_ctl_apply_vmaster_followers() */ +static int init_follower_0dB(struct snd_kcontrol *follower, + struct snd_kcontrol *kctl, + void *_arg) { - struct slave_init_arg *arg = _arg; + struct follower_init_arg *arg = _arg; int _tlv[4]; const int *tlv = NULL; int step; @@ -1879,7 +1879,7 @@ static int init_slave_0dB(struct snd_kcontrol *slave, if (kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) { if (kctl->tlv.c != snd_hda_mixer_amp_tlv) { codec_err(arg->codec, - "Unexpected TLV callback for slave %s:%d\n", + "Unexpected TLV callback for follower %s:%d\n", kctl->id.name, kctl->id.index); return 0; /* ignore */ } @@ -1897,7 +1897,7 @@ static int init_slave_0dB(struct snd_kcontrol *slave, return 0; if (arg->step && arg->step != step) { codec_err(arg->codec, - "Mismatching dB step for vmaster slave (%d!=%d)\n", + "Mismatching dB step for vmaster follower (%d!=%d)\n", arg->step, step); return 0; } @@ -1905,49 +1905,49 @@ static int init_slave_0dB(struct snd_kcontrol *slave, arg->step = step; val = -tlv[SNDRV_CTL_TLVO_DB_SCALE_MIN] / step; if (val > 0) { - put_kctl_with_value(slave, val); + put_kctl_with_value(follower, val); return val; } return 0; } -/* unmute the slave via snd_ctl_apply_vmaster_slaves() */ -static int init_slave_unmute(struct snd_kcontrol *slave, - struct snd_kcontrol *kctl, - void *_arg) +/* unmute the follower via snd_ctl_apply_vmaster_followers() */ +static int init_follower_unmute(struct snd_kcontrol *follower, + struct snd_kcontrol *kctl, + void *_arg) { - return put_kctl_with_value(slave, 1); + return put_kctl_with_value(follower, 1); } -static int add_slave(struct hda_codec *codec, - void *data, struct snd_kcontrol *slave) +static int add_follower(struct hda_codec *codec, + void *data, struct snd_kcontrol *follower) { - return snd_ctl_add_slave(data, slave); + return snd_ctl_add_follower(data, follower); } /** - * __snd_hda_add_vmaster - create a virtual master control and add slaves + * __snd_hda_add_vmaster - create a virtual master control and add followers * @codec: HD-audio codec * @name: vmaster control name * @tlv: TLV data (optional) - * @slaves: slave control names (optional) - * @suffix: suffix string to each slave name 
(optional) - * @init_slave_vol: initialize slaves to unmute/0dB + * @followers: follower control names (optional) + * @suffix: suffix string to each follower name (optional) + * @init_follower_vol: initialize followers to unmute/0dB * @ctl_ret: store the vmaster kcontrol in return * * Create a virtual master control with the given name. The TLV data * must be either NULL or a valid data. * - * @slaves is a NULL-terminated array of strings, each of which is a - * slave control name. All controls with these names are assigned to + * @followers is a NULL-terminated array of strings, each of which is a + * follower control name. All controls with these names are assigned to * the new virtual master control. * * This function returns zero if successful or a negative error code. */ int __snd_hda_add_vmaster(struct hda_codec *codec, char *name, - unsigned int *tlv, const char * const *slaves, - const char *suffix, bool init_slave_vol, + unsigned int *tlv, const char * const *followers, + const char *suffix, bool init_follower_vol, struct snd_kcontrol **ctl_ret) { struct snd_kcontrol *kctl; @@ -1956,9 +1956,9 @@ int __snd_hda_add_vmaster(struct hda_codec *codec, char *name, if (ctl_ret) *ctl_ret = NULL; - err = map_slaves(codec, slaves, suffix, check_slave_present, NULL); + err = map_followers(codec, followers, suffix, check_follower_present, NULL); if (err != 1) { - codec_dbg(codec, "No slave found for %s\n", name); + codec_dbg(codec, "No follower found for %s\n", name); return 0; } kctl = snd_ctl_make_virtual_master(name, tlv); @@ -1968,20 +1968,20 @@ int __snd_hda_add_vmaster(struct hda_codec *codec, char *name, if (err < 0) return err; - err = map_slaves(codec, slaves, suffix, add_slave, kctl); + err = map_followers(codec, followers, suffix, add_follower, kctl); if (err < 0) return err; /* init with master mute & zero volume */ put_kctl_with_value(kctl, 0); - if (init_slave_vol) { - struct slave_init_arg arg = { + if (init_follower_vol) { + struct follower_init_arg arg = { .codec = codec, .step = 0, }; - snd_ctl_apply_vmaster_slaves(kctl, - tlv ? init_slave_0dB : init_slave_unmute, - &arg); + snd_ctl_apply_vmaster_followers(kctl, + tlv ? 
init_follower_0dB : init_follower_unmute, + &arg); } if (ctl_ret) @@ -2284,7 +2284,7 @@ static unsigned int convert_to_spdif_status(unsigned short val) return sbits; } -/* set digital convert verbs both for the given NID and its slaves */ +/* set digital convert verbs both for the given NID and its followers */ static void set_dig_out(struct hda_codec *codec, hda_nid_t nid, int mask, int val) { @@ -2292,7 +2292,7 @@ static void set_dig_out(struct hda_codec *codec, hda_nid_t nid, snd_hdac_regmap_update(&codec->core, nid, AC_VERB_SET_DIGI_CONVERT_1, mask, val); - d = codec->slave_dig_outs; + d = codec->follower_dig_outs; if (!d) return; for (; *d; d++) @@ -3580,9 +3580,9 @@ static void setup_dig_out_stream(struct hda_codec *codec, hda_nid_t nid, spdif->ctls & ~AC_DIG1_ENABLE & 0xff, -1); snd_hda_codec_setup_stream(codec, nid, stream_tag, 0, format); - if (codec->slave_dig_outs) { + if (codec->follower_dig_outs) { const hda_nid_t *d; - for (d = codec->slave_dig_outs; *d; d++) + for (d = codec->follower_dig_outs; *d; d++) snd_hda_codec_setup_stream(codec, *d, stream_tag, 0, format); } @@ -3595,9 +3595,9 @@ static void setup_dig_out_stream(struct hda_codec *codec, hda_nid_t nid, static void cleanup_dig_out_stream(struct hda_codec *codec, hda_nid_t nid) { snd_hda_codec_cleanup_stream(codec, nid); - if (codec->slave_dig_outs) { + if (codec->follower_dig_outs) { const hda_nid_t *d; - for (d = codec->slave_dig_outs; *d; d++) + for (d = codec->follower_dig_outs; *d; d++) snd_hda_codec_cleanup_stream(codec, *d); } } @@ -3679,7 +3679,7 @@ EXPORT_SYMBOL_GPL(snd_hda_multi_out_dig_close); * @hinfo: PCM information to assign * * Open analog outputs and set up the hw-constraints. - * If the digital outputs can be opened as slave, open the digital + * If the digital outputs can be opened as follower, open the digital * outputs, too. 
*/ int snd_hda_multi_out_analog_open(struct hda_codec *codec, diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 9074265d934a..d07947a73ac2 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -4112,7 +4112,7 @@ static void parse_digital(struct hda_codec *codec) int i, nums; hda_nid_t dig_nid, pin; - /* support multiple SPDIFs; the secondary is set up as a slave */ + /* support multiple SPDIFs; the secondary is set up as a follower */ nums = 0; for (i = 0; i < spec->autocfg.dig_outs; i++) { pin = spec->autocfg.dig_out_pins[i]; @@ -4131,10 +4131,10 @@ static void parse_digital(struct hda_codec *codec) spec->multiout.dig_out_nid = dig_nid; spec->dig_out_type = spec->autocfg.dig_out_type[0]; } else { - spec->multiout.slave_dig_outs = spec->slave_dig_outs; - if (nums >= ARRAY_SIZE(spec->slave_dig_outs) - 1) + spec->multiout.follower_dig_outs = spec->follower_dig_outs; + if (nums >= ARRAY_SIZE(spec->follower_dig_outs) - 1) break; - spec->slave_dig_outs[nums - 1] = dig_nid; + spec->follower_dig_outs[nums - 1] = dig_nid; } nums++; } @@ -4589,7 +4589,7 @@ static void call_update_outputs(struct hda_codec *codec) else snd_hda_gen_update_outputs(codec); - /* sync the whole vmaster slaves to reflect the new auto-mute status */ + /* sync the whole vmaster followers to reflect the new auto-mute status */ if (spec->auto_mute_via_amp && !codec->bus->shutdown) snd_ctl_sync_vmaster(spec->vmaster_mute.sw_kctl, false); } @@ -5233,8 +5233,8 @@ EXPORT_SYMBOL_GPL(snd_hda_gen_parse_auto_config); * Build control elements */ -/* slave controls for virtual master */ -static const char * const slave_pfxs[] = { +/* follower controls for virtual master */ +static const char * const follower_pfxs[] = { "Front", "Surround", "Center", "LFE", "Side", "Headphone", "Speaker", "Mono", "Line Out", "CLFE", "Bass Speaker", "PCM", @@ -5286,7 +5286,7 @@ int snd_hda_gen_build_controls(struct hda_codec *codec) if (!spec->no_analog && !spec->suppress_vmaster && !snd_hda_find_mixer_ctl(codec, "Master Playback Volume")) { err = snd_hda_add_vmaster(codec, "Master Playback Volume", - spec->vmaster_tlv, slave_pfxs, + spec->vmaster_tlv, follower_pfxs, "Playback Volume"); if (err < 0) return err; @@ -5294,7 +5294,7 @@ int snd_hda_gen_build_controls(struct hda_codec *codec) if (!spec->no_analog && !spec->suppress_vmaster && !snd_hda_find_mixer_ctl(codec, "Master Playback Switch")) { err = __snd_hda_add_vmaster(codec, "Master Playback Switch", - NULL, slave_pfxs, + NULL, follower_pfxs, "Playback Switch", true, &spec->vmaster_mute.sw_kctl); if (err < 0) @@ -5809,7 +5809,7 @@ int snd_hda_gen_build_pcms(struct hda_codec *codec) spec->stream_name_digital); if (!info) return -ENOMEM; - codec->slave_dig_outs = spec->multiout.slave_dig_outs; + codec->follower_dig_outs = spec->multiout.follower_dig_outs; spec->pcm_rec[1] = info; if (spec->dig_out_type) info->pcm_type = spec->dig_out_type; diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h index bbd6d2b741f2..a43f0bb77dae 100644 --- a/sound/pci/hda/hda_generic.h +++ b/sound/pci/hda/hda_generic.h @@ -116,7 +116,7 @@ struct hda_gen_spec { * dig_out_nid and hp_nid are optional */ hda_nid_t alt_dac_nid; - hda_nid_t slave_dig_outs[3]; /* optional - for auto-parsing */ + hda_nid_t follower_dig_outs[3]; /* optional - for auto-parsing */ int dig_out_type; /* capture */ diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h index 3dca65d79b02..8c28b1022f49 100644 --- a/sound/pci/hda/hda_local.h +++ b/sound/pci/hda/hda_local.h @@ 
-129,11 +129,11 @@ void snd_hda_set_vmaster_tlv(struct hda_codec *codec, hda_nid_t nid, int dir, struct snd_kcontrol *snd_hda_find_mixer_ctl(struct hda_codec *codec, const char *name); int __snd_hda_add_vmaster(struct hda_codec *codec, char *name, - unsigned int *tlv, const char * const *slaves, - const char *suffix, bool init_slave_vol, + unsigned int *tlv, const char * const *followers, + const char *suffix, bool init_follower_vol, struct snd_kcontrol **ctl_ret); -#define snd_hda_add_vmaster(codec, name, tlv, slaves, suffix) \ - __snd_hda_add_vmaster(codec, name, tlv, slaves, suffix, true, NULL) +#define snd_hda_add_vmaster(codec, name, tlv, followers, suffix) \ + __snd_hda_add_vmaster(codec, name, tlv, followers, suffix, true, NULL) int snd_hda_codec_reset(struct hda_codec *codec); void snd_hda_codec_register(struct hda_codec *codec); void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec); @@ -216,7 +216,7 @@ struct hda_multi_out { hda_nid_t hp_out_nid[HDA_MAX_OUTS]; /* DACs for multiple HPs */ hda_nid_t extra_out_nid[HDA_MAX_OUTS]; /* other (e.g. speaker) DACs */ hda_nid_t dig_out_nid; /* digital out audio widget */ - const hda_nid_t *slave_dig_outs; + const hda_nid_t *follower_dig_outs; int max_channels; /* currently supported analog channels */ int dig_out_used; /* current usage of digital out (HDA_DIG_XXX) */ int no_share_stream; /* don't share a stream with multiple pins */ diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index 34fe753a46fb..19c575fd28a1 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c @@ -6245,10 +6245,10 @@ static int zxr_add_headphone_gain_switch(struct hda_codec *codec) } /* - * Need to create slave controls for the alternate codecs that have surround + * Need to create follower controls for the alternate codecs that have surround * capabilities. */ -static const char * const ca0132_alt_slave_pfxs[] = { +static const char * const ca0132_alt_follower_pfxs[] = { "Front", "Surround", "Center", "LFE", NULL, }; @@ -6376,15 +6376,15 @@ static int ca0132_build_controls(struct hda_codec *codec) if (err < 0) return err; } - /* Setup vmaster with surround slaves for desktop ca0132 devices */ + /* Setup vmaster with surround followers for desktop ca0132 devices */ if (ca0132_use_alt_functions(spec)) { snd_hda_set_vmaster_tlv(codec, spec->dacs[0], HDA_OUTPUT, spec->tlv); snd_hda_add_vmaster(codec, "Master Playback Volume", - spec->tlv, ca0132_alt_slave_pfxs, + spec->tlv, ca0132_alt_follower_pfxs, "Playback Volume"); err = __snd_hda_add_vmaster(codec, "Master Playback Switch", - NULL, ca0132_alt_slave_pfxs, + NULL, ca0132_alt_follower_pfxs, "Playback Switch", true, &spec->vmaster_mute.sw_kctl); if (err < 0) diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index d8ed69cb9df1..7f5ed5805d3b 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -3135,7 +3135,7 @@ static void fixup_hp_headphone(struct hda_codec *codec, hda_nid_t pin) unsigned int pin_cfg = snd_hda_codec_get_pincfg(codec, pin); /* It was changed in the BIOS to just satisfy MS DTM. 
- * Lets turn it back into slaved HP + * Lets turn it back into follower HP */ pin_cfg = (pin_cfg & (~AC_DEFCFG_DEVICE)) | (AC_JACK_HP_OUT << AC_DEFCFG_DEVICE_SHIFT); diff --git a/sound/pci/ice1712/juli.c b/sound/pci/ice1712/juli.c index 7be4eb42f05e..e57a55cebc5a 100644 --- a/sound/pci/ice1712/juli.c +++ b/sound/pci/ice1712/juli.c @@ -397,7 +397,7 @@ static const struct snd_kcontrol_new juli_mute_controls[] = { }, }; -static const char * const slave_vols[] = { +static const char * const follower_vols[] = { PCM_VOLUME, MONITOR_AN_IN_VOLUME, MONITOR_DIG_IN_VOLUME, @@ -418,16 +418,16 @@ static struct snd_kcontrol *ctl_find(struct snd_card *card, return snd_ctl_find_id(card, &sid); } -static void add_slaves(struct snd_card *card, - struct snd_kcontrol *master, - const char * const *list) +static void add_followers(struct snd_card *card, + struct snd_kcontrol *master, + const char * const *list) { for (; *list; list++) { - struct snd_kcontrol *slave = ctl_find(card, *list); - /* dev_dbg(card->dev, "add_slaves - %s\n", *list); */ - if (slave) { - /* dev_dbg(card->dev, "slave %s found\n", *list); */ - snd_ctl_add_slave(master, slave); + struct snd_kcontrol *follower = ctl_find(card, *list); + /* dev_dbg(card->dev, "add_followers - %s\n", *list); */ + if (follower) { + /* dev_dbg(card->dev, "follower %s found\n", *list); */ + snd_ctl_add_follower(master, follower); } } } @@ -454,7 +454,7 @@ static int juli_add_controls(struct snd_ice1712 *ice) juli_master_db_scale); if (!vmaster) return -ENOMEM; - add_slaves(ice->card, vmaster, slave_vols); + add_followers(ice->card, vmaster, follower_vols); err = snd_ctl_add(ice->card, vmaster); if (err < 0) return err; diff --git a/sound/pci/ice1712/quartet.c b/sound/pci/ice1712/quartet.c index 866596205710..0e3e04aa9faf 100644 --- a/sound/pci/ice1712/quartet.c +++ b/sound/pci/ice1712/quartet.c @@ -757,7 +757,7 @@ static const struct snd_kcontrol_new qtet_controls[] = { QTET_CONTROL("Output 3/4 to Monitor 1/2", sw, OUT34_MON12), }; -static const char * const slave_vols[] = { +static const char * const follower_vols[] = { PCM_12_PLAYBACK_VOLUME, PCM_34_PLAYBACK_VOLUME, NULL @@ -776,13 +776,13 @@ static struct snd_kcontrol *ctl_find(struct snd_card *card, return snd_ctl_find_id(card, &sid); } -static void add_slaves(struct snd_card *card, - struct snd_kcontrol *master, const char * const *list) +static void add_followers(struct snd_card *card, + struct snd_kcontrol *master, const char * const *list) { for (; *list; list++) { - struct snd_kcontrol *slave = ctl_find(card, *list); - if (slave) - snd_ctl_add_slave(master, slave); + struct snd_kcontrol *follower = ctl_find(card, *list); + if (follower) + snd_ctl_add_follower(master, follower); } } @@ -806,7 +806,7 @@ static int qtet_add_controls(struct snd_ice1712 *ice) qtet_master_db_scale); if (!vmaster) return -ENOMEM; - add_slaves(ice->card, vmaster, slave_vols); + add_followers(ice->card, vmaster, follower_vols); err = snd_ctl_add(ice->card, vmaster); if (err < 0) return err; diff --git a/sound/ppc/awacs.c b/sound/ppc/awacs.c index 73c0fd7277e6..53d558b2806c 100644 --- a/sound/ppc/awacs.c +++ b/sound/ppc/awacs.c @@ -1063,12 +1063,12 @@ snd_pmac_awacs_init(struct snd_pmac *chip) if (pm5500 || imac || lombard) { vmaster_sw = snd_ctl_make_virtual_master( "Master Playback Switch", (unsigned int *) NULL); - err = snd_ctl_add_slave_uncached(vmaster_sw, - chip->master_sw_ctl); + err = snd_ctl_add_follower_uncached(vmaster_sw, + chip->master_sw_ctl); if (err < 0) return err; - err = snd_ctl_add_slave_uncached(vmaster_sw, 
- chip->speaker_sw_ctl); + err = snd_ctl_add_follower_uncached(vmaster_sw, + chip->speaker_sw_ctl); if (err < 0) return err; err = snd_ctl_add(chip->card, vmaster_sw); @@ -1076,10 +1076,10 @@ snd_pmac_awacs_init(struct snd_pmac *chip) return err; vmaster_vol = snd_ctl_make_virtual_master( "Master Playback Volume", (unsigned int *) NULL); - err = snd_ctl_add_slave(vmaster_vol, master_vol); + err = snd_ctl_add_follower(vmaster_vol, master_vol); if (err < 0) return err; - err = snd_ctl_add_slave(vmaster_vol, speaker_vol); + err = snd_ctl_add_follower(vmaster_vol, speaker_vol); if (err < 0) return err; err = snd_ctl_add(chip->card, vmaster_vol); diff --git a/sound/usb/6fire/control.c b/sound/usb/6fire/control.c index 20f34d2ace5f..9bd8dcbb68e4 100644 --- a/sound/usb/6fire/control.c +++ b/sound/usb/6fire/control.c @@ -539,7 +539,7 @@ static int usb6fire_control_add_virtual( ret = snd_ctl_add(card, control); if (ret < 0) return ret; - ret = snd_ctl_add_slave(vmaster, control); + ret = snd_ctl_add_follower(vmaster, control); if (ret < 0) return ret; i++; -- cgit v1.2.3 From c6a8b84da4c28bda61b842a089651c3ec9d89a48 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 17 Jul 2020 16:36:50 -0700 Subject: modules: linux/moduleparam.h: drop duplicated word in a comment Drop the doubled word "the" in a comment. Signed-off-by: Randy Dunlap Cc: Jessica Yu Signed-off-by: Jessica Yu --- include/linux/moduleparam.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index 3ef917ff0964..1ad5aa3b86d9 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -108,7 +108,7 @@ struct kparam_array * ".") the kernel commandline parameter. Note that - is changed to _, so * the user can use "foo-bar=1" even for variable "foo_bar". * - * @perm is 0 if the the variable is not to appear in sysfs, or 0444 + * @perm is 0 if the variable is not to appear in sysfs, or 0444 * for world-readable, 0644 for root-writable, etc. Note that if it * is writable, you may need to use kernel_param_lock() around * accesses (esp. charp, which can be kfreed when it changes). -- cgit v1.2.3 From 1c91b46555aa1dd86331025e62d55d92459e0faf Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 19 Jul 2020 10:07:26 +0200 Subject: backlight: backlight: Add backlight_is_blank() The backlight support has three properties that express the state: - power - state - fb_blank It is un-documented and easy to get wrong. Add backlight_is_blank() helper to make it simpler for drivers to get the check of the state correct. A lot of drivers also includes checks for fb_blank. This check is redundant when the state is checked and thus not needed in this helper function. But added anyway to avoid introducing subtle bugs due to the creative use of fb_blank in some drivers. Introducing this helper will for some drivers results in added support for fb_blank. This will be a change in functionality, which will improve the backlight driver. Rolling out this helper to all relevant backlight drivers will eliminate almost all accesses to fb_blank. 
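As a rough usage sketch (not taken from any in-tree driver; mydrv_set_hw_brightness() is a made-up hardware accessor used only for illustration), an update_status() implementation would typically reduce to:

	static int mydrv_update_status(struct backlight_device *bd)
	{
		int brightness = bd->props.brightness;

		/*
		 * Force the backlight off whenever the display is expected
		 * to be blank, instead of open-coding the power/fb_blank/state
		 * checks in every driver.
		 */
		if (backlight_is_blank(bd))
			brightness = 0;

		return mydrv_set_hw_brightness(bd, brightness);
	}
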
Signed-off-by: Sam Ravnborg Reviewed-by: Daniel Thompson Reviewed-by: Peter Ujfalusi Reviewed-by: Emil Velikov Reviewed-by: Daniel Vetter Signed-off-by: Lee Jones --- include/linux/backlight.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'include') diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 56e4580d4f55..56e51ebab740 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -175,6 +175,25 @@ static inline void backlight_put(struct backlight_device *bd) put_device(&bd->dev); } +/** + * backlight_is_blank - Return true if display is expected to be blank + * @bd: the backlight device + * + * Display is expected to be blank if any of these is true:: + * + * 1) if power in not UNBLANK + * 2) if fb_blank is not UNBLANK + * 3) if state indicate BLANK or SUSPENDED + * + * Returns true if display is expected to be blank, false otherwise. + */ +static inline bool backlight_is_blank(const struct backlight_device *bd) +{ + return bd->props.power != FB_BLANK_UNBLANK || + bd->props.fb_blank != FB_BLANK_UNBLANK || + bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK); +} + extern struct backlight_device *backlight_device_register(const char *name, struct device *dev, void *devdata, const struct backlight_ops *ops, const struct backlight_properties *props); -- cgit v1.2.3 From ca7c20b2132d228ec76df3c96f5d0b5ae3d6f218 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 19 Jul 2020 10:07:27 +0200 Subject: backlight: backlight: Improve backlight_ops documentation Improve the documentation for backlight_ops and adapt it to kernel-doc style. Signed-off-by: Sam Ravnborg Reviewed-by: Daniel Thompson Reviewed-by: Emil Velikov Signed-off-by: Lee Jones --- include/linux/backlight.h | 59 ++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 53 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 56e51ebab740..4ff60774ae22 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -55,19 +55,66 @@ enum backlight_scale { struct backlight_device; struct fb_info; +/** + * struct backlight_ops - backlight operations + * + * The backlight operations are specified when the backlight device is registered. + */ struct backlight_ops { + /** + * @options: Configure how operations are called from the core. + * + * The options parameter is used to adjust the behaviour of the core. + * Set BL_CORE_SUSPENDRESUME to get the update_status() operation called + * upon suspend and resume. + */ unsigned int options; #define BL_CORE_SUSPENDRESUME (1 << 0) - /* Notify the backlight driver some property has changed */ + /** + * @update_status: Operation called when properties have changed. + * + * Notify the backlight driver some property has changed. + * The update_status operation is protected by the update_lock. + * + * The backlight driver is expected to use backlight_is_blank() + * to check if the display is blanked and set brightness accordingly. + * update_status() is called when any of the properties has changed. + * + * RETURNS: + * + * 0 on success, negative error code if any failure occurred. + */ int (*update_status)(struct backlight_device *); - /* Return the current backlight brightness (accounting for power, - fb_blank etc.) */ + + /** + * @get_brightness: Return the current backlight brightness. + * + * The driver may implement this as a readback from the HW. 
+ * This operation is optional and if not present then the current + * brightness property value is used. + * + * RETURNS: + * + * A brightness value which is 0 or a positive number. + * On failure a negative error code is returned. + */ int (*get_brightness)(struct backlight_device *); - /* Check if given framebuffer device is the one bound to this backlight; - return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */ - int (*check_fb)(struct backlight_device *, struct fb_info *); + + /** + * @check_fb: Check the framebuffer device. + * + * Check if given framebuffer device is the one bound to this backlight. + * This operation is optional and if not implemented it is assumed that the + * fbdev is always the one bound to the backlight. + * + * RETURNS: + * + * If info is NULL or the info matches the fbdev bound to the backlight return true. + * If info does not match the fbdev bound to the backlight return false. + */ + int (*check_fb)(struct backlight_device *bd, struct fb_info *info); }; /* This structure defines all the properties of a backlight */ -- cgit v1.2.3 From cabf161335ca9fe695bb87469ed99be881cd08f5 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 19 Jul 2020 10:07:28 +0200 Subject: backlight: backlight: Improve backlight_properties documentation Improve the documentation for backlight_properties and adapt it to kernel-doc style. Signed-off-by: Sam Ravnborg Reviewed-by: Daniel Thompson Reviewed-by: Emil Velikov Signed-off-by: Lee Jones --- include/linux/backlight.h | 96 +++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 85 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 4ff60774ae22..3554aef008ea 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -117,28 +117,102 @@ struct backlight_ops { int (*check_fb)(struct backlight_device *bd, struct fb_info *info); }; -/* This structure defines all the properties of a backlight */ +/** + * struct backlight_properties - backlight properties + * + * This structure defines all the properties of a backlight. + */ struct backlight_properties { - /* Current User requested brightness (0 - max_brightness) */ + /** + * @brightness: The current brightness requested by the user. + * + * The backlight core makes sure the range is (0 to max_brightness) + * when the brightness is set via the sysfs attribute: + * /sys/class/backlight//brightness. + * + * This value can be set in the backlight_properties passed + * to devm_backlight_device_register() to set a default brightness + * value. + */ int brightness; - /* Maximal value for brightness (read-only) */ + + /** + * @max_brightness: The maximum brightness value. + * + * This value must be set in the backlight_properties passed to + * devm_backlight_device_register() and shall not be modified by the + * driver after registration. + */ int max_brightness; - /* Current FB Power mode (0: full on, 1..3: power saving - modes; 4: full off), see FB_BLANK_XXX */ + + /** + * @power: The current power mode. + * + * User space can configure the power mode using the sysfs + * attribute: /sys/class/backlight//bl_power + * When the power property is updated update_status() is called. + * + * The possible values are: (0: full on, 1 to 3: power saving + * modes; 4: full off), see FB_BLANK_XXX. + * + * When the backlight device is enabled @power is set + * to FB_BLANK_UNBLANK. When the backlight device is disabled + * @power is set to FB_BLANK_POWERDOWN. 
+ */ int power; - /* FB Blanking active? (values as for power) */ - /* Due to be removed, please use (state & BL_CORE_FBBLANK) */ + + /** + * @fb_blank: The power state from the FBIOBLANK ioctl. + * + * When the FBIOBLANK ioctl is called @fb_blank is set to the + * blank parameter and the update_status() operation is called. + * + * When the backlight device is enabled @fb_blank is set + * to FB_BLANK_UNBLANK. When the backlight device is disabled + * @fb_blank is set to FB_BLANK_POWERDOWN. + * + * Backlight drivers should avoid using this property. It has been + * replaced by state & BL_CORE_FBLANK (although most drivers should + * use backlight_is_blank() as the preferred means to get the blank + * state). + * + * fb_blank is deprecated and will be removed. + */ int fb_blank; - /* Backlight type */ + + /** + * @type: The type of backlight supported. + * + * The backlight type allows userspace to make appropriate + * policy decisions based on the backlight type. + * + * This value must be set in the backlight_properties + * passed to devm_backlight_device_register(). + */ enum backlight_type type; - /* Flags used to signal drivers of state changes */ + + /** + * @state: The state of the backlight core. + * + * The state is a bitmask. BL_CORE_FBBLANK is set when the display + * is expected to be blank. BL_CORE_SUSPENDED is set when the + * driver is suspended. + * + * backlight drivers are expected to use backlight_is_blank() + * in their update_status() operation rather than reading the + * state property. + * + * The state is maintained by the core and drivers may not modify it. + */ unsigned int state; - /* Type of the brightness scale (linear, non-linear, ...) */ - enum backlight_scale scale; #define BL_CORE_SUSPENDED (1 << 0) /* backlight is suspended */ #define BL_CORE_FBBLANK (1 << 1) /* backlight is under an fb blank event */ + /** + * @scale: The type of the brightness scale. + */ + enum backlight_scale scale; }; struct backlight_device { -- cgit v1.2.3 From 6f10cd124c44eea018672d5852708ca5dca4d06d Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 19 Jul 2020 10:07:29 +0200 Subject: backlight: backlight: Improve backlight_device documentation Improve the documentation for backlight_device and adapt it to kernel-doc style. The updated documentation is more strict on how locking is used. With the update neither update_lock nor ops_lock may be used outside the backlight core. This restriction was introduced to keep the locking simple by keeping it in the core. It was verified that this documents the current state by renaming update_lock => bl_update_lock and ops_lock => bl_ops_lock. The rename did not reveal any uses outside the backlight core. The rename is NOT part of this patch. Signed-off-by: Sam Ravnborg Reviewed-by: Emil Velikov Reviewed-by: Daniel Thompson Reviewed-by: Jingoo Han Signed-off-by: Lee Jones --- include/linux/backlight.h | 72 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 49 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 3554aef008ea..cf9977169fe8 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -14,21 +14,6 @@ #include #include -/* Notes on locking: - * - * backlight_device->ops_lock is an internal backlight lock protecting the - * ops pointer and no code outside the core should need to touch it. - * - * Access to update_status() is serialised by the update_lock mutex since - * most drivers seem to need this and historically get it wrong. 
- * - * Most drivers don't need locking on their get_brightness() method. - * If yours does, you need to implement it in the driver. You can use the - * update_lock mutex if appropriate. - * - * Any other use of the locks below is probably wrong. - */ - enum backlight_update_reason { BACKLIGHT_UPDATE_HOTKEY, BACKLIGHT_UPDATE_SYSFS, @@ -215,30 +200,71 @@ struct backlight_properties { enum backlight_scale scale; }; +/** + * struct backlight_device - backlight device data + * + * This structure holds all data required by a backlight device. + */ struct backlight_device { - /* Backlight properties */ + /** + * @props: Backlight properties + */ struct backlight_properties props; - /* Serialise access to update_status method */ + /** + * @update_lock: The lock used when calling the update_status() operation. + * + * update_lock is an internal backlight lock that serialise access + * to the update_status() operation. The backlight core holds the update_lock + * when calling the update_status() operation. The update_lock shall not + * be used by backlight drivers. + */ struct mutex update_lock; - /* This protects the 'ops' field. If 'ops' is NULL, the driver that - registered this device has been unloaded, and if class_get_devdata() - points to something in the body of that driver, it is also invalid. */ + /** + * @ops_lock: The lock used around everything related to backlight_ops. + * + * ops_lock is an internal backlight lock that protects the ops pointer + * and is used around all accesses to ops and when the operations are + * invoked. The ops_lock shall not be used by backlight drivers. + */ struct mutex ops_lock; + + /** + * @ops: Pointer to the backlight operations. + * + * If ops is NULL, the driver that registered this device has been unloaded, + * and if class_get_devdata() points to something in the body of that driver, + * it is also invalid. + */ const struct backlight_ops *ops; - /* The framebuffer notifier block */ + /** + * @fb_notif: The framebuffer notifier block + */ struct notifier_block fb_notif; - /* list entry of all registered backlight devices */ + /** + * @entry: List entry of all registered backlight devices + */ struct list_head entry; + /** + * @dev: Parent device. + */ struct device dev; - /* Multiple framebuffers may share one backlight device */ + /** + * @fb_bl_on: The state of individual fbdev's. + * + * Multiple fbdev's may share one backlight device. The fb_bl_on + * records the state of the individual fbdev. + */ bool fb_bl_on[FB_MAX]; + /** + * @use_count: The number of uses of fb_bl_on. 
+ */ int use_count; }; -- cgit v1.2.3 From d160fd4e918da33f5ffbcf005cd95888dbbe4f76 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 19 Jul 2020 10:07:30 +0200 Subject: backlight: backlight: Document inline functions in backlight.h Add documentation for the inline functions in backlight.h Signed-off-by: Sam Ravnborg Reviewed-by: Daniel Thompson Reviewed-by: Emil Velikov Signed-off-by: Lee Jones --- include/linux/backlight.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'include') diff --git a/include/linux/backlight.h b/include/linux/backlight.h index cf9977169fe8..99381b5a289d 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -268,6 +268,10 @@ struct backlight_device { int use_count; }; +/** + * backlight_update_status - force an update of the backlight device status + * @bd: the backlight device + */ static inline int backlight_update_status(struct backlight_device *bd) { int ret = -ENOENT; @@ -361,6 +365,18 @@ extern int backlight_device_set_brightness(struct backlight_device *bd, unsigned #define to_backlight_device(obj) container_of(obj, struct backlight_device, dev) +/** + * bl_get_data - access devdata + * @bl_dev: pointer to backlight device + * + * When a backlight device is registered the driver has the possibility + * to supply a void * devdata. bl_get_data() return a pointer to the + * devdata. + * + * RETURNS: + * + * pointer to devdata stored while registering the backlight device. + */ static inline void * bl_get_data(struct backlight_device *bl_dev) { return dev_get_drvdata(&bl_dev->dev); -- cgit v1.2.3 From 2d15bb47f3331db979bd0a1987812ca1a3ff40c6 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 19 Jul 2020 10:07:31 +0200 Subject: backlight: backlight: Document enums in backlight.h Add kernel-doc documentation for the backlight enums Signed-off-by: Sam Ravnborg Reviewed-by: Daniel Thompson Reviewed-by: Emil Velikov Signed-off-by: Lee Jones --- include/linux/backlight.h | 72 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) (limited to 'include') diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 99381b5a289d..1d56b34ff33c 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -14,26 +14,98 @@ #include #include +/** + * enum backlight_update_reason - what method was used to update backlight + * + * A driver indicates the method (reason) used for updating the backlight + * when calling backlight_force_update(). + */ enum backlight_update_reason { + /** + * @BACKLIGHT_UPDATE_HOTKEY: The backlight was updated using a hot-key. + */ BACKLIGHT_UPDATE_HOTKEY, + + /** + * @BACKLIGHT_UPDATE_SYSFS: The backlight was updated using sysfs. + */ BACKLIGHT_UPDATE_SYSFS, }; +/** + * enum backlight_type - the type of backlight control + * + * The type of interface used to control the backlight. + */ enum backlight_type { + /** + * @BACKLIGHT_RAW: + * + * The backlight is controlled using hardware registers. + */ BACKLIGHT_RAW = 1, + + /** + * @BACKLIGHT_PLATFORM: + * + * The backlight is controlled using a platform-specific interface. + */ BACKLIGHT_PLATFORM, + + /** + * @BACKLIGHT_FIRMWARE: + * + * The backlight is controlled using a standard firmware interface. + */ BACKLIGHT_FIRMWARE, + + /** + * @BACKLIGHT_TYPE_MAX: Number of entries. 
+ */ BACKLIGHT_TYPE_MAX, }; +/** + * enum backlight_notification - the type of notification + * + * The notifications that is used for notification sent to the receiver + * that registered notifications using backlight_register_notifier(). + */ enum backlight_notification { + /** + * @BACKLIGHT_REGISTERED: The backlight device is registered. + */ BACKLIGHT_REGISTERED, + + /** + * @BACKLIGHT_UNREGISTERED: The backlight revice is unregistered. + */ BACKLIGHT_UNREGISTERED, }; +/** enum backlight_scale - the type of scale used for brightness values + * + * The type of scale used for brightness values. + */ enum backlight_scale { + /** + * @BACKLIGHT_SCALE_UNKNOWN: The scale is unknown. + */ BACKLIGHT_SCALE_UNKNOWN = 0, + + /** + * @BACKLIGHT_SCALE_LINEAR: The scale is linear. + * + * The linear scale will increase brightness the same for each step. + */ BACKLIGHT_SCALE_LINEAR, + + /** + * @BACKLIGHT_SCALE_NON_LINEAR: The scale is not linear. + * + * This is often used when the brightness values tries to adjust to + * the relative perception of the eye demanding a non-linear scale. + */ BACKLIGHT_SCALE_NON_LINEAR, }; -- cgit v1.2.3 From 7ecdea4a0226f6c5cd0e86859d1b38cf17bc8529 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 19 Jul 2020 10:07:32 +0200 Subject: backlight: generic_bl: Remove this driver as it is unused The backlight_bl driver required initialization using struct generic_bl_info. As there are no more references to this struct there is no users left. So it is safe to delete the driver. Signed-off-by: Sam Ravnborg Reviewed-by: Daniel Thompson Reviewed-by: Emil Velikov Signed-off-by: Lee Jones --- drivers/video/backlight/Kconfig | 8 --- drivers/video/backlight/Makefile | 1 - drivers/video/backlight/generic_bl.c | 110 ----------------------------------- include/linux/backlight.h | 9 --- 4 files changed, 128 deletions(-) delete mode 100644 drivers/video/backlight/generic_bl.c (limited to 'include') diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig index 95c546cc8774..87f9fc238d28 100644 --- a/drivers/video/backlight/Kconfig +++ b/drivers/video/backlight/Kconfig @@ -173,14 +173,6 @@ config BACKLIGHT_EP93XX To compile this driver as a module, choose M here: the module will be called ep93xx_bl. -config BACKLIGHT_GENERIC - tristate "Generic (aka Sharp Corgi) Backlight Driver" - default y - help - Say y to enable the generic platform backlight driver previously - known as the Corgi backlight driver. If you have a Sharp Zaurus - SL-C7xx, SL-Cxx00 or SL-6000x say y. 
- config BACKLIGHT_IPAQ_MICRO tristate "iPAQ microcontroller backlight driver" depends on MFD_IPAQ_MICRO diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile index 2072d21b60f7..13463b99f1f9 100644 --- a/drivers/video/backlight/Makefile +++ b/drivers/video/backlight/Makefile @@ -31,7 +31,6 @@ obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o obj-$(CONFIG_BACKLIGHT_DA903X) += da903x_bl.o obj-$(CONFIG_BACKLIGHT_DA9052) += da9052_bl.o obj-$(CONFIG_BACKLIGHT_EP93XX) += ep93xx_bl.o -obj-$(CONFIG_BACKLIGHT_GENERIC) += generic_bl.o obj-$(CONFIG_BACKLIGHT_GPIO) += gpio_backlight.o obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o obj-$(CONFIG_BACKLIGHT_HP700) += jornada720_bl.o diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c deleted file mode 100644 index 8fe63dbc8590..000000000000 --- a/drivers/video/backlight/generic_bl.c +++ /dev/null @@ -1,110 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Generic Backlight Driver - * - * Copyright (c) 2004-2008 Richard Purdie - */ - -#include -#include -#include -#include -#include -#include -#include - -static int genericbl_intensity; -static struct backlight_device *generic_backlight_device; -static struct generic_bl_info *bl_machinfo; - -static int genericbl_send_intensity(struct backlight_device *bd) -{ - int intensity = bd->props.brightness; - - if (bd->props.power != FB_BLANK_UNBLANK) - intensity = 0; - if (bd->props.state & BL_CORE_FBBLANK) - intensity = 0; - if (bd->props.state & BL_CORE_SUSPENDED) - intensity = 0; - - bl_machinfo->set_bl_intensity(intensity); - - genericbl_intensity = intensity; - - if (bl_machinfo->kick_battery) - bl_machinfo->kick_battery(); - - return 0; -} - -static int genericbl_get_intensity(struct backlight_device *bd) -{ - return genericbl_intensity; -} - -static const struct backlight_ops genericbl_ops = { - .options = BL_CORE_SUSPENDRESUME, - .get_brightness = genericbl_get_intensity, - .update_status = genericbl_send_intensity, -}; - -static int genericbl_probe(struct platform_device *pdev) -{ - struct backlight_properties props; - struct generic_bl_info *machinfo = dev_get_platdata(&pdev->dev); - const char *name = "generic-bl"; - struct backlight_device *bd; - - bl_machinfo = machinfo; - if (!machinfo->limit_mask) - machinfo->limit_mask = -1; - - if (machinfo->name) - name = machinfo->name; - - memset(&props, 0, sizeof(struct backlight_properties)); - props.type = BACKLIGHT_RAW; - props.max_brightness = machinfo->max_intensity; - bd = devm_backlight_device_register(&pdev->dev, name, &pdev->dev, - NULL, &genericbl_ops, &props); - if (IS_ERR(bd)) - return PTR_ERR(bd); - - platform_set_drvdata(pdev, bd); - - bd->props.power = FB_BLANK_UNBLANK; - bd->props.brightness = machinfo->default_intensity; - backlight_update_status(bd); - - generic_backlight_device = bd; - - dev_info(&pdev->dev, "Generic Backlight Driver Initialized.\n"); - return 0; -} - -static int genericbl_remove(struct platform_device *pdev) -{ - struct backlight_device *bd = platform_get_drvdata(pdev); - - bd->props.power = 0; - bd->props.brightness = 0; - backlight_update_status(bd); - - dev_info(&pdev->dev, "Generic Backlight Driver Unloaded\n"); - return 0; -} - -static struct platform_driver genericbl_driver = { - .probe = genericbl_probe, - .remove = genericbl_remove, - .driver = { - .name = "generic-bl", - }, -}; - -module_platform_driver(genericbl_driver); - -MODULE_AUTHOR("Richard Purdie "); -MODULE_DESCRIPTION("Generic Backlight Driver"); -MODULE_LICENSE("GPL"); diff --git 
a/include/linux/backlight.h b/include/linux/backlight.h index 1d56b34ff33c..d0f01dc3b98d 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -454,15 +454,6 @@ static inline void * bl_get_data(struct backlight_device *bl_dev) return dev_get_drvdata(&bl_dev->dev); } -struct generic_bl_info { - const char *name; - int max_intensity; - int default_intensity; - int limit_mask; - void (*set_bl_intensity)(int intensity); - void (*kick_battery)(void); -}; - #ifdef CONFIG_OF struct backlight_device *of_find_backlight_by_node(struct device_node *node); #else -- cgit v1.2.3 From 9c4aa3118bab6f10904b7bc9bc34b501a083751b Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 19 Jul 2020 10:07:33 +0200 Subject: backlight: backlight: Drop extern from prototypes No need to put "extern" in front of prototypes. While touching the prototypes adjust indent to follow the kernel style. Signed-off-by: Sam Ravnborg Reviewed-by: Daniel Thompson Reviewed-by: Emil Velikov Signed-off-by: Lee Jones --- include/linux/backlight.h | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/include/linux/backlight.h b/include/linux/backlight.h index d0f01dc3b98d..c1824426fc9e 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -417,23 +417,26 @@ static inline bool backlight_is_blank(const struct backlight_device *bd) bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK); } -extern struct backlight_device *backlight_device_register(const char *name, - struct device *dev, void *devdata, const struct backlight_ops *ops, - const struct backlight_properties *props); -extern struct backlight_device *devm_backlight_device_register( - struct device *dev, const char *name, struct device *parent, - void *devdata, const struct backlight_ops *ops, - const struct backlight_properties *props); -extern void backlight_device_unregister(struct backlight_device *bd); -extern void devm_backlight_device_unregister(struct device *dev, - struct backlight_device *bd); -extern void backlight_force_update(struct backlight_device *bd, - enum backlight_update_reason reason); -extern int backlight_register_notifier(struct notifier_block *nb); -extern int backlight_unregister_notifier(struct notifier_block *nb); -extern struct backlight_device *backlight_device_get_by_type(enum backlight_type type); +struct backlight_device * +backlight_device_register(const char *name, struct device *dev, void *devdata, + const struct backlight_ops *ops, + const struct backlight_properties *props); +struct backlight_device * +devm_backlight_device_register(struct device *dev, const char *name, + struct device *parent, void *devdata, + const struct backlight_ops *ops, + const struct backlight_properties *props); +void backlight_device_unregister(struct backlight_device *bd); +void devm_backlight_device_unregister(struct device *dev, + struct backlight_device *bd); +void backlight_force_update(struct backlight_device *bd, + enum backlight_update_reason reason); +int backlight_register_notifier(struct notifier_block *nb); +int backlight_unregister_notifier(struct notifier_block *nb); struct backlight_device *backlight_device_get_by_name(const char *name); -extern int backlight_device_set_brightness(struct backlight_device *bd, unsigned long brightness); +struct backlight_device *backlight_device_get_by_type(enum backlight_type type); +int backlight_device_set_brightness(struct backlight_device *bd, + unsigned long brightness); #define 
to_backlight_device(obj) container_of(obj, struct backlight_device, dev) -- cgit v1.2.3 From 2144d00ed0db28c764513080f95e4c49ea9133b0 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 19 Jul 2020 10:07:36 +0200 Subject: backlight: backlight: Introduce backlight_get_brightness() Based on an idea from Emil Velikov, add a helper that checks backlight_is_blank() and return 0 as brightness if display is blank or the property value if not. This allows us to simplify the update_status() implementation in most of the backlight drivers. Signed-off-by: Sam Ravnborg Reviewed-by: Daniel Thompson Signed-off-by: Lee Jones --- include/linux/backlight.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'include') diff --git a/include/linux/backlight.h b/include/linux/backlight.h index c1824426fc9e..26e89a8033f5 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -417,6 +417,25 @@ static inline bool backlight_is_blank(const struct backlight_device *bd) bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK); } +/** + * backlight_get_brightness - Returns the current brightness value + * @bd: the backlight device + * + * Returns the current brightness value, taking in consideration the current + * state. If backlight_is_blank() returns true then return 0 as brightness + * otherwise return the current brightness property value. + * + * Backlight drivers are expected to use this function in their update_status() + * operation to get the brightness value. + */ +static inline int backlight_get_brightness(const struct backlight_device *bd) +{ + if (backlight_is_blank(bd)) + return 0; + else + return bd->props.brightness; +} + struct backlight_device * backlight_device_register(const char *name, struct device *dev, void *devdata, const struct backlight_ops *ops, -- cgit v1.2.3 From 0f6a3256fd810eeca9c56cccafee46359d995138 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 19 Jul 2020 10:07:42 +0200 Subject: backlight: backlight: Drop backlight_put() There are no external users of backlight_put(). Drop it and open code the two users in backlight.c. 
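The open coding is a direct substitution. As a minimal sketch only (assuming a hypothetical future out-of-core caller and a made-up device name "panel-bl"), code that holds a reference, for instance one obtained via backlight_device_get_by_name(), now drops it with put_device() directly:

	struct backlight_device *bd = backlight_device_get_by_name("panel-bl");

	if (bd) {
		/* ... use the backlight device ... */
		put_device(&bd->dev);	/* was: backlight_put(bd); */
	}
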
Signed-off-by: Sam Ravnborg Reviewed-by: Daniel Thompson Signed-off-by: Lee Jones --- drivers/video/backlight/backlight.c | 7 +++++-- include/linux/backlight.h | 10 ---------- 2 files changed, 5 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index 82dc93ca859a..cba8505fef5a 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c @@ -718,7 +718,10 @@ EXPORT_SYMBOL(of_find_backlight); static void devm_backlight_release(void *data) { - backlight_put(data); + struct backlight_device *bd = data; + + if (bd) + put_device(&bd->dev); } /** @@ -746,7 +749,7 @@ struct backlight_device *devm_of_find_backlight(struct device *dev) return bd; ret = devm_add_action(dev, devm_backlight_release, bd); if (ret) { - backlight_put(bd); + put_device(&bd->dev); return ERR_PTR(ret); } return bd; diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 26e89a8033f5..64f91324c911 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -388,16 +388,6 @@ static inline int backlight_disable(struct backlight_device *bd) return backlight_update_status(bd); } -/** - * backlight_put - Drop backlight reference - * @bd: the backlight device to put - */ -static inline void backlight_put(struct backlight_device *bd) -{ - if (bd) - put_device(&bd->dev); -} - /** * backlight_is_blank - Return true if display is expected to be blank * @bd: the backlight device -- cgit v1.2.3 From b6539a11e807c531e51ff7ad9a25c3a1b6ff7340 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 19 Jul 2020 10:07:43 +0200 Subject: backlight: backlight: Make of_find_backlight static There are no external users of of_find_backlight, as they have all changed to use the managed version. Make of_find_backlight static to prevent new external users. Signed-off-by: Sam Ravnborg Reviewed-by: Daniel Thompson Signed-off-by: Lee Jones --- drivers/video/backlight/backlight.c | 18 +----------------- include/linux/backlight.h | 6 ------ 2 files changed, 1 insertion(+), 23 deletions(-) (limited to 'include') diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index cba8505fef5a..537fe1b376ad 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c @@ -673,22 +673,7 @@ struct backlight_device *of_find_backlight_by_node(struct device_node *node) EXPORT_SYMBOL(of_find_backlight_by_node); #endif -/** - * of_find_backlight - Get backlight device - * @dev: Device - * - * This function looks for a property named 'backlight' on the DT node - * connected to @dev and looks up the backlight device. - * - * Call backlight_put() to drop the reference on the backlight device. - * - * Returns: - * A pointer to the backlight device if found. - * Error pointer -EPROBE_DEFER if the DT property is set, but no backlight - * device is found. - * NULL if there's no backlight property. 
- */ -struct backlight_device *of_find_backlight(struct device *dev) +static struct backlight_device *of_find_backlight(struct device *dev) { struct backlight_device *bd = NULL; struct device_node *np; @@ -714,7 +699,6 @@ struct backlight_device *of_find_backlight(struct device *dev) return bd; } -EXPORT_SYMBOL(of_find_backlight); static void devm_backlight_release(void *data) { diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 64f91324c911..614653e07e3a 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -477,14 +477,8 @@ of_find_backlight_by_node(struct device_node *node) #endif #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) -struct backlight_device *of_find_backlight(struct device *dev); struct backlight_device *devm_of_find_backlight(struct device *dev); #else -static inline struct backlight_device *of_find_backlight(struct device *dev) -{ - return NULL; -} - static inline struct backlight_device * devm_of_find_backlight(struct device *dev) { -- cgit v1.2.3 From ffebbecaaa86f7cde4a6a813bed14f9d56e7c373 Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Sat, 18 Jul 2020 14:18:15 +0200 Subject: reset: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Signed-off-by: Philipp Zabel --- drivers/reset/reset-ti-syscon.c | 2 +- include/dt-bindings/reset/ti-syscon.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/reset/reset-ti-syscon.c b/drivers/reset/reset-ti-syscon.c index a2635c21db7f..ef97c4dbbb4e 100644 --- a/drivers/reset/reset-ti-syscon.c +++ b/drivers/reset/reset-ti-syscon.c @@ -1,7 +1,7 @@ /* * TI SYSCON regmap reset driver * - * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/ * Andrew F. Davis * Suman Anna * diff --git a/include/dt-bindings/reset/ti-syscon.h b/include/dt-bindings/reset/ti-syscon.h index 6d696d2d1508..eacc0f18083e 100644 --- a/include/dt-bindings/reset/ti-syscon.h +++ b/include/dt-bindings/reset/ti-syscon.h @@ -2,7 +2,7 @@ /* * TI Syscon Reset definitions * - * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/ */ #ifndef __DT_BINDINGS_RESET_TI_SYSCON_H__ -- cgit v1.2.3 From 842247203c33151c5a8007f9a0bec7ba378705d2 Mon Sep 17 00:00:00 2001 From: Artur Rojek Date: Sun, 19 Jul 2020 22:53:06 +0200 Subject: dt-bindings: iio/adc: Add touchscreen idx for JZ47xx SoC ADC Introduce support for touchscreen channels found in JZ47xx SoCs. 
Signed-off-by: Artur Rojek Tested-by: Paul Cercueil Acked-by: Rob Herring Signed-off-by: Jonathan Cameron --- include/dt-bindings/iio/adc/ingenic,adc.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include') diff --git a/include/dt-bindings/iio/adc/ingenic,adc.h b/include/dt-bindings/iio/adc/ingenic,adc.h index 42f871ab3272..4627a00e369e 100644 --- a/include/dt-bindings/iio/adc/ingenic,adc.h +++ b/include/dt-bindings/iio/adc/ingenic,adc.h @@ -7,5 +7,11 @@ #define INGENIC_ADC_AUX 0 #define INGENIC_ADC_BATTERY 1 #define INGENIC_ADC_AUX2 2 +#define INGENIC_ADC_TOUCH_XP 3 +#define INGENIC_ADC_TOUCH_YP 4 +#define INGENIC_ADC_TOUCH_XN 5 +#define INGENIC_ADC_TOUCH_YN 6 +#define INGENIC_ADC_TOUCH_XD 7 +#define INGENIC_ADC_TOUCH_YD 8 #endif -- cgit v1.2.3 From 1b86abc1c645ad5c9c7bf70910cb3ce73939d2d7 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 16 Jul 2020 13:11:24 +0800 Subject: sched_clock: Expose struct clock_read_data In order to support perf_event_mmap_page::cap_time features, an architecture needs, aside from a userspace readable counter register, to expose the exact clock data so that userspace can convert the counter register into a correct timestamp. Provide struct clock_read_data and two (seqcount) helpers so that architectures (arm64 in specific) can expose the numbers to userspace. Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Leo Yan Link: https://lore.kernel.org/r/20200716051130.4359-2-leo.yan@linaro.org Signed-off-by: Will Deacon --- include/linux/sched_clock.h | 28 ++++++++++++++++++++++++++++ kernel/time/sched_clock.c | 41 +++++++++++++---------------------------- 2 files changed, 41 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h index 0bb04a96a6d4..528718e4ed52 100644 --- a/include/linux/sched_clock.h +++ b/include/linux/sched_clock.h @@ -6,6 +6,34 @@ #define LINUX_SCHED_CLOCK #ifdef CONFIG_GENERIC_SCHED_CLOCK +/** + * struct clock_read_data - data required to read from sched_clock() + * + * @epoch_ns: sched_clock() value at last update + * @epoch_cyc: Clock cycle value at last update. + * @sched_clock_mask: Bitmask for two's complement subtraction of non 64bit + * clocks. + * @read_sched_clock: Current clock source (or dummy source when suspended). + * @mult: Multipler for scaled math conversion. + * @shift: Shift value for scaled math conversion. + * + * Care must be taken when updating this structure; it is read by + * some very hot code paths. It occupies <=40 bytes and, when combined + * with the seqcount used to synchronize access, comfortably fits into + * a 64 byte cache line. + */ +struct clock_read_data { + u64 epoch_ns; + u64 epoch_cyc; + u64 sched_clock_mask; + u64 (*read_sched_clock)(void); + u32 mult; + u32 shift; +}; + +extern struct clock_read_data *sched_clock_read_begin(unsigned int *seq); +extern int sched_clock_read_retry(unsigned int seq); + extern void generic_sched_clock_init(void); extern void sched_clock_register(u64 (*read)(void), int bits, diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c index fa3f800d7d76..0acaadc3156c 100644 --- a/kernel/time/sched_clock.c +++ b/kernel/time/sched_clock.c @@ -19,31 +19,6 @@ #include "timekeeping.h" -/** - * struct clock_read_data - data required to read from sched_clock() - * - * @epoch_ns: sched_clock() value at last update - * @epoch_cyc: Clock cycle value at last update. - * @sched_clock_mask: Bitmask for two's complement subtraction of non 64bit - * clocks. 
- * @read_sched_clock: Current clock source (or dummy source when suspended). - * @mult: Multipler for scaled math conversion. - * @shift: Shift value for scaled math conversion. - * - * Care must be taken when updating this structure; it is read by - * some very hot code paths. It occupies <=40 bytes and, when combined - * with the seqcount used to synchronize access, comfortably fits into - * a 64 byte cache line. - */ -struct clock_read_data { - u64 epoch_ns; - u64 epoch_cyc; - u64 sched_clock_mask; - u64 (*read_sched_clock)(void); - u32 mult; - u32 shift; -}; - /** * struct clock_data - all data needed for sched_clock() (including * registration of a new clock source) @@ -93,6 +68,17 @@ static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift) return (cyc * mult) >> shift; } +struct clock_read_data *sched_clock_read_begin(unsigned int *seq) +{ + *seq = raw_read_seqcount(&cd.seq); + return cd.read_data + (*seq & 1); +} + +int sched_clock_read_retry(unsigned int seq) +{ + return read_seqcount_retry(&cd.seq, seq); +} + unsigned long long notrace sched_clock(void) { u64 cyc, res; @@ -100,13 +86,12 @@ unsigned long long notrace sched_clock(void) struct clock_read_data *rd; do { - seq = raw_read_seqcount(&cd.seq); - rd = cd.read_data + (seq & 1); + rd = sched_clock_read_begin(&seq); cyc = (rd->read_sched_clock() - rd->epoch_cyc) & rd->sched_clock_mask; res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift); - } while (read_seqcount_retry(&cd.seq, seq)); + } while (sched_clock_read_retry(seq)); return res; } -- cgit v1.2.3 From 6c0246a4588d418f72acd40a7b7601be403d80a9 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 16 Jul 2020 13:11:28 +0800 Subject: perf: Add perf_event_mmap_page::cap_user_time_short ABI In order to support short clock counters, provide an ABI extension. As a whole: u64 time, delta, cyc = read_cycle_counter(); + if (cap_user_time_short) + cyc = time_cycle + ((cyc - time_cycle) & time_mask); delta = mul_u64_u32_shr(cyc, time_mult, time_shift); if (cap_user_time_zero) time = time_zero + delta; delta += time_offset; Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Leo Yan Link: https://lore.kernel.org/r/20200716051130.4359-6-leo.yan@linaro.org Signed-off-by: Will Deacon --- include/uapi/linux/perf_event.h | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 7b2d6fc9e6ed..21a1edd08cbe 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -532,9 +532,10 @@ struct perf_event_mmap_page { cap_bit0_is_deprecated : 1, /* Always 1, signals that bit 0 is zero */ cap_user_rdpmc : 1, /* The RDPMC instruction can be used to read counts */ - cap_user_time : 1, /* The time_* fields are used */ + cap_user_time : 1, /* The time_{shift,mult,offset} fields are used */ cap_user_time_zero : 1, /* The time_zero field is used */ - cap_____res : 59; + cap_user_time_short : 1, /* the time_{cycle,mask} fields are used */ + cap_____res : 58; }; }; @@ -593,13 +594,29 @@ struct perf_event_mmap_page { * ((rem * time_mult) >> time_shift); */ __u64 time_zero; + __u32 size; /* Header size up to __reserved[] fields. 
*/ + __u32 __reserved_1; + + /* + * If cap_usr_time_short, the hardware clock is less than 64bit wide + * and we must compute the 'cyc' value, as used by cap_usr_time, as: + * + * cyc = time_cycles + ((cyc - time_cycles) & time_mask) + * + * NOTE: this form is explicitly chosen such that cap_usr_time_short + * is a correction on top of cap_usr_time, and code that doesn't + * know about cap_usr_time_short still works under the assumption + * the counter doesn't wrap. + */ + __u64 time_cycles; + __u64 time_mask; /* * Hole for extension of the self monitor capabilities */ - __u8 __reserved[118*8+4]; /* align to 1k. */ + __u8 __reserved[116*8]; /* align to 1k. */ /* * Control data for the mmap() data buffer. -- cgit v1.2.3 From e2978c45e5ed3bab7f69477b882ef588185b30cc Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Fri, 17 Jul 2020 09:21:54 +0900 Subject: ASoC: soc-dai: remove .digital_mute All drivers are now using .mute_stream. Let's remove .digital_mute. Signed-off-by: Kuninori Morimoto Reviewed-by: Peter Ujfalusi Link: https://lore.kernel.org/r/87h7u72dqz.wl-kuninori.morimoto.gx@renesas.com Signed-off-by: Mark Brown --- include/sound/soc-dai.h | 1 - sound/soc/soc-dai.c | 4 ---- 2 files changed, 5 deletions(-) (limited to 'include') diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index 05775f7b0bbc..2b51e8eb8a7a 100644 --- a/include/sound/soc-dai.h +++ b/include/sound/soc-dai.h @@ -246,7 +246,6 @@ struct snd_soc_dai_ops { * DAI digital mute - optional. * Called by soc-core to minimise any pops. */ - int (*digital_mute)(struct snd_soc_dai *dai, int mute); int (*mute_stream)(struct snd_soc_dai *dai, int mute, int stream); /* diff --git a/sound/soc/soc-dai.c b/sound/soc/soc-dai.c index 458d2ea44329..c89a1929d141 100644 --- a/sound/soc/soc-dai.c +++ b/sound/soc/soc-dai.c @@ -307,10 +307,6 @@ int snd_soc_dai_digital_mute(struct snd_soc_dai *dai, int mute, (direction == SNDRV_PCM_STREAM_PLAYBACK || !dai->driver->ops->no_capture_mute)) ret = dai->driver->ops->mute_stream(dai, mute, direction); - else if (direction == SNDRV_PCM_STREAM_PLAYBACK && - dai->driver->ops && - dai->driver->ops->digital_mute) - ret = dai->driver->ops->digital_mute(dai, mute); return soc_dai_ret(dai, ret); } -- cgit v1.2.3 From d3818c4815aab9b17e78b6afdeaf87797acba8d3 Mon Sep 17 00:00:00 2001 From: Kamel Bouhara Date: Mon, 6 Jul 2020 13:43:43 +0200 Subject: ARM: at91: add atmel tcb capabilities Some atmel socs have extra tcb capabilities that allow using a generic clock source or enabling a quadrature decoder. Signed-off-by: Kamel Bouhara Acked-by: Alexandre Belloni Signed-off-by: Jonathan Cameron --- include/soc/at91/atmel_tcb.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/soc/at91/atmel_tcb.h b/include/soc/at91/atmel_tcb.h index c3c7200ce151..1d7071dc0bca 100644 --- a/include/soc/at91/atmel_tcb.h +++ b/include/soc/at91/atmel_tcb.h @@ -36,9 +36,14 @@ struct clk; /** * struct atmel_tcb_config - SoC data for a Timer/Counter Block * @counter_width: size in bits of a timer counter register + * @has_gclk: boolean indicating if a timer counter has a generic clock + * @has_qdec: boolean indicating if a timer counter has a quadrature + * decoder. 
*/ struct atmel_tcb_config { size_t counter_width; + bool has_gclk; + bool has_qdec; }; /** -- cgit v1.2.3 From 3c8387d234f75887a2d78972ab0f764fe9f756e4 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 22 Jun 2020 10:53:55 +0300 Subject: uuid: remove unused uuid_le_to_bin() definition There is no more user, so remove it. Signed-off-by: Andy Shevchenko Signed-off-by: Christoph Hellwig --- include/linux/uuid.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include') diff --git a/include/linux/uuid.h b/include/linux/uuid.h index d41b0d3e9474..8cdc0d3567cd 100644 --- a/include/linux/uuid.h +++ b/include/linux/uuid.h @@ -98,8 +98,6 @@ int guid_parse(const char *uuid, guid_t *u); int uuid_parse(const char *uuid, uuid_t *u); /* backwards compatibility, don't use in new code */ -#define uuid_le_to_bin(guid, u) guid_parse(guid, u) - static inline int uuid_le_cmp(const guid_t u1, const guid_t u2) { return memcmp(&u1, &u2, sizeof(guid_t)); -- cgit v1.2.3 From fc926a7c818c97e98c5c2db1cb910523df0419e5 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 18 Jul 2020 17:33:07 -0700 Subject: ASoC: soc-dai.h: drop a duplicated word Drop the repeated word "be" in a comment. Signed-off-by: Randy Dunlap Cc: Liam Girdwood Cc: Mark Brown Cc: alsa-devel@alsa-project.org Link: https://lore.kernel.org/r/20200719003307.21403-1-rdunlap@infradead.org Signed-off-by: Mark Brown --- include/sound/soc-dai.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index 2b51e8eb8a7a..7ff659e28570 100644 --- a/include/sound/soc-dai.h +++ b/include/sound/soc-dai.h @@ -39,7 +39,7 @@ struct snd_compr_stream; /* * DAI Clock gating. * - * DAI bit clocks can be be gated (disabled) when the DAI is not + * DAI bit clocks can be gated (disabled) when the DAI is not * sending or receiving PCM data in a frame. This can be used to save power. */ #define SND_SOC_DAIFMT_CONT (1 << 4) /* continuous clock */ -- cgit v1.2.3 From 4834177e633258fbf3c5754b1220f01c705b79eb Mon Sep 17 00:00:00 2001 From: Tyler Hicks Date: Thu, 9 Jul 2020 01:19:11 -0500 Subject: ima: Support additional conditionals in the KEXEC_CMDLINE hook function Take the properties of the kexec kernel's inode and the current task ownership into consideration when matching a KEXEC_CMDLINE operation to the rules in the IMA policy. This allows for some uniformity when writing IMA policy rules for KEXEC_KERNEL_CHECK, KEXEC_INITRAMFS_CHECK, and KEXEC_CMDLINE operations. Prior to this patch, it was not possible to write a set of rules like this: dont_measure func=KEXEC_KERNEL_CHECK obj_type=foo_t dont_measure func=KEXEC_INITRAMFS_CHECK obj_type=foo_t dont_measure func=KEXEC_CMDLINE obj_type=foo_t measure func=KEXEC_KERNEL_CHECK measure func=KEXEC_INITRAMFS_CHECK measure func=KEXEC_CMDLINE The inode information associated with the kernel being loaded by a kexec_kernel_load(2) syscall can now be included in the decision to measure or not Additonally, the uid, euid, and subj_* conditionals can also now be used in KEXEC_CMDLINE rules. There was no technical reason as to why those conditionals weren't being considered previously other than ima_match_rules() didn't have a valid inode to use so it immediately bailed out for KEXEC_CMDLINE operations rather than going through the full list of conditional comparisons. 
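Purely as an illustration of the added flexibility (the subject label kexec_runner_t below is made up for this example), rules using the same policy syntax as above now take effect for KEXEC_CMDLINE operations:

	measure func=KEXEC_CMDLINE uid=0
	dont_measure func=KEXEC_CMDLINE subj_type=kexec_runner_t
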
Signed-off-by: Tyler Hicks Cc: Eric Biederman Cc: kexec@lists.infradead.org Reviewed-by: Lakshmi Ramasubramanian Signed-off-by: Mimi Zohar --- include/linux/ima.h | 4 ++-- kernel/kexec_file.c | 2 +- security/integrity/ima/ima.h | 2 +- security/integrity/ima/ima_api.c | 2 +- security/integrity/ima/ima_appraise.c | 2 +- security/integrity/ima/ima_asymmetric_keys.c | 2 +- security/integrity/ima/ima_main.c | 23 +++++++++++++++++------ security/integrity/ima/ima_policy.c | 17 ++++++----------- security/integrity/ima/ima_queue_keys.c | 2 +- 9 files changed, 31 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/include/linux/ima.h b/include/linux/ima.h index 9164e1534ec9..d15100de6cdd 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -25,7 +25,7 @@ extern int ima_post_read_file(struct file *file, void *buf, loff_t size, enum kernel_read_file_id id); extern void ima_post_path_mknod(struct dentry *dentry); extern int ima_file_hash(struct file *file, char *buf, size_t buf_size); -extern void ima_kexec_cmdline(const void *buf, int size); +extern void ima_kexec_cmdline(int kernel_fd, const void *buf, int size); #ifdef CONFIG_IMA_KEXEC extern void ima_add_kexec_buffer(struct kimage *image); @@ -103,7 +103,7 @@ static inline int ima_file_hash(struct file *file, char *buf, size_t buf_size) return -EOPNOTSUPP; } -static inline void ima_kexec_cmdline(const void *buf, int size) {} +static inline void ima_kexec_cmdline(int kernel_fd, const void *buf, int size) {} #endif /* CONFIG_IMA */ #ifndef CONFIG_IMA_KEXEC diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index bb05fd52de85..07df431c1f21 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c @@ -287,7 +287,7 @@ kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd, goto out; } - ima_kexec_cmdline(image->cmdline_buf, + ima_kexec_cmdline(kernel_fd, image->cmdline_buf, image->cmdline_buf_len - 1); } diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h index ea7e77536f3c..576ae2c6d418 100644 --- a/security/integrity/ima/ima.h +++ b/security/integrity/ima/ima.h @@ -265,7 +265,7 @@ void ima_store_measurement(struct integrity_iint_cache *iint, struct file *file, struct evm_ima_xattr_data *xattr_value, int xattr_len, const struct modsig *modsig, int pcr, struct ima_template_desc *template_desc); -void process_buffer_measurement(const void *buf, int size, +void process_buffer_measurement(struct inode *inode, const void *buf, int size, const char *eventname, enum ima_hooks func, int pcr, const char *keyring); void ima_audit_measurement(struct integrity_iint_cache *iint, diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c index bf22de8b7ce0..4f39fb93f278 100644 --- a/security/integrity/ima/ima_api.c +++ b/security/integrity/ima/ima_api.c @@ -162,7 +162,7 @@ err_out: /** * ima_get_action - appraise & measure decision based on policy. 
- * @inode: pointer to inode to measure + * @inode: pointer to the inode associated with the object being validated * @cred: pointer to credentials structure to validate * @secid: secid of the task being validated * @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXEC, diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c index a9649b04b9f1..6c52bf7ea7f0 100644 --- a/security/integrity/ima/ima_appraise.c +++ b/security/integrity/ima/ima_appraise.c @@ -328,7 +328,7 @@ int ima_check_blacklist(struct integrity_iint_cache *iint, rc = is_binary_blacklisted(digest, digestsize); if ((rc == -EPERM) && (iint->flags & IMA_MEASURE)) - process_buffer_measurement(digest, digestsize, + process_buffer_measurement(NULL, digest, digestsize, "blacklisted-hash", NONE, pcr, NULL); } diff --git a/security/integrity/ima/ima_asymmetric_keys.c b/security/integrity/ima/ima_asymmetric_keys.c index aaae80c4e376..1c68c500c26f 100644 --- a/security/integrity/ima/ima_asymmetric_keys.c +++ b/security/integrity/ima/ima_asymmetric_keys.c @@ -58,7 +58,7 @@ void ima_post_key_create_or_update(struct key *keyring, struct key *key, * if the IMA policy is configured to measure a key linked * to the given keyring. */ - process_buffer_measurement(payload, payload_len, + process_buffer_measurement(NULL, payload, payload_len, keyring->description, KEY_CHECK, 0, keyring->description); } diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index 8351b2fd48e0..8a91711ca79b 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c @@ -726,6 +726,7 @@ int ima_load_data(enum kernel_load_data_id id) /* * process_buffer_measurement - Measure the buffer to ima log. + * @inode: inode associated with the object being measured (NULL for KEY_CHECK) * @buf: pointer to the buffer that needs to be added to the log. * @size: size of buffer(in bytes). * @eventname: event name to be used for the buffer entry. @@ -735,7 +736,7 @@ int ima_load_data(enum kernel_load_data_id id) * * Based on policy, the buffer is measured into the ima log. */ -void process_buffer_measurement(const void *buf, int size, +void process_buffer_measurement(struct inode *inode, const void *buf, int size, const char *eventname, enum ima_hooks func, int pcr, const char *keyring) { @@ -768,7 +769,7 @@ void process_buffer_measurement(const void *buf, int size, */ if (func) { security_task_getsecid(current, &secid); - action = ima_get_action(NULL, current_cred(), secid, 0, func, + action = ima_get_action(inode, current_cred(), secid, 0, func, &pcr, &template, keyring); if (!(action & IMA_MEASURE)) return; @@ -823,16 +824,26 @@ out: /** * ima_kexec_cmdline - measure kexec cmdline boot args + * @kernel_fd: file descriptor of the kexec kernel being loaded * @buf: pointer to buffer * @size: size of buffer * * Buffers can only be measured, not appraised. 
*/ -void ima_kexec_cmdline(const void *buf, int size) +void ima_kexec_cmdline(int kernel_fd, const void *buf, int size) { - if (buf && size != 0) - process_buffer_measurement(buf, size, "kexec-cmdline", - KEXEC_CMDLINE, 0, NULL); + struct fd f; + + if (!buf || !size) + return; + + f = fdget(kernel_fd); + if (!f.file) + return; + + process_buffer_measurement(file_inode(f.file), buf, size, + "kexec-cmdline", KEXEC_CMDLINE, 0, NULL); + fdput(f); } static int __init init_ima(void) diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index dcd1aaac4ff0..9284055ee13a 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c @@ -443,13 +443,9 @@ static bool ima_match_rules(struct ima_rule_entry *rule, struct inode *inode, { int i; - if ((func == KEXEC_CMDLINE) || (func == KEY_CHECK)) { - if ((rule->flags & IMA_FUNC) && (rule->func == func)) { - if (func == KEY_CHECK) - return ima_match_keyring(rule, keyring, cred); - return true; - } - return false; + if (func == KEY_CHECK) { + return (rule->flags & IMA_FUNC) && (rule->func == func) && + ima_match_keyring(rule, keyring, cred); } if ((rule->flags & IMA_FUNC) && (rule->func != func && func != POST_SETATTR)) @@ -1035,10 +1031,9 @@ static bool ima_validate_rule(struct ima_rule_entry *entry) if (entry->action & ~(MEASURE | DONT_MEASURE)) return false; - if (entry->flags & ~(IMA_FUNC | IMA_PCR)) - return false; - - if (ima_rule_contains_lsm_cond(entry)) + if (entry->flags & ~(IMA_FUNC | IMA_FSMAGIC | IMA_UID | + IMA_FOWNER | IMA_FSUUID | IMA_EUID | + IMA_PCR | IMA_FSNAME)) return false; break; diff --git a/security/integrity/ima/ima_queue_keys.c b/security/integrity/ima/ima_queue_keys.c index 56ce24a18b66..69a8626a35c0 100644 --- a/security/integrity/ima/ima_queue_keys.c +++ b/security/integrity/ima/ima_queue_keys.c @@ -158,7 +158,7 @@ void ima_process_queued_keys(void) list_for_each_entry_safe(entry, tmp, &ima_keys, list) { if (!timer_expired) - process_buffer_measurement(entry->payload, + process_buffer_measurement(NULL, entry->payload, entry->payload_len, entry->keyring_name, KEY_CHECK, 0, -- cgit v1.2.3 From 8e7eafb816ab7e5047b74cb8eb1db2f8f14f7d7a Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 18 Jul 2020 17:32:20 -0700 Subject: RDMA: rdma_user_ioctl.h: fix a duplicated word + clarify Change the repeated word "it" in a comment to "it to". Also insert a dash in the sentence to add clarity. Link: https://lore.kernel.org/r/20200719003220.21250-1-rdunlap@infradead.org Signed-off-by: Randy Dunlap Signed-off-by: Jason Gunthorpe --- include/uapi/rdma/rdma_user_ioctl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/rdma/rdma_user_ioctl.h b/include/uapi/rdma/rdma_user_ioctl.h index d92d2721b28c..53c55188dd2a 100644 --- a/include/uapi/rdma/rdma_user_ioctl.h +++ b/include/uapi/rdma/rdma_user_ioctl.h @@ -43,7 +43,7 @@ /* * General blocks assignments - * It is closed on purpose do not expose it it user space + * It is closed on purpose - do not expose it to user space * #define MAD_CMD_BASE 0x00 * #define HFI1_CMD_BAS 0xE0 */ -- cgit v1.2.3 From 3093a479727be194996dbc40f803711af5877be4 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Jul 2020 08:12:49 +0200 Subject: block: inherit the zoned characteristics in blk_stack_limits Lift the code from device mapper into blk_stack_limits to inherity the stacking limitations. This ensures we do the right thing for all stacked zoned block devices. 
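A minimal sketch of the resulting behaviour (illustrative only; it relies on the enum reordering in the blkdev.h hunk below, where the zoned models are ordered from least to most restrictive):

	enum blk_zoned_model top = BLK_ZONED_NONE;	/* stacking driver default */
	enum blk_zoned_model bottom = BLK_ZONED_HM;	/* host-managed component */

	top = max(top, bottom);		/* resolves to BLK_ZONED_HM */

so a stacked device inherits the strictest zoned model among its components.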
Reviewed-by: Johannes Thumshirn Reviewed-by: Damien Le Moal Tested-by: Damien Le Moal Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-settings.c | 1 + drivers/md/dm-table.c | 19 ------------------- include/linux/blkdev.h | 9 ++++++--- 3 files changed, 7 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/block/blk-settings.c b/block/blk-settings.c index 9a2c23cd9700..9cddbd736474 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -609,6 +609,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, t->chunk_sectors = min_not_zero(t->chunk_sectors, b->chunk_sectors); + t->zoned = max(t->zoned, b->zoned); return ret; } EXPORT_SYMBOL(blk_stack_limits); diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 0ea5b7367179..ec5364133cef 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -467,9 +467,6 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, q->limits.logical_block_size, q->limits.alignment_offset, (unsigned long long) start << SECTOR_SHIFT); - - limits->zoned = blk_queue_zoned_model(q); - return 0; } @@ -1528,22 +1525,6 @@ combine_limits: dm_device_name(table->md), (unsigned long long) ti->begin, (unsigned long long) ti->len); - - /* - * FIXME: this should likely be moved to blk_stack_limits(), would - * also eliminate limits->zoned stacking hack in dm_set_device_limits() - */ - if (limits->zoned == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) { - /* - * By default, the stacked limits zoned model is set to - * BLK_ZONED_NONE in blk_set_stacking_limits(). Update - * this model using the first target model reported - * that is not BLK_ZONED_NONE. This will be either the - * first target device zoned model or the model reported - * by the target .io_hints. - */ - limits->zoned = ti_limits.zoned; - } } /* diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 63078944909c..9e331a1eb35f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -306,11 +306,14 @@ enum blk_queue_state { /* * Zoned block device models (zoned limit). + * + * Note: This needs to be ordered from the least to the most severe + * restrictions for the inheritance in blk_stack_limits() to work. */ enum blk_zoned_model { - BLK_ZONED_NONE, /* Regular block device */ - BLK_ZONED_HA, /* Host-aware zoned block device */ - BLK_ZONED_HM, /* Host-managed zoned block device */ + BLK_ZONED_NONE = 0, /* Regular block device */ + BLK_ZONED_HA, /* Host-aware zoned block device */ + BLK_ZONED_HM, /* Host-managed zoned block device */ }; struct queue_limits { -- cgit v1.2.3 From 9efa82ef2b15d1757dd6cc518988a4506554e893 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Jul 2020 08:12:50 +0200 Subject: block: remove bdev_stack_limits This function is just a tiny wrapper around blk_stack_limit and has two callers. Simplify the stack a bit by open coding it in the two callers. 
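For reference, the open-coded form (taken from the hunks below) folds the partition start into the offset before calling blk_stack_limits() directly, e.g. in disk_stack_limits():

	blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,
			 get_start_sect(bdev) + (offset >> 9));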
Reviewed-by: Johannes Thumshirn Reviewed-by: Damien Le Moal Tested-by: Damien Le Moal Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-settings.c | 25 ++----------------------- drivers/md/dm-table.c | 3 ++- include/linux/blkdev.h | 2 -- 3 files changed, 4 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/block/blk-settings.c b/block/blk-settings.c index 9cddbd736474..8c63af772685 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -614,28 +614,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, } EXPORT_SYMBOL(blk_stack_limits); -/** - * bdev_stack_limits - adjust queue limits for stacked drivers - * @t: the stacking driver limits (top device) - * @bdev: the component block_device (bottom) - * @start: first data sector within component device - * - * Description: - * Merges queue limits for a top device and a block_device. Returns - * 0 if alignment didn't change. Returns -1 if adding the bottom - * device caused misalignment. - */ -int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, - sector_t start) -{ - struct request_queue *bq = bdev_get_queue(bdev); - - start += get_start_sect(bdev); - - return blk_stack_limits(t, &bq->limits, start); -} -EXPORT_SYMBOL(bdev_stack_limits); - /** * disk_stack_limits - adjust queue limits for stacked drivers * @disk: MD/DM gendisk (top) @@ -651,7 +629,8 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, { struct request_queue *t = disk->queue; - if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) { + if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits, + get_start_sect(bdev) + (offset >> 9)) < 0) { char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE]; disk_name(disk, 0, top); diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index ec5364133cef..aac4c31cfc84 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -458,7 +458,8 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, return 0; } - if (bdev_stack_limits(limits, bdev, start) < 0) + if (blk_stack_limits(limits, &q->limits, + get_start_sect(bdev) + start) < 0) DMWARN("%s: adding target device %s caused an alignment inconsistency: " "physical_block_size=%u, logical_block_size=%u, " "alignment_offset=%u, start=%llu", diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 9e331a1eb35f..54b963109e64 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1139,8 +1139,6 @@ extern void blk_set_default_limits(struct queue_limits *lim); extern void blk_set_stacking_limits(struct queue_limits *lim); extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, sector_t offset); -extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, - sector_t offset); extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, sector_t offset); extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); -- cgit v1.2.3 From b9b1a5d71533f2ccd54b810dffdcf0789b30ba9b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Jul 2020 08:12:51 +0200 Subject: block: remove blk_queue_stack_limits This function is just a tiny wrapper around blk_stack_limits. Open code it int the two callers. 
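Concretely (see the drbd and nvme hunks below), a call of the form

	blk_queue_stack_limits(q, b);

becomes

	blk_stack_limits(&q->limits, &b->limits, 0);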
Reviewed-by: Johannes Thumshirn Reviewed-by: Damien Le Moal Tested-by: Damien Le Moal Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-settings.c | 11 ----------- drivers/block/drbd/drbd_nl.c | 4 ++-- drivers/nvme/host/core.c | 3 ++- include/linux/blkdev.h | 1 - 4 files changed, 4 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/block/blk-settings.c b/block/blk-settings.c index 8c63af772685..76a7e03bcd6c 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -455,17 +455,6 @@ void blk_queue_io_opt(struct request_queue *q, unsigned int opt) } EXPORT_SYMBOL(blk_queue_io_opt); -/** - * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers - * @t: the stacking driver (top) - * @b: the underlying device (bottom) - **/ -void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) -{ - blk_stack_limits(&t->limits, &b->limits, 0); -} -EXPORT_SYMBOL(blk_queue_stack_limits); - /** * blk_stack_limits - adjust queue_limits for stacked devices * @t: the stacking driver limits (top device) diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index da4a3ebe04ef..d0d9a549b583 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -1250,7 +1250,7 @@ static void fixup_discard_if_not_supported(struct request_queue *q) static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q) { - /* Fixup max_write_zeroes_sectors after blk_queue_stack_limits(): + /* Fixup max_write_zeroes_sectors after blk_stack_limits(): * if we can handle "zeroes" efficiently on the protocol, * we want to do that, even if our backend does not announce * max_write_zeroes_sectors itself. */ @@ -1361,7 +1361,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi decide_on_write_same_support(device, q, b, o, disable_write_same); if (b) { - blk_queue_stack_limits(q, b); + blk_stack_limits(&q->limits, &b->limits, 0); if (q->backing_dev_info->ra_pages != b->backing_dev_info->ra_pages) { diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index e9d83b98d79d..aa2b66edba5e 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2054,7 +2054,8 @@ static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) #ifdef CONFIG_NVME_MULTIPATH if (ns->head->disk) { nvme_update_disk_info(ns->head->disk, ns, id); - blk_queue_stack_limits(ns->head->disk->queue, ns->queue); + blk_stack_limits(&ns->head->disk->queue->limits, + &ns->queue->limits, 0); nvme_mpath_update_disk_size(ns->head->disk); } #endif diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 54b963109e64..bbdd3cf62038 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1141,7 +1141,6 @@ extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, sector_t offset); extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, sector_t offset); -extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); extern void blk_queue_virt_boundary(struct request_queue *, unsigned long); -- cgit v1.2.3 From 6c4411f14d1afa8ead90cd4cf18a308c43ac6908 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 18 Jul 2020 17:28:30 -0700 Subject: clk: : drop a duplicated word Drop the repeated word "not" in a comment. 
Signed-off-by: Randy Dunlap Cc: Michael Turquette Cc: Stephen Boyd Cc: linux-clk@vger.kernel.org Link: https://lore.kernel.org/r/20200719002830.20319-1-rdunlap@infradead.org Signed-off-by: Stephen Boyd --- include/linux/clk-provider.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index bd1ee9039558..6f815be99b77 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -189,7 +189,7 @@ struct clk_duty { * and >= numerator) Return 0 on success, otherwise -EERROR. * * @init: Perform platform-specific initialization magic. - * This is not not used by any of the basic clock types. + * This is not used by any of the basic clock types. * This callback exist for HW which needs to perform some * initialisation magic for CCF to get an accurate view of the * clock. It may also be used dynamic resource allocation is -- cgit v1.2.3 From 4cfab3566710826e62adbf100875d2dca32434b6 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sun, 19 Jul 2020 20:49:52 -0700 Subject: net: dsa: Add wrappers for overloaded ndo_ops Add definitions for the dsa_netdevice_ops structure which is a subset of the net_device_ops structure for the specific operations that we care about overlaying on top of the DSA CPU port net_device and provide inline stubs that take core managing whether DSA code is reachable. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- include/net/dsa.h | 70 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) (limited to 'include') diff --git a/include/net/dsa.h b/include/net/dsa.h index 6fa418ff1175..343642ca4f63 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -86,6 +86,18 @@ struct dsa_device_ops { enum dsa_tag_protocol proto; }; +/* This structure defines the control interfaces that are overlayed by the + * DSA layer on top of the DSA CPU/management net_device instance. This is + * used by the core net_device layer while calling various net_device_ops + * function pointers. 
+ */ +struct dsa_netdevice_ops { + int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, + int cmd); + int (*ndo_get_phys_port_name)(struct net_device *dev, char *name, + size_t len); +}; + #define DSA_TAG_DRIVER_ALIAS "dsa_tag-" #define MODULE_ALIAS_DSA_TAG_DRIVER(__proto) \ MODULE_ALIAS(DSA_TAG_DRIVER_ALIAS __stringify(__proto##_VALUE)) @@ -217,6 +229,7 @@ struct dsa_port { /* * Original copy of the master netdev net_device_ops */ + const struct dsa_netdevice_ops *netdev_ops; const struct net_device_ops *orig_ndo_ops; bool setup; @@ -679,6 +692,63 @@ static inline bool dsa_can_decode(const struct sk_buff *skb, return false; } +#if IS_ENABLED(CONFIG_NET_DSA) +static inline int __dsa_netdevice_ops_check(struct net_device *dev) +{ + int err = -EOPNOTSUPP; + + if (!dev->dsa_ptr) + return err; + + if (!dev->dsa_ptr->netdev_ops) + return err; + + return 0; +} + +static inline int dsa_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr, + int cmd) +{ + const struct dsa_netdevice_ops *ops; + int err; + + err = __dsa_netdevice_ops_check(dev); + if (err) + return err; + + ops = dev->dsa_ptr->netdev_ops; + + return ops->ndo_do_ioctl(dev, ifr, cmd); +} + +static inline int dsa_ndo_get_phys_port_name(struct net_device *dev, + char *name, size_t len) +{ + const struct dsa_netdevice_ops *ops; + int err; + + err = __dsa_netdevice_ops_check(dev); + if (err) + return err; + + ops = dev->dsa_ptr->netdev_ops; + + return ops->ndo_get_phys_port_name(dev, name, len); +} +#else +static inline int dsa_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr, + int cmd) +{ + return -EOPNOTSUPP; +} + +static inline int dsa_ndo_get_phys_port_name(struct net_device *dev, + char *name, size_t len) +{ + return -EOPNOTSUPP; +} +#endif + void dsa_unregister_switch(struct dsa_switch *ds); int dsa_register_switch(struct dsa_switch *ds); struct dsa_switch *dsa_switch_find(int tree_index, int sw_index); -- cgit v1.2.3 From 9c0c7014f38206a2b63e7e832edf2e881a7b49ad Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sun, 19 Jul 2020 20:49:54 -0700 Subject: net: dsa: Setup dsa_netdev_ops Now that we have all the infrastructure in place for calling into the dsa_ptr->netdev_ops function pointers, install them when we configure the DSA CPU/management interface and tear them down. The flow is unchanged from before, but now we preserve equality of tests when network device drivers do tests like dev->netdev_ops == &foo_ops which was not the case before since we were allocating an entirely new structure. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- include/net/dsa.h | 1 - net/dsa/master.c | 52 +++++++++++++--------------------------------------- 2 files changed, 13 insertions(+), 40 deletions(-) (limited to 'include') diff --git a/include/net/dsa.h b/include/net/dsa.h index 343642ca4f63..f1b63d06d132 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -230,7 +230,6 @@ struct dsa_port { * Original copy of the master netdev net_device_ops */ const struct dsa_netdevice_ops *netdev_ops; - const struct net_device_ops *orig_ndo_ops; bool setup; }; diff --git a/net/dsa/master.c b/net/dsa/master.c index 480a61460c23..0a90911ae31b 100644 --- a/net/dsa/master.c +++ b/net/dsa/master.c @@ -220,12 +220,17 @@ static int dsa_master_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) break; } - if (cpu_dp->orig_ndo_ops && cpu_dp->orig_ndo_ops->ndo_do_ioctl) - err = cpu_dp->orig_ndo_ops->ndo_do_ioctl(dev, ifr, cmd); + if (dev->netdev_ops->ndo_do_ioctl) + err = dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd); return err; } +static const struct dsa_netdevice_ops dsa_netdev_ops = { + .ndo_do_ioctl = dsa_master_ioctl, + .ndo_get_phys_port_name = dsa_master_get_phys_port_name, +}; + static int dsa_master_ethtool_setup(struct net_device *dev) { struct dsa_port *cpu_dp = dev->dsa_ptr; @@ -260,38 +265,10 @@ static void dsa_master_ethtool_teardown(struct net_device *dev) cpu_dp->orig_ethtool_ops = NULL; } -static int dsa_master_ndo_setup(struct net_device *dev) -{ - struct dsa_port *cpu_dp = dev->dsa_ptr; - struct dsa_switch *ds = cpu_dp->ds; - struct net_device_ops *ops; - - if (dev->netdev_ops->ndo_get_phys_port_name) - return 0; - - ops = devm_kzalloc(ds->dev, sizeof(*ops), GFP_KERNEL); - if (!ops) - return -ENOMEM; - - cpu_dp->orig_ndo_ops = dev->netdev_ops; - if (cpu_dp->orig_ndo_ops) - memcpy(ops, cpu_dp->orig_ndo_ops, sizeof(*ops)); - - ops->ndo_get_phys_port_name = dsa_master_get_phys_port_name; - ops->ndo_do_ioctl = dsa_master_ioctl; - - dev->netdev_ops = ops; - - return 0; -} - -static void dsa_master_ndo_teardown(struct net_device *dev) +static void dsa_netdev_ops_set(struct net_device *dev, + const struct dsa_netdevice_ops *ops) { - struct dsa_port *cpu_dp = dev->dsa_ptr; - - if (cpu_dp->orig_ndo_ops) - dev->netdev_ops = cpu_dp->orig_ndo_ops; - cpu_dp->orig_ndo_ops = NULL; + dev->dsa_ptr->netdev_ops = ops; } static ssize_t tagging_show(struct device *d, struct device_attribute *attr, @@ -353,9 +330,7 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) if (ret) return ret; - ret = dsa_master_ndo_setup(dev); - if (ret) - goto out_err_ethtool_teardown; + dsa_netdev_ops_set(dev, &dsa_netdev_ops); ret = sysfs_create_group(&dev->dev.kobj, &dsa_group); if (ret) @@ -364,8 +339,7 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) return ret; out_err_ndo_teardown: - dsa_master_ndo_teardown(dev); -out_err_ethtool_teardown: + dsa_netdev_ops_set(dev, NULL); dsa_master_ethtool_teardown(dev); return ret; } @@ -373,7 +347,7 @@ out_err_ethtool_teardown: void dsa_master_teardown(struct net_device *dev) { sysfs_remove_group(&dev->dev.kobj, &dsa_group); - dsa_master_ndo_teardown(dev); + dsa_netdev_ops_set(dev, NULL); dsa_master_ethtool_teardown(dev); dsa_master_reset_mtu(dev); -- cgit v1.2.3 From a8b7b2d0b3fc965d823e362840e5451e2eb4a71b Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 20 Jul 2020 10:10:41 +0200 Subject: sched: sch_api: add missing rcu read lock to silence the warning In case the qdisc_match_from_root function() is called from non-rcu path with rtnl mutex held, a suspiciout rcu usage 
warning appears: [ 241.504354] ============================= [ 241.504358] WARNING: suspicious RCU usage [ 241.504366] 5.8.0-rc4-custom-01521-g72a7c7d549c3 #32 Not tainted [ 241.504370] ----------------------------- [ 241.504378] net/sched/sch_api.c:270 RCU-list traversed in non-reader section!! [ 241.504382] other info that might help us debug this: [ 241.504388] rcu_scheduler_active = 2, debug_locks = 1 [ 241.504394] 1 lock held by tc/1391: [ 241.504398] #0: ffffffff85a27850 (rtnl_mutex){+.+.}-{3:3}, at: rtnetlink_rcv_msg+0x49a/0xbd0 [ 241.504431] stack backtrace: [ 241.504440] CPU: 0 PID: 1391 Comm: tc Not tainted 5.8.0-rc4-custom-01521-g72a7c7d549c3 #32 [ 241.504446] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-2.fc32 04/01/2014 [ 241.504453] Call Trace: [ 241.504465] dump_stack+0x100/0x184 [ 241.504482] lockdep_rcu_suspicious+0x153/0x15d [ 241.504499] qdisc_match_from_root+0x293/0x350 Fix this by passing the rtnl held lockdep condition down to hlist_for_each_entry_rcu() Reported-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/linux/hashtable.h | 4 ++-- net/sched/sch_api.c | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h index 78b6ea5fa8ba..f6c666730b8c 100644 --- a/include/linux/hashtable.h +++ b/include/linux/hashtable.h @@ -173,9 +173,9 @@ static inline void hash_del_rcu(struct hlist_node *node) * @member: the name of the hlist_node within the struct * @key: the key of the objects to iterate over */ -#define hash_for_each_possible_rcu(name, obj, member, key) \ +#define hash_for_each_possible_rcu(name, obj, member, key, cond...) \ hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\ - member) + member, ## cond) /** * hash_for_each_possible_rcu_notrace - iterate over all possible objects hashing diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 11ebba60da3b..2a76a2f5ed88 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -267,7 +267,8 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle) root->handle == handle) return root; - hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle) { + hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle, + lockdep_rtnl_is_held()) { if (q->handle == handle) return q; } -- cgit v1.2.3 From 044f507dc0a3070985592d84707a9d69746d84c6 Mon Sep 17 00:00:00 2001 From: Sivaprakash Murugesan Date: Thu, 16 Jul 2020 11:02:50 +0530 Subject: clk: qcom: ipq8074: Add correct index for PCIe clocks The PCIe clocks GCC_PCIE0_AXI_S_BRIDGE_CLK, GCC_PCIE0_RCHNG_CLK_SRC, GCC_PCIE0_RCHNG_CLK are wrongly added to the gcc reset group. Move them to the gcc clock group. 
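As an illustrative consumer (the node, unit address and clock-names below are only an example, not taken from an existing dts), keeping these defines in the clock group lets a PCIe node reference them through the clock specifier rather than the reset specifier:

	pcie@20000000 {
		clocks = <&gcc GCC_PCIE0_AXI_S_BRIDGE_CLK>,
			 <&gcc GCC_PCIE0_RCHNG_CLK>;
		clock-names = "axi_s_bridge", "rchng";
	};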
Reported-by: kernel test robot Signed-off-by: Sivaprakash Murugesan Link: https://lore.kernel.org/r/1594877570-9280-1-git-send-email-sivaprak@codeaurora.org Fixes: e7fb524cfcca ("dt-bindings: clock: qcom: ipq8074: Add missing bindings for PCIe") Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/qcom,gcc-ipq8074.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/dt-bindings/clock/qcom,gcc-ipq8074.h b/include/dt-bindings/clock/qcom,gcc-ipq8074.h index e3e018565add..8e2bec1c91bf 100644 --- a/include/dt-bindings/clock/qcom,gcc-ipq8074.h +++ b/include/dt-bindings/clock/qcom,gcc-ipq8074.h @@ -230,6 +230,9 @@ #define GCC_GP1_CLK 221 #define GCC_GP2_CLK 222 #define GCC_GP3_CLK 223 +#define GCC_PCIE0_AXI_S_BRIDGE_CLK 224 +#define GCC_PCIE0_RCHNG_CLK_SRC 225 +#define GCC_PCIE0_RCHNG_CLK 226 #define GCC_BLSP1_BCR 0 #define GCC_BLSP1_QUP1_BCR 1 @@ -363,8 +366,5 @@ #define GCC_PCIE1_AHB_ARES 129 #define GCC_PCIE1_AXI_MASTER_STICKY_ARES 130 #define GCC_PCIE0_AXI_SLAVE_STICKY_ARES 131 -#define GCC_PCIE0_AXI_S_BRIDGE_CLK 132 -#define GCC_PCIE0_RCHNG_CLK_SRC 133 -#define GCC_PCIE0_RCHNG_CLK 134 #endif -- cgit v1.2.3 From f1bfd71c8662f20d53e71ef4e18bfb0e5677c27f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Jul 2020 13:36:09 +0200 Subject: arch, net: remove the last csum_partial_copy() leftovers Most of the tree only uses and implements csum_partial_copy_nocheck, but the c6x and lib/checksum.c implement a csum_partial_copy that isn't used anywere except to define csum_partial_copy. Get rid of this pointless alias. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- arch/c6x/lib/checksum.c | 2 +- arch/c6x/lib/csum_64plus.S | 8 ++++---- arch/nios2/include/asm/checksum.h | 5 ++--- include/asm-generic/checksum.h | 6 ++---- lib/checksum.c | 4 ++-- 5 files changed, 11 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/arch/c6x/lib/checksum.c b/arch/c6x/lib/checksum.c index 335ca4900808..dff2e2ec6e64 100644 --- a/arch/c6x/lib/checksum.c +++ b/arch/c6x/lib/checksum.c @@ -6,6 +6,6 @@ /* These are from csum_64plus.S */ EXPORT_SYMBOL(csum_partial); -EXPORT_SYMBOL(csum_partial_copy); +EXPORT_SYMBOL(csum_partial_copy_nocheck); EXPORT_SYMBOL(ip_compute_csum); EXPORT_SYMBOL(ip_fast_csum); diff --git a/arch/c6x/lib/csum_64plus.S b/arch/c6x/lib/csum_64plus.S index 8e625a30fd43..9c07127485d1 100644 --- a/arch/c6x/lib/csum_64plus.S +++ b/arch/c6x/lib/csum_64plus.S @@ -10,8 +10,8 @@ #include ; -;unsigned int csum_partial_copy(const char *src, char * dst, -; int len, int sum) +;unsigned int csum_partial_copy_nocheck(const char *src, char * dst, +; int len, int sum) ; ; A4: src ; B4: dst @@ -21,7 +21,7 @@ ; .text -ENTRY(csum_partial_copy) +ENTRY(csum_partial_copy_nocheck) MVC .S2 ILC,B30 MV .D1X B6,A31 ; given csum @@ -149,7 +149,7 @@ L10: ADD .D1 A31,A9,A9 BNOP .S2 B3,4 MVC .S2 B30,ILC -ENDPROC(csum_partial_copy) +ENDPROC(csum_partial_copy_nocheck) ; ;unsigned short diff --git a/arch/nios2/include/asm/checksum.h b/arch/nios2/include/asm/checksum.h index ec39698d3bea..b4316c361729 100644 --- a/arch/nios2/include/asm/checksum.h +++ b/arch/nios2/include/asm/checksum.h @@ -12,10 +12,9 @@ /* Take these from lib/checksum.c */ extern __wsum csum_partial(const void *buff, int len, __wsum sum); -extern __wsum csum_partial_copy(const void *src, void *dst, int len, +__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum); -#define csum_partial_copy_nocheck(src, dst, len, sum) \ - csum_partial_copy((src), 
(dst), (len), (sum)) +#define csum_partial_copy_nocheck csum_partial_copy_nocheck extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl); extern __sum16 ip_compute_csum(const void *buff, int len); diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h index 5a80f8e54300..cd8b75aa770d 100644 --- a/include/asm-generic/checksum.h +++ b/include/asm-generic/checksum.h @@ -23,11 +23,9 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum); * here even more important to align src and dst on a 32-bit (or even * better 64-bit) boundary */ -extern __wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum); - #ifndef csum_partial_copy_nocheck -#define csum_partial_copy_nocheck(src, dst, len, sum) \ - csum_partial_copy((src), (dst), (len), (sum)) +__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, + __wsum sum); #endif #ifndef ip_fast_csum diff --git a/lib/checksum.c b/lib/checksum.c index 7ac65a0000ff..c7861e84c526 100644 --- a/lib/checksum.c +++ b/lib/checksum.c @@ -149,12 +149,12 @@ EXPORT_SYMBOL(ip_compute_csum); * copy from ds while checksumming, otherwise like csum_partial */ __wsum -csum_partial_copy(const void *src, void *dst, int len, __wsum sum) +csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) { memcpy(dst, src, len); return csum_partial(dst, len, sum); } -EXPORT_SYMBOL(csum_partial_copy); +EXPORT_SYMBOL(csum_partial_copy_nocheck); #ifndef csum_tcpudp_nofold static inline u32 from64to32(u64 x) -- cgit v1.2.3 From e812916d3278baf03aabf10044661fc3b2848823 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 20 Jul 2020 21:08:00 +0300 Subject: linkmode: introduce linkmode_intersects() Add a new helper to find intersections between Ethtool link modes, linkmode_intersects(), similar to the other linkmode helpers. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- include/linux/linkmode.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include') diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h index c664c27a29a0..f8397f300fcd 100644 --- a/include/linux/linkmode.h +++ b/include/linux/linkmode.h @@ -82,6 +82,12 @@ static inline int linkmode_equal(const unsigned long *src1, return bitmap_equal(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS); } +static inline int linkmode_intersects(const unsigned long *src1, + const unsigned long *src2) +{ + return bitmap_intersects(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + static inline int linkmode_subset(const unsigned long *src1, const unsigned long *src2) { -- cgit v1.2.3 From bdb5d8ec47611ca61e168349f233e1dd1ed063f4 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 20 Jul 2020 21:08:01 +0300 Subject: qed, qede, qedf: convert link mode from u32 to ETHTOOL_LINK_MODE Currently qed driver already ran out of 32 bits to store link modes, and this doesn't allow to add and support more speeds. Convert custom link mode to generic Ethtool bitmap and definitions (convenient Phylink shorthands are used for elegance and readability). This allowed us to drop all conversions/mappings between the driver and Ethtool. This involves changes in qede and qedf as well, as they used definitions from shared "qed_if.h". Suggested-by: Andrew Lunn Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_main.c | 288 +++++++++++++----------- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 200 +++++----------- drivers/scsi/qedf/qedf_main.c | 78 ++++--- include/linux/qed/qed_if.h | 47 +--- 4 files changed, 268 insertions(+), 345 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 4c5f5bd91359..afc4fa3bdcaa 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "qed.h" #include "qed_sriov.h" @@ -1456,10 +1457,11 @@ static bool qed_can_link_change(struct qed_dev *cdev) static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) { - struct qed_hwfn *hwfn; + __ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps); struct qed_mcp_link_params *link_params; + struct qed_hwfn *hwfn; struct qed_ptt *ptt; - u32 sup_caps; + u32 as; int rc; if (!cdev) @@ -1482,57 +1484,79 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) return -EBUSY; link_params = qed_mcp_get_link_params(hwfn); + if (!link_params) + return -ENODATA; + if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG) link_params->speed.autoneg = params->autoneg; + if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { - link_params->speed.advertised_speeds = 0; - sup_caps = QED_LM_1000baseT_Full_BIT | - QED_LM_1000baseKX_Full_BIT | - QED_LM_1000baseX_Full_BIT; - if (params->adv_speeds & sup_caps) - link_params->speed.advertised_speeds |= - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; - sup_caps = QED_LM_10000baseT_Full_BIT | - QED_LM_10000baseKR_Full_BIT | - QED_LM_10000baseKX4_Full_BIT | - QED_LM_10000baseR_FEC_BIT | - QED_LM_10000baseCR_Full_BIT | - QED_LM_10000baseSR_Full_BIT | - QED_LM_10000baseLR_Full_BIT | - QED_LM_10000baseLRM_Full_BIT; - if (params->adv_speeds & sup_caps) - link_params->speed.advertised_speeds |= - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; - if (params->adv_speeds & QED_LM_20000baseKR2_Full_BIT) - link_params->speed.advertised_speeds |= - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G; - sup_caps = QED_LM_25000baseKR_Full_BIT | - QED_LM_25000baseCR_Full_BIT | - QED_LM_25000baseSR_Full_BIT; - if (params->adv_speeds & sup_caps) - link_params->speed.advertised_speeds |= - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; - sup_caps = QED_LM_40000baseLR4_Full_BIT | - QED_LM_40000baseKR4_Full_BIT | - QED_LM_40000baseCR4_Full_BIT | - QED_LM_40000baseSR4_Full_BIT; - if (params->adv_speeds & sup_caps) - link_params->speed.advertised_speeds |= - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; - sup_caps = QED_LM_50000baseKR2_Full_BIT | - QED_LM_50000baseCR2_Full_BIT | - QED_LM_50000baseSR2_Full_BIT; - if (params->adv_speeds & sup_caps) - link_params->speed.advertised_speeds |= - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; - sup_caps = QED_LM_100000baseKR4_Full_BIT | - QED_LM_100000baseSR4_Full_BIT | - QED_LM_100000baseCR4_Full_BIT | - QED_LM_100000baseLR4_ER4_Full_BIT; - if (params->adv_speeds & sup_caps) - link_params->speed.advertised_speeds |= - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G; + as = 0; + + phylink_zero(sup_caps); + phylink_set(sup_caps, 1000baseT_Full); + phylink_set(sup_caps, 1000baseKX_Full); + phylink_set(sup_caps, 1000baseX_Full); + + if (linkmode_intersects(params->adv_speeds, sup_caps)) + as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + + phylink_zero(sup_caps); + phylink_set(sup_caps, 10000baseT_Full); + 
phylink_set(sup_caps, 10000baseKR_Full); + phylink_set(sup_caps, 10000baseKX4_Full); + phylink_set(sup_caps, 10000baseR_FEC); + phylink_set(sup_caps, 10000baseCR_Full); + phylink_set(sup_caps, 10000baseSR_Full); + phylink_set(sup_caps, 10000baseLR_Full); + phylink_set(sup_caps, 10000baseLRM_Full); + + if (linkmode_intersects(params->adv_speeds, sup_caps)) + as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + + phylink_zero(sup_caps); + phylink_set(sup_caps, 20000baseKR2_Full); + + if (linkmode_intersects(params->adv_speeds, sup_caps)) + as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G; + + phylink_zero(sup_caps); + phylink_set(sup_caps, 25000baseKR_Full); + phylink_set(sup_caps, 25000baseCR_Full); + phylink_set(sup_caps, 25000baseSR_Full); + + if (linkmode_intersects(params->adv_speeds, sup_caps)) + as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; + + phylink_zero(sup_caps); + phylink_set(sup_caps, 40000baseLR4_Full); + phylink_set(sup_caps, 40000baseKR4_Full); + phylink_set(sup_caps, 40000baseCR4_Full); + phylink_set(sup_caps, 40000baseSR4_Full); + + if (linkmode_intersects(params->adv_speeds, sup_caps)) + as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; + + phylink_zero(sup_caps); + phylink_set(sup_caps, 50000baseKR2_Full); + phylink_set(sup_caps, 50000baseCR2_Full); + phylink_set(sup_caps, 50000baseSR2_Full); + + if (linkmode_intersects(params->adv_speeds, sup_caps)) + as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; + + phylink_zero(sup_caps); + phylink_set(sup_caps, 100000baseKR4_Full); + phylink_set(sup_caps, 100000baseSR4_Full); + phylink_set(sup_caps, 100000baseCR4_Full); + phylink_set(sup_caps, 100000baseLR4_ER4_Full); + + if (linkmode_intersects(params->adv_speeds, sup_caps)) + as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G; + + link_params->speed.advertised_speeds = as; } + if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) link_params->speed.forced_speed = params->forced_speed; if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) { @@ -1644,7 +1668,7 @@ static int qed_get_link_data(struct qed_hwfn *hwfn, static void qed_fill_link_capability(struct qed_hwfn *hwfn, struct qed_ptt *ptt, u32 capability, - u32 *if_capability) + unsigned long *if_caps) { u32 media_type, tcvr_state, tcvr_type; u32 speed_mask, board_cfg; @@ -1667,113 +1691,117 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn, switch (media_type) { case MEDIA_DA_TWINAX: - *if_capability |= QED_LM_FIBRE_BIT; + phylink_set(if_caps, FIBRE); + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) - *if_capability |= QED_LM_20000baseKR2_Full_BIT; + phylink_set(if_caps, 20000baseKR2_Full); + /* For DAC media multiple speed capabilities are supported*/ capability = capability & speed_mask; if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) - *if_capability |= QED_LM_1000baseKX_Full_BIT; + phylink_set(if_caps, 1000baseKX_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) - *if_capability |= QED_LM_10000baseCR_Full_BIT; + phylink_set(if_caps, 10000baseCR_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) - *if_capability |= QED_LM_40000baseCR4_Full_BIT; + phylink_set(if_caps, 40000baseCR4_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) - *if_capability |= QED_LM_25000baseCR_Full_BIT; + phylink_set(if_caps, 25000baseCR_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) - *if_capability |= QED_LM_50000baseCR2_Full_BIT; + phylink_set(if_caps, 50000baseCR2_Full); if (capability & - 
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) - *if_capability |= QED_LM_100000baseCR4_Full_BIT; + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) + phylink_set(if_caps, 100000baseCR4_Full); + break; case MEDIA_BASE_T: - *if_capability |= QED_LM_TP_BIT; + phylink_set(if_caps, TP); + if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) { if (capability & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) { - *if_capability |= QED_LM_1000baseT_Full_BIT; - } + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) + phylink_set(if_caps, 1000baseT_Full); if (capability & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) { - *if_capability |= QED_LM_10000baseT_Full_BIT; - } + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) + phylink_set(if_caps, 10000baseT_Full); } + if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) { - *if_capability |= QED_LM_FIBRE_BIT; + phylink_set(if_caps, FIBRE); + if (tcvr_type == ETH_TRANSCEIVER_TYPE_1000BASET) - *if_capability |= QED_LM_1000baseT_Full_BIT; + phylink_set(if_caps, 1000baseT_Full); if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_BASET) - *if_capability |= QED_LM_10000baseT_Full_BIT; + phylink_set(if_caps, 10000baseT_Full); } + break; case MEDIA_SFP_1G_FIBER: case MEDIA_SFPP_10G_FIBER: case MEDIA_XFP_FIBER: case MEDIA_MODULE_FIBER: - *if_capability |= QED_LM_FIBRE_BIT; - if (capability & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) { + phylink_set(if_caps, FIBRE); + + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) { if ((tcvr_type == ETH_TRANSCEIVER_TYPE_1G_LX) || (tcvr_type == ETH_TRANSCEIVER_TYPE_1G_SX)) - *if_capability |= QED_LM_1000baseKX_Full_BIT; + phylink_set(if_caps, 1000baseKX_Full); } - if (capability & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) { + + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) { if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_SR) - *if_capability |= QED_LM_10000baseSR_Full_BIT; + phylink_set(if_caps, 10000baseSR_Full); if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LR) - *if_capability |= QED_LM_10000baseLR_Full_BIT; + phylink_set(if_caps, 10000baseLR_Full); if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LRM) - *if_capability |= QED_LM_10000baseLRM_Full_BIT; + phylink_set(if_caps, 10000baseLRM_Full); if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_ER) - *if_capability |= QED_LM_10000baseR_FEC_BIT; + phylink_set(if_caps, 10000baseR_FEC); } + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) - *if_capability |= QED_LM_20000baseKR2_Full_BIT; - if (capability & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) { + phylink_set(if_caps, 20000baseKR2_Full); + + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) { if (tcvr_type == ETH_TRANSCEIVER_TYPE_25G_SR) - *if_capability |= QED_LM_25000baseSR_Full_BIT; + phylink_set(if_caps, 25000baseSR_Full); } - if (capability & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) { + + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) { if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_LR4) - *if_capability |= QED_LM_40000baseLR4_Full_BIT; + phylink_set(if_caps, 40000baseLR4_Full); if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_SR4) - *if_capability |= QED_LM_40000baseSR4_Full_BIT; + phylink_set(if_caps, 40000baseSR4_Full); } - if (capability & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) - *if_capability |= QED_LM_50000baseKR2_Full_BIT; + + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) + phylink_set(if_caps, 50000baseKR2_Full); + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) { if (tcvr_type == ETH_TRANSCEIVER_TYPE_100G_SR4) - *if_capability |= 
QED_LM_100000baseSR4_Full_BIT; + phylink_set(if_caps, 100000baseSR4_Full); } break; case MEDIA_KR: - *if_capability |= QED_LM_Backplane_BIT; + phylink_set(if_caps, Backplane); + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) - *if_capability |= QED_LM_20000baseKR2_Full_BIT; - if (capability & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) - *if_capability |= QED_LM_1000baseKX_Full_BIT; - if (capability & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) - *if_capability |= QED_LM_10000baseKR_Full_BIT; - if (capability & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) - *if_capability |= QED_LM_25000baseKR_Full_BIT; - if (capability & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) - *if_capability |= QED_LM_40000baseKR4_Full_BIT; - if (capability & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) - *if_capability |= QED_LM_50000baseKR2_Full_BIT; + phylink_set(if_caps, 20000baseKR2_Full); + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) + phylink_set(if_caps, 1000baseKX_Full); + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) + phylink_set(if_caps, 10000baseKR_Full); + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) + phylink_set(if_caps, 25000baseKR_Full); + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) + phylink_set(if_caps, 40000baseKR4_Full); + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) + phylink_set(if_caps, 50000baseKR2_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) - *if_capability |= QED_LM_100000baseKR4_Full_BIT; + phylink_set(if_caps, 100000baseKR4_Full); + break; case MEDIA_UNSPECIFIED: case MEDIA_NOT_PRESENT: @@ -1806,26 +1834,27 @@ static void qed_fill_link(struct qed_hwfn *hwfn, /* TODO - at the moment assume supported and advertised speed equal */ if (link_caps.default_speed_autoneg) - if_link->supported_caps |= QED_LM_Autoneg_BIT; + phylink_set(if_link->supported_caps, Autoneg); if (params.pause.autoneg || (params.pause.forced_rx && params.pause.forced_tx)) - if_link->supported_caps |= QED_LM_Asym_Pause_BIT; + phylink_set(if_link->supported_caps, Asym_Pause); if (params.pause.autoneg || params.pause.forced_rx || params.pause.forced_tx) - if_link->supported_caps |= QED_LM_Pause_BIT; + phylink_set(if_link->supported_caps, Pause); + + linkmode_copy(if_link->advertised_caps, if_link->supported_caps); - if_link->advertised_caps = if_link->supported_caps; if (params.speed.autoneg) - if_link->advertised_caps |= QED_LM_Autoneg_BIT; + phylink_set(if_link->advertised_caps, Autoneg); else - if_link->advertised_caps &= ~QED_LM_Autoneg_BIT; + phylink_clear(if_link->advertised_caps, Autoneg); /* Fill link advertised capability*/ qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds, - &if_link->advertised_caps); + if_link->advertised_caps); /* Fill link supported capability*/ qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities, - &if_link->supported_caps); + if_link->supported_caps); if (link.link_up) if_link->speed = link.speed; @@ -1845,30 +1874,29 @@ static void qed_fill_link(struct qed_hwfn *hwfn, if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE; /* Link partner capabilities */ - if (link.partner_adv_speed & - QED_LINK_PARTNER_SPEED_1G_FD) - if_link->lp_caps |= QED_LM_1000baseT_Full_BIT; + + if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD) + phylink_set(if_link->lp_caps, 1000baseT_Full); if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G) - if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT; + phylink_set(if_link->lp_caps, 
10000baseKR_Full); if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_20G) - if_link->lp_caps |= QED_LM_20000baseKR2_Full_BIT; + phylink_set(if_link->lp_caps, 20000baseKR2_Full); if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G) - if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT; + phylink_set(if_link->lp_caps, 25000baseKR_Full); if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G) - if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT; + phylink_set(if_link->lp_caps, 40000baseLR4_Full); if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G) - if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT; + phylink_set(if_link->lp_caps, 50000baseKR2_Full); if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G) - if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT; + phylink_set(if_link->lp_caps, 100000baseKR4_Full); if (link.an_complete) - if_link->lp_caps |= QED_LM_Autoneg_BIT; - + phylink_set(if_link->lp_caps, Autoneg); if (link.partner_adv_pause) - if_link->lp_caps |= QED_LM_Pause_BIT; + phylink_set(if_link->lp_caps, Pause); if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE || link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE) - if_link->lp_caps |= QED_LM_Asym_Pause_BIT; + phylink_set(if_link->lp_caps, Asym_Pause); if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) { if_link->eee_supported = false; diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index d3fc7403d095..0a564b06d697 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -13,6 +13,8 @@ #include #include #include +#include + #include "qede.h" #include "qede_ptp.h" @@ -418,76 +420,10 @@ static int qede_set_priv_flags(struct net_device *dev, u32 flags) return 0; } -struct qede_link_mode_mapping { - u32 qed_link_mode; - u32 ethtool_link_mode; -}; - -static const struct qede_link_mode_mapping qed_lm_map[] = { - {QED_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT}, - {QED_LM_Autoneg_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT}, - {QED_LM_Asym_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT}, - {QED_LM_Pause_BIT, ETHTOOL_LINK_MODE_Pause_BIT}, - {QED_LM_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT}, - {QED_LM_10000baseT_Full_BIT, ETHTOOL_LINK_MODE_10000baseT_Full_BIT}, - {QED_LM_TP_BIT, ETHTOOL_LINK_MODE_TP_BIT}, - {QED_LM_Backplane_BIT, ETHTOOL_LINK_MODE_Backplane_BIT}, - {QED_LM_1000baseKX_Full_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT}, - {QED_LM_10000baseKX4_Full_BIT, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT}, - {QED_LM_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT}, - {QED_LM_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT}, - {QED_LM_10000baseR_FEC_BIT, ETHTOOL_LINK_MODE_10000baseR_FEC_BIT}, - {QED_LM_20000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT}, - {QED_LM_40000baseKR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT}, - {QED_LM_40000baseCR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT}, - {QED_LM_40000baseSR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT}, - {QED_LM_40000baseLR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT}, - {QED_LM_25000baseCR_Full_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT}, - {QED_LM_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT}, - {QED_LM_25000baseSR_Full_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT}, - {QED_LM_50000baseCR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT}, - {QED_LM_50000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT}, - {QED_LM_100000baseKR4_Full_BIT, - 
ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT}, - {QED_LM_100000baseSR4_Full_BIT, - ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT}, - {QED_LM_100000baseCR4_Full_BIT, - ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT}, - {QED_LM_100000baseLR4_ER4_Full_BIT, - ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT}, - {QED_LM_50000baseSR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT}, - {QED_LM_1000baseX_Full_BIT, ETHTOOL_LINK_MODE_1000baseX_Full_BIT}, - {QED_LM_10000baseCR_Full_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT}, - {QED_LM_10000baseSR_Full_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT}, - {QED_LM_10000baseLR_Full_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT}, - {QED_LM_10000baseLRM_Full_BIT, ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT}, -}; - -#define QEDE_DRV_TO_ETHTOOL_CAPS(caps, lk_ksettings, name) \ -{ \ - int i; \ - \ - for (i = 0; i < ARRAY_SIZE(qed_lm_map); i++) { \ - if ((caps) & (qed_lm_map[i].qed_link_mode)) \ - __set_bit(qed_lm_map[i].ethtool_link_mode,\ - lk_ksettings->link_modes.name); \ - } \ -} - -#define QEDE_ETHTOOL_TO_DRV_CAPS(caps, lk_ksettings, name) \ -{ \ - int i; \ - \ - for (i = 0; i < ARRAY_SIZE(qed_lm_map); i++) { \ - if (test_bit(qed_lm_map[i].ethtool_link_mode, \ - lk_ksettings->link_modes.name)) \ - caps |= qed_lm_map[i].qed_link_mode; \ - } \ -} - static int qede_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { + typeof(cmd->link_modes) *link_modes = &cmd->link_modes; struct ethtool_link_settings *base = &cmd->base; struct qede_dev *edev = netdev_priv(dev); struct qed_link_output current_link; @@ -497,14 +433,9 @@ static int qede_get_link_ksettings(struct net_device *dev, memset(¤t_link, 0, sizeof(current_link)); edev->ops->common->get_link(edev->cdev, ¤t_link); - ethtool_link_ksettings_zero_link_mode(cmd, supported); - QEDE_DRV_TO_ETHTOOL_CAPS(current_link.supported_caps, cmd, supported) - - ethtool_link_ksettings_zero_link_mode(cmd, advertising); - QEDE_DRV_TO_ETHTOOL_CAPS(current_link.advertised_caps, cmd, advertising) - - ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising); - QEDE_DRV_TO_ETHTOOL_CAPS(current_link.lp_caps, cmd, lp_advertising) + linkmode_copy(link_modes->supported, current_link.supported_caps); + linkmode_copy(link_modes->advertising, current_link.advertised_caps); + linkmode_copy(link_modes->lp_advertising, current_link.lp_caps); if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) { base->speed = current_link.speed; @@ -527,10 +458,10 @@ static int qede_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { const struct ethtool_link_settings *base = &cmd->base; + __ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps); struct qede_dev *edev = netdev_priv(dev); struct qed_link_output current_link; struct qed_link_params params; - u32 sup_caps; if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) { DP_INFO(edev, "Link settings are not allowed to be changed\n"); @@ -542,105 +473,79 @@ static int qede_set_link_ksettings(struct net_device *dev, params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS; params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG; + if (base->autoneg == AUTONEG_ENABLE) { - if (!(current_link.supported_caps & QED_LM_Autoneg_BIT)) { + if (!phylink_test(current_link.supported_caps, Autoneg)) { DP_INFO(edev, "Auto negotiation is not supported\n"); return -EOPNOTSUPP; } params.autoneg = true; params.forced_speed = 0; - QEDE_ETHTOOL_TO_DRV_CAPS(params.adv_speeds, cmd, advertising) + + linkmode_copy(params.adv_speeds, cmd->link_modes.advertising); } else { /* 
forced speed */ params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED; params.autoneg = false; params.forced_speed = base->speed; + + phylink_zero(sup_caps); + switch (base->speed) { case SPEED_1000: - sup_caps = QED_LM_1000baseT_Full_BIT | - QED_LM_1000baseKX_Full_BIT | - QED_LM_1000baseX_Full_BIT; - if (!(current_link.supported_caps & sup_caps)) { - DP_INFO(edev, "1G speed not supported\n"); - return -EINVAL; - } - params.adv_speeds = current_link.supported_caps & - sup_caps; + phylink_set(sup_caps, 1000baseT_Full); + phylink_set(sup_caps, 1000baseKX_Full); + phylink_set(sup_caps, 1000baseX_Full); break; case SPEED_10000: - sup_caps = QED_LM_10000baseT_Full_BIT | - QED_LM_10000baseKR_Full_BIT | - QED_LM_10000baseKX4_Full_BIT | - QED_LM_10000baseR_FEC_BIT | - QED_LM_10000baseCR_Full_BIT | - QED_LM_10000baseSR_Full_BIT | - QED_LM_10000baseLR_Full_BIT | - QED_LM_10000baseLRM_Full_BIT; - if (!(current_link.supported_caps & sup_caps)) { - DP_INFO(edev, "10G speed not supported\n"); - return -EINVAL; - } - params.adv_speeds = current_link.supported_caps & - sup_caps; + phylink_set(sup_caps, 10000baseT_Full); + phylink_set(sup_caps, 10000baseKR_Full); + phylink_set(sup_caps, 10000baseKX4_Full); + phylink_set(sup_caps, 10000baseR_FEC); + phylink_set(sup_caps, 10000baseCR_Full); + phylink_set(sup_caps, 10000baseSR_Full); + phylink_set(sup_caps, 10000baseLR_Full); + phylink_set(sup_caps, 10000baseLRM_Full); break; case SPEED_20000: - if (!(current_link.supported_caps & - QED_LM_20000baseKR2_Full_BIT)) { - DP_INFO(edev, "20G speed not supported\n"); - return -EINVAL; - } - params.adv_speeds = QED_LM_20000baseKR2_Full_BIT; + phylink_set(sup_caps, 20000baseKR2_Full); break; case SPEED_25000: - sup_caps = QED_LM_25000baseKR_Full_BIT | - QED_LM_25000baseCR_Full_BIT | - QED_LM_25000baseSR_Full_BIT; - if (!(current_link.supported_caps & sup_caps)) { - DP_INFO(edev, "25G speed not supported\n"); - return -EINVAL; - } - params.adv_speeds = current_link.supported_caps & - sup_caps; + phylink_set(sup_caps, 25000baseKR_Full); + phylink_set(sup_caps, 25000baseCR_Full); + phylink_set(sup_caps, 25000baseSR_Full); break; case SPEED_40000: - sup_caps = QED_LM_40000baseLR4_Full_BIT | - QED_LM_40000baseKR4_Full_BIT | - QED_LM_40000baseCR4_Full_BIT | - QED_LM_40000baseSR4_Full_BIT; - if (!(current_link.supported_caps & sup_caps)) { - DP_INFO(edev, "40G speed not supported\n"); - return -EINVAL; - } - params.adv_speeds = current_link.supported_caps & - sup_caps; + phylink_set(sup_caps, 40000baseLR4_Full); + phylink_set(sup_caps, 40000baseKR4_Full); + phylink_set(sup_caps, 40000baseCR4_Full); + phylink_set(sup_caps, 40000baseSR4_Full); break; case SPEED_50000: - sup_caps = QED_LM_50000baseKR2_Full_BIT | - QED_LM_50000baseCR2_Full_BIT | - QED_LM_50000baseSR2_Full_BIT; - if (!(current_link.supported_caps & sup_caps)) { - DP_INFO(edev, "50G speed not supported\n"); - return -EINVAL; - } - params.adv_speeds = current_link.supported_caps & - sup_caps; + phylink_set(sup_caps, 50000baseKR2_Full); + phylink_set(sup_caps, 50000baseCR2_Full); + phylink_set(sup_caps, 50000baseSR2_Full); break; case SPEED_100000: - sup_caps = QED_LM_100000baseKR4_Full_BIT | - QED_LM_100000baseSR4_Full_BIT | - QED_LM_100000baseCR4_Full_BIT | - QED_LM_100000baseLR4_ER4_Full_BIT; - if (!(current_link.supported_caps & sup_caps)) { - DP_INFO(edev, "100G speed not supported\n"); - return -EINVAL; - } - params.adv_speeds = current_link.supported_caps & - sup_caps; + phylink_set(sup_caps, 100000baseKR4_Full); + phylink_set(sup_caps, 
100000baseSR4_Full); + phylink_set(sup_caps, 100000baseCR4_Full); + phylink_set(sup_caps, 100000baseLR4_ER4_Full); break; default: DP_INFO(edev, "Unsupported speed %u\n", base->speed); return -EINVAL; } + + if (!linkmode_intersects(current_link.supported_caps, + sup_caps)) { + DP_INFO(edev, "%uG speed not supported\n", + base->speed / 1000); + return -EINVAL; + } + + linkmode_and(params.adv_speeds, current_link.supported_caps, + sup_caps); } params.link_up = true; @@ -1006,13 +911,16 @@ static int qede_set_pauseparam(struct net_device *dev, memset(¶ms, 0, sizeof(params)); params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG; + if (epause->autoneg) { - if (!(current_link.supported_caps & QED_LM_Autoneg_BIT)) { + if (!phylink_test(current_link.supported_caps, Autoneg)) { DP_INFO(edev, "autoneg not supported\n"); return -EINVAL; } + params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; } + if (epause->rx_pause) params.pause_config |= QED_LINK_PAUSE_RX_ENABLE; if (epause->tx_pause) diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 36b1ca2dadbb..6e77e4908605 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -440,6 +441,7 @@ static void qedf_link_recovery(struct work_struct *work) static void qedf_update_link_speed(struct qedf_ctx *qedf, struct qed_link_output *link) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps); struct fc_lport *lport = qedf->lport; lport->link_speed = FC_PORTSPEED_UNKNOWN; @@ -474,40 +476,60 @@ static void qedf_update_link_speed(struct qedf_ctx *qedf, * Set supported link speed by querying the supported * capabilities of the link. */ - if ((link->supported_caps & QED_LM_10000baseT_Full_BIT) || - (link->supported_caps & QED_LM_10000baseKX4_Full_BIT) || - (link->supported_caps & QED_LM_10000baseR_FEC_BIT) || - (link->supported_caps & QED_LM_10000baseCR_Full_BIT) || - (link->supported_caps & QED_LM_10000baseSR_Full_BIT) || - (link->supported_caps & QED_LM_10000baseLR_Full_BIT) || - (link->supported_caps & QED_LM_10000baseLRM_Full_BIT) || - (link->supported_caps & QED_LM_10000baseKR_Full_BIT)) { + + phylink_zero(sup_caps); + phylink_set(sup_caps, 10000baseT_Full); + phylink_set(sup_caps, 10000baseKX4_Full); + phylink_set(sup_caps, 10000baseR_FEC); + phylink_set(sup_caps, 10000baseCR_Full); + phylink_set(sup_caps, 10000baseSR_Full); + phylink_set(sup_caps, 10000baseLR_Full); + phylink_set(sup_caps, 10000baseLRM_Full); + phylink_set(sup_caps, 10000baseKR_Full); + + if (linkmode_intersects(link->supported_caps, sup_caps)) lport->link_supported_speeds |= FC_PORTSPEED_10GBIT; - } - if ((link->supported_caps & QED_LM_25000baseKR_Full_BIT) || - (link->supported_caps & QED_LM_25000baseCR_Full_BIT) || - (link->supported_caps & QED_LM_25000baseSR_Full_BIT)) { + + phylink_zero(sup_caps); + phylink_set(sup_caps, 25000baseKR_Full); + phylink_set(sup_caps, 25000baseCR_Full); + phylink_set(sup_caps, 25000baseSR_Full); + + if (linkmode_intersects(link->supported_caps, sup_caps)) lport->link_supported_speeds |= FC_PORTSPEED_25GBIT; - } - if ((link->supported_caps & QED_LM_40000baseLR4_Full_BIT) || - (link->supported_caps & QED_LM_40000baseKR4_Full_BIT) || - (link->supported_caps & QED_LM_40000baseCR4_Full_BIT) || - (link->supported_caps & QED_LM_40000baseSR4_Full_BIT)) { + + phylink_zero(sup_caps); + phylink_set(sup_caps, 40000baseLR4_Full); + phylink_set(sup_caps, 40000baseKR4_Full); + phylink_set(sup_caps, 40000baseCR4_Full); + phylink_set(sup_caps, 
40000baseSR4_Full); + + if (linkmode_intersects(link->supported_caps, sup_caps)) lport->link_supported_speeds |= FC_PORTSPEED_40GBIT; - } - if ((link->supported_caps & QED_LM_50000baseKR2_Full_BIT) || - (link->supported_caps & QED_LM_50000baseCR2_Full_BIT) || - (link->supported_caps & QED_LM_50000baseSR2_Full_BIT)) { + + phylink_zero(sup_caps); + phylink_set(sup_caps, 50000baseKR2_Full); + phylink_set(sup_caps, 50000baseCR2_Full); + phylink_set(sup_caps, 50000baseSR2_Full); + + if (linkmode_intersects(link->supported_caps, sup_caps)) lport->link_supported_speeds |= FC_PORTSPEED_50GBIT; - } - if ((link->supported_caps & QED_LM_100000baseKR4_Full_BIT) || - (link->supported_caps & QED_LM_100000baseSR4_Full_BIT) || - (link->supported_caps & QED_LM_100000baseCR4_Full_BIT) || - (link->supported_caps & QED_LM_100000baseLR4_ER4_Full_BIT)) { + + phylink_zero(sup_caps); + phylink_set(sup_caps, 100000baseKR4_Full); + phylink_set(sup_caps, 100000baseSR4_Full); + phylink_set(sup_caps, 100000baseCR4_Full); + phylink_set(sup_caps, 100000baseLR4_ER4_Full); + + if (linkmode_intersects(link->supported_caps, sup_caps)) lport->link_supported_speeds |= FC_PORTSPEED_100GBIT; - } - if (link->supported_caps & QED_LM_20000baseKR2_Full_BIT) + + phylink_zero(sup_caps); + phylink_set(sup_caps, 20000baseKR2_Full); + + if (linkmode_intersects(link->supported_caps, sup_caps)) lport->link_supported_speeds |= FC_PORTSPEED_20GBIT; + fc_host_supported_speeds(lport->host) = lport->link_supported_speeds; } diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 8a6e3ad436d1..f82db1b92d45 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -661,42 +661,6 @@ enum qed_protocol { QED_PROTOCOL_FCOE, }; -enum qed_link_mode_bits { - QED_LM_FIBRE_BIT = BIT(0), - QED_LM_Autoneg_BIT = BIT(1), - QED_LM_Asym_Pause_BIT = BIT(2), - QED_LM_Pause_BIT = BIT(3), - QED_LM_1000baseT_Full_BIT = BIT(4), - QED_LM_10000baseT_Full_BIT = BIT(5), - QED_LM_10000baseKR_Full_BIT = BIT(6), - QED_LM_20000baseKR2_Full_BIT = BIT(7), - QED_LM_25000baseKR_Full_BIT = BIT(8), - QED_LM_40000baseLR4_Full_BIT = BIT(9), - QED_LM_50000baseKR2_Full_BIT = BIT(10), - QED_LM_100000baseKR4_Full_BIT = BIT(11), - QED_LM_TP_BIT = BIT(12), - QED_LM_Backplane_BIT = BIT(13), - QED_LM_1000baseKX_Full_BIT = BIT(14), - QED_LM_10000baseKX4_Full_BIT = BIT(15), - QED_LM_10000baseR_FEC_BIT = BIT(16), - QED_LM_40000baseKR4_Full_BIT = BIT(17), - QED_LM_40000baseCR4_Full_BIT = BIT(18), - QED_LM_40000baseSR4_Full_BIT = BIT(19), - QED_LM_25000baseCR_Full_BIT = BIT(20), - QED_LM_25000baseSR_Full_BIT = BIT(21), - QED_LM_50000baseCR2_Full_BIT = BIT(22), - QED_LM_100000baseSR4_Full_BIT = BIT(23), - QED_LM_100000baseCR4_Full_BIT = BIT(24), - QED_LM_100000baseLR4_ER4_Full_BIT = BIT(25), - QED_LM_50000baseSR2_Full_BIT = BIT(26), - QED_LM_1000baseX_Full_BIT = BIT(27), - QED_LM_10000baseCR_Full_BIT = BIT(28), - QED_LM_10000baseSR_Full_BIT = BIT(29), - QED_LM_10000baseLR_Full_BIT = BIT(30), - QED_LM_10000baseLRM_Full_BIT = BIT(31), - QED_LM_COUNT = 32 -}; - struct qed_link_params { bool link_up; @@ -708,7 +672,9 @@ struct qed_link_params { #define QED_LINK_OVERRIDE_EEE_CONFIG BIT(5) u32 override_flags; bool autoneg; - u32 adv_speeds; + + __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_speeds); + u32 forced_speed; #define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0) #define QED_LINK_PAUSE_RX_ENABLE BIT(1) @@ -726,10 +692,9 @@ struct qed_link_params { struct qed_link_output { bool link_up; - /* In QED_LM_* defs */ - u32 supported_caps; - u32 advertised_caps; - u32 lp_caps; + 
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported_caps); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised_caps); + __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_caps); u32 speed; /* In Mb/s */ u8 duplex; /* In DUPLEX defs */ -- cgit v1.2.3 From 37237b5b7104f91b519133d7862d1b5169a3ba8e Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 20 Jul 2020 21:08:06 +0300 Subject: qed: reformat several structures a bit Prior to adding new fields and bitfields, reformat the related structures according to the Linux style (spaces to tabs, lowercase hex, indentation etc.). Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 256 +++++++++++++++--------------- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 85 +++++----- include/linux/qed/qed_if.h | 64 ++++---- 3 files changed, 205 insertions(+), 200 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index ebc25b34e491..93d33c9cf145 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -11536,34 +11536,35 @@ typedef u32 offsize_t; /* In DWORDS !!! */ /* PHY configuration */ struct eth_phy_cfg { - u32 speed; -#define ETH_SPEED_AUTONEG 0 -#define ETH_SPEED_SMARTLINQ 0x8 - - u32 pause; -#define ETH_PAUSE_NONE 0x0 -#define ETH_PAUSE_AUTONEG 0x1 -#define ETH_PAUSE_RX 0x2 -#define ETH_PAUSE_TX 0x4 - - u32 adv_speed; - u32 loopback_mode; -#define ETH_LOOPBACK_NONE (0) -#define ETH_LOOPBACK_INT_PHY (1) -#define ETH_LOOPBACK_EXT_PHY (2) -#define ETH_LOOPBACK_EXT (3) -#define ETH_LOOPBACK_MAC (4) - - u32 eee_cfg; + u32 speed; +#define ETH_SPEED_AUTONEG 0x0 +#define ETH_SPEED_SMARTLINQ 0x8 + + u32 pause; +#define ETH_PAUSE_NONE 0x0 +#define ETH_PAUSE_AUTONEG 0x1 +#define ETH_PAUSE_RX 0x2 +#define ETH_PAUSE_TX 0x4 + + u32 adv_speed; + + u32 loopback_mode; +#define ETH_LOOPBACK_NONE 0x0 +#define ETH_LOOPBACK_INT_PHY 0x1 +#define ETH_LOOPBACK_EXT_PHY 0x2 +#define ETH_LOOPBACK_EXT 0x3 +#define ETH_LOOPBACK_MAC 0x4 + + u32 eee_cfg; #define EEE_CFG_EEE_ENABLED BIT(0) #define EEE_CFG_TX_LPI BIT(1) #define EEE_CFG_ADV_SPEED_1G BIT(2) #define EEE_CFG_ADV_SPEED_10G BIT(3) -#define EEE_TX_TIMER_USEC_MASK (0xfffffff0) +#define EEE_TX_TIMER_USEC_MASK 0xfffffff0 #define EEE_TX_TIMER_USEC_OFFSET 4 -#define EEE_TX_TIMER_USEC_BALANCED_TIME (0xa00) -#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME (0x100) -#define EEE_TX_TIMER_USEC_LATENCY_TIME (0x6000) +#define EEE_TX_TIMER_USEC_BALANCED_TIME 0xa00 +#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME 0x100 +#define EEE_TX_TIMER_USEC_LATENCY_TIME 0x6000 u32 feature_config_flags; #define ETH_EEE_MODE_ADV_LPI (1 << 0) @@ -11895,41 +11896,36 @@ struct public_path { }; struct public_port { - u32 validity_map; - - u32 link_status; -#define LINK_STATUS_LINK_UP 0x00000001 -#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e -#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (1 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1) - -#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 - -#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 -#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 - + u32 validity_map; + + u32 link_status; 
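The bits that follow describe how the MFW packs link state into the link_status word: speed and duplex share the 4-bit field covered by LINK_STATUS_SPEED_AND_DUPLEX_MASK, while most of the other bits are individual flags. As a rough sketch only (hypothetical helper name, not code from this series), such a word could be decoded like this, relying solely on the LINK_STATUS_* defines shown in this hunk:

	/* Hypothetical decode helper, for illustration only. */
	static u32 example_link_status_to_mbps(u32 link_status)
	{
		switch (link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
		case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
			return 1000;
		case LINK_STATUS_SPEED_AND_DUPLEX_10G:
			return 10000;
		case LINK_STATUS_SPEED_AND_DUPLEX_20G:
			return 20000;
		case LINK_STATUS_SPEED_AND_DUPLEX_25G:
			return 25000;
		case LINK_STATUS_SPEED_AND_DUPLEX_40G:
			return 40000;
		case LINK_STATUS_SPEED_AND_DUPLEX_50G:
			return 50000;
		case LINK_STATUS_SPEED_AND_DUPLEX_100G:
			return 100000;
		default:
			return 0;
		}
	}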
+#define LINK_STATUS_LINK_UP 0x00000001 +#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e +#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (1 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1) +#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 +#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 +#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 #define LINK_STATUS_PFC_ENABLED 0x00000100 -#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 -#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 +#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 +#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 #define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800 #define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000 #define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000 #define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000 #define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000 #define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000 - -#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000 +#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000c0000 #define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18) #define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1 << 18) #define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18) #define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18) - #define LINK_STATUS_SFP_TX_FAULT 0x00100000 #define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000 #define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000 @@ -12630,49 +12626,49 @@ struct public_drv_mb { #define FW_MSG_CODE_MDUMP_INVALID_CMD 0x00030000 - u32 fw_mb_param; -#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000 + u32 fw_mb_param; +#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000 #define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16 -#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF +#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff #define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0 - /* get pf rdma protocol command responce */ -#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0 -#define FW_MB_PARAM_GET_PF_RDMA_ROCE 0x1 -#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2 -#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3 + /* Get PF RDMA protocol command response */ +#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0 +#define FW_MB_PARAM_GET_PF_RDMA_ROCE 0x1 +#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2 +#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3 -/* get MFW feature support response */ -#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ 0x00000001 -#define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002 -#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000 + /* Get MFW feature support response */ +#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ 0x00000001 +#define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002 +#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000 -#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0) +#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR BIT(0) -#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK 0x00000001 -#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_SHIFT 0 -#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK 0x00000002 -#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_SHIFT 1 -#define 
FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK 0x00000004 -#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_SHIFT 2 -#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK 0x00000008 -#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_SHIFT 3 +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK 0x00000001 +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_SHIFT 0 +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK 0x00000002 +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_SHIFT 1 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK 0x00000004 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_SHIFT 2 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK 0x00000008 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_SHIFT 3 -#define FW_MB_PARAM_PPFID_BITMAP_MASK 0xFF -#define FW_MB_PARAM_PPFID_BITMAP_SHIFT 0 +#define FW_MB_PARAM_PPFID_BITMAP_MASK 0xff +#define FW_MB_PARAM_PPFID_BITMAP_SHIFT 0 - u32 drv_pulse_mb; -#define DRV_PULSE_SEQ_MASK 0x00007fff -#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 -#define DRV_PULSE_ALWAYS_ALIVE 0x00008000 + u32 drv_pulse_mb; +#define DRV_PULSE_SEQ_MASK 0x00007fff +#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 +#define DRV_PULSE_ALWAYS_ALIVE 0x00008000 - u32 mcp_pulse_mb; -#define MCP_PULSE_SEQ_MASK 0x00007fff -#define MCP_PULSE_ALWAYS_ALIVE 0x00008000 -#define MCP_EVENT_MASK 0xffff0000 -#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 + u32 mcp_pulse_mb; +#define MCP_PULSE_SEQ_MASK 0x00007fff +#define MCP_PULSE_ALWAYS_ALIVE 0x00008000 +#define MCP_EVENT_MASK 0xffff0000 +#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 - union drv_union_data union_data; + union drv_union_data union_data; }; #define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_MASK 0x00ffffff @@ -13048,24 +13044,27 @@ struct nvm_cfg1_path { }; struct nvm_cfg1_port { - u32 reserved__m_relocated_to_option_123; - u32 reserved__m_relocated_to_option_124; - u32 generic_cont0; -#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000F0000 + u32 rel_to_opt123; + u32 rel_to_opt124; + + u32 generic_cont0; +#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000f0000 #define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16 #define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0 #define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1 #define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2 #define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3 -#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00F00000 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00f00000 #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20 #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1 #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2 #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4 - u32 pcie_cfg; - u32 features; - u32 speed_cap_mask; -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000FFFF + + u32 pcie_cfg; + u32 features; + + u32 speed_cap_mask; +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000ffff #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2 @@ -13074,8 +13073,9 @@ struct nvm_cfg1_port { #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40 - u32 link_settings; -#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000F + + u32 link_settings; +#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000f #define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0 #define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0 #define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1 @@ -13091,49 +13091,53 @@ struct nvm_cfg1_port { #define 
NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1 #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2 #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4 - u32 phy_cfg; - u32 mgmt_traffic; - u32 ext_phy; + u32 phy_cfg; + u32 mgmt_traffic; + + u32 ext_phy; /* EEE power saving mode */ -#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00FF0000 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00ff0000 #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET 16 #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED 0x0 #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED 0x1 #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE 0x2 #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY 0x3 - u32 mba_cfg1; - u32 mba_cfg2; - u32 vf_cfg; - struct nvm_cfg_mac_address lldp_mac_address; - u32 led_port_settings; - u32 transceiver_00; - u32 device_ids; - u32 board_cfg; -#define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000FF -#define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0 -#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0 -#define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1 -#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2 -#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3 -#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4 - u32 mnm_10g_cap; - u32 mnm_10g_ctrl; - u32 mnm_10g_misc; - u32 mnm_25g_cap; - u32 mnm_25g_ctrl; - u32 mnm_25g_misc; - u32 mnm_40g_cap; - u32 mnm_40g_ctrl; - u32 mnm_40g_misc; - u32 mnm_50g_cap; - u32 mnm_50g_ctrl; - u32 mnm_50g_misc; - u32 mnm_100g_cap; - u32 mnm_100g_ctrl; - u32 mnm_100g_misc; - u32 reserved[116]; + u32 mba_cfg1; + u32 mba_cfg2; + u32 vf_cfg; + struct nvm_cfg_mac_address lldp_mac_address; + u32 led_port_settings; + u32 transceiver_00; + u32 device_ids; + + u32 board_cfg; +#define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000ff +#define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0 +#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0 +#define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1 +#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2 +#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3 +#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4 + + u32 mnm_10g_cap; + u32 mnm_10g_ctrl; + u32 mnm_10g_misc; + u32 mnm_25g_cap; + u32 mnm_25g_ctrl; + u32 mnm_25g_misc; + u32 mnm_40g_cap; + u32 mnm_40g_ctrl; + u32 mnm_40g_misc; + u32 mnm_50g_cap; + u32 mnm_50g_ctrl; + u32 mnm_50g_misc; + u32 mnm_100g_cap; + u32 mnm_100g_ctrl; + u32 mnm_100g_misc; + + u32 reserved[116]; }; struct nvm_cfg1_func { diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 63a22a615e94..cf678b6966f8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -34,61 +34,60 @@ enum qed_mcp_eee_mode { }; struct qed_mcp_link_params { - struct qed_mcp_link_speed_params speed; - struct qed_mcp_link_pause_params pause; - u32 loopback_mode; - struct qed_link_eee_params eee; + struct qed_mcp_link_speed_params speed; + struct qed_mcp_link_pause_params pause; + u32 loopback_mode; + struct qed_link_eee_params eee; }; struct qed_mcp_link_capabilities { - u32 speed_capabilities; - bool default_speed_autoneg; - enum qed_mcp_eee_mode default_eee; - u32 eee_lpi_timer; - u8 eee_speed_caps; + u32 speed_capabilities; + bool default_speed_autoneg; + enum qed_mcp_eee_mode default_eee; + u32 eee_lpi_timer; + u8 eee_speed_caps; }; struct qed_mcp_link_state { - bool link_up; - - u32 min_pf_rate; + bool link_up; + u32 min_pf_rate; /* Actual link speed in Mb/s */ - u32 line_speed; + u32 line_speed; /* PF max speed in Mb/s, deduced from line_speed * according to PF max bandwidth configuration. 
*/ - u32 speed; - bool full_duplex; - - bool an; - bool an_complete; - bool parallel_detection; - bool pfc_enabled; - -#define QED_LINK_PARTNER_SPEED_1G_HD BIT(0) -#define QED_LINK_PARTNER_SPEED_1G_FD BIT(1) -#define QED_LINK_PARTNER_SPEED_10G BIT(2) -#define QED_LINK_PARTNER_SPEED_20G BIT(3) -#define QED_LINK_PARTNER_SPEED_25G BIT(4) -#define QED_LINK_PARTNER_SPEED_40G BIT(5) -#define QED_LINK_PARTNER_SPEED_50G BIT(6) -#define QED_LINK_PARTNER_SPEED_100G BIT(7) - u32 partner_adv_speed; - - bool partner_tx_flow_ctrl_en; - bool partner_rx_flow_ctrl_en; - -#define QED_LINK_PARTNER_SYMMETRIC_PAUSE (1) -#define QED_LINK_PARTNER_ASYMMETRIC_PAUSE (2) -#define QED_LINK_PARTNER_BOTH_PAUSE (3) - u8 partner_adv_pause; - - bool sfp_tx_fault; - bool eee_active; - u8 eee_adv_caps; - u8 eee_lp_adv_caps; + u32 speed; + + bool full_duplex; + bool an; + bool an_complete; + bool parallel_detection; + bool pfc_enabled; + + u32 partner_adv_speed; +#define QED_LINK_PARTNER_SPEED_1G_HD BIT(0) +#define QED_LINK_PARTNER_SPEED_1G_FD BIT(1) +#define QED_LINK_PARTNER_SPEED_10G BIT(2) +#define QED_LINK_PARTNER_SPEED_20G BIT(3) +#define QED_LINK_PARTNER_SPEED_25G BIT(4) +#define QED_LINK_PARTNER_SPEED_40G BIT(5) +#define QED_LINK_PARTNER_SPEED_50G BIT(6) +#define QED_LINK_PARTNER_SPEED_100G BIT(7) + + bool partner_tx_flow_ctrl_en; + bool partner_rx_flow_ctrl_en; + + u8 partner_adv_pause; +#define QED_LINK_PARTNER_SYMMETRIC_PAUSE 0x1 +#define QED_LINK_PARTNER_ASYMMETRIC_PAUSE 0x2 +#define QED_LINK_PARTNER_BOTH_PAUSE 0x3 + + bool sfp_tx_fault; + bool eee_active; + u8 eee_adv_caps; + u8 eee_lp_adv_caps; }; struct qed_mcp_function_info { diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index f82db1b92d45..dde48f206d0d 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -662,51 +662,53 @@ enum qed_protocol { }; struct qed_link_params { - bool link_up; + bool link_up; -#define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0) -#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1) -#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2) -#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3) -#define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4) -#define QED_LINK_OVERRIDE_EEE_CONFIG BIT(5) - u32 override_flags; - bool autoneg; + u32 override_flags; +#define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0) +#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1) +#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2) +#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3) +#define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4) +#define QED_LINK_OVERRIDE_EEE_CONFIG BIT(5) + bool autoneg; __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_speeds); + u32 forced_speed; - u32 forced_speed; -#define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0) -#define QED_LINK_PAUSE_RX_ENABLE BIT(1) -#define QED_LINK_PAUSE_TX_ENABLE BIT(2) - u32 pause_config; -#define QED_LINK_LOOPBACK_NONE BIT(0) -#define QED_LINK_LOOPBACK_INT_PHY BIT(1) -#define QED_LINK_LOOPBACK_EXT_PHY BIT(2) -#define QED_LINK_LOOPBACK_EXT BIT(3) -#define QED_LINK_LOOPBACK_MAC BIT(4) - u32 loopback_mode; - struct qed_link_eee_params eee; + u32 pause_config; +#define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0) +#define QED_LINK_PAUSE_RX_ENABLE BIT(1) +#define QED_LINK_PAUSE_TX_ENABLE BIT(2) + + u32 loopback_mode; +#define QED_LINK_LOOPBACK_NONE BIT(0) +#define QED_LINK_LOOPBACK_INT_PHY BIT(1) +#define QED_LINK_LOOPBACK_EXT_PHY BIT(2) +#define QED_LINK_LOOPBACK_EXT BIT(3) +#define QED_LINK_LOOPBACK_MAC BIT(4) + + struct qed_link_eee_params eee; }; struct qed_link_output { - bool link_up; + bool link_up; 
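The three caps fields just below are ethtool link-mode bitmaps rather than the old u32 QED_LM_* masks, so consumers probe them with the linkmode/phylink helpers instead of a bitwise AND, exactly as qede_set_link_ksettings() and qedf_update_link_speed() do above. A minimal sketch of that pattern, assuming a hypothetical caller outside this series:

	#include <linux/ethtool.h>
	#include <linux/linkmode.h>
	#include <linux/phylink.h>

	/* Hypothetical example, not part of this patch: test whether any
	 * 25G link mode is present in a supported_caps bitmap filled in
	 * by the qed core.
	 */
	static bool example_supports_25g(const unsigned long *supported_caps)
	{
		__ETHTOOL_DECLARE_LINK_MODE_MASK(mask);

		phylink_zero(mask);
		phylink_set(mask, 25000baseKR_Full);
		phylink_set(mask, 25000baseCR_Full);
		phylink_set(mask, 25000baseSR_Full);

		return linkmode_intersects(supported_caps, mask);
	}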
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported_caps); __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised_caps); __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_caps); - u32 speed; /* In Mb/s */ - u8 duplex; /* In DUPLEX defs */ - u8 port; /* In PORT defs */ - bool autoneg; - u32 pause_config; + u32 speed; /* In Mb/s */ + u8 duplex; /* In DUPLEX defs */ + u8 port; /* In PORT defs */ + bool autoneg; + u32 pause_config; /* EEE - capability & param */ - bool eee_supported; - bool eee_active; - u8 sup_caps; - struct qed_link_eee_params eee; + bool eee_supported; + bool eee_active; + u8 sup_caps; + struct qed_link_eee_params eee; }; struct qed_probe_params { -- cgit v1.2.3 From ae7e69379fd5a87141fd8c7f2efab8e73f2a9f7e Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 20 Jul 2020 21:08:07 +0300 Subject: qed: add support for Forward Error Correction Add all necessary routines for reading supported FEC modes from NVM and querying FEC control to the MFW (if the running version supports it). Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 54 ++++++++++++++++++++---------- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 24 +++++++++++-- drivers/net/ethernet/qlogic/qed/qed_main.c | 6 ++++ drivers/net/ethernet/qlogic/qed/qed_mcp.c | 47 +++++++++++++++++++++----- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 4 +++ include/linux/qed/qed_if.h | 13 +++++++ 6 files changed, 121 insertions(+), 27 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 491a6dbb5d73..d929556247a5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -3968,7 +3968,7 @@ unlock_and_exit: static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities; + u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities, fc; u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; struct qed_mcp_link_capabilities *p_caps; struct qed_mcp_link_params *link; @@ -4081,16 +4081,38 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_hwfn->mcp_info->link_capabilities.default_speed_autoneg = link->speed.autoneg; - link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK; - link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET; - link->pause.autoneg = !!(link_temp & - NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG); - link->pause.forced_rx = !!(link_temp & - NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX); - link->pause.forced_tx = !!(link_temp & - NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); + fc = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_DRV_FLOW_CONTROL); + link->pause.autoneg = !!(fc & NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG); + link->pause.forced_rx = !!(fc & NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX); + link->pause.forced_tx = !!(fc & NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); link->loopback_mode = 0; + if (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) { + switch (GET_MFW_FIELD(link_temp, + NVM_CFG1_PORT_FEC_FORCE_MODE)) { + case NVM_CFG1_PORT_FEC_FORCE_MODE_NONE: + p_caps->fec_default |= QED_FEC_MODE_NONE; + break; + case NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE: + p_caps->fec_default |= QED_FEC_MODE_FIRECODE; + break; + case NVM_CFG1_PORT_FEC_FORCE_MODE_RS: + p_caps->fec_default |= QED_FEC_MODE_RS; + break; + case NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO: + p_caps->fec_default |= QED_FEC_MODE_AUTO; + break; + default: + DP_VERBOSE(p_hwfn, 
NETIF_MSG_LINK, + "unknown FEC mode in 0x%08x\n", link_temp); + } + } else { + p_caps->fec_default = QED_FEC_MODE_UNSUPPORTED; + } + + link->fec = p_caps->fec_default; + if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) { link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr + offsetof(struct nvm_cfg1_port, ext_phy)); @@ -4122,14 +4144,12 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_caps->default_eee = QED_MCP_EEE_UNSUPPORTED; } - DP_VERBOSE(p_hwfn, - NETIF_MSG_LINK, - "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%08x usec]\n", - link->speed.forced_speed, - link->speed.advertised_speeds, - link->speed.autoneg, - link->pause.autoneg, - p_caps->default_eee, p_caps->eee_lpi_timer); + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x, EEE: 0x%02x [0x%08x usec], FEC: 0x%02x\n", + link->speed.forced_speed, link->speed.advertised_speeds, + link->speed.autoneg, link->pause.autoneg, + p_caps->default_eee, p_caps->eee_lpi_timer, + p_caps->fec_default); if (IS_LEAD_HWFN(p_hwfn)) { struct qed_dev *cdev = p_hwfn->cdev; diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 93d33c9cf145..7c1d4efffbff 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -11566,8 +11566,15 @@ struct eth_phy_cfg { #define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME 0x100 #define EEE_TX_TIMER_USEC_LATENCY_TIME 0x6000 - u32 feature_config_flags; -#define ETH_EEE_MODE_ADV_LPI (1 << 0) + u32 deprecated; + + u32 fec_mode; +#define FEC_FORCE_MODE_MASK 0x000000ff +#define FEC_FORCE_MODE_OFFSET 0 +#define FEC_FORCE_MODE_NONE 0x00 +#define FEC_FORCE_MODE_FIRECODE 0x01 +#define FEC_FORCE_MODE_RS 0x02 +#define FEC_FORCE_MODE_AUTO 0x07 }; struct port_mf_cfg { @@ -11934,6 +11941,11 @@ struct public_port { #define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000 #define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000 +#define LINK_STATUS_FEC_MODE_MASK 0x38000000 +#define LINK_STATUS_FEC_MODE_NONE (0 << 27) +#define LINK_STATUS_FEC_MODE_FIRECODE_CL74 (1 << 27) +#define LINK_STATUS_FEC_MODE_RS_CL91 (2 << 27) + u32 link_status1; u32 ext_phy_fw_version; u32 drv_phy_cfg_addr; @@ -12553,6 +12565,7 @@ struct public_drv_mb { #define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000FFFF #define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0 #define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002 +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL 0x00000004 #define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000 /* DRV_MSG_CODE_DEBUG_DATA_SEND parameters */ @@ -12641,6 +12654,7 @@ struct public_drv_mb { /* Get MFW feature support response */ #define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ 0x00000001 #define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002 +#define FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL 0x00000020 #define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000 #define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR BIT(0) @@ -13091,6 +13105,12 @@ struct nvm_cfg1_port { #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1 #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2 #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_MASK 0x000e0000 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_OFFSET 17 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_NONE 0x0 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE 0x1 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_RS 0x2 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO 0x7 u32 phy_cfg; u32 
mgmt_traffic; diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 2be9ed39c450..91e7cfc544f0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1597,6 +1597,9 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) memcpy(&link_params->eee, ¶ms->eee, sizeof(link_params->eee)); + if (params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG) + link_params->fec = params->fec; + rc = qed_mcp_set_link(hwfn, ptt, params->link_up); qed_ptt_release(hwfn, ptt); @@ -1938,6 +1941,9 @@ static void qed_fill_link(struct qed_hwfn *hwfn, else phylink_clear(if_link->advertised_caps, Autoneg); + if_link->sup_fec = link_caps.fec_default; + if_link->active_fec = params.fec; + /* Fill link advertised capability */ qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds, if_link->advertised_caps); diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index b10a92488630..78c0d3a2d164 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1446,6 +1446,25 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link); + if (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) { + switch (status & LINK_STATUS_FEC_MODE_MASK) { + case LINK_STATUS_FEC_MODE_NONE: + p_link->fec_active = QED_FEC_MODE_NONE; + break; + case LINK_STATUS_FEC_MODE_FIRECODE_CL74: + p_link->fec_active = QED_FEC_MODE_FIRECODE; + break; + case LINK_STATUS_FEC_MODE_RS_CL91: + p_link->fec_active = QED_FEC_MODE_RS; + break; + default: + p_link->fec_active = QED_FEC_MODE_AUTO; + } + } else { + p_link->fec_active = QED_FEC_MODE_UNSUPPORTED; + } + qed_link_update(p_hwfn, p_ptt); out: spin_unlock_bh(&p_hwfn->mcp_info->link_lock); @@ -1456,8 +1475,8 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input; struct qed_mcp_mb_params mb_params; struct eth_phy_cfg phy_cfg; + u32 cmd, fec_bit = 0; int rc = 0; - u32 cmd; /* Set the shmem configuration according to params */ memset(&phy_cfg, 0, sizeof(phy_cfg)); @@ -1489,16 +1508,27 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) EEE_TX_TIMER_USEC_MASK; } + if (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) { + if (params->fec & QED_FEC_MODE_NONE) + fec_bit |= FEC_FORCE_MODE_NONE; + else if (params->fec & QED_FEC_MODE_FIRECODE) + fec_bit |= FEC_FORCE_MODE_FIRECODE; + else if (params->fec & QED_FEC_MODE_RS) + fec_bit |= FEC_FORCE_MODE_RS; + else if (params->fec & QED_FEC_MODE_AUTO) + fec_bit |= FEC_FORCE_MODE_AUTO; + + SET_MFW_FIELD(phy_cfg.fec_mode, FEC_FORCE_MODE, fec_bit); + } + p_hwfn->b_drv_link_init = b_up; if (b_up) { DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, - "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n", - phy_cfg.speed, - phy_cfg.pause, - phy_cfg.adv_speed, - phy_cfg.loopback_mode, - phy_cfg.feature_config_flags); + "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, FEC 0x%08x\n", + phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed, + phy_cfg.loopback_mode, phy_cfg.fec_mode); } else { DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Resetting link\n"); @@ -3805,7 +3835,8 @@ int 
qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) u32 mcp_resp, mcp_param, features; features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE | - DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK; + DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK | + DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL; return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT, features, &mcp_resp, &mcp_param); diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index cf678b6966f8..5e50405854e6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -38,11 +38,13 @@ struct qed_mcp_link_params { struct qed_mcp_link_pause_params pause; u32 loopback_mode; struct qed_link_eee_params eee; + u32 fec; }; struct qed_mcp_link_capabilities { u32 speed_capabilities; bool default_speed_autoneg; + u32 fec_default; enum qed_mcp_eee_mode default_eee; u32 eee_lpi_timer; u8 eee_speed_caps; @@ -88,6 +90,8 @@ struct qed_mcp_link_state { bool eee_active; u8 eee_adv_caps; u8 eee_lp_adv_caps; + + u32 fec_active; }; struct qed_mcp_function_info { diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index dde48f206d0d..f0b4cdc79299 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -661,6 +661,14 @@ enum qed_protocol { QED_PROTOCOL_FCOE, }; +enum qed_fec_mode { + QED_FEC_MODE_NONE = BIT(0), + QED_FEC_MODE_FIRECODE = BIT(1), + QED_FEC_MODE_RS = BIT(2), + QED_FEC_MODE_AUTO = BIT(3), + QED_FEC_MODE_UNSUPPORTED = BIT(4), +}; + struct qed_link_params { bool link_up; @@ -671,6 +679,7 @@ struct qed_link_params { #define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3) #define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4) #define QED_LINK_OVERRIDE_EEE_CONFIG BIT(5) +#define QED_LINK_OVERRIDE_FEC_CONFIG BIT(6) bool autoneg; __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_speeds); @@ -689,6 +698,7 @@ struct qed_link_params { #define QED_LINK_LOOPBACK_MAC BIT(4) struct qed_link_eee_params eee; + u32 fec; }; struct qed_link_output { @@ -709,6 +719,9 @@ struct qed_link_output { bool eee_active; u8 sup_caps; struct qed_link_eee_params eee; + + u32 sup_fec; + u32 active_fec; }; struct qed_probe_params { -- cgit v1.2.3 From 98e675ec5a92a15f6f8ade41eda883cd39df5712 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 20 Jul 2020 21:08:13 +0300 Subject: qed: add missing loopback modes These modes are relevant only for several boards, but may be reported by MFW as well as the others. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 5 +++++ drivers/net/ethernet/qlogic/qed/qed_main.c | 19 +++++++++++++++++++ include/linux/qed/qed_if.h | 5 +++++ 3 files changed, 29 insertions(+) (limited to 'include') diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index debc55923251..5b81d5d42397 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -11554,6 +11554,11 @@ struct eth_phy_cfg { #define ETH_LOOPBACK_EXT_PHY 0x2 #define ETH_LOOPBACK_EXT 0x3 #define ETH_LOOPBACK_MAC 0x4 +#define ETH_LOOPBACK_CNIG_AH_ONLY_0123 0x5 +#define ETH_LOOPBACK_CNIG_AH_ONLY_2301 0x6 +#define ETH_LOOPBACK_PCS_AH_ONLY 0x7 +#define ETH_LOOPBACK_REVERSE_MAC_AH_ONLY 0x8 +#define ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY 0x9 u32 eee_cfg; #define EEE_CFG_EEE_ENABLED BIT(0) diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 91e7cfc544f0..fea155c6ff11 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1587,6 +1587,25 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) case QED_LINK_LOOPBACK_MAC: link_params->loopback_mode = ETH_LOOPBACK_MAC; break; + case QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123: + link_params->loopback_mode = + ETH_LOOPBACK_CNIG_AH_ONLY_0123; + break; + case QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301: + link_params->loopback_mode = + ETH_LOOPBACK_CNIG_AH_ONLY_2301; + break; + case QED_LINK_LOOPBACK_PCS_AH_ONLY: + link_params->loopback_mode = ETH_LOOPBACK_PCS_AH_ONLY; + break; + case QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY: + link_params->loopback_mode = + ETH_LOOPBACK_REVERSE_MAC_AH_ONLY; + break; + case QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY: + link_params->loopback_mode = + ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY; + break; default: link_params->loopback_mode = ETH_LOOPBACK_NONE; break; diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index f0b4cdc79299..2e780159a5fb 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -696,6 +696,11 @@ struct qed_link_params { #define QED_LINK_LOOPBACK_EXT_PHY BIT(2) #define QED_LINK_LOOPBACK_EXT BIT(3) #define QED_LINK_LOOPBACK_MAC BIT(4) +#define QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123 BIT(5) +#define QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301 BIT(6) +#define QED_LINK_LOOPBACK_PCS_AH_ONLY BIT(7) +#define QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY BIT(8) +#define QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY BIT(9) struct qed_link_eee_params eee; u32 fec; -- cgit v1.2.3 From 99785a87fc7d27207c7dca0f0fe04386f1981690 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 20 Jul 2020 21:08:15 +0300 Subject: qed: add support for the extended speed and FEC modes Add all necessary code (NVM parsing, MFW and Ethtool reports etc.) to support extended speed and FEC modes. These new modes are supported by the new boards revisions and newer MFW versions. Misc: correct port type for MEDIA_KR. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 1 + drivers/net/ethernet/qlogic/qed/qed_dev.c | 102 ++++++++++- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 85 +++++++++- drivers/net/ethernet/qlogic/qed/qed_main.c | 264 +++++++++++++++++++++++++++-- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 74 +++++++- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 45 +++++ include/linux/qed/qed_if.h | 1 + 7 files changed, 547 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 47da4f7d3be2..b2a7b53ee760 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -712,6 +712,7 @@ struct qed_dev { #define QED_IS_AH(dev) ((dev)->type == QED_DEV_TYPE_AH) #define QED_IS_K2(dev) QED_IS_AH(dev) #define QED_IS_E4(dev) (QED_IS_BB(dev) || QED_IS_AH(dev)) +#define QED_IS_E5(dev) ((dev)->type == QED_DEV_TYPE_E5) u16 vendor_id; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 66a520099c44..6516a1f921da 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -3968,8 +3968,9 @@ unlock_and_exit: static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities, fc; + u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities, fld; u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; + struct qed_mcp_link_speed_params *ext_speed; struct qed_mcp_link_capabilities *p_caps; struct qed_mcp_link_params *link; @@ -4026,8 +4027,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK; link->speed.advertised_speeds = link_temp; - link_temp = link->speed.advertised_speeds; - p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp; + p_caps->speed_capabilities = link->speed.advertised_speeds; link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr + @@ -4062,13 +4062,12 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp); } - p_hwfn->mcp_info->link_capabilities.default_speed_autoneg = - link->speed.autoneg; + p_caps->default_speed_autoneg = link->speed.autoneg; - fc = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_DRV_FLOW_CONTROL); - link->pause.autoneg = !!(fc & NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG); - link->pause.forced_rx = !!(fc & NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX); - link->pause.forced_tx = !!(fc & NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); + fld = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_DRV_FLOW_CONTROL); + link->pause.autoneg = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG); + link->pause.forced_rx = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX); + link->pause.forced_tx = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); link->loopback_mode = 0; if (p_hwfn->mcp_info->capabilities & @@ -4128,6 +4127,91 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_caps->default_eee = QED_MCP_EEE_UNSUPPORTED; } + if (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) { + ext_speed = &link->ext_speed; + + link_temp = qed_rd(p_hwfn, p_ptt, + port_cfg_addr + + offsetof(struct nvm_cfg1_port, + extended_speed)); + + fld = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_EXTENDED_SPEED); + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN) + ext_speed->autoneg = true; + + ext_speed->forced_speed = 
0; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G) + ext_speed->forced_speed |= QED_EXT_SPEED_1G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G) + ext_speed->forced_speed |= QED_EXT_SPEED_10G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G) + ext_speed->forced_speed |= QED_EXT_SPEED_20G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G) + ext_speed->forced_speed |= QED_EXT_SPEED_25G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G) + ext_speed->forced_speed |= QED_EXT_SPEED_40G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R) + ext_speed->forced_speed |= QED_EXT_SPEED_50G_R; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2) + ext_speed->forced_speed |= QED_EXT_SPEED_50G_R2; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2) + ext_speed->forced_speed |= QED_EXT_SPEED_100G_R2; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4) + ext_speed->forced_speed |= QED_EXT_SPEED_100G_R4; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4) + ext_speed->forced_speed |= QED_EXT_SPEED_100G_P4; + + fld = GET_MFW_FIELD(link_temp, + NVM_CFG1_PORT_EXTENDED_SPEED_CAP); + + ext_speed->advertised_speeds = 0; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED) + ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_RES; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G) + ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_1G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G) + ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_10G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G) + ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_20G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G) + ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_25G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G) + ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_40G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R) + ext_speed->advertised_speeds |= + QED_EXT_SPEED_MASK_50G_R; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2) + ext_speed->advertised_speeds |= + QED_EXT_SPEED_MASK_50G_R2; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2) + ext_speed->advertised_speeds |= + QED_EXT_SPEED_MASK_100G_R2; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4) + ext_speed->advertised_speeds |= + QED_EXT_SPEED_MASK_100G_R4; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4) + ext_speed->advertised_speeds |= + QED_EXT_SPEED_MASK_100G_P4; + + link_temp = qed_rd(p_hwfn, p_ptt, + port_cfg_addr + + offsetof(struct nvm_cfg1_port, + extended_fec_mode)); + link->ext_fec_mode = link_temp; + + p_caps->default_ext_speed_caps = ext_speed->advertised_speeds; + p_caps->default_ext_speed = ext_speed->forced_speed; + p_caps->default_ext_autoneg = ext_speed->autoneg; + p_caps->default_ext_fec = link->ext_fec_mode; + + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Read default extended link config: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, FEC: 0x%02x\n", + ext_speed->forced_speed, + ext_speed->advertised_speeds, ext_speed->autoneg, + p_caps->default_ext_fec); + } + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Read default link: Speed 0x%08x, Adv. 
Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x, EEE: 0x%02x [0x%08x usec], FEC: 0x%02x\n", link->speed.forced_speed, link->speed.advertised_speeds, diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 5b81d5d42397..1af3f65ab862 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -11580,6 +11580,54 @@ struct eth_phy_cfg { #define FEC_FORCE_MODE_FIRECODE 0x01 #define FEC_FORCE_MODE_RS 0x02 #define FEC_FORCE_MODE_AUTO 0x07 +#define FEC_EXTENDED_MODE_MASK 0xffffff00 +#define FEC_EXTENDED_MODE_OFFSET 8 +#define ETH_EXT_FEC_NONE 0x00000100 +#define ETH_EXT_FEC_10G_NONE 0x00000200 +#define ETH_EXT_FEC_10G_BASE_R 0x00000400 +#define ETH_EXT_FEC_20G_NONE 0x00000800 +#define ETH_EXT_FEC_20G_BASE_R 0x00001000 +#define ETH_EXT_FEC_25G_NONE 0x00002000 +#define ETH_EXT_FEC_25G_BASE_R 0x00004000 +#define ETH_EXT_FEC_25G_RS528 0x00008000 +#define ETH_EXT_FEC_40G_NONE 0x00010000 +#define ETH_EXT_FEC_40G_BASE_R 0x00020000 +#define ETH_EXT_FEC_50G_NONE 0x00040000 +#define ETH_EXT_FEC_50G_BASE_R 0x00080000 +#define ETH_EXT_FEC_50G_RS528 0x00100000 +#define ETH_EXT_FEC_50G_RS544 0x00200000 +#define ETH_EXT_FEC_100G_NONE 0x00400000 +#define ETH_EXT_FEC_100G_BASE_R 0x00800000 +#define ETH_EXT_FEC_100G_RS528 0x01000000 +#define ETH_EXT_FEC_100G_RS544 0x02000000 + + u32 extended_speed; +#define ETH_EXT_SPEED_MASK 0x0000ffff +#define ETH_EXT_SPEED_OFFSET 0 +#define ETH_EXT_SPEED_AN 0x00000001 +#define ETH_EXT_SPEED_1G 0x00000002 +#define ETH_EXT_SPEED_10G 0x00000004 +#define ETH_EXT_SPEED_20G 0x00000008 +#define ETH_EXT_SPEED_25G 0x00000010 +#define ETH_EXT_SPEED_40G 0x00000020 +#define ETH_EXT_SPEED_50G_BASE_R 0x00000040 +#define ETH_EXT_SPEED_50G_BASE_R2 0x00000080 +#define ETH_EXT_SPEED_100G_BASE_R2 0x00000100 +#define ETH_EXT_SPEED_100G_BASE_R4 0x00000200 +#define ETH_EXT_SPEED_100G_BASE_P4 0x00000400 +#define ETH_EXT_ADV_SPEED_MASK 0xffff0000 +#define ETH_EXT_ADV_SPEED_OFFSET 16 +#define ETH_EXT_ADV_SPEED_RESERVED 0x00010000 +#define ETH_EXT_ADV_SPEED_1G 0x00020000 +#define ETH_EXT_ADV_SPEED_10G 0x00040000 +#define ETH_EXT_ADV_SPEED_20G 0x00080000 +#define ETH_EXT_ADV_SPEED_25G 0x00100000 +#define ETH_EXT_ADV_SPEED_40G 0x00200000 +#define ETH_EXT_ADV_SPEED_50G_BASE_R 0x00400000 +#define ETH_EXT_ADV_SPEED_50G_BASE_R2 0x00800000 +#define ETH_EXT_ADV_SPEED_100G_BASE_R2 0x01000000 +#define ETH_EXT_ADV_SPEED_100G_BASE_R4 0x02000000 +#define ETH_EXT_ADV_SPEED_100G_BASE_P4 0x04000000 }; struct port_mf_cfg { @@ -12571,6 +12619,7 @@ struct public_drv_mb { #define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0 #define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002 #define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL 0x00000004 +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL 0x00000008 #define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000 /* DRV_MSG_CODE_DEBUG_DATA_SEND parameters */ @@ -12660,6 +12709,7 @@ struct public_drv_mb { #define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ BIT(0) #define FW_MB_PARAM_FEATURE_SUPPORT_EEE BIT(1) #define FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL BIT(5) +#define FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL BIT(6) #define FW_MB_PARAM_FEATURE_SUPPORT_VLINK BIT(16) #define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR BIT(0) @@ -13174,7 +13224,40 @@ struct nvm_cfg1_port { u32 mnm_100g_ctrl; u32 mnm_100g_misc; - u32 reserved[116]; + u32 temperature; + u32 ext_phy_cfg1; + + u32 extended_speed; +#define NVM_CFG1_PORT_EXTENDED_SPEED_MASK 0x0000ffff +#define 
NVM_CFG1_PORT_EXTENDED_SPEED_OFFSET 0 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN 0x1 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G 0x2 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G 0x4 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G 0x8 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G 0x10 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G 0x20 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R 0x40 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2 0x80 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2 0x100 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4 0x200 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4 0x400 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_MASK 0xffff0000 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_OFFSET 16 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED 0x1 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G 0x2 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G 0x4 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G 0x8 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G 0x10 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G 0x20 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R 0x40 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2 0x80 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2 0x100 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4 0x200 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4 0x400 + + u32 extended_fec_mode; + + u32 reserved[112]; }; struct nvm_cfg1_func { diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 4fe66cf60f24..2558cb680db3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -82,6 +82,85 @@ struct qed_mfw_speed_map { .arr_size = ARRAY_SIZE(arr), \ } +static const u32 qed_mfw_ext_1g[] __initconst = { + ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + ETHTOOL_LINK_MODE_1000baseX_Full_BIT, +}; + +static const u32 qed_mfw_ext_10g[] __initconst = { + ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, +}; + +static const u32 qed_mfw_ext_20g[] __initconst = { + ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, +}; + +static const u32 qed_mfw_ext_25g[] __initconst = { + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, +}; + +static const u32 qed_mfw_ext_40g[] __initconst = { + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, +}; + +static const u32 qed_mfw_ext_50g_base_r[] __initconst = { + ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, +}; + +static const u32 qed_mfw_ext_50g_base_r2[] __initconst = { + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, +}; + +static const u32 qed_mfw_ext_100g_base_r2[] __initconst = { + ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, + 
ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, +}; + +static const u32 qed_mfw_ext_100g_base_r4[] __initconst = { + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, +}; + +static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = { + QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g), + QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g), + QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_20G, qed_mfw_ext_20g), + QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g), + QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g), + QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R, + qed_mfw_ext_50g_base_r), + QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R2, + qed_mfw_ext_50g_base_r2), + QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R2, + qed_mfw_ext_100g_base_r2), + QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R4, + qed_mfw_ext_100g_base_r4), +}; + static const u32 qed_mfw_legacy_1g[] __initconst = { ETHTOOL_LINK_MODE_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, @@ -158,6 +237,9 @@ static void __init qed_mfw_speed_maps_init(void) { u32 i; + for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) + qed_mfw_speed_map_populate(qed_mfw_ext_maps + i); + for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) qed_mfw_speed_map_populate(qed_mfw_legacy_maps + i); } @@ -1553,6 +1635,147 @@ static bool qed_can_link_change(struct qed_dev *cdev) return true; } +static void qed_set_ext_speed_params(struct qed_mcp_link_params *link_params, + const struct qed_link_params *params) +{ + struct qed_mcp_link_speed_params *ext_speed = &link_params->ext_speed; + const struct qed_mfw_speed_map *map; + u32 i; + + if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG) + ext_speed->autoneg = !!params->autoneg; + + if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { + ext_speed->advertised_speeds = 0; + + for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) { + map = qed_mfw_ext_maps + i; + + if (linkmode_intersects(params->adv_speeds, map->caps)) + ext_speed->advertised_speeds |= map->mfw_val; + } + } + + if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) { + switch (params->forced_speed) { + case SPEED_1000: + ext_speed->forced_speed = QED_EXT_SPEED_1G; + break; + case SPEED_10000: + ext_speed->forced_speed = QED_EXT_SPEED_10G; + break; + case SPEED_20000: + ext_speed->forced_speed = QED_EXT_SPEED_20G; + break; + case SPEED_25000: + ext_speed->forced_speed = QED_EXT_SPEED_25G; + break; + case SPEED_40000: + ext_speed->forced_speed = QED_EXT_SPEED_40G; + break; + case SPEED_50000: + ext_speed->forced_speed = QED_EXT_SPEED_50G_R | + QED_EXT_SPEED_50G_R2; + break; + case SPEED_100000: + ext_speed->forced_speed = QED_EXT_SPEED_100G_R2 | + QED_EXT_SPEED_100G_R4 | + QED_EXT_SPEED_100G_P4; + break; + default: + break; + } + } + + if (!(params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG)) + return; + + switch (params->forced_speed) { + case SPEED_25000: + switch (params->fec) { + case FEC_FORCE_MODE_NONE: + link_params->ext_fec_mode = ETH_EXT_FEC_25G_NONE; + break; + case FEC_FORCE_MODE_FIRECODE: + link_params->ext_fec_mode = ETH_EXT_FEC_25G_BASE_R; + break; + case FEC_FORCE_MODE_RS: + link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528; + break; + case FEC_FORCE_MODE_AUTO: + link_params->ext_fec_mode = 
ETH_EXT_FEC_25G_RS528 | + ETH_EXT_FEC_25G_BASE_R | + ETH_EXT_FEC_25G_NONE; + break; + default: + break; + } + + break; + case SPEED_40000: + switch (params->fec) { + case FEC_FORCE_MODE_NONE: + link_params->ext_fec_mode = ETH_EXT_FEC_40G_NONE; + break; + case FEC_FORCE_MODE_FIRECODE: + link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R; + break; + case FEC_FORCE_MODE_AUTO: + link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R | + ETH_EXT_FEC_40G_NONE; + break; + default: + break; + } + + break; + case SPEED_50000: + switch (params->fec) { + case FEC_FORCE_MODE_NONE: + link_params->ext_fec_mode = ETH_EXT_FEC_50G_NONE; + break; + case FEC_FORCE_MODE_FIRECODE: + link_params->ext_fec_mode = ETH_EXT_FEC_50G_BASE_R; + break; + case FEC_FORCE_MODE_RS: + link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528; + break; + case FEC_FORCE_MODE_AUTO: + link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528 | + ETH_EXT_FEC_50G_BASE_R | + ETH_EXT_FEC_50G_NONE; + break; + default: + break; + } + + break; + case SPEED_100000: + switch (params->fec) { + case FEC_FORCE_MODE_NONE: + link_params->ext_fec_mode = ETH_EXT_FEC_100G_NONE; + break; + case FEC_FORCE_MODE_FIRECODE: + link_params->ext_fec_mode = ETH_EXT_FEC_100G_BASE_R; + break; + case FEC_FORCE_MODE_RS: + link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528; + break; + case FEC_FORCE_MODE_AUTO: + link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528 | + ETH_EXT_FEC_100G_BASE_R | + ETH_EXT_FEC_100G_NONE; + break; + default: + break; + } + + break; + default: + break; + } +} + static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) { struct qed_mcp_link_params *link_params; @@ -1605,6 +1828,9 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) speed->forced_speed = params->forced_speed; + if (qed_mcp_is_ext_speed_supported(hwfn)) + qed_set_ext_speed_params(link_params, params); + if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) { if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) link_params->pause.autoneg = true; @@ -1682,7 +1908,6 @@ static int qed_get_port_type(u32 media_type) case MEDIA_SFP_1G_FIBER: case MEDIA_XFP_FIBER: case MEDIA_MODULE_FIBER: - case MEDIA_KR: port_type = PORT_FIBRE; break; case MEDIA_DA_TWINAX: @@ -1691,6 +1916,7 @@ static int qed_get_port_type(u32 media_type) case MEDIA_BASE_T: port_type = PORT_TP; break; + case MEDIA_KR: case MEDIA_NOT_PRESENT: port_type = PORT_NONE; break; @@ -1990,9 +2216,32 @@ static void qed_fill_link(struct qed_hwfn *hwfn, if (link.link_up) if_link->link_up = true; - /* TODO - at the moment assume supported and advertised speed equal */ - if (link_caps.default_speed_autoneg) - phylink_set(if_link->supported_caps, Autoneg); + if (IS_PF(hwfn->cdev) && qed_mcp_is_ext_speed_supported(hwfn)) { + if (link_caps.default_ext_autoneg) + phylink_set(if_link->supported_caps, Autoneg); + + linkmode_copy(if_link->advertised_caps, if_link->supported_caps); + + if (params.ext_speed.autoneg) + phylink_set(if_link->advertised_caps, Autoneg); + else + phylink_clear(if_link->advertised_caps, Autoneg); + + qed_fill_link_capability(hwfn, ptt, + params.ext_speed.advertised_speeds, + if_link->advertised_caps); + } else { + if (link_caps.default_speed_autoneg) + phylink_set(if_link->supported_caps, Autoneg); + + linkmode_copy(if_link->advertised_caps, if_link->supported_caps); + + if (params.speed.autoneg) + phylink_set(if_link->advertised_caps, Autoneg); + else + phylink_clear(if_link->advertised_caps, 
Autoneg); + } + if (params.pause.autoneg || (params.pause.forced_rx && params.pause.forced_tx)) phylink_set(if_link->supported_caps, Asym_Pause); @@ -2000,13 +2249,6 @@ static void qed_fill_link(struct qed_hwfn *hwfn, params.pause.forced_tx) phylink_set(if_link->supported_caps, Pause); - linkmode_copy(if_link->advertised_caps, if_link->supported_caps); - - if (params.speed.autoneg) - phylink_set(if_link->advertised_caps, Autoneg); - else - phylink_clear(if_link->advertised_caps, Autoneg); - if_link->sup_fec = link_caps.fec_default; if_link->active_fec = params.fec; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 78c0d3a2d164..988d84564849 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1476,6 +1476,7 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) struct qed_mcp_mb_params mb_params; struct eth_phy_cfg phy_cfg; u32 cmd, fec_bit = 0; + u32 val, ext_speed; int rc = 0; /* Set the shmem configuration according to params */ @@ -1522,16 +1523,77 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) SET_MFW_FIELD(phy_cfg.fec_mode, FEC_FORCE_MODE, fec_bit); } + if (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) { + ext_speed = 0; + if (params->ext_speed.autoneg) + ext_speed |= ETH_EXT_SPEED_AN; + + val = params->ext_speed.forced_speed; + if (val & QED_EXT_SPEED_1G) + ext_speed |= ETH_EXT_SPEED_1G; + if (val & QED_EXT_SPEED_10G) + ext_speed |= ETH_EXT_SPEED_10G; + if (val & QED_EXT_SPEED_20G) + ext_speed |= ETH_EXT_SPEED_20G; + if (val & QED_EXT_SPEED_25G) + ext_speed |= ETH_EXT_SPEED_25G; + if (val & QED_EXT_SPEED_40G) + ext_speed |= ETH_EXT_SPEED_40G; + if (val & QED_EXT_SPEED_50G_R) + ext_speed |= ETH_EXT_SPEED_50G_BASE_R; + if (val & QED_EXT_SPEED_50G_R2) + ext_speed |= ETH_EXT_SPEED_50G_BASE_R2; + if (val & QED_EXT_SPEED_100G_R2) + ext_speed |= ETH_EXT_SPEED_100G_BASE_R2; + if (val & QED_EXT_SPEED_100G_R4) + ext_speed |= ETH_EXT_SPEED_100G_BASE_R4; + if (val & QED_EXT_SPEED_100G_P4) + ext_speed |= ETH_EXT_SPEED_100G_BASE_P4; + + SET_MFW_FIELD(phy_cfg.extended_speed, ETH_EXT_SPEED, + ext_speed); + + ext_speed = 0; + + val = params->ext_speed.advertised_speeds; + if (val & QED_EXT_SPEED_MASK_1G) + ext_speed |= ETH_EXT_ADV_SPEED_1G; + if (val & QED_EXT_SPEED_MASK_10G) + ext_speed |= ETH_EXT_ADV_SPEED_10G; + if (val & QED_EXT_SPEED_MASK_20G) + ext_speed |= ETH_EXT_ADV_SPEED_20G; + if (val & QED_EXT_SPEED_MASK_25G) + ext_speed |= ETH_EXT_ADV_SPEED_25G; + if (val & QED_EXT_SPEED_MASK_40G) + ext_speed |= ETH_EXT_ADV_SPEED_40G; + if (val & QED_EXT_SPEED_MASK_50G_R) + ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R; + if (val & QED_EXT_SPEED_MASK_50G_R2) + ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R2; + if (val & QED_EXT_SPEED_MASK_100G_R2) + ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R2; + if (val & QED_EXT_SPEED_MASK_100G_R4) + ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R4; + if (val & QED_EXT_SPEED_MASK_100G_P4) + ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_P4; + + phy_cfg.extended_speed |= ext_speed; + + SET_MFW_FIELD(phy_cfg.fec_mode, FEC_EXTENDED_MODE, + params->ext_fec_mode); + } + p_hwfn->b_drv_link_init = b_up; if (b_up) { DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, - "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, FEC 0x%08x\n", + "Configuring Link: Speed 0x%08x, Pause 0x%08x, Adv. Speed 0x%08x, Loopback 0x%08x, FEC 0x%08x, Ext. 
Speed 0x%08x\n", phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed, - phy_cfg.loopback_mode, phy_cfg.fec_mode); + phy_cfg.loopback_mode, phy_cfg.fec_mode, + phy_cfg.extended_speed); } else { - DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, - "Resetting link\n"); + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Resetting link\n"); } memset(&mb_params, 0, sizeof(mb_params)); @@ -3838,6 +3900,10 @@ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK | DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL; + if (QED_IS_E5(p_hwfn->cdev)) + features |= + DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL; + return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT, features, &mcp_resp, &mcp_param); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index ea956c43e596..8edb450d0abf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -17,8 +17,31 @@ struct qed_mcp_link_speed_params { bool autoneg; + u32 advertised_speeds; +#define QED_EXT_SPEED_MASK_RES 0x1 +#define QED_EXT_SPEED_MASK_1G 0x2 +#define QED_EXT_SPEED_MASK_10G 0x4 +#define QED_EXT_SPEED_MASK_20G 0x8 +#define QED_EXT_SPEED_MASK_25G 0x10 +#define QED_EXT_SPEED_MASK_40G 0x20 +#define QED_EXT_SPEED_MASK_50G_R 0x40 +#define QED_EXT_SPEED_MASK_50G_R2 0x80 +#define QED_EXT_SPEED_MASK_100G_R2 0x100 +#define QED_EXT_SPEED_MASK_100G_R4 0x200 +#define QED_EXT_SPEED_MASK_100G_P4 0x400 + u32 forced_speed; /* In Mb/s */ +#define QED_EXT_SPEED_1G 0x1 +#define QED_EXT_SPEED_10G 0x2 +#define QED_EXT_SPEED_20G 0x4 +#define QED_EXT_SPEED_25G 0x8 +#define QED_EXT_SPEED_40G 0x10 +#define QED_EXT_SPEED_50G_R 0x20 +#define QED_EXT_SPEED_50G_R2 0x40 +#define QED_EXT_SPEED_100G_R2 0x80 +#define QED_EXT_SPEED_100G_R4 0x100 +#define QED_EXT_SPEED_100G_P4 0x200 }; struct qed_mcp_link_pause_params { @@ -39,6 +62,9 @@ struct qed_mcp_link_params { u32 loopback_mode; struct qed_link_eee_params eee; u32 fec; + + struct qed_mcp_link_speed_params ext_speed; + u32 ext_fec_mode; }; struct qed_mcp_link_capabilities { @@ -48,6 +74,11 @@ struct qed_mcp_link_capabilities { enum qed_mcp_eee_mode default_eee; u32 eee_lpi_timer; u8 eee_speed_caps; + + u32 default_ext_speed_caps; + u32 default_ext_autoneg; + u32 default_ext_speed; + u32 default_ext_fec; }; struct qed_mcp_link_state { @@ -750,6 +781,20 @@ struct qed_drv_tlv_hdr { u8 tlv_flags; }; +/** + * qed_mcp_is_ext_speed_supported() - Check if management firmware supports + * extended speeds. + * @p_hwfn: HW device data. + * + * Return: true if supported, false otherwise. + */ +static inline bool +qed_mcp_is_ext_speed_supported(const struct qed_hwfn *p_hwfn) +{ + return !!(p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL); +} + /** * @brief Initialize the interface with the MCP * diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 2e780159a5fb..a5c6854343e6 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -594,6 +594,7 @@ enum qed_hw_err_type { enum qed_dev_type { QED_DEV_TYPE_BB, QED_DEV_TYPE_AH, + QED_DEV_TYPE_E5, }; struct qed_dev_info { -- cgit v1.2.3 From af0e5f1f47d8b7139ee446bc2367f74e4f034202 Mon Sep 17 00:00:00 2001 From: Amit Kucheria Date: Wed, 1 Jul 2020 21:44:14 +0530 Subject: thermal/drivers/clock_cooling: Remove clock_cooling code clock_cooling has no in-kernel users. It has never found any use in drivers as far as I can tell. Remove the code. 
Signed-off-by: Amit Kucheria Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/aa5d5ac2589cf7b14ece882130731b4a916849a6.1593619943.git.amit.kucheria@linaro.org --- drivers/thermal/Kconfig | 10 - drivers/thermal/Makefile | 3 - drivers/thermal/clock_cooling.c | 445 ---------------------------------------- include/linux/clock_cooling.h | 57 ----- 4 files changed, 515 deletions(-) delete mode 100644 drivers/thermal/clock_cooling.c delete mode 100644 include/linux/clock_cooling.h (limited to 'include') diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index 07983bef8d6a..13991d68c844 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -190,16 +190,6 @@ config CPU_IDLE_THERMAL idle cycle. endif -config CLOCK_THERMAL - bool "Generic clock cooling support" - depends on COMMON_CLK - depends on PM_OPP - help - This entry implements the generic clock cooling mechanism through - frequency clipping. Typically used to cool off co-processors. The - device that is configured to use this cooling mechanism will be - controlled to reduce clock frequency whenever temperature is high. - config DEVFREQ_THERMAL bool "Generic device cooling support" depends on PM_DEVFREQ diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile index 589f6fb0d381..b8d96d26f9ec 100644 --- a/drivers/thermal/Makefile +++ b/drivers/thermal/Makefile @@ -25,9 +25,6 @@ thermal_sys-$(CONFIG_THERMAL_GOV_POWER_ALLOCATOR) += gov_power_allocator.o thermal_sys-$(CONFIG_CPU_FREQ_THERMAL) += cpufreq_cooling.o thermal_sys-$(CONFIG_CPU_IDLE_THERMAL) += cpuidle_cooling.o -# clock cooling -thermal_sys-$(CONFIG_CLOCK_THERMAL) += clock_cooling.o - # devfreq cooling thermal_sys-$(CONFIG_DEVFREQ_THERMAL) += devfreq_cooling.o diff --git a/drivers/thermal/clock_cooling.c b/drivers/thermal/clock_cooling.c deleted file mode 100644 index 56cb1f46a428..000000000000 --- a/drivers/thermal/clock_cooling.c +++ /dev/null @@ -1,445 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * drivers/thermal/clock_cooling.c - * - * Copyright (C) 2014 Eduardo Valentin - * - * Copyright (C) 2013 Texas Instruments Inc. - * Contact: Eduardo Valentin - * - * Highly based on cpufreq_cooling.c. - * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com) - * Copyright (C) 2012 Amit Daniel - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/** - * struct clock_cooling_device - data for cooling device with clock - * @id: unique integer value corresponding to each clock_cooling_device - * registered. - * @dev: struct device pointer to the device being used to cool off using - * clock frequencies. - * @cdev: thermal_cooling_device pointer to keep track of the - * registered cooling device. - * @clk_rate_change_nb: reference to notifier block used to receive clock - * rate changes. - * @freq_table: frequency table used to keep track of available frequencies. - * @clock_state: integer value representing the current state of clock - * cooling devices. - * @clock_val: integer value representing the absolute value of the clipped - * frequency. - * @clk: struct clk reference used to enforce clock limits. - * @lock: mutex lock to protect this struct. - * - * This structure is required for keeping information of each - * clock_cooling_device registered. In order to prevent corruption of this a - * mutex @lock is used. 
- */ -struct clock_cooling_device { - int id; - struct device *dev; - struct thermal_cooling_device *cdev; - struct notifier_block clk_rate_change_nb; - struct cpufreq_frequency_table *freq_table; - unsigned long clock_state; - unsigned long clock_val; - struct clk *clk; - struct mutex lock; /* lock to protect the content of this struct */ -}; -#define to_clock_cooling_device(x) \ - container_of(x, struct clock_cooling_device, clk_rate_change_nb) -static DEFINE_IDA(clock_ida); - -/* Below code defines functions to be used for clock as cooling device */ - -enum clock_cooling_property { - GET_LEVEL, - GET_FREQ, - GET_MAXL, -}; - -/** - * clock_cooling_get_property - fetch a property of interest for a give cpu. - * @ccdev: clock cooling device reference - * @input: query parameter - * @output: query return - * @property: type of query (frequency, level, max level) - * - * This is the common function to - * 1. get maximum clock cooling states - * 2. translate frequency to cooling state - * 3. translate cooling state to frequency - * Note that the code may be not in good shape - * but it is written in this way in order to: - * a) reduce duplicate code as most of the code can be shared. - * b) make sure the logic is consistent when translating between - * cooling states and frequencies. - * - * Return: 0 on success, -EINVAL when invalid parameters are passed. - */ -static int clock_cooling_get_property(struct clock_cooling_device *ccdev, - unsigned long input, - unsigned long *output, - enum clock_cooling_property property) -{ - int i; - unsigned long max_level = 0, level = 0; - unsigned int freq = CPUFREQ_ENTRY_INVALID; - int descend = -1; - struct cpufreq_frequency_table *pos, *table = ccdev->freq_table; - - if (!output) - return -EINVAL; - - if (!table) - return -EINVAL; - - cpufreq_for_each_valid_entry(pos, table) { - /* ignore duplicate entry */ - if (freq == pos->frequency) - continue; - - /* get the frequency order */ - if (freq != CPUFREQ_ENTRY_INVALID && descend == -1) - descend = freq > pos->frequency; - - freq = pos->frequency; - max_level++; - } - - /* No valid cpu frequency entry */ - if (max_level == 0) - return -EINVAL; - - /* max_level is an index, not a counter */ - max_level--; - - /* get max level */ - if (property == GET_MAXL) { - *output = max_level; - return 0; - } - - if (property == GET_FREQ) - level = descend ? input : (max_level - input); - - i = 0; - cpufreq_for_each_valid_entry(pos, table) { - /* ignore duplicate entry */ - if (freq == pos->frequency) - continue; - - /* now we have a valid frequency entry */ - freq = pos->frequency; - - if (property == GET_LEVEL && (unsigned int)input == freq) { - /* get level by frequency */ - *output = descend ? i : (max_level - i); - return 0; - } - if (property == GET_FREQ && level == i) { - /* get frequency by level */ - *output = freq; - return 0; - } - i++; - } - - return -EINVAL; -} - -/** - * clock_cooling_get_level - return the cooling level of given clock cooling. - * @cdev: reference of a thermal cooling device of used as clock cooling device - * @freq: the frequency of interest - * - * This function will match the cooling level corresponding to the - * requested @freq and return it. - * - * Return: The matched cooling level on success or THERMAL_CSTATE_INVALID - * otherwise. 
- */ -unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev, - unsigned long freq) -{ - struct clock_cooling_device *ccdev = cdev->devdata; - unsigned long val; - - if (clock_cooling_get_property(ccdev, (unsigned long)freq, &val, - GET_LEVEL)) - return THERMAL_CSTATE_INVALID; - - return val; -} -EXPORT_SYMBOL_GPL(clock_cooling_get_level); - -/** - * clock_cooling_get_frequency - get the absolute value of frequency from level. - * @ccdev: clock cooling device reference - * @level: cooling level - * - * This function matches cooling level with frequency. Based on a cooling level - * of frequency, equals cooling state of cpu cooling device, it will return - * the corresponding frequency. - * e.g level=0 --> 1st MAX FREQ, level=1 ---> 2nd MAX FREQ, .... etc - * - * Return: 0 on error, the corresponding frequency otherwise. - */ -static unsigned long -clock_cooling_get_frequency(struct clock_cooling_device *ccdev, - unsigned long level) -{ - int ret = 0; - unsigned long freq; - - ret = clock_cooling_get_property(ccdev, level, &freq, GET_FREQ); - if (ret) - return 0; - - return freq; -} - -/** - * clock_cooling_apply - function to apply frequency clipping. - * @ccdev: clock_cooling_device pointer containing frequency clipping data. - * @cooling_state: value of the cooling state. - * - * Function used to make sure the clock layer is aware of current thermal - * limits. The limits are applied by updating the clock rate in case it is - * higher than the corresponding frequency based on the requested cooling_state. - * - * Return: 0 on success, an error code otherwise (-EINVAL in case wrong - * cooling state). - */ -static int clock_cooling_apply(struct clock_cooling_device *ccdev, - unsigned long cooling_state) -{ - unsigned long clip_freq, cur_freq; - int ret = 0; - - /* Here we write the clipping */ - /* Check if the old cooling action is same as new cooling action */ - if (ccdev->clock_state == cooling_state) - return 0; - - clip_freq = clock_cooling_get_frequency(ccdev, cooling_state); - if (!clip_freq) - return -EINVAL; - - cur_freq = clk_get_rate(ccdev->clk); - - mutex_lock(&ccdev->lock); - ccdev->clock_state = cooling_state; - ccdev->clock_val = clip_freq; - /* enforce clock level */ - if (cur_freq > clip_freq) - ret = clk_set_rate(ccdev->clk, clip_freq); - mutex_unlock(&ccdev->lock); - - return ret; -} - -/** - * clock_cooling_clock_notifier - notifier callback on clock rate changes. - * @nb: struct notifier_block * with callback info. - * @event: value showing clock event for which this function invoked. - * @data: callback-specific data - * - * Callback to hijack the notification on clock transition. - * Every time there is a clock change, we intercept all pre change events - * and block the transition in case the new rate infringes thermal limits. - * - * Return: NOTIFY_DONE (success) or NOTIFY_BAD (new_rate > thermal limit). - */ -static int clock_cooling_clock_notifier(struct notifier_block *nb, - unsigned long event, void *data) -{ - struct clk_notifier_data *ndata = data; - struct clock_cooling_device *ccdev = to_clock_cooling_device(nb); - - switch (event) { - case PRE_RATE_CHANGE: - /* - * checks on current state - * TODO: current method is not best we can find as it - * allows possibly voltage transitions, in case DVFS - * layer is also hijacking clock pre notifications. 
- */ - if (ndata->new_rate > ccdev->clock_val) - return NOTIFY_BAD; - /* fall through */ - case POST_RATE_CHANGE: - case ABORT_RATE_CHANGE: - default: - return NOTIFY_DONE; - } -} - -/* clock cooling device thermal callback functions are defined below */ - -/** - * clock_cooling_get_max_state - callback function to get the max cooling state. - * @cdev: thermal cooling device pointer. - * @state: fill this variable with the max cooling state. - * - * Callback for the thermal cooling device to return the clock - * max cooling state. - * - * Return: 0 on success, an error code otherwise. - */ -static int clock_cooling_get_max_state(struct thermal_cooling_device *cdev, - unsigned long *state) -{ - struct clock_cooling_device *ccdev = cdev->devdata; - unsigned long count = 0; - int ret; - - ret = clock_cooling_get_property(ccdev, 0, &count, GET_MAXL); - if (!ret) - *state = count; - - return ret; -} - -/** - * clock_cooling_get_cur_state - function to get the current cooling state. - * @cdev: thermal cooling device pointer. - * @state: fill this variable with the current cooling state. - * - * Callback for the thermal cooling device to return the clock - * current cooling state. - * - * Return: 0 (success) - */ -static int clock_cooling_get_cur_state(struct thermal_cooling_device *cdev, - unsigned long *state) -{ - struct clock_cooling_device *ccdev = cdev->devdata; - - *state = ccdev->clock_state; - - return 0; -} - -/** - * clock_cooling_set_cur_state - function to set the current cooling state. - * @cdev: thermal cooling device pointer. - * @state: set this variable to the current cooling state. - * - * Callback for the thermal cooling device to change the clock cooling - * current cooling state. - * - * Return: 0 on success, an error code otherwise. - */ -static int clock_cooling_set_cur_state(struct thermal_cooling_device *cdev, - unsigned long state) -{ - struct clock_cooling_device *clock_device = cdev->devdata; - - return clock_cooling_apply(clock_device, state); -} - -/* Bind clock callbacks to thermal cooling device ops */ -static struct thermal_cooling_device_ops const clock_cooling_ops = { - .get_max_state = clock_cooling_get_max_state, - .get_cur_state = clock_cooling_get_cur_state, - .set_cur_state = clock_cooling_set_cur_state, -}; - -/** - * clock_cooling_register - function to create clock cooling device. - * @dev: struct device pointer to the device used as clock cooling device. - * @clock_name: string containing the clock used as cooling mechanism. - * - * This interface function registers the clock cooling device with the name - * "thermal-clock-%x". The cooling device is based on clock frequencies. - * The struct device is assumed to be capable of DVFS transitions. - * The OPP layer is used to fetch and fill the available frequencies for - * the referred device. The ordered frequency table is used to control - * the clock cooling device cooling states and to limit clock transitions - * based on the cooling state requested by the thermal framework. - * - * Return: a valid struct thermal_cooling_device pointer on success, - * on failure, it returns a corresponding ERR_PTR(). 
- */ -struct thermal_cooling_device * -clock_cooling_register(struct device *dev, const char *clock_name) -{ - struct thermal_cooling_device *cdev; - struct clock_cooling_device *ccdev = NULL; - char dev_name[THERMAL_NAME_LENGTH]; - int ret = 0; - - ccdev = devm_kzalloc(dev, sizeof(*ccdev), GFP_KERNEL); - if (!ccdev) - return ERR_PTR(-ENOMEM); - - mutex_init(&ccdev->lock); - ccdev->dev = dev; - ccdev->clk = devm_clk_get(dev, clock_name); - if (IS_ERR(ccdev->clk)) - return ERR_CAST(ccdev->clk); - - ret = ida_simple_get(&clock_ida, 0, 0, GFP_KERNEL); - if (ret < 0) - return ERR_PTR(ret); - ccdev->id = ret; - - snprintf(dev_name, sizeof(dev_name), "thermal-clock-%d", ccdev->id); - - cdev = thermal_cooling_device_register(dev_name, ccdev, - &clock_cooling_ops); - if (IS_ERR(cdev)) { - ida_simple_remove(&clock_ida, ccdev->id); - return ERR_PTR(-EINVAL); - } - ccdev->cdev = cdev; - ccdev->clk_rate_change_nb.notifier_call = clock_cooling_clock_notifier; - - /* Assuming someone has already filled the opp table for this device */ - ret = dev_pm_opp_init_cpufreq_table(dev, &ccdev->freq_table); - if (ret) { - ida_simple_remove(&clock_ida, ccdev->id); - return ERR_PTR(ret); - } - ccdev->clock_state = 0; - ccdev->clock_val = clock_cooling_get_frequency(ccdev, 0); - - clk_notifier_register(ccdev->clk, &ccdev->clk_rate_change_nb); - - return cdev; -} -EXPORT_SYMBOL_GPL(clock_cooling_register); - -/** - * clock_cooling_unregister - function to remove clock cooling device. - * @cdev: thermal cooling device pointer. - * - * This interface function unregisters the "thermal-clock-%x" cooling device. - */ -void clock_cooling_unregister(struct thermal_cooling_device *cdev) -{ - struct clock_cooling_device *ccdev; - - if (!cdev) - return; - - ccdev = cdev->devdata; - - clk_notifier_unregister(ccdev->clk, &ccdev->clk_rate_change_nb); - dev_pm_opp_free_cpufreq_table(ccdev->dev, &ccdev->freq_table); - - thermal_cooling_device_unregister(ccdev->cdev); - ida_simple_remove(&clock_ida, ccdev->id); -} -EXPORT_SYMBOL_GPL(clock_cooling_unregister); diff --git a/include/linux/clock_cooling.h b/include/linux/clock_cooling.h deleted file mode 100644 index 4b0a69863656..000000000000 --- a/include/linux/clock_cooling.h +++ /dev/null @@ -1,57 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * linux/include/linux/clock_cooling.h - * - * Copyright (C) 2014 Eduardo Valentin - * - * Copyright (C) 2013 Texas Instruments Inc. - * Contact: Eduardo Valentin - * - * Highly based on cpufreq_cooling.c. - * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com) - * Copyright (C) 2012 Amit Daniel - */ - -#ifndef __CPU_COOLING_H__ -#define __CPU_COOLING_H__ - -#include -#include -#include - -#ifdef CONFIG_CLOCK_THERMAL -/** - * clock_cooling_register - function to create clock cooling device. - * @dev: struct device pointer to the device used as clock cooling device. - * @clock_name: string containing the clock used as cooling mechanism. - */ -struct thermal_cooling_device * -clock_cooling_register(struct device *dev, const char *clock_name); - -/** - * clock_cooling_unregister - function to remove clock cooling device. - * @cdev: thermal cooling device pointer. 
- */ -void clock_cooling_unregister(struct thermal_cooling_device *cdev); - -unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev, - unsigned long freq); -#else /* !CONFIG_CLOCK_THERMAL */ -static inline struct thermal_cooling_device * -clock_cooling_register(struct device *dev, const char *clock_name) -{ - return NULL; -} -static inline -void clock_cooling_unregister(struct thermal_cooling_device *cdev) -{ -} -static inline -unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev, - unsigned long freq) -{ - return THERMAL_CSTATE_INVALID; -} -#endif /* CONFIG_CLOCK_THERMAL */ - -#endif /* __CPU_COOLING_H__ */ -- cgit v1.2.3 From 1a4ae4138f386600fc539747bb978873299017f8 Mon Sep 17 00:00:00 2001 From: Cristian Ciocaltea Date: Fri, 3 Jul 2020 20:05:08 +0300 Subject: dt-bindings: clock: Add APB, DMAC, GPIO bindings for Actions S500 SoC Add the missing APB, DMAC and GPIO clock bindings constants for Actions Semi S500 SoC. Signed-off-by: Cristian Ciocaltea Link: https://lore.kernel.org/r/67112af4f5bc0cc5e70ce8410feb369cc72972b8.1593788312.git.cristian.ciocaltea@gmail.com Reviewed-by: Manivannan Sadhasivam Acked-by: Rob Herring Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/actions,s500-cmu.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/dt-bindings/clock/actions,s500-cmu.h b/include/dt-bindings/clock/actions,s500-cmu.h index 030981cd2d56..a250a52a6192 100644 --- a/include/dt-bindings/clock/actions,s500-cmu.h +++ b/include/dt-bindings/clock/actions,s500-cmu.h @@ -72,7 +72,12 @@ #define CLK_NAND 52 #define CLK_ECC 53 #define CLK_RMII_REF 54 +#define CLK_GPIO 55 -#define CLK_NR_CLKS (CLK_RMII_REF + 1) +/* system clock (part 2) */ +#define CLK_APB 56 +#define CLK_DMAC 57 + +#define CLK_NR_CLKS (CLK_DMAC + 1) #endif /* __DT_BINDINGS_CLOCK_S500_CMU_H */ -- cgit v1.2.3 From fac1d443a2b73dfb5b277d4e3c202609f0927eb5 Mon Sep 17 00:00:00 2001 From: Cristian Ciocaltea Date: Fri, 3 Jul 2020 20:05:10 +0300 Subject: dt-bindings: reset: Add binding constants for Actions S500 RMU Add device tree binding constants for Actions Semi S500 SoC Reset Management Unit (RMU). Signed-off-by: Cristian Ciocaltea Acked-by: Philipp Zabel Link: https://lore.kernel.org/r/daf615160b3be9f38dcf7926cc82128c9c2d73e3.1593788312.git.cristian.ciocaltea@gmail.com Reviewed-by: Manivannan Sadhasivam Acked-by: Rob Herring Signed-off-by: Stephen Boyd --- include/dt-bindings/reset/actions,s500-reset.h | 67 ++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 include/dt-bindings/reset/actions,s500-reset.h (limited to 'include') diff --git a/include/dt-bindings/reset/actions,s500-reset.h b/include/dt-bindings/reset/actions,s500-reset.h new file mode 100644 index 000000000000..f5d94176d10b --- /dev/null +++ b/include/dt-bindings/reset/actions,s500-reset.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Device Tree binding constants for Actions Semi S500 Reset Management Unit + * + * Copyright (c) 2014 Actions Semi Inc. 
+ * Copyright (c) 2020 Cristian Ciocaltea + */ + +#ifndef __DT_BINDINGS_ACTIONS_S500_RESET_H +#define __DT_BINDINGS_ACTIONS_S500_RESET_H + +#define RESET_DMAC 0 +#define RESET_NORIF 1 +#define RESET_DDR 2 +#define RESET_NANDC 3 +#define RESET_SD0 4 +#define RESET_SD1 5 +#define RESET_PCM1 6 +#define RESET_DE 7 +#define RESET_LCD 8 +#define RESET_SD2 9 +#define RESET_DSI 10 +#define RESET_CSI 11 +#define RESET_BISP 12 +#define RESET_KEY 13 +#define RESET_GPIO 14 +#define RESET_AUDIO 15 +#define RESET_PCM0 16 +#define RESET_VDE 17 +#define RESET_VCE 18 +#define RESET_GPU3D 19 +#define RESET_NIC301 20 +#define RESET_LENS 21 +#define RESET_PERIPHRESET 22 +#define RESET_USB2_0 23 +#define RESET_TVOUT 24 +#define RESET_HDMI 25 +#define RESET_HDCP2TX 26 +#define RESET_UART6 27 +#define RESET_UART0 28 +#define RESET_UART1 29 +#define RESET_UART2 30 +#define RESET_SPI0 31 +#define RESET_SPI1 32 +#define RESET_SPI2 33 +#define RESET_SPI3 34 +#define RESET_I2C0 35 +#define RESET_I2C1 36 +#define RESET_USB3 37 +#define RESET_UART3 38 +#define RESET_UART4 39 +#define RESET_UART5 40 +#define RESET_I2C2 41 +#define RESET_I2C3 42 +#define RESET_ETHERNET 43 +#define RESET_CHIPID 44 +#define RESET_USB2_1 45 +#define RESET_WD0RESET 46 +#define RESET_WD1RESET 47 +#define RESET_WD2RESET 48 +#define RESET_WD3RESET 49 +#define RESET_DBG0RESET 50 +#define RESET_DBG1RESET 51 +#define RESET_DBG2RESET 52 +#define RESET_DBG3RESET 53 + +#endif /* __DT_BINDINGS_ACTIONS_S500_RESET_H */ -- cgit v1.2.3 From e506ea451254ab17e0bf918ca36232fec2a9b10c Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 15 Oct 2019 16:29:32 -0700 Subject: compiler.h: Split {READ,WRITE}_ONCE definitions out into rwonce.h In preparation for allowing architectures to define their own implementation of the READ_ONCE() macro, move the generic {READ,WRITE}_ONCE() definitions out of the unwieldy 'linux/compiler.h' file and into a new 'rwonce.h' header under 'asm-generic'. Acked-by: Peter Zijlstra (Intel) Acked-by: Paul E. McKenney Signed-off-by: Will Deacon --- include/asm-generic/Kbuild | 1 + include/asm-generic/barrier.h | 2 +- include/asm-generic/rwonce.h | 101 ++++++++++++++++++++++++++++++++++++++++++ include/linux/compiler.h | 93 +------------------------------------- 4 files changed, 105 insertions(+), 92 deletions(-) create mode 100644 include/asm-generic/rwonce.h (limited to 'include') diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild index 44ec80e70518..74b0612601dd 100644 --- a/include/asm-generic/Kbuild +++ b/include/asm-generic/Kbuild @@ -45,6 +45,7 @@ mandatory-y += pci.h mandatory-y += percpu.h mandatory-y += pgalloc.h mandatory-y += preempt.h +mandatory-y += rwonce.h mandatory-y += sections.h mandatory-y += serial.h mandatory-y += shmparam.h diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h index 2eacaf7d62f6..8116744bb82c 100644 --- a/include/asm-generic/barrier.h +++ b/include/asm-generic/barrier.h @@ -13,7 +13,7 @@ #ifndef __ASSEMBLY__ -#include +#include #ifndef nop #define nop() asm volatile ("nop") diff --git a/include/asm-generic/rwonce.h b/include/asm-generic/rwonce.h new file mode 100644 index 000000000000..87584379da43 --- /dev/null +++ b/include/asm-generic/rwonce.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Prevent the compiler from merging or refetching reads or writes. The + * compiler is also forbidden from reordering successive instances of + * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some + * particular ordering. 
One way to make the compiler aware of ordering is to + * put the two invocations of READ_ONCE or WRITE_ONCE in different C + * statements. + * + * These two macros will also work on aggregate data types like structs or + * unions. + * + * Their two major use cases are: (1) Mediating communication between + * process-level code and irq/NMI handlers, all running on the same CPU, + * and (2) Ensuring that the compiler does not fold, spindle, or otherwise + * mutilate accesses that either do not require ordering or that interact + * with an explicit memory barrier or atomic instruction that provides the + * required ordering. + */ +#ifndef __ASM_GENERIC_RWONCE_H +#define __ASM_GENERIC_RWONCE_H + +#ifndef __ASSEMBLY__ + +#include +#include +#include + +#include + +/* + * Yes, this permits 64-bit accesses on 32-bit architectures. These will + * actually be atomic in some cases (namely Armv7 + LPAE), but for others we + * rely on the access being split into 2x32-bit accesses for a 32-bit quantity + * (e.g. a virtual address) and a strong prevailing wind. + */ +#define compiletime_assert_rwonce_type(t) \ + compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long), \ + "Unsupported access size for {READ,WRITE}_ONCE().") + +/* + * Use __READ_ONCE() instead of READ_ONCE() if you do not require any + * atomicity or dependency ordering guarantees. Note that this may result + * in tears! + */ +#define __READ_ONCE(x) (*(const volatile __unqual_scalar_typeof(x) *)&(x)) + +#define __READ_ONCE_SCALAR(x) \ +({ \ + __unqual_scalar_typeof(x) __x = __READ_ONCE(x); \ + smp_read_barrier_depends(); \ + (typeof(x))__x; \ +}) + +#define READ_ONCE(x) \ +({ \ + compiletime_assert_rwonce_type(x); \ + __READ_ONCE_SCALAR(x); \ +}) + +#define __WRITE_ONCE(x, val) \ +do { \ + *(volatile typeof(x) *)&(x) = (val); \ +} while (0) + +#define WRITE_ONCE(x, val) \ +do { \ + compiletime_assert_rwonce_type(x); \ + __WRITE_ONCE(x, val); \ +} while (0) + +static __no_sanitize_or_inline +unsigned long __read_once_word_nocheck(const void *addr) +{ + return __READ_ONCE(*(unsigned long *)addr); +} + +/* + * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a + * word from memory atomically but without telling KASAN/KCSAN. This is + * usually used by unwinding code when walking the stack of a running process. + */ +#define READ_ONCE_NOCHECK(x) \ +({ \ + unsigned long __x; \ + compiletime_assert(sizeof(x) == sizeof(__x), \ + "Unsupported access size for READ_ONCE_NOCHECK()."); \ + __x = __read_once_word_nocheck(&(x)); \ + smp_read_barrier_depends(); \ + (typeof(x))__x; \ +}) + +static __no_kasan_or_inline +unsigned long read_word_at_a_time(const void *addr) +{ + kasan_check_read(addr, 1); + return *(unsigned long *)addr; +} + +#endif /* __ASSEMBLY__ */ +#endif /* __ASM_GENERIC_RWONCE_H */ diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 204e76856435..f075a3df4fe2 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -230,28 +230,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__) #endif -/* - * Prevent the compiler from merging or refetching reads or writes. The - * compiler is also forbidden from reordering successive instances of - * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some - * particular ordering. One way to make the compiler aware of ordering is to - * put the two invocations of READ_ONCE or WRITE_ONCE in different C - * statements. 
- * - * These two macros will also work on aggregate data types like structs or - * unions. - * - * Their two major use cases are: (1) Mediating communication between - * process-level code and irq/NMI handlers, all running on the same CPU, - * and (2) Ensuring that the compiler does not fold, spindle, or otherwise - * mutilate accesses that either do not require ordering or that interact - * with an explicit memory barrier or atomic instruction that provides the - * required ordering. - */ -#include -#include -#include - /** * data_race - mark an expression as containing intentional data races * @@ -272,65 +250,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, __v; \ }) -/* - * Use __READ_ONCE() instead of READ_ONCE() if you do not require any - * atomicity or dependency ordering guarantees. Note that this may result - * in tears! - */ -#define __READ_ONCE(x) (*(const volatile __unqual_scalar_typeof(x) *)&(x)) - -#define __READ_ONCE_SCALAR(x) \ -({ \ - __unqual_scalar_typeof(x) __x = __READ_ONCE(x); \ - smp_read_barrier_depends(); \ - (typeof(x))__x; \ -}) - -#define READ_ONCE(x) \ -({ \ - compiletime_assert_rwonce_type(x); \ - __READ_ONCE_SCALAR(x); \ -}) - -#define __WRITE_ONCE(x, val) \ -do { \ - *(volatile typeof(x) *)&(x) = (val); \ -} while (0) - -#define WRITE_ONCE(x, val) \ -do { \ - compiletime_assert_rwonce_type(x); \ - __WRITE_ONCE(x, val); \ -} while (0) - -static __no_sanitize_or_inline -unsigned long __read_once_word_nocheck(const void *addr) -{ - return __READ_ONCE(*(unsigned long *)addr); -} - -/* - * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a - * word from memory atomically but without telling KASAN/KCSAN. This is - * usually used by unwinding code when walking the stack of a running process. - */ -#define READ_ONCE_NOCHECK(x) \ -({ \ - unsigned long __x; \ - compiletime_assert(sizeof(x) == sizeof(__x), \ - "Unsupported access size for READ_ONCE_NOCHECK()."); \ - __x = __read_once_word_nocheck(&(x)); \ - smp_read_barrier_depends(); \ - (typeof(x))__x; \ -}) - -static __no_kasan_or_inline -unsigned long read_word_at_a_time(const void *addr) -{ - kasan_check_read(addr, 1); - return *(unsigned long *)addr; -} - #endif /* __KERNEL__ */ /* @@ -395,16 +314,6 @@ static inline void *offset_to_ptr(const int *off) compiletime_assert(__native_word(t), \ "Need native word sized stores/loads for atomicity.") -/* - * Yes, this permits 64-bit accesses on 32-bit architectures. These will - * actually be atomic in some cases (namely Armv7 + LPAE), but for others we - * rely on the access being split into 2x32-bit accesses for a 32-bit quantity - * (e.g. a virtual address) and a strong prevailing wind. 
- */ -#define compiletime_assert_rwonce_type(t) \ - compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long), \ - "Unsupported access size for {READ,WRITE}_ONCE().") - /* &a[0] degrades to a pointer: a different type from an array */ #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) @@ -414,4 +323,6 @@ static inline void *offset_to_ptr(const int *off) */ #define prevent_tail_call_optimization() mb() +#include + #endif /* __LINUX_COMPILER_H */ -- cgit v1.2.3 From b78b331a3f5c0773171dadd6bbfa2a2242b45604 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 15 Oct 2019 17:30:47 -0700 Subject: asm/rwonce: Allow __READ_ONCE to be overridden by the architecture The meat and potatoes of READ_ONCE() is defined by the __READ_ONCE() macro, which uses a volatile casts in an attempt to avoid tearing of byte, halfword, word and double-word accesses. Allow this to be overridden by the architecture code in the case that things like memory barriers are also required. Acked-by: Peter Zijlstra (Intel) Acked-by: Paul E. McKenney Signed-off-by: Will Deacon --- include/asm-generic/rwonce.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/asm-generic/rwonce.h b/include/asm-generic/rwonce.h index 87584379da43..04586b55a7c2 100644 --- a/include/asm-generic/rwonce.h +++ b/include/asm-generic/rwonce.h @@ -43,7 +43,9 @@ * atomicity or dependency ordering guarantees. Note that this may result * in tears! */ +#ifndef __READ_ONCE #define __READ_ONCE(x) (*(const volatile __unqual_scalar_typeof(x) *)&(x)) +#endif #define __READ_ONCE_SCALAR(x) \ ({ \ -- cgit v1.2.3 From 3c9184109e78ea2371ca8fa66d7f36986a53af98 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 30 Oct 2019 16:51:07 +0000 Subject: asm/rwonce: Remove smp_read_barrier_depends() invocation Alpha overrides __READ_ONCE() directly, so there's no need to use smp_read_barrier_depends() in the core code. This also means that __READ_ONCE() can be relied upon to provide dependency ordering. Acked-by: Peter Zijlstra (Intel) Acked-by: Paul E. McKenney Signed-off-by: Will Deacon --- include/asm-generic/rwonce.h | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/include/asm-generic/rwonce.h b/include/asm-generic/rwonce.h index 04586b55a7c2..3a7f737c77bd 100644 --- a/include/asm-generic/rwonce.h +++ b/include/asm-generic/rwonce.h @@ -40,24 +40,16 @@ /* * Use __READ_ONCE() instead of READ_ONCE() if you do not require any - * atomicity or dependency ordering guarantees. Note that this may result - * in tears! + * atomicity. Note that this may result in tears! 
*/ #ifndef __READ_ONCE #define __READ_ONCE(x) (*(const volatile __unqual_scalar_typeof(x) *)&(x)) #endif -#define __READ_ONCE_SCALAR(x) \ -({ \ - __unqual_scalar_typeof(x) __x = __READ_ONCE(x); \ - smp_read_barrier_depends(); \ - (typeof(x))__x; \ -}) - #define READ_ONCE(x) \ ({ \ compiletime_assert_rwonce_type(x); \ - __READ_ONCE_SCALAR(x); \ + __READ_ONCE(x); \ }) #define __WRITE_ONCE(x, val) \ @@ -84,12 +76,9 @@ unsigned long __read_once_word_nocheck(const void *addr) */ #define READ_ONCE_NOCHECK(x) \ ({ \ - unsigned long __x; \ - compiletime_assert(sizeof(x) == sizeof(__x), \ + compiletime_assert(sizeof(x) == sizeof(unsigned long), \ "Unsupported access size for READ_ONCE_NOCHECK()."); \ - __x = __read_once_word_nocheck(&(x)); \ - smp_read_barrier_depends(); \ - (typeof(x))__x; \ + (typeof(x))__read_once_word_nocheck(&(x)); \ }) static __no_kasan_or_inline -- cgit v1.2.3 From 002dff36acfba3476b685a09f78ffb7c452f5951 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 10 Jul 2020 14:49:40 +0100 Subject: asm/rwonce: Don't pull into 'asm-generic/rwonce.h' Now that 'smp_read_barrier_depends()' has gone the way of the Norwegian Blue, drop the inclusion of in 'asm-generic/rwonce.h'. This requires fixups to some architecture vdso headers which were previously relying on 'asm/barrier.h' coming in via 'linux/compiler.h'. Acked-by: Peter Zijlstra (Intel) Signed-off-by: Will Deacon --- arch/arm/include/asm/vdso/gettimeofday.h | 1 + arch/arm64/include/asm/vdso/compat_gettimeofday.h | 1 + arch/arm64/include/asm/vdso/gettimeofday.h | 1 + arch/riscv/include/asm/vdso/gettimeofday.h | 1 + include/asm-generic/rwonce.h | 2 -- include/linux/nospec.h | 2 ++ 6 files changed, 6 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/arch/arm/include/asm/vdso/gettimeofday.h b/arch/arm/include/asm/vdso/gettimeofday.h index 36dc18553ed8..1b207cf07697 100644 --- a/arch/arm/include/asm/vdso/gettimeofday.h +++ b/arch/arm/include/asm/vdso/gettimeofday.h @@ -7,6 +7,7 @@ #ifndef __ASSEMBLY__ +#include #include #include #include diff --git a/arch/arm64/include/asm/vdso/compat_gettimeofday.h b/arch/arm64/include/asm/vdso/compat_gettimeofday.h index b6907ae78e53..bcf7649999a4 100644 --- a/arch/arm64/include/asm/vdso/compat_gettimeofday.h +++ b/arch/arm64/include/asm/vdso/compat_gettimeofday.h @@ -7,6 +7,7 @@ #ifndef __ASSEMBLY__ +#include #include #include diff --git a/arch/arm64/include/asm/vdso/gettimeofday.h b/arch/arm64/include/asm/vdso/gettimeofday.h index afba6ba332f8..127fa63893e2 100644 --- a/arch/arm64/include/asm/vdso/gettimeofday.h +++ b/arch/arm64/include/asm/vdso/gettimeofday.h @@ -7,6 +7,7 @@ #ifndef __ASSEMBLY__ +#include #include #define VDSO_HAS_CLOCK_GETRES 1 diff --git a/arch/riscv/include/asm/vdso/gettimeofday.h b/arch/riscv/include/asm/vdso/gettimeofday.h index c8e818688ec1..3099362d9f26 100644 --- a/arch/riscv/include/asm/vdso/gettimeofday.h +++ b/arch/riscv/include/asm/vdso/gettimeofday.h @@ -4,6 +4,7 @@ #ifndef __ASSEMBLY__ +#include #include #include #include diff --git a/include/asm-generic/rwonce.h b/include/asm-generic/rwonce.h index 3a7f737c77bd..8d0a6280e982 100644 --- a/include/asm-generic/rwonce.h +++ b/include/asm-generic/rwonce.h @@ -26,8 +26,6 @@ #include #include -#include - /* * Yes, this permits 64-bit accesses on 32-bit architectures. 
These will * actually be atomic in some cases (namely Armv7 + LPAE), but for others we diff --git a/include/linux/nospec.h b/include/linux/nospec.h index 0c5ef54fd416..c1e79f72cd89 100644 --- a/include/linux/nospec.h +++ b/include/linux/nospec.h @@ -5,6 +5,8 @@ #ifndef _LINUX_NOSPEC_H #define _LINUX_NOSPEC_H + +#include #include struct task_struct; -- cgit v1.2.3 From 93fab07c22930c9ac4f01212fd92913c9a812f9f Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 30 Oct 2019 17:17:22 +0000 Subject: locking/barriers: Remove definitions for [smp_]read_barrier_depends() There are no remaining users of [smp_]read_barrier_depends(), so remove it from the generic implementation of 'barrier.h'. Acked-by: Peter Zijlstra (Intel) Acked-by: Paul E. McKenney Signed-off-by: Will Deacon --- include/asm-generic/barrier.h | 17 ----------------- 1 file changed, 17 deletions(-) (limited to 'include') diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h index 8116744bb82c..fec97dc34de7 100644 --- a/include/asm-generic/barrier.h +++ b/include/asm-generic/barrier.h @@ -46,10 +46,6 @@ #define dma_wmb() wmb() #endif -#ifndef read_barrier_depends -#define read_barrier_depends() do { } while (0) -#endif - #ifndef __smp_mb #define __smp_mb() mb() #endif @@ -62,10 +58,6 @@ #define __smp_wmb() wmb() #endif -#ifndef __smp_read_barrier_depends -#define __smp_read_barrier_depends() read_barrier_depends() -#endif - #ifdef CONFIG_SMP #ifndef smp_mb @@ -80,10 +72,6 @@ #define smp_wmb() __smp_wmb() #endif -#ifndef smp_read_barrier_depends -#define smp_read_barrier_depends() __smp_read_barrier_depends() -#endif - #else /* !CONFIG_SMP */ #ifndef smp_mb @@ -98,10 +86,6 @@ #define smp_wmb() barrier() #endif -#ifndef smp_read_barrier_depends -#define smp_read_barrier_depends() do { } while (0) -#endif - #endif /* CONFIG_SMP */ #ifndef __smp_store_mb @@ -196,7 +180,6 @@ do { \ #define virt_mb() __smp_mb() #define virt_rmb() __smp_rmb() #define virt_wmb() __smp_wmb() -#define virt_read_barrier_depends() __smp_read_barrier_depends() #define virt_store_mb(var, value) __smp_store_mb(var, value) #define virt_mb__before_atomic() __smp_mb__before_atomic() #define virt_mb__after_atomic() __smp_mb__after_atomic() -- cgit v1.2.3 From c6cd2e011655aead2097273a04350f52429a1a8d Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 7 Nov 2019 14:46:59 +0000 Subject: include/linux: Remove smp_read_barrier_depends() from comments smp_read_barrier_depends() doesn't exist any more, so reword the two comments that mention it to refer to "dependency ordering" instead. Acked-by: Peter Zijlstra (Intel) Acked-by: Paul E. McKenney Signed-off-by: Will Deacon --- include/linux/percpu-refcount.h | 2 +- include/linux/ptr_ring.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 22d9d183950d..87d8a38bdea1 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -155,7 +155,7 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref, * between contaminating the pointer value, meaning that * READ_ONCE() is required when fetching it. * - * The smp_read_barrier_depends() implied by READ_ONCE() pairs + * The dependency ordering from the READ_ONCE() pairs * with smp_store_release() in __percpu_ref_switch_to_percpu(). 
*/ percpu_ptr = READ_ONCE(ref->percpu_count_ptr); diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 417db0a79a62..808f9d3ee546 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -107,7 +107,7 @@ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) return -ENOSPC; /* Make sure the pointer we are storing points to a valid data. */ - /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */ + /* Pairs with the dependency ordering in __ptr_ring_consume. */ smp_wmb(); WRITE_ONCE(r->queue[r->producer++], ptr); -- cgit v1.2.3 From eb5c2d4b45e3d2d5d052ea6b8f1463976b1020d5 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 21 Jul 2020 09:54:15 +0100 Subject: compiler.h: Move compiletime_assert() macros into compiler_types.h The kernel test robot reports that moving READ_ONCE() out into its own header breaks a W=1 build for parisc, which is relying on the definition of compiletime_assert() being available: | In file included from ./arch/parisc/include/generated/asm/rwonce.h:1, | from ./include/asm-generic/barrier.h:16, | from ./arch/parisc/include/asm/barrier.h:29, | from ./arch/parisc/include/asm/atomic.h:11, | from ./include/linux/atomic.h:7, | from kernel/locking/percpu-rwsem.c:2: | ./arch/parisc/include/asm/atomic.h: In function 'atomic_read': | ./include/asm-generic/rwonce.h:36:2: error: implicit declaration of function 'compiletime_assert' [-Werror=implicit-function-declaration] | 36 | compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long), \ | | ^~~~~~~~~~~~~~~~~~ | ./include/asm-generic/rwonce.h:49:2: note: in expansion of macro 'compiletime_assert_rwonce_type' | 49 | compiletime_assert_rwonce_type(x); \ | | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ./arch/parisc/include/asm/atomic.h:73:9: note: in expansion of macro 'READ_ONCE' | 73 | return READ_ONCE((v)->counter); | | ^~~~~~~~~ Move these macros into compiler_types.h, so that they are available to READ_ONCE() and friends. 
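For reference, the dependency this patch resolves can be reproduced outside the kernel with a minimal sketch of the same mechanism. The macro bodies below follow the __compiletime_assert()/compiletime_assert() definitions moved by this patch and the access-size check used by compiletime_assert_rwonce_type() in asm-generic/rwonce.h; the wrapper name assert_rwonce_type(), the simplified __compiletime_error() stand-in, struct too_big and main() are illustrative only and not kernel code.

#include <stdio.h>

/* Stand-in for the kernel's __compiletime_error(); GCC "error" attribute. */
#define __compiletime_error(msg) __attribute__((__error__(msg)))

/* Same shape as the kernel macros being moved into compiler_types.h. */
#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (!(condition))					\
			prefix ## suffix();				\
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

/* Extra expansion level so __COUNTER__ is expanded before token pasting. */
#define _compiletime_assert(condition, msg, prefix, suffix)		\
	__compiletime_assert(condition, msg, prefix, suffix)

#define compiletime_assert(condition, msg)				\
	_compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)

/* Simplified compiletime_assert_rwonce_type(): native word sizes or long long. */
#define assert_rwonce_type(x)						\
	compiletime_assert(sizeof(x) == sizeof(char)  ||		\
			   sizeof(x) == sizeof(short) ||		\
			   sizeof(x) == sizeof(int)   ||		\
			   sizeof(x) == sizeof(long)  ||		\
			   sizeof(x) == sizeof(long long),		\
			   "Unsupported access size for {READ,WRITE}_ONCE().")

struct too_big { long v[4]; };	/* larger than any supported access size */

int main(void)
{
	int counter = 42;

	/* Condition is true: the error-attributed function is never referenced. */
	assert_rwonce_type(counter);

	/*
	 * struct too_big t;
	 * assert_rwonce_type(t);  // with gcc -O2 this fails the build with the
	 *                         // "Unsupported access size" message above
	 */

	printf("counter=%d\n", counter);
	return 0;
}

Built with gcc -O2, uncommenting the struct too_big check is enough to see the size assertion fire at compile time; the same expansion chain is what broke the parisc W=1 build when rwonce.h was pulled in before compiler.h had defined compiletime_assert(), which is why the definitions move into compiler_types.h here.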
Link: http://lists.infradead.org/pipermail/linux-arm-kernel/2020-July/587094.html Reported-by: kernel test robot Signed-off-by: Will Deacon --- include/linux/compiler.h | 41 ----------------------------------------- include/linux/compiler_types.h | 41 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 41 deletions(-) (limited to 'include') diff --git a/include/linux/compiler.h b/include/linux/compiler.h index f075a3df4fe2..59f7194fdf08 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -273,47 +273,6 @@ static inline void *offset_to_ptr(const int *off) #endif /* __ASSEMBLY__ */ -/* Compile time object size, -1 for unknown */ -#ifndef __compiletime_object_size -# define __compiletime_object_size(obj) -1 -#endif -#ifndef __compiletime_warning -# define __compiletime_warning(message) -#endif -#ifndef __compiletime_error -# define __compiletime_error(message) -#endif - -#ifdef __OPTIMIZE__ -# define __compiletime_assert(condition, msg, prefix, suffix) \ - do { \ - extern void prefix ## suffix(void) __compiletime_error(msg); \ - if (!(condition)) \ - prefix ## suffix(); \ - } while (0) -#else -# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0) -#endif - -#define _compiletime_assert(condition, msg, prefix, suffix) \ - __compiletime_assert(condition, msg, prefix, suffix) - -/** - * compiletime_assert - break build and emit msg if condition is false - * @condition: a compile-time constant condition to check - * @msg: a message to emit if condition is false - * - * In tradition of POSIX assert, this macro will break the build if the - * supplied condition is *false*, emitting the supplied error message if the - * compiler has support to do so. - */ -#define compiletime_assert(condition, msg) \ - _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__) - -#define compiletime_assert_atomic_type(t) \ - compiletime_assert(__native_word(t), \ - "Need native word sized stores/loads for atomicity.") - /* &a[0] degrades to a pointer: a different type from an array */ #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index c3bf7710f69a..d9bbb62a3e2a 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -300,6 +300,47 @@ struct ftrace_likely_data { (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \ sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) +/* Compile time object size, -1 for unknown */ +#ifndef __compiletime_object_size +# define __compiletime_object_size(obj) -1 +#endif +#ifndef __compiletime_warning +# define __compiletime_warning(message) +#endif +#ifndef __compiletime_error +# define __compiletime_error(message) +#endif + +#ifdef __OPTIMIZE__ +# define __compiletime_assert(condition, msg, prefix, suffix) \ + do { \ + extern void prefix ## suffix(void) __compiletime_error(msg); \ + if (!(condition)) \ + prefix ## suffix(); \ + } while (0) +#else +# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0) +#endif + +#define _compiletime_assert(condition, msg, prefix, suffix) \ + __compiletime_assert(condition, msg, prefix, suffix) + +/** + * compiletime_assert - break build and emit msg if condition is false + * @condition: a compile-time constant condition to check + * @msg: a message to emit if condition is false + * + * In tradition of POSIX assert, this macro will break the build if the + * supplied condition is *false*, emitting the supplied 
error message if the + * compiler has support to do so. + */ +#define compiletime_assert(condition, msg) \ + _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__) + +#define compiletime_assert_atomic_type(t) \ + compiletime_assert(__native_word(t), \ + "Need native word sized stores/loads for atomicity.") + /* Helpers for emitting diagnostics in pragmas. */ #ifndef __diag #define __diag(string) -- cgit v1.2.3 From 4a17c441c7cb06ac1e5fd3409a64d8ce78151983 Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Thu, 16 Jul 2020 23:09:40 +0800 Subject: soundwire: intel: revisit SHIM programming sequences. Somehow the existing code is not aligned with the steps described in the documentation, refactor code and make sure the register programming sequences are correct. Also add missing power-up, power-down and wake capabilities (the last two are used in follow-up patches but introduced here for consistency). Some of the SHIM registers exposed fields that are link specific, and in addition some of the power-related registers (SPA/CPA) take time to be updated. Uncontrolled access leads to timeouts or errors. Add a mutex, shared by all links, so that all accesses to such registers are serialized, and follow a pattern of read-modify-write. This includes making sure SHIM_SYNC is programmed only once, before the first master is powered on. We use a 'shim_mask' field, shared between all links and protected by a mutex, to deal with power-up and power-down sequences. Note that the SYNCPRD value is tied only to the XTAL value and not the current bus frequency or the frame rate. BugLink: https://github.com/thesofproject/linux/issues/1555 Signed-off-by: Rander Wang Signed-off-by: Pierre-Louis Bossart Signed-off-by: Bard Liao Link: https://lore.kernel.org/r/20200716150947.22119-3-yung-chuan.liao@linux.intel.com Signed-off-by: Vinod Koul --- drivers/soundwire/intel.c | 237 ++++++++++++++++++++++++++++++------ drivers/soundwire/intel.h | 4 + drivers/soundwire/intel_init.c | 4 + include/linux/soundwire/sdw_intel.h | 2 + 4 files changed, 213 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c index 8c7ae07c0fe1..4792613e8e5a 100644 --- a/drivers/soundwire/intel.c +++ b/drivers/soundwire/intel.c @@ -46,7 +46,8 @@ #define SDW_SHIM_LCTL_SPA BIT(0) #define SDW_SHIM_LCTL_CPA BIT(8) -#define SDW_SHIM_SYNC_SYNCPRD_VAL 0x176F +#define SDW_SHIM_SYNC_SYNCPRD_VAL_24 (24000 / SDW_CADENCE_GSYNC_KHZ - 1) +#define SDW_SHIM_SYNC_SYNCPRD_VAL_38_4 (38400 / SDW_CADENCE_GSYNC_KHZ - 1) #define SDW_SHIM_SYNC_SYNCPRD GENMASK(14, 0) #define SDW_SHIM_SYNC_SYNCCPU BIT(15) #define SDW_SHIM_SYNC_CMDSYNC_MASK GENMASK(19, 16) @@ -272,8 +273,46 @@ static int intel_link_power_up(struct sdw_intel *sdw) { unsigned int link_id = sdw->instance; void __iomem *shim = sdw->link_res->shim; + u32 *shim_mask = sdw->link_res->shim_mask; + struct sdw_bus *bus = &sdw->cdns.bus; + struct sdw_master_prop *prop = &bus->prop; int spa_mask, cpa_mask; - int link_control, ret; + int link_control; + int ret = 0; + u32 syncprd; + u32 sync_reg; + + mutex_lock(sdw->link_res->shim_lock); + + /* + * The hardware relies on an internal counter, typically 4kHz, + * to generate the SoundWire SSP - which defines a 'safe' + * synchronization point between commands and audio transport + * and allows for multi link synchronization. The SYNCPRD value + * is only dependent on the oscillator clock provided to + * the IP, so adjust based on _DSD properties reported in DSDT + * tables. 
The values reported are based on either 24MHz + * (CNL/CML) or 38.4 MHz (ICL/TGL+). + */ + if (prop->mclk_freq % 6000000) + syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4; + else + syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24; + + if (!*shim_mask) { + /* we first need to program the SyncPRD/CPU registers */ + dev_dbg(sdw->cdns.dev, + "%s: first link up, programming SYNCPRD\n", __func__); + + /* set SyncPRD period */ + sync_reg = intel_readl(shim, SDW_SHIM_SYNC); + sync_reg |= (syncprd << + SDW_REG_SHIFT(SDW_SHIM_SYNC_SYNCPRD)); + + /* Set SyncCPU bit */ + sync_reg |= SDW_SHIM_SYNC_SYNCCPU; + intel_writel(shim, SDW_SHIM_SYNC, sync_reg); + } /* Link power up sequence */ link_control = intel_readl(shim, SDW_SHIM_LCTL); @@ -282,68 +321,180 @@ static int intel_link_power_up(struct sdw_intel *sdw) link_control |= spa_mask; ret = intel_set_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask); - if (ret < 0) - return ret; + if (ret < 0) { + dev_err(sdw->cdns.dev, "Failed to power up link: %d\n", ret); + goto out; + } + + if (!*shim_mask) { + /* SyncCPU will change once link is active */ + ret = intel_wait_bit(shim, SDW_SHIM_SYNC, + SDW_SHIM_SYNC_SYNCCPU, 0); + if (ret < 0) { + dev_err(sdw->cdns.dev, + "Failed to set SHIM_SYNC: %d\n", ret); + goto out; + } + } + + *shim_mask |= BIT(link_id); sdw->cdns.link_up = true; - return 0; +out: + mutex_unlock(sdw->link_res->shim_lock); + + return ret; } -static int intel_shim_init(struct sdw_intel *sdw) +/* this needs to be called with shim_lock */ +static void intel_shim_glue_to_master_ip(struct sdw_intel *sdw) { void __iomem *shim = sdw->link_res->shim; unsigned int link_id = sdw->instance; - int sync_reg, ret; - u16 ioctl = 0, act = 0; + u16 ioctl; - /* Initialize Shim */ - ioctl |= SDW_SHIM_IOCTL_BKE; + /* Switch to MIP from Glue logic */ + ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id)); + + ioctl &= ~(SDW_SHIM_IOCTL_DOE); intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl); + usleep_range(10, 15); - ioctl |= SDW_SHIM_IOCTL_WPDD; + ioctl &= ~(SDW_SHIM_IOCTL_DO); intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl); + usleep_range(10, 15); - ioctl |= SDW_SHIM_IOCTL_DO; + ioctl |= (SDW_SHIM_IOCTL_MIF); intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl); + usleep_range(10, 15); - ioctl |= SDW_SHIM_IOCTL_DOE; + ioctl &= ~(SDW_SHIM_IOCTL_BKE); + ioctl &= ~(SDW_SHIM_IOCTL_COE); intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl); + usleep_range(10, 15); - /* Switch to MIP from Glue logic */ - ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id)); + /* at this point Master IP has full control of the I/Os */ +} - ioctl &= ~(SDW_SHIM_IOCTL_DOE); +/* this needs to be called with shim_lock */ +static void intel_shim_master_ip_to_glue(struct sdw_intel *sdw) +{ + unsigned int link_id = sdw->instance; + void __iomem *shim = sdw->link_res->shim; + u16 ioctl; + + /* Glue logic */ + ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id)); + ioctl |= SDW_SHIM_IOCTL_BKE; + ioctl |= SDW_SHIM_IOCTL_COE; intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl); + usleep_range(10, 15); - ioctl &= ~(SDW_SHIM_IOCTL_DO); + ioctl &= ~(SDW_SHIM_IOCTL_MIF); intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl); + usleep_range(10, 15); - ioctl |= (SDW_SHIM_IOCTL_MIF); + /* at this point Integration Glue has full control of the I/Os */ +} + +static int intel_shim_init(struct sdw_intel *sdw, bool clock_stop) +{ + void __iomem *shim = sdw->link_res->shim; + unsigned int link_id = sdw->instance; + int ret = 0; + u16 ioctl = 0, act = 0; + + mutex_lock(sdw->link_res->shim_lock); + + /* Initialize Shim */ + ioctl |= 
SDW_SHIM_IOCTL_BKE; intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl); + usleep_range(10, 15); - ioctl &= ~(SDW_SHIM_IOCTL_BKE); - ioctl &= ~(SDW_SHIM_IOCTL_COE); + ioctl |= SDW_SHIM_IOCTL_WPDD; + intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl); + usleep_range(10, 15); + ioctl |= SDW_SHIM_IOCTL_DO; + intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl); + usleep_range(10, 15); + + ioctl |= SDW_SHIM_IOCTL_DOE; intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl); + usleep_range(10, 15); + + intel_shim_glue_to_master_ip(sdw); act |= 0x1 << SDW_REG_SHIFT(SDW_SHIM_CTMCTL_DOAIS); act |= SDW_SHIM_CTMCTL_DACTQE; act |= SDW_SHIM_CTMCTL_DODS; intel_writew(shim, SDW_SHIM_CTMCTL(link_id), act); + usleep_range(10, 15); - /* Now set SyncPRD period */ - sync_reg = intel_readl(shim, SDW_SHIM_SYNC); - sync_reg |= (SDW_SHIM_SYNC_SYNCPRD_VAL << - SDW_REG_SHIFT(SDW_SHIM_SYNC_SYNCPRD)); + mutex_unlock(sdw->link_res->shim_lock); + + return ret; +} + +static void __maybe_unused intel_shim_wake(struct sdw_intel *sdw, bool wake_enable) +{ + void __iomem *shim = sdw->link_res->shim; + unsigned int link_id = sdw->instance; + u16 wake_en, wake_sts; + + mutex_lock(sdw->link_res->shim_lock); + wake_en = intel_readw(shim, SDW_SHIM_WAKEEN); + + if (wake_enable) { + /* Enable the wakeup */ + wake_en |= (SDW_SHIM_WAKEEN_ENABLE << link_id); + intel_writew(shim, SDW_SHIM_WAKEEN, wake_en); + } else { + /* Disable the wake up interrupt */ + wake_en &= ~(SDW_SHIM_WAKEEN_ENABLE << link_id); + intel_writew(shim, SDW_SHIM_WAKEEN, wake_en); + + /* Clear wake status */ + wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS); + wake_sts |= (SDW_SHIM_WAKEEN_ENABLE << link_id); + intel_writew(shim, SDW_SHIM_WAKESTS_STATUS, wake_sts); + } + mutex_unlock(sdw->link_res->shim_lock); +} + +static int __maybe_unused intel_link_power_down(struct sdw_intel *sdw) +{ + int link_control, spa_mask, cpa_mask; + unsigned int link_id = sdw->instance; + void __iomem *shim = sdw->link_res->shim; + u32 *shim_mask = sdw->link_res->shim_mask; + int ret = 0; + + mutex_lock(sdw->link_res->shim_lock); + + intel_shim_master_ip_to_glue(sdw); + + /* Link power down sequence */ + link_control = intel_readl(shim, SDW_SHIM_LCTL); + spa_mask = ~(SDW_SHIM_LCTL_SPA << link_id); + cpa_mask = (SDW_SHIM_LCTL_CPA << link_id); + link_control &= spa_mask; + + ret = intel_clear_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask); + + if (!(*shim_mask & BIT(link_id))) + dev_err(sdw->cdns.dev, + "%s: Unbalanced power-up/down calls\n", __func__); + + *shim_mask &= ~BIT(link_id); + + mutex_unlock(sdw->link_res->shim_lock); - /* Set SyncCPU bit */ - sync_reg |= SDW_SHIM_SYNC_SYNCCPU; - ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg, - SDW_SHIM_SYNC_SYNCCPU); if (ret < 0) - dev_err(sdw->cdns.dev, "Failed to set sync period: %d\n", ret); + return ret; - return ret; + sdw->cdns.link_up = false; + return 0; } /* @@ -566,11 +717,15 @@ static int intel_pre_bank_switch(struct sdw_bus *bus) if (!bus->multi_link) return 0; + mutex_lock(sdw->link_res->shim_lock); + /* Read SYNC register */ sync_reg = intel_readl(shim, SDW_SHIM_SYNC); sync_reg |= SDW_SHIM_SYNC_CMDSYNC << sdw->instance; intel_writel(shim, SDW_SHIM_SYNC, sync_reg); + mutex_unlock(sdw->link_res->shim_lock); + return 0; } @@ -585,6 +740,8 @@ static int intel_post_bank_switch(struct sdw_bus *bus) if (!bus->multi_link) return 0; + mutex_lock(sdw->link_res->shim_lock); + /* Read SYNC register */ sync_reg = intel_readl(shim, SDW_SHIM_SYNC); @@ -596,9 +753,10 @@ static int intel_post_bank_switch(struct sdw_bus *bus) * * So, set the 
SYNCGO bit only if CMDSYNC bit is set for any Master. */ - if (!(sync_reg & SDW_SHIM_SYNC_CMDSYNC_MASK)) - return 0; - + if (!(sync_reg & SDW_SHIM_SYNC_CMDSYNC_MASK)) { + ret = 0; + goto unlock; + } /* * Set SyncGO bit to synchronously trigger a bank switch for * all the masters. A write to SYNCGO bit clears CMDSYNC bit for all @@ -608,6 +766,9 @@ static int intel_post_bank_switch(struct sdw_bus *bus) ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg, SDW_SHIM_SYNC_SYNCGO); +unlock: + mutex_unlock(sdw->link_res->shim_lock); + if (ret < 0) dev_err(sdw->cdns.dev, "Post bank switch failed: %d\n", ret); @@ -1011,9 +1172,17 @@ static struct sdw_master_ops sdw_intel_ops = { static int intel_init(struct sdw_intel *sdw) { + bool clock_stop; + /* Initialize shim and controller */ intel_link_power_up(sdw); - intel_shim_init(sdw); + + clock_stop = sdw_cdns_is_clock_stop(&sdw->cdns); + + intel_shim_init(sdw, clock_stop); + + if (clock_stop) + return 0; return sdw_cdns_init(&sdw->cdns); } diff --git a/drivers/soundwire/intel.h b/drivers/soundwire/intel.h index 694117370ac3..d6bdd4d63e08 100644 --- a/drivers/soundwire/intel.h +++ b/drivers/soundwire/intel.h @@ -15,6 +15,8 @@ * @irq: Interrupt line * @ops: Shim callback ops * @dev: device implementing hw_params and free callbacks + * @shim_lock: mutex to handle access to shared SHIM registers + * @shim_mask: global pointer to check SHIM register initialization */ struct sdw_intel_link_res { struct platform_device *pdev; @@ -25,6 +27,8 @@ struct sdw_intel_link_res { int irq; const struct sdw_intel_ops *ops; struct device *dev; + struct mutex *shim_lock; /* protect shared registers */ + u32 *shim_mask; }; struct sdw_intel { diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c index 3f2e884b4f6d..f50a93130d12 100644 --- a/drivers/soundwire/intel_init.c +++ b/drivers/soundwire/intel_init.c @@ -180,6 +180,7 @@ static struct sdw_intel_ctx ctx->mmio_base = res->mmio_base; ctx->link_mask = res->link_mask; ctx->handle = res->handle; + mutex_init(&ctx->shim_lock); link = ctx->links; link_mask = ctx->link_mask; @@ -201,6 +202,9 @@ static struct sdw_intel_ctx link->ops = res->ops; link->dev = res->dev; + link->shim_lock = &ctx->shim_lock; + link->shim_mask = &ctx->shim_mask; + memset(&pdevinfo, 0, sizeof(pdevinfo)); pdevinfo.parent = res->parent; diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h index 979b41b5dcb4..120ffddc03d2 100644 --- a/include/linux/soundwire/sdw_intel.h +++ b/include/linux/soundwire/sdw_intel.h @@ -115,6 +115,7 @@ struct sdw_intel_slave_id { * links * @link_list: list to handle interrupts across all links * @shim_lock: mutex to handle concurrent rmw access to shared SHIM registers. + * @shim_mask: flags to track initialization of SHIM shared registers */ struct sdw_intel_ctx { int count; @@ -126,6 +127,7 @@ struct sdw_intel_ctx { struct sdw_intel_slave_id *ids; struct list_head link_list; struct mutex shim_lock; /* lock for access to shared SHIM registers */ + u32 shim_mask; }; /** -- cgit v1.2.3 From b2dcfefc43f783c4462f30507f78b7bb093ee8df Mon Sep 17 00:00:00 2001 From: Badhri Jagan Sridharan Date: Wed, 15 Jul 2020 20:41:26 -0700 Subject: usb: typec: tcpm: Support bist test data mode for compliance TCPM supports BIST carried mode. PD compliance tests require BIST Test Data to be supported as well. Introducing set_bist_data callback to signal tcpc driver for configuring the port controller hardware to enable/disable BIST Test Data mode. 
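As a rough illustration of how a port-controller driver could hook into the new op, the sketch below sets a single control bit over regmap. Only the callback signature, int (*set_bist_data)(struct tcpc_dev *dev, bool on), comes from this patch; struct example_tcpc, EXAMPLE_CTRL and EXAMPLE_CTRL_BIST_TM are made-up placeholders for whatever the controller's datasheet actually defines.

#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/regmap.h>
#include <linux/usb/tcpm.h>

#define EXAMPLE_CTRL		0x19	/* hypothetical control register */
#define EXAMPLE_CTRL_BIST_TM	BIT(1)	/* hypothetical BIST test-data enable bit */

struct example_tcpc {
	struct regmap *regmap;
	struct tcpc_dev tcpc;
};

static int example_set_bist_data(struct tcpc_dev *dev, bool on)
{
	struct example_tcpc *chip = container_of(dev, struct example_tcpc, tcpc);

	/* TCPM passes 'true' for BDO_MODE_TESTDATA and 'false' on detach */
	return regmap_update_bits(chip->regmap, EXAMPLE_CTRL,
				  EXAMPLE_CTRL_BIST_TM,
				  on ? EXAMPLE_CTRL_BIST_TM : 0);
}

A driver would assign chip->tcpc.set_bist_data = example_set_bist_data before registering the port with tcpm_register_port(); the NULL checks added to tcpm_detach() and run_state_machine() in the hunks below show that the op remains optional.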
Signed-off-by: Badhri Jagan Sridharan Reviewed-by: Guenter Roeck Link: https://lore.kernel.org/r/20200716034128.1251728-1-badhri@google.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/typec/tcpm/tcpm.c | 11 +++++++++++ include/linux/usb/tcpm.h | 2 ++ 2 files changed, 13 insertions(+) (limited to 'include') diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index a6d4b03ec250..a04f6c18b11a 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -2748,6 +2748,11 @@ static void tcpm_detach(struct tcpm_port *port) if (!port->attached) return; + if (port->tcpc->set_bist_data) { + tcpm_log(port, "disable BIST MODE TESTDATA"); + port->tcpc->set_bist_data(port->tcpc, false); + } + if (tcpm_port_is_disconnected(port)) port->hard_reset_count = 0; @@ -3557,6 +3562,12 @@ static void run_state_machine(struct tcpm_port *port) case BDO_MODE_CARRIER2: tcpm_pd_transmit(port, TCPC_TX_BIST_MODE_2, NULL); break; + case BDO_MODE_TESTDATA: + if (port->tcpc->set_bist_data) { + tcpm_log(port, "Enable BIST MODE TESTDATA"); + port->tcpc->set_bist_data(port->tcpc, true); + } + break; default: break; } diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h index e7979c01c351..89f58760cf48 100644 --- a/include/linux/usb/tcpm.h +++ b/include/linux/usb/tcpm.h @@ -79,6 +79,7 @@ enum tcpm_transmit_type { * @try_role: Optional; called to set a preferred role * @pd_transmit:Called to transmit PD message * @mux: Pointer to multiplexer data + * @set_bist_data: Turn on/off bist data mode for compliance testing */ struct tcpc_dev { struct fwnode_handle *fwnode; @@ -103,6 +104,7 @@ struct tcpc_dev { int (*try_role)(struct tcpc_dev *dev, int role); int (*pd_transmit)(struct tcpc_dev *dev, enum tcpm_transmit_type type, const struct pd_message *msg); + int (*set_bist_data)(struct tcpc_dev *dev, bool on); }; struct tcpm_port; -- cgit v1.2.3 From 6e1c2241f4cecfc21f26f44179d04217c8390338 Mon Sep 17 00:00:00 2001 From: Badhri Jagan Sridharan Date: Wed, 15 Jul 2020 20:41:28 -0700 Subject: usb: typec: tcpm: Stay in BIST mode till hardreset or unattached MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Port starts to toggle when transitioning to unattached state. This is incorrect while in BIST mode. 6.4.3.1 BIST Carrier Mode Upon receipt of a BIST Message, with a BIST Carrier Mode BIST Data Object, the UUT Shall send out a continuous string of BMC encoded alternating "1"s and “0”s. The UUT Shall exit the Continuous BIST Mode within tBISTContMode of this Continuous BIST Mode being enabled(see Section 6.6.7.2). 6.4.3.2 BIST Test Data Upon receipt of a BIST Message, with a BIST Test Data BIST Data Object, the UUT Shall return a GoodCRC Message and Shall enter a test mode in which it sends no further Messages except for GoodCRC Messages in response to received Messages. See Section 5.9.2 for the definition of the Test Data Frame. The test Shall be ended by sending Hard Reset Signaling to reset the UUT. 
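To make the two exit rules quoted above easier to compare, here is a small self-contained model in plain userspace C (not kernel code): Carrier Mode times out after tBISTContMode, while Test Data mode only ends on Hard Reset or detach. PD_T_BIST_CONT_MODE_MS mirrors the 50 ms value added by this patch; the function and enum names are illustrative only.

#include <stdbool.h>
#include <stdio.h>

#define PD_T_BIST_CONT_MODE_MS	50	/* spec window is 30 - 60 ms */

enum bist_mode { BIST_NONE, BIST_CARRIER2, BIST_TESTDATA };

/* Returns true when the port may leave BIST and resume normal operation */
static bool bist_may_exit(enum bist_mode mode, unsigned int elapsed_ms,
			  bool hard_reset, bool detached)
{
	switch (mode) {
	case BIST_CARRIER2:
		/* 6.4.3.1: exit within tBISTContMode of entering the mode */
		return elapsed_ms >= PD_T_BIST_CONT_MODE_MS || hard_reset || detached;
	case BIST_TESTDATA:
		/* 6.4.3.2: only Hard Reset (or physical detach) ends the test */
		return hard_reset || detached;
	default:
		return true;
	}
}

int main(void)
{
	printf("carrier after 60ms:      %d\n", bist_may_exit(BIST_CARRIER2, 60, false, false));
	printf("testdata after 60ms:     %d\n", bist_may_exit(BIST_TESTDATA, 60, false, false));
	printf("testdata on hard reset:  %d\n", bist_may_exit(BIST_TESTDATA, 0, true, false));
	return 0;
}

In the driver itself the same rule is expressed by scheduling the delayed transition to unattached_state() only for BDO_MODE_CARRIER2 and by clearing BIST test data from the hard-reset path, as the hunks below show.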
Signed-off-by: Badhri Jagan Sridharan Reviewed-by: Guenter Roeck Link: https://lore.kernel.org/r/20200716034128.1251728-3-badhri@google.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/typec/tcpm/tcpm.c | 7 +++++-- include/linux/usb/pd.h | 1 + 2 files changed, 6 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index a04f6c18b11a..ff1cbd2147ca 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -3561,6 +3561,8 @@ static void run_state_machine(struct tcpm_port *port) switch (BDO_MODE_MASK(port->bist_request)) { case BDO_MODE_CARRIER2: tcpm_pd_transmit(port, TCPC_TX_BIST_MODE_2, NULL); + tcpm_set_state(port, unattached_state(port), + PD_T_BIST_CONT_MODE); break; case BDO_MODE_TESTDATA: if (port->tcpc->set_bist_data) { @@ -3571,8 +3573,6 @@ static void run_state_machine(struct tcpm_port *port) default: break; } - /* Always switch to unattached state */ - tcpm_set_state(port, unattached_state(port), 0); break; case GET_STATUS_SEND: tcpm_pd_send_control(port, PD_CTRL_GET_STATUS); @@ -3962,6 +3962,9 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port) static void _tcpm_pd_hard_reset(struct tcpm_port *port) { tcpm_log_force(port, "Received hard reset"); + if (port->bist_request == BDO_MODE_TESTDATA && port->tcpc->set_bist_data) + port->tcpc->set_bist_data(port->tcpc, false); + /* * If we keep receiving hard reset requests, executing the hard reset * must have failed. Revert to error recovery if that happens. diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h index a665d7f21142..b6c233e79bd4 100644 --- a/include/linux/usb/pd.h +++ b/include/linux/usb/pd.h @@ -483,4 +483,5 @@ static inline unsigned int rdo_max_power(u32 rdo) #define PD_N_CAPS_COUNT (PD_T_NO_RESPONSE / PD_T_SEND_SOURCE_CAP) #define PD_N_HARD_RESET_COUNT 2 +#define PD_T_BIST_CONT_MODE 50 /* 30 - 60 ms */ #endif /* __LINUX_USB_PD_H */ -- cgit v1.2.3 From ffeb1e9e897b8d36b197275592d121c96d3bdb95 Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Sun, 19 Jul 2020 18:09:10 +0200 Subject: USB: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Link: https://lore.kernel.org/r/20200719160910.60018-1-grandmaster@al2klimov.de Signed-off-by: Greg Kroah-Hartman --- Documentation/usb/gadget_hid.rst | 2 +- Documentation/usb/gadget_multi.rst | 10 +++++----- Documentation/usb/linux.inf | 2 +- drivers/usb/cdns3/cdns3-ti.c | 2 +- drivers/usb/common/debug.c | 2 +- drivers/usb/host/max3421-hcd.c | 6 +++--- drivers/usb/misc/Kconfig | 4 ++-- include/linux/usb/phy_companion.h | 2 +- 8 files changed, 15 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/Documentation/usb/gadget_hid.rst b/Documentation/usb/gadget_hid.rst index 098d563040cc..e623416de4f1 100644 --- a/Documentation/usb/gadget_hid.rst +++ b/Documentation/usb/gadget_hid.rst @@ -11,7 +11,7 @@ and HID reports can be sent/received through I/O on the /dev/hidgX character devices. 
For more details about HID, see the developer page on -http://www.usb.org/developers/hidpage/ +https://www.usb.org/developers/hidpage/ Configuration ============= diff --git a/Documentation/usb/gadget_multi.rst b/Documentation/usb/gadget_multi.rst index 9806b55af301..3a22c1b2f39e 100644 --- a/Documentation/usb/gadget_multi.rst +++ b/Documentation/usb/gadget_multi.rst @@ -142,7 +142,7 @@ Footnotes ========= [1] Remote Network Driver Interface Specification, -[[http://msdn.microsoft.com/en-us/library/ee484414.aspx]]. +[[https://msdn.microsoft.com/en-us/library/ee484414.aspx]]. [2] Communications Device Class Abstract Control Model, spec for this and other USB classes can be found at @@ -150,9 +150,9 @@ and other USB classes can be found at [3] CDC Ethernet Control Model. -[4] [[http://msdn.microsoft.com/en-us/library/ff537109(v=VS.85).aspx]] +[4] [[https://msdn.microsoft.com/en-us/library/ff537109(v=VS.85).aspx]] -[5] [[http://msdn.microsoft.com/en-us/library/ff539234(v=VS.85).aspx]] +[5] [[https://msdn.microsoft.com/en-us/library/ff539234(v=VS.85).aspx]] [6] To put it in some other nice words, Windows failed to respond to any user input. @@ -160,6 +160,6 @@ any user input. [7] You may find [[http://www.cygnal.org/ubb/Forum9/HTML/001050.html]] useful. -[8] http://www.nirsoft.net/utils/usb_devices_view.html +[8] https://www.nirsoft.net/utils/usb_devices_view.html -[9] [[http://msdn.microsoft.com/en-us/library/ff570620.aspx]] +[9] [[https://msdn.microsoft.com/en-us/library/ff570620.aspx]] diff --git a/Documentation/usb/linux.inf b/Documentation/usb/linux.inf index 4ffa715b0ae8..c569ac6bec58 100644 --- a/Documentation/usb/linux.inf +++ b/Documentation/usb/linux.inf @@ -1,5 +1,5 @@ ; Based on template INF file found at -; +; ; which was: ; Copyright (c) Microsoft Corporation ; and released under the MLPL as found at: diff --git a/drivers/usb/cdns3/cdns3-ti.c b/drivers/usb/cdns3/cdns3-ti.c index e701ab56b0a7..90e246601537 100644 --- a/drivers/usb/cdns3/cdns3-ti.c +++ b/drivers/usb/cdns3/cdns3-ti.c @@ -2,7 +2,7 @@ /** * cdns3-ti.c - TI specific Glue layer for Cadence USB Controller * - * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com */ #include diff --git a/drivers/usb/common/debug.c b/drivers/usb/common/debug.c index 54c8f984c7b4..ba849c7bc5c7 100644 --- a/drivers/usb/common/debug.c +++ b/drivers/usb/common/debug.c @@ -2,7 +2,7 @@ /* * Common USB debugging functions * - * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com * * Authors: Felipe Balbi , * Sebastian Andrzej Siewior diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c index 05828c0ab7de..0894f6caccb2 100644 --- a/drivers/usb/host/max3421-hcd.c +++ b/drivers/usb/host/max3421-hcd.c @@ -11,9 +11,9 @@ * * Based on: * o MAX3421E datasheet - * http://datasheets.maximintegrated.com/en/ds/MAX3421E.pdf + * https://datasheets.maximintegrated.com/en/ds/MAX3421E.pdf * o MAX3421E Programming Guide - * http://www.hdl.co.jp/ftpdata/utl-001/AN3785.pdf + * https://www.hdl.co.jp/ftpdata/utl-001/AN3785.pdf * o gadget/dummy_hcd.c * For USB HCD implementation. 
* o Arduino MAX3421 driver @@ -317,7 +317,7 @@ static const int hrsl_to_error[] = { }; /* - * See http://www.beyondlogic.org/usbnutshell/usb4.shtml#Control for a + * See https://www.beyondlogic.org/usbnutshell/usb4.shtml#Control for a * reasonable overview of how control transfers use the the IN/OUT * tokens. */ diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig index 4e48f8eed168..6818ea689cd9 100644 --- a/drivers/usb/misc/Kconfig +++ b/drivers/usb/misc/Kconfig @@ -78,7 +78,7 @@ config USB_CYPRESS_CY7C63 driver supports the pre-programmed devices (incl. firmware) by AK Modul-Bus Computer GmbH. - Please see: http://www.ak-modul-bus.de/stat/mikrocontroller.html + Please see: https://www.ak-modul-bus.de/stat/mikrocontroller.html To compile this driver as a module, choose M here: the module will be called cypress_cy7c63. @@ -106,7 +106,7 @@ config USB_IDMOUSE This driver creates an entry "/dev/idmouseX" or "/dev/usb/idmouseX", which can be used by, e.g.,"cat /dev/idmouse0 > fingerprint.pnm". - See also . + See also . config USB_FTDI_ELAN tristate "Elan PCMCIA CardBus Adapter USB Client" diff --git a/include/linux/usb/phy_companion.h b/include/linux/usb/phy_companion.h index 407f530061cd..263196f05015 100644 --- a/include/linux/usb/phy_companion.h +++ b/include/linux/usb/phy_companion.h @@ -2,7 +2,7 @@ /* * phy-companion.h -- phy companion to indicate the comparator part of PHY * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or -- cgit v1.2.3 From 9746c9be0bb5860592e048468b37974be4c59d44 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sat, 11 Jul 2020 06:45:36 -0500 Subject: exec: Remove unnecessary spaces from binfmts.h The general convention in the linux kernel is to define a pointer member as "type *name". The declaration of struct linux_binprm has several pointer defined as "type * name". Update them to the form of "type *name" for consistency. Suggested-by: Kees Cook Reviewed-by: Kees Cook Reviewed-by: Christoph Hellwig Link: https://lkml.kernel.org/r/87v9iq6x9x.fsf@x220.int.ebiederm.org Signed-off-by: "Eric W. Biederman" --- include/linux/binfmts.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 7c27d7b57871..eb5cb8df5485 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -45,15 +45,15 @@ struct linux_binprm { #ifdef __alpha__ unsigned int taso:1; #endif - struct file * executable; /* Executable to pass to the interpreter */ - struct file * interpreter; - struct file * file; + struct file *executable; /* Executable to pass to the interpreter */ + struct file *interpreter; + struct file *file; struct cred *cred; /* new credentials */ int unsafe; /* how unsafe this exec is (mask of LSM_UNSAFE_*) */ unsigned int per_clear; /* bits to clear in current->personality */ int argc, envc; - const char * filename; /* Name of binary as seen by procps */ - const char * interp; /* Name of the binary really executed. Most + const char *filename; /* Name of binary as seen by procps */ + const char *interp; /* Name of the binary really executed. 
Most of the time same as filename, but could be different for binfmt_{misc,script} */ unsigned interp_flags; -- cgit v1.2.3 From 60d9ad1d1d7f15964d23f6e71a7adcf1bde0e18e Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sat, 11 Jul 2020 08:16:15 -0500 Subject: exec: Move initialization of bprm->filename into alloc_bprm Currently it is necessary for the usermode helper code and the code that launches init to use set_fs so that pages coming from the kernel look like they are coming from userspace. To allow that usage of set_fs to be removed cleanly the argument copying from userspace needs to happen earlier. Move the computation of bprm->filename and possible allocation of a name in the case of execveat into alloc_bprm to make that possible. The exectuable name, the arguments, and the environment are copied into the new usermode stack which is stored in bprm until exec passes the point of no return. As the executable name is copied first onto the usermode stack it needs to be known. As there are no dependencies to computing the executable name, compute it early in alloc_bprm. As an implementation detail if the filename needs to be generated because it embeds a file descriptor store that filename in a new field bprm->fdpath, and free it in free_bprm. Previously this was done in an independent variable pathbuf. I have renamed pathbuf fdpath because fdpath is more suggestive of what kind of path is in the variable. I moved fdpath into struct linux_binprm because it is tightly tied to the other variables in struct linux_binprm, and as such is needed to allow the call alloc_binprm to move. Reviewed-by: Kees Cook Reviewed-by: Christoph Hellwig Link: https://lkml.kernel.org/r/87k0z66x8f.fsf@x220.int.ebiederm.org Signed-off-by: "Eric W. Biederman" --- fs/exec.c | 61 ++++++++++++++++++++++++++----------------------- include/linux/binfmts.h | 1 + 2 files changed, 34 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/fs/exec.c b/fs/exec.c index 526156d6461d..7e8af27dd199 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1557,15 +1557,37 @@ static void free_bprm(struct linux_binprm *bprm) /* If a binfmt changed the interp, free it. */ if (bprm->interp != bprm->filename) kfree(bprm->interp); + kfree(bprm->fdpath); kfree(bprm); } -static struct linux_binprm *alloc_bprm(void) +static struct linux_binprm *alloc_bprm(int fd, struct filename *filename) { struct linux_binprm *bprm = kzalloc(sizeof(*bprm), GFP_KERNEL); + int retval = -ENOMEM; if (!bprm) - return ERR_PTR(-ENOMEM); + goto out; + + if (fd == AT_FDCWD || filename->name[0] == '/') { + bprm->filename = filename->name; + } else { + if (filename->name[0] == '\0') + bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd); + else + bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s", + fd, filename->name); + if (!bprm->fdpath) + goto out_free; + + bprm->filename = bprm->fdpath; + } + bprm->interp = bprm->filename; return bprm; + +out_free: + free_bprm(bprm); +out: + return ERR_PTR(retval); } int bprm_change_interp(const char *interp, struct linux_binprm *bprm) @@ -1831,7 +1853,6 @@ static int do_execveat_common(int fd, struct filename *filename, struct user_arg_ptr envp, int flags) { - char *pathbuf = NULL; struct linux_binprm *bprm; struct file *file; struct files_struct *displaced; @@ -1856,7 +1877,7 @@ static int do_execveat_common(int fd, struct filename *filename, * further execve() calls fail. 
*/ current->flags &= ~PF_NPROC_EXCEEDED; - bprm = alloc_bprm(); + bprm = alloc_bprm(fd, filename); if (IS_ERR(bprm)) { retval = PTR_ERR(bprm); goto out_ret; @@ -1881,28 +1902,14 @@ static int do_execveat_common(int fd, struct filename *filename, sched_exec(); bprm->file = file; - if (fd == AT_FDCWD || filename->name[0] == '/') { - bprm->filename = filename->name; - } else { - if (filename->name[0] == '\0') - pathbuf = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd); - else - pathbuf = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s", - fd, filename->name); - if (!pathbuf) { - retval = -ENOMEM; - goto out_unmark; - } - /* - * Record that a name derived from an O_CLOEXEC fd will be - * inaccessible after exec. Relies on having exclusive access to - * current->files (due to unshare_files above). - */ - if (close_on_exec(fd, rcu_dereference_raw(current->files->fdt))) - bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE; - bprm->filename = pathbuf; - } - bprm->interp = bprm->filename; + /* + * Record that a name derived from an O_CLOEXEC fd will be + * inaccessible after exec. Relies on having exclusive access to + * current->files (due to unshare_files above). + */ + if (bprm->fdpath && + close_on_exec(fd, rcu_dereference_raw(current->files->fdt))) + bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE; retval = bprm_mm_init(bprm); if (retval) @@ -1941,7 +1948,6 @@ static int do_execveat_common(int fd, struct filename *filename, acct_update_integrals(current); task_numa_free(current, false); free_bprm(bprm); - kfree(pathbuf); putname(filename); if (displaced) put_files_struct(displaced); @@ -1970,7 +1976,6 @@ out_files: reset_files_struct(displaced); out_free: free_bprm(bprm); - kfree(pathbuf); out_ret: putname(filename); diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index eb5cb8df5485..8e9e1b0c8eb8 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -56,6 +56,7 @@ struct linux_binprm { const char *interp; /* Name of the binary really executed. Most of the time same as filename, but could be different for binfmt_{misc,script} */ + const char *fdpath; /* generated filename for execveat */ unsigned interp_flags; int execfd; /* File descriptor of the executable */ unsigned long loader, exec; -- cgit v1.2.3 From be619f7f063a49c656f620a46af4f8ea3e759e91 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 13 Jul 2020 12:06:48 -0500 Subject: exec: Implement kernel_execve To allow the kernel not to play games with set_fs to call exec implement kernel_execve. The function kernel_execve takes pointers into kernel memory and copies the values pointed to onto the new userspace stack. The calls with arguments from kernel space of do_execve are replaced with calls to kernel_execve. The calls do_execve and do_execveat are made static as there are now no callers outside of exec. The comments that mention do_execve are updated to refer to kernel_execve or execve depending on the circumstances. In addition to correcting the comments, this makes it easy to grep for do_execve and verify it is not used. Inspired-by: https://lkml.kernel.org/r/20200627072704.2447163-1-hch@lst.de Reviewed-by: Kees Cook Link: https://lkml.kernel.org/r/87wo365ikj.fsf@x220.int.ebiederm.org Signed-off-by: "Eric W. 
Biederman" --- arch/x86/entry/entry_32.S | 2 +- arch/x86/entry/entry_64.S | 2 +- arch/x86/kernel/unwind_frame.c | 2 +- fs/exec.c | 88 +++++++++++++++++++++++++++++++++++++++++- include/linux/binfmts.h | 9 +---- init/main.c | 4 +- kernel/umh.c | 6 +-- security/tomoyo/common.h | 2 +- security/tomoyo/domain.c | 4 +- security/tomoyo/tomoyo.c | 4 +- 10 files changed, 100 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 024d7d276cd4..8f4e085ee06d 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -854,7 +854,7 @@ SYM_CODE_START(ret_from_fork) CALL_NOSPEC ebx /* * A kernel thread is allowed to return here after successfully - * calling do_execve(). Exit to userspace to complete the execve() + * calling kernel_execve(). Exit to userspace to complete the execve() * syscall. */ movl $0, PT_EAX(%esp) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index d2a00c97e53f..73c7e255256b 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -293,7 +293,7 @@ SYM_CODE_START(ret_from_fork) CALL_NOSPEC rbx /* * A kernel thread is allowed to return here after successfully - * calling do_execve(). Exit to userspace to complete the execve() + * calling kernel_execve(). Exit to userspace to complete the execve() * syscall. */ movq $0, RAX(%rsp) diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c index 722a85f3b2dd..e40b4942157f 100644 --- a/arch/x86/kernel/unwind_frame.c +++ b/arch/x86/kernel/unwind_frame.c @@ -275,7 +275,7 @@ bool unwind_next_frame(struct unwind_state *state) * This user_mode() check is slightly broader than a PF_KTHREAD * check because it also catches the awkward situation where a * newly forked kthread transitions into a user task by calling - * do_execve(), which eventually clears PF_KTHREAD. + * kernel_execve(), which eventually clears PF_KTHREAD. 
*/ if (!user_mode(regs)) goto the_end; diff --git a/fs/exec.c b/fs/exec.c index f8135dc149b3..3698252719a3 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -448,6 +448,23 @@ static int count(struct user_arg_ptr argv, int max) return i; } +static int count_strings_kernel(const char *const *argv) +{ + int i; + + if (!argv) + return 0; + + for (i = 0; argv[i]; ++i) { + if (i >= MAX_ARG_STRINGS) + return -E2BIG; + if (fatal_signal_pending(current)) + return -ERESTARTNOHAND; + cond_resched(); + } + return i; +} + static int bprm_stack_limits(struct linux_binprm *bprm) { unsigned long limit, ptr_size; @@ -624,6 +641,20 @@ int copy_string_kernel(const char *arg, struct linux_binprm *bprm) } EXPORT_SYMBOL(copy_string_kernel); +static int copy_strings_kernel(int argc, const char *const *argv, + struct linux_binprm *bprm) +{ + while (argc-- > 0) { + int ret = copy_string_kernel(argv[argc], bprm); + if (ret < 0) + return ret; + if (fatal_signal_pending(current)) + return -ERESTARTNOHAND; + cond_resched(); + } + return 0; +} + #ifdef CONFIG_MMU /* @@ -1991,7 +2022,60 @@ out_ret: return retval; } -int do_execve(struct filename *filename, +int kernel_execve(const char *kernel_filename, + const char *const *argv, const char *const *envp) +{ + struct filename *filename; + struct linux_binprm *bprm; + int fd = AT_FDCWD; + int retval; + + filename = getname_kernel(kernel_filename); + if (IS_ERR(filename)) + return PTR_ERR(filename); + + bprm = alloc_bprm(fd, filename); + if (IS_ERR(bprm)) { + retval = PTR_ERR(bprm); + goto out_ret; + } + + retval = count_strings_kernel(argv); + if (retval < 0) + goto out_free; + bprm->argc = retval; + + retval = count_strings_kernel(envp); + if (retval < 0) + goto out_free; + bprm->envc = retval; + + retval = bprm_stack_limits(bprm); + if (retval < 0) + goto out_free; + + retval = copy_string_kernel(bprm->filename, bprm); + if (retval < 0) + goto out_free; + bprm->exec = bprm->p; + + retval = copy_strings_kernel(bprm->envc, envp, bprm); + if (retval < 0) + goto out_free; + + retval = copy_strings_kernel(bprm->argc, argv, bprm); + if (retval < 0) + goto out_free; + + retval = bprm_execve(bprm, fd, filename, 0); +out_free: + free_bprm(bprm); +out_ret: + putname(filename); + return retval; +} + +static int do_execve(struct filename *filename, const char __user *const __user *__argv, const char __user *const __user *__envp) { @@ -2000,7 +2084,7 @@ int do_execve(struct filename *filename, return do_execveat_common(AT_FDCWD, filename, argv, envp, 0); } -int do_execveat(int fd, struct filename *filename, +static int do_execveat(int fd, struct filename *filename, const char __user *const __user *__argv, const char __user *const __user *__envp, int flags) diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 8e9e1b0c8eb8..0571701ab1c5 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -135,12 +135,7 @@ int copy_string_kernel(const char *arg, struct linux_binprm *bprm); extern void set_binfmt(struct linux_binfmt *new); extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t); -extern int do_execve(struct filename *, - const char __user * const __user *, - const char __user * const __user *); -extern int do_execveat(int, struct filename *, - const char __user * const __user *, - const char __user * const __user *, - int); +int kernel_execve(const char *filename, + const char *const *argv, const char *const *envp); #endif /* _LINUX_BINFMTS_H */ diff --git a/init/main.c b/init/main.c index 0ead83e86b5a..78ccec5c28f3 100644 --- a/init/main.c +++ 
b/init/main.c @@ -1329,9 +1329,7 @@ static int run_init_process(const char *init_filename) pr_debug(" with environment:\n"); for (p = envp_init; *p; p++) pr_debug(" %s\n", *p); - return do_execve(getname_kernel(init_filename), - (const char __user *const __user *)argv_init, - (const char __user *const __user *)envp_init); + return kernel_execve(init_filename, argv_init, envp_init); } static int try_to_run_init_process(const char *init_filename) diff --git a/kernel/umh.c b/kernel/umh.c index 6ca2096298b9..a25433f9cd9a 100644 --- a/kernel/umh.c +++ b/kernel/umh.c @@ -98,9 +98,9 @@ static int call_usermodehelper_exec_async(void *data) commit_creds(new); - retval = do_execve(getname_kernel(sub_info->path), - (const char __user *const __user *)sub_info->argv, - (const char __user *const __user *)sub_info->envp); + retval = kernel_execve(sub_info->path, + (const char *const *)sub_info->argv, + (const char *const *)sub_info->envp); out: sub_info->retval = retval; /* diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h index 050473df5809..85246b9df7ca 100644 --- a/security/tomoyo/common.h +++ b/security/tomoyo/common.h @@ -425,7 +425,7 @@ struct tomoyo_request_info { struct tomoyo_obj_info *obj; /* * For holding parameters specific to execve() request. - * NULL if not dealing do_execve(). + * NULL if not dealing execve(). */ struct tomoyo_execve *ee; struct tomoyo_domain_info *domain; diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c index 7869d6a9980b..53b3e1f5f227 100644 --- a/security/tomoyo/domain.c +++ b/security/tomoyo/domain.c @@ -767,7 +767,7 @@ retry: /* * Check for domain transition preference if "file execute" matched. - * If preference is given, make do_execve() fail if domain transition + * If preference is given, make execve() fail if domain transition * has failed, for domain transition preference should be used with * destination domain defined. */ @@ -810,7 +810,7 @@ force_reset_domain: snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "<%s>", candidate->name); /* - * Make do_execve() fail if domain transition across namespaces + * Make execve() fail if domain transition across namespaces * has failed. */ reject_on_transition_failure = true; diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c index f9adddc42ac8..1f3cd432d830 100644 --- a/security/tomoyo/tomoyo.c +++ b/security/tomoyo/tomoyo.c @@ -93,7 +93,7 @@ static int tomoyo_bprm_check_security(struct linux_binprm *bprm) struct tomoyo_task *s = tomoyo_task(current); /* - * Execute permission is checked against pathname passed to do_execve() + * Execute permission is checked against pathname passed to execve() * using current domain. */ if (!s->old_domain_info) { @@ -307,7 +307,7 @@ static int tomoyo_file_fcntl(struct file *file, unsigned int cmd, */ static int tomoyo_file_open(struct file *f) { - /* Don't check read permission here if called from do_execve(). */ + /* Don't check read permission here if called from execve(). */ if (current->in_execve) return 0; return tomoyo_check_open_permission(tomoyo_domain(), &f->f_path, -- cgit v1.2.3 From d061cd734f1a59798574b50188b14b49e95a54d3 Mon Sep 17 00:00:00 2001 From: Mike Leach Date: Thu, 16 Jul 2020 11:57:40 -0600 Subject: coresight: Fix comment in main header file Comment for an elemnt in the coresight_device structure appears to have been corrupted and makes no sense. Fix this before making further changes. 
Signed-off-by: Mike Leach Reviewed-by: Suzuki K Poulose Signed-off-by: Mathieu Poirier Link: https://lore.kernel.org/r/20200716175746.3338735-12-mathieu.poirier@linaro.org Signed-off-by: Greg Kroah-Hartman --- include/linux/coresight.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/coresight.h b/include/linux/coresight.h index e3e9f0e3a878..84dc695e87d4 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -179,7 +179,8 @@ struct coresight_sysfs_link { * @enable: 'true' if component is currently part of an active path. * @activated: 'true' only if a _sink_ has been activated. A sink can be * activated but not yet enabled. Enabling for a _sink_ - * appens when a source has been selected for that it. + * happens when a source has been selected and a path is enabled + * from source to that sink. * @ea: Device attribute for sink representation under PMU directory. * @ect_dev: Associated cross trigger device. Not part of the trace data * path or connections. -- cgit v1.2.3 From 0336bdfd7354edfa3db0e01675b5df224516e3e9 Mon Sep 17 00:00:00 2001 From: Mike Leach Date: Thu, 16 Jul 2020 11:57:43 -0600 Subject: coresight: Add default sink selection to CoreSight base Adds a method to select a suitable sink connected to a given source. In cases where no sink is defined, the coresight_find_default_sink routine can search from a given source, through the child connections until a suitable sink is found. The suitability is defined in by the sink coresight_dev_subtype on the CoreSight device, and the distance from the source by counting connections. Higher value subtype is preferred - where these are equal, shorter distance from source is used as a tie-break. This allows for default sink to be discovered were none is specified (e.g. 
perf command line) Signed-off-by: Mike Leach Suggested-by: Suzuki K Poulose Reviewed-by: Leo Yan Reviewed-by: Suzuki K Poulose Signed-off-by: Mathieu Poirier Link: https://lore.kernel.org/r/20200716175746.3338735-15-mathieu.poirier@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/hwtracing/coresight/coresight-priv.h | 2 + drivers/hwtracing/coresight/coresight.c | 166 +++++++++++++++++++++++++++ include/linux/coresight.h | 3 + 3 files changed, 171 insertions(+) (limited to 'include') diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h index 36c943ae94d5..f2dc625ea585 100644 --- a/drivers/hwtracing/coresight/coresight-priv.h +++ b/drivers/hwtracing/coresight/coresight-priv.h @@ -150,6 +150,8 @@ int coresight_enable_path(struct list_head *path, u32 mode, void *sink_data); struct coresight_device *coresight_get_sink(struct list_head *path); struct coresight_device *coresight_get_enabled_sink(bool reset); struct coresight_device *coresight_get_sink_by_id(u32 id); +struct coresight_device * +coresight_find_default_sink(struct coresight_device *csdev); struct list_head *coresight_build_path(struct coresight_device *csdev, struct coresight_device *sink); void coresight_release_path(struct list_head *path); diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c index f3efbb3b2b4d..e9c90f2de34a 100644 --- a/drivers/hwtracing/coresight/coresight.c +++ b/drivers/hwtracing/coresight/coresight.c @@ -769,6 +769,171 @@ void coresight_release_path(struct list_head *path) path = NULL; } +/* return true if the device is a suitable type for a default sink */ +static inline bool coresight_is_def_sink_type(struct coresight_device *csdev) +{ + /* sink & correct subtype */ + if (((csdev->type == CORESIGHT_DEV_TYPE_SINK) || + (csdev->type == CORESIGHT_DEV_TYPE_LINKSINK)) && + (csdev->subtype.sink_subtype >= CORESIGHT_DEV_SUBTYPE_SINK_BUFFER)) + return true; + return false; +} + +/** + * coresight_select_best_sink - return the best sink for use as default from + * the two provided. + * + * @sink: current best sink. + * @depth: search depth where current sink was found. + * @new_sink: new sink for comparison with current sink. + * @new_depth: search depth where new sink was found. + * + * Sinks prioritised according to coresight_dev_subtype_sink, with only + * subtypes CORESIGHT_DEV_SUBTYPE_SINK_BUFFER or higher being used. + * + * Where two sinks of equal priority are found, the sink closest to the + * source is used (smallest search depth). + * + * return @new_sink & update @depth if better than @sink, else return @sink. + */ +static struct coresight_device * +coresight_select_best_sink(struct coresight_device *sink, int *depth, + struct coresight_device *new_sink, int new_depth) +{ + bool update = false; + + if (!sink) { + /* first found at this level */ + update = true; + } else if (new_sink->subtype.sink_subtype > + sink->subtype.sink_subtype) { + /* found better sink */ + update = true; + } else if ((new_sink->subtype.sink_subtype == + sink->subtype.sink_subtype) && + (*depth > new_depth)) { + /* found same but closer sink */ + update = true; + } + + if (update) + *depth = new_depth; + return update ? new_sink : sink; +} + +/** + * coresight_find_sink - recursive function to walk trace connections from + * source to find a suitable default sink. + * + * @csdev: source / current device to check. + * @depth: [in] search depth of calling dev, [out] depth of found sink. 
+ * + * This will walk the connection path from a source (ETM) till a suitable + * sink is encountered and return that sink to the original caller. + * + * If current device is a plain sink return that & depth, otherwise recursively + * call child connections looking for a sink. Select best possible using + * coresight_select_best_sink. + * + * return best sink found, or NULL if not found at this node or child nodes. + */ +static struct coresight_device * +coresight_find_sink(struct coresight_device *csdev, int *depth) +{ + int i, curr_depth = *depth + 1, found_depth = 0; + struct coresight_device *found_sink = NULL; + + if (coresight_is_def_sink_type(csdev)) { + found_depth = curr_depth; + found_sink = csdev; + if (csdev->type == CORESIGHT_DEV_TYPE_SINK) + goto return_def_sink; + /* look past LINKSINK for something better */ + } + + /* + * Not a sink we want - or possible child sink may be better. + * recursively explore each port found on this element. + */ + for (i = 0; i < csdev->pdata->nr_outport; i++) { + struct coresight_device *child_dev, *sink = NULL; + int child_depth = curr_depth; + + child_dev = csdev->pdata->conns[i].child_dev; + if (child_dev) + sink = coresight_find_sink(child_dev, &child_depth); + + if (sink) + found_sink = coresight_select_best_sink(found_sink, + &found_depth, + sink, + child_depth); + } + +return_def_sink: + /* return found sink and depth */ + if (found_sink) + *depth = found_depth; + return found_sink; +} + +/** + * coresight_find_default_sink: Find a sink suitable for use as a + * default sink. + * + * @csdev: starting source to find a connected sink. + * + * Walks connections graph looking for a suitable sink to enable for the + * supplied source. Uses CoreSight device subtypes and distance from source + * to select the best sink. + * + * If a sink is found, then the default sink for this device is set and + * will be automatically used in future. + * + * Used in cases where the CoreSight user (perf / sysfs) has not selected a + * sink. + */ +struct coresight_device * +coresight_find_default_sink(struct coresight_device *csdev) +{ + int depth = 0; + + /* look for a default sink if we have not found for this device */ + if (!csdev->def_sink) + csdev->def_sink = coresight_find_sink(csdev, &depth); + return csdev->def_sink; +} + +static int coresight_remove_sink_ref(struct device *dev, void *data) +{ + struct coresight_device *sink = data; + struct coresight_device *source = to_coresight_device(dev); + + if (source->def_sink == sink) + source->def_sink = NULL; + return 0; +} + +/** + * coresight_clear_default_sink: Remove all default sink references to the + * supplied sink. + * + * If supplied device is a sink, then check all the bus devices and clear + * out all the references to this sink from the coresight_device def_sink + * parameter. + * + * @csdev: coresight sink - remove references to this from all sources. + */ +static void coresight_clear_default_sink(struct coresight_device *csdev) +{ + if ((csdev->type == CORESIGHT_DEV_TYPE_SINK) || + (csdev->type == CORESIGHT_DEV_TYPE_LINKSINK)) { + bus_for_each_dev(&coresight_bustype, NULL, csdev, + coresight_remove_sink_ref); + } +} + /** coresight_validate_source - make sure a source has the right credentials * @csdev: the device structure for a source. * @function: the function this was called from. 
@@ -1358,6 +1523,7 @@ void coresight_unregister(struct coresight_device *csdev) etm_perf_del_symlink_sink(csdev); /* Remove references of that device in the topology */ coresight_remove_conns(csdev); + coresight_clear_default_sink(csdev); coresight_release_platform_data(csdev, csdev->pdata); device_unregister(&csdev->dev); } diff --git a/include/linux/coresight.h b/include/linux/coresight.h index 84dc695e87d4..58fffdecdbfd 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -48,6 +48,7 @@ enum coresight_dev_subtype_sink { CORESIGHT_DEV_SUBTYPE_SINK_NONE, CORESIGHT_DEV_SUBTYPE_SINK_PORT, CORESIGHT_DEV_SUBTYPE_SINK_BUFFER, + CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM, }; enum coresight_dev_subtype_link { @@ -182,6 +183,7 @@ struct coresight_sysfs_link { * happens when a source has been selected and a path is enabled * from source to that sink. * @ea: Device attribute for sink representation under PMU directory. + * @def_sink: cached reference to default sink found for this device. * @ect_dev: Associated cross trigger device. Not part of the trace data * path or connections. * @nr_links: number of sysfs links created to other components from this @@ -200,6 +202,7 @@ struct coresight_device { /* sink specific fields */ bool activated; /* true only if a sink is part of a path */ struct dev_ext_attribute *ea; + struct coresight_device *def_sink; /* cross trigger handling */ struct coresight_device *ect_dev; /* sysfs links between components */ -- cgit v1.2.3 From 5c9fa16e8abd342ce04dc830c1ebb2a03abf6c05 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 3 Jul 2020 11:19:57 +1000 Subject: powerpc/64s: Remove PROT_SAO support ISA v3.1 does not support the SAO storage control attribute required to implement PROT_SAO. PROT_SAO was used by specialised system software (Lx86) that has been discontinued for about 7 years, and is not thought to be used elsewhere, so removal should not cause problems. We rather remove it than keep support for older processors, because live migrating guest partitions to newer processors may not be possible if SAO is in use (or worse allowed with silent races). - PROT_SAO stays in the uapi header so code using it would still build. - arch_validate_prot() is removed, the generic version rejects PROT_SAO so applications would get a failure at mmap() time. 
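A quick way to see the user-visible effect is the probe below: PROT_SAO still compiles because the define stays in the uapi header, but per the description above the generic prot validation now rejects it, so the mprotect() call is expected to fail with EINVAL on kernels carrying this change (it may still succeed on older powerpc kernels). This is an illustrative userspace sketch, not part of the patch.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef PROT_SAO
#define PROT_SAO 0x10	/* powerpc Strong Access Ordering flag, kept in uapi but unsupported since v5.9 */
#endif

int main(void)
{
	size_t len = 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	if (mprotect(p, len, PROT_READ | PROT_WRITE | PROT_SAO) == 0)
		printf("PROT_SAO accepted (pre-5.9 powerpc behaviour)\n");
	else
		printf("PROT_SAO rejected: %s\n", strerror(errno));

	munmap(p, len);
	return 0;
}

The deleted selftest (tools/testing/selftests/powerpc/mm/prot_sao.c in the diffstat) exercised the same flag, which is why it is removed together with the arch hooks.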
Signed-off-by: Nicholas Piggin [mpe: Drop KVM change for the time being] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200703011958.1166620-3-npiggin@gmail.com --- arch/powerpc/include/asm/book3s/64/pgtable.h | 8 +++-- arch/powerpc/include/asm/cputable.h | 10 +++---- arch/powerpc/include/asm/mman.h | 26 +++-------------- arch/powerpc/include/asm/nohash/64/pgtable.h | 2 -- arch/powerpc/include/uapi/asm/mman.h | 2 +- arch/powerpc/kernel/dt_cpu_ftrs.c | 2 +- arch/powerpc/mm/book3s64/hash_utils.c | 2 -- include/linux/mm.h | 2 -- include/trace/events/mmflags.h | 2 -- mm/ksm.c | 4 --- tools/testing/selftests/powerpc/mm/.gitignore | 1 - tools/testing/selftests/powerpc/mm/Makefile | 4 +-- tools/testing/selftests/powerpc/mm/prot_sao.c | 42 --------------------------- 13 files changed, 17 insertions(+), 90 deletions(-) delete mode 100644 tools/testing/selftests/powerpc/mm/prot_sao.c (limited to 'include') diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 495fc0ccb453..6de56c3b33c4 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -20,9 +20,13 @@ #define _PAGE_RW (_PAGE_READ | _PAGE_WRITE) #define _PAGE_RWX (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC) #define _PAGE_PRIVILEGED 0x00008 /* kernel access only */ -#define _PAGE_SAO 0x00010 /* Strong access order */ + +#define _PAGE_CACHE_CTL 0x00030 /* Bits for the folowing cache modes */ + /* No bits set is normal cacheable memory */ + /* 0x00010 unused, is SAO bit on radix POWER9 */ #define _PAGE_NON_IDEMPOTENT 0x00020 /* non idempotent memory */ #define _PAGE_TOLERANT 0x00030 /* tolerant memory, cache inhibited */ + #define _PAGE_DIRTY 0x00080 /* C: page changed */ #define _PAGE_ACCESSED 0x00100 /* R: page referenced */ /* @@ -824,8 +828,6 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, return hash__set_pte_at(mm, addr, ptep, pte, percpu); } -#define _PAGE_CACHE_CTL (_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT) - #define pgprot_noncached pgprot_noncached static inline pgprot_t pgprot_noncached(pgprot_t prot) { diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index dd0a2e77a695..a461c3300804 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -191,7 +191,7 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTR_SPURR LONG_ASM_CONST(0x0000000001000000) #define CPU_FTR_DSCR LONG_ASM_CONST(0x0000000002000000) #define CPU_FTR_VSX LONG_ASM_CONST(0x0000000004000000) -#define CPU_FTR_SAO LONG_ASM_CONST(0x0000000008000000) +// Free LONG_ASM_CONST(0x0000000008000000) #define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0000000010000000) #define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0000000020000000) #define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0000000040000000) @@ -435,7 +435,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ - CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \ + CPU_FTR_DSCR | CPU_FTR_ASYM_SMT | \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_CFAR | CPU_FTR_HVMODE | \ CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX ) @@ -444,7 +444,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ - CPU_FTR_DSCR | CPU_FTR_SAO | \ + CPU_FTR_DSCR 
| \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \ @@ -455,7 +455,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ - CPU_FTR_DSCR | CPU_FTR_SAO | \ + CPU_FTR_DSCR | \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \ @@ -473,7 +473,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ - CPU_FTR_DSCR | CPU_FTR_SAO | \ + CPU_FTR_DSCR | \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \ diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h index 4ba303ea27f5..7c07728af300 100644 --- a/arch/powerpc/include/asm/mman.h +++ b/arch/powerpc/include/asm/mman.h @@ -13,38 +13,20 @@ #include #include +#ifdef CONFIG_PPC_MEM_KEYS static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, unsigned long pkey) { -#ifdef CONFIG_PPC_MEM_KEYS - return (((prot & PROT_SAO) ? VM_SAO : 0) | pkey_to_vmflag_bits(pkey)); -#else - return ((prot & PROT_SAO) ? VM_SAO : 0); -#endif + return pkey_to_vmflag_bits(pkey); } #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey) static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags) { -#ifdef CONFIG_PPC_MEM_KEYS - return (vm_flags & VM_SAO) ? - __pgprot(_PAGE_SAO | vmflag_to_pte_pkey_bits(vm_flags)) : - __pgprot(0 | vmflag_to_pte_pkey_bits(vm_flags)); -#else - return (vm_flags & VM_SAO) ? 
__pgprot(_PAGE_SAO) : __pgprot(0); -#endif + return __pgprot(vmflag_to_pte_pkey_bits(vm_flags)); } #define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags) - -static inline bool arch_validate_prot(unsigned long prot, unsigned long addr) -{ - if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO)) - return false; - if ((prot & PROT_SAO) && !cpu_has_feature(CPU_FTR_SAO)) - return false; - return true; -} -#define arch_validate_prot arch_validate_prot +#endif #endif /* CONFIG_PPC64 */ #endif /* _ASM_POWERPC_MMAN_H */ diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h index 6cb8aa357191..59ee9fa4ae09 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h @@ -82,8 +82,6 @@ */ #include -#define _PAGE_SAO 0 - #define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1)) /* diff --git a/arch/powerpc/include/uapi/asm/mman.h b/arch/powerpc/include/uapi/asm/mman.h index c0c737215b00..3a700351feca 100644 --- a/arch/powerpc/include/uapi/asm/mman.h +++ b/arch/powerpc/include/uapi/asm/mman.h @@ -11,7 +11,7 @@ #include -#define PROT_SAO 0x10 /* Strong Access Ordering */ +#define PROT_SAO 0x10 /* Unsupported since v5.9 */ #define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */ #define MAP_NORESERVE 0x40 /* don't reserve swap pages */ diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 554bec785f6a..9aa8537f7da2 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -622,7 +622,7 @@ static struct dt_cpu_feature_match __initdata {"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL}, {"processor-utilization-of-resources-register", feat_enable_purr, 0}, {"no-execute", feat_enable, 0}, - {"strong-access-ordering", feat_enable, CPU_FTR_SAO}, + /* strong-access-ordering is unused */ {"cache-inhibited-large-page", feat_enable_large_ci, 0}, {"coprocessor-icswx", feat_enable, 0}, {"hypervisor-virtualization-interrupt", feat_enable_hvi, 0}, diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 9dfb0ceed5e3..6f9f346a5f65 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -232,8 +232,6 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags) rflags |= HPTE_R_I; else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT) rflags |= (HPTE_R_I | HPTE_R_G); - else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO) - rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M); else /* * Add memory coherence if cache inhibited is not set diff --git a/include/linux/mm.h b/include/linux/mm.h index dc7b87310c10..6c8333d6c991 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -317,8 +317,6 @@ extern unsigned int kobjsize(const void *objp); #if defined(CONFIG_X86) # define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */ -#elif defined(CONFIG_PPC) -# define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */ #elif defined(CONFIG_PARISC) # define VM_GROWSUP VM_ARCH_1 #elif defined(CONFIG_IA64) diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index 5fb752034386..939092dbcb8b 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -114,8 +114,6 @@ IF_HAVE_PG_IDLE(PG_idle, "idle" ) #if defined(CONFIG_X86) #define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" } -#elif defined(CONFIG_PPC) -#define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" } #elif defined(CONFIG_PARISC) || 
defined(CONFIG_IA64) #define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" } #elif !defined(CONFIG_MMU) diff --git a/mm/ksm.c b/mm/ksm.c index 4102034cd55a..d1cfa18689b5 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2452,10 +2452,6 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start, if (vma_is_dax(vma)) return 0; -#ifdef VM_SAO - if (*vm_flags & VM_SAO) - return 0; -#endif #ifdef VM_SPARC_ADI if (*vm_flags & VM_SPARC_ADI) return 0; diff --git a/tools/testing/selftests/powerpc/mm/.gitignore b/tools/testing/selftests/powerpc/mm/.gitignore index 8f841f925baa..8d041f508a51 100644 --- a/tools/testing/selftests/powerpc/mm/.gitignore +++ b/tools/testing/selftests/powerpc/mm/.gitignore @@ -2,7 +2,6 @@ hugetlb_vs_thp_test subpage_prot tempfile -prot_sao segv_errors wild_bctr large_vm_fork_separation diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile index f9fa0ba7435c..5a86d59441dc 100644 --- a/tools/testing/selftests/powerpc/mm/Makefile +++ b/tools/testing/selftests/powerpc/mm/Makefile @@ -2,7 +2,7 @@ noarg: $(MAKE) -C ../ -TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors wild_bctr \ +TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot segv_errors wild_bctr \ large_vm_fork_separation bad_accesses pkey_exec_prot TEST_GEN_PROGS_EXTENDED := tlbie_test TEST_GEN_FILES := tempfile @@ -12,8 +12,6 @@ include ../../lib.mk $(TEST_GEN_PROGS): ../harness.c ../utils.c -$(OUTPUT)/prot_sao: ../utils.c - $(OUTPUT)/wild_bctr: CFLAGS += -m64 $(OUTPUT)/large_vm_fork_separation: CFLAGS += -m64 $(OUTPUT)/bad_accesses: CFLAGS += -m64 diff --git a/tools/testing/selftests/powerpc/mm/prot_sao.c b/tools/testing/selftests/powerpc/mm/prot_sao.c deleted file mode 100644 index e2eed65b7735..000000000000 --- a/tools/testing/selftests/powerpc/mm/prot_sao.c +++ /dev/null @@ -1,42 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright 2016, Michael Ellerman, IBM Corp. - */ - -#include -#include -#include -#include - -#include - -#include "utils.h" - -#define SIZE (64 * 1024) - -int test_prot_sao(void) -{ - char *p; - - /* 2.06 or later should support SAO */ - SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06)); - - /* - * Ensure we can ask for PROT_SAO. - * We can't really verify that it does the right thing, but at least we - * confirm the kernel will accept it. - */ - p = mmap(NULL, SIZE, PROT_READ | PROT_WRITE | PROT_SAO, - MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); - FAIL_IF(p == MAP_FAILED); - - /* Write to the mapping, to at least cause a fault */ - memset(p, 0xaa, SIZE); - - return 0; -} - -int main(void) -{ - return test_harness(test_prot_sao, "prot-sao"); -} -- cgit v1.2.3 From f1d9b23cabc61e58509164c3c3132556476491d2 Mon Sep 17 00:00:00 2001 From: Richard Guy Briggs Date: Mon, 13 Jul 2020 15:51:59 -0400 Subject: audit: purge audit_log_string from the intra-kernel audit API audit_log_string() was inteded to be an internal audit function and since there are only two internal uses, remove them. Purge all external uses of it by restructuring code to use an existing audit_log_format() or using audit_log_format(). 
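The conversion is mechanical; the sketch below (helper name is illustrative, not taken from the patch) shows the shape of the change for a single audit field, assuming a valid struct audit_buffer:

#include <linux/audit.h>

/* Illustrative only: an " key=" + audit_log_string() pair collapses into
 * one audit_log_format() call that quotes the value itself.
 */
static void example_log_operation(struct audit_buffer *ab, const char *op)
{
	/* old form:
	 *   audit_log_format(ab, " operation=");
	 *   audit_log_string(ab, op);
	 */
	audit_log_format(ab, " operation=\"%s\"", op);
}
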
Please see the upstream issue https://github.com/linux-audit/audit-kernel/issues/84 Signed-off-by: Richard Guy Briggs Signed-off-by: Paul Moore --- include/linux/audit.h | 5 ----- kernel/audit.c | 4 ++-- security/apparmor/audit.c | 10 ++++------ security/apparmor/file.c | 25 +++++++------------------ security/apparmor/ipc.c | 46 +++++++++++++++++++++++----------------------- security/apparmor/net.c | 14 ++++++++------ security/lsm_audit.c | 4 ++-- 7 files changed, 46 insertions(+), 62 deletions(-) (limited to 'include') diff --git a/include/linux/audit.h b/include/linux/audit.h index 523f77494847..b3d859831a31 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -694,9 +694,4 @@ static inline bool audit_loginuid_set(struct task_struct *tsk) return uid_valid(audit_get_loginuid(tsk)); } -static inline void audit_log_string(struct audit_buffer *ab, const char *buf) -{ - audit_log_n_string(ab, buf, strlen(buf)); -} - #endif diff --git a/kernel/audit.c b/kernel/audit.c index 8c201f414226..a2f3e34aa724 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -2080,13 +2080,13 @@ void audit_log_d_path(struct audit_buffer *ab, const char *prefix, /* We will allow 11 spaces for ' (deleted)' to be appended */ pathname = kmalloc(PATH_MAX+11, ab->gfp_mask); if (!pathname) { - audit_log_string(ab, ""); + audit_log_format(ab, "\"\""); return; } p = d_path(path, pathname, PATH_MAX+11); if (IS_ERR(p)) { /* Should never happen since we send PATH_MAX */ /* FIXME: can we save some information here? */ - audit_log_string(ab, ""); + audit_log_format(ab, "\"\""); } else audit_log_untrustedstring(ab, p); kfree(pathname); diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c index 597732503815..f7e97c7e80f3 100644 --- a/security/apparmor/audit.c +++ b/security/apparmor/audit.c @@ -57,18 +57,16 @@ static void audit_pre(struct audit_buffer *ab, void *ca) struct common_audit_data *sa = ca; if (aa_g_audit_header) { - audit_log_format(ab, "apparmor="); - audit_log_string(ab, aa_audit_type[aad(sa)->type]); + audit_log_format(ab, "apparmor=\"%s\"", + aa_audit_type[aad(sa)->type]); } if (aad(sa)->op) { - audit_log_format(ab, " operation="); - audit_log_string(ab, aad(sa)->op); + audit_log_format(ab, " operation=\"%s\"", aad(sa)->op); } if (aad(sa)->info) { - audit_log_format(ab, " info="); - audit_log_string(ab, aad(sa)->info); + audit_log_format(ab, " info=\"%s\"", aad(sa)->info); if (aad(sa)->error) audit_log_format(ab, " error=%d", aad(sa)->error); } diff --git a/security/apparmor/file.c b/security/apparmor/file.c index 9a2d14b7c9f8..92acf9a49405 100644 --- a/security/apparmor/file.c +++ b/security/apparmor/file.c @@ -34,20 +34,6 @@ static u32 map_mask_to_chr_mask(u32 mask) return m; } -/** - * audit_file_mask - convert mask to permission string - * @buffer: buffer to write string to (NOT NULL) - * @mask: permission mask to convert - */ -static void audit_file_mask(struct audit_buffer *ab, u32 mask) -{ - char str[10]; - - aa_perm_mask_to_str(str, sizeof(str), aa_file_perm_chrs, - map_mask_to_chr_mask(mask)); - audit_log_string(ab, str); -} - /** * file_audit_cb - call back for file specific audit fields * @ab: audit_buffer (NOT NULL) @@ -57,14 +43,17 @@ static void file_audit_cb(struct audit_buffer *ab, void *va) { struct common_audit_data *sa = va; kuid_t fsuid = current_fsuid(); + char str[10]; if (aad(sa)->request & AA_AUDIT_FILE_MASK) { - audit_log_format(ab, " requested_mask="); - audit_file_mask(ab, aad(sa)->request); + aa_perm_mask_to_str(str, sizeof(str), aa_file_perm_chrs, + 
map_mask_to_chr_mask(aad(sa)->request)); + audit_log_format(ab, " requested_mask=\"%s\"", str); } if (aad(sa)->denied & AA_AUDIT_FILE_MASK) { - audit_log_format(ab, " denied_mask="); - audit_file_mask(ab, aad(sa)->denied); + aa_perm_mask_to_str(str, sizeof(str), aa_file_perm_chrs, + map_mask_to_chr_mask(aad(sa)->denied)); + audit_log_format(ab, " denied_mask=\"%s\"", str); } if (aad(sa)->request & AA_AUDIT_FILE_MASK) { audit_log_format(ab, " fsuid=%d", diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c index 4ecedffbdd33..fe36d112aad9 100644 --- a/security/apparmor/ipc.c +++ b/security/apparmor/ipc.c @@ -20,25 +20,23 @@ /** * audit_ptrace_mask - convert mask to permission string - * @buffer: buffer to write string to (NOT NULL) * @mask: permission mask to convert + * + * Returns: pointer to static string */ -static void audit_ptrace_mask(struct audit_buffer *ab, u32 mask) +static const char *audit_ptrace_mask(u32 mask) { switch (mask) { case MAY_READ: - audit_log_string(ab, "read"); - break; + return "read"; case MAY_WRITE: - audit_log_string(ab, "trace"); - break; + return "trace"; case AA_MAY_BE_READ: - audit_log_string(ab, "readby"); - break; + return "readby"; case AA_MAY_BE_TRACED: - audit_log_string(ab, "tracedby"); - break; + return "tracedby"; } + return ""; } /* call back to audit ptrace fields */ @@ -47,12 +45,12 @@ static void audit_ptrace_cb(struct audit_buffer *ab, void *va) struct common_audit_data *sa = va; if (aad(sa)->request & AA_PTRACE_PERM_MASK) { - audit_log_format(ab, " requested_mask="); - audit_ptrace_mask(ab, aad(sa)->request); + audit_log_format(ab, " requested_mask=\"%s\"", + audit_ptrace_mask(aad(sa)->request)); if (aad(sa)->denied & AA_PTRACE_PERM_MASK) { - audit_log_format(ab, " denied_mask="); - audit_ptrace_mask(ab, aad(sa)->denied); + audit_log_format(ab, " denied_mask=\"%s\"", + audit_ptrace_mask(aad(sa)->denied)); } } audit_log_format(ab, " peer="); @@ -142,16 +140,18 @@ static inline int map_signal_num(int sig) } /** - * audit_file_mask - convert mask to permission string - * @buffer: buffer to write string to (NOT NULL) + * audit_signal_mask - convert mask to permission string * @mask: permission mask to convert + * + * Returns: pointer to static string */ -static void audit_signal_mask(struct audit_buffer *ab, u32 mask) +static const char *audit_signal_mask(u32 mask) { if (mask & MAY_READ) - audit_log_string(ab, "receive"); + return "receive"; if (mask & MAY_WRITE) - audit_log_string(ab, "send"); + return "send"; + return ""; } /** @@ -164,11 +164,11 @@ static void audit_signal_cb(struct audit_buffer *ab, void *va) struct common_audit_data *sa = va; if (aad(sa)->request & AA_SIGNAL_PERM_MASK) { - audit_log_format(ab, " requested_mask="); - audit_signal_mask(ab, aad(sa)->request); + audit_log_format(ab, " requested_mask=\"%s\"", + audit_signal_mask(aad(sa)->request)); if (aad(sa)->denied & AA_SIGNAL_PERM_MASK) { - audit_log_format(ab, " denied_mask="); - audit_signal_mask(ab, aad(sa)->denied); + audit_log_format(ab, " denied_mask=\"%s\"", + audit_signal_mask(aad(sa)->denied)); } } if (aad(sa)->signal == SIGUNKNOWN) diff --git a/security/apparmor/net.c b/security/apparmor/net.c index d8afc39f663a..fa0e85568450 100644 --- a/security/apparmor/net.c +++ b/security/apparmor/net.c @@ -72,16 +72,18 @@ void audit_net_cb(struct audit_buffer *ab, void *va) { struct common_audit_data *sa = va; - audit_log_format(ab, " family="); if (address_family_names[sa->u.net->family]) - audit_log_string(ab, address_family_names[sa->u.net->family]); + 
audit_log_format(ab, " family=\"%s\"", + address_family_names[sa->u.net->family]); else - audit_log_format(ab, "\"unknown(%d)\"", sa->u.net->family); - audit_log_format(ab, " sock_type="); + audit_log_format(ab, " family=\"unknown(%d)\"", + sa->u.net->family); if (sock_type_names[aad(sa)->net.type]) - audit_log_string(ab, sock_type_names[aad(sa)->net.type]); + audit_log_format(ab, " sock_type=\"%s\"", + sock_type_names[aad(sa)->net.type]); else - audit_log_format(ab, "\"unknown(%d)\"", aad(sa)->net.type); + audit_log_format(ab, " sock_type=\"unknown(%d)\"", + aad(sa)->net.type); audit_log_format(ab, " protocol=%d", aad(sa)->net.protocol); if (aad(sa)->request & NET_PERMS_MASK) { diff --git a/security/lsm_audit.c b/security/lsm_audit.c index 7c555621c2bd..53d0d183db8f 100644 --- a/security/lsm_audit.c +++ b/security/lsm_audit.c @@ -432,8 +432,8 @@ static void dump_common_audit_data(struct audit_buffer *ab, a->u.ibendport->port); break; case LSM_AUDIT_DATA_LOCKDOWN: - audit_log_format(ab, " lockdown_reason="); - audit_log_string(ab, lockdown_reasons[a->u.reason]); + audit_log_format(ab, " lockdown_reason=\"%s\"", + lockdown_reasons[a->u.reason]); break; } /* switch (a->type) */ } -- cgit v1.2.3 From b43870c74f3fdf0cd06bf5f1b7a5ed70a2cd4ed2 Mon Sep 17 00:00:00 2001 From: Max Englander Date: Sat, 4 Jul 2020 15:15:28 +0000 Subject: audit: report audit wait metric in audit status reply In environments where the preservation of audit events and predictable usage of system memory are prioritized, admins may use a combination of --backlog_wait_time and -b options at the risk of degraded performance resulting from backlog waiting. In some cases, this risk may be preferred to lost events or unbounded memory usage. Ideally, this risk can be mitigated by making adjustments when backlog waiting is detected. However, detection can be difficult using the currently available metrics. For example, an admin attempting to debug degraded performance may falsely believe a full backlog indicates backlog waiting. It may turn out the backlog frequently fills up but drains quickly. To make it easier to reliably track degraded performance to backlog waiting, this patch makes the following changes: Add a new field backlog_wait_time_total to the audit status reply. Initialize this field to zero. Add to this field the total time spent by the current task on scheduled timeouts while the backlog limit is exceeded. Reset field to zero upon request via AUDIT_SET. 
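The accounting itself is a small amount of arithmetic around schedule_timeout(), which returns the portion of the requested timeout that was not slept; a minimal sketch of the idea (symbol names are illustrative, the real counter lives in kernel/audit.c as the hunks below show):

#include <linux/sched.h>
#include <linux/atomic.h>

static atomic_t example_wait_time_actual = ATOMIC_INIT(0);

/* Sleep for up to @timeout jiffies and add the time actually spent
 * waiting to the running total: elapsed = requested - remaining.
 */
static void example_backlog_wait(long timeout)
{
	long remaining;

	set_current_state(TASK_UNINTERRUPTIBLE);
	remaining = schedule_timeout(timeout);
	atomic_add(timeout - remaining, &example_wait_time_actual);
}
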
Tested on Ubuntu 18.04 using complementary changes to the audit-userspace and audit-testsuite: - https://github.com/linux-audit/audit-userspace/pull/134 - https://github.com/linux-audit/audit-testsuite/pull/97 Signed-off-by: Max Englander Signed-off-by: Paul Moore --- include/uapi/linux/audit.h | 18 +++++++++++------- kernel/audit.c | 35 +++++++++++++++++++++++++---------- 2 files changed, 36 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h index 9b6a973f4cc3..cd2d8279a5e4 100644 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h @@ -333,14 +333,15 @@ enum { }; /* Status symbols */ - /* Mask values */ -#define AUDIT_STATUS_ENABLED 0x0001 -#define AUDIT_STATUS_FAILURE 0x0002 -#define AUDIT_STATUS_PID 0x0004 + /* Mask values */ +#define AUDIT_STATUS_ENABLED 0x0001 +#define AUDIT_STATUS_FAILURE 0x0002 +#define AUDIT_STATUS_PID 0x0004 #define AUDIT_STATUS_RATE_LIMIT 0x0008 -#define AUDIT_STATUS_BACKLOG_LIMIT 0x0010 -#define AUDIT_STATUS_BACKLOG_WAIT_TIME 0x0020 -#define AUDIT_STATUS_LOST 0x0040 +#define AUDIT_STATUS_BACKLOG_LIMIT 0x0010 +#define AUDIT_STATUS_BACKLOG_WAIT_TIME 0x0020 +#define AUDIT_STATUS_LOST 0x0040 +#define AUDIT_STATUS_BACKLOG_WAIT_TIME_ACTUAL 0x0080 #define AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT 0x00000001 #define AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME 0x00000002 @@ -467,6 +468,9 @@ struct audit_status { __u32 feature_bitmap; /* bitmap of kernel audit features */ }; __u32 backlog_wait_time;/* message queue wait timeout */ + __u32 backlog_wait_time_actual;/* time spent waiting while + * message limit exceeded + */ }; struct audit_features { diff --git a/kernel/audit.c b/kernel/audit.c index a2f3e34aa724..d72663ac248c 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -136,6 +136,11 @@ u32 audit_sig_sid = 0; */ static atomic_t audit_lost = ATOMIC_INIT(0); +/* Monotonically increasing sum of time the kernel has spent + * waiting while the backlog limit is exceeded. 
+ */ +static atomic_t audit_backlog_wait_time_actual = ATOMIC_INIT(0); + /* Hash for inode-based rules */ struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS]; @@ -1201,17 +1206,18 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) case AUDIT_GET: { struct audit_status s; memset(&s, 0, sizeof(s)); - s.enabled = audit_enabled; - s.failure = audit_failure; + s.enabled = audit_enabled; + s.failure = audit_failure; /* NOTE: use pid_vnr() so the PID is relative to the current * namespace */ - s.pid = auditd_pid_vnr(); - s.rate_limit = audit_rate_limit; - s.backlog_limit = audit_backlog_limit; - s.lost = atomic_read(&audit_lost); - s.backlog = skb_queue_len(&audit_queue); - s.feature_bitmap = AUDIT_FEATURE_BITMAP_ALL; - s.backlog_wait_time = audit_backlog_wait_time; + s.pid = auditd_pid_vnr(); + s.rate_limit = audit_rate_limit; + s.backlog_limit = audit_backlog_limit; + s.lost = atomic_read(&audit_lost); + s.backlog = skb_queue_len(&audit_queue); + s.feature_bitmap = AUDIT_FEATURE_BITMAP_ALL; + s.backlog_wait_time = audit_backlog_wait_time; + s.backlog_wait_time_actual = atomic_read(&audit_backlog_wait_time_actual); audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &s, sizeof(s)); break; } @@ -1315,6 +1321,12 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) audit_log_config_change("lost", 0, lost, 1); return lost; } + if (s.mask == AUDIT_STATUS_BACKLOG_WAIT_TIME_ACTUAL) { + u32 actual = atomic_xchg(&audit_backlog_wait_time_actual, 0); + + audit_log_config_change("backlog_wait_time_actual", 0, actual, 1); + return actual; + } break; } case AUDIT_GET_FEATURE: @@ -1826,12 +1838,15 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, /* sleep if we are allowed and we haven't exhausted our * backlog wait limit */ if (gfpflags_allow_blocking(gfp_mask) && (stime > 0)) { + long rtime = stime; + DECLARE_WAITQUEUE(wait, current); add_wait_queue_exclusive(&audit_backlog_wait, &wait); set_current_state(TASK_UNINTERRUPTIBLE); - stime = schedule_timeout(stime); + stime = schedule_timeout(rtime); + atomic_add(rtime - stime, &audit_backlog_wait_time_actual); remove_wait_queue(&audit_backlog_wait, &wait); } else { if (audit_rate_check() && printk_ratelimit()) -- cgit v1.2.3 From 98cc1b93724aa85bd828269855d48d884c702726 Mon Sep 17 00:00:00 2001 From: Dan Murphy Date: Mon, 20 Jul 2020 15:43:57 -0500 Subject: power_supply: Add additional health properties to the header Add HEALTH_WARM, HEALTH_COOL and HEALTH_HOT to the health enum. HEALTH_WARM, HEALTH_COOL, and HEALTH_HOT properties are taken from JEITA specification JISC8712:2015 Acked-by: Andrew F. 
Davis Tested-by: Guru Das Srinagesh Signed-off-by: Dan Murphy Signed-off-by: Sebastian Reichel --- Documentation/ABI/testing/sysfs-class-power | 3 ++- drivers/power/supply/power_supply_sysfs.c | 3 +++ include/linux/power_supply.h | 3 +++ 3 files changed, 8 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/Documentation/ABI/testing/sysfs-class-power b/Documentation/ABI/testing/sysfs-class-power index 216d61a22f1e..40213c73bc9c 100644 --- a/Documentation/ABI/testing/sysfs-class-power +++ b/Documentation/ABI/testing/sysfs-class-power @@ -205,7 +205,8 @@ Description: Valid values: "Unknown", "Good", "Overheat", "Dead", "Over voltage", "Unspecified failure", "Cold", "Watchdog timer expire", "Safety timer expire", - "Over current", "Calibration required" + "Over current", "Calibration required", "Warm", + "Cool", "Hot" What: /sys/class/power_supply//precharge_current Date: June 2017 diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index af0cad253f52..b903cb4dca2b 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -101,6 +101,9 @@ static const char * const POWER_SUPPLY_HEALTH_TEXT[] = { [POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE] = "Safety timer expire", [POWER_SUPPLY_HEALTH_OVERCURRENT] = "Over current", [POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED] = "Calibration required", + [POWER_SUPPLY_HEALTH_WARM] = "Warm", + [POWER_SUPPLY_HEALTH_COOL] = "Cool", + [POWER_SUPPLY_HEALTH_HOT] = "Hot", }; static const char * const POWER_SUPPLY_TECHNOLOGY_TEXT[] = { diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index ac1345a48ad0..b5ee35d3c304 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -62,6 +62,9 @@ enum { POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE, POWER_SUPPLY_HEALTH_OVERCURRENT, POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED, + POWER_SUPPLY_HEALTH_WARM, + POWER_SUPPLY_HEALTH_COOL, + POWER_SUPPLY_HEALTH_HOT, }; enum { -- cgit v1.2.3 From bc4f0548f683a3d53359cef15f088d2d5bb4bc39 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Mon, 20 Jul 2020 09:33:58 -0700 Subject: bpf: Compute bpf_skc_to_*() helper socket btf ids at build time Currently, socket types (struct tcp_sock, udp_sock, etc.) used by bpf_skc_to_*() helpers are computed when vmlinux_btf is first built in the kernel. Commit 5a2798ab32ba ("bpf: Add BTF_ID_LIST/BTF_ID/BTF_ID_UNUSED macros") implemented a mechanism to compute btf_ids at kernel build time which can simplify kernel implementation and reduce runtime overhead by removing in-kernel btf_id calculation. This patch did exactly this, removing in-kernel btf_id computation and utilizing build-time btf_id computation. If CONFIG_DEBUG_INFO_BTF is not defined, BTF_ID_LIST will define an array with size of 5, which is not enough for btf_sock_ids. So define its own static array if CONFIG_DEBUG_INFO_BTF is not defined. 
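For reference, the build-time mechanism this patch switches to looks like the sketch below in use (the list name here is made up; the real list is btf_sock_ids in net/core/filter.c). With CONFIG_DEBUG_INFO_BTF the entries are filled in by the resolve_btfids tool at link time; without it BTF_ID_LIST degrades to a small zeroed array, which is why the patch adds its own fallback array for btf_sock_ids.

#include <linux/types.h>
#include <linux/btf_ids.h>

/* Declare a list of BTF IDs that resolve_btfids fills in at build time. */
BTF_ID_LIST(example_sock_btf_ids)
BTF_ID(struct, tcp_sock)
BTF_ID(struct, udp_sock)

/* After linking, example_sock_btf_ids[0] holds the BTF ID of struct
 * tcp_sock and [1] that of struct udp_sock; no runtime lookup needed.
 */
static u32 example_tcp_sock_btf_id(void)
{
	return example_sock_btf_ids[0];
}
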
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200720163358.1393023-1-yhs@fb.com --- include/linux/bpf.h | 4 ---- kernel/bpf/btf.c | 1 - net/core/filter.c | 49 ++++++++++++++++++------------------------------- 3 files changed, 18 insertions(+), 36 deletions(-) (limited to 'include') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index adb16bdc5f0a..1df1c0fd3f28 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1541,7 +1541,6 @@ static inline bool bpf_map_is_dev_bound(struct bpf_map *map) struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr); void bpf_map_offload_map_free(struct bpf_map *map); -void init_btf_sock_ids(struct btf *btf); #else static inline int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr) @@ -1567,9 +1566,6 @@ static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) static inline void bpf_map_offload_map_free(struct bpf_map *map) { } -static inline void init_btf_sock_ids(struct btf *btf) -{ -} #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ #if defined(CONFIG_BPF_STREAM_PARSER) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 03d6d43bb1d6..315cde73421b 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -3672,7 +3672,6 @@ struct btf *btf_parse_vmlinux(void) goto errout; bpf_struct_ops_init(btf, log); - init_btf_sock_ids(btf); btf_verifier_env_free(env); refcount_set(&btf->refcnt, 1); diff --git a/net/core/filter.c b/net/core/filter.c index 2bd129b5ae74..5a65fb4b95ff 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -9426,19 +9426,19 @@ void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog) * sock_common as the first argument in its memory layout. */ #define BTF_SOCK_TYPE_xxx \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET, "inet_sock") \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_CONN, "inet_connection_sock") \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_REQ, "inet_request_sock") \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_TW, "inet_timewait_sock") \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_REQ, "request_sock") \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK, "sock") \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK_COMMON, "sock_common") \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP, "tcp_sock") \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_REQ, "tcp_request_sock") \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, "tcp_timewait_sock") \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, "tcp6_sock") \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, "udp_sock") \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, "udp6_sock") + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET, inet_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_CONN, inet_connection_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_REQ, inet_request_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_TW, inet_timewait_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_REQ, request_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK, sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK_COMMON, sock_common) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP, tcp_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_REQ, tcp_request_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) enum { #define BTF_SOCK_TYPE(name, str) name, @@ -9447,26 +9447,13 @@ BTF_SOCK_TYPE_xxx MAX_BTF_SOCK_TYPE, }; -static int btf_sock_ids[MAX_BTF_SOCK_TYPE]; - -#ifdef CONFIG_BPF_SYSCALL -static const char *bpf_sock_types[] = { -#define BTF_SOCK_TYPE(name, str) str, +#ifdef CONFIG_DEBUG_INFO_BTF +BTF_ID_LIST(btf_sock_ids) +#define BTF_SOCK_TYPE(name, 
type) BTF_ID(struct, type) BTF_SOCK_TYPE_xxx #undef BTF_SOCK_TYPE -}; - -void init_btf_sock_ids(struct btf *btf) -{ - int i, btf_id; - - for (i = 0; i < MAX_BTF_SOCK_TYPE; i++) { - btf_id = btf_find_by_name_kind(btf, bpf_sock_types[i], - BTF_KIND_STRUCT); - if (btf_id > 0) - btf_sock_ids[i] = btf_id; - } -} +#else +static u32 btf_sock_ids[MAX_BTF_SOCK_TYPE]; #endif static bool check_arg_btf_id(u32 btf_id, u32 arg) -- cgit v1.2.3 From 0f12e584b241285cf60a6227f3771fa444cfcf76 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Mon, 20 Jul 2020 09:34:01 -0700 Subject: bpf: Add BTF_ID_LIST_GLOBAL in btf_ids.h Existing BTF_ID_LIST used a local static variable to store btf_ids. This patch provided a new macro BTF_ID_LIST_GLOBAL to store btf_ids in a global variable which can be shared among multiple files. The existing BTF_ID_LIST is still retained. Two reasons. First, BTF_ID_LIST is also used to build btf_ids for helper arguments which typically is an array of 5. Since typically different helpers have different signature, it makes little sense to share them. Second, some current computed btf_ids are indeed local. If later those btf_ids are shared between different files, they can use BTF_ID_LIST_GLOBAL then. Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Jiri Olsa Link: https://lore.kernel.org/bpf/20200720163401.1393159-1-yhs@fb.com --- include/linux/btf_ids.h | 10 +++++-- tools/include/linux/btf_ids.h | 10 +++++-- .../selftests/bpf/prog_tests/resolve_btfids.c | 33 ++++++++++++++++------ 3 files changed, 39 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h index 1cdb56950ffe..77ab45baa095 100644 --- a/include/linux/btf_ids.h +++ b/include/linux/btf_ids.h @@ -57,17 +57,20 @@ asm( \ * .zero 4 * */ -#define __BTF_ID_LIST(name) \ +#define __BTF_ID_LIST(name, scope) \ asm( \ ".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ -".local " #name "; \n" \ +"." #scope " " #name "; \n" \ #name ":; \n" \ ".popsection; \n"); \ #define BTF_ID_LIST(name) \ -__BTF_ID_LIST(name) \ +__BTF_ID_LIST(name, local) \ extern u32 name[]; +#define BTF_ID_LIST_GLOBAL(name) \ +__BTF_ID_LIST(name, globl) + /* * The BTF_ID_UNUSED macro defines 4 zero bytes. * It's used when we want to define 'unused' entry @@ -90,6 +93,7 @@ asm( \ #define BTF_ID_LIST(name) static u32 name[5]; #define BTF_ID(prefix, name) #define BTF_ID_UNUSED +#define BTF_ID_LIST_GLOBAL(name) u32 name[1]; #endif /* CONFIG_DEBUG_INFO_BTF */ diff --git a/tools/include/linux/btf_ids.h b/tools/include/linux/btf_ids.h index 1cdb56950ffe..77ab45baa095 100644 --- a/tools/include/linux/btf_ids.h +++ b/tools/include/linux/btf_ids.h @@ -57,17 +57,20 @@ asm( \ * .zero 4 * */ -#define __BTF_ID_LIST(name) \ +#define __BTF_ID_LIST(name, scope) \ asm( \ ".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ -".local " #name "; \n" \ +"." #scope " " #name "; \n" \ #name ":; \n" \ ".popsection; \n"); \ #define BTF_ID_LIST(name) \ -__BTF_ID_LIST(name) \ +__BTF_ID_LIST(name, local) \ extern u32 name[]; +#define BTF_ID_LIST_GLOBAL(name) \ +__BTF_ID_LIST(name, globl) + /* * The BTF_ID_UNUSED macro defines 4 zero bytes. 
* It's used when we want to define 'unused' entry @@ -90,6 +93,7 @@ asm( \ #define BTF_ID_LIST(name) static u32 name[5]; #define BTF_ID(prefix, name) #define BTF_ID_UNUSED +#define BTF_ID_LIST_GLOBAL(name) u32 name[1]; #endif /* CONFIG_DEBUG_INFO_BTF */ diff --git a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c index 22d83bba4e91..3b127cab4864 100644 --- a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c +++ b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c @@ -28,7 +28,17 @@ struct symbol test_symbols[] = { { "func", BTF_KIND_FUNC, -1 }, }; -BTF_ID_LIST(test_list) +BTF_ID_LIST(test_list_local) +BTF_ID_UNUSED +BTF_ID(typedef, S) +BTF_ID(typedef, T) +BTF_ID(typedef, U) +BTF_ID(struct, S) +BTF_ID(union, U) +BTF_ID(func, func) + +extern __u32 test_list_global[]; +BTF_ID_LIST_GLOBAL(test_list_global) BTF_ID_UNUSED BTF_ID(typedef, S) BTF_ID(typedef, T) @@ -94,18 +104,25 @@ static int resolve_symbols(void) int test_resolve_btfids(void) { - unsigned int i; + __u32 *test_list, *test_lists[] = { test_list_local, test_list_global }; + unsigned int i, j; int ret = 0; if (resolve_symbols()) return -1; - /* Check BTF_ID_LIST(test_list) IDs */ - for (i = 0; i < ARRAY_SIZE(test_symbols) && !ret; i++) { - ret = CHECK(test_list[i] != test_symbols[i].id, - "id_check", - "wrong ID for %s (%d != %d)\n", test_symbols[i].name, - test_list[i], test_symbols[i].id); + /* Check BTF_ID_LIST(test_list_local) and + * BTF_ID_LIST_GLOBAL(test_list_global) IDs + */ + for (j = 0; j < ARRAY_SIZE(test_lists); j++) { + test_list = test_lists[j]; + for (i = 0; i < ARRAY_SIZE(test_symbols) && !ret; i++) { + ret = CHECK(test_list[i] != test_symbols[i].id, + "id_check", + "wrong ID for %s (%d != %d)\n", + test_symbols[i].name, + test_list[i], test_symbols[i].id); + } } return ret; -- cgit v1.2.3 From fce557bcef119a1bc5ab3cb02678cf454bcaf424 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Mon, 20 Jul 2020 09:34:02 -0700 Subject: bpf: Make btf_sock_ids global tcp and udp bpf_iter can reuse some socket ids in btf_sock_ids, so make it global. I put the extern definition in btf_ids.h as a central place so it can be easily discovered by developers. Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200720163402.1393427-1-yhs@fb.com --- include/linux/btf_ids.h | 30 ++++++++++++++++++++++++++++++ net/core/filter.c | 30 ++---------------------------- tools/include/linux/btf_ids.h | 30 ++++++++++++++++++++++++++++++ 3 files changed, 62 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h index 77ab45baa095..4867d549e3c1 100644 --- a/include/linux/btf_ids.h +++ b/include/linux/btf_ids.h @@ -97,4 +97,34 @@ asm( \ #endif /* CONFIG_DEBUG_INFO_BTF */ +#ifdef CONFIG_NET +/* Define a list of socket types which can be the argument for + * skc_to_*_sock() helpers. All these sockets should have + * sock_common as the first argument in its memory layout. 
+ */ +#define BTF_SOCK_TYPE_xxx \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET, inet_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_CONN, inet_connection_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_REQ, inet_request_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_TW, inet_timewait_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_REQ, request_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK, sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK_COMMON, sock_common) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP, tcp_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_REQ, tcp_request_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) + +enum { +#define BTF_SOCK_TYPE(name, str) name, +BTF_SOCK_TYPE_xxx +#undef BTF_SOCK_TYPE +MAX_BTF_SOCK_TYPE, +}; + +extern u32 btf_sock_ids[]; +#endif + #endif diff --git a/net/core/filter.c b/net/core/filter.c index 5a65fb4b95ff..654c346b7d91 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -9421,39 +9421,13 @@ void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog) bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev_prog, prog); } -/* Define a list of socket types which can be the argument for - * skc_to_*_sock() helpers. All these sockets should have - * sock_common as the first argument in its memory layout. - */ -#define BTF_SOCK_TYPE_xxx \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET, inet_sock) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_CONN, inet_connection_sock) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_REQ, inet_request_sock) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_TW, inet_timewait_sock) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_REQ, request_sock) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK, sock) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK_COMMON, sock_common) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP, tcp_sock) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_REQ, tcp_request_sock) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) - -enum { -#define BTF_SOCK_TYPE(name, str) name, -BTF_SOCK_TYPE_xxx -#undef BTF_SOCK_TYPE -MAX_BTF_SOCK_TYPE, -}; - #ifdef CONFIG_DEBUG_INFO_BTF -BTF_ID_LIST(btf_sock_ids) +BTF_ID_LIST_GLOBAL(btf_sock_ids) #define BTF_SOCK_TYPE(name, type) BTF_ID(struct, type) BTF_SOCK_TYPE_xxx #undef BTF_SOCK_TYPE #else -static u32 btf_sock_ids[MAX_BTF_SOCK_TYPE]; +u32 btf_sock_ids[MAX_BTF_SOCK_TYPE]; #endif static bool check_arg_btf_id(u32 btf_id, u32 arg) diff --git a/tools/include/linux/btf_ids.h b/tools/include/linux/btf_ids.h index 77ab45baa095..4867d549e3c1 100644 --- a/tools/include/linux/btf_ids.h +++ b/tools/include/linux/btf_ids.h @@ -97,4 +97,34 @@ asm( \ #endif /* CONFIG_DEBUG_INFO_BTF */ +#ifdef CONFIG_NET +/* Define a list of socket types which can be the argument for + * skc_to_*_sock() helpers. All these sockets should have + * sock_common as the first argument in its memory layout. 
+ */ +#define BTF_SOCK_TYPE_xxx \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET, inet_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_CONN, inet_connection_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_REQ, inet_request_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_TW, inet_timewait_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_REQ, request_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK, sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK_COMMON, sock_common) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP, tcp_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_REQ, tcp_request_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) + +enum { +#define BTF_SOCK_TYPE(name, str) name, +BTF_SOCK_TYPE_xxx +#undef BTF_SOCK_TYPE +MAX_BTF_SOCK_TYPE, +}; + +extern u32 btf_sock_ids[]; +#endif + #endif -- cgit v1.2.3 From 951cf368bcb11d6f817709660cf5cd914072c36f Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Mon, 20 Jul 2020 09:34:03 -0700 Subject: bpf: net: Use precomputed btf_id for bpf iterators One additional field btf_id is added to struct bpf_ctx_arg_aux to store the precomputed btf_ids. The btf_id is computed at build time with BTF_ID_LIST or BTF_ID_LIST_GLOBAL macro definitions. All existing bpf iterators are changed to used pre-compute btf_ids. Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200720163403.1393551-1-yhs@fb.com --- include/linux/bpf.h | 1 + kernel/bpf/btf.c | 5 +++-- kernel/bpf/map_iter.c | 7 ++++++- kernel/bpf/task_iter.c | 12 ++++++++++-- net/ipv4/tcp_ipv4.c | 4 +++- net/ipv4/udp.c | 4 +++- net/ipv6/route.c | 7 ++++++- net/netlink/af_netlink.c | 7 ++++++- 8 files changed, 38 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 1df1c0fd3f28..bae557ff2da8 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -668,6 +668,7 @@ struct bpf_jit_poke_descriptor { struct bpf_ctx_arg_aux { u32 offset; enum bpf_reg_type reg_type; + u32 btf_id; }; struct bpf_prog_aux { diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 315cde73421b..ee36b7f60936 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -3817,16 +3817,17 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type, return true; /* this is a pointer to another type */ - info->reg_type = PTR_TO_BTF_ID; for (i = 0; i < prog->aux->ctx_arg_info_size; i++) { const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i]; if (ctx_arg_info->offset == off) { info->reg_type = ctx_arg_info->reg_type; - break; + info->btf_id = ctx_arg_info->btf_id; + return true; } } + info->reg_type = PTR_TO_BTF_ID; if (tgt_prog) { ret = btf_translate_to_vmlinux(log, btf, t, tgt_prog->type, arg); if (ret > 0) { diff --git a/kernel/bpf/map_iter.c b/kernel/bpf/map_iter.c index c69071e334bf..8a7af11b411f 100644 --- a/kernel/bpf/map_iter.c +++ b/kernel/bpf/map_iter.c @@ -4,6 +4,7 @@ #include #include #include +#include struct bpf_iter_seq_map_info { u32 mid; @@ -81,7 +82,10 @@ static const struct seq_operations bpf_map_seq_ops = { .show = bpf_map_seq_show, }; -static const struct bpf_iter_reg bpf_map_reg_info = { +BTF_ID_LIST(btf_bpf_map_id) +BTF_ID(struct, bpf_map) + +static struct bpf_iter_reg bpf_map_reg_info = { .target = "bpf_map", .seq_ops = &bpf_map_seq_ops, .init_seq_private = NULL, @@ -96,6 +100,7 @@ static const struct bpf_iter_reg bpf_map_reg_info = { static int __init bpf_map_iter_init(void) { + 
bpf_map_reg_info.ctx_arg_info[0].btf_id = *btf_bpf_map_id; return bpf_iter_reg_target(&bpf_map_reg_info); } diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c index 4dbf2b6035f8..2feecf095609 100644 --- a/kernel/bpf/task_iter.c +++ b/kernel/bpf/task_iter.c @@ -7,6 +7,7 @@ #include #include #include +#include struct bpf_iter_seq_task_common { struct pid_namespace *ns; @@ -312,7 +313,11 @@ static const struct seq_operations task_file_seq_ops = { .show = task_file_seq_show, }; -static const struct bpf_iter_reg task_reg_info = { +BTF_ID_LIST(btf_task_file_ids) +BTF_ID(struct, task_struct) +BTF_ID(struct, file) + +static struct bpf_iter_reg task_reg_info = { .target = "task", .seq_ops = &task_seq_ops, .init_seq_private = init_seq_pidns, @@ -325,7 +330,7 @@ static const struct bpf_iter_reg task_reg_info = { }, }; -static const struct bpf_iter_reg task_file_reg_info = { +static struct bpf_iter_reg task_file_reg_info = { .target = "task_file", .seq_ops = &task_file_seq_ops, .init_seq_private = init_seq_pidns, @@ -344,10 +349,13 @@ static int __init task_iter_init(void) { int ret; + task_reg_info.ctx_arg_info[0].btf_id = btf_task_file_ids[0]; ret = bpf_iter_reg_target(&task_reg_info); if (ret) return ret; + task_file_reg_info.ctx_arg_info[0].btf_id = btf_task_file_ids[0]; + task_file_reg_info.ctx_arg_info[1].btf_id = btf_task_file_ids[1]; return bpf_iter_reg_target(&task_file_reg_info); } late_initcall(task_iter_init); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 116c11a0aaed..a7f1b41482f8 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -76,6 +76,7 @@ #include #include #include +#include #include #include @@ -2954,7 +2955,7 @@ static void bpf_iter_fini_tcp(void *priv_data) bpf_iter_fini_seq_net(priv_data); } -static const struct bpf_iter_reg tcp_reg_info = { +static struct bpf_iter_reg tcp_reg_info = { .target = "tcp", .seq_ops = &bpf_iter_tcp_seq_ops, .init_seq_private = bpf_iter_init_tcp, @@ -2969,6 +2970,7 @@ static const struct bpf_iter_reg tcp_reg_info = { static void __init bpf_iter_register(void) { + tcp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON]; if (bpf_iter_reg_target(&tcp_reg_info)) pr_warn("Warning: could not register bpf iterator tcp\n"); } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index b738c63d7a77..b5231ab350e0 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -106,6 +106,7 @@ #include #include #include +#include #include #include #include "udp_impl.h" @@ -3232,7 +3233,7 @@ static void bpf_iter_fini_udp(void *priv_data) bpf_iter_fini_seq_net(priv_data); } -static const struct bpf_iter_reg udp_reg_info = { +static struct bpf_iter_reg udp_reg_info = { .target = "udp", .seq_ops = &bpf_iter_udp_seq_ops, .init_seq_private = bpf_iter_init_udp, @@ -3247,6 +3248,7 @@ static const struct bpf_iter_reg udp_reg_info = { static void __init bpf_iter_register(void) { + udp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UDP]; if (bpf_iter_reg_target(&udp_reg_info)) pr_warn("Warning: could not register bpf iterator udp\n"); } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 427b81cbc164..33f5efbad0a9 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -61,6 +61,7 @@ #include #include #include +#include #ifdef CONFIG_SYSCTL #include @@ -6423,7 +6424,10 @@ void __init ip6_route_init_special_entries(void) #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) DEFINE_BPF_ITER_FUNC(ipv6_route, struct bpf_iter_meta *meta, struct fib6_info *rt) -static const struct bpf_iter_reg ipv6_route_reg_info = { 
+BTF_ID_LIST(btf_fib6_info_id) +BTF_ID(struct, fib6_info) + +static struct bpf_iter_reg ipv6_route_reg_info = { .target = "ipv6_route", .seq_ops = &ipv6_route_seq_ops, .init_seq_private = bpf_iter_init_seq_net, @@ -6438,6 +6442,7 @@ static const struct bpf_iter_reg ipv6_route_reg_info = { static int __init bpf_iter_register(void) { + ipv6_route_reg_info.ctx_arg_info[0].btf_id = *btf_fib6_info_id; return bpf_iter_reg_target(&ipv6_route_reg_info); } diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 4f2c3b14ddbf..3cd58f0c2de4 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -60,6 +60,7 @@ #include #include #include +#include #include #include @@ -2803,7 +2804,10 @@ static const struct rhashtable_params netlink_rhashtable_params = { }; #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) -static const struct bpf_iter_reg netlink_reg_info = { +BTF_ID_LIST(btf_netlink_sock_id) +BTF_ID(struct, netlink_sock) + +static struct bpf_iter_reg netlink_reg_info = { .target = "netlink", .seq_ops = &netlink_seq_ops, .init_seq_private = bpf_iter_init_seq_net, @@ -2818,6 +2822,7 @@ static const struct bpf_iter_reg netlink_reg_info = { static int __init bpf_iter_register(void) { + netlink_reg_info.ctx_arg_info[0].btf_id = *btf_netlink_sock_id; return bpf_iter_reg_target(&netlink_reg_info); } #endif -- cgit v1.2.3 From 76abf9cea6c8215ea45b9359f11f0e30127c544a Mon Sep 17 00:00:00 2001 From: Rishabh Bhatnagar Date: Thu, 16 Jul 2020 15:20:33 -0700 Subject: remoteproc: Pass size and offset as arguments to segment dump function Change the segment dump API signature to include size and offset arguments. Refactor the qcom_q6v5_mss driver to use these arguments while copying the segment. Doing this lays the ground work for "inline" coredump functionality being added in the next patch. 
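A driver-side callback under the new signature would look roughly like this sketch (the physical base and the commented registration values are placeholders, not from the patch); the point is that the callback now copies size bytes starting at offset rather than unconditionally copying segment->size:

#include <linux/remoteproc.h>
#include <linux/io.h>
#include <linux/string.h>

static phys_addr_t example_mem_base;	/* hypothetical carveout base */

static void example_dump_segment(struct rproc *rproc,
				 struct rproc_dump_segment *segment,
				 void *dest, size_t offset, size_t size)
{
	/* Map only the window being requested, not the whole segment. */
	void *ptr = ioremap_wc(example_mem_base + segment->da + offset, size);

	if (ptr) {
		memcpy(dest, ptr, size);
		iounmap(ptr);
	} else {
		memset(dest, 0xff, size);
	}
}

/* Registered the same way as before; only the callback prototype changed:
 *   rproc_coredump_add_custom_segment(rproc, da, size,
 *                                     example_dump_segment, NULL);
 */
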
Tested-by: Sibi Sankar Reviewed-by: Bjorn Andersson Reviewed-by: Sibi Sankar Reviewed-by: Mathieu Poirier Signed-off-by: Rishabh Bhatnagar Link: https://lore.kernel.org/r/1594938035-7327-4-git-send-email-rishabhb@codeaurora.org Signed-off-by: Bjorn Andersson --- drivers/remoteproc/qcom_q6v5_mss.c | 10 +++++----- drivers/remoteproc/remoteproc_coredump.c | 5 +++-- include/linux/remoteproc.h | 5 +++-- 3 files changed, 11 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c index 1ee809b772ff..a3c412a3f143 100644 --- a/drivers/remoteproc/qcom_q6v5_mss.c +++ b/drivers/remoteproc/qcom_q6v5_mss.c @@ -1202,7 +1202,7 @@ out: static void qcom_q6v5_dump_segment(struct rproc *rproc, struct rproc_dump_segment *segment, - void *dest) + void *dest, size_t cp_offset, size_t size) { int ret = 0; struct q6v5 *qproc = rproc->priv; @@ -1222,16 +1222,16 @@ static void qcom_q6v5_dump_segment(struct rproc *rproc, } if (!ret) - ptr = ioremap_wc(qproc->mpss_phys + offset, segment->size); + ptr = ioremap_wc(qproc->mpss_phys + offset + cp_offset, size); if (ptr) { - memcpy(dest, ptr, segment->size); + memcpy(dest, ptr, size); iounmap(ptr); } else { - memset(dest, 0xff, segment->size); + memset(dest, 0xff, size); } - qproc->current_dump_size += segment->size; + qproc->current_dump_size += size; /* Reclaim mba after copying segments */ if (qproc->current_dump_size == qproc->total_dump_size) { diff --git a/drivers/remoteproc/remoteproc_coredump.c b/drivers/remoteproc/remoteproc_coredump.c index ded02442374a..390f563ec019 100644 --- a/drivers/remoteproc/remoteproc_coredump.c +++ b/drivers/remoteproc/remoteproc_coredump.c @@ -72,7 +72,8 @@ int rproc_coredump_add_custom_segment(struct rproc *rproc, dma_addr_t da, size_t size, void (*dumpfn)(struct rproc *rproc, struct rproc_dump_segment *segment, - void *dest), + void *dest, size_t offset, + size_t size), void *priv) { struct rproc_dump_segment *segment; @@ -183,7 +184,7 @@ void rproc_coredump(struct rproc *rproc) elf_phdr_set_p_align(class, phdr, 0); if (segment->dump) { - segment->dump(rproc, segment, data + offset); + segment->dump(rproc, segment, data + offset, 0, segment->size); } else { ptr = rproc_da_to_va(rproc, segment->da, segment->size); if (!ptr) { diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 7c0567029f7c..5dab13b6baae 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -456,7 +456,7 @@ struct rproc_dump_segment { void *priv; void (*dump)(struct rproc *rproc, struct rproc_dump_segment *segment, - void *dest); + void *dest, size_t offset, size_t size); loff_t offset; }; @@ -638,7 +638,8 @@ int rproc_coredump_add_custom_segment(struct rproc *rproc, dma_addr_t da, size_t size, void (*dumpfn)(struct rproc *rproc, struct rproc_dump_segment *segment, - void *dest), + void *dest, size_t offset, + size_t size), void *priv); int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine); -- cgit v1.2.3 From c97319881c9116dc7c56dd30115567b4078c4ba6 Mon Sep 17 00:00:00 2001 From: Rishabh Bhatnagar Date: Thu, 16 Jul 2020 15:20:34 -0700 Subject: remoteproc: Add inline coredump functionality The current coredump implementation uses vmalloc area to copy all the segments. But this might put strain on low memory targets as the firmware size sometimes is in tens of MBs. The situation becomes worse if there are multiple remote processors undergoing recovery at the same time. 
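Drivers opt in through the dump_conf field this patch adds to struct rproc (the snippet below is illustrative, not part of the patch): in RPROC_COREDUMP_INLINE mode the devcoredump reads segments straight out of device memory, and recovery stalls until userspace reads the dump or the devcoredump timeout frees it.

#include <linux/remoteproc.h>

/* Select inline coredumps for a given remote processor instance. */
static void example_select_inline_coredump(struct rproc *rproc)
{
	rproc->dump_conf = RPROC_COREDUMP_INLINE;
}
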
This patch adds inline coredump functionality that avoids extra memory usage. This requires recovery to be halted until data is read by userspace and free function is called. Reviewed-by: Bjorn Andersson Reviewed-by: Sibi Sankar Reviewed-by: Mathieu Poirier Signed-off-by: Rishabh Bhatnagar Tested-by: Sibi Sankar Link: https://lore.kernel.org/r/1594938035-7327-5-git-send-email-rishabhb@codeaurora.org Signed-off-by: Bjorn Andersson --- drivers/remoteproc/remoteproc_coredump.c | 156 +++++++++++++++++++++++++++---- include/linux/remoteproc.h | 16 ++++ 2 files changed, 154 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/drivers/remoteproc/remoteproc_coredump.c b/drivers/remoteproc/remoteproc_coredump.c index 390f563ec019..bb15a29038e8 100644 --- a/drivers/remoteproc/remoteproc_coredump.c +++ b/drivers/remoteproc/remoteproc_coredump.c @@ -5,6 +5,7 @@ * Copyright (c) 2020, The Linux Foundation. All rights reserved. */ +#include #include #include #include @@ -12,6 +13,12 @@ #include "remoteproc_internal.h" #include "remoteproc_elf_helpers.h" +struct rproc_coredump_state { + struct rproc *rproc; + void *header; + struct completion dump_done; +}; + /** * rproc_coredump_cleanup() - clean up dump_segments list * @rproc: the remote processor handle @@ -115,12 +122,110 @@ int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine) } EXPORT_SYMBOL(rproc_coredump_set_elf_info); +static void rproc_coredump_free(void *data) +{ + struct rproc_coredump_state *dump_state = data; + + vfree(dump_state->header); + complete(&dump_state->dump_done); +} + +static void *rproc_coredump_find_segment(loff_t user_offset, + struct list_head *segments, + size_t *data_left) +{ + struct rproc_dump_segment *segment; + + list_for_each_entry(segment, segments, node) { + if (user_offset < segment->size) { + *data_left = segment->size - user_offset; + return segment; + } + user_offset -= segment->size; + } + + *data_left = 0; + return NULL; +} + +static void rproc_copy_segment(struct rproc *rproc, void *dest, + struct rproc_dump_segment *segment, + size_t offset, size_t size) +{ + void *ptr; + + if (segment->dump) { + segment->dump(rproc, segment, dest, offset, size); + } else { + ptr = rproc_da_to_va(rproc, segment->da + offset, size); + if (!ptr) { + dev_err(&rproc->dev, + "invalid copy request for segment %pad with offset %zu and size %zu)\n", + &segment->da, offset, size); + memset(dest, 0xff, size); + } else { + memcpy(dest, ptr, size); + } + } +} + +static ssize_t rproc_coredump_read(char *buffer, loff_t offset, size_t count, + void *data, size_t header_sz) +{ + size_t seg_data, bytes_left = count; + ssize_t copy_sz; + struct rproc_dump_segment *seg; + struct rproc_coredump_state *dump_state = data; + struct rproc *rproc = dump_state->rproc; + void *elfcore = dump_state->header; + + /* Copy the vmalloc'ed header first. */ + if (offset < header_sz) { + copy_sz = memory_read_from_buffer(buffer, count, &offset, + elfcore, header_sz); + + return copy_sz; + } + + /* + * Find out the segment memory chunk to be copied based on offset. + * Keep copying data until count bytes are read. 
+ */ + while (bytes_left) { + seg = rproc_coredump_find_segment(offset - header_sz, + &rproc->dump_segments, + &seg_data); + /* EOF check */ + if (!seg) { + dev_info(&rproc->dev, "Ramdump done, %lld bytes read", + offset); + break; + } + + copy_sz = min_t(size_t, bytes_left, seg_data); + + rproc_copy_segment(rproc, buffer, seg, seg->size - seg_data, + copy_sz); + + offset += copy_sz; + buffer += copy_sz; + bytes_left -= copy_sz; + } + + return count - bytes_left; +} + /** * rproc_coredump() - perform coredump * @rproc: rproc handle * * This function will generate an ELF header for the registered segments - * and create a devcoredump device associated with rproc. + * and create a devcoredump device associated with rproc. Based on the + * coredump configuration this function will directly copy the segments + * from device memory to userspace or copy segments from device memory to + * a separate buffer, which can then be read by userspace. + * The first approach avoids using extra vmalloc memory. But it will stall + * recovery flow until dump is read by userspace. */ void rproc_coredump(struct rproc *rproc) { @@ -130,11 +235,13 @@ void rproc_coredump(struct rproc *rproc) size_t data_size; size_t offset; void *data; - void *ptr; u8 class = rproc->elf_class; int phnum = 0; + struct rproc_coredump_state dump_state; + enum rproc_dump_mechanism dump_conf = rproc->dump_conf; - if (list_empty(&rproc->dump_segments)) + if (list_empty(&rproc->dump_segments) || + dump_conf == RPROC_COREDUMP_DISABLED) return; if (class == ELFCLASSNONE) { @@ -144,7 +251,14 @@ void rproc_coredump(struct rproc *rproc) data_size = elf_size_of_hdr(class); list_for_each_entry(segment, &rproc->dump_segments, node) { - data_size += elf_size_of_phdr(class) + segment->size; + /* + * For default configuration buffer includes headers & segments. + * For inline dump buffer just includes headers as segments are + * directly read from device memory. + */ + data_size += elf_size_of_phdr(class); + if (dump_conf == RPROC_COREDUMP_DEFAULT) + data_size += segment->size; phnum++; } @@ -183,23 +297,29 @@ void rproc_coredump(struct rproc *rproc) elf_phdr_set_p_flags(class, phdr, PF_R | PF_W | PF_X); elf_phdr_set_p_align(class, phdr, 0); - if (segment->dump) { - segment->dump(rproc, segment, data + offset, 0, segment->size); - } else { - ptr = rproc_da_to_va(rproc, segment->da, segment->size); - if (!ptr) { - dev_err(&rproc->dev, - "invalid coredump segment (%pad, %zu)\n", - &segment->da, segment->size); - memset(data + offset, 0xff, segment->size); - } else { - memcpy(data + offset, ptr, segment->size); - } - } + if (dump_conf == RPROC_COREDUMP_DEFAULT) + rproc_copy_segment(rproc, data + offset, segment, 0, + segment->size); offset += elf_phdr_get_p_filesz(class, phdr); phdr += elf_size_of_phdr(class); } + if (dump_conf == RPROC_COREDUMP_DEFAULT) { + dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL); + return; + } + + /* Initialize the dump state struct to be used by rproc_coredump_read */ + dump_state.rproc = rproc; + dump_state.header = data; + init_completion(&dump_state.dump_done); + + dev_coredumpm(&rproc->dev, NULL, &dump_state, data_size, GFP_KERNEL, + rproc_coredump_read, rproc_coredump_free); - dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL); + /* + * Wait until the dump is read and free is called. Data is freed + * by devcoredump framework automatically after 5 minutes. 
+ */ + wait_for_completion(&dump_state.dump_done); } diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 5dab13b6baae..0e8d2ff575b4 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -439,6 +439,20 @@ enum rproc_crash_type { RPROC_FATAL_ERROR, }; +/** + * enum rproc_dump_mechanism - Coredump options for core + * @RPROC_COREDUMP_DEFAULT: Copy dump to separate buffer and carry on with + recovery + * @RPROC_COREDUMP_INLINE: Read segments directly from device memory. Stall + recovery until all segments are read + * @RPROC_COREDUMP_DISABLED: Don't perform any dump + */ +enum rproc_dump_mechanism { + RPROC_COREDUMP_DEFAULT, + RPROC_COREDUMP_INLINE, + RPROC_COREDUMP_DISABLED, +}; + /** * struct rproc_dump_segment - segment info from ELF header * @node: list node related to the rproc segment list @@ -471,6 +485,7 @@ struct rproc_dump_segment { * @dev: virtual device for refcounting and common remoteproc behavior * @power: refcount of users who need this rproc powered up * @state: state of the device + * @dump_conf: Currently selected coredump configuration * @lock: lock which protects concurrent manipulations of the rproc * @dbg_dir: debugfs directory of this rproc device * @traces: list of trace buffers @@ -505,6 +520,7 @@ struct rproc { struct device dev; atomic_t power; unsigned int state; + enum rproc_dump_mechanism dump_conf; struct mutex lock; struct dentry *dbg_dir; struct list_head traces; -- cgit v1.2.3 From 53aab92dec447f93489e07924e310d605a389dea Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 21 Jul 2020 10:17:16 -0700 Subject: Input: synaptics-rmi4 - drop a duplicated word Drop the repeated word "to" in a comment. Signed-off-by: Randy Dunlap Link: https://lore.kernel.org/r/20200719003131.21050-1-rdunlap@infradead.org Signed-off-by: Dmitry Torokhov --- include/linux/rmi.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/rmi.h b/include/linux/rmi.h index 7b22366d0065..8ed37f93f3c8 100644 --- a/include/linux/rmi.h +++ b/include/linux/rmi.h @@ -206,7 +206,7 @@ struct rmi_device_platform_data_spi { * * @reset_delay_ms - after issuing a reset command to the touch sensor, the * driver waits a few milliseconds to give the firmware a chance to - * to re-initialize. You can override the default wait period here. + * re-initialize. You can override the default wait period here. * @irq: irq associated with the attn gpio line, or negative */ struct rmi_device_platform_data { -- cgit v1.2.3 From 1571e700fd610c39e8b50b0110b1ee9badb2fe6a Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Jul 2020 12:04:36 +0100 Subject: net: phylink: in-band pause mode advertisement update for PCS Re-code the pause in-band advertisement update in light of the addition of PCS support, so that we perform the minimum required; only the PCS configuration function needs to be called in this case, followed by the request to trigger a restart of negotiation if the programmed advertisement changed. We need to change the pcs_config() signature to pass whether resolved pause should be passed to the MAC for setups such as mvneta and mvpp2 where doing so overrides the MAC manual flow controls. Signed-off-by: Russell King Signed-off-by: David S. 
Miller --- drivers/net/phy/phylink.c | 55 +++++++++++++++++++++++++++++++++++++++++++---- include/linux/phylink.h | 7 ++++-- 2 files changed, 56 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 452d509803ca..84a426401102 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -441,7 +441,9 @@ static void phylink_pcs_config(struct phylink *pl, bool force_restart, if (pl->pcs_ops && pl->pcs_ops->pcs_config(pl->config, pl->cur_link_an_mode, state->interface, - state->advertising)) + state->advertising, + !!(pl->link_config.pause & + MLO_PAUSE_AN))) restart = true; phylink_mac_config(pl, state); @@ -450,6 +452,49 @@ static void phylink_pcs_config(struct phylink *pl, bool force_restart, phylink_mac_pcs_an_restart(pl); } +/* + * Reconfigure for a change of inband advertisement. + * If we have a separate PCS, we only need to call its pcs_config() method, + * and then restart AN if it indicates something changed. Otherwise, we do + * the full MAC reconfiguration. + */ +static int phylink_change_inband_advert(struct phylink *pl) +{ + int ret; + + if (test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) + return 0; + + if (!pl->pcs_ops) { + /* Legacy method */ + phylink_mac_config(pl, &pl->link_config); + phylink_mac_pcs_an_restart(pl); + return 0; + } + + phylink_dbg(pl, "%s: mode=%s/%s adv=%*pb pause=%02x\n", __func__, + phylink_an_mode_str(pl->cur_link_an_mode), + phy_modes(pl->link_config.interface), + __ETHTOOL_LINK_MODE_MASK_NBITS, pl->link_config.advertising, + pl->link_config.pause); + + /* Modern PCS-based method; update the advert at the PCS, and + * restart negotiation if the pcs_config() helper indicates that + * the programmed advertisement has changed. + */ + ret = pl->pcs_ops->pcs_config(pl->config, pl->cur_link_an_mode, + pl->link_config.interface, + pl->link_config.advertising, + !!(pl->link_config.pause & MLO_PAUSE_AN)); + if (ret < 0) + return ret; + + if (ret > 0) + phylink_mac_pcs_an_restart(pl); + + return 0; +} + static void phylink_mac_pcs_get_state(struct phylink *pl, struct phylink_link_state *state) { @@ -1524,9 +1569,11 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl, config->pause = pause_state; - if (!pl->phydev && !test_bit(PHYLINK_DISABLE_STOPPED, - &pl->phylink_disable_state)) - phylink_pcs_config(pl, true, &pl->link_config); + /* Update our in-band advertisement, triggering a renegotiation if + * the advertisement changed. + */ + if (!pl->phydev) + phylink_change_inband_advert(pl); mutex_unlock(&pl->state_mutex); diff --git a/include/linux/phylink.h b/include/linux/phylink.h index b32b8b45421b..d9913d8e6b91 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -286,7 +286,8 @@ struct phylink_pcs_ops { struct phylink_link_state *state); int (*pcs_config)(struct phylink_config *config, unsigned int mode, phy_interface_t interface, - const unsigned long *advertising); + const unsigned long *advertising, + bool permit_pause_to_mac); void (*pcs_an_restart)(struct phylink_config *config); void (*pcs_link_up)(struct phylink_config *config, unsigned int mode, phy_interface_t interface, int speed, int duplex); @@ -317,9 +318,11 @@ void pcs_get_state(struct phylink_config *config, * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND. 
* @interface: interface mode to be used * @advertising: adertisement ethtool link mode mask + * @permit_pause_to_mac: permit forwarding pause resolution to MAC * * Configure the PCS for the operating mode, the interface mode, and set - * the advertisement mask. + * the advertisement mask. @permit_pause_to_mac indicates whether the + * hardware may forward the pause mode resolution to the MAC. * * When operating in %MLO_AN_INBAND, inband should always be enabled, * otherwise inband should be disabled. -- cgit v1.2.3 From b7ad14c2fe2d4b2abee491e3adfa3d0123aa2d8c Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Jul 2020 12:04:41 +0100 Subject: net: phylink: re-implement interface configuration with PCS With PCS support, how we implement interface reconfiguration (or other major reconfiguration) is not up to the job; we end up reconfiguring the PCS for an interface change while the link could potentially be up. In order to solve this, add two additional MAC methods for major configuration, one to prepare for the change, and one to finish the change. This allows mvneta and mvpp2 to shutdown what they require prior to the MAC and PCS configuration calls, and then restart as appropriate. This impacts ksettings_set(), which now needs to identify whether the change is a minor tweak to the advertisement masks or whether the interface mode has changed, and call the appropriate function for that update. Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/phy/phylink.c | 75 ++++++++++++++++++++++++++++++++--------------- include/linux/phylink.h | 48 ++++++++++++++++++++++++++++++ 2 files changed, 100 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 84a426401102..d554a0fbb4f3 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -433,23 +433,47 @@ static void phylink_mac_pcs_an_restart(struct phylink *pl) } } -static void phylink_pcs_config(struct phylink *pl, bool force_restart, - const struct phylink_link_state *state) +static void phylink_major_config(struct phylink *pl, bool restart, + const struct phylink_link_state *state) { - bool restart = force_restart; + int err; + + phylink_dbg(pl, "major config %s\n", phy_modes(state->interface)); - if (pl->pcs_ops && pl->pcs_ops->pcs_config(pl->config, - pl->cur_link_an_mode, - state->interface, - state->advertising, - !!(pl->link_config.pause & - MLO_PAUSE_AN))) - restart = true; + if (pl->mac_ops->mac_prepare) { + err = pl->mac_ops->mac_prepare(pl->config, pl->cur_link_an_mode, + state->interface); + if (err < 0) { + phylink_err(pl, "mac_prepare failed: %pe\n", + ERR_PTR(err)); + return; + } + } phylink_mac_config(pl, state); + if (pl->pcs_ops) { + err = pl->pcs_ops->pcs_config(pl->config, pl->cur_link_an_mode, + state->interface, + state->advertising, + !!(pl->link_config.pause & + MLO_PAUSE_AN)); + if (err < 0) + phylink_err(pl, "pcs_config failed: %pe\n", + ERR_PTR(err)); + if (err > 0) + restart = true; + } if (restart) phylink_mac_pcs_an_restart(pl); + + if (pl->mac_ops->mac_finish) { + err = pl->mac_ops->mac_finish(pl->config, pl->cur_link_an_mode, + state->interface); + if (err < 0) + phylink_err(pl, "mac_prepare failed: %pe\n", + ERR_PTR(err)); + } } /* @@ -555,7 +579,7 @@ static void phylink_mac_initial_config(struct phylink *pl, bool force_restart) link_state.link = false; phylink_apply_manual_flow(pl, &link_state); - phylink_pcs_config(pl, force_restart, &link_state); + phylink_major_config(pl, force_restart, 
&link_state); } static const char *phylink_pause_to_str(int pause) @@ -674,7 +698,7 @@ static void phylink_resolve(struct work_struct *w) phylink_link_down(pl); cur_link_state = false; } - phylink_pcs_config(pl, false, &link_state); + phylink_major_config(pl, false, &link_state); pl->link_config.interface = link_state.interface; } else if (!pl->pcs_ops) { /* The interface remains unchanged, only the speed, @@ -1450,21 +1474,26 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, return -EINVAL; mutex_lock(&pl->state_mutex); - linkmode_copy(pl->link_config.advertising, config.advertising); - pl->link_config.interface = config.interface; pl->link_config.speed = config.speed; pl->link_config.duplex = config.duplex; pl->link_config.an_enabled = config.an_enabled; - if (pl->cur_link_an_mode == MLO_AN_INBAND && - !test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) { - /* If in 802.3z mode, this updates the advertisement. - * - * If we are in SGMII mode without a PHY, there is no - * advertisement; the only thing we have is the pause - * modes which can only come from a PHY. - */ - phylink_pcs_config(pl, true, &pl->link_config); + if (pl->link_config.interface != config.interface) { + /* The interface changed, e.g. 1000base-X <-> 2500base-X */ + /* We need to force the link down, then change the interface */ + if (pl->old_link_state) { + phylink_link_down(pl); + pl->old_link_state = false; + } + if (!test_bit(PHYLINK_DISABLE_STOPPED, + &pl->phylink_disable_state)) + phylink_major_config(pl, false, &config); + pl->link_config.interface = config.interface; + linkmode_copy(pl->link_config.advertising, config.advertising); + } else if (!linkmode_equal(pl->link_config.advertising, + config.advertising)) { + linkmode_copy(pl->link_config.advertising, config.advertising); + phylink_change_inband_advert(pl); } mutex_unlock(&pl->state_mutex); diff --git a/include/linux/phylink.h b/include/linux/phylink.h index d9913d8e6b91..2f1315f32113 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -76,7 +76,9 @@ struct phylink_config { * struct phylink_mac_ops - MAC operations structure. * @validate: Validate and update the link configuration. * @mac_pcs_get_state: Read the current link state from the hardware. + * @mac_prepare: prepare for a major reconfiguration of the interface. * @mac_config: configure the MAC for the selected mode and state. + * @mac_finish: finish a major reconfiguration of the interface. * @mac_an_restart: restart 802.3z BaseX autonegotiation. * @mac_link_down: take the link down. * @mac_link_up: allow the link to come up. @@ -89,8 +91,12 @@ struct phylink_mac_ops { struct phylink_link_state *state); void (*mac_pcs_get_state)(struct phylink_config *config, struct phylink_link_state *state); + int (*mac_prepare)(struct phylink_config *config, unsigned int mode, + phy_interface_t iface); void (*mac_config)(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state); + int (*mac_finish)(struct phylink_config *config, unsigned int mode, + phy_interface_t iface); void (*mac_an_restart)(struct phylink_config *config); void (*mac_link_down)(struct phylink_config *config, unsigned int mode, phy_interface_t interface); @@ -145,6 +151,31 @@ void validate(struct phylink_config *config, unsigned long *supported, void mac_pcs_get_state(struct phylink_config *config, struct phylink_link_state *state); +/** + * mac_prepare() - prepare to change the PHY interface mode + * @config: a pointer to a &struct phylink_config. 
+ * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND. + * @iface: interface mode to switch to + * + * phylink will call this method at the beginning of a full initialisation + * of the link, which includes changing the interface mode or at initial + * startup time. It may be called for the current mode. The MAC driver + * should perform whatever actions are required, e.g. disabling the + * Serdes PHY. + * + * This will be the first call in the sequence: + * - mac_prepare() + * - mac_config() + * - pcs_config() + * - possible pcs_an_restart() + * - mac_finish() + * + * Returns zero on success, or negative errno on failure which will be + * reported to the kernel log. + */ +int mac_prepare(struct phylink_config *config, unsigned int mode, + phy_interface_t iface); + /** * mac_config() - configure the MAC for the selected mode and state * @config: a pointer to a &struct phylink_config. @@ -220,6 +251,23 @@ void mac_pcs_get_state(struct phylink_config *config, void mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state); +/** + * mac_finish() - finish a to change the PHY interface mode + * @config: a pointer to a &struct phylink_config. + * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND. + * @iface: interface mode to switch to + * + * phylink will call this if it called mac_prepare() to allow the MAC to + * complete any necessary steps after the MAC and PCS have been configured + * for the @mode and @iface. E.g. a MAC driver may wish to re-enable the + * Serdes PHY here if it was previously disabled by mac_prepare(). + * + * Returns zero on success, or negative errno on failure which will be + * reported to the kernel log. + */ +int mac_finish(struct phylink_config *config, unsigned int mode, + phy_interface_t iface); + /** * mac_an_restart() - restart 802.3z BaseX autonegotiation * @config: a pointer to a &struct phylink_config. -- cgit v1.2.3 From 7137e18f6f889a67046d5004e1690a32d7d2108d Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Jul 2020 12:04:46 +0100 Subject: net: phylink: add struct phylink_pcs Add a way for MAC PCS to have private data while keeping independence from struct phylink_config, which is used for the MAC itself. We need this independence as we will have stand-alone code for PCS that is independent of the MAC. Introduce struct phylink_pcs, which is designed to be embedded in a driver private data structure. This structure does not include a mdio_device as there are PCS implementations such as the Marvell DSA and network drivers where this is not necessary. Reviewed-by: Ioana Ciornei Signed-off-by: Russell King Signed-off-by: David S. 
Miller --- drivers/net/phy/phylink.c | 34 ++++++++++++++++++++++++++-------- include/linux/phylink.h | 45 ++++++++++++++++++++++++++++++--------------- 2 files changed, 56 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index d554a0fbb4f3..b57cd2142786 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -43,6 +43,7 @@ struct phylink { const struct phylink_mac_ops *mac_ops; const struct phylink_pcs_ops *pcs_ops; struct phylink_config *config; + struct phylink_pcs *pcs; struct device *dev; unsigned int old_link_state:1; @@ -427,7 +428,7 @@ static void phylink_mac_pcs_an_restart(struct phylink *pl) phy_interface_mode_is_8023z(pl->link_config.interface) && phylink_autoneg_inband(pl->cur_link_an_mode)) { if (pl->pcs_ops) - pl->pcs_ops->pcs_an_restart(pl->config); + pl->pcs_ops->pcs_an_restart(pl->pcs); else pl->mac_ops->mac_an_restart(pl->config); } @@ -453,7 +454,7 @@ static void phylink_major_config(struct phylink *pl, bool restart, phylink_mac_config(pl, state); if (pl->pcs_ops) { - err = pl->pcs_ops->pcs_config(pl->config, pl->cur_link_an_mode, + err = pl->pcs_ops->pcs_config(pl->pcs, pl->cur_link_an_mode, state->interface, state->advertising, !!(pl->link_config.pause & @@ -506,7 +507,7 @@ static int phylink_change_inband_advert(struct phylink *pl) * restart negotiation if the pcs_config() helper indicates that * the programmed advertisement has changed. */ - ret = pl->pcs_ops->pcs_config(pl->config, pl->cur_link_an_mode, + ret = pl->pcs_ops->pcs_config(pl->pcs, pl->cur_link_an_mode, pl->link_config.interface, pl->link_config.advertising, !!(pl->link_config.pause & MLO_PAUSE_AN)); @@ -533,7 +534,7 @@ static void phylink_mac_pcs_get_state(struct phylink *pl, state->link = 1; if (pl->pcs_ops) - pl->pcs_ops->pcs_get_state(pl->config, state); + pl->pcs_ops->pcs_get_state(pl->pcs, state); else pl->mac_ops->mac_pcs_get_state(pl->config, state); } @@ -604,7 +605,7 @@ static void phylink_link_up(struct phylink *pl, pl->cur_interface = link_state.interface; if (pl->pcs_ops && pl->pcs_ops->pcs_link_up) - pl->pcs_ops->pcs_link_up(pl->config, pl->cur_link_an_mode, + pl->pcs_ops->pcs_link_up(pl->pcs, pl->cur_link_an_mode, pl->cur_interface, link_state.speed, link_state.duplex); @@ -863,11 +864,26 @@ struct phylink *phylink_create(struct phylink_config *config, } EXPORT_SYMBOL_GPL(phylink_create); -void phylink_add_pcs(struct phylink *pl, const struct phylink_pcs_ops *ops) +/** + * phylink_set_pcs() - set the current PCS for phylink to use + * @pl: a pointer to a &struct phylink returned from phylink_create() + * @pcs: a pointer to the &struct phylink_pcs + * + * Bind the MAC PCS to phylink. This may be called after phylink_create(), + * in mac_prepare() or mac_config() methods if it is desired to dynamically + * change the PCS. + * + * Please note that there are behavioural changes with the mac_config() + * callback if a PCS is present (denoting a newer setup) so removing a PCS + * is not supported, and if a PCS is going to be used, it must be registered + * by calling phylink_set_pcs() at the latest in the first mac_config() call. 
+ */ +void phylink_set_pcs(struct phylink *pl, struct phylink_pcs *pcs) { - pl->pcs_ops = ops; + pl->pcs = pcs; + pl->pcs_ops = pcs->ops; } -EXPORT_SYMBOL_GPL(phylink_add_pcs); +EXPORT_SYMBOL_GPL(phylink_set_pcs); /** * phylink_destroy() - cleanup and destroy the phylink instance @@ -1212,6 +1228,8 @@ void phylink_start(struct phylink *pl) break; case MLO_AN_INBAND: poll |= pl->config->pcs_poll; + if (pl->pcs) + poll |= pl->pcs->poll; break; } if (poll) diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 2f1315f32113..057f78263a46 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -321,6 +321,21 @@ void mac_link_up(struct phylink_config *config, struct phy_device *phy, int speed, int duplex, bool tx_pause, bool rx_pause); #endif +struct phylink_pcs_ops; + +/** + * struct phylink_pcs - PHYLINK PCS instance + * @ops: a pointer to the &struct phylink_pcs_ops structure + * @poll: poll the PCS for link changes + * + * This structure is designed to be embedded within the PCS private data, + * and will be passed between phylink and the PCS. + */ +struct phylink_pcs { + const struct phylink_pcs_ops *ops; + bool poll; +}; + /** * struct phylink_pcs_ops - MAC PCS operations structure. * @pcs_get_state: read the current MAC PCS link state from the hardware. @@ -330,21 +345,21 @@ void mac_link_up(struct phylink_config *config, struct phy_device *phy, * (where necessary). */ struct phylink_pcs_ops { - void (*pcs_get_state)(struct phylink_config *config, + void (*pcs_get_state)(struct phylink_pcs *pcs, struct phylink_link_state *state); - int (*pcs_config)(struct phylink_config *config, unsigned int mode, + int (*pcs_config)(struct phylink_pcs *pcs, unsigned int mode, phy_interface_t interface, const unsigned long *advertising, bool permit_pause_to_mac); - void (*pcs_an_restart)(struct phylink_config *config); - void (*pcs_link_up)(struct phylink_config *config, unsigned int mode, + void (*pcs_an_restart)(struct phylink_pcs *pcs); + void (*pcs_link_up)(struct phylink_pcs *pcs, unsigned int mode, phy_interface_t interface, int speed, int duplex); }; #if 0 /* For kernel-doc purposes only. */ /** * pcs_get_state() - Read the current inband link state from the hardware - * @config: a pointer to a &struct phylink_config. + * @pcs: a pointer to a &struct phylink_pcs. * @state: a pointer to a &struct phylink_link_state. * * Read the current inband link state from the MAC PCS, reporting the @@ -357,12 +372,12 @@ struct phylink_pcs_ops { * When present, this overrides mac_pcs_get_state() in &struct * phylink_mac_ops. */ -void pcs_get_state(struct phylink_config *config, +void pcs_get_state(struct phylink_pcs *pcs, struct phylink_link_state *state); /** * pcs_config() - Configure the PCS mode and advertisement - * @config: a pointer to a &struct phylink_config. + * @pcs: a pointer to a &struct phylink_pcs. * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND. * @interface: interface mode to be used * @advertising: adertisement ethtool link mode mask @@ -382,21 +397,21 @@ void pcs_get_state(struct phylink_config *config, * * For most 10GBASE-R, there is no advertisement. */ -int (*pcs_config)(struct phylink_config *config, unsigned int mode, - phy_interface_t interface, const unsigned long *advertising); +int pcs_config(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, const unsigned long *advertising); /** * pcs_an_restart() - restart 802.3z BaseX autonegotiation - * @config: a pointer to a &struct phylink_config. 
+ * @pcs: a pointer to a &struct phylink_pcs. * * When PCS ops are present, this overrides mac_an_restart() in &struct * phylink_mac_ops. */ -void (*pcs_an_restart)(struct phylink_config *config); +void pcs_an_restart(struct phylink_pcs *pcs); /** * pcs_link_up() - program the PCS for the resolved link configuration - * @config: a pointer to a &struct phylink_config. + * @pcs: a pointer to a &struct phylink_pcs. * @mode: link autonegotiation mode * @interface: link &typedef phy_interface_t mode * @speed: link speed @@ -407,14 +422,14 @@ void (*pcs_an_restart)(struct phylink_config *config); * mode without in-band AN needs to be manually configured for the link * and duplex setting. Otherwise, this should be a no-op. */ -void (*pcs_link_up)(struct phylink_config *config, unsigned int mode, - phy_interface_t interface, int speed, int duplex); +void pcs_link_up(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, int speed, int duplex); #endif struct phylink *phylink_create(struct phylink_config *, struct fwnode_handle *, phy_interface_t iface, const struct phylink_mac_ops *mac_ops); -void phylink_add_pcs(struct phylink *, const struct phylink_pcs_ops *ops); +void phylink_set_pcs(struct phylink *, struct phylink_pcs *pcs); void phylink_destroy(struct phylink *); int phylink_connect_phy(struct phylink *, struct phy_device *); -- cgit v1.2.3 From 93eaceb0fcf87b7f70924f9da3ec27e3c73be53d Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Jul 2020 12:04:51 +0100 Subject: net: phylink: add interface to configure clause 22 PCS PHY Add an interface to configure the advertisement for a clause 22 PCS PHY, and set the AN enable flag in the BMCR appropriately. Reviewed-by: Ioana Ciornei Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/phy/phylink.c | 37 +++++++++++++++++++++++++++++++++++++ include/linux/phylink.h | 3 +++ 2 files changed, 40 insertions(+) (limited to 'include') diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index b57cd2142786..32b4bd6a5b55 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -2442,6 +2442,43 @@ int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs, } EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_set_advertisement); +/** + * phylink_mii_c22_pcs_config() - configure clause 22 PCS + * @pcs: a pointer to a &struct mdio_device. + * @mode: link autonegotiation mode + * @interface: the PHY interface mode being configured + * @advertising: the ethtool advertisement mask + * + * Configure a Clause 22 PCS PHY with the appropriate negotiation + * parameters for the @mode, @interface and @advertising parameters. + * Returns negative error number on failure, zero if the advertisement + * has not changed, or positive if there is a change. + */ +int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising) +{ + bool changed; + u16 bmcr; + int ret; + + ret = phylink_mii_c22_pcs_set_advertisement(pcs, interface, + advertising); + if (ret < 0) + return ret; + + changed = ret > 0; + + bmcr = mode == MLO_AN_INBAND ? BMCR_ANENABLE : 0; + ret = mdiobus_modify(pcs->bus, pcs->addr, MII_BMCR, + BMCR_ANENABLE, bmcr); + if (ret < 0) + return ret; + + return changed ? 1 : 0; +} +EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_config); + /** * phylink_mii_c22_pcs_an_restart() - restart 802.3z autonegotiation * @pcs: a pointer to a &struct mdio_device. 
diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 057f78263a46..1aad2aea4610 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -478,6 +478,9 @@ void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs, int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs, phy_interface_t interface, const unsigned long *advertising); +int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising); void phylink_mii_c22_pcs_an_restart(struct mdio_device *pcs); void phylink_mii_c45_pcs_get_state(struct mdio_device *pcs, -- cgit v1.2.3 From ab673b987488c4fab7a0bc4824a48211f9d910e3 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 21 Jul 2020 15:59:19 -0700 Subject: fscrypt: use smp_load_acquire() for ->i_crypt_info Normally smp_store_release() or cmpxchg_release() is paired with smp_load_acquire(). Sometimes smp_load_acquire() can be replaced with the more lightweight READ_ONCE(). However, for this to be safe, all the published memory must only be accessed in a way that involves the pointer itself. This may not be the case if allocating the object also involves initializing a static or global variable, for example. fscrypt_info includes various sub-objects which are internal to and are allocated by other kernel subsystems such as keyrings and crypto. So by using READ_ONCE() for ->i_crypt_info, we're relying on internal implementation details of these other kernel subsystems. Remove this fragile assumption by using smp_load_acquire() instead. (Note: I haven't seen any real-world problems here. This change is just fixing the code to be guaranteed correct and less fragile.) Fixes: e37a784d8b6a ("fscrypt: use READ_ONCE() to access ->i_crypt_info") Link: https://lore.kernel.org/r/20200721225920.114347-5-ebiggers@kernel.org Signed-off-by: Eric Biggers --- fs/crypto/keysetup.c | 12 +++++++++++- fs/crypto/policy.c | 4 ++-- include/linux/fscrypt.h | 29 ++++++++++++++++++++++++----- 3 files changed, 37 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c index 7f85fc645602..fea6226afc2b 100644 --- a/fs/crypto/keysetup.c +++ b/fs/crypto/keysetup.c @@ -518,7 +518,17 @@ int fscrypt_get_encryption_info(struct inode *inode) if (res) goto out; + /* + * Multiple tasks may race to set ->i_crypt_info, so use + * cmpxchg_release(). This pairs with the smp_load_acquire() in + * fscrypt_get_info(). I.e., here we publish ->i_crypt_info with a + * RELEASE barrier so that other tasks can ACQUIRE it. + */ if (cmpxchg_release(&inode->i_crypt_info, NULL, crypt_info) == NULL) { + /* + * We won the race and set ->i_crypt_info to our crypt_info. + * Now link it into the master key's inode list. 
+ */ if (master_key) { struct fscrypt_master_key *mk = master_key->payload.data[0]; @@ -589,7 +599,7 @@ EXPORT_SYMBOL(fscrypt_free_inode); */ int fscrypt_drop_inode(struct inode *inode) { - const struct fscrypt_info *ci = READ_ONCE(inode->i_crypt_info); + const struct fscrypt_info *ci = fscrypt_get_info(inode); const struct fscrypt_master_key *mk; /* diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c index 8e667aadf271..2d73fd39ad96 100644 --- a/fs/crypto/policy.c +++ b/fs/crypto/policy.c @@ -352,7 +352,7 @@ static int fscrypt_get_policy(struct inode *inode, union fscrypt_policy *policy) union fscrypt_context ctx; int ret; - ci = READ_ONCE(inode->i_crypt_info); + ci = fscrypt_get_info(inode); if (ci) { /* key available, use the cached policy */ *policy = ci->ci_policy; @@ -641,7 +641,7 @@ int fscrypt_inherit_context(struct inode *parent, struct inode *child, if (res < 0) return res; - ci = READ_ONCE(parent->i_crypt_info); + ci = fscrypt_get_info(parent); if (ci == NULL) return -ENOKEY; diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index bb257411365f..991ff8575d0e 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -74,10 +74,15 @@ struct fscrypt_operations { struct request_queue **devs); }; -static inline bool fscrypt_has_encryption_key(const struct inode *inode) +static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode) { - /* pairs with cmpxchg_release() in fscrypt_get_encryption_info() */ - return READ_ONCE(inode->i_crypt_info) != NULL; + /* + * Pairs with the cmpxchg_release() in fscrypt_get_encryption_info(). + * I.e., another task may publish ->i_crypt_info concurrently, executing + * a RELEASE barrier. We need to use smp_load_acquire() here to safely + * ACQUIRE the memory the other task published. + */ + return smp_load_acquire(&inode->i_crypt_info); } /** @@ -234,9 +239,9 @@ static inline void fscrypt_set_ops(struct super_block *sb, } #else /* !CONFIG_FS_ENCRYPTION */ -static inline bool fscrypt_has_encryption_key(const struct inode *inode) +static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode) { - return false; + return NULL; } static inline bool fscrypt_needs_contents_encryption(const struct inode *inode) @@ -619,6 +624,20 @@ static inline bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode) !__fscrypt_inode_uses_inline_crypto(inode); } +/** + * fscrypt_has_encryption_key() - check whether an inode has had its key set up + * @inode: the inode to check + * + * Return: %true if the inode has had its encryption key set up, else %false. + * + * Usually this should be preceded by fscrypt_get_encryption_info() to try to + * set up the key first. + */ +static inline bool fscrypt_has_encryption_key(const struct inode *inode) +{ + return fscrypt_get_info(inode) != NULL; +} + /** * fscrypt_require_key() - require an inode's encryption key * @inode: the inode we need the key for -- cgit v1.2.3 From f3db0bed458314a835ccef5ccb130270c5b2cf04 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 21 Jul 2020 15:59:20 -0700 Subject: fs-verity: use smp_load_acquire() for ->i_verity_info Normally smp_store_release() or cmpxchg_release() is paired with smp_load_acquire(). Sometimes smp_load_acquire() can be replaced with the more lightweight READ_ONCE(). However, for this to be safe, all the published memory must only be accessed in a way that involves the pointer itself. This may not be the case if allocating the object also involves initializing a static or global variable, for example. 
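As a rough illustration (not part of this patch, and using hypothetical foo_* names), the release/acquire publishing pattern that this change and the preceding fscrypt change rely on looks like the following sketch, assuming the usual kernel atomic/barrier headers:

    struct foo_info {
            int ready;
            /* ... fields initialised before the pointer is published ... */
    };

    static struct foo_info *foo_info_ptr;

    static int foo_publish(struct foo_info *new)
    {
            /* RELEASE: all prior initialisation is ordered before the store. */
            if (cmpxchg_release(&foo_info_ptr, NULL, new) != NULL)
                    return -EEXIST;         /* lost the race; caller frees @new */
            return 0;
    }

    static struct foo_info *foo_get(void)
    {
            /*
             * ACQUIRE: pairs with the cmpxchg_release() above, so the reader
             * observes a fully initialised object, including any state that
             * lives outside the object the returned pointer points to.
             */
            return smp_load_acquire(&foo_info_ptr);
    }

A plain READ_ONCE() on the consumer side is only safe when every access to the published data goes through the loaded pointer itself, which is exactly the assumption these patches stop relying on.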
fsverity_info::tree_params.hash_alg->tfm is a crypto_ahash object that's internal to and is allocated by the crypto subsystem. So by using READ_ONCE() for ->i_verity_info, we're relying on internal implementation details of the crypto subsystem. Remove this fragile assumption by using smp_load_acquire() instead. Also fix the cmpxchg logic to correctly execute an ACQUIRE barrier when losing the cmpxchg race, since cmpxchg doesn't guarantee a memory barrier on failure. (Note: I haven't seen any real-world problems here. This change is just fixing the code to be guaranteed correct and less fragile.) Fixes: fd2d1acfcadf ("fs-verity: add the hook for file ->open()") Link: https://lore.kernel.org/r/20200721225920.114347-6-ebiggers@kernel.org Signed-off-by: Eric Biggers --- fs/verity/open.c | 15 ++++++++++++--- include/linux/fsverity.h | 9 +++++++-- 2 files changed, 19 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/fs/verity/open.c b/fs/verity/open.c index d007db0c9304..bfe0280c14e4 100644 --- a/fs/verity/open.c +++ b/fs/verity/open.c @@ -221,11 +221,20 @@ out: void fsverity_set_info(struct inode *inode, struct fsverity_info *vi) { /* - * Multiple processes may race to set ->i_verity_info, so use cmpxchg. - * This pairs with the READ_ONCE() in fsverity_get_info(). + * Multiple tasks may race to set ->i_verity_info, so use + * cmpxchg_release(). This pairs with the smp_load_acquire() in + * fsverity_get_info(). I.e., here we publish ->i_verity_info with a + * RELEASE barrier so that other tasks can ACQUIRE it. */ - if (cmpxchg(&inode->i_verity_info, NULL, vi) != NULL) + if (cmpxchg_release(&inode->i_verity_info, NULL, vi) != NULL) { + /* Lost the race, so free the fsverity_info we allocated. */ fsverity_free_info(vi); + /* + * Afterwards, the caller may access ->i_verity_info directly, + * so make sure to ACQUIRE the winning fsverity_info. + */ + (void)fsverity_get_info(inode); + } } void fsverity_free_info(struct fsverity_info *vi) diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h index 78201a6d35f6..c1144a450392 100644 --- a/include/linux/fsverity.h +++ b/include/linux/fsverity.h @@ -115,8 +115,13 @@ struct fsverity_operations { static inline struct fsverity_info *fsverity_get_info(const struct inode *inode) { - /* pairs with the cmpxchg() in fsverity_set_info() */ - return READ_ONCE(inode->i_verity_info); + /* + * Pairs with the cmpxchg_release() in fsverity_set_info(). + * I.e., another task may publish ->i_verity_info concurrently, + * executing a RELEASE barrier. We need to use smp_load_acquire() here + * to safely ACQUIRE the memory the other task published. + */ + return smp_load_acquire(&inode->i_verity_info); } /* enable.c */ -- cgit v1.2.3 From 336ce1c93293e1e606fbc557587b1a1f8630cd5c Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Tue, 21 Jul 2020 19:53:53 +0300 Subject: devlink: Add comment for devlink instance lock Add comment to describe the purpose of devlink instance lock. Signed-off-by: Parav Pandit Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/net/devlink.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/devlink.h b/include/net/devlink.h index 913e8679ae35..19d990c8edcc 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -40,7 +40,9 @@ struct devlink { struct xarray snapshot_ids; struct device *dev; possible_net_t _net; - struct mutex lock; + struct mutex lock; /* Serializes access to devlink instance specific objects such as + * port, sb, dpipe, resource, params, region, traps and more. + */ u8 reload_failed:1, reload_enabled:1, registered:1; -- cgit v1.2.3 From 35dfb013149f74c2be1ff9c78f14e6a3cd1539d1 Mon Sep 17 00:00:00 2001 From: Andrew Sy Kim Date: Wed, 8 Jul 2020 12:16:38 -0400 Subject: ipvs: queue delayed work to expire no destination connections if expire_nodest_conn=1 When expire_nodest_conn=1 and a destination is deleted, IPVS does not expire the existing connections until the next matching incoming packet. If there are many connection entries from a single client to a single destination, many packets may get dropped before all the connections are expired (more likely with lots of UDP traffic). An optimization can be made where upon deletion of a destination, IPVS queues up delayed work to immediately expire any connections with a deleted destination. This ensures any reused source ports from a client (within the IPVS timeouts) are scheduled to new real servers instead of silently dropped. Signed-off-by: Andrew Sy Kim Signed-off-by: Julian Anastasov Signed-off-by: Pablo Neira Ayuso --- include/net/ip_vs.h | 29 +++++++++++++++++++++++++ net/netfilter/ipvs/ip_vs_conn.c | 39 ++++++++++++++++++++++++++++++++++ net/netfilter/ipvs/ip_vs_core.c | 47 ++++++++++++++++++----------------------- net/netfilter/ipvs/ip_vs_ctl.c | 22 +++++++++++++++++++ 4 files changed, 110 insertions(+), 27 deletions(-) (limited to 'include') diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 011f407b76fe..9a59a33787cb 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -14,6 +14,7 @@ #include /* for struct rwlock_t */ #include /* for struct atomic_t */ #include /* for struct refcount_t */ +#include #include #include @@ -886,6 +887,8 @@ struct netns_ipvs { atomic_t conn_out_counter; #ifdef CONFIG_SYSCTL + /* delayed work for expiring no dest connections */ + struct delayed_work expire_nodest_conn_work; /* 1/rate drop and drop-entry variables */ struct delayed_work defense_work; /* Work handler */ int drop_rate; @@ -1051,6 +1054,11 @@ static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs) return ipvs->sysctl_conn_reuse_mode; } +static inline int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs) +{ + return ipvs->sysctl_expire_nodest_conn; +} + static inline int sysctl_schedule_icmp(struct netns_ipvs *ipvs) { return ipvs->sysctl_schedule_icmp; @@ -1138,6 +1146,11 @@ static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs) return 1; } +static inline int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs) +{ + return 0; +} + static inline int sysctl_schedule_icmp(struct netns_ipvs *ipvs) { return 0; @@ -1507,6 +1520,22 @@ static inline int ip_vs_todrop(struct netns_ipvs *ipvs) static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; } #endif +#ifdef CONFIG_SYSCTL +/* Enqueue delayed work for expiring no dest connections + * Only run when sysctl_expire_nodest=1 + */ +static inline void ip_vs_enqueue_expire_nodest_conns(struct netns_ipvs *ipvs) +{ + if (sysctl_expire_nodest_conn(ipvs)) + 
queue_delayed_work(system_long_wq, + &ipvs->expire_nodest_conn_work, 1); +} + +void ip_vs_expire_nodest_conn_flush(struct netns_ipvs *ipvs); +#else +static inline void ip_vs_enqueue_expire_nodest_conns(struct netns_ipvs *ipvs) {} +#endif + #define IP_VS_DFWD_METHOD(dest) (atomic_read(&(dest)->conn_flags) & \ IP_VS_CONN_F_FWD_MASK) diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index b3921ae92740..a90b8eac16ac 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -1389,6 +1389,45 @@ flush_again: goto flush_again; } } + +#ifdef CONFIG_SYSCTL +void ip_vs_expire_nodest_conn_flush(struct netns_ipvs *ipvs) +{ + int idx; + struct ip_vs_conn *cp, *cp_c; + struct ip_vs_dest *dest; + + rcu_read_lock(); + for (idx = 0; idx < ip_vs_conn_tab_size; idx++) { + hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) { + if (cp->ipvs != ipvs) + continue; + + dest = cp->dest; + if (!dest || (dest->flags & IP_VS_DEST_F_AVAILABLE)) + continue; + + if (atomic_read(&cp->n_control)) + continue; + + cp_c = cp->control; + IP_VS_DBG(4, "del connection\n"); + ip_vs_conn_del(cp); + if (cp_c && !atomic_read(&cp_c->n_control)) { + IP_VS_DBG(4, "del controlling connection\n"); + ip_vs_conn_del(cp_c); + } + } + cond_resched_rcu(); + + /* netns clean up started, abort delayed work */ + if (!ipvs->enable) + break; + } + rcu_read_unlock(); +} +#endif + /* * per netns init and exit */ diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index b4a6b7662f3f..e3668a6e54e4 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -694,16 +694,10 @@ static int sysctl_nat_icmp_send(struct netns_ipvs *ipvs) return ipvs->sysctl_nat_icmp_send; } -static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs) -{ - return ipvs->sysctl_expire_nodest_conn; -} - #else static int sysctl_snat_reroute(struct netns_ipvs *ipvs) { return 0; } static int sysctl_nat_icmp_send(struct netns_ipvs *ipvs) { return 0; } -static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs) { return 0; } #endif @@ -2097,36 +2091,35 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int } } - if (unlikely(!cp)) { - int v; - - if (!ip_vs_try_to_schedule(ipvs, af, skb, pd, &v, &cp, &iph)) - return v; - } - - IP_VS_DBG_PKT(11, af, pp, skb, iph.off, "Incoming packet"); - /* Check the server status */ - if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { + if (cp && cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { /* the destination server is not available */ + if (sysctl_expire_nodest_conn(ipvs)) { + bool old_ct = ip_vs_conn_uses_old_conntrack(cp, skb); - __u32 flags = cp->flags; - - /* when timer already started, silently drop the packet.*/ - if (timer_pending(&cp->timer)) - __ip_vs_conn_put(cp); - else - ip_vs_conn_put(cp); + if (!old_ct) + cp->flags &= ~IP_VS_CONN_F_NFCT; - if (sysctl_expire_nodest_conn(ipvs) && - !(flags & IP_VS_CONN_F_ONE_PACKET)) { - /* try to expire the connection immediately */ ip_vs_conn_expire_now(cp); + __ip_vs_conn_put(cp); + if (old_ct) + return NF_DROP; + cp = NULL; + } else { + __ip_vs_conn_put(cp); + return NF_DROP; } + } - return NF_DROP; + if (unlikely(!cp)) { + int v; + + if (!ip_vs_try_to_schedule(ipvs, af, skb, pd, &v, &cp, &iph)) + return v; } + IP_VS_DBG_PKT(11, af, pp, skb, iph.off, "Incoming packet"); + ip_vs_in_stats(cp, skb); ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd); if (cp->packet_xmit) diff --git a/net/netfilter/ipvs/ip_vs_ctl.c 
b/net/netfilter/ipvs/ip_vs_ctl.c index 4af83f466dfc..f984d2c881ff 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -210,6 +210,17 @@ static void update_defense_level(struct netns_ipvs *ipvs) local_bh_enable(); } +/* Handler for delayed work for expiring no + * destination connections + */ +static void expire_nodest_conn_handler(struct work_struct *work) +{ + struct netns_ipvs *ipvs; + + ipvs = container_of(work, struct netns_ipvs, + expire_nodest_conn_work.work); + ip_vs_expire_nodest_conn_flush(ipvs); +} /* * Timer for checking the defense @@ -1164,6 +1175,12 @@ static void __ip_vs_del_dest(struct netns_ipvs *ipvs, struct ip_vs_dest *dest, list_add(&dest->t_list, &ipvs->dest_trash); dest->idle_start = 0; spin_unlock_bh(&ipvs->dest_trash_lock); + + /* Queue up delayed work to expire all no destination connections. + * No-op when CONFIG_SYSCTL is disabled. + */ + if (!cleanup) + ip_vs_enqueue_expire_nodest_conns(ipvs); } @@ -4086,6 +4103,10 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs) queue_delayed_work(system_long_wq, &ipvs->defense_work, DEFENSE_TIMER_PERIOD); + /* Init delayed work for expiring no dest conn */ + INIT_DELAYED_WORK(&ipvs->expire_nodest_conn_work, + expire_nodest_conn_handler); + return 0; } @@ -4093,6 +4114,7 @@ static void __net_exit ip_vs_control_net_cleanup_sysctl(struct netns_ipvs *ipvs) { struct net *net = ipvs->net; + cancel_delayed_work_sync(&ipvs->expire_nodest_conn_work); cancel_delayed_work_sync(&ipvs->defense_work); cancel_work_sync(&ipvs->defense_work.work); unregister_net_sysctl_table(ipvs->sysctl_hdr); -- cgit v1.2.3 From 4787dd582dbde0b7f29eb3dbe59df3da1b350925 Mon Sep 17 00:00:00 2001 From: Martin Varghese Date: Fri, 17 Jul 2020 08:05:12 +0530 Subject: bareudp: Reverted support to enable & disable rx metadata collection The commit fe80536acf83 ("bareudp: Added attribute to enable & disable rx metadata collection") breaks the the original(5.7) default behavior of bareudp module to collect RX metadadata at the receive. It was added to avoid the crash at the kernel neighbour subsytem when packet with metadata from bareudp is processed. But it is no more needed as the commit 394de110a733 ("net: Added pointer check for dst->ops->neigh_lookup in dst_neigh_lookup_skb") solves this crash. Fixes: fe80536acf83 ("bareudp: Added attribute to enable & disable rx metadata collection") Signed-off-by: Martin Varghese Acked-by: Guillaume Nault Signed-off-by: David S. Miller --- Documentation/networking/bareudp.rst | 6 ++---- drivers/net/bareudp.c | 21 +++++---------------- include/net/bareudp.h | 1 - include/uapi/linux/if_link.h | 1 - 4 files changed, 7 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/Documentation/networking/bareudp.rst b/Documentation/networking/bareudp.rst index 0e00636d8d74..465a8b251bfe 100644 --- a/Documentation/networking/bareudp.rst +++ b/Documentation/networking/bareudp.rst @@ -48,7 +48,5 @@ enabled. The bareudp device could be used along with OVS or flower filter in TC. The OVS or TC flower layer must set the tunnel information in SKB dst field before sending packet buffer to the bareudp device for transmission. On reception the -bareudp device decapsulates the udp header and passes the inner packet to the -network stack. If RX_COLLECT_METADATA flag is enabled in the device the tunnel -information will be stored in the SKB dst field before the packet buffer is -passed to the network stack. 
+bareudp device extracts and stores the tunnel information in SKB dst field before +passing the packet buffer to the network stack. diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index 108a8cafc4f8..44eb2b1d0416 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -46,7 +46,6 @@ struct bareudp_dev { __be16 port; u16 sport_min; bool multi_proto_mode; - bool rx_collect_metadata; struct socket __rcu *sock; struct list_head next; /* bareudp node on namespace list */ struct gro_cells gro_cells; @@ -126,14 +125,12 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) bareudp->dev->stats.rx_dropped++; goto drop; } - if (bareudp->rx_collect_metadata) { - tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0); - if (!tun_dst) { - bareudp->dev->stats.rx_dropped++; - goto drop; - } - skb_dst_set(skb, &tun_dst->dst); + tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0); + if (!tun_dst) { + bareudp->dev->stats.rx_dropped++; + goto drop; } + skb_dst_set(skb, &tun_dst->dst); skb->dev = bareudp->dev; oiph = skb_network_header(skb); skb_reset_network_header(skb); @@ -577,9 +574,6 @@ static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf, if (data[IFLA_BAREUDP_MULTIPROTO_MODE]) conf->multi_proto_mode = true; - if (data[IFLA_BAREUDP_RX_COLLECT_METADATA]) - conf->rx_collect_metadata = true; - return 0; } @@ -617,7 +611,6 @@ static int bareudp_configure(struct net *net, struct net_device *dev, bareudp->ethertype = conf->ethertype; bareudp->sport_min = conf->sport_min; bareudp->multi_proto_mode = conf->multi_proto_mode; - bareudp->rx_collect_metadata = conf->rx_collect_metadata; err = register_netdevice(dev); if (err) @@ -676,7 +669,6 @@ static size_t bareudp_get_size(const struct net_device *dev) nla_total_size(sizeof(__be16)) + /* IFLA_BAREUDP_ETHERTYPE */ nla_total_size(sizeof(__u16)) + /* IFLA_BAREUDP_SRCPORT_MIN */ nla_total_size(0) + /* IFLA_BAREUDP_MULTIPROTO_MODE */ - nla_total_size(0) + /* IFLA_BAREUDP_RX_COLLECT_METADATA */ 0; } @@ -693,9 +685,6 @@ static int bareudp_fill_info(struct sk_buff *skb, const struct net_device *dev) if (bareudp->multi_proto_mode && nla_put_flag(skb, IFLA_BAREUDP_MULTIPROTO_MODE)) goto nla_put_failure; - if (bareudp->rx_collect_metadata && - nla_put_flag(skb, IFLA_BAREUDP_RX_COLLECT_METADATA)) - goto nla_put_failure; return 0; diff --git a/include/net/bareudp.h b/include/net/bareudp.h index 3dd5f9a8d01c..dc65a0d71d9b 100644 --- a/include/net/bareudp.h +++ b/include/net/bareudp.h @@ -12,7 +12,6 @@ struct bareudp_conf { __be16 port; u16 sport_min; bool multi_proto_mode; - bool rx_collect_metadata; }; struct net_device *bareudp_dev_create(struct net *net, const char *name, diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 26842ffd0501..af8f31987526 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -601,7 +601,6 @@ enum { IFLA_BAREUDP_ETHERTYPE, IFLA_BAREUDP_SRCPORT_MIN, IFLA_BAREUDP_MULTIPROTO_MODE, - IFLA_BAREUDP_RX_COLLECT_METADATA, __IFLA_BAREUDP_MAX }; -- cgit v1.2.3 From c333f9495c451d958c6f4a41e5de2d8f80f79496 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 17 Jul 2020 16:37:13 -0700 Subject: raid: md_p.h: drop duplicated word in a comment Drop the doubled word "the" in a comment. 
Signed-off-by: Randy Dunlap Cc: Song Liu Cc: linux-raid@vger.kernel.org Signed-off-by: Song Liu --- include/uapi/linux/raid/md_p.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h index 1f2d8c81f0e0..e5a98a16f9b0 100644 --- a/include/uapi/linux/raid/md_p.h +++ b/include/uapi/linux/raid/md_p.h @@ -123,7 +123,7 @@ typedef struct mdp_device_descriptor_s { /* * Notes: - * - if an array is being reshaped (restriped) in order to change the + * - if an array is being reshaped (restriped) in order to change * the number of active devices in the array, 'raid_disks' will be * the larger of the old and new numbers. 'delta_disks' will * be the "new - old". So if +ve, raid_disks is the new value, and -- cgit v1.2.3 From 58877d347b58c9e971112df5eb311c13bb0acb28 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 2 Jul 2020 14:52:11 +0200 Subject: sched: Better document ttwu() Dave hit the problem fixed by commit: b6e13e85829f ("sched/core: Fix ttwu() race") and failed to understand much of the code involved. Per his request a few comments to (hopefully) clarify things. Requested-by: Dave Chinner Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200702125211.GQ4800@hirez.programming.kicks-ass.net --- include/linux/sched.h | 12 ++-- kernel/sched/core.c | 188 +++++++++++++++++++++++++++++++++++++++++++------- kernel/sched/sched.h | 10 +++ 3 files changed, 179 insertions(+), 31 deletions(-) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index 12b10ce51a08..5033813fecd5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -154,24 +154,24 @@ struct task_group; * * for (;;) { * set_current_state(TASK_UNINTERRUPTIBLE); - * if (!need_sleep) - * break; + * if (CONDITION) + * break; * * schedule(); * } * __set_current_state(TASK_RUNNING); * * If the caller does not need such serialisation (because, for instance, the - * condition test and condition change and wakeup are under the same lock) then + * CONDITION test and condition change and wakeup are under the same lock) then * use __set_current_state(). * * The above is typically ordered against the wakeup, which does: * - * need_sleep = false; + * CONDITION = 1; * wake_up_state(p, TASK_UNINTERRUPTIBLE); * - * where wake_up_state() executes a full memory barrier before accessing the - * task state. + * where wake_up_state()/try_to_wake_up() executes a full memory barrier before + * accessing p->state. * * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is, * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 08d02ce26b71..12db8fbd9c97 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -79,6 +79,100 @@ __read_mostly int scheduler_running; */ int sysctl_sched_rt_runtime = 950000; + +/* + * Serialization rules: + * + * Lock order: + * + * p->pi_lock + * rq->lock + * hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls) + * + * rq1->lock + * rq2->lock where: rq1 < rq2 + * + * Regular state: + * + * Normal scheduling state is serialized by rq->lock. __schedule() takes the + * local CPU's rq->lock, it optionally removes the task from the runqueue and + * always looks at the local rq data structures to find the most elegible task + * to run next. + * + * Task enqueue is also under rq->lock, possibly taken from another CPU. 
+ * Wakeups from another LLC domain might use an IPI to transfer the enqueue to + * the local CPU to avoid bouncing the runqueue state around [ see + * ttwu_queue_wakelist() ] + * + * Task wakeup, specifically wakeups that involve migration, are horribly + * complicated to avoid having to take two rq->locks. + * + * Special state: + * + * System-calls and anything external will use task_rq_lock() which acquires + * both p->pi_lock and rq->lock. As a consequence the state they change is + * stable while holding either lock: + * + * - sched_setaffinity()/ + * set_cpus_allowed_ptr(): p->cpus_ptr, p->nr_cpus_allowed + * - set_user_nice(): p->se.load, p->*prio + * - __sched_setscheduler(): p->sched_class, p->policy, p->*prio, + * p->se.load, p->rt_priority, + * p->dl.dl_{runtime, deadline, period, flags, bw, density} + * - sched_setnuma(): p->numa_preferred_nid + * - sched_move_task()/ + * cpu_cgroup_fork(): p->sched_task_group + * - uclamp_update_active() p->uclamp* + * + * p->state <- TASK_*: + * + * is changed locklessly using set_current_state(), __set_current_state() or + * set_special_state(), see their respective comments, or by + * try_to_wake_up(). This latter uses p->pi_lock to serialize against + * concurrent self. + * + * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }: + * + * is set by activate_task() and cleared by deactivate_task(), under + * rq->lock. Non-zero indicates the task is runnable, the special + * ON_RQ_MIGRATING state is used for migration without holding both + * rq->locks. It indicates task_cpu() is not stable, see task_rq_lock(). + * + * p->on_cpu <- { 0, 1 }: + * + * is set by prepare_task() and cleared by finish_task() such that it will be + * set before p is scheduled-in and cleared after p is scheduled-out, both + * under rq->lock. Non-zero indicates the task is running on its CPU. + * + * [ The astute reader will observe that it is possible for two tasks on one + * CPU to have ->on_cpu = 1 at the same time. ] + * + * task_cpu(p): is changed by set_task_cpu(), the rules are: + * + * - Don't call set_task_cpu() on a blocked task: + * + * We don't care what CPU we're not running on, this simplifies hotplug, + * the CPU assignment of blocked tasks isn't required to be valid. + * + * - for try_to_wake_up(), called under p->pi_lock: + * + * This allows try_to_wake_up() to only take one rq->lock, see its comment. + * + * - for migration called under rq->lock: + * [ see task_on_rq_migrating() in task_rq_lock() ] + * + * o move_queued_task() + * o detach_task() + * + * - for migration called under double_rq_lock(): + * + * o __migrate_swap_task() + * o push_rt_task() / pull_rt_task() + * o push_dl_task() / pull_dl_task() + * o dl_task_offline_migration() + * + */ + /* * __task_rq_lock - lock the rq @p resides on. 
*/ @@ -1543,8 +1637,7 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, { lockdep_assert_held(&rq->lock); - WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING); - dequeue_task(rq, p, DEQUEUE_NOCLOCK); + deactivate_task(rq, p, DEQUEUE_NOCLOCK); set_task_cpu(p, new_cpu); rq_unlock(rq, rf); @@ -1552,8 +1645,7 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, rq_lock(rq, rf); BUG_ON(task_cpu(p) != new_cpu); - enqueue_task(rq, p, 0); - p->on_rq = TASK_ON_RQ_QUEUED; + activate_task(rq, p, 0); check_preempt_curr(rq, p, 0); return rq; @@ -2318,12 +2410,31 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, } /* - * Called in case the task @p isn't fully descheduled from its runqueue, - * in this case we must do a remote wakeup. Its a 'light' wakeup though, - * since all we need to do is flip p->state to TASK_RUNNING, since - * the task is still ->on_rq. + * Consider @p being inside a wait loop: + * + * for (;;) { + * set_current_state(TASK_UNINTERRUPTIBLE); + * + * if (CONDITION) + * break; + * + * schedule(); + * } + * __set_current_state(TASK_RUNNING); + * + * between set_current_state() and schedule(). In this case @p is still + * runnable, so all that needs doing is change p->state back to TASK_RUNNING in + * an atomic manner. + * + * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq + * then schedule() must still happen and p->state can be changed to + * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we + * need to do a full wakeup with enqueue. + * + * Returns: %true when the wakeup is done, + * %false otherwise. */ -static int ttwu_remote(struct task_struct *p, int wake_flags) +static int ttwu_runnable(struct task_struct *p, int wake_flags) { struct rq_flags rf; struct rq *rq; @@ -2464,6 +2575,14 @@ static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) return false; } + +#else /* !CONFIG_SMP */ + +static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) +{ + return false; +} + #endif /* CONFIG_SMP */ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) @@ -2471,10 +2590,8 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) struct rq *rq = cpu_rq(cpu); struct rq_flags rf; -#if defined(CONFIG_SMP) if (ttwu_queue_wakelist(p, cpu, wake_flags)) return; -#endif rq_lock(rq, &rf); update_rq_clock(rq); @@ -2530,8 +2647,8 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) * migration. However the means are completely different as there is no lock * chain to provide order. Instead we do: * - * 1) smp_store_release(X->on_cpu, 0) - * 2) smp_cond_load_acquire(!X->on_cpu) + * 1) smp_store_release(X->on_cpu, 0) -- finish_task() + * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up() * * Example: * @@ -2571,15 +2688,33 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) * @state: the mask of task states that can be woken * @wake_flags: wake modifier flags (WF_*) * - * If (@state & @p->state) @p->state = TASK_RUNNING. + * Conceptually does: + * + * If (@state & @p->state) @p->state = TASK_RUNNING. * * If the task was not queued/runnable, also place it back on a runqueue. * - * Atomic against schedule() which would dequeue a task, also see - * set_current_state(). + * This function is atomic against schedule() which would dequeue the task. + * + * It issues a full memory barrier before accessing @p->state, see the comment + * with set_current_state(). 
+ * + * Uses p->pi_lock to serialize against concurrent wake-ups. * - * This function executes a full memory barrier before accessing the task - * state; see set_current_state(). + * Relies on p->pi_lock stabilizing: + * - p->sched_class + * - p->cpus_ptr + * - p->sched_task_group + * in order to do migration, see its use of select_task_rq()/set_task_cpu(). + * + * Tries really hard to only take one task_rq(p)->lock for performance. + * Takes rq->lock in: + * - ttwu_runnable() -- old rq, unavoidable, see comment there; + * - ttwu_queue() -- new rq, for enqueue of the task; + * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us. + * + * As a consequence we race really badly with just about everything. See the + * many memory barriers and their comments for details. * * Return: %true if @p->state changes (an actual wakeup was done), * %false otherwise. @@ -2595,7 +2730,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) /* * We're waking current, this means 'p->on_rq' and 'task_cpu(p) * == smp_processor_id()'. Together this means we can special - * case the whole 'p->on_rq && ttwu_remote()' case below + * case the whole 'p->on_rq && ttwu_runnable()' case below * without taking any locks. * * In particular: @@ -2616,8 +2751,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) /* * If we are going to wake up a thread waiting for CONDITION we * need to ensure that CONDITION=1 done by the caller can not be - * reordered with p->state check below. This pairs with mb() in - * set_current_state() the waiting thread does. + * reordered with p->state check below. This pairs with smp_store_mb() + * in set_current_state() that the waiting thread does. */ raw_spin_lock_irqsave(&p->pi_lock, flags); smp_mb__after_spinlock(); @@ -2652,7 +2787,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) * A similar smb_rmb() lives in try_invoke_on_locked_down_task(). */ smp_rmb(); - if (READ_ONCE(p->on_rq) && ttwu_remote(p, wake_flags)) + if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags)) goto unlock; if (p->in_iowait) { @@ -3222,8 +3357,10 @@ static inline void prepare_task(struct task_struct *next) /* * Claim the task as running, we do this before switching to it * such that any running task will have this set. + * + * See the ttwu() WF_ON_CPU case and its ordering comment. */ - next->on_cpu = 1; + WRITE_ONCE(next->on_cpu, 1); #endif } @@ -3231,8 +3368,9 @@ static inline void finish_task(struct task_struct *prev) { #ifdef CONFIG_SMP /* - * After ->on_cpu is cleared, the task can be moved to a different CPU. - * We must ensure this doesn't happen until the switch is completely + * This must be the very last reference to @prev from this CPU. After + * p->on_cpu is cleared, the task can be moved to a different CPU. We + * must ensure this doesn't happen until the switch is completely * finished. * * In particular, the load of prev->state in finish_task_switch() must diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 65b72e0487bf..9f33c77258ea 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1203,6 +1203,16 @@ struct rq_flags { #endif }; +/* + * Lockdep annotation that avoids accidental unlocks; it's like a + * sticky/continuous lockdep_assert_held(). + * + * This avoids code that has access to 'struct rq *rq' (basically everything in + * the scheduler) from accidentally unlocking the rq if they do not also have a + * copy of the (on-stack) 'struct rq_flags rf'. 
+ * + * Also see Documentation/locking/lockdep-design.rst. + */ static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) { rf->cookie = lockdep_pin_lock(&rq->lock); -- cgit v1.2.3 From 46132e3ac58cb2ee48051ed80bffc0070ad59b2e Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Wed, 1 Jul 2020 14:34:18 -0400 Subject: sched: nohz: stop passing around unused "ticks" parameter. The "ticks" parameter was added in commit 0f004f5a696a ("sched: Cure more NO_HZ load average woes") since calc_global_nohz() was called and needed the "ticks" argument. But in commit c308b56b5398 ("sched: Fix nohz load accounting -- again!") it became unused as the function calc_global_nohz() dropped using "ticks". Fixes: c308b56b5398 ("sched: Fix nohz load accounting -- again!") Signed-off-by: Paul Gortmaker Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/1593628458-32290-1-git-send-email-paul.gortmaker@windriver.com --- include/linux/sched/loadavg.h | 2 +- kernel/sched/loadavg.c | 2 +- kernel/time/timekeeping.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/sched/loadavg.h b/include/linux/sched/loadavg.h index 4859bea47a7b..83ec54b65e79 100644 --- a/include/linux/sched/loadavg.h +++ b/include/linux/sched/loadavg.h @@ -43,6 +43,6 @@ extern unsigned long calc_load_n(unsigned long load, unsigned long exp, #define LOAD_INT(x) ((x) >> FSHIFT) #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) -extern void calc_global_load(unsigned long ticks); +extern void calc_global_load(void); #endif /* _LINUX_SCHED_LOADAVG_H */ diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c index de22da666ac7..d2a655643a02 100644 --- a/kernel/sched/loadavg.c +++ b/kernel/sched/loadavg.c @@ -347,7 +347,7 @@ static inline void calc_global_nohz(void) { } * * Called from the global timer code. */ -void calc_global_load(unsigned long ticks) +void calc_global_load(void) { unsigned long sample_window; long active, delta; diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index d20d489841c8..63a632f9896c 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -2193,7 +2193,7 @@ EXPORT_SYMBOL(ktime_get_coarse_ts64); void do_timer(unsigned long ticks) { jiffies_64 += ticks; - calc_global_load(ticks); + calc_global_load(); } /** -- cgit v1.2.3 From e0078e2eb8620079d988f150ba02a4ce9b5a946a Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 18:30:31 -0700 Subject: linux/sched/mm.h: drop duplicated words in comments Drop doubled words "to" and "that". Signed-off-by: Randy Dunlap Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/927ea8d8-3f6c-9b65-4c2b-63ab4bd59ef1@infradead.org --- include/linux/sched/mm.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index a98604ea76f1..6be66f52a2ad 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -23,7 +23,7 @@ extern struct mm_struct *mm_alloc(void); * will still exist later on and mmget_not_zero() has to be used before * accessing it. * - * This is a preferred way to to pin @mm for a longer/unbounded amount + * This is a preferred way to pin @mm for a longer/unbounded amount * of time. * * Use mmdrop() to release the reference acquired by mmgrab(). @@ -232,7 +232,7 @@ static inline unsigned int memalloc_noio_save(void) * @flags: Flags to restore. * * Ends the implicit GFP_NOIO scope started by memalloc_noio_save function. 
- * Always make sure that that the given flags is the return value from the + * Always make sure that the given flags is the return value from the * pairing memalloc_noio_save call. */ static inline void memalloc_noio_restore(unsigned int flags) @@ -263,7 +263,7 @@ static inline unsigned int memalloc_nofs_save(void) * @flags: Flags to restore. * * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save function. - * Always make sure that that the given flags is the return value from the + * Always make sure that the given flags is the return value from the * pairing memalloc_nofs_save call. */ static inline void memalloc_nofs_restore(unsigned int flags) -- cgit v1.2.3 From 2705937a0395bd15d515a2a302d26ebc8318c035 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 18:31:38 -0700 Subject: trace/events/sched.h: fix duplicated word Change "It it" to "It is". Signed-off-by: Randy Dunlap Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/25305c1d-4ee8-e091-d20f-e700ddad49fd@infradead.org --- include/trace/events/sched.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 0d5ff0958d48..fec25b9cfbaf 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -91,7 +91,7 @@ DEFINE_EVENT(sched_wakeup_template, sched_waking, /* * Tracepoint called when the task is actually woken; p->state == TASK_RUNNNG. - * It it not always called from the waking context. + * It is not always called from the waking context. */ DEFINE_EVENT(sched_wakeup_template, sched_wakeup, TP_PROTO(struct task_struct *p), -- cgit v1.2.3 From 25980c7a79af42f2daa73e2f475ebf4cbac8253e Mon Sep 17 00:00:00 2001 From: Valentin Schneider Date: Sun, 12 Jul 2020 17:59:15 +0100 Subject: arch_topology, sched/core: Cleanup thermal pressure definition The following commit: 14533a16c46d ("thermal/cpu-cooling, sched/core: Move the arch_set_thermal_pressure() API to generic scheduler code") moved the definition of arch_set_thermal_pressure() to sched/core.c, but kept its declaration in linux/arch_topology.h. When building e.g. an x86 kernel with CONFIG_SCHED_THERMAL_PRESSURE=y, cpufreq_cooling.c ends up getting the declaration of arch_set_thermal_pressure() from include/linux/arch_topology.h, which is somewhat awkward. On top of this, sched/core.c unconditionally defines o The thermal_pressure percpu variable o arch_set_thermal_pressure() while arch_scale_thermal_pressure() does nothing unless redefined by the architecture. arch_*() functions are meant to be defined by architectures, so revert the aforementioned commit and re-implement it in a way that keeps arch_set_thermal_pressure() architecture-definable, and doesn't define the thermal pressure percpu variable for kernels that don't need it (CONFIG_SCHED_THERMAL_PRESSURE=n). 
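As a rough illustration of the resulting pattern (a sketch, not part of this patch): an architecture opts in by aliasing the arch_*() hooks to the common topology helpers, and a thermal/cpufreq driver then reports the capped capacity through the generic call. The foo_* names below are hypothetical; the kernel symbols match the diff that follows.

/* arch/foo/include/asm/topology.h -- hypothetical architecture opt-in */
#define arch_scale_thermal_pressure	topology_get_thermal_pressure
#define arch_set_thermal_pressure	topology_set_thermal_pressure

/* A cooling driver reporting the capacity lost to a frequency cap: */
#include <linux/sched/topology.h>
#include <linux/cpumask.h>

static void foo_report_thermal_pressure(const struct cpumask *cpus,
					unsigned long max_capacity,
					unsigned long capped_capacity)
{
	/*
	 * Pressure is the capacity currently unavailable because of the
	 * cap; on kernels/architectures that do not define
	 * arch_set_thermal_pressure() this call is a no-op.
	 */
	arch_set_thermal_pressure(cpus, max_capacity - capped_capacity);
}
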
Signed-off-by: Valentin Schneider Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200712165917.9168-2-valentin.schneider@arm.com --- arch/arm/include/asm/topology.h | 3 ++- arch/arm64/include/asm/topology.h | 3 ++- drivers/base/arch_topology.c | 11 +++++++++++ include/linux/arch_topology.h | 4 ++-- include/linux/sched/topology.h | 7 +++++++ kernel/sched/core.c | 11 ----------- 6 files changed, 24 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h index 435aba289fc5..e0593cf095d0 100644 --- a/arch/arm/include/asm/topology.h +++ b/arch/arm/include/asm/topology.h @@ -16,8 +16,9 @@ /* Enable topology flag updates */ #define arch_update_cpu_topology topology_update_cpu_topology -/* Replace task scheduler's default thermal pressure retrieve API */ +/* Replace task scheduler's default thermal pressure API */ #define arch_scale_thermal_pressure topology_get_thermal_pressure +#define arch_set_thermal_pressure topology_set_thermal_pressure #else diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h index 0cc835ddfcd1..e042f6527981 100644 --- a/arch/arm64/include/asm/topology.h +++ b/arch/arm64/include/asm/topology.h @@ -34,8 +34,9 @@ void topology_scale_freq_tick(void); /* Enable topology flag updates */ #define arch_update_cpu_topology topology_update_cpu_topology -/* Replace task scheduler's default thermal pressure retrieve API */ +/* Replace task scheduler's default thermal pressure API */ #define arch_scale_thermal_pressure topology_get_thermal_pressure +#define arch_set_thermal_pressure topology_set_thermal_pressure #include diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 4d0a0038b476..75f72d684294 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -54,6 +54,17 @@ void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity) per_cpu(cpu_scale, cpu) = capacity; } +DEFINE_PER_CPU(unsigned long, thermal_pressure); + +void topology_set_thermal_pressure(const struct cpumask *cpus, + unsigned long th_pressure) +{ + int cpu; + + for_each_cpu(cpu, cpus) + WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure); +} + static ssize_t cpu_capacity_show(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h index 0566cb3314ef..69b1dabe39dc 100644 --- a/include/linux/arch_topology.h +++ b/include/linux/arch_topology.h @@ -39,8 +39,8 @@ static inline unsigned long topology_get_thermal_pressure(int cpu) return per_cpu(thermal_pressure, cpu); } -void arch_set_thermal_pressure(struct cpumask *cpus, - unsigned long th_pressure); +void topology_set_thermal_pressure(const struct cpumask *cpus, + unsigned long th_pressure); struct cpu_topology { int thread_id; diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index fb11091129b3..764222d637b7 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -232,6 +232,13 @@ unsigned long arch_scale_thermal_pressure(int cpu) } #endif +#ifndef arch_set_thermal_pressure +static __always_inline +void arch_set_thermal_pressure(const struct cpumask *cpus, + unsigned long th_pressure) +{ } +#endif + static inline int task_node(const struct task_struct *p) { return cpu_to_node(task_cpu(p)); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 12db8fbd9c97..bd8e5211d31f 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3869,17 
+3869,6 @@ unsigned long long task_sched_runtime(struct task_struct *p) return ns; } -DEFINE_PER_CPU(unsigned long, thermal_pressure); - -void arch_set_thermal_pressure(struct cpumask *cpus, - unsigned long th_pressure) -{ - int cpu; - - for_each_cpu(cpu, cpus) - WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure); -} - /* * This function gets called by the timer code, with HZ frequency. * We call it with interrupts disabled. -- cgit v1.2.3 From c2127e14c127de2775feefdfb1444e30a129a59f Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 18 Jul 2020 17:30:27 -0700 Subject: perf: : drop a duplicated word Drop the repeated word "the" in a comment. Signed-off-by: Randy Dunlap Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200719003027.20798-1-rdunlap@infradead.org --- include/linux/perf_event.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 3b22db08b6fb..0edd257a5916 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -366,7 +366,7 @@ struct pmu { * ->stop() with PERF_EF_UPDATE will read the counter and update * period/count values like ->read() would. * - * ->start() with PERF_EF_RELOAD will reprogram the the counter + * ->start() with PERF_EF_RELOAD will reprogram the counter * value, must be preceded by a ->stop() with PERF_EF_UPDATE. */ void (*start) (struct perf_event *event, int flags); -- cgit v1.2.3 From df746b3f079c31db7350b282c86e9004fa1a88df Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Tue, 21 Jul 2020 16:23:33 -0500 Subject: misc: rtsx: Remove unused pcie_cap There are no more uses of struct rtsx_pcr.pcie_cap. Remove it. Signed-off-by: Bjorn Helgaas Acked-by: Arnd Bergmann Link: https://lore.kernel.org/r/20200721212336.1159079-3-helgaas@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/misc/cardreader/rtsx_pcr.c | 1 - include/linux/rtsx_pci.h | 1 - 2 files changed, 2 deletions(-) (limited to 'include') diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c index 6d76929f31f5..0c0f1dd6f00f 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -1253,7 +1253,6 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1, RTS5228_LDO1_SR_TIME_MASK, RTS5228_LDO1_SR_0_5); - pcr->pcie_cap = pci_find_capability(pcr->pci, PCI_CAP_ID_EXP); rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr); rtsx_pci_enable_bus_int(pcr); diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h index 27a6ea82aeea..4ff7b221f36e 100644 --- a/include/linux/rtsx_pci.h +++ b/include/linux/rtsx_pci.h @@ -1166,7 +1166,6 @@ struct rtsx_hw_param { struct rtsx_pcr { struct pci_dev *pci; unsigned int id; - int pcie_cap; struct rtsx_cr_option option; struct rtsx_hw_param hw_param; -- cgit v1.2.3 From 22bf3251d7b7da0339f41ec27f2c3d4e0ec02255 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Tue, 21 Jul 2020 16:23:34 -0500 Subject: misc: rtsx: Remove rtsx_pci_read/write_config() wrappers rtsx_pci_read_config_dword() and similar wrappers around the PCI config accessors add very little value, and they obscure the fact that often we are accessing standard PCI registers that should be coordinated with the PCI core. Remove the wrappers and use the PCI config accessors directly. No functional change intended. 
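For illustration only (not part of the patch), the conversion pattern looks like this; example_fetch_setting() is a made-up helper, while the register name and pcr_dbg() come from the driver:

static void example_fetch_setting(struct rtsx_pcr *pcr)
{
	struct pci_dev *pdev = pcr->pci;
	u32 reg;

	/* was: rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg); */
	pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
	pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
}
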
Signed-off-by: Bjorn Helgaas Acked-by: Arnd Bergmann Link: https://lore.kernel.org/r/20200721212336.1159079-4-helgaas@kernel.org [ fixed up some other instances as original patch was based on old tree - gregkh Signed-off-by: Greg Kroah-Hartman --- drivers/misc/cardreader/rtl8411.c | 8 +++++--- drivers/misc/cardreader/rts5209.c | 5 +++-- drivers/misc/cardreader/rts5227.c | 5 +++-- drivers/misc/cardreader/rts5228.c | 8 +++++--- drivers/misc/cardreader/rts5229.c | 5 +++-- drivers/misc/cardreader/rts5249.c | 12 +++++++----- drivers/misc/cardreader/rts5260.c | 10 ++++++---- drivers/misc/cardreader/rts5261.c | 19 +++++++++++-------- drivers/misc/cardreader/rtsx_pcr.c | 2 +- include/linux/rtsx_pci.h | 12 ------------ 10 files changed, 44 insertions(+), 42 deletions(-) (limited to 'include') diff --git a/drivers/misc/cardreader/rtl8411.c b/drivers/misc/cardreader/rtl8411.c index 489ebe907688..a07674ed0596 100644 --- a/drivers/misc/cardreader/rtl8411.c +++ b/drivers/misc/cardreader/rtl8411.c @@ -37,10 +37,11 @@ static int rtl8411b_is_qfn48(struct rtsx_pcr *pcr) static void rtl8411_fetch_vendor_settings(struct rtsx_pcr *pcr) { + struct pci_dev *pdev = pcr->pci; u32 reg1 = 0; u8 reg3 = 0; - rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, ®1); + pci_read_config_dword(pdev, PCR_SETTING_REG1, ®1); pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg1); if (!rtsx_vendor_setting_valid(reg1)) @@ -52,16 +53,17 @@ static void rtl8411_fetch_vendor_settings(struct rtsx_pcr *pcr) pcr->card_drive_sel &= 0x3F; pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg1); - rtsx_pci_read_config_byte(pcr, PCR_SETTING_REG3, ®3); + pci_read_config_byte(pdev, PCR_SETTING_REG3, ®3); pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG3, reg3); pcr->sd30_drive_sel_3v3 = rtl8411_reg_to_sd30_drive_sel_3v3(reg3); } static void rtl8411b_fetch_vendor_settings(struct rtsx_pcr *pcr) { + struct pci_dev *pdev = pcr->pci; u32 reg = 0; - rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, ®); + pci_read_config_dword(pdev, PCR_SETTING_REG1, ®); pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg); if (!rtsx_vendor_setting_valid(reg)) diff --git a/drivers/misc/cardreader/rts5209.c b/drivers/misc/cardreader/rts5209.c index 659056164b21..39a6a7ecc32e 100644 --- a/drivers/misc/cardreader/rts5209.c +++ b/drivers/misc/cardreader/rts5209.c @@ -23,9 +23,10 @@ static u8 rts5209_get_ic_version(struct rtsx_pcr *pcr) static void rts5209_fetch_vendor_settings(struct rtsx_pcr *pcr) { + struct pci_dev *pdev = pcr->pci; u32 reg; - rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, ®); + pci_read_config_dword(pdev, PCR_SETTING_REG1, ®); pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg); if (rts5209_vendor_setting1_valid(reg)) { @@ -34,7 +35,7 @@ static void rts5209_fetch_vendor_settings(struct rtsx_pcr *pcr) pcr->aspm_en = rts5209_reg_to_aspm(reg); } - rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, ®); + pci_read_config_dword(pdev, PCR_SETTING_REG2, ®); pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg); if (rts5209_vendor_setting2_valid(reg)) { diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c index 3a9467aaa435..f5f392ddf3d6 100644 --- a/drivers/misc/cardreader/rts5227.c +++ b/drivers/misc/cardreader/rts5227.c @@ -56,9 +56,10 @@ static void rts5227_fill_driving(struct rtsx_pcr *pcr, u8 voltage) static void rts5227_fetch_vendor_settings(struct rtsx_pcr *pcr) { + struct pci_dev *pdev = pcr->pci; u32 reg; - rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, ®); + pci_read_config_dword(pdev, PCR_SETTING_REG1, 
®); pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg); if (!rtsx_vendor_setting_valid(reg)) @@ -69,7 +70,7 @@ static void rts5227_fetch_vendor_settings(struct rtsx_pcr *pcr) pcr->card_drive_sel &= 0x3F; pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg); - rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, ®); + pci_read_config_dword(pdev, PCR_SETTING_REG2, ®); pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg); pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg); if (rtsx_reg_check_reverse_socket(reg)) diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c index 99aff7cd0a93..448929829de4 100644 --- a/drivers/misc/cardreader/rts5228.c +++ b/drivers/misc/cardreader/rts5228.c @@ -60,9 +60,11 @@ static void rts5228_fill_driving(struct rtsx_pcr *pcr, u8 voltage) static void rtsx5228_fetch_vendor_settings(struct rtsx_pcr *pcr) { + struct pci_dev *pdev = pcr->pci; u32 reg; + /* 0x724~0x727 */ - rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, ®); + pci_read_config_dword(pdev, PCR_SETTING_REG1, ®); pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg); if (!rtsx_vendor_setting_valid(reg)) { @@ -73,7 +75,7 @@ static void rtsx5228_fetch_vendor_settings(struct rtsx_pcr *pcr) pcr->aspm_en = rtsx_reg_to_aspm(reg); /* 0x814~0x817 */ - rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, ®); + pci_read_config_dword(pdev, PCR_SETTING_REG2, ®); pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg); pcr->rtd3_en = rtsx_reg_to_rtd3(reg); @@ -380,7 +382,7 @@ static void rts5228_init_from_cfg(struct rtsx_pcr *pcr) u32 lval; struct rtsx_cr_option *option = &pcr->option; - rtsx_pci_read_config_dword(pcr, PCR_ASPM_SETTING_REG1, &lval); + pci_read_config_dword(pcr->pci, PCR_ASPM_SETTING_REG1, &lval); if (0 == (lval & 0x0F)) diff --git a/drivers/misc/cardreader/rts5229.c b/drivers/misc/cardreader/rts5229.c index 9f080a32ef50..89e6f124ca5c 100644 --- a/drivers/misc/cardreader/rts5229.c +++ b/drivers/misc/cardreader/rts5229.c @@ -23,9 +23,10 @@ static u8 rts5229_get_ic_version(struct rtsx_pcr *pcr) static void rts5229_fetch_vendor_settings(struct rtsx_pcr *pcr) { + struct pci_dev *pdev = pcr->pci; u32 reg; - rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, ®); + pci_read_config_dword(pdev, PCR_SETTING_REG1, ®); pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg); if (!rtsx_vendor_setting_valid(reg)) @@ -37,7 +38,7 @@ static void rts5229_fetch_vendor_settings(struct rtsx_pcr *pcr) pcr->card_drive_sel &= 0x3F; pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg); - rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, ®); + pci_read_config_dword(pdev, PCR_SETTING_REG2, ®); pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg); pcr->sd30_drive_sel_3v3 = map_sd_drive(rtsx_reg_to_sd30_drive_sel_3v3(reg)); diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c index 6c6c9e95a29f..665472d05993 100644 --- a/drivers/misc/cardreader/rts5249.c +++ b/drivers/misc/cardreader/rts5249.c @@ -55,9 +55,10 @@ static void rts5249_fill_driving(struct rtsx_pcr *pcr, u8 voltage) static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr) { + struct pci_dev *pdev = pcr->pci; u32 reg; - rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, ®); + pci_read_config_dword(pdev, PCR_SETTING_REG1, ®); pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg); if (!rtsx_vendor_setting_valid(reg)) { @@ -70,7 +71,7 @@ static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr) pcr->card_drive_sel &= 0x3F; pcr->card_drive_sel |= 
rtsx_reg_to_card_drive_sel(reg); - rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, ®); + pci_read_config_dword(pdev, PCR_SETTING_REG2, ®); pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg); pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg); if (rtsx_reg_check_reverse_socket(reg)) @@ -93,14 +94,15 @@ static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state) static void rts5249_init_from_cfg(struct rtsx_pcr *pcr) { + struct pci_dev *pdev = pcr->pci; struct rtsx_cr_option *option = &(pcr->option); u32 lval; if (CHK_PCI_PID(pcr, PID_524A)) - rtsx_pci_read_config_dword(pcr, + pci_read_config_dword(pdev, PCR_ASPM_SETTING_REG1, &lval); else - rtsx_pci_read_config_dword(pcr, + pci_read_config_dword(pdev, PCR_ASPM_SETTING_REG2, &lval); if (lval & ASPM_L1_1_EN_MASK) @@ -118,7 +120,7 @@ static void rts5249_init_from_cfg(struct rtsx_pcr *pcr) if (option->ltr_en) { u16 val; - pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val); + pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val); if (val & PCI_EXP_DEVCTL2_LTR_EN) { option->ltr_enabled = true; option->ltr_active = true; diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c index 7a9dbb778e84..0e806dd7ad08 100644 --- a/drivers/misc/cardreader/rts5260.c +++ b/drivers/misc/cardreader/rts5260.c @@ -64,9 +64,10 @@ static void rts5260_fill_driving(struct rtsx_pcr *pcr, u8 voltage) static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr) { + struct pci_dev *pdev = pcr->pci; u32 reg; - rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, ®); + pci_read_config_dword(pdev, PCR_SETTING_REG1, ®); pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg); if (!rtsx_vendor_setting_valid(reg)) { @@ -79,7 +80,7 @@ static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr) pcr->card_drive_sel &= 0x3F; pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg); - rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, ®); + pci_read_config_dword(pdev, PCR_SETTING_REG2, ®); pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg); pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg); if (rtsx_reg_check_reverse_socket(reg)) @@ -496,10 +497,11 @@ static void rts5260_pwr_saving_setting(struct rtsx_pcr *pcr) static void rts5260_init_from_cfg(struct rtsx_pcr *pcr) { + struct pci_dev *pdev = pcr->pci; struct rtsx_cr_option *option = &pcr->option; u32 lval; - rtsx_pci_read_config_dword(pcr, PCR_ASPM_SETTING_5260, &lval); + pci_read_config_dword(pdev, PCR_ASPM_SETTING_5260, &lval); if (lval & ASPM_L1_1_EN_MASK) rtsx_set_dev_flag(pcr, ASPM_L1_1_EN); @@ -518,7 +520,7 @@ static void rts5260_init_from_cfg(struct rtsx_pcr *pcr) if (option->ltr_en) { u16 val; - pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val); + pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val); if (val & PCI_EXP_DEVCTL2_LTR_EN) { option->ltr_enabled = true; option->ltr_active = true; diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c index 195822ec858e..4f30637ee4ac 100644 --- a/drivers/misc/cardreader/rts5261.c +++ b/drivers/misc/cardreader/rts5261.c @@ -59,9 +59,11 @@ static void rts5261_fill_driving(struct rtsx_pcr *pcr, u8 voltage) static void rtsx5261_fetch_vendor_settings(struct rtsx_pcr *pcr) { + struct pci_dev *pdev = pcr->pci; u32 reg; + /* 0x814~0x817 */ - rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, ®); + pci_read_config_dword(pdev, PCR_SETTING_REG2, ®); pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg); if (!rts5261_vendor_setting_valid(reg)) { @@ -76,7 +78,7 @@ 
static void rtsx5261_fetch_vendor_settings(struct rtsx_pcr *pcr) pcr->flags |= PCR_REVERSE_SOCKET; /* 0x724~0x727 */ - rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, ®); + pci_read_config_dword(pdev, PCR_SETTING_REG1, ®); pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg); pcr->aspm_en = rts5261_reg_to_aspm(reg); @@ -361,6 +363,7 @@ static void rts5261_process_ocp(struct rtsx_pcr *pcr) static int rts5261_init_from_hw(struct rtsx_pcr *pcr) { + struct pci_dev *pdev = pcr->pci; int retval; u32 lval, i; u8 valid, efuse_valid, tmp; @@ -386,8 +389,7 @@ static int rts5261_init_from_hw(struct rtsx_pcr *pcr) pcr_dbg(pcr, "Load efuse valid: 0x%x\n", efuse_valid); if (efuse_valid == 0) { - retval = rtsx_pci_read_config_dword(pcr, - PCR_SETTING_REG2, &lval); + retval = pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval); if (retval != 0) pcr_dbg(pcr, "read 0x814 DW fail\n"); pcr_dbg(pcr, "DW from 0x814: 0x%x\n", lval); @@ -399,9 +401,9 @@ static int rts5261_init_from_hw(struct rtsx_pcr *pcr) REG_EFUSE_POR, 0); pcr_dbg(pcr, "Disable efuse por!\n"); - rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &lval); + pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval); lval = lval & 0x00FFFFFF; - retval = rtsx_pci_write_config_dword(pcr, PCR_SETTING_REG2, lval); + retval = pci_write_config_dword(pdev, PCR_SETTING_REG2, lval); if (retval != 0) pcr_dbg(pcr, "write config fail\n"); @@ -410,10 +412,11 @@ static int rts5261_init_from_hw(struct rtsx_pcr *pcr) static void rts5261_init_from_cfg(struct rtsx_pcr *pcr) { + struct pci_dev *pdev = pcr->pci; u32 lval; struct rtsx_cr_option *option = &pcr->option; - rtsx_pci_read_config_dword(pcr, PCR_ASPM_SETTING_REG1, &lval); + pci_read_config_dword(pdev, PCR_ASPM_SETTING_REG1, &lval); if (lval & ASPM_L1_1_EN_MASK) rtsx_set_dev_flag(pcr, ASPM_L1_1_EN); @@ -439,7 +442,7 @@ static void rts5261_init_from_cfg(struct rtsx_pcr *pcr) if (option->ltr_en) { u16 val; - pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val); + pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val); if (val & PCI_EXP_DEVCTL2_LTR_EN) { option->ltr_enabled = true; option->ltr_active = true; diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c index 0c0f1dd6f00f..2fc6b938e999 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -1350,7 +1350,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_CLKREQ_EN); /* Enter L1 when host tx idle */ - rtsx_pci_write_config_byte(pcr, 0x70F, 0x5B); + pci_write_config_byte(pdev, 0x70F, 0x5B); if (pcr->ops->extra_init_hw) { err = pcr->ops->extra_init_hw(pcr); diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h index 4ff7b221f36e..b93573c3c5fc 100644 --- a/include/linux/rtsx_pci.h +++ b/include/linux/rtsx_pci.h @@ -99,18 +99,6 @@ #define rtsx_pci_readb(pcr, reg) \ ioread8((pcr)->remap_addr + reg) -#define rtsx_pci_read_config_byte(pcr, where, val) \ - pci_read_config_byte((pcr)->pci, where, val) - -#define rtsx_pci_write_config_byte(pcr, where, val) \ - pci_write_config_byte((pcr)->pci, where, val) - -#define rtsx_pci_read_config_dword(pcr, where, val) \ - pci_read_config_dword((pcr)->pci, where, val) - -#define rtsx_pci_write_config_dword(pcr, where, val) \ - pci_write_config_dword((pcr)->pci, where, val) - #define STATE_TRANS_NONE 0 #define STATE_TRANS_CMD 1 #define STATE_TRANS_BUF 2 -- cgit v1.2.3 From ed86a9877d05b99088a409a7603828b818a433dc Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Tue, 21 Jul 
2020 16:23:35 -0500 Subject: misc: rtsx: Find L1 PM Substates capability instead of hard-coding Instead of hard-coding the location of the L1 PM Substates capability based on the Device ID, search for it in the extended capabilities list. This works for any device, as long as it implements the L1 PM Substates capability correctly, so it doesn't require maintenance as new devices are added. No functional change intended. Signed-off-by: Bjorn Helgaas Acked-by: Arnd Bergmann Link: https://lore.kernel.org/r/20200721212336.1159079-5-helgaas@kernel.org [ minor addition due to differences in my tree - gregkh] Signed-off-by: Greg Kroah-Hartman --- drivers/misc/cardreader/rts5228.c | 7 ++++++- drivers/misc/cardreader/rts5249.c | 12 ++++++------ drivers/misc/cardreader/rts5260.c | 7 ++++++- drivers/misc/cardreader/rts5261.c | 7 ++++++- include/linux/rtsx_pci.h | 4 ---- 5 files changed, 24 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c index 448929829de4..f5716c631104 100644 --- a/drivers/misc/cardreader/rts5228.c +++ b/drivers/misc/cardreader/rts5228.c @@ -379,11 +379,16 @@ static void rts5228_process_ocp(struct rtsx_pcr *pcr) static void rts5228_init_from_cfg(struct rtsx_pcr *pcr) { + struct pci_dev *pdev = pcr->pci; + int l1ss; u32 lval; struct rtsx_cr_option *option = &pcr->option; - pci_read_config_dword(pcr->pci, PCR_ASPM_SETTING_REG1, &lval); + l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); + if (!l1ss) + return; + pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval); if (0 == (lval & 0x0F)) rtsx_pci_enable_oobs_polling(pcr); diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c index 665472d05993..1b8149e806c1 100644 --- a/drivers/misc/cardreader/rts5249.c +++ b/drivers/misc/cardreader/rts5249.c @@ -95,15 +95,15 @@ static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state) static void rts5249_init_from_cfg(struct rtsx_pcr *pcr) { struct pci_dev *pdev = pcr->pci; + int l1ss; struct rtsx_cr_option *option = &(pcr->option); u32 lval; - if (CHK_PCI_PID(pcr, PID_524A)) - pci_read_config_dword(pdev, - PCR_ASPM_SETTING_REG1, &lval); - else - pci_read_config_dword(pdev, - PCR_ASPM_SETTING_REG2, &lval); + l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); + if (!l1ss) + return; + + pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval); if (lval & ASPM_L1_1_EN_MASK) rtsx_set_dev_flag(pcr, ASPM_L1_1_EN); diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c index 0e806dd7ad08..ebf77643cc90 100644 --- a/drivers/misc/cardreader/rts5260.c +++ b/drivers/misc/cardreader/rts5260.c @@ -498,10 +498,15 @@ static void rts5260_pwr_saving_setting(struct rtsx_pcr *pcr) static void rts5260_init_from_cfg(struct rtsx_pcr *pcr) { struct pci_dev *pdev = pcr->pci; + int l1ss; struct rtsx_cr_option *option = &pcr->option; u32 lval; - pci_read_config_dword(pdev, PCR_ASPM_SETTING_5260, &lval); + l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); + if (!l1ss) + return; + + pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval); if (lval & ASPM_L1_1_EN_MASK) rtsx_set_dev_flag(pcr, ASPM_L1_1_EN); diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c index 4f30637ee4ac..4b6e3fe4a007 100644 --- a/drivers/misc/cardreader/rts5261.c +++ b/drivers/misc/cardreader/rts5261.c @@ -413,10 +413,15 @@ static int rts5261_init_from_hw(struct rtsx_pcr *pcr) static void rts5261_init_from_cfg(struct rtsx_pcr *pcr) { 
struct pci_dev *pdev = pcr->pci; + int l1ss; u32 lval; struct rtsx_cr_option *option = &pcr->option; - pci_read_config_dword(pdev, PCR_ASPM_SETTING_REG1, &lval); + l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); + if (!l1ss) + return; + + pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval); if (lval & ASPM_L1_1_EN_MASK) rtsx_set_dev_flag(pcr, ASPM_L1_1_EN); diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h index b93573c3c5fc..f146ca413f38 100644 --- a/include/linux/rtsx_pci.h +++ b/include/linux/rtsx_pci.h @@ -1037,10 +1037,6 @@ #define PHY_DIG1E_RX_EN_KEEP 0x0001 #define PHY_DUM_REG 0x1F -#define PCR_ASPM_SETTING_REG1 0x160 -#define PCR_ASPM_SETTING_REG2 0x168 -#define PCR_ASPM_SETTING_5260 0x178 - #define PCR_SETTING_REG1 0x724 #define PCR_SETTING_REG2 0x814 #define PCR_SETTING_REG3 0x747 -- cgit v1.2.3 From 7a4462a96777b64b22412f782de226c90290bf75 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Tue, 21 Jul 2020 16:23:36 -0500 Subject: misc: rtsx: Use standard PCI definitions When reading registers defined by the PCIe spec, use the names already defined by the PCI core. This makes maintenance of the PCI core and drivers easier. No functional change intended. Signed-off-by: Bjorn Helgaas Acked-by: Arnd Bergmann Link: https://lore.kernel.org/r/20200721212336.1159079-6-helgaas@kernel.org [ additional replacements due to changes in my tree - gregkh ] Signed-off-by: Greg Kroah-Hartman --- drivers/misc/cardreader/rts5228.c | 8 ++++---- drivers/misc/cardreader/rts5249.c | 8 ++++---- drivers/misc/cardreader/rts5260.c | 8 ++++---- drivers/misc/cardreader/rts5261.c | 8 ++++---- include/linux/rtsx_pci.h | 5 ----- 5 files changed, 16 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c index f5716c631104..28feab1449ab 100644 --- a/drivers/misc/cardreader/rts5228.c +++ b/drivers/misc/cardreader/rts5228.c @@ -395,22 +395,22 @@ static void rts5228_init_from_cfg(struct rtsx_pcr *pcr) else rtsx_pci_disable_oobs_polling(pcr); - if (lval & ASPM_L1_1_EN_MASK) + if (lval & PCI_L1SS_CTL1_ASPM_L1_1) rtsx_set_dev_flag(pcr, ASPM_L1_1_EN); else rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN); - if (lval & ASPM_L1_2_EN_MASK) + if (lval & PCI_L1SS_CTL1_ASPM_L1_2) rtsx_set_dev_flag(pcr, ASPM_L1_2_EN); else rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN); - if (lval & PM_L1_1_EN_MASK) + if (lval & PCI_L1SS_CTL1_PCIPM_L1_1) rtsx_set_dev_flag(pcr, PM_L1_1_EN); else rtsx_clear_dev_flag(pcr, PM_L1_1_EN); - if (lval & PM_L1_2_EN_MASK) + if (lval & PCI_L1SS_CTL1_PCIPM_L1_2) rtsx_set_dev_flag(pcr, PM_L1_2_EN); else rtsx_clear_dev_flag(pcr, PM_L1_2_EN); diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c index 1b8149e806c1..941b3d77f1e9 100644 --- a/drivers/misc/cardreader/rts5249.c +++ b/drivers/misc/cardreader/rts5249.c @@ -105,16 +105,16 @@ static void rts5249_init_from_cfg(struct rtsx_pcr *pcr) pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval); - if (lval & ASPM_L1_1_EN_MASK) + if (lval & PCI_L1SS_CTL1_ASPM_L1_1) rtsx_set_dev_flag(pcr, ASPM_L1_1_EN); - if (lval & ASPM_L1_2_EN_MASK) + if (lval & PCI_L1SS_CTL1_ASPM_L1_2) rtsx_set_dev_flag(pcr, ASPM_L1_2_EN); - if (lval & PM_L1_1_EN_MASK) + if (lval & PCI_L1SS_CTL1_PCIPM_L1_1) rtsx_set_dev_flag(pcr, PM_L1_1_EN); - if (lval & PM_L1_2_EN_MASK) + if (lval & PCI_L1SS_CTL1_PCIPM_L1_2) rtsx_set_dev_flag(pcr, PM_L1_2_EN); if (option->ltr_en) { diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c index 
ebf77643cc90..b9f66b1384a6 100644 --- a/drivers/misc/cardreader/rts5260.c +++ b/drivers/misc/cardreader/rts5260.c @@ -508,16 +508,16 @@ static void rts5260_init_from_cfg(struct rtsx_pcr *pcr) pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval); - if (lval & ASPM_L1_1_EN_MASK) + if (lval & PCI_L1SS_CTL1_ASPM_L1_1) rtsx_set_dev_flag(pcr, ASPM_L1_1_EN); - if (lval & ASPM_L1_2_EN_MASK) + if (lval & PCI_L1SS_CTL1_ASPM_L1_2) rtsx_set_dev_flag(pcr, ASPM_L1_2_EN); - if (lval & PM_L1_1_EN_MASK) + if (lval & PCI_L1SS_CTL1_PCIPM_L1_1) rtsx_set_dev_flag(pcr, PM_L1_1_EN); - if (lval & PM_L1_2_EN_MASK) + if (lval & PCI_L1SS_CTL1_PCIPM_L1_2) rtsx_set_dev_flag(pcr, PM_L1_2_EN); rts5260_pwr_saving_setting(pcr); diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c index 4b6e3fe4a007..471961487ff8 100644 --- a/drivers/misc/cardreader/rts5261.c +++ b/drivers/misc/cardreader/rts5261.c @@ -423,22 +423,22 @@ static void rts5261_init_from_cfg(struct rtsx_pcr *pcr) pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval); - if (lval & ASPM_L1_1_EN_MASK) + if (lval & PCI_L1SS_CTL1_ASPM_L1_1) rtsx_set_dev_flag(pcr, ASPM_L1_1_EN); else rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN); - if (lval & ASPM_L1_2_EN_MASK) + if (lval & PCI_L1SS_CTL1_ASPM_L1_2) rtsx_set_dev_flag(pcr, ASPM_L1_2_EN); else rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN); - if (lval & PM_L1_1_EN_MASK) + if (lval & PCI_L1SS_CTL1_PCIPM_L1_1) rtsx_set_dev_flag(pcr, PM_L1_1_EN); else rtsx_clear_dev_flag(pcr, PM_L1_1_EN); - if (lval & PM_L1_2_EN_MASK) + if (lval & PCI_L1SS_CTL1_PCIPM_L1_2) rtsx_set_dev_flag(pcr, PM_L1_2_EN); else rtsx_clear_dev_flag(pcr, PM_L1_2_EN); diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h index f146ca413f38..745f5e73f99a 100644 --- a/include/linux/rtsx_pci.h +++ b/include/linux/rtsx_pci.h @@ -1083,11 +1083,6 @@ struct pcr_ops { enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN}; -#define ASPM_L1_1_EN_MASK BIT(3) -#define ASPM_L1_2_EN_MASK BIT(2) -#define PM_L1_1_EN_MASK BIT(1) -#define PM_L1_2_EN_MASK BIT(0) - #define ASPM_L1_1_EN BIT(0) #define ASPM_L1_2_EN BIT(1) #define PM_L1_1_EN BIT(2) -- cgit v1.2.3 From 55d5d3b46b08a4dc0b05343d24640744e7430ed7 Mon Sep 17 00:00:00 2001 From: Dan Murphy Date: Thu, 16 Jul 2020 13:19:56 -0500 Subject: leds: multicolor: Introduce a multicolor class definition Introduce a multicolor class that groups colored LEDs within a LED node. The multicolor class groups monochrome LEDs and allows controlling two aspects of the final combined color: hue and lightness. The former is controlled via the intensity file and the latter is controlled via brightness file. 
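As a sketch of the driver-side API introduced below (the my_* names are hypothetical; the structures, helpers and LED_COLOR_ID_* constants are the ones added by this patch and by linux/leds.h), a cluster of monochrome LEDs is described with mc_subled entries and registered as one led_classdev_mc:

#include <linux/led-class-multicolor.h>
#include <linux/platform_device.h>

static void my_set_brightness(struct led_classdev *cdev,
			      enum led_brightness brightness)
{
	struct led_classdev_mc *mc = lcdev_to_mccdev(cdev);

	/* Scale each color by the global brightness ... */
	led_mc_calc_color_components(mc, brightness);
	/* ... then write mc->subled_info[i].brightness to the hardware. */
}

static struct mc_subled my_subleds[] = {
	{ .color_index = LED_COLOR_ID_RED },
	{ .color_index = LED_COLOR_ID_GREEN },
	{ .color_index = LED_COLOR_ID_BLUE },
};

static struct led_classdev_mc my_mc_led = {
	.num_colors	= ARRAY_SIZE(my_subleds),
	.subled_info	= my_subleds,
	.led_cdev = {
		.max_brightness	= 255,
		.brightness_set	= my_set_brightness,
	},
};

static int my_probe(struct platform_device *pdev)
{
	/* Exposes brightness, multi_index and multi_intensity in sysfs. */
	return devm_led_classdev_multicolor_register(&pdev->dev, &my_mc_led);
}
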
Signed-off-by: Dan Murphy Acked-by: Jacek Anaszewski Signed-off-by: Pavel Machek [squashed leds: multicolor: Fix camel case in documentation in] --- .../ABI/testing/sysfs-class-led-multicolor | 35 ++++ Documentation/leds/index.rst | 1 + Documentation/leds/leds-class-multicolor.rst | 86 +++++++++ drivers/leds/Kconfig | 10 + drivers/leds/Makefile | 1 + drivers/leds/led-class-multicolor.c | 203 +++++++++++++++++++++ include/linux/led-class-multicolor.h | 121 ++++++++++++ 7 files changed, 457 insertions(+) create mode 100644 Documentation/ABI/testing/sysfs-class-led-multicolor create mode 100644 Documentation/leds/leds-class-multicolor.rst create mode 100644 drivers/leds/led-class-multicolor.c create mode 100644 include/linux/led-class-multicolor.h (limited to 'include') diff --git a/Documentation/ABI/testing/sysfs-class-led-multicolor b/Documentation/ABI/testing/sysfs-class-led-multicolor new file mode 100644 index 000000000000..eeeddcbdbbe3 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-led-multicolor @@ -0,0 +1,35 @@ +What: /sys/class/leds//brightness +Date: March 2020 +KernelVersion: 5.9 +Contact: Dan Murphy +Description: read/write + Writing to this file will update all LEDs within the group to a + calculated percentage of what each color LED intensity is set + to. The percentage is calculated for each grouped LED via the + equation below: + + led_brightness = brightness * multi_intensity/max_brightness + + For additional details please refer to + Documentation/leds/leds-class-multicolor.rst. + + The value of the LED is from 0 to + /sys/class/leds//max_brightness. + +What: /sys/class/leds//multi_index +Date: March 2020 +KernelVersion: 5.9 +Contact: Dan Murphy +Description: read + The multi_index array, when read, will output the LED colors + as an array of strings as they are indexed in the + multi_intensity file. + +What: /sys/class/leds//multi_intensity +Date: March 2020 +KernelVersion: 5.9 +Contact: Dan Murphy +Description: read/write + This file contains array of integers. Order of components is + described by the multi_index array. The maximum intensity should + not exceed /sys/class/leds//max_brightness. diff --git a/Documentation/leds/index.rst b/Documentation/leds/index.rst index 060f4e485897..bc70c6aa7138 100644 --- a/Documentation/leds/index.rst +++ b/Documentation/leds/index.rst @@ -9,6 +9,7 @@ LEDs leds-class leds-class-flash + leds-class-multicolor ledtrig-oneshot ledtrig-transient ledtrig-usbport diff --git a/Documentation/leds/leds-class-multicolor.rst b/Documentation/leds/leds-class-multicolor.rst new file mode 100644 index 000000000000..c57b98bfd387 --- /dev/null +++ b/Documentation/leds/leds-class-multicolor.rst @@ -0,0 +1,86 @@ +.. SPDX-License-Identifier: GPL-2.0 + +==================================== +Multicolor LED handling under Linux +==================================== + +Description +=========== +The multicolor class groups monochrome LEDs and allows controlling two +aspects of the final combined color: hue and lightness. The former is +controlled via the multi_intensity array file and the latter is controlled +via brightness file. + +Multicolor Class Control +======================== +The multicolor class presents files that groups the colors as indexes in an +array. These files are children under the LED parent node created by the +led_class framework. The led_class framework is documented in led-class.rst +within this documentation directory. + +Each colored LED will be indexed under the multi_* files. The order of the +colors will be arbitrary. 
The multi_index file can be read to determine the +color name to indexed value. + +The multi_index file is an array that contains the string list of the colors as +they are defined in each multi_* array file. + +The multi_intensity is an array that can be read or written to for the +individual color intensities. All elements within this array must be written in +order for the color LED intensities to be updated. + +Directory Layout Example +======================== +root:/sys/class/leds/multicolor:status# ls -lR +-rw-r--r-- 1 root root 4096 Oct 19 16:16 brightness +-r--r--r-- 1 root root 4096 Oct 19 16:16 max_brightness +-r--r--r-- 1 root root 4096 Oct 19 16:16 multi_index +-rw-r--r-- 1 root root 4096 Oct 19 16:16 multi_intensity + +Multicolor Class Brightness Control +=================================== +The brightness level for each LED is calculated based on the color LED +intensity setting divided by the global max_brightness setting multiplied by +the requested brightness. + +led_brightness = brightness * multi_intensity/max_brightness + +Example: +A user first writes the multi_intensity file with the brightness levels +for each LED that are necessary to achieve a certain color output from a +multicolor LED group. + +cat /sys/class/leds/multicolor:status/multi_index +green blue red + +echo 43 226 138 > /sys/class/leds/multicolor:status/multi_intensity + +red - + intensity = 138 + max_brightness = 255 +green - + intensity = 43 + max_brightness = 255 +blue - + intensity = 226 + max_brightness = 255 + +The user can control the brightness of that multicolor LED group by writing the +global 'brightness' control. Assuming a max_brightness of 255 the user +may want to dim the LED color group to half. The user would write a value of +128 to the global brightness file then the values written to each LED will be +adjusted base on this value. + +cat /sys/class/leds/multicolor:status/max_brightness +255 +echo 128 > /sys/class/leds/multicolor:status/brightness + +adjusted_red_value = 128 * 138/255 = 69 +adjusted_green_value = 128 * 43/255 = 21 +adjusted_blue_value = 128 * 226/255 = 113 + +Reading the global brightness file will return the current brightness value of +the color LED group. + +cat /sys/class/leds/multicolor:status/brightness +128 diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index ed943140e1fd..1de6e8e264a0 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -30,6 +30,16 @@ config LEDS_CLASS_FLASH for the flash related features of a LED device. It can be built as a module. +config LEDS_CLASS_MULTICOLOR + tristate "LED Multicolor Class Support" + depends on LEDS_CLASS + help + This option enables the multicolor LED sysfs class in /sys/class/leds. + It wraps LED class and adds multicolor LED specific sysfs attributes + and kernel internal API to it. You'll need this to provide support + for multicolor LEDs that are grouped together. This class is not + intended for single color LEDs. It can be built as a module. 
+ config LEDS_BRIGHTNESS_HW_CHANGED bool "LED Class brightness_hw_changed attribute support" depends on LEDS_CLASS diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index d6b8a792c936..d684bc76d2b2 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_NEW_LEDS) += led-core.o obj-$(CONFIG_LEDS_CLASS) += led-class.o obj-$(CONFIG_LEDS_CLASS_FLASH) += led-class-flash.o +obj-$(CONFIG_LEDS_CLASS_MULTICOLOR) += led-class-multicolor.o obj-$(CONFIG_LEDS_TRIGGERS) += led-triggers.o # LED Platform Drivers (keep this sorted, M-| sort) diff --git a/drivers/leds/led-class-multicolor.c b/drivers/leds/led-class-multicolor.c new file mode 100644 index 000000000000..e317408583df --- /dev/null +++ b/drivers/leds/led-class-multicolor.c @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: GPL-2.0 +// LED Multicolor class interface +// Copyright (C) 2019-20 Texas Instruments Incorporated - http://www.ti.com/ +// Author: Dan Murphy + +#include +#include +#include +#include +#include +#include + +#include "leds.h" + +int led_mc_calc_color_components(struct led_classdev_mc *mcled_cdev, + enum led_brightness brightness) +{ + struct led_classdev *led_cdev = &mcled_cdev->led_cdev; + int i; + + for (i = 0; i < mcled_cdev->num_colors; i++) + mcled_cdev->subled_info[i].brightness = brightness * + mcled_cdev->subled_info[i].intensity / + led_cdev->max_brightness; + + return 0; +} +EXPORT_SYMBOL_GPL(led_mc_calc_color_components); + +static ssize_t multi_intensity_store(struct device *dev, + struct device_attribute *intensity_attr, + const char *buf, size_t size) +{ + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct led_classdev_mc *mcled_cdev = lcdev_to_mccdev(led_cdev); + int nrchars, offset = 0; + int intensity_value[LED_COLOR_ID_MAX]; + int i; + ssize_t ret; + + mutex_lock(&led_cdev->led_access); + + for (i = 0; i < mcled_cdev->num_colors; i++) { + ret = sscanf(buf + offset, "%i%n", + &intensity_value[i], &nrchars); + if (ret != 1) { + ret = -EINVAL; + goto err_out; + } + offset += nrchars; + } + + offset++; + if (offset < size) { + ret = -EINVAL; + goto err_out; + } + + for (i = 0; i < mcled_cdev->num_colors; i++) + mcled_cdev->subled_info[i].intensity = intensity_value[i]; + + led_set_brightness(led_cdev, led_cdev->brightness); + ret = size; +err_out: + mutex_unlock(&led_cdev->led_access); + return ret; +} + +static ssize_t multi_intensity_show(struct device *dev, + struct device_attribute *intensity_attr, + char *buf) +{ + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct led_classdev_mc *mcled_cdev = lcdev_to_mccdev(led_cdev); + int len = 0; + int i; + + for (i = 0; i < mcled_cdev->num_colors; i++) { + len += sprintf(buf + len, "%d", + mcled_cdev->subled_info[i].intensity); + if (i < mcled_cdev->num_colors - 1) + len += sprintf(buf + len, " "); + } + + buf[len++] = '\n'; + return len; +} +static DEVICE_ATTR_RW(multi_intensity); + +static ssize_t multi_index_show(struct device *dev, + struct device_attribute *multi_index_attr, + char *buf) +{ + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct led_classdev_mc *mcled_cdev = lcdev_to_mccdev(led_cdev); + int len = 0; + int index; + int i; + + for (i = 0; i < mcled_cdev->num_colors; i++) { + index = mcled_cdev->subled_info[i].color_index; + len += sprintf(buf + len, "%s", led_colors[index]); + if (i < mcled_cdev->num_colors - 1) + len += sprintf(buf + len, " "); + } + + buf[len++] = '\n'; + return len; +} +static DEVICE_ATTR_RO(multi_index); + +static struct attribute 
*led_multicolor_attrs[] = { + &dev_attr_multi_intensity.attr, + &dev_attr_multi_index.attr, + NULL, +}; +ATTRIBUTE_GROUPS(led_multicolor); + +int led_classdev_multicolor_register_ext(struct device *parent, + struct led_classdev_mc *mcled_cdev, + struct led_init_data *init_data) +{ + struct led_classdev *led_cdev; + + if (!mcled_cdev) + return -EINVAL; + + if (mcled_cdev->num_colors <= 0) + return -EINVAL; + + if (mcled_cdev->num_colors > LED_COLOR_ID_MAX) + return -EINVAL; + + led_cdev = &mcled_cdev->led_cdev; + mcled_cdev->led_cdev.groups = led_multicolor_groups; + + return led_classdev_register_ext(parent, led_cdev, init_data); +} +EXPORT_SYMBOL_GPL(led_classdev_multicolor_register_ext); + +void led_classdev_multicolor_unregister(struct led_classdev_mc *mcled_cdev) +{ + if (!mcled_cdev) + return; + + led_classdev_unregister(&mcled_cdev->led_cdev); +} +EXPORT_SYMBOL_GPL(led_classdev_multicolor_unregister); + +static void devm_led_classdev_multicolor_release(struct device *dev, void *res) +{ + led_classdev_multicolor_unregister(*(struct led_classdev_mc **)res); +} + +int devm_led_classdev_multicolor_register_ext(struct device *parent, + struct led_classdev_mc *mcled_cdev, + struct led_init_data *init_data) +{ + struct led_classdev_mc **dr; + int ret; + + dr = devres_alloc(devm_led_classdev_multicolor_release, + sizeof(*dr), GFP_KERNEL); + if (!dr) + return -ENOMEM; + + ret = led_classdev_multicolor_register_ext(parent, mcled_cdev, + init_data); + if (ret) { + devres_free(dr); + return ret; + } + + *dr = mcled_cdev; + devres_add(parent, dr); + + return 0; +} +EXPORT_SYMBOL_GPL(devm_led_classdev_multicolor_register_ext); + +static int devm_led_classdev_multicolor_match(struct device *dev, + void *res, void *data) +{ + struct led_classdev_mc **p = res; + + if (WARN_ON(!p || !*p)) + return 0; + + return *p == data; +} + +void devm_led_classdev_multicolor_unregister(struct device *dev, + struct led_classdev_mc *mcled_cdev) +{ + WARN_ON(devres_release(dev, + devm_led_classdev_multicolor_release, + devm_led_classdev_multicolor_match, mcled_cdev)); +} +EXPORT_SYMBOL_GPL(devm_led_classdev_multicolor_unregister); + +MODULE_AUTHOR("Dan Murphy "); +MODULE_DESCRIPTION("Multicolor LED class interface"); +MODULE_LICENSE("GPL v2"); diff --git a/include/linux/led-class-multicolor.h b/include/linux/led-class-multicolor.h new file mode 100644 index 000000000000..5116f9a866cc --- /dev/null +++ b/include/linux/led-class-multicolor.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* LED Multicolor class interface + * Copyright (C) 2019-20 Texas Instruments Incorporated - http://www.ti.com/ + */ + +#ifndef _LINUX_MULTICOLOR_LEDS_H_INCLUDED +#define _LINUX_MULTICOLOR_LEDS_H_INCLUDED + +#include +#include + +struct mc_subled { + unsigned int color_index; + unsigned int brightness; + unsigned int intensity; + unsigned int channel; +}; + +struct led_classdev_mc { + /* led class device */ + struct led_classdev led_cdev; + unsigned int num_colors; + + struct mc_subled *subled_info; +}; + +static inline struct led_classdev_mc *lcdev_to_mccdev( + struct led_classdev *led_cdev) +{ + return container_of(led_cdev, struct led_classdev_mc, led_cdev); +} + +#if IS_ENABLED(CONFIG_LEDS_CLASS_MULTICOLOR) +/** + * led_classdev_multicolor_register_ext - register a new object of led_classdev + * class with support for multicolor LEDs + * @parent: the multicolor LED to register + * @mcled_cdev: the led_classdev_mc structure for this device + * @init_data: the LED class multicolor device initialization data + * + * 
Returns: 0 on success or negative error value on failure + */ +int led_classdev_multicolor_register_ext(struct device *parent, + struct led_classdev_mc *mcled_cdev, + struct led_init_data *init_data); + +static inline int led_classdev_multicolor_register(struct device *parent, + struct led_classdev_mc *mcled_cdev) +{ + return led_classdev_multicolor_register_ext(parent, mcled_cdev, NULL); +} + +/** + * led_classdev_multicolor_unregister - unregisters an object of led_classdev + * class with support for multicolor LEDs + * @mcled_cdev: the multicolor LED to unregister + * + * Unregister a previously registered via led_classdev_multicolor_register + * object + */ +void led_classdev_multicolor_unregister(struct led_classdev_mc *mcled_cdev); + +/* Calculate brightness for the monochrome LED cluster */ +int led_mc_calc_color_components(struct led_classdev_mc *mcled_cdev, + enum led_brightness brightness); + +int devm_led_classdev_multicolor_register_ext(struct device *parent, + struct led_classdev_mc *mcled_cdev, + struct led_init_data *init_data); + +static inline int devm_led_classdev_multicolor_register(struct device *parent, + struct led_classdev_mc *mcled_cdev) +{ + return devm_led_classdev_multicolor_register_ext(parent, mcled_cdev, + NULL); +} + +void devm_led_classdev_multicolor_unregister(struct device *parent, + struct led_classdev_mc *mcled_cdev); +#else + +static inline int led_classdev_multicolor_register_ext(struct device *parent, + struct led_classdev_mc *mcled_cdev, + struct led_init_data *init_data) +{ + return -EINVAL; +} + +static inline int led_classdev_multicolor_register(struct device *parent, + struct led_classdev_mc *mcled_cdev) +{ + return led_classdev_multicolor_register_ext(parent, mcled_cdev, NULL); +} + +static inline void led_classdev_multicolor_unregister(struct led_classdev_mc *mcled_cdev) {}; +static inline int led_mc_calc_color_components(struct led_classdev_mc *mcled_cdev, + enum led_brightness brightness) +{ + return -EINVAL; +} + +static inline int devm_led_classdev_multicolor_register_ext(struct device *parent, + struct led_classdev_mc *mcled_cdev, + struct led_init_data *init_data) +{ + return -EINVAL; +} + +static inline int devm_led_classdev_multicolor_register(struct device *parent, + struct led_classdev_mc *mcled_cdev) +{ + return devm_led_classdev_multicolor_register_ext(parent, mcled_cdev, + NULL); +} + +static inline void devm_led_classdev_multicolor_unregister(struct device *parent, + struct led_classdev_mc *mcled_cdev) +{}; + +#endif /* IS_ENABLED(CONFIG_LEDS_CLASS_MULTICOLOR) */ +#endif /* _LINUX_MULTICOLOR_LEDS_H_INCLUDED */ -- cgit v1.2.3 From 92a81562e695628086acb92f95090ab09d9b9ec0 Mon Sep 17 00:00:00 2001 From: Dan Murphy Date: Thu, 16 Jul 2020 13:20:01 -0500 Subject: leds: lp55xx: Add multicolor framework support to lp55xx Add multicolor framework support for the lp55xx family. 
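A condensed sketch of the probe-ordering change visible in the diff below (simplified, error handling trimmed): the chip is now allocated and its ->cfg assigned before lp55xx_of_populate_pdata(), which gains a chip argument so the common code can build multicolor platform data.

	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;
	chip->cfg = &lp5521_cfg;

	if (!pdata && np)
		pdata = lp55xx_of_populate_pdata(&client->dev, np, chip);
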
Acked-by: Pavel Machek Acked-by: Jacek Anaszewski Signed-off-by: Dan Murphy Signed-off-by: Pavel Machek --- drivers/leds/Kconfig | 11 +- drivers/leds/leds-lp5521.c | 14 ++- drivers/leds/leds-lp5523.c | 14 ++- drivers/leds/leds-lp5562.c | 13 ++- drivers/leds/leds-lp55xx-common.c | 177 ++++++++++++++++++++++++++---- drivers/leds/leds-lp55xx-common.h | 14 ++- drivers/leds/leds-lp8501.c | 14 ++- include/linux/platform_data/leds-lp55xx.h | 7 ++ 8 files changed, 212 insertions(+), 52 deletions(-) (limited to 'include') diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 1de6e8e264a0..b9002850b5fa 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -386,7 +386,8 @@ config LEDS_LP3952 config LEDS_LP55XX_COMMON tristate "Common Driver for TI/National LP5521/5523/55231/5562/8501" - depends on LEDS_LP5521 || LEDS_LP5523 || LEDS_LP5562 || LEDS_LP8501 + depends on LEDS_CLASS_MULTICOLOR || !LEDS_CLASS_MULTICOLOR + depends on OF select FW_LOADER select FW_LOADER_USER_HELPER help @@ -396,7 +397,7 @@ config LEDS_LP55XX_COMMON config LEDS_LP5521 tristate "LED Support for N.S. LP5521 LED driver chip" depends on LEDS_CLASS && I2C - select LEDS_LP55XX_COMMON + depends on LEDS_LP55XX_COMMON help If you say yes here you get support for the National Semiconductor LP5521 LED driver. It is 3 channel chip with programmable engines. @@ -406,7 +407,7 @@ config LEDS_LP5521 config LEDS_LP5523 tristate "LED Support for TI/National LP5523/55231 LED driver chip" depends on LEDS_CLASS && I2C - select LEDS_LP55XX_COMMON + depends on LEDS_LP55XX_COMMON help If you say yes here you get support for TI/National Semiconductor LP5523/55231 LED driver. @@ -417,7 +418,7 @@ config LEDS_LP5523 config LEDS_LP5562 tristate "LED Support for TI LP5562 LED driver chip" depends on LEDS_CLASS && I2C - select LEDS_LP55XX_COMMON + depends on LEDS_LP55XX_COMMON help If you say yes here you get support for TI LP5562 LED driver. It is 4 channels chip with programmable engines. @@ -427,7 +428,7 @@ config LEDS_LP5562 config LEDS_LP8501 tristate "LED Support for TI LP8501 LED driver chip" depends on LEDS_CLASS && I2C - select LEDS_LP55XX_COMMON + depends on LEDS_LP55XX_COMMON help If you say yes here you get support for TI LP8501 LED driver. It is 9 channel chip with programmable engines. 
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c index 6d2163c0f625..6ff81d6be789 100644 --- a/drivers/leds/leds-lp5521.c +++ b/drivers/leds/leds-lp5521.c @@ -505,9 +505,16 @@ static int lp5521_probe(struct i2c_client *client, struct lp55xx_platform_data *pdata = dev_get_platdata(&client->dev); struct device_node *np = client->dev.of_node; + chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + chip->cfg = &lp5521_cfg; + if (!pdata) { if (np) { - pdata = lp55xx_of_populate_pdata(&client->dev, np); + pdata = lp55xx_of_populate_pdata(&client->dev, np, + chip); if (IS_ERR(pdata)) return PTR_ERR(pdata); } else { @@ -516,10 +523,6 @@ static int lp5521_probe(struct i2c_client *client, } } - chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL); - if (!chip) - return -ENOMEM; - led = devm_kcalloc(&client->dev, pdata->num_channels, sizeof(*led), GFP_KERNEL); if (!led) @@ -527,7 +530,6 @@ static int lp5521_probe(struct i2c_client *client, chip->cl = client; chip->pdata = pdata; - chip->cfg = &lp5521_cfg; mutex_init(&chip->lock); diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c index cb550cf19e14..bb97549007d7 100644 --- a/drivers/leds/leds-lp5523.c +++ b/drivers/leds/leds-lp5523.c @@ -873,9 +873,16 @@ static int lp5523_probe(struct i2c_client *client, struct lp55xx_platform_data *pdata = dev_get_platdata(&client->dev); struct device_node *np = client->dev.of_node; + chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + chip->cfg = &lp5523_cfg; + if (!pdata) { if (np) { - pdata = lp55xx_of_populate_pdata(&client->dev, np); + pdata = lp55xx_of_populate_pdata(&client->dev, np, + chip); if (IS_ERR(pdata)) return PTR_ERR(pdata); } else { @@ -884,10 +891,6 @@ static int lp5523_probe(struct i2c_client *client, } } - chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL); - if (!chip) - return -ENOMEM; - led = devm_kcalloc(&client->dev, pdata->num_channels, sizeof(*led), GFP_KERNEL); if (!led) @@ -895,7 +898,6 @@ static int lp5523_probe(struct i2c_client *client, chip->cl = client; chip->pdata = pdata; - chip->cfg = &lp5523_cfg; mutex_init(&chip->lock); diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c index 1c94422408b0..7ecdd199d7ef 100644 --- a/drivers/leds/leds-lp5562.c +++ b/drivers/leds/leds-lp5562.c @@ -520,9 +520,16 @@ static int lp5562_probe(struct i2c_client *client, struct lp55xx_platform_data *pdata = dev_get_platdata(&client->dev); struct device_node *np = client->dev.of_node; + chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + chip->cfg = &lp5562_cfg; + if (!pdata) { if (np) { - pdata = lp55xx_of_populate_pdata(&client->dev, np); + pdata = lp55xx_of_populate_pdata(&client->dev, np, + chip); if (IS_ERR(pdata)) return PTR_ERR(pdata); } else { @@ -531,9 +538,6 @@ static int lp5562_probe(struct i2c_client *client, } } - chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL); - if (!chip) - return -ENOMEM; led = devm_kcalloc(&client->dev, pdata->num_channels, sizeof(*led), GFP_KERNEL); @@ -542,7 +546,6 @@ static int lp5562_probe(struct i2c_client *client, chip->cl = client; chip->pdata = pdata; - chip->cfg = &lp5562_cfg; mutex_init(&chip->lock); diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c index 243c749ebda5..af14e2b2d577 100644 --- a/drivers/leds/leds-lp55xx-common.c +++ b/drivers/leds/leds-lp55xx-common.c @@ -34,6 +34,11 @@ static struct lp55xx_led 
*dev_to_lp55xx_led(struct device *dev) return cdev_to_lp55xx_led(dev_get_drvdata(dev)); } +static struct lp55xx_led *mcled_cdev_to_led(struct led_classdev_mc *mc_cdev) +{ + return container_of(mc_cdev, struct lp55xx_led, mc_cdev); +} + static void lp55xx_reset_device(struct lp55xx_chip *chip) { struct lp55xx_device_config *cfg = chip->cfg; @@ -129,6 +134,18 @@ static struct attribute *lp55xx_led_attrs[] = { }; ATTRIBUTE_GROUPS(lp55xx_led); +static int lp55xx_set_mc_brightness(struct led_classdev *cdev, + enum led_brightness brightness) +{ + struct led_classdev_mc *mc_dev = lcdev_to_mccdev(cdev); + struct lp55xx_led *led = mcled_cdev_to_led(mc_dev); + struct lp55xx_device_config *cfg = led->chip->cfg; + + led_mc_calc_color_components(&led->mc_cdev, brightness); + return cfg->multicolor_brightness_fn(led); + +} + static int lp55xx_set_brightness(struct led_classdev *cdev, enum led_brightness brightness) { @@ -145,9 +162,12 @@ static int lp55xx_init_led(struct lp55xx_led *led, struct lp55xx_platform_data *pdata = chip->pdata; struct lp55xx_device_config *cfg = chip->cfg; struct device *dev = &chip->cl->dev; + int max_channel = cfg->max_channel; + struct mc_subled *mc_led_info; + struct led_classdev *led_cdev; char name[32]; + int i, j = 0; int ret; - int max_channel = cfg->max_channel; if (chan >= max_channel) { dev_err(dev, "invalid channel: %d / %d\n", chan, max_channel); @@ -157,10 +177,43 @@ static int lp55xx_init_led(struct lp55xx_led *led, if (pdata->led_config[chan].led_current == 0) return 0; + if (pdata->led_config[chan].name) { + led->cdev.name = pdata->led_config[chan].name; + } else { + snprintf(name, sizeof(name), "%s:channel%d", + pdata->label ? : chip->cl->name, chan); + led->cdev.name = name; + } + + if (pdata->led_config[chan].num_colors > 1) { + mc_led_info = devm_kcalloc(dev, + pdata->led_config[chan].num_colors, + sizeof(*mc_led_info), GFP_KERNEL); + if (!mc_led_info) + return -ENOMEM; + + led_cdev = &led->mc_cdev.led_cdev; + led_cdev->name = led->cdev.name; + led_cdev->brightness_set_blocking = lp55xx_set_mc_brightness; + led->mc_cdev.num_colors = pdata->led_config[chan].num_colors; + for (i = 0; i < led->mc_cdev.num_colors; i++) { + mc_led_info[i].color_index = + pdata->led_config[chan].color_id[i]; + mc_led_info[i].channel = + pdata->led_config[chan].output_num[i]; + j++; + } + + led->mc_cdev.subled_info = mc_led_info; + } else { + led->cdev.brightness_set_blocking = lp55xx_set_brightness; + } + + led->cdev.groups = lp55xx_led_groups; + led->cdev.default_trigger = pdata->led_config[chan].default_trigger; led->led_current = pdata->led_config[chan].led_current; led->max_current = pdata->led_config[chan].max_current; led->chan_nr = pdata->led_config[chan].chan_nr; - led->cdev.default_trigger = pdata->led_config[chan].default_trigger; if (led->chan_nr >= max_channel) { dev_err(dev, "Use channel numbers between 0 and %d\n", @@ -168,18 +221,11 @@ static int lp55xx_init_led(struct lp55xx_led *led, return -EINVAL; } - led->cdev.brightness_set_blocking = lp55xx_set_brightness; - led->cdev.groups = lp55xx_led_groups; - - if (pdata->led_config[chan].name) { - led->cdev.name = pdata->led_config[chan].name; - } else { - snprintf(name, sizeof(name), "%s:channel%d", - pdata->label ? 
: chip->cl->name, chan); - led->cdev.name = name; - } + if (pdata->led_config[chan].num_colors > 1) + ret = devm_led_classdev_multicolor_register(dev, &led->mc_cdev); + else + ret = devm_led_classdev_register(dev, &led->cdev); - ret = devm_led_classdev_register(dev, &led->cdev); if (ret) { dev_err(dev, "led register err: %d\n", ret); return ret; @@ -515,14 +561,105 @@ void lp55xx_unregister_sysfs(struct lp55xx_chip *chip) } EXPORT_SYMBOL_GPL(lp55xx_unregister_sysfs); +static int lp55xx_parse_common_child(struct device_node *np, + struct lp55xx_led_config *cfg, + int led_number, int *chan_nr) +{ + int ret; + + of_property_read_string(np, "chan-name", + &cfg[led_number].name); + of_property_read_u8(np, "led-cur", + &cfg[led_number].led_current); + of_property_read_u8(np, "max-cur", + &cfg[led_number].max_current); + + ret = of_property_read_u32(np, "reg", chan_nr); + if (ret) + return ret; + + if (*chan_nr < 0 || *chan_nr > cfg->max_channel) + return -EINVAL; + + return 0; +} + +static int lp55xx_parse_multi_led_child(struct device_node *child, + struct lp55xx_led_config *cfg, + int child_number, int color_number) +{ + int chan_nr, color_id, ret; + + ret = lp55xx_parse_common_child(child, cfg, child_number, &chan_nr); + if (ret) + return ret; + + ret = of_property_read_u32(child, "color", &color_id); + if (ret) + return ret; + + cfg[child_number].color_id[color_number] = color_id; + cfg[child_number].output_num[color_number] = chan_nr; + + return 0; +} + +static int lp55xx_parse_multi_led(struct device_node *np, + struct lp55xx_led_config *cfg, + int child_number) +{ + struct device_node *child; + int num_colors = 0, ret; + + for_each_child_of_node(np, child) { + ret = lp55xx_parse_multi_led_child(child, cfg, child_number, + num_colors); + if (ret) + return ret; + num_colors++; + } + + cfg[child_number].num_colors = num_colors; + + return 0; +} + +static int lp55xx_parse_logical_led(struct device_node *np, + struct lp55xx_led_config *cfg, + int child_number) +{ + int led_color, ret; + int chan_nr = 0; + + cfg[child_number].default_trigger = + of_get_property(np, "linux,default-trigger", NULL); + + ret = of_property_read_u32(np, "color", &led_color); + if (ret) + return ret; + + if (led_color == LED_COLOR_ID_MULTI) + return lp55xx_parse_multi_led(np, cfg, child_number); + + ret = lp55xx_parse_common_child(np, cfg, child_number, &chan_nr); + if (ret < 0) + return ret; + + cfg[child_number].chan_nr = chan_nr; + + return ret; +} + struct lp55xx_platform_data *lp55xx_of_populate_pdata(struct device *dev, - struct device_node *np) + struct device_node *np, + struct lp55xx_chip *chip) { struct device_node *child; struct lp55xx_platform_data *pdata; struct lp55xx_led_config *cfg; int num_channels; int i = 0; + int ret; pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) @@ -540,16 +677,12 @@ struct lp55xx_platform_data *lp55xx_of_populate_pdata(struct device *dev, pdata->led_config = &cfg[0]; pdata->num_channels = num_channels; + cfg->max_channel = chip->cfg->max_channel; for_each_child_of_node(np, child) { - cfg[i].chan_nr = i; - - of_property_read_string(child, "chan-name", &cfg[i].name); - of_property_read_u8(child, "led-cur", &cfg[i].led_current); - of_property_read_u8(child, "max-cur", &cfg[i].max_current); - cfg[i].default_trigger = - of_get_property(child, "linux,default-trigger", NULL); - + ret = lp55xx_parse_logical_led(child, cfg, i); + if (ret) + return ERR_PTR(-EINVAL); i++; } diff --git a/drivers/leds/leds-lp55xx-common.h b/drivers/leds/leds-lp55xx-common.h index 
b9b1041e8143..2f38c5b33830 100644 --- a/drivers/leds/leds-lp55xx-common.h +++ b/drivers/leds/leds-lp55xx-common.h @@ -12,6 +12,8 @@ #ifndef _LEDS_LP55XX_COMMON_H #define _LEDS_LP55XX_COMMON_H +#include + enum lp55xx_engine_index { LP55XX_ENGINE_INVALID, LP55XX_ENGINE_1, @@ -93,6 +95,7 @@ struct lp55xx_reg { * @max_channel : Maximum number of channels * @post_init_device : Chip specific initialization code * @brightness_fn : Brightness function + * @multicolor_brightness_fn : Multicolor brightness function * @set_led_current : LED current set function * @firmware_cb : Call function when the firmware is loaded * @run_engine : Run internal engine for pattern @@ -106,9 +109,12 @@ struct lp55xx_device_config { /* define if the device has specific initialization process */ int (*post_init_device) (struct lp55xx_chip *chip); - /* access brightness register */ + /* set LED brightness */ int (*brightness_fn)(struct lp55xx_led *led); + /* set multicolor LED brightness */ + int (*multicolor_brightness_fn)(struct lp55xx_led *led); + /* current setting function */ void (*set_led_current) (struct lp55xx_led *led, u8 led_current); @@ -159,6 +165,8 @@ struct lp55xx_chip { * struct lp55xx_led * @chan_nr : Channel number * @cdev : LED class device + * @mc_cdev : Multi color class device + * @color_components: Multi color LED map information * @led_current : Current setting at each led channel * @max_current : Maximun current at each led channel * @brightness : Brightness value @@ -167,6 +175,7 @@ struct lp55xx_chip { struct lp55xx_led { int chan_nr; struct led_classdev cdev; + struct led_classdev_mc mc_cdev; u8 led_current; u8 max_current; u8 brightness; @@ -196,6 +205,7 @@ extern void lp55xx_unregister_sysfs(struct lp55xx_chip *chip); /* common device tree population function */ extern struct lp55xx_platform_data -*lp55xx_of_populate_pdata(struct device *dev, struct device_node *np); +*lp55xx_of_populate_pdata(struct device *dev, struct device_node *np, + struct lp55xx_chip *chip); #endif /* _LEDS_LP55XX_COMMON_H */ diff --git a/drivers/leds/leds-lp8501.c b/drivers/leds/leds-lp8501.c index a58019cdb8c3..ac2c31db4a65 100644 --- a/drivers/leds/leds-lp8501.c +++ b/drivers/leds/leds-lp8501.c @@ -308,9 +308,16 @@ static int lp8501_probe(struct i2c_client *client, struct lp55xx_platform_data *pdata = dev_get_platdata(&client->dev); struct device_node *np = client->dev.of_node; + chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + chip->cfg = &lp8501_cfg; + if (!pdata) { if (np) { - pdata = lp55xx_of_populate_pdata(&client->dev, np); + pdata = lp55xx_of_populate_pdata(&client->dev, np, + chip); if (IS_ERR(pdata)) return PTR_ERR(pdata); } else { @@ -319,10 +326,6 @@ static int lp8501_probe(struct i2c_client *client, } } - chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL); - if (!chip) - return -ENOMEM; - led = devm_kcalloc(&client->dev, pdata->num_channels, sizeof(*led), GFP_KERNEL); if (!led) @@ -330,7 +333,6 @@ static int lp8501_probe(struct i2c_client *client, chip->cl = client; chip->pdata = pdata; - chip->cfg = &lp8501_cfg; mutex_init(&chip->lock); diff --git a/include/linux/platform_data/leds-lp55xx.h b/include/linux/platform_data/leds-lp55xx.h index 00492d6ff018..3441064713a3 100644 --- a/include/linux/platform_data/leds-lp55xx.h +++ b/include/linux/platform_data/leds-lp55xx.h @@ -13,18 +13,25 @@ #define _LEDS_LP55XX_H #include +#include /* Clock configuration */ #define LP55XX_CLOCK_AUTO 0 #define LP55XX_CLOCK_INT 1 #define LP55XX_CLOCK_EXT 2 +#define 
LP55XX_MAX_GROUPED_CHAN 4 + struct lp55xx_led_config { const char *name; const char *default_trigger; u8 chan_nr; u8 led_current; /* mA x10, 0 if led is not connected */ u8 max_current; + int num_colors; + unsigned int max_channel; + int color_id[LED_COLOR_ID_MAX]; + int output_num[LED_COLOR_ID_MAX]; }; struct lp55xx_predef_pattern { -- cgit v1.2.3 From 93690cdf3060c61dfce813121d0bfc055e7fa30d Mon Sep 17 00:00:00 2001 From: Marek Behún Date: Thu, 16 Jul 2020 19:17:28 +0200 Subject: leds: trigger: add support for LED-private device triggers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some LED controllers may come with an internal HW triggering mechanism for the LED and the ability to switch between SW control and the internal HW control. This includes most PHYs, various ethernet switches, the Turris Omnia LED controller or AXP20X PMIC. This adds support for registering such triggers. This code is based on work by Pavel Machek and Ondřej Jirman . Signed-off-by: Marek Behún Acked-by: Jacek Anaszewski Signed-off-by: Pavel Machek --- drivers/leds/led-triggers.c | 26 ++++++++++++++++++++------ include/linux/leds.h | 10 ++++++++++ 2 files changed, 30 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c index 0836bf7631ea..91da90cfb11d 100644 --- a/drivers/leds/led-triggers.c +++ b/drivers/leds/led-triggers.c @@ -27,6 +27,12 @@ LIST_HEAD(trigger_list); /* Used by LED Class */ +static inline bool +trigger_relevant(struct led_classdev *led_cdev, struct led_trigger *trig) +{ + return !trig->trigger_type || trig->trigger_type == led_cdev->trigger_type; +} + ssize_t led_trigger_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t count) @@ -50,7 +56,7 @@ ssize_t led_trigger_write(struct file *filp, struct kobject *kobj, down_read(&triggers_list_lock); list_for_each_entry(trig, &trigger_list, next_trig) { - if (sysfs_streq(buf, trig->name)) { + if (sysfs_streq(buf, trig->name) && trigger_relevant(led_cdev, trig)) { down_write(&led_cdev->trigger_lock); led_trigger_set(led_cdev, trig); up_write(&led_cdev->trigger_lock); @@ -93,8 +99,12 @@ static int led_trigger_format(char *buf, size_t size, led_cdev->trigger ? "none" : "[none]"); list_for_each_entry(trig, &trigger_list, next_trig) { - bool hit = led_cdev->trigger && - !strcmp(led_cdev->trigger->name, trig->name); + bool hit; + + if (!trigger_relevant(led_cdev, trig)) + continue; + + hit = led_cdev->trigger && !strcmp(led_cdev->trigger->name, trig->name); len += led_trigger_snprintf(buf + len, size - len, " %s%s%s", hit ? 
"[" : "", @@ -243,7 +253,8 @@ void led_trigger_set_default(struct led_classdev *led_cdev) down_read(&triggers_list_lock); down_write(&led_cdev->trigger_lock); list_for_each_entry(trig, &trigger_list, next_trig) { - if (!strcmp(led_cdev->default_trigger, trig->name)) { + if (!strcmp(led_cdev->default_trigger, trig->name) && + trigger_relevant(led_cdev, trig)) { led_cdev->flags |= LED_INIT_DEFAULT_TRIGGER; led_trigger_set(led_cdev, trig); break; @@ -280,7 +291,9 @@ int led_trigger_register(struct led_trigger *trig) down_write(&triggers_list_lock); /* Make sure the trigger's name isn't already in use */ list_for_each_entry(_trig, &trigger_list, next_trig) { - if (!strcmp(_trig->name, trig->name)) { + if (!strcmp(_trig->name, trig->name) && + (trig->trigger_type == _trig->trigger_type || + !trig->trigger_type || !_trig->trigger_type)) { up_write(&triggers_list_lock); return -EEXIST; } @@ -294,7 +307,8 @@ int led_trigger_register(struct led_trigger *trig) list_for_each_entry(led_cdev, &leds_list, node) { down_write(&led_cdev->trigger_lock); if (!led_cdev->trigger && led_cdev->default_trigger && - !strcmp(led_cdev->default_trigger, trig->name)) { + !strcmp(led_cdev->default_trigger, trig->name) && + trigger_relevant(led_cdev, trig)) { led_cdev->flags |= LED_INIT_DEFAULT_TRIGGER; led_trigger_set(led_cdev, trig); } diff --git a/include/linux/leds.h b/include/linux/leds.h index 2451962d1ec5..6a8d6409c993 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -57,6 +57,10 @@ struct led_init_data { bool devname_mandatory; }; +struct led_hw_trigger_type { + int dummy; +}; + struct led_classdev { const char *name; enum led_brightness brightness; @@ -141,6 +145,9 @@ struct led_classdev { void *trigger_data; /* true if activated - deactivate routine uses it to do cleanup */ bool activated; + + /* LEDs that have private triggers have this set */ + struct led_hw_trigger_type *trigger_type; #endif #ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED @@ -345,6 +352,9 @@ struct led_trigger { int (*activate)(struct led_classdev *led_cdev); void (*deactivate)(struct led_classdev *led_cdev); + /* LED-private triggers have this set */ + struct led_hw_trigger_type *trigger_type; + /* LEDs under control by this trigger (for simple triggers) */ rwlock_t leddev_list_lock; struct list_head led_cdevs; -- cgit v1.2.3 From 9fadd6d1e2977bbd449d4fb99cde41ed6f71f668 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 16 Jul 2020 14:02:08 +0200 Subject: drm/ttm: remove TTM_MEMTYPE_FLAG_MAPPABLE MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Not used any more. And it is bad design to use a TTM flag to do a check inside a driver. 
Signed-off-by: Christian König Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/378245/ --- include/drm/ttm/ttm_bo_driver.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 71b195e78c7c..9b251853afe2 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -46,7 +46,6 @@ #define TTM_MAX_BO_PRIORITY 4U #define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */ -#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */ struct ttm_mem_type_manager; -- cgit v1.2.3 From 39c8378a1cdf856a3671b6431f99352b75a07248 Mon Sep 17 00:00:00 2001 From: Lars Povlsen Date: Mon, 15 Jun 2020 15:32:39 +0200 Subject: dt-bindings: clock: sparx5: Add bindings include file The Sparx5 support 9 different clock outputs. This include file has defines for each supported clock ordinal. Link: https://lore.kernel.org/r/20200615133242.24911-8-lars.povlsen@microchip.com Reviewed-by: Stephen Boyd Reviewed-by: Alexandre Belloni Signed-off-by: Lars Povlsen Reviewed-by: Rob Herring Signed-off-by: Arnd Bergmann --- include/dt-bindings/clock/microchip,sparx5.h | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 include/dt-bindings/clock/microchip,sparx5.h (limited to 'include') diff --git a/include/dt-bindings/clock/microchip,sparx5.h b/include/dt-bindings/clock/microchip,sparx5.h new file mode 100644 index 000000000000..4b04dabacec2 --- /dev/null +++ b/include/dt-bindings/clock/microchip,sparx5.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019 Microchip Inc. + * + * Author: Lars Povlsen + */ + +#ifndef _DT_BINDINGS_CLK_SPARX5_H +#define _DT_BINDINGS_CLK_SPARX5_H + +#define CLK_ID_CORE 0 +#define CLK_ID_DDR 1 +#define CLK_ID_CPU2 2 +#define CLK_ID_ARM2 3 +#define CLK_ID_AUX1 4 +#define CLK_ID_AUX2 5 +#define CLK_ID_AUX3 6 +#define CLK_ID_AUX4 7 +#define CLK_ID_SYNCE 8 + +#define N_CLOCKS 9 + +#endif -- cgit v1.2.3 From 2cf2f4f546f1dea54b63302a7eb28d1fe15f1e28 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Thu, 23 Jul 2020 01:10:31 +0300 Subject: qed: reformat "qed_chain.h" a bit Reformat structs and macros definitions a bit prior to making functional changes. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: Michal Kalderon Signed-off-by: David S. 
Miller --- include/linux/qed/qed_chain.h | 126 ++++++++++++++++++++++-------------------- 1 file changed, 66 insertions(+), 60 deletions(-) (limited to 'include') diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 7071dc92b4e2..087073517c09 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -26,9 +26,9 @@ enum qed_chain_mode { }; enum qed_chain_use_mode { - QED_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */ - QED_CHAIN_USE_TO_CONSUME, /* Chain starts full */ - QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */ + QED_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */ + QED_CHAIN_USE_TO_CONSUME, /* Chain starts full */ + QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */ }; enum qed_chain_cnt_type { @@ -40,84 +40,86 @@ enum qed_chain_cnt_type { }; struct qed_chain_next { - struct regpair next_phys; - void *next_virt; + struct regpair next_phys; + void *next_virt; }; struct qed_chain_pbl_u16 { - u16 prod_page_idx; - u16 cons_page_idx; + u16 prod_page_idx; + u16 cons_page_idx; }; struct qed_chain_pbl_u32 { - u32 prod_page_idx; - u32 cons_page_idx; + u32 prod_page_idx; + u32 cons_page_idx; }; struct qed_chain_ext_pbl { - dma_addr_t p_pbl_phys; - void *p_pbl_virt; + dma_addr_t p_pbl_phys; + void *p_pbl_virt; }; struct qed_chain_u16 { /* Cyclic index of next element to produce/consme */ - u16 prod_idx; - u16 cons_idx; + u16 prod_idx; + u16 cons_idx; }; struct qed_chain_u32 { /* Cyclic index of next element to produce/consme */ - u32 prod_idx; - u32 cons_idx; + u32 prod_idx; + u32 cons_idx; }; struct addr_tbl_entry { - void *virt_addr; - dma_addr_t dma_map; + void *virt_addr; + dma_addr_t dma_map; }; struct qed_chain { - /* fastpath portion of the chain - required for commands such + /* Fastpath portion of the chain - required for commands such * as produce / consume. */ + /* Point to next element to produce/consume */ - void *p_prod_elem; - void *p_cons_elem; + void *p_prod_elem; + void *p_cons_elem; /* Fastpath portions of the PBL [if exists] */ + struct { /* Table for keeping the virtual and physical addresses of the * chain pages, respectively to the physical addresses * in the pbl table. */ - struct addr_tbl_entry *pp_addr_tbl; + struct addr_tbl_entry *pp_addr_tbl; union { - struct qed_chain_pbl_u16 u16; - struct qed_chain_pbl_u32 u32; - } c; - } pbl; + struct qed_chain_pbl_u16 u16; + struct qed_chain_pbl_u32 u32; + } c; + } pbl; union { - struct qed_chain_u16 chain16; - struct qed_chain_u32 chain32; - } u; + struct qed_chain_u16 chain16; + struct qed_chain_u32 chain32; + } u; /* Capacity counts only usable elements */ - u32 capacity; - u32 page_cnt; + u32 capacity; + u32 page_cnt; - enum qed_chain_mode mode; + enum qed_chain_mode mode; /* Elements information for fast calculations */ - u16 elem_per_page; - u16 elem_per_page_mask; - u16 elem_size; - u16 next_page_mask; - u16 usable_per_page; - u8 elem_unusable; + u16 elem_per_page; + u16 elem_per_page_mask; + u16 elem_size; + u16 next_page_mask; + u16 usable_per_page; + u8 elem_unusable; - u8 cnt_type; + u8 cnt_type; /* Slowpath of the chain - required for initialization and destruction, * but isn't involved in regular functionality. 
@@ -125,43 +127,47 @@ struct qed_chain { /* Base address of a pre-allocated buffer for pbl */ struct { - dma_addr_t p_phys_table; - void *p_virt_table; - } pbl_sp; + dma_addr_t p_phys_table; + void *p_virt_table; + } pbl_sp; /* Address of first page of the chain - the address is required * for fastpath operation [consume/produce] but only for the SINGLE * flavour which isn't considered fastpath [== SPQ]. */ - void *p_virt_addr; - dma_addr_t p_phys_addr; + void *p_virt_addr; + dma_addr_t p_phys_addr; /* Total number of elements [for entire chain] */ - u32 size; + u32 size; - u8 intended_use; + u8 intended_use; - bool b_external_pbl; + bool b_external_pbl; }; -#define QED_CHAIN_PBL_ENTRY_SIZE (8) -#define QED_CHAIN_PAGE_SIZE (0x1000) -#define ELEMS_PER_PAGE(elem_size) (QED_CHAIN_PAGE_SIZE / (elem_size)) +#define QED_CHAIN_PBL_ENTRY_SIZE 8 +#define QED_CHAIN_PAGE_SIZE 0x1000 + +#define ELEMS_PER_PAGE(elem_size) \ + (QED_CHAIN_PAGE_SIZE / (elem_size)) -#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \ - (((mode) == QED_CHAIN_MODE_NEXT_PTR) ? \ - (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / \ - (elem_size))) : 0) +#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \ + (((mode) == QED_CHAIN_MODE_NEXT_PTR) ? \ + (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / (elem_size))) : \ + 0) -#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \ - ((u32)(ELEMS_PER_PAGE(elem_size) - \ - UNUSABLE_ELEMS_PER_PAGE(elem_size, mode))) +#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \ + ((u32)(ELEMS_PER_PAGE(elem_size) - \ + UNUSABLE_ELEMS_PER_PAGE((elem_size), (mode)))) -#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \ - DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode)) +#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \ + DIV_ROUND_UP((elem_cnt), USABLE_ELEMS_PER_PAGE((elem_size), (mode))) -#define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16) -#define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32) +#define is_chain_u16(p) \ + ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16) +#define is_chain_u32(p) \ + ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32) /* Accessors */ static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain) -- cgit v1.2.3 From 9b6ee3cf95d322ab02e9927f5b08ebc870ca9f1f Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Thu, 23 Jul 2020 01:10:35 +0300 Subject: qed: sanitize PBL chains allocation PBL chain elements are actually DMA addresses stored in __le64, but currently their size is hardcoded to 8, and DMA addresses are assigned via cast to variable-sized dma_addr_t without any bitwise conversions. Change the type of pbl_virt array to match the actual one, add a new field to store the size of allocated DMA memory and sanitize elements assignment. Misc: give more logic names to the members of qed_chain::pbl_sp embedded struct. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: Michal Kalderon Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_chain.c | 21 ++++++++++----------- drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 4 ++-- include/linux/qed/qed_chain.h | 16 ++++++++-------- 3 files changed, 20 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/qlogic/qed/qed_chain.c b/drivers/net/ethernet/qlogic/qed/qed_chain.c index 917b783433f7..a9ff15b9d8c0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_chain.c +++ b/drivers/net/ethernet/qlogic/qed/qed_chain.c @@ -49,7 +49,7 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *chain) { struct device *dev = &cdev->pdev->dev; struct addr_tbl_entry *entry; - u32 pbl_size, i; + u32 i; if (!chain->pbl.pp_addr_tbl) return; @@ -63,11 +63,10 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *chain) entry->dma_map); } - pbl_size = chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; - if (!chain->b_external_pbl) - dma_free_coherent(dev, pbl_size, chain->pbl_sp.p_virt_table, - chain->pbl_sp.p_phys_table); + dma_free_coherent(dev, chain->pbl_sp.table_size, + chain->pbl_sp.table_virt, + chain->pbl_sp.table_phys); vfree(chain->pbl.pp_addr_tbl); chain->pbl.pp_addr_tbl = NULL; @@ -190,7 +189,7 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain, struct device *dev = &cdev->pdev->dev; struct addr_tbl_entry *addr_tbl; dma_addr_t phys, pbl_phys; - void *pbl_virt; + __le64 *pbl_virt; u32 page_cnt, i; size_t size; void *virt; @@ -214,7 +213,7 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain, chain->b_external_pbl = true; } else { - size = array_size(page_cnt, QED_CHAIN_PBL_ENTRY_SIZE); + size = array_size(page_cnt, sizeof(*pbl_virt)); if (unlikely(size == SIZE_MAX)) return -EOVERFLOW; @@ -225,8 +224,9 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain, if (!pbl_virt) return -ENOMEM; - chain->pbl_sp.p_virt_table = pbl_virt; - chain->pbl_sp.p_phys_table = pbl_phys; + chain->pbl_sp.table_virt = pbl_virt; + chain->pbl_sp.table_phys = pbl_phys; + chain->pbl_sp.table_size = size; for (i = 0; i < page_cnt; i++) { virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys, @@ -240,8 +240,7 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain, } /* Fill the PBL table with the physical address of the page */ - *(dma_addr_t *)pbl_virt = phys; - pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE; + pbl_virt[i] = cpu_to_le64(phys); /* Keep the virtual address of the page */ addr_tbl[i].virt_addr = virt; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 8142f5669b26..aa71adcf31ee 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -366,11 +366,11 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, /* Place EQ address in RAMROD */ DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr, - p_hwfn->p_eq->chain.pbl_sp.p_phys_table); + qed_chain_get_pbl_phys(&p_hwfn->p_eq->chain)); page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain); p_ramrod->event_ring_num_pages = page_cnt; DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr, - p_hwfn->p_consq->chain.pbl_sp.p_phys_table); + qed_chain_get_pbl_phys(&p_hwfn->p_consq->chain)); qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config); diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 087073517c09..265e0b671a5c 100644 --- a/include/linux/qed/qed_chain.h +++ 
b/include/linux/qed/qed_chain.h @@ -127,8 +127,9 @@ struct qed_chain { /* Base address of a pre-allocated buffer for pbl */ struct { - dma_addr_t p_phys_table; - void *p_virt_table; + __le64 *table_virt; + dma_addr_t table_phys; + size_t table_size; } pbl_sp; /* Address of first page of the chain - the address is required @@ -146,7 +147,6 @@ struct qed_chain { bool b_external_pbl; }; -#define QED_CHAIN_PBL_ENTRY_SIZE 8 #define QED_CHAIN_PAGE_SIZE 0x1000 #define ELEMS_PER_PAGE(elem_size) \ @@ -236,7 +236,7 @@ static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain) static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain) { - return p_chain->pbl_sp.p_phys_table; + return p_chain->pbl_sp.table_phys; } /** @@ -527,8 +527,8 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain, p_chain->capacity = p_chain->usable_per_page * page_cnt; p_chain->size = p_chain->elem_per_page * page_cnt; - p_chain->pbl_sp.p_phys_table = 0; - p_chain->pbl_sp.p_virt_table = NULL; + p_chain->pbl_sp.table_phys = 0; + p_chain->pbl_sp.table_virt = NULL; p_chain->pbl.pp_addr_tbl = NULL; } @@ -569,8 +569,8 @@ static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain, dma_addr_t p_phys_pbl, struct addr_tbl_entry *pp_addr_tbl) { - p_chain->pbl_sp.p_phys_table = p_phys_pbl; - p_chain->pbl_sp.p_virt_table = p_virt_pbl; + p_chain->pbl_sp.table_phys = p_phys_pbl; + p_chain->pbl_sp.table_virt = p_virt_pbl; p_chain->pbl.pp_addr_tbl = pp_addr_tbl; } -- cgit v1.2.3 From 5e776d8016119e13c27fbb6e87c9e1fd6f8b2a75 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Thu, 23 Jul 2020 01:10:36 +0300 Subject: qed: move chain initialization inlines next to allocation functions qed_chain_init*() are used in one file/place on "cold" path only, so they can be uninlined and moved next to the call sites. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: Michal Kalderon Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_chain.c | 47 ++++++++++++ include/linux/qed/qed_chain.h | 112 ---------------------------- 2 files changed, 47 insertions(+), 112 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/qlogic/qed/qed_chain.c b/drivers/net/ethernet/qlogic/qed/qed_chain.c index a9ff15b9d8c0..b60ec3e4654c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_chain.c +++ b/drivers/net/ethernet/qlogic/qed/qed_chain.c @@ -7,6 +7,53 @@ #include "qed_dev_api.h" +static void qed_chain_init_params(struct qed_chain *chain, + u32 page_cnt, u8 elem_size, + enum qed_chain_use_mode intended_use, + enum qed_chain_mode mode, + enum qed_chain_cnt_type cnt_type) +{ + memset(chain, 0, sizeof(*chain)); + + chain->elem_size = elem_size; + chain->intended_use = intended_use; + chain->mode = mode; + chain->cnt_type = cnt_type; + + chain->elem_per_page = ELEMS_PER_PAGE(elem_size); + chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode); + chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode); + + chain->elem_per_page_mask = chain->elem_per_page - 1; + chain->next_page_mask = chain->usable_per_page & + chain->elem_per_page_mask; + + chain->page_cnt = page_cnt; + chain->capacity = chain->usable_per_page * page_cnt; + chain->size = chain->elem_per_page * page_cnt; +} + +static void qed_chain_init_next_ptr_elem(const struct qed_chain *chain, + void *virt_curr, void *virt_next, + dma_addr_t phys_next) +{ + struct qed_chain_next *next; + u32 size; + + size = chain->elem_size * chain->usable_per_page; + next = virt_curr + size; + + DMA_REGPAIR_LE(next->next_phys, phys_next); + next->next_virt = virt_next; +} + +static void qed_chain_init_mem(struct qed_chain *chain, void *virt_addr, + dma_addr_t phys_addr) +{ + chain->p_virt_addr = virt_addr; + chain->p_phys_addr = phys_addr; +} + static void qed_chain_free_next_ptr(struct qed_dev *cdev, struct qed_chain *chain) { diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 265e0b671a5c..a0d83095dc73 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -490,118 +490,6 @@ static inline void qed_chain_reset(struct qed_chain *p_chain) } } -/** - * @brief qed_chain_init - Initalizes a basic chain struct - * - * @param p_chain - * @param p_virt_addr - * @param p_phys_addr physical address of allocated buffer's beginning - * @param page_cnt number of pages in the allocated buffer - * @param elem_size size of each element in the chain - * @param intended_use - * @param mode - */ -static inline void qed_chain_init_params(struct qed_chain *p_chain, - u32 page_cnt, - u8 elem_size, - enum qed_chain_use_mode intended_use, - enum qed_chain_mode mode, - enum qed_chain_cnt_type cnt_type) -{ - /* chain fixed parameters */ - p_chain->p_virt_addr = NULL; - p_chain->p_phys_addr = 0; - p_chain->elem_size = elem_size; - p_chain->intended_use = (u8)intended_use; - p_chain->mode = mode; - p_chain->cnt_type = (u8)cnt_type; - - p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size); - p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode); - p_chain->elem_per_page_mask = p_chain->elem_per_page - 1; - p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode); - p_chain->next_page_mask = (p_chain->usable_per_page & - p_chain->elem_per_page_mask); - - p_chain->page_cnt = page_cnt; - p_chain->capacity = p_chain->usable_per_page * page_cnt; - p_chain->size = p_chain->elem_per_page * page_cnt; - - p_chain->pbl_sp.table_phys = 0; - p_chain->pbl_sp.table_virt = NULL; - 
p_chain->pbl.pp_addr_tbl = NULL; -} - -/** - * @brief qed_chain_init_mem - - * - * Initalizes a basic chain struct with its chain buffers - * - * @param p_chain - * @param p_virt_addr virtual address of allocated buffer's beginning - * @param p_phys_addr physical address of allocated buffer's beginning - * - */ -static inline void qed_chain_init_mem(struct qed_chain *p_chain, - void *p_virt_addr, dma_addr_t p_phys_addr) -{ - p_chain->p_virt_addr = p_virt_addr; - p_chain->p_phys_addr = p_phys_addr; -} - -/** - * @brief qed_chain_init_pbl_mem - - * - * Initalizes a basic chain struct with its pbl buffers - * - * @param p_chain - * @param p_virt_pbl pointer to a pre allocated side table which will hold - * virtual page addresses. - * @param p_phys_pbl pointer to a pre-allocated side table which will hold - * physical page addresses. - * @param pp_virt_addr_tbl - * pointer to a pre-allocated side table which will hold - * the virtual addresses of the chain pages. - * - */ -static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain, - void *p_virt_pbl, - dma_addr_t p_phys_pbl, - struct addr_tbl_entry *pp_addr_tbl) -{ - p_chain->pbl_sp.table_phys = p_phys_pbl; - p_chain->pbl_sp.table_virt = p_virt_pbl; - p_chain->pbl.pp_addr_tbl = pp_addr_tbl; -} - -/** - * @brief qed_chain_init_next_ptr_elem - - * - * Initalizes a next pointer element - * - * @param p_chain - * @param p_virt_curr virtual address of a chain page of which the next - * pointer element is initialized - * @param p_virt_next virtual address of the next chain page - * @param p_phys_next physical address of the next chain page - * - */ -static inline void -qed_chain_init_next_ptr_elem(struct qed_chain *p_chain, - void *p_virt_curr, - void *p_virt_next, dma_addr_t p_phys_next) -{ - struct qed_chain_next *p_next; - u32 size; - - size = p_chain->elem_size * p_chain->usable_per_page; - p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size); - - DMA_REGPAIR_LE(p_next->next_phys, p_phys_next); - - p_next->next_virt = p_virt_next; -} - /** * @brief qed_chain_get_last_elem - * -- cgit v1.2.3 From b6db3f71c976ea92361dbc7ebfb65db666ac9f02 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Thu, 23 Jul 2020 01:10:38 +0300 Subject: qed: simplify chain allocation with init params struct To simplify qed_chain_alloc() prototype and call sites, introduce struct qed_chain_init_params to specify chain params, and pass a pointer to filled struct to the actual qed_chain_alloc() instead of a long list of separate arguments. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: Michal Kalderon Signed-off-by: David S. 
Miller --- drivers/infiniband/hw/qedr/main.c | 20 +++--- drivers/infiniband/hw/qedr/verbs.c | 95 +++++++++++++-------------- drivers/net/ethernet/qlogic/qed/qed_chain.c | 80 ++++++++++++---------- drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 32 +-------- drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 39 +++++------ drivers/net/ethernet/qlogic/qed/qed_ll2.c | 44 +++++++------ drivers/net/ethernet/qlogic/qed/qed_spq.c | 90 +++++++++++++++---------- drivers/net/ethernet/qlogic/qede/qede_main.c | 45 ++++++------- include/linux/qed/qed_chain.h | 21 ++++-- include/linux/qed/qed_if.h | 9 +-- 10 files changed, 242 insertions(+), 233 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index ccaedfd53e49..b1de8d608e4d 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -346,9 +346,14 @@ static void qedr_free_resources(struct qedr_dev *dev) static int qedr_alloc_resources(struct qedr_dev *dev) { + struct qed_chain_init_params params = { + .mode = QED_CHAIN_MODE_PBL, + .intended_use = QED_CHAIN_USE_TO_CONSUME, + .cnt_type = QED_CHAIN_CNT_TYPE_U16, + .elem_size = sizeof(struct regpair *), + }; struct qedr_cnq *cnq; __le16 *cons_pi; - u16 n_entries; int i, rc; dev->sgid_tbl = kcalloc(QEDR_MAX_SGID, sizeof(union ib_gid), @@ -382,7 +387,9 @@ static int qedr_alloc_resources(struct qedr_dev *dev) dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev); /* Allocate CNQ PBLs */ - n_entries = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE); + params.num_elems = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, + QEDR_ROCE_MAX_CNQ_SIZE); + for (i = 0; i < dev->num_cnq; i++) { cnq = &dev->cnq_array[i]; @@ -391,13 +398,8 @@ static int qedr_alloc_resources(struct qedr_dev *dev) if (rc) goto err3; - rc = dev->ops->common->chain_alloc(dev->cdev, - QED_CHAIN_USE_TO_CONSUME, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U16, - n_entries, - sizeof(struct regpair *), - &cnq->pbl, NULL); + rc = dev->ops->common->chain_alloc(dev->cdev, &cnq->pbl, + ¶ms); if (rc) goto err4; diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 9b9e80266367..6737895a0d68 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -891,6 +891,12 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, udata, struct qedr_ucontext, ibucontext); struct qed_rdma_destroy_cq_out_params destroy_oparams; struct qed_rdma_destroy_cq_in_params destroy_iparams; + struct qed_chain_init_params chain_params = { + .mode = QED_CHAIN_MODE_PBL, + .intended_use = QED_CHAIN_USE_TO_CONSUME, + .cnt_type = QED_CHAIN_CNT_TYPE_U32, + .elem_size = sizeof(union rdma_cqe), + }; struct qedr_dev *dev = get_qedr_dev(ibdev); struct qed_rdma_create_cq_in_params params; struct qedr_create_cq_ureq ureq = {}; @@ -917,6 +923,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, chain_entries = qedr_align_cq_entries(entries); chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES); + chain_params.num_elems = chain_entries; /* calc db offset. 
user will add DPI base, kernel will add db addr */ db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT); @@ -951,13 +958,8 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, } else { cq->cq_type = QEDR_CQ_TYPE_KERNEL; - rc = dev->ops->common->chain_alloc(dev->cdev, - QED_CHAIN_USE_TO_CONSUME, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U32, - chain_entries, - sizeof(union rdma_cqe), - &cq->pbl, NULL); + rc = dev->ops->common->chain_alloc(dev->cdev, &cq->pbl, + &chain_params); if (rc) goto err0; @@ -1446,6 +1448,12 @@ static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq, struct ib_srq_init_attr *init_attr) { struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq; + struct qed_chain_init_params params = { + .mode = QED_CHAIN_MODE_PBL, + .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE, + .cnt_type = QED_CHAIN_CNT_TYPE_U32, + .elem_size = QEDR_SRQ_WQE_ELEM_SIZE, + }; dma_addr_t phy_prod_pair_addr; u32 num_elems; void *va; @@ -1464,13 +1472,9 @@ static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq, hw_srq->virt_prod_pair_addr = va; num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE; - rc = dev->ops->common->chain_alloc(dev->cdev, - QED_CHAIN_USE_TO_CONSUME_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U32, - num_elems, - QEDR_SRQ_WQE_ELEM_SIZE, - &hw_srq->pbl, NULL); + params.num_elems = num_elems; + + rc = dev->ops->common->chain_alloc(dev->cdev, &hw_srq->pbl, ¶ms); if (rc) goto err0; @@ -1901,29 +1905,28 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev, u32 n_sq_elems, u32 n_rq_elems) { struct qed_rdma_create_qp_out_params out_params; + struct qed_chain_init_params params = { + .mode = QED_CHAIN_MODE_PBL, + .cnt_type = QED_CHAIN_CNT_TYPE_U32, + }; int rc; - rc = dev->ops->common->chain_alloc(dev->cdev, - QED_CHAIN_USE_TO_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U32, - n_sq_elems, - QEDR_SQE_ELEMENT_SIZE, - &qp->sq.pbl, NULL); + params.intended_use = QED_CHAIN_USE_TO_PRODUCE; + params.num_elems = n_sq_elems; + params.elem_size = QEDR_SQE_ELEMENT_SIZE; + rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, ¶ms); if (rc) return rc; in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl); in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl); - rc = dev->ops->common->chain_alloc(dev->cdev, - QED_CHAIN_USE_TO_CONSUME_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U32, - n_rq_elems, - QEDR_RQE_ELEMENT_SIZE, - &qp->rq.pbl, NULL); + params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE; + params.elem_size = n_rq_elems; + params.elem_size = QEDR_RQE_ELEMENT_SIZE; + + rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, ¶ms); if (rc) return rc; @@ -1949,7 +1952,10 @@ qedr_iwarp_create_kernel_qp(struct qedr_dev *dev, u32 n_sq_elems, u32 n_rq_elems) { struct qed_rdma_create_qp_out_params out_params; - struct qed_chain_ext_pbl ext_pbl; + struct qed_chain_init_params params = { + .mode = QED_CHAIN_MODE_PBL, + .cnt_type = QED_CHAIN_CNT_TYPE_U32, + }; int rc; in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems, @@ -1966,31 +1972,24 @@ qedr_iwarp_create_kernel_qp(struct qedr_dev *dev, return -EINVAL; /* Now we allocate the chain */ - ext_pbl.p_pbl_virt = out_params.sq_pbl_virt; - ext_pbl.p_pbl_phys = out_params.sq_pbl_phys; - rc = dev->ops->common->chain_alloc(dev->cdev, - QED_CHAIN_USE_TO_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U32, - n_sq_elems, - QEDR_SQE_ELEMENT_SIZE, - &qp->sq.pbl, &ext_pbl); + params.intended_use = QED_CHAIN_USE_TO_PRODUCE; + params.num_elems = n_sq_elems; + 
params.elem_size = QEDR_SQE_ELEMENT_SIZE; + params.ext_pbl_virt = out_params.sq_pbl_virt; + params.ext_pbl_phys = out_params.sq_pbl_phys; + rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, ¶ms); if (rc) goto err; - ext_pbl.p_pbl_virt = out_params.rq_pbl_virt; - ext_pbl.p_pbl_phys = out_params.rq_pbl_phys; - - rc = dev->ops->common->chain_alloc(dev->cdev, - QED_CHAIN_USE_TO_CONSUME_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U32, - n_rq_elems, - QEDR_RQE_ELEMENT_SIZE, - &qp->rq.pbl, &ext_pbl); + params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE; + params.num_elems = n_rq_elems; + params.elem_size = QEDR_RQE_ELEMENT_SIZE; + params.ext_pbl_virt = out_params.rq_pbl_virt; + params.ext_pbl_phys = out_params.rq_pbl_phys; + rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, ¶ms); if (rc) goto err; diff --git a/drivers/net/ethernet/qlogic/qed/qed_chain.c b/drivers/net/ethernet/qlogic/qed/qed_chain.c index 6effee3b50f4..a68ee4b3dbbc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_chain.c +++ b/drivers/net/ethernet/qlogic/qed/qed_chain.c @@ -7,23 +7,22 @@ #include "qed_dev_api.h" -static void qed_chain_init_params(struct qed_chain *chain, - u32 page_cnt, u8 elem_size, - enum qed_chain_use_mode intended_use, - enum qed_chain_mode mode, - enum qed_chain_cnt_type cnt_type, - const struct qed_chain_ext_pbl *ext_pbl) +static void qed_chain_init(struct qed_chain *chain, + const struct qed_chain_init_params *params, + u32 page_cnt) { memset(chain, 0, sizeof(*chain)); - chain->elem_size = elem_size; - chain->intended_use = intended_use; - chain->mode = mode; - chain->cnt_type = cnt_type; + chain->elem_size = params->elem_size; + chain->intended_use = params->intended_use; + chain->mode = params->mode; + chain->cnt_type = params->cnt_type; - chain->elem_per_page = ELEMS_PER_PAGE(elem_size); - chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode); - chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode); + chain->elem_per_page = ELEMS_PER_PAGE(params->elem_size); + chain->usable_per_page = USABLE_ELEMS_PER_PAGE(params->elem_size, + params->mode); + chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(params->elem_size, + params->mode); chain->elem_per_page_mask = chain->elem_per_page - 1; chain->next_page_mask = chain->usable_per_page & @@ -33,9 +32,9 @@ static void qed_chain_init_params(struct qed_chain *chain, chain->capacity = chain->usable_per_page * page_cnt; chain->size = chain->elem_per_page * page_cnt; - if (ext_pbl && ext_pbl->p_pbl_virt) { - chain->pbl_sp.table_virt = ext_pbl->p_pbl_virt; - chain->pbl_sp.table_phys = ext_pbl->p_pbl_phys; + if (params->ext_pbl_virt) { + chain->pbl_sp.table_virt = params->ext_pbl_virt; + chain->pbl_sp.table_phys = params->ext_pbl_phys; chain->b_external_pbl = true; } @@ -154,10 +153,16 @@ void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain) static int qed_chain_alloc_sanity_check(struct qed_dev *cdev, - enum qed_chain_cnt_type cnt_type, - size_t elem_size, u32 page_cnt) + const struct qed_chain_init_params *params, + u32 page_cnt) { - u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt; + u64 chain_size; + + chain_size = ELEMS_PER_PAGE(params->elem_size); + chain_size *= page_cnt; + + if (!chain_size) + return -EINVAL; /* The actual chain size can be larger than the maximal possible value * after rounding up the requested elements number to pages, and after @@ -165,7 +170,7 @@ qed_chain_alloc_sanity_check(struct qed_dev *cdev, * The size of a "u16" chain can be (U16_MAX + 1) since the chain * size/capacity 
fields are of u32 type. */ - switch (cnt_type) { + switch (params->cnt_type) { case QED_CHAIN_CNT_TYPE_U16: if (chain_size > U16_MAX + 1) break; @@ -298,37 +303,42 @@ alloc_pages: return 0; } -int qed_chain_alloc(struct qed_dev *cdev, - enum qed_chain_use_mode intended_use, - enum qed_chain_mode mode, - enum qed_chain_cnt_type cnt_type, - u32 num_elems, - size_t elem_size, - struct qed_chain *chain, - struct qed_chain_ext_pbl *ext_pbl) +/** + * qed_chain_alloc() - Allocate and initialize a chain. + * + * @cdev: Main device structure. + * @chain: Chain to be processed. + * @params: Chain initialization parameters. + * + * Return: 0 on success, negative errno otherwise. + */ +int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain, + struct qed_chain_init_params *params) { u32 page_cnt; int rc; - if (mode == QED_CHAIN_MODE_SINGLE) + if (params->mode == QED_CHAIN_MODE_SINGLE) page_cnt = 1; else - page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode); + page_cnt = QED_CHAIN_PAGE_CNT(params->num_elems, + params->elem_size, + params->mode); - rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt); + rc = qed_chain_alloc_sanity_check(cdev, params, page_cnt); if (rc) { DP_NOTICE(cdev, "Cannot allocate a chain with the given arguments:\n"); DP_NOTICE(cdev, "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n", - intended_use, mode, cnt_type, num_elems, elem_size); + params->intended_use, params->mode, params->cnt_type, + params->num_elems, params->elem_size); return rc; } - qed_chain_init_params(chain, page_cnt, elem_size, intended_use, mode, - cnt_type, ext_pbl); + qed_chain_init(chain, params, page_cnt); - switch (mode) { + switch (params->mode) { case QED_CHAIN_MODE_NEXT_PTR: rc = qed_chain_alloc_next_ptr(cdev, chain); break; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index 395d4932c262..d3c1f3879be8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -254,35 +254,9 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn, dma_addr_t dest_addr, u32 size_in_dwords, struct qed_dmae_params *p_params); -/** - * @brief qed_chain_alloc - Allocate and initialize a chain - * - * @param p_hwfn - * @param intended_use - * @param mode - * @param num_elems - * @param elem_size - * @param p_chain - * @param ext_pbl - a possible external PBL - * - * @return int - */ -int -qed_chain_alloc(struct qed_dev *cdev, - enum qed_chain_use_mode intended_use, - enum qed_chain_mode mode, - enum qed_chain_cnt_type cnt_type, - u32 num_elems, - size_t elem_size, - struct qed_chain *p_chain, struct qed_chain_ext_pbl *ext_pbl); - -/** - * @brief qed_chain_free - Free chain DMA memory - * - * @param p_hwfn - * @param p_chain - */ -void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain); +int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain, + struct qed_chain_init_params *params); +void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain); /** * @@brief qed_fw_l2_queue - Get absolute L2 queue ID diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index 25d2c882d7ac..4eae4ee3538f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -684,9 +684,13 @@ nomem: static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn, struct qed_iscsi_conn **p_out_conn) { - u16 uhq_num_elements = 0, xhq_num_elements = 0, r2tq_num_elements = 
0; struct scsi_terminate_extra_params *p_q_cnts = NULL; struct qed_iscsi_pf_params *p_params = NULL; + struct qed_chain_init_params params = { + .mode = QED_CHAIN_MODE_PBL, + .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE, + .cnt_type = QED_CHAIN_CNT_TYPE_U16, + }; struct tcp_upload_params *p_tcp = NULL; struct qed_iscsi_conn *p_conn = NULL; int rc = 0; @@ -727,34 +731,25 @@ static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn, goto nomem_upload_param; p_conn->tcp_upload_params_virt_addr = p_tcp; - r2tq_num_elements = p_params->num_r2tq_pages_in_ring * - QED_CHAIN_PAGE_SIZE / 0x80; - rc = qed_chain_alloc(p_hwfn->cdev, - QED_CHAIN_USE_TO_CONSUME_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U16, - r2tq_num_elements, 0x80, &p_conn->r2tq, NULL); + params.num_elems = p_params->num_r2tq_pages_in_ring * + QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_wqe); + params.elem_size = sizeof(struct iscsi_wqe); + + rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->r2tq, ¶ms); if (rc) goto nomem_r2tq; - uhq_num_elements = p_params->num_uhq_pages_in_ring * + params.num_elems = p_params->num_uhq_pages_in_ring * QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_uhqe); - rc = qed_chain_alloc(p_hwfn->cdev, - QED_CHAIN_USE_TO_CONSUME_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U16, - uhq_num_elements, - sizeof(struct iscsi_uhqe), &p_conn->uhq, NULL); + params.elem_size = sizeof(struct iscsi_uhqe); + + rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->uhq, ¶ms); if (rc) goto nomem_uhq; - xhq_num_elements = uhq_num_elements; - rc = qed_chain_alloc(p_hwfn->cdev, - QED_CHAIN_USE_TO_CONSUME_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U16, - xhq_num_elements, - sizeof(struct iscsi_xhqe), &p_conn->xhq, NULL); + params.elem_size = sizeof(struct iscsi_xhqe); + + rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->xhq, ¶ms); if (rc) goto nomem; diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 6f4aec339cd4..0452b728c527 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -1125,6 +1125,12 @@ static int qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_info) { + struct qed_chain_init_params params = { + .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE, + .cnt_type = QED_CHAIN_CNT_TYPE_U16, + .num_elems = p_ll2_info->input.rx_num_desc, + }; + struct qed_dev *cdev = p_hwfn->cdev; struct qed_ll2_rx_packet *p_descq; u32 capacity; int rc = 0; @@ -1132,13 +1138,10 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn, if (!p_ll2_info->input.rx_num_desc) goto out; - rc = qed_chain_alloc(p_hwfn->cdev, - QED_CHAIN_USE_TO_CONSUME_PRODUCE, - QED_CHAIN_MODE_NEXT_PTR, - QED_CHAIN_CNT_TYPE_U16, - p_ll2_info->input.rx_num_desc, - sizeof(struct core_rx_bd), - &p_ll2_info->rx_queue.rxq_chain, NULL); + params.mode = QED_CHAIN_MODE_NEXT_PTR; + params.elem_size = sizeof(struct core_rx_bd); + + rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rxq_chain, ¶ms); if (rc) { DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n"); goto out; @@ -1154,13 +1157,10 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn, } p_ll2_info->rx_queue.descq_array = p_descq; - rc = qed_chain_alloc(p_hwfn->cdev, - QED_CHAIN_USE_TO_CONSUME_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U16, - p_ll2_info->input.rx_num_desc, - sizeof(struct core_rx_fast_path_cqe), - &p_ll2_info->rx_queue.rcq_chain, NULL); + params.mode = QED_CHAIN_MODE_PBL; + params.elem_size = sizeof(struct core_rx_fast_path_cqe); + + rc = 
qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rcq_chain, ¶ms); if (rc) { DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n"); goto out; @@ -1177,6 +1177,13 @@ out: static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_info) { + struct qed_chain_init_params params = { + .mode = QED_CHAIN_MODE_PBL, + .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE, + .cnt_type = QED_CHAIN_CNT_TYPE_U16, + .num_elems = p_ll2_info->input.tx_num_desc, + .elem_size = sizeof(struct core_tx_bd), + }; struct qed_ll2_tx_packet *p_descq; u32 desc_size; u32 capacity; @@ -1185,13 +1192,8 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn, if (!p_ll2_info->input.tx_num_desc) goto out; - rc = qed_chain_alloc(p_hwfn->cdev, - QED_CHAIN_USE_TO_CONSUME_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U16, - p_ll2_info->input.tx_num_desc, - sizeof(struct core_tx_bd), - &p_ll2_info->tx_queue.txq_chain, NULL); + rc = qed_chain_alloc(p_hwfn->cdev, &p_ll2_info->tx_queue.txq_chain, + ¶ms); if (rc) goto out; diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 92ab029789e5..0bc1a0aeb56e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -382,22 +382,26 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie) int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem) { + struct qed_chain_init_params params = { + .mode = QED_CHAIN_MODE_PBL, + .intended_use = QED_CHAIN_USE_TO_PRODUCE, + .cnt_type = QED_CHAIN_CNT_TYPE_U16, + .num_elems = num_elem, + .elem_size = sizeof(union event_ring_element), + }; struct qed_eq *p_eq; + int ret; /* Allocate EQ struct */ p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL); if (!p_eq) return -ENOMEM; - /* Allocate and initialize EQ chain*/ - if (qed_chain_alloc(p_hwfn->cdev, - QED_CHAIN_USE_TO_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U16, - num_elem, - sizeof(union event_ring_element), - &p_eq->chain, NULL)) + ret = qed_chain_alloc(p_hwfn->cdev, &p_eq->chain, ¶ms); + if (ret) { + DP_NOTICE(p_hwfn, "Failed to allocate EQ chain\n"); goto eq_allocate_fail; + } /* register EQ completion on the SP SB */ qed_int_register_cb(p_hwfn, qed_eq_completion, @@ -408,7 +412,8 @@ int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem) eq_allocate_fail: kfree(p_eq); - return -ENOMEM; + + return ret; } void qed_eq_setup(struct qed_hwfn *p_hwfn) @@ -529,33 +534,40 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn) int qed_spq_alloc(struct qed_hwfn *p_hwfn) { + struct qed_chain_init_params params = { + .mode = QED_CHAIN_MODE_SINGLE, + .intended_use = QED_CHAIN_USE_TO_PRODUCE, + .cnt_type = QED_CHAIN_CNT_TYPE_U16, + .elem_size = sizeof(struct slow_path_element), + }; + struct qed_dev *cdev = p_hwfn->cdev; struct qed_spq_entry *p_virt = NULL; struct qed_spq *p_spq = NULL; dma_addr_t p_phys = 0; u32 capacity; + int ret; /* SPQ struct */ p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL); if (!p_spq) return -ENOMEM; - /* SPQ ring */ - if (qed_chain_alloc(p_hwfn->cdev, - QED_CHAIN_USE_TO_PRODUCE, - QED_CHAIN_MODE_SINGLE, - QED_CHAIN_CNT_TYPE_U16, - 0, /* N/A when the mode is SINGLE */ - sizeof(struct slow_path_element), - &p_spq->chain, NULL)) - goto spq_allocate_fail; + /* SPQ ring */ + ret = qed_chain_alloc(cdev, &p_spq->chain, ¶ms); + if (ret) { + DP_NOTICE(p_hwfn, "Failed to allocate SPQ chain\n"); + goto spq_chain_alloc_fail; + } /* allocate and fill the SPQ elements (incl. 
ramrod data list) */ capacity = qed_chain_get_capacity(&p_spq->chain); - p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + ret = -ENOMEM; + + p_virt = dma_alloc_coherent(&cdev->pdev->dev, capacity * sizeof(struct qed_spq_entry), &p_phys, GFP_KERNEL); if (!p_virt) - goto spq_allocate_fail; + goto spq_alloc_fail; p_spq->p_virt = p_virt; p_spq->p_phys = p_phys; @@ -563,10 +575,12 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn) return 0; -spq_allocate_fail: - qed_chain_free(p_hwfn->cdev, &p_spq->chain); +spq_alloc_fail: + qed_chain_free(cdev, &p_spq->chain); +spq_chain_alloc_fail: kfree(p_spq); - return -ENOMEM; + + return ret; } void qed_spq_free(struct qed_hwfn *p_hwfn) @@ -967,30 +981,40 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, return 0; } +#define QED_SPQ_CONSQ_ELEM_SIZE 0x80 + int qed_consq_alloc(struct qed_hwfn *p_hwfn) { + struct qed_chain_init_params params = { + .mode = QED_CHAIN_MODE_PBL, + .intended_use = QED_CHAIN_USE_TO_PRODUCE, + .cnt_type = QED_CHAIN_CNT_TYPE_U16, + .num_elems = QED_CHAIN_PAGE_SIZE / QED_SPQ_CONSQ_ELEM_SIZE, + .elem_size = QED_SPQ_CONSQ_ELEM_SIZE, + }; struct qed_consq *p_consq; + int ret; /* Allocate ConsQ struct */ p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL); if (!p_consq) return -ENOMEM; - /* Allocate and initialize EQ chain*/ - if (qed_chain_alloc(p_hwfn->cdev, - QED_CHAIN_USE_TO_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U16, - QED_CHAIN_PAGE_SIZE / 0x80, - 0x80, &p_consq->chain, NULL)) - goto consq_allocate_fail; + /* Allocate and initialize ConsQ chain */ + ret = qed_chain_alloc(p_hwfn->cdev, &p_consq->chain, ¶ms); + if (ret) { + DP_NOTICE(p_hwfn, "Failed to allocate ConsQ chain"); + goto consq_alloc_fail; + } p_hwfn->p_consq = p_consq; + return 0; -consq_allocate_fail: +consq_alloc_fail: kfree(p_consq); - return -ENOMEM; + + return ret; } void qed_consq_setup(struct qed_hwfn *p_hwfn) diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 6f2171dc0dea..b5a95f165520 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1442,6 +1442,11 @@ static void qede_set_tpa_param(struct qede_rx_queue *rxq) /* This function allocates all memory needed per Rx queue */ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) { + struct qed_chain_init_params params = { + .cnt_type = QED_CHAIN_CNT_TYPE_U16, + .num_elems = RX_RING_SIZE, + }; + struct qed_dev *cdev = edev->cdev; int i, rc, size; rxq->num_rx_buffers = edev->q_num_rx_buffers; @@ -1477,24 +1482,20 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) } /* Allocate FW Rx ring */ - rc = edev->ops->common->chain_alloc(edev->cdev, - QED_CHAIN_USE_TO_CONSUME_PRODUCE, - QED_CHAIN_MODE_NEXT_PTR, - QED_CHAIN_CNT_TYPE_U16, - RX_RING_SIZE, - sizeof(struct eth_rx_bd), - &rxq->rx_bd_ring, NULL); + params.mode = QED_CHAIN_MODE_NEXT_PTR; + params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE; + params.elem_size = sizeof(struct eth_rx_bd); + + rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_bd_ring, ¶ms); if (rc) goto err; /* Allocate FW completion ring */ - rc = edev->ops->common->chain_alloc(edev->cdev, - QED_CHAIN_USE_TO_CONSUME, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U16, - RX_RING_SIZE, - sizeof(union eth_rx_cqe), - &rxq->rx_comp_ring, NULL); + params.mode = QED_CHAIN_MODE_PBL; + params.intended_use = QED_CHAIN_USE_TO_CONSUME; + params.elem_size = sizeof(union eth_rx_cqe); + + rc = edev->ops->common->chain_alloc(cdev, 
&rxq->rx_comp_ring, ¶ms); if (rc) goto err; @@ -1531,7 +1532,13 @@ static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) /* This function allocates all memory needed per Tx queue */ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) { - union eth_tx_bd_types *p_virt; + struct qed_chain_init_params params = { + .mode = QED_CHAIN_MODE_PBL, + .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE, + .cnt_type = QED_CHAIN_CNT_TYPE_U16, + .num_elems = edev->q_num_tx_buffers, + .elem_size = sizeof(union eth_tx_bd_types), + }; int size, rc; txq->num_tx_buffers = edev->q_num_tx_buffers; @@ -1549,13 +1556,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) goto err; } - rc = edev->ops->common->chain_alloc(edev->cdev, - QED_CHAIN_USE_TO_CONSUME_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U16, - txq->num_tx_buffers, - sizeof(*p_virt), - &txq->tx_pbl, NULL); + rc = edev->ops->common->chain_alloc(edev->cdev, &txq->tx_pbl, ¶ms); if (rc) goto err; diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index a0d83095dc73..f5cfee0934e5 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -54,11 +54,6 @@ struct qed_chain_pbl_u32 { u32 cons_page_idx; }; -struct qed_chain_ext_pbl { - dma_addr_t p_pbl_phys; - void *p_pbl_virt; -}; - struct qed_chain_u16 { /* Cyclic index of next element to produce/consme */ u16 prod_idx; @@ -119,7 +114,7 @@ struct qed_chain { u16 usable_per_page; u8 elem_unusable; - u8 cnt_type; + enum qed_chain_cnt_type cnt_type; /* Slowpath of the chain - required for initialization and destruction, * but isn't involved in regular functionality. @@ -142,11 +137,23 @@ struct qed_chain { /* Total number of elements [for entire chain] */ u32 size; - u8 intended_use; + enum qed_chain_use_mode intended_use; bool b_external_pbl; }; +struct qed_chain_init_params { + enum qed_chain_mode mode; + enum qed_chain_use_mode intended_use; + enum qed_chain_cnt_type cnt_type; + + u32 num_elems; + size_t elem_size; + + void *ext_pbl_virt; + dma_addr_t ext_pbl_phys; +}; + #define QED_CHAIN_PAGE_SIZE 0x1000 #define ELEMS_PER_PAGE(elem_size) \ diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index a5c6854343e6..cd6a5c7e56eb 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -948,13 +948,8 @@ struct qed_common_ops { u8 dp_level); int (*chain_alloc)(struct qed_dev *cdev, - enum qed_chain_use_mode intended_use, - enum qed_chain_mode mode, - enum qed_chain_cnt_type cnt_type, - u32 num_elems, - size_t elem_size, - struct qed_chain *p_chain, - struct qed_chain_ext_pbl *ext_pbl); + struct qed_chain *chain, + struct qed_chain_init_params *params); void (*chain_free)(struct qed_dev *cdev, struct qed_chain *p_chain); -- cgit v1.2.3 From 155065866bc36f20061c55fd2ca287a466911b16 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Thu, 23 Jul 2020 01:10:39 +0300 Subject: qed: add support for different page sizes for chains Extend current infrastructure to store chain page size in a struct and use it in all functions instead of fixed QED_CHAIN_PAGE_SIZE. Its value remains the default one, but can be overridden in qed_chain_init_params before chain allocation. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: Michal Kalderon Signed-off-by: David S. 
Miller --- drivers/infiniband/hw/qedr/verbs.c | 2 ++ drivers/net/ethernet/qlogic/qed/qed_chain.c | 28 ++++++++++++++++++---------- include/linux/qed/qed_chain.h | 21 +++++++++++++-------- 3 files changed, 33 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 6737895a0d68..49b8a43e3fa2 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1960,9 +1960,11 @@ qedr_iwarp_create_kernel_qp(struct qedr_dev *dev, in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems, QEDR_SQE_ELEMENT_SIZE, + QED_CHAIN_PAGE_SIZE, QED_CHAIN_MODE_PBL); in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems, QEDR_RQE_ELEMENT_SIZE, + QED_CHAIN_PAGE_SIZE, QED_CHAIN_MODE_PBL); qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx, diff --git a/drivers/net/ethernet/qlogic/qed/qed_chain.c b/drivers/net/ethernet/qlogic/qed/qed_chain.c index a68ee4b3dbbc..f8efd36d66e0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_chain.c +++ b/drivers/net/ethernet/qlogic/qed/qed_chain.c @@ -18,8 +18,10 @@ static void qed_chain_init(struct qed_chain *chain, chain->mode = params->mode; chain->cnt_type = params->cnt_type; - chain->elem_per_page = ELEMS_PER_PAGE(params->elem_size); + chain->elem_per_page = ELEMS_PER_PAGE(params->elem_size, + params->page_size); chain->usable_per_page = USABLE_ELEMS_PER_PAGE(params->elem_size, + params->page_size, params->mode); chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(params->elem_size, params->mode); @@ -28,6 +30,7 @@ static void qed_chain_init(struct qed_chain *chain, chain->next_page_mask = chain->usable_per_page & chain->elem_per_page_mask; + chain->page_size = params->page_size; chain->page_cnt = page_cnt; chain->capacity = chain->usable_per_page * page_cnt; chain->size = chain->elem_per_page * page_cnt; @@ -82,7 +85,7 @@ static void qed_chain_free_next_ptr(struct qed_dev *cdev, virt_next = next->next_virt; phys_next = HILO_DMA_REGPAIR(next->next_phys); - dma_free_coherent(dev, QED_CHAIN_PAGE_SIZE, virt, phys); + dma_free_coherent(dev, chain->page_size, virt, phys); virt = virt_next; phys = phys_next; @@ -95,7 +98,7 @@ static void qed_chain_free_single(struct qed_dev *cdev, if (!chain->p_virt_addr) return; - dma_free_coherent(&cdev->pdev->dev, QED_CHAIN_PAGE_SIZE, + dma_free_coherent(&cdev->pdev->dev, chain->page_size, chain->p_virt_addr, chain->p_phys_addr); } @@ -113,7 +116,7 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *chain) if (!entry->virt_addr) break; - dma_free_coherent(dev, QED_CHAIN_PAGE_SIZE, entry->virt_addr, + dma_free_coherent(dev, chain->page_size, entry->virt_addr, entry->dma_map); } @@ -158,7 +161,7 @@ qed_chain_alloc_sanity_check(struct qed_dev *cdev, { u64 chain_size; - chain_size = ELEMS_PER_PAGE(params->elem_size); + chain_size = ELEMS_PER_PAGE(params->elem_size, params->page_size); chain_size *= page_cnt; if (!chain_size) @@ -201,7 +204,7 @@ static int qed_chain_alloc_next_ptr(struct qed_dev *cdev, u32 i; for (i = 0; i < chain->page_cnt; i++) { - virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys, + virt = dma_alloc_coherent(dev, chain->page_size, &phys, GFP_KERNEL); if (!virt) return -ENOMEM; @@ -232,7 +235,7 @@ static int qed_chain_alloc_single(struct qed_dev *cdev, dma_addr_t phys; void *virt; - virt = dma_alloc_coherent(&cdev->pdev->dev, QED_CHAIN_PAGE_SIZE, + virt = dma_alloc_coherent(&cdev->pdev->dev, chain->page_size, &phys, GFP_KERNEL); if (!virt) return -ENOMEM; @@ -282,7 +285,7 @@ static int 
qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain) alloc_pages: for (i = 0; i < page_cnt; i++) { - virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys, + virt = dma_alloc_coherent(dev, chain->page_size, &phys, GFP_KERNEL); if (!virt) return -ENOMEM; @@ -318,11 +321,15 @@ int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain, u32 page_cnt; int rc; + if (!params->page_size) + params->page_size = QED_CHAIN_PAGE_SIZE; + if (params->mode == QED_CHAIN_MODE_SINGLE) page_cnt = 1; else page_cnt = QED_CHAIN_PAGE_CNT(params->num_elems, params->elem_size, + params->page_size, params->mode); rc = qed_chain_alloc_sanity_check(cdev, params, page_cnt); @@ -330,9 +337,10 @@ int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain, DP_NOTICE(cdev, "Cannot allocate a chain with the given arguments:\n"); DP_NOTICE(cdev, - "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n", + "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu, page_size %u]\n", params->intended_use, params->mode, params->cnt_type, - params->num_elems, params->elem_size); + params->num_elems, params->elem_size, + params->page_size); return rc; } diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index f5cfee0934e5..8a96c361cc19 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -120,6 +121,8 @@ struct qed_chain { * but isn't involved in regular functionality. */ + u32 page_size; + /* Base address of a pre-allocated buffer for pbl */ struct { __le64 *table_virt; @@ -147,6 +150,7 @@ struct qed_chain_init_params { enum qed_chain_use_mode intended_use; enum qed_chain_cnt_type cnt_type; + u32 page_size; u32 num_elems; size_t elem_size; @@ -154,22 +158,23 @@ struct qed_chain_init_params { dma_addr_t ext_pbl_phys; }; -#define QED_CHAIN_PAGE_SIZE 0x1000 +#define QED_CHAIN_PAGE_SIZE SZ_4K -#define ELEMS_PER_PAGE(elem_size) \ - (QED_CHAIN_PAGE_SIZE / (elem_size)) +#define ELEMS_PER_PAGE(elem_size, page_size) \ + ((page_size) / (elem_size)) #define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \ (((mode) == QED_CHAIN_MODE_NEXT_PTR) ? \ (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / (elem_size))) : \ 0) -#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \ - ((u32)(ELEMS_PER_PAGE(elem_size) - \ +#define USABLE_ELEMS_PER_PAGE(elem_size, page_size, mode) \ + ((u32)(ELEMS_PER_PAGE((elem_size), (page_size)) - \ UNUSABLE_ELEMS_PER_PAGE((elem_size), (mode)))) -#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \ - DIV_ROUND_UP((elem_cnt), USABLE_ELEMS_PER_PAGE((elem_size), (mode))) +#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, page_size, mode) \ + DIV_ROUND_UP((elem_cnt), \ + USABLE_ELEMS_PER_PAGE((elem_size), (page_size), (mode))) #define is_chain_u16(p) \ ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16) @@ -604,7 +609,7 @@ static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain) for (i = 0; i < page_cnt; i++) memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0, - QED_CHAIN_PAGE_SIZE); + p_chain->page_size); } #endif -- cgit v1.2.3 From f2aefd20b02d83b8565c867c38eedd88853967e9 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Thu, 23 Jul 2020 01:10:40 +0300 Subject: qed: optimize common chain accessors Constify chain pointers and refactor qed_chain_get_elem_left{,u32}() a bit. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: Michal Kalderon Signed-off-by: David S. 
Miller --- include/linux/qed/qed_chain.h | 60 ++++++++++++++++++++++++------------------- 1 file changed, 33 insertions(+), 27 deletions(-) (limited to 'include') diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 8a96c361cc19..434479e2ab65 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -182,73 +182,79 @@ struct qed_chain_init_params { ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32) /* Accessors */ -static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain) + +static inline u16 qed_chain_get_prod_idx(const struct qed_chain *chain) +{ + return chain->u.chain16.prod_idx; +} + +static inline u16 qed_chain_get_cons_idx(const struct qed_chain *chain) { - return p_chain->u.chain16.prod_idx; + return chain->u.chain16.cons_idx; } -static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain) +static inline u32 qed_chain_get_prod_idx_u32(const struct qed_chain *chain) { - return p_chain->u.chain16.cons_idx; + return chain->u.chain32.prod_idx; } -static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain) +static inline u32 qed_chain_get_cons_idx_u32(const struct qed_chain *chain) { - return p_chain->u.chain32.cons_idx; + return chain->u.chain32.cons_idx; } -static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain) +static inline u16 qed_chain_get_elem_left(const struct qed_chain *chain) { - u16 elem_per_page = p_chain->elem_per_page; - u32 prod = p_chain->u.chain16.prod_idx; - u32 cons = p_chain->u.chain16.cons_idx; + u32 prod = qed_chain_get_prod_idx(chain); + u32 cons = qed_chain_get_cons_idx(chain); + u16 elem_per_page = chain->elem_per_page; u16 used; if (prod < cons) prod += (u32)U16_MAX + 1; used = (u16)(prod - cons); - if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) - used -= prod / elem_per_page - cons / elem_per_page; + if (chain->mode == QED_CHAIN_MODE_NEXT_PTR) + used -= (u16)(prod / elem_per_page - cons / elem_per_page); - return (u16)(p_chain->capacity - used); + return (u16)(chain->capacity - used); } -static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain) +static inline u32 qed_chain_get_elem_left_u32(const struct qed_chain *chain) { - u16 elem_per_page = p_chain->elem_per_page; - u64 prod = p_chain->u.chain32.prod_idx; - u64 cons = p_chain->u.chain32.cons_idx; + u64 prod = qed_chain_get_prod_idx_u32(chain); + u64 cons = qed_chain_get_cons_idx_u32(chain); + u16 elem_per_page = chain->elem_per_page; u32 used; if (prod < cons) prod += (u64)U32_MAX + 1; used = (u32)(prod - cons); - if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) + if (chain->mode == QED_CHAIN_MODE_NEXT_PTR) used -= (u32)(prod / elem_per_page - cons / elem_per_page); - return p_chain->capacity - used; + return chain->capacity - used; } -static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain) +static inline u16 qed_chain_get_usable_per_page(const struct qed_chain *chain) { - return p_chain->usable_per_page; + return chain->usable_per_page; } -static inline u8 qed_chain_get_unusable_per_page(struct qed_chain *p_chain) +static inline u8 qed_chain_get_unusable_per_page(const struct qed_chain *chain) { - return p_chain->elem_unusable; + return chain->elem_unusable; } -static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain) +static inline u32 qed_chain_get_page_cnt(const struct qed_chain *chain) { - return p_chain->page_cnt; + return chain->page_cnt; } -static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain) +static inline dma_addr_t qed_chain_get_pbl_phys(const struct 
qed_chain *chain) { - return p_chain->pbl_sp.table_phys; + return chain->pbl_sp.table_phys; } /** -- cgit v1.2.3 From be0cec6ffd686364bbba2796bbe5eee2fad18181 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Thu, 23 Jul 2020 01:10:41 +0300 Subject: qed: introduce qed_chain_get_elem_used{,u32}() Add reverse-variants of qed_chain_get_elem_left{,u32}() to be able to know current chain occupation. They will be used in the upcoming qede XDP_REDIRECT code. They share most of the logics with the mentioned ones, so were reused to collapse the latters. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: Michal Kalderon Signed-off-by: David S. Miller --- include/linux/qed/qed_chain.h | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 434479e2ab65..4d58dc8943f0 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -203,7 +203,7 @@ static inline u32 qed_chain_get_cons_idx_u32(const struct qed_chain *chain) return chain->u.chain32.cons_idx; } -static inline u16 qed_chain_get_elem_left(const struct qed_chain *chain) +static inline u16 qed_chain_get_elem_used(const struct qed_chain *chain) { u32 prod = qed_chain_get_prod_idx(chain); u32 cons = qed_chain_get_cons_idx(chain); @@ -217,10 +217,15 @@ static inline u16 qed_chain_get_elem_left(const struct qed_chain *chain) if (chain->mode == QED_CHAIN_MODE_NEXT_PTR) used -= (u16)(prod / elem_per_page - cons / elem_per_page); - return (u16)(chain->capacity - used); + return used; } -static inline u32 qed_chain_get_elem_left_u32(const struct qed_chain *chain) +static inline u16 qed_chain_get_elem_left(const struct qed_chain *chain) +{ + return (u16)(chain->capacity - qed_chain_get_elem_used(chain)); +} + +static inline u32 qed_chain_get_elem_used_u32(const struct qed_chain *chain) { u64 prod = qed_chain_get_prod_idx_u32(chain); u64 cons = qed_chain_get_cons_idx_u32(chain); @@ -234,7 +239,12 @@ static inline u32 qed_chain_get_elem_left_u32(const struct qed_chain *chain) if (chain->mode == QED_CHAIN_MODE_NEXT_PTR) used -= (u32)(prod / elem_per_page - cons / elem_per_page); - return chain->capacity - used; + return used; +} + +static inline u32 qed_chain_get_elem_left_u32(const struct qed_chain *chain) +{ + return chain->capacity - qed_chain_get_elem_used_u32(chain); } static inline u16 qed_chain_get_usable_per_page(const struct qed_chain *chain) -- cgit v1.2.3 From bd25b4886ddcebec92591f298ce2ce345d7f2ea9 Mon Sep 17 00:00:00 2001 From: Daniel Jordan Date: Tue, 14 Jul 2020 16:13:51 -0400 Subject: padata: remove start function padata_start() is only used right after pcrypt allocates an instance with all possible CPUs, when PADATA_INVALID can't happen, so there's no need for a separate "start" step. It can be done during allocation to save text, make using padata easier, and avoid unneeded calls in the future. 
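For illustration only (an editor's sketch, not part of the patch), the caller-side effect is that the instance is usable for padata_do_parallel() as soon as allocation returns. The instance and function names below are examples; padata_alloc_possible() is the allocation interface that exists at this point in the series.

#include <linux/module.h>
#include <linux/padata.h>

/* Hypothetical module init after this change; names are examples,
 * not taken from pcrypt itself.
 */
static struct padata_instance *example_pinst;

static int __init example_init(void)
{
	example_pinst = padata_alloc_possible("example");
	if (!example_pinst)
		return -ENOMEM;

	/* Previously required here: padata_start(example_pinst);
	 * the instance is now started internally during allocation.
	 */
	return 0;
}

In other words, the separate "start" step disappears from every user rather than being moved somewhere else.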
Signed-off-by: Daniel Jordan Cc: Herbert Xu Cc: Steffen Klassert Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/pcrypt.c | 3 --- include/linux/padata.h | 1 - kernel/padata.c | 26 +------------------------- 3 files changed, 1 insertion(+), 29 deletions(-) (limited to 'include') diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index cbc383a1a3fe..fb9127ec5357 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -355,9 +355,6 @@ static int __init pcrypt_init(void) if (err) goto err_deinit_pencrypt; - padata_start(pencrypt); - padata_start(pdecrypt); - return crypto_register_template(&pcrypt_tmpl); err_deinit_pencrypt: diff --git a/include/linux/padata.h b/include/linux/padata.h index 7302efff5e65..20294cddc739 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -204,6 +204,5 @@ extern void padata_do_serial(struct padata_priv *padata); extern void __init padata_do_multithreaded(struct padata_mt_job *job); extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, cpumask_var_t cpumask); -extern int padata_start(struct padata_instance *pinst); extern void padata_stop(struct padata_instance *pinst); #endif diff --git a/kernel/padata.c b/kernel/padata.c index 4373f7adaa40..931762316612 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -789,30 +789,6 @@ out: } EXPORT_SYMBOL(padata_set_cpumask); -/** - * padata_start - start the parallel processing - * - * @pinst: padata instance to start - * - * Return: 0 on success or negative error code - */ -int padata_start(struct padata_instance *pinst) -{ - int err = 0; - - mutex_lock(&pinst->lock); - - if (pinst->flags & PADATA_INVALID) - err = -EINVAL; - - __padata_start(pinst); - - mutex_unlock(&pinst->lock); - - return err; -} -EXPORT_SYMBOL(padata_start); - /** * padata_stop - stop the parallel processing * @@ -1100,7 +1076,7 @@ static struct padata_instance *padata_alloc(const char *name, if (padata_setup_cpumasks(pinst)) goto err_free_rcpumask_cbcpu; - pinst->flags = 0; + __padata_start(pinst); kobject_init(&pinst->kobj, &padata_attr_type); mutex_init(&pinst->lock); -- cgit v1.2.3 From 350ef051d4edd884e8dea0be9f3685b4b32142fb Mon Sep 17 00:00:00 2001 From: Daniel Jordan Date: Tue, 14 Jul 2020 16:13:52 -0400 Subject: padata: remove stop function padata_stop() has two callers and is unnecessary in both cases. When pcrypt calls it before padata_free(), it's being unloaded so there are no outstanding padata jobs[0]. When __padata_free() calls it, it's either along the same path or else pcrypt initialization failed, which of course means there are also no outstanding jobs. Removing it simplifies padata and saves text. [0] https://lore.kernel.org/linux-crypto/20191119225017.mjrak2fwa5vccazl@gondor.apana.org.au/ Signed-off-by: Daniel Jordan Cc: Herbert Xu Cc: Steffen Klassert Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- Documentation/core-api/padata.rst | 16 ++-------------- crypto/pcrypt.c | 12 +++--------- include/linux/padata.h | 1 - kernel/padata.c | 14 -------------- 4 files changed, 5 insertions(+), 38 deletions(-) (limited to 'include') diff --git a/Documentation/core-api/padata.rst b/Documentation/core-api/padata.rst index 0830e5b0e821..771d50330e5b 100644 --- a/Documentation/core-api/padata.rst +++ b/Documentation/core-api/padata.rst @@ -31,18 +31,7 @@ padata_instance structure for overall control of how jobs are to be run:: 'name' simply identifies the instance. 
-There are functions for enabling and disabling the instance:: - - int padata_start(struct padata_instance *pinst); - void padata_stop(struct padata_instance *pinst); - -These functions are setting or clearing the "PADATA_INIT" flag; if that flag is -not set, other functions will refuse to work. padata_start() returns zero on -success (flag set) or -EINVAL if the padata cpumask contains no active CPU -(flag not set). padata_stop() clears the flag and blocks until the padata -instance is unused. - -Finally, complete padata initialization by allocating a padata_shell:: +Then, complete padata initialization by allocating a padata_shell:: struct padata_shell *padata_alloc_shell(struct padata_instance *pinst); @@ -155,11 +144,10 @@ submitted. Destroying ---------- -Cleaning up a padata instance predictably involves calling the three free +Cleaning up a padata instance predictably involves calling the two free functions that correspond to the allocation in reverse:: void padata_free_shell(struct padata_shell *ps); - void padata_stop(struct padata_instance *pinst); void padata_free(struct padata_instance *pinst); It is the user's responsibility to ensure all outstanding jobs are complete diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index fb9127ec5357..2d4ac9d44902 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -327,12 +327,6 @@ static int pcrypt_init_padata(struct padata_instance **pinst, const char *name) return ret; } -static void pcrypt_fini_padata(struct padata_instance *pinst) -{ - padata_stop(pinst); - padata_free(pinst); -} - static struct crypto_template pcrypt_tmpl = { .name = "pcrypt", .create = pcrypt_create, @@ -358,7 +352,7 @@ static int __init pcrypt_init(void) return crypto_register_template(&pcrypt_tmpl); err_deinit_pencrypt: - pcrypt_fini_padata(pencrypt); + padata_free(pencrypt); err_unreg_kset: kset_unregister(pcrypt_kset); err: @@ -369,8 +363,8 @@ static void __exit pcrypt_exit(void) { crypto_unregister_template(&pcrypt_tmpl); - pcrypt_fini_padata(pencrypt); - pcrypt_fini_padata(pdecrypt); + padata_free(pencrypt); + padata_free(pdecrypt); kset_unregister(pcrypt_kset); } diff --git a/include/linux/padata.h b/include/linux/padata.h index 20294cddc739..7d53208b43da 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -204,5 +204,4 @@ extern void padata_do_serial(struct padata_priv *padata); extern void __init padata_do_multithreaded(struct padata_mt_job *job); extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, cpumask_var_t cpumask); -extern void padata_stop(struct padata_instance *pinst); #endif diff --git a/kernel/padata.c b/kernel/padata.c index 931762316612..8f55e717ba50 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -789,19 +789,6 @@ out: } EXPORT_SYMBOL(padata_set_cpumask); -/** - * padata_stop - stop the parallel processing - * - * @pinst: padata instance to stop - */ -void padata_stop(struct padata_instance *pinst) -{ - mutex_lock(&pinst->lock); - __padata_stop(pinst); - mutex_unlock(&pinst->lock); -} -EXPORT_SYMBOL(padata_stop); - #ifdef CONFIG_HOTPLUG_CPU static int __padata_add_cpu(struct padata_instance *pinst, int cpu) @@ -883,7 +870,6 @@ static void __padata_free(struct padata_instance *pinst) WARN_ON(!list_empty(&pinst->pslist)); - padata_stop(pinst); free_cpumask_var(pinst->rcpumask.cbcpu); free_cpumask_var(pinst->rcpumask.pcpu); free_cpumask_var(pinst->cpumask.pcpu); -- cgit v1.2.3 From d69e037bcc4a7e31fdd40ae416aa1bd768dd7d99 Mon Sep 17 00:00:00 2001 From: Daniel Jordan Date: Tue, 14 Jul 2020 16:13:54 
-0400 Subject: padata: remove effective cpumasks from the instance A padata instance has effective cpumasks that store the user-supplied masks ANDed with the online mask, but this middleman is unnecessary. parallel_data keeps the same information around. Removing this saves text and code churn in future changes. Signed-off-by: Daniel Jordan Cc: Herbert Xu Cc: Steffen Klassert Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- include/linux/padata.h | 2 -- kernel/padata.c | 30 +++--------------------------- 2 files changed, 3 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/include/linux/padata.h b/include/linux/padata.h index 7d53208b43da..a941b96b7119 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -167,7 +167,6 @@ struct padata_mt_job { * @serial_wq: The workqueue used for serial work. * @pslist: List of padata_shell objects attached to this instance. * @cpumask: User supplied cpumasks for parallel and serial works. - * @rcpumask: Actual cpumasks based on user cpumask and cpu_online_mask. * @kobj: padata instance kernel object. * @lock: padata instance lock. * @flags: padata flags. @@ -179,7 +178,6 @@ struct padata_instance { struct workqueue_struct *serial_wq; struct list_head pslist; struct padata_cpumask cpumask; - struct padata_cpumask rcpumask; struct kobject kobj; struct mutex lock; u8 flags; diff --git a/kernel/padata.c b/kernel/padata.c index 27f90a3c4dc6..4f0a57e5738c 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -571,13 +571,8 @@ static void padata_init_pqueues(struct parallel_data *pd) static struct parallel_data *padata_alloc_pd(struct padata_shell *ps) { struct padata_instance *pinst = ps->pinst; - const struct cpumask *cbcpumask; - const struct cpumask *pcpumask; struct parallel_data *pd; - cbcpumask = pinst->rcpumask.cbcpu; - pcpumask = pinst->rcpumask.pcpu; - pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL); if (!pd) goto err; @@ -597,8 +592,8 @@ static struct parallel_data *padata_alloc_pd(struct padata_shell *ps) if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) goto err_free_pcpu; - cpumask_copy(pd->cpumask.pcpu, pcpumask); - cpumask_copy(pd->cpumask.cbcpu, cbcpumask); + cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask); + cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask); padata_init_pqueues(pd); padata_init_squeues(pd); @@ -668,12 +663,6 @@ static int padata_replace(struct padata_instance *pinst) pinst->flags |= PADATA_RESET; - cpumask_and(pinst->rcpumask.pcpu, pinst->cpumask.pcpu, - cpu_online_mask); - - cpumask_and(pinst->rcpumask.cbcpu, pinst->cpumask.cbcpu, - cpu_online_mask); - list_for_each_entry(ps, &pinst->pslist, list) { err = padata_replace_one(ps); if (err) @@ -856,8 +845,6 @@ static void __padata_free(struct padata_instance *pinst) WARN_ON(!list_empty(&pinst->pslist)); - free_cpumask_var(pinst->rcpumask.cbcpu); - free_cpumask_var(pinst->rcpumask.pcpu); free_cpumask_var(pinst->cpumask.pcpu); free_cpumask_var(pinst->cpumask.cbcpu); destroy_workqueue(pinst->serial_wq); @@ -1033,20 +1020,13 @@ static struct padata_instance *padata_alloc(const char *name, !padata_validate_cpumask(pinst, cbcpumask)) goto err_free_masks; - if (!alloc_cpumask_var(&pinst->rcpumask.pcpu, GFP_KERNEL)) - goto err_free_masks; - if (!alloc_cpumask_var(&pinst->rcpumask.cbcpu, GFP_KERNEL)) - goto err_free_rcpumask_pcpu; - INIT_LIST_HEAD(&pinst->pslist); cpumask_copy(pinst->cpumask.pcpu, pcpumask); cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); - 
cpumask_and(pinst->rcpumask.pcpu, pcpumask, cpu_online_mask); - cpumask_and(pinst->rcpumask.cbcpu, cbcpumask, cpu_online_mask); if (padata_setup_cpumasks(pinst)) - goto err_free_rcpumask_cbcpu; + goto err_free_masks; __padata_start(pinst); @@ -1064,10 +1044,6 @@ static struct padata_instance *padata_alloc(const char *name, return pinst; -err_free_rcpumask_cbcpu: - free_cpumask_var(pinst->rcpumask.cbcpu); -err_free_rcpumask_pcpu: - free_cpumask_var(pinst->rcpumask.pcpu); err_free_masks: free_cpumask_var(pinst->cpumask.pcpu); free_cpumask_var(pinst->cpumask.cbcpu); -- cgit v1.2.3 From 3f257191d31d5eaf154ebdb696efc238837ddd51 Mon Sep 17 00:00:00 2001 From: Daniel Jordan Date: Tue, 14 Jul 2020 16:13:55 -0400 Subject: padata: fold padata_alloc_possible() into padata_alloc() There's no reason to have two interfaces when there's only one caller. Removing _possible saves text and simplifies future changes. Signed-off-by: Daniel Jordan Cc: Herbert Xu Cc: Steffen Klassert Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- Documentation/core-api/padata.rst | 2 +- crypto/pcrypt.c | 2 +- include/linux/padata.h | 2 +- kernel/padata.c | 33 +++++---------------------------- 4 files changed, 8 insertions(+), 31 deletions(-) (limited to 'include') diff --git a/Documentation/core-api/padata.rst b/Documentation/core-api/padata.rst index 771d50330e5b..35175710b43c 100644 --- a/Documentation/core-api/padata.rst +++ b/Documentation/core-api/padata.rst @@ -27,7 +27,7 @@ padata_instance structure for overall control of how jobs are to be run:: #include - struct padata_instance *padata_alloc_possible(const char *name); + struct padata_instance *padata_alloc(const char *name); 'name' simply identifies the instance. diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index 2d4ac9d44902..d569c7ed6c80 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -316,7 +316,7 @@ static int pcrypt_init_padata(struct padata_instance **pinst, const char *name) { int ret = -ENOMEM; - *pinst = padata_alloc_possible(name); + *pinst = padata_alloc(name); if (!*pinst) return ret; diff --git a/include/linux/padata.h b/include/linux/padata.h index a941b96b7119..070a7d43e8af 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -192,7 +192,7 @@ extern void __init padata_init(void); static inline void __init padata_init(void) {} #endif -extern struct padata_instance *padata_alloc_possible(const char *name); +extern struct padata_instance *padata_alloc(const char *name); extern void padata_free(struct padata_instance *pinst); extern struct padata_shell *padata_alloc_shell(struct padata_instance *pinst); extern void padata_free_shell(struct padata_shell *ps); diff --git a/kernel/padata.c b/kernel/padata.c index 4f0a57e5738c..1c0b97891edb 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -979,18 +979,12 @@ static struct kobj_type padata_attr_type = { }; /** - * padata_alloc - allocate and initialize a padata instance and specify - * cpumasks for serial and parallel workers. 
- * + * padata_alloc - allocate and initialize a padata instance * @name: used to identify the instance - * @pcpumask: cpumask that will be used for padata parallelization - * @cbcpumask: cpumask that will be used for padata serialization * * Return: new instance on success, NULL on error */ -static struct padata_instance *padata_alloc(const char *name, - const struct cpumask *pcpumask, - const struct cpumask *cbcpumask) +struct padata_instance *padata_alloc(const char *name) { struct padata_instance *pinst; @@ -1016,14 +1010,11 @@ static struct padata_instance *padata_alloc(const char *name, free_cpumask_var(pinst->cpumask.pcpu); goto err_free_serial_wq; } - if (!padata_validate_cpumask(pinst, pcpumask) || - !padata_validate_cpumask(pinst, cbcpumask)) - goto err_free_masks; INIT_LIST_HEAD(&pinst->pslist); - cpumask_copy(pinst->cpumask.pcpu, pcpumask); - cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); + cpumask_copy(pinst->cpumask.pcpu, cpu_possible_mask); + cpumask_copy(pinst->cpumask.cbcpu, cpu_possible_mask); if (padata_setup_cpumasks(pinst)) goto err_free_masks; @@ -1057,21 +1048,7 @@ err_free_inst: err: return NULL; } - -/** - * padata_alloc_possible - Allocate and initialize padata instance. - * Use the cpu_possible_mask for serial and - * parallel workers. - * - * @name: used to identify the instance - * - * Return: new instance on success, NULL on error - */ -struct padata_instance *padata_alloc_possible(const char *name) -{ - return padata_alloc(name, cpu_possible_mask, cpu_possible_mask); -} -EXPORT_SYMBOL(padata_alloc_possible); +EXPORT_SYMBOL(padata_alloc); /** * padata_free - free a padata instance -- cgit v1.2.3 From f601c725a6ac072608f47083e7c8115a3f504f95 Mon Sep 17 00:00:00 2001 From: Daniel Jordan Date: Tue, 14 Jul 2020 16:13:56 -0400 Subject: padata: remove padata_parallel_queue Only its reorder field is actually used now, so remove the struct and embed @reorder directly in parallel_data. No functional change, just a cleanup. Signed-off-by: Daniel Jordan Cc: Herbert Xu Cc: Steffen Klassert Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- include/linux/padata.h | 15 ++------------- kernel/padata.c | 46 ++++++++++++++++++++-------------------------- 2 files changed, 22 insertions(+), 39 deletions(-) (limited to 'include') diff --git a/include/linux/padata.h b/include/linux/padata.h index 070a7d43e8af..a433f13fc4bf 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -66,17 +66,6 @@ struct padata_serial_queue { struct parallel_data *pd; }; -/** - * struct padata_parallel_queue - The percpu padata parallel queue - * - * @reorder: List to wait for reordering after parallel processing. - * @num_obj: Number of objects that are processed by this cpu. - */ -struct padata_parallel_queue { - struct padata_list reorder; - atomic_t num_obj; -}; - /** * struct padata_cpumask - The cpumasks for the parallel/serial workers * @@ -93,7 +82,7 @@ struct padata_cpumask { * that depends on the cpumask in use. * * @ps: padata_shell object. - * @pqueue: percpu padata queues used for parallelization. + * @reorder_list: percpu reorder lists * @squeue: percpu padata queues used for serialuzation. * @refcnt: Number of objects holding a reference on this parallel_data. * @seq_nr: Sequence number of the parallelized data object. 
@@ -105,7 +94,7 @@ struct padata_cpumask { */ struct parallel_data { struct padata_shell *ps; - struct padata_parallel_queue __percpu *pqueue; + struct padata_list __percpu *reorder_list; struct padata_serial_queue __percpu *squeue; atomic_t refcnt; unsigned int seq_nr; diff --git a/kernel/padata.c b/kernel/padata.c index 1c0b97891edb..16cb894dc272 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -250,13 +250,11 @@ EXPORT_SYMBOL(padata_do_parallel); static struct padata_priv *padata_find_next(struct parallel_data *pd, bool remove_object) { - struct padata_parallel_queue *next_queue; struct padata_priv *padata; struct padata_list *reorder; int cpu = pd->cpu; - next_queue = per_cpu_ptr(pd->pqueue, cpu); - reorder = &next_queue->reorder; + reorder = per_cpu_ptr(pd->reorder_list, cpu); spin_lock(&reorder->lock); if (list_empty(&reorder->list)) { @@ -291,7 +289,7 @@ static void padata_reorder(struct parallel_data *pd) int cb_cpu; struct padata_priv *padata; struct padata_serial_queue *squeue; - struct padata_parallel_queue *next_queue; + struct padata_list *reorder; /* * We need to ensure that only one cpu can work on dequeueing of @@ -339,9 +337,8 @@ static void padata_reorder(struct parallel_data *pd) */ smp_mb(); - next_queue = per_cpu_ptr(pd->pqueue, pd->cpu); - if (!list_empty(&next_queue->reorder.list) && - padata_find_next(pd, false)) + reorder = per_cpu_ptr(pd->reorder_list, pd->cpu); + if (!list_empty(&reorder->list) && padata_find_next(pd, false)) queue_work(pinst->serial_wq, &pd->reorder_work); } @@ -401,17 +398,16 @@ void padata_do_serial(struct padata_priv *padata) { struct parallel_data *pd = padata->pd; int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr); - struct padata_parallel_queue *pqueue = per_cpu_ptr(pd->pqueue, - hashed_cpu); + struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu); struct padata_priv *cur; - spin_lock(&pqueue->reorder.lock); + spin_lock(&reorder->lock); /* Sort in ascending order of sequence number. 
*/ - list_for_each_entry_reverse(cur, &pqueue->reorder.list, list) + list_for_each_entry_reverse(cur, &reorder->list, list) if (cur->seq_nr < padata->seq_nr) break; list_add(&padata->list, &cur->list); - spin_unlock(&pqueue->reorder.lock); + spin_unlock(&reorder->lock); /* * Ensure the addition to the reorder list is ordered correctly @@ -553,17 +549,15 @@ static void padata_init_squeues(struct parallel_data *pd) } } -/* Initialize all percpu queues used by parallel workers */ -static void padata_init_pqueues(struct parallel_data *pd) +/* Initialize per-CPU reorder lists */ +static void padata_init_reorder_list(struct parallel_data *pd) { int cpu; - struct padata_parallel_queue *pqueue; + struct padata_list *list; for_each_cpu(cpu, pd->cpumask.pcpu) { - pqueue = per_cpu_ptr(pd->pqueue, cpu); - - __padata_list_init(&pqueue->reorder); - atomic_set(&pqueue->num_obj, 0); + list = per_cpu_ptr(pd->reorder_list, cpu); + __padata_list_init(list); } } @@ -577,13 +571,13 @@ static struct parallel_data *padata_alloc_pd(struct padata_shell *ps) if (!pd) goto err; - pd->pqueue = alloc_percpu(struct padata_parallel_queue); - if (!pd->pqueue) + pd->reorder_list = alloc_percpu(struct padata_list); + if (!pd->reorder_list) goto err_free_pd; pd->squeue = alloc_percpu(struct padata_serial_queue); if (!pd->squeue) - goto err_free_pqueue; + goto err_free_reorder_list; pd->ps = ps; @@ -595,7 +589,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_shell *ps) cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask); cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask); - padata_init_pqueues(pd); + padata_init_reorder_list(pd); padata_init_squeues(pd); pd->seq_nr = -1; atomic_set(&pd->refcnt, 1); @@ -609,8 +603,8 @@ err_free_pcpu: free_cpumask_var(pd->cpumask.pcpu); err_free_squeue: free_percpu(pd->squeue); -err_free_pqueue: - free_percpu(pd->pqueue); +err_free_reorder_list: + free_percpu(pd->reorder_list); err_free_pd: kfree(pd); err: @@ -621,7 +615,7 @@ static void padata_free_pd(struct parallel_data *pd) { free_cpumask_var(pd->cpumask.pcpu); free_cpumask_var(pd->cpumask.cbcpu); - free_percpu(pd->pqueue); + free_percpu(pd->reorder_list); free_percpu(pd->squeue); kfree(pd); } -- cgit v1.2.3 From da087a4cdcbf93c8a6a9decab4a34c38e6c7e867 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 17 Jul 2020 16:35:33 -0700 Subject: crypto: hash - drop duplicated word in a comment Drop the doubled word "in" in a comment. Signed-off-by: Randy Dunlap Cc: Herbert Xu Cc: "David S. Miller" Cc: linux-crypto@vger.kernel.org Acked-by: David S. Miller Signed-off-by: Herbert Xu --- include/crypto/hash.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 4829d2367eda..19ce91f2359f 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -687,7 +687,7 @@ static inline void ahash_request_set_crypt(struct ahash_request *req, * The message digest API is able to maintain state information for the * caller. * - * The synchronous message digest API can store user-related context in in its + * The synchronous message digest API can store user-related context in its * shash_desc request data structure. */ -- cgit v1.2.3 From dd3240a28c08f8092386c76951408966bf4005ff Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 17 Jul 2020 16:35:49 -0700 Subject: crypto: skcipher - drop duplicated word in kernel-doc Drop the doubled word "request" in a kernel-doc comment. 
Signed-off-by: Randy Dunlap Cc: Herbert Xu Cc: "David S. Miller" Cc: linux-crypto@vger.kernel.org Acked-by: David S. Miller Signed-off-by: Herbert Xu --- include/crypto/skcipher.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 141e7690f9c3..5663f71198b3 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -18,7 +18,7 @@ * @iv: Initialisation Vector * @src: Source SG list * @dst: Destination SG list - * @base: Underlying async request request + * @base: Underlying async request * @__ctx: Start of private context data */ struct skcipher_request { -- cgit v1.2.3 From 34ec0aa62b40e55bcd5f41b89494242c38556f30 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 18 Jul 2020 17:29:43 -0700 Subject: misc: mic: : drop a duplicated word Drop the repeated word "the" in a comment. Signed-off-by: Randy Dunlap Cc: Sudeep Dutt Cc: Ashutosh Dixit Cc: Arnd Bergmann Cc: Greg Kroah-Hartman Link: https://lore.kernel.org/r/20200719002943.20624-1-rdunlap@infradead.org Signed-off-by: Greg Kroah-Hartman --- include/linux/mic_bus.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/mic_bus.h b/include/linux/mic_bus.h index 491156a2359f..e99c789424e0 100644 --- a/include/linux/mic_bus.h +++ b/include/linux/mic_bus.h @@ -6,7 +6,7 @@ * * Intel MIC Bus driver. * - * This implementation is very similar to the the virtio bus driver + * This implementation is very similar to the virtio bus driver * implementation @ include/linux/virtio.h. */ #ifndef _MIC_BUS_H_ -- cgit v1.2.3 From 1859f4ebcf2dfe2e878a3805700036fdc4e01e2c Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 18 Jul 2020 17:27:38 -0700 Subject: android: binder.h: drop a duplicated word MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Drop the repeated word "the" in a comment. Cc: Arve Hjønnevåg Cc: Todd Kjos Cc: Martijn Coenen Cc: Joel Fernandes Cc: Hridya Valsaraju Cc: Suren Baghdasaryan Cc: devel@driverdev.osuosl.org Acked-by: Christian Brauner Signed-off-by: Randy Dunlap Link: https://lore.kernel.org/r/20200719002738.20210-1-rdunlap@infradead.org Signed-off-by: Greg Kroah-Hartman --- include/uapi/linux/android/binder.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h index 2832134e5397..f1ce2c4c077e 100644 --- a/include/uapi/linux/android/binder.h +++ b/include/uapi/linux/android/binder.h @@ -404,7 +404,7 @@ enum binder_driver_return_protocol { BR_FAILED_REPLY = _IO('r', 17), /* - * The the last transaction (either a bcTRANSACTION or + * The last transaction (either a bcTRANSACTION or * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters. */ }; -- cgit v1.2.3 From c1e18d4fb9590268105e724444792656fcb3927d Mon Sep 17 00:00:00 2001 From: Enric Balletbo i Serra Date: Mon, 15 Jun 2020 22:35:21 +0200 Subject: platform/chrome: cros_ec_proto: Do not export cros_ec_cmd_xfer() Now that all the remaining users of cros_ec_cmd_xfer() has been removed, make this function private to the cros_ec_proto module. 
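As a hedged illustration (not from the patch itself), external callers are expected to go through the still-exported cros_ec_cmd_xfer_status() wrapper. The function name example_query_ec() and the choice of EC_CMD_GET_VERSION as the opcode are assumptions made for the sketch; the buffer layout follows struct cros_ec_command.

#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/slab.h>

/* Hypothetical caller: issue one host command through the exported wrapper. */
static int example_query_ec(struct cros_ec_device *ec_dev)
{
	struct cros_ec_command *msg;
	int ret;

	msg = kzalloc(sizeof(*msg) + sizeof(struct ec_response_get_version),
		      GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->command = EC_CMD_GET_VERSION;	/* example opcode */
	msg->insize = sizeof(struct ec_response_get_version);

	/* Negative errno on failure, number of received bytes otherwise. */
	ret = cros_ec_cmd_xfer_status(ec_dev, msg);

	kfree(msg);
	return ret < 0 ? ret : 0;
}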
Signed-off-by: Enric Balletbo i Serra --- drivers/platform/chrome/cros_ec_proto.c | 5 ++--- include/linux/platform_data/cros_ec_proto.h | 3 --- 2 files changed, 2 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c index 3e745e0fe092..11a2db7cd0f7 100644 --- a/drivers/platform/chrome/cros_ec_proto.c +++ b/drivers/platform/chrome/cros_ec_proto.c @@ -496,8 +496,8 @@ EXPORT_SYMBOL(cros_ec_query_all); * * Return: 0 on success or negative error code. */ -int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, - struct cros_ec_command *msg) +static int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, + struct cros_ec_command *msg) { int ret; @@ -541,7 +541,6 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, return ret; } -EXPORT_SYMBOL(cros_ec_cmd_xfer); /** * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC. diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h index 383243326676..4a415ae851ef 100644 --- a/include/linux/platform_data/cros_ec_proto.h +++ b/include/linux/platform_data/cros_ec_proto.h @@ -216,9 +216,6 @@ int cros_ec_prepare_tx(struct cros_ec_device *ec_dev, int cros_ec_check_result(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); -int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, - struct cros_ec_command *msg); - int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); -- cgit v1.2.3 From bf9b82b7fe4d3c7d4d432ef80b4299c9b6b4a1f8 Mon Sep 17 00:00:00 2001 From: Garrit Franke Date: Thu, 16 Jul 2020 22:31:01 +0200 Subject: kobject: remove unused KOBJ_MAX action The loop in libb/kobj_uevent.c that checked for KOBBJ_MAX is no longer present, we do a much more sane ARRAY_SIZE() check instead. See 5c5daf657cb5 ("Driver core: exclude kobject_uevent.c for !CONFIG_HOTPLUG"). Signed-off-by: Garrit Franke Link: https://lore.kernel.org/r/20200716203100.7959-1-garritfranke@gmail.com Signed-off-by: Greg Kroah-Hartman --- include/linux/kobject.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/linux/kobject.h b/include/linux/kobject.h index 6cba088bee24..ea30529fba08 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -59,7 +59,6 @@ enum kobject_action { KOBJ_OFFLINE, KOBJ_BIND, KOBJ_UNBIND, - KOBJ_MAX }; struct kobject { -- cgit v1.2.3 From 8fc3ed3a474d76cd76dd0a154ea904373e9a5530 Mon Sep 17 00:00:00 2001 From: Colton Lewis Date: Thu, 23 Jul 2020 09:58:28 +0000 Subject: gpio: Correct kernel-doc inconsistency Fix kernel-doc comment to match parameter name change "chip" to "gc" in gpiochip_add_data function. 
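For readers of the kernel-doc being corrected below, a brief sketch of how a driver registers a chip through gpiochip_add_data(), including the dynamic-base request the comment describes. Everything named example_* and the ngpio value are illustrative assumptions, not taken from any real driver.

#include <linux/gpio/driver.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* Stub callbacks: no hardware access in this sketch. */
static int example_gpio_get(struct gpio_chip *gc, unsigned int offset)
{
	return 0;	/* always reads low */
}

static void example_gpio_set(struct gpio_chip *gc, unsigned int offset,
			     int value)
{
}

/* Hypothetical probe: gc->base = -1 asks gpiolib for dynamic assignment. */
static int example_gpio_probe(struct platform_device *pdev)
{
	struct gpio_chip *gc;

	gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL);
	if (!gc)
		return -ENOMEM;

	gc->label = dev_name(&pdev->dev);
	gc->parent = &pdev->dev;
	gc->owner = THIS_MODULE;
	gc->base = -1;			/* dynamic GPIO number assignment */
	gc->ngpio = 8;			/* assumption for the example */
	gc->get = example_gpio_get;
	gc->set = example_gpio_set;

	return gpiochip_add_data(gc, NULL);
}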
Signed-off-by: Colton Lewis Link: https://lore.kernel.org/r/20200723095658.234668-1-colton.w.lewis@protonmail.com Signed-off-by: Linus Walleij --- include/linux/gpio/driver.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index db82451776fc..6e9f1826ecd7 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -497,25 +497,25 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, /** * gpiochip_add_data() - register a gpio_chip - * @gc: the chip to register, with chip->base initialized + * @gc: the chip to register, with gc->base initialized * @data: driver-private data associated with this chip * * Context: potentially before irqs will work * * When gpiochip_add_data() is called very early during boot, so that GPIOs - * can be freely used, the chip->parent device must be registered before + * can be freely used, the gc->parent device must be registered before * the gpio framework's arch_initcall(). Otherwise sysfs initialization * for GPIOs will fail rudely. * * gpiochip_add_data() must only be called after gpiolib initialization, * ie after core_initcall(). * - * If chip->base is negative, this requests dynamic assignment of + * If gc->base is negative, this requests dynamic assignment of * a range of valid GPIOs. * * Returns: * A negative errno if the chip can't be registered, such as because the - * chip->base is invalid or already associated with a different chip. + * gc->base is invalid or already associated with a different chip. * Otherwise it returns zero as a success code. */ #ifdef CONFIG_LOCKDEP -- cgit v1.2.3 From b07b616214857c9db01e2807cde2f6bba8019fc3 Mon Sep 17 00:00:00 2001 From: Hanks Chen Date: Thu, 23 Jul 2020 19:19:51 +0800 Subject: pinctrl: mediatek: update pinmux definitions for mt6779 Add devicetree bindings for Mediatek mt6779 SoC Pin Controller. Signed-off-by: Mars Cheng Signed-off-by: Andy Teng Signed-off-by: Hanks Chen Acked-by: Sean Wang Link: https://lore.kernel.org/r/1595503197-15246-2-git-send-email-hanks.chen@mediatek.com Signed-off-by: Linus Walleij --- include/dt-bindings/pinctrl/mt6779-pinfunc.h | 1242 ++++++++++++++++++++++++++ 1 file changed, 1242 insertions(+) create mode 100644 include/dt-bindings/pinctrl/mt6779-pinfunc.h (limited to 'include') diff --git a/include/dt-bindings/pinctrl/mt6779-pinfunc.h b/include/dt-bindings/pinctrl/mt6779-pinfunc.h new file mode 100644 index 000000000000..87fdc4310936 --- /dev/null +++ b/include/dt-bindings/pinctrl/mt6779-pinfunc.h @@ -0,0 +1,1242 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 MediaTek Inc. 
+ * Author: Andy Teng + * + */ + +#ifndef __MT6779_PINFUNC_H +#define __MT6779_PINFUNC_H + +#include + +#define PINMUX_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0) +#define PINMUX_GPIO0__FUNC_SPI6_MI (MTK_PIN_NO(0) | 1) +#define PINMUX_GPIO0__FUNC_I2S5_LRCK (MTK_PIN_NO(0) | 2) +#define PINMUX_GPIO0__FUNC_TDM_LRCK_2ND (MTK_PIN_NO(0) | 3) +#define PINMUX_GPIO0__FUNC_PCM1_SYNC (MTK_PIN_NO(0) | 4) +#define PINMUX_GPIO0__FUNC_SCL_6306 (MTK_PIN_NO(0) | 5) +#define PINMUX_GPIO0__FUNC_TP_GPIO0_AO (MTK_PIN_NO(0) | 6) +#define PINMUX_GPIO0__FUNC_PTA_RXD (MTK_PIN_NO(0) | 7) + +#define PINMUX_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0) +#define PINMUX_GPIO1__FUNC_SPI6_CSB (MTK_PIN_NO(1) | 1) +#define PINMUX_GPIO1__FUNC_I2S5_DO (MTK_PIN_NO(1) | 2) +#define PINMUX_GPIO1__FUNC_TDM_DATA0_2ND (MTK_PIN_NO(1) | 3) +#define PINMUX_GPIO1__FUNC_PCM1_DO0 (MTK_PIN_NO(1) | 4) +#define PINMUX_GPIO1__FUNC_SDA_6306 (MTK_PIN_NO(1) | 5) +#define PINMUX_GPIO1__FUNC_TP_GPIO1_AO (MTK_PIN_NO(1) | 6) +#define PINMUX_GPIO1__FUNC_PTA_TXD (MTK_PIN_NO(1) | 7) + +#define PINMUX_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0) +#define PINMUX_GPIO2__FUNC_SPI6_MO (MTK_PIN_NO(2) | 1) +#define PINMUX_GPIO2__FUNC_I2S5_BCK (MTK_PIN_NO(2) | 2) +#define PINMUX_GPIO2__FUNC_TDM_BCK_2ND (MTK_PIN_NO(2) | 3) +#define PINMUX_GPIO2__FUNC_PCM1_CLK (MTK_PIN_NO(2) | 4) +#define PINMUX_GPIO2__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(2) | 5) +#define PINMUX_GPIO2__FUNC_TP_GPIO2_AO (MTK_PIN_NO(2) | 6) + +#define PINMUX_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0) +#define PINMUX_GPIO3__FUNC_SPI6_CLK (MTK_PIN_NO(3) | 1) +#define PINMUX_GPIO3__FUNC_I2S5_MCK (MTK_PIN_NO(3) | 2) +#define PINMUX_GPIO3__FUNC_TDM_MCK_2ND (MTK_PIN_NO(3) | 3) +#define PINMUX_GPIO3__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(3) | 4) +#define PINMUX_GPIO3__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(3) | 5) +#define PINMUX_GPIO3__FUNC_TP_GPIO3_AO (MTK_PIN_NO(3) | 6) + +#define PINMUX_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0) +#define PINMUX_GPIO4__FUNC_SPI7_MI (MTK_PIN_NO(4) | 1) +#define PINMUX_GPIO4__FUNC_I2S0_MCK (MTK_PIN_NO(4) | 2) +#define PINMUX_GPIO4__FUNC_TDM_DATA1_2ND (MTK_PIN_NO(4) | 3) +#define PINMUX_GPIO4__FUNC_PCM1_DO1 (MTK_PIN_NO(4) | 4) +#define PINMUX_GPIO4__FUNC_DMIC1_CLK (MTK_PIN_NO(4) | 5) +#define PINMUX_GPIO4__FUNC_TP_GPIO4_AO (MTK_PIN_NO(4) | 6) +#define PINMUX_GPIO4__FUNC_SCL8 (MTK_PIN_NO(4) | 7) + +#define PINMUX_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0) +#define PINMUX_GPIO5__FUNC_SPI7_CSB (MTK_PIN_NO(5) | 1) +#define PINMUX_GPIO5__FUNC_I2S0_BCK (MTK_PIN_NO(5) | 2) +#define PINMUX_GPIO5__FUNC_TDM_DATA2_2ND (MTK_PIN_NO(5) | 3) +#define PINMUX_GPIO5__FUNC_PCM1_DO2 (MTK_PIN_NO(5) | 4) +#define PINMUX_GPIO5__FUNC_DMIC1_DAT (MTK_PIN_NO(5) | 5) +#define PINMUX_GPIO5__FUNC_TP_GPIO5_AO (MTK_PIN_NO(5) | 6) +#define PINMUX_GPIO5__FUNC_SDA8 (MTK_PIN_NO(5) | 7) + +#define PINMUX_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0) +#define PINMUX_GPIO6__FUNC_SPI7_MO (MTK_PIN_NO(6) | 1) +#define PINMUX_GPIO6__FUNC_I2S0_LRCK (MTK_PIN_NO(6) | 2) +#define PINMUX_GPIO6__FUNC_TDM_DATA3_2ND (MTK_PIN_NO(6) | 3) +#define PINMUX_GPIO6__FUNC_PCM1_DI (MTK_PIN_NO(6) | 4) +#define PINMUX_GPIO6__FUNC_DMIC_CLK (MTK_PIN_NO(6) | 5) +#define PINMUX_GPIO6__FUNC_TP_GPIO6_AO (MTK_PIN_NO(6) | 6) +#define PINMUX_GPIO6__FUNC_SCL9 (MTK_PIN_NO(6) | 7) + +#define PINMUX_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0) +#define PINMUX_GPIO7__FUNC_SPI7_CLK (MTK_PIN_NO(7) | 1) +#define PINMUX_GPIO7__FUNC_I2S0_DI (MTK_PIN_NO(7) | 2) +#define PINMUX_GPIO7__FUNC_SRCLKENAI1 (MTK_PIN_NO(7) | 3) +#define PINMUX_GPIO7__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(7) | 4) +#define 
PINMUX_GPIO7__FUNC_DMIC_DAT (MTK_PIN_NO(7) | 5) +#define PINMUX_GPIO7__FUNC_TP_GPIO7_AO (MTK_PIN_NO(7) | 6) +#define PINMUX_GPIO7__FUNC_SDA9 (MTK_PIN_NO(7) | 7) + +#define PINMUX_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0) +#define PINMUX_GPIO8__FUNC_PWM_0 (MTK_PIN_NO(8) | 1) +#define PINMUX_GPIO8__FUNC_I2S2_DI2 (MTK_PIN_NO(8) | 2) +#define PINMUX_GPIO8__FUNC_SRCLKENAI0 (MTK_PIN_NO(8) | 3) +#define PINMUX_GPIO8__FUNC_URXD1 (MTK_PIN_NO(8) | 4) +#define PINMUX_GPIO8__FUNC_I2S0_MCK (MTK_PIN_NO(8) | 5) +#define PINMUX_GPIO8__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(8) | 6) +#define PINMUX_GPIO8__FUNC_IDDIG (MTK_PIN_NO(8) | 7) + +#define PINMUX_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0) +#define PINMUX_GPIO9__FUNC_PWM_3 (MTK_PIN_NO(9) | 1) +#define PINMUX_GPIO9__FUNC_MD_INT0 (MTK_PIN_NO(9) | 2) +#define PINMUX_GPIO9__FUNC_SRCLKENAI1 (MTK_PIN_NO(9) | 3) +#define PINMUX_GPIO9__FUNC_UTXD1 (MTK_PIN_NO(9) | 4) +#define PINMUX_GPIO9__FUNC_I2S0_BCK (MTK_PIN_NO(9) | 5) +#define PINMUX_GPIO9__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(9) | 6) +#define PINMUX_GPIO9__FUNC_USB_DRVVBUS (MTK_PIN_NO(9) | 7) + +#define PINMUX_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0) +#define PINMUX_GPIO10__FUNC_MSDC1_CLK_A (MTK_PIN_NO(10) | 1) +#define PINMUX_GPIO10__FUNC_TP_URXD1_AO (MTK_PIN_NO(10) | 2) +#define PINMUX_GPIO10__FUNC_I2S1_LRCK (MTK_PIN_NO(10) | 3) +#define PINMUX_GPIO10__FUNC_UCTS0 (MTK_PIN_NO(10) | 4) +#define PINMUX_GPIO10__FUNC_DMIC1_CLK (MTK_PIN_NO(10) | 5) +#define PINMUX_GPIO10__FUNC_KPCOL2 (MTK_PIN_NO(10) | 6) +#define PINMUX_GPIO10__FUNC_SCL8 (MTK_PIN_NO(10) | 7) + +#define PINMUX_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0) +#define PINMUX_GPIO11__FUNC_MSDC1_CMD_A (MTK_PIN_NO(11) | 1) +#define PINMUX_GPIO11__FUNC_TP_UTXD1_AO (MTK_PIN_NO(11) | 2) +#define PINMUX_GPIO11__FUNC_I2S1_DO (MTK_PIN_NO(11) | 3) +#define PINMUX_GPIO11__FUNC_URTS0 (MTK_PIN_NO(11) | 4) +#define PINMUX_GPIO11__FUNC_DMIC1_DAT (MTK_PIN_NO(11) | 5) +#define PINMUX_GPIO11__FUNC_KPROW2 (MTK_PIN_NO(11) | 6) +#define PINMUX_GPIO11__FUNC_SDA8 (MTK_PIN_NO(11) | 7) + +#define PINMUX_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0) +#define PINMUX_GPIO12__FUNC_MSDC1_DAT3_A (MTK_PIN_NO(12) | 1) +#define PINMUX_GPIO12__FUNC_TP_URXD2_AO (MTK_PIN_NO(12) | 2) +#define PINMUX_GPIO12__FUNC_I2S1_MCK (MTK_PIN_NO(12) | 3) +#define PINMUX_GPIO12__FUNC_UCTS1 (MTK_PIN_NO(12) | 4) +#define PINMUX_GPIO12__FUNC_DMIC_CLK (MTK_PIN_NO(12) | 5) +#define PINMUX_GPIO12__FUNC_ANT_SEL9 (MTK_PIN_NO(12) | 6) +#define PINMUX_GPIO12__FUNC_SCL9 (MTK_PIN_NO(12) | 7) + +#define PINMUX_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0) +#define PINMUX_GPIO13__FUNC_MSDC1_DAT0_A (MTK_PIN_NO(13) | 1) +#define PINMUX_GPIO13__FUNC_TP_UTXD2_AO (MTK_PIN_NO(13) | 2) +#define PINMUX_GPIO13__FUNC_I2S1_BCK (MTK_PIN_NO(13) | 3) +#define PINMUX_GPIO13__FUNC_URTS1 (MTK_PIN_NO(13) | 4) +#define PINMUX_GPIO13__FUNC_DMIC_DAT (MTK_PIN_NO(13) | 5) +#define PINMUX_GPIO13__FUNC_ANT_SEL10 (MTK_PIN_NO(13) | 6) +#define PINMUX_GPIO13__FUNC_SDA9 (MTK_PIN_NO(13) | 7) + +#define PINMUX_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0) +#define PINMUX_GPIO14__FUNC_MSDC1_DAT2_A (MTK_PIN_NO(14) | 1) +#define PINMUX_GPIO14__FUNC_PWM_3 (MTK_PIN_NO(14) | 2) +#define PINMUX_GPIO14__FUNC_IDDIG (MTK_PIN_NO(14) | 3) +#define PINMUX_GPIO14__FUNC_MD_INT0 (MTK_PIN_NO(14) | 4) +#define PINMUX_GPIO14__FUNC_PTA_RXD (MTK_PIN_NO(14) | 5) +#define PINMUX_GPIO14__FUNC_ANT_SEL11 (MTK_PIN_NO(14) | 6) + +#define PINMUX_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0) +#define PINMUX_GPIO15__FUNC_MSDC1_DAT1_A (MTK_PIN_NO(15) | 1) +#define PINMUX_GPIO15__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(15) 
| 2) +#define PINMUX_GPIO15__FUNC_USB_DRVVBUS (MTK_PIN_NO(15) | 3) +#define PINMUX_GPIO15__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(15) | 4) +#define PINMUX_GPIO15__FUNC_PTA_TXD (MTK_PIN_NO(15) | 5) +#define PINMUX_GPIO15__FUNC_ANT_SEL12 (MTK_PIN_NO(15) | 6) + +#define PINMUX_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0) +#define PINMUX_GPIO16__FUNC_SRCLKENAI0 (MTK_PIN_NO(16) | 1) +#define PINMUX_GPIO16__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(16) | 2) +#define PINMUX_GPIO16__FUNC_MFG_EJTAG_TRSTN (MTK_PIN_NO(16) | 3) +#define PINMUX_GPIO16__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(16) | 4) +#define PINMUX_GPIO16__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(16) | 5) +#define PINMUX_GPIO16__FUNC_PWM_2 (MTK_PIN_NO(16) | 6) +#define PINMUX_GPIO16__FUNC_JTRSTN_SEL1 (MTK_PIN_NO(16) | 7) + +#define PINMUX_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0) +#define PINMUX_GPIO17__FUNC_SPI0_A_MI (MTK_PIN_NO(17) | 1) +#define PINMUX_GPIO17__FUNC_SCP_SPI0_MI (MTK_PIN_NO(17) | 2) +#define PINMUX_GPIO17__FUNC_MFG_EJTAG_TDO (MTK_PIN_NO(17) | 3) +#define PINMUX_GPIO17__FUNC_DPI_HSYNC (MTK_PIN_NO(17) | 4) +#define PINMUX_GPIO17__FUNC_MFG_DFD_JTAG_TDO (MTK_PIN_NO(17) | 5) +#define PINMUX_GPIO17__FUNC_DFD_TDO (MTK_PIN_NO(17) | 6) +#define PINMUX_GPIO17__FUNC_JTDO_SEL1 (MTK_PIN_NO(17) | 7) + +#define PINMUX_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0) +#define PINMUX_GPIO18__FUNC_SPI0_A_MO (MTK_PIN_NO(18) | 1) +#define PINMUX_GPIO18__FUNC_SCP_SPI0_MO (MTK_PIN_NO(18) | 2) +#define PINMUX_GPIO18__FUNC_MFG_EJTAG_TDI (MTK_PIN_NO(18) | 3) +#define PINMUX_GPIO18__FUNC_DPI_VSYNC (MTK_PIN_NO(18) | 4) +#define PINMUX_GPIO18__FUNC_MFG_DFD_JTAG_TDI (MTK_PIN_NO(18) | 5) +#define PINMUX_GPIO18__FUNC_DFD_TDI (MTK_PIN_NO(18) | 6) +#define PINMUX_GPIO18__FUNC_JTDI_SEL1 (MTK_PIN_NO(18) | 7) + +#define PINMUX_GPIO19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0) +#define PINMUX_GPIO19__FUNC_SPI0_A_CSB (MTK_PIN_NO(19) | 1) +#define PINMUX_GPIO19__FUNC_SCP_SPI0_CS (MTK_PIN_NO(19) | 2) +#define PINMUX_GPIO19__FUNC_MFG_EJTAG_TMS (MTK_PIN_NO(19) | 3) +#define PINMUX_GPIO19__FUNC_DPI_DE (MTK_PIN_NO(19) | 4) +#define PINMUX_GPIO19__FUNC_MFG_DFD_JTAG_TMS (MTK_PIN_NO(19) | 5) +#define PINMUX_GPIO19__FUNC_DFD_TMS (MTK_PIN_NO(19) | 6) +#define PINMUX_GPIO19__FUNC_JTMS_SEL1 (MTK_PIN_NO(19) | 7) + +#define PINMUX_GPIO20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0) +#define PINMUX_GPIO20__FUNC_SPI0_A_CLK (MTK_PIN_NO(20) | 1) +#define PINMUX_GPIO20__FUNC_SCP_SPI0_CK (MTK_PIN_NO(20) | 2) +#define PINMUX_GPIO20__FUNC_MFG_EJTAG_TCK (MTK_PIN_NO(20) | 3) +#define PINMUX_GPIO20__FUNC_DPI_CK (MTK_PIN_NO(20) | 4) +#define PINMUX_GPIO20__FUNC_MFG_DFD_JTAG_TCK (MTK_PIN_NO(20) | 5) +#define PINMUX_GPIO20__FUNC_DFD_TCK_XI (MTK_PIN_NO(20) | 6) +#define PINMUX_GPIO20__FUNC_JTCK_SEL1 (MTK_PIN_NO(20) | 7) + +#define PINMUX_GPIO21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0) +#define PINMUX_GPIO21__FUNC_PWM_0 (MTK_PIN_NO(21) | 1) +#define PINMUX_GPIO21__FUNC_CMFLASH0 (MTK_PIN_NO(21) | 2) +#define PINMUX_GPIO21__FUNC_CMVREF2 (MTK_PIN_NO(21) | 3) +#define PINMUX_GPIO21__FUNC_CLKM0 (MTK_PIN_NO(21) | 4) +#define PINMUX_GPIO21__FUNC_ANT_SEL9 (MTK_PIN_NO(21) | 5) +#define PINMUX_GPIO21__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(21) | 6) +#define PINMUX_GPIO21__FUNC_DBG_MON_A27 (MTK_PIN_NO(21) | 7) + +#define PINMUX_GPIO22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0) +#define PINMUX_GPIO22__FUNC_PWM_1 (MTK_PIN_NO(22) | 1) +#define PINMUX_GPIO22__FUNC_CMFLASH1 (MTK_PIN_NO(22) | 2) +#define PINMUX_GPIO22__FUNC_CMVREF3 (MTK_PIN_NO(22) | 3) +#define PINMUX_GPIO22__FUNC_CLKM1 (MTK_PIN_NO(22) | 4) +#define PINMUX_GPIO22__FUNC_ANT_SEL10 (MTK_PIN_NO(22) | 
5) +#define PINMUX_GPIO22__FUNC_DBG_MON_A28 (MTK_PIN_NO(22) | 7) + +#define PINMUX_GPIO23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0) +#define PINMUX_GPIO23__FUNC_PWM_2 (MTK_PIN_NO(23) | 1) +#define PINMUX_GPIO23__FUNC_CMFLASH2 (MTK_PIN_NO(23) | 2) +#define PINMUX_GPIO23__FUNC_CMVREF0 (MTK_PIN_NO(23) | 3) +#define PINMUX_GPIO23__FUNC_CLKM2 (MTK_PIN_NO(23) | 4) +#define PINMUX_GPIO23__FUNC_ANT_SEL11 (MTK_PIN_NO(23) | 5) +#define PINMUX_GPIO23__FUNC_DBG_MON_A29 (MTK_PIN_NO(23) | 7) + +#define PINMUX_GPIO24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0) +#define PINMUX_GPIO24__FUNC_PWM_0 (MTK_PIN_NO(24) | 1) +#define PINMUX_GPIO24__FUNC_CMFLASH3 (MTK_PIN_NO(24) | 2) +#define PINMUX_GPIO24__FUNC_CMVREF1 (MTK_PIN_NO(24) | 3) +#define PINMUX_GPIO24__FUNC_CLKM3 (MTK_PIN_NO(24) | 4) +#define PINMUX_GPIO24__FUNC_ANT_SEL12 (MTK_PIN_NO(24) | 5) +#define PINMUX_GPIO24__FUNC_DBG_MON_A30 (MTK_PIN_NO(24) | 7) + +#define PINMUX_GPIO25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0) +#define PINMUX_GPIO25__FUNC_SRCLKENAI0 (MTK_PIN_NO(25) | 1) +#define PINMUX_GPIO25__FUNC_UCTS0 (MTK_PIN_NO(25) | 2) +#define PINMUX_GPIO25__FUNC_SCL8 (MTK_PIN_NO(25) | 3) +#define PINMUX_GPIO25__FUNC_CMVREF4 (MTK_PIN_NO(25) | 4) +#define PINMUX_GPIO25__FUNC_I2S0_LRCK (MTK_PIN_NO(25) | 5) +#define PINMUX_GPIO25__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(25) | 6) +#define PINMUX_GPIO25__FUNC_DBG_MON_A31 (MTK_PIN_NO(25) | 7) + +#define PINMUX_GPIO26__FUNC_GPIO26 (MTK_PIN_NO(26) | 0) +#define PINMUX_GPIO26__FUNC_PWM_0 (MTK_PIN_NO(26) | 1) +#define PINMUX_GPIO26__FUNC_URTS0 (MTK_PIN_NO(26) | 2) +#define PINMUX_GPIO26__FUNC_SDA8 (MTK_PIN_NO(26) | 3) +#define PINMUX_GPIO26__FUNC_CLKM0 (MTK_PIN_NO(26) | 4) +#define PINMUX_GPIO26__FUNC_I2S0_DI (MTK_PIN_NO(26) | 5) +#define PINMUX_GPIO26__FUNC_AGPS_SYNC (MTK_PIN_NO(26) | 6) +#define PINMUX_GPIO26__FUNC_DBG_MON_A32 (MTK_PIN_NO(26) | 7) + +#define PINMUX_GPIO27__FUNC_GPIO27 (MTK_PIN_NO(27) | 0) +#define PINMUX_GPIO27__FUNC_AP_GOOD (MTK_PIN_NO(27) | 1) + +#define PINMUX_GPIO28__FUNC_GPIO28 (MTK_PIN_NO(28) | 0) +#define PINMUX_GPIO28__FUNC_SCL5 (MTK_PIN_NO(28) | 1) + +#define PINMUX_GPIO29__FUNC_GPIO29 (MTK_PIN_NO(29) | 0) +#define PINMUX_GPIO29__FUNC_SDA5 (MTK_PIN_NO(29) | 1) + +#define PINMUX_GPIO30__FUNC_GPIO30 (MTK_PIN_NO(30) | 0) +#define PINMUX_GPIO30__FUNC_I2S1_MCK (MTK_PIN_NO(30) | 1) +#define PINMUX_GPIO30__FUNC_I2S3_MCK (MTK_PIN_NO(30) | 2) +#define PINMUX_GPIO30__FUNC_I2S2_MCK (MTK_PIN_NO(30) | 3) +#define PINMUX_GPIO30__FUNC_DPI_D0 (MTK_PIN_NO(30) | 4) +#define PINMUX_GPIO30__FUNC_SPI4_MI (MTK_PIN_NO(30) | 5) +#define PINMUX_GPIO30__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(30) | 6) + +#define PINMUX_GPIO31__FUNC_GPIO31 (MTK_PIN_NO(31) | 0) +#define PINMUX_GPIO31__FUNC_I2S1_BCK (MTK_PIN_NO(31) | 1) +#define PINMUX_GPIO31__FUNC_I2S3_BCK (MTK_PIN_NO(31) | 2) +#define PINMUX_GPIO31__FUNC_I2S2_BCK (MTK_PIN_NO(31) | 3) +#define PINMUX_GPIO31__FUNC_DPI_D1 (MTK_PIN_NO(31) | 4) +#define PINMUX_GPIO31__FUNC_SPI4_CSB (MTK_PIN_NO(31) | 5) +#define PINMUX_GPIO31__FUNC_CONN_MCU_TDO (MTK_PIN_NO(31) | 6) + +#define PINMUX_GPIO32__FUNC_GPIO32 (MTK_PIN_NO(32) | 0) +#define PINMUX_GPIO32__FUNC_I2S1_LRCK (MTK_PIN_NO(32) | 1) +#define PINMUX_GPIO32__FUNC_I2S3_LRCK (MTK_PIN_NO(32) | 2) +#define PINMUX_GPIO32__FUNC_I2S2_LRCK (MTK_PIN_NO(32) | 3) +#define PINMUX_GPIO32__FUNC_DPI_D2 (MTK_PIN_NO(32) | 4) +#define PINMUX_GPIO32__FUNC_SPI4_MO (MTK_PIN_NO(32) | 5) +#define PINMUX_GPIO32__FUNC_CONN_MCU_TDI (MTK_PIN_NO(32) | 6) + +#define PINMUX_GPIO33__FUNC_GPIO33 (MTK_PIN_NO(33) | 0) +#define PINMUX_GPIO33__FUNC_I2S2_DI (MTK_PIN_NO(33) | 1) +#define 
PINMUX_GPIO33__FUNC_I2S0_DI (MTK_PIN_NO(33) | 2) +#define PINMUX_GPIO33__FUNC_I2S5_DO (MTK_PIN_NO(33) | 3) +#define PINMUX_GPIO33__FUNC_DPI_D3 (MTK_PIN_NO(33) | 4) +#define PINMUX_GPIO33__FUNC_SPI4_CLK (MTK_PIN_NO(33) | 5) +#define PINMUX_GPIO33__FUNC_CONN_MCU_TMS (MTK_PIN_NO(33) | 6) + +#define PINMUX_GPIO34__FUNC_GPIO34 (MTK_PIN_NO(34) | 0) +#define PINMUX_GPIO34__FUNC_I2S1_DO (MTK_PIN_NO(34) | 1) +#define PINMUX_GPIO34__FUNC_I2S3_DO (MTK_PIN_NO(34) | 2) +#define PINMUX_GPIO34__FUNC_I2S2_DI2 (MTK_PIN_NO(34) | 3) +#define PINMUX_GPIO34__FUNC_DPI_D4 (MTK_PIN_NO(34) | 4) +#define PINMUX_GPIO34__FUNC_AGPS_SYNC (MTK_PIN_NO(34) | 5) +#define PINMUX_GPIO34__FUNC_CONN_MCU_TCK (MTK_PIN_NO(34) | 6) + +#define PINMUX_GPIO35__FUNC_GPIO35 (MTK_PIN_NO(35) | 0) +#define PINMUX_GPIO35__FUNC_TDM_LRCK (MTK_PIN_NO(35) | 1) +#define PINMUX_GPIO35__FUNC_I2S1_LRCK (MTK_PIN_NO(35) | 2) +#define PINMUX_GPIO35__FUNC_I2S5_LRCK (MTK_PIN_NO(35) | 3) +#define PINMUX_GPIO35__FUNC_DPI_D5 (MTK_PIN_NO(35) | 4) +#define PINMUX_GPIO35__FUNC_SPI5_A_MO (MTK_PIN_NO(35) | 5) +#define PINMUX_GPIO35__FUNC_IO_JTAG_TDI (MTK_PIN_NO(35) | 6) +#define PINMUX_GPIO35__FUNC_PWM_2 (MTK_PIN_NO(35) | 7) + +#define PINMUX_GPIO36__FUNC_GPIO36 (MTK_PIN_NO(36) | 0) +#define PINMUX_GPIO36__FUNC_TDM_BCK (MTK_PIN_NO(36) | 1) +#define PINMUX_GPIO36__FUNC_I2S1_BCK (MTK_PIN_NO(36) | 2) +#define PINMUX_GPIO36__FUNC_I2S5_BCK (MTK_PIN_NO(36) | 3) +#define PINMUX_GPIO36__FUNC_DPI_D6 (MTK_PIN_NO(36) | 4) +#define PINMUX_GPIO36__FUNC_SPI5_A_CSB (MTK_PIN_NO(36) | 5) +#define PINMUX_GPIO36__FUNC_IO_JTAG_TRSTN (MTK_PIN_NO(36) | 6) +#define PINMUX_GPIO36__FUNC_SRCLKENAI1 (MTK_PIN_NO(36) | 7) + +#define PINMUX_GPIO37__FUNC_GPIO37 (MTK_PIN_NO(37) | 0) +#define PINMUX_GPIO37__FUNC_TDM_MCK (MTK_PIN_NO(37) | 1) +#define PINMUX_GPIO37__FUNC_I2S1_MCK (MTK_PIN_NO(37) | 2) +#define PINMUX_GPIO37__FUNC_I2S5_MCK (MTK_PIN_NO(37) | 3) +#define PINMUX_GPIO37__FUNC_DPI_D7 (MTK_PIN_NO(37) | 4) +#define PINMUX_GPIO37__FUNC_SPI5_A_MI (MTK_PIN_NO(37) | 5) +#define PINMUX_GPIO37__FUNC_IO_JTAG_TCK (MTK_PIN_NO(37) | 6) +#define PINMUX_GPIO37__FUNC_SRCLKENAI0 (MTK_PIN_NO(37) | 7) + +#define PINMUX_GPIO38__FUNC_GPIO38 (MTK_PIN_NO(38) | 0) +#define PINMUX_GPIO38__FUNC_TDM_DATA0 (MTK_PIN_NO(38) | 1) +#define PINMUX_GPIO38__FUNC_I2S2_DI (MTK_PIN_NO(38) | 2) +#define PINMUX_GPIO38__FUNC_I2S5_DO (MTK_PIN_NO(38) | 3) +#define PINMUX_GPIO38__FUNC_DPI_D8 (MTK_PIN_NO(38) | 4) +#define PINMUX_GPIO38__FUNC_SPI5_A_CLK (MTK_PIN_NO(38) | 5) +#define PINMUX_GPIO38__FUNC_IO_JTAG_TDO (MTK_PIN_NO(38) | 6) +#define PINMUX_GPIO38__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(38) | 7) + +#define PINMUX_GPIO39__FUNC_GPIO39 (MTK_PIN_NO(39) | 0) +#define PINMUX_GPIO39__FUNC_TDM_DATA1 (MTK_PIN_NO(39) | 1) +#define PINMUX_GPIO39__FUNC_I2S1_DO (MTK_PIN_NO(39) | 2) +#define PINMUX_GPIO39__FUNC_I2S2_DI2 (MTK_PIN_NO(39) | 3) +#define PINMUX_GPIO39__FUNC_DPI_D9 (MTK_PIN_NO(39) | 4) +#define PINMUX_GPIO39__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(39) | 5) +#define PINMUX_GPIO39__FUNC_IO_JTAG_TMS (MTK_PIN_NO(39) | 6) +#define PINMUX_GPIO39__FUNC_IDDIG (MTK_PIN_NO(39) | 7) + +#define PINMUX_GPIO40__FUNC_GPIO40 (MTK_PIN_NO(40) | 0) +#define PINMUX_GPIO40__FUNC_TDM_DATA2 (MTK_PIN_NO(40) | 1) +#define PINMUX_GPIO40__FUNC_SCL9 (MTK_PIN_NO(40) | 2) +#define PINMUX_GPIO40__FUNC_PWM_3 (MTK_PIN_NO(40) | 3) +#define PINMUX_GPIO40__FUNC_DPI_D10 (MTK_PIN_NO(40) | 4) +#define PINMUX_GPIO40__FUNC_SRCLKENAI0 (MTK_PIN_NO(40) | 5) +#define PINMUX_GPIO40__FUNC_DAP_MD32_SWD (MTK_PIN_NO(40) | 6) +#define PINMUX_GPIO40__FUNC_USB_DRVVBUS (MTK_PIN_NO(40) | 
7) + +#define PINMUX_GPIO41__FUNC_GPIO41 (MTK_PIN_NO(41) | 0) +#define PINMUX_GPIO41__FUNC_TDM_DATA3 (MTK_PIN_NO(41) | 1) +#define PINMUX_GPIO41__FUNC_SDA9 (MTK_PIN_NO(41) | 2) +#define PINMUX_GPIO41__FUNC_PWM_1 (MTK_PIN_NO(41) | 3) +#define PINMUX_GPIO41__FUNC_DPI_D11 (MTK_PIN_NO(41) | 4) +#define PINMUX_GPIO41__FUNC_CLKM1 (MTK_PIN_NO(41) | 5) +#define PINMUX_GPIO41__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(41) | 6) + +#define PINMUX_GPIO42__FUNC_GPIO42 (MTK_PIN_NO(42) | 0) +#define PINMUX_GPIO42__FUNC_DISP_PWM (MTK_PIN_NO(42) | 1) + +#define PINMUX_GPIO43__FUNC_GPIO43 (MTK_PIN_NO(43) | 0) +#define PINMUX_GPIO43__FUNC_DSI_TE (MTK_PIN_NO(43) | 1) + +#define PINMUX_GPIO44__FUNC_GPIO44 (MTK_PIN_NO(44) | 0) +#define PINMUX_GPIO44__FUNC_LCM_RST (MTK_PIN_NO(44) | 1) + +#define PINMUX_GPIO45__FUNC_GPIO45 (MTK_PIN_NO(45) | 0) +#define PINMUX_GPIO45__FUNC_SCL6 (MTK_PIN_NO(45) | 1) +#define PINMUX_GPIO45__FUNC_SCP_SCL0 (MTK_PIN_NO(45) | 2) +#define PINMUX_GPIO45__FUNC_SCP_SCL1 (MTK_PIN_NO(45) | 3) +#define PINMUX_GPIO45__FUNC_SCL_6306 (MTK_PIN_NO(45) | 4) + +#define PINMUX_GPIO46__FUNC_GPIO46 (MTK_PIN_NO(46) | 0) +#define PINMUX_GPIO46__FUNC_SDA6 (MTK_PIN_NO(46) | 1) +#define PINMUX_GPIO46__FUNC_SCP_SDA0 (MTK_PIN_NO(46) | 2) +#define PINMUX_GPIO46__FUNC_SCP_SDA1 (MTK_PIN_NO(46) | 3) +#define PINMUX_GPIO46__FUNC_SDA_6306 (MTK_PIN_NO(46) | 4) + +#define PINMUX_GPIO47__FUNC_GPIO47 (MTK_PIN_NO(47) | 0) +#define PINMUX_GPIO47__FUNC_SPI1_A_MI (MTK_PIN_NO(47) | 1) +#define PINMUX_GPIO47__FUNC_SCP_SPI1_A_MI (MTK_PIN_NO(47) | 2) +#define PINMUX_GPIO47__FUNC_KPCOL2 (MTK_PIN_NO(47) | 3) +#define PINMUX_GPIO47__FUNC_MD_URXD0 (MTK_PIN_NO(47) | 4) +#define PINMUX_GPIO47__FUNC_CONN_UART0_RXD (MTK_PIN_NO(47) | 5) +#define PINMUX_GPIO47__FUNC_SSPM_URXD_AO (MTK_PIN_NO(47) | 6) +#define PINMUX_GPIO47__FUNC_DBG_MON_B32 (MTK_PIN_NO(47) | 7) + +#define PINMUX_GPIO48__FUNC_GPIO48 (MTK_PIN_NO(48) | 0) +#define PINMUX_GPIO48__FUNC_SPI1_A_CSB (MTK_PIN_NO(48) | 1) +#define PINMUX_GPIO48__FUNC_SCP_SPI1_A_CS (MTK_PIN_NO(48) | 2) +#define PINMUX_GPIO48__FUNC_KPROW2 (MTK_PIN_NO(48) | 3) +#define PINMUX_GPIO48__FUNC_MD_UTXD0 (MTK_PIN_NO(48) | 4) +#define PINMUX_GPIO48__FUNC_CONN_UART0_TXD (MTK_PIN_NO(48) | 5) +#define PINMUX_GPIO48__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(48) | 6) +#define PINMUX_GPIO48__FUNC_DBG_MON_B31 (MTK_PIN_NO(48) | 7) + +#define PINMUX_GPIO49__FUNC_GPIO49 (MTK_PIN_NO(49) | 0) +#define PINMUX_GPIO49__FUNC_SPI1_A_MO (MTK_PIN_NO(49) | 1) +#define PINMUX_GPIO49__FUNC_SCP_SPI1_A_MO (MTK_PIN_NO(49) | 2) +#define PINMUX_GPIO49__FUNC_UCTS0 (MTK_PIN_NO(49) | 3) +#define PINMUX_GPIO49__FUNC_MD_URXD1 (MTK_PIN_NO(49) | 4) +#define PINMUX_GPIO49__FUNC_PWM_1 (MTK_PIN_NO(49) | 5) +#define PINMUX_GPIO49__FUNC_TP_URXD2_AO (MTK_PIN_NO(49) | 6) +#define PINMUX_GPIO49__FUNC_DBG_MON_B30 (MTK_PIN_NO(49) | 7) + +#define PINMUX_GPIO50__FUNC_GPIO50 (MTK_PIN_NO(50) | 0) +#define PINMUX_GPIO50__FUNC_SPI1_A_CLK (MTK_PIN_NO(50) | 1) +#define PINMUX_GPIO50__FUNC_SCP_SPI1_A_CK (MTK_PIN_NO(50) | 2) +#define PINMUX_GPIO50__FUNC_URTS0 (MTK_PIN_NO(50) | 3) +#define PINMUX_GPIO50__FUNC_MD_UTXD1 (MTK_PIN_NO(50) | 4) +#define PINMUX_GPIO50__FUNC_WIFI_TXD (MTK_PIN_NO(50) | 5) +#define PINMUX_GPIO50__FUNC_TP_UTXD2_AO (MTK_PIN_NO(50) | 6) +#define PINMUX_GPIO50__FUNC_DBG_MON_B29 (MTK_PIN_NO(50) | 7) + +#define PINMUX_GPIO51__FUNC_GPIO51 (MTK_PIN_NO(51) | 0) +#define PINMUX_GPIO51__FUNC_SCL0 (MTK_PIN_NO(51) | 1) + +#define PINMUX_GPIO52__FUNC_GPIO52 (MTK_PIN_NO(52) | 0) +#define PINMUX_GPIO52__FUNC_SDA0 (MTK_PIN_NO(52) | 1) + +#define PINMUX_GPIO53__FUNC_GPIO53 
(MTK_PIN_NO(53) | 0) +#define PINMUX_GPIO53__FUNC_URXD0 (MTK_PIN_NO(53) | 1) +#define PINMUX_GPIO53__FUNC_UTXD0 (MTK_PIN_NO(53) | 2) +#define PINMUX_GPIO53__FUNC_MD_URXD0 (MTK_PIN_NO(53) | 3) +#define PINMUX_GPIO53__FUNC_MD_URXD1 (MTK_PIN_NO(53) | 4) +#define PINMUX_GPIO53__FUNC_SSPM_URXD_AO (MTK_PIN_NO(53) | 5) +#define PINMUX_GPIO53__FUNC_CONN_UART0_RXD (MTK_PIN_NO(53) | 7) + +#define PINMUX_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0) +#define PINMUX_GPIO54__FUNC_UTXD0 (MTK_PIN_NO(54) | 1) +#define PINMUX_GPIO54__FUNC_URXD0 (MTK_PIN_NO(54) | 2) +#define PINMUX_GPIO54__FUNC_MD_UTXD0 (MTK_PIN_NO(54) | 3) +#define PINMUX_GPIO54__FUNC_MD_UTXD1 (MTK_PIN_NO(54) | 4) +#define PINMUX_GPIO54__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(54) | 5) +#define PINMUX_GPIO54__FUNC_WIFI_TXD (MTK_PIN_NO(54) | 6) +#define PINMUX_GPIO54__FUNC_CONN_UART0_TXD (MTK_PIN_NO(54) | 7) + +#define PINMUX_GPIO55__FUNC_GPIO55 (MTK_PIN_NO(55) | 0) +#define PINMUX_GPIO55__FUNC_SCL3 (MTK_PIN_NO(55) | 1) +#define PINMUX_GPIO55__FUNC_SCP_SCL0 (MTK_PIN_NO(55) | 2) +#define PINMUX_GPIO55__FUNC_SCP_SCL1 (MTK_PIN_NO(55) | 3) +#define PINMUX_GPIO55__FUNC_SCL_6306 (MTK_PIN_NO(55) | 4) + +#define PINMUX_GPIO56__FUNC_GPIO56 (MTK_PIN_NO(56) | 0) +#define PINMUX_GPIO56__FUNC_SDA3 (MTK_PIN_NO(56) | 1) +#define PINMUX_GPIO56__FUNC_SCP_SDA0 (MTK_PIN_NO(56) | 2) +#define PINMUX_GPIO56__FUNC_SCP_SDA1 (MTK_PIN_NO(56) | 3) +#define PINMUX_GPIO56__FUNC_SDA_6306 (MTK_PIN_NO(56) | 4) + +#define PINMUX_GPIO57__FUNC_GPIO57 (MTK_PIN_NO(57) | 0) +#define PINMUX_GPIO57__FUNC_KPROW1 (MTK_PIN_NO(57) | 1) +#define PINMUX_GPIO57__FUNC_PWM_1 (MTK_PIN_NO(57) | 2) +#define PINMUX_GPIO57__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(57) | 3) +#define PINMUX_GPIO57__FUNC_CLKM1 (MTK_PIN_NO(57) | 4) +#define PINMUX_GPIO57__FUNC_IDDIG (MTK_PIN_NO(57) | 5) +#define PINMUX_GPIO57__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(57) | 6) +#define PINMUX_GPIO57__FUNC_MBISTREADEN_TRIGGER (MTK_PIN_NO(57) | 7) + +#define PINMUX_GPIO58__FUNC_GPIO58 (MTK_PIN_NO(58) | 0) +#define PINMUX_GPIO58__FUNC_KPROW0 (MTK_PIN_NO(58) | 1) +#define PINMUX_GPIO58__FUNC_DBG_MON_B28 (MTK_PIN_NO(58) | 7) + +#define PINMUX_GPIO59__FUNC_GPIO59 (MTK_PIN_NO(59) | 0) +#define PINMUX_GPIO59__FUNC_KPCOL0 (MTK_PIN_NO(59) | 1) +#define PINMUX_GPIO59__FUNC_DBG_MON_B27 (MTK_PIN_NO(59) | 7) + +#define PINMUX_GPIO60__FUNC_GPIO60 (MTK_PIN_NO(60) | 0) +#define PINMUX_GPIO60__FUNC_KPCOL1 (MTK_PIN_NO(60) | 1) +#define PINMUX_GPIO60__FUNC_PWM_2 (MTK_PIN_NO(60) | 2) +#define PINMUX_GPIO60__FUNC_UCTS1 (MTK_PIN_NO(60) | 3) +#define PINMUX_GPIO60__FUNC_CLKM2 (MTK_PIN_NO(60) | 4) +#define PINMUX_GPIO60__FUNC_USB_DRVVBUS (MTK_PIN_NO(60) | 5) +#define PINMUX_GPIO60__FUNC_MBISTWRITEEN_TRIGGER (MTK_PIN_NO(60) | 7) + +#define PINMUX_GPIO61__FUNC_GPIO61 (MTK_PIN_NO(61) | 0) +#define PINMUX_GPIO61__FUNC_SCL1 (MTK_PIN_NO(61) | 1) +#define PINMUX_GPIO61__FUNC_SCP_SCL0 (MTK_PIN_NO(61) | 2) +#define PINMUX_GPIO61__FUNC_SCP_SCL1 (MTK_PIN_NO(61) | 3) + +#define PINMUX_GPIO62__FUNC_GPIO62 (MTK_PIN_NO(62) | 0) +#define PINMUX_GPIO62__FUNC_SDA1 (MTK_PIN_NO(62) | 1) +#define PINMUX_GPIO62__FUNC_SCP_SDA0 (MTK_PIN_NO(62) | 2) +#define PINMUX_GPIO62__FUNC_SCP_SDA1 (MTK_PIN_NO(62) | 3) + +#define PINMUX_GPIO63__FUNC_GPIO63 (MTK_PIN_NO(63) | 0) +#define PINMUX_GPIO63__FUNC_SPI2_MI (MTK_PIN_NO(63) | 1) +#define PINMUX_GPIO63__FUNC_SCP_SPI2_MI (MTK_PIN_NO(63) | 2) +#define PINMUX_GPIO63__FUNC_KPCOL2 (MTK_PIN_NO(63) | 3) +#define PINMUX_GPIO63__FUNC_MRG_DI (MTK_PIN_NO(63) | 4) +#define PINMUX_GPIO63__FUNC_MD_URXD0 (MTK_PIN_NO(63) | 5) +#define 
PINMUX_GPIO63__FUNC_CONN_UART0_RXD (MTK_PIN_NO(63) | 6) +#define PINMUX_GPIO63__FUNC_DBG_MON_B26 (MTK_PIN_NO(63) | 7) + +#define PINMUX_GPIO64__FUNC_GPIO64 (MTK_PIN_NO(64) | 0) +#define PINMUX_GPIO64__FUNC_SPI2_CSB (MTK_PIN_NO(64) | 1) +#define PINMUX_GPIO64__FUNC_SCP_SPI2_CS (MTK_PIN_NO(64) | 2) +#define PINMUX_GPIO64__FUNC_KPROW2 (MTK_PIN_NO(64) | 3) +#define PINMUX_GPIO64__FUNC_MRG_SYNC (MTK_PIN_NO(64) | 4) +#define PINMUX_GPIO64__FUNC_MD_UTXD0 (MTK_PIN_NO(64) | 5) +#define PINMUX_GPIO64__FUNC_CONN_UART0_TXD (MTK_PIN_NO(64) | 6) +#define PINMUX_GPIO64__FUNC_DBG_MON_B25 (MTK_PIN_NO(64) | 7) + +#define PINMUX_GPIO65__FUNC_GPIO65 (MTK_PIN_NO(65) | 0) +#define PINMUX_GPIO65__FUNC_SPI2_MO (MTK_PIN_NO(65) | 1) +#define PINMUX_GPIO65__FUNC_SCP_SPI2_MO (MTK_PIN_NO(65) | 2) +#define PINMUX_GPIO65__FUNC_SCP_SDA1 (MTK_PIN_NO(65) | 3) +#define PINMUX_GPIO65__FUNC_MRG_DO (MTK_PIN_NO(65) | 4) +#define PINMUX_GPIO65__FUNC_MD_URXD1 (MTK_PIN_NO(65) | 5) +#define PINMUX_GPIO65__FUNC_PWM_3 (MTK_PIN_NO(65) | 6) + +#define PINMUX_GPIO66__FUNC_GPIO66 (MTK_PIN_NO(66) | 0) +#define PINMUX_GPIO66__FUNC_SPI2_CLK (MTK_PIN_NO(66) | 1) +#define PINMUX_GPIO66__FUNC_SCP_SPI2_CK (MTK_PIN_NO(66) | 2) +#define PINMUX_GPIO66__FUNC_SCP_SCL1 (MTK_PIN_NO(66) | 3) +#define PINMUX_GPIO66__FUNC_MRG_CLK (MTK_PIN_NO(66) | 4) +#define PINMUX_GPIO66__FUNC_MD_UTXD1 (MTK_PIN_NO(66) | 5) +#define PINMUX_GPIO66__FUNC_WIFI_TXD (MTK_PIN_NO(66) | 6) + +#define PINMUX_GPIO67__FUNC_GPIO67 (MTK_PIN_NO(67) | 0) +#define PINMUX_GPIO67__FUNC_I2S3_LRCK (MTK_PIN_NO(67) | 1) +#define PINMUX_GPIO67__FUNC_I2S1_LRCK (MTK_PIN_NO(67) | 2) +#define PINMUX_GPIO67__FUNC_URXD1 (MTK_PIN_NO(67) | 3) +#define PINMUX_GPIO67__FUNC_PCM0_SYNC (MTK_PIN_NO(67) | 4) +#define PINMUX_GPIO67__FUNC_I2S5_LRCK (MTK_PIN_NO(67) | 5) +#define PINMUX_GPIO67__FUNC_ANT_SEL9 (MTK_PIN_NO(67) | 6) +#define PINMUX_GPIO67__FUNC_DBG_MON_B10 (MTK_PIN_NO(67) | 7) + +#define PINMUX_GPIO68__FUNC_GPIO68 (MTK_PIN_NO(68) | 0) +#define PINMUX_GPIO68__FUNC_I2S3_DO (MTK_PIN_NO(68) | 1) +#define PINMUX_GPIO68__FUNC_I2S1_DO (MTK_PIN_NO(68) | 2) +#define PINMUX_GPIO68__FUNC_UTXD1 (MTK_PIN_NO(68) | 3) +#define PINMUX_GPIO68__FUNC_PCM0_DO (MTK_PIN_NO(68) | 4) +#define PINMUX_GPIO68__FUNC_I2S5_DO (MTK_PIN_NO(68) | 5) +#define PINMUX_GPIO68__FUNC_ANT_SEL10 (MTK_PIN_NO(68) | 6) +#define PINMUX_GPIO68__FUNC_DBG_MON_B9 (MTK_PIN_NO(68) | 7) + +#define PINMUX_GPIO69__FUNC_GPIO69 (MTK_PIN_NO(69) | 0) +#define PINMUX_GPIO69__FUNC_I2S3_MCK (MTK_PIN_NO(69) | 1) +#define PINMUX_GPIO69__FUNC_I2S1_MCK (MTK_PIN_NO(69) | 2) +#define PINMUX_GPIO69__FUNC_URTS1 (MTK_PIN_NO(69) | 3) +#define PINMUX_GPIO69__FUNC_AGPS_SYNC (MTK_PIN_NO(69) | 4) +#define PINMUX_GPIO69__FUNC_I2S5_MCK (MTK_PIN_NO(69) | 5) +#define PINMUX_GPIO69__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(69) | 6) +#define PINMUX_GPIO69__FUNC_DBG_MON_B8 (MTK_PIN_NO(69) | 7) + +#define PINMUX_GPIO70__FUNC_GPIO70 (MTK_PIN_NO(70) | 0) +#define PINMUX_GPIO70__FUNC_I2S0_DI (MTK_PIN_NO(70) | 1) +#define PINMUX_GPIO70__FUNC_I2S2_DI (MTK_PIN_NO(70) | 2) +#define PINMUX_GPIO70__FUNC_KPCOL2 (MTK_PIN_NO(70) | 3) +#define PINMUX_GPIO70__FUNC_PCM0_DI (MTK_PIN_NO(70) | 4) +#define PINMUX_GPIO70__FUNC_I2S2_DI2 (MTK_PIN_NO(70) | 5) +#define PINMUX_GPIO70__FUNC_ANT_SEL11 (MTK_PIN_NO(70) | 6) +#define PINMUX_GPIO70__FUNC_DBG_MON_B7 (MTK_PIN_NO(70) | 7) + +#define PINMUX_GPIO71__FUNC_GPIO71 (MTK_PIN_NO(71) | 0) +#define PINMUX_GPIO71__FUNC_I2S3_BCK (MTK_PIN_NO(71) | 1) +#define PINMUX_GPIO71__FUNC_I2S1_BCK (MTK_PIN_NO(71) | 2) +#define PINMUX_GPIO71__FUNC_KPROW2 (MTK_PIN_NO(71) | 3) 
+#define PINMUX_GPIO71__FUNC_PCM0_CLK (MTK_PIN_NO(71) | 4) +#define PINMUX_GPIO71__FUNC_I2S5_BCK (MTK_PIN_NO(71) | 5) +#define PINMUX_GPIO71__FUNC_ANT_SEL12 (MTK_PIN_NO(71) | 6) +#define PINMUX_GPIO71__FUNC_DBG_MON_B6 (MTK_PIN_NO(71) | 7) + +#define PINMUX_GPIO72__FUNC_GPIO72 (MTK_PIN_NO(72) | 0) +#define PINMUX_GPIO72__FUNC_BPI_BUS19_OLAT0 (MTK_PIN_NO(72) | 1) +#define PINMUX_GPIO72__FUNC_CONN_BPI_BUS19_OLAT0 (MTK_PIN_NO(72) | 2) + +#define PINMUX_GPIO73__FUNC_GPIO73 (MTK_PIN_NO(73) | 0) +#define PINMUX_GPIO73__FUNC_BPI_BUS18_PA_VM1 (MTK_PIN_NO(73) | 1) +#define PINMUX_GPIO73__FUNC_CONN_MIPI5_SCLK (MTK_PIN_NO(73) | 2) +#define PINMUX_GPIO73__FUNC_MIPI5_SCLK (MTK_PIN_NO(73) | 3) + +#define PINMUX_GPIO74__FUNC_GPIO74 (MTK_PIN_NO(74) | 0) +#define PINMUX_GPIO74__FUNC_BPI_BUS17_PA_VM0 (MTK_PIN_NO(74) | 1) +#define PINMUX_GPIO74__FUNC_CONN_MIPI5_SDATA (MTK_PIN_NO(74) | 2) +#define PINMUX_GPIO74__FUNC_MIPI5_SDATA (MTK_PIN_NO(74) | 3) + +#define PINMUX_GPIO75__FUNC_GPIO75 (MTK_PIN_NO(75) | 0) +#define PINMUX_GPIO75__FUNC_BPI_BUS20_OLAT1 (MTK_PIN_NO(75) | 1) +#define PINMUX_GPIO75__FUNC_CONN_BPI_BUS20_OLAT1 (MTK_PIN_NO(75) | 2) +#define PINMUX_GPIO75__FUNC_RFIC0_BSI_D2 (MTK_PIN_NO(75) | 3) + +#define PINMUX_GPIO76__FUNC_GPIO76 (MTK_PIN_NO(76) | 0) +#define PINMUX_GPIO76__FUNC_RFIC0_BSI_D1 (MTK_PIN_NO(76) | 1) + +#define PINMUX_GPIO77__FUNC_GPIO77 (MTK_PIN_NO(77) | 0) +#define PINMUX_GPIO77__FUNC_RFIC0_BSI_D0 (MTK_PIN_NO(77) | 1) + +#define PINMUX_GPIO78__FUNC_GPIO78 (MTK_PIN_NO(78) | 0) +#define PINMUX_GPIO78__FUNC_BPI_BUS7 (MTK_PIN_NO(78) | 1) +#define PINMUX_GPIO78__FUNC_DBG_MON_B24 (MTK_PIN_NO(78) | 7) + +#define PINMUX_GPIO79__FUNC_GPIO79 (MTK_PIN_NO(79) | 0) +#define PINMUX_GPIO79__FUNC_BPI_BUS6 (MTK_PIN_NO(79) | 1) +#define PINMUX_GPIO79__FUNC_DBG_MON_B23 (MTK_PIN_NO(79) | 7) + +#define PINMUX_GPIO80__FUNC_GPIO80 (MTK_PIN_NO(80) | 0) +#define PINMUX_GPIO80__FUNC_BPI_BUS8 (MTK_PIN_NO(80) | 1) +#define PINMUX_GPIO80__FUNC_DBG_MON_B22 (MTK_PIN_NO(80) | 7) + +#define PINMUX_GPIO81__FUNC_GPIO81 (MTK_PIN_NO(81) | 0) +#define PINMUX_GPIO81__FUNC_BPI_BUS9 (MTK_PIN_NO(81) | 1) +#define PINMUX_GPIO81__FUNC_DBG_MON_B21 (MTK_PIN_NO(81) | 7) + +#define PINMUX_GPIO82__FUNC_GPIO82 (MTK_PIN_NO(82) | 0) +#define PINMUX_GPIO82__FUNC_BPI_BUS10 (MTK_PIN_NO(82) | 1) +#define PINMUX_GPIO82__FUNC_DBG_MON_B20 (MTK_PIN_NO(82) | 7) + +#define PINMUX_GPIO83__FUNC_GPIO83 (MTK_PIN_NO(83) | 0) +#define PINMUX_GPIO83__FUNC_BPI_BUS11 (MTK_PIN_NO(83) | 1) +#define PINMUX_GPIO83__FUNC_DBG_MON_B19 (MTK_PIN_NO(83) | 7) + +#define PINMUX_GPIO84__FUNC_GPIO84 (MTK_PIN_NO(84) | 0) +#define PINMUX_GPIO84__FUNC_BPI_BUS12 (MTK_PIN_NO(84) | 1) +#define PINMUX_GPIO84__FUNC_CONN_BPI_BUS12 (MTK_PIN_NO(84) | 2) + +#define PINMUX_GPIO85__FUNC_GPIO85 (MTK_PIN_NO(85) | 0) +#define PINMUX_GPIO85__FUNC_BPI_BUS13 (MTK_PIN_NO(85) | 1) +#define PINMUX_GPIO85__FUNC_CONN_BPI_BUS13 (MTK_PIN_NO(85) | 2) + +#define PINMUX_GPIO86__FUNC_GPIO86 (MTK_PIN_NO(86) | 0) +#define PINMUX_GPIO86__FUNC_BPI_BUS14 (MTK_PIN_NO(86) | 1) +#define PINMUX_GPIO86__FUNC_CONN_BPI_BUS14 (MTK_PIN_NO(86) | 2) + +#define PINMUX_GPIO87__FUNC_GPIO87 (MTK_PIN_NO(87) | 0) +#define PINMUX_GPIO87__FUNC_BPI_BUS15 (MTK_PIN_NO(87) | 1) +#define PINMUX_GPIO87__FUNC_CONN_BPI_BUS15 (MTK_PIN_NO(87) | 2) + +#define PINMUX_GPIO88__FUNC_GPIO88 (MTK_PIN_NO(88) | 0) +#define PINMUX_GPIO88__FUNC_BPI_BUS16 (MTK_PIN_NO(88) | 1) +#define PINMUX_GPIO88__FUNC_CONN_BPI_BUS16 (MTK_PIN_NO(88) | 2) + +#define PINMUX_GPIO89__FUNC_GPIO89 (MTK_PIN_NO(89) | 0) +#define PINMUX_GPIO89__FUNC_BPI_BUS5 
(MTK_PIN_NO(89) | 1) +#define PINMUX_GPIO89__FUNC_DBG_MON_B18 (MTK_PIN_NO(89) | 7) + +#define PINMUX_GPIO90__FUNC_GPIO90 (MTK_PIN_NO(90) | 0) +#define PINMUX_GPIO90__FUNC_BPI_BUS4 (MTK_PIN_NO(90) | 1) +#define PINMUX_GPIO90__FUNC_DBG_MON_B17 (MTK_PIN_NO(90) | 7) + +#define PINMUX_GPIO91__FUNC_GPIO91 (MTK_PIN_NO(91) | 0) +#define PINMUX_GPIO91__FUNC_BPI_BUS3 (MTK_PIN_NO(91) | 1) + +#define PINMUX_GPIO92__FUNC_GPIO92 (MTK_PIN_NO(92) | 0) +#define PINMUX_GPIO92__FUNC_BPI_BUS2 (MTK_PIN_NO(92) | 1) +#define PINMUX_GPIO92__FUNC_DBG_MON_B16 (MTK_PIN_NO(92) | 7) + +#define PINMUX_GPIO93__FUNC_GPIO93 (MTK_PIN_NO(93) | 0) +#define PINMUX_GPIO93__FUNC_BPI_BUS1 (MTK_PIN_NO(93) | 1) + +#define PINMUX_GPIO94__FUNC_GPIO94 (MTK_PIN_NO(94) | 0) +#define PINMUX_GPIO94__FUNC_BPI_BUS0 (MTK_PIN_NO(94) | 1) +#define PINMUX_GPIO94__FUNC_DBG_MON_B15 (MTK_PIN_NO(94) | 7) + +#define PINMUX_GPIO95__FUNC_GPIO95 (MTK_PIN_NO(95) | 0) +#define PINMUX_GPIO95__FUNC_MIPI0_SDATA (MTK_PIN_NO(95) | 1) + +#define PINMUX_GPIO96__FUNC_GPIO96 (MTK_PIN_NO(96) | 0) +#define PINMUX_GPIO96__FUNC_MIPI0_SCLK (MTK_PIN_NO(96) | 1) + +#define PINMUX_GPIO97__FUNC_GPIO97 (MTK_PIN_NO(97) | 0) +#define PINMUX_GPIO97__FUNC_MIPI1_SDATA (MTK_PIN_NO(97) | 1) + +#define PINMUX_GPIO98__FUNC_GPIO98 (MTK_PIN_NO(98) | 0) +#define PINMUX_GPIO98__FUNC_MIPI1_SCLK (MTK_PIN_NO(98) | 1) + +#define PINMUX_GPIO99__FUNC_GPIO99 (MTK_PIN_NO(99) | 0) +#define PINMUX_GPIO99__FUNC_MIPI2_SCLK (MTK_PIN_NO(99) | 1) +#define PINMUX_GPIO99__FUNC_DBG_MON_B14 (MTK_PIN_NO(99) | 7) + +#define PINMUX_GPIO100__FUNC_GPIO100 (MTK_PIN_NO(100) | 0) +#define PINMUX_GPIO100__FUNC_MIPI2_SDATA (MTK_PIN_NO(100) | 1) +#define PINMUX_GPIO100__FUNC_DBG_MON_B13 (MTK_PIN_NO(100) | 7) + +#define PINMUX_GPIO101__FUNC_GPIO101 (MTK_PIN_NO(101) | 0) +#define PINMUX_GPIO101__FUNC_MIPI3_SCLK (MTK_PIN_NO(101) | 1) +#define PINMUX_GPIO101__FUNC_DBG_MON_B12 (MTK_PIN_NO(101) | 7) + +#define PINMUX_GPIO102__FUNC_GPIO102 (MTK_PIN_NO(102) | 0) +#define PINMUX_GPIO102__FUNC_MIPI3_SDATA (MTK_PIN_NO(102) | 1) +#define PINMUX_GPIO102__FUNC_DBG_MON_B11 (MTK_PIN_NO(102) | 7) + +#define PINMUX_GPIO103__FUNC_GPIO103 (MTK_PIN_NO(103) | 0) +#define PINMUX_GPIO103__FUNC_MIPI4_SCLK (MTK_PIN_NO(103) | 1) +#define PINMUX_GPIO103__FUNC_CONN_MIPI4_SCLK (MTK_PIN_NO(103) | 2) + +#define PINMUX_GPIO104__FUNC_GPIO104 (MTK_PIN_NO(104) | 0) +#define PINMUX_GPIO104__FUNC_MIPI4_SDATA (MTK_PIN_NO(104) | 1) +#define PINMUX_GPIO104__FUNC_CONN_MIPI4_SDATA (MTK_PIN_NO(104) | 2) + +#define PINMUX_GPIO105__FUNC_GPIO105 (MTK_PIN_NO(105) | 0) +#define PINMUX_GPIO105__FUNC_BPI_BUS22_OLAT3 (MTK_PIN_NO(105) | 1) +#define PINMUX_GPIO105__FUNC_CONN_BPI_BUS22_OLAT3 (MTK_PIN_NO(105) | 2) + +#define PINMUX_GPIO106__FUNC_GPIO106 (MTK_PIN_NO(106) | 0) +#define PINMUX_GPIO106__FUNC_BPI_BUS21_OLAT2 (MTK_PIN_NO(106) | 1) +#define PINMUX_GPIO106__FUNC_CONN_BPI_BUS21_OLAT2 (MTK_PIN_NO(106) | 2) + +#define PINMUX_GPIO107__FUNC_GPIO107 (MTK_PIN_NO(107) | 0) +#define PINMUX_GPIO107__FUNC_BPI_BUS24_ANT1 (MTK_PIN_NO(107) | 1) +#define PINMUX_GPIO107__FUNC_CONN_BPI_BUS24_ANT1 (MTK_PIN_NO(107) | 2) + +#define PINMUX_GPIO108__FUNC_GPIO108 (MTK_PIN_NO(108) | 0) +#define PINMUX_GPIO108__FUNC_BPI_BUS25_ANT2 (MTK_PIN_NO(108) | 1) +#define PINMUX_GPIO108__FUNC_CONN_BPI_BUS25_ANT2 (MTK_PIN_NO(108) | 2) + +#define PINMUX_GPIO109__FUNC_GPIO109 (MTK_PIN_NO(109) | 0) +#define PINMUX_GPIO109__FUNC_BPI_BUS23_ANT0 (MTK_PIN_NO(109) | 1) +#define PINMUX_GPIO109__FUNC_CONN_BPI_BUS23_ANT0 (MTK_PIN_NO(109) | 2) + +#define PINMUX_GPIO110__FUNC_GPIO110 (MTK_PIN_NO(110) | 0) 
+#define PINMUX_GPIO110__FUNC_SCL4 (MTK_PIN_NO(110) | 1) + +#define PINMUX_GPIO111__FUNC_GPIO111 (MTK_PIN_NO(111) | 0) +#define PINMUX_GPIO111__FUNC_SDA4 (MTK_PIN_NO(111) | 1) + +#define PINMUX_GPIO112__FUNC_GPIO112 (MTK_PIN_NO(112) | 0) +#define PINMUX_GPIO112__FUNC_SCL2 (MTK_PIN_NO(112) | 1) + +#define PINMUX_GPIO113__FUNC_GPIO113 (MTK_PIN_NO(113) | 0) +#define PINMUX_GPIO113__FUNC_SDA2 (MTK_PIN_NO(113) | 1) + +#define PINMUX_GPIO114__FUNC_GPIO114 (MTK_PIN_NO(114) | 0) +#define PINMUX_GPIO114__FUNC_CLKM0 (MTK_PIN_NO(114) | 1) +#define PINMUX_GPIO114__FUNC_SPI3_MI (MTK_PIN_NO(114) | 2) +#define PINMUX_GPIO114__FUNC_DBG_MON_B5 (MTK_PIN_NO(114) | 7) + +#define PINMUX_GPIO115__FUNC_GPIO115 (MTK_PIN_NO(115) | 0) +#define PINMUX_GPIO115__FUNC_CLKM1 (MTK_PIN_NO(115) | 1) +#define PINMUX_GPIO115__FUNC_SPI3_CSB (MTK_PIN_NO(115) | 2) +#define PINMUX_GPIO115__FUNC_DBG_MON_B4 (MTK_PIN_NO(115) | 7) + +#define PINMUX_GPIO116__FUNC_GPIO116 (MTK_PIN_NO(116) | 0) +#define PINMUX_GPIO116__FUNC_CMMCLK0 (MTK_PIN_NO(116) | 1) +#define PINMUX_GPIO116__FUNC_DBG_MON_B3 (MTK_PIN_NO(116) | 7) + +#define PINMUX_GPIO117__FUNC_GPIO117 (MTK_PIN_NO(117) | 0) +#define PINMUX_GPIO117__FUNC_CMMCLK1 (MTK_PIN_NO(117) | 1) +#define PINMUX_GPIO117__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(117) | 2) +#define PINMUX_GPIO117__FUNC_DBG_MON_B2 (MTK_PIN_NO(117) | 7) + +#define PINMUX_GPIO118__FUNC_GPIO118 (MTK_PIN_NO(118) | 0) +#define PINMUX_GPIO118__FUNC_CLKM2 (MTK_PIN_NO(118) | 1) +#define PINMUX_GPIO118__FUNC_SPI3_MO (MTK_PIN_NO(118) | 2) +#define PINMUX_GPIO118__FUNC_DBG_MON_B1 (MTK_PIN_NO(118) | 7) + +#define PINMUX_GPIO119__FUNC_GPIO119 (MTK_PIN_NO(119) | 0) +#define PINMUX_GPIO119__FUNC_CLKM3 (MTK_PIN_NO(119) | 1) +#define PINMUX_GPIO119__FUNC_SPI3_CLK (MTK_PIN_NO(119) | 2) +#define PINMUX_GPIO119__FUNC_DBG_MON_B0 (MTK_PIN_NO(119) | 7) + +#define PINMUX_GPIO120__FUNC_GPIO120 (MTK_PIN_NO(120) | 0) +#define PINMUX_GPIO120__FUNC_CMMCLK2 (MTK_PIN_NO(120) | 1) +#define PINMUX_GPIO120__FUNC_CLKM2 (MTK_PIN_NO(120) | 2) +#define PINMUX_GPIO120__FUNC_ANT_SEL12 (MTK_PIN_NO(120) | 6) +#define PINMUX_GPIO120__FUNC_TP_UCTS2_AO (MTK_PIN_NO(120) | 7) + +#define PINMUX_GPIO121__FUNC_GPIO121 (MTK_PIN_NO(121) | 0) +#define PINMUX_GPIO121__FUNC_CMMCLK3 (MTK_PIN_NO(121) | 1) +#define PINMUX_GPIO121__FUNC_CLKM3 (MTK_PIN_NO(121) | 2) +#define PINMUX_GPIO121__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(121) | 3) +#define PINMUX_GPIO121__FUNC_ANT_SEL11 (MTK_PIN_NO(121) | 6) +#define PINMUX_GPIO121__FUNC_TP_URTS2_AO (MTK_PIN_NO(121) | 7) + +#define PINMUX_GPIO122__FUNC_GPIO122 (MTK_PIN_NO(122) | 0) +#define PINMUX_GPIO122__FUNC_CMVREF1 (MTK_PIN_NO(122) | 1) +#define PINMUX_GPIO122__FUNC_PCM0_SYNC (MTK_PIN_NO(122) | 2) +#define PINMUX_GPIO122__FUNC_SRCLKENAI1 (MTK_PIN_NO(122) | 3) +#define PINMUX_GPIO122__FUNC_AGPS_SYNC (MTK_PIN_NO(122) | 4) +#define PINMUX_GPIO122__FUNC_PWM_1 (MTK_PIN_NO(122) | 5) +#define PINMUX_GPIO122__FUNC_ANT_SEL9 (MTK_PIN_NO(122) | 6) +#define PINMUX_GPIO122__FUNC_TP_UCTS1_AO (MTK_PIN_NO(122) | 7) + +#define PINMUX_GPIO123__FUNC_GPIO123 (MTK_PIN_NO(123) | 0) +#define PINMUX_GPIO123__FUNC_PCM0_DI (MTK_PIN_NO(123) | 2) +#define PINMUX_GPIO123__FUNC_ADSP_JTAG_TRSTN (MTK_PIN_NO(123) | 3) +#define PINMUX_GPIO123__FUNC_VPU_UDI_NTRST (MTK_PIN_NO(123) | 4) +#define PINMUX_GPIO123__FUNC_SPM_JTAG_TRSTN (MTK_PIN_NO(123) | 5) +#define PINMUX_GPIO123__FUNC_SSPM_JTAG_TRSTN (MTK_PIN_NO(123) | 6) + +#define PINMUX_GPIO124__FUNC_GPIO124 (MTK_PIN_NO(124) | 0) +#define PINMUX_GPIO124__FUNC_CMVREF2 (MTK_PIN_NO(124) | 1) +#define PINMUX_GPIO124__FUNC_PCM0_CLK 
(MTK_PIN_NO(124) | 2) +#define PINMUX_GPIO124__FUNC_MD_INT0 (MTK_PIN_NO(124) | 3) +#define PINMUX_GPIO124__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(124) | 4) +#define PINMUX_GPIO124__FUNC_PWM_2 (MTK_PIN_NO(124) | 5) +#define PINMUX_GPIO124__FUNC_ANT_SEL10 (MTK_PIN_NO(124) | 6) +#define PINMUX_GPIO124__FUNC_TP_URTS1_AO (MTK_PIN_NO(124) | 7) + +#define PINMUX_GPIO125__FUNC_GPIO125 (MTK_PIN_NO(125) | 0) +#define PINMUX_GPIO125__FUNC_CMVREF3 (MTK_PIN_NO(125) | 1) +#define PINMUX_GPIO125__FUNC_PCM0_DO (MTK_PIN_NO(125) | 2) +#define PINMUX_GPIO125__FUNC_ADSP_JTAG_TMS (MTK_PIN_NO(125) | 3) +#define PINMUX_GPIO125__FUNC_VPU_UDI_TMS (MTK_PIN_NO(125) | 4) +#define PINMUX_GPIO125__FUNC_SPM_JTAG_TMS (MTK_PIN_NO(125) | 5) +#define PINMUX_GPIO125__FUNC_SSPM_JTAG_TMS (MTK_PIN_NO(125) | 6) + +#define PINMUX_GPIO126__FUNC_GPIO126 (MTK_PIN_NO(126) | 0) +#define PINMUX_GPIO126__FUNC_CMVREF4 (MTK_PIN_NO(126) | 1) +#define PINMUX_GPIO126__FUNC_CMFLASH0 (MTK_PIN_NO(126) | 2) +#define PINMUX_GPIO126__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(126) | 6) + +#define PINMUX_GPIO127__FUNC_GPIO127 (MTK_PIN_NO(127) | 0) +#define PINMUX_GPIO127__FUNC_CMVREF0 (MTK_PIN_NO(127) | 1) +#define PINMUX_GPIO127__FUNC_CMFLASH1 (MTK_PIN_NO(127) | 2) +#define PINMUX_GPIO127__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(127) | 6) + +#define PINMUX_GPIO128__FUNC_GPIO128 (MTK_PIN_NO(128) | 0) +#define PINMUX_GPIO128__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(128) | 1) +#define PINMUX_GPIO128__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(128) | 2) +#define PINMUX_GPIO128__FUNC_CCU_JTAG_TRST (MTK_PIN_NO(128) | 3) +#define PINMUX_GPIO128__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(128) | 4) +#define PINMUX_GPIO128__FUNC_SCP_JTAG_TRSTN (MTK_PIN_NO(128) | 5) +#define PINMUX_GPIO128__FUNC_LVTS_FOUT (MTK_PIN_NO(128) | 6) +#define PINMUX_GPIO128__FUNC_DBG_MON_A3 (MTK_PIN_NO(128) | 7) + +#define PINMUX_GPIO129__FUNC_GPIO129 (MTK_PIN_NO(129) | 0) +#define PINMUX_GPIO129__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(129) | 1) +#define PINMUX_GPIO129__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(129) | 2) +#define PINMUX_GPIO129__FUNC_CCU_JTAG_TCK (MTK_PIN_NO(129) | 3) +#define PINMUX_GPIO129__FUNC_CONN_DSP_JCK (MTK_PIN_NO(129) | 4) +#define PINMUX_GPIO129__FUNC_SCP_JTAG_TCK (MTK_PIN_NO(129) | 5) +#define PINMUX_GPIO129__FUNC_LVTS_SDO (MTK_PIN_NO(129) | 6) +#define PINMUX_GPIO129__FUNC_DBG_MON_A4 (MTK_PIN_NO(129) | 7) + +#define PINMUX_GPIO130__FUNC_GPIO130 (MTK_PIN_NO(130) | 0) +#define PINMUX_GPIO130__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(130) | 1) +#define PINMUX_GPIO130__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(130) | 2) +#define PINMUX_GPIO130__FUNC_LVTS_26M (MTK_PIN_NO(130) | 6) +#define PINMUX_GPIO130__FUNC_DBG_MON_A5 (MTK_PIN_NO(130) | 7) + +#define PINMUX_GPIO131__FUNC_GPIO131 (MTK_PIN_NO(131) | 0) +#define PINMUX_GPIO131__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(131) | 1) +#define PINMUX_GPIO131__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(131) | 2) +#define PINMUX_GPIO131__FUNC_CCU_JTAG_TDI (MTK_PIN_NO(131) | 3) +#define PINMUX_GPIO131__FUNC_CONN_DSP_JDI (MTK_PIN_NO(131) | 4) +#define PINMUX_GPIO131__FUNC_SCP_JTAG_TDI (MTK_PIN_NO(131) | 5) +#define PINMUX_GPIO131__FUNC_LVTS_SCK (MTK_PIN_NO(131) | 6) +#define PINMUX_GPIO131__FUNC_DBG_MON_A0 (MTK_PIN_NO(131) | 7) + +#define PINMUX_GPIO132__FUNC_GPIO132 (MTK_PIN_NO(132) | 0) +#define PINMUX_GPIO132__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(132) | 1) +#define PINMUX_GPIO132__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(132) | 2) +#define PINMUX_GPIO132__FUNC_CCU_JTAG_TMS (MTK_PIN_NO(132) | 3) +#define PINMUX_GPIO132__FUNC_CONN_DSP_JMS (MTK_PIN_NO(132) | 4) +#define PINMUX_GPIO132__FUNC_SCP_JTAG_TMS (MTK_PIN_NO(132) | 5) +#define 
PINMUX_GPIO132__FUNC_LVTS_SDI (MTK_PIN_NO(132) | 6) +#define PINMUX_GPIO132__FUNC_DBG_MON_A1 (MTK_PIN_NO(132) | 7) + +#define PINMUX_GPIO133__FUNC_GPIO133 (MTK_PIN_NO(133) | 0) +#define PINMUX_GPIO133__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(133) | 1) +#define PINMUX_GPIO133__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(133) | 2) +#define PINMUX_GPIO133__FUNC_CCU_JTAG_TDO (MTK_PIN_NO(133) | 3) +#define PINMUX_GPIO133__FUNC_CONN_DSP_JDO (MTK_PIN_NO(133) | 4) +#define PINMUX_GPIO133__FUNC_SCP_JTAG_TDO (MTK_PIN_NO(133) | 5) +#define PINMUX_GPIO133__FUNC_LVTS_SCF (MTK_PIN_NO(133) | 6) +#define PINMUX_GPIO133__FUNC_DBG_MON_A2 (MTK_PIN_NO(133) | 7) + +#define PINMUX_GPIO134__FUNC_GPIO134 (MTK_PIN_NO(134) | 0) +#define PINMUX_GPIO134__FUNC_MSDC1_CLK (MTK_PIN_NO(134) | 1) +#define PINMUX_GPIO134__FUNC_PCM1_CLK (MTK_PIN_NO(134) | 2) +#define PINMUX_GPIO134__FUNC_SPI5_B_MI (MTK_PIN_NO(134) | 3) +#define PINMUX_GPIO134__FUNC_UDI_TCK (MTK_PIN_NO(134) | 4) +#define PINMUX_GPIO134__FUNC_CONN_DSP_JCK (MTK_PIN_NO(134) | 5) +#define PINMUX_GPIO134__FUNC_IPU_JTAG_TCK (MTK_PIN_NO(134) | 6) +#define PINMUX_GPIO134__FUNC_JTCK_SEL3 (MTK_PIN_NO(134) | 7) + +#define PINMUX_GPIO135__FUNC_GPIO135 (MTK_PIN_NO(135) | 0) +#define PINMUX_GPIO135__FUNC_MSDC1_CMD (MTK_PIN_NO(135) | 1) +#define PINMUX_GPIO135__FUNC_PCM1_SYNC (MTK_PIN_NO(135) | 2) +#define PINMUX_GPIO135__FUNC_SPI5_B_CSB (MTK_PIN_NO(135) | 3) +#define PINMUX_GPIO135__FUNC_UDI_TMS (MTK_PIN_NO(135) | 4) +#define PINMUX_GPIO135__FUNC_CONN_DSP_JMS (MTK_PIN_NO(135) | 5) +#define PINMUX_GPIO135__FUNC_IPU_JTAG_TMS (MTK_PIN_NO(135) | 6) +#define PINMUX_GPIO135__FUNC_JTMS_SEL3 (MTK_PIN_NO(135) | 7) + +#define PINMUX_GPIO136__FUNC_GPIO136 (MTK_PIN_NO(136) | 0) +#define PINMUX_GPIO136__FUNC_MSDC1_DAT3 (MTK_PIN_NO(136) | 1) +#define PINMUX_GPIO136__FUNC_PCM1_DI (MTK_PIN_NO(136) | 2) +#define PINMUX_GPIO136__FUNC_SPI5_B_MO (MTK_PIN_NO(136) | 3) +#define PINMUX_GPIO136__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(136) | 4) +#define PINMUX_GPIO136__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(136) | 5) +#define PINMUX_GPIO136__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(136) | 6) + +#define PINMUX_GPIO137__FUNC_GPIO137 (MTK_PIN_NO(137) | 0) +#define PINMUX_GPIO137__FUNC_MSDC1_DAT0 (MTK_PIN_NO(137) | 1) +#define PINMUX_GPIO137__FUNC_PCM1_DO0 (MTK_PIN_NO(137) | 2) +#define PINMUX_GPIO137__FUNC_SPI5_B_CLK (MTK_PIN_NO(137) | 3) +#define PINMUX_GPIO137__FUNC_UDI_TDI (MTK_PIN_NO(137) | 4) +#define PINMUX_GPIO137__FUNC_CONN_DSP_JDI (MTK_PIN_NO(137) | 5) +#define PINMUX_GPIO137__FUNC_IPU_JTAG_TDI (MTK_PIN_NO(137) | 6) +#define PINMUX_GPIO137__FUNC_JTDI_SEL3 (MTK_PIN_NO(137) | 7) + +#define PINMUX_GPIO138__FUNC_GPIO138 (MTK_PIN_NO(138) | 0) +#define PINMUX_GPIO138__FUNC_MSDC1_DAT2 (MTK_PIN_NO(138) | 1) +#define PINMUX_GPIO138__FUNC_PCM1_DO2 (MTK_PIN_NO(138) | 2) +#define PINMUX_GPIO138__FUNC_ANT_SEL11 (MTK_PIN_NO(138) | 3) +#define PINMUX_GPIO138__FUNC_UDI_NTRST (MTK_PIN_NO(138) | 4) +#define PINMUX_GPIO138__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(138) | 5) +#define PINMUX_GPIO138__FUNC_IPU_JTAG_TRST (MTK_PIN_NO(138) | 6) +#define PINMUX_GPIO138__FUNC_JTRSTN_SEL3 (MTK_PIN_NO(138) | 7) + +#define PINMUX_GPIO139__FUNC_GPIO139 (MTK_PIN_NO(139) | 0) +#define PINMUX_GPIO139__FUNC_MSDC1_DAT1 (MTK_PIN_NO(139) | 1) +#define PINMUX_GPIO139__FUNC_PCM1_DO1 (MTK_PIN_NO(139) | 2) +#define PINMUX_GPIO139__FUNC_ANT_SEL12 (MTK_PIN_NO(139) | 3) +#define PINMUX_GPIO139__FUNC_UDI_TDO (MTK_PIN_NO(139) | 4) +#define PINMUX_GPIO139__FUNC_CONN_DSP_JDO (MTK_PIN_NO(139) | 5) +#define PINMUX_GPIO139__FUNC_IPU_JTAG_TDO (MTK_PIN_NO(139) | 6) +#define 
PINMUX_GPIO139__FUNC_JTDO_SEL3 (MTK_PIN_NO(139) | 7) + +#define PINMUX_GPIO140__FUNC_GPIO140 (MTK_PIN_NO(140) | 0) +#define PINMUX_GPIO140__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(140) | 1) +#define PINMUX_GPIO140__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(140) | 2) +#define PINMUX_GPIO140__FUNC_ADSP_URXD0 (MTK_PIN_NO(140) | 3) +#define PINMUX_GPIO140__FUNC_SCL_6306 (MTK_PIN_NO(140) | 4) +#define PINMUX_GPIO140__FUNC_PTA_RXD (MTK_PIN_NO(140) | 5) +#define PINMUX_GPIO140__FUNC_SSPM_URXD_AO (MTK_PIN_NO(140) | 6) + +#define PINMUX_GPIO141__FUNC_GPIO141 (MTK_PIN_NO(141) | 0) +#define PINMUX_GPIO141__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(141) | 1) +#define PINMUX_GPIO141__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(141) | 2) +#define PINMUX_GPIO141__FUNC_ADSP_UTXD0 (MTK_PIN_NO(141) | 3) +#define PINMUX_GPIO141__FUNC_SDA_6306 (MTK_PIN_NO(141) | 4) +#define PINMUX_GPIO141__FUNC_PTA_TXD (MTK_PIN_NO(141) | 5) +#define PINMUX_GPIO141__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(141) | 6) + +#define PINMUX_GPIO142__FUNC_GPIO142 (MTK_PIN_NO(142) | 0) +#define PINMUX_GPIO142__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(142) | 1) +#define PINMUX_GPIO142__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(142) | 2) + +#define PINMUX_GPIO143__FUNC_GPIO143 (MTK_PIN_NO(143) | 0) +#define PINMUX_GPIO143__FUNC_AUD_DAT_MOSI2 (MTK_PIN_NO(143) | 1) +#define PINMUX_GPIO143__FUNC_DBG_MON_A9 (MTK_PIN_NO(143) | 7) + +#define PINMUX_GPIO144__FUNC_GPIO144 (MTK_PIN_NO(144) | 0) +#define PINMUX_GPIO144__FUNC_AUD_NLE_MOSI1 (MTK_PIN_NO(144) | 1) +#define PINMUX_GPIO144__FUNC_AUD_CLK_MISO (MTK_PIN_NO(144) | 2) +#define PINMUX_GPIO144__FUNC_I2S2_MCK (MTK_PIN_NO(144) | 3) +#define PINMUX_GPIO144__FUNC_UDI_TCK (MTK_PIN_NO(144) | 5) +#define PINMUX_GPIO144__FUNC_UFS_UNIPRO_SDA (MTK_PIN_NO(144) | 6) +#define PINMUX_GPIO144__FUNC_DBG_MON_A10 (MTK_PIN_NO(144) | 7) + +#define PINMUX_GPIO145__FUNC_GPIO145 (MTK_PIN_NO(145) | 0) +#define PINMUX_GPIO145__FUNC_AUD_NLE_MOSI0 (MTK_PIN_NO(145) | 1) +#define PINMUX_GPIO145__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(145) | 2) +#define PINMUX_GPIO145__FUNC_I2S2_BCK (MTK_PIN_NO(145) | 3) +#define PINMUX_GPIO145__FUNC_UDI_TMS (MTK_PIN_NO(145) | 5) +#define PINMUX_GPIO145__FUNC_DBG_MON_A11 (MTK_PIN_NO(145) | 7) + +#define PINMUX_GPIO146__FUNC_GPIO146 (MTK_PIN_NO(146) | 0) +#define PINMUX_GPIO146__FUNC_AUD_DAT_MISO2 (MTK_PIN_NO(146) | 1) +#define PINMUX_GPIO146__FUNC_I2S2_DI2 (MTK_PIN_NO(146) | 3) +#define PINMUX_GPIO146__FUNC_UDI_TDO (MTK_PIN_NO(146) | 5) +#define PINMUX_GPIO146__FUNC_DBG_MON_A14 (MTK_PIN_NO(146) | 7) + +#define PINMUX_GPIO147__FUNC_GPIO147 (MTK_PIN_NO(147) | 0) +#define PINMUX_GPIO147__FUNC_ANT_SEL0 (MTK_PIN_NO(147) | 1) +#define PINMUX_GPIO147__FUNC_PWM_3 (MTK_PIN_NO(147) | 2) + +#define PINMUX_GPIO148__FUNC_GPIO148 (MTK_PIN_NO(148) | 0) +#define PINMUX_GPIO148__FUNC_ANT_SEL1 (MTK_PIN_NO(148) | 1) +#define PINMUX_GPIO148__FUNC_SPI0_B_MI (MTK_PIN_NO(148) | 2) +#define PINMUX_GPIO148__FUNC_SSPM_URXD_AO (MTK_PIN_NO(148) | 3) +#define PINMUX_GPIO148__FUNC_TP_UCTS2_AO (MTK_PIN_NO(148) | 5) +#define PINMUX_GPIO148__FUNC_CLKM0 (MTK_PIN_NO(148) | 6) + +#define PINMUX_GPIO149__FUNC_GPIO149 (MTK_PIN_NO(149) | 0) +#define PINMUX_GPIO149__FUNC_ANT_SEL2 (MTK_PIN_NO(149) | 1) +#define PINMUX_GPIO149__FUNC_SPI0_B_CSB (MTK_PIN_NO(149) | 2) +#define PINMUX_GPIO149__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(149) | 3) +#define PINMUX_GPIO149__FUNC_TP_URTS2_AO (MTK_PIN_NO(149) | 5) +#define PINMUX_GPIO149__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(149) | 6) + +#define PINMUX_GPIO150__FUNC_GPIO150 (MTK_PIN_NO(150) | 0) +#define PINMUX_GPIO150__FUNC_ANT_SEL3 
(MTK_PIN_NO(150) | 1) +#define PINMUX_GPIO150__FUNC_SPI0_B_MO (MTK_PIN_NO(150) | 2) +#define PINMUX_GPIO150__FUNC_UCTS1 (MTK_PIN_NO(150) | 3) +#define PINMUX_GPIO150__FUNC_TP_UCTS1_AO (MTK_PIN_NO(150) | 5) +#define PINMUX_GPIO150__FUNC_IDDIG (MTK_PIN_NO(150) | 6) +#define PINMUX_GPIO150__FUNC_SCL9 (MTK_PIN_NO(150) | 7) + +#define PINMUX_GPIO151__FUNC_GPIO151 (MTK_PIN_NO(151) | 0) +#define PINMUX_GPIO151__FUNC_ANT_SEL4 (MTK_PIN_NO(151) | 1) +#define PINMUX_GPIO151__FUNC_SPI0_B_CLK (MTK_PIN_NO(151) | 2) +#define PINMUX_GPIO151__FUNC_URTS1 (MTK_PIN_NO(151) | 3) +#define PINMUX_GPIO151__FUNC_TP_URTS1_AO (MTK_PIN_NO(151) | 5) +#define PINMUX_GPIO151__FUNC_USB_DRVVBUS (MTK_PIN_NO(151) | 6) +#define PINMUX_GPIO151__FUNC_SDA9 (MTK_PIN_NO(151) | 7) + +#define PINMUX_GPIO152__FUNC_GPIO152 (MTK_PIN_NO(152) | 0) +#define PINMUX_GPIO152__FUNC_ANT_SEL5 (MTK_PIN_NO(152) | 1) +#define PINMUX_GPIO152__FUNC_SPI1_B_MI (MTK_PIN_NO(152) | 2) +#define PINMUX_GPIO152__FUNC_CLKM3 (MTK_PIN_NO(152) | 3) +#define PINMUX_GPIO152__FUNC_TP_URXD1_AO (MTK_PIN_NO(152) | 5) +#define PINMUX_GPIO152__FUNC_SCP_SPI1_B_MI (MTK_PIN_NO(152) | 6) +#define PINMUX_GPIO152__FUNC_SCL8 (MTK_PIN_NO(152) | 7) + +#define PINMUX_GPIO153__FUNC_GPIO153 (MTK_PIN_NO(153) | 0) +#define PINMUX_GPIO153__FUNC_ANT_SEL6 (MTK_PIN_NO(153) | 1) +#define PINMUX_GPIO153__FUNC_SPI1_B_CSB (MTK_PIN_NO(153) | 2) +#define PINMUX_GPIO153__FUNC_SRCLKENAI0 (MTK_PIN_NO(153) | 3) +#define PINMUX_GPIO153__FUNC_PWM_0 (MTK_PIN_NO(153) | 4) +#define PINMUX_GPIO153__FUNC_TP_UTXD1_AO (MTK_PIN_NO(153) | 5) +#define PINMUX_GPIO153__FUNC_SCP_SPI1_B_CS (MTK_PIN_NO(153) | 6) +#define PINMUX_GPIO153__FUNC_SDA8 (MTK_PIN_NO(153) | 7) + +#define PINMUX_GPIO154__FUNC_GPIO154 (MTK_PIN_NO(154) | 0) +#define PINMUX_GPIO154__FUNC_ANT_SEL7 (MTK_PIN_NO(154) | 1) +#define PINMUX_GPIO154__FUNC_SPI1_B_MO (MTK_PIN_NO(154) | 2) +#define PINMUX_GPIO154__FUNC_SRCLKENAI1 (MTK_PIN_NO(154) | 3) +#define PINMUX_GPIO154__FUNC_TP_URXD2_AO (MTK_PIN_NO(154) | 5) +#define PINMUX_GPIO154__FUNC_SCP_SPI1_B_MO (MTK_PIN_NO(154) | 6) + +#define PINMUX_GPIO155__FUNC_GPIO155 (MTK_PIN_NO(155) | 0) +#define PINMUX_GPIO155__FUNC_ANT_SEL8 (MTK_PIN_NO(155) | 1) +#define PINMUX_GPIO155__FUNC_SPI1_B_CLK (MTK_PIN_NO(155) | 2) +#define PINMUX_GPIO155__FUNC_MD_INT0 (MTK_PIN_NO(155) | 3) +#define PINMUX_GPIO155__FUNC_TP_UTXD2_AO (MTK_PIN_NO(155) | 5) +#define PINMUX_GPIO155__FUNC_SCP_SPI1_B_CK (MTK_PIN_NO(155) | 6) +#define PINMUX_GPIO155__FUNC_DBG_MON_A15 (MTK_PIN_NO(155) | 7) + +#define PINMUX_GPIO156__FUNC_GPIO156 (MTK_PIN_NO(156) | 0) +#define PINMUX_GPIO156__FUNC_CONN_TOP_CLK (MTK_PIN_NO(156) | 1) +#define PINMUX_GPIO156__FUNC_AUXIF_CLK0 (MTK_PIN_NO(156) | 2) +#define PINMUX_GPIO156__FUNC_DBG_MON_A16 (MTK_PIN_NO(156) | 7) + +#define PINMUX_GPIO157__FUNC_GPIO157 (MTK_PIN_NO(157) | 0) +#define PINMUX_GPIO157__FUNC_CONN_TOP_DATA (MTK_PIN_NO(157) | 1) +#define PINMUX_GPIO157__FUNC_AUXIF_ST0 (MTK_PIN_NO(157) | 2) +#define PINMUX_GPIO157__FUNC_DBG_MON_A17 (MTK_PIN_NO(157) | 7) + +#define PINMUX_GPIO158__FUNC_GPIO158 (MTK_PIN_NO(158) | 0) +#define PINMUX_GPIO158__FUNC_CONN_HRST_B (MTK_PIN_NO(158) | 1) +#define PINMUX_GPIO158__FUNC_DBG_MON_A18 (MTK_PIN_NO(158) | 7) + +#define PINMUX_GPIO159__FUNC_GPIO159 (MTK_PIN_NO(159) | 0) +#define PINMUX_GPIO159__FUNC_CONN_WB_PTA (MTK_PIN_NO(159) | 1) +#define PINMUX_GPIO159__FUNC_DBG_MON_A19 (MTK_PIN_NO(159) | 7) + +#define PINMUX_GPIO160__FUNC_GPIO160 (MTK_PIN_NO(160) | 0) +#define PINMUX_GPIO160__FUNC_CONN_BT_CLK (MTK_PIN_NO(160) | 1) +#define PINMUX_GPIO160__FUNC_AUXIF_CLK1 
(MTK_PIN_NO(160) | 2) +#define PINMUX_GPIO160__FUNC_DBG_MON_A20 (MTK_PIN_NO(160) | 7) + +#define PINMUX_GPIO161__FUNC_GPIO161 (MTK_PIN_NO(161) | 0) +#define PINMUX_GPIO161__FUNC_CONN_BT_DATA (MTK_PIN_NO(161) | 1) +#define PINMUX_GPIO161__FUNC_AUXIF_ST1 (MTK_PIN_NO(161) | 2) +#define PINMUX_GPIO161__FUNC_DBG_MON_A21 (MTK_PIN_NO(161) | 7) + +#define PINMUX_GPIO162__FUNC_GPIO162 (MTK_PIN_NO(162) | 0) +#define PINMUX_GPIO162__FUNC_CONN_WF_CTRL0 (MTK_PIN_NO(162) | 1) +#define PINMUX_GPIO162__FUNC_DBG_MON_A22 (MTK_PIN_NO(162) | 7) + +#define PINMUX_GPIO163__FUNC_GPIO163 (MTK_PIN_NO(163) | 0) +#define PINMUX_GPIO163__FUNC_CONN_WF_CTRL1 (MTK_PIN_NO(163) | 1) +#define PINMUX_GPIO163__FUNC_UFS_MPHY_SCL (MTK_PIN_NO(163) | 2) +#define PINMUX_GPIO163__FUNC_DBG_MON_A23 (MTK_PIN_NO(163) | 7) + +#define PINMUX_GPIO164__FUNC_GPIO164 (MTK_PIN_NO(164) | 0) +#define PINMUX_GPIO164__FUNC_CONN_WF_CTRL2 (MTK_PIN_NO(164) | 1) +#define PINMUX_GPIO164__FUNC_UFS_MPHY_SDA (MTK_PIN_NO(164) | 2) +#define PINMUX_GPIO164__FUNC_DBG_MON_A24 (MTK_PIN_NO(164) | 7) + +#define PINMUX_GPIO165__FUNC_GPIO165 (MTK_PIN_NO(165) | 0) +#define PINMUX_GPIO165__FUNC_CONN_WF_CTRL3 (MTK_PIN_NO(165) | 1) +#define PINMUX_GPIO165__FUNC_UFS_UNIPRO_SDA (MTK_PIN_NO(165) | 2) +#define PINMUX_GPIO165__FUNC_DBG_MON_A25 (MTK_PIN_NO(165) | 7) + +#define PINMUX_GPIO166__FUNC_GPIO166 (MTK_PIN_NO(166) | 0) +#define PINMUX_GPIO166__FUNC_CONN_WF_CTRL4 (MTK_PIN_NO(166) | 1) +#define PINMUX_GPIO166__FUNC_UFS_UNIPRO_SCL (MTK_PIN_NO(166) | 2) +#define PINMUX_GPIO166__FUNC_DBG_MON_A26 (MTK_PIN_NO(166) | 7) + +#define PINMUX_GPIO167__FUNC_GPIO167 (MTK_PIN_NO(167) | 0) +#define PINMUX_GPIO167__FUNC_MSDC0_CMD (MTK_PIN_NO(167) | 1) + +#define PINMUX_GPIO168__FUNC_GPIO168 (MTK_PIN_NO(168) | 0) +#define PINMUX_GPIO168__FUNC_MSDC0_DAT0 (MTK_PIN_NO(168) | 1) + +#define PINMUX_GPIO169__FUNC_GPIO169 (MTK_PIN_NO(169) | 0) +#define PINMUX_GPIO169__FUNC_MSDC0_DAT2 (MTK_PIN_NO(169) | 1) + +#define PINMUX_GPIO170__FUNC_GPIO170 (MTK_PIN_NO(170) | 0) +#define PINMUX_GPIO170__FUNC_MSDC0_DAT4 (MTK_PIN_NO(170) | 1) + +#define PINMUX_GPIO171__FUNC_GPIO171 (MTK_PIN_NO(171) | 0) +#define PINMUX_GPIO171__FUNC_MSDC0_DAT6 (MTK_PIN_NO(171) | 1) + +#define PINMUX_GPIO172__FUNC_GPIO172 (MTK_PIN_NO(172) | 0) +#define PINMUX_GPIO172__FUNC_MSDC0_DAT1 (MTK_PIN_NO(172) | 1) + +#define PINMUX_GPIO173__FUNC_GPIO173 (MTK_PIN_NO(173) | 0) +#define PINMUX_GPIO173__FUNC_MSDC0_DAT5 (MTK_PIN_NO(173) | 1) + +#define PINMUX_GPIO174__FUNC_GPIO174 (MTK_PIN_NO(174) | 0) +#define PINMUX_GPIO174__FUNC_MSDC0_DAT7 (MTK_PIN_NO(174) | 1) + +#define PINMUX_GPIO175__FUNC_GPIO175 (MTK_PIN_NO(175) | 0) +#define PINMUX_GPIO175__FUNC_MSDC0_DSL (MTK_PIN_NO(175) | 1) +#define PINMUX_GPIO175__FUNC_ANT_SEL9 (MTK_PIN_NO(175) | 2) + +#define PINMUX_GPIO176__FUNC_GPIO176 (MTK_PIN_NO(176) | 0) +#define PINMUX_GPIO176__FUNC_MSDC0_CLK (MTK_PIN_NO(176) | 1) +#define PINMUX_GPIO176__FUNC_ANT_SEL10 (MTK_PIN_NO(176) | 2) + +#define PINMUX_GPIO177__FUNC_GPIO177 (MTK_PIN_NO(177) | 0) +#define PINMUX_GPIO177__FUNC_MSDC0_DAT3 (MTK_PIN_NO(177) | 1) + +#define PINMUX_GPIO178__FUNC_GPIO178 (MTK_PIN_NO(178) | 0) +#define PINMUX_GPIO178__FUNC_MSDC0_RSTB (MTK_PIN_NO(178) | 1) + +#define PINMUX_GPIO179__FUNC_GPIO179 (MTK_PIN_NO(179) | 0) +#define PINMUX_GPIO179__FUNC_RFIC0_BSI_EN (MTK_PIN_NO(179) | 1) + +#define PINMUX_GPIO180__FUNC_GPIO180 (MTK_PIN_NO(180) | 0) +#define PINMUX_GPIO180__FUNC_RFIC0_BSI_CK (MTK_PIN_NO(180) | 1) + +#define PINMUX_GPIO181__FUNC_GPIO181 (MTK_PIN_NO(181) | 0) +#define PINMUX_GPIO181__FUNC_SRCLKENA0 
(MTK_PIN_NO(181) | 1) + +#define PINMUX_GPIO182__FUNC_GPIO182 (MTK_PIN_NO(182) | 0) +#define PINMUX_GPIO182__FUNC_SRCLKENA1 (MTK_PIN_NO(182) | 1) + +#define PINMUX_GPIO183__FUNC_GPIO183 (MTK_PIN_NO(183) | 0) +#define PINMUX_GPIO183__FUNC_WATCHDOG (MTK_PIN_NO(183) | 1) + +#define PINMUX_GPIO184__FUNC_GPIO184 (MTK_PIN_NO(184) | 0) +#define PINMUX_GPIO184__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(184) | 1) +#define PINMUX_GPIO184__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(184) | 2) + +#define PINMUX_GPIO185__FUNC_GPIO185 (MTK_PIN_NO(185) | 0) +#define PINMUX_GPIO185__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(185) | 1) + +#define PINMUX_GPIO186__FUNC_GPIO186 (MTK_PIN_NO(186) | 0) +#define PINMUX_GPIO186__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(186) | 1) +#define PINMUX_GPIO186__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(186) | 2) + +#define PINMUX_GPIO187__FUNC_GPIO187 (MTK_PIN_NO(187) | 0) +#define PINMUX_GPIO187__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(187) | 1) + +#define PINMUX_GPIO188__FUNC_GPIO188 (MTK_PIN_NO(188) | 0) +#define PINMUX_GPIO188__FUNC_RTC32K_CK (MTK_PIN_NO(188) | 1) + +#define PINMUX_GPIO189__FUNC_GPIO189 (MTK_PIN_NO(189) | 0) +#define PINMUX_GPIO189__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(189) | 1) +#define PINMUX_GPIO189__FUNC_I2S1_MCK (MTK_PIN_NO(189) | 3) +#define PINMUX_GPIO189__FUNC_UFS_UNIPRO_SCL (MTK_PIN_NO(189) | 6) + +#define PINMUX_GPIO190__FUNC_GPIO190 (MTK_PIN_NO(190) | 0) +#define PINMUX_GPIO190__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(190) | 1) +#define PINMUX_GPIO190__FUNC_I2S1_BCK (MTK_PIN_NO(190) | 3) +#define PINMUX_GPIO190__FUNC_DBG_MON_A6 (MTK_PIN_NO(190) | 7) + +#define PINMUX_GPIO191__FUNC_GPIO191 (MTK_PIN_NO(191) | 0) +#define PINMUX_GPIO191__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(191) | 1) +#define PINMUX_GPIO191__FUNC_I2S1_LRCK (MTK_PIN_NO(191) | 3) +#define PINMUX_GPIO191__FUNC_DBG_MON_A7 (MTK_PIN_NO(191) | 7) + +#define PINMUX_GPIO192__FUNC_GPIO192 (MTK_PIN_NO(192) | 0) +#define PINMUX_GPIO192__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(192) | 1) +#define PINMUX_GPIO192__FUNC_I2S1_DO (MTK_PIN_NO(192) | 3) +#define PINMUX_GPIO192__FUNC_UFS_MPHY_SDA (MTK_PIN_NO(192) | 6) +#define PINMUX_GPIO192__FUNC_DBG_MON_A8 (MTK_PIN_NO(192) | 7) + +#define PINMUX_GPIO193__FUNC_GPIO193 (MTK_PIN_NO(193) | 0) +#define PINMUX_GPIO193__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(193) | 1) +#define PINMUX_GPIO193__FUNC_VOW_DAT_MISO (MTK_PIN_NO(193) | 2) +#define PINMUX_GPIO193__FUNC_I2S2_LRCK (MTK_PIN_NO(193) | 3) +#define PINMUX_GPIO193__FUNC_UDI_TDI (MTK_PIN_NO(193) | 5) +#define PINMUX_GPIO193__FUNC_DBG_MON_A12 (MTK_PIN_NO(193) | 7) + +#define PINMUX_GPIO194__FUNC_GPIO194 (MTK_PIN_NO(194) | 0) +#define PINMUX_GPIO194__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(194) | 1) +#define PINMUX_GPIO194__FUNC_VOW_CLK_MISO (MTK_PIN_NO(194) | 2) +#define PINMUX_GPIO194__FUNC_I2S2_DI (MTK_PIN_NO(194) | 3) +#define PINMUX_GPIO194__FUNC_UDI_NTRST (MTK_PIN_NO(194) | 5) +#define PINMUX_GPIO194__FUNC_UFS_MPHY_SCL (MTK_PIN_NO(194) | 6) +#define PINMUX_GPIO194__FUNC_DBG_MON_A13 (MTK_PIN_NO(194) | 7) + +#define PINMUX_GPIO195__FUNC_GPIO195 (MTK_PIN_NO(195) | 0) +#define PINMUX_GPIO195__FUNC_ADSP_JTAG_TCK (MTK_PIN_NO(195) | 3) +#define PINMUX_GPIO195__FUNC_VPU_UDI_TCK (MTK_PIN_NO(195) | 4) +#define PINMUX_GPIO195__FUNC_SPM_JTAG_TCK (MTK_PIN_NO(195) | 5) +#define PINMUX_GPIO195__FUNC_SSPM_JTAG_TCK (MTK_PIN_NO(195) | 6) + +#define PINMUX_GPIO196__FUNC_GPIO196 (MTK_PIN_NO(196) | 0) +#define PINMUX_GPIO196__FUNC_CMMCLK4 (MTK_PIN_NO(196) | 1) +#define PINMUX_GPIO196__FUNC_ADSP_JTAG_TDI (MTK_PIN_NO(196) | 3) +#define PINMUX_GPIO196__FUNC_VPU_UDI_TDI (MTK_PIN_NO(196) | 4) +#define PINMUX_GPIO196__FUNC_SPM_JTAG_TDI 
(MTK_PIN_NO(196) | 5) +#define PINMUX_GPIO196__FUNC_SSPM_JTAG_TDI (MTK_PIN_NO(196) | 6) + +#define PINMUX_GPIO197__FUNC_GPIO197 (MTK_PIN_NO(197) | 0) +#define PINMUX_GPIO197__FUNC_ADSP_JTAG_TDO (MTK_PIN_NO(197) | 3) +#define PINMUX_GPIO197__FUNC_VPU_UDI_TDO (MTK_PIN_NO(197) | 4) +#define PINMUX_GPIO197__FUNC_SPM_JTAG_TDO (MTK_PIN_NO(197) | 5) +#define PINMUX_GPIO197__FUNC_SSPM_JTAG_TDO (MTK_PIN_NO(197) | 6) + +#define PINMUX_GPIO198__FUNC_GPIO198 (MTK_PIN_NO(198) | 0) +#define PINMUX_GPIO198__FUNC_SCL7 (MTK_PIN_NO(198) | 1) + +#define PINMUX_GPIO199__FUNC_GPIO199 (MTK_PIN_NO(199) | 0) +#define PINMUX_GPIO199__FUNC_SDA7 (MTK_PIN_NO(199) | 1) + +#define PINMUX_GPIO200__FUNC_GPIO200 (MTK_PIN_NO(200) | 0) +#define PINMUX_GPIO200__FUNC_URXD1 (MTK_PIN_NO(200) | 1) +#define PINMUX_GPIO200__FUNC_ADSP_URXD0 (MTK_PIN_NO(200) | 2) +#define PINMUX_GPIO200__FUNC_TP_URXD1_AO (MTK_PIN_NO(200) | 3) +#define PINMUX_GPIO200__FUNC_SSPM_URXD_AO (MTK_PIN_NO(200) | 4) +#define PINMUX_GPIO200__FUNC_TP_URXD2_AO (MTK_PIN_NO(200) | 5) +#define PINMUX_GPIO200__FUNC_MBISTREADEN_TRIGGER (MTK_PIN_NO(200) | 6) + +#define PINMUX_GPIO201__FUNC_GPIO201 (MTK_PIN_NO(201) | 0) +#define PINMUX_GPIO201__FUNC_UTXD1 (MTK_PIN_NO(201) | 1) +#define PINMUX_GPIO201__FUNC_ADSP_UTXD0 (MTK_PIN_NO(201) | 2) +#define PINMUX_GPIO201__FUNC_TP_UTXD1_AO (MTK_PIN_NO(201) | 3) +#define PINMUX_GPIO201__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(201) | 4) +#define PINMUX_GPIO201__FUNC_TP_UTXD2_AO (MTK_PIN_NO(201) | 5) +#define PINMUX_GPIO201__FUNC_MBISTWRITEEN_TRIGGER (MTK_PIN_NO(201) | 6) + +#define PINMUX_GPIO202__FUNC_GPIO202 (MTK_PIN_NO(202) | 0) +#define PINMUX_GPIO202__FUNC_PWM_3 (MTK_PIN_NO(202) | 1) +#define PINMUX_GPIO202__FUNC_CLKM3 (MTK_PIN_NO(202) | 2) + +#define PINMUX_GPIO203__FUNC_GPIO203 (MTK_PIN_NO(203) | 0) + +#define PINMUX_GPIO204__FUNC_GPIO204 (MTK_PIN_NO(204) | 0) + +#define PINMUX_GPIO205__FUNC_GPIO205 (MTK_PIN_NO(205) | 0) + +#define PINMUX_GPIO206__FUNC_GPIO206 (MTK_PIN_NO(206) | 0) + +#define PINMUX_GPIO207__FUNC_GPIO207 (MTK_PIN_NO(207) | 0) + +#define PINMUX_GPIO208__FUNC_GPIO208 (MTK_PIN_NO(208) | 0) + +#define PINMUX_GPIO209__FUNC_GPIO209 (MTK_PIN_NO(209) | 0) + +#endif /* __MT6779-PINFUNC_H */ -- cgit v1.2.3 From a3b7a581823857fab4fae71be9d1c830af78a766 Mon Sep 17 00:00:00 2001 From: Ioana Ciornei Date: Fri, 17 Jul 2020 18:47:58 +0300 Subject: bus: fsl-mc: add missing device types The MC bus has different types of devices that can be discovered on the bus. Add the missing device types. 
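
As a hedged illustration of how the new type helpers are consumed (this sketch is not part of the patch; the function name and message are invented), a client driver's probe could gate on one of the types added here:

/* Hypothetical example only -- shows one of the helpers introduced below. */
#include <linux/fsl/mc.h>

static int example_dpdmux_probe(struct fsl_mc_device *mc_dev)
{
	/* is_fsl_mc_bus_dpdmux() is one of the type checks added by this patch. */
	if (!is_fsl_mc_bus_dpdmux(mc_dev))
		return -EINVAL;

	dev_info(&mc_dev->dev, "bound to a dpdmux object\n");
	return 0;
}
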
Signed-off-by: Ioana Ciornei Reviewed-by: Laurentiu Tudor Link: https://lore.kernel.org/r/20200717154800.17169-2-ioana.ciornei@nxp.com Signed-off-by: Greg Kroah-Hartman --- drivers/bus/fsl-mc/fsl-mc-bus.c | 30 ++++++++++++++++++++++++++++++ include/linux/fsl/mc.h | 30 ++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+) (limited to 'include') diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c index 40526da5c6a6..ee1873438e97 100644 --- a/drivers/bus/fsl-mc/fsl-mc-bus.c +++ b/drivers/bus/fsl-mc/fsl-mc-bus.c @@ -201,6 +201,31 @@ struct device_type fsl_mc_bus_dpseci_type = { }; EXPORT_SYMBOL_GPL(fsl_mc_bus_dpseci_type); +struct device_type fsl_mc_bus_dpdmux_type = { + .name = "fsl_mc_bus_dpdmux" +}; +EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmux_type); + +struct device_type fsl_mc_bus_dpdcei_type = { + .name = "fsl_mc_bus_dpdcei" +}; +EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdcei_type); + +struct device_type fsl_mc_bus_dpaiop_type = { + .name = "fsl_mc_bus_dpaiop" +}; +EXPORT_SYMBOL_GPL(fsl_mc_bus_dpaiop_type); + +struct device_type fsl_mc_bus_dpci_type = { + .name = "fsl_mc_bus_dpci" +}; +EXPORT_SYMBOL_GPL(fsl_mc_bus_dpci_type); + +struct device_type fsl_mc_bus_dpdmai_type = { + .name = "fsl_mc_bus_dpdmai" +}; +EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmai_type); + static struct device_type *fsl_mc_get_device_type(const char *type) { static const struct { @@ -217,6 +242,11 @@ static struct device_type *fsl_mc_get_device_type(const char *type) { &fsl_mc_bus_dpmac_type, "dpmac" }, { &fsl_mc_bus_dprtc_type, "dprtc" }, { &fsl_mc_bus_dpseci_type, "dpseci" }, + { &fsl_mc_bus_dpdmux_type, "dpdmux" }, + { &fsl_mc_bus_dpdcei_type, "dpdcei" }, + { &fsl_mc_bus_dpaiop_type, "dpaiop" }, + { &fsl_mc_bus_dpci_type, "dpci" }, + { &fsl_mc_bus_dpdmai_type, "dpdmai" }, { NULL, NULL } }; int i; diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h index 2b5f8366dbe1..cdb03aca2aef 100644 --- a/include/linux/fsl/mc.h +++ b/include/linux/fsl/mc.h @@ -433,6 +433,11 @@ extern struct device_type fsl_mc_bus_dpmcp_type; extern struct device_type fsl_mc_bus_dpmac_type; extern struct device_type fsl_mc_bus_dprtc_type; extern struct device_type fsl_mc_bus_dpseci_type; +extern struct device_type fsl_mc_bus_dpdmux_type; +extern struct device_type fsl_mc_bus_dpdcei_type; +extern struct device_type fsl_mc_bus_dpaiop_type; +extern struct device_type fsl_mc_bus_dpci_type; +extern struct device_type fsl_mc_bus_dpdmai_type; static inline bool is_fsl_mc_bus_dprc(const struct fsl_mc_device *mc_dev) { @@ -454,6 +459,11 @@ static inline bool is_fsl_mc_bus_dpsw(const struct fsl_mc_device *mc_dev) return mc_dev->dev.type == &fsl_mc_bus_dpsw_type; } +static inline bool is_fsl_mc_bus_dpdmux(const struct fsl_mc_device *mc_dev) +{ + return mc_dev->dev.type == &fsl_mc_bus_dpdmux_type; +} + static inline bool is_fsl_mc_bus_dpbp(const struct fsl_mc_device *mc_dev) { return mc_dev->dev.type == &fsl_mc_bus_dpbp_type; @@ -484,6 +494,26 @@ static inline bool is_fsl_mc_bus_dpseci(const struct fsl_mc_device *mc_dev) return mc_dev->dev.type == &fsl_mc_bus_dpseci_type; } +static inline bool is_fsl_mc_bus_dpdcei(const struct fsl_mc_device *mc_dev) +{ + return mc_dev->dev.type == &fsl_mc_bus_dpdcei_type; +} + +static inline bool is_fsl_mc_bus_dpaiop(const struct fsl_mc_device *mc_dev) +{ + return mc_dev->dev.type == &fsl_mc_bus_dpaiop_type; +} + +static inline bool is_fsl_mc_bus_dpci(const struct fsl_mc_device *mc_dev) +{ + return mc_dev->dev.type == &fsl_mc_bus_dpci_type; +} + +static inline bool is_fsl_mc_bus_dpdmai(const struct 
fsl_mc_device *mc_dev) +{ + return mc_dev->dev.type == &fsl_mc_bus_dpdmai_type; +} + /* * Data Path Buffer Pool (DPBP) API * Contains initialization APIs and runtime control APIs for DPBP -- cgit v1.2.3 From 9a872def598195d2de4a4a74e17804142e2aa78e Mon Sep 17 00:00:00 2001 From: Laurentiu Tudor Date: Fri, 17 Jul 2020 18:47:59 +0300 Subject: bus: fsl-mc: use raw spin lock to serialize mc cmds Replace the spinlock that serializes the MC commands with a raw spinlock. This is needed for the RT kernel because there are MC commands sent in interrupt context. Signed-off-by: Laurentiu Tudor Signed-off-by: Ioana Ciornei Reviewed-by: Laurentiu Tudor Link: https://lore.kernel.org/r/20200717154800.17169-3-ioana.ciornei@nxp.com Signed-off-by: Greg Kroah-Hartman --- drivers/bus/fsl-mc/mc-io.c | 2 +- drivers/bus/fsl-mc/mc-sys.c | 4 ++-- include/linux/fsl/mc.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c index 6ae48ad80409..a30b53f1d87d 100644 --- a/drivers/bus/fsl-mc/mc-io.c +++ b/drivers/bus/fsl-mc/mc-io.c @@ -82,7 +82,7 @@ int __must_check fsl_create_mc_io(struct device *dev, mc_io->portal_phys_addr = mc_portal_phys_addr; mc_io->portal_size = mc_portal_size; if (flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL) - spin_lock_init(&mc_io->spinlock); + raw_spin_lock_init(&mc_io->spinlock); else mutex_init(&mc_io->mutex); diff --git a/drivers/bus/fsl-mc/mc-sys.c b/drivers/bus/fsl-mc/mc-sys.c index 3221a7fbaf0a..85a0225db522 100644 --- a/drivers/bus/fsl-mc/mc-sys.c +++ b/drivers/bus/fsl-mc/mc-sys.c @@ -251,7 +251,7 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd) return -EINVAL; if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL) - spin_lock_irqsave(&mc_io->spinlock, irq_flags); + raw_spin_lock_irqsave(&mc_io->spinlock, irq_flags); else mutex_lock(&mc_io->mutex); @@ -287,7 +287,7 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd) error = 0; common_exit: if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL) - spin_unlock_irqrestore(&mc_io->spinlock, irq_flags); + raw_spin_unlock_irqrestore(&mc_io->spinlock, irq_flags); else mutex_unlock(&mc_io->mutex); diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h index cdb03aca2aef..a428c61ead6e 100644 --- a/include/linux/fsl/mc.h +++ b/include/linux/fsl/mc.h @@ -339,7 +339,7 @@ struct fsl_mc_io { * This field is only meaningful if the * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is set */ - spinlock_t spinlock; /* serializes mc_send_command() */ + raw_spinlock_t spinlock; /* serializes mc_send_command() */ }; }; -- cgit v1.2.3 From ac756d05c468e535380c7b4b102105793c5d095e Mon Sep 17 00:00:00 2001 From: "周琰杰 (Zhou Yanjie)" Date: Thu, 23 Jul 2020 01:18:03 +0800 Subject: dt-bindings: timer: Add Ingenic X1000 OST bindings. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add the OST bindings for the X1000 SoC from Ingenic. 
Tested-by: 周正 (Zhou Zheng) Signed-off-by: 周琰杰 (Zhou Yanjie) Reviewed-by: Paul Cercueil Reviewed-by: Rob Herring Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20200722171804.97559-2-zhouyanjie@wanyeetech.com --- .../devicetree/bindings/timer/ingenic,sysost.yaml | 63 ++++++++++++++++++++++ include/dt-bindings/clock/ingenic,sysost.h | 12 +++++ 2 files changed, 75 insertions(+) create mode 100644 Documentation/devicetree/bindings/timer/ingenic,sysost.yaml create mode 100644 include/dt-bindings/clock/ingenic,sysost.h (limited to 'include') diff --git a/Documentation/devicetree/bindings/timer/ingenic,sysost.yaml b/Documentation/devicetree/bindings/timer/ingenic,sysost.yaml new file mode 100644 index 000000000000..df3eb76045e0 --- /dev/null +++ b/Documentation/devicetree/bindings/timer/ingenic,sysost.yaml @@ -0,0 +1,63 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/timer/ingenic,sysost.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Bindings for SYSOST in Ingenic XBurst family SoCs + +maintainers: + - 周琰杰 (Zhou Yanjie) + +description: + The SYSOST in an Ingenic SoC provides one 64bit timer for clocksource + and one or more 32bit timers for clockevent. + +properties: + "#clock-cells": + const: 1 + + compatible: + enum: + - ingenic,x1000-ost + - ingenic,x2000-ost + + reg: + maxItems: 1 + + clocks: + maxItems: 1 + + clock-names: + const: ost + + interrupts: + maxItems: 1 + +required: + - "#clock-cells" + - compatible + - reg + - clocks + - clock-names + - interrupts + +additionalProperties: false + +examples: + - | + #include + + ost: timer@12000000 { + compatible = "ingenic,x1000-ost"; + reg = <0x12000000 0x3c>; + + #clock-cells = <1>; + + clocks = <&cgu X1000_CLK_OST>; + clock-names = "ost"; + + interrupt-parent = <&cpuintc>; + interrupts = <3>; + }; +... diff --git a/include/dt-bindings/clock/ingenic,sysost.h b/include/dt-bindings/clock/ingenic,sysost.h new file mode 100644 index 000000000000..9ac88e90babf --- /dev/null +++ b/include/dt-bindings/clock/ingenic,sysost.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides clock numbers for the ingenic,tcu DT binding. + */ + +#ifndef __DT_BINDINGS_CLOCK_INGENIC_OST_H__ +#define __DT_BINDINGS_CLOCK_INGENIC_OST_H__ + +#define OST_CLK_PERCPU_TIMER 0 +#define OST_CLK_GLOBAL_TIMER 1 + +#endif /* __DT_BINDINGS_CLOCK_INGENIC_OST_H__ */ -- cgit v1.2.3 From 7deff7b5b4395784b194bae3631b8333d3423938 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 18 Jul 2020 17:28:41 -0700 Subject: hyperv: hyperv.h: drop a duplicated word Drop the repeated word "the" in a comment. Signed-off-by: Randy Dunlap Cc: "K. Y. Srinivasan" Cc: Haiyang Zhang Cc: Stephen Hemminger Cc: Wei Liu Cc: linux-hyperv@vger.kernel.org Link: https://lore.kernel.org/r/20200719002841.20369-1-rdunlap@infradead.org Signed-off-by: Wei Liu --- include/uapi/linux/hyperv.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/linux/hyperv.h b/include/uapi/linux/hyperv.h index 8f24404ad04f..6135d92e0d47 100644 --- a/include/uapi/linux/hyperv.h +++ b/include/uapi/linux/hyperv.h @@ -219,7 +219,7 @@ struct hv_do_fcopy { * kernel and user-level daemon communicate using a connector channel. * * The user mode component first registers with the - * the kernel component. Subsequently, the kernel component requests, data + * kernel component. Subsequently, the kernel component requests, data * for the specified keys. 
In response to this message the user mode component * fills in the value corresponding to the specified key. We overload the * sequence field in the cn_msg header to define our KVP message types. -- cgit v1.2.3 From 0ceef681e34a61afcd77af0837b2132925c5bad4 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Mon, 20 Jul 2020 10:17:39 +0900 Subject: ASoC: soc-xxx: add asoc_substream_to_rtd() Current soc-xxx are getting rtd from substream by rtd = substream->private_data; But, getting data from "private_data" is very unclear. This patch adds asoc_substream_to_rtd() macro which is easy to understand that rtd from substream. Signed-off-by: Kuninori Morimoto Link: https://lore.kernel.org/r/87wo2z0yve.wl-kuninori.morimoto.gx@renesas.com Signed-off-by: Mark Brown --- include/sound/soc.h | 2 ++ sound/soc/soc-component.c | 20 +++++------ sound/soc/soc-dai.c | 8 ++--- sound/soc/soc-dapm.c | 6 ++-- sound/soc/soc-generic-dmaengine-pcm.c | 4 +-- sound/soc/soc-link.c | 12 +++---- sound/soc/soc-pcm.c | 62 +++++++++++++++++------------------ sound/soc/soc-utils.c | 2 +- 8 files changed, 59 insertions(+), 57 deletions(-) (limited to 'include') diff --git a/include/sound/soc.h b/include/sound/soc.h index 59235e553630..acbb5efb28ef 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -1168,6 +1168,8 @@ struct snd_soc_pcm_runtime { /* see soc_new_pcm_runtime() */ #define asoc_rtd_to_cpu(rtd, n) (rtd)->dais[n] #define asoc_rtd_to_codec(rtd, n) (rtd)->dais[n + (rtd)->num_cpus] +#define asoc_substream_to_rtd(substream) \ + (struct snd_soc_pcm_runtime *)snd_pcm_substream_chip(substream) #define for_each_rtd_components(rtd, i, component) \ for ((i) = 0, component = NULL; \ diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c index 9565a0dd7cb6..c1b799f98460 100644 --- a/sound/soc/soc-component.c +++ b/sound/soc/soc-component.c @@ -606,7 +606,7 @@ EXPORT_SYMBOL_GPL(snd_soc_component_test_bits); int snd_soc_pcm_component_pointer(struct snd_pcm_substream *substream) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *component; int i; @@ -621,7 +621,7 @@ int snd_soc_pcm_component_pointer(struct snd_pcm_substream *substream) int snd_soc_pcm_component_ioctl(struct snd_pcm_substream *substream, unsigned int cmd, void *arg) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *component; int i; @@ -638,7 +638,7 @@ int snd_soc_pcm_component_ioctl(struct snd_pcm_substream *substream, int snd_soc_pcm_component_sync_stop(struct snd_pcm_substream *substream) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *component; int i, ret; @@ -658,7 +658,7 @@ int snd_soc_pcm_component_copy_user(struct snd_pcm_substream *substream, int channel, unsigned long pos, void __user *buf, unsigned long bytes) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *component; int i; @@ -677,7 +677,7 @@ int snd_soc_pcm_component_copy_user(struct snd_pcm_substream *substream, struct page *snd_soc_pcm_component_page(struct snd_pcm_substream *substream, unsigned long offset) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); 
struct snd_soc_component *component; struct page *page; int i; @@ -698,7 +698,7 @@ struct page *snd_soc_pcm_component_page(struct snd_pcm_substream *substream, int snd_soc_pcm_component_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *vma) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *component; int i; @@ -745,7 +745,7 @@ void snd_soc_pcm_component_free(struct snd_soc_pcm_runtime *rtd) int snd_soc_pcm_component_prepare(struct snd_pcm_substream *substream) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *component; int i, ret; @@ -764,7 +764,7 @@ int snd_soc_pcm_component_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_component **last) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *component; int i, ret; @@ -786,7 +786,7 @@ int snd_soc_pcm_component_hw_params(struct snd_pcm_substream *substream, void snd_soc_pcm_component_hw_free(struct snd_pcm_substream *substream, struct snd_soc_component *last) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *component; int i, ret; @@ -805,7 +805,7 @@ void snd_soc_pcm_component_hw_free(struct snd_pcm_substream *substream, int snd_soc_pcm_component_trigger(struct snd_pcm_substream *substream, int cmd) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *component; int i, ret; diff --git a/sound/soc/soc-dai.c b/sound/soc/soc-dai.c index 98f0c98b06bb..693893420bf0 100644 --- a/sound/soc/soc-dai.c +++ b/sound/soc/soc-dai.c @@ -316,7 +316,7 @@ int snd_soc_dai_hw_params(struct snd_soc_dai *dai, struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); int ret = 0; /* perform any topology hw_params fixups before DAI */ @@ -516,7 +516,7 @@ int snd_soc_pcm_dai_new(struct snd_soc_pcm_runtime *rtd) int snd_soc_pcm_dai_prepare(struct snd_pcm_substream *substream) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_dai *dai; int i, ret; @@ -535,7 +535,7 @@ int snd_soc_pcm_dai_prepare(struct snd_pcm_substream *substream) int snd_soc_pcm_dai_trigger(struct snd_pcm_substream *substream, int cmd) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_dai *dai; int i, ret; @@ -554,7 +554,7 @@ int snd_soc_pcm_dai_trigger(struct snd_pcm_substream *substream, int snd_soc_pcm_dai_bespoke_trigger(struct snd_pcm_substream *substream, int cmd) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_dai *dai; int i, ret; diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index e51aa2efc65c..5076299abf37 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -2673,7 +2673,7 @@ int snd_soc_dapm_update_dai(struct snd_pcm_substream *substream, 
struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); int ret; mutex_lock_nested(&rtd->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); @@ -3795,7 +3795,7 @@ snd_soc_dai_link_event_pre_pmu(struct snd_soc_dapm_widget *w, { struct snd_soc_dapm_path *path; struct snd_soc_dai *source, *sink; - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_pcm_hw_params *params = NULL; const struct snd_soc_pcm_stream *config = NULL; struct snd_pcm_runtime *runtime = NULL; @@ -4117,7 +4117,7 @@ snd_soc_dapm_new_dai(struct snd_soc_card *card, struct snd_pcm_substream *substream, char *id) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_dapm_widget template; struct snd_soc_dapm_widget *w; const char **w_param_text; diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c index 61844403f181..d17b4bf1dbe3 100644 --- a/sound/soc/soc-generic-dmaengine-pcm.c +++ b/sound/soc/soc-generic-dmaengine-pcm.c @@ -46,7 +46,7 @@ static struct device *dmaengine_dma_dev(struct dmaengine_pcm *pcm, int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_dmaengine_dai_dma_data *dma_data; int ret; @@ -105,7 +105,7 @@ static int dmaengine_pcm_set_runtime_hwparams(struct snd_soc_component *component, struct snd_pcm_substream *substream) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct dmaengine_pcm *pcm = soc_component_to_pcm(component); struct device *dma_dev = dmaengine_dma_dev(pcm, substream); struct dma_chan *chan = pcm->chan[substream->stream]; diff --git a/sound/soc/soc-link.c b/sound/soc/soc-link.c index 1c3bf2118718..cec70b19863e 100644 --- a/sound/soc/soc-link.c +++ b/sound/soc/soc-link.c @@ -59,7 +59,7 @@ int snd_soc_link_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, int snd_soc_link_startup(struct snd_pcm_substream *substream) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); int ret = 0; if (rtd->dai_link->ops && @@ -71,7 +71,7 @@ int snd_soc_link_startup(struct snd_pcm_substream *substream) void snd_soc_link_shutdown(struct snd_pcm_substream *substream) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); if (rtd->dai_link->ops && rtd->dai_link->ops->shutdown) @@ -80,7 +80,7 @@ void snd_soc_link_shutdown(struct snd_pcm_substream *substream) int snd_soc_link_prepare(struct snd_pcm_substream *substream) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); int ret = 0; if (rtd->dai_link->ops && @@ -93,7 +93,7 @@ int snd_soc_link_prepare(struct snd_pcm_substream *substream) int snd_soc_link_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); 
int ret = 0; if (rtd->dai_link->ops && @@ -105,7 +105,7 @@ int snd_soc_link_hw_params(struct snd_pcm_substream *substream, void snd_soc_link_hw_free(struct snd_pcm_substream *substream) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); if (rtd->dai_link->ops && rtd->dai_link->ops->hw_free) @@ -114,7 +114,7 @@ void snd_soc_link_hw_free(struct snd_pcm_substream *substream) int snd_soc_link_trigger(struct snd_pcm_substream *substream, int cmd) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); int ret = 0; if (rtd->dai_link->ops && diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index f2c7c85ad40c..10f703986be3 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -304,7 +304,7 @@ int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir, static int soc_pcm_apply_symmetry(struct snd_pcm_substream *substream, struct snd_soc_dai *soc_dai) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); int ret; if (soc_dai->rate && (soc_dai->driver->symmetric_rates || @@ -361,7 +361,7 @@ static int soc_pcm_apply_symmetry(struct snd_pcm_substream *substream, static int soc_pcm_params_symmetry(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_dai *dai; struct snd_soc_dai *cpu_dai; unsigned int rate, channels, sample_bits, symmetry, i; @@ -423,7 +423,7 @@ static int soc_pcm_params_symmetry(struct snd_pcm_substream *substream, static bool soc_pcm_has_symmetry(struct snd_pcm_substream *substream) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_dai_link *link = rtd->dai_link; struct snd_soc_dai *dai; unsigned int symmetry, i; @@ -443,7 +443,7 @@ static bool soc_pcm_has_symmetry(struct snd_pcm_substream *substream) static void soc_pcm_set_msb(struct snd_pcm_substream *substream, int bits) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); int ret; if (!bits) @@ -457,7 +457,7 @@ static void soc_pcm_set_msb(struct snd_pcm_substream *substream, int bits) static void soc_pcm_apply_msb(struct snd_pcm_substream *substream) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_dai *cpu_dai; struct snd_soc_dai *codec_dai; struct snd_soc_pcm_stream *pcm_codec, *pcm_cpu; @@ -592,7 +592,7 @@ EXPORT_SYMBOL_GPL(snd_soc_runtime_calc_hw); static void soc_pcm_init_runtime_hw(struct snd_pcm_substream *substream) { struct snd_pcm_hardware *hw = &substream->runtime->hw; - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); u64 formats = hw->formats; /* @@ -608,7 +608,7 @@ static void soc_pcm_init_runtime_hw(struct snd_pcm_substream *substream) static int soc_pcm_components_open(struct snd_pcm_substream *substream) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *last = NULL; struct snd_soc_component *component; int i, ret = 0; @@ -650,7 +650,7 @@ 
static int soc_pcm_components_open(struct snd_pcm_substream *substream) static int soc_pcm_components_close(struct snd_pcm_substream *substream) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *component; int i, r, ret = 0; @@ -672,7 +672,7 @@ static int soc_pcm_components_close(struct snd_pcm_substream *substream) */ static int soc_pcm_close(struct snd_pcm_substream *substream) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *component; struct snd_soc_dai *dai; int i; @@ -711,7 +711,7 @@ static int soc_pcm_close(struct snd_pcm_substream *substream) */ static int soc_pcm_open(struct snd_pcm_substream *substream) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_soc_component *component; struct snd_soc_dai *dai; @@ -850,7 +850,7 @@ static void codec2codec_close_delayed_work(struct snd_soc_pcm_runtime *rtd) */ static int soc_pcm_prepare(struct snd_pcm_substream *substream) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_dai *dai; int i, ret = 0; @@ -907,7 +907,7 @@ static void soc_pcm_codec_params_fixup(struct snd_pcm_hw_params *params, static int soc_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *component; struct snd_soc_dai *cpu_dai; struct snd_soc_dai *codec_dai; @@ -1035,7 +1035,7 @@ codec_err: */ static int soc_pcm_hw_free(struct snd_pcm_substream *substream) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_dai *dai; int i; @@ -1116,7 +1116,7 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd) */ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_dai *cpu_dai; struct snd_soc_dai *codec_dai; struct snd_pcm_runtime *runtime = substream->runtime; @@ -1594,7 +1594,7 @@ static void dpcm_init_runtime_hw(struct snd_pcm_runtime *runtime, static void dpcm_runtime_merge_format(struct snd_pcm_substream *substream, u64 *formats) { - struct snd_soc_pcm_runtime *fe = substream->private_data; + struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream); struct snd_soc_dpcm *dpcm; struct snd_soc_dai *dai; int stream = substream->stream; @@ -1631,7 +1631,7 @@ static void dpcm_runtime_merge_chan(struct snd_pcm_substream *substream, unsigned int *channels_min, unsigned int *channels_max) { - struct snd_soc_pcm_runtime *fe = substream->private_data; + struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream); struct snd_soc_dpcm *dpcm; int stream = substream->stream; @@ -1686,7 +1686,7 @@ static void dpcm_runtime_merge_rate(struct snd_pcm_substream *substream, unsigned int *rate_min, unsigned int *rate_max) { - struct snd_soc_pcm_runtime *fe = substream->private_data; + struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream); struct 
snd_soc_dpcm *dpcm; int stream = substream->stream; @@ -1724,7 +1724,7 @@ static void dpcm_runtime_merge_rate(struct snd_pcm_substream *substream, static void dpcm_set_fe_runtime(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_dai *cpu_dai; int i; @@ -1775,7 +1775,7 @@ static int dpcm_apply_symmetry(struct snd_pcm_substream *fe_substream, int stream) { struct snd_soc_dpcm *dpcm; - struct snd_soc_pcm_runtime *fe = fe_substream->private_data; + struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(fe_substream); struct snd_soc_dai *fe_cpu_dai; int err; int i; @@ -1806,7 +1806,7 @@ static int dpcm_apply_symmetry(struct snd_pcm_substream *fe_substream, if (!be_substream) continue; - rtd = be_substream->private_data; + rtd = asoc_substream_to_rtd(be_substream); if (rtd->dai_link->be_hw_params_fixup) continue; @@ -1828,7 +1828,7 @@ static int dpcm_apply_symmetry(struct snd_pcm_substream *fe_substream, static int dpcm_fe_dai_startup(struct snd_pcm_substream *fe_substream) { - struct snd_soc_pcm_runtime *fe = fe_substream->private_data; + struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(fe_substream); struct snd_pcm_runtime *runtime = fe_substream->runtime; int stream = fe_substream->stream, ret = 0; @@ -1909,7 +1909,7 @@ int dpcm_be_dai_shutdown(struct snd_soc_pcm_runtime *fe, int stream) static int dpcm_fe_dai_shutdown(struct snd_pcm_substream *substream) { - struct snd_soc_pcm_runtime *fe = substream->private_data; + struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream); int stream = substream->stream; dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE); @@ -1975,7 +1975,7 @@ int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream) static int dpcm_fe_dai_hw_free(struct snd_pcm_substream *substream) { - struct snd_soc_pcm_runtime *fe = substream->private_data; + struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream); int err, stream = substream->stream; mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME); @@ -2080,7 +2080,7 @@ unwind: static int dpcm_fe_dai_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { - struct snd_soc_pcm_runtime *fe = substream->private_data; + struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream); int ret, stream = substream->stream; mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME); @@ -2226,7 +2226,7 @@ EXPORT_SYMBOL_GPL(dpcm_be_dai_trigger); static int dpcm_dai_trigger_fe_be(struct snd_pcm_substream *substream, int cmd, bool fe_first) { - struct snd_soc_pcm_runtime *fe = substream->private_data; + struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream); int ret; /* call trigger on the frontend before the backend. 
*/ @@ -2257,7 +2257,7 @@ static int dpcm_dai_trigger_fe_be(struct snd_pcm_substream *substream, static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd) { - struct snd_soc_pcm_runtime *fe = substream->private_data; + struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream); int stream = substream->stream; int ret = 0; enum snd_soc_dpcm_trigger trigger = fe->dai_link->trigger[stream]; @@ -2342,7 +2342,7 @@ out: static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd) { - struct snd_soc_pcm_runtime *fe = substream->private_data; + struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream); int stream = substream->stream; /* if FE's runtime_update is already set, we're in race; @@ -2395,7 +2395,7 @@ int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream) static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream) { - struct snd_soc_pcm_runtime *fe = substream->private_data; + struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream); int stream = substream->stream, ret = 0; mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME); @@ -2662,7 +2662,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dpcm_runtime_update); static void dpcm_fe_dai_cleanup(struct snd_pcm_substream *fe_substream) { - struct snd_soc_pcm_runtime *fe = fe_substream->private_data; + struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(fe_substream); struct snd_soc_dpcm *dpcm; int stream = fe_substream->stream; @@ -2677,7 +2677,7 @@ static void dpcm_fe_dai_cleanup(struct snd_pcm_substream *fe_substream) static int dpcm_fe_dai_close(struct snd_pcm_substream *fe_substream) { - struct snd_soc_pcm_runtime *fe = fe_substream->private_data; + struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(fe_substream); int ret; mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME); @@ -2691,7 +2691,7 @@ static int dpcm_fe_dai_close(struct snd_pcm_substream *fe_substream) static int dpcm_fe_dai_open(struct snd_pcm_substream *fe_substream) { - struct snd_soc_pcm_runtime *fe = fe_substream->private_data; + struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(fe_substream); struct snd_soc_dapm_widget_list *list; int ret; int stream = fe_substream->stream; diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c index 364b2483bdee..f27f94ca064b 100644 --- a/sound/soc/soc-utils.c +++ b/sound/soc/soc-utils.c @@ -66,7 +66,7 @@ static const struct snd_pcm_hardware dummy_dma_hardware = { static int dummy_dma_open(struct snd_soc_component *component, struct snd_pcm_substream *substream) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); /* BE's dont need dummy params */ if (!rtd->dai_link->no_pcm) -- cgit v1.2.3 From 80d7f913de00383722315c3aa9a246ad7333e6cc Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 18 Jul 2020 17:28:16 -0700 Subject: dt-bindings: mux: mux.h: drop a duplicated word Drop the repeated word "the" in a comment. 
Cc: Peter Rosin Cc: Rob Herring Cc: devicetree@vger.kernel.org Signed-off-by: Randy Dunlap Link: https://lore.kernel.org/r/20200719002816.20263-1-rdunlap@infradead.org Signed-off-by: Rob Herring --- include/dt-bindings/mux/mux.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/dt-bindings/mux/mux.h b/include/dt-bindings/mux/mux.h index 042719218dbf..0b9d654506ef 100644 --- a/include/dt-bindings/mux/mux.h +++ b/include/dt-bindings/mux/mux.h @@ -3,7 +3,7 @@ * This header provides constants for most Multiplexer bindings. * * Most Multiplexer bindings specify an idle state. In most cases, the - * the multiplexer can be left as is when idle, and in some cases it can + * multiplexer can be left as is when idle, and in some cases it can * disconnect the input/output and leave the multiplexer in a high * impedance state. */ -- cgit v1.2.3 From 5df5661a1387e829c901d009cdd1fccc376cdb74 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Thu, 23 Jul 2020 01:43:12 +0300 Subject: net: dsa: stop overriding master's ndo_get_phys_port_name The purpose of this override is to give the user an indication of what the number of the CPU port is (in DSA, the CPU port is a hardware implementation detail and not a network interface capable of traffic). However, it has always failed (by design) at providing this information to the user in a reliable fashion. Prior to commit 3369afba1e46 ("net: Call into DSA netdevice_ops wrappers"), the behavior was to only override this callback if it was not provided by the DSA master. That was its first failure: if the DSA master itself was a DSA port or a switchdev, then the user would not see the number of the CPU port in /sys/class/net/eth0/phys_port_name, but the number of the DSA master port within its respective physical switch. But that was actually ok in a way. The commit mentioned above changed that behavior, and now overrides the master's ndo_get_phys_port_name unconditionally. That comes with problems of its own, which are worse in a way. The idea is that it's typical for switchdev users to have udev rules for consistent interface naming. These are based, among other things, on the phys_port_name attribute. If we let the DSA switch at the bottom to start randomly overriding ndo_get_phys_port_name with its own CPU port, we basically lose any predictability in interface naming, or even uniqueness, for that matter. So, there are reasons to let DSA override the master's callback (to provide a consistent interface, a number which has a clear meaning and must not be interpreted according to context), and there are reasons to not let DSA override it (it breaks udev matching for the DSA master). 
But, there is an alternative method for users to retrieve the number of the CPU port of each DSA switch in the system: $ devlink port pci/0000:00:00.5/0: type eth netdev swp0 flavour physical port 0 pci/0000:00:00.5/2: type eth netdev swp2 flavour physical port 2 pci/0000:00:00.5/4: type notset flavour cpu port 4 spi/spi2.0/0: type eth netdev sw0p0 flavour physical port 0 spi/spi2.0/1: type eth netdev sw0p1 flavour physical port 1 spi/spi2.0/2: type eth netdev sw0p2 flavour physical port 2 spi/spi2.0/4: type notset flavour cpu port 4 spi/spi2.1/0: type eth netdev sw1p0 flavour physical port 0 spi/spi2.1/1: type eth netdev sw1p1 flavour physical port 1 spi/spi2.1/2: type eth netdev sw1p2 flavour physical port 2 spi/spi2.1/3: type eth netdev sw1p3 flavour physical port 3 spi/spi2.1/4: type notset flavour cpu port 4 So remove this duplicated, unreliable and troublesome method. From this patch on, the phys_port_name attribute of the DSA master will only contain information about itself (if at all). If the users need reliable information about the CPU port they're probably using devlink anyway. Signed-off-by: Vladimir Oltean Acked-by: florian Fainelli Signed-off-by: David S. Miller --- include/net/dsa.h | 23 ----------------------- net/core/dev.c | 5 ----- net/dsa/master.c | 12 ------------ 3 files changed, 40 deletions(-) (limited to 'include') diff --git a/include/net/dsa.h b/include/net/dsa.h index f1b63d06d132..75c8fac82017 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -94,8 +94,6 @@ struct dsa_device_ops { struct dsa_netdevice_ops { int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); - int (*ndo_get_phys_port_name)(struct net_device *dev, char *name, - size_t len); }; #define DSA_TAG_DRIVER_ALIAS "dsa_tag-" @@ -719,33 +717,12 @@ static inline int dsa_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr, return ops->ndo_do_ioctl(dev, ifr, cmd); } - -static inline int dsa_ndo_get_phys_port_name(struct net_device *dev, - char *name, size_t len) -{ - const struct dsa_netdevice_ops *ops; - int err; - - err = __dsa_netdevice_ops_check(dev); - if (err) - return err; - - ops = dev->dsa_ptr->netdev_ops; - - return ops->ndo_get_phys_port_name(dev, name, len); -} #else static inline int dsa_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { return -EOPNOTSUPP; } - -static inline int dsa_ndo_get_phys_port_name(struct net_device *dev, - char *name, size_t len) -{ - return -EOPNOTSUPP; -} #endif void dsa_unregister_switch(struct dsa_switch *ds); diff --git a/net/core/dev.c b/net/core/dev.c index 316349f6cea5..a986b07ea845 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -98,7 +98,6 @@ #include #include #include -#include #include #include #include @@ -8605,10 +8604,6 @@ int dev_get_phys_port_name(struct net_device *dev, const struct net_device_ops *ops = dev->netdev_ops; int err; - err = dsa_ndo_get_phys_port_name(dev, name, len); - if (err == 0 || err != -EOPNOTSUPP) - return err; - if (ops->ndo_get_phys_port_name) { err = ops->ndo_get_phys_port_name(dev, name, len); if (err != -EOPNOTSUPP) diff --git a/net/dsa/master.c b/net/dsa/master.c index 0a90911ae31b..61615ebc70e9 100644 --- a/net/dsa/master.c +++ b/net/dsa/master.c @@ -186,17 +186,6 @@ static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset, } } -static int dsa_master_get_phys_port_name(struct net_device *dev, - char *name, size_t len) -{ - struct dsa_port *cpu_dp = dev->dsa_ptr; - - if (snprintf(name, len, "p%d", cpu_dp->index) >= len) - return -EINVAL; - - return 0; 
-} - static int dsa_master_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct dsa_port *cpu_dp = dev->dsa_ptr; @@ -228,7 +217,6 @@ static int dsa_master_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) static const struct dsa_netdevice_ops dsa_netdev_ops = { .ndo_do_ioctl = dsa_master_ioctl, - .ndo_get_phys_port_name = dsa_master_get_phys_port_name, }; static int dsa_master_ethtool_setup(struct net_device *dev) -- cgit v1.2.3 From 7f3d176f5f7e3f0477bf82df0f600fcddcdcc4e4 Mon Sep 17 00:00:00 2001 From: Tyler Hicks Date: Fri, 10 Jul 2020 14:29:55 -0500 Subject: tpm: Require that all digests are present in TCG_PCR_EVENT2 structures Require that the TCG_PCR_EVENT2.digests.count value strictly matches the value of TCG_EfiSpecIdEvent.numberOfAlgorithms in the event field of the TCG_PCClientPCREvent event log header. Also require that TCG_EfiSpecIdEvent.numberOfAlgorithms is non-zero. The TCG PC Client Platform Firmware Profile Specification section 9.1 (Family "2.0", Level 00 Revision 1.04) states: For each Hash algorithm enumerated in the TCG_PCClientPCREvent entry, there SHALL be a corresponding digest in all TCG_PCR_EVENT2 structures. Note: This includes EV_NO_ACTION events which do not extend the PCR. Section 9.4.5.1 provides this description of TCG_EfiSpecIdEvent.numberOfAlgorithms: The number of Hash algorithms in the digestSizes field. This field MUST be set to a value of 0x01 or greater. Enforce these restrictions, as required by the above specification, in order to better identify and ignore invalid sequences of bytes at the end of an otherwise valid TPM2 event log. Firmware doesn't always have the means necessary to inform the kernel of the actual event log size so the kernel's event log parsing code should be stringent when parsing the event log for resiliency against firmware bugs. This is true, for example, when firmware passes the event log to the kernel via a reserved memory region described in device tree. POWER and some ARM systems use the "linux,sml-base" and "linux,sml-size" device tree properties to describe the memory region used to pass the event log from firmware to the kernel. Unfortunately, the "linux,sml-size" property describes the size of the entire reserved memory region rather than the size of the event long within the memory region and the event log format does not include information describing the size of the event log. tpm_read_log_of(), in drivers/char/tpm/eventlog/of.c, is where the "linux,sml-size" property is used. At the end of that function, log->bios_event_log_end is pointing at the end of the reserved memory region. That's typically 0x10000 bytes offset from "linux,sml-base", depending on what's defined in the device tree source. The firmware event log only fills a portion of those 0x10000 bytes and the rest of the memory region should be zeroed out by firmware. Even in the case of a properly zeroed bytes in the remainder of the memory region, the only thing allowing the kernel's event log parser to detect the end of the event log is the following conditional in __calc_tpm2_event_size(): if (event_type == 0 && event_field->event_size == 0) size = 0; If that wasn't there, __calc_tpm2_event_size() would think that a 16 byte sequence of zeroes, following an otherwise valid event log, was a valid event. However, problems can occur if a single bit is set in the offset corresponding to either the TCG_PCR_EVENT2.eventType or TCG_PCR_EVENT2.eventSize fields, after the last valid event log entry. 
This could confuse the parser into thinking that an additional entry is present in the event log and exposing this invalid entry to userspace in the /sys/kernel/security/tpm0/binary_bios_measurements file. Such problems have been seen if firmware does not fully zero the memory region upon a warm reboot. This patch significantly raises the bar on how difficult it is for stale/invalid memory to confuse the kernel's event log parser but there's still, ultimately, a reliance on firmware to properly initialize the remainder of the memory region reserved for the event log as the parser cannot be expected to detect a stale but otherwise properly formatted firmware event log entry. Fixes: fd5c78694f3f ("tpm: fix handling of the TPM 2.0 event logs") Signed-off-by: Tyler Hicks Reviewed-by: Jarkko Sakkinen Signed-off-by: Jarkko Sakkinen --- include/linux/tpm_eventlog.h | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h index 64356b199e94..739ba9a03ec1 100644 --- a/include/linux/tpm_eventlog.h +++ b/include/linux/tpm_eventlog.h @@ -211,9 +211,16 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event, efispecid = (struct tcg_efi_specid_event_head *)event_header->event; - /* Check if event is malformed. */ + /* + * Perform validation of the event in order to identify malformed + * events. This function may be asked to parse arbitrary byte sequences + * immediately following a valid event log. The caller expects this + * function to recognize that the byte sequence is not a valid event + * and to return an event size of 0. + */ if (memcmp(efispecid->signature, TCG_SPECID_SIG, - sizeof(TCG_SPECID_SIG)) || count > efispecid->num_algs) { + sizeof(TCG_SPECID_SIG)) || + !efispecid->num_algs || count != efispecid->num_algs) { size = 0; goto out; } -- cgit v1.2.3 From 6c4e79d99e6f42b79040f1a33cd4018f5425030b Mon Sep 17 00:00:00 2001 From: Jarkko Sakkinen Date: Fri, 3 Jul 2020 01:55:59 +0300 Subject: tpm: Unify the mismatching TPM space buffer sizes The size of the buffers for storing context's and sessions can vary from arch to arch as PAGE_SIZE can be anything between 4 kB and 256 kB (the maximum for PPC64). Define a fixed buffer size set to 16 kB. This should be enough for most use with three handles (that is how many we allow at the moment). Parametrize the buffer size while doing this, so that it is easier to revisit this later on if required. 
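As an illustrative sketch only (not taken from the patch), a caller now allocates a space with the fixed buffer size rather than PAGE_SIZE; this mirrors what tpmrm_open() does, and the helper name below is hypothetical. Note that tpm2_init_space() and TPM2_SPACE_BUFFER_SIZE are internal to drivers/char/tpm.

/* Hypothetical helper inside drivers/char/tpm, mirroring tpmrm_open(). */
static int example_open_space(struct tpm_space *space)
{
	int rc;

	/* Both context_buf and session_buf are sized to 16 kB. */
	rc = tpm2_init_space(space, TPM2_SPACE_BUFFER_SIZE);
	if (rc)
		return rc;	/* buffers are freed/NULL on failure */

	/* Save/restore paths now bound their copies by space->buf_size. */
	return 0;
}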
Cc: stable@vger.kernel.org Reported-by: Stefan Berger Fixes: 745b361e989a ("tpm: infrastructure for TPM spaces") Reviewed-by: Jerry Snitselaar Tested-by: Stefan Berger Signed-off-by: Jarkko Sakkinen --- drivers/char/tpm/tpm-chip.c | 9 ++------- drivers/char/tpm/tpm.h | 5 ++++- drivers/char/tpm/tpm2-space.c | 26 ++++++++++++++++---------- drivers/char/tpm/tpmrm-dev.c | 2 +- include/linux/tpm.h | 1 + 5 files changed, 24 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 8c77e88012e9..ddaeceb7e109 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -386,13 +386,8 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, chip->cdev.owner = THIS_MODULE; chip->cdevs.owner = THIS_MODULE; - chip->work_space.context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (!chip->work_space.context_buf) { - rc = -ENOMEM; - goto out; - } - chip->work_space.session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (!chip->work_space.session_buf) { + rc = tpm2_init_space(&chip->work_space, TPM2_SPACE_BUFFER_SIZE); + if (rc) { rc = -ENOMEM; goto out; } diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 0fbcede241ea..947d1db0a5cc 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -59,6 +59,9 @@ enum tpm_addr { #define TPM_TAG_RQU_COMMAND 193 +/* TPM2 specific constants. */ +#define TPM2_SPACE_BUFFER_SIZE 16384 /* 16 kB */ + struct stclear_flags_t { __be16 tag; u8 deactivated; @@ -228,7 +231,7 @@ unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal); int tpm2_probe(struct tpm_chip *chip); int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip); int tpm2_find_cc(struct tpm_chip *chip, u32 cc); -int tpm2_init_space(struct tpm_space *space); +int tpm2_init_space(struct tpm_space *space, unsigned int buf_size); void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space); void tpm2_flush_space(struct tpm_chip *chip); int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd, diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c index 982d341d8837..784b8b3cb903 100644 --- a/drivers/char/tpm/tpm2-space.c +++ b/drivers/char/tpm/tpm2-space.c @@ -38,18 +38,21 @@ static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space) } } -int tpm2_init_space(struct tpm_space *space) +int tpm2_init_space(struct tpm_space *space, unsigned int buf_size) { - space->context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); + space->context_buf = kzalloc(buf_size, GFP_KERNEL); if (!space->context_buf) return -ENOMEM; - space->session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); + space->session_buf = kzalloc(buf_size, GFP_KERNEL); if (space->session_buf == NULL) { kfree(space->context_buf); + /* Prevent caller getting a dangling pointer. 
*/ + space->context_buf = NULL; return -ENOMEM; } + space->buf_size = buf_size; return 0; } @@ -311,8 +314,10 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd, sizeof(space->context_tbl)); memcpy(&chip->work_space.session_tbl, &space->session_tbl, sizeof(space->session_tbl)); - memcpy(chip->work_space.context_buf, space->context_buf, PAGE_SIZE); - memcpy(chip->work_space.session_buf, space->session_buf, PAGE_SIZE); + memcpy(chip->work_space.context_buf, space->context_buf, + space->buf_size); + memcpy(chip->work_space.session_buf, space->session_buf, + space->buf_size); rc = tpm2_load_space(chip); if (rc) { @@ -492,7 +497,7 @@ static int tpm2_save_space(struct tpm_chip *chip) continue; rc = tpm2_save_context(chip, space->context_tbl[i], - space->context_buf, PAGE_SIZE, + space->context_buf, space->buf_size, &offset); if (rc == -ENOENT) { space->context_tbl[i] = 0; @@ -509,9 +514,8 @@ static int tpm2_save_space(struct tpm_chip *chip) continue; rc = tpm2_save_context(chip, space->session_tbl[i], - space->session_buf, PAGE_SIZE, + space->session_buf, space->buf_size, &offset); - if (rc == -ENOENT) { /* handle error saving session, just forget it */ space->session_tbl[i] = 0; @@ -557,8 +561,10 @@ int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, sizeof(space->context_tbl)); memcpy(&space->session_tbl, &chip->work_space.session_tbl, sizeof(space->session_tbl)); - memcpy(space->context_buf, chip->work_space.context_buf, PAGE_SIZE); - memcpy(space->session_buf, chip->work_space.session_buf, PAGE_SIZE); + memcpy(space->context_buf, chip->work_space.context_buf, + space->buf_size); + memcpy(space->session_buf, chip->work_space.session_buf, + space->buf_size); return 0; out: diff --git a/drivers/char/tpm/tpmrm-dev.c b/drivers/char/tpm/tpmrm-dev.c index 7a0a7051a06f..eef0fb06ea83 100644 --- a/drivers/char/tpm/tpmrm-dev.c +++ b/drivers/char/tpm/tpmrm-dev.c @@ -21,7 +21,7 @@ static int tpmrm_open(struct inode *inode, struct file *file) if (priv == NULL) return -ENOMEM; - rc = tpm2_init_space(&priv->space); + rc = tpm2_init_space(&priv->space, TPM2_SPACE_BUFFER_SIZE); if (rc) { kfree(priv); return -ENOMEM; diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 03e9b184411b..8f4ff39f51e7 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -96,6 +96,7 @@ struct tpm_space { u8 *context_buf; u32 session_tbl[3]; u8 *session_buf; + u32 buf_size; }; struct tpm_bios_log { -- cgit v1.2.3 From 18306111e65bb99b6ec676d51728bbfe85fdacae Mon Sep 17 00:00:00 2001 From: Stefan Berger Date: Mon, 6 Jul 2020 19:58:06 -0400 Subject: acpi: Extend TPM2 ACPI table with missing log fields Recent extensions of the TPM2 ACPI table added 3 more fields including 12 bytes of start method specific parameters and Log Area Minimum Length (u32) and Log Area Start Address (u64). So, we define a new structure acpi_tpm2_phy that holds these optional new fields. The new fields allow non-UEFI systems to access the TPM2's log. The specification that has the new fields is the following: TCG ACPI Specification Family "1.2" and "2.0" Version 1.2, Revision 8 https://trustedcomputinggroup.org/wp-content/uploads/TCG_ACPIGeneralSpecification_v1.20_r8.pdf Cc: linux-acpi@vger.kernel.org Cc: Len Brown Signed-off-by: Stefan Berger Acked-by: Rafael J. 
Wysocki Reviewed-by: Jarkko Sakkinen Reviewed-by: Jerry Snitselaar Signed-off-by: Jarkko Sakkinen --- include/acpi/actbl3.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include') diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h index b0b163b9efc6..bdcac69fa6bd 100644 --- a/include/acpi/actbl3.h +++ b/include/acpi/actbl3.h @@ -415,6 +415,13 @@ struct acpi_table_tpm2 { /* Platform-specific data follows */ }; +/* Optional trailer for revision 4 holding platform-specific data */ +struct acpi_tpm2_phy { + u8 start_method_specific[12]; + u32 log_area_minimum_length; + u64 log_area_start_address; +}; + /* Values for start_method above */ #define ACPI_TPM2_NOT_ALLOWED 0 -- cgit v1.2.3 From 5f77d6ca5ca74e4b4a5e2e010f7ff50c45dea326 Mon Sep 17 00:00:00 2001 From: Liu Yi L Date: Fri, 24 Jul 2020 09:49:14 +0800 Subject: iommu/vt-d: Enforce PASID devTLB field mask Set proper masks to avoid invalid input spillover to reserved bits. Signed-off-by: Liu Yi L Signed-off-by: Jacob Pan Signed-off-by: Lu Baolu Reviewed-by: Eric Auger Link: https://lore.kernel.org/r/20200724014925.15523-2-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- include/linux/intel-iommu.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 3e8fa1c7a1e6..311117b50e93 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -381,8 +381,8 @@ enum { #define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK) #define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) -#define QI_DEV_EIOTLB_GLOB(g) ((u64)g) -#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32) +#define QI_DEV_EIOTLB_GLOB(g) ((u64)(g) & 0x1) +#define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32) #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16) #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4) #define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \ -- cgit v1.2.3 From 78df6c86f0691f5b6e325006aeb470de443351ea Mon Sep 17 00:00:00 2001 From: Jacob Pan Date: Fri, 24 Jul 2020 09:49:15 +0800 Subject: iommu/vt-d: Remove global page support in devTLB flush Global pages support is removed from VT-d spec 3.0 for dev TLB invalidation. This patch is to remove the bits for vSVA. Similar change already made for the native SVA. See the link below. 
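For clarity, a minimal sketch of the updated call shape (the wrapper name and arguments are placeholders, not from this patch): a PASID-based device-IOTLB invalidation is now requested without any granularity argument.

#include <linux/intel-iommu.h>

/* Hypothetical wrapper showing the new prototype; the granu parameter
 * and the QI_DEV_EIOTLB_GLOB() encoding no longer exist. */
static void example_flush_dev_piotlb(struct intel_iommu *iommu, u16 sid,
				     u16 pfsid, u32 pasid, u16 qdep,
				     u64 addr, unsigned int size_order)
{
	qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep,
				 addr, size_order);
}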
Signed-off-by: Jacob Pan Signed-off-by: Lu Baolu Reviewed-by: Eric Auger Link: https://lore.kernel.org/linux-iommu/20190830142919.GE11578@8bytes.org/T/ Link: https://lore.kernel.org/r/20200724014925.15523-3-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/dmar.c | 4 +--- drivers/iommu/intel/iommu.c | 4 ++-- include/linux/intel-iommu.h | 3 +-- 3 files changed, 4 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index 683b812c5c47..9be08b9400ee 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -1438,8 +1438,7 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr, /* PASID-based device IOTLB Invalidate */ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid, - u32 pasid, u16 qdep, u64 addr, - unsigned int size_order, u64 granu) + u32 pasid, u16 qdep, u64 addr, unsigned int size_order) { unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1); struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0}; @@ -1447,7 +1446,6 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid, desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) | QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid); - desc.qw1 = QI_DEV_EIOTLB_GLOB(granu); /* * If S bit is 0, we only flush a single page. If S bit is set, diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index d759e7234e98..bdd1e7d81178 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -5474,7 +5474,7 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev, info->pfsid, pasid, info->ats_qdep, inv_info->addr_info.addr, - size, granu); + size); break; case IOMMU_CACHE_INV_TYPE_DEV_IOTLB: if (info->ats_enabled) @@ -5482,7 +5482,7 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev, info->pfsid, pasid, info->ats_qdep, inv_info->addr_info.addr, - size, granu); + size); else pr_warn_ratelimited("Passdown device IOTLB flush w/o ATS!\n"); break; diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 311117b50e93..c7a8aae36771 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -381,7 +381,6 @@ enum { #define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK) #define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) -#define QI_DEV_EIOTLB_GLOB(g) ((u64)(g) & 0x1) #define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32) #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16) #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4) @@ -705,7 +704,7 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr, void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid, u32 pasid, u16 qdep, u64 addr, - unsigned int size_order, u64 granu); + unsigned int size_order); void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu, int pasid); -- cgit v1.2.3 From f793e45494586f742410f17539f1ea4156ea7bf9 Mon Sep 17 00:00:00 2001 From: Jonathan Marek Date: Thu, 9 Jul 2020 09:52:38 -0400 Subject: dt-bindings: clock: add SM8150 QCOM Graphics clock bindings Add device tree bindings for graphics clock controller for Qualcomm Technology Inc's SM8150 SoCs. 
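As a sketch of how such binding IDs are typically consumed on the kernel side (the table and symbol names below are assumptions, not taken from this patch), a clock provider driver keeps one entry per ID so that device trees referencing, for example, <&gpucc GPU_CC_CX_GMU_CLK> resolve to the intended clock.

#include <dt-bindings/clock/qcom,gpucc-sm8150.h>
#include <linux/clk-provider.h>

/* Hypothetical provider table indexed by the binding IDs above;
 * GPU_CC_PLL1 (9) is the highest clock ID defined by this header. */
static struct clk_hw *example_gpu_cc_hws[GPU_CC_PLL1 + 1];

static struct clk_hw *example_gpu_cc_get(unsigned int id)
{
	return id <= GPU_CC_PLL1 ? example_gpu_cc_hws[id] : NULL;
}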
Signed-off-by: Jonathan Marek Tested-by: Dmitry Baryshkov Link: https://lore.kernel.org/r/20200709135251.643-8-jonathan@marek.ca Reviewed-by: Rob Herring Signed-off-by: Stephen Boyd --- .../devicetree/bindings/clock/qcom,gpucc.yaml | 4 ++- include/dt-bindings/clock/qcom,gpucc-sm8150.h | 33 ++++++++++++++++++++++ 2 files changed, 36 insertions(+), 1 deletion(-) create mode 100644 include/dt-bindings/clock/qcom,gpucc-sm8150.h (limited to 'include') diff --git a/Documentation/devicetree/bindings/clock/qcom,gpucc.yaml b/Documentation/devicetree/bindings/clock/qcom,gpucc.yaml index aab6bef79771..3e064ed0e0ea 100644 --- a/Documentation/devicetree/bindings/clock/qcom,gpucc.yaml +++ b/Documentation/devicetree/bindings/clock/qcom,gpucc.yaml @@ -11,17 +11,19 @@ maintainers: description: | Qualcomm graphics clock control module which supports the clocks, resets and - power domains on SDM845/SC7180. + power domains on SDM845/SC7180/SM8150. See also: dt-bindings/clock/qcom,gpucc-sdm845.h dt-bindings/clock/qcom,gpucc-sc7180.h + dt-bindings/clock/qcom,gpucc-sm8150.h properties: compatible: enum: - qcom,sdm845-gpucc - qcom,sc7180-gpucc + - qcom,sm8150-gpucc clocks: items: diff --git a/include/dt-bindings/clock/qcom,gpucc-sm8150.h b/include/dt-bindings/clock/qcom,gpucc-sm8150.h new file mode 100644 index 000000000000..c5b70aad7770 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gpucc-sm8150.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8150_H +#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8150_H + +/* GPU_CC clock registers */ +#define GPU_CC_AHB_CLK 0 +#define GPU_CC_CRC_AHB_CLK 1 +#define GPU_CC_CX_APB_CLK 2 +#define GPU_CC_CX_GMU_CLK 3 +#define GPU_CC_CX_SNOC_DVM_CLK 4 +#define GPU_CC_CXO_AON_CLK 5 +#define GPU_CC_CXO_CLK 6 +#define GPU_CC_GMU_CLK_SRC 7 +#define GPU_CC_GX_GMU_CLK 8 +#define GPU_CC_PLL1 9 + +/* GPU_CC Resets */ +#define GPUCC_GPU_CC_CX_BCR 0 +#define GPUCC_GPU_CC_GFX3D_AON_BCR 1 +#define GPUCC_GPU_CC_GMU_BCR 2 +#define GPUCC_GPU_CC_GX_BCR 3 +#define GPUCC_GPU_CC_SPDM_BCR 4 +#define GPUCC_GPU_CC_XO_BCR 5 + +/* GPU_CC GDSCRs */ +#define GPU_CX_GDSC 0 +#define GPU_GX_GDSC 1 + +#endif -- cgit v1.2.3 From dd6692f1b883bac46036000a1e3a0b3785f89e87 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Fri, 24 Jul 2020 09:49:21 +0800 Subject: iommu/vt-d: Refactor device_to_iommu() helper It is refactored in two ways: - Make it global so that it can be used in other files. - Make bus/devfn optional so that callers can ignore these two returned values when they only want the corresponding iommu pointer.
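As a usage sketch of the refactored helper (mirroring the hunks that follow), a caller that only needs the IOMMU handle can now drop the dummy out-parameters:

        struct intel_iommu *iommu;
        u8 bus, devfn;

        /* Before this patch: dummy out-parameters were always required. */
        iommu = device_to_iommu(dev, &bus, &devfn);

        /* After this patch: pass NULL when only the iommu pointer is wanted. */
        iommu = device_to_iommu(dev, NULL, NULL);
        if (!iommu)
                return -ENODEV;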
Signed-off-by: Lu Baolu Reviewed-by: Kevin Tian Link: https://lore.kernel.org/r/20200724014925.15523-9-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/iommu.c | 55 +++++++++++++-------------------------------- drivers/iommu/intel/svm.c | 8 +++---- include/linux/intel-iommu.h | 3 +-- 3 files changed, 21 insertions(+), 45 deletions(-) (limited to 'include') diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 0a4831532442..ce8458e8119c 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -778,16 +778,16 @@ is_downstream_to_pci_bridge(struct device *dev, struct device *bridge) return false; } -static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn) +struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn) { struct dmar_drhd_unit *drhd = NULL; + struct pci_dev *pdev = NULL; struct intel_iommu *iommu; struct device *tmp; - struct pci_dev *pdev = NULL; u16 segment = 0; int i; - if (iommu_dummy(dev)) + if (!dev || iommu_dummy(dev)) return NULL; if (dev_is_pci(dev)) { @@ -818,8 +818,10 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf if (pdev && pdev->is_virtfn) goto got_pdev; - *bus = drhd->devices[i].bus; - *devfn = drhd->devices[i].devfn; + if (bus && devfn) { + *bus = drhd->devices[i].bus; + *devfn = drhd->devices[i].devfn; + } goto out; } @@ -829,8 +831,10 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf if (pdev && drhd->include_all) { got_pdev: - *bus = pdev->bus->number; - *devfn = pdev->devfn; + if (bus && devfn) { + *bus = pdev->bus->number; + *devfn = pdev->devfn; + } goto out; } } @@ -5146,11 +5150,10 @@ static int aux_domain_add_dev(struct dmar_domain *domain, struct device *dev) { int ret; - u8 bus, devfn; unsigned long flags; struct intel_iommu *iommu; - iommu = device_to_iommu(dev, &bus, &devfn); + iommu = device_to_iommu(dev, NULL, NULL); if (!iommu) return -ENODEV; @@ -5236,9 +5239,8 @@ static int prepare_domain_attach_device(struct iommu_domain *domain, struct dmar_domain *dmar_domain = to_dmar_domain(domain); struct intel_iommu *iommu; int addr_width; - u8 bus, devfn; - iommu = device_to_iommu(dev, &bus, &devfn); + iommu = device_to_iommu(dev, NULL, NULL); if (!iommu) return -ENODEV; @@ -5668,9 +5670,8 @@ static bool intel_iommu_capable(enum iommu_cap cap) static struct iommu_device *intel_iommu_probe_device(struct device *dev) { struct intel_iommu *iommu; - u8 bus, devfn; - iommu = device_to_iommu(dev, &bus, &devfn); + iommu = device_to_iommu(dev, NULL, NULL); if (!iommu) return ERR_PTR(-ENODEV); @@ -5683,9 +5684,8 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev) static void intel_iommu_release_device(struct device *dev) { struct intel_iommu *iommu; - u8 bus, devfn; - iommu = device_to_iommu(dev, &bus, &devfn); + iommu = device_to_iommu(dev, NULL, NULL); if (!iommu) return; @@ -5835,37 +5835,14 @@ static struct iommu_group *intel_iommu_device_group(struct device *dev) return generic_device_group(dev); } -#ifdef CONFIG_INTEL_IOMMU_SVM -struct intel_iommu *intel_svm_device_to_iommu(struct device *dev) -{ - struct intel_iommu *iommu; - u8 bus, devfn; - - if (iommu_dummy(dev)) { - dev_warn(dev, - "No IOMMU translation for device; cannot enable SVM\n"); - return NULL; - } - - iommu = device_to_iommu(dev, &bus, &devfn); - if ((!iommu)) { - dev_err(dev, "No IOMMU for device; cannot enable SVM\n"); - return NULL; - } - - return iommu; -} -#endif /* 
CONFIG_INTEL_IOMMU_SVM */ - static int intel_iommu_enable_auxd(struct device *dev) { struct device_domain_info *info; struct intel_iommu *iommu; unsigned long flags; - u8 bus, devfn; int ret; - iommu = device_to_iommu(dev, &bus, &devfn); + iommu = device_to_iommu(dev, NULL, NULL); if (!iommu || dmar_disabled) return -EINVAL; diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index d386853121a2..65d2327dcd0d 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -231,7 +231,7 @@ static LIST_HEAD(global_svm_list); int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev, struct iommu_gpasid_bind_data *data) { - struct intel_iommu *iommu = intel_svm_device_to_iommu(dev); + struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); struct dmar_domain *dmar_domain; struct intel_svm_dev *sdev; struct intel_svm *svm; @@ -369,7 +369,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev, int intel_svm_unbind_gpasid(struct device *dev, int pasid) { - struct intel_iommu *iommu = intel_svm_device_to_iommu(dev); + struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); struct intel_svm_dev *sdev; struct intel_svm *svm; int ret = -EINVAL; @@ -426,7 +426,7 @@ static int intel_svm_bind_mm(struct device *dev, int flags, struct svm_dev_ops *ops, struct mm_struct *mm, struct intel_svm_dev **sd) { - struct intel_iommu *iommu = intel_svm_device_to_iommu(dev); + struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); struct device_domain_info *info; struct intel_svm_dev *sdev; struct intel_svm *svm = NULL; @@ -604,7 +604,7 @@ static int intel_svm_unbind_mm(struct device *dev, int pasid) struct intel_svm *svm; int ret = -EINVAL; - iommu = intel_svm_device_to_iommu(dev); + iommu = device_to_iommu(dev, NULL, NULL); if (!iommu) goto out; diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index c7a8aae36771..a57ffbcc84c7 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -727,6 +727,7 @@ void iommu_flush_write_buffer(struct intel_iommu *iommu); int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev); struct dmar_domain *find_domain(struct device *dev); struct device_domain_info *get_domain_info(struct device *dev); +struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn); #ifdef CONFIG_INTEL_IOMMU_SVM extern void intel_svm_check(struct intel_iommu *iommu); @@ -765,8 +766,6 @@ struct intel_svm { struct list_head devs; struct list_head list; }; - -extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev); #else static inline void intel_svm_check(struct intel_iommu *iommu) {} #endif -- cgit v1.2.3 From 8b73712115ebd603b75876f7d96d59e41d9107ad Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Fri, 24 Jul 2020 09:49:24 +0800 Subject: iommu/vt-d: Add page response ops support After page requests are handled, software must respond to the device which raised the page request with the result. This is done through the iommu ops.page_response if the request was reported to outside of vendor iommu driver through iommu_report_device_fault(). This adds the VT-d implementation of page_response ops. 
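For context, a consumer of the fault-reporting path would complete a page request group roughly as sketched below. The function name is invented, and the struct iommu_page_response fields and the iommu_page_response() entry point are assumed from the generic IOMMU fault API of this series:

static void example_complete_page_request(struct device *dev,
                                           struct iommu_fault_event *evt,
                                           bool handled)
{
        struct iommu_page_response resp = {
                .version = IOMMU_PAGE_RESP_VERSION_1,
                .pasid   = evt->fault.prm.pasid,
                .grpid   = evt->fault.prm.grpid,
                .code    = handled ? IOMMU_PAGE_RESP_SUCCESS
                                   : IOMMU_PAGE_RESP_INVALID,
        };

        /* Routed to intel_svm_page_response() via iommu_ops->page_response. */
        iommu_page_response(dev, &resp);
}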
Co-developed-by: Jacob Pan Co-developed-by: Liu Yi L Signed-off-by: Jacob Pan Signed-off-by: Liu Yi L Signed-off-by: Lu Baolu Reviewed-by: Kevin Tian Link: https://lore.kernel.org/r/20200724014925.15523-12-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/iommu.c | 1 + drivers/iommu/intel/svm.c | 99 +++++++++++++++++++++++++++++++++++++++++++++ include/linux/intel-iommu.h | 3 ++ 3 files changed, 103 insertions(+) (limited to 'include') diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index ce8458e8119c..1cb3a2a050c3 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -6067,6 +6067,7 @@ const struct iommu_ops intel_iommu_ops = { .sva_bind = intel_svm_bind, .sva_unbind = intel_svm_unbind, .sva_get_pasid = intel_svm_get_pasid, + .page_response = intel_svm_page_response, #endif }; diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index 140114ab1375..85ce8daa3177 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -1078,3 +1078,102 @@ int intel_svm_get_pasid(struct iommu_sva *sva) return pasid; } + +int intel_svm_page_response(struct device *dev, + struct iommu_fault_event *evt, + struct iommu_page_response *msg) +{ + struct iommu_fault_page_request *prm; + struct intel_svm_dev *sdev = NULL; + struct intel_svm *svm = NULL; + struct intel_iommu *iommu; + bool private_present; + bool pasid_present; + bool last_page; + u8 bus, devfn; + int ret = 0; + u16 sid; + + if (!dev || !dev_is_pci(dev)) + return -ENODEV; + + iommu = device_to_iommu(dev, &bus, &devfn); + if (!iommu) + return -ENODEV; + + if (!msg || !evt) + return -EINVAL; + + mutex_lock(&pasid_mutex); + + prm = &evt->fault.prm; + sid = PCI_DEVID(bus, devfn); + pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID; + private_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA; + last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE; + + if (!pasid_present) { + ret = -EINVAL; + goto out; + } + + if (prm->pasid == 0 || prm->pasid >= PASID_MAX) { + ret = -EINVAL; + goto out; + } + + ret = pasid_to_svm_sdev(dev, prm->pasid, &svm, &sdev); + if (ret || !sdev) { + ret = -ENODEV; + goto out; + } + + /* + * For responses from userspace, need to make sure that the + * pasid has been bound to its mm. + */ + if (svm->flags & SVM_FLAG_GUEST_MODE) { + struct mm_struct *mm; + + mm = get_task_mm(current); + if (!mm) { + ret = -EINVAL; + goto out; + } + + if (mm != svm->mm) { + ret = -ENODEV; + mmput(mm); + goto out; + } + + mmput(mm); + } + + /* + * Per VT-d spec. v3.0 ch7.7, system software must respond + * with page group response if private data is present (PDP) + * or last page in group (LPIG) bit is set. This is an + * additional VT-d requirement beyond PCI ATS spec. 
+ */ + if (last_page || private_present) { + struct qi_desc desc; + + desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) | + QI_PGRP_PASID_P(pasid_present) | + QI_PGRP_PDP(private_present) | + QI_PGRP_RESP_CODE(msg->code) | + QI_PGRP_RESP_TYPE; + desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page); + desc.qw2 = 0; + desc.qw3 = 0; + if (private_present) + memcpy(&desc.qw2, prm->private_data, + sizeof(prm->private_data)); + + qi_submit_sync(iommu, &desc, 1, 0); + } +out: + mutex_unlock(&pasid_mutex); + return ret; +} diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index a57ffbcc84c7..9ff5e340948b 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -740,6 +740,9 @@ struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata); void intel_svm_unbind(struct iommu_sva *handle); int intel_svm_get_pasid(struct iommu_sva *handle); +int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt, + struct iommu_page_response *msg); + struct svm_dev_ops; struct intel_svm_dev { -- cgit v1.2.3 From 324e0bfcfb005f161bbb31ea21ddad0f1bc8f400 Mon Sep 17 00:00:00 2001 From: Jonathan Marek Date: Thu, 9 Jul 2020 09:52:39 -0400 Subject: dt-bindings: clock: add SM8250 QCOM Graphics clock bindings Add device tree bindings for graphics clock controller for Qualcomm Technology Inc's SM8250 SoCs. Signed-off-by: Jonathan Marek Tested-by: Dmitry Baryshkov Link: https://lore.kernel.org/r/20200709135251.643-9-jonathan@marek.ca Reviewed-by: Rob Herring Signed-off-by: Stephen Boyd --- .../devicetree/bindings/clock/qcom,gpucc.yaml | 4 ++- include/dt-bindings/clock/qcom,gpucc-sm8250.h | 34 ++++++++++++++++++++++ 2 files changed, 37 insertions(+), 1 deletion(-) create mode 100644 include/dt-bindings/clock/qcom,gpucc-sm8250.h (limited to 'include') diff --git a/Documentation/devicetree/bindings/clock/qcom,gpucc.yaml b/Documentation/devicetree/bindings/clock/qcom,gpucc.yaml index 3e064ed0e0ea..df943c4c3234 100644 --- a/Documentation/devicetree/bindings/clock/qcom,gpucc.yaml +++ b/Documentation/devicetree/bindings/clock/qcom,gpucc.yaml @@ -11,12 +11,13 @@ maintainers: description: | Qualcomm graphics clock control module which supports the clocks, resets and - power domains on SDM845/SC7180/SM8150. + power domains on SDM845/SC7180/SM8150/SM8250. See also: dt-bindings/clock/qcom,gpucc-sdm845.h dt-bindings/clock/qcom,gpucc-sc7180.h dt-bindings/clock/qcom,gpucc-sm8150.h + dt-bindings/clock/qcom,gpucc-sm8250.h properties: compatible: @@ -24,6 +25,7 @@ properties: - qcom,sdm845-gpucc - qcom,sc7180-gpucc - qcom,sm8150-gpucc + - qcom,sm8250-gpucc clocks: items: diff --git a/include/dt-bindings/clock/qcom,gpucc-sm8250.h b/include/dt-bindings/clock/qcom,gpucc-sm8250.h new file mode 100644 index 000000000000..dc8e387c48ad --- /dev/null +++ b/include/dt-bindings/clock/qcom,gpucc-sm8250.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8250_H +#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8250_H + +/* GPU_CC clock registers */ +#define GPU_CC_AHB_CLK 0 +#define GPU_CC_CRC_AHB_CLK 1 +#define GPU_CC_CX_APB_CLK 2 +#define GPU_CC_CX_GMU_CLK 3 +#define GPU_CC_CX_SNOC_DVM_CLK 4 +#define GPU_CC_CXO_AON_CLK 5 +#define GPU_CC_CXO_CLK 6 +#define GPU_CC_GMU_CLK_SRC 7 +#define GPU_CC_GX_GMU_CLK 8 +#define GPU_CC_PLL1 9 +#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 10 + +/* GPU_CC Resets */ +#define GPUCC_GPU_CC_ACD_BCR 0 +#define GPUCC_GPU_CC_CX_BCR 1 +#define GPUCC_GPU_CC_GFX3D_AON_BCR 2 +#define GPUCC_GPU_CC_GMU_BCR 3 +#define GPUCC_GPU_CC_GX_BCR 4 +#define GPUCC_GPU_CC_XO_BCR 5 + +/* GPU_CC GDSCRs */ +#define GPU_CX_GDSC 0 +#define GPU_GX_GDSC 1 + +#endif -- cgit v1.2.3 From 75c88143f3b879664cc5bf68b91854c1a98f5e5b Mon Sep 17 00:00:00 2001 From: Claudiu Beznea Date: Wed, 22 Jul 2020 10:38:20 +0300 Subject: clk: at91: clk-master: add master clock support for SAMA7G5 Add master clock support (MCK1..4) for SAMA7G5. SAMA7G5's PMC has multiple master clocks feeding different subsystems. One of them feeds image subsystem and is changeable based on image subsystem needs. Signed-off-by: Claudiu Beznea Link: https://lore.kernel.org/r/1595403506-8209-13-git-send-email-claudiu.beznea@microchip.com Signed-off-by: Stephen Boyd --- drivers/clk/at91/clk-master.c | 310 +++++++++++++++++++++++++++++++++++++++++- drivers/clk/at91/pmc.h | 7 + include/linux/clk/at91_pmc.h | 1 + 3 files changed, 313 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c index 88d545b1698c..bd0d8a69a2cf 100644 --- a/drivers/clk/at91/clk-master.c +++ b/drivers/clk/at91/clk-master.c @@ -17,30 +17,49 @@ #define MASTER_DIV_SHIFT 8 #define MASTER_DIV_MASK 0x3 +#define PMC_MCR 0x30 +#define PMC_MCR_ID_MSK GENMASK(3, 0) +#define PMC_MCR_CMD BIT(7) +#define PMC_MCR_DIV GENMASK(10, 8) +#define PMC_MCR_CSS GENMASK(20, 16) +#define PMC_MCR_CSS_SHIFT (16) +#define PMC_MCR_EN BIT(28) + +#define PMC_MCR_ID(x) ((x) & PMC_MCR_ID_MSK) + +#define MASTER_MAX_ID 4 + #define to_clk_master(hw) container_of(hw, struct clk_master, hw) struct clk_master { struct clk_hw hw; struct regmap *regmap; + spinlock_t *lock; const struct clk_master_layout *layout; const struct clk_master_characteristics *characteristics; + u32 *mux_table; u32 mckr; + int chg_pid; + u8 id; + u8 parent; + u8 div; }; -static inline bool clk_master_ready(struct regmap *regmap) +static inline bool clk_master_ready(struct clk_master *master) { + unsigned int bit = master->id ? 
AT91_PMC_MCKXRDY : AT91_PMC_MCKRDY; unsigned int status; - regmap_read(regmap, AT91_PMC_SR, &status); + regmap_read(master->regmap, AT91_PMC_SR, &status); - return !!(status & AT91_PMC_MCKRDY); + return !!(status & bit); } static int clk_master_prepare(struct clk_hw *hw) { struct clk_master *master = to_clk_master(hw); - while (!clk_master_ready(master->regmap)) + while (!clk_master_ready(master)) cpu_relax(); return 0; @@ -50,7 +69,7 @@ static int clk_master_is_prepared(struct clk_hw *hw) { struct clk_master *master = to_clk_master(hw); - return clk_master_ready(master->regmap); + return clk_master_ready(master); } static unsigned long clk_master_recalc_rate(struct clk_hw *hw, @@ -143,6 +162,287 @@ at91_clk_register_master(struct regmap *regmap, return hw; } +static unsigned long +clk_sama7g5_master_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_master *master = to_clk_master(hw); + + return DIV_ROUND_CLOSEST_ULL(parent_rate, (1 << master->div)); +} + +static void clk_sama7g5_master_best_diff(struct clk_rate_request *req, + struct clk_hw *parent, + unsigned long parent_rate, + long *best_rate, + long *best_diff, + u32 div) +{ + unsigned long tmp_rate, tmp_diff; + + if (div == MASTER_PRES_MAX) + tmp_rate = parent_rate / 3; + else + tmp_rate = parent_rate >> div; + + tmp_diff = abs(req->rate - tmp_rate); + + if (*best_diff < 0 || *best_diff >= tmp_diff) { + *best_rate = tmp_rate; + *best_diff = tmp_diff; + req->best_parent_rate = parent_rate; + req->best_parent_hw = parent; + } +} + +static int clk_sama7g5_master_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct clk_master *master = to_clk_master(hw); + struct clk_rate_request req_parent = *req; + struct clk_hw *parent; + long best_rate = LONG_MIN, best_diff = LONG_MIN; + unsigned long parent_rate; + unsigned int div, i; + + /* First: check the dividers of MCR. */ + for (i = 0; i < clk_hw_get_num_parents(hw); i++) { + parent = clk_hw_get_parent_by_index(hw, i); + if (!parent) + continue; + + parent_rate = clk_hw_get_rate(parent); + if (!parent_rate) + continue; + + for (div = 0; div < MASTER_PRES_MAX + 1; div++) { + clk_sama7g5_master_best_diff(req, parent, parent_rate, + &best_rate, &best_diff, + div); + if (!best_diff) + break; + } + + if (!best_diff) + break; + } + + /* Second: try to request rate form changeable parent. 
*/ + if (master->chg_pid < 0) + goto end; + + parent = clk_hw_get_parent_by_index(hw, master->chg_pid); + if (!parent) + goto end; + + for (div = 0; div < MASTER_PRES_MAX + 1; div++) { + if (div == MASTER_PRES_MAX) + req_parent.rate = req->rate * 3; + else + req_parent.rate = req->rate << div; + + if (__clk_determine_rate(parent, &req_parent)) + continue; + + clk_sama7g5_master_best_diff(req, parent, req_parent.rate, + &best_rate, &best_diff, div); + + if (!best_diff) + break; + } + +end: + pr_debug("MCK: %s, best_rate = %ld, parent clk: %s @ %ld\n", + __func__, best_rate, + __clk_get_name((req->best_parent_hw)->clk), + req->best_parent_rate); + + if (best_rate < 0) + return -EINVAL; + + req->rate = best_rate; + + return 0; +} + +static u8 clk_sama7g5_master_get_parent(struct clk_hw *hw) +{ + struct clk_master *master = to_clk_master(hw); + unsigned long flags; + u8 index; + + spin_lock_irqsave(master->lock, flags); + index = clk_mux_val_to_index(&master->hw, master->mux_table, 0, + master->parent); + spin_unlock_irqrestore(master->lock, flags); + + return index; +} + +static int clk_sama7g5_master_set_parent(struct clk_hw *hw, u8 index) +{ + struct clk_master *master = to_clk_master(hw); + unsigned long flags; + + if (index >= clk_hw_get_num_parents(hw)) + return -EINVAL; + + spin_lock_irqsave(master->lock, flags); + master->parent = clk_mux_index_to_val(master->mux_table, 0, index); + spin_unlock_irqrestore(master->lock, flags); + + return 0; +} + +static int clk_sama7g5_master_enable(struct clk_hw *hw) +{ + struct clk_master *master = to_clk_master(hw); + unsigned long flags; + unsigned int val, cparent; + + spin_lock_irqsave(master->lock, flags); + + regmap_write(master->regmap, PMC_MCR, PMC_MCR_ID(master->id)); + regmap_read(master->regmap, PMC_MCR, &val); + regmap_update_bits(master->regmap, PMC_MCR, + PMC_MCR_EN | PMC_MCR_CSS | PMC_MCR_DIV | + PMC_MCR_CMD | PMC_MCR_ID_MSK, + PMC_MCR_EN | (master->parent << PMC_MCR_CSS_SHIFT) | + (master->div << MASTER_DIV_SHIFT) | + PMC_MCR_CMD | PMC_MCR_ID(master->id)); + + cparent = (val & PMC_MCR_CSS) >> PMC_MCR_CSS_SHIFT; + + /* Wait here only if parent is being changed. 
*/ + while ((cparent != master->parent) && !clk_master_ready(master)) + cpu_relax(); + + spin_unlock_irqrestore(master->lock, flags); + + return 0; +} + +static void clk_sama7g5_master_disable(struct clk_hw *hw) +{ + struct clk_master *master = to_clk_master(hw); + unsigned long flags; + + spin_lock_irqsave(master->lock, flags); + + regmap_write(master->regmap, PMC_MCR, master->id); + regmap_update_bits(master->regmap, PMC_MCR, + PMC_MCR_EN | PMC_MCR_CMD | PMC_MCR_ID_MSK, + PMC_MCR_CMD | PMC_MCR_ID(master->id)); + + spin_unlock_irqrestore(master->lock, flags); +} + +static int clk_sama7g5_master_is_enabled(struct clk_hw *hw) +{ + struct clk_master *master = to_clk_master(hw); + unsigned long flags; + unsigned int val; + + spin_lock_irqsave(master->lock, flags); + + regmap_write(master->regmap, PMC_MCR, master->id); + regmap_read(master->regmap, PMC_MCR, &val); + + spin_unlock_irqrestore(master->lock, flags); + + return !!(val & PMC_MCR_EN); +} + +static int clk_sama7g5_master_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_master *master = to_clk_master(hw); + unsigned long div, flags; + + div = DIV_ROUND_CLOSEST(parent_rate, rate); + if ((div > (1 << (MASTER_PRES_MAX - 1))) || (div & (div - 1))) + return -EINVAL; + + if (div == 3) + div = MASTER_PRES_MAX; + else + div = ffs(div) - 1; + + spin_lock_irqsave(master->lock, flags); + master->div = div; + spin_unlock_irqrestore(master->lock, flags); + + return 0; +} + +static const struct clk_ops sama7g5_master_ops = { + .enable = clk_sama7g5_master_enable, + .disable = clk_sama7g5_master_disable, + .is_enabled = clk_sama7g5_master_is_enabled, + .recalc_rate = clk_sama7g5_master_recalc_rate, + .determine_rate = clk_sama7g5_master_determine_rate, + .set_rate = clk_sama7g5_master_set_rate, + .get_parent = clk_sama7g5_master_get_parent, + .set_parent = clk_sama7g5_master_set_parent, +}; + +struct clk_hw * __init +at91_clk_sama7g5_register_master(struct regmap *regmap, + const char *name, int num_parents, + const char **parent_names, + u32 *mux_table, + spinlock_t *lock, u8 id, + bool critical, int chg_pid) +{ + struct clk_master *master; + struct clk_hw *hw; + struct clk_init_data init; + unsigned long flags; + unsigned int val; + int ret; + + if (!name || !num_parents || !parent_names || !mux_table || + !lock || id > MASTER_MAX_ID) + return ERR_PTR(-EINVAL); + + master = kzalloc(sizeof(*master), GFP_KERNEL); + if (!master) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &sama7g5_master_ops; + init.parent_names = parent_names; + init.num_parents = num_parents; + init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE; + if (chg_pid >= 0) + init.flags |= CLK_SET_RATE_PARENT; + if (critical) + init.flags |= CLK_IS_CRITICAL; + + master->hw.init = &init; + master->regmap = regmap; + master->id = id; + master->chg_pid = chg_pid; + master->lock = lock; + master->mux_table = mux_table; + + spin_lock_irqsave(master->lock, flags); + regmap_write(master->regmap, PMC_MCR, master->id); + regmap_read(master->regmap, PMC_MCR, &val); + master->parent = (val & PMC_MCR_CSS) >> PMC_MCR_CSS_SHIFT; + master->div = (val & PMC_MCR_DIV) >> MASTER_DIV_SHIFT; + spin_unlock_irqrestore(master->lock, flags); + + hw = &master->hw; + ret = clk_hw_register(NULL, &master->hw); + if (ret) { + kfree(master); + hw = ERR_PTR(ret); + } + + return hw; +} + const struct clk_master_layout at91rm9200_master_layout = { .mask = 0x31F, .pres_shift = 2, diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h index 
2bfe1405f9f8..29d150feaa46 100644 --- a/drivers/clk/at91/pmc.h +++ b/drivers/clk/at91/pmc.h @@ -154,6 +154,13 @@ at91_clk_register_master(struct regmap *regmap, const char *name, const struct clk_master_layout *layout, const struct clk_master_characteristics *characteristics); +struct clk_hw * __init +at91_clk_sama7g5_register_master(struct regmap *regmap, + const char *name, int num_parents, + const char **parent_names, u32 *mux_table, + spinlock_t *lock, u8 id, bool critical, + int chg_pid); + struct clk_hw * __init at91_clk_register_peripheral(struct regmap *regmap, const char *name, const char *parent_name, u32 id); diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h index 49a53a137610..77d6dabc4c3c 100644 --- a/include/linux/clk/at91_pmc.h +++ b/include/linux/clk/at91_pmc.h @@ -174,6 +174,7 @@ #define AT91_PMC_MOSCRCS (1 << 17) /* Main On-Chip RC [some SAM9] */ #define AT91_PMC_CFDEV (1 << 18) /* Clock Failure Detector Event [some SAM9] */ #define AT91_PMC_GCKRDY (1 << 24) /* Generated Clocks */ +#define AT91_PMC_MCKXRDY (1 << 26) /* Master Clock x [x=1..4] Ready Status */ #define AT91_PMC_IMR 0x6c /* Interrupt Mask Register */ #define AT91_PMC_FSMR 0x70 /* Fast Startup Mode Register */ -- cgit v1.2.3 From 0416824edca1cdcb6e00e6f909423bf0fc529eef Mon Sep 17 00:00:00 2001 From: Claudiu Beznea Date: Wed, 22 Jul 2020 10:38:23 +0300 Subject: clk: at91: add macro for pll ids mask Add macro for PLL IDs mask. Signed-off-by: Claudiu Beznea Link: https://lore.kernel.org/r/1595403506-8209-16-git-send-email-claudiu.beznea@microchip.com Signed-off-by: Stephen Boyd --- include/linux/clk/at91_pmc.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h index 77d6dabc4c3c..dc5e85f124e0 100644 --- a/include/linux/clk/at91_pmc.h +++ b/include/linux/clk/at91_pmc.h @@ -59,6 +59,7 @@ #define AT91_PMC_PLL_UPDT 0x1C /* PMC PLL update register [for SAM9X60] */ #define AT91_PMC_PLL_UPDT_UPDATE (1 << 8) /* Update PLL settings */ #define AT91_PMC_PLL_UPDT_ID (1 << 0) /* PLL ID */ +#define AT91_PMC_PLL_UPDT_ID_MSK (0xf) /* PLL ID mask */ #define AT91_PMC_PLL_UPDT_STUPTIM (0xff << 16) /* Startup time */ #define AT91_CKGR_MOR 0x20 /* Main Oscillator Register [not on SAM9RL] */ -- cgit v1.2.3 From ef396df99251b848596c717b63ff4fe74a941193 Mon Sep 17 00:00:00 2001 From: Claudiu Beznea Date: Wed, 22 Jul 2020 10:38:25 +0300 Subject: clk: at91: clk-utmi: add utmi support for sama7g5 Add UTMI support for SAMA7G5. SAMA7G5's UTMI control is done via XTALF register. Values written at bits 2..0 in this register correspond to the on board crystal oscillator frequency. 
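For example, with the encoding used by the prepare callback added below, a board with a 24 MHz main crystal would program the field as follows (illustrative sketch only):

        /*
         * AT91_PMC_XTALF[2:0] encoding, taken from clk_utmi_sama7g5_prepare():
         *   0 -> 16 MHz, 2 -> 20 MHz, 3 -> 24 MHz, 5 -> 32 MHz main crystal.
         */
        regmap_write(regmap_pmc, AT91_PMC_XTALF, 3);    /* 24 MHz crystal */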
Signed-off-by: Claudiu Beznea Link: https://lore.kernel.org/r/1595403506-8209-18-git-send-email-claudiu.beznea@microchip.com Signed-off-by: Stephen Boyd --- drivers/clk/at91/clk-utmi.c | 103 ++++++++++++++++++++++++++++++++++++++++--- drivers/clk/at91/pmc.h | 4 ++ include/linux/clk/at91_pmc.h | 2 + 3 files changed, 104 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/clk/at91/clk-utmi.c b/drivers/clk/at91/clk-utmi.c index f1ef4e1f41a9..df9f3fc3b6a6 100644 --- a/drivers/clk/at91/clk-utmi.c +++ b/drivers/clk/at91/clk-utmi.c @@ -120,9 +120,11 @@ static const struct clk_ops utmi_ops = { .recalc_rate = clk_utmi_recalc_rate, }; -struct clk_hw * __init -at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr, - const char *name, const char *parent_name) +static struct clk_hw * __init +at91_clk_register_utmi_internal(struct regmap *regmap_pmc, + struct regmap *regmap_sfr, + const char *name, const char *parent_name, + const struct clk_ops *ops, unsigned long flags) { struct clk_utmi *utmi; struct clk_hw *hw; @@ -134,10 +136,10 @@ at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr, return ERR_PTR(-ENOMEM); init.name = name; - init.ops = &utmi_ops; + init.ops = ops; init.parent_names = parent_name ? &parent_name : NULL; init.num_parents = parent_name ? 1 : 0; - init.flags = CLK_SET_RATE_GATE; + init.flags = flags; utmi->hw.init = &init; utmi->regmap_pmc = regmap_pmc; @@ -152,3 +154,94 @@ at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr, return hw; } + +struct clk_hw * __init +at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr, + const char *name, const char *parent_name) +{ + return at91_clk_register_utmi_internal(regmap_pmc, regmap_sfr, name, + parent_name, &utmi_ops, CLK_SET_RATE_GATE); +} + +static int clk_utmi_sama7g5_prepare(struct clk_hw *hw) +{ + struct clk_utmi *utmi = to_clk_utmi(hw); + struct clk_hw *hw_parent; + unsigned long parent_rate; + unsigned int val; + + hw_parent = clk_hw_get_parent(hw); + parent_rate = clk_hw_get_rate(hw_parent); + + switch (parent_rate) { + case 16000000: + val = 0; + break; + case 20000000: + val = 2; + break; + case 24000000: + val = 3; + break; + case 32000000: + val = 5; + break; + default: + pr_err("UTMICK: unsupported main_xtal rate\n"); + return -EINVAL; + } + + regmap_write(utmi->regmap_pmc, AT91_PMC_XTALF, val); + + return 0; + +} + +static int clk_utmi_sama7g5_is_prepared(struct clk_hw *hw) +{ + struct clk_utmi *utmi = to_clk_utmi(hw); + struct clk_hw *hw_parent; + unsigned long parent_rate; + unsigned int val; + + hw_parent = clk_hw_get_parent(hw); + parent_rate = clk_hw_get_rate(hw_parent); + + regmap_read(utmi->regmap_pmc, AT91_PMC_XTALF, &val); + switch (val & 0x7) { + case 0: + if (parent_rate == 16000000) + return 1; + break; + case 2: + if (parent_rate == 20000000) + return 1; + break; + case 3: + if (parent_rate == 24000000) + return 1; + break; + case 5: + if (parent_rate == 32000000) + return 1; + break; + default: + break; + } + + return 0; +} + +static const struct clk_ops sama7g5_utmi_ops = { + .prepare = clk_utmi_sama7g5_prepare, + .is_prepared = clk_utmi_sama7g5_is_prepared, + .recalc_rate = clk_utmi_recalc_rate, +}; + +struct clk_hw * __init +at91_clk_sama7g5_register_utmi(struct regmap *regmap_pmc, const char *name, + const char *parent_name) +{ + return at91_clk_register_utmi_internal(regmap_pmc, NULL, name, + parent_name, &sama7g5_utmi_ops, 0); +} diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h index 
6340b9be8205..7b86affc6d7c 100644 --- a/drivers/clk/at91/pmc.h +++ b/drivers/clk/at91/pmc.h @@ -236,6 +236,10 @@ struct clk_hw * __init at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr, const char *name, const char *parent_name); +struct clk_hw * __init +at91_clk_sama7g5_register_utmi(struct regmap *regmap, const char *name, + const char *parent_name); + #ifdef CONFIG_PM void pmc_register_id(u8 id); void pmc_register_pck(u8 pck); diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h index dc5e85f124e0..a4f82e836a7c 100644 --- a/include/linux/clk/at91_pmc.h +++ b/include/linux/clk/at91_pmc.h @@ -137,6 +137,8 @@ #define AT91_PMC_PLLADIV2_ON (1 << 12) #define AT91_PMC_H32MXDIV BIT(24) +#define AT91_PMC_XTALF 0x34 /* Main XTAL Frequency Register [SAMA7G5 only] */ + #define AT91_PMC_USB 0x38 /* USB Clock Register [some SAM9 only] */ #define AT91_PMC_USBS (0x1 << 0) /* USB OHCI Input clock selection */ #define AT91_PMC_USBS_PLLA (0 << 0) -- cgit v1.2.3 From d19e789f068b3d633cbac430764962f404198022 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 24 Jul 2020 13:50:25 +0200 Subject: compiler.h: Move instrumentation_begin()/end() to new header Linus pointed out that compiler.h - which is a key header that gets included in every single one of the 28,000+ kernel files during a kernel build - was bloated in: 655389666643: ("vmlinux.lds.h: Create section for protection against instrumentation") Linus noted: > I have pulled this, but do we really want to add this to a header file > that is _so_ core that it gets included for basically every single > file built? > > I don't even see those instrumentation_begin/end() things used > anywhere right now. > > It seems excessive. That 53 lines is maybe not a lot, but it pushed > that header file to over 12kB, and while it's mostly comments, it's > extra IO and parsing basically for _every_ single file compiled in the > kernel. > > For what appears to be absolutely zero upside right now, and I really > don't see why this should be in such a core header file! Move these primitives into a new header: , and include that header in the headers that make use of it. Unfortunately one of these headers is asm-generic/bug.h, which does get included in a lot of places, similarly to compiler.h. So the de-bloating effect isn't as good as we'd like it to be - but at least the interfaces are defined separately. No change to functionality intended. Reported-by: Linus Torvalds Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20200604071921.GA1361070@gmail.com Cc: Thomas Gleixner Cc: Borislav Petkov Cc: Peter Zijlstra --- arch/x86/include/asm/bug.h | 1 + include/asm-generic/bug.h | 1 + include/linux/compiler.h | 53 ------------------------------------- include/linux/context_tracking.h | 2 ++ include/linux/instrumentation.h | 57 ++++++++++++++++++++++++++++++++++++++++ 5 files changed, 61 insertions(+), 53 deletions(-) create mode 100644 include/linux/instrumentation.h (limited to 'include') diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h index 028189575560..297fa12e7e27 100644 --- a/arch/x86/include/asm/bug.h +++ b/arch/x86/include/asm/bug.h @@ -3,6 +3,7 @@ #define _ASM_X86_BUG_H #include +#include /* * Despite that some emulators terminate on UD2, we use it for WARN(). 
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index c94e33ae3e7b..18b0f4eee8cb 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -3,6 +3,7 @@ #define _ASM_GENERIC_BUG_H #include +#include #define CUT_HERE "------------[ cut here ]------------\n" diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 204e76856435..681894bfde99 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -120,65 +120,12 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, /* Annotate a C jump table to allow objtool to follow the code flow */ #define __annotate_jump_table __section(.rodata..c_jump_table) -#ifdef CONFIG_DEBUG_ENTRY -/* Begin/end of an instrumentation safe region */ -#define instrumentation_begin() ({ \ - asm volatile("%c0: nop\n\t" \ - ".pushsection .discard.instr_begin\n\t" \ - ".long %c0b - .\n\t" \ - ".popsection\n\t" : : "i" (__COUNTER__)); \ -}) - -/* - * Because instrumentation_{begin,end}() can nest, objtool validation considers - * _begin() a +1 and _end() a -1 and computes a sum over the instructions. - * When the value is greater than 0, we consider instrumentation allowed. - * - * There is a problem with code like: - * - * noinstr void foo() - * { - * instrumentation_begin(); - * ... - * if (cond) { - * instrumentation_begin(); - * ... - * instrumentation_end(); - * } - * bar(); - * instrumentation_end(); - * } - * - * If instrumentation_end() would be an empty label, like all the other - * annotations, the inner _end(), which is at the end of a conditional block, - * would land on the instruction after the block. - * - * If we then consider the sum of the !cond path, we'll see that the call to - * bar() is with a 0-value, even though, we meant it to happen with a positive - * value. - * - * To avoid this, have _end() be a NOP instruction, this ensures it will be - * part of the condition block and does not escape. - */ -#define instrumentation_end() ({ \ - asm volatile("%c0: nop\n\t" \ - ".pushsection .discard.instr_end\n\t" \ - ".long %c0b - .\n\t" \ - ".popsection\n\t" : : "i" (__COUNTER__)); \ -}) -#endif /* CONFIG_DEBUG_ENTRY */ - #else #define annotate_reachable() #define annotate_unreachable() #define __annotate_jump_table #endif -#ifndef instrumentation_begin -#define instrumentation_begin() do { } while(0) -#define instrumentation_end() do { } while(0) -#endif - #ifndef ASM_UNREACHABLE # define ASM_UNREACHABLE #endif diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h index 981b880d5b60..d53cd331c4dd 100644 --- a/include/linux/context_tracking.h +++ b/include/linux/context_tracking.h @@ -5,6 +5,8 @@ #include #include #include +#include + #include diff --git a/include/linux/instrumentation.h b/include/linux/instrumentation.h new file mode 100644 index 000000000000..93e2ad67fc10 --- /dev/null +++ b/include/linux/instrumentation.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_INSTRUMENTATION_H +#define __LINUX_INSTRUMENTATION_H + +#if defined(CONFIG_DEBUG_ENTRY) && defined(CONFIG_STACK_VALIDATION) + +/* Begin/end of an instrumentation safe region */ +#define instrumentation_begin() ({ \ + asm volatile("%c0: nop\n\t" \ + ".pushsection .discard.instr_begin\n\t" \ + ".long %c0b - .\n\t" \ + ".popsection\n\t" : : "i" (__COUNTER__)); \ +}) + +/* + * Because instrumentation_{begin,end}() can nest, objtool validation considers + * _begin() a +1 and _end() a -1 and computes a sum over the instructions. 
+ * When the value is greater than 0, we consider instrumentation allowed. + * + * There is a problem with code like: + * + * noinstr void foo() + * { + * instrumentation_begin(); + * ... + * if (cond) { + * instrumentation_begin(); + * ... + * instrumentation_end(); + * } + * bar(); + * instrumentation_end(); + * } + * + * If instrumentation_end() would be an empty label, like all the other + * annotations, the inner _end(), which is at the end of a conditional block, + * would land on the instruction after the block. + * + * If we then consider the sum of the !cond path, we'll see that the call to + * bar() is with a 0-value, even though, we meant it to happen with a positive + * value. + * + * To avoid this, have _end() be a NOP instruction, this ensures it will be + * part of the condition block and does not escape. + */ +#define instrumentation_end() ({ \ + asm volatile("%c0: nop\n\t" \ + ".pushsection .discard.instr_end\n\t" \ + ".long %c0b - .\n\t" \ + ".popsection\n\t" : : "i" (__COUNTER__)); \ +}) +#else +# define instrumentation_begin() do { } while(0) +# define instrumentation_end() do { } while(0) +#endif + +#endif /* __LINUX_INSTRUMENTATION_H */ -- cgit v1.2.3 From 3503d56cc7233ced602e38a4c13caa64f00ab2aa Mon Sep 17 00:00:00 2001 From: Andrei Vagin Date: Wed, 24 Jun 2020 01:33:18 -0700 Subject: arm64/vdso: Add time namespace page Allocate the time namespace page among VVAR pages. Provide __arch_get_timens_vdso_data() helper for VDSO code to get the code-relative position of VVARs on that special page. If a task belongs to a time namespace then the VVAR page which contains the system wide VDSO data is replaced with a namespace specific page which has the same layout as the VVAR page. That page has vdso_data->seq set to 1 to enforce the slow path and vdso_data->clock_mode set to VCLOCK_TIMENS to enforce the time namespace handling path. The extra check in the case that vdso_data->seq is odd, e.g. a concurrent update of the VDSO data is in progress, is not really affecting regular tasks which are not part of a time namespace as the task is spin waiting for the update to finish and vdso_data->seq to become even again. If a time namespace task hits that code path, it invokes the corresponding time getter function which retrieves the real VVAR page, reads host time and then adds the offset for the requested clock which is stored in the special VVAR page. The time-namespace page isn't allocated on !CONFIG_TIME_NAMESPACE, but vma is the same size, which simplifies criu/vdso migration between different kernel configs. 
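A schematic sketch of the detour described above; this is not the actual generic vDSO code, and read_host_clock()/timens_clock_offset() are hypothetical helpers standing in for the real internals:

static __always_inline u64 example_vdso_read(clockid_t clk)
{
        const struct vdso_data *vd = __arch_get_vdso_data();

        /*
         * For a time-namespace task, vd points at the namespace page:
         * seq == 1 forces the slow path and clock_mode == VCLOCK_TIMENS
         * marks it.  The real system-wide data sits behind _timens_data.
         */
        if (vd->clock_mode == VCLOCK_TIMENS) {
                const struct vdso_data *host = __arch_get_timens_vdso_data();

                /* Host time plus the per-clock offset kept in the special
                 * VVAR page (hypothetical helpers). */
                return read_host_clock(host, clk) + timens_clock_offset(vd, clk);
        }

        return read_host_clock(vd, clk);
}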
Signed-off-by: Andrei Vagin Reviewed-by: Vincenzo Frascino Reviewed-by: Dmitry Safonov Cc: Mark Rutland Link: https://lore.kernel.org/r/20200624083321.144975-4-avagin@gmail.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/vdso.h | 2 ++ arch/arm64/include/asm/vdso/compat_gettimeofday.h | 12 ++++++++++++ arch/arm64/include/asm/vdso/gettimeofday.h | 8 ++++++++ arch/arm64/kernel/vdso.c | 19 ++++++++++++++++--- arch/arm64/kernel/vdso/vdso.lds.S | 5 ++++- arch/arm64/kernel/vdso32/vdso.lds.S | 5 ++++- include/vdso/datapage.h | 1 + 7 files changed, 47 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/arch/arm64/include/asm/vdso.h b/arch/arm64/include/asm/vdso.h index 07468428fd29..f99dcb94b438 100644 --- a/arch/arm64/include/asm/vdso.h +++ b/arch/arm64/include/asm/vdso.h @@ -12,6 +12,8 @@ */ #define VDSO_LBASE 0x0 +#define __VVAR_PAGES 2 + #ifndef __ASSEMBLY__ #include diff --git a/arch/arm64/include/asm/vdso/compat_gettimeofday.h b/arch/arm64/include/asm/vdso/compat_gettimeofday.h index b6907ae78e53..b7c549d46d18 100644 --- a/arch/arm64/include/asm/vdso/compat_gettimeofday.h +++ b/arch/arm64/include/asm/vdso/compat_gettimeofday.h @@ -152,6 +152,18 @@ static __always_inline const struct vdso_data *__arch_get_vdso_data(void) return ret; } +#ifdef CONFIG_TIME_NS +static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void) +{ + const struct vdso_data *ret; + + /* See __arch_get_vdso_data(). */ + asm volatile("mov %0, %1" : "=r"(ret) : "r"(_timens_data)); + + return ret; +} +#endif + #endif /* !__ASSEMBLY__ */ #endif /* __ASM_VDSO_GETTIMEOFDAY_H */ diff --git a/arch/arm64/include/asm/vdso/gettimeofday.h b/arch/arm64/include/asm/vdso/gettimeofday.h index afba6ba332f8..cf39eae5eaaf 100644 --- a/arch/arm64/include/asm/vdso/gettimeofday.h +++ b/arch/arm64/include/asm/vdso/gettimeofday.h @@ -96,6 +96,14 @@ const struct vdso_data *__arch_get_vdso_data(void) return _vdso_data; } +#ifdef CONFIG_TIME_NS +static __always_inline +const struct vdso_data *__arch_get_timens_vdso_data(void) +{ + return _timens_data; +} +#endif + #endif /* !__ASSEMBLY__ */ #endif /* __ASM_VDSO_GETTIMEOFDAY_H */ diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index 33ac18060bfc..fcb559726920 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -40,6 +40,12 @@ enum vdso_abi { #endif /* CONFIG_COMPAT_VDSO */ }; +enum vvar_pages { + VVAR_DATA_PAGE_OFFSET, + VVAR_TIMENS_PAGE_OFFSET, + VVAR_NR_PAGES, +}; + struct vdso_abi_info { const char *name; const char *vdso_code_start; @@ -125,6 +131,11 @@ static int __vdso_init(enum vdso_abi abi) } #ifdef CONFIG_TIME_NS +struct vdso_data *arch_get_vdso_data(void *vvar_page) +{ + return (struct vdso_data *)(vvar_page); +} + /* * The vvar mapping contains data for a specific time namespace, so when a task * changes namespace we must unmap its vvar data for the old namespace. 
@@ -173,9 +184,11 @@ static int __setup_additional_pages(enum vdso_abi abi, unsigned long gp_flags = 0; void *ret; + BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES); + vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT; /* Be sure to map the data page */ - vdso_mapping_len = vdso_text_len + PAGE_SIZE; + vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE; vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0); if (IS_ERR_VALUE(vdso_base)) { @@ -183,7 +196,7 @@ goto up_fail; } - ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE, + ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE, VM_READ|VM_MAYREAD|VM_PFNMAP, vdso_info[abi].dm); if (IS_ERR(ret)) @@ -192,7 +205,7 @@ if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti()) gp_flags = VM_ARM64_BTI; - vdso_base += PAGE_SIZE; + vdso_base += VVAR_NR_PAGES * PAGE_SIZE; mm->context.vdso = (void *)vdso_base; ret = _install_special_mapping(mm, vdso_base, vdso_text_len, VM_READ|VM_EXEC|gp_flags| diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S index 7ad2d3a0cd48..d808ad31e01f 100644 --- a/arch/arm64/kernel/vdso/vdso.lds.S +++ b/arch/arm64/kernel/vdso/vdso.lds.S @@ -17,7 +17,10 @@ OUTPUT_ARCH(aarch64) SECTIONS { - PROVIDE(_vdso_data = . - PAGE_SIZE); + PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE); +#ifdef CONFIG_TIME_NS + PROVIDE(_timens_data = _vdso_data + PAGE_SIZE); +#endif . = VDSO_LBASE + SIZEOF_HEADERS; .hash : { *(.hash) } :text diff --git a/arch/arm64/kernel/vdso32/vdso.lds.S b/arch/arm64/kernel/vdso32/vdso.lds.S index 337d03522048..3348ce5ea306 100644 --- a/arch/arm64/kernel/vdso32/vdso.lds.S +++ b/arch/arm64/kernel/vdso32/vdso.lds.S @@ -17,7 +17,10 @@ OUTPUT_ARCH(arm) SECTIONS { - PROVIDE_HIDDEN(_vdso_data = . - PAGE_SIZE); + PROVIDE_HIDDEN(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE); +#ifdef CONFIG_TIME_NS + PROVIDE_HIDDEN(_timens_data = _vdso_data + PAGE_SIZE); +#endif . = VDSO_LBASE + SIZEOF_HEADERS; .hash : { *(.hash) } :text diff --git a/include/vdso/datapage.h b/include/vdso/datapage.h index 7955c56d6b3c..ee810cae4e1e 100644 --- a/include/vdso/datapage.h +++ b/include/vdso/datapage.h @@ -109,6 +109,7 @@ struct vdso_data { * relocation, and this is what we need. */ extern struct vdso_data _vdso_data[CS_BASES] __attribute__((visibility("hidden"))); +extern struct vdso_data _timens_data[CS_BASES] __attribute__((visibility("hidden"))); /* * The generic vDSO implementation requires that gettimeofday.h -- cgit v1.2.3 From f34ce7a7018c2f71d78fc7f512f6daf01e487114 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Fri, 12 Jun 2020 11:39:55 +0800 Subject: iommu: Add gfp parameter to io_pgtable_ops->map() The ARM page tables are currently always allocated with GFP_ATOMIC, but commit 781ca2de89ba ("iommu: Add gfp parameter to iommu_ops::map") added a gfp_t parameter to iommu_ops->map(), so io_pgtable_ops->map() should use the gfp parameter passed down from iommu_ops->map() to allocate page-table pages, which avoids wasting the memory allocator's atomic pools in non-atomic contexts.
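In practice the new argument just threads the caller's context through to the page-table allocator; a sketch of both calling styles, reusing the variables from the hunks below:

        /* Process context: the io-pgtable code may now sleep for allocations. */
        ret = ops->map(ops, iova, paddr, size, prot, GFP_KERNEL);

        /* Atomic context (e.g. under a spinlock, as in the msm/qcom drivers):
         * keep GFP_ATOMIC, so behaviour is unchanged there. */
        spin_lock_irqsave(&priv->pgtlock, flags);
        ret = ops->map(ops, iova, paddr, size, prot, GFP_ATOMIC);
        spin_unlock_irqrestore(&priv->pgtlock, flags);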
Signed-off-by: Baolin Wang Acked-by: Will Deacon Link: https://lore.kernel.org/r/3093df4cb95497aaf713fca623ce4ecebb197c2e.1591930156.git.baolin.wang@linux.alibaba.com Signed-off-by: Joerg Roedel --- drivers/gpu/drm/panfrost/panfrost_mmu.c | 2 +- drivers/iommu/arm-smmu-v3.c | 2 +- drivers/iommu/arm-smmu.c | 2 +- drivers/iommu/io-pgtable-arm-v7s.c | 18 +++++++++--------- drivers/iommu/io-pgtable-arm.c | 18 +++++++++--------- drivers/iommu/ipmmu-vmsa.c | 2 +- drivers/iommu/msm_iommu.c | 2 +- drivers/iommu/mtk_iommu.c | 2 +- drivers/iommu/qcom_iommu.c | 2 +- include/linux/io-pgtable.h | 2 +- 10 files changed, 26 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index ed28aeba6d59..5a39eee8cf83 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -262,7 +262,7 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu, while (len) { size_t pgsize = get_pgsize(iova | paddr, len); - ops->map(ops, iova, paddr, pgsize, prot); + ops->map(ops, iova, paddr, pgsize, prot, GFP_KERNEL); iova += pgsize; paddr += pgsize; len -= pgsize; diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index f578677a5c41..7b59f06b3913 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -2850,7 +2850,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, if (!ops) return -ENODEV; - return ops->map(ops, iova, paddr, size, prot); + return ops->map(ops, iova, paddr, size, prot, gfp); } static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 243bc4cb2705..dc1d2535798a 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -1227,7 +1227,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, return -ENODEV; arm_smmu_rpm_get(smmu); - ret = ops->map(ops, iova, paddr, size, prot); + ret = ops->map(ops, iova, paddr, size, prot, gfp); arm_smmu_rpm_put(smmu); return ret; diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 4272fe4e17f4..a688f22cbe3b 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -470,7 +470,7 @@ static arm_v7s_iopte arm_v7s_install_table(arm_v7s_iopte *table, static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova, phys_addr_t paddr, size_t size, int prot, - int lvl, arm_v7s_iopte *ptep) + int lvl, arm_v7s_iopte *ptep, gfp_t gfp) { struct io_pgtable_cfg *cfg = &data->iop.cfg; arm_v7s_iopte pte, *cptep; @@ -491,7 +491,7 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova, /* Grab a pointer to the next level */ pte = READ_ONCE(*ptep); if (!pte) { - cptep = __arm_v7s_alloc_table(lvl + 1, GFP_ATOMIC, data); + cptep = __arm_v7s_alloc_table(lvl + 1, gfp, data); if (!cptep) return -ENOMEM; @@ -512,11 +512,11 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova, } /* Rinse, repeat */ - return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep); + return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp); } static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops); struct io_pgtable *iop = &data->iop; @@ -530,7 
+530,7 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova, paddr >= (1ULL << data->iop.cfg.oas))) return -ERANGE; - ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd); + ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd, gfp); /* * Synchronise all PTE updates for the new mapping before there's * a chance for anything to kick off a table walk for the new iova. @@ -922,12 +922,12 @@ static int __init arm_v7s_do_selftests(void) if (ops->map(ops, iova, iova, size, IOMMU_READ | IOMMU_WRITE | IOMMU_NOEXEC | - IOMMU_CACHE)) + IOMMU_CACHE, GFP_KERNEL)) return __FAIL(ops); /* Overlapping mappings */ if (!ops->map(ops, iova, iova + size, size, - IOMMU_READ | IOMMU_NOEXEC)) + IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL)) return __FAIL(ops); if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) @@ -946,7 +946,7 @@ static int __init arm_v7s_do_selftests(void) return __FAIL(ops); /* Remap of partial unmap */ - if (ops->map(ops, iova_start + size, size, size, IOMMU_READ)) + if (ops->map(ops, iova_start + size, size, size, IOMMU_READ, GFP_KERNEL)) return __FAIL(ops); if (ops->iova_to_phys(ops, iova_start + size + 42) @@ -967,7 +967,7 @@ static int __init arm_v7s_do_selftests(void) return __FAIL(ops); /* Remap full block */ - if (ops->map(ops, iova, iova, size, IOMMU_WRITE)) + if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL)) return __FAIL(ops); if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 04fbd4bf0ff9..4a5a7b083a9b 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -355,7 +355,7 @@ static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table, static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, phys_addr_t paddr, size_t size, arm_lpae_iopte prot, - int lvl, arm_lpae_iopte *ptep) + int lvl, arm_lpae_iopte *ptep, gfp_t gfp) { arm_lpae_iopte *cptep, pte; size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data); @@ -376,7 +376,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, /* Grab a pointer to the next level */ pte = READ_ONCE(*ptep); if (!pte) { - cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg); + cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg); if (!cptep) return -ENOMEM; @@ -396,7 +396,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, } /* Rinse, repeat */ - return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep); + return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp); } static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, @@ -461,7 +461,7 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, } static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, - phys_addr_t paddr, size_t size, int iommu_prot) + phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp) { struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); struct io_pgtable_cfg *cfg = &data->iop.cfg; @@ -483,7 +483,7 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, return -ERANGE; prot = arm_lpae_prot_to_pte(data, iommu_prot); - ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep); + ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, gfp); /* * Synchronise all PTE updates for the new mapping before there's * a chance for anything to kick off a table walk for the new iova. 
@@ -1178,12 +1178,12 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) if (ops->map(ops, iova, iova, size, IOMMU_READ | IOMMU_WRITE | IOMMU_NOEXEC | - IOMMU_CACHE)) + IOMMU_CACHE, GFP_KERNEL)) return __FAIL(ops, i); /* Overlapping mappings */ if (!ops->map(ops, iova, iova + size, size, - IOMMU_READ | IOMMU_NOEXEC)) + IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL)) return __FAIL(ops, i); if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) @@ -1198,7 +1198,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) return __FAIL(ops, i); /* Remap of partial unmap */ - if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ)) + if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ, GFP_KERNEL)) return __FAIL(ops, i); if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42)) @@ -1216,7 +1216,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) return __FAIL(ops, i); /* Remap full block */ - if (ops->map(ops, iova, iova, size, IOMMU_WRITE)) + if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL)) return __FAIL(ops, i); if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 4c2972f3153b..87475b2f7ef1 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -687,7 +687,7 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova, if (!domain) return -ENODEV; - return domain->iop->map(domain->iop, iova, paddr, size, prot); + return domain->iop->map(domain->iop, iova, paddr, size, prot, gfp); } static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index f773cc85f311..3615cd6241c4 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -491,7 +491,7 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova, int ret; spin_lock_irqsave(&priv->pgtlock, flags); - ret = priv->iop->map(priv->iop, iova, pa, len, prot); + ret = priv->iop->map(priv->iop, iova, pa, len, prot, GFP_ATOMIC); spin_unlock_irqrestore(&priv->pgtlock, flags); return ret; diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 2be96f1cdbd2..b7b16414a217 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -397,7 +397,7 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, paddr |= BIT_ULL(32); /* Synchronize with the tlb_lock */ - return dom->iop->map(dom->iop, iova, paddr, size, prot); + return dom->iop->map(dom->iop, iova, paddr, size, prot, gfp); } static size_t mtk_iommu_unmap(struct iommu_domain *domain, diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index c3e1fbd1988c..cfcfd7553b30 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -441,7 +441,7 @@ static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova, return -ENODEV; spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags); - ret = ops->map(ops, iova, paddr, size, prot); + ret = ops->map(ops, iova, paddr, size, prot, GFP_ATOMIC); spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags); return ret; } diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 53d53c6c2be9..23285ba645db 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -155,7 +155,7 @@ struct io_pgtable_cfg { */ struct io_pgtable_ops { int (*map)(struct io_pgtable_ops *ops, unsigned long iova, - phys_addr_t paddr, size_t size, int prot); + phys_addr_t paddr, size_t size, 
int prot, gfp_t gfp); size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova, size_t size, struct iommu_iotlb_gather *gather); phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops, -- cgit v1.2.3 From b1012ca8dc4f9b1a1fe8e2cb1590dd6d43ea3849 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Thu, 23 Jul 2020 09:34:37 +0800 Subject: iommu/vt-d: Skip TE disabling on quirky gfx dedicated iommu The VT-d spec requires (10.4.4 Global Command Register, TE field) that: Hardware implementations supporting DMA draining must drain any in-flight DMA read/write requests queued within the Root-Complex before completing the translation enable command and reflecting the status of the command through the TES field in the Global Status register. Unfortunately, some integrated graphic devices fail to do so after some kind of power state transition. As the result, the system might stuck in iommu_disable_translation(), waiting for the completion of TE transition. This provides a quirk list for those devices and skips TE disabling if the qurik hits. Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=208363 Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=206571 Signed-off-by: Lu Baolu Tested-by: Koba Ko Tested-by: Jun Miao Cc: Ashok Raj Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20200723013437.2268-1-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/dmar.c | 1 + drivers/iommu/intel/iommu.c | 27 +++++++++++++++++++++++++++ include/linux/dmar.h | 1 + include/linux/intel-iommu.h | 2 ++ 4 files changed, 31 insertions(+) (limited to 'include') diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index f6cbe3f95c8d..93e6345f3414 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -1102,6 +1102,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) } drhd->iommu = iommu; + iommu->drhd = drhd; return 0; diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index b3faf99457a6..0c2d582ff8cd 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -356,6 +356,7 @@ static int intel_iommu_strict; static int intel_iommu_superpage = 1; static int iommu_identity_mapping; static int intel_no_bounce; +static int iommu_skip_te_disable; #define IDENTMAP_GFX 2 #define IDENTMAP_AZALIA 4 @@ -1633,6 +1634,10 @@ static void iommu_disable_translation(struct intel_iommu *iommu) u32 sts; unsigned long flag; + if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated && + (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap))) + return; + raw_spin_lock_irqsave(&iommu->register_lock, flag); iommu->gcmd &= ~DMA_GCMD_TE; writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); @@ -4043,6 +4048,7 @@ static void __init init_no_remapping_devices(void) /* This IOMMU has *only* gfx devices. 
Either bypass it or set the gfx_mapped flag, as appropriate */ + drhd->gfx_dedicated = 1; if (!dmar_map_gfx) { drhd->ignored = 1; for_each_active_dev_scope(drhd->devices, @@ -6170,6 +6176,27 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_g DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt); +static void quirk_igfx_skip_te_disable(struct pci_dev *dev) +{ + unsigned short ver; + + if (!IS_GFX_DEVICE(dev)) + return; + + ver = (dev->device >> 8) & 0xff; + if (ver != 0x45 && ver != 0x46 && ver != 0x4c && + ver != 0x4e && ver != 0x8a && ver != 0x98 && + ver != 0x9a) + return; + + if (risky_device(dev)) + return; + + pci_info(dev, "Skip IOMMU disabling for graphics\n"); + iommu_skip_te_disable = 1; +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_igfx_skip_te_disable); + /* On Tylersburg chipsets, some BIOSes have been known to enable the ISOCH DMAR unit for the Azalia sound device, but not give it any TLB entries, which causes it to deadlock. Check for that. We do diff --git a/include/linux/dmar.h b/include/linux/dmar.h index d7bf029df737..65565820328a 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -48,6 +48,7 @@ struct dmar_drhd_unit { u16 segment; /* PCI domain */ u8 ignored:1; /* ignore drhd */ u8 include_all:1; + u8 gfx_dedicated:1; /* graphic dedicated */ struct intel_iommu *iommu; }; diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 9ff5e340948b..b1ed2f25f7c0 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -599,6 +599,8 @@ struct intel_iommu { struct iommu_device iommu; /* IOMMU core code handle */ int node; u32 flags; /* Software defined flags */ + + struct dmar_drhd_unit *drhd; }; /* PCI domain-device relationship */ -- cgit v1.2.3 From 6823ecabf03031d610a6c5afe7ed4b4fd659a99f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 22 Jul 2020 23:59:55 +0200 Subject: seccomp: Provide stub for __secure_computing() To avoid #ifdeffery in the upcoming generic syscall entry work code provide a stub for __secure_computing() as this is preferred over secure_computing() because the TIF flag is already evaluated. Signed-off-by: Thomas Gleixner Acked-by: Kees Cook Link: https://lkml.kernel.org/r/20200722220519.404974280@linutronix.de --- include/linux/seccomp.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index 4192369b8418..03d28c32ad01 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h @@ -61,6 +61,7 @@ struct seccomp_filter { }; #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER static inline int secure_computing(void) { return 0; } +static inline int __secure_computing(void) { return 0; } #else static inline void secure_computing_strict(int this_syscall) { return; } #endif -- cgit v1.2.3 From 142781e108b13b2b0e8f035cfb5bfbbc8f14d887 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 22 Jul 2020 23:59:56 +0200 Subject: entry: Provide generic syscall entry functionality On syscall entry certain work needs to be done: - Establish state (lockdep, context tracking, tracing) - Conditional work (ptrace, seccomp, audit...) This code is needlessly duplicated and different in all architectures. Provide a generic version based on the x86 implementation which has all the RCU and instrumentation bits right. 
As interrupt/exception entry from user space needs parts of the same functionality, provide a function for this as well. syscall_enter_from_user_mode() and irqentry_enter_from_user_mode() must be called right after the low level ASM entry. The calling code must be non-instrumentable. After the functions returns state is correct and the subsequent functions can be instrumented. Signed-off-by: Thomas Gleixner Acked-by: Kees Cook Link: https://lkml.kernel.org/r/20200722220519.513463269@linutronix.de --- arch/Kconfig | 3 ++ include/linux/entry-common.h | 121 +++++++++++++++++++++++++++++++++++++++++++ kernel/Makefile | 1 + kernel/entry/Makefile | 12 +++++ kernel/entry/common.c | 88 +++++++++++++++++++++++++++++++ 5 files changed, 225 insertions(+) create mode 100644 include/linux/entry-common.h create mode 100644 kernel/entry/Makefile create mode 100644 kernel/entry/common.c (limited to 'include') diff --git a/arch/Kconfig b/arch/Kconfig index 8cc35dc556c7..852a527f418f 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -27,6 +27,9 @@ config HAVE_IMA_KEXEC config HOTPLUG_SMT bool +config GENERIC_ENTRY + bool + config OPROFILE tristate "OProfile system profiling" depends on PROFILING diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h new file mode 100644 index 000000000000..42fc8e4632bb --- /dev/null +++ b/include/linux/entry-common.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_ENTRYCOMMON_H +#define __LINUX_ENTRYCOMMON_H + +#include +#include +#include +#include + +#include + +/* + * Define dummy _TIF work flags if not defined by the architecture or for + * disabled functionality. + */ +#ifndef _TIF_SYSCALL_EMU +# define _TIF_SYSCALL_EMU (0) +#endif + +#ifndef _TIF_SYSCALL_TRACEPOINT +# define _TIF_SYSCALL_TRACEPOINT (0) +#endif + +#ifndef _TIF_SECCOMP +# define _TIF_SECCOMP (0) +#endif + +#ifndef _TIF_SYSCALL_AUDIT +# define _TIF_SYSCALL_AUDIT (0) +#endif + +/* + * TIF flags handled in syscall_enter_from_usermode() + */ +#ifndef ARCH_SYSCALL_ENTER_WORK +# define ARCH_SYSCALL_ENTER_WORK (0) +#endif + +#define SYSCALL_ENTER_WORK \ + (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ + _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_EMU | \ + ARCH_SYSCALL_ENTER_WORK) + +/** + * arch_check_user_regs - Architecture specific sanity check for user mode regs + * @regs: Pointer to currents pt_regs + * + * Defaults to an empty implementation. Can be replaced by architecture + * specific code. + * + * Invoked from syscall_enter_from_user_mode() in the non-instrumentable + * section. Use __always_inline so the compiler cannot push it out of line + * and make it instrumentable. + */ +static __always_inline void arch_check_user_regs(struct pt_regs *regs); + +#ifndef arch_check_user_regs +static __always_inline void arch_check_user_regs(struct pt_regs *regs) {} +#endif + +/** + * arch_syscall_enter_tracehook - Wrapper around tracehook_report_syscall_entry() + * @regs: Pointer to currents pt_regs + * + * Returns: 0 on success or an error code to skip the syscall. + * + * Defaults to tracehook_report_syscall_entry(). Can be replaced by + * architecture specific code. 
+ * + * Invoked from syscall_enter_from_user_mode() + */ +static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs); + +#ifndef arch_syscall_enter_tracehook +static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs) +{ + return tracehook_report_syscall_entry(regs); +} +#endif + +/** + * syscall_enter_from_user_mode - Check and handle work before invoking + * a syscall + * @regs: Pointer to currents pt_regs + * @syscall: The syscall number + * + * Invoked from architecture specific syscall entry code with interrupts + * disabled. The calling code has to be non-instrumentable. When the + * function returns all state is correct and the subsequent functions can be + * instrumented. + * + * Returns: The original or a modified syscall number + * + * If the returned syscall number is -1 then the syscall should be + * skipped. In this case the caller may invoke syscall_set_error() or + * syscall_set_return_value() first. If neither of those are called and -1 + * is returned, then the syscall will fail with ENOSYS. + * + * The following functionality is handled here: + * + * 1) Establish state (lockdep, RCU (context tracking), tracing) + * 2) TIF flag dependent invocations of arch_syscall_enter_tracehook(), + * __secure_computing(), trace_sys_enter() + * 3) Invocation of audit_syscall_entry() + */ +long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall); + +/** + * irqentry_enter_from_user_mode - Establish state before invoking the irq handler + * @regs: Pointer to currents pt_regs + * + * Invoked from architecture specific entry code with interrupts disabled. + * Can only be called when the interrupt entry came from user mode. The + * calling code must be non-instrumentable. When the function returns all + * state is correct and the subsequent functions can be instrumented. + * + * The function establishes state (lockdep, RCU (context tracking), tracing) + */ +void irqentry_enter_from_user_mode(struct pt_regs *regs); + +#endif diff --git a/kernel/Makefile b/kernel/Makefile index f3218bc5ec69..fde2000d0d0d 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -48,6 +48,7 @@ obj-y += irq/ obj-y += rcu/ obj-y += livepatch/ obj-y += dma/ +obj-y += entry/ obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o obj-$(CONFIG_FREEZER) += freezer.o diff --git a/kernel/entry/Makefile b/kernel/entry/Makefile new file mode 100644 index 000000000000..c207d202bf3a --- /dev/null +++ b/kernel/entry/Makefile @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0 + +# Prevent the noinstr section from being pestered by sanitizer and other goodies +# as long as these things cannot be disabled per function. +KASAN_SANITIZE := n +UBSAN_SANITIZE := n +KCOV_INSTRUMENT := n + +CFLAGS_REMOVE_common.o = -fstack-protector -fstack-protector-strong +CFLAGS_common.o += -fno-stack-protector + +obj-$(CONFIG_GENERIC_ENTRY) += common.o diff --git a/kernel/entry/common.c b/kernel/entry/common.c new file mode 100644 index 000000000000..1d636dee2fec --- /dev/null +++ b/kernel/entry/common.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +#define CREATE_TRACE_POINTS +#include + +/** + * enter_from_user_mode - Establish state when coming from user mode + * + * Syscall/interrupt entry disables interrupts, but user mode is traced as + * interrupts enabled. Also with NO_HZ_FULL RCU might be idle. 
+ * + * 1) Tell lockdep that interrupts are disabled + * 2) Invoke context tracking if enabled to reactivate RCU + * 3) Trace interrupts off state + */ +static __always_inline void enter_from_user_mode(struct pt_regs *regs) +{ + arch_check_user_regs(regs); + lockdep_hardirqs_off(CALLER_ADDR0); + + CT_WARN_ON(ct_state() != CONTEXT_USER); + user_exit_irqoff(); + + instrumentation_begin(); + trace_hardirqs_off_finish(); + instrumentation_end(); +} + +static inline void syscall_enter_audit(struct pt_regs *regs, long syscall) +{ + if (unlikely(audit_context())) { + unsigned long args[6]; + + syscall_get_arguments(current, regs, args); + audit_syscall_entry(syscall, args[0], args[1], args[2], args[3]); + } +} + +static long syscall_trace_enter(struct pt_regs *regs, long syscall, + unsigned long ti_work) +{ + long ret = 0; + + /* Handle ptrace */ + if (ti_work & (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU)) { + ret = arch_syscall_enter_tracehook(regs); + if (ret || (ti_work & _TIF_SYSCALL_EMU)) + return -1L; + } + + /* Do seccomp after ptrace, to catch any tracer changes. */ + if (ti_work & _TIF_SECCOMP) { + ret = __secure_computing(NULL); + if (ret == -1L) + return ret; + } + + if (unlikely(ti_work & _TIF_SYSCALL_TRACEPOINT)) + trace_sys_enter(regs, syscall); + + syscall_enter_audit(regs, syscall); + + return ret ? : syscall; +} + +noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall) +{ + unsigned long ti_work; + + enter_from_user_mode(regs); + instrumentation_begin(); + + local_irq_enable(); + ti_work = READ_ONCE(current_thread_info()->flags); + if (ti_work & SYSCALL_ENTER_WORK) + syscall = syscall_trace_enter(regs, syscall, ti_work); + instrumentation_end(); + + return syscall; +} + +noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs) +{ + enter_from_user_mode(regs); +} -- cgit v1.2.3 From a9f3a74a29af095f3e1b89e9176f8127912ae0f0 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 22 Jul 2020 23:59:57 +0200 Subject: entry: Provide generic syscall exit function Like syscall entry all architectures have similar and pointlessly different code to handle pending work before returning from a syscall to user space. 1) One-time syscall exit work: - rseq syscall exit - audit - syscall tracing - tracehook (single stepping) 2) Preparatory work - Exit to user mode loop (common TIF handling). - Architecture specific one time work arch_exit_to_user_mode_prepare() - Address limit and lockdep checks 3) Final transition (lockdep, tracing, context tracking, RCU). Invokes arch_exit_to_user_mode() to handle e.g. speculation mitigations Provide a generic version based on the x86 code which has all the RCU and instrumentation protections right. Provide a variant for interrupt return to user mode as well which shares the above #2 and #3 work items. After syscall_exit_to_user_mode() and irqentry_exit_to_user_mode() the architecture code just has to return to user space. The code after returning from these functions must not be instrumented. 
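To make the intended usage concrete, a rough sketch of an architecture's C-level syscall handler built on these helpers could look as follows; arch_do_syscall(), arch_sys_call_table and the return-value field are illustrative stand-ins and are not defined by this series:

__visible noinstr void arch_do_syscall(struct pt_regs *regs, long nr)
{
	/* Establish state and run ptrace/seccomp/audit work; may rewrite nr */
	nr = syscall_enter_from_user_mode(regs, nr);

	instrumentation_begin();
	if (nr >= 0 && nr < NR_syscalls)
		regs->retval = arch_sys_call_table[nr](regs);	/* arch return register */
	else if (nr != -1)
		regs->retval = -ENOSYS;
	instrumentation_end();

	/* One-time exit work, exit-to-user TIF loop, final transition */
	syscall_exit_to_user_mode(regs);
	/* Only the non-instrumentable return-to-user ASM may run after this */
}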
Signed-off-by: Thomas Gleixner Reviewed-by: Kees Cook Link: https://lkml.kernel.org/r/20200722220519.613977173@linutronix.de --- include/linux/entry-common.h | 189 +++++++++++++++++++++++++++++++++++++++++++ kernel/entry/common.c | 169 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 358 insertions(+) (limited to 'include') diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index 42fc8e4632bb..c4a57be9cde4 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -29,6 +29,14 @@ # define _TIF_SYSCALL_AUDIT (0) #endif +#ifndef _TIF_PATCH_PENDING +# define _TIF_PATCH_PENDING (0) +#endif + +#ifndef _TIF_UPROBE +# define _TIF_UPROBE (0) +#endif + /* * TIF flags handled in syscall_enter_from_usermode() */ @@ -41,6 +49,29 @@ _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_EMU | \ ARCH_SYSCALL_ENTER_WORK) +/* + * TIF flags handled in syscall_exit_to_user_mode() + */ +#ifndef ARCH_SYSCALL_EXIT_WORK +# define ARCH_SYSCALL_EXIT_WORK (0) +#endif + +#define SYSCALL_EXIT_WORK \ + (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ + _TIF_SYSCALL_TRACEPOINT | ARCH_SYSCALL_EXIT_WORK) + +/* + * TIF flags handled in exit_to_user_mode_loop() + */ +#ifndef ARCH_EXIT_TO_USER_MODE_WORK +# define ARCH_EXIT_TO_USER_MODE_WORK (0) +#endif + +#define EXIT_TO_USER_MODE_WORK \ + (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ + _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | \ + ARCH_EXIT_TO_USER_MODE_WORK) + /** * arch_check_user_regs - Architecture specific sanity check for user mode regs * @regs: Pointer to currents pt_regs @@ -105,6 +136,149 @@ static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs */ long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall); +/** + * local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable() + * @ti_work: Cached TIF flags gathered with interrupts disabled + * + * Defaults to local_irq_enable(). Can be supplied by architecture specific + * code. + */ +static inline void local_irq_enable_exit_to_user(unsigned long ti_work); + +#ifndef local_irq_enable_exit_to_user +static inline void local_irq_enable_exit_to_user(unsigned long ti_work) +{ + local_irq_enable(); +} +#endif + +/** + * local_irq_disable_exit_to_user - Exit to user variant of local_irq_disable() + * + * Defaults to local_irq_disable(). Can be supplied by architecture specific + * code. + */ +static inline void local_irq_disable_exit_to_user(void); + +#ifndef local_irq_disable_exit_to_user +static inline void local_irq_disable_exit_to_user(void) +{ + local_irq_disable(); +} +#endif + +/** + * arch_exit_to_user_mode_work - Architecture specific TIF work for exit + * to user mode. + * @regs: Pointer to currents pt_regs + * @ti_work: Cached TIF flags gathered with interrupts disabled + * + * Invoked from exit_to_user_mode_loop() with interrupt enabled + * + * Defaults to NOOP. Can be supplied by architecture specific code. + */ +static inline void arch_exit_to_user_mode_work(struct pt_regs *regs, + unsigned long ti_work); + +#ifndef arch_exit_to_user_mode_work +static inline void arch_exit_to_user_mode_work(struct pt_regs *regs, + unsigned long ti_work) +{ +} +#endif + +/** + * arch_exit_to_user_mode_prepare - Architecture specific preparation for + * exit to user mode. + * @regs: Pointer to currents pt_regs + * @ti_work: Cached TIF flags gathered with interrupts disabled + * + * Invoked from exit_to_user_mode_prepare() with interrupt disabled as the last + * function before return. Defaults to NOOP. 
+ */ +static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, + unsigned long ti_work); + +#ifndef arch_exit_to_user_mode_prepare +static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, + unsigned long ti_work) +{ +} +#endif + +/** + * arch_exit_to_user_mode - Architecture specific final work before + * exit to user mode. + * + * Invoked from exit_to_user_mode() with interrupt disabled as the last + * function before return. Defaults to NOOP. + * + * This needs to be __always_inline because it is non-instrumentable code + * invoked after context tracking switched to user mode. + * + * An architecture implementation must not do anything complex, no locking + * etc. The main purpose is for speculation mitigations. + */ +static __always_inline void arch_exit_to_user_mode(void); + +#ifndef arch_exit_to_user_mode +static __always_inline void arch_exit_to_user_mode(void) { } +#endif + +/** + * arch_do_signal - Architecture specific signal delivery function + * @regs: Pointer to currents pt_regs + * + * Invoked from exit_to_user_mode_loop(). + */ +void arch_do_signal(struct pt_regs *regs); + +/** + * arch_syscall_exit_tracehook - Wrapper around tracehook_report_syscall_exit() + * @regs: Pointer to currents pt_regs + * @step: Indicator for single step + * + * Defaults to tracehook_report_syscall_exit(). Can be replaced by + * architecture specific code. + * + * Invoked from syscall_exit_to_user_mode() + */ +static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step); + +#ifndef arch_syscall_exit_tracehook +static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step) +{ + tracehook_report_syscall_exit(regs, step); +} +#endif + +/** + * syscall_exit_to_user_mode - Handle work before returning to user mode + * @regs: Pointer to currents pt_regs + * + * Invoked with interrupts enabled and fully valid regs. Returns with all + * work handled, interrupts disabled such that the caller can immediately + * switch to user mode. Called from architecture specific syscall and ret + * from fork code. + * + * The call order is: + * 1) One-time syscall exit work: + * - rseq syscall exit + * - audit + * - syscall tracing + * - tracehook (single stepping) + * + * 2) Preparatory work + * - Exit to user mode loop (common TIF handling). Invokes + * arch_exit_to_user_mode_work() for architecture specific TIF work + * - Architecture specific one time work arch_exit_to_user_mode_prepare() + * - Address limit and lockdep checks + * + * 3) Final transition (lockdep, tracing, context tracking, RCU). Invokes + * arch_exit_to_user_mode() to handle e.g. speculation mitigations + */ +void syscall_exit_to_user_mode(struct pt_regs *regs); + /** * irqentry_enter_from_user_mode - Establish state before invoking the irq handler * @regs: Pointer to currents pt_regs @@ -118,4 +292,19 @@ long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall); */ void irqentry_enter_from_user_mode(struct pt_regs *regs); +/** + * irqentry_exit_to_user_mode - Interrupt exit work + * @regs: Pointer to current's pt_regs + * + * Invoked with interrupts disbled and fully valid regs. Returns with all + * work handled, interrupts disabled such that the caller can immediately + * switch to user mode. Called from architecture specific interrupt + * handling code. + * + * The call order is #2 and #3 as described in syscall_exit_to_user_mode(). + * Interrupt exit is not invoking #1 which is the syscall specific one time + * work. 
+ */ +void irqentry_exit_to_user_mode(struct pt_regs *regs); + #endif diff --git a/kernel/entry/common.c b/kernel/entry/common.c index 1d636dee2fec..0a051bb91a56 100644 --- a/kernel/entry/common.c +++ b/kernel/entry/common.c @@ -2,6 +2,8 @@ #include #include +#include +#include #define CREATE_TRACE_POINTS #include @@ -82,7 +84,174 @@ noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall) return syscall; } +/** + * exit_to_user_mode - Fixup state when exiting to user mode + * + * Syscall/interupt exit enables interrupts, but the kernel state is + * interrupts disabled when this is invoked. Also tell RCU about it. + * + * 1) Trace interrupts on state + * 2) Invoke context tracking if enabled to adjust RCU state + * 3) Invoke architecture specific last minute exit code, e.g. speculation + * mitigations, etc. + * 4) Tell lockdep that interrupts are enabled + */ +static __always_inline void exit_to_user_mode(void) +{ + instrumentation_begin(); + trace_hardirqs_on_prepare(); + lockdep_hardirqs_on_prepare(CALLER_ADDR0); + instrumentation_end(); + + user_enter_irqoff(); + arch_exit_to_user_mode(); + lockdep_hardirqs_on(CALLER_ADDR0); +} + +/* Workaround to allow gradual conversion of architecture code */ +void __weak arch_do_signal(struct pt_regs *regs) { } + +static unsigned long exit_to_user_mode_loop(struct pt_regs *regs, + unsigned long ti_work) +{ + /* + * Before returning to user space ensure that all pending work + * items have been completed. + */ + while (ti_work & EXIT_TO_USER_MODE_WORK) { + + local_irq_enable_exit_to_user(ti_work); + + if (ti_work & _TIF_NEED_RESCHED) + schedule(); + + if (ti_work & _TIF_UPROBE) + uprobe_notify_resume(regs); + + if (ti_work & _TIF_PATCH_PENDING) + klp_update_patch_state(current); + + if (ti_work & _TIF_SIGPENDING) + arch_do_signal(regs); + + if (ti_work & _TIF_NOTIFY_RESUME) { + clear_thread_flag(TIF_NOTIFY_RESUME); + tracehook_notify_resume(regs); + rseq_handle_notify_resume(NULL, regs); + } + + /* Architecture specific TIF work */ + arch_exit_to_user_mode_work(regs, ti_work); + + /* + * Disable interrupts and reevaluate the work flags as they + * might have changed while interrupts and preemption was + * enabled above. + */ + local_irq_disable_exit_to_user(); + ti_work = READ_ONCE(current_thread_info()->flags); + } + + /* Return the latest work state for arch_exit_to_user_mode() */ + return ti_work; +} + +static void exit_to_user_mode_prepare(struct pt_regs *regs) +{ + unsigned long ti_work = READ_ONCE(current_thread_info()->flags); + + lockdep_assert_irqs_disabled(); + + if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK)) + ti_work = exit_to_user_mode_loop(regs, ti_work); + + arch_exit_to_user_mode_prepare(regs, ti_work); + + /* Ensure that the address limit is intact and no locks are held */ + addr_limit_user_check(); + lockdep_assert_irqs_disabled(); + lockdep_sys_exit(); +} + +#ifndef _TIF_SINGLESTEP +static inline bool report_single_step(unsigned long ti_work) +{ + return false; +} +#else +/* + * If TIF_SYSCALL_EMU is set, then the only reason to report is when + * TIF_SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP). This syscall + * instruction has been already reported in syscall_enter_from_usermode(). 
+ */ +#define SYSEMU_STEP (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU) + +static inline bool report_single_step(unsigned long ti_work) +{ + return (ti_work & SYSEMU_STEP) == _TIF_SINGLESTEP; +} +#endif + +static void syscall_exit_work(struct pt_regs *regs, unsigned long ti_work) +{ + bool step; + + audit_syscall_exit(regs); + + if (ti_work & _TIF_SYSCALL_TRACEPOINT) + trace_sys_exit(regs, syscall_get_return_value(current, regs)); + + step = report_single_step(ti_work); + if (step || ti_work & _TIF_SYSCALL_TRACE) + arch_syscall_exit_tracehook(regs, step); +} + +/* + * Syscall specific exit to user mode preparation. Runs with interrupts + * enabled. + */ +static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs) +{ + u32 cached_flags = READ_ONCE(current_thread_info()->flags); + unsigned long nr = syscall_get_nr(current, regs); + + CT_WARN_ON(ct_state() != CONTEXT_KERNEL); + + if (IS_ENABLED(CONFIG_PROVE_LOCKING)) { + if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr)) + local_irq_enable(); + } + + rseq_syscall(regs); + + /* + * Do one-time syscall specific work. If these work items are + * enabled, we want to run them exactly once per syscall exit with + * interrupts enabled. + */ + if (unlikely(cached_flags & SYSCALL_EXIT_WORK)) + syscall_exit_work(regs, cached_flags); +} + +__visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs) +{ + instrumentation_begin(); + syscall_exit_to_user_mode_prepare(regs); + local_irq_disable_exit_to_user(); + exit_to_user_mode_prepare(regs); + instrumentation_end(); + exit_to_user_mode(); +} + noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs) { enter_from_user_mode(regs); } + +noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs) +{ + instrumentation_begin(); + exit_to_user_mode_prepare(regs); + instrumentation_end(); + exit_to_user_mode(); +} -- cgit v1.2.3 From a5497bab5f72dce38a259a53fd3ac1239a7ecf40 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 22 Jul 2020 23:59:58 +0200 Subject: entry: Provide generic interrupt entry/exit code Like the syscall entry/exit code interrupt/exception entry after the real low level ASM bits should not be different accross architectures. Provide a generic version based on the x86 code. irqentry_enter() is called after the low level entry code and irqentry_exit() must be invoked right before returning to the low level code which just contains the actual return logic. The code before irqentry_enter() and irqentry_exit() must not be instrumented. Code after irqentry_enter() and before irqentry_exit() can be instrumented. irqentry_enter() invokes irqentry_enter_from_user_mode() if the interrupt/exception came from user mode. If if entered from kernel mode it handles the kernel mode variant of establishing state for lockdep, RCU and tracing depending on the kernel context it interrupted (idle, non-idle). 
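For illustration, the calling pattern an architecture is expected to follow around an interrupt or exception handler is roughly the following; the handler names are placeholders, not part of this patch:

noinstr void arch_handle_irq(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	/* Everything between these brackets may be instrumented */
	instrumentation_begin();
	arch_dispatch_irq(regs);	/* the real device interrupt handling */
	instrumentation_end();

	irqentry_exit(regs, state);
	/* Only the low level return ASM runs after this point */
}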
Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/20200722220519.723703209@linutronix.de --- include/linux/entry-common.h | 62 +++++++++++++++++++++++ kernel/entry/common.c | 117 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 179 insertions(+) (limited to 'include') diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index c4a57be9cde4..efebbffcd5cc 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -307,4 +307,66 @@ void irqentry_enter_from_user_mode(struct pt_regs *regs); */ void irqentry_exit_to_user_mode(struct pt_regs *regs); +#ifndef irqentry_state +typedef struct irqentry_state { + bool exit_rcu; +} irqentry_state_t; +#endif + +/** + * irqentry_enter - Handle state tracking on ordinary interrupt entries + * @regs: Pointer to pt_regs of interrupted context + * + * Invokes: + * - lockdep irqflag state tracking as low level ASM entry disabled + * interrupts. + * + * - Context tracking if the exception hit user mode. + * + * - The hardirq tracer to keep the state consistent as low level ASM + * entry disabled interrupts. + * + * As a precondition, this requires that the entry came from user mode, + * idle, or a kernel context in which RCU is watching. + * + * For kernel mode entries RCU handling is done conditional. If RCU is + * watching then the only RCU requirement is to check whether the tick has + * to be restarted. If RCU is not watching then rcu_irq_enter() has to be + * invoked on entry and rcu_irq_exit() on exit. + * + * Avoiding the rcu_irq_enter/exit() calls is an optimization but also + * solves the problem of kernel mode pagefaults which can schedule, which + * is not possible after invoking rcu_irq_enter() without undoing it. + * + * For user mode entries irqentry_enter_from_user_mode() is invoked to + * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit + * would not be possible. + * + * Returns: An opaque object that must be passed to idtentry_exit() + */ +irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs); + +/** + * irqentry_exit_cond_resched - Conditionally reschedule on return from interrupt + * + * Conditional reschedule with additional sanity checks. + */ +void irqentry_exit_cond_resched(void); + +/** + * irqentry_exit - Handle return from exception that used irqentry_enter() + * @regs: Pointer to pt_regs (exception entry regs) + * @state: Return value from matching call to irqentry_enter() + * + * Depending on the return target (kernel/user) this runs the necessary + * preemption and work checks if possible and reguired and returns to + * the caller with interrupts disabled and no further work pending. + * + * This is the last action before returning to the low level ASM code which + * just needs to return to the appropriate context. + * + * Counterpart to irqentry_enter(). + */ +void noinstr irqentry_exit(struct pt_regs *regs, irqentry_state_t state); + #endif diff --git a/kernel/entry/common.c b/kernel/entry/common.c index 0a051bb91a56..495f5c051b03 100644 --- a/kernel/entry/common.c +++ b/kernel/entry/common.c @@ -255,3 +255,120 @@ noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs) instrumentation_end(); exit_to_user_mode(); } + +irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs) +{ + irqentry_state_t ret = { + .exit_rcu = false, + }; + + if (user_mode(regs)) { + irqentry_enter_from_user_mode(regs); + return ret; + } + + /* + * If this entry hit the idle task invoke rcu_irq_enter() whether + * RCU is watching or not. 
+ * + * Interupts can nest when the first interrupt invokes softirq + * processing on return which enables interrupts. + * + * Scheduler ticks in the idle task can mark quiescent state and + * terminate a grace period, if and only if the timer interrupt is + * not nested into another interrupt. + * + * Checking for __rcu_is_watching() here would prevent the nesting + * interrupt to invoke rcu_irq_enter(). If that nested interrupt is + * the tick then rcu_flavor_sched_clock_irq() would wrongfully + * assume that it is the first interupt and eventually claim + * quiescient state and end grace periods prematurely. + * + * Unconditionally invoke rcu_irq_enter() so RCU state stays + * consistent. + * + * TINY_RCU does not support EQS, so let the compiler eliminate + * this part when enabled. + */ + if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) { + /* + * If RCU is not watching then the same careful + * sequence vs. lockdep and tracing is required + * as in irq_enter_from_user_mode(). + */ + lockdep_hardirqs_off(CALLER_ADDR0); + rcu_irq_enter(); + instrumentation_begin(); + trace_hardirqs_off_finish(); + instrumentation_end(); + + ret.exit_rcu = true; + return ret; + } + + /* + * If RCU is watching then RCU only wants to check whether it needs + * to restart the tick in NOHZ mode. rcu_irq_enter_check_tick() + * already contains a warning when RCU is not watching, so no point + * in having another one here. + */ + instrumentation_begin(); + rcu_irq_enter_check_tick(); + /* Use the combo lockdep/tracing function */ + trace_hardirqs_off(); + instrumentation_end(); + + return ret; +} + +void irqentry_exit_cond_resched(void) +{ + if (!preempt_count()) { + /* Sanity check RCU and thread stack */ + rcu_irq_exit_check_preempt(); + if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) + WARN_ON_ONCE(!on_thread_stack()); + if (need_resched()) + preempt_schedule_irq(); + } +} + +void noinstr irqentry_exit(struct pt_regs *regs, irqentry_state_t state) +{ + lockdep_assert_irqs_disabled(); + + /* Check whether this returns to user mode */ + if (user_mode(regs)) { + irqentry_exit_to_user_mode(regs); + } else if (!regs_irqs_disabled(regs)) { + /* + * If RCU was not watching on entry this needs to be done + * carefully and needs the same ordering of lockdep/tracing + * and RCU as the return to user mode path. + */ + if (state.exit_rcu) { + instrumentation_begin(); + /* Tell the tracer that IRET will enable interrupts */ + trace_hardirqs_on_prepare(); + lockdep_hardirqs_on_prepare(CALLER_ADDR0); + instrumentation_end(); + rcu_irq_exit(); + lockdep_hardirqs_on(CALLER_ADDR0); + return; + } + + instrumentation_begin(); + if (IS_ENABLED(CONFIG_PREEMPTION)) + irqentry_exit_cond_resched(); + /* Covers both tracing and lockdep */ + trace_hardirqs_on(); + instrumentation_end(); + } else { + /* + * IRQ flags state is correct already. Just tell RCU if it + * was not watching on entry. + */ + if (state.exit_rcu) + rcu_irq_exit(); + } +} -- cgit v1.2.3 From 935ace2fb5cc49ae88bd1f1735ddc51cdc2ebfb3 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 22 Jul 2020 23:59:59 +0200 Subject: entry: Provide infrastructure for work before transitioning to guest mode Entering a guest is similar to exiting to user space. Pending work like handling signals, rescheduling, task work etc. needs to be handled before that. Provide generic infrastructure to avoid duplication of the same handling code all over the place. The transfer to guest mode handling is different from the exit to usermode handling, e.g. vs. 
rseq and live patching, so a separate function is used. The initial list of work items handled is: TIF_SIGPENDING, TIF_NEED_RESCHED, TIF_NOTIFY_RESUME Architecture specific TIF flags can be added via defines in the architecture specific include files. The calling convention is also different from the syscall/interrupt entry functions as KVM invokes this from the outer vcpu_run() loop with interrupts and preemption enabled. To prevent missing a pending work item it invokes a check for pending TIF work from interrupt disabled code right before transitioning to guest mode. The lockdep, RCU and tracing state handling is also done directly around the switch to and from guest mode. Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/20200722220519.833296398@linutronix.de --- include/linux/entry-kvm.h | 80 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/kvm_host.h | 8 +++++ kernel/entry/Makefile | 3 +- kernel/entry/kvm.c | 51 ++++++++++++++++++++++++++++++ virt/kvm/Kconfig | 3 ++ 5 files changed, 144 insertions(+), 1 deletion(-) create mode 100644 include/linux/entry-kvm.h create mode 100644 kernel/entry/kvm.c (limited to 'include') diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h new file mode 100644 index 000000000000..0cef17afb41a --- /dev/null +++ b/include/linux/entry-kvm.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_ENTRYKVM_H +#define __LINUX_ENTRYKVM_H + +#include + +/* Transfer to guest mode work */ +#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK + +#ifndef ARCH_XFER_TO_GUEST_MODE_WORK +# define ARCH_XFER_TO_GUEST_MODE_WORK (0) +#endif + +#define XFER_TO_GUEST_MODE_WORK \ + (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ + _TIF_NOTIFY_RESUME | ARCH_XFER_TO_GUEST_MODE_WORK) + +struct kvm_vcpu; + +/** + * arch_xfer_to_guest_mode_handle_work - Architecture specific xfer to guest + * mode work handling function. + * @vcpu: Pointer to current's VCPU data + * @ti_work: Cached TIF flags gathered in xfer_to_guest_mode_handle_work() + * + * Invoked from xfer_to_guest_mode_handle_work(). Defaults to NOOP. Can be + * replaced by architecture specific code. + */ +static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu, + unsigned long ti_work); + +#ifndef arch_xfer_to_guest_mode_work +static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu, + unsigned long ti_work) +{ + return 0; +} +#endif + +/** + * xfer_to_guest_mode_handle_work - Check and handle pending work which needs + * to be handled before going to guest mode + * @vcpu: Pointer to current's VCPU data + * + * Returns: 0 or an error code + */ +int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu); + +/** + * __xfer_to_guest_mode_work_pending - Check if work is pending + * + * Returns: True if work pending, False otherwise. + * + * Bare variant of xfer_to_guest_mode_work_pending(). Can be called from + * interrupt enabled code for racy quick checks with care. + */ +static inline bool __xfer_to_guest_mode_work_pending(void) +{ + unsigned long ti_work = READ_ONCE(current_thread_info()->flags); + + return !!(ti_work & XFER_TO_GUEST_MODE_WORK); +} + +/** + * xfer_to_guest_mode_work_pending - Check if work is pending which needs to be + * handled before returning to guest mode + * + * Returns: True if work pending, False otherwise. + * + * Has to be invoked with interrupts disabled before the transition to + * guest mode. 
+ */ +static inline bool xfer_to_guest_mode_work_pending(void) +{ + lockdep_assert_irqs_disabled(); + return __xfer_to_guest_mode_work_pending(); +} +#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */ + +#endif diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index d564855243d8..ac83e9c1d82c 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1439,4 +1439,12 @@ int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn, uintptr_t data, const char *name, struct task_struct **thread_ptr); +#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK +static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu) +{ + vcpu->run->exit_reason = KVM_EXIT_INTR; + vcpu->stat.signal_exits++; +} +#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */ + #endif diff --git a/kernel/entry/Makefile b/kernel/entry/Makefile index c207d202bf3a..34c8a3f1c735 100644 --- a/kernel/entry/Makefile +++ b/kernel/entry/Makefile @@ -9,4 +9,5 @@ KCOV_INSTRUMENT := n CFLAGS_REMOVE_common.o = -fstack-protector -fstack-protector-strong CFLAGS_common.o += -fno-stack-protector -obj-$(CONFIG_GENERIC_ENTRY) += common.o +obj-$(CONFIG_GENERIC_ENTRY) += common.o +obj-$(CONFIG_KVM_XFER_TO_GUEST_WORK) += kvm.o diff --git a/kernel/entry/kvm.c b/kernel/entry/kvm.c new file mode 100644 index 000000000000..eb1a8a4c867c --- /dev/null +++ b/kernel/entry/kvm.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work) +{ + do { + int ret; + + if (ti_work & _TIF_SIGPENDING) { + kvm_handle_signal_exit(vcpu); + return -EINTR; + } + + if (ti_work & _TIF_NEED_RESCHED) + schedule(); + + if (ti_work & _TIF_NOTIFY_RESUME) { + clear_thread_flag(TIF_NOTIFY_RESUME); + tracehook_notify_resume(NULL); + } + + ret = arch_xfer_to_guest_mode_handle_work(vcpu, ti_work); + if (ret) + return ret; + + ti_work = READ_ONCE(current_thread_info()->flags); + } while (ti_work & XFER_TO_GUEST_MODE_WORK || need_resched()); + return 0; +} + +int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu) +{ + unsigned long ti_work; + + /* + * This is invoked from the outer guest loop with interrupts and + * preemption enabled. + * + * KVM invokes xfer_to_guest_mode_work_pending() with interrupts + * disabled in the inner loop before going into guest mode. No need + * to disable interrupts here. + */ + ti_work = READ_ONCE(current_thread_info()->flags); + if (!(ti_work & XFER_TO_GUEST_MODE_WORK)) + return 0; + + return xfer_to_guest_mode_work(vcpu, ti_work); +} +EXPORT_SYMBOL_GPL(xfer_to_guest_mode_handle_work); diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig index aad9284c043a..1c37ccd5d402 100644 --- a/virt/kvm/Kconfig +++ b/virt/kvm/Kconfig @@ -60,3 +60,6 @@ config HAVE_KVM_VCPU_RUN_PID_CHANGE config HAVE_KVM_NO_POLL bool + +config KVM_XFER_TO_GUEST_WORK + bool -- cgit v1.2.3 From 3f9a7a13fe4cb6e119e4e4745fbf975d30bfac9b Mon Sep 17 00:00:00 2001 From: Ashok Raj Date: Thu, 23 Jul 2020 15:37:29 -0700 Subject: PCI/ATS: Add pci_pri_supported() to check device or associated PF For SR-IOV, the PF PRI is shared between the PF and any associated VFs, and the PRI Capability is allowed for PFs but not for VFs. Searching for the PRI Capability on a VF always fails, even if its associated PF supports PRI. Add pci_pri_supported() to check whether device or its associated PF supports PRI. 
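For illustration, a caller that used to search the capability directly would now use the helper; the surrounding info structure is hypothetical:

/* Illustrative only: struct example_dev_info is not a real kernel structure */
static void example_probe_pri(struct pci_dev *pdev,
			      struct example_dev_info *info)
{
	/*
	 * pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI) returns 0 for a
	 * VF even when its PF implements PRI; pci_pri_supported() checks the
	 * associated PF as well.
	 */
	if (pci_pri_supported(pdev))
		info->pri_supported = 1;
}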
[bhelgaas: commit log, avoid "!!"] Fixes: b16d0cb9e2fc ("iommu/vt-d: Always enable PASID/PRI PCI capabilities before ATS") Link: https://lore.kernel.org/r/1595543849-19692-1-git-send-email-ashok.raj@intel.com Signed-off-by: Ashok Raj Signed-off-by: Bjorn Helgaas Reviewed-by: Lu Baolu Acked-by: Joerg Roedel Cc: stable@vger.kernel.org # v4.4+ --- drivers/iommu/intel/iommu.c | 2 +- drivers/pci/ats.c | 15 +++++++++++++++ include/linux/pci-ats.h | 4 ++++ 3 files changed, 20 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 9129663a7406..5552e7d5d2b1 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -2554,7 +2554,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, } if (info->ats_supported && ecap_prs(iommu->ecap) && - pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI)) + pci_pri_supported(pdev)) info->pri_supported = 1; } } diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c index b761c1f72f67..647e097530a8 100644 --- a/drivers/pci/ats.c +++ b/drivers/pci/ats.c @@ -325,6 +325,21 @@ int pci_prg_resp_pasid_required(struct pci_dev *pdev) return pdev->pasid_required; } + +/** + * pci_pri_supported - Check if PRI is supported. + * @pdev: PCI device structure + * + * Returns true if PRI capability is present, false otherwise. + */ +bool pci_pri_supported(struct pci_dev *pdev) +{ + /* VFs share the PF PRI */ + if (pci_physfn(pdev)->pri_cap) + return true; + return false; +} +EXPORT_SYMBOL_GPL(pci_pri_supported); #endif /* CONFIG_PCI_PRI */ #ifdef CONFIG_PCI_PASID diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h index f75c307f346d..df54cd5b15db 100644 --- a/include/linux/pci-ats.h +++ b/include/linux/pci-ats.h @@ -28,6 +28,10 @@ int pci_enable_pri(struct pci_dev *pdev, u32 reqs); void pci_disable_pri(struct pci_dev *pdev); int pci_reset_pri(struct pci_dev *pdev); int pci_prg_resp_pasid_required(struct pci_dev *pdev); +bool pci_pri_supported(struct pci_dev *pdev); +#else +static inline bool pci_pri_supported(struct pci_dev *pdev) +{ return false; } #endif /* CONFIG_PCI_PRI */ #ifdef CONFIG_PCI_PASID -- cgit v1.2.3 From e5ebffe18e5add85acb23c7a0f74509749967204 Mon Sep 17 00:00:00 2001 From: Jim Cromie Date: Sun, 19 Jul 2020 17:10:45 -0600 Subject: dyndbg: rename __verbose section to __dyndbg dyndbg populates its callsite info into __verbose section, change that to a more specific and descriptive name, __dyndbg. Also, per checkpatch: simplify __attribute(..) to __section(__dyndbg) declaration. and 1 spelling fix, decriptor Acked-by: Signed-off-by: Jim Cromie Link: https://lore.kernel.org/r/20200719231058.1586423-6-jim.cromie@gmail.com Signed-off-by: Greg Kroah-Hartman --- include/asm-generic/vmlinux.lds.h | 6 +++--- include/linux/dynamic_debug.h | 4 ++-- kernel/module.c | 2 +- lib/dynamic_debug.c | 12 ++++++------ 4 files changed, 12 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index db600ef218d7..05af5cef1ad6 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -320,9 +320,9 @@ *(__tracepoints) \ /* implement dynamic printk debug */ \ . 
= ALIGN(8); \ - __start___verbose = .; \ - KEEP(*(__verbose)) \ - __stop___verbose = .; \ + __start___dyndbg = .; \ + KEEP(*(__dyndbg)) \ + __stop___dyndbg = .; \ LIKELY_PROFILE() \ BRANCH_PROFILE() \ TRACE_PRINTKS() \ diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index abcd5fde30eb..aa9ff9e1c0b3 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h @@ -80,7 +80,7 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor, #define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ static struct _ddebug __aligned(8) \ - __attribute__((section("__verbose"))) name = { \ + __section(__dyndbg) name = { \ .modname = KBUILD_MODNAME, \ .function = __func__, \ .filename = __FILE__, \ @@ -133,7 +133,7 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor, /* * "Factory macro" for generating a call to func, guarded by a - * DYNAMIC_DEBUG_BRANCH. The dynamic debug decriptor will be + * DYNAMIC_DEBUG_BRANCH. The dynamic debug descriptor will be * initialized using the fmt argument. The function will be called with * the address of the descriptor as first argument, followed by all * the varargs. Note that fmt is repeated in invocations of this diff --git a/kernel/module.c b/kernel/module.c index aa183c9ac0a2..e7b4ff7e4fd0 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -3237,7 +3237,7 @@ static int find_module_sections(struct module *mod, struct load_info *info) if (section_addr(info, "__obsparm")) pr_warn("%s: Ignoring obsolete parameters\n", mod->name); - info->debug = section_objs(info, "__verbose", + info->debug = section_objs(info, "__dyndbg", sizeof(*info->debug), &info->num_debug); return 0; diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index c97872cffc8e..66c0bdf06ce7 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -39,8 +39,8 @@ #include -extern struct _ddebug __start___verbose[]; -extern struct _ddebug __stop___verbose[]; +extern struct _ddebug __start___dyndbg[]; +extern struct _ddebug __stop___dyndbg[]; struct ddebug_table { struct list_head link; @@ -1019,7 +1019,7 @@ static int __init dynamic_debug_init(void) int n = 0, entries = 0, modct = 0; int verbose_bytes = 0; - if (&__start___verbose == &__stop___verbose) { + if (&__start___dyndbg == &__stop___dyndbg) { if (IS_ENABLED(CONFIG_DYNAMIC_DEBUG)) { pr_warn("_ddebug table is empty in a CONFIG_DYNAMIC_DEBUG build\n"); return 1; @@ -1028,10 +1028,10 @@ static int __init dynamic_debug_init(void) ddebug_init_success = 1; return 0; } - iter = __start___verbose; + iter = __start___dyndbg; modname = iter->modname; iter_start = iter; - for (; iter < __stop___verbose; iter++) { + for (; iter < __stop___dyndbg; iter++) { entries++; verbose_bytes += strlen(iter->modname) + strlen(iter->function) + strlen(iter->filename) + strlen(iter->format); @@ -1054,7 +1054,7 @@ static int __init dynamic_debug_init(void) ddebug_init_success = 1; vpr_info("%d modules, %d entries and %d bytes in ddebug tables, %d bytes in (readonly) verbose section\n", modct, entries, (int)(modct * sizeof(struct ddebug_table)), - verbose_bytes + (int)(__stop___verbose - __start___verbose)); + verbose_bytes + (int)(__stop___dyndbg - __start___dyndbg)); /* apply ddebug_query boot param, dont unload tables on err */ if (ddebug_setup_string[0] != '\0') { -- cgit v1.2.3 From 3bf1c021e36e7269c71dceafec713b0181f413a9 Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Tue, 9 Jun 2020 16:14:55 +0300 Subject: uapi/habanalabs: fix some comments MAP/UNMAP are done also for device memory. 
Reviewed-by: Omer Shpigelman Signed-off-by: Oded Gabbay --- include/uapi/misc/habanalabs.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index f6267a8d7416..f218d1c62c62 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -530,13 +530,13 @@ union hl_wait_cs_args { struct hl_wait_cs_out out; }; -/* Opcode to alloc device memory */ +/* Opcode to allocate device memory */ #define HL_MEM_OP_ALLOC 0 /* Opcode to free previously allocated device memory */ #define HL_MEM_OP_FREE 1 -/* Opcode to map host memory */ +/* Opcode to map host and device memory */ #define HL_MEM_OP_MAP 2 -/* Opcode to unmap previously mapped host memory */ +/* Opcode to unmap previously mapped host and device memory */ #define HL_MEM_OP_UNMAP 3 /* Memory flags */ -- cgit v1.2.3 From db491e4f08a9fd84ebb1ebd22a6b0b988a81a0d8 Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Thu, 18 Jun 2020 09:51:16 +0300 Subject: habanalabs: Add dropped cs statistics info struct Add command submission statistics structure which can be obtained through the info ioctl. Each drop counter describes the reason for which the command submission was dropped. This information is needed for the user to be aware of the specific reason for which the submitted work was dropped. The user can then utilize the driver more efficiently. Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/command_submission.c | 24 +++++++++++++++++++++++- drivers/misc/habanalabs/habanalabs.h | 5 +++++ drivers/misc/habanalabs/habanalabs_ioctl.c | 24 ++++++++++++++++++++++++ drivers/misc/habanalabs/hw_queue.c | 5 ++++- include/uapi/misc/habanalabs.h | 21 +++++++++++++++++++++ 5 files changed, 77 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c index f81d6685e011..777f88d25acd 100644 --- a/drivers/misc/habanalabs/command_submission.c +++ b/drivers/misc/habanalabs/command_submission.c @@ -246,6 +246,18 @@ static void free_job(struct hl_device *hdev, struct hl_cs_job *job) kfree(job); } +static void cs_counters_aggregate(struct hl_device *hdev, struct hl_ctx *ctx) +{ + hdev->aggregated_cs_counters.device_in_reset_drop_cnt += + ctx->cs_counters.device_in_reset_drop_cnt; + hdev->aggregated_cs_counters.out_of_mem_drop_cnt += + ctx->cs_counters.out_of_mem_drop_cnt; + hdev->aggregated_cs_counters.parsing_drop_cnt += + ctx->cs_counters.parsing_drop_cnt; + hdev->aggregated_cs_counters.queue_full_drop_cnt += + ctx->cs_counters.queue_full_drop_cnt; +} + static void cs_do_release(struct kref *ref) { struct hl_cs *cs = container_of(ref, struct hl_cs, @@ -349,6 +361,8 @@ static void cs_do_release(struct kref *ref) dma_fence_signal(cs->fence); dma_fence_put(cs->fence); + cs_counters_aggregate(hdev, cs->ctx); + kfree(cs); } @@ -632,12 +646,15 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, rc = validate_queue_index(hdev, chunk, &queue_type, &is_kernel_allocated_cb); - if (rc) + if (rc) { + hpriv->ctx->cs_counters.parsing_drop_cnt++; goto free_cs_object; + } if (is_kernel_allocated_cb) { cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk); if (!cb) { + hpriv->ctx->cs_counters.parsing_drop_cnt++; rc = -EINVAL; goto free_cs_object; } @@ -651,6 +668,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, job = hl_cs_allocate_job(hdev, 
queue_type, is_kernel_allocated_cb); if (!job) { + hpriv->ctx->cs_counters.out_of_mem_drop_cnt++; dev_err(hdev->dev, "Failed to allocate a new job\n"); rc = -ENOMEM; if (is_kernel_allocated_cb) @@ -683,6 +701,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, rc = cs_parser(hpriv, job); if (rc) { + hpriv->ctx->cs_counters.parsing_drop_cnt++; dev_err(hdev->dev, "Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n", cs->ctx->asid, cs->sequence, job->id, rc); @@ -691,6 +710,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, } if (int_queues_only) { + hpriv->ctx->cs_counters.parsing_drop_cnt++; dev_err(hdev->dev, "Reject CS %d.%llu because only internal queues jobs are present\n", cs->ctx->asid, cs->sequence); @@ -875,6 +895,7 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, job = hl_cs_allocate_job(hdev, q_type, true); if (!job) { + ctx->cs_counters.out_of_mem_drop_cnt++; dev_err(hdev->dev, "Failed to allocate a new job\n"); rc = -ENOMEM; goto put_cs; @@ -882,6 +903,7 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, cb = hl_cb_kernel_create(hdev, PAGE_SIZE); if (!cb) { + ctx->cs_counters.out_of_mem_drop_cnt++; kfree(job); rc = -EFAULT; goto put_cs; diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h index e4d6f7c91194..ae781453a509 100644 --- a/drivers/misc/habanalabs/habanalabs.h +++ b/drivers/misc/habanalabs/habanalabs.h @@ -10,6 +10,7 @@ #include "include/armcp_if.h" #include "include/qman_if.h" +#include #include #include @@ -787,6 +788,7 @@ struct hl_ctx { struct mutex mem_hash_lock; struct mutex mmu_lock; struct list_head debugfs_list; + struct hl_cs_counters cs_counters; u64 cs_sequence; u64 *dram_default_hops; spinlock_t cs_lock; @@ -1391,6 +1393,7 @@ struct hl_device_idle_busy_ts { * @compute_ctx: current compute context executing. * @idle_busy_ts_arr: array to hold time stamps of transitions from idle to busy * and vice-versa + * @aggregated_cs_counters: aggregated cs counters among all contexts * @dram_used_mem: current DRAM memory consumption. * @timeout_jiffies: device CS timeout value. * @max_power: the max power of the device, as configured by the sysadmin. This @@ -1489,6 +1492,8 @@ struct hl_device { struct hl_device_idle_busy_ts *idle_busy_ts_arr; + struct hl_cs_counters aggregated_cs_counters; + atomic64_t dram_used_mem; u64 timeout_jiffies; u64 max_power; diff --git a/drivers/misc/habanalabs/habanalabs_ioctl.c b/drivers/misc/habanalabs/habanalabs_ioctl.c index 52eedd3a6c3a..5af1c03da473 100644 --- a/drivers/misc/habanalabs/habanalabs_ioctl.c +++ b/drivers/misc/habanalabs/habanalabs_ioctl.c @@ -276,6 +276,27 @@ static int time_sync_info(struct hl_device *hdev, struct hl_info_args *args) min((size_t) max_size, sizeof(time_sync))) ? -EFAULT : 0; } +static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args) +{ + struct hl_device *hdev = hpriv->hdev; + struct hl_info_cs_counters cs_counters = {0}; + u32 max_size = args->return_size; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + + if ((!max_size) || (!out)) + return -EINVAL; + + memcpy(&cs_counters.cs_counters, &hdev->aggregated_cs_counters, + sizeof(struct hl_cs_counters)); + + if (hpriv->ctx) + memcpy(&cs_counters.ctx_cs_counters, &hpriv->ctx->cs_counters, + sizeof(struct hl_cs_counters)); + + return copy_to_user(out, &cs_counters, + min((size_t) max_size, sizeof(cs_counters))) ? 
-EFAULT : 0; +} + static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, struct device *dev) { @@ -336,6 +357,9 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, case HL_INFO_TIME_SYNC: return time_sync_info(hdev, args); + case HL_INFO_CS_COUNTERS: + return cs_counters_info(hpriv, args); + default: dev_err(dev, "Invalid request %d\n", args->op); rc = -ENOTTY; diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c index f5a10a5ac300..da66ffb528f8 100644 --- a/drivers/misc/habanalabs/hw_queue.c +++ b/drivers/misc/habanalabs/hw_queue.c @@ -514,6 +514,7 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs) hdev->asic_funcs->hw_queues_lock(hdev); if (hl_device_disabled_or_in_reset(hdev)) { + ctx->cs_counters.device_in_reset_drop_cnt++; dev_err(hdev->dev, "device is disabled or in reset, CS rejected!\n"); rc = -EPERM; @@ -543,8 +544,10 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs) break; } - if (rc) + if (rc) { + ctx->cs_counters.queue_full_drop_cnt++; goto unroll_cq_resv; + } if (q->queue_type == QUEUE_TYPE_EXT || q->queue_type == QUEUE_TYPE_HW) diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index f218d1c62c62..d5c4f983b7a8 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -263,6 +263,7 @@ enum hl_device_status { * time the driver was loaded. * HL_INFO_TIME_SYNC - Retrieve the device's time alongside the host's time * for synchronization. + * HL_INFO_CS_COUNTERS - Retrieve command submission counters */ #define HL_INFO_HW_IP_INFO 0 #define HL_INFO_HW_EVENTS 1 @@ -274,6 +275,7 @@ enum hl_device_status { #define HL_INFO_CLK_RATE 8 #define HL_INFO_RESET_COUNT 9 #define HL_INFO_TIME_SYNC 10 +#define HL_INFO_CS_COUNTERS 11 #define HL_INFO_VERSION_MAX_LEN 128 #define HL_INFO_CARD_NAME_MAX_LEN 16 @@ -338,6 +340,25 @@ struct hl_info_time_sync { __u64 host_time; }; +/** + * struct hl_info_cs_counters - command submission counters + * @out_of_mem_drop_cnt: dropped due to memory allocation issue + * @parsing_drop_cnt: dropped due to error in packet parsing + * @queue_full_drop_cnt: dropped due to queue full + * @device_in_reset_drop_cnt: dropped due to device in reset + */ +struct hl_cs_counters { + __u64 out_of_mem_drop_cnt; + __u64 parsing_drop_cnt; + __u64 queue_full_drop_cnt; + __u64 device_in_reset_drop_cnt; +}; + +struct hl_info_cs_counters { + struct hl_cs_counters cs_counters; + struct hl_cs_counters ctx_cs_counters; +}; + struct hl_info_args { /* Location of relevant struct in userspace */ __u64 return_pointer; -- cgit v1.2.3 From 0f63ef1dd581de1655075e2dbdf2ef302514a983 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Sun, 19 Jul 2020 08:22:22 +0300 Subject: RDMA/core: Align abort/commit object scheme for write() and ioctl() paths Create the same logic flow for the write() interface as we have for the ioctl() path by making sure that the object is committed or aborted automatically after HW object creation. 
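The resulting pattern for a write() handler that creates an object is sketched below; the FOO object type and the two foo_*() callbacks are hypothetical names used only for illustration:

static int ib_uverbs_create_foo(struct uverbs_attr_bundle *attrs)
{
	struct ib_device *ib_dev;
	struct ib_uobject *uobj;
	int ret;

	uobj = uobj_alloc(UVERBS_OBJECT_FOO, attrs, &ib_dev);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = foo_create_hw_object(ib_dev, uobj, attrs);
	if (ret) {
		uobj_alloc_abort(uobj, attrs);
		return ret;
	}

	/*
	 * Hand the uobject to the core: it is committed when the handler
	 * returns 0 and aborted otherwise, matching the ioctl() path.
	 */
	uobj_finalize_uobj_create(uobj, attrs);

	return foo_copy_response_to_user(attrs, uobj);
}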
Link: https://lore.kernel.org/r/20200719052223.75245-2-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/uverbs_main.c | 4 ++++ drivers/infiniband/core/uverbs_std_types_device.c | 7 ++++++- include/rdma/uverbs_ioctl.h | 1 + include/rdma/uverbs_std_types.h | 14 ++++++++++++++ 4 files changed, 25 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 69e4755cc04b..37794d88b1f3 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -601,6 +601,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, memset(bundle.attr_present, 0, sizeof(bundle.attr_present)); bundle.ufile = file; bundle.context = NULL; /* only valid if bundle has uobject */ + bundle.uobject = NULL; if (!method_elm->is_ex) { size_t in_len = hdr.in_words * 4 - sizeof(hdr); size_t out_len = hdr.out_words * 4; @@ -664,6 +665,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, } ret = method_elm->handler(&bundle); + if (bundle.uobject) + uverbs_finalize_object(bundle.uobject, UVERBS_ACCESS_NEW, true, + !ret, &bundle); out_unlock: srcu_read_unlock(&file->device->disassociate_srcu, srcu_key); return (ret) ? : count; diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c index 8e58605a17be..75df2094a010 100644 --- a/drivers/infiniband/core/uverbs_std_types_device.c +++ b/drivers/infiniband/core/uverbs_std_types_device.c @@ -38,7 +38,12 @@ static int UVERBS_HANDLER(UVERBS_METHOD_INVOKE_WRITE)( attrs->ucore.outlen < method_elm->resp_size) return -ENOSPC; - return method_elm->handler(attrs); + attrs->uobject = NULL; + rc = method_elm->handler(attrs); + if (attrs->uobject) + uverbs_finalize_object(attrs->uobject, UVERBS_ACCESS_NEW, true, + !rc, attrs); + return rc; } DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_INVOKE_WRITE, diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h index 86de10ea30af..db419c8dbd10 100644 --- a/include/rdma/uverbs_ioctl.h +++ b/include/rdma/uverbs_ioctl.h @@ -652,6 +652,7 @@ struct uverbs_attr_bundle { struct ib_udata ucore; struct ib_uverbs_file *ufile; struct ib_ucontext *context; + struct ib_uobject *uobject; DECLARE_BITMAP(attr_present, UVERBS_API_ATTR_BKEY_LEN); struct uverbs_attr attrs[]; }; diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h index bf0392ae15eb..8451b19103ee 100644 --- a/include/rdma/uverbs_std_types.h +++ b/include/rdma/uverbs_std_types.h @@ -110,6 +110,20 @@ static inline void uobj_alloc_abort(struct ib_uobject *uobj, rdma_alloc_abort_uobject(uobj, attrs, false); } +static inline void uobj_finalize_uobj_create(struct ib_uobject *uobj, + struct uverbs_attr_bundle *attrs) +{ + /* + * Tell the core code that the write() handler has completed + * initializing the object and that the core should commit or + * abort this object based upon the return code from the write() + * method. 
Similar to what uverbs_finalize_uobj_create() does for + * ioctl() + */ + WARN_ON(attrs->uobject); + attrs->uobject = uobj; +} + static inline struct ib_uobject * __uobj_alloc(const struct uverbs_api_object *obj, struct uverbs_attr_bundle *attrs, struct ib_device **ib_dev) -- cgit v1.2.3 From dd6f843a9fca8f225c86fee5f50da429c369c045 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 18 Jul 2020 11:32:51 +0300 Subject: tasks: add put_task_struct_many() put_task_struct_many() is as put_task_struct() but puts several references at once. Useful to batching it. Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- include/linux/sched/task.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include') diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 38359071236a..1301077f9c24 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -126,6 +126,12 @@ static inline void put_task_struct(struct task_struct *t) __put_task_struct(t); } +static inline void put_task_struct_many(struct task_struct *t, int nr) +{ + if (refcount_sub_and_test(nr, &t->usage)) + __put_task_struct(t); +} + void put_task_struct_rcu_user(struct task_struct *task); #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT -- cgit v1.2.3 From 042dd05bddbd84e6a52b337a65d1994003c9b9bb Mon Sep 17 00:00:00 2001 From: Meir Lichtinger Date: Thu, 4 Jun 2020 08:49:38 +0300 Subject: RDMA/mlx5: ConnectX-7 new capabilities to set relaxed ordering by UMR Up to ConnectX-7 setting mkey relaxed ordering read/write attributes by UMR is not supported. ConnectX-7 supports this option, which is indicated by two new HCA capabilities - relaxed_ordering_write_umr and relaxed_ordering_read_umr. Signed-off-by: Meir Lichtinger Signed-off-by: Leon Romanovsky --- include/linux/mlx5/mlx5_ifc.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 435ab47d5362..0257329431e8 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1240,7 +1240,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 max_sgl_for_optimized_performance[0x8]; u8 log_max_cq_sz[0x8]; - u8 reserved_at_d0[0x9]; + u8 relaxed_ordering_write_umr[0x1]; + u8 relaxed_ordering_read_umr[0x1]; + u8 reserved_at_d2[0x7]; u8 virtio_net_device_emualtion_manager[0x1]; u8 virtio_blk_device_emualtion_manager[0x1]; u8 log_max_cq[0x5]; -- cgit v1.2.3 From 0c2a34937f7e4c4776bb261114c475392da2355c Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Tue, 30 Jun 2020 18:24:40 +0200 Subject: i2c: revert "i2c: core: Allow drivers to disable i2c-core irq mapping" This manually reverts commit d1d84bb95364ed604015c2b788caaf3dbca0262f. The only user has gone two years ago with commit 589edb56b424 ("ACPI / scan: Create platform device for INT33FE ACPI nodes") and no new user has showed up. Remove and hope we will never need it again. 
Signed-off-by: Wolfram Sang Acked-by: Hans de Goede Signed-off-by: Wolfram Sang --- drivers/i2c/i2c-core-base.c | 6 +++--- include/linux/i2c.h | 3 --- 2 files changed, 3 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 26f03a14a478..dc43242a85ba 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -319,11 +319,9 @@ static int i2c_device_probe(struct device *dev) if (!client) return 0; - driver = to_i2c_driver(dev->driver); - client->irq = client->init_irq; - if (!client->irq && !driver->disable_i2c_core_irq_mapping) { + if (!client->irq) { int irq = -ENOENT; if (client->flags & I2C_CLIENT_HOST_NOTIFY) { @@ -349,6 +347,8 @@ static int i2c_device_probe(struct device *dev) client->irq = irq; } + driver = to_i2c_driver(dev->driver); + /* * An I2C ID table is not mandatory, if and only if, a suitable OF * or ACPI ID table is supplied for the probing device. diff --git a/include/linux/i2c.h b/include/linux/i2c.h index b8b8963f8bb9..098405df431f 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -231,7 +231,6 @@ enum i2c_alert_protocol { * @detect: Callback for device detection * @address_list: The I2C addresses to probe (for detect) * @clients: List of detected clients we created (for i2c-core use only) - * @disable_i2c_core_irq_mapping: Tell the i2c-core to not do irq-mapping * * The driver.owner field should be set to the module owner of this driver. * The driver.name field should be set to the name of this driver. @@ -290,8 +289,6 @@ struct i2c_driver { int (*detect)(struct i2c_client *client, struct i2c_board_info *info); const unsigned short *address_list; struct list_head clients; - - bool disable_i2c_core_irq_mapping; }; #define to_i2c_driver(d) container_of(d, struct i2c_driver, driver) -- cgit v1.2.3 From 8b603d0715a372f5827d3a6b19d9568bf854b687 Mon Sep 17 00:00:00 2001 From: Pavel Machek Date: Fri, 24 Jul 2020 10:41:12 +0200 Subject: RDMA/mlx5: Fix typo in enum name Nnothing uses the enum name, so this is harmless. Fixes: 322694412400 ("IB/mlx5: Introduce driver create and destroy flow methods") Link: https://lore.kernel.org/r/20200724084112.GC31930@amd Signed-off-by: Pavel Machek (CIP) Signed-off-by: Jason Gunthorpe --- include/uapi/rdma/mlx5_user_ioctl_cmds.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h index b330e6eee626..e24d66d278cf 100644 --- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h +++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h @@ -263,7 +263,7 @@ enum mlx5_ib_create_flow_attrs { MLX5_IB_ATTR_CREATE_FLOW_FLAGS, }; -enum mlx5_ib_destoy_flow_attrs { +enum mlx5_ib_destroy_flow_attrs { MLX5_IB_ATTR_DESTROY_FLOW_HANDLE = (1U << UVERBS_ID_NS_SHIFT), }; -- cgit v1.2.3 From 381cc6f97cdaf15dabceb1b48fa82c9de0163e58 Mon Sep 17 00:00:00 2001 From: Taniya Das Date: Fri, 24 Jul 2020 21:37:56 +0530 Subject: dt-bindings: clock: Add YAML schemas for LPASS clocks on SC7180 The LPASS(Low Power Audio Subsystem) clock provider have a bunch of generic properties that are needed in a device tree. Also add clock ids for GCC LPASS and LPASS Core clock IDs for LPASS client to request for the clocks. 
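For illustration only (not part of this patch), a LPASS client would normally reference one of these IDs from its device tree node and request the clock by name. The node layout and the "core" clock name below are assumptions, not something defined by this binding.

/*
 * Hedged consumer sketch, assuming a client node along the lines of:
 *   clocks = <&lpass_core LPASS_AUDIO_CORE_CORE_CLK>;
 *   clock-names = "core";
 */
#include <linux/clk.h>
#include <linux/platform_device.h>

static int lpass_client_probe(struct platform_device *pdev)
{
        struct clk *core;
        int ret;

        core = devm_clk_get(&pdev->dev, "core");
        if (IS_ERR(core))
                return PTR_ERR(core);

        ret = clk_prepare_enable(core);
        if (ret)
                return ret;

        /* ... remaining device setup ... */
        return 0;
}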
Signed-off-by: Taniya Das Reviewed-by: Rob Herring Link: https://lore.kernel.org/r/1595606878-2664-3-git-send-email-tdas@codeaurora.org Signed-off-by: Stephen Boyd --- .../bindings/clock/qcom,sc7180-lpasscorecc.yaml | 102 +++++++++++++++++++++ include/dt-bindings/clock/qcom,gcc-sc7180.h | 1 + .../dt-bindings/clock/qcom,lpasscorecc-sc7180.h | 29 ++++++ 3 files changed, 132 insertions(+) create mode 100644 Documentation/devicetree/bindings/clock/qcom,sc7180-lpasscorecc.yaml create mode 100644 include/dt-bindings/clock/qcom,lpasscorecc-sc7180.h (limited to 'include') diff --git a/Documentation/devicetree/bindings/clock/qcom,sc7180-lpasscorecc.yaml b/Documentation/devicetree/bindings/clock/qcom,sc7180-lpasscorecc.yaml new file mode 100644 index 000000000000..a838250b33e7 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/qcom,sc7180-lpasscorecc.yaml @@ -0,0 +1,102 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/qcom,sc7180-lpasscorecc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm LPASS Core Clock Controller Binding for SC7180 + +maintainers: + - Taniya Das + +description: | + Qualcomm LPASS core clock control module which supports the clocks and + power domains on SC7180. + + See also: + - dt-bindings/clock/qcom,lpasscorecc-sc7180.h + +properties: + compatible: + enum: + - qcom,sc7180-lpasshm + - qcom,sc7180-lpasscorecc + + clocks: + items: + - description: gcc_lpass_sway clock from GCC + + clock-names: + items: + - const: iface + + power-domains: + maxItems: 1 + + '#clock-cells': + const: 1 + + '#power-domain-cells': + const: 1 + + reg: + minItems: 1 + items: + - description: lpass core cc register + - description: lpass audio cc register + + reg-names: + items: + - const: lpass_core_cc + - const: lpass_audio_cc + +if: + properties: + compatible: + contains: + const: qcom,sc7180-lpasshm +then: + properties: + reg: + maxItems: 1 + +else: + properties: + reg: + minItems: 2 + +required: + - compatible + - reg + - clocks + - clock-names + - '#clock-cells' + - '#power-domain-cells' + +additionalProperties: false + +examples: + - | + #include + #include + clock-controller@63000000 { + compatible = "qcom,sc7180-lpasshm"; + reg = <0x63000000 0x28>; + clocks = <&gcc GCC_LPASS_CFG_NOC_SWAY_CLK>; + clock-names = "iface"; + #clock-cells = <1>; + #power-domain-cells = <1>; + }; + + - | + clock-controller@62d00000 { + compatible = "qcom,sc7180-lpasscorecc"; + reg = <0x62d00000 0x50000>, <0x62780000 0x30000>; + reg-names = "lpass_core_cc", "lpass_audio_cc"; + clocks = <&gcc GCC_LPASS_CFG_NOC_SWAY_CLK>; + clock-names = "iface"; + power-domains = <&lpass_hm LPASS_CORE_HM_GDSCR>; + #clock-cells = <1>; + #power-domain-cells = <1>; + }; +... 
diff --git a/include/dt-bindings/clock/qcom,gcc-sc7180.h b/include/dt-bindings/clock/qcom,gcc-sc7180.h index 992b67b7e5e4..bdf43adc7897 100644 --- a/include/dt-bindings/clock/qcom,gcc-sc7180.h +++ b/include/dt-bindings/clock/qcom,gcc-sc7180.h @@ -138,6 +138,7 @@ #define GCC_MSS_Q6_MEMNOC_AXI_CLK 128 #define GCC_MSS_SNOC_AXI_CLK 129 #define GCC_SEC_CTRL_CLK_SRC 130 +#define GCC_LPASS_CFG_NOC_SWAY_CLK 131 /* GCC resets */ #define GCC_QUSB2PHY_PRIM_BCR 0 diff --git a/include/dt-bindings/clock/qcom,lpasscorecc-sc7180.h b/include/dt-bindings/clock/qcom,lpasscorecc-sc7180.h new file mode 100644 index 000000000000..a55d01db2b20 --- /dev/null +++ b/include/dt-bindings/clock/qcom,lpasscorecc-sc7180.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_LPASS_CORE_CC_SC7180_H +#define _DT_BINDINGS_CLK_QCOM_LPASS_CORE_CC_SC7180_H + +/* LPASS_CORE_CC clocks */ +#define LPASS_LPAAUDIO_DIG_PLL 0 +#define LPASS_LPAAUDIO_DIG_PLL_OUT_ODD 1 +#define CORE_CLK_SRC 2 +#define EXT_MCLK0_CLK_SRC 3 +#define LPAIF_PRI_CLK_SRC 4 +#define LPAIF_SEC_CLK_SRC 5 +#define LPASS_AUDIO_CORE_CORE_CLK 6 +#define LPASS_AUDIO_CORE_EXT_MCLK0_CLK 7 +#define LPASS_AUDIO_CORE_LPAIF_PRI_IBIT_CLK 8 +#define LPASS_AUDIO_CORE_LPAIF_SEC_IBIT_CLK 9 +#define LPASS_AUDIO_CORE_SYSNOC_MPORT_CORE_CLK 10 + +/* LPASS Core power domains */ +#define LPASS_CORE_HM_GDSCR 0 + +/* LPASS Audio power domains */ +#define LPASS_AUDIO_HM_GDSCR 0 +#define LPASS_PDC_HM_GDSCR 1 + +#endif -- cgit v1.2.3 From 43148b1cab44dbabf91cb70749ef3f9b24592e91 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Fri, 24 Jul 2020 14:19:12 -0700 Subject: soc: ti: k3-ringacc: add request pair of rings api. Add new API k3_ringacc_request_rings_pair() to request pair of rings at once, as in the most cases Rings are used with DMA channels, which need to request pair of rings - one to feed DMA with descriptors (TX/RX FDQ) and one to receive completions (RX/TX CQ). This will allow to simplify Ringacc API users. 
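For illustration (not part of this patch), a DMA channel setup using the new helper could look like the sketch below; the foo_chan structure and the ring id fields are hypothetical driver-private state.

#include <linux/soc/ti/k3-ringacc.h>

struct foo_chan {                       /* hypothetical channel state */
        int fdq_ring_id;
        int cq_ring_id;
        struct k3_ring *fdq_ring;
        struct k3_ring *cq_ring;
};

/*
 * Hedged sketch: one call requests both the free-descriptor ring and the
 * completion ring; on failure nothing is left allocated, so the caller has
 * no partial state to unwind.
 */
static int foo_chan_request_rings(struct foo_chan *chan, struct k3_ringacc *rac)
{
        int ret;

        ret = k3_ringacc_request_rings_pair(rac, chan->fdq_ring_id,
                                            chan->cq_ring_id,
                                            &chan->fdq_ring, &chan->cq_ring);
        if (ret)
                return ret;

        /* the rings are then configured with k3_ringacc_ring_cfg() as before */
        return 0;
}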
Signed-off-by: Grygorii Strashko Reviewed-by: Peter Ujfalusi Signed-off-by: Santosh Shilimkar --- drivers/soc/ti/k3-ringacc.c | 24 ++++++++++++++++++++++++ include/linux/soc/ti/k3-ringacc.h | 4 ++++ 2 files changed, 28 insertions(+) (limited to 'include') diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c index 3d8e11079ede..c9f022037942 100644 --- a/drivers/soc/ti/k3-ringacc.c +++ b/drivers/soc/ti/k3-ringacc.c @@ -322,6 +322,30 @@ error: } EXPORT_SYMBOL_GPL(k3_ringacc_request_ring); +int k3_ringacc_request_rings_pair(struct k3_ringacc *ringacc, + int fwd_id, int compl_id, + struct k3_ring **fwd_ring, + struct k3_ring **compl_ring) +{ + int ret = 0; + + if (!fwd_ring || !compl_ring) + return -EINVAL; + + *fwd_ring = k3_ringacc_request_ring(ringacc, fwd_id, 0); + if (!(*fwd_ring)) + return -ENODEV; + + *compl_ring = k3_ringacc_request_ring(ringacc, compl_id, 0); + if (!(*compl_ring)) { + k3_ringacc_ring_free(*fwd_ring); + ret = -ENODEV; + } + + return ret; +} +EXPORT_SYMBOL_GPL(k3_ringacc_request_rings_pair); + static void k3_ringacc_ring_reset_sci(struct k3_ring *ring) { struct k3_ringacc *ringacc = ring->parent; diff --git a/include/linux/soc/ti/k3-ringacc.h b/include/linux/soc/ti/k3-ringacc.h index 26f73df0a524..7ac115432fa1 100644 --- a/include/linux/soc/ti/k3-ringacc.h +++ b/include/linux/soc/ti/k3-ringacc.h @@ -107,6 +107,10 @@ struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np, struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc, int id, u32 flags); +int k3_ringacc_request_rings_pair(struct k3_ringacc *ringacc, + int fwd_id, int compl_id, + struct k3_ring **fwd_ring, + struct k3_ring **compl_ring); /** * k3_ringacc_ring_reset - ring reset * @ring: pointer on Ring -- cgit v1.2.3 From 2ea17d504a0ee9b357a5fd2bde257c2ec37ceca7 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 24 Jul 2020 14:42:03 -0700 Subject: soc: ti/ti_sci_protocol.h: drop a duplicated word + clarify Drop the repeated word "an" in a comment. Insert "and" between "source" and "destination" as is done a few lines earlier. Signed-off-by: Randy Dunlap Reviewed-by: Tero Kristo Signed-off-by: Santosh Shilimkar --- include/linux/soc/ti/ti_sci_protocol.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h index 9531ec823298..6c728dad8c10 100644 --- a/include/linux/soc/ti/ti_sci_protocol.h +++ b/include/linux/soc/ti/ti_sci_protocol.h @@ -226,8 +226,8 @@ struct ti_sci_rm_core_ops { * and destination * @set_event_map: Set an Event based peripheral irq to Interrupt * Aggregator. - * @free_irq: Free an an IRQ route between the requested source - * destination. + * @free_irq: Free an IRQ route between the requested source + * and destination. * @free_event_map: Free an event based peripheral irq to Interrupt * Aggregator. */ -- cgit v1.2.3 From a6df49f4224324dd8588f6a0d9cff53cd61a196b Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Fri, 24 Jul 2020 14:43:48 -0700 Subject: firmware: ti_sci: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. 
Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Acked-by: Rob Herring Signed-off-by: Santosh Shilimkar --- Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt | 2 +- drivers/firmware/ti_sci.c | 2 +- drivers/firmware/ti_sci.h | 2 +- drivers/irqchip/irq-ti-sci-inta.c | 2 +- drivers/irqchip/irq-ti-sci-intr.c | 2 +- drivers/reset/reset-ti-sci.c | 2 +- include/linux/soc/ti/ti_sci_inta_msi.h | 2 +- include/linux/soc/ti/ti_sci_protocol.h | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt index 1a8718f8855d..178fca08278f 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt @@ -55,7 +55,7 @@ Required Properties: corresponds to a range of host irqs. For more details on TISCI IRQ resource management refer: -http://downloads.ti.com/tisci/esd/latest/2_tisci_msgs/rm/rm_irq.html +https://downloads.ti.com/tisci/esd/latest/2_tisci_msgs/rm/rm_irq.html Example: -------- diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c index 4126be9e3216..53cee17d0115 100644 --- a/drivers/firmware/ti_sci.c +++ b/drivers/firmware/ti_sci.c @@ -2,7 +2,7 @@ /* * Texas Instruments System Control Interface Protocol Driver * - * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/ * Nishanth Menon */ diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h index f0d068c03944..57cd04062994 100644 --- a/drivers/firmware/ti_sci.h +++ b/drivers/firmware/ti_sci.h @@ -6,7 +6,7 @@ * The system works in a message response protocol * See: http://processors.wiki.ti.com/index.php/TISCI for details * - * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/ */ #ifndef __TI_SCI_H diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c index 7e3ebf6ed2cd..85de19fe9b6e 100644 --- a/drivers/irqchip/irq-ti-sci-inta.c +++ b/drivers/irqchip/irq-ti-sci-inta.c @@ -2,7 +2,7 @@ /* * Texas Instruments' K3 Interrupt Aggregator irqchip driver * - * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2018-2019 Texas Instruments Incorporated - https://www.ti.com/ * Lokesh Vutla */ diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c index 59d51a20bbd8..5ea148faf2ab 100644 --- a/drivers/irqchip/irq-ti-sci-intr.c +++ b/drivers/irqchip/irq-ti-sci-intr.c @@ -2,7 +2,7 @@ /* * Texas Instruments' K3 Interrupt Router irqchip driver * - * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2018-2019 Texas Instruments Incorporated - https://www.ti.com/ * Lokesh Vutla */ diff --git a/drivers/reset/reset-ti-sci.c b/drivers/reset/reset-ti-sci.c index bf68729ab729..b799aefad547 100644 --- a/drivers/reset/reset-ti-sci.c +++ b/drivers/reset/reset-ti-sci.c @@ -1,7 +1,7 @@ /* * Texas Instrument's System 
Control Interface (TI-SCI) reset driver * - * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015-2017 Texas Instruments Incorporated - https://www.ti.com/ * Andrew F. Davis * * This program is free software; you can redistribute it and/or modify diff --git a/include/linux/soc/ti/ti_sci_inta_msi.h b/include/linux/soc/ti/ti_sci_inta_msi.h index 11fb5048f5f6..e3aa8b14612e 100644 --- a/include/linux/soc/ti/ti_sci_inta_msi.h +++ b/include/linux/soc/ti/ti_sci_inta_msi.h @@ -2,7 +2,7 @@ /* * Texas Instruments' K3 TI SCI INTA MSI helper * - * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2018-2019 Texas Instruments Incorporated - https://www.ti.com/ * Lokesh Vutla */ diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h index 6c728dad8c10..49c5d29cd33c 100644 --- a/include/linux/soc/ti/ti_sci_protocol.h +++ b/include/linux/soc/ti/ti_sci_protocol.h @@ -2,7 +2,7 @@ /* * Texas Instruments System Control Interface Protocol * - * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/ * Nishanth Menon */ -- cgit v1.2.3 From 0cb09aff9d49d92305c3969fc84b785117412968 Mon Sep 17 00:00:00 2001 From: Ariel Levkovich Date: Thu, 23 Jul 2020 01:03:00 +0300 Subject: net/flow_dissector: add packet hash dissection Retreive a hash value from the SKB and store it in the dissector key for future matching. Signed-off-by: Ariel Levkovich Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- include/linux/skbuff.h | 4 ++++ include/net/flow_dissector.h | 9 +++++++++ net/core/flow_dissector.c | 17 +++++++++++++++++ 3 files changed, 30 insertions(+) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 6a82d4a8229e..fa817a105517 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1342,6 +1342,10 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb, struct flow_dissector *flow_dissector, void *target_container); +void skb_flow_dissect_hash(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container); + static inline __u32 skb_get_hash(struct sk_buff *skb) { if (!skb->l4_hash && !skb->sw_hash) diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 4b6e36288ddd..cc10b10dc3a1 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -243,6 +243,14 @@ struct flow_dissector_key_ct { u32 ct_labels[4]; }; +/** + * struct flow_dissector_key_hash: + * @hash: hash value + */ +struct flow_dissector_key_hash { + u32 hash; +}; + enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */ FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */ @@ -271,6 +279,7 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_ENC_OPTS, /* struct flow_dissector_key_enc_opts */ FLOW_DISSECTOR_KEY_META, /* struct flow_dissector_key_meta */ FLOW_DISSECTOR_KEY_CT, /* struct flow_dissector_key_ct */ + FLOW_DISSECTOR_KEY_HASH, /* struct flow_dissector_key_hash */ FLOW_DISSECTOR_KEY_MAX, }; diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 142a8824f0a8..29806eb765cf 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -383,6 +383,23 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb, } EXPORT_SYMBOL(skb_flow_dissect_tunnel_info); +void skb_flow_dissect_hash(const struct sk_buff *skb, + struct 
flow_dissector *flow_dissector, + void *target_container) +{ + struct flow_dissector_key_hash *key; + + if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_HASH)) + return; + + key = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_HASH, + target_container); + + key->hash = skb_get_hash_raw(skb); +} +EXPORT_SYMBOL(skb_flow_dissect_hash); + static enum flow_dissect_ret __skb_flow_dissect_mpls(const struct sk_buff *skb, struct flow_dissector *flow_dissector, -- cgit v1.2.3 From 5923b8f7fa218a9bccd730c0a9692635eb2fc740 Mon Sep 17 00:00:00 2001 From: Ariel Levkovich Date: Thu, 23 Jul 2020 01:03:01 +0300 Subject: net/sched: cls_flower: Add hash info to flow classification Adding new cls flower keys for hash value and hash mask and dissect the hash info from the skb into the flow key towards flow classication. Signed-off-by: Ariel Levkovich Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- include/uapi/linux/pkt_cls.h | 3 +++ net/sched/cls_flower.c | 16 ++++++++++++++++ 2 files changed, 19 insertions(+) (limited to 'include') diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 7576209d96f9..ee95f42fb0ec 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -578,6 +578,9 @@ enum { TCA_FLOWER_KEY_MPLS_OPTS, + TCA_FLOWER_KEY_HASH, /* u32 */ + TCA_FLOWER_KEY_HASH_MASK, /* u32 */ + __TCA_FLOWER_MAX, }; diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index acd8e05c2ba5..a4f7ef1de7e7 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -64,6 +64,7 @@ struct fl_flow_key { }; } tp_range; struct flow_dissector_key_ct ct; + struct flow_dissector_key_hash hash; } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */ struct fl_flow_mask_range { @@ -318,6 +319,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp, skb_flow_dissect_ct(skb, &mask->dissector, &skb_key, fl_ct_info_to_flower_map, ARRAY_SIZE(fl_ct_info_to_flower_map)); + skb_flow_dissect_hash(skb, &mask->dissector, &skb_key); skb_flow_dissect(skb, &mask->dissector, &skb_key, 0); f = fl_mask_lookup(mask, &skb_key); @@ -695,6 +697,9 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { [TCA_FLOWER_KEY_CT_LABELS_MASK] = { .type = NLA_BINARY, .len = 128 / BITS_PER_BYTE }, [TCA_FLOWER_FLAGS] = { .type = NLA_U32 }, + [TCA_FLOWER_KEY_HASH] = { .type = NLA_U32 }, + [TCA_FLOWER_KEY_HASH_MASK] = { .type = NLA_U32 }, + }; static const struct nla_policy @@ -1626,6 +1631,10 @@ static int fl_set_key(struct net *net, struct nlattr **tb, fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip); + fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH, + &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK, + sizeof(key->hash.hash)); + if (tb[TCA_FLOWER_KEY_ENC_OPTS]) { ret = fl_set_enc_opt(tb, key, mask, extack); if (ret) @@ -1740,6 +1749,8 @@ static void fl_init_dissector(struct flow_dissector *dissector, FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts); FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_CT, ct); + FL_KEY_SET_IF_MASKED(mask, keys, cnt, + FLOW_DISSECTOR_KEY_HASH, hash); skb_flow_dissector_init(dissector, keys, cnt); } @@ -2960,6 +2971,11 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net, if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags)) goto nla_put_failure; + if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH, + &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK, + sizeof(key->hash.hash))) + goto nla_put_failure; + return 0; nla_put_failure: -- cgit v1.2.3 
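For illustration, the pattern a dissector user follows to consume the new hash key (mirroring what cls_flower does in the two patches above) is sketched below; the foo_* names are hypothetical, while the dissector keys and helpers are the ones added or already present in the tree.

#include <linux/skbuff.h>
#include <net/flow_dissector.h>

/* control and basic keys are mandatory for every dissector */
struct foo_flow_key {
        struct flow_dissector_key_control control;
        struct flow_dissector_key_basic basic;
        struct flow_dissector_key_hash hash;
};

static struct flow_dissector foo_dissector;

static void foo_dissector_init(void)
{
        static const struct flow_dissector_key keys[] = {
                { FLOW_DISSECTOR_KEY_CONTROL,
                  offsetof(struct foo_flow_key, control) },
                { FLOW_DISSECTOR_KEY_BASIC,
                  offsetof(struct foo_flow_key, basic) },
                { FLOW_DISSECTOR_KEY_HASH,
                  offsetof(struct foo_flow_key, hash) },
        };

        skb_flow_dissector_init(&foo_dissector, keys, ARRAY_SIZE(keys));
}

static u32 foo_classify(const struct sk_buff *skb)
{
        struct foo_flow_key key = {};

        /* fills key.hash.hash from skb_get_hash_raw() since the key is registered */
        skb_flow_dissect_hash(skb, &foo_dissector, &key);
        skb_flow_dissect(skb, &foo_dissector, &key, 0);

        return key.hash.hash;
}

A flower rule can then match on the dissected value through the new TCA_FLOWER_KEY_HASH and TCA_FLOWER_KEY_HASH_MASK attributes.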
From ba423fdaa589d972473083defedf9e862626d268 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:08:46 +0200 Subject: net: add a new sockptr_t type Add a uptr_t type that can hold a pointer to either a user or kernel memory region, and simply helpers to copy to and from it. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- include/linux/sockptr.h | 104 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 104 insertions(+) create mode 100644 include/linux/sockptr.h (limited to 'include') diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h new file mode 100644 index 000000000000..700856e13ea0 --- /dev/null +++ b/include/linux/sockptr.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020 Christoph Hellwig. + * + * Support for "universal" pointers that can point to either kernel or userspace + * memory. + */ +#ifndef _LINUX_SOCKPTR_H +#define _LINUX_SOCKPTR_H + +#include +#include + +typedef struct { + union { + void *kernel; + void __user *user; + }; + bool is_kernel : 1; +} sockptr_t; + +static inline bool sockptr_is_kernel(sockptr_t sockptr) +{ + return sockptr.is_kernel; +} + +static inline sockptr_t KERNEL_SOCKPTR(void *p) +{ + return (sockptr_t) { .kernel = p, .is_kernel = true }; +} + +static inline sockptr_t USER_SOCKPTR(void __user *p) +{ + return (sockptr_t) { .user = p }; +} + +static inline bool sockptr_is_null(sockptr_t sockptr) +{ + return !sockptr.user && !sockptr.kernel; +} + +static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size) +{ + if (!sockptr_is_kernel(src)) + return copy_from_user(dst, src.user, size); + memcpy(dst, src.kernel, size); + return 0; +} + +static inline int copy_to_sockptr(sockptr_t dst, const void *src, size_t size) +{ + if (!sockptr_is_kernel(dst)) + return copy_to_user(dst.user, src, size); + memcpy(dst.kernel, src, size); + return 0; +} + +static inline void *memdup_sockptr(sockptr_t src, size_t len) +{ + void *p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN); + + if (!p) + return ERR_PTR(-ENOMEM); + if (copy_from_sockptr(p, src, len)) { + kfree(p); + return ERR_PTR(-EFAULT); + } + return p; +} + +static inline void *memdup_sockptr_nul(sockptr_t src, size_t len) +{ + char *p = kmalloc_track_caller(len + 1, GFP_KERNEL); + + if (!p) + return ERR_PTR(-ENOMEM); + if (copy_from_sockptr(p, src, len)) { + kfree(p); + return ERR_PTR(-EFAULT); + } + p[len] = '\0'; + return p; +} + +static inline void sockptr_advance(sockptr_t sockptr, size_t len) +{ + if (sockptr_is_kernel(sockptr)) + sockptr.kernel += len; + else + sockptr.user += len; +} + +static inline long strncpy_from_sockptr(char *dst, sockptr_t src, size_t count) +{ + if (sockptr_is_kernel(src)) { + size_t len = min(strnlen(src.kernel, count - 1) + 1, count); + + memcpy(dst, src.kernel, len); + return len; + } + return strncpy_from_user(dst, src.user, count); +} + +#endif /* _LINUX_SOCKPTR_H */ -- cgit v1.2.3 From b1ea9ff6aff2deae84eccaf0a07cd14912669680 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:08:47 +0200 Subject: net: switch copy_bpf_fprog_from_user to sockptr_t Pass a sockptr_t to prepare for set_fs-less handling of the kernel pointer from bpf-cgroup. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- include/linux/filter.h | 3 ++- net/core/filter.c | 6 +++--- net/core/sock.c | 6 ++++-- net/packet/af_packet.c | 4 ++-- 4 files changed, 11 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/filter.h b/include/linux/filter.h index 1c6b6d982bf4..d07a6e973a7d 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -1276,7 +1277,7 @@ struct bpf_sockopt_kern { s32 retval; }; -int copy_bpf_fprog_from_user(struct sock_fprog *dst, void __user *src, int len); +int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len); struct bpf_sk_lookup_kern { u16 family; diff --git a/net/core/filter.c b/net/core/filter.c index 3fa16b8c0d61..29e3455122f7 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -77,14 +77,14 @@ #include #include -int copy_bpf_fprog_from_user(struct sock_fprog *dst, void __user *src, int len) +int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len) { if (in_compat_syscall()) { struct compat_sock_fprog f32; if (len != sizeof(f32)) return -EINVAL; - if (copy_from_user(&f32, src, sizeof(f32))) + if (copy_from_sockptr(&f32, src, sizeof(f32))) return -EFAULT; memset(dst, 0, sizeof(*dst)); dst->len = f32.len; @@ -92,7 +92,7 @@ int copy_bpf_fprog_from_user(struct sock_fprog *dst, void __user *src, int len) } else { if (len != sizeof(*dst)) return -EINVAL; - if (copy_from_user(dst, src, sizeof(*dst))) + if (copy_from_sockptr(dst, src, sizeof(*dst))) return -EFAULT; } diff --git a/net/core/sock.c b/net/core/sock.c index 6da54eac2b34..71fc7e4ddd06 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1063,7 +1063,8 @@ set_sndbuf: case SO_ATTACH_FILTER: { struct sock_fprog fprog; - ret = copy_bpf_fprog_from_user(&fprog, optval, optlen); + ret = copy_bpf_fprog_from_user(&fprog, USER_SOCKPTR(optval), + optlen); if (!ret) ret = sk_attach_filter(&fprog, sk); break; @@ -1084,7 +1085,8 @@ set_sndbuf: case SO_ATTACH_REUSEPORT_CBPF: { struct sock_fprog fprog; - ret = copy_bpf_fprog_from_user(&fprog, optval, optlen); + ret = copy_bpf_fprog_from_user(&fprog, USER_SOCKPTR(optval), + optlen); if (!ret) ret = sk_reuseport_attach_filter(&fprog, sk); break; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index c240fb5de3f0..d8d4f78f78e4 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1536,7 +1536,7 @@ static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new) } } -static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data, +static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data, unsigned int len) { struct bpf_prog *new; @@ -1584,7 +1584,7 @@ static int fanout_set_data(struct packet_sock *po, char __user *data, { switch (po->fanout->type) { case PACKET_FANOUT_CBPF: - return fanout_set_data_cbpf(po, data, len); + return fanout_set_data_cbpf(po, USER_SOCKPTR(data), len); case PACKET_FANOUT_EBPF: return fanout_set_data_ebpf(po, data, len); default: -- cgit v1.2.3 From c8c1bbb6eb498109286739f8b6090e99313dd104 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:08:50 +0200 Subject: net: switch sock_set_timeout to sockptr_t Pass a sockptr_t to prepare for set_fs-less handling of the kernel pointer from bpf-cgroup. Signed-off-by: Christoph Hellwig Acked-by: Matthieu Baerts Signed-off-by: David S. 
Miller --- include/net/sock.h | 3 ++- net/core/sock.c | 26 ++++++++++++-------------- net/mptcp/protocol.c | 6 ++++-- net/socket.c | 3 ++- 4 files changed, 20 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/include/net/sock.h b/include/net/sock.h index 62e18fc8ac9f..bfb2fe2fc368 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -59,6 +59,7 @@ #include #include #include +#include #include #include @@ -1669,7 +1670,7 @@ void sock_pfree(struct sk_buff *skb); #endif int sock_setsockopt(struct socket *sock, int level, int op, - char __user *optval, unsigned int optlen); + sockptr_t optval, unsigned int optlen); int sock_getsockopt(struct socket *sock, int level, int op, char __user *optval, int __user *optlen); diff --git a/net/core/sock.c b/net/core/sock.c index 8b9eddaff868..1444d7d53ba2 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -826,7 +826,7 @@ EXPORT_SYMBOL(sock_set_rcvbuf); */ int sock_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock_txtime sk_txtime; struct sock *sk = sock->sk; @@ -840,12 +840,12 @@ int sock_setsockopt(struct socket *sock, int level, int optname, */ if (optname == SO_BINDTODEVICE) - return sock_setbindtodevice(sk, USER_SOCKPTR(optval), optlen); + return sock_setbindtodevice(sk, optval, optlen); if (optlen < sizeof(int)) return -EINVAL; - if (get_user(val, (int __user *)optval)) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; valbool = val ? 1 : 0; @@ -958,7 +958,7 @@ set_sndbuf: ret = -EINVAL; /* 1003.1g */ break; } - if (copy_from_user(&ling, optval, sizeof(ling))) { + if (copy_from_sockptr(&ling, optval, sizeof(ling))) { ret = -EFAULT; break; } @@ -1052,21 +1052,20 @@ set_sndbuf: case SO_RCVTIMEO_OLD: case SO_RCVTIMEO_NEW: - ret = sock_set_timeout(&sk->sk_rcvtimeo, USER_SOCKPTR(optval), + ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen, optname == SO_RCVTIMEO_OLD); break; case SO_SNDTIMEO_OLD: case SO_SNDTIMEO_NEW: - ret = sock_set_timeout(&sk->sk_sndtimeo, USER_SOCKPTR(optval), + ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen, optname == SO_SNDTIMEO_OLD); break; case SO_ATTACH_FILTER: { struct sock_fprog fprog; - ret = copy_bpf_fprog_from_user(&fprog, USER_SOCKPTR(optval), - optlen); + ret = copy_bpf_fprog_from_user(&fprog, optval, optlen); if (!ret) ret = sk_attach_filter(&fprog, sk); break; @@ -1077,7 +1076,7 @@ set_sndbuf: u32 ufd; ret = -EFAULT; - if (copy_from_user(&ufd, optval, sizeof(ufd))) + if (copy_from_sockptr(&ufd, optval, sizeof(ufd))) break; ret = sk_attach_bpf(ufd, sk); @@ -1087,8 +1086,7 @@ set_sndbuf: case SO_ATTACH_REUSEPORT_CBPF: { struct sock_fprog fprog; - ret = copy_bpf_fprog_from_user(&fprog, USER_SOCKPTR(optval), - optlen); + ret = copy_bpf_fprog_from_user(&fprog, optval, optlen); if (!ret) ret = sk_reuseport_attach_filter(&fprog, sk); break; @@ -1099,7 +1097,7 @@ set_sndbuf: u32 ufd; ret = -EFAULT; - if (copy_from_user(&ufd, optval, sizeof(ufd))) + if (copy_from_sockptr(&ufd, optval, sizeof(ufd))) break; ret = sk_reuseport_attach_bpf(ufd, sk); @@ -1179,7 +1177,7 @@ set_sndbuf: if (sizeof(ulval) != sizeof(val) && optlen >= sizeof(ulval) && - get_user(ulval, (unsigned long __user *)optval)) { + copy_from_sockptr(&ulval, optval, sizeof(ulval))) { ret = -EFAULT; break; } @@ -1222,7 +1220,7 @@ set_sndbuf: if (optlen != sizeof(struct sock_txtime)) { ret = -EINVAL; break; - } else if (copy_from_user(&sk_txtime, optval, + } else if (copy_from_sockptr(&sk_txtime, optval, 
sizeof(struct sock_txtime))) { ret = -EFAULT; break; diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 979dfcd2aa14..7246847efa90 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1648,7 +1648,8 @@ static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname, return -EINVAL; } - ret = sock_setsockopt(ssock, SOL_SOCKET, optname, optval, optlen); + ret = sock_setsockopt(ssock, SOL_SOCKET, optname, + USER_SOCKPTR(optval), optlen); if (ret == 0) { if (optname == SO_REUSEPORT) sk->sk_reuseport = ssock->sk->sk_reuseport; @@ -1659,7 +1660,8 @@ static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname, return ret; } - return sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, optval, optlen); + return sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, + USER_SOCKPTR(optval), optlen); } static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname, diff --git a/net/socket.c b/net/socket.c index 93846568c2fb..c97f83d879ae 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2130,7 +2130,8 @@ int __sys_setsockopt(int fd, int level, int optname, char __user *optval, } if (level == SOL_SOCKET && !sock_use_custom_sol_socket(sock)) - err = sock_setsockopt(sock, level, optname, optval, optlen); + err = sock_setsockopt(sock, level, optname, + USER_SOCKPTR(optval), optlen); else if (unlikely(!sock->ops->setsockopt)) err = -EOPNOTSUPP; else -- cgit v1.2.3 From c6d1b26a8fd4940fe5d4199311838f6c2aef0174 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:08:51 +0200 Subject: net/xfrm: switch xfrm_user_policy to sockptr_t Pass a sockptr_t to prepare for set_fs-less handling of the kernel pointer from bpf-cgroup. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- include/net/xfrm.h | 8 +++++--- net/ipv4/ip_sockglue.c | 3 ++- net/ipv6/ipv6_sockglue.c | 3 ++- net/xfrm/xfrm_state.c | 6 +++--- 4 files changed, 12 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/net/xfrm.h b/include/net/xfrm.h index f9e1fda82ddf..5e81868b574a 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -1609,10 +1610,11 @@ int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu); int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb); int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb); -int xfrm_user_policy(struct sock *sk, int optname, - u8 __user *optval, int optlen); +int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval, + int optlen); #else -static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen) +static inline int xfrm_user_policy(struct sock *sk, int optname, + sockptr_t optval, int optlen) { return -ENOPROTOOPT; } diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index a5ea02d7a183..da933f99b5d5 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -1322,7 +1322,8 @@ static int do_ip_setsockopt(struct sock *sk, int level, err = -EPERM; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) break; - err = xfrm_user_policy(sk, optname, optval, optlen); + err = xfrm_user_policy(sk, optname, USER_SOCKPTR(optval), + optlen); break; case IP_TRANSPARENT: diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index add8f7912299..56a74707c617 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -935,7 +935,8 @@ done: retv = -EPERM; if 
(!ns_capable(net->user_ns, CAP_NET_ADMIN)) break; - retv = xfrm_user_policy(sk, optname, optval, optlen); + retv = xfrm_user_policy(sk, optname, USER_SOCKPTR(optval), + optlen); break; case IPV6_ADDR_PREFERENCES: diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 8be2d926acc2..69520ad3d83b 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -2264,7 +2264,7 @@ static bool km_is_alive(const struct km_event *c) return is_alive; } -int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen) +int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval, int optlen) { int err; u8 *data; @@ -2274,7 +2274,7 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen if (in_compat_syscall()) return -EOPNOTSUPP; - if (!optval && !optlen) { + if (sockptr_is_null(optval) && !optlen) { xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL); xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL); __sk_dst_reset(sk); @@ -2284,7 +2284,7 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen if (optlen <= 0 || optlen > PAGE_SIZE) return -EMSGSIZE; - data = memdup_user(optval, optlen); + data = memdup_sockptr(optval, optlen); if (IS_ERR(data)) return PTR_ERR(data); -- cgit v1.2.3 From ab214d1bf8c7ef1ed7af803a72491cb29edfa8f5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:08:53 +0200 Subject: netfilter: switch xt_copy_counters to sockptr_t Pass a sockptr_t to prepare for set_fs-less handling of the kernel pointer from bpf-cgroup. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- include/linux/netfilter/x_tables.h | 4 ++-- net/ipv4/netfilter/arp_tables.c | 7 +++---- net/ipv4/netfilter/ip_tables.c | 7 +++---- net/ipv6/netfilter/ip6_tables.c | 6 +++--- net/netfilter/x_tables.c | 20 ++++++++++---------- 5 files changed, 21 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index b8b943ee7b8b..5deb099d156d 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -301,8 +301,8 @@ int xt_target_to_user(const struct xt_entry_target *t, int xt_data_to_user(void __user *dst, const void *src, int usersize, int size, int aligned_size); -void *xt_copy_counters_from_user(const void __user *user, unsigned int len, - struct xt_counters_info *info); +void *xt_copy_counters(sockptr_t arg, unsigned int len, + struct xt_counters_info *info); struct xt_counters *xt_counters_alloc(unsigned int counters); struct xt_table *xt_register_table(struct net *net, diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 2c8a4dad39d7..6d24b686c7f0 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -996,8 +996,7 @@ static int do_replace(struct net *net, const void __user *user, return ret; } -static int do_add_counters(struct net *net, const void __user *user, - unsigned int len) +static int do_add_counters(struct net *net, sockptr_t arg, unsigned int len) { unsigned int i; struct xt_counters_info tmp; @@ -1008,7 +1007,7 @@ static int do_add_counters(struct net *net, const void __user *user, struct arpt_entry *iter; unsigned int addend; - paddc = xt_copy_counters_from_user(user, len, &tmp); + paddc = xt_copy_counters(arg, len, &tmp); if (IS_ERR(paddc)) return PTR_ERR(paddc); @@ -1420,7 +1419,7 @@ static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned break; case ARPT_SO_SET_ADD_COUNTERS: - 
ret = do_add_counters(sock_net(sk), user, len); + ret = do_add_counters(sock_net(sk), USER_SOCKPTR(user), len); break; default: diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 161901dd1cae..4697d09c98dc 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -1151,8 +1151,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len) } static int -do_add_counters(struct net *net, const void __user *user, - unsigned int len) +do_add_counters(struct net *net, sockptr_t arg, unsigned int len) { unsigned int i; struct xt_counters_info tmp; @@ -1163,7 +1162,7 @@ do_add_counters(struct net *net, const void __user *user, struct ipt_entry *iter; unsigned int addend; - paddc = xt_copy_counters_from_user(user, len, &tmp); + paddc = xt_copy_counters(arg, len, &tmp); if (IS_ERR(paddc)) return PTR_ERR(paddc); @@ -1629,7 +1628,7 @@ do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) break; case IPT_SO_SET_ADD_COUNTERS: - ret = do_add_counters(sock_net(sk), user, len); + ret = do_add_counters(sock_net(sk), USER_SOCKPTR(user), len); break; default: diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index fd1f8f931231..a787aba30e2d 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1168,7 +1168,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len) } static int -do_add_counters(struct net *net, const void __user *user, unsigned int len) +do_add_counters(struct net *net, sockptr_t arg, unsigned int len) { unsigned int i; struct xt_counters_info tmp; @@ -1179,7 +1179,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len) struct ip6t_entry *iter; unsigned int addend; - paddc = xt_copy_counters_from_user(user, len, &tmp); + paddc = xt_copy_counters(arg, len, &tmp); if (IS_ERR(paddc)) return PTR_ERR(paddc); t = xt_find_table_lock(net, AF_INET6, tmp.name); @@ -1637,7 +1637,7 @@ do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) break; case IP6T_SO_SET_ADD_COUNTERS: - ret = do_add_counters(sock_net(sk), user, len); + ret = do_add_counters(sock_net(sk), USER_SOCKPTR(user), len); break; default: diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 32bab45af7e4..b97eb4b538fd 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -1028,9 +1028,9 @@ int xt_check_target(struct xt_tgchk_param *par, EXPORT_SYMBOL_GPL(xt_check_target); /** - * xt_copy_counters_from_user - copy counters and metadata from userspace + * xt_copy_counters - copy counters and metadata from a sockptr_t * - * @user: src pointer to userspace memory + * @arg: src sockptr * @len: alleged size of userspace memory * @info: where to store the xt_counters_info metadata * @@ -1047,8 +1047,8 @@ EXPORT_SYMBOL_GPL(xt_check_target); * Return: returns pointer that caller has to test via IS_ERR(). * If IS_ERR is false, caller has to vfree the pointer. 
*/ -void *xt_copy_counters_from_user(const void __user *user, unsigned int len, - struct xt_counters_info *info) +void *xt_copy_counters(sockptr_t arg, unsigned int len, + struct xt_counters_info *info) { void *mem; u64 size; @@ -1062,12 +1062,12 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len, return ERR_PTR(-EINVAL); len -= sizeof(compat_tmp); - if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0) + if (copy_from_sockptr(&compat_tmp, arg, sizeof(compat_tmp)) != 0) return ERR_PTR(-EFAULT); memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1); info->num_counters = compat_tmp.num_counters; - user += sizeof(compat_tmp); + sockptr_advance(arg, sizeof(compat_tmp)); } else #endif { @@ -1075,10 +1075,10 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len, return ERR_PTR(-EINVAL); len -= sizeof(*info); - if (copy_from_user(info, user, sizeof(*info)) != 0) + if (copy_from_sockptr(info, arg, sizeof(*info)) != 0) return ERR_PTR(-EFAULT); - user += sizeof(*info); + sockptr_advance(arg, sizeof(*info)); } info->name[sizeof(info->name) - 1] = '\0'; @@ -1092,13 +1092,13 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len, if (!mem) return ERR_PTR(-ENOMEM); - if (copy_from_user(mem, user, len) == 0) + if (copy_from_sockptr(mem, arg, len) == 0) return mem; vfree(mem); return ERR_PTR(-EFAULT); } -EXPORT_SYMBOL_GPL(xt_copy_counters_from_user); +EXPORT_SYMBOL_GPL(xt_copy_counters); #ifdef CONFIG_COMPAT int xt_compat_target_offset(const struct xt_target *target) -- cgit v1.2.3 From c2f12630c60ff33a9cafd221646053fc10ec59b6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:08:54 +0200 Subject: netfilter: switch nf_setsockopt to sockptr_t Pass a sockptr_t to prepare for set_fs-less handling of the kernel pointer from bpf-cgroup. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- include/linux/netfilter.h | 6 ++++-- net/bridge/netfilter/ebtables.c | 37 +++++++++++++++++-------------------- net/decnet/af_decnet.c | 3 ++- net/ipv4/ip_sockglue.c | 3 ++- net/ipv4/netfilter/arp_tables.c | 28 ++++++++++++++-------------- net/ipv4/netfilter/ip_tables.c | 24 ++++++++++++------------ net/ipv6/ipv6_sockglue.c | 3 ++- net/ipv6/netfilter/ip6_tables.c | 24 ++++++++++++------------ net/netfilter/ipvs/ip_vs_ctl.c | 4 ++-- net/netfilter/nf_sockopt.c | 2 +- 10 files changed, 68 insertions(+), 66 deletions(-) (limited to 'include') diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 711b4d4486f0..0101747de549 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -13,6 +13,7 @@ #include #include #include +#include #include static inline int NF_DROP_GETERR(int verdict) @@ -163,7 +164,8 @@ struct nf_sockopt_ops { /* Non-inclusive ranges: use 0/0/NULL to never get called. 
*/ int set_optmin; int set_optmax; - int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len); + int (*set)(struct sock *sk, int optval, sockptr_t arg, + unsigned int len); int get_optmin; int get_optmax; int (*get)(struct sock *sk, int optval, void __user *user, int *len); @@ -338,7 +340,7 @@ NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, } /* Call setsockopt() */ -int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, +int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, sockptr_t opt, unsigned int len); int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, int *len); diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 12f8929667bf..d35173e803d3 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1063,14 +1063,13 @@ free_counterstmp: } /* replace the table */ -static int do_replace(struct net *net, const void __user *user, - unsigned int len) +static int do_replace(struct net *net, sockptr_t arg, unsigned int len) { int ret, countersize; struct ebt_table_info *newinfo; struct ebt_replace tmp; - if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) + if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0) return -EFAULT; if (len != sizeof(tmp) + tmp.entries_size) @@ -1286,12 +1285,11 @@ free_tmp: return ret; } -static int update_counters(struct net *net, const void __user *user, - unsigned int len) +static int update_counters(struct net *net, sockptr_t arg, unsigned int len) { struct ebt_replace hlp; - if (copy_from_user(&hlp, user, sizeof(hlp))) + if (copy_from_sockptr(&hlp, arg, sizeof(hlp))) return -EFAULT; if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter)) @@ -2079,7 +2077,7 @@ static int compat_copy_entries(unsigned char *data, unsigned int size_user, static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl, - void __user *user, unsigned int len) + sockptr_t arg, unsigned int len) { struct compat_ebt_replace tmp; int i; @@ -2087,7 +2085,7 @@ static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl, if (len < sizeof(tmp)) return -EINVAL; - if (copy_from_user(&tmp, user, sizeof(tmp))) + if (copy_from_sockptr(&tmp, arg, sizeof(tmp))) return -EFAULT; if (len != sizeof(tmp) + tmp.entries_size) @@ -2114,8 +2112,7 @@ static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl, return 0; } -static int compat_do_replace(struct net *net, void __user *user, - unsigned int len) +static int compat_do_replace(struct net *net, sockptr_t arg, unsigned int len) { int ret, i, countersize, size64; struct ebt_table_info *newinfo; @@ -2123,10 +2120,10 @@ static int compat_do_replace(struct net *net, void __user *user, struct ebt_entries_buf_state state; void *entries_tmp; - ret = compat_copy_ebt_replace_from_user(&tmp, user, len); + ret = compat_copy_ebt_replace_from_user(&tmp, arg, len); if (ret) { /* try real handler in case userland supplied needed padding */ - if (ret == -EINVAL && do_replace(net, user, len) == 0) + if (ret == -EINVAL && do_replace(net, arg, len) == 0) ret = 0; return ret; } @@ -2217,17 +2214,17 @@ out_unlock: goto free_entries; } -static int compat_update_counters(struct net *net, void __user *user, +static int compat_update_counters(struct net *net, sockptr_t arg, unsigned int len) { struct compat_ebt_replace hlp; - if (copy_from_user(&hlp, user, sizeof(hlp))) + if (copy_from_sockptr(&hlp, arg, sizeof(hlp))) return -EFAULT; /* try real handler in case userland 
supplied needed padding */ if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter)) - return update_counters(net, user, len); + return update_counters(net, arg, len); return do_update_counters(net, hlp.name, compat_ptr(hlp.counters), hlp.num_counters, len); @@ -2368,7 +2365,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) return ret; } -static int do_ebt_set_ctl(struct sock *sk, int cmd, void __user *user, +static int do_ebt_set_ctl(struct sock *sk, int cmd, sockptr_t arg, unsigned int len) { struct net *net = sock_net(sk); @@ -2381,18 +2378,18 @@ static int do_ebt_set_ctl(struct sock *sk, int cmd, void __user *user, case EBT_SO_SET_ENTRIES: #ifdef CONFIG_COMPAT if (in_compat_syscall()) - ret = compat_do_replace(net, user, len); + ret = compat_do_replace(net, arg, len); else #endif - ret = do_replace(net, user, len); + ret = do_replace(net, arg, len); break; case EBT_SO_SET_COUNTERS: #ifdef CONFIG_COMPAT if (in_compat_syscall()) - ret = compat_update_counters(net, user, len); + ret = compat_update_counters(net, arg, len); else #endif - ret = update_counters(net, user, len); + ret = update_counters(net, arg, len); break; default: ret = -EINVAL; diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 7d7ae2dd69b8..7d51ab608fb3 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -1332,7 +1332,8 @@ static int dn_setsockopt(struct socket *sock, int level, int optname, char __use /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != DSO_LINKINFO && optname != DSO_STREAM && optname != DSO_SEQPACKET) - err = nf_setsockopt(sk, PF_DECnet, optname, optval, optlen); + err = nf_setsockopt(sk, PF_DECnet, optname, + USER_SOCKPTR(optval), optlen); #endif return err; diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index da933f99b5d5..42befbf12846 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -1422,7 +1422,8 @@ int ip_setsockopt(struct sock *sk, int level, optname != IP_IPSEC_POLICY && optname != IP_XFRM_POLICY && !ip_mroute_opt(optname)) - err = nf_setsockopt(sk, PF_INET, optname, optval, optlen); + err = nf_setsockopt(sk, PF_INET, optname, USER_SOCKPTR(optval), + optlen); #endif return err; } diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 6d24b686c7f0..f5b26ef17820 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0-only + /* * Packet matching code for ARP packets. 
* @@ -947,8 +947,7 @@ static int __do_replace(struct net *net, const char *name, return ret; } -static int do_replace(struct net *net, const void __user *user, - unsigned int len) +static int do_replace(struct net *net, sockptr_t arg, unsigned int len) { int ret; struct arpt_replace tmp; @@ -956,7 +955,7 @@ static int do_replace(struct net *net, const void __user *user, void *loc_cpu_entry; struct arpt_entry *iter; - if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) + if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ @@ -972,8 +971,8 @@ static int do_replace(struct net *net, const void __user *user, return -ENOMEM; loc_cpu_entry = newinfo->entries; - if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), - tmp.size) != 0) { + sockptr_advance(arg, sizeof(tmp)); + if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } @@ -1244,8 +1243,7 @@ out_unlock: return ret; } -static int compat_do_replace(struct net *net, void __user *user, - unsigned int len) +static int compat_do_replace(struct net *net, sockptr_t arg, unsigned int len) { int ret; struct compat_arpt_replace tmp; @@ -1253,7 +1251,7 @@ static int compat_do_replace(struct net *net, void __user *user, void *loc_cpu_entry; struct arpt_entry *iter; - if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) + if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ @@ -1269,7 +1267,8 @@ static int compat_do_replace(struct net *net, void __user *user, return -ENOMEM; loc_cpu_entry = newinfo->entries; - if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { + sockptr_advance(arg, sizeof(tmp)); + if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } @@ -1401,7 +1400,8 @@ static int compat_get_entries(struct net *net, } #endif -static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) +static int do_arpt_set_ctl(struct sock *sk, int cmd, sockptr_t arg, + unsigned int len) { int ret; @@ -1412,14 +1412,14 @@ static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned case ARPT_SO_SET_REPLACE: #ifdef CONFIG_COMPAT if (in_compat_syscall()) - ret = compat_do_replace(sock_net(sk), user, len); + ret = compat_do_replace(sock_net(sk), arg, len); else #endif - ret = do_replace(sock_net(sk), user, len); + ret = do_replace(sock_net(sk), arg, len); break; case ARPT_SO_SET_ADD_COUNTERS: - ret = do_add_counters(sock_net(sk), USER_SOCKPTR(user), len); + ret = do_add_counters(sock_net(sk), arg, len); break; default: diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 4697d09c98dc..f2a9680303d8 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -1102,7 +1102,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, } static int -do_replace(struct net *net, const void __user *user, unsigned int len) +do_replace(struct net *net, sockptr_t arg, unsigned int len) { int ret; struct ipt_replace tmp; @@ -1110,7 +1110,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len) void *loc_cpu_entry; struct ipt_entry *iter; - if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) + if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ @@ -1126,8 +1126,8 @@ do_replace(struct net *net, const void __user *user, unsigned int len) return -ENOMEM; loc_cpu_entry = newinfo->entries; - if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), - 
tmp.size) != 0) { + sockptr_advance(arg, sizeof(tmp)); + if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } @@ -1484,7 +1484,7 @@ out_unlock: } static int -compat_do_replace(struct net *net, void __user *user, unsigned int len) +compat_do_replace(struct net *net, sockptr_t arg, unsigned int len) { int ret; struct compat_ipt_replace tmp; @@ -1492,7 +1492,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) void *loc_cpu_entry; struct ipt_entry *iter; - if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) + if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ @@ -1508,8 +1508,8 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) return -ENOMEM; loc_cpu_entry = newinfo->entries; - if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), - tmp.size) != 0) { + sockptr_advance(arg, sizeof(tmp)); + if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } @@ -1610,7 +1610,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr, #endif static int -do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) +do_ipt_set_ctl(struct sock *sk, int cmd, sockptr_t arg, unsigned int len) { int ret; @@ -1621,14 +1621,14 @@ do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) case IPT_SO_SET_REPLACE: #ifdef CONFIG_COMPAT if (in_compat_syscall()) - ret = compat_do_replace(sock_net(sk), user, len); + ret = compat_do_replace(sock_net(sk), arg, len); else #endif - ret = do_replace(sock_net(sk), user, len); + ret = do_replace(sock_net(sk), arg, len); break; case IPT_SO_SET_ADD_COUNTERS: - ret = do_add_counters(sock_net(sk), USER_SOCKPTR(user), len); + ret = do_add_counters(sock_net(sk), arg, len); break; default: diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 56a74707c617..85892b35cff7 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -996,7 +996,8 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname, /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY && optname != IPV6_XFRM_POLICY) - err = nf_setsockopt(sk, PF_INET6, optname, optval, optlen); + err = nf_setsockopt(sk, PF_INET6, optname, USER_SOCKPTR(optval), + optlen); #endif return err; } diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index a787aba30e2d..1d52957a413f 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1119,7 +1119,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, } static int -do_replace(struct net *net, const void __user *user, unsigned int len) +do_replace(struct net *net, sockptr_t arg, unsigned int len) { int ret; struct ip6t_replace tmp; @@ -1127,7 +1127,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len) void *loc_cpu_entry; struct ip6t_entry *iter; - if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) + if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ @@ -1143,8 +1143,8 @@ do_replace(struct net *net, const void __user *user, unsigned int len) return -ENOMEM; loc_cpu_entry = newinfo->entries; - if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), - tmp.size) != 0) { + sockptr_advance(arg, sizeof(tmp)); + if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } @@ -1493,7 +1493,7 @@ 
out_unlock: } static int -compat_do_replace(struct net *net, void __user *user, unsigned int len) +compat_do_replace(struct net *net, sockptr_t arg, unsigned int len) { int ret; struct compat_ip6t_replace tmp; @@ -1501,7 +1501,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) void *loc_cpu_entry; struct ip6t_entry *iter; - if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) + if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ @@ -1517,8 +1517,8 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) return -ENOMEM; loc_cpu_entry = newinfo->entries; - if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), - tmp.size) != 0) { + sockptr_advance(arg, sizeof(tmp)); + if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } @@ -1619,7 +1619,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr, #endif static int -do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) +do_ip6t_set_ctl(struct sock *sk, int cmd, sockptr_t arg, unsigned int len) { int ret; @@ -1630,14 +1630,14 @@ do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) case IP6T_SO_SET_REPLACE: #ifdef CONFIG_COMPAT if (in_compat_syscall()) - ret = compat_do_replace(sock_net(sk), user, len); + ret = compat_do_replace(sock_net(sk), arg, len); else #endif - ret = do_replace(sock_net(sk), user, len); + ret = do_replace(sock_net(sk), arg, len); break; case IP6T_SO_SET_ADD_COUNTERS: - ret = do_add_counters(sock_net(sk), USER_SOCKPTR(user), len); + ret = do_add_counters(sock_net(sk), arg, len); break; default: diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 4af83f466dfc..bcac316addab 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -2434,7 +2434,7 @@ static void ip_vs_copy_udest_compat(struct ip_vs_dest_user_kern *udest, } static int -do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) +do_ip_vs_set_ctl(struct sock *sk, int cmd, sockptr_t ptr, unsigned int len) { struct net *net = sock_net(sk); int ret; @@ -2458,7 +2458,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) return -EINVAL; } - if (copy_from_user(arg, user, len) != 0) + if (copy_from_sockptr(arg, ptr, len) != 0) return -EFAULT; /* Handle daemons since they have another lock */ diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c index 90469b1f628a..34afcd03b6f6 100644 --- a/net/netfilter/nf_sockopt.c +++ b/net/netfilter/nf_sockopt.c @@ -89,7 +89,7 @@ out: return ops; } -int nf_setsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt, +int nf_setsockopt(struct sock *sk, u_int8_t pf, int val, sockptr_t opt, unsigned int len) { struct nf_sockopt_ops *ops; -- cgit v1.2.3 From b03afaa82ece13b2a008f0e3a7127bead578e3e6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:08:55 +0200 Subject: bpfilter: switch bpfilter_ip_set_sockopt to sockptr_t This is mostly to prepare for cleaning up the callers, as bpfilter by design can't handle kernel pointers. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- include/linux/bpfilter.h | 6 +++--- net/bpfilter/bpfilter_kern.c | 6 +++--- net/ipv4/bpfilter/sockopt.c | 8 ++++---- net/ipv4/ip_sockglue.c | 3 ++- 4 files changed, 12 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h index 9b114c718a76..2ae3c8e1d83c 100644 --- a/include/linux/bpfilter.h +++ b/include/linux/bpfilter.h @@ -4,9 +4,10 @@ #include #include +#include struct sock; -int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, +int bpfilter_ip_set_sockopt(struct sock *sk, int optname, sockptr_t optval, unsigned int optlen); int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen); @@ -16,8 +17,7 @@ struct bpfilter_umh_ops { struct umd_info info; /* since ip_getsockopt() can run in parallel, serialize access to umh */ struct mutex lock; - int (*sockopt)(struct sock *sk, int optname, - char __user *optval, + int (*sockopt)(struct sock *sk, int optname, sockptr_t optval, unsigned int optlen, bool is_set); int (*start)(void); }; diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c index 00540457e5f4..f580c3344cb3 100644 --- a/net/bpfilter/bpfilter_kern.c +++ b/net/bpfilter/bpfilter_kern.c @@ -60,17 +60,17 @@ stop: } static int bpfilter_process_sockopt(struct sock *sk, int optname, - char __user *optval, unsigned int optlen, + sockptr_t optval, unsigned int optlen, bool is_set) { struct mbox_request req = { .is_set = is_set, .pid = current->pid, .cmd = optname, - .addr = (uintptr_t)optval, + .addr = (uintptr_t)optval.user, .len = optlen, }; - if (uaccess_kernel()) { + if (uaccess_kernel() || sockptr_is_kernel(optval)) { pr_err("kernel access not supported\n"); return -EFAULT; } diff --git a/net/ipv4/bpfilter/sockopt.c b/net/ipv4/bpfilter/sockopt.c index 9063c6767d34..1b34cb9a7708 100644 --- a/net/ipv4/bpfilter/sockopt.c +++ b/net/ipv4/bpfilter/sockopt.c @@ -21,8 +21,7 @@ void bpfilter_umh_cleanup(struct umd_info *info) } EXPORT_SYMBOL_GPL(bpfilter_umh_cleanup); -static int bpfilter_mbox_request(struct sock *sk, int optname, - char __user *optval, +static int bpfilter_mbox_request(struct sock *sk, int optname, sockptr_t optval, unsigned int optlen, bool is_set) { int err; @@ -52,7 +51,7 @@ out: return err; } -int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, +int bpfilter_ip_set_sockopt(struct sock *sk, int optname, sockptr_t optval, unsigned int optlen) { return bpfilter_mbox_request(sk, optname, optval, optlen, true); @@ -66,7 +65,8 @@ int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, if (get_user(len, optlen)) return -EFAULT; - return bpfilter_mbox_request(sk, optname, optval, len, false); + return bpfilter_mbox_request(sk, optname, USER_SOCKPTR(optval), len, + false); } static int __init bpfilter_sockopt_init(void) diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 42befbf12846..36f746e01741 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -1414,7 +1414,8 @@ int ip_setsockopt(struct sock *sk, int level, #if IS_ENABLED(CONFIG_BPFILTER_UMH) if (optname >= BPFILTER_IPT_SO_SET_REPLACE && optname < BPFILTER_IPT_SET_MAX) - err = bpfilter_ip_set_sockopt(sk, optname, optval, optlen); + err = bpfilter_ip_set_sockopt(sk, optname, USER_SOCKPTR(optval), + optlen); #endif #ifdef CONFIG_NETFILTER /* we need to exclude all possible ENOPROTOOPTs except default case */ -- cgit v1.2.3 From 01ccb5b48f08f59ca3746d59221f4990aec1b194 Mon Sep 17 00:00:00 
2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:08:56 +0200 Subject: net/ipv4: switch ip_mroute_setsockopt to sockptr_t Pass a sockptr_t to prepare for set_fs-less handling of the kernel pointer from bpf-cgroup. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- include/linux/mroute.h | 5 +++-- net/ipv4/ip_sockglue.c | 3 ++- net/ipv4/ipmr.c | 14 +++++++------- 3 files changed, 12 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/linux/mroute.h b/include/linux/mroute.h index 9a36fad9e068..6cbbfe94348c 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h @@ -8,6 +8,7 @@ #include #include #include +#include #ifdef CONFIG_IP_MROUTE static inline int ip_mroute_opt(int opt) @@ -15,7 +16,7 @@ static inline int ip_mroute_opt(int opt) return opt >= MRT_BASE && opt <= MRT_MAX; } -int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int); +int ip_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int); int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *); int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg); int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); @@ -23,7 +24,7 @@ int ip_mr_init(void); bool ipmr_rule_default(const struct fib_rule *rule); #else static inline int ip_mroute_setsockopt(struct sock *sock, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { return -ENOPROTOOPT; } diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 36f746e01741..ac495b0cff8f 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -925,7 +925,8 @@ static int do_ip_setsockopt(struct sock *sk, int level, if (optname == IP_ROUTER_ALERT) return ip_ra_control(sk, val ? 1 : 0, NULL); if (ip_mroute_opt(optname)) - return ip_mroute_setsockopt(sk, optname, optval, optlen); + return ip_mroute_setsockopt(sk, optname, USER_SOCKPTR(optval), + optlen); err = 0; if (needs_rtnl) diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 678639c01e48..cdf3a40f9ff5 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1341,7 +1341,7 @@ static void mrtsock_destruct(struct sock *sk) * MOSPF/PIM router set up we can clean this up. 
*/ -int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, +int ip_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval, unsigned int optlen) { struct net *net = sock_net(sk); @@ -1413,7 +1413,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, ret = -EINVAL; break; } - if (copy_from_user(&vif, optval, sizeof(vif))) { + if (copy_from_sockptr(&vif, optval, sizeof(vif))) { ret = -EFAULT; break; } @@ -1441,7 +1441,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, ret = -EINVAL; break; } - if (copy_from_user(&mfc, optval, sizeof(mfc))) { + if (copy_from_sockptr(&mfc, optval, sizeof(mfc))) { ret = -EFAULT; break; } @@ -1459,7 +1459,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, ret = -EINVAL; break; } - if (get_user(val, (int __user *)optval)) { + if (copy_from_sockptr(&val, optval, sizeof(val))) { ret = -EFAULT; break; } @@ -1471,7 +1471,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, ret = -EINVAL; break; } - if (get_user(val, (int __user *)optval)) { + if (copy_from_sockptr(&val, optval, sizeof(val))) { ret = -EFAULT; break; } @@ -1486,7 +1486,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, ret = -EINVAL; break; } - if (get_user(val, (int __user *)optval)) { + if (copy_from_sockptr(&val, optval, sizeof(val))) { ret = -EFAULT; break; } @@ -1508,7 +1508,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, ret = -EINVAL; break; } - if (get_user(uval, (u32 __user *)optval)) { + if (copy_from_sockptr(&uval, optval, sizeof(uval))) { ret = -EFAULT; break; } -- cgit v1.2.3 From de40a3e88311b6f0fc79b876a4768bf2d99f9aae Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:08:57 +0200 Subject: net/ipv4: merge ip_options_get and ip_options_get_from_user Use the sockptr_t type to merge the versions. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- include/net/ip.h | 5 ++--- net/ipv4/ip_options.c | 43 +++++++++++-------------------------------- net/ipv4/ip_sockglue.c | 7 ++++--- 3 files changed, 17 insertions(+), 38 deletions(-) (limited to 'include') diff --git a/include/net/ip.h b/include/net/ip.h index 3d34acc95ca8..d66ad3a95220 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -707,9 +708,7 @@ int __ip_options_compile(struct net *net, struct ip_options *opt, int ip_options_compile(struct net *net, struct ip_options *opt, struct sk_buff *skb); int ip_options_get(struct net *net, struct ip_options_rcu **optp, - unsigned char *data, int optlen); -int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp, - unsigned char __user *data, int optlen); + sockptr_t data, int optlen); void ip_options_undo(struct ip_options *opt); void ip_forward_options(struct sk_buff *skb); int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev); diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index ddaa01ec2bce..948747aac4e2 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -519,15 +519,20 @@ void ip_options_undo(struct ip_options *opt) } } -static struct ip_options_rcu *ip_options_get_alloc(const int optlen) +int ip_options_get(struct net *net, struct ip_options_rcu **optp, + sockptr_t data, int optlen) { - return kzalloc(sizeof(struct ip_options_rcu) + ((optlen + 3) & ~3), + struct ip_options_rcu *opt; + + opt = kzalloc(sizeof(struct ip_options_rcu) + ((optlen + 3) & ~3), GFP_KERNEL); -} + if (!opt) + return -ENOMEM; + if (optlen && copy_from_sockptr(opt->opt.__data, data, optlen)) { + kfree(opt); + return -EFAULT; + } -static int ip_options_get_finish(struct net *net, struct ip_options_rcu **optp, - struct ip_options_rcu *opt, int optlen) -{ while (optlen & 3) opt->opt.__data[optlen++] = IPOPT_END; opt->opt.optlen = optlen; @@ -540,32 +545,6 @@ static int ip_options_get_finish(struct net *net, struct ip_options_rcu **optp, return 0; } -int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp, - unsigned char __user *data, int optlen) -{ - struct ip_options_rcu *opt = ip_options_get_alloc(optlen); - - if (!opt) - return -ENOMEM; - if (optlen && copy_from_user(opt->opt.__data, data, optlen)) { - kfree(opt); - return -EFAULT; - } - return ip_options_get_finish(net, optp, opt, optlen); -} - -int ip_options_get(struct net *net, struct ip_options_rcu **optp, - unsigned char *data, int optlen) -{ - struct ip_options_rcu *opt = ip_options_get_alloc(optlen); - - if (!opt) - return -ENOMEM; - if (optlen) - memcpy(opt->opt.__data, data, optlen); - return ip_options_get_finish(net, optp, opt, optlen); -} - void ip_forward_options(struct sk_buff *skb) { struct ip_options *opt = &(IPCB(skb)->opt); diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index ac495b0cff8f..b12f39b52008 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -280,7 +280,8 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc, err = cmsg->cmsg_len - sizeof(struct cmsghdr); /* Our caller is responsible for freeing ipc->opt */ - err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg), + err = ip_options_get(net, &ipc->opt, + KERNEL_SOCKPTR(CMSG_DATA(cmsg)), err < 40 ? 
err : 40); if (err) return err; @@ -940,8 +941,8 @@ static int do_ip_setsockopt(struct sock *sk, int level, if (optlen > 40) goto e_inval; - err = ip_options_get_from_user(sock_net(sk), &opt, - optval, optlen); + err = ip_options_get(sock_net(sk), &opt, USER_SOCKPTR(optval), + optlen); if (err) break; old = rcu_dereference_protected(inet->inet_opt, -- cgit v1.2.3 From b43c6153132c92745317f92174af84f57b160b76 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:08:59 +0200 Subject: net/ipv6: switch ip6_mroute_setsockopt to sockptr_t Pass a sockptr_t to prepare for set_fs-less handling of the kernel pointer from bpf-cgroup. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- include/linux/mroute6.h | 8 ++++---- net/ipv6/ip6mr.c | 17 +++++++++-------- net/ipv6/ipv6_sockglue.c | 3 ++- 3 files changed, 15 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h index c4a45859f586..bc351a85ce9b 100644 --- a/include/linux/mroute6.h +++ b/include/linux/mroute6.h @@ -8,6 +8,7 @@ #include #include #include +#include #include #ifdef CONFIG_IPV6_MROUTE @@ -25,7 +26,7 @@ static inline int ip6_mroute_opt(int opt) struct sock; #ifdef CONFIG_IPV6_MROUTE -extern int ip6_mroute_setsockopt(struct sock *, int, char __user *, unsigned int); +extern int ip6_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int); extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *); extern int ip6_mr_input(struct sk_buff *skb); extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg); @@ -33,9 +34,8 @@ extern int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *ar extern int ip6_mr_init(void); extern void ip6_mr_cleanup(void); #else -static inline -int ip6_mroute_setsockopt(struct sock *sock, - int optname, char __user *optval, unsigned int optlen) +static inline int ip6_mroute_setsockopt(struct sock *sock, int optname, + sockptr_t optval, unsigned int optlen) { return -ENOPROTOOPT; } diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 1f4d20e97c07..06b0d2c329b9 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -1629,7 +1629,8 @@ EXPORT_SYMBOL(mroute6_is_socket); * MOSPF/PIM router set up we can clean this up. 
*/ -int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen) +int ip6_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval, + unsigned int optlen) { int ret, parent = 0; struct mif6ctl vif; @@ -1665,7 +1666,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns case MRT6_ADD_MIF: if (optlen < sizeof(vif)) return -EINVAL; - if (copy_from_user(&vif, optval, sizeof(vif))) + if (copy_from_sockptr(&vif, optval, sizeof(vif))) return -EFAULT; if (vif.mif6c_mifi >= MAXMIFS) return -ENFILE; @@ -1678,7 +1679,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns case MRT6_DEL_MIF: if (optlen < sizeof(mifi_t)) return -EINVAL; - if (copy_from_user(&mifi, optval, sizeof(mifi_t))) + if (copy_from_sockptr(&mifi, optval, sizeof(mifi_t))) return -EFAULT; rtnl_lock(); ret = mif6_delete(mrt, mifi, 0, NULL); @@ -1697,7 +1698,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns case MRT6_DEL_MFC_PROXY: if (optlen < sizeof(mfc)) return -EINVAL; - if (copy_from_user(&mfc, optval, sizeof(mfc))) + if (copy_from_sockptr(&mfc, optval, sizeof(mfc))) return -EFAULT; if (parent == 0) parent = mfc.mf6cc_parent; @@ -1718,7 +1719,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns if (optlen != sizeof(flags)) return -EINVAL; - if (get_user(flags, (int __user *)optval)) + if (copy_from_sockptr(&flags, optval, sizeof(flags))) return -EFAULT; rtnl_lock(); mroute_clean_tables(mrt, flags); @@ -1735,7 +1736,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns if (optlen != sizeof(v)) return -EINVAL; - if (get_user(v, (int __user *)optval)) + if (copy_from_sockptr(&v, optval, sizeof(v))) return -EFAULT; mrt->mroute_do_assert = v; return 0; @@ -1748,7 +1749,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns if (optlen != sizeof(v)) return -EINVAL; - if (get_user(v, (int __user *)optval)) + if (copy_from_sockptr(&v, optval, sizeof(v))) return -EFAULT; v = !!v; rtnl_lock(); @@ -1769,7 +1770,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns if (optlen != sizeof(u32)) return -EINVAL; - if (get_user(v, (u32 __user *)optval)) + if (copy_from_sockptr(&v, optval, sizeof(v))) return -EFAULT; /* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */ if (v != RT_TABLE_DEFAULT && v >= 100000000) diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 85892b35cff7..119dfaf5f4bb 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -337,7 +337,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, valbool = (val != 0); if (ip6_mroute_opt(optname)) - return ip6_mroute_setsockopt(sk, optname, optval, optlen); + return ip6_mroute_setsockopt(sk, optname, USER_SOCKPTR(optval), + optlen); if (needs_rtnl) rtnl_lock(); -- cgit v1.2.3 From 86298285c9ae3a41ce21c2d00ebdde51dd2abc73 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:09:01 +0200 Subject: net/ipv6: switch ipv6_flowlabel_opt to sockptr_t Pass a sockptr_t to prepare for set_fs-less handling of the kernel pointer from bpf-cgroup. Note that the get case is pretty weird in that it actually copies data back to userspace from setsockopt. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- include/net/ipv6.h | 2 +- net/ipv6/ip6_flowlabel.c | 16 +++++++++------- net/ipv6/ipv6_sockglue.c | 2 +- 3 files changed, 11 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 262fc88dbd7e..4c9d89b5d732 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -406,7 +406,7 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space, struct ip6_flowlabel *fl, struct ipv6_txoptions *fopt); void fl6_free_socklist(struct sock *sk); -int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen); +int ipv6_flowlabel_opt(struct sock *sk, sockptr_t optval, int optlen); int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq, int flags); int ip6_flowlabel_init(void); diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index 817b4f379009..215b6f5e733e 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -371,7 +371,7 @@ static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned lo static struct ip6_flowlabel * fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, - char __user *optval, int optlen, int *err_p) + sockptr_t optval, int optlen, int *err_p) { struct ip6_flowlabel *fl = NULL; int olen; @@ -401,7 +401,8 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, memset(fl->opt, 0, sizeof(*fl->opt)); fl->opt->tot_len = sizeof(*fl->opt) + olen; err = -EFAULT; - if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen)) + sockptr_advance(optval, CMSG_ALIGN(sizeof(*freq))); + if (copy_from_sockptr(fl->opt + 1, optval, olen)) goto done; msg.msg_controllen = olen; @@ -604,7 +605,7 @@ static int ipv6_flowlabel_renew(struct sock *sk, struct in6_flowlabel_req *freq) } static int ipv6_flowlabel_get(struct sock *sk, struct in6_flowlabel_req *freq, - void __user *optval, int optlen) + sockptr_t optval, int optlen) { struct ipv6_fl_socklist *sfl, *sfl1 = NULL; struct ip6_flowlabel *fl, *fl1 = NULL; @@ -702,8 +703,9 @@ release: goto recheck; if (!freq->flr_label) { - if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label, - &fl->label, sizeof(fl->label))) { + sockptr_advance(optval, + offsetof(struct in6_flowlabel_req, flr_label)); + if (copy_to_sockptr(optval, &fl->label, sizeof(fl->label))) { /* Intentionally ignore fault. */ } } @@ -716,13 +718,13 @@ done: return err; } -int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen) +int ipv6_flowlabel_opt(struct sock *sk, sockptr_t optval, int optlen) { struct in6_flowlabel_req freq; if (optlen < sizeof(freq)) return -EINVAL; - if (copy_from_user(&freq, optval, sizeof(freq))) + if (copy_from_sockptr(&freq, optval, sizeof(freq))) return -EFAULT; switch (freq.flr_action) { diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 119dfaf5f4bb..3897fb55372d 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -929,7 +929,7 @@ done: retv = 0; break; case IPV6_FLOWLABEL_MGR: - retv = ipv6_flowlabel_opt(sk, optval, optlen); + retv = ipv6_flowlabel_opt(sk, USER_SOCKPTR(optval), optlen); break; case IPV6_IPSEC_POLICY: case IPV6_XFRM_POLICY: -- cgit v1.2.3 From 91ac1ccaff597d06b1e16801e1a4c99b8a78dcbe Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:09:04 +0200 Subject: net/udp: switch udp_lib_setsockopt to sockptr_t Pass a sockptr_t to prepare for set_fs-less handling of the kernel pointer from bpf-cgroup. 
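The conversions in this series all lean on the same small helper type. As a rough orientation only, a minimal sketch of its semantics follows, assuming the 2020-era layout; it is not part of this patch and not a verbatim copy of include/linux/sockptr.h, whose definitions have since been reworked.

typedef struct {
	union {
		void		*kernel;
		void __user	*user;
	};
	bool		is_kernel : 1;	/* which union member is valid */
} sockptr_t;

static inline sockptr_t USER_SOCKPTR(void __user *p)
{
	return (sockptr_t) { .user = p };
}

static inline sockptr_t KERNEL_SOCKPTR(void *p)
{
	return (sockptr_t) { .kernel = p, .is_kernel = true };
}

/* copy_from_sockptr() hides the user/kernel distinction from callers */
static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size)
{
	if (src.is_kernel) {
		memcpy(dst, src.kernel, size);
		return 0;
	}
	return copy_from_user(dst, src.user, size) ? -EFAULT : 0;
}

/* a converted handler (hypothetical example) reads a fixed-size option
 * the same way regardless of where the buffer lives
 */
static int example_set_int_opt(sockptr_t optval, unsigned int optlen, int *val)
{
	if (optlen < sizeof(int))
		return -EINVAL;
	return copy_from_sockptr(val, optval, sizeof(int));
}

Syscall-facing callers wrap the user pointer with USER_SOCKPTR(), while in-kernel callers such as the bpf-cgroup setsockopt path can pass KERNEL_SOCKPTR() and no longer need set_fs(KERNEL_DS) to make the copy helpers accept a kernel address.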
Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- include/net/udp.h | 2 +- net/ipv4/udp.c | 7 ++++--- net/ipv6/udp.c | 3 ++- 3 files changed, 7 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/net/udp.h b/include/net/udp.h index 17a9e86a8076..295d52a73598 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -306,7 +306,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, int udp_lib_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); int udp_lib_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen, + sockptr_t optval, unsigned int optlen, int (*push_pending_frames)(struct sock *)); struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, int dif); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index bb95cddcb040..c6cb2d09dbc7 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2588,7 +2588,7 @@ void udp_destroy_sock(struct sock *sk) * Socket option code for UDP */ int udp_lib_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen, + sockptr_t optval, unsigned int optlen, int (*push_pending_frames)(struct sock *)) { struct udp_sock *up = udp_sk(sk); @@ -2599,7 +2599,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, if (optlen < sizeof(int)) return -EINVAL; - if (get_user(val, (int __user *)optval)) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; valbool = val ? 1 : 0; @@ -2707,7 +2707,8 @@ int udp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) - return udp_lib_setsockopt(sk, level, optname, optval, optlen, + return udp_lib_setsockopt(sk, level, optname, + USER_SOCKPTR(optval), optlen, udp_push_pending_frames); return ip_setsockopt(sk, level, optname, optval, optlen); } diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 7c1143feb2bf..2df1e6c9d7cb 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1622,7 +1622,8 @@ int udpv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) - return udp_lib_setsockopt(sk, level, optname, optval, optlen, + return udp_lib_setsockopt(sk, level, optname, + USER_SOCKPTR(optval), optlen, udp_v6_push_pending_frames); return ipv6_setsockopt(sk, level, optname, optval, optlen); } -- cgit v1.2.3 From d4c19c49142ddb2efcc34cff6379d03edb3553bd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:09:05 +0200 Subject: net/tcp: switch ->md5_parse to sockptr_t Pass a sockptr_t to prepare for set_fs-less handling of the kernel pointer from bpf-cgroup. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- include/net/tcp.h | 2 +- net/ipv4/tcp.c | 3 ++- net/ipv4/tcp_ipv4.c | 4 ++-- net/ipv6/tcp_ipv6.c | 4 ++-- 4 files changed, 7 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/net/tcp.h b/include/net/tcp.h index 9f7f7c0c1104..e3c8e1d82021 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -2002,7 +2002,7 @@ struct tcp_sock_af_ops { const struct sk_buff *skb); int (*md5_parse)(struct sock *sk, int optname, - char __user *optval, + sockptr_t optval, int optlen); #endif }; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 58ede3d62b2e..49bf15c27dea 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3249,7 +3249,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level, #ifdef CONFIG_TCP_MD5SIG case TCP_MD5SIG: case TCP_MD5SIG_EXT: - err = tp->af_specific->md5_parse(sk, optname, optval, optlen); + err = tp->af_specific->md5_parse(sk, optname, + USER_SOCKPTR(optval), optlen); break; #endif case TCP_USER_TIMEOUT: diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index daa39d33702b..f8913923a6c0 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1195,7 +1195,7 @@ static void tcp_clear_md5_list(struct sock *sk) } static int tcp_v4_parse_md5_keys(struct sock *sk, int optname, - char __user *optval, int optlen) + sockptr_t optval, int optlen) { struct tcp_md5sig cmd; struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr; @@ -1206,7 +1206,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname, if (optlen < sizeof(cmd)) return -EINVAL; - if (copy_from_user(&cmd, optval, sizeof(cmd))) + if (copy_from_sockptr(&cmd, optval, sizeof(cmd))) return -EFAULT; if (sin->sin_family != AF_INET) diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index c34b7834fd84..305870a72352 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -567,7 +567,7 @@ static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk, } static int tcp_v6_parse_md5_keys(struct sock *sk, int optname, - char __user *optval, int optlen) + sockptr_t optval, int optlen) { struct tcp_md5sig cmd; struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr; @@ -577,7 +577,7 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname, if (optlen < sizeof(cmd)) return -EINVAL; - if (copy_from_user(&cmd, optval, sizeof(cmd))) + if (copy_from_sockptr(&cmd, optval, sizeof(cmd))) return -EFAULT; if (sin6->sin6_family != AF_INET6) -- cgit v1.2.3 From a7b75c5a8c41445f33efb663887ff5f5c3b4454b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:09:07 +0200 Subject: net: pass a sockptr_t into ->setsockopt Rework the remaining setsockopt code to pass a sockptr_t instead of a plain user pointer. This removes the last remaining set_fs(KERNEL_DS) outside of architecture specific code. Signed-off-by: Christoph Hellwig Acked-by: Stefan Schmidt [ieee802154] Acked-by: Matthieu Baerts Signed-off-by: David S. 
Miller --- crypto/af_alg.c | 7 +++--- drivers/crypto/chelsio/chtls/chtls_main.c | 18 +++++++------- drivers/isdn/mISDN/socket.c | 4 ++-- include/linux/net.h | 4 +++- include/net/inet_connection_sock.h | 3 ++- include/net/ip.h | 2 +- include/net/ipv6.h | 4 ++-- include/net/sctp/structs.h | 2 +- include/net/sock.h | 4 ++-- include/net/tcp.h | 4 ++-- net/atm/common.c | 6 ++--- net/atm/common.h | 2 +- net/atm/pvc.c | 2 +- net/atm/svc.c | 6 ++--- net/ax25/af_ax25.c | 6 ++--- net/bluetooth/hci_sock.c | 8 +++---- net/bluetooth/l2cap_sock.c | 22 ++++++++--------- net/bluetooth/rfcomm/sock.c | 12 ++++++---- net/bluetooth/sco.c | 6 ++--- net/caif/caif_socket.c | 8 +++---- net/can/j1939/socket.c | 12 +++++----- net/can/raw.c | 16 ++++++------- net/core/sock.c | 2 +- net/dccp/dccp.h | 2 +- net/dccp/proto.c | 20 ++++++++-------- net/decnet/af_decnet.c | 16 +++++++------ net/ieee802154/socket.c | 6 ++--- net/ipv4/ip_sockglue.c | 13 ++++------- net/ipv4/raw.c | 8 +++---- net/ipv4/tcp.c | 5 ++-- net/ipv4/udp.c | 6 ++--- net/ipv4/udp_impl.h | 4 ++-- net/ipv6/ipv6_sockglue.c | 10 ++++---- net/ipv6/raw.c | 10 ++++---- net/ipv6/udp.c | 6 ++--- net/ipv6/udp_impl.h | 4 ++-- net/iucv/af_iucv.c | 4 ++-- net/kcm/kcmsock.c | 6 ++--- net/l2tp/l2tp_ppp.c | 4 ++-- net/llc/af_llc.c | 4 ++-- net/mptcp/protocol.c | 12 ++++------ net/netlink/af_netlink.c | 4 ++-- net/netrom/af_netrom.c | 4 ++-- net/nfc/llcp_sock.c | 6 ++--- net/packet/af_packet.c | 39 ++++++++++++++++--------------- net/phonet/pep.c | 4 ++-- net/rds/af_rds.c | 30 +++++++++++------------- net/rds/rdma.c | 14 +++++------ net/rds/rds.h | 6 ++--- net/rose/af_rose.c | 4 ++-- net/rxrpc/af_rxrpc.c | 8 +++---- net/rxrpc/ar-internal.h | 4 ++-- net/rxrpc/key.c | 9 ++++--- net/sctp/socket.c | 4 ++-- net/smc/af_smc.c | 4 ++-- net/socket.c | 23 ++++++------------ net/tipc/socket.c | 8 +++---- net/tls/tls_main.c | 17 +++++++------- net/vmw_vsock/af_vsock.c | 4 ++-- net/x25/af_x25.c | 4 ++-- net/xdp/xsk.c | 8 +++---- 61 files changed, 246 insertions(+), 258 deletions(-) (limited to 'include') diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 29f71428520b..892242a42c3e 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -197,8 +197,7 @@ unlock: return err; } -static int alg_setkey(struct sock *sk, char __user *ukey, - unsigned int keylen) +static int alg_setkey(struct sock *sk, sockptr_t ukey, unsigned int keylen) { struct alg_sock *ask = alg_sk(sk); const struct af_alg_type *type = ask->type; @@ -210,7 +209,7 @@ static int alg_setkey(struct sock *sk, char __user *ukey, return -ENOMEM; err = -EFAULT; - if (copy_from_user(key, ukey, keylen)) + if (copy_from_sockptr(key, ukey, keylen)) goto out; err = type->setkey(ask->private, key, keylen); @@ -222,7 +221,7 @@ out: } static int alg_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c index d98b89d0fa6e..c3058dcdb33c 100644 --- a/drivers/crypto/chelsio/chtls/chtls_main.c +++ b/drivers/crypto/chelsio/chtls/chtls_main.c @@ -488,7 +488,7 @@ static int chtls_getsockopt(struct sock *sk, int level, int optname, } static int do_chtls_setsockopt(struct sock *sk, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct tls_crypto_info *crypto_info, tmp_crypto_info; struct chtls_sock *csk; @@ -498,12 +498,12 @@ static int 
do_chtls_setsockopt(struct sock *sk, int optname, csk = rcu_dereference_sk_user_data(sk); - if (!optval || optlen < sizeof(*crypto_info)) { + if (sockptr_is_null(optval) || optlen < sizeof(*crypto_info)) { rc = -EINVAL; goto out; } - rc = copy_from_user(&tmp_crypto_info, optval, sizeof(*crypto_info)); + rc = copy_from_sockptr(&tmp_crypto_info, optval, sizeof(*crypto_info)); if (rc) { rc = -EFAULT; goto out; @@ -525,8 +525,9 @@ static int do_chtls_setsockopt(struct sock *sk, int optname, /* Obtain version and type from previous copy */ crypto_info[0] = tmp_crypto_info; /* Now copy the following data */ - rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info), - optval + sizeof(*crypto_info), + sockptr_advance(optval, sizeof(*crypto_info)); + rc = copy_from_sockptr((char *)crypto_info + sizeof(*crypto_info), + optval, sizeof(struct tls12_crypto_info_aes_gcm_128) - sizeof(*crypto_info)); @@ -541,8 +542,9 @@ static int do_chtls_setsockopt(struct sock *sk, int optname, } case TLS_CIPHER_AES_GCM_256: { crypto_info[0] = tmp_crypto_info; - rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info), - optval + sizeof(*crypto_info), + sockptr_advance(optval, sizeof(*crypto_info)); + rc = copy_from_sockptr((char *)crypto_info + sizeof(*crypto_info), + optval, sizeof(struct tls12_crypto_info_aes_gcm_256) - sizeof(*crypto_info)); @@ -565,7 +567,7 @@ out: } static int chtls_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct tls_context *ctx = tls_get_ctx(sk); diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c index 1b2b91479107..2835daae9e9f 100644 --- a/drivers/isdn/mISDN/socket.c +++ b/drivers/isdn/mISDN/socket.c @@ -401,7 +401,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) } static int data_sock_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int len) + sockptr_t optval, unsigned int len) { struct sock *sk = sock->sk; int err = 0, opt = 0; @@ -414,7 +414,7 @@ static int data_sock_setsockopt(struct socket *sock, int level, int optname, switch (optname) { case MISDN_TIME_STAMP: - if (get_user(opt, (int __user *)optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(int))) { err = -EFAULT; break; } diff --git a/include/linux/net.h b/include/linux/net.h index 858ff1d98154..d48ff1180879 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -21,6 +21,7 @@ #include #include #include +#include #include @@ -162,7 +163,8 @@ struct proto_ops { int (*listen) (struct socket *sock, int len); int (*shutdown) (struct socket *sock, int flags); int (*setsockopt)(struct socket *sock, int level, - int optname, char __user *optval, unsigned int optlen); + int optname, sockptr_t optval, + unsigned int optlen); int (*getsockopt)(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen); void (*show_fdinfo)(struct seq_file *m, struct socket *sock); diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 157c60cca0ca..1e209ce7d1bd 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -45,7 +46,7 @@ struct inet_connection_sock_af_ops { u16 net_frag_header_len; u16 sockaddr_len; int (*setsockopt)(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen); + sockptr_t optval, unsigned int optlen); int (*getsockopt)(struct sock 
*sk, int level, int optname, char __user *optval, int __user *optlen); void (*addr2sockaddr)(struct sock *sk, struct sockaddr *); diff --git a/include/net/ip.h b/include/net/ip.h index d66ad3a95220..b09c48d862cc 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -722,7 +722,7 @@ void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk, struct sk_buff *skb, int tlen, int offset); int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc, bool allow_ipv6); -int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, +int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen); int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 4c9d89b5d732..bd1f396cc9c7 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -1084,8 +1084,8 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6, * socket options (ipv6_sockglue.c) */ -int ipv6_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen); +int ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, + unsigned int optlen); int ipv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 233bbf7df5d6..b33f1aefad09 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -431,7 +431,7 @@ struct sctp_af { int (*setsockopt) (struct sock *sk, int level, int optname, - char __user *optval, + sockptr_t optval, unsigned int optlen); int (*getsockopt) (struct sock *sk, int level, diff --git a/include/net/sock.h b/include/net/sock.h index bfb2fe2fc368..2cc3ba667908 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1141,7 +1141,7 @@ struct proto { void (*destroy)(struct sock *sk); void (*shutdown)(struct sock *sk, int how); int (*setsockopt)(struct sock *sk, int level, - int optname, char __user *optval, + int optname, sockptr_t optval, unsigned int optlen); int (*getsockopt)(struct sock *sk, int level, int optname, char __user *optval, @@ -1734,7 +1734,7 @@ int sock_common_getsockopt(struct socket *sock, int level, int optname, int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags); int sock_common_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen); + sockptr_t optval, unsigned int optlen); void sk_common_release(struct sock *sk); diff --git a/include/net/tcp.h b/include/net/tcp.h index e3c8e1d82021..e0c35d56091f 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -399,8 +399,8 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait); int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); -int tcp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen); +int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, + unsigned int optlen); void tcp_set_keepalive(struct sock *sk, int val); void tcp_syn_ack_timeout(const struct request_sock *req); int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, diff --git a/net/atm/common.c b/net/atm/common.c index 9b28f1fb3c69..84367b844b14 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -745,7 +745,7 @@ static int check_qos(const struct atm_qos *qos) } int 
vcc_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct atm_vcc *vcc; unsigned long value; @@ -760,7 +760,7 @@ int vcc_setsockopt(struct socket *sock, int level, int optname, { struct atm_qos qos; - if (copy_from_user(&qos, optval, sizeof(qos))) + if (copy_from_sockptr(&qos, optval, sizeof(qos))) return -EFAULT; error = check_qos(&qos); if (error) @@ -774,7 +774,7 @@ int vcc_setsockopt(struct socket *sock, int level, int optname, return 0; } case SO_SETCLP: - if (get_user(value, (unsigned long __user *)optval)) + if (copy_from_sockptr(&value, optval, sizeof(value))) return -EFAULT; if (value) vcc->atm_options |= ATM_ATMOPT_CLP; diff --git a/net/atm/common.h b/net/atm/common.h index 5850649068bb..a1e56e8de698 100644 --- a/net/atm/common.h +++ b/net/atm/common.h @@ -21,7 +21,7 @@ __poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait); int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); int vcc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); int vcc_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen); + sockptr_t optval, unsigned int optlen); int vcc_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen); void vcc_process_recv_queue(struct atm_vcc *vcc); diff --git a/net/atm/pvc.c b/net/atm/pvc.c index 02bd2a436bdf..53e7d3f39e26 100644 --- a/net/atm/pvc.c +++ b/net/atm/pvc.c @@ -63,7 +63,7 @@ static int pvc_connect(struct socket *sock, struct sockaddr *sockaddr, } static int pvc_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; int error; diff --git a/net/atm/svc.c b/net/atm/svc.c index ba144d035e3d..4a02bcaad279 100644 --- a/net/atm/svc.c +++ b/net/atm/svc.c @@ -451,7 +451,7 @@ int svc_change_qos(struct atm_vcc *vcc, struct atm_qos *qos) } static int svc_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct atm_vcc *vcc = ATM_SD(sock); @@ -464,7 +464,7 @@ static int svc_setsockopt(struct socket *sock, int level, int optname, error = -EINVAL; goto out; } - if (copy_from_user(&vcc->sap, optval, optlen)) { + if (copy_from_sockptr(&vcc->sap, optval, optlen)) { error = -EFAULT; goto out; } @@ -475,7 +475,7 @@ static int svc_setsockopt(struct socket *sock, int level, int optname, error = -EINVAL; goto out; } - if (get_user(value, (int __user *)optval)) { + if (copy_from_sockptr(&value, optval, sizeof(int))) { error = -EFAULT; goto out; } diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index fd91cd34f25e..17bf31a89692 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -528,7 +528,7 @@ ax25_cb *ax25_create_cb(void) */ static int ax25_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; ax25_cb *ax25; @@ -543,7 +543,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, if (optlen < sizeof(unsigned int)) return -EINVAL; - if (get_user(opt, (unsigned int __user *)optval)) + if (copy_from_sockptr(&opt, optval, sizeof(unsigned int))) return -EFAULT; lock_sock(sk); @@ -640,7 +640,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, 
memset(devname, 0, sizeof(devname)); - if (copy_from_user(devname, optval, optlen)) { + if (copy_from_sockptr(devname, optval, optlen)) { res = -EFAULT; break; } diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index caf38a8ea6a8..d5eff27d5b1e 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -1842,7 +1842,7 @@ drop: } static int hci_sock_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int len) + sockptr_t optval, unsigned int len) { struct hci_ufilter uf = { .opcode = 0 }; struct sock *sk = sock->sk; @@ -1862,7 +1862,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, switch (optname) { case HCI_DATA_DIR: - if (get_user(opt, (int __user *)optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(opt))) { err = -EFAULT; break; } @@ -1874,7 +1874,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, break; case HCI_TIME_STAMP: - if (get_user(opt, (int __user *)optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(opt))) { err = -EFAULT; break; } @@ -1896,7 +1896,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, } len = min_t(unsigned int, len, sizeof(uf)); - if (copy_from_user(&uf, optval, len)) { + if (copy_from_sockptr(&uf, optval, len)) { err = -EFAULT; break; } diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index a995d2c51fa7..a3d104123f38 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -703,7 +703,7 @@ static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu) } static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct l2cap_chan *chan = l2cap_pi(sk)->chan; @@ -736,7 +736,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, opts.txwin_size = chan->tx_win; len = min_t(unsigned int, sizeof(opts), optlen); - if (copy_from_user((char *) &opts, optval, len)) { + if (copy_from_sockptr(&opts, optval, len)) { err = -EFAULT; break; } @@ -782,7 +782,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, break; case L2CAP_LM: - if (get_user(opt, (u32 __user *) optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } @@ -859,7 +859,7 @@ static int l2cap_set_mode(struct l2cap_chan *chan, u8 mode) } static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct l2cap_chan *chan = l2cap_pi(sk)->chan; @@ -891,7 +891,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, sec.level = BT_SECURITY_LOW; len = min_t(unsigned int, sizeof(sec), optlen); - if (copy_from_user((char *) &sec, optval, len)) { + if (copy_from_sockptr(&sec, optval, len)) { err = -EFAULT; break; } @@ -939,7 +939,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, break; } - if (get_user(opt, (u32 __user *) optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } @@ -954,7 +954,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, break; case BT_FLUSHABLE: - if (get_user(opt, (u32 __user *) optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } @@ -990,7 +990,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, 
int optname, pwr.force_active = BT_POWER_FORCE_ACTIVE_ON; len = min_t(unsigned int, sizeof(pwr), optlen); - if (copy_from_user((char *) &pwr, optval, len)) { + if (copy_from_sockptr(&pwr, optval, len)) { err = -EFAULT; break; } @@ -1002,7 +1002,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, break; case BT_CHANNEL_POLICY: - if (get_user(opt, (u32 __user *) optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } @@ -1050,7 +1050,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, break; } - if (get_user(opt, (u16 __user *) optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(u16))) { err = -EFAULT; break; } @@ -1081,7 +1081,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, break; } - if (get_user(opt, (u8 __user *) optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(u8))) { err = -EFAULT; break; } diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index df14eebe80da..dba4ea0e1b0d 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -644,7 +644,8 @@ static int rfcomm_sock_recvmsg(struct socket *sock, struct msghdr *msg, return len; } -static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen) +static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; int err = 0; @@ -656,7 +657,7 @@ static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __u switch (optname) { case RFCOMM_LM: - if (get_user(opt, (u32 __user *) optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } @@ -685,7 +686,8 @@ static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __u return err; } -static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) +static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct bt_security sec; @@ -713,7 +715,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c sec.level = BT_SECURITY_LOW; len = min_t(unsigned int, sizeof(sec), optlen); - if (copy_from_user((char *) &sec, optval, len)) { + if (copy_from_sockptr(&sec, optval, len)) { err = -EFAULT; break; } @@ -732,7 +734,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c break; } - if (get_user(opt, (u32 __user *) optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index c8c3d38cdc7b..37260baf7150 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -791,7 +791,7 @@ static int sco_sock_recvmsg(struct socket *sock, struct msghdr *msg, } static int sco_sock_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; int len, err = 0; @@ -810,7 +810,7 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname, break; } - if (get_user(opt, (u32 __user *) optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } @@ -831,7 +831,7 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname, voice.setting = sco_pi(sk)->setting; len = min_t(unsigned int, 
sizeof(voice), optlen); - if (copy_from_user((char *)&voice, optval, len)) { + if (copy_from_sockptr(&voice, optval, len)) { err = -EFAULT; break; } diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index b94ecd931002..3ad0a1df6712 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -669,8 +669,8 @@ out_err: return sent ? : err; } -static int setsockopt(struct socket *sock, - int lvl, int opt, char __user *ov, unsigned int ol) +static int setsockopt(struct socket *sock, int lvl, int opt, sockptr_t ov, + unsigned int ol) { struct sock *sk = sock->sk; struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); @@ -685,7 +685,7 @@ static int setsockopt(struct socket *sock, return -EINVAL; if (lvl != SOL_CAIF) goto bad_sol; - if (copy_from_user(&linksel, ov, sizeof(int))) + if (copy_from_sockptr(&linksel, ov, sizeof(int))) return -EINVAL; lock_sock(&(cf_sk->sk)); cf_sk->conn_req.link_selector = linksel; @@ -699,7 +699,7 @@ static int setsockopt(struct socket *sock, return -ENOPROTOOPT; lock_sock(&(cf_sk->sk)); if (ol > sizeof(cf_sk->conn_req.param.data) || - copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) { + copy_from_sockptr(&cf_sk->conn_req.param.data, ov, ol)) { release_sock(&cf_sk->sk); return -EINVAL; } diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c index f7587428febd..78ff9b3f1d40 100644 --- a/net/can/j1939/socket.c +++ b/net/can/j1939/socket.c @@ -627,14 +627,14 @@ static int j1939_sk_release(struct socket *sock) return 0; } -static int j1939_sk_setsockopt_flag(struct j1939_sock *jsk, char __user *optval, +static int j1939_sk_setsockopt_flag(struct j1939_sock *jsk, sockptr_t optval, unsigned int optlen, int flag) { int tmp; if (optlen != sizeof(tmp)) return -EINVAL; - if (copy_from_user(&tmp, optval, optlen)) + if (copy_from_sockptr(&tmp, optval, optlen)) return -EFAULT; lock_sock(&jsk->sk); if (tmp) @@ -646,7 +646,7 @@ static int j1939_sk_setsockopt_flag(struct j1939_sock *jsk, char __user *optval, } static int j1939_sk_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct j1939_sock *jsk = j1939_sk(sk); @@ -658,7 +658,7 @@ static int j1939_sk_setsockopt(struct socket *sock, int level, int optname, switch (optname) { case SO_J1939_FILTER: - if (optval) { + if (!sockptr_is_null(optval)) { struct j1939_filter *f; int c; @@ -670,7 +670,7 @@ static int j1939_sk_setsockopt(struct socket *sock, int level, int optname, return -EINVAL; count = optlen / sizeof(*filters); - filters = memdup_user(optval, optlen); + filters = memdup_sockptr(optval, optlen); if (IS_ERR(filters)) return PTR_ERR(filters); @@ -703,7 +703,7 @@ static int j1939_sk_setsockopt(struct socket *sock, int level, int optname, case SO_J1939_SEND_PRIO: if (optlen != sizeof(tmp)) return -EINVAL; - if (copy_from_user(&tmp, optval, optlen)) + if (copy_from_sockptr(&tmp, optval, optlen)) return -EFAULT; if (tmp < 0 || tmp > 7) return -EDOM; diff --git a/net/can/raw.c b/net/can/raw.c index 59c039d73c6d..94a9405658dc 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -485,7 +485,7 @@ static int raw_getname(struct socket *sock, struct sockaddr *uaddr, } static int raw_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct raw_sock *ro = raw_sk(sk); @@ -511,11 +511,11 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, if 
(count > 1) { /* filter does not fit into dfilter => alloc space */ - filter = memdup_user(optval, optlen); + filter = memdup_sockptr(optval, optlen); if (IS_ERR(filter)) return PTR_ERR(filter); } else if (count == 1) { - if (copy_from_user(&sfilter, optval, sizeof(sfilter))) + if (copy_from_sockptr(&sfilter, optval, sizeof(sfilter))) return -EFAULT; } @@ -568,7 +568,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, if (optlen != sizeof(err_mask)) return -EINVAL; - if (copy_from_user(&err_mask, optval, optlen)) + if (copy_from_sockptr(&err_mask, optval, optlen)) return -EFAULT; err_mask &= CAN_ERR_MASK; @@ -607,7 +607,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, if (optlen != sizeof(ro->loopback)) return -EINVAL; - if (copy_from_user(&ro->loopback, optval, optlen)) + if (copy_from_sockptr(&ro->loopback, optval, optlen)) return -EFAULT; break; @@ -616,7 +616,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, if (optlen != sizeof(ro->recv_own_msgs)) return -EINVAL; - if (copy_from_user(&ro->recv_own_msgs, optval, optlen)) + if (copy_from_sockptr(&ro->recv_own_msgs, optval, optlen)) return -EFAULT; break; @@ -625,7 +625,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, if (optlen != sizeof(ro->fd_frames)) return -EINVAL; - if (copy_from_user(&ro->fd_frames, optval, optlen)) + if (copy_from_sockptr(&ro->fd_frames, optval, optlen)) return -EFAULT; break; @@ -634,7 +634,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, if (optlen != sizeof(ro->join_filters)) return -EINVAL; - if (copy_from_user(&ro->join_filters, optval, optlen)) + if (copy_from_sockptr(&ro->join_filters, optval, optlen)) return -EFAULT; break; diff --git a/net/core/sock.c b/net/core/sock.c index 1444d7d53ba2..2c5dd1397775 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -3211,7 +3211,7 @@ EXPORT_SYMBOL(sock_common_recvmsg); * Set socket options on an inet socket. 
*/ int sock_common_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index 434eea91b767..9cc9d1ee6cdb 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -295,7 +295,7 @@ int dccp_disconnect(struct sock *sk, int flags); int dccp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); int dccp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen); + sockptr_t optval, unsigned int optlen); int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg); int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 9e453611107f..2e9e8449698f 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -411,7 +411,7 @@ out: EXPORT_SYMBOL_GPL(dccp_ioctl); static int dccp_setsockopt_service(struct sock *sk, const __be32 service, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct dccp_sock *dp = dccp_sk(sk); struct dccp_service_list *sl = NULL; @@ -426,9 +426,9 @@ static int dccp_setsockopt_service(struct sock *sk, const __be32 service, return -ENOMEM; sl->dccpsl_nr = optlen / sizeof(u32) - 1; - if (copy_from_user(sl->dccpsl_list, - optval + sizeof(service), - optlen - sizeof(service)) || + sockptr_advance(optval, sizeof(service)); + if (copy_from_sockptr(sl->dccpsl_list, optval, + optlen - sizeof(service)) || dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) { kfree(sl); return -EFAULT; @@ -482,7 +482,7 @@ static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx) } static int dccp_setsockopt_ccid(struct sock *sk, int type, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { u8 *val; int rc = 0; @@ -490,7 +490,7 @@ static int dccp_setsockopt_ccid(struct sock *sk, int type, if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS) return -EINVAL; - val = memdup_user(optval, optlen); + val = memdup_sockptr(optval, optlen); if (IS_ERR(val)) return PTR_ERR(val); @@ -507,7 +507,7 @@ static int dccp_setsockopt_ccid(struct sock *sk, int type, } static int do_dccp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct dccp_sock *dp = dccp_sk(sk); int val, err = 0; @@ -529,7 +529,7 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname, if (optlen < (int)sizeof(int)) return -EINVAL; - if (get_user(val, (int __user *)optval)) + if (copy_from_sockptr(&val, optval, sizeof(int))) return -EFAULT; if (optname == DCCP_SOCKOPT_SERVICE) @@ -572,8 +572,8 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname, return err; } -int dccp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) +int dccp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, + unsigned int optlen) { if (level != SOL_DCCP) return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level, diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 7d51ab608fb3..3b53d766789d 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -150,7 +150,8 @@ static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE]; static struct hlist_head dn_wild_sk; static atomic_long_t decnet_memory_allocated; -static int 
__dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags); +static int __dn_setsockopt(struct socket *sock, int level, int optname, + sockptr_t optval, unsigned int optlen, int flags); static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags); static struct hlist_head *dn_find_list(struct sock *sk) @@ -1320,7 +1321,8 @@ out: return err; } -static int dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) +static int dn_setsockopt(struct socket *sock, int level, int optname, + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; int err; @@ -1332,14 +1334,14 @@ static int dn_setsockopt(struct socket *sock, int level, int optname, char __use /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != DSO_LINKINFO && optname != DSO_STREAM && optname != DSO_SEQPACKET) - err = nf_setsockopt(sk, PF_DECnet, optname, - USER_SOCKPTR(optval), optlen); + err = nf_setsockopt(sk, PF_DECnet, optname, optval, optlen); #endif return err; } -static int __dn_setsockopt(struct socket *sock, int level,int optname, char __user *optval, unsigned int optlen, int flags) +static int __dn_setsockopt(struct socket *sock, int level, int optname, + sockptr_t optval, unsigned int optlen, int flags) { struct sock *sk = sock->sk; struct dn_scp *scp = DN_SK(sk); @@ -1355,13 +1357,13 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us } u; int err; - if (optlen && !optval) + if (optlen && sockptr_is_null(optval)) return -EINVAL; if (optlen > sizeof(u)) return -EINVAL; - if (copy_from_user(&u, optval, optlen)) + if (copy_from_sockptr(&u, optval, optlen)) return -EFAULT; switch (optname) { diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index 94ae9662133e..a45a0401adc5 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -382,7 +382,7 @@ static int raw_getsockopt(struct sock *sk, int level, int optname, } static int raw_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { return -EOPNOTSUPP; } @@ -872,7 +872,7 @@ static int dgram_getsockopt(struct sock *sk, int level, int optname, } static int dgram_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct dgram_sock *ro = dgram_sk(sk); struct net *net = sock_net(sk); @@ -882,7 +882,7 @@ static int dgram_setsockopt(struct sock *sk, int level, int optname, if (optlen < sizeof(int)) return -EINVAL; - if (get_user(val, (int __user *)optval)) + if (copy_from_sockptr(&val, optval, sizeof(int))) return -EFAULT; lock_sock(sk); diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index f7f1507b89fe..8dc027e54c5b 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -1401,21 +1401,19 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb) skb_dst_drop(skb); } -int ip_setsockopt(struct sock *sk, int level, - int optname, char __user *optval, unsigned int optlen) +int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, + unsigned int optlen) { int err; if (level != SOL_IP) return -ENOPROTOOPT; - err = do_ip_setsockopt(sk, level, optname, USER_SOCKPTR(optval), - optlen); + err = do_ip_setsockopt(sk, level, optname, optval, optlen); #if 
IS_ENABLED(CONFIG_BPFILTER_UMH) if (optname >= BPFILTER_IPT_SO_SET_REPLACE && optname < BPFILTER_IPT_SET_MAX) - err = bpfilter_ip_set_sockopt(sk, optname, USER_SOCKPTR(optval), - optlen); + err = bpfilter_ip_set_sockopt(sk, optname, optval, optlen); #endif #ifdef CONFIG_NETFILTER /* we need to exclude all possible ENOPROTOOPTs except default case */ @@ -1423,8 +1421,7 @@ int ip_setsockopt(struct sock *sk, int level, optname != IP_IPSEC_POLICY && optname != IP_XFRM_POLICY && !ip_mroute_opt(optname)) - err = nf_setsockopt(sk, PF_INET, optname, USER_SOCKPTR(optval), - optlen); + err = nf_setsockopt(sk, PF_INET, optname, optval, optlen); #endif return err; } diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 2a57d633b31e..6fd4330287c2 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -809,11 +809,11 @@ static int raw_sk_init(struct sock *sk) return 0; } -static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen) +static int raw_seticmpfilter(struct sock *sk, sockptr_t optval, int optlen) { if (optlen > sizeof(struct icmp_filter)) optlen = sizeof(struct icmp_filter); - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen)) + if (copy_from_sockptr(&raw_sk(sk)->filter, optval, optlen)) return -EFAULT; return 0; } @@ -838,7 +838,7 @@ out: return ret; } static int do_raw_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { if (optname == ICMP_FILTER) { if (inet_sk(sk)->inet_num != IPPROTO_ICMP) @@ -850,7 +850,7 @@ static int do_raw_setsockopt(struct sock *sk, int level, int optname, } static int raw_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { if (level != SOL_RAW) return ip_setsockopt(sk, level, optname, optval, optlen); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 71cbc61c335f..27de9380ed14 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3323,7 +3323,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, int optname, return err; } -int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, +int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen) { const struct inet_connection_sock *icsk = inet_csk(sk); @@ -3331,8 +3331,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, if (level != SOL_TCP) return icsk->icsk_af_ops->setsockopt(sk, level, optname, optval, optlen); - return do_tcp_setsockopt(sk, level, optname, USER_SOCKPTR(optval), - optlen); + return do_tcp_setsockopt(sk, level, optname, optval, optlen); } EXPORT_SYMBOL(tcp_setsockopt); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index c6cb2d09dbc7..5a6a2f6d86b9 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2703,12 +2703,12 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, } EXPORT_SYMBOL(udp_lib_setsockopt); -int udp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) +int udp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, + unsigned int optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) return udp_lib_setsockopt(sk, level, optname, - USER_SOCKPTR(optval), optlen, + optval, optlen, udp_push_pending_frames); return ip_setsockopt(sk, level, optname, optval, optlen); } diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h index ab313702c87f..2878d8285caf 100644 --- a/net/ipv4/udp_impl.h +++ b/net/ipv4/udp_impl.h @@ -12,8 +12,8 @@ int 
__udp4_lib_err(struct sk_buff *, u32, struct udp_table *); int udp_v4_get_port(struct sock *sk, unsigned short snum); void udp_v4_rehash(struct sock *sk); -int udp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen); +int udp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, + unsigned int optlen); int udp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index dcd000a5a9b1..d2282f5c9760 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -980,8 +980,8 @@ e_inval: return -EINVAL; } -int ipv6_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) +int ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, + unsigned int optlen) { int err; @@ -991,14 +991,12 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname, if (level != SOL_IPV6) return -ENOPROTOOPT; - err = do_ipv6_setsockopt(sk, level, optname, USER_SOCKPTR(optval), - optlen); + err = do_ipv6_setsockopt(sk, level, optname, optval, optlen); #ifdef CONFIG_NETFILTER /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY && optname != IPV6_XFRM_POLICY) - err = nf_setsockopt(sk, PF_INET6, optname, USER_SOCKPTR(optval), - optlen); + err = nf_setsockopt(sk, PF_INET6, optname, optval, optlen); #endif return err; } diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 594e01ad670a..874f01cd7aec 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -972,13 +972,13 @@ do_confirm: } static int rawv6_seticmpfilter(struct sock *sk, int level, int optname, - char __user *optval, int optlen) + sockptr_t optval, int optlen) { switch (optname) { case ICMPV6_FILTER: if (optlen > sizeof(struct icmp6_filter)) optlen = sizeof(struct icmp6_filter); - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen)) + if (copy_from_sockptr(&raw6_sk(sk)->filter, optval, optlen)) return -EFAULT; return 0; default: @@ -1015,12 +1015,12 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname, static int do_rawv6_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct raw6_sock *rp = raw6_sk(sk); int val; - if (get_user(val, (int __user *)optval)) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; switch (optname) { @@ -1062,7 +1062,7 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname, } static int rawv6_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { switch (level) { case SOL_RAW: diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 2df1e6c9d7cb..15818e18655d 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1618,12 +1618,12 @@ void udpv6_destroy_sock(struct sock *sk) /* * Socket option code for UDP */ -int udpv6_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) +int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, + unsigned int optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) return udp_lib_setsockopt(sk, level, optname, - USER_SOCKPTR(optval), optlen, + optval, optlen, udp_v6_push_pending_frames); return ipv6_setsockopt(sk, level, optname, optval, optlen); } diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h index 
30dfb6f1b762..b2fcc46c1630 100644 --- a/net/ipv6/udp_impl.h +++ b/net/ipv6/udp_impl.h @@ -17,8 +17,8 @@ void udp_v6_rehash(struct sock *sk); int udpv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); -int udpv6_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen); +int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, + unsigned int optlen); int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len); diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index ee0add15497d..6ee9851ac7c6 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -1494,7 +1494,7 @@ static int iucv_sock_release(struct socket *sock) /* getsockopt and setsockopt */ static int iucv_sock_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct iucv_sock *iucv = iucv_sk(sk); @@ -1507,7 +1507,7 @@ static int iucv_sock_setsockopt(struct socket *sock, int level, int optname, if (optlen < sizeof(int)) return -EINVAL; - if (get_user(val, (int __user *) optval)) + if (copy_from_sockptr(&val, optval, sizeof(int))) return -EFAULT; rc = 0; diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index 56fac24a627a..56dad9565bc9 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -1265,7 +1265,7 @@ static void kcm_recv_enable(struct kcm_sock *kcm) } static int kcm_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct kcm_sock *kcm = kcm_sk(sock->sk); int val, valbool; @@ -1277,8 +1277,8 @@ static int kcm_setsockopt(struct socket *sock, int level, int optname, if (optlen < sizeof(int)) return -EINVAL; - if (get_user(val, (int __user *)optval)) - return -EINVAL; + if (copy_from_sockptr(&val, optval, sizeof(int))) + return -EFAULT; valbool = val ? 1 : 0; diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index e58fe7e3b884..4389df66af35 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -1242,7 +1242,7 @@ static int pppol2tp_session_setsockopt(struct sock *sk, * session or the special tunnel type. */ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct l2tp_session *session; @@ -1256,7 +1256,7 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname, if (optlen < sizeof(int)) return -EINVAL; - if (get_user(val, (int __user *)optval)) + if (copy_from_sockptr(&val, optval, sizeof(int))) return -EFAULT; err = -ENOTCONN; diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 6140a3e46c26..7180979114e4 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -1053,7 +1053,7 @@ static int llc_ui_ioctl(struct socket *sock, unsigned int cmd, * Set various connection specific parameters. 
*/ static int llc_ui_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct llc_sock *llc = llc_sk(sk); @@ -1063,7 +1063,7 @@ static int llc_ui_setsockopt(struct socket *sock, int level, int optname, lock_sock(sk); if (unlikely(level != SOL_LLC || optlen != sizeof(int))) goto out; - rc = get_user(opt, (int __user *)optval); + rc = copy_from_sockptr(&opt, optval, sizeof(opt)); if (rc) goto out; rc = -EINVAL; diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 7246847efa90..2891ae8a1028 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1632,7 +1632,7 @@ static void mptcp_destroy(struct sock *sk) } static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = (struct sock *)msk; struct socket *ssock; @@ -1648,8 +1648,7 @@ static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname, return -EINVAL; } - ret = sock_setsockopt(ssock, SOL_SOCKET, optname, - USER_SOCKPTR(optval), optlen); + ret = sock_setsockopt(ssock, SOL_SOCKET, optname, optval, optlen); if (ret == 0) { if (optname == SO_REUSEPORT) sk->sk_reuseport = ssock->sk->sk_reuseport; @@ -1660,12 +1659,11 @@ static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname, return ret; } - return sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, - USER_SOCKPTR(optval), optlen); + return sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, optval, optlen); } static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = (struct sock *)msk; int ret = -EOPNOTSUPP; @@ -1692,7 +1690,7 @@ static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname, } static int mptcp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct mptcp_sock *msk = mptcp_sk(sk); struct sock *ssk; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 3cd58f0c2de4..d8921b833744 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1621,7 +1621,7 @@ static void netlink_update_socket_mc(struct netlink_sock *nlk, } static int netlink_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct netlink_sock *nlk = nlk_sk(sk); @@ -1632,7 +1632,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname, return -ENOPROTOOPT; if (optlen >= sizeof(int) && - get_user(val, (unsigned int __user *)optval)) + copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; switch (optname) { diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index f90ef6934b8f..6d16e1ab1a8a 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -294,7 +294,7 @@ void nr_destroy_socket(struct sock *sk) */ static int nr_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct nr_sock *nr = nr_sk(sk); @@ -306,7 +306,7 @@ static int nr_setsockopt(struct socket *sock, int level, int optname, if (optlen < sizeof(unsigned int)) return -EINVAL; - if (get_user(opt, (unsigned int __user *)optval)) + if 
(copy_from_sockptr(&opt, optval, sizeof(unsigned int))) return -EFAULT; switch (optname) { diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index 6da1e2334bb6..d257ed3b732a 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c @@ -218,7 +218,7 @@ error: } static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); @@ -241,7 +241,7 @@ static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname, break; } - if (get_user(opt, (u32 __user *) optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } @@ -263,7 +263,7 @@ static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname, break; } - if (get_user(opt, (u32 __user *) optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index d8d4f78f78e4..0b8160d1a6e0 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1558,7 +1558,7 @@ static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data, return 0; } -static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data, +static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data, unsigned int len) { struct bpf_prog *new; @@ -1568,7 +1568,7 @@ static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data, return -EPERM; if (len != sizeof(fd)) return -EINVAL; - if (copy_from_user(&fd, data, len)) + if (copy_from_sockptr(&fd, data, len)) return -EFAULT; new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); @@ -1579,12 +1579,12 @@ static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data, return 0; } -static int fanout_set_data(struct packet_sock *po, char __user *data, +static int fanout_set_data(struct packet_sock *po, sockptr_t data, unsigned int len) { switch (po->fanout->type) { case PACKET_FANOUT_CBPF: - return fanout_set_data_cbpf(po, USER_SOCKPTR(data), len); + return fanout_set_data_cbpf(po, data, len); case PACKET_FANOUT_EBPF: return fanout_set_data_ebpf(po, data, len); default: @@ -3652,7 +3652,8 @@ static void packet_flush_mclist(struct sock *sk) } static int -packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) +packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, + unsigned int optlen) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); @@ -3672,7 +3673,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv return -EINVAL; if (len > sizeof(mreq)) len = sizeof(mreq); - if (copy_from_user(&mreq, optval, len)) + if (copy_from_sockptr(&mreq, optval, len)) return -EFAULT; if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address))) return -EINVAL; @@ -3703,7 +3704,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen < len) { ret = -EINVAL; } else { - if (copy_from_user(&req_u.req, optval, len)) + if (copy_from_sockptr(&req_u.req, optval, len)) ret = -EFAULT; else ret = packet_set_ring(sk, &req_u, 0, @@ -3718,7 +3719,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen != sizeof(val)) return -EINVAL; - if (copy_from_user(&val, optval, sizeof(val))) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; pkt_sk(sk)->copy_thresh = val; @@ -3730,7 
+3731,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen != sizeof(val)) return -EINVAL; - if (copy_from_user(&val, optval, sizeof(val))) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; switch (val) { case TPACKET_V1: @@ -3756,7 +3757,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen != sizeof(val)) return -EINVAL; - if (copy_from_user(&val, optval, sizeof(val))) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; if (val > INT_MAX) return -EINVAL; @@ -3776,7 +3777,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen != sizeof(val)) return -EINVAL; - if (copy_from_user(&val, optval, sizeof(val))) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; lock_sock(sk); @@ -3795,7 +3796,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen < sizeof(val)) return -EINVAL; - if (copy_from_user(&val, optval, sizeof(val))) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; lock_sock(sk); @@ -3809,7 +3810,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen < sizeof(val)) return -EINVAL; - if (copy_from_user(&val, optval, sizeof(val))) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; lock_sock(sk); @@ -3825,7 +3826,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv return -EINVAL; if (optlen < sizeof(val)) return -EINVAL; - if (copy_from_user(&val, optval, sizeof(val))) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; lock_sock(sk); @@ -3844,7 +3845,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen != sizeof(val)) return -EINVAL; - if (copy_from_user(&val, optval, sizeof(val))) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; po->tp_tstamp = val; @@ -3856,7 +3857,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen != sizeof(val)) return -EINVAL; - if (copy_from_user(&val, optval, sizeof(val))) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; return fanout_add(sk, val & 0xffff, val >> 16); @@ -3874,7 +3875,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen != sizeof(val)) return -EINVAL; - if (copy_from_user(&val, optval, sizeof(val))) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; if (val < 0 || val > 1) return -EINVAL; @@ -3888,7 +3889,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen != sizeof(val)) return -EINVAL; - if (copy_from_user(&val, optval, sizeof(val))) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; lock_sock(sk); @@ -3907,7 +3908,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen != sizeof(val)) return -EINVAL; - if (copy_from_user(&val, optval, sizeof(val))) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; po->xmit = val ? 
packet_direct_xmit : dev_queue_xmit; diff --git a/net/phonet/pep.c b/net/phonet/pep.c index 4577e43cb777..e47d09aca4af 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c @@ -975,7 +975,7 @@ static int pep_init(struct sock *sk) } static int pep_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct pep_sock *pn = pep_sk(sk); int val = 0, err = 0; @@ -983,7 +983,7 @@ static int pep_setsockopt(struct sock *sk, int level, int optname, if (level != SOL_PNPIPE) return -ENOPROTOOPT; if (optlen >= sizeof(int)) { - if (get_user(val, (int __user *) optval)) + if (copy_from_sockptr(&val, optval, sizeof(int))) return -EFAULT; } diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index 1a5bf3fa4578..b239120dd9ca 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c @@ -290,8 +290,7 @@ static int rds_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) return 0; } -static int rds_cancel_sent_to(struct rds_sock *rs, char __user *optval, - int len) +static int rds_cancel_sent_to(struct rds_sock *rs, sockptr_t optval, int len) { struct sockaddr_in6 sin6; struct sockaddr_in sin; @@ -308,14 +307,15 @@ static int rds_cancel_sent_to(struct rds_sock *rs, char __user *optval, goto out; } else if (len < sizeof(struct sockaddr_in6)) { /* Assume IPv4 */ - if (copy_from_user(&sin, optval, sizeof(struct sockaddr_in))) { + if (copy_from_sockptr(&sin, optval, + sizeof(struct sockaddr_in))) { ret = -EFAULT; goto out; } ipv6_addr_set_v4mapped(sin.sin_addr.s_addr, &sin6.sin6_addr); sin6.sin6_port = sin.sin_port; } else { - if (copy_from_user(&sin6, optval, + if (copy_from_sockptr(&sin6, optval, sizeof(struct sockaddr_in6))) { ret = -EFAULT; goto out; @@ -327,21 +327,20 @@ out: return ret; } -static int rds_set_bool_option(unsigned char *optvar, char __user *optval, +static int rds_set_bool_option(unsigned char *optvar, sockptr_t optval, int optlen) { int value; if (optlen < sizeof(int)) return -EINVAL; - if (get_user(value, (int __user *) optval)) + if (copy_from_sockptr(&value, optval, sizeof(int))) return -EFAULT; *optvar = !!value; return 0; } -static int rds_cong_monitor(struct rds_sock *rs, char __user *optval, - int optlen) +static int rds_cong_monitor(struct rds_sock *rs, sockptr_t optval, int optlen) { int ret; @@ -358,8 +357,7 @@ static int rds_cong_monitor(struct rds_sock *rs, char __user *optval, return ret; } -static int rds_set_transport(struct rds_sock *rs, char __user *optval, - int optlen) +static int rds_set_transport(struct rds_sock *rs, sockptr_t optval, int optlen) { int t_type; @@ -369,7 +367,7 @@ static int rds_set_transport(struct rds_sock *rs, char __user *optval, if (optlen != sizeof(int)) return -EINVAL; - if (copy_from_user(&t_type, (int __user *)optval, sizeof(t_type))) + if (copy_from_sockptr(&t_type, optval, sizeof(t_type))) return -EFAULT; if (t_type < 0 || t_type >= RDS_TRANS_COUNT) @@ -380,7 +378,7 @@ static int rds_set_transport(struct rds_sock *rs, char __user *optval, return rs->rs_transport ? 0 : -ENOPROTOOPT; } -static int rds_enable_recvtstamp(struct sock *sk, char __user *optval, +static int rds_enable_recvtstamp(struct sock *sk, sockptr_t optval, int optlen, int optname) { int val, valbool; @@ -388,7 +386,7 @@ static int rds_enable_recvtstamp(struct sock *sk, char __user *optval, if (optlen != sizeof(int)) return -EFAULT; - if (get_user(val, (int __user *)optval)) + if (copy_from_sockptr(&val, optval, sizeof(int))) return -EFAULT; valbool = val ? 
1 : 0; @@ -404,7 +402,7 @@ static int rds_enable_recvtstamp(struct sock *sk, char __user *optval, return 0; } -static int rds_recv_track_latency(struct rds_sock *rs, char __user *optval, +static int rds_recv_track_latency(struct rds_sock *rs, sockptr_t optval, int optlen) { struct rds_rx_trace_so trace; @@ -413,7 +411,7 @@ static int rds_recv_track_latency(struct rds_sock *rs, char __user *optval, if (optlen != sizeof(struct rds_rx_trace_so)) return -EFAULT; - if (copy_from_user(&trace, optval, sizeof(trace))) + if (copy_from_sockptr(&trace, optval, sizeof(trace))) return -EFAULT; if (trace.rx_traces > RDS_MSG_RX_DGRAM_TRACE_MAX) @@ -432,7 +430,7 @@ static int rds_recv_track_latency(struct rds_sock *rs, char __user *optval, } static int rds_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct rds_sock *rs = rds_sk_to_rs(sock->sk); int ret; diff --git a/net/rds/rdma.c b/net/rds/rdma.c index a7ae11846cd7..ccdd304eae0a 100644 --- a/net/rds/rdma.c +++ b/net/rds/rdma.c @@ -353,21 +353,20 @@ out: return ret; } -int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen) +int rds_get_mr(struct rds_sock *rs, sockptr_t optval, int optlen) { struct rds_get_mr_args args; if (optlen != sizeof(struct rds_get_mr_args)) return -EINVAL; - if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval, - sizeof(struct rds_get_mr_args))) + if (copy_from_sockptr(&args, optval, sizeof(struct rds_get_mr_args))) return -EFAULT; return __rds_rdma_map(rs, &args, NULL, NULL, NULL); } -int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen) +int rds_get_mr_for_dest(struct rds_sock *rs, sockptr_t optval, int optlen) { struct rds_get_mr_for_dest_args args; struct rds_get_mr_args new_args; @@ -375,7 +374,7 @@ int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen) if (optlen != sizeof(struct rds_get_mr_for_dest_args)) return -EINVAL; - if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval, + if (copy_from_sockptr(&args, optval, sizeof(struct rds_get_mr_for_dest_args))) return -EFAULT; @@ -394,7 +393,7 @@ int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen) /* * Free the MR indicated by the given R_Key */ -int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen) +int rds_free_mr(struct rds_sock *rs, sockptr_t optval, int optlen) { struct rds_free_mr_args args; struct rds_mr *mr; @@ -403,8 +402,7 @@ int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen) if (optlen != sizeof(struct rds_free_mr_args)) return -EINVAL; - if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval, - sizeof(struct rds_free_mr_args))) + if (copy_from_sockptr(&args, optval, sizeof(struct rds_free_mr_args))) return -EFAULT; /* Special case - a null cookie means flush all unused MRs */ diff --git a/net/rds/rds.h b/net/rds/rds.h index 106e862996b9..d35d1fc39807 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -924,9 +924,9 @@ int rds_send_pong(struct rds_conn_path *cp, __be16 dport); /* rdma.c */ void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force); -int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen); -int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen); -int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen); +int rds_get_mr(struct rds_sock *rs, sockptr_t optval, int optlen); +int rds_get_mr_for_dest(struct rds_sock *rs, sockptr_t optval, int 
optlen); +int rds_free_mr(struct rds_sock *rs, sockptr_t optval, int optlen); void rds_rdma_drop_keys(struct rds_sock *rs); int rds_rdma_extra_size(struct rds_rdma_args *args, struct rds_iov_vector *iov); diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index ce85656ac9c1..cf7d974e0f61 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -365,7 +365,7 @@ void rose_destroy_socket(struct sock *sk) */ static int rose_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); @@ -377,7 +377,7 @@ static int rose_setsockopt(struct socket *sock, int level, int optname, if (optlen < sizeof(int)) return -EINVAL; - if (get_user(opt, (int __user *)optval)) + if (copy_from_sockptr(&opt, optval, sizeof(int))) return -EFAULT; switch (optname) { diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index cd7d0d204c74..e6725a6de015 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -588,7 +588,7 @@ EXPORT_SYMBOL(rxrpc_sock_set_min_security_level); * set RxRPC socket options */ static int rxrpc_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct rxrpc_sock *rx = rxrpc_sk(sock->sk); unsigned int min_sec_level; @@ -639,8 +639,8 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname, ret = -EISCONN; if (rx->sk.sk_state != RXRPC_UNBOUND) goto error; - ret = get_user(min_sec_level, - (unsigned int __user *) optval); + ret = copy_from_sockptr(&min_sec_level, optval, + sizeof(unsigned int)); if (ret < 0) goto error; ret = -EINVAL; @@ -658,7 +658,7 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname, if (rx->sk.sk_state != RXRPC_SERVER_BOUND2) goto error; ret = -EFAULT; - if (copy_from_user(service_upgrade, optval, + if (copy_from_sockptr(service_upgrade, optval, sizeof(service_upgrade)) != 0) goto error; ret = -EINVAL; diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 9a2139ebd67d..6d29a3603a3e 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -909,8 +909,8 @@ extern const struct rxrpc_security rxrpc_no_security; extern struct key_type key_type_rxrpc; extern struct key_type key_type_rxrpc_s; -int rxrpc_request_key(struct rxrpc_sock *, char __user *, int); -int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int); +int rxrpc_request_key(struct rxrpc_sock *, sockptr_t , int); +int rxrpc_server_keyring(struct rxrpc_sock *, sockptr_t, int); int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time64_t, u32); diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c index 0c98313dd7a8..94c3df392651 100644 --- a/net/rxrpc/key.c +++ b/net/rxrpc/key.c @@ -896,7 +896,7 @@ static void rxrpc_describe(const struct key *key, struct seq_file *m) /* * grab the security key for a socket */ -int rxrpc_request_key(struct rxrpc_sock *rx, char __user *optval, int optlen) +int rxrpc_request_key(struct rxrpc_sock *rx, sockptr_t optval, int optlen) { struct key *key; char *description; @@ -906,7 +906,7 @@ int rxrpc_request_key(struct rxrpc_sock *rx, char __user *optval, int optlen) if (optlen <= 0 || optlen > PAGE_SIZE - 1) return -EINVAL; - description = memdup_user_nul(optval, optlen); + description = memdup_sockptr_nul(optval, optlen); if (IS_ERR(description)) return PTR_ERR(description); @@ -926,8 +926,7 @@ int rxrpc_request_key(struct rxrpc_sock *rx, char __user 
*optval, int optlen) /* * grab the security keyring for a server socket */ -int rxrpc_server_keyring(struct rxrpc_sock *rx, char __user *optval, - int optlen) +int rxrpc_server_keyring(struct rxrpc_sock *rx, sockptr_t optval, int optlen) { struct key *key; char *description; @@ -937,7 +936,7 @@ int rxrpc_server_keyring(struct rxrpc_sock *rx, char __user *optval, if (optlen <= 0 || optlen > PAGE_SIZE - 1) return -EINVAL; - description = memdup_user_nul(optval, optlen); + description = memdup_sockptr_nul(optval, optlen); if (IS_ERR(description)) return PTR_ERR(description); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 9a767f359718..144808dfea9e 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4429,7 +4429,7 @@ out: * optlen - the size of the buffer. */ static int sctp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { void *kopt = NULL; int retval = 0; @@ -4449,7 +4449,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, } if (optlen > 0) { - kopt = memdup_user(optval, optlen); + kopt = memdup_sockptr(optval, optlen); if (IS_ERR(kopt)) return PTR_ERR(kopt); } diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 9711c9e0e515..4ac1d4de6676 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -1731,7 +1731,7 @@ out: } static int smc_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct smc_sock *smc; @@ -1754,7 +1754,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname, if (optlen < sizeof(int)) return -EINVAL; - if (get_user(val, (int __user *)optval)) + if (copy_from_sockptr(&val, optval, sizeof(int))) return -EFAULT; lock_sock(sk); diff --git a/net/socket.c b/net/socket.c index c97f83d879ae..e44b8ac47f6f 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2094,10 +2094,10 @@ static bool sock_use_custom_sol_socket(const struct socket *sock) * Set a socket option. Because we don't know the option lengths we have * to pass the user mode parameter for the protocols to sort out. 
*/ -int __sys_setsockopt(int fd, int level, int optname, char __user *optval, +int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval, int optlen) { - mm_segment_t oldfs = get_fs(); + sockptr_t optval = USER_SOCKPTR(user_optval); char *kernel_optval = NULL; int err, fput_needed; struct socket *sock; @@ -2115,7 +2115,7 @@ int __sys_setsockopt(int fd, int level, int optname, char __user *optval, if (!in_compat_syscall()) err = BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock->sk, &level, &optname, - optval, &optlen, + user_optval, &optlen, &kernel_optval); if (err < 0) goto out_put; @@ -2124,25 +2124,16 @@ int __sys_setsockopt(int fd, int level, int optname, char __user *optval, goto out_put; } - if (kernel_optval) { - set_fs(KERNEL_DS); - optval = (char __user __force *)kernel_optval; - } - + if (kernel_optval) + optval = KERNEL_SOCKPTR(kernel_optval); if (level == SOL_SOCKET && !sock_use_custom_sol_socket(sock)) - err = sock_setsockopt(sock, level, optname, - USER_SOCKPTR(optval), optlen); + err = sock_setsockopt(sock, level, optname, optval, optlen); else if (unlikely(!sock->ops->setsockopt)) err = -EOPNOTSUPP; else err = sock->ops->setsockopt(sock, level, optname, optval, optlen); - - if (kernel_optval) { - set_fs(oldfs); - kfree(kernel_optval); - } - + kfree(kernel_optval); out_put: fput_light(sock->file, fput_needed); return err; diff --git a/net/tipc/socket.c b/net/tipc/socket.c index fc388cef6471..07419f36116a 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -3103,7 +3103,7 @@ static int tipc_sk_leave(struct tipc_sock *tsk) * Returns 0 on success, errno otherwise */ static int tipc_setsockopt(struct socket *sock, int lvl, int opt, - char __user *ov, unsigned int ol) + sockptr_t ov, unsigned int ol) { struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); @@ -3124,17 +3124,17 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt, case TIPC_NODELAY: if (ol < sizeof(value)) return -EINVAL; - if (get_user(value, (u32 __user *)ov)) + if (copy_from_sockptr(&value, ov, sizeof(u32))) return -EFAULT; break; case TIPC_GROUP_JOIN: if (ol < sizeof(mreq)) return -EINVAL; - if (copy_from_user(&mreq, ov, sizeof(mreq))) + if (copy_from_sockptr(&mreq, ov, sizeof(mreq))) return -EFAULT; break; default: - if (ov || ol) + if (!sockptr_is_null(ov) || ol) return -EINVAL; } diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index ec10041c6b7d..d77f7d821130 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -450,7 +450,7 @@ static int tls_getsockopt(struct sock *sk, int level, int optname, return do_tls_getsockopt(sk, optname, optval, optlen); } -static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval, +static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval, unsigned int optlen, int tx) { struct tls_crypto_info *crypto_info; @@ -460,7 +460,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval, int rc = 0; int conf; - if (!optval || (optlen < sizeof(*crypto_info))) { + if (sockptr_is_null(optval) || (optlen < sizeof(*crypto_info))) { rc = -EINVAL; goto out; } @@ -479,7 +479,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval, goto out; } - rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info)); + rc = copy_from_sockptr(crypto_info, optval, sizeof(*crypto_info)); if (rc) { rc = -EFAULT; goto err_crypto_info; @@ -522,8 +522,9 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval, goto err_crypto_info; } - rc = copy_from_user(crypto_info + 1, optval + 
sizeof(*crypto_info), - optlen - sizeof(*crypto_info)); + sockptr_advance(optval, sizeof(*crypto_info)); + rc = copy_from_sockptr(crypto_info + 1, optval, + optlen - sizeof(*crypto_info)); if (rc) { rc = -EFAULT; goto err_crypto_info; @@ -579,8 +580,8 @@ out: return rc; } -static int do_tls_setsockopt(struct sock *sk, int optname, - char __user *optval, unsigned int optlen) +static int do_tls_setsockopt(struct sock *sk, int optname, sockptr_t optval, + unsigned int optlen) { int rc = 0; @@ -600,7 +601,7 @@ static int do_tls_setsockopt(struct sock *sk, int optname, } static int tls_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct tls_context *ctx = tls_get_ctx(sk); diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index df204c6761c4..27bbcfad9c17 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -1517,7 +1517,7 @@ static void vsock_update_buffer_size(struct vsock_sock *vsk, static int vsock_stream_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, + sockptr_t optval, unsigned int optlen) { int err; @@ -1535,7 +1535,7 @@ static int vsock_stream_setsockopt(struct socket *sock, err = -EINVAL; \ goto exit; \ } \ - if (copy_from_user(&_v, optval, sizeof(_v)) != 0) { \ + if (copy_from_sockptr(&_v, optval, sizeof(_v)) != 0) { \ err = -EFAULT; \ goto exit; \ } \ diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index d5b09bbff375..0bbb283f23c9 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -431,7 +431,7 @@ void x25_destroy_socket_from_timer(struct sock *sk) */ static int x25_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { int opt; struct sock *sk = sock->sk; @@ -445,7 +445,7 @@ static int x25_setsockopt(struct socket *sock, int level, int optname, goto out; rc = -EFAULT; - if (get_user(opt, (int __user *)optval)) + if (copy_from_sockptr(&opt, optval, sizeof(int))) goto out; if (opt) diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 26e3bba8c204..2e94a7e94671 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -702,7 +702,7 @@ struct xdp_umem_reg_v1 { }; static int xsk_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct xdp_sock *xs = xdp_sk(sk); @@ -720,7 +720,7 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname, if (optlen < sizeof(entries)) return -EINVAL; - if (copy_from_user(&entries, optval, sizeof(entries))) + if (copy_from_sockptr(&entries, optval, sizeof(entries))) return -EFAULT; mutex_lock(&xs->mutex); @@ -747,7 +747,7 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname, else if (optlen < sizeof(mr)) mr_size = sizeof(struct xdp_umem_reg_v1); - if (copy_from_user(&mr, optval, mr_size)) + if (copy_from_sockptr(&mr, optval, mr_size)) return -EFAULT; mutex_lock(&xs->mutex); @@ -774,7 +774,7 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname, struct xsk_queue **q; int entries; - if (copy_from_user(&entries, optval, sizeof(entries))) + if (copy_from_sockptr(&entries, optval, sizeof(entries))) return -EFAULT; mutex_lock(&xs->mutex); -- cgit v1.2.3 From 6d04fe15f78acdf8e32329e208552e226f7a8ae6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:09:08 +0200 Subject: net: optimize the sockptr_t for unified kernel/user address 
spaces For architectures like x86 and arm64 we don't need the separate bit to indicate that a pointer is a kernel pointer as the address spaces are unified. That way the sockptr_t can be reduced to a union of two pointers, which leads to nicer calling conventions. The only caveat is that we need to check that users don't pass in kernel address and thus gain access to kernel memory. Thus the USER_SOCKPTR helper is replaced with a init_user_sockptr function that does this check and returns an error if it fails. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- include/linux/sockptr.h | 32 ++++++++++++++++++++++++++++++-- net/ipv4/bpfilter/sockopt.c | 14 ++++++++------ net/socket.c | 6 +++++- 3 files changed, 43 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h index 700856e13ea0..7d5cdb2b30b5 100644 --- a/include/linux/sockptr.h +++ b/include/linux/sockptr.h @@ -8,9 +8,34 @@ #ifndef _LINUX_SOCKPTR_H #define _LINUX_SOCKPTR_H +#include #include #include +#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE +typedef union { + void *kernel; + void __user *user; +} sockptr_t; + +static inline bool sockptr_is_kernel(sockptr_t sockptr) +{ + return (unsigned long)sockptr.kernel >= TASK_SIZE; +} + +static inline sockptr_t KERNEL_SOCKPTR(void *p) +{ + return (sockptr_t) { .kernel = p }; +} + +static inline int __must_check init_user_sockptr(sockptr_t *sp, void __user *p) +{ + if ((unsigned long)p >= TASK_SIZE) + return -EFAULT; + sp->user = p; + return 0; +} +#else /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */ typedef struct { union { void *kernel; @@ -29,10 +54,13 @@ static inline sockptr_t KERNEL_SOCKPTR(void *p) return (sockptr_t) { .kernel = p, .is_kernel = true }; } -static inline sockptr_t USER_SOCKPTR(void __user *p) +static inline int __must_check init_user_sockptr(sockptr_t *sp, void __user *p) { - return (sockptr_t) { .user = p }; + sp->user = p; + sp->is_kernel = false; + return 0; } +#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */ static inline bool sockptr_is_null(sockptr_t sockptr) { diff --git a/net/ipv4/bpfilter/sockopt.c b/net/ipv4/bpfilter/sockopt.c index 1b34cb9a7708..94f18d2352d0 100644 --- a/net/ipv4/bpfilter/sockopt.c +++ b/net/ipv4/bpfilter/sockopt.c @@ -57,16 +57,18 @@ int bpfilter_ip_set_sockopt(struct sock *sk, int optname, sockptr_t optval, return bpfilter_mbox_request(sk, optname, optval, optlen, true); } -int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, - int __user *optlen) +int bpfilter_ip_get_sockopt(struct sock *sk, int optname, + char __user *user_optval, int __user *optlen) { - int len; + sockptr_t optval; + int err, len; if (get_user(len, optlen)) return -EFAULT; - - return bpfilter_mbox_request(sk, optname, USER_SOCKPTR(optval), len, - false); + err = init_user_sockptr(&optval, user_optval); + if (err) + return err; + return bpfilter_mbox_request(sk, optname, optval, len, false); } static int __init bpfilter_sockopt_init(void) diff --git a/net/socket.c b/net/socket.c index e44b8ac47f6f..94ca4547cd7c 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2097,7 +2097,7 @@ static bool sock_use_custom_sol_socket(const struct socket *sock) int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval, int optlen) { - sockptr_t optval = USER_SOCKPTR(user_optval); + sockptr_t optval; char *kernel_optval = NULL; int err, fput_needed; struct socket *sock; @@ -2105,6 +2105,10 @@ int __sys_setsockopt(int fd, int level, int optname, char 
__user *user_optval, if (optlen < 0) return -EINVAL; + err = init_user_sockptr(&optval, user_optval); + if (err) + return err; + sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) return err; -- cgit v1.2.3 From 178c49d9f9a4b5ade00c93480d714708fe971e24 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Fri, 24 Jul 2020 09:03:09 -0400 Subject: icmp: prepare rfc 4884 for ipv6 The RFC 4884 spec is largely the same between IPv4 and IPv6. Factor out the IPv4 specific parts in preparation for IPv6 support: - icmp types supported - icmp header size, and thus offset to original datagram start - datagram length field offset in icmp(6)hdr. - datagram length field word size: 4B for IPv4, 8B for IPv6. Signed-off-by: Willem de Bruijn Signed-off-by: David S. Miller --- include/linux/icmp.h | 3 ++- net/ipv4/icmp.c | 17 ++++------------- net/ipv4/ip_sockglue.c | 14 +++++++++++++- 3 files changed, 19 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/include/linux/icmp.h b/include/linux/icmp.h index 8fc38a34cb20..0af4d210ee31 100644 --- a/include/linux/icmp.h +++ b/include/linux/icmp.h @@ -37,6 +37,7 @@ static inline bool icmp_is_err(int type) } void ip_icmp_error_rfc4884(const struct sk_buff *skb, - struct sock_ee_data_rfc4884 *out); + struct sock_ee_data_rfc4884 *out, + int thlen, int off); #endif /* _LINUX_ICMP_H */ diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 8d2654cdbd77..7498c58460a1 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -1151,24 +1151,15 @@ static bool ip_icmp_error_rfc4884_validate(const struct sk_buff *skb, int off) } void ip_icmp_error_rfc4884(const struct sk_buff *skb, - struct sock_ee_data_rfc4884 *out) + struct sock_ee_data_rfc4884 *out, + int thlen, int off) { - int hlen, off; - - switch (icmp_hdr(skb)->type) { - case ICMP_DEST_UNREACH: - case ICMP_TIME_EXCEEDED: - case ICMP_PARAMETERPROB: - break; - default: - return; - } + int hlen; /* original datagram headers: end of icmph to payload (skb->data) */ - hlen = -skb_transport_offset(skb) - sizeof(struct icmphdr); + hlen = -skb_transport_offset(skb) - thlen; /* per rfc 4884: minimal datagram length of 128 bytes */ - off = icmp_hdr(skb)->un.reserved[1] * sizeof(u32); if (off < 128 || off < hlen) return; diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 8dc027e54c5b..d2c223554ff7 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -390,6 +390,18 @@ int ip_ra_control(struct sock *sk, unsigned char on, return 0; } +static void ipv4_icmp_error_rfc4884(const struct sk_buff *skb, + struct sock_ee_data_rfc4884 *out) +{ + switch (icmp_hdr(skb)->type) { + case ICMP_DEST_UNREACH: + case ICMP_TIME_EXCEEDED: + case ICMP_PARAMETERPROB: + ip_icmp_error_rfc4884(skb, out, sizeof(struct icmphdr), + icmp_hdr(skb)->un.reserved[1] * 4); + } +} + void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, u32 info, u8 *payload) { @@ -413,7 +425,7 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, if (skb_pull(skb, payload - skb->data)) { if (inet_sk(sk)->recverr_rfc4884) - ip_icmp_error_rfc4884(skb, &serr->ee.ee_rfc4884); + ipv4_icmp_error_rfc4884(skb, &serr->ee.ee_rfc4884); skb_reset_transport_header(skb); if (sock_queue_err_skb(sk, skb) == 0) -- cgit v1.2.3 From 01370434df85eb76ecb1527a4466013c4aca2436 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Fri, 24 Jul 2020 09:03:10 -0400 Subject: icmp6: support rfc 4884 Extend the rfc 4884 read interface introduced for ipv4 in commit eba75c587e81 ("icmp: support rfc 4884") to ipv6. 
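 Purely as an illustration (not part of this patch), a userspace consumer could opt in and read the extension data roughly as sketched below. SOL_IPV6, IPV6_RECVERR, the new IPV6_RECVERR_RFC4884 option and the ee_rfc4884 member of struct sock_extended_err come from this series and the earlier IPv4 commit it extends; the surrounding socket boilerplate and the exact sock_ee_data_rfc4884 field layout (len/flags) are assumptions that should be checked against the final uapi headers.

    /*
     * Hedged sketch: enable RFC 4884 extension reporting on an IPv6
     * datagram socket and inspect it on the MSG_ERRQUEUE path.
     * Assumes uapi headers that already carry the ee_rfc4884 union
     * member; IPV6_RECVERR_RFC4884 (31) is defined locally in case
     * the installed headers predate this patch.
     */
    #include <errno.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/errqueue.h>

    #ifndef IPV6_RECVERR_RFC4884
    #define IPV6_RECVERR_RFC4884 31    /* value introduced by this patch */
    #endif

    static int enable_rfc4884(int fd)
    {
        int one = 1;

        /* IPV6_RECVERR queues ICMPv6 errors; the new option additionally
         * asks the kernel to fill in the RFC 4884 extension descriptor. */
        if (setsockopt(fd, SOL_IPV6, IPV6_RECVERR, &one, sizeof(one)) ||
            setsockopt(fd, SOL_IPV6, IPV6_RECVERR_RFC4884, &one, sizeof(one)))
            return -errno;
        return 0;
    }

    static void drain_errqueue(int fd)
    {
        char cbuf[512], data[64];
        struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
        struct msghdr msg = {
            .msg_iov = &iov, .msg_iovlen = 1,
            .msg_control = cbuf, .msg_controllen = sizeof(cbuf),
        };
        struct cmsghdr *cm;

        if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
            return;

        for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
            struct sock_extended_err *ee;

            if (cm->cmsg_level != SOL_IPV6 || cm->cmsg_type != IPV6_RECVERR)
                continue;

            ee = (struct sock_extended_err *)CMSG_DATA(cm);
            if (ee->ee_origin != SO_EE_ORIGIN_ICMP6)
                continue;

            /* With the option enabled, destination-unreachable and
             * time-exceeded messages carry the RFC 4884 descriptor. */
            printf("rfc4884: len=%u flags=0x%x\n",
                   ee->ee_rfc4884.len, ee->ee_rfc4884.flags);
        }
    }
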
Add socket option SOL_IPV6/IPV6_RECVERR_RFC4884. Changes v1->v2: - make ipv6_icmp_error_rfc4884 static (file scope) Signed-off-by: Willem de Bruijn Signed-off-by: David S. Miller --- include/linux/ipv6.h | 1 + include/uapi/linux/icmpv6.h | 1 + include/uapi/linux/in6.h | 1 + net/ipv4/icmp.c | 1 + net/ipv6/datagram.c | 16 ++++++++++++++++ net/ipv6/ipv6_sockglue.c | 12 ++++++++++++ 6 files changed, 32 insertions(+) (limited to 'include') diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 8d8f877e7f81..a44789d027cc 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -283,6 +283,7 @@ struct ipv6_pinfo { autoflowlabel:1, autoflowlabel_set:1, mc_all:1, + recverr_rfc4884:1, rtalert_isolate:1; __u8 min_hopcount; __u8 tclass; diff --git a/include/uapi/linux/icmpv6.h b/include/uapi/linux/icmpv6.h index 2622b5a3e616..c1661febc2dc 100644 --- a/include/uapi/linux/icmpv6.h +++ b/include/uapi/linux/icmpv6.h @@ -68,6 +68,7 @@ struct icmp6hdr { #define icmp6_mtu icmp6_dataun.un_data32[0] #define icmp6_unused icmp6_dataun.un_data32[0] #define icmp6_maxdelay icmp6_dataun.un_data16[0] +#define icmp6_datagram_len icmp6_dataun.un_data8[0] #define icmp6_router icmp6_dataun.u_nd_advt.router #define icmp6_solicited icmp6_dataun.u_nd_advt.solicited #define icmp6_override icmp6_dataun.u_nd_advt.override diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h index 9f2273a08356..5ad396a57eb3 100644 --- a/include/uapi/linux/in6.h +++ b/include/uapi/linux/in6.h @@ -179,6 +179,7 @@ struct in6_flowlabel_req { #define IPV6_LEAVE_ANYCAST 28 #define IPV6_MULTICAST_ALL 29 #define IPV6_ROUTER_ALERT_ISOLATE 30 +#define IPV6_RECVERR_RFC4884 31 /* IPV6_MTU_DISCOVER values */ #define IPV6_PMTUDISC_DONT 0 diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 7498c58460a1..cf36f955bfe6 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -1173,6 +1173,7 @@ void ip_icmp_error_rfc4884(const struct sk_buff *skb, if (!ip_icmp_error_rfc4884_validate(skb, off)) out->flags |= SO_EE_RFC4884_FLAG_INVALID; } +EXPORT_SYMBOL_GPL(ip_icmp_error_rfc4884); int icmp_err(struct sk_buff *skb, u32 info) { diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 390bedde21a5..cc8ad7ddecda 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -284,6 +285,17 @@ int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr, } EXPORT_SYMBOL_GPL(ip6_datagram_connect_v6_only); +static void ipv6_icmp_error_rfc4884(const struct sk_buff *skb, + struct sock_ee_data_rfc4884 *out) +{ + switch (icmp6_hdr(skb)->icmp6_type) { + case ICMPV6_TIME_EXCEED: + case ICMPV6_DEST_UNREACH: + ip_icmp_error_rfc4884(skb, out, sizeof(struct icmp6hdr), + icmp6_hdr(skb)->icmp6_datagram_len * 8); + } +} + void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, u32 info, u8 *payload) { @@ -313,6 +325,10 @@ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, serr->port = port; __skb_pull(skb, payload - skb->data); + + if (inet6_sk(sk)->recverr_rfc4884) + ipv6_icmp_error_rfc4884(skb, &serr->ee.ee_rfc4884); + skb_reset_transport_header(skb); if (sock_queue_err_skb(sk, skb)) diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index d2282f5c9760..20c740976334 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -965,6 +965,14 @@ done: np->rxopt.bits.recvfragsize = valbool; retv = 0; break; + case IPV6_RECVERR_RFC4884: + if (optlen < sizeof(int)) + goto e_inval; + if (val < 0 || val > 1) + 
goto e_inval; + np->recverr_rfc4884 = valbool; + retv = 0; + break; } release_sock(sk); @@ -1439,6 +1447,10 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, val = np->rtalert_isolate; break; + case IPV6_RECVERR_RFC4884: + val = np->recverr_rfc4884; + break; + default: return -ENOPROTOOPT; } -- cgit v1.2.3 From e6b9489acc7e679fe6107a98ab315b28977c0553 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 18 Jul 2020 17:32:32 -0700 Subject: scsi: scsi_transport_iscsi: Drop a duplicated word Drop the repeated word "the" in a comment. Link: https://lore.kernel.org/r/20200719003232.21301-1-rdunlap@infradead.org Cc: "James E.J. Bottomley" Cc: "Martin K. Petersen" Cc: linux-scsi@vger.kernel.org Signed-off-by: Randy Dunlap Signed-off-by: Martin K. Petersen --- include/scsi/scsi_transport_iscsi.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h index bdcb6d69d154..8a26a2ffa952 100644 --- a/include/scsi/scsi_transport_iscsi.h +++ b/include/scsi/scsi_transport_iscsi.h @@ -57,7 +57,7 @@ struct iscsi_bus_flash_conn; * When not offloading the data path, this is called * from the scsi work queue without the session lock. * @xmit_task Requests LLD to transfer cmd task. Returns 0 or the - * the number of bytes transferred on success, and -Exyz + * number of bytes transferred on success, and -Exyz * value on error. When offloading the data path, this * is called from queuecommand with the session lock, or * from the iscsi_conn_send_pdu context with the session -- cgit v1.2.3 From 0f206514749be9988a083ca364b889a7fcff7f78 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 10 Jul 2020 00:20:08 -0700 Subject: scsi: firmware: qcom_scm: Add support for programming inline crypto keys Add support for the Inline Crypto Engine (ICE) key programming interface that's needed for the ufs-qcom driver to use inline encryption on Snapdragon SoCs. This interface consists of two SCM calls: one to program a key into a keyslot, and one to invalidate a keyslot. Although the UFS specification defines a standard way to do this, on these SoCs the Linux kernel isn't permitted to access the needed crypto configuration registers directly; these SCM calls must be used instead. Link: https://lore.kernel.org/r/20200710072013.177481-2-ebiggers@kernel.org Acked-by: Bjorn Andersson Signed-off-by: Eric Biggers Signed-off-by: Martin K. Petersen --- drivers/firmware/qcom_scm.c | 101 ++++++++++++++++++++++++++++++++++++++++++++ drivers/firmware/qcom_scm.h | 4 ++ include/linux/qcom_scm.h | 19 +++++++++ 3 files changed, 124 insertions(+) (limited to 'include') diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 0e7233a20f34..1a8eb1b42b1e 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -923,6 +923,107 @@ int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size) } EXPORT_SYMBOL(qcom_scm_ocmem_unlock); +/** + * qcom_scm_ice_available() - Is the ICE key programming interface available? + * + * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and + * qcom_scm_ice_set_key() are available. 
+ */ +bool qcom_scm_ice_available(void) +{ + return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES, + QCOM_SCM_ES_INVALIDATE_ICE_KEY) && + __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES, + QCOM_SCM_ES_CONFIG_SET_ICE_KEY); +} +EXPORT_SYMBOL(qcom_scm_ice_available); + +/** + * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key + * @index: the keyslot to invalidate + * + * The UFSHCI standard defines a standard way to do this, but it doesn't work on + * these SoCs; only this SCM call does. + * + * Return: 0 on success; -errno on failure. + */ +int qcom_scm_ice_invalidate_key(u32 index) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_ES, + .cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY, + .arginfo = QCOM_SCM_ARGS(1), + .args[0] = index, + .owner = ARM_SMCCC_OWNER_SIP, + }; + + return qcom_scm_call(__scm->dev, &desc, NULL); +} +EXPORT_SYMBOL(qcom_scm_ice_invalidate_key); + +/** + * qcom_scm_ice_set_key() - Set an inline encryption key + * @index: the keyslot into which to set the key + * @key: the key to program + * @key_size: the size of the key in bytes + * @cipher: the encryption algorithm the key is for + * @data_unit_size: the encryption data unit size, i.e. the size of each + * individual plaintext and ciphertext. Given in 512-byte + * units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc. + * + * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it + * can then be used to encrypt/decrypt UFS I/O requests inline. + * + * The UFSHCI standard defines a standard way to do this, but it doesn't work on + * these SoCs; only this SCM call does. + * + * Return: 0 on success; -errno on failure. + */ +int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size, + enum qcom_scm_ice_cipher cipher, u32 data_unit_size) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_ES, + .cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY, + .arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW, + QCOM_SCM_VAL, QCOM_SCM_VAL, + QCOM_SCM_VAL), + .args[0] = index, + .args[2] = key_size, + .args[3] = cipher, + .args[4] = data_unit_size, + .owner = ARM_SMCCC_OWNER_SIP, + }; + void *keybuf; + dma_addr_t key_phys; + int ret; + + /* + * 'key' may point to vmalloc()'ed memory, but we need to pass a + * physical address that's been properly flushed. The sanctioned way to + * do this is by using the DMA API. But as is best practice for crypto + * keys, we also must wipe the key after use. This makes kmemdup() + + * dma_map_single() not clearly correct, since the DMA API can use + * bounce buffers. Instead, just use dma_alloc_coherent(). Programming + * keys is normally rare and thus not performance-critical. + */ + + keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys, + GFP_KERNEL); + if (!keybuf) + return -ENOMEM; + memcpy(keybuf, key, key_size); + desc.args[1] = key_phys; + + ret = qcom_scm_call(__scm->dev, &desc, NULL); + + memzero_explicit(keybuf, key_size); + + dma_free_coherent(__scm->dev, key_size, keybuf, key_phys); + return ret; +} +EXPORT_SYMBOL(qcom_scm_ice_set_key); + /** * qcom_scm_hdcp_available() - Check if secure environment supports HDCP. 
* diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h index d9ed670da222..38ea614d29fe 100644 --- a/drivers/firmware/qcom_scm.h +++ b/drivers/firmware/qcom_scm.h @@ -103,6 +103,10 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, #define QCOM_SCM_OCMEM_LOCK_CMD 0x01 #define QCOM_SCM_OCMEM_UNLOCK_CMD 0x02 +#define QCOM_SCM_SVC_ES 0x10 /* Enterprise Security */ +#define QCOM_SCM_ES_INVALIDATE_ICE_KEY 0x03 +#define QCOM_SCM_ES_CONFIG_SET_ICE_KEY 0x04 + #define QCOM_SCM_SVC_HDCP 0x11 #define QCOM_SCM_HDCP_INVOKE 0x01 diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 3d6a24697761..2e1193a3fb5f 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -44,6 +44,13 @@ enum qcom_scm_sec_dev_id { QCOM_SCM_ICE_DEV_ID = 20, }; +enum qcom_scm_ice_cipher { + QCOM_SCM_ICE_CIPHER_AES_128_XTS = 0, + QCOM_SCM_ICE_CIPHER_AES_128_CBC = 1, + QCOM_SCM_ICE_CIPHER_AES_256_XTS = 3, + QCOM_SCM_ICE_CIPHER_AES_256_CBC = 4, +}; + #define QCOM_SCM_VMID_HLOS 0x3 #define QCOM_SCM_VMID_MSS_MSA 0xF #define QCOM_SCM_VMID_WLAN 0x18 @@ -88,6 +95,12 @@ extern int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, extern int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size); +extern bool qcom_scm_ice_available(void); +extern int qcom_scm_ice_invalidate_key(u32 index); +extern int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size, + enum qcom_scm_ice_cipher cipher, + u32 data_unit_size); + extern bool qcom_scm_hdcp_available(void); extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp); @@ -138,6 +151,12 @@ static inline int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, static inline int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size) { return -ENODEV; } +static inline bool qcom_scm_ice_available(void) { return false; } +static inline int qcom_scm_ice_invalidate_key(u32 index) { return -ENODEV; } +static inline int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size, + enum qcom_scm_ice_cipher cipher, + u32 data_unit_size) { return -ENODEV; } + static inline bool qcom_scm_hdcp_available(void) { return false; } static inline int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) { return -ENODEV; } -- cgit v1.2.3 From e73a5e8e8003978e65d368f5475e36e1e2a0613b Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Mon, 22 Jun 2020 08:30:22 +0200 Subject: scsi: core: Only return started requests from scsi_host_find_tag() scsi_host_find_tag() is used by the drivers to return a scsi command based on the command tag. Typically it's used from the interrupt handler to fetch the command associated with a value returned from hardware. Some drivers like fnic or qla4xxx, however, also use it also to traverse outstanding commands. With the current implementation scsi_host_find_tag() will return commands even if they are not started (i.e. passed to the driver). This will result in random errors with those drivers. With this patch scsi_host_find_tag() will only return 'started' commands (i.e. commands which have been passed to the drivers) thus avoiding the above issue. The other use cases will be unaffected as the interrupt handler naturally will only ever return 'started' requests. Link: https://lore.kernel.org/r/20200622063022.67891-1-hare@suse.de Signed-off-by: Hannes Reinecke Signed-off-by: Martin K. 
Petersen --- include/scsi/scsi_tcq.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h index 6053d46e794e..ea7848e74d25 100644 --- a/include/scsi/scsi_tcq.h +++ b/include/scsi/scsi_tcq.h @@ -34,7 +34,7 @@ static inline struct scsi_cmnd *scsi_host_find_tag(struct Scsi_Host *shost, blk_mq_unique_tag_to_tag(tag)); } - if (!req) + if (!req || !blk_mq_request_started(req)) return NULL; return blk_mq_rq_to_pdu(req); } -- cgit v1.2.3 From 2d38dbf89a06d0f689daec9842c5d3295c49777f Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Fri, 24 Jul 2020 14:36:22 -0700 Subject: test_firmware: Test platform fw loading on non-EFI systems On non-EFI systems, it wasn't possible to test the platform firmware loader because it will have never set "checked_fw" during __init. Instead, allow the test code to override this check. Additionally split the declarations into a private header file so it there is greater enforcement of the symbol visibility. Fixes: 548193cba2a7 ("test_firmware: add support for firmware_request_platform") Cc: stable@vger.kernel.org Acked-by: Scott Branden Signed-off-by: Kees Cook Link: https://lore.kernel.org/r/20200724213640.389191-2-keescook@chromium.org Signed-off-by: Greg Kroah-Hartman --- drivers/firmware/efi/embedded-firmware.c | 21 ++++++++++++++++----- drivers/firmware/efi/embedded-firmware.h | 19 +++++++++++++++++++ include/linux/efi_embedded_fw.h | 13 ------------- lib/test_firmware.c | 5 +++++ 4 files changed, 40 insertions(+), 18 deletions(-) create mode 100644 drivers/firmware/efi/embedded-firmware.h (limited to 'include') diff --git a/drivers/firmware/efi/embedded-firmware.c b/drivers/firmware/efi/embedded-firmware.c index a1b199de9006..0fb03cd0a5a2 100644 --- a/drivers/firmware/efi/embedded-firmware.c +++ b/drivers/firmware/efi/embedded-firmware.c @@ -14,11 +14,22 @@ #include #include +#include "embedded-firmware.h" + +#ifdef CONFIG_TEST_FIRMWARE +# define EFI_EMBEDDED_FW_VISIBILITY +#else +# define EFI_EMBEDDED_FW_VISIBILITY static +#endif + +EFI_EMBEDDED_FW_VISIBILITY LIST_HEAD(efi_embedded_fw_list); +EFI_EMBEDDED_FW_VISIBILITY bool efi_embedded_fw_checked; + /* Exported for use by lib/test_firmware.c only */ -LIST_HEAD(efi_embedded_fw_list); +#ifdef CONFIG_TEST_FIRMWARE EXPORT_SYMBOL_GPL(efi_embedded_fw_list); - -static bool checked_for_fw; +EXPORT_SYMBOL_GPL(efi_embedded_fw_checked); +#endif static const struct dmi_system_id * const embedded_fw_table[] = { #ifdef CONFIG_TOUCHSCREEN_DMI @@ -119,14 +130,14 @@ void __init efi_check_for_embedded_firmwares(void) } } - checked_for_fw = true; + efi_embedded_fw_checked = true; } int efi_get_embedded_fw(const char *name, const u8 **data, size_t *size) { struct efi_embedded_fw *iter, *fw = NULL; - if (!checked_for_fw) { + if (!efi_embedded_fw_checked) { pr_warn("Warning %s called while we did not check for embedded fw\n", __func__); return -ENOENT; diff --git a/drivers/firmware/efi/embedded-firmware.h b/drivers/firmware/efi/embedded-firmware.h new file mode 100644 index 000000000000..34113316d068 --- /dev/null +++ b/drivers/firmware/efi/embedded-firmware.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _EFI_EMBEDDED_FW_INTERNAL_H_ +#define _EFI_EMBEDDED_FW_INTERNAL_H_ + +/* + * This struct and efi_embedded_fw_list are private to the efi-embedded fw + * implementation they only in separate header for use by lib/test_firmware.c. 
+ */ +struct efi_embedded_fw { + struct list_head list; + const char *name; + const u8 *data; + size_t length; +}; + +extern struct list_head efi_embedded_fw_list; +extern bool efi_embedded_fw_checked; + +#endif /* _EFI_EMBEDDED_FW_INTERNAL_H_ */ diff --git a/include/linux/efi_embedded_fw.h b/include/linux/efi_embedded_fw.h index 57eac5241303..4ad5db9f5312 100644 --- a/include/linux/efi_embedded_fw.h +++ b/include/linux/efi_embedded_fw.h @@ -7,19 +7,6 @@ #define EFI_EMBEDDED_FW_PREFIX_LEN 8 -/* - * This struct and efi_embedded_fw_list are private to the efi-embedded fw - * implementation they are in this header for use by lib/test_firmware.c only! - */ -struct efi_embedded_fw { - struct list_head list; - const char *name; - const u8 *data; - size_t length; -}; - -extern struct list_head efi_embedded_fw_list; - /** * struct efi_embedded_fw_desc - This struct is used by the EFI embedded-fw * code to search for embedded firmwares. diff --git a/lib/test_firmware.c b/lib/test_firmware.c index 9fee2b93a8d1..62af792e151c 100644 --- a/lib/test_firmware.c +++ b/lib/test_firmware.c @@ -489,6 +489,7 @@ out: static DEVICE_ATTR_WO(trigger_request); #ifdef CONFIG_EFI_EMBEDDED_FIRMWARE +#include "../drivers/firmware/efi/embedded-firmware.h" static ssize_t trigger_request_platform_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -501,6 +502,7 @@ static ssize_t trigger_request_platform_store(struct device *dev, }; struct efi_embedded_fw efi_embedded_fw; const struct firmware *firmware = NULL; + bool saved_efi_embedded_fw_checked; char *name; int rc; @@ -513,6 +515,8 @@ static ssize_t trigger_request_platform_store(struct device *dev, efi_embedded_fw.data = (void *)test_data; efi_embedded_fw.length = sizeof(test_data); list_add(&efi_embedded_fw.list, &efi_embedded_fw_list); + saved_efi_embedded_fw_checked = efi_embedded_fw_checked; + efi_embedded_fw_checked = true; pr_info("loading '%s'\n", name); rc = firmware_request_platform(&firmware, name, dev); @@ -530,6 +534,7 @@ static ssize_t trigger_request_platform_store(struct device *dev, rc = count; out: + efi_embedded_fw_checked = saved_efi_embedded_fw_checked; release_firmware(firmware); list_del(&efi_embedded_fw.list); kfree(name); -- cgit v1.2.3 From d721a43ff69cd473019c3b77aacb76b09102aca3 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sat, 25 Jul 2020 20:00:27 +0800 Subject: bcache: increase super block version for cache device and backing device The new added super block version BCACHE_SB_VERSION_BDEV_WITH_FEATURES (5) BCACHE_SB_VERSION_CDEV_WITH_FEATURES value (6), is for the feature set bits. Devices have super block version equal to the new version will have three new members for feature set bits in the on-disk super block, __le64 feature_compat; __le64 feature_incompat; __le64 feature_ro_compat; They are used for further new features which may introduce on-disk format change, and avoid unncessary super block version increase. The very basic features handling code skeleton is also initialized in this patch. 
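To make the intended use of this skeleton concrete, a small sketch of a feature bit and the helpers the macro generates for it; the "example" feature is invented for illustration and is not part of this series:

/* Sketch only: a made-up incompat feature using the macros added below. */
#include "features.h"

#define BCH_FEATURE_INCOMPAT_EXAMPLE    0x0001

BCH_FEATURE_INCOMPAT_FUNCS(example, EXAMPLE);

static void apply_example_feature(struct cache_sb *sb, bool enable)
{
        if (enable)
                bch_set_feature_example(sb);    /* sb->feature_incompat |= 0x0001 */
        else
                bch_clear_feature_example(sb);

        if (bch_has_feature_example(sb)) {
                /* interpret the extra on-disk state guarded by the bit */
        }
}

A real feature additionally gets OR-ed into the corresponding BCH_FEATURE_*_SUUP mask, as the large bucket patch further below does for LARGE_BUCKET.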
Signed-off-by: Coly Li Reviewed-by: Hannes Reinecke Signed-off-by: Jens Axboe --- drivers/md/bcache/features.h | 78 ++++++++++++++++++++++++++++++++++++++++++++ drivers/md/bcache/super.c | 32 ++++++++++++++++-- include/uapi/linux/bcache.h | 29 +++++++++++----- 3 files changed, 128 insertions(+), 11 deletions(-) create mode 100644 drivers/md/bcache/features.h (limited to 'include') diff --git a/drivers/md/bcache/features.h b/drivers/md/bcache/features.h new file mode 100644 index 000000000000..ae7df37b9862 --- /dev/null +++ b/drivers/md/bcache/features.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _BCACHE_FEATURES_H +#define _BCACHE_FEATURES_H + +#include +#include +#include + +#define BCH_FEATURE_COMPAT 0 +#define BCH_FEATURE_RO_COMPAT 1 +#define BCH_FEATURE_INCOMPAT 2 +#define BCH_FEATURE_TYPE_MASK 0x03 + +#define BCH_FEATURE_COMPAT_SUUP 0 +#define BCH_FEATURE_RO_COMPAT_SUUP 0 +#define BCH_FEATURE_INCOMPAT_SUUP 0 + +#define BCH_HAS_COMPAT_FEATURE(sb, mask) \ + ((sb)->feature_compat & (mask)) +#define BCH_HAS_RO_COMPAT_FEATURE(sb, mask) \ + ((sb)->feature_ro_compat & (mask)) +#define BCH_HAS_INCOMPAT_FEATURE(sb, mask) \ + ((sb)->feature_incompat & (mask)) + +/* Feature set definition */ + +#define BCH_FEATURE_COMPAT_FUNCS(name, flagname) \ +static inline int bch_has_feature_##name(struct cache_sb *sb) \ +{ \ + return (((sb)->feature_compat & \ + BCH##_FEATURE_COMPAT_##flagname) != 0); \ +} \ +static inline void bch_set_feature_##name(struct cache_sb *sb) \ +{ \ + (sb)->feature_compat |= \ + BCH##_FEATURE_COMPAT_##flagname; \ +} \ +static inline void bch_clear_feature_##name(struct cache_sb *sb) \ +{ \ + (sb)->feature_compat &= \ + ~BCH##_FEATURE_COMPAT_##flagname; \ +} + +#define BCH_FEATURE_RO_COMPAT_FUNCS(name, flagname) \ +static inline int bch_has_feature_##name(struct cache_sb *sb) \ +{ \ + return (((sb)->feature_ro_compat & \ + BCH##_FEATURE_RO_COMPAT_##flagname) != 0); \ +} \ +static inline void bch_set_feature_##name(struct cache_sb *sb) \ +{ \ + (sb)->feature_ro_compat |= \ + BCH##_FEATURE_RO_COMPAT_##flagname; \ +} \ +static inline void bch_clear_feature_##name(struct cache_sb *sb) \ +{ \ + (sb)->feature_ro_compat &= \ + ~BCH##_FEATURE_RO_COMPAT_##flagname; \ +} + +#define BCH_FEATURE_INCOMPAT_FUNCS(name, flagname) \ +static inline int bch_has_feature_##name(struct cache_sb *sb) \ +{ \ + return (((sb)->feature_incompat & \ + BCH##_FEATURE_INCOMPAT_##flagname) != 0); \ +} \ +static inline void bch_set_feature_##name(struct cache_sb *sb) \ +{ \ + (sb)->feature_incompat |= \ + BCH##_FEATURE_INCOMPAT_##flagname; \ +} \ +static inline void bch_clear_feature_##name(struct cache_sb *sb) \ +{ \ + (sb)->feature_incompat &= \ + ~BCH##_FEATURE_INCOMPAT_##flagname; \ +} + +#endif diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 40fb18028c01..c6ef410a21a8 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -13,6 +13,7 @@ #include "extents.h" #include "request.h" #include "writeback.h" +#include "features.h" #include #include @@ -194,6 +195,7 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev, sb->data_offset = BDEV_DATA_START_DEFAULT; break; case BCACHE_SB_VERSION_BDEV_WITH_OFFSET: + case BCACHE_SB_VERSION_BDEV_WITH_FEATURES: sb->data_offset = le64_to_cpu(s->data_offset); err = "Bad data offset"; @@ -207,6 +209,14 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev, if (err) goto err; break; + case BCACHE_SB_VERSION_CDEV_WITH_FEATURES: + err = 
read_super_common(sb, bdev, s); + if (err) + goto err; + sb->feature_compat = le64_to_cpu(s->feature_compat); + sb->feature_incompat = le64_to_cpu(s->feature_incompat); + sb->feature_ro_compat = le64_to_cpu(s->feature_ro_compat); + break; default: err = "Unsupported superblock version"; goto err; @@ -241,7 +251,6 @@ static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out, offset_in_page(out)); out->offset = cpu_to_le64(sb->offset); - out->version = cpu_to_le64(sb->version); memcpy(out->uuid, sb->uuid, 16); memcpy(out->set_uuid, sb->set_uuid, 16); @@ -257,6 +266,13 @@ static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out, for (i = 0; i < sb->keys; i++) out->d[i] = cpu_to_le64(sb->d[i]); + if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) { + out->feature_compat = cpu_to_le64(sb->feature_compat); + out->feature_incompat = cpu_to_le64(sb->feature_incompat); + out->feature_ro_compat = cpu_to_le64(sb->feature_ro_compat); + } + + out->version = cpu_to_le64(sb->version); out->csum = csum_set(out); pr_debug("ver %llu, flags %llu, seq %llu\n", @@ -313,17 +329,20 @@ void bcache_write_super(struct cache_set *c) { struct closure *cl = &c->sb_write; struct cache *ca; - unsigned int i; + unsigned int i, version = BCACHE_SB_VERSION_CDEV_WITH_UUID; down(&c->sb_write_mutex); closure_init(cl, &c->cl); c->sb.seq++; + if (c->sb.version > version) + version = c->sb.version; + for_each_cache(ca, c, i) { struct bio *bio = &ca->sb_bio; - ca->sb.version = BCACHE_SB_VERSION_CDEV_WITH_UUID; + ca->sb.version = version; ca->sb.seq = c->sb.seq; ca->sb.last_mount = c->sb.last_mount; @@ -1839,6 +1858,13 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) c->sb.bucket_size = sb->bucket_size; c->sb.nr_in_set = sb->nr_in_set; c->sb.last_mount = sb->last_mount; + c->sb.version = sb->version; + if (c->sb.version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) { + c->sb.feature_compat = sb->feature_compat; + c->sb.feature_ro_compat = sb->feature_ro_compat; + c->sb.feature_incompat = sb->feature_incompat; + } + c->bucket_bits = ilog2(sb->bucket_size); c->block_bits = ilog2(sb->block_size); c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry); diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h index 9a1965c6c3d0..47df2db2e727 100644 --- a/include/uapi/linux/bcache.h +++ b/include/uapi/linux/bcache.h @@ -141,11 +141,13 @@ static inline struct bkey *bkey_idx(const struct bkey *k, unsigned int nr_keys) * Version 3: Cache device with new UUID format * Version 4: Backing device with data offset */ -#define BCACHE_SB_VERSION_CDEV 0 -#define BCACHE_SB_VERSION_BDEV 1 -#define BCACHE_SB_VERSION_CDEV_WITH_UUID 3 -#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET 4 -#define BCACHE_SB_MAX_VERSION 4 +#define BCACHE_SB_VERSION_CDEV 0 +#define BCACHE_SB_VERSION_BDEV 1 +#define BCACHE_SB_VERSION_CDEV_WITH_UUID 3 +#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET 4 +#define BCACHE_SB_VERSION_CDEV_WITH_FEATURES 5 +#define BCACHE_SB_VERSION_BDEV_WITH_FEATURES 6 +#define BCACHE_SB_MAX_VERSION 6 #define SB_SECTOR 8 #define SB_OFFSET (SB_SECTOR << SECTOR_SHIFT) @@ -173,7 +175,12 @@ struct cache_sb_disk { __le64 flags; __le64 seq; - __le64 pad[8]; + + __le64 feature_compat; + __le64 feature_incompat; + __le64 feature_ro_compat; + + __le64 pad[5]; union { struct { @@ -224,7 +231,12 @@ struct cache_sb { __u64 flags; __u64 seq; - __u64 pad[8]; + + __u64 feature_compat; + __u64 feature_incompat; + __u64 feature_ro_compat; + + __u64 pad[5]; union { struct { @@ -262,7 +274,8 @@ struct cache_sb { 
static inline _Bool SB_IS_BDEV(const struct cache_sb *sb) { return sb->version == BCACHE_SB_VERSION_BDEV - || sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET; + || sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET + || sb->version == BCACHE_SB_VERSION_BDEV_WITH_FEATURES; } BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1); -- cgit v1.2.3 From 4c1ccd0896d6a45f2159280d957afd441a7aeaba Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sat, 25 Jul 2020 20:00:29 +0800 Subject: bcache: struct cache_sb is only for in-memory super block now We have struct cache_sb_disk for on-disk super block already, it is unnecessary to keep the in-memory super block format exactly mapping to the on-disk struct layout. This patch adds code comments to notice that struct cache_sb is not exactly mapping to cache_sb_disk, and removes the useless member csum and pad[5]. Although struct cache_sb does not belong to uapi, but there are still some on-disk format related macros reference it and it is unncessary to get rid of such dependency now. So struct cache_sb will continue to stay in include/uapi/linux/bache.h for now. Signed-off-by: Coly Li Reviewed-by: Hannes Reinecke Signed-off-by: Jens Axboe --- include/uapi/linux/bcache.h | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h index 47df2db2e727..0ef984ea515a 100644 --- a/include/uapi/linux/bcache.h +++ b/include/uapi/linux/bcache.h @@ -215,8 +215,13 @@ struct cache_sb_disk { __le64 d[SB_JOURNAL_BUCKETS]; /* journal buckets */ }; +/* + * This is for in-memory bcache super block. + * NOTE: cache_sb is NOT exactly mapping to cache_sb_disk, the member + * size, ordering and even whole struct size may be different + * from cache_sb_disk. + */ struct cache_sb { - __u64 csum; __u64 offset; /* sector where this sb was written */ __u64 version; @@ -236,8 +241,6 @@ struct cache_sb { __u64 feature_incompat; __u64 feature_ro_compat; - __u64 pad[5]; - union { struct { /* Cache devices */ @@ -245,7 +248,6 @@ struct cache_sb { __u16 block_size; /* sectors */ __u16 bucket_size; /* sectors */ - __u16 nr_in_set; __u16 nr_this_dev; }; -- cgit v1.2.3 From ffa470327572b8f85dceda48fd0676d9658cb8c5 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sat, 25 Jul 2020 20:00:35 +0800 Subject: bcache: add bucket_size_hi into struct cache_sb_disk for large bucket The large bucket feature is to extend bucket_size from 16bit to 32bit. When create cache device on zoned device (e.g. zoned NVMe SSD), making a single bucket cover one or more zones of the zoned device is the simplest way to support zoned device as cache by bcache. But current maximum bucket size is 16MB and a typical zone size of zoned device is 256MB, this is the major motiviation to extend bucket size to a larger bit width. This patch is the basic and first change to support large bucket size, the major changes it makes are, - Add BCH_FEATURE_INCOMPAT_LARGE_BUCKET for the large bucket feature, INCOMPAT means it introduces incompatible on-disk format change. - Add BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LARGE_BUCKET) routines. - Adds __le16 bucket_size_hi into struct cache_sb_disk at offset 0x8d0 for the on-disk super block format. - For the in-memory super block struct cache_sb, member bucket_size is extended from __u16 to __32. - Add get_bucket_size() to combine the bucket_size and bucket_size_hi from struct cache_sb_disk into an unsigned int value. 
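To illustrate the direction not spelled out in the hunks, the write-out side is the inverse of get_bucket_size(); the helper name below is invented and the snippet is only a sketch of the encoding, not code from this patch:

/* Sketch: splitting the 32-bit in-memory bucket size back into the two
 * little-endian 16-bit on-disk fields.  Not part of this patch. */
static void put_bucket_size(struct cache_sb_disk *s, const struct cache_sb *sb)
{
        s->bucket_size = cpu_to_le16(sb->bucket_size & 0xffff);
        if (sb->bucket_size >> 16) {
                /* high half is only meaningful with the LARGE_BUCKET incompat
                 * feature and a _WITH_FEATURES super block version */
                s->bucket_size_hi = cpu_to_le16(sb->bucket_size >> 16);
        }
}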
Since we already have large bucket size helpers meta_bucket_pages(), meta_bucket_bytes() and alloc_meta_bucket_pages(), they make sure when bucket size > 8MB, the memory allocation for bcache meta data bucket won't fail no matter how large the bucket size extended. So these meta data buckets are handled properly when the bucket size width increase from 16bit to 32bit, we don't need to worry about them. Signed-off-by: Coly Li Signed-off-by: Jens Axboe --- drivers/md/bcache/alloc.c | 2 +- drivers/md/bcache/features.c | 22 ++++++++++++++++++++++ drivers/md/bcache/features.h | 9 ++++++--- drivers/md/bcache/movinggc.c | 4 ++-- drivers/md/bcache/super.c | 23 +++++++++++++++++++---- include/uapi/linux/bcache.h | 3 ++- 6 files changed, 52 insertions(+), 11 deletions(-) create mode 100644 drivers/md/bcache/features.c (limited to 'include') diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index a1df0d95151c..52035a78d836 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -87,7 +87,7 @@ void bch_rescale_priorities(struct cache_set *c, int sectors) { struct cache *ca; struct bucket *b; - unsigned int next = c->nbuckets * c->sb.bucket_size / 1024; + unsigned long next = c->nbuckets * c->sb.bucket_size / 1024; unsigned int i; int r; diff --git a/drivers/md/bcache/features.c b/drivers/md/bcache/features.c new file mode 100644 index 000000000000..ba53944bb390 --- /dev/null +++ b/drivers/md/bcache/features.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Feature set bits and string conversion. + * Inspired by ext4's features compat/incompat/ro_compat related code. + * + * Copyright 2020 Coly Li + * + */ +#include +#include "bcache.h" + +struct feature { + int compat; + unsigned int mask; + const char *string; +}; + +static struct feature feature_list[] = { + {BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LARGE_BUCKET, + "large_bucket"}, + {0, 0, 0 }, +}; diff --git a/drivers/md/bcache/features.h b/drivers/md/bcache/features.h index ae7df37b9862..dca052cf5203 100644 --- a/drivers/md/bcache/features.h +++ b/drivers/md/bcache/features.h @@ -11,9 +11,13 @@ #define BCH_FEATURE_INCOMPAT 2 #define BCH_FEATURE_TYPE_MASK 0x03 +/* Feature set definition */ +/* Incompat feature set */ +#define BCH_FEATURE_INCOMPAT_LARGE_BUCKET 0x0001 /* 32bit bucket size */ + #define BCH_FEATURE_COMPAT_SUUP 0 #define BCH_FEATURE_RO_COMPAT_SUUP 0 -#define BCH_FEATURE_INCOMPAT_SUUP 0 +#define BCH_FEATURE_INCOMPAT_SUUP BCH_FEATURE_INCOMPAT_LARGE_BUCKET #define BCH_HAS_COMPAT_FEATURE(sb, mask) \ ((sb)->feature_compat & (mask)) @@ -22,8 +26,6 @@ #define BCH_HAS_INCOMPAT_FEATURE(sb, mask) \ ((sb)->feature_incompat & (mask)) -/* Feature set definition */ - #define BCH_FEATURE_COMPAT_FUNCS(name, flagname) \ static inline int bch_has_feature_##name(struct cache_sb *sb) \ { \ @@ -75,4 +77,5 @@ static inline void bch_clear_feature_##name(struct cache_sb *sb) \ ~BCH##_FEATURE_INCOMPAT_##flagname; \ } +BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LARGE_BUCKET); #endif diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c index b7dd2d75f58c..5872d6470470 100644 --- a/drivers/md/bcache/movinggc.c +++ b/drivers/md/bcache/movinggc.c @@ -206,8 +206,8 @@ void bch_moving_gc(struct cache_set *c) mutex_lock(&c->bucket_lock); for_each_cache(ca, c, i) { - unsigned int sectors_to_move = 0; - unsigned int reserve_sectors = ca->sb.bucket_size * + unsigned long sectors_to_move = 0; + unsigned long reserve_sectors = ca->sb.bucket_size * fifo_used(&ca->free[RESERVE_MOVINGGC]); ca->heap.used = 0; 
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index e4f05c4ddcdd..62c9681fe92f 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -60,6 +60,17 @@ struct workqueue_struct *bch_journal_wq; /* Superblock */ +static unsigned int get_bucket_size(struct cache_sb *sb, struct cache_sb_disk *s) +{ + unsigned int bucket_size = le16_to_cpu(s->bucket_size); + + if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES && + bch_has_feature_large_bucket(sb)) + bucket_size |= le16_to_cpu(s->bucket_size_hi) << 16; + + return bucket_size; +} + static const char *read_super_common(struct cache_sb *sb, struct block_device *bdev, struct cache_sb_disk *s) { @@ -68,7 +79,7 @@ static const char *read_super_common(struct cache_sb *sb, struct block_device * sb->first_bucket= le16_to_cpu(s->first_bucket); sb->nbuckets = le64_to_cpu(s->nbuckets); - sb->bucket_size = le16_to_cpu(s->bucket_size); + sb->bucket_size = get_bucket_size(sb, s); sb->nr_in_set = le16_to_cpu(s->nr_in_set); sb->nr_this_dev = le16_to_cpu(s->nr_this_dev); @@ -210,12 +221,16 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev, goto err; break; case BCACHE_SB_VERSION_CDEV_WITH_FEATURES: - err = read_super_common(sb, bdev, s); - if (err) - goto err; + /* + * Feature bits are needed in read_super_common(), + * convert them firstly. + */ sb->feature_compat = le64_to_cpu(s->feature_compat); sb->feature_incompat = le64_to_cpu(s->feature_incompat); sb->feature_ro_compat = le64_to_cpu(s->feature_ro_compat); + err = read_super_common(sb, bdev, s); + if (err) + goto err; break; default: err = "Unsupported superblock version"; diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h index 0ef984ea515a..52e8bcb33981 100644 --- a/include/uapi/linux/bcache.h +++ b/include/uapi/linux/bcache.h @@ -213,6 +213,7 @@ struct cache_sb_disk { __le16 keys; }; __le64 d[SB_JOURNAL_BUCKETS]; /* journal buckets */ + __le16 bucket_size_hi; }; /* @@ -247,9 +248,9 @@ struct cache_sb { __u64 nbuckets; /* device size */ __u16 block_size; /* sectors */ - __u16 bucket_size; /* sectors */ __u16 nr_in_set; __u16 nr_this_dev; + __u32 bucket_size; /* sectors */ }; struct { /* Backing devices */ -- cgit v1.2.3 From 92fe2aa859f52ce6aa595ca97fec110dc7100e63 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 20 Jul 2020 15:07:30 -0700 Subject: libnvdimm: Validate command family indices The ND_CMD_CALL format allows for a general passthrough of passlisted commands targeting a given command set. However there is no validation of the family index relative to what the bus supports. - Update the NFIT bus implementation (the only one that supports ND_CMD_CALL passthrough) to also passlist the valid set of command family indices. - Update the generic __nd_ioctl() path to validate that field on behalf of all implementations. Fixes: 31eca76ba2fc ("nfit, libnvdimm: limited/whitelisted dimm command marshaling mechanism") Cc: Vishal Verma Cc: Dave Jiang Cc: Ira Weiny Cc: "Rafael J. 
Wysocki" Cc: Len Brown Cc: Signed-off-by: Dan Williams Signed-off-by: Vishal Verma --- drivers/acpi/nfit/core.c | 11 +++++++++-- drivers/acpi/nfit/nfit.h | 1 - drivers/nvdimm/bus.c | 16 ++++++++++++++++ include/linux/libnvdimm.h | 2 ++ include/uapi/linux/ndctl.h | 4 ++++ 5 files changed, 31 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 7c138a4edc03..1f72ce1a782b 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -1823,6 +1823,7 @@ static void populate_shutdown_status(struct nfit_mem *nfit_mem) static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, struct nfit_mem *nfit_mem, u32 device_handle) { + struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; struct acpi_device *adev, *adev_dimm; struct device *dev = acpi_desc->dev; unsigned long dsm_mask, label_mask; @@ -1834,6 +1835,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, /* nfit test assumes 1:1 relationship between commands and dsms */ nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en; nfit_mem->family = NVDIMM_FAMILY_INTEL; + set_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask); if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID) sprintf(nfit_mem->id, "%04x-%02x-%04x-%08x", @@ -1886,10 +1888,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, * Note, that checking for function0 (bit0) tells us if any commands * are reachable through this GUID. */ + clear_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask); for (i = 0; i <= NVDIMM_FAMILY_MAX; i++) - if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) + if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) { + set_bit(i, &nd_desc->dimm_family_mask); if (family < 0 || i == default_dsm_family) family = i; + } /* limit the supported commands to those that are publicly documented */ nfit_mem->family = family; @@ -2153,6 +2158,9 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en; nd_desc->bus_dsm_mask = acpi_desc->bus_nfit_cmd_force_en; + set_bit(ND_CMD_CALL, &nd_desc->cmd_mask); + set_bit(NVDIMM_BUS_FAMILY_NFIT, &nd_desc->bus_family_mask); + adev = to_acpi_dev(acpi_desc); if (!adev) return; @@ -2160,7 +2168,6 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++) if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) set_bit(i, &nd_desc->cmd_mask); - set_bit(ND_CMD_CALL, &nd_desc->cmd_mask); dsm_mask = (1 << ND_CMD_ARS_CAP) | diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h index f5525f8bb770..5c5e7ebba8dc 100644 --- a/drivers/acpi/nfit/nfit.h +++ b/drivers/acpi/nfit/nfit.h @@ -33,7 +33,6 @@ | ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \ | ACPI_NFIT_MEM_NOT_ARMED | ACPI_NFIT_MEM_MAP_FAILED) -#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_HYPERV #define NVDIMM_CMD_MAX 31 #define NVDIMM_STANDARD_CMDMASK \ diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 09087c38fabd..955265656b96 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -1037,9 +1037,25 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, dimm_name = "bus"; } + /* Validate command family support against bus declared support */ if (cmd == ND_CMD_CALL) { + unsigned long *mask; + if (copy_from_user(&pkg, p, sizeof(pkg))) return -EFAULT; + + if (nvdimm) { + if (pkg.nd_family > NVDIMM_FAMILY_MAX) + return -EINVAL; + mask = &nd_desc->dimm_family_mask; + 
} else { + if (pkg.nd_family > NVDIMM_BUS_FAMILY_MAX) + return -EINVAL; + mask = &nd_desc->bus_family_mask; + } + + if (!test_bit(pkg.nd_family, mask)) + return -EINVAL; } if (!desc || diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 18da4059be09..bd39a2cf7972 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -78,6 +78,8 @@ struct nvdimm_bus_descriptor { const struct attribute_group **attr_groups; unsigned long bus_dsm_mask; unsigned long cmd_mask; + unsigned long dimm_family_mask; + unsigned long bus_family_mask; struct module *module; char *provider_name; struct device_node *of_node; diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h index 0e09dc5cec19..e9468b9332bd 100644 --- a/include/uapi/linux/ndctl.h +++ b/include/uapi/linux/ndctl.h @@ -245,6 +245,10 @@ struct nd_cmd_pkg { #define NVDIMM_FAMILY_MSFT 3 #define NVDIMM_FAMILY_HYPERV 4 #define NVDIMM_FAMILY_PAPR 5 +#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_PAPR + +#define NVDIMM_BUS_FAMILY_NFIT 0 +#define NVDIMM_BUS_FAMILY_MAX NVDIMM_BUS_FAMILY_NFIT #define ND_IOCTL_CALL _IOWR(ND_IOCTL, ND_CMD_CALL,\ struct nd_cmd_pkg) -- cgit v1.2.3 From d46e6a2176f8edf7030db34aeb54a4f016fabe0a Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 20 Jul 2020 15:07:35 -0700 Subject: ACPI: NFIT: Move bus_dsm_mask out of generic nvdimm_bus_descriptor DSMs are strictly an ACPI mechanism, evict the bus_dsm_mask concept from the generic 'struct nvdimm_bus_descriptor' object. As a side effect the test facility ->bus_nfit_cmd_force_en is no longer necessary. The test infrastructure can communicate that information directly in ->bus_dsm_mask. Cc: Vishal Verma Cc: Dave Jiang Cc: Ira Weiny Signed-off-by: Dan Williams Signed-off-by: Vishal Verma --- drivers/acpi/nfit/core.c | 8 ++++---- drivers/acpi/nfit/nfit.h | 2 +- include/linux/libnvdimm.h | 1 - tools/testing/nvdimm/test/nfit.c | 16 ++++++++-------- 4 files changed, 13 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 1f72ce1a782b..9fdd655bdf0e 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -478,7 +478,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, cmd_name = nvdimm_bus_cmd_name(cmd); cmd_mask = nd_desc->cmd_mask; - dsm_mask = nd_desc->bus_dsm_mask; + dsm_mask = acpi_desc->bus_dsm_mask; desc = nd_cmd_bus_desc(cmd); guid = to_nfit_uuid(NFIT_DEV_BUS); handle = adev->handle; @@ -1238,8 +1238,9 @@ static ssize_t bus_dsm_mask_show(struct device *dev, { struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); - return sprintf(buf, "%#lx\n", nd_desc->bus_dsm_mask); + return sprintf(buf, "%#lx\n", acpi_desc->bus_dsm_mask); } static struct device_attribute dev_attr_bus_dsm_mask = __ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL); @@ -2157,7 +2158,6 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) int i; nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en; - nd_desc->bus_dsm_mask = acpi_desc->bus_nfit_cmd_force_en; set_bit(ND_CMD_CALL, &nd_desc->cmd_mask); set_bit(NVDIMM_BUS_FAMILY_NFIT, &nd_desc->bus_family_mask); @@ -2180,7 +2180,7 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) (1 << NFIT_CMD_ARS_INJECT_GET); for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) - set_bit(i, &nd_desc->bus_dsm_mask); + set_bit(i, 
&acpi_desc->bus_dsm_mask); } static ssize_t range_index_show(struct device *dev, diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h index 5c5e7ebba8dc..da097149d94d 100644 --- a/drivers/acpi/nfit/nfit.h +++ b/drivers/acpi/nfit/nfit.h @@ -237,7 +237,7 @@ struct acpi_nfit_desc { unsigned long scrub_flags; unsigned long dimm_cmd_force_en; unsigned long bus_cmd_force_en; - unsigned long bus_nfit_cmd_force_en; + unsigned long bus_dsm_mask; unsigned int platform_cap; unsigned int scrub_tmo; int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index bd39a2cf7972..ad9898ece7d3 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -76,7 +76,6 @@ typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc, struct device_node; struct nvdimm_bus_descriptor { const struct attribute_group **attr_groups; - unsigned long bus_dsm_mask; unsigned long cmd_mask; unsigned long dimm_family_mask; unsigned long bus_family_mask; diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index a8ee5c4d41eb..a59174ba1d2a 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c @@ -2507,10 +2507,10 @@ static void nfit_test0_setup(struct nfit_test *t) set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en); set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en); set_bit(ND_CMD_CALL, &acpi_desc->bus_cmd_force_en); - set_bit(NFIT_CMD_TRANSLATE_SPA, &acpi_desc->bus_nfit_cmd_force_en); - set_bit(NFIT_CMD_ARS_INJECT_SET, &acpi_desc->bus_nfit_cmd_force_en); - set_bit(NFIT_CMD_ARS_INJECT_CLEAR, &acpi_desc->bus_nfit_cmd_force_en); - set_bit(NFIT_CMD_ARS_INJECT_GET, &acpi_desc->bus_nfit_cmd_force_en); + set_bit(NFIT_CMD_TRANSLATE_SPA, &acpi_desc->bus_dsm_mask); + set_bit(NFIT_CMD_ARS_INJECT_SET, &acpi_desc->bus_dsm_mask); + set_bit(NFIT_CMD_ARS_INJECT_CLEAR, &acpi_desc->bus_dsm_mask); + set_bit(NFIT_CMD_ARS_INJECT_GET, &acpi_desc->bus_dsm_mask); set_bit(ND_INTEL_FW_GET_INFO, &acpi_desc->dimm_cmd_force_en); set_bit(ND_INTEL_FW_START_UPDATE, &acpi_desc->dimm_cmd_force_en); set_bit(ND_INTEL_FW_SEND_DATA, &acpi_desc->dimm_cmd_force_en); @@ -2731,11 +2731,11 @@ static int nfit_ctl_test(struct device *dev) .module = THIS_MODULE, .provider_name = "ACPI.NFIT", .ndctl = acpi_nfit_ctl, - .bus_dsm_mask = 1UL << NFIT_CMD_TRANSLATE_SPA - | 1UL << NFIT_CMD_ARS_INJECT_SET - | 1UL << NFIT_CMD_ARS_INJECT_CLEAR - | 1UL << NFIT_CMD_ARS_INJECT_GET, }, + .bus_dsm_mask = 1UL << NFIT_CMD_TRANSLATE_SPA + | 1UL << NFIT_CMD_ARS_INJECT_SET + | 1UL << NFIT_CMD_ARS_INJECT_CLEAR + | 1UL << NFIT_CMD_ARS_INJECT_GET, .dev = &adev->dev, }; -- cgit v1.2.3 From 6450ddbd5d8e83ea9927c7f9076a21f829699e0f Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 20 Jul 2020 15:07:40 -0700 Subject: ACPI: NFIT: Define runtime firmware activation commands Platform reboots are expensive. Towards reducing downtime to apply firmware updates the Intel NVDIMM command definition is growing support for applying live firmware updates that only require temporarily suspending memory traffic instead of a full reboot. Follow-on commits add support for triggering firmware activation, this patch only defines the commands, adds probe support, and validates that they are blocked via the ioctl path. The ioctl-path block ensures that the OS is in charge since these commands have side effects only the OS can handle. 
Specifically firmware activation may cause the memory controller to be quiesced on the order of 100s of milliseconds. In that case Linux ensure the activation only takes place while the OS is in a suspend state. Link: https://pmem.io/documents/IntelOptanePMem_DSM_Interface-V2.0.pdf Cc: Vishal Verma Cc: Dave Jiang Cc: Ira Weiny Cc: "Rafael J. Wysocki" Cc: Len Brown Signed-off-by: Dan Williams Signed-off-by: Vishal Verma --- drivers/acpi/nfit/core.c | 86 +++++++++++++++++++++++++++++++--------------- drivers/acpi/nfit/intel.h | 53 ++++++++++++++++++++++++++++ drivers/acpi/nfit/nfit.h | 25 +++++++++++++- include/uapi/linux/ndctl.h | 3 +- 4 files changed, 137 insertions(+), 30 deletions(-) (limited to 'include') diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 9fdd655bdf0e..78cc9e2d2aa3 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -73,6 +73,18 @@ const guid_t *to_nfit_uuid(enum nfit_uuids id) } EXPORT_SYMBOL(to_nfit_uuid); +static const guid_t *to_nfit_bus_uuid(int family) +{ + if (WARN_ONCE(family == NVDIMM_BUS_FAMILY_NFIT, + "only secondary bus families can be translated\n")) + return NULL; + /* + * The index of bus UUIDs starts immediately following the last + * NVDIMM/leaf family. + */ + return to_nfit_uuid(family + NVDIMM_FAMILY_MAX); +} + static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc) { struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; @@ -362,24 +374,8 @@ static u8 nfit_dsm_revid(unsigned family, unsigned func) { static const u8 revid_table[NVDIMM_FAMILY_MAX+1][NVDIMM_CMD_MAX+1] = { [NVDIMM_FAMILY_INTEL] = { - [NVDIMM_INTEL_GET_MODES] = 2, - [NVDIMM_INTEL_GET_FWINFO] = 2, - [NVDIMM_INTEL_START_FWUPDATE] = 2, - [NVDIMM_INTEL_SEND_FWUPDATE] = 2, - [NVDIMM_INTEL_FINISH_FWUPDATE] = 2, - [NVDIMM_INTEL_QUERY_FWUPDATE] = 2, - [NVDIMM_INTEL_SET_THRESHOLD] = 2, - [NVDIMM_INTEL_INJECT_ERROR] = 2, - [NVDIMM_INTEL_GET_SECURITY_STATE] = 2, - [NVDIMM_INTEL_SET_PASSPHRASE] = 2, - [NVDIMM_INTEL_DISABLE_PASSPHRASE] = 2, - [NVDIMM_INTEL_UNLOCK_UNIT] = 2, - [NVDIMM_INTEL_FREEZE_LOCK] = 2, - [NVDIMM_INTEL_SECURE_ERASE] = 2, - [NVDIMM_INTEL_OVERWRITE] = 2, - [NVDIMM_INTEL_QUERY_OVERWRITE] = 2, - [NVDIMM_INTEL_SET_MASTER_PASSPHRASE] = 2, - [NVDIMM_INTEL_MASTER_SECURE_ERASE] = 2, + [NVDIMM_INTEL_GET_MODES ... 
+ NVDIMM_INTEL_FW_ACTIVATE_ARM] = 2, }, }; u8 id; @@ -406,7 +402,7 @@ static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func) } static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd, - struct nd_cmd_pkg *call_pkg) + struct nd_cmd_pkg *call_pkg, int *family) { if (call_pkg) { int i; @@ -417,6 +413,7 @@ static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd, for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++) if (call_pkg->nd_reserved2[i]) return -EINVAL; + *family = call_pkg->nd_family; return call_pkg->nd_command; } @@ -450,13 +447,14 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, acpi_handle handle; const guid_t *guid; int func, rc, i; + int family = 0; if (cmd_rc) *cmd_rc = -EINVAL; if (cmd == ND_CMD_CALL) call_pkg = buf; - func = cmd_to_func(nfit_mem, cmd, call_pkg); + func = cmd_to_func(nfit_mem, cmd, call_pkg, &family); if (func < 0) return func; @@ -478,9 +476,17 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, cmd_name = nvdimm_bus_cmd_name(cmd); cmd_mask = nd_desc->cmd_mask; - dsm_mask = acpi_desc->bus_dsm_mask; + if (cmd == ND_CMD_CALL && call_pkg->nd_family) { + family = call_pkg->nd_family; + if (!test_bit(family, &nd_desc->bus_family_mask)) + return -EINVAL; + dsm_mask = acpi_desc->family_dsm_mask[family]; + guid = to_nfit_bus_uuid(family); + } else { + dsm_mask = acpi_desc->bus_dsm_mask; + guid = to_nfit_uuid(NFIT_DEV_BUS); + } desc = nd_cmd_bus_desc(cmd); - guid = to_nfit_uuid(NFIT_DEV_BUS); handle = adev->handle; dimm_name = "bus"; } @@ -516,8 +522,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, in_buf.buffer.length = call_pkg->nd_size_in; } - dev_dbg(dev, "%s cmd: %d: func: %d input length: %d\n", - dimm_name, cmd, func, in_buf.buffer.length); + dev_dbg(dev, "%s cmd: %d: family: %d func: %d input length: %d\n", + dimm_name, cmd, family, func, in_buf.buffer.length); if (payload_dumpable(nvdimm, func)) print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4, in_buf.buffer.pointer, @@ -2153,14 +2159,21 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) { struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS); + unsigned long dsm_mask, *mask; struct acpi_device *adev; - unsigned long dsm_mask; int i; - nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en; set_bit(ND_CMD_CALL, &nd_desc->cmd_mask); set_bit(NVDIMM_BUS_FAMILY_NFIT, &nd_desc->bus_family_mask); + /* enable nfit_test to inject bus command emulation */ + if (acpi_desc->bus_cmd_force_en) { + nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en; + mask = &nd_desc->bus_family_mask; + if (acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]) + set_bit(NVDIMM_BUS_FAMILY_INTEL, mask); + } + adev = to_acpi_dev(acpi_desc); if (!adev) return; @@ -2181,6 +2194,14 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) set_bit(i, &acpi_desc->bus_dsm_mask); + + /* Enumerate allowed NVDIMM_BUS_FAMILY_INTEL commands */ + dsm_mask = NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK; + guid = to_nfit_bus_uuid(NVDIMM_BUS_FAMILY_INTEL); + mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]; + for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) + if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) + set_bit(i, mask); } static ssize_t range_index_show(struct device *dev, @@ -3492,7 +3513,10 @@ static int 
__acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, return 0; } -/* prevent security commands from being issued via ioctl */ +/* + * Prevent security and firmware activate commands from being issued via + * ioctl. + */ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned int cmd, void *buf) { @@ -3503,10 +3527,15 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, call_pkg->nd_family == NVDIMM_FAMILY_INTEL) { func = call_pkg->nd_command; if (func > NVDIMM_CMD_MAX || - (1 << func) & NVDIMM_INTEL_SECURITY_CMDMASK) + (1 << func) & NVDIMM_INTEL_DENY_CMDMASK) return -EOPNOTSUPP; } + /* block all non-nfit bus commands */ + if (!nvdimm && cmd == ND_CMD_CALL && + call_pkg->nd_family != NVDIMM_BUS_FAMILY_NFIT) + return -EOPNOTSUPP; + return __acpi_nfit_clear_to_send(nd_desc, nvdimm, cmd); } @@ -3798,6 +3827,7 @@ static __init int nfit_init(void) guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]); guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]); guid_parse(UUID_NFIT_DIMM_N_HYPERV, &nfit_uuid[NFIT_DEV_DIMM_N_HYPERV]); + guid_parse(UUID_INTEL_BUS, &nfit_uuid[NFIT_BUS_INTEL]); nfit_wq = create_singlethread_workqueue("nfit"); if (!nfit_wq) diff --git a/drivers/acpi/nfit/intel.h b/drivers/acpi/nfit/intel.h index 0aca682ab9d7..868d073731cc 100644 --- a/drivers/acpi/nfit/intel.h +++ b/drivers/acpi/nfit/intel.h @@ -111,4 +111,57 @@ struct nd_intel_master_secure_erase { u8 passphrase[ND_INTEL_PASSPHRASE_SIZE]; u32 status; } __packed; + +#define ND_INTEL_FWA_IDLE 0 +#define ND_INTEL_FWA_ARMED 1 +#define ND_INTEL_FWA_BUSY 2 + +#define ND_INTEL_DIMM_FWA_NONE 0 +#define ND_INTEL_DIMM_FWA_NOTSTAGED 1 +#define ND_INTEL_DIMM_FWA_SUCCESS 2 +#define ND_INTEL_DIMM_FWA_NEEDRESET 3 +#define ND_INTEL_DIMM_FWA_MEDIAFAILED 4 +#define ND_INTEL_DIMM_FWA_ABORT 5 +#define ND_INTEL_DIMM_FWA_NOTSUPP 6 +#define ND_INTEL_DIMM_FWA_ERROR 7 + +struct nd_intel_fw_activate_dimminfo { + u32 status; + u16 result; + u8 state; + u8 reserved[7]; +} __packed; + +struct nd_intel_fw_activate_arm { + u8 activate_arm; + u32 status; +} __packed; + +/* Root device command payloads */ +#define ND_INTEL_BUS_FWA_CAP_FWQUIESCE (1 << 0) +#define ND_INTEL_BUS_FWA_CAP_OSQUIESCE (1 << 1) +#define ND_INTEL_BUS_FWA_CAP_RESET (1 << 2) + +struct nd_intel_bus_fw_activate_businfo { + u32 status; + u16 reserved; + u8 state; + u8 capability; + u64 activate_tmo; + u64 cpu_quiesce_tmo; + u64 io_quiesce_tmo; + u64 max_quiesce_tmo; +} __packed; + +#define ND_INTEL_BUS_FWA_STATUS_NOARM (6 | 1 << 16) +#define ND_INTEL_BUS_FWA_STATUS_BUSY (6 | 2 << 16) +#define ND_INTEL_BUS_FWA_STATUS_NOFW (6 | 3 << 16) +#define ND_INTEL_BUS_FWA_STATUS_TMO (6 | 4 << 16) +#define ND_INTEL_BUS_FWA_STATUS_NOIDLE (6 | 5 << 16) +#define ND_INTEL_BUS_FWA_STATUS_ABORT (6 | 6 << 16) + +struct nd_intel_bus_fw_activate { + u8 iodev_state; + u32 status; +} __packed; #endif diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h index da097149d94d..97c122628975 100644 --- a/drivers/acpi/nfit/nfit.h +++ b/drivers/acpi/nfit/nfit.h @@ -18,6 +18,7 @@ /* http://pmem.io/documents/NVDIMM_DSM_Interface-V1.6.pdf */ #define UUID_NFIT_DIMM "4309ac30-0d11-11e4-9191-0800200c9a66" +#define UUID_INTEL_BUS "c7d8acd4-2df8-4b82-9f65-a325335af149" /* https://github.com/HewlettPackard/hpe-nvm/blob/master/Documentation/ */ #define UUID_NFIT_DIMM_N_HPE1 "9002c334-acf3-4c0e-9642-a235f0d53bc6" @@ -65,6 +66,13 @@ enum nvdimm_family_cmds { NVDIMM_INTEL_QUERY_OVERWRITE = 26, 
NVDIMM_INTEL_SET_MASTER_PASSPHRASE = 27, NVDIMM_INTEL_MASTER_SECURE_ERASE = 28, + NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO = 29, + NVDIMM_INTEL_FW_ACTIVATE_ARM = 30, +}; + +enum nvdimm_bus_family_cmds { + NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO = 1, + NVDIMM_BUS_INTEL_FW_ACTIVATE = 2, }; #define NVDIMM_INTEL_SECURITY_CMDMASK \ @@ -75,13 +83,22 @@ enum nvdimm_family_cmds { | 1 << NVDIMM_INTEL_SET_MASTER_PASSPHRASE \ | 1 << NVDIMM_INTEL_MASTER_SECURE_ERASE) +#define NVDIMM_INTEL_FW_ACTIVATE_CMDMASK \ +(1 << NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO | 1 << NVDIMM_INTEL_FW_ACTIVATE_ARM) + +#define NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK \ +(1 << NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO | 1 << NVDIMM_BUS_INTEL_FW_ACTIVATE) + #define NVDIMM_INTEL_CMDMASK \ (NVDIMM_STANDARD_CMDMASK | 1 << NVDIMM_INTEL_GET_MODES \ | 1 << NVDIMM_INTEL_GET_FWINFO | 1 << NVDIMM_INTEL_START_FWUPDATE \ | 1 << NVDIMM_INTEL_SEND_FWUPDATE | 1 << NVDIMM_INTEL_FINISH_FWUPDATE \ | 1 << NVDIMM_INTEL_QUERY_FWUPDATE | 1 << NVDIMM_INTEL_SET_THRESHOLD \ | 1 << NVDIMM_INTEL_INJECT_ERROR | 1 << NVDIMM_INTEL_LATCH_SHUTDOWN \ - | NVDIMM_INTEL_SECURITY_CMDMASK) + | NVDIMM_INTEL_SECURITY_CMDMASK | NVDIMM_INTEL_FW_ACTIVATE_CMDMASK) + +#define NVDIMM_INTEL_DENY_CMDMASK \ +(NVDIMM_INTEL_SECURITY_CMDMASK | NVDIMM_INTEL_FW_ACTIVATE_CMDMASK) enum nfit_uuids { /* for simplicity alias the uuid index with the family id */ @@ -90,6 +107,11 @@ enum nfit_uuids { NFIT_DEV_DIMM_N_HPE2 = NVDIMM_FAMILY_HPE2, NFIT_DEV_DIMM_N_MSFT = NVDIMM_FAMILY_MSFT, NFIT_DEV_DIMM_N_HYPERV = NVDIMM_FAMILY_HYPERV, + /* + * to_nfit_bus_uuid() expects to translate bus uuid family ids + * to a UUID index using NVDIMM_FAMILY_MAX as an offset + */ + NFIT_BUS_INTEL = NVDIMM_FAMILY_MAX + NVDIMM_BUS_FAMILY_INTEL, NFIT_SPA_VOLATILE, NFIT_SPA_PM, NFIT_SPA_DCR, @@ -238,6 +260,7 @@ struct acpi_nfit_desc { unsigned long dimm_cmd_force_en; unsigned long bus_cmd_force_en; unsigned long bus_dsm_mask; + unsigned long family_dsm_mask[NVDIMM_BUS_FAMILY_MAX + 1]; unsigned int platform_cap; unsigned int scrub_tmo; int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h index e9468b9332bd..8cf1e4884fd5 100644 --- a/include/uapi/linux/ndctl.h +++ b/include/uapi/linux/ndctl.h @@ -248,7 +248,8 @@ struct nd_cmd_pkg { #define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_PAPR #define NVDIMM_BUS_FAMILY_NFIT 0 -#define NVDIMM_BUS_FAMILY_MAX NVDIMM_BUS_FAMILY_NFIT +#define NVDIMM_BUS_FAMILY_INTEL 1 +#define NVDIMM_BUS_FAMILY_MAX NVDIMM_BUS_FAMILY_INTEL #define ND_IOCTL_CALL _IOWR(ND_IOCTL, ND_CMD_CALL,\ struct nd_cmd_pkg) -- cgit v1.2.3 From 60d360acddc54344409a710af07c561e025f13f5 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 20 Jul 2020 15:08:07 -0700 Subject: driver-core: Introduce DEVICE_ATTR_ADMIN_{RO,RW} A common pattern for using plain DEVICE_ATTR() instead of DEVICE_ATTR_RO() and DEVICE_ATTR_RW() is for attributes that want to limit read to only root. I.e. many users of DEVICE_ATTR() are specifying 0400 or 0600 for permissions. Given the expectation that CAP_SYS_ADMIN is needed to access these sensitive attributes add an explicit helper with the _ADMIN_ identifier for DEVICE_ATTR_ADMIN_{RO,RW}. Cc: "Rafael J. 
Wysocki" Reviewed-by: Greg Kroah-Hartman Signed-off-by: Dan Williams Signed-off-by: Vishal Verma --- include/linux/device.h | 4 ++++ include/linux/sysfs.h | 7 +++++++ 2 files changed, 11 insertions(+) (limited to 'include') diff --git a/include/linux/device.h b/include/linux/device.h index 15460a5ac024..d7c2570368fa 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -128,8 +128,12 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, __ATTR_PREALLOC(_name, _mode, _show, _store) #define DEVICE_ATTR_RW(_name) \ struct device_attribute dev_attr_##_name = __ATTR_RW(_name) +#define DEVICE_ATTR_ADMIN_RW(_name) \ + struct device_attribute dev_attr_##_name = __ATTR_RW_MODE(_name, 0600) #define DEVICE_ATTR_RO(_name) \ struct device_attribute dev_attr_##_name = __ATTR_RO(_name) +#define DEVICE_ATTR_ADMIN_RO(_name) \ + struct device_attribute dev_attr_##_name = __ATTR_RO_MODE(_name, 0400) #define DEVICE_ATTR_WO(_name) \ struct device_attribute dev_attr_##_name = __ATTR_WO(_name) #define DEVICE_ULONG_ATTR(_name, _mode, _var) \ diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 86067dbe7745..34e84122f635 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -123,6 +123,13 @@ struct attribute_group { .show = _name##_show, \ } +#define __ATTR_RW_MODE(_name, _mode) { \ + .attr = { .name = __stringify(_name), \ + .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \ + .show = _name##_show, \ + .store = _name##_store, \ +} + #define __ATTR_WO(_name) { \ .attr = { .name = __stringify(_name), .mode = 0200 }, \ .store = _name##_store, \ -- cgit v1.2.3 From a228a64fc1e4428e2b96dc68e9ad3c447095c9e7 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 1 Jul 2020 18:10:18 -0700 Subject: bpf: Add bpf_prog iterator It's mostly a copy paste of commit 6086d29def80 ("bpf: Add bpf_map iterator") that is use to implement bpf_seq_file opreations to traverse all bpf programs. 
v1->v2: Tweak to use build time btf_id Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Acked-by: Daniel Borkmann --- include/linux/bpf.h | 1 + kernel/bpf/Makefile | 2 +- kernel/bpf/prog_iter.c | 103 +++++++++++++++++++++++++++++++++++++++++++++++++ kernel/bpf/syscall.c | 19 +++++++++ 4 files changed, 124 insertions(+), 1 deletion(-) create mode 100644 kernel/bpf/prog_iter.c (limited to 'include') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index bae557ff2da8..72221aea1c60 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1117,6 +1117,7 @@ int generic_map_delete_batch(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr); struct bpf_map *bpf_map_get_curr_or_next(u32 *id); +struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id); extern int sysctl_unprivileged_bpf_disabled; diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index 1131a921e1a6..e6eb9c0402da 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -2,7 +2,7 @@ obj-y := core.o CFLAGS_core.o += $(call cc-disable-warning, override-init) -obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o bpf_iter.o map_iter.o task_iter.o +obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o bpf_iter.o map_iter.o task_iter.o prog_iter.o obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ringbuf.o obj-$(CONFIG_BPF_SYSCALL) += disasm.o diff --git a/kernel/bpf/prog_iter.c b/kernel/bpf/prog_iter.c new file mode 100644 index 000000000000..6541b577d69f --- /dev/null +++ b/kernel/bpf/prog_iter.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2020 Facebook */ +#include +#include +#include +#include +#include + +struct bpf_iter_seq_prog_info { + u32 prog_id; +}; + +static void *bpf_prog_seq_start(struct seq_file *seq, loff_t *pos) +{ + struct bpf_iter_seq_prog_info *info = seq->private; + struct bpf_prog *prog; + + prog = bpf_prog_get_curr_or_next(&info->prog_id); + if (!prog) + return NULL; + + if (*pos == 0) + ++*pos; + return prog; +} + +static void *bpf_prog_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + struct bpf_iter_seq_prog_info *info = seq->private; + + ++*pos; + ++info->prog_id; + bpf_prog_put((struct bpf_prog *)v); + return bpf_prog_get_curr_or_next(&info->prog_id); +} + +struct bpf_iter__bpf_prog { + __bpf_md_ptr(struct bpf_iter_meta *, meta); + __bpf_md_ptr(struct bpf_prog *, prog); +}; + +DEFINE_BPF_ITER_FUNC(bpf_prog, struct bpf_iter_meta *meta, struct bpf_prog *prog) + +static int __bpf_prog_seq_show(struct seq_file *seq, void *v, bool in_stop) +{ + struct bpf_iter__bpf_prog ctx; + struct bpf_iter_meta meta; + struct bpf_prog *prog; + int ret = 0; + + ctx.meta = &meta; + ctx.prog = v; + meta.seq = seq; + prog = bpf_iter_get_info(&meta, in_stop); + if (prog) + ret = bpf_iter_run_prog(prog, &ctx); + + return ret; +} + +static int bpf_prog_seq_show(struct seq_file *seq, void *v) +{ + return __bpf_prog_seq_show(seq, v, false); +} + +static void bpf_prog_seq_stop(struct seq_file *seq, void *v) +{ + if (!v) + (void)__bpf_prog_seq_show(seq, v, true); + else + bpf_prog_put((struct bpf_prog *)v); +} + +static const struct seq_operations bpf_prog_seq_ops = { + .start = bpf_prog_seq_start, + .next = bpf_prog_seq_next, + .stop = bpf_prog_seq_stop, + .show = bpf_prog_seq_show, +}; + +BTF_ID_LIST(btf_bpf_prog_id) +BTF_ID(struct, bpf_prog) + +static struct bpf_iter_reg bpf_prog_reg_info 
= { + .target = "bpf_prog", + .seq_ops = &bpf_prog_seq_ops, + .init_seq_private = NULL, + .fini_seq_private = NULL, + .seq_priv_size = sizeof(struct bpf_iter_seq_prog_info), + .ctx_arg_info_size = 1, + .ctx_arg_info = { + { offsetof(struct bpf_iter__bpf_prog, prog), + PTR_TO_BTF_ID_OR_NULL }, + }, +}; + +static int __init bpf_prog_iter_init(void) +{ + bpf_prog_reg_info.ctx_arg_info[0].btf_id = *btf_bpf_prog_id; + return bpf_iter_reg_target(&bpf_prog_reg_info); +} + +late_initcall(bpf_prog_iter_init); diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index d07417d17712..ee290b1f2d9e 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -3044,6 +3044,25 @@ again: return map; } +struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id) +{ + struct bpf_prog *prog; + + spin_lock_bh(&prog_idr_lock); +again: + prog = idr_get_next(&prog_idr, id); + if (prog) { + prog = bpf_prog_inc_not_zero(prog); + if (IS_ERR(prog)) { + (*id)++; + goto again; + } + } + spin_unlock_bh(&prog_idr_lock); + + return prog; +} + #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id struct bpf_prog *bpf_prog_by_id(u32 id) -- cgit v1.2.3 From 14fc6bd6b79c430f615500d0fe6cea4722110db8 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 23 Jul 2020 11:41:09 -0700 Subject: bpf: Refactor bpf_iter_reg to have separate seq_info member There is no functionality change for this patch. Struct bpf_iter_reg is used to register a bpf_iter target, which includes information for both prog_load, link_create and seq_file creation. This patch puts fields related seq_file creation into a different structure. This will be useful for map elements iterator where one iterator covers different map types and different map types may have different seq_ops, init/fini private_data function and private_data size. 
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200723184109.590030-1-yhs@fb.com --- include/linux/bpf.h | 17 ++++++++++------- kernel/bpf/bpf_iter.c | 12 ++++++------ kernel/bpf/map_iter.c | 8 ++++++-- kernel/bpf/prog_iter.c | 8 ++++++-- kernel/bpf/task_iter.c | 16 ++++++++++++---- net/ipv4/tcp_ipv4.c | 8 ++++++-- net/ipv4/udp.c | 8 ++++++-- net/ipv6/route.c | 8 ++++++-- net/netlink/af_netlink.c | 8 ++++++-- 9 files changed, 64 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 72221aea1c60..127067f71fd4 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -37,6 +37,15 @@ struct seq_operations; extern struct idr btf_idr; extern spinlock_t btf_idr_lock; +typedef int (*bpf_iter_init_seq_priv_t)(void *private_data); +typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data); +struct bpf_iter_seq_info { + const struct seq_operations *seq_ops; + bpf_iter_init_seq_priv_t init_seq_private; + bpf_iter_fini_seq_priv_t fini_seq_private; + u32 seq_priv_size; +}; + /* map is generic key/value storage optionally accesible by eBPF programs */ struct bpf_map_ops { /* funcs callable from userspace (via syscall) */ @@ -1189,18 +1198,12 @@ int bpf_obj_get_user(const char __user *pathname, int flags); extern int bpf_iter_ ## target(args); \ int __init bpf_iter_ ## target(args) { return 0; } -typedef int (*bpf_iter_init_seq_priv_t)(void *private_data); -typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data); - #define BPF_ITER_CTX_ARG_MAX 2 struct bpf_iter_reg { const char *target; - const struct seq_operations *seq_ops; - bpf_iter_init_seq_priv_t init_seq_private; - bpf_iter_fini_seq_priv_t fini_seq_private; - u32 seq_priv_size; u32 ctx_arg_info_size; struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX]; + const struct bpf_iter_seq_info *seq_info; }; struct bpf_iter_meta { diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c index dd612b80b9fe..5b2387d6aa1f 100644 --- a/kernel/bpf/bpf_iter.c +++ b/kernel/bpf/bpf_iter.c @@ -218,8 +218,8 @@ static int iter_release(struct inode *inode, struct file *file) iter_priv = container_of(seq->private, struct bpf_iter_priv_data, target_private); - if (iter_priv->tinfo->reg_info->fini_seq_private) - iter_priv->tinfo->reg_info->fini_seq_private(seq->private); + if (iter_priv->tinfo->reg_info->seq_info->fini_seq_private) + iter_priv->tinfo->reg_info->seq_info->fini_seq_private(seq->private); bpf_prog_put(iter_priv->prog); seq->private = iter_priv; @@ -433,16 +433,16 @@ static int prepare_seq_file(struct file *file, struct bpf_iter_link *link) tinfo = link->tinfo; total_priv_dsize = offsetof(struct bpf_iter_priv_data, target_private) + - tinfo->reg_info->seq_priv_size; - priv_data = __seq_open_private(file, tinfo->reg_info->seq_ops, + tinfo->reg_info->seq_info->seq_priv_size; + priv_data = __seq_open_private(file, tinfo->reg_info->seq_info->seq_ops, total_priv_dsize); if (!priv_data) { err = -ENOMEM; goto release_prog; } - if (tinfo->reg_info->init_seq_private) { - err = tinfo->reg_info->init_seq_private(priv_data->target_private); + if (tinfo->reg_info->seq_info->init_seq_private) { + err = tinfo->reg_info->seq_info->init_seq_private(priv_data->target_private); if (err) goto release_seq_file; } diff --git a/kernel/bpf/map_iter.c b/kernel/bpf/map_iter.c index 5926c76d854e..1a69241fb1e2 100644 --- a/kernel/bpf/map_iter.c +++ b/kernel/bpf/map_iter.c @@ -81,17 +81,21 @@ static const struct seq_operations bpf_map_seq_ops = { 
BTF_ID_LIST(btf_bpf_map_id) BTF_ID(struct, bpf_map) -static struct bpf_iter_reg bpf_map_reg_info = { - .target = "bpf_map", +static const struct bpf_iter_seq_info bpf_map_seq_info = { .seq_ops = &bpf_map_seq_ops, .init_seq_private = NULL, .fini_seq_private = NULL, .seq_priv_size = sizeof(struct bpf_iter_seq_map_info), +}; + +static struct bpf_iter_reg bpf_map_reg_info = { + .target = "bpf_map", .ctx_arg_info_size = 1, .ctx_arg_info = { { offsetof(struct bpf_iter__bpf_map, map), PTR_TO_BTF_ID_OR_NULL }, }, + .seq_info = &bpf_map_seq_info, }; static int __init bpf_map_iter_init(void) diff --git a/kernel/bpf/prog_iter.c b/kernel/bpf/prog_iter.c index 6541b577d69f..53a73c841c13 100644 --- a/kernel/bpf/prog_iter.c +++ b/kernel/bpf/prog_iter.c @@ -81,17 +81,21 @@ static const struct seq_operations bpf_prog_seq_ops = { BTF_ID_LIST(btf_bpf_prog_id) BTF_ID(struct, bpf_prog) -static struct bpf_iter_reg bpf_prog_reg_info = { - .target = "bpf_prog", +static const struct bpf_iter_seq_info bpf_prog_seq_info = { .seq_ops = &bpf_prog_seq_ops, .init_seq_private = NULL, .fini_seq_private = NULL, .seq_priv_size = sizeof(struct bpf_iter_seq_prog_info), +}; + +static struct bpf_iter_reg bpf_prog_reg_info = { + .target = "bpf_prog", .ctx_arg_info_size = 1, .ctx_arg_info = { { offsetof(struct bpf_iter__bpf_prog, prog), PTR_TO_BTF_ID_OR_NULL }, }, + .seq_info = &bpf_prog_seq_info, }; static int __init bpf_prog_iter_init(void) diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c index 1039e52ebd8b..6d9cd23869bf 100644 --- a/kernel/bpf/task_iter.c +++ b/kernel/bpf/task_iter.c @@ -319,25 +319,32 @@ BTF_ID_LIST(btf_task_file_ids) BTF_ID(struct, task_struct) BTF_ID(struct, file) -static struct bpf_iter_reg task_reg_info = { - .target = "task", +static const struct bpf_iter_seq_info task_seq_info = { .seq_ops = &task_seq_ops, .init_seq_private = init_seq_pidns, .fini_seq_private = fini_seq_pidns, .seq_priv_size = sizeof(struct bpf_iter_seq_task_info), +}; + +static struct bpf_iter_reg task_reg_info = { + .target = "task", .ctx_arg_info_size = 1, .ctx_arg_info = { { offsetof(struct bpf_iter__task, task), PTR_TO_BTF_ID_OR_NULL }, }, + .seq_info = &task_seq_info, }; -static struct bpf_iter_reg task_file_reg_info = { - .target = "task_file", +static const struct bpf_iter_seq_info task_file_seq_info = { .seq_ops = &task_file_seq_ops, .init_seq_private = init_seq_pidns, .fini_seq_private = fini_seq_pidns, .seq_priv_size = sizeof(struct bpf_iter_seq_task_file_info), +}; + +static struct bpf_iter_reg task_file_reg_info = { + .target = "task_file", .ctx_arg_info_size = 2, .ctx_arg_info = { { offsetof(struct bpf_iter__task_file, task), @@ -345,6 +352,7 @@ static struct bpf_iter_reg task_file_reg_info = { { offsetof(struct bpf_iter__task_file, file), PTR_TO_BTF_ID_OR_NULL }, }, + .seq_info = &task_file_seq_info, }; static int __init task_iter_init(void) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index f8913923a6c0..cb288fdcf2ca 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2947,17 +2947,21 @@ static void bpf_iter_fini_tcp(void *priv_data) bpf_iter_fini_seq_net(priv_data); } -static struct bpf_iter_reg tcp_reg_info = { - .target = "tcp", +static const struct bpf_iter_seq_info tcp_seq_info = { .seq_ops = &bpf_iter_tcp_seq_ops, .init_seq_private = bpf_iter_init_tcp, .fini_seq_private = bpf_iter_fini_tcp, .seq_priv_size = sizeof(struct tcp_iter_state), +}; + +static struct bpf_iter_reg tcp_reg_info = { + .target = "tcp", .ctx_arg_info_size = 1, .ctx_arg_info = { { offsetof(struct bpf_iter__tcp, 
sk_common), PTR_TO_BTF_ID_OR_NULL }, }, + .seq_info = &tcp_seq_info, }; static void __init bpf_iter_register(void) diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 0fb5e4ea133f..1bc50ec2caef 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -3208,17 +3208,21 @@ static void bpf_iter_fini_udp(void *priv_data) bpf_iter_fini_seq_net(priv_data); } -static struct bpf_iter_reg udp_reg_info = { - .target = "udp", +static const struct bpf_iter_seq_info udp_seq_info = { .seq_ops = &bpf_iter_udp_seq_ops, .init_seq_private = bpf_iter_init_udp, .fini_seq_private = bpf_iter_fini_udp, .seq_priv_size = sizeof(struct udp_iter_state), +}; + +static struct bpf_iter_reg udp_reg_info = { + .target = "udp", .ctx_arg_info_size = 1, .ctx_arg_info = { { offsetof(struct bpf_iter__udp, udp_sk), PTR_TO_BTF_ID_OR_NULL }, }, + .seq_info = &udp_seq_info, }; static void __init bpf_iter_register(void) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 33f5efbad0a9..8bfc57b0802a 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -6427,17 +6427,21 @@ DEFINE_BPF_ITER_FUNC(ipv6_route, struct bpf_iter_meta *meta, struct fib6_info *r BTF_ID_LIST(btf_fib6_info_id) BTF_ID(struct, fib6_info) -static struct bpf_iter_reg ipv6_route_reg_info = { - .target = "ipv6_route", +static const struct bpf_iter_seq_info ipv6_route_seq_info = { .seq_ops = &ipv6_route_seq_ops, .init_seq_private = bpf_iter_init_seq_net, .fini_seq_private = bpf_iter_fini_seq_net, .seq_priv_size = sizeof(struct ipv6_route_iter), +}; + +static struct bpf_iter_reg ipv6_route_reg_info = { + .target = "ipv6_route", .ctx_arg_info_size = 1, .ctx_arg_info = { { offsetof(struct bpf_iter__ipv6_route, rt), PTR_TO_BTF_ID_OR_NULL }, }, + .seq_info = &ipv6_route_seq_info, }; static int __init bpf_iter_register(void) diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index d8921b833744..b5f30d7d30d0 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2807,17 +2807,21 @@ static const struct rhashtable_params netlink_rhashtable_params = { BTF_ID_LIST(btf_netlink_sock_id) BTF_ID(struct, netlink_sock) -static struct bpf_iter_reg netlink_reg_info = { - .target = "netlink", +static const struct bpf_iter_seq_info netlink_seq_info = { .seq_ops = &netlink_seq_ops, .init_seq_private = bpf_iter_init_seq_net, .fini_seq_private = bpf_iter_fini_seq_net, .seq_priv_size = sizeof(struct nl_seq_iter), +}; + +static struct bpf_iter_reg netlink_reg_info = { + .target = "netlink", .ctx_arg_info_size = 1, .ctx_arg_info = { { offsetof(struct bpf_iter__netlink, sk), PTR_TO_BTF_ID_OR_NULL }, }, + .seq_info = &netlink_seq_info, }; static int __init bpf_iter_register(void) -- cgit v1.2.3 From f9c792729581bd8b8473af163e8ab426c2c61d89 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 23 Jul 2020 11:41:10 -0700 Subject: bpf: Refactor to provide aux info to bpf_iter_init_seq_priv_t This patch refactored target bpf_iter_init_seq_priv_t callback function to accept additional information. This will be needed in later patches for map element targets since a particular map should be passed to traverse elements for that particular map. In the future, other information may be passed to target as well, e.g., pid, cgroup id, etc. to customize the iterator. 
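A hypothetical target-side callback written against the new signature could look like the sketch below; the "foo" names are invented, and the aux field mentioned in the comments is only added by the map-element iterator later in this series:

/* Sketch only: an init_seq_private callback after this change. */
struct foo_iter_state {
        struct bpf_map *map;    /* would come from aux->map once a later
                                 * patch in this series adds that field */
        u32 cursor;
};

static int foo_init_seq(void *priv_data, struct bpf_iter_aux_info *aux)
{
        struct foo_iter_state *st = priv_data;

        st->cursor = 0;
        /* bpf_iter_aux_info carries no members yet; targets that do not
         * need it simply ignore the extra argument, as the converted
         * net and task targets in this patch do.
         */
        return 0;
}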
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200723184110.590156-1-yhs@fb.com --- fs/proc/proc_net.c | 2 +- include/linux/bpf.h | 7 ++++++- include/linux/proc_fs.h | 3 ++- kernel/bpf/bpf_iter.c | 2 +- kernel/bpf/task_iter.c | 2 +- net/ipv4/tcp_ipv4.c | 4 ++-- net/ipv4/udp.c | 4 ++-- 7 files changed, 15 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c index dba63b2429f0..ed8a6306990c 100644 --- a/fs/proc/proc_net.c +++ b/fs/proc/proc_net.c @@ -98,7 +98,7 @@ static const struct proc_ops proc_net_seq_ops = { .proc_release = seq_release_net, }; -int bpf_iter_init_seq_net(void *priv_data) +int bpf_iter_init_seq_net(void *priv_data, struct bpf_iter_aux_info *aux) { #ifdef CONFIG_NET_NS struct seq_net_private *p = priv_data; diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 127067f71fd4..ef52717336cf 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -33,11 +33,13 @@ struct btf; struct btf_type; struct exception_table_entry; struct seq_operations; +struct bpf_iter_aux_info; extern struct idr btf_idr; extern spinlock_t btf_idr_lock; -typedef int (*bpf_iter_init_seq_priv_t)(void *private_data); +typedef int (*bpf_iter_init_seq_priv_t)(void *private_data, + struct bpf_iter_aux_info *aux); typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data); struct bpf_iter_seq_info { const struct seq_operations *seq_ops; @@ -1198,6 +1200,9 @@ int bpf_obj_get_user(const char __user *pathname, int flags); extern int bpf_iter_ ## target(args); \ int __init bpf_iter_ ## target(args) { return 0; } +struct bpf_iter_aux_info { +}; + #define BPF_ITER_CTX_ARG_MAX 2 struct bpf_iter_reg { const char *target; diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index d1eed1b43651..2df965cd0974 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -133,7 +133,8 @@ struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mo void *data); extern struct pid *tgid_pidfd_to_pid(const struct file *file); -extern int bpf_iter_init_seq_net(void *priv_data); +struct bpf_iter_aux_info; +extern int bpf_iter_init_seq_net(void *priv_data, struct bpf_iter_aux_info *aux); extern void bpf_iter_fini_seq_net(void *priv_data); #ifdef CONFIG_PROC_PID_ARCH_STATUS diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c index 5b2387d6aa1f..8fa94cb1b5a0 100644 --- a/kernel/bpf/bpf_iter.c +++ b/kernel/bpf/bpf_iter.c @@ -442,7 +442,7 @@ static int prepare_seq_file(struct file *file, struct bpf_iter_link *link) } if (tinfo->reg_info->seq_info->init_seq_private) { - err = tinfo->reg_info->seq_info->init_seq_private(priv_data->target_private); + err = tinfo->reg_info->seq_info->init_seq_private(priv_data->target_private, NULL); if (err) goto release_seq_file; } diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c index 6d9cd23869bf..232df29793e9 100644 --- a/kernel/bpf/task_iter.c +++ b/kernel/bpf/task_iter.c @@ -293,7 +293,7 @@ static void task_file_seq_stop(struct seq_file *seq, void *v) } } -static int init_seq_pidns(void *priv_data) +static int init_seq_pidns(void *priv_data, struct bpf_iter_aux_info *aux) { struct bpf_iter_seq_task_common *common = priv_data; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index cb288fdcf2ca..5084333b5ab6 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2921,7 +2921,7 @@ static struct pernet_operations __net_initdata tcp_sk_ops = { DEFINE_BPF_ITER_FUNC(tcp, struct bpf_iter_meta *meta, struct sock_common 
*sk_common, uid_t uid) -static int bpf_iter_init_tcp(void *priv_data) +static int bpf_iter_init_tcp(void *priv_data, struct bpf_iter_aux_info *aux) { struct tcp_iter_state *st = priv_data; struct tcp_seq_afinfo *afinfo; @@ -2933,7 +2933,7 @@ static int bpf_iter_init_tcp(void *priv_data) afinfo->family = AF_UNSPEC; st->bpf_seq_afinfo = afinfo; - ret = bpf_iter_init_seq_net(priv_data); + ret = bpf_iter_init_seq_net(priv_data, aux); if (ret) kfree(afinfo); return ret; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 1bc50ec2caef..7ce31beccfc2 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -3181,7 +3181,7 @@ static struct pernet_operations __net_initdata udp_sysctl_ops = { DEFINE_BPF_ITER_FUNC(udp, struct bpf_iter_meta *meta, struct udp_sock *udp_sk, uid_t uid, int bucket) -static int bpf_iter_init_udp(void *priv_data) +static int bpf_iter_init_udp(void *priv_data, struct bpf_iter_aux_info *aux) { struct udp_iter_state *st = priv_data; struct udp_seq_afinfo *afinfo; @@ -3194,7 +3194,7 @@ static int bpf_iter_init_udp(void *priv_data) afinfo->family = AF_UNSPEC; afinfo->udp_table = &udp_table; st->bpf_seq_afinfo = afinfo; - ret = bpf_iter_init_seq_net(priv_data); + ret = bpf_iter_init_seq_net(priv_data, aux); if (ret) kfree(afinfo); return ret; -- cgit v1.2.3 From afbf21dce668ef59482037596eaffbe5041e094c Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 23 Jul 2020 11:41:11 -0700 Subject: bpf: Support readonly/readwrite buffers in verifier Readonly and readwrite buffer register states are introduced. Totally four states, PTR_TO_RDONLY_BUF[_OR_NULL] and PTR_TO_RDWR_BUF[_OR_NULL] are supported. As suggested by their respective names, PTR_TO_RDONLY_BUF[_OR_NULL] are for readonly buffers and PTR_TO_RDWR_BUF[_OR_NULL] for read/write buffers. These new register states will be used by later bpf map element iterator. New register states share some similarity to PTR_TO_TP_BUFFER as it will calculate accessed buffer size during verification time. The accessed buffer size will be later compared to other metrics during later attach/link_create time. Similar to reg_state PTR_TO_BTF_ID_OR_NULL in bpf iterator programs, PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL reg_types can be set at prog->aux->bpf_ctx_arg_aux, and bpf verifier will retrieve the values during btf_ctx_access(). Later bpf map element iterator implementation will show how such information will be assigned during target registeration time. The verifier is also enhanced such that PTR_TO_RDONLY_BUF can be passed to ARG_PTR_TO_MEM[_OR_NULL] helper argument, and PTR_TO_RDWR_BUF can be passed to ARG_PTR_TO_MEM[_OR_NULL] or ARG_PTR_TO_UNINIT_MEM. 
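In program terms (using the bpf_iter__bpf_map_elem context that a later patch in this series introduces, and assuming a map with __u32 keys and __u64 values purely for illustration), the new register states behave roughly as follows:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

SEC("iter/bpf_map_elem")
int rw_rules(struct bpf_iter__bpf_map_elem *ctx)
{
        __u32 *key = ctx->key;          /* PTR_TO_RDONLY_BUF_OR_NULL */
        __u64 *val = ctx->value;        /* PTR_TO_RDWR_BUF_OR_NULL */

        if (!key || !val)               /* NULL checks strip the _OR_NULL part */
                return 0;

        *val += *key;                   /* rdonly read plus rdwr write: accepted */
        /* *key = 0;                       write into the rdonly buffer: rejected */
        return 0;
}

The verifier records the largest offset touched through each pointer in max_rdonly_access and max_rdwr_access, which attach/link_create can later compare against the real key and value sizes.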
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200723184111.590274-1-yhs@fb.com --- include/linux/bpf.h | 6 ++++ kernel/bpf/btf.c | 13 ++++++++ kernel/bpf/verifier.c | 91 +++++++++++++++++++++++++++++++++++++++++++++++---- 3 files changed, 104 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index ef52717336cf..f9c4bb08f616 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -353,6 +353,10 @@ enum bpf_reg_type { PTR_TO_BTF_ID_OR_NULL, /* reg points to kernel struct or NULL */ PTR_TO_MEM, /* reg points to valid memory region */ PTR_TO_MEM_OR_NULL, /* reg points to valid memory region or NULL */ + PTR_TO_RDONLY_BUF, /* reg points to a readonly buffer */ + PTR_TO_RDONLY_BUF_OR_NULL, /* reg points to a readonly buffer or NULL */ + PTR_TO_RDWR_BUF, /* reg points to a read/write buffer */ + PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */ }; /* The information passed from prog-specific *_is_valid_access @@ -694,6 +698,8 @@ struct bpf_prog_aux { u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */ u32 attach_btf_id; /* in-kernel BTF type id to attach to */ u32 ctx_arg_info_size; + u32 max_rdonly_access; + u32 max_rdwr_access; const struct bpf_ctx_arg_aux *ctx_arg_info; struct bpf_prog *linked_prog; bool verifier_zext; /* Zero extensions has been inserted by verifier. */ diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index ee36b7f60936..0fd6bb62be3a 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -3806,6 +3806,19 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type, btf_kind_str[BTF_INFO_KIND(t->info)]); return false; } + + /* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */ + for (i = 0; i < prog->aux->ctx_arg_info_size; i++) { + const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i]; + + if (ctx_arg_info->offset == off && + (ctx_arg_info->reg_type == PTR_TO_RDONLY_BUF_OR_NULL || + ctx_arg_info->reg_type == PTR_TO_RDWR_BUF_OR_NULL)) { + info->reg_type = ctx_arg_info->reg_type; + return true; + } + } + if (t->type == 0) /* This is a pointer to void. * It is the same as scalar from the verifier safety pov. 
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 9a6703bc3f36..8d6979db48d8 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -409,7 +409,9 @@ static bool reg_type_may_be_null(enum bpf_reg_type type) type == PTR_TO_SOCK_COMMON_OR_NULL || type == PTR_TO_TCP_SOCK_OR_NULL || type == PTR_TO_BTF_ID_OR_NULL || - type == PTR_TO_MEM_OR_NULL; + type == PTR_TO_MEM_OR_NULL || + type == PTR_TO_RDONLY_BUF_OR_NULL || + type == PTR_TO_RDWR_BUF_OR_NULL; } static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg) @@ -503,6 +505,10 @@ static const char * const reg_type_str[] = { [PTR_TO_BTF_ID_OR_NULL] = "ptr_or_null_", [PTR_TO_MEM] = "mem", [PTR_TO_MEM_OR_NULL] = "mem_or_null", + [PTR_TO_RDONLY_BUF] = "rdonly_buf", + [PTR_TO_RDONLY_BUF_OR_NULL] = "rdonly_buf_or_null", + [PTR_TO_RDWR_BUF] = "rdwr_buf", + [PTR_TO_RDWR_BUF_OR_NULL] = "rdwr_buf_or_null", }; static char slot_type_char[] = { @@ -2173,6 +2179,10 @@ static bool is_spillable_regtype(enum bpf_reg_type type) case PTR_TO_XDP_SOCK: case PTR_TO_BTF_ID: case PTR_TO_BTF_ID_OR_NULL: + case PTR_TO_RDONLY_BUF: + case PTR_TO_RDONLY_BUF_OR_NULL: + case PTR_TO_RDWR_BUF: + case PTR_TO_RDWR_BUF_OR_NULL: return true; default: return false; @@ -3052,14 +3062,15 @@ int check_ctx_reg(struct bpf_verifier_env *env, return 0; } -static int check_tp_buffer_access(struct bpf_verifier_env *env, - const struct bpf_reg_state *reg, - int regno, int off, int size) +static int __check_buffer_access(struct bpf_verifier_env *env, + const char *buf_info, + const struct bpf_reg_state *reg, + int regno, int off, int size) { if (off < 0) { verbose(env, - "R%d invalid tracepoint buffer access: off=%d, size=%d", - regno, off, size); + "R%d invalid %s buffer access: off=%d, size=%d", + regno, buf_info, off, size); return -EACCES; } if (!tnum_is_const(reg->var_off) || reg->var_off.value) { @@ -3071,12 +3082,45 @@ static int check_tp_buffer_access(struct bpf_verifier_env *env, regno, off, tn_buf); return -EACCES; } + + return 0; +} + +static int check_tp_buffer_access(struct bpf_verifier_env *env, + const struct bpf_reg_state *reg, + int regno, int off, int size) +{ + int err; + + err = __check_buffer_access(env, "tracepoint", reg, regno, off, size); + if (err) + return err; + if (off + size > env->prog->aux->max_tp_access) env->prog->aux->max_tp_access = off + size; return 0; } +static int check_buffer_access(struct bpf_verifier_env *env, + const struct bpf_reg_state *reg, + int regno, int off, int size, + bool zero_size_allowed, + const char *buf_info, + u32 *max_access) +{ + int err; + + err = __check_buffer_access(env, buf_info, reg, regno, off, size); + if (err) + return err; + + if (off + size > *max_access) + *max_access = off + size; + + return 0; +} + /* BPF architecture zero extends alu32 ops into 64-bit registesr */ static void zext_32_to_64(struct bpf_reg_state *reg) { @@ -3427,6 +3471,23 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn } else if (reg->type == CONST_PTR_TO_MAP) { err = check_ptr_to_map_access(env, regs, regno, off, size, t, value_regno); + } else if (reg->type == PTR_TO_RDONLY_BUF) { + if (t == BPF_WRITE) { + verbose(env, "R%d cannot write into %s\n", + regno, reg_type_str[reg->type]); + return -EACCES; + } + err = check_buffer_access(env, reg, regno, off, size, "rdonly", + false, + &env->prog->aux->max_rdonly_access); + if (!err && value_regno >= 0) + mark_reg_unknown(env, regs, value_regno); + } else if (reg->type == PTR_TO_RDWR_BUF) { + err = check_buffer_access(env, reg, 
regno, off, size, "rdwr", + false, + &env->prog->aux->max_rdwr_access); + if (!err && t == BPF_READ && value_regno >= 0) + mark_reg_unknown(env, regs, value_regno); } else { verbose(env, "R%d invalid mem access '%s'\n", regno, reg_type_str[reg->type]); @@ -3668,6 +3729,18 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, return check_mem_region_access(env, regno, reg->off, access_size, reg->mem_size, zero_size_allowed); + case PTR_TO_RDONLY_BUF: + if (meta && meta->raw_mode) + return -EACCES; + return check_buffer_access(env, reg, regno, reg->off, + access_size, zero_size_allowed, + "rdonly", + &env->prog->aux->max_rdonly_access); + case PTR_TO_RDWR_BUF: + return check_buffer_access(env, reg, regno, reg->off, + access_size, zero_size_allowed, + "rdwr", + &env->prog->aux->max_rdwr_access); default: /* scalar_value|ptr_to_stack or invalid ptr */ return check_stack_boundary(env, regno, access_size, zero_size_allowed, meta); @@ -3933,6 +4006,8 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg, else if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE && type != PTR_TO_MEM && + type != PTR_TO_RDONLY_BUF && + type != PTR_TO_RDWR_BUF && type != expected_type) goto err_type; meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM; @@ -6806,6 +6881,10 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state, reg->type = PTR_TO_BTF_ID; } else if (reg->type == PTR_TO_MEM_OR_NULL) { reg->type = PTR_TO_MEM; + } else if (reg->type == PTR_TO_RDONLY_BUF_OR_NULL) { + reg->type = PTR_TO_RDONLY_BUF; + } else if (reg->type == PTR_TO_RDWR_BUF_OR_NULL) { + reg->type = PTR_TO_RDWR_BUF; } if (is_null) { /* We don't need id and ref_obj_id from this point -- cgit v1.2.3 From a5cbe05a6673b85bed2a63ffcfea6a96c6410cff Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 23 Jul 2020 11:41:12 -0700 Subject: bpf: Implement bpf iterator for map elements The bpf iterator for map elements are implemented. The bpf program will receive four parameters: bpf_iter_meta *meta: the meta data bpf_map *map: the bpf_map whose elements are traversed void *key: the key of one element void *value: the value of the same element Here, meta and map pointers are always valid, and key has register type PTR_TO_RDONLY_BUF_OR_NULL and value has register type PTR_TO_RDWR_BUF_OR_NULL. The kernel will track the access range of key and value during verification time. Later, these values will be compared against the values in the actual map to ensure all accesses are within range. A new field iter_seq_info is added to bpf_map_ops which is used to add map type specific information, i.e., seq_ops, init/fini seq_file func and seq_file private data size. Subsequent patches will have actual implementation for bpf_map_ops->iter_seq_info. In user space, BPF_ITER_LINK_MAP_FD needs to be specified in prog attr->link_create.flags, which indicates that attr->link_create.target_fd is a map_fd. The reason for such an explicit flag is for possible future cases where one bpf iterator may allow more than one possible customization, e.g., pid and cgroup id for task_file. Current kernel internal implementation only allows the target to register at most one required bpf_iter_link_info. To support the above case, optional bpf_iter_link_info's are needed, the target can be extended to register such link infos, and user provided link_info needs to match one of target supported ones. 
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200723184112.590360-1-yhs@fb.com --- include/linux/bpf.h | 16 ++++++++ include/uapi/linux/bpf.h | 7 ++++ kernel/bpf/bpf_iter.c | 85 ++++++++++++++++++++++++++++++++++-------- kernel/bpf/map_iter.c | 30 ++++++++++++++- tools/include/uapi/linux/bpf.h | 7 ++++ 5 files changed, 128 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index f9c4bb08f616..4175cf1f4665 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -107,6 +107,9 @@ struct bpf_map_ops { /* BTF name and id of struct allocated by map_alloc */ const char * const map_btf_name; int *map_btf_id; + + /* bpf_iter info used to open a seq_file */ + const struct bpf_iter_seq_info *iter_seq_info; }; struct bpf_map_memory { @@ -1207,12 +1210,18 @@ int bpf_obj_get_user(const char __user *pathname, int flags); int __init bpf_iter_ ## target(args) { return 0; } struct bpf_iter_aux_info { + struct bpf_map *map; }; +typedef int (*bpf_iter_check_target_t)(struct bpf_prog *prog, + struct bpf_iter_aux_info *aux); + #define BPF_ITER_CTX_ARG_MAX 2 struct bpf_iter_reg { const char *target; + bpf_iter_check_target_t check_target; u32 ctx_arg_info_size; + enum bpf_iter_link_info req_linfo; struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX]; const struct bpf_iter_seq_info *seq_info; }; @@ -1223,6 +1232,13 @@ struct bpf_iter_meta { u64 seq_num; }; +struct bpf_iter__bpf_map_elem { + __bpf_md_ptr(struct bpf_iter_meta *, meta); + __bpf_md_ptr(struct bpf_map *, map); + __bpf_md_ptr(void *, key); + __bpf_md_ptr(void *, value); +}; + int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info); void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info); bool bpf_iter_prog_supported(struct bpf_prog *prog); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 54d0c886e3ba..828c2f6438f2 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -246,6 +246,13 @@ enum bpf_link_type { MAX_BPF_LINK_TYPE, }; +enum bpf_iter_link_info { + BPF_ITER_LINK_UNSPEC = 0, + BPF_ITER_LINK_MAP_FD = 1, + + MAX_BPF_ITER_LINK_INFO, +}; + /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command * * NONE(default): No further bpf programs allowed in the subtree. 
diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c index 8fa94cb1b5a0..363b9cafc2d8 100644 --- a/kernel/bpf/bpf_iter.c +++ b/kernel/bpf/bpf_iter.c @@ -14,11 +14,13 @@ struct bpf_iter_target_info { struct bpf_iter_link { struct bpf_link link; + struct bpf_iter_aux_info aux; struct bpf_iter_target_info *tinfo; }; struct bpf_iter_priv_data { struct bpf_iter_target_info *tinfo; + const struct bpf_iter_seq_info *seq_info; struct bpf_prog *prog; u64 session_id; u64 seq_num; @@ -35,7 +37,8 @@ static DEFINE_MUTEX(link_mutex); /* incremented on every opened seq_file */ static atomic64_t session_id; -static int prepare_seq_file(struct file *file, struct bpf_iter_link *link); +static int prepare_seq_file(struct file *file, struct bpf_iter_link *link, + const struct bpf_iter_seq_info *seq_info); static void bpf_iter_inc_seq_num(struct seq_file *seq) { @@ -199,11 +202,25 @@ done: return copied; } +static const struct bpf_iter_seq_info * +__get_seq_info(struct bpf_iter_link *link) +{ + const struct bpf_iter_seq_info *seq_info; + + if (link->aux.map) { + seq_info = link->aux.map->ops->iter_seq_info; + if (seq_info) + return seq_info; + } + + return link->tinfo->reg_info->seq_info; +} + static int iter_open(struct inode *inode, struct file *file) { struct bpf_iter_link *link = inode->i_private; - return prepare_seq_file(file, link); + return prepare_seq_file(file, link, __get_seq_info(link)); } static int iter_release(struct inode *inode, struct file *file) @@ -218,8 +235,8 @@ static int iter_release(struct inode *inode, struct file *file) iter_priv = container_of(seq->private, struct bpf_iter_priv_data, target_private); - if (iter_priv->tinfo->reg_info->seq_info->fini_seq_private) - iter_priv->tinfo->reg_info->seq_info->fini_seq_private(seq->private); + if (iter_priv->seq_info->fini_seq_private) + iter_priv->seq_info->fini_seq_private(seq->private); bpf_prog_put(iter_priv->prog); seq->private = iter_priv; @@ -318,6 +335,11 @@ bool bpf_iter_prog_supported(struct bpf_prog *prog) static void bpf_iter_link_release(struct bpf_link *link) { + struct bpf_iter_link *iter_link = + container_of(link, struct bpf_iter_link, link); + + if (iter_link->aux.map) + bpf_map_put_with_uref(iter_link->aux.map); } static void bpf_iter_link_dealloc(struct bpf_link *link) @@ -370,14 +392,13 @@ int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) { struct bpf_link_primer link_primer; struct bpf_iter_target_info *tinfo; + struct bpf_iter_aux_info aux = {}; struct bpf_iter_link *link; + u32 prog_btf_id, target_fd; bool existed = false; - u32 prog_btf_id; + struct bpf_map *map; int err; - if (attr->link_create.target_fd || attr->link_create.flags) - return -EINVAL; - prog_btf_id = prog->aux->attach_btf_id; mutex_lock(&targets_mutex); list_for_each_entry(tinfo, &targets, list) { @@ -390,6 +411,13 @@ int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) if (!existed) return -ENOENT; + /* Make sure user supplied flags are target expected. 
*/ + target_fd = attr->link_create.target_fd; + if (attr->link_create.flags != tinfo->reg_info->req_linfo) + return -EINVAL; + if (!attr->link_create.flags && target_fd) + return -EINVAL; + link = kzalloc(sizeof(*link), GFP_USER | __GFP_NOWARN); if (!link) return -ENOMEM; @@ -403,21 +431,45 @@ int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) return err; } + if (tinfo->reg_info->req_linfo == BPF_ITER_LINK_MAP_FD) { + map = bpf_map_get_with_uref(target_fd); + if (IS_ERR(map)) { + err = PTR_ERR(map); + goto cleanup_link; + } + + aux.map = map; + err = tinfo->reg_info->check_target(prog, &aux); + if (err) { + bpf_map_put_with_uref(map); + goto cleanup_link; + } + + link->aux.map = map; + } + return bpf_link_settle(&link_primer); + +cleanup_link: + bpf_link_cleanup(&link_primer); + return err; } static void init_seq_meta(struct bpf_iter_priv_data *priv_data, struct bpf_iter_target_info *tinfo, + const struct bpf_iter_seq_info *seq_info, struct bpf_prog *prog) { priv_data->tinfo = tinfo; + priv_data->seq_info = seq_info; priv_data->prog = prog; priv_data->session_id = atomic64_inc_return(&session_id); priv_data->seq_num = 0; priv_data->done_stop = false; } -static int prepare_seq_file(struct file *file, struct bpf_iter_link *link) +static int prepare_seq_file(struct file *file, struct bpf_iter_link *link, + const struct bpf_iter_seq_info *seq_info) { struct bpf_iter_priv_data *priv_data; struct bpf_iter_target_info *tinfo; @@ -433,21 +485,21 @@ static int prepare_seq_file(struct file *file, struct bpf_iter_link *link) tinfo = link->tinfo; total_priv_dsize = offsetof(struct bpf_iter_priv_data, target_private) + - tinfo->reg_info->seq_info->seq_priv_size; - priv_data = __seq_open_private(file, tinfo->reg_info->seq_info->seq_ops, + seq_info->seq_priv_size; + priv_data = __seq_open_private(file, seq_info->seq_ops, total_priv_dsize); if (!priv_data) { err = -ENOMEM; goto release_prog; } - if (tinfo->reg_info->seq_info->init_seq_private) { - err = tinfo->reg_info->seq_info->init_seq_private(priv_data->target_private, NULL); + if (seq_info->init_seq_private) { + err = seq_info->init_seq_private(priv_data->target_private, &link->aux); if (err) goto release_seq_file; } - init_seq_meta(priv_data, tinfo, prog); + init_seq_meta(priv_data, tinfo, seq_info, prog); seq = file->private_data; seq->private = priv_data->target_private; @@ -463,6 +515,7 @@ release_prog: int bpf_iter_new_fd(struct bpf_link *link) { + struct bpf_iter_link *iter_link; struct file *file; unsigned int flags; int err, fd; @@ -481,8 +534,8 @@ int bpf_iter_new_fd(struct bpf_link *link) goto free_fd; } - err = prepare_seq_file(file, - container_of(link, struct bpf_iter_link, link)); + iter_link = container_of(link, struct bpf_iter_link, link); + err = prepare_seq_file(file, iter_link, __get_seq_info(iter_link)); if (err) goto free_file; diff --git a/kernel/bpf/map_iter.c b/kernel/bpf/map_iter.c index 1a69241fb1e2..8a1f9b3355d0 100644 --- a/kernel/bpf/map_iter.c +++ b/kernel/bpf/map_iter.c @@ -98,10 +98,38 @@ static struct bpf_iter_reg bpf_map_reg_info = { .seq_info = &bpf_map_seq_info, }; +static int bpf_iter_check_map(struct bpf_prog *prog, + struct bpf_iter_aux_info *aux) +{ + return -EINVAL; +} + +DEFINE_BPF_ITER_FUNC(bpf_map_elem, struct bpf_iter_meta *meta, + struct bpf_map *map, void *key, void *value) + +static const struct bpf_iter_reg bpf_map_elem_reg_info = { + .target = "bpf_map_elem", + .check_target = bpf_iter_check_map, + .req_linfo = BPF_ITER_LINK_MAP_FD, + .ctx_arg_info_size = 2, + .ctx_arg_info = 
{ + { offsetof(struct bpf_iter__bpf_map_elem, key), + PTR_TO_RDONLY_BUF_OR_NULL }, + { offsetof(struct bpf_iter__bpf_map_elem, value), + PTR_TO_RDWR_BUF_OR_NULL }, + }, +}; + static int __init bpf_map_iter_init(void) { + int ret; + bpf_map_reg_info.ctx_arg_info[0].btf_id = *btf_bpf_map_id; - return bpf_iter_reg_target(&bpf_map_reg_info); + ret = bpf_iter_reg_target(&bpf_map_reg_info); + if (ret) + return ret; + + return bpf_iter_reg_target(&bpf_map_elem_reg_info); } late_initcall(bpf_map_iter_init); diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 54d0c886e3ba..828c2f6438f2 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -246,6 +246,13 @@ enum bpf_link_type { MAX_BPF_LINK_TYPE, }; +enum bpf_iter_link_info { + BPF_ITER_LINK_UNSPEC = 0, + BPF_ITER_LINK_MAP_FD = 1, + + MAX_BPF_ITER_LINK_INFO, +}; + /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command * * NONE(default): No further bpf programs allowed in the subtree. -- cgit v1.2.3 From 7b04d6d60fcfb5b2200ffebb9cfb90927bdfeec7 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Thu, 23 Jul 2020 11:06:44 -0700 Subject: bpf: Separate bpf_get_[stack|stackid] for perf events BPF Calling get_perf_callchain() on perf_events from PEBS entries may cause unwinder errors. To fix this issue, the callchain is fetched early. Such perf_events are marked with __PERF_SAMPLE_CALLCHAIN_EARLY. Similarly, calling bpf_get_[stack|stackid] on perf_events from PEBS may also cause unwinder errors. To fix this, add separate version of these two helpers, bpf_get_[stack|stackid]_pe. These two hepers use callchain in bpf_perf_event_data_kern->data->callchain. Signed-off-by: Song Liu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200723180648.1429892-2-songliubraving@fb.com --- include/linux/bpf.h | 2 + kernel/bpf/stackmap.c | 184 ++++++++++++++++++++++++++++++++++++++++++----- kernel/trace/bpf_trace.c | 4 +- 3 files changed, 170 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 4175cf1f4665..8357be349133 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1675,6 +1675,8 @@ extern const struct bpf_func_proto bpf_get_current_comm_proto; extern const struct bpf_func_proto bpf_get_stackid_proto; extern const struct bpf_func_proto bpf_get_stack_proto; extern const struct bpf_func_proto bpf_get_task_stack_proto; +extern const struct bpf_func_proto bpf_get_stackid_proto_pe; +extern const struct bpf_func_proto bpf_get_stack_proto_pe; extern const struct bpf_func_proto bpf_sock_map_update_proto; extern const struct bpf_func_proto bpf_sock_hash_update_proto; extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 48d8e739975f..5beb2f8c23da 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -387,11 +388,10 @@ get_callchain_entry_for_task(struct task_struct *task, u32 init_nr) #endif } -BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map, - u64, flags) +static long __bpf_get_stackid(struct bpf_map *map, + struct perf_callchain_entry *trace, u64 flags) { struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map); - struct perf_callchain_entry *trace; struct stack_map_bucket *bucket, *new_bucket, *old_bucket; u32 max_depth = map->value_size / stack_map_data_size(map); /* stack_map_alloc() checks that max_depth <= 
sysctl_perf_event_max_stack */ @@ -399,21 +399,9 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map, u32 skip = flags & BPF_F_SKIP_FIELD_MASK; u32 hash, id, trace_nr, trace_len; bool user = flags & BPF_F_USER_STACK; - bool kernel = !user; u64 *ips; bool hash_matches; - if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK | - BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID))) - return -EINVAL; - - trace = get_perf_callchain(regs, init_nr, kernel, user, - sysctl_perf_event_max_stack, false, false); - - if (unlikely(!trace)) - /* couldn't fetch the stack trace */ - return -EFAULT; - /* get_perf_callchain() guarantees that trace->nr >= init_nr * and trace-nr <= sysctl_perf_event_max_stack, so trace_nr <= max_depth */ @@ -478,6 +466,30 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map, return id; } +BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map, + u64, flags) +{ + u32 max_depth = map->value_size / stack_map_data_size(map); + /* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */ + u32 init_nr = sysctl_perf_event_max_stack - max_depth; + bool user = flags & BPF_F_USER_STACK; + struct perf_callchain_entry *trace; + bool kernel = !user; + + if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK | + BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID))) + return -EINVAL; + + trace = get_perf_callchain(regs, init_nr, kernel, user, + sysctl_perf_event_max_stack, false, false); + + if (unlikely(!trace)) + /* couldn't fetch the stack trace */ + return -EFAULT; + + return __bpf_get_stackid(map, trace, flags); +} + const struct bpf_func_proto bpf_get_stackid_proto = { .func = bpf_get_stackid, .gpl_only = true, @@ -487,7 +499,77 @@ const struct bpf_func_proto bpf_get_stackid_proto = { .arg3_type = ARG_ANYTHING, }; +static __u64 count_kernel_ip(struct perf_callchain_entry *trace) +{ + __u64 nr_kernel = 0; + + while (nr_kernel < trace->nr) { + if (trace->ip[nr_kernel] == PERF_CONTEXT_USER) + break; + nr_kernel++; + } + return nr_kernel; +} + +BPF_CALL_3(bpf_get_stackid_pe, struct bpf_perf_event_data_kern *, ctx, + struct bpf_map *, map, u64, flags) +{ + struct perf_event *event = ctx->event; + struct perf_callchain_entry *trace; + bool kernel, user; + __u64 nr_kernel; + int ret; + + /* perf_sample_data doesn't have callchain, use bpf_get_stackid */ + if (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY)) + return bpf_get_stackid((unsigned long)(ctx->regs), + (unsigned long) map, flags, 0, 0); + + if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK | + BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID))) + return -EINVAL; + + user = flags & BPF_F_USER_STACK; + kernel = !user; + + trace = ctx->data->callchain; + if (unlikely(!trace)) + return -EFAULT; + + nr_kernel = count_kernel_ip(trace); + + if (kernel) { + __u64 nr = trace->nr; + + trace->nr = nr_kernel; + ret = __bpf_get_stackid(map, trace, flags); + + /* restore nr */ + trace->nr = nr; + } else { /* user */ + u64 skip = flags & BPF_F_SKIP_FIELD_MASK; + + skip += nr_kernel; + if (skip > BPF_F_SKIP_FIELD_MASK) + return -EFAULT; + + flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip; + ret = __bpf_get_stackid(map, trace, flags); + } + return ret; +} + +const struct bpf_func_proto bpf_get_stackid_proto_pe = { + .func = bpf_get_stackid_pe, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, +}; + static long __bpf_get_stack(struct pt_regs *regs, struct 
task_struct *task, + struct perf_callchain_entry *trace_in, void *buf, u32 size, u64 flags) { u32 init_nr, trace_nr, copy_len, elem_size, num_elem; @@ -520,7 +602,9 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task, else init_nr = sysctl_perf_event_max_stack - num_elem; - if (kernel && task) + if (trace_in) + trace = trace_in; + else if (kernel && task) trace = get_callchain_entry_for_task(task, init_nr); else trace = get_perf_callchain(regs, init_nr, kernel, user, @@ -556,7 +640,7 @@ clear: BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size, u64, flags) { - return __bpf_get_stack(regs, NULL, buf, size, flags); + return __bpf_get_stack(regs, NULL, NULL, buf, size, flags); } const struct bpf_func_proto bpf_get_stack_proto = { @@ -574,7 +658,7 @@ BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf, { struct pt_regs *regs = task_pt_regs(task); - return __bpf_get_stack(regs, task, buf, size, flags); + return __bpf_get_stack(regs, task, NULL, buf, size, flags); } BTF_ID_LIST(bpf_get_task_stack_btf_ids) @@ -591,6 +675,70 @@ const struct bpf_func_proto bpf_get_task_stack_proto = { .btf_id = bpf_get_task_stack_btf_ids, }; +BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx, + void *, buf, u32, size, u64, flags) +{ + struct perf_event *event = ctx->event; + struct perf_callchain_entry *trace; + bool kernel, user; + int err = -EINVAL; + __u64 nr_kernel; + + if (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY)) + return __bpf_get_stack(ctx->regs, NULL, NULL, buf, size, flags); + + if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK | + BPF_F_USER_BUILD_ID))) + goto clear; + + user = flags & BPF_F_USER_STACK; + kernel = !user; + + err = -EFAULT; + trace = ctx->data->callchain; + if (unlikely(!trace)) + goto clear; + + nr_kernel = count_kernel_ip(trace); + + if (kernel) { + __u64 nr = trace->nr; + + trace->nr = nr_kernel; + err = __bpf_get_stack(ctx->regs, NULL, trace, buf, + size, flags); + + /* restore nr */ + trace->nr = nr; + } else { /* user */ + u64 skip = flags & BPF_F_SKIP_FIELD_MASK; + + skip += nr_kernel; + if (skip > BPF_F_SKIP_FIELD_MASK) + goto clear; + + flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip; + err = __bpf_get_stack(ctx->regs, NULL, trace, buf, + size, flags); + } + return err; + +clear: + memset(buf, 0, size); + return err; + +} + +const struct bpf_func_proto bpf_get_stack_proto_pe = { + .func = bpf_get_stack_pe, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_PTR_TO_UNINIT_MEM, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, + .arg4_type = ARG_ANYTHING, +}; + /* Called from eBPF program */ static void *stack_map_lookup_elem(struct bpf_map *map, void *key) { diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 3cc0dcb60ca2..cb91ef902cc4 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1411,9 +1411,9 @@ pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) case BPF_FUNC_perf_event_output: return &bpf_perf_event_output_proto_tp; case BPF_FUNC_get_stackid: - return &bpf_get_stackid_proto_tp; + return &bpf_get_stackid_proto_pe; case BPF_FUNC_get_stack: - return &bpf_get_stack_proto_tp; + return &bpf_get_stack_proto_pe; case BPF_FUNC_perf_prog_read_value: return &bpf_perf_prog_read_value_proto; case BPF_FUNC_read_branch_records: -- cgit v1.2.3 From 5d99cb2c86775b4780c02a339a9578bf9471ead9 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Thu, 23 Jul 2020 11:06:45 -0700 Subject: 
bpf: Fail PERF_EVENT_IOC_SET_BPF when bpf_get_[stack|stackid] cannot work bpf_get_[stack|stackid] on perf_events with precise_ip uses callchain attached to perf_sample_data. If this callchain is not presented, do not allow attaching BPF program that calls bpf_get_[stack|stackid] to this event. In the error case, -EPROTO is returned so that libbpf can identify this error and print proper hint message. Signed-off-by: Song Liu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200723180648.1429892-3-songliubraving@fb.com --- include/linux/filter.h | 3 ++- kernel/bpf/verifier.c | 3 +++ kernel/events/core.c | 18 ++++++++++++++++++ 3 files changed, 23 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/filter.h b/include/linux/filter.h index d07a6e973a7d..0a355b005bf4 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -533,7 +533,8 @@ struct bpf_prog { is_func:1, /* program is a bpf function */ kprobe_override:1, /* Do we override a kprobe? */ has_callchain_buf:1, /* callchain buffer allocated? */ - enforce_expected_attach_type:1; /* Enforce expected_attach_type checking at attach time */ + enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */ + call_get_stack:1; /* Do we call bpf_get_stack() or bpf_get_stackid() */ enum bpf_prog_type type; /* Type of BPF program */ enum bpf_attach_type expected_attach_type; /* For some prog types */ u32 len; /* Number of filter blocks */ diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 8d6979db48d8..cd14e70f2d07 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -4962,6 +4962,9 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn env->prog->has_callchain_buf = true; } + if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack) + env->prog->call_get_stack = true; + if (changes_data) clear_all_pkt_pointers(env); return 0; diff --git a/kernel/events/core.c b/kernel/events/core.c index 856d98c36f56..ddcfd2fb5cc5 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -9544,6 +9544,24 @@ static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd) if (IS_ERR(prog)) return PTR_ERR(prog); + if (event->attr.precise_ip && + prog->call_get_stack && + (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY) || + event->attr.exclude_callchain_kernel || + event->attr.exclude_callchain_user)) { + /* + * On perf_event with precise_ip, calling bpf_get_stack() + * may trigger unwinder warnings and occasional crashes. + * bpf_get_[stack|stackid] works around this issue by using + * callchain attached to perf_sample_data. If the + * perf_event does not full (kernel and user) callchain + * attached to perf_sample_data, do not allow attaching BPF + * program that calls bpf_get_[stack|stackid]. + */ + bpf_prog_put(prog); + return -EPROTO; + } + event->prog = prog; event->orig_overflow_handler = READ_ONCE(event->overflow_handler); WRITE_ONCE(event->overflow_handler, bpf_overflow_handler); -- cgit v1.2.3 From 7d9c3427894fe70d1347b4820476bf37736d2ff0 Mon Sep 17 00:00:00 2001 From: YiFei Zhu Date: Thu, 23 Jul 2020 23:47:43 -0500 Subject: bpf: Make cgroup storages shared between programs on the same cgroup This change comes in several parts: One, the restriction that the CGROUP_STORAGE map can only be used by one program is removed. 
This results in the removal of the field 'aux' in struct bpf_cgroup_storage_map, of the code associated with that field, and of the now-noop functions bpf_free_cgroup_storage and bpf_cgroup_storage_release.

Second, we permit a key of type u64 for the map. Providing such a key type indicates that the map should ignore the attach type when comparing map keys. However, for simplicity, newly linked storage will still carry the attach type in its key struct at link time. cgroup_storage_check_btf is adapted to accept u64 as the key type.

Third, because the storages are now shared, they cannot be unconditionally freed on program detach. There are two ways to solve this issue:

* A. Reference count the usage of the storages, and free when the last program is detached.
* B. Free only when the storage can no longer be referred to, i.e. when either the cgroup_bpf it is attached to, or the map itself, is freed.

Option A has the side effect that, when the user detaches and reattaches a program, whether the program gets a fresh storage depends on whether another program is still attached using that storage. This could trigger races if the user is multi-threaded, and since nondeterminism in data races is evil, go with option B. Both the map and the cgroup_bpf now track their associated storages, and the storage unlink and free are removed from cgroup_bpf_detach and added to cgroup_bpf_release and cgroup_storage_map_free. The latter now also holds the cgroup_mutex to prevent any races with the former.

Fourth, on attach, we reuse the old storage if the key already exists in the map, via cgroup_storage_lookup. If the storage does not exist yet, we create a new one and publish it at the last step of the attach process. This does not create a race condition because the cgroup_mutex is held for the whole attach. We keep track of an array of newly allocated storages, and if the attach process fails, only those new storages are freed.
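For illustration only (not part of this patch): a minimal BPF-C sketch of a cgroup storage map declared with a __u64 key, which after this change is shared by every program attached to the same cgroup regardless of attach type. The declaration follows the usual libbpf BTF-defined-map conventions; map, section and function names are placeholders.

/* Sketch, not from this series: attach-type-agnostic (shared) cgroup storage. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, __u64);		/* cgroup inode id only; attach type is ignored */
	__type(value, __u64);
} shared_cnt SEC(".maps");

SEC("cgroup_skb/egress")
int count_egress(struct __sk_buff *skb)
{
	/* the same storage is returned for this cgroup from any attach type */
	__u64 *val = bpf_get_local_storage(&shared_cnt, 0);

	__sync_fetch_and_add(val, 1);
	return 1;
}

Declaring the key as struct bpf_cgroup_storage_key instead keeps the old per-attach-type isolation.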
Signed-off-by: YiFei Zhu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/d5401c6106728a00890401190db40020a1f84ff1.1595565795.git.zhuyifei@google.com --- include/linux/bpf-cgroup.h | 12 ++- kernel/bpf/cgroup.c | 67 ++++++++------ kernel/bpf/core.c | 12 --- kernel/bpf/local_storage.c | 216 ++++++++++++++++++++++++--------------------- 4 files changed, 164 insertions(+), 143 deletions(-) (limited to 'include') diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 2c6f26670acc..64f367044e25 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -46,7 +46,8 @@ struct bpf_cgroup_storage { }; struct bpf_cgroup_storage_map *map; struct bpf_cgroup_storage_key key; - struct list_head list; + struct list_head list_map; + struct list_head list_cg; struct rb_node node; struct rcu_head rcu; }; @@ -78,6 +79,9 @@ struct cgroup_bpf { struct list_head progs[MAX_BPF_ATTACH_TYPE]; u32 flags[MAX_BPF_ATTACH_TYPE]; + /* list of cgroup shared storages */ + struct list_head storages; + /* temp storage for effective prog array used by prog_attach/detach */ struct bpf_prog_array *inactive; @@ -161,6 +165,9 @@ static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage this_cpu_write(bpf_cgroup_storage[stype], storage[stype]); } +struct bpf_cgroup_storage * +cgroup_storage_lookup(struct bpf_cgroup_storage_map *map, + void *key, bool locked); struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog, enum bpf_cgroup_storage_type stype); void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage); @@ -169,7 +176,6 @@ void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage, enum bpf_attach_type type); void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage); int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map); -void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, struct bpf_map *map); int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value); int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, @@ -383,8 +389,6 @@ static inline void bpf_cgroup_storage_set( struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {} static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map) { return 0; } -static inline void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, - struct bpf_map *map) {} static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc( struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; } static inline void bpf_cgroup_storage_free( diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index ac53102e244a..957cce1d5168 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -37,17 +37,34 @@ static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[]) } static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[], - struct bpf_prog *prog) + struct bpf_cgroup_storage *new_storages[], + enum bpf_attach_type type, + struct bpf_prog *prog, + struct cgroup *cgrp) { enum bpf_cgroup_storage_type stype; + struct bpf_cgroup_storage_key key; + struct bpf_map *map; + + key.cgroup_inode_id = cgroup_id(cgrp); + key.attach_type = type; for_each_cgroup_storage_type(stype) { + map = prog->aux->cgroup_storage[stype]; + if (!map) + continue; + + storages[stype] = cgroup_storage_lookup((void *)map, &key, false); + if (storages[stype]) + continue; + storages[stype] = bpf_cgroup_storage_alloc(prog, stype); if (IS_ERR(storages[stype])) { - storages[stype] = 
NULL; - bpf_cgroup_storages_free(storages); + bpf_cgroup_storages_free(new_storages); return -ENOMEM; } + + new_storages[stype] = storages[stype]; } return 0; @@ -63,7 +80,7 @@ static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[], } static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[], - struct cgroup* cgrp, + struct cgroup *cgrp, enum bpf_attach_type attach_type) { enum bpf_cgroup_storage_type stype; @@ -72,14 +89,6 @@ static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[], bpf_cgroup_storage_link(storages[stype], cgrp, attach_type); } -static void bpf_cgroup_storages_unlink(struct bpf_cgroup_storage *storages[]) -{ - enum bpf_cgroup_storage_type stype; - - for_each_cgroup_storage_type(stype) - bpf_cgroup_storage_unlink(storages[stype]); -} - /* Called when bpf_cgroup_link is auto-detached from dying cgroup. * It drops cgroup and bpf_prog refcounts, and marks bpf_link as defunct. It * doesn't free link memory, which will eventually be done by bpf_link's @@ -101,22 +110,23 @@ static void cgroup_bpf_release(struct work_struct *work) struct cgroup *p, *cgrp = container_of(work, struct cgroup, bpf.release_work); struct bpf_prog_array *old_array; + struct list_head *storages = &cgrp->bpf.storages; + struct bpf_cgroup_storage *storage, *stmp; + unsigned int type; mutex_lock(&cgroup_mutex); for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) { struct list_head *progs = &cgrp->bpf.progs[type]; - struct bpf_prog_list *pl, *tmp; + struct bpf_prog_list *pl, *pltmp; - list_for_each_entry_safe(pl, tmp, progs, node) { + list_for_each_entry_safe(pl, pltmp, progs, node) { list_del(&pl->node); if (pl->prog) bpf_prog_put(pl->prog); if (pl->link) bpf_cgroup_link_auto_detach(pl->link); - bpf_cgroup_storages_unlink(pl->storage); - bpf_cgroup_storages_free(pl->storage); kfree(pl); static_branch_dec(&cgroup_bpf_enabled_key); } @@ -126,6 +136,11 @@ static void cgroup_bpf_release(struct work_struct *work) bpf_prog_array_free(old_array); } + list_for_each_entry_safe(storage, stmp, storages, list_cg) { + bpf_cgroup_storage_unlink(storage); + bpf_cgroup_storage_free(storage); + } + mutex_unlock(&cgroup_mutex); for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p)) @@ -290,6 +305,8 @@ int cgroup_bpf_inherit(struct cgroup *cgrp) for (i = 0; i < NR; i++) INIT_LIST_HEAD(&cgrp->bpf.progs[i]); + INIT_LIST_HEAD(&cgrp->bpf.storages); + for (i = 0; i < NR; i++) if (compute_effective_progs(cgrp, i, &arrays[i])) goto cleanup; @@ -422,7 +439,7 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct list_head *progs = &cgrp->bpf.progs[type]; struct bpf_prog *old_prog = NULL; struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {}; - struct bpf_cgroup_storage *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {}; + struct bpf_cgroup_storage *new_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {}; struct bpf_prog_list *pl; int err; @@ -455,17 +472,16 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, if (IS_ERR(pl)) return PTR_ERR(pl); - if (bpf_cgroup_storages_alloc(storage, prog ? : link->link.prog)) + if (bpf_cgroup_storages_alloc(storage, new_storage, type, + prog ? 
: link->link.prog, cgrp)) return -ENOMEM; if (pl) { old_prog = pl->prog; - bpf_cgroup_storages_unlink(pl->storage); - bpf_cgroup_storages_assign(old_storage, pl->storage); } else { pl = kmalloc(sizeof(*pl), GFP_KERNEL); if (!pl) { - bpf_cgroup_storages_free(storage); + bpf_cgroup_storages_free(new_storage); return -ENOMEM; } list_add_tail(&pl->node, progs); @@ -480,12 +496,11 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, if (err) goto cleanup; - bpf_cgroup_storages_free(old_storage); if (old_prog) bpf_prog_put(old_prog); else static_branch_inc(&cgroup_bpf_enabled_key); - bpf_cgroup_storages_link(pl->storage, cgrp, type); + bpf_cgroup_storages_link(new_storage, cgrp, type); return 0; cleanup: @@ -493,9 +508,7 @@ cleanup: pl->prog = old_prog; pl->link = NULL; } - bpf_cgroup_storages_free(pl->storage); - bpf_cgroup_storages_assign(pl->storage, old_storage); - bpf_cgroup_storages_link(pl->storage, cgrp, type); + bpf_cgroup_storages_free(new_storage); if (!old_prog) { list_del(&pl->node); kfree(pl); @@ -679,8 +692,6 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, /* now can actually delete it from this cgroup list */ list_del(&pl->node); - bpf_cgroup_storages_unlink(pl->storage); - bpf_cgroup_storages_free(pl->storage); kfree(pl); if (list_empty(progs)) /* last program was detached, reset flags to zero */ diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 7be02e555ab9..bde93344164d 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -2097,24 +2097,12 @@ int bpf_prog_array_copy_info(struct bpf_prog_array *array, : 0; } -static void bpf_free_cgroup_storage(struct bpf_prog_aux *aux) -{ - enum bpf_cgroup_storage_type stype; - - for_each_cgroup_storage_type(stype) { - if (!aux->cgroup_storage[stype]) - continue; - bpf_cgroup_storage_release(aux, aux->cgroup_storage[stype]); - } -} - void __bpf_free_used_maps(struct bpf_prog_aux *aux, struct bpf_map **used_maps, u32 len) { struct bpf_map *map; u32 i; - bpf_free_cgroup_storage(aux); for (i = 0; i < len; i++) { map = used_maps[i]; if (map->ops->map_poke_untrack) diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c index 51bd5a8cb01b..3b2c70197d78 100644 --- a/kernel/bpf/local_storage.c +++ b/kernel/bpf/local_storage.c @@ -9,6 +9,8 @@ #include #include +#include "../cgroup/cgroup-internal.h" + DEFINE_PER_CPU(struct bpf_cgroup_storage*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]); #ifdef CONFIG_CGROUP_BPF @@ -20,7 +22,6 @@ struct bpf_cgroup_storage_map { struct bpf_map map; spinlock_t lock; - struct bpf_prog_aux *aux; struct rb_root root; struct list_head list; }; @@ -30,24 +31,41 @@ static struct bpf_cgroup_storage_map *map_to_storage(struct bpf_map *map) return container_of(map, struct bpf_cgroup_storage_map, map); } -static int bpf_cgroup_storage_key_cmp( - const struct bpf_cgroup_storage_key *key1, - const struct bpf_cgroup_storage_key *key2) +static bool attach_type_isolated(const struct bpf_map *map) { - if (key1->cgroup_inode_id < key2->cgroup_inode_id) - return -1; - else if (key1->cgroup_inode_id > key2->cgroup_inode_id) - return 1; - else if (key1->attach_type < key2->attach_type) - return -1; - else if (key1->attach_type > key2->attach_type) - return 1; + return map->key_size == sizeof(struct bpf_cgroup_storage_key); +} + +static int bpf_cgroup_storage_key_cmp(const struct bpf_cgroup_storage_map *map, + const void *_key1, const void *_key2) +{ + if (attach_type_isolated(&map->map)) { + const struct bpf_cgroup_storage_key *key1 = _key1; + const struct bpf_cgroup_storage_key *key2 = 
_key2; + + if (key1->cgroup_inode_id < key2->cgroup_inode_id) + return -1; + else if (key1->cgroup_inode_id > key2->cgroup_inode_id) + return 1; + else if (key1->attach_type < key2->attach_type) + return -1; + else if (key1->attach_type > key2->attach_type) + return 1; + } else { + const __u64 *cgroup_inode_id1 = _key1; + const __u64 *cgroup_inode_id2 = _key2; + + if (*cgroup_inode_id1 < *cgroup_inode_id2) + return -1; + else if (*cgroup_inode_id1 > *cgroup_inode_id2) + return 1; + } return 0; } -static struct bpf_cgroup_storage *cgroup_storage_lookup( - struct bpf_cgroup_storage_map *map, struct bpf_cgroup_storage_key *key, - bool locked) +struct bpf_cgroup_storage * +cgroup_storage_lookup(struct bpf_cgroup_storage_map *map, + void *key, bool locked) { struct rb_root *root = &map->root; struct rb_node *node; @@ -61,7 +79,7 @@ static struct bpf_cgroup_storage *cgroup_storage_lookup( storage = container_of(node, struct bpf_cgroup_storage, node); - switch (bpf_cgroup_storage_key_cmp(key, &storage->key)) { + switch (bpf_cgroup_storage_key_cmp(map, key, &storage->key)) { case -1: node = node->rb_left; break; @@ -93,7 +111,7 @@ static int cgroup_storage_insert(struct bpf_cgroup_storage_map *map, this = container_of(*new, struct bpf_cgroup_storage, node); parent = *new; - switch (bpf_cgroup_storage_key_cmp(&storage->key, &this->key)) { + switch (bpf_cgroup_storage_key_cmp(map, &storage->key, &this->key)) { case -1: new = &((*new)->rb_left); break; @@ -111,10 +129,9 @@ static int cgroup_storage_insert(struct bpf_cgroup_storage_map *map, return 0; } -static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *_key) +static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *key) { struct bpf_cgroup_storage_map *map = map_to_storage(_map); - struct bpf_cgroup_storage_key *key = _key; struct bpf_cgroup_storage *storage; storage = cgroup_storage_lookup(map, key, false); @@ -124,17 +141,13 @@ static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *_key) return &READ_ONCE(storage->buf)->data[0]; } -static int cgroup_storage_update_elem(struct bpf_map *map, void *_key, +static int cgroup_storage_update_elem(struct bpf_map *map, void *key, void *value, u64 flags) { - struct bpf_cgroup_storage_key *key = _key; struct bpf_cgroup_storage *storage; struct bpf_storage_buffer *new; - if (unlikely(flags & ~(BPF_F_LOCK | BPF_EXIST | BPF_NOEXIST))) - return -EINVAL; - - if (unlikely(flags & BPF_NOEXIST)) + if (unlikely(flags & ~(BPF_F_LOCK | BPF_EXIST))) return -EINVAL; if (unlikely((flags & BPF_F_LOCK) && @@ -167,11 +180,10 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key, return 0; } -int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *_key, +int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *key, void *value) { struct bpf_cgroup_storage_map *map = map_to_storage(_map); - struct bpf_cgroup_storage_key *key = _key; struct bpf_cgroup_storage *storage; int cpu, off = 0; u32 size; @@ -197,11 +209,10 @@ int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *_key, return 0; } -int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *_key, +int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key, void *value, u64 map_flags) { struct bpf_cgroup_storage_map *map = map_to_storage(_map); - struct bpf_cgroup_storage_key *key = _key; struct bpf_cgroup_storage *storage; int cpu, off = 0; u32 size; @@ -232,12 +243,10 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *_key, return 0; } -static int 
cgroup_storage_get_next_key(struct bpf_map *_map, void *_key, +static int cgroup_storage_get_next_key(struct bpf_map *_map, void *key, void *_next_key) { struct bpf_cgroup_storage_map *map = map_to_storage(_map); - struct bpf_cgroup_storage_key *key = _key; - struct bpf_cgroup_storage_key *next = _next_key; struct bpf_cgroup_storage *storage; spin_lock_bh(&map->lock); @@ -250,17 +259,23 @@ static int cgroup_storage_get_next_key(struct bpf_map *_map, void *_key, if (!storage) goto enoent; - storage = list_next_entry(storage, list); + storage = list_next_entry(storage, list_map); if (!storage) goto enoent; } else { storage = list_first_entry(&map->list, - struct bpf_cgroup_storage, list); + struct bpf_cgroup_storage, list_map); } spin_unlock_bh(&map->lock); - next->attach_type = storage->key.attach_type; - next->cgroup_inode_id = storage->key.cgroup_inode_id; + + if (attach_type_isolated(&map->map)) { + struct bpf_cgroup_storage_key *next = _next_key; + *next = storage->key; + } else { + __u64 *next = _next_key; + *next = storage->key.cgroup_inode_id; + } return 0; enoent: @@ -275,7 +290,8 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr) struct bpf_map_memory mem; int ret; - if (attr->key_size != sizeof(struct bpf_cgroup_storage_key)) + if (attr->key_size != sizeof(struct bpf_cgroup_storage_key) && + attr->key_size != sizeof(__u64)) return ERR_PTR(-EINVAL); if (attr->value_size == 0) @@ -318,6 +334,17 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr) static void cgroup_storage_map_free(struct bpf_map *_map) { struct bpf_cgroup_storage_map *map = map_to_storage(_map); + struct list_head *storages = &map->list; + struct bpf_cgroup_storage *storage, *stmp; + + mutex_lock(&cgroup_mutex); + + list_for_each_entry_safe(storage, stmp, storages, list_map) { + bpf_cgroup_storage_unlink(storage); + bpf_cgroup_storage_free(storage); + } + + mutex_unlock(&cgroup_mutex); WARN_ON(!RB_EMPTY_ROOT(&map->root)); WARN_ON(!list_empty(&map->list)); @@ -335,49 +362,63 @@ static int cgroup_storage_check_btf(const struct bpf_map *map, const struct btf_type *key_type, const struct btf_type *value_type) { - struct btf_member *m; - u32 offset, size; - - /* Key is expected to be of struct bpf_cgroup_storage_key type, - * which is: - * struct bpf_cgroup_storage_key { - * __u64 cgroup_inode_id; - * __u32 attach_type; - * }; - */ + if (attach_type_isolated(map)) { + struct btf_member *m; + u32 offset, size; + + /* Key is expected to be of struct bpf_cgroup_storage_key type, + * which is: + * struct bpf_cgroup_storage_key { + * __u64 cgroup_inode_id; + * __u32 attach_type; + * }; + */ + + /* + * Key_type must be a structure with two fields. + */ + if (BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT || + BTF_INFO_VLEN(key_type->info) != 2) + return -EINVAL; + + /* + * The first field must be a 64 bit integer at 0 offset. + */ + m = (struct btf_member *)(key_type + 1); + size = sizeof_field(struct bpf_cgroup_storage_key, cgroup_inode_id); + if (!btf_member_is_reg_int(btf, key_type, m, 0, size)) + return -EINVAL; + + /* + * The second field must be a 32 bit integer at 64 bit offset. + */ + m++; + offset = offsetof(struct bpf_cgroup_storage_key, attach_type); + size = sizeof_field(struct bpf_cgroup_storage_key, attach_type); + if (!btf_member_is_reg_int(btf, key_type, m, offset, size)) + return -EINVAL; + } else { + u32 int_data; - /* - * Key_type must be a structure with two fields. 
- */ - if (BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT || - BTF_INFO_VLEN(key_type->info) != 2) - return -EINVAL; + /* + * Key is expected to be u64, which stores the cgroup_inode_id + */ - /* - * The first field must be a 64 bit integer at 0 offset. - */ - m = (struct btf_member *)(key_type + 1); - size = sizeof_field(struct bpf_cgroup_storage_key, cgroup_inode_id); - if (!btf_member_is_reg_int(btf, key_type, m, 0, size)) - return -EINVAL; + if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT) + return -EINVAL; - /* - * The second field must be a 32 bit integer at 64 bit offset. - */ - m++; - offset = offsetof(struct bpf_cgroup_storage_key, attach_type); - size = sizeof_field(struct bpf_cgroup_storage_key, attach_type); - if (!btf_member_is_reg_int(btf, key_type, m, offset, size)) - return -EINVAL; + int_data = *(u32 *)(key_type + 1); + if (BTF_INT_BITS(int_data) != 64 || BTF_INT_OFFSET(int_data)) + return -EINVAL; + } return 0; } -static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *_key, +static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *key, struct seq_file *m) { enum bpf_cgroup_storage_type stype = cgroup_storage_type(map); - struct bpf_cgroup_storage_key *key = _key; struct bpf_cgroup_storage *storage; int cpu; @@ -426,38 +467,13 @@ const struct bpf_map_ops cgroup_storage_map_ops = { int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *_map) { enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map); - struct bpf_cgroup_storage_map *map = map_to_storage(_map); - int ret = -EBUSY; - - spin_lock_bh(&map->lock); - if (map->aux && map->aux != aux) - goto unlock; if (aux->cgroup_storage[stype] && aux->cgroup_storage[stype] != _map) - goto unlock; + return -EBUSY; - map->aux = aux; aux->cgroup_storage[stype] = _map; - ret = 0; -unlock: - spin_unlock_bh(&map->lock); - - return ret; -} - -void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, struct bpf_map *_map) -{ - enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map); - struct bpf_cgroup_storage_map *map = map_to_storage(_map); - - spin_lock_bh(&map->lock); - if (map->aux == aux) { - WARN_ON(aux->cgroup_storage[stype] != _map); - map->aux = NULL; - aux->cgroup_storage[stype] = NULL; - } - spin_unlock_bh(&map->lock); + return 0; } static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages) @@ -578,7 +594,8 @@ void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage, spin_lock_bh(&map->lock); WARN_ON(cgroup_storage_insert(map, storage)); - list_add(&storage->list, &map->list); + list_add(&storage->list_map, &map->list); + list_add(&storage->list_cg, &cgroup->bpf.storages); spin_unlock_bh(&map->lock); } @@ -596,7 +613,8 @@ void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage) root = &map->root; rb_erase(&storage->node, root); - list_del(&storage->list); + list_del(&storage->list_map); + list_del(&storage->list_cg); spin_unlock_bh(&map->lock); } -- cgit v1.2.3 From 6cc7d1e8e9e06d45f9d1a39a5f465288d7cd8f9a Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 21 Jul 2020 23:45:54 -0700 Subject: bpf: Make bpf_link API available indepently of CONFIG_BPF_SYSCALL Similarly to bpf_prog, make bpf_link and related generic API available unconditionally to make it easier to have bpf_link support in various parts of the kernel. Stub out init/prime/settle/cleanup and inc/put APIs. 
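As a reminder of the call sequence these stubs mirror, here is a hedged kernel-side sketch of a typical bpf_link attach flow; the link type, ops and attach step below are placeholders, not something this patch adds.

/* Sketch only: the usual init -> prime -> attach -> settle sequence. */
#include <linux/bpf.h>
#include <linux/slab.h>

struct my_link {
	struct bpf_link link;
};

static void my_link_release(struct bpf_link *link)
{
	/* detach from the target object here */
}

static void my_link_dealloc(struct bpf_link *link)
{
	kfree(container_of(link, struct my_link, link));
}

static const struct bpf_link_ops my_link_ops = {
	.release = my_link_release,
	.dealloc = my_link_dealloc,
};

static int my_link_attach(struct bpf_prog *prog)
{
	struct bpf_link_primer primer;
	struct my_link *link;
	int err;

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link)
		return -ENOMEM;

	bpf_link_init(&link->link, BPF_LINK_TYPE_UNSPEC, &my_link_ops, prog);

	err = bpf_link_prime(&link->link, &primer);	/* reserve id + anon fd */
	if (err) {
		kfree(link);
		return err;
	}

	/* ... actually attach the link to its target here ... */

	return bpf_link_settle(&primer);		/* hand the fd to userspace */
}

Without CONFIG_BPF_SYSCALL the same code now compiles, with prime/settle returning -EOPNOTSUPP and init/cleanup/inc/put being no-ops.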
Reported-by: kernel test robot Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200722064603.3350758-2-andriin@fb.com --- include/linux/bpf.h | 81 ++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 55 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 8357be349133..40c5e206ecf2 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -781,6 +781,32 @@ struct bpf_array_aux { struct work_struct work; }; +struct bpf_link { + atomic64_t refcnt; + u32 id; + enum bpf_link_type type; + const struct bpf_link_ops *ops; + struct bpf_prog *prog; + struct work_struct work; +}; + +struct bpf_link_ops { + void (*release)(struct bpf_link *link); + void (*dealloc)(struct bpf_link *link); + int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog, + struct bpf_prog *old_prog); + void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq); + int (*fill_link_info)(const struct bpf_link *link, + struct bpf_link_info *info); +}; + +struct bpf_link_primer { + struct bpf_link *link; + struct file *file; + int fd; + u32 id; +}; + struct bpf_struct_ops_value; struct btf_type; struct btf_member; @@ -1164,32 +1190,6 @@ static inline bool bpf_bypass_spec_v4(void) int bpf_map_new_fd(struct bpf_map *map, int flags); int bpf_prog_new_fd(struct bpf_prog *prog); -struct bpf_link { - atomic64_t refcnt; - u32 id; - enum bpf_link_type type; - const struct bpf_link_ops *ops; - struct bpf_prog *prog; - struct work_struct work; -}; - -struct bpf_link_primer { - struct bpf_link *link; - struct file *file; - int fd; - u32 id; -}; - -struct bpf_link_ops { - void (*release)(struct bpf_link *link); - void (*dealloc)(struct bpf_link *link); - int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog, - struct bpf_prog *old_prog); - void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq); - int (*fill_link_info)(const struct bpf_link *link, - struct bpf_link_info *info); -}; - void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, const struct bpf_link_ops *ops, struct bpf_prog *prog); int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer); @@ -1401,6 +1401,35 @@ static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages) { } +static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, + const struct bpf_link_ops *ops, + struct bpf_prog *prog) +{ +} + +static inline int bpf_link_prime(struct bpf_link *link, + struct bpf_link_primer *primer) +{ + return -EOPNOTSUPP; +} + +static inline int bpf_link_settle(struct bpf_link_primer *primer) +{ + return -EOPNOTSUPP; +} + +static inline void bpf_link_cleanup(struct bpf_link_primer *primer) +{ +} + +static inline void bpf_link_inc(struct bpf_link *link) +{ +} + +static inline void bpf_link_put(struct bpf_link *link) +{ +} + static inline int bpf_obj_get_user(const char __user *pathname, int flags) { return -EOPNOTSUPP; -- cgit v1.2.3 From 7f0a838254bdd9114b978ef2541a6ce330307e9e Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 21 Jul 2020 23:45:55 -0700 Subject: bpf, xdp: Maintain info on attached XDP BPF programs in net_device Instead of delegating to drivers, maintain information about which BPF programs are attached in which XDP modes (generic/skb, driver, or hardware) locally in net_device. This effectively obsoletes XDP_QUERY_PROG command. Such re-organization simplifies existing code already. 
But it also allows to further add bpf_link-based XDP attachments without drivers having to know about any of this at all, which seems like a good setup. XDP_SETUP_PROG/XDP_SETUP_PROG_HW are just low-level commands to driver to install/uninstall active BPF program. All the higher-level concerns about prog/link interaction will be contained within generic driver-agnostic logic. All the XDP_QUERY_PROG calls to driver in dev_xdp_uninstall() were removed. It's not clear for me why dev_xdp_uninstall() were passing previous prog_flags when resetting installed programs. That seems unnecessary, plus most drivers don't populate prog_flags anyways. Having XDP_SETUP_PROG vs XDP_SETUP_PROG_HW should be enough of an indicator of what is required of driver to correctly reset active BPF program. dev_xdp_uninstall() is also generalized as an iteration over all three supported mode. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200722064603.3350758-3-andriin@fb.com --- include/linux/netdevice.h | 17 ++++- net/core/dev.c | 158 ++++++++++++++++++++++++++-------------------- net/core/rtnetlink.c | 5 +- 3 files changed, 105 insertions(+), 75 deletions(-) (limited to 'include') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ac2cd3f49aba..cad44b40c776 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -889,6 +889,17 @@ struct netlink_ext_ack; struct xdp_umem; struct xdp_dev_bulk_queue; +enum bpf_xdp_mode { + XDP_MODE_SKB = 0, + XDP_MODE_DRV = 1, + XDP_MODE_HW = 2, + __MAX_XDP_MODE +}; + +struct bpf_xdp_entity { + struct bpf_prog *prog; +}; + struct netdev_bpf { enum bpf_netdev_command command; union { @@ -2142,6 +2153,9 @@ struct net_device { #endif const struct udp_tunnel_nic_info *udp_tunnel_nic_info; struct udp_tunnel_nic *udp_tunnel_nic; + + /* protected by rtnl_lock */ + struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE]; }; #define to_net_dev(d) container_of(d, struct net_device, dev) @@ -3817,8 +3831,7 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, int fd, int expected_fd, u32 flags); -u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op, - enum bpf_netdev_command cmd); +u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode); int xdp_umem_query(struct net_device *dev, u16 queue_id); int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); diff --git a/net/core/dev.c b/net/core/dev.c index fe2e387eed29..bf38fde667e9 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -8716,84 +8716,103 @@ int dev_change_proto_down_generic(struct net_device *dev, bool proto_down) } EXPORT_SYMBOL(dev_change_proto_down_generic); -u32 __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op, - enum bpf_netdev_command cmd) +static enum bpf_xdp_mode dev_xdp_mode(u32 flags) { - struct netdev_bpf xdp; + if (flags & XDP_FLAGS_HW_MODE) + return XDP_MODE_HW; + if (flags & XDP_FLAGS_DRV_MODE) + return XDP_MODE_DRV; + return XDP_MODE_SKB; +} - if (!bpf_op) - return 0; +static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode) +{ + switch (mode) { + case XDP_MODE_SKB: + return generic_xdp_install; + case XDP_MODE_DRV: + case XDP_MODE_HW: + return dev->netdev_ops->ndo_bpf; + default: + return NULL; + }; +} - memset(&xdp, 0, sizeof(xdp)); - xdp.command = cmd; +static struct bpf_prog *dev_xdp_prog(struct 
net_device *dev, + enum bpf_xdp_mode mode) +{ + return dev->xdp_state[mode].prog; +} + +u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode) +{ + struct bpf_prog *prog = dev_xdp_prog(dev, mode); - /* Query must always succeed. */ - WARN_ON(bpf_op(dev, &xdp) < 0 && cmd == XDP_QUERY_PROG); + return prog ? prog->aux->id : 0; +} - return xdp.prog_id; +static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode, + struct bpf_prog *prog) +{ + dev->xdp_state[mode].prog = prog; } -static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op, - struct netlink_ext_ack *extack, u32 flags, - struct bpf_prog *prog) +static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode, + bpf_op_t bpf_op, struct netlink_ext_ack *extack, + u32 flags, struct bpf_prog *prog) { - bool non_hw = !(flags & XDP_FLAGS_HW_MODE); - struct bpf_prog *prev_prog = NULL; struct netdev_bpf xdp; int err; - if (non_hw) { - prev_prog = bpf_prog_by_id(__dev_xdp_query(dev, bpf_op, - XDP_QUERY_PROG)); - if (IS_ERR(prev_prog)) - prev_prog = NULL; - } - memset(&xdp, 0, sizeof(xdp)); - if (flags & XDP_FLAGS_HW_MODE) - xdp.command = XDP_SETUP_PROG_HW; - else - xdp.command = XDP_SETUP_PROG; + xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG; xdp.extack = extack; xdp.flags = flags; xdp.prog = prog; + /* Drivers assume refcnt is already incremented (i.e, prog pointer is + * "moved" into driver), so they don't increment it on their own, but + * they do decrement refcnt when program is detached or replaced. + * Given net_device also owns link/prog, we need to bump refcnt here + * to prevent drivers from underflowing it. + */ + if (prog) + bpf_prog_inc(prog); err = bpf_op(dev, &xdp); - if (!err && non_hw) - bpf_prog_change_xdp(prev_prog, prog); + if (err) { + if (prog) + bpf_prog_put(prog); + return err; + } - if (prev_prog) - bpf_prog_put(prev_prog); + if (mode != XDP_MODE_HW) + bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog); - return err; + return 0; } static void dev_xdp_uninstall(struct net_device *dev) { - struct netdev_bpf xdp; - bpf_op_t ndo_bpf; + struct bpf_prog *prog; + enum bpf_xdp_mode mode; + bpf_op_t bpf_op; - /* Remove generic XDP */ - WARN_ON(dev_xdp_install(dev, generic_xdp_install, NULL, 0, NULL)); + ASSERT_RTNL(); - /* Remove from the driver */ - ndo_bpf = dev->netdev_ops->ndo_bpf; - if (!ndo_bpf) - return; + for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) { + prog = dev_xdp_prog(dev, mode); + if (!prog) + continue; - memset(&xdp, 0, sizeof(xdp)); - xdp.command = XDP_QUERY_PROG; - WARN_ON(ndo_bpf(dev, &xdp)); - if (xdp.prog_id) - WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, - NULL)); + bpf_op = dev_xdp_bpf_op(dev, mode); + if (!bpf_op) + continue; - /* Remove HW offload */ - memset(&xdp, 0, sizeof(xdp)); - xdp.command = XDP_QUERY_PROG_HW; - if (!ndo_bpf(dev, &xdp) && xdp.prog_id) - WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, - NULL)); + WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL)); + + bpf_prog_put(prog); + dev_xdp_set_prog(dev, mode, NULL); + } } /** @@ -8810,29 +8829,22 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, int fd, int expected_fd, u32 flags) { const struct net_device_ops *ops = dev->netdev_ops; - enum bpf_netdev_command query; + enum bpf_xdp_mode mode = dev_xdp_mode(flags); + bool offload = mode == XDP_MODE_HW; u32 prog_id, expected_id = 0; - bpf_op_t bpf_op, bpf_chk; struct bpf_prog *prog; - bool offload; + bpf_op_t bpf_op; int err; ASSERT_RTNL(); - 
offload = flags & XDP_FLAGS_HW_MODE; - query = offload ? XDP_QUERY_PROG_HW : XDP_QUERY_PROG; - - bpf_op = bpf_chk = ops->ndo_bpf; - if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE))) { + bpf_op = dev_xdp_bpf_op(dev, mode); + if (!bpf_op) { NL_SET_ERR_MSG(extack, "underlying driver does not support XDP in native mode"); return -EOPNOTSUPP; } - if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE)) - bpf_op = generic_xdp_install; - if (bpf_op == bpf_chk) - bpf_chk = generic_xdp_install; - prog_id = __dev_xdp_query(dev, bpf_op, query); + prog_id = dev_xdp_prog_id(dev, mode); if (flags & XDP_FLAGS_REPLACE) { if (expected_fd >= 0) { prog = bpf_prog_get_type_dev(expected_fd, @@ -8850,8 +8862,11 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, } } if (fd >= 0) { - if (!offload && __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG)) { - NL_SET_ERR_MSG(extack, "native and generic XDP can't be active at the same time"); + enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB + ? XDP_MODE_DRV : XDP_MODE_SKB; + + if (!offload && dev_xdp_prog_id(dev, other_mode)) { + NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time"); return -EEXIST; } @@ -8866,7 +8881,7 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, return PTR_ERR(prog); if (!offload && bpf_prog_is_dev_bound(prog->aux)) { - NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported"); + NL_SET_ERR_MSG(extack, "Using device-bound program without HW_MODE flag is not supported"); bpf_prog_put(prog); return -EINVAL; } @@ -8895,11 +8910,14 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, prog = NULL; } - err = dev_xdp_install(dev, bpf_op, extack, flags, prog); - if (err < 0 && prog) + err = dev_xdp_install(dev, mode, bpf_op, extack, flags, prog); + if (err < 0 && prog) { bpf_prog_put(prog); + return err; + } + dev_xdp_set_prog(dev, mode, prog); - return err; + return 0; } /** diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 85a4b0101f76..58c484a28395 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1416,13 +1416,12 @@ static u32 rtnl_xdp_prog_skb(struct net_device *dev) static u32 rtnl_xdp_prog_drv(struct net_device *dev) { - return __dev_xdp_query(dev, dev->netdev_ops->ndo_bpf, XDP_QUERY_PROG); + return dev_xdp_prog_id(dev, XDP_MODE_DRV); } static u32 rtnl_xdp_prog_hw(struct net_device *dev) { - return __dev_xdp_query(dev, dev->netdev_ops->ndo_bpf, - XDP_QUERY_PROG_HW); + return dev_xdp_prog_id(dev, XDP_MODE_HW); } static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev, -- cgit v1.2.3 From aa8d3a716b59db6c1ad6c68fb8aa05e31980da60 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 21 Jul 2020 23:45:57 -0700 Subject: bpf, xdp: Add bpf_link-based XDP attachment API Add bpf_link-based API (bpf_xdp_link) to attach BPF XDP program through BPF_LINK_CREATE command. bpf_xdp_link is mutually exclusive with direct BPF program attachment, previous BPF program should be detached prior to attempting to create a new bpf_xdp_link attachment (for a given XDP mode). Once BPF link is attached, it can't be replaced by other BPF program attachment or link attachment. It will be detached only when the last BPF link FD is closed. bpf_xdp_link will be auto-detached when net_device is shutdown, similarly to how other BPF links behave (cgroup, flow_dissector). At that point bpf_link will become defunct, but won't be destroyed until last FD is closed. 
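A hedged userspace sketch of the new attachment path, using only the bpf_attr fields this series adds (the wrapper function itself is illustrative); prog_fd is a loaded BPF_PROG_TYPE_XDP program.

/* Sketch only: create an XDP bpf_link with the raw bpf(2) syscall. */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>
#include <linux/if_link.h>

static int xdp_link_create(int prog_fd, int ifindex, __u32 xdp_flags)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.target_ifindex = ifindex;
	attr.link_create.attach_type = BPF_XDP;
	attr.link_create.flags = xdp_flags;	/* only XDP_FLAGS_*_MODE bits are accepted */

	/* returns a link FD; closing the last FD detaches the program */
	return syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
}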
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200722064603.3350758-5-andriin@fb.com --- include/linux/netdevice.h | 4 ++ include/uapi/linux/bpf.h | 7 +- kernel/bpf/syscall.c | 5 ++ net/core/dev.c | 169 ++++++++++++++++++++++++++++++++++++++++++++-- 4 files changed, 178 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index cad44b40c776..7d3c412fcfe5 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -888,6 +888,7 @@ struct bpf_prog_offload_ops; struct netlink_ext_ack; struct xdp_umem; struct xdp_dev_bulk_queue; +struct bpf_xdp_link; enum bpf_xdp_mode { XDP_MODE_SKB = 0, @@ -898,6 +899,7 @@ enum bpf_xdp_mode { struct bpf_xdp_entity { struct bpf_prog *prog; + struct bpf_xdp_link *link; }; struct netdev_bpf { @@ -3831,7 +3833,9 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, int fd, int expected_fd, u32 flags); +int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode); + int xdp_umem_query(struct net_device *dev, u16 queue_id); int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 828c2f6438f2..87823fb9c123 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -230,6 +230,7 @@ enum bpf_attach_type { BPF_CGROUP_INET_SOCK_RELEASE, BPF_XDP_CPUMAP, BPF_SK_LOOKUP, + BPF_XDP, __MAX_BPF_ATTACH_TYPE }; @@ -242,6 +243,7 @@ enum bpf_link_type { BPF_LINK_TYPE_CGROUP = 3, BPF_LINK_TYPE_ITER = 4, BPF_LINK_TYPE_NETNS = 5, + BPF_LINK_TYPE_XDP = 6, MAX_BPF_LINK_TYPE, }; @@ -614,7 +616,10 @@ union bpf_attr { struct { /* struct used by BPF_LINK_CREATE command */ __u32 prog_fd; /* eBPF program to attach */ - __u32 target_fd; /* object to attach to */ + union { + __u32 target_fd; /* object to attach to */ + __u32 target_ifindex; /* target ifindex */ + }; __u32 attach_type; /* attach type */ __u32 flags; /* extra flags */ } link_create; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index ee290b1f2d9e..0e8c88db7e7a 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -2824,6 +2824,8 @@ attach_type_to_prog_type(enum bpf_attach_type attach_type) return BPF_PROG_TYPE_TRACING; case BPF_SK_LOOKUP: return BPF_PROG_TYPE_SK_LOOKUP; + case BPF_XDP: + return BPF_PROG_TYPE_XDP; default: return BPF_PROG_TYPE_UNSPEC; } @@ -3921,6 +3923,9 @@ static int link_create(union bpf_attr *attr) case BPF_PROG_TYPE_SK_LOOKUP: ret = netns_bpf_link_create(attr, prog); break; + case BPF_PROG_TYPE_XDP: + ret = bpf_xdp_link_attach(attr, prog); + break; default: ret = -EINVAL; } diff --git a/net/core/dev.c b/net/core/dev.c index 521ce031ee35..e24248f3d675 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -8716,6 +8716,12 @@ int dev_change_proto_down_generic(struct net_device *dev, bool proto_down) } EXPORT_SYMBOL(dev_change_proto_down_generic); +struct bpf_xdp_link { + struct bpf_link link; + struct net_device *dev; /* protected by rtnl_lock, no refcnt held */ + int flags; +}; + static enum bpf_xdp_mode dev_xdp_mode(u32 flags) { if (flags & XDP_FLAGS_HW_MODE) @@ -8738,9 +8744,19 @@ static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode) }; } +static struct bpf_xdp_link 
*dev_xdp_link(struct net_device *dev, + enum bpf_xdp_mode mode) +{ + return dev->xdp_state[mode].link; +} + static struct bpf_prog *dev_xdp_prog(struct net_device *dev, enum bpf_xdp_mode mode) { + struct bpf_xdp_link *link = dev_xdp_link(dev, mode); + + if (link) + return link->link.prog; return dev->xdp_state[mode].prog; } @@ -8751,9 +8767,17 @@ u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode) return prog ? prog->aux->id : 0; } +static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode, + struct bpf_xdp_link *link) +{ + dev->xdp_state[mode].link = link; + dev->xdp_state[mode].prog = NULL; +} + static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode, struct bpf_prog *prog) { + dev->xdp_state[mode].link = NULL; dev->xdp_state[mode].prog = prog; } @@ -8793,6 +8817,7 @@ static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode, static void dev_xdp_uninstall(struct net_device *dev) { + struct bpf_xdp_link *link; struct bpf_prog *prog; enum bpf_xdp_mode mode; bpf_op_t bpf_op; @@ -8810,14 +8835,20 @@ static void dev_xdp_uninstall(struct net_device *dev) WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL)); - bpf_prog_put(prog); - dev_xdp_set_prog(dev, mode, NULL); + /* auto-detach link from net device */ + link = dev_xdp_link(dev, mode); + if (link) + link->dev = NULL; + else + bpf_prog_put(prog); + + dev_xdp_set_link(dev, mode, NULL); } } static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack, - struct bpf_prog *new_prog, struct bpf_prog *old_prog, - u32 flags) + struct bpf_xdp_link *link, struct bpf_prog *new_prog, + struct bpf_prog *old_prog, u32 flags) { struct bpf_prog *cur_prog; enum bpf_xdp_mode mode; @@ -8826,6 +8857,14 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack ASSERT_RTNL(); + /* either link or prog attachment, never both */ + if (link && (new_prog || old_prog)) + return -EINVAL; + /* link supports only XDP mode flags */ + if (link && (flags & ~XDP_FLAGS_MODES)) { + NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment"); + return -EINVAL; + } /* just one XDP mode bit should be set, zero defaults to SKB mode */ if (hweight32(flags & XDP_FLAGS_MODES) > 1) { NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set"); @@ -8838,7 +8877,18 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack } mode = dev_xdp_mode(flags); + /* can't replace attached link */ + if (dev_xdp_link(dev, mode)) { + NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link"); + return -EBUSY; + } + cur_prog = dev_xdp_prog(dev, mode); + /* can't replace attached prog with link */ + if (link && cur_prog) { + NL_SET_ERR_MSG(extack, "Can't replace active XDP program with BPF link"); + return -EBUSY; + } if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) { NL_SET_ERR_MSG(extack, "Active program does not match expected"); return -EEXIST; @@ -8848,6 +8898,10 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack return -EBUSY; } + /* put effective new program into new_prog */ + if (link) + new_prog = link->link.prog; + if (new_prog) { bool offload = mode == XDP_MODE_HW; enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB @@ -8884,13 +8938,116 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack return err; } - dev_xdp_set_prog(dev, mode, new_prog); + if (link) + dev_xdp_set_link(dev, mode, link); + else + dev_xdp_set_prog(dev, mode, new_prog); if (cur_prog) 
bpf_prog_put(cur_prog); return 0; } +static int dev_xdp_attach_link(struct net_device *dev, + struct netlink_ext_ack *extack, + struct bpf_xdp_link *link) +{ + return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags); +} + +static int dev_xdp_detach_link(struct net_device *dev, + struct netlink_ext_ack *extack, + struct bpf_xdp_link *link) +{ + enum bpf_xdp_mode mode; + bpf_op_t bpf_op; + + ASSERT_RTNL(); + + mode = dev_xdp_mode(link->flags); + if (dev_xdp_link(dev, mode) != link) + return -EINVAL; + + bpf_op = dev_xdp_bpf_op(dev, mode); + WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL)); + dev_xdp_set_link(dev, mode, NULL); + return 0; +} + +static void bpf_xdp_link_release(struct bpf_link *link) +{ + struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); + + rtnl_lock(); + + /* if racing with net_device's tear down, xdp_link->dev might be + * already NULL, in which case link was already auto-detached + */ + if (xdp_link->dev) + WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link)); + + rtnl_unlock(); +} + +static void bpf_xdp_link_dealloc(struct bpf_link *link) +{ + struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); + + kfree(xdp_link); +} + +static const struct bpf_link_ops bpf_xdp_link_lops = { + .release = bpf_xdp_link_release, + .dealloc = bpf_xdp_link_dealloc, +}; + +int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + struct net *net = current->nsproxy->net_ns; + struct bpf_link_primer link_primer; + struct bpf_xdp_link *link; + struct net_device *dev; + int err, fd; + + dev = dev_get_by_index(net, attr->link_create.target_ifindex); + if (!dev) + return -EINVAL; + + link = kzalloc(sizeof(*link), GFP_USER); + if (!link) { + err = -ENOMEM; + goto out_put_dev; + } + + bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog); + link->dev = dev; + link->flags = attr->link_create.flags; + + err = bpf_link_prime(&link->link, &link_primer); + if (err) { + kfree(link); + goto out_put_dev; + } + + rtnl_lock(); + err = dev_xdp_attach_link(dev, NULL, link); + rtnl_unlock(); + + if (err) { + bpf_link_cleanup(&link_primer); + goto out_put_dev; + } + + fd = bpf_link_settle(&link_primer); + /* link itself doesn't hold dev's refcnt to not complicate shutdown */ + dev_put(dev); + return fd; + +out_put_dev: + dev_put(dev); + return err; +} + /** * dev_change_xdp_fd - set or clear a bpf program for a device rx path * @dev: device @@ -8927,7 +9084,7 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, } } - err = dev_xdp_attach(dev, extack, new_prog, old_prog, flags); + err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags); err_out: if (err && new_prog) -- cgit v1.2.3 From c1931c9784ebb5787c0784c112fb8baa5e8455b3 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 21 Jul 2020 23:45:59 -0700 Subject: bpf: Implement BPF XDP link-specific introspection APIs Implement XDP link-specific show_fdinfo and link_info to emit ifindex. 
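For example, userspace can read the ifindex back through BPF_OBJ_GET_INFO_BY_FD; a minimal sketch follows (the wrapper is illustrative, the bpf_link_info.xdp field is what this patch adds).

/* Sketch only: query the ifindex of an attached XDP link. */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int xdp_link_ifindex(int link_fd, __u32 *ifindex)
{
	struct bpf_link_info info = {};
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = link_fd;
	attr.info.info_len = sizeof(info);
	attr.info.info = (__u64)(unsigned long)&info;

	if (syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)))
		return -1;

	*ifindex = info.xdp.ifindex;	/* 0 if the link was already auto-detached */
	return 0;
}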
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200722064603.3350758-7-andriin@fb.com --- include/uapi/linux/bpf.h | 3 +++ net/core/dev.c | 31 +++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) (limited to 'include') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 87823fb9c123..e1ba4ae6a916 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -4069,6 +4069,9 @@ struct bpf_link_info { __u32 netns_ino; __u32 attach_type; } netns; + struct { + __u32 ifindex; + } xdp; }; } __attribute__((aligned(8))); diff --git a/net/core/dev.c b/net/core/dev.c index 49f284f51a22..82ce0920b172 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -8996,6 +8996,35 @@ static void bpf_xdp_link_dealloc(struct bpf_link *link) kfree(xdp_link); } +static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link, + struct seq_file *seq) +{ + struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); + u32 ifindex = 0; + + rtnl_lock(); + if (xdp_link->dev) + ifindex = xdp_link->dev->ifindex; + rtnl_unlock(); + + seq_printf(seq, "ifindex:\t%u\n", ifindex); +} + +static int bpf_xdp_link_fill_link_info(const struct bpf_link *link, + struct bpf_link_info *info) +{ + struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); + u32 ifindex = 0; + + rtnl_lock(); + if (xdp_link->dev) + ifindex = xdp_link->dev->ifindex; + rtnl_unlock(); + + info->xdp.ifindex = ifindex; + return 0; +} + static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog, struct bpf_prog *old_prog) { @@ -9041,6 +9070,8 @@ out_unlock: static const struct bpf_link_ops bpf_xdp_link_lops = { .release = bpf_xdp_link_release, .dealloc = bpf_xdp_link_dealloc, + .show_fdinfo = bpf_xdp_link_show_fdinfo, + .fill_link_info = bpf_xdp_link_fill_link_info, .update_prog = bpf_xdp_link_update, }; -- cgit v1.2.3 From e8407fdeb9a6866784e249881f6c786a0835faba Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 21 Jul 2020 23:46:02 -0700 Subject: bpf, xdp: Remove XDP_QUERY_PROG and XDP_QUERY_PROG_HW XDP commands Now that BPF program/link management is centralized in generic net_device code, kernel code never queries program id from drivers, so XDP_QUERY_PROG/XDP_QUERY_PROG_HW commands are unnecessary. This patch removes all the implementations of those commands in kernel, along the xdp_attachment_query(). This patch was compile-tested on allyesconfig. 
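After this cleanup a driver's ndo_bpf callback typically reduces to program installation only, since the core net_device code tracks the attached program or link itself. A representative sketch (driver-specific names are placeholders):

/* Sketch only: minimal ndo_bpf after XDP_QUERY_PROG removal. */
static int foo_xdp(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct foo_priv *priv = netdev_priv(dev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return foo_xdp_setup(priv, bpf->prog, bpf->extack);
	default:
		return -EINVAL;
	}
}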
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200722064603.3350758-10-andriin@fb.com --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 6 ------ drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 4 ---- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 3 --- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 5 ----- drivers/net/ethernet/intel/i40e/i40e_main.c | 3 --- drivers/net/ethernet/intel/ice/ice_main.c | 3 --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 4 ---- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 6 ------ drivers/net/ethernet/marvell/mvneta.c | 5 ----- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 3 --- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 24 ---------------------- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 18 ---------------- .../net/ethernet/netronome/nfp/nfp_net_common.c | 4 ---- drivers/net/ethernet/qlogic/qede/qede_filter.c | 3 --- drivers/net/ethernet/sfc/efx.c | 5 ----- drivers/net/ethernet/socionext/netsec.c | 3 --- drivers/net/ethernet/ti/cpsw_priv.c | 3 --- drivers/net/hyperv/netvsc_bpf.c | 21 +------------------ drivers/net/netdevsim/bpf.c | 4 ---- drivers/net/netdevsim/netdevsim.h | 2 +- drivers/net/tun.c | 15 -------------- drivers/net/veth.c | 15 -------------- drivers/net/virtio_net.c | 17 --------------- drivers/net/xen-netfront.c | 21 ------------------- include/linux/netdevice.h | 8 -------- include/net/xdp.h | 2 -- net/core/dev.c | 4 ---- net/core/xdp.c | 9 -------- 28 files changed, 2 insertions(+), 218 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 6478c1e0d137..2a6c9725e092 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -576,15 +576,9 @@ static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf) */ static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf) { - struct ena_adapter *adapter = netdev_priv(netdev); - switch (bpf->command) { case XDP_SETUP_PROG: return ena_xdp_set(netdev, bpf); - case XDP_QUERY_PROG: - bpf->prog_id = adapter->xdp_bpf_prog ? - adapter->xdp_bpf_prog->aux->id : 0; - break; default: return -EINVAL; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 5e3b4a3b69ea..2704a4709bc7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -330,10 +330,6 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp) case XDP_SETUP_PROG: rc = bnxt_xdp_set(bp, xdp->prog); break; - case XDP_QUERY_PROG: - xdp->prog_id = bp->xdp_prog ? bp->xdp_prog->aux->id : 0; - rc = 0; - break; default: rc = -EINVAL; break; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 2ba0ce115e63..1c6163934e20 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -1906,9 +1906,6 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp) switch (xdp->command) { case XDP_SETUP_PROG: return nicvf_xdp_setup(nic, xdp->prog); - case XDP_QUERY_PROG: - xdp->prog_id = nic->xdp_prog ? 
nic->xdp_prog->aux->id : 0; - return 0; default: return -EINVAL; } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 9b4028c0e34c..17f6bcafc944 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -2077,14 +2077,9 @@ out_err: static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp) { - struct dpaa2_eth_priv *priv = netdev_priv(dev); - switch (xdp->command) { case XDP_SETUP_PROG: return setup_xdp(dev, xdp->prog); - case XDP_QUERY_PROG: - xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0; - break; default: return -EINVAL; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index dadbfb3d2a2b..d8315811cbdf 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -12923,9 +12923,6 @@ static int i40e_xdp(struct net_device *dev, switch (xdp->command) { case XDP_SETUP_PROG: return i40e_xdp_setup(vsi, xdp->prog); - case XDP_QUERY_PROG: - xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0; - return 0; case XDP_SETUP_XSK_UMEM: return i40e_xsk_umem_setup(vsi, xdp->xsk.umem, xdp->xsk.queue_id); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 16a4096bb780..231f4b6e93d0 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2292,9 +2292,6 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp) switch (xdp->command) { case XDP_SETUP_PROG: return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack); - case XDP_QUERY_PROG: - xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0; - return 0; case XDP_SETUP_XSK_UMEM: return ice_xsk_umem_setup(vsi, xdp->xsk.umem, xdp->xsk.queue_id); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 4d898ff21a46..6f32b1706ab9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -10190,10 +10190,6 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp) switch (xdp->command) { case XDP_SETUP_PROG: return ixgbe_xdp_setup(dev, xdp->prog); - case XDP_QUERY_PROG: - xdp->prog_id = adapter->xdp_prog ? - adapter->xdp_prog->aux->id : 0; - return 0; case XDP_SETUP_XSK_UMEM: return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem, xdp->xsk.queue_id); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 6e9a397db583..a6267569bfa9 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -4502,15 +4502,9 @@ static int ixgbevf_xdp_setup(struct net_device *dev, struct bpf_prog *prog) static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp) { - struct ixgbevf_adapter *adapter = netdev_priv(dev); - switch (xdp->command) { case XDP_SETUP_PROG: return ixgbevf_xdp_setup(dev, xdp->prog); - case XDP_QUERY_PROG: - xdp->prog_id = adapter->xdp_prog ? 
- adapter->xdp_prog->aux->id : 0; - return 0; default: return -EINVAL; } diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 2c9277e73cef..6e3f9e2f883b 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -4442,14 +4442,9 @@ static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog, static int mvneta_xdp(struct net_device *dev, struct netdev_bpf *xdp) { - struct mvneta_port *pp = netdev_priv(dev); - switch (xdp->command) { case XDP_SETUP_PROG: return mvneta_xdp_setup(dev, xdp->prog, xdp->extack); - case XDP_QUERY_PROG: - xdp->prog_id = pp->xdp_prog ? pp->xdp_prog->aux->id : 0; - return 0; default: return -EINVAL; } diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 6a3f356640a0..cd5e9d60307e 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -4656,9 +4656,6 @@ static int mvpp2_xdp(struct net_device *dev, struct netdev_bpf *xdp) switch (xdp->command) { case XDP_SETUP_PROG: return mvpp2_xdp_setup(port, xdp); - case XDP_QUERY_PROG: - xdp->prog_id = port->xdp_prog ? port->xdp_prog->aux->id : 0; - return 0; default: return -EINVAL; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 2b8608f8f0a9..106513f772c3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2802,35 +2802,11 @@ unlock_out: return err; } -static u32 mlx4_xdp_query(struct net_device *dev) -{ - struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_en_dev *mdev = priv->mdev; - const struct bpf_prog *xdp_prog; - u32 prog_id = 0; - - if (!priv->tx_ring_num[TX_XDP]) - return prog_id; - - mutex_lock(&mdev->state_lock); - xdp_prog = rcu_dereference_protected( - priv->rx_ring[0]->xdp_prog, - lockdep_is_held(&mdev->state_lock)); - if (xdp_prog) - prog_id = xdp_prog->aux->id; - mutex_unlock(&mdev->state_lock); - - return prog_id; -} - static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: return mlx4_xdp_set(dev, xdp->prog); - case XDP_QUERY_PROG: - xdp->prog_id = mlx4_xdp_query(dev); - return 0; default: return -EINVAL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 9d5d8b28bcd8..aa4fb503dac3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4518,29 +4518,11 @@ unlock: return err; } -static u32 mlx5e_xdp_query(struct net_device *dev) -{ - struct mlx5e_priv *priv = netdev_priv(dev); - const struct bpf_prog *xdp_prog; - u32 prog_id = 0; - - mutex_lock(&priv->state_lock); - xdp_prog = priv->channels.params.xdp_prog; - if (xdp_prog) - prog_id = xdp_prog->aux->id; - mutex_unlock(&priv->state_lock); - - return prog_id; -} - static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: return mlx5e_xdp_set(dev, xdp->prog); - case XDP_QUERY_PROG: - xdp->prog_id = mlx5e_xdp_query(dev); - return 0; case XDP_SETUP_XSK_UMEM: return mlx5e_xsk_setup_umem(dev, xdp->xsk.umem, xdp->xsk.queue_id); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 44608873d3d9..39ee23e8c0bf 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ 
b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -3614,10 +3614,6 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp) return nfp_net_xdp_setup_drv(nn, xdp); case XDP_SETUP_PROG_HW: return nfp_net_xdp_setup_hw(nn, xdp); - case XDP_QUERY_PROG: - return xdp_attachment_query(&nn->xdp, xdp); - case XDP_QUERY_PROG_HW: - return xdp_attachment_query(&nn->xdp_hw, xdp); default: return nfp_app_bpf(nn->app, nn, xdp); } diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index b7d0b6ccebd3..f961f65d9372 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -1045,9 +1045,6 @@ int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp) switch (xdp->command) { case XDP_SETUP_PROG: return qede_xdp_set(edev, xdp->prog); - case XDP_QUERY_PROG: - xdp->prog_id = edev->xdp_prog ? edev->xdp_prog->aux->id : 0; - return 0; default: return -EINVAL; } diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index f16b4f236031..d60acaa3879d 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -653,15 +653,10 @@ static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog) static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp) { struct efx_nic *efx = netdev_priv(dev); - struct bpf_prog *xdp_prog; switch (xdp->command) { case XDP_SETUP_PROG: return efx_xdp_setup_prog(efx, xdp->prog); - case XDP_QUERY_PROG: - xdp_prog = rtnl_dereference(efx->xdp_prog); - xdp->prog_id = xdp_prog ? xdp_prog->aux->id : 0; - return 0; default: return -EINVAL; } diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 0f366cc50b74..25db667fa879 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -1811,9 +1811,6 @@ static int netsec_xdp(struct net_device *ndev, struct netdev_bpf *xdp) switch (xdp->command) { case XDP_SETUP_PROG: return netsec_xdp_setup(priv, xdp->prog, xdp->extack); - case XDP_QUERY_PROG: - xdp->prog_id = priv->xdp_prog ? 
priv->xdp_prog->aux->id : 0; - return 0; default: return -EINVAL; } diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c index a399f3659346..d6d7a7d9c7ad 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.c +++ b/drivers/net/ethernet/ti/cpsw_priv.c @@ -1286,9 +1286,6 @@ int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf) case XDP_SETUP_PROG: return cpsw_xdp_prog_setup(priv, bpf); - case XDP_QUERY_PROG: - return xdp_attachment_query(&priv->xdpi, bpf); - default: return -EINVAL; } diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c index 8e4141552423..440486d9c999 100644 --- a/drivers/net/hyperv/netvsc_bpf.c +++ b/drivers/net/hyperv/netvsc_bpf.c @@ -163,16 +163,6 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog) return ret; } -static u32 netvsc_xdp_query(struct netvsc_device *nvdev) -{ - struct bpf_prog *prog = netvsc_xdp_get(nvdev); - - if (prog) - return prog->aux->id; - - return 0; -} - int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf) { struct net_device_context *ndevctx = netdev_priv(dev); @@ -182,12 +172,7 @@ int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf) int ret; if (!nvdev || nvdev->destroy) { - if (bpf->command == XDP_QUERY_PROG) { - bpf->prog_id = 0; - return 0; /* Query must always succeed */ - } else { - return -ENODEV; - } + return -ENODEV; } switch (bpf->command) { @@ -208,10 +193,6 @@ int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf) return ret; - case XDP_QUERY_PROG: - bpf->prog_id = netvsc_xdp_query(nvdev); - return 0; - default: return -EINVAL; } diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c index 0b362b8dac17..2e90512f3bbe 100644 --- a/drivers/net/netdevsim/bpf.c +++ b/drivers/net/netdevsim/bpf.c @@ -551,10 +551,6 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf) ASSERT_RTNL(); switch (bpf->command) { - case XDP_QUERY_PROG: - return xdp_attachment_query(&ns->xdp, bpf); - case XDP_QUERY_PROG_HW: - return xdp_attachment_query(&ns->xdp_hw, bpf); case XDP_SETUP_PROG: err = nsim_setup_prog_checks(ns, bpf); if (err) diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index d164052e0393..284f7092241d 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -121,7 +121,7 @@ static inline void nsim_bpf_uninit(struct netdevsim *ns) static inline int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf) { - return bpf->command == XDP_QUERY_PROG ? 
0 : -EOPNOTSUPP; + return -EOPNOTSUPP; } static inline int nsim_bpf_disable_tc(struct netdevsim *ns) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 7adeb91bd368..061bebe25cb1 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1184,26 +1184,11 @@ static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog, return 0; } -static u32 tun_xdp_query(struct net_device *dev) -{ - struct tun_struct *tun = netdev_priv(dev); - const struct bpf_prog *xdp_prog; - - xdp_prog = rtnl_dereference(tun->xdp_prog); - if (xdp_prog) - return xdp_prog->aux->id; - - return 0; -} - static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: return tun_xdp_set(dev, xdp->prog, xdp->extack); - case XDP_QUERY_PROG: - xdp->prog_id = tun_xdp_query(dev); - return 0; default: return -EINVAL; } diff --git a/drivers/net/veth.c b/drivers/net/veth.c index b594f03eeddb..e56cd562a664 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -1198,26 +1198,11 @@ err: return err; } -static u32 veth_xdp_query(struct net_device *dev) -{ - struct veth_priv *priv = netdev_priv(dev); - const struct bpf_prog *xdp_prog; - - xdp_prog = priv->_xdp_prog; - if (xdp_prog) - return xdp_prog->aux->id; - - return 0; -} - static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: return veth_xdp_set(dev, xdp->prog, xdp->extack); - case XDP_QUERY_PROG: - xdp->prog_id = veth_xdp_query(dev); - return 0; default: return -EINVAL; } diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index ba38765dc490..6fa8fe5ef160 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -2490,28 +2490,11 @@ err: return err; } -static u32 virtnet_xdp_query(struct net_device *dev) -{ - struct virtnet_info *vi = netdev_priv(dev); - const struct bpf_prog *xdp_prog; - int i; - - for (i = 0; i < vi->max_queue_pairs; i++) { - xdp_prog = rtnl_dereference(vi->rq[i].xdp_prog); - if (xdp_prog) - return xdp_prog->aux->id; - } - return 0; -} - static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: return virtnet_xdp_set(dev, xdp->prog, xdp->extack); - case XDP_QUERY_PROG: - xdp->prog_id = virtnet_xdp_query(dev); - return 0; default: return -EINVAL; } diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index a63e550c370e..458be6882b98 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -1480,32 +1480,11 @@ static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog, return 0; } -static u32 xennet_xdp_query(struct net_device *dev) -{ - unsigned int num_queues = dev->real_num_tx_queues; - struct netfront_info *np = netdev_priv(dev); - const struct bpf_prog *xdp_prog; - struct netfront_queue *queue; - unsigned int i; - - for (i = 0; i < num_queues; ++i) { - queue = &np->queues[i]; - xdp_prog = rtnl_dereference(queue->xdp_prog); - if (xdp_prog) - return xdp_prog->aux->id; - } - - return 0; -} - static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: return xennet_xdp_set(dev, xdp->prog, xdp->extack); - case XDP_QUERY_PROG: - xdp->prog_id = xennet_xdp_query(dev); - return 0; default: return -EINVAL; } diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 7d3c412fcfe5..1046763cd0dc 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -876,8 +876,6 @@ enum bpf_netdev_command { */ XDP_SETUP_PROG, 
XDP_SETUP_PROG_HW, - XDP_QUERY_PROG, - XDP_QUERY_PROG_HW, /* BPF program for offload callbacks, invoked at program load time. */ BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE, @@ -911,12 +909,6 @@ struct netdev_bpf { struct bpf_prog *prog; struct netlink_ext_ack *extack; }; - /* XDP_QUERY_PROG, XDP_QUERY_PROG_HW */ - struct { - u32 prog_id; - /* flags with which program was installed */ - u32 prog_flags; - }; /* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */ struct { struct bpf_offloaded_map *offmap; diff --git a/include/net/xdp.h b/include/net/xdp.h index dbe9c60797e1..3814fb631d52 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -240,8 +240,6 @@ struct xdp_attachment_info { }; struct netdev_bpf; -int xdp_attachment_query(struct xdp_attachment_info *info, - struct netdev_bpf *bpf); bool xdp_attachment_flags_ok(struct xdp_attachment_info *info, struct netdev_bpf *bpf); void xdp_attachment_setup(struct xdp_attachment_info *info, diff --git a/net/core/dev.c b/net/core/dev.c index 82ce0920b172..a2a57988880a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5468,10 +5468,6 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp) } break; - case XDP_QUERY_PROG: - xdp->prog_id = old ? old->aux->id : 0; - break; - default: ret = -EINVAL; break; diff --git a/net/core/xdp.c b/net/core/xdp.c index 3c45f99e26d5..48aba933a5a8 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -400,15 +400,6 @@ void __xdp_release_frame(void *data, struct xdp_mem_info *mem) } EXPORT_SYMBOL_GPL(__xdp_release_frame); -int xdp_attachment_query(struct xdp_attachment_info *info, - struct netdev_bpf *bpf) -{ - bpf->prog_id = info->prog ? info->prog->aux->id : 0; - bpf->prog_flags = info->prog ? info->flags : 0; - return 0; -} -EXPORT_SYMBOL_GPL(xdp_attachment_query); - bool xdp_attachment_flags_ok(struct xdp_attachment_info *info, struct netdev_bpf *bpf) { -- cgit v1.2.3 From aa65ff6b18e0366db1790609956a4ac7308c5668 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 24 Jul 2020 23:14:20 +1000 Subject: powerpc/64s: Implement queued spinlocks and rwlocks These have shown significantly improved performance and fairness when spinlock contention is moderate to high on very large systems. With this series including subsequent patches, on a 16 socket 1536 thread POWER9, a stress test such as same-file open/close from all CPUs gets big speedups, 11620op/s aggregate with simple spinlocks vs 384158op/s (33x faster), where the difference in throughput between the fastest and slowest thread goes from 7x to 1.4x. Thanks to the fast path being identical in terms of atomics and barriers (after a subsequent optimisation patch), single threaded performance is not changed (no measurable difference). On smaller systems, performance and fairness seems to be generally improved. Using dbench on tmpfs as a test (that starts to run into kernel spinlock contention), a 2-socket OpenPOWER POWER9 system was tested with bare metal and KVM guest configurations. Results can be found here: https://github.com/linuxppc/issues/issues/305#issuecomment-663487453 Observations are: - Queued spinlocks are equal when contention is insignificant, as expected and as measured with microbenchmarks. - When there is contention, on bare metal queued spinlocks have better throughput and max latency at all points. 
- When virtualised, queued spinlocks are slightly worse approaching peak throughput, but significantly better throughput and max latency at all points beyond peak, until queued spinlock maximum latency rises when clients are 2x vCPUs. The regressions haven't been analysed very well yet, there are a lot of things that can be tuned, particularly the paravirtualised locking, but the numbers already look like a good net win even on relatively small systems. Signed-off-by: Nicholas Piggin Acked-by: Peter Zijlstra (Intel) Acked-by: Waiman Long Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200724131423.1362108-4-npiggin@gmail.com --- arch/powerpc/Kconfig | 15 +++++++++++++++ arch/powerpc/include/asm/Kbuild | 1 + arch/powerpc/include/asm/qspinlock.h | 25 +++++++++++++++++++++++++ arch/powerpc/include/asm/spinlock.h | 5 +++++ arch/powerpc/include/asm/spinlock_types.h | 5 +++++ arch/powerpc/lib/Makefile | 3 +++ include/asm-generic/qspinlock.h | 2 ++ 7 files changed, 56 insertions(+) create mode 100644 arch/powerpc/include/asm/qspinlock.h (limited to 'include') diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 81c0dee1cbff..a751edacf4bc 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -146,6 +146,8 @@ config PPC select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF if PPC64 + select ARCH_USE_QUEUED_RWLOCKS if PPC_QUEUED_SPINLOCKS + select ARCH_USE_QUEUED_SPINLOCKS if PPC_QUEUED_SPINLOCKS select ARCH_WANT_IPC_PARSE_VERSION select ARCH_WEAK_RELEASE_ACQUIRE select BINFMT_ELF @@ -491,6 +493,19 @@ config HOTPLUG_CPU Say N if you are unsure. +config PPC_QUEUED_SPINLOCKS + bool "Queued spinlocks" + depends on SMP + help + Say Y here to use queued spinlocks which give better scalability and + fairness on large SMP and NUMA systems without harming single threaded + performance. + + This option is currently experimental, the code is more complex and + less tested so it defaults to "N" for the moment. + + If unsure, say "N". + config ARCH_CPU_PROBE_RELEASE def_bool y depends on HOTPLUG_CPU diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild index dadbcf3a0b1e..27c2268dfd6c 100644 --- a/arch/powerpc/include/asm/Kbuild +++ b/arch/powerpc/include/asm/Kbuild @@ -6,5 +6,6 @@ generated-y += syscall_table_spu.h generic-y += export.h generic-y += local64.h generic-y += mcs_spinlock.h +generic-y += qrwlock.h generic-y += vtime.h generic-y += early_ioremap.h diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h new file mode 100644 index 000000000000..c49e33e24edd --- /dev/null +++ b/arch/powerpc/include/asm/qspinlock.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_QSPINLOCK_H +#define _ASM_POWERPC_QSPINLOCK_H + +#include + +#define _Q_PENDING_LOOPS (1 << 9) /* not tuned */ + +#define smp_mb__after_spinlock() smp_mb() + +static __always_inline int queued_spin_is_locked(struct qspinlock *lock) +{ + /* + * This barrier was added to simple spinlocks by commit 51d7d5205d338, + * but it should now be possible to remove it, asm arm64 has done with + * commit c6f5d02b6a0f. 
+ */ + smp_mb(); + return atomic_read(&lock->val); +} +#define queued_spin_is_locked queued_spin_is_locked + +#include + +#endif /* _ASM_POWERPC_QSPINLOCK_H */ diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 21357fe05fe0..434615f1d761 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -3,7 +3,12 @@ #define __ASM_SPINLOCK_H #ifdef __KERNEL__ +#ifdef CONFIG_PPC_QUEUED_SPINLOCKS +#include +#include +#else #include +#endif #endif /* __KERNEL__ */ #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h index 3906f52dae65..c5d742f18021 100644 --- a/arch/powerpc/include/asm/spinlock_types.h +++ b/arch/powerpc/include/asm/spinlock_types.h @@ -6,6 +6,11 @@ # error "please don't include this file directly" #endif +#ifdef CONFIG_PPC_QUEUED_SPINLOCKS +#include +#include +#else #include +#endif #endif diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 5e994cda8e40..d66a645503eb 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -41,7 +41,10 @@ obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \ obj64-y += copypage_64.o copyuser_64.o mem_64.o hweight_64.o \ memcpy_64.o memcpy_mcsafe_64.o +ifndef CONFIG_PPC_QUEUED_SPINLOCKS obj64-$(CONFIG_SMP) += locks.o +endif + obj64-$(CONFIG_ALTIVEC) += vmx-helper.o obj64-$(CONFIG_KPROBES_SANITY_TEST) += test_emulate_step.o \ test_emulate_step_exec_instr.o diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h index fde943d180e0..fb0a814d4395 100644 --- a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h @@ -12,6 +12,7 @@ #include +#ifndef queued_spin_is_locked /** * queued_spin_is_locked - is the spinlock locked? * @lock: Pointer to queued spinlock structure @@ -25,6 +26,7 @@ static __always_inline int queued_spin_is_locked(struct qspinlock *lock) */ return atomic_read(&lock->val); } +#endif /** * queued_spin_value_unlocked - is the spinlock structure unlocked? -- cgit v1.2.3 From 20c0e8269e9d515e677670902c7e1cc0209d6ad9 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 24 Jul 2020 23:14:21 +1000 Subject: powerpc/pseries: Implement paravirt qspinlocks for SPLPAR This implements the generic paravirt qspinlocks using H_PROD and H_CONFER to kick and wait. This uses an un-directed yield to any CPU rather than the directed yield to a pre-empted lock holder that paravirtualised simple spinlocks use, that requires no kick hcall. This is something that could be investigated and improved in future. Performance results can be found in the commit which added queued spinlocks. 
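As background for the directed vs. un-directed yield distinction above, here is a minimal sketch of how the two flavours are expressed with the helpers from asm/paravirt.h. It is illustrative only and not part of the patch; the example_* wrappers and their parameters are invented, while yield_to_preempted(), yield_to_any() and prod_cpu() are the real helpers shown in the diff below.

#include <linux/types.h>
#include <asm/paravirt.h>

/*
 * Directed yield, as used by the paravirtualised simple spinlocks:
 * the spinning vCPU confers its time slice to the specific vCPU that
 * holds the lock, identified by the holder's CPU number and yield
 * count taken from the lock-owner bookkeeping.
 */
static inline void example_directed_yield(int holder_cpu, u32 holder_yield_count)
{
	yield_to_preempted(holder_cpu, holder_yield_count);	/* H_CONFER, targeted */
}

/*
 * Un-directed yield plus kick, the scheme this patch uses for queued
 * spinlocks: the waiter donates its time slice to whichever vCPU the
 * hypervisor picks, and the unlocker wakes it explicitly later.
 */
static inline void example_undirected_wait(void)
{
	yield_to_any();		/* H_CONFER, no target */
}

static inline void example_kick(int waiter_cpu)
{
	prod_cpu(waiter_cpu);	/* H_PROD */
}

The directed form needs to know which vCPU currently holds the lock in order to confer to it; the queued-lock waiter avoids that bookkeeping at the cost of an explicit H_PROD kick on unlock, which is the trade-off noted above.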
Signed-off-by: Nicholas Piggin Acked-by: Peter Zijlstra (Intel) Acked-by: Waiman Long Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200724131423.1362108-5-npiggin@gmail.com --- arch/powerpc/include/asm/paravirt.h | 28 ++++++++++++ arch/powerpc/include/asm/qspinlock.h | 66 +++++++++++++++++++++++++++ arch/powerpc/include/asm/qspinlock_paravirt.h | 7 +++ arch/powerpc/include/asm/spinlock.h | 4 ++ arch/powerpc/platforms/pseries/Kconfig | 9 +++- arch/powerpc/platforms/pseries/setup.c | 4 +- include/asm-generic/qspinlock.h | 2 + 7 files changed, 118 insertions(+), 2 deletions(-) create mode 100644 arch/powerpc/include/asm/qspinlock_paravirt.h (limited to 'include') diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h index 339e8533464b..9362c94fe3aa 100644 --- a/arch/powerpc/include/asm/paravirt.h +++ b/arch/powerpc/include/asm/paravirt.h @@ -28,6 +28,16 @@ static inline void yield_to_preempted(int cpu, u32 yield_count) { plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(cpu), yield_count); } + +static inline void prod_cpu(int cpu) +{ + plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu)); +} + +static inline void yield_to_any(void) +{ + plpar_hcall_norets(H_CONFER, -1, 0); +} #else static inline bool is_shared_processor(void) { @@ -44,6 +54,19 @@ static inline void yield_to_preempted(int cpu, u32 yield_count) { ___bad_yield_to_preempted(); /* This would be a bug */ } + +extern void ___bad_yield_to_any(void); +static inline void yield_to_any(void) +{ + ___bad_yield_to_any(); /* This would be a bug */ +} + +extern void ___bad_prod_cpu(void); +static inline void prod_cpu(int cpu) +{ + ___bad_prod_cpu(); /* This would be a bug */ +} + #endif #define vcpu_is_preempted vcpu_is_preempted @@ -56,4 +79,9 @@ static inline bool vcpu_is_preempted(int cpu) return false; } +static inline bool pv_is_native_spin_unlock(void) +{ + return !is_shared_processor(); +} + #endif /* _ASM_POWERPC_PARAVIRT_H */ diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h index c49e33e24edd..f5066f00a08c 100644 --- a/arch/powerpc/include/asm/qspinlock.h +++ b/arch/powerpc/include/asm/qspinlock.h @@ -3,9 +3,47 @@ #define _ASM_POWERPC_QSPINLOCK_H #include +#include #define _Q_PENDING_LOOPS (1 << 9) /* not tuned */ +#ifdef CONFIG_PARAVIRT_SPINLOCKS +extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); +extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); +extern void __pv_queued_spin_unlock(struct qspinlock *lock); + +static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) +{ + if (!is_shared_processor()) + native_queued_spin_lock_slowpath(lock, val); + else + __pv_queued_spin_lock_slowpath(lock, val); +} + +#define queued_spin_unlock queued_spin_unlock +static inline void queued_spin_unlock(struct qspinlock *lock) +{ + if (!is_shared_processor()) + smp_store_release(&lock->locked, 0); + else + __pv_queued_spin_unlock(lock); +} + +#else +extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); +#endif + +static __always_inline void queued_spin_lock(struct qspinlock *lock) +{ + u32 val = 0; + + if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL))) + return; + + queued_spin_lock_slowpath(lock, val); +} +#define queued_spin_lock queued_spin_lock + #define smp_mb__after_spinlock() smp_mb() static __always_inline int queued_spin_is_locked(struct qspinlock *lock) @@ -20,6 +58,34 @@ static __always_inline int 
queued_spin_is_locked(struct qspinlock *lock) } #define queued_spin_is_locked queued_spin_is_locked +#ifdef CONFIG_PARAVIRT_SPINLOCKS +#define SPIN_THRESHOLD (1<<15) /* not tuned */ + +static __always_inline void pv_wait(u8 *ptr, u8 val) +{ + if (*ptr != val) + return; + yield_to_any(); + /* + * We could pass in a CPU here if waiting in the queue and yield to + * the previous CPU in the queue. + */ +} + +static __always_inline void pv_kick(int cpu) +{ + prod_cpu(cpu); +} + +extern void __pv_init_lock_hash(void); + +static inline void pv_spinlocks_init(void) +{ + __pv_init_lock_hash(); +} + +#endif + #include #endif /* _ASM_POWERPC_QSPINLOCK_H */ diff --git a/arch/powerpc/include/asm/qspinlock_paravirt.h b/arch/powerpc/include/asm/qspinlock_paravirt.h new file mode 100644 index 000000000000..6b60e7736a47 --- /dev/null +++ b/arch/powerpc/include/asm/qspinlock_paravirt.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _ASM_POWERPC_QSPINLOCK_PARAVIRT_H +#define _ASM_POWERPC_QSPINLOCK_PARAVIRT_H + +EXPORT_SYMBOL(__pv_queued_spin_unlock); + +#endif /* _ASM_POWERPC_QSPINLOCK_PARAVIRT_H */ diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 434615f1d761..6ec72282888d 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -10,5 +10,9 @@ #include #endif +#ifndef CONFIG_PARAVIRT_SPINLOCKS +static inline void pv_spinlocks_init(void) { } +#endif + #endif /* __KERNEL__ */ #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index 24c18362e5ea..5e037df2a3a1 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -25,15 +25,22 @@ config PPC_PSERIES select SWIOTLB default y +config PARAVIRT_SPINLOCKS + bool + config PPC_SPLPAR - depends on PPC_PSERIES bool "Support for shared-processor logical partitions" + depends on PPC_PSERIES + select PARAVIRT_SPINLOCKS if PPC_QUEUED_SPINLOCKS + default y help Enabling this option will make the kernel run more efficiently on logically-partitioned pSeries systems which use shared processors, that is, which share physical processors between two or more partitions. + Say Y if you are unsure. 
+ config DTL bool "Dispatch Trace Log" depends on PPC_SPLPAR && DEBUG_FS diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index ba34eb23e8f5..e29c9bf0a3b9 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -773,8 +773,10 @@ static void __init pSeries_setup_arch(void) if (firmware_has_feature(FW_FEATURE_LPAR)) { vpa_init(boot_cpuid); - if (lppaca_shared_proc(get_lppaca())) + if (lppaca_shared_proc(get_lppaca())) { static_branch_enable(&shared_processor); + pv_spinlocks_init(); + } ppc_md.power_save = pseries_lpar_idle; ppc_md.enable_pmcs = pseries_lpar_enable_pmcs; diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h index fb0a814d4395..38ca14e79a86 100644 --- a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h @@ -69,6 +69,7 @@ static __always_inline int queued_spin_trylock(struct qspinlock *lock) extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); +#ifndef queued_spin_lock /** * queued_spin_lock - acquire a queued spinlock * @lock: Pointer to queued spinlock structure @@ -82,6 +83,7 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock) queued_spin_lock_slowpath(lock, val); } +#endif #ifndef queued_spin_unlock /** -- cgit v1.2.3 From 3135f5b73592988af0eb1b11ccbb72a8667be201 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 26 Jul 2020 18:14:43 +0200 Subject: entry: Correct __secure_computing() stub The original version of that used secure_computing() which has no arguments. Review requested to switch to __secure_computing() which has one. The function name was correct, but no argument added and of course compiling without SECCOMP was deemed overrated. Add the missing function argument. Fixes: 6823ecabf030 ("seccomp: Provide stub for __secure_computing()") Reported-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- include/linux/seccomp.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index 03d28c32ad01..51f234b6d28f 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h @@ -58,10 +58,11 @@ static inline int seccomp_mode(struct seccomp *s) struct seccomp { }; struct seccomp_filter { }; +struct seccomp_data; #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER static inline int secure_computing(void) { return 0; } -static inline int __secure_computing(void) { return 0; } +static inline int __secure_computing(const struct seccomp_data *sd) { return 0; } #else static inline void secure_computing_strict(int this_syscall) { return; } #endif -- cgit v1.2.3 From a68d5a502bbacfbd31f98371f777d574b3a91baf Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 24 Jul 2020 15:26:47 -0400 Subject: SUNRPC: Refresh the show_rqstp_flags() macro Ensure that show_rqstp_flags() can recognize and display the RQ_AUTHERR flag, added in commit 83dd59a0b9af ("SUNRPC/nfs: Fix return value for nfs4_callback_compound()") and the RQ_DATA flag, added in commit ff3ac5c3dc23 ("SUNRPC: Add a server side per-connection limit"). 
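The rework is an instance of the X-macro idiom: the flag list is written once and expanded twice, first to emit the TRACE_DEFINE_ENUM() registrations and then to build the name table consumed by __print_flags(). Below is a self-contained sketch of the idiom with invented EXAMPLE_* names rather than the SUNRPC ones, assuming nothing beyond plain C.

/* The flags are listed exactly once. */
#define EXAMPLE_FLAG_LIST	\
	example_flag(SECURE)	\
	example_flag(LOCAL)	\
	example_flag_end(BUSY)

/* First expansion: an enum of bit numbers. */
#define example_flag(x)		EX_##x,
#define example_flag_end(x)	EX_##x
enum example_bits { EXAMPLE_FLAG_LIST };
#undef example_flag
#undef example_flag_end

/* Second expansion: a { mask, name } table for pretty-printing. */
#define example_flag(x)		{ 1UL << EX_##x, #x },
#define example_flag_end(x)	{ 1UL << EX_##x, #x }
static const struct {
	unsigned long mask;
	const char *name;
} example_names[] = {
	EXAMPLE_FLAG_LIST
};

Keeping a single list is what prevents a newly added flag (RQ_DATA and RQ_AUTHERR here) from being registered in one expansion but forgotten in the other.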
Signed-off-by: Chuck Lever --- include/trace/events/sunrpc.h | 35 +++++++++++++++++++++++++++-------- 1 file changed, 27 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 6a12935b8b14..65d7dfbbc9cd 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -1250,15 +1250,34 @@ DECLARE_EVENT_CLASS(svc_xdr_buf_class, DEFINE_SVCXDRBUF_EVENT(recvfrom); DEFINE_SVCXDRBUF_EVENT(sendto); +/* + * from include/linux/sunrpc/svc.h + */ +#define SVC_RQST_FLAG_LIST \ + svc_rqst_flag(SECURE) \ + svc_rqst_flag(LOCAL) \ + svc_rqst_flag(USEDEFERRAL) \ + svc_rqst_flag(DROPME) \ + svc_rqst_flag(SPLICE_OK) \ + svc_rqst_flag(VICTIM) \ + svc_rqst_flag(BUSY) \ + svc_rqst_flag(DATA) \ + svc_rqst_flag_end(AUTHERR) + +#undef svc_rqst_flag +#undef svc_rqst_flag_end +#define svc_rqst_flag(x) TRACE_DEFINE_ENUM(RQ_##x); +#define svc_rqst_flag_end(x) TRACE_DEFINE_ENUM(RQ_##x); + +SVC_RQST_FLAG_LIST + +#undef svc_rqst_flag +#undef svc_rqst_flag_end +#define svc_rqst_flag(x) { BIT(RQ_##x), #x }, +#define svc_rqst_flag_end(x) { BIT(RQ_##x), #x } + #define show_rqstp_flags(flags) \ - __print_flags(flags, "|", \ - { (1UL << RQ_SECURE), "RQ_SECURE"}, \ - { (1UL << RQ_LOCAL), "RQ_LOCAL"}, \ - { (1UL << RQ_USEDEFERRAL), "RQ_USEDEFERRAL"}, \ - { (1UL << RQ_DROPME), "RQ_DROPME"}, \ - { (1UL << RQ_SPLICE_OK), "RQ_SPLICE_OK"}, \ - { (1UL << RQ_VICTIM), "RQ_VICTIM"}, \ - { (1UL << RQ_BUSY), "RQ_BUSY"}) + __print_flags(flags, "|", SVC_RQST_FLAG_LIST) TRACE_EVENT(svc_recv, TP_PROTO(struct svc_rqst *rqst, int len), -- cgit v1.2.3 From ab92ffd5f6ac3ebd4a7650ef906702ab86127b45 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 18:30:01 -0700 Subject: power: fix duplicated words in bq2415x_charger.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Drop the doubled word "for". Change "It it" to "If it". Signed-off-by: Randy Dunlap Cc: linux-pm@vger.kernel.org Acked-by: Pali Rohár Signed-off-by: Sebastian Reichel --- include/linux/power/bq2415x_charger.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/power/bq2415x_charger.h b/include/linux/power/bq2415x_charger.h index 4ca08321e251..f3c267f2a467 100644 --- a/include/linux/power/bq2415x_charger.h +++ b/include/linux/power/bq2415x_charger.h @@ -14,8 +14,8 @@ * value is -1 then default chip value (specified in datasheet) will be * used. * - * Value resistor_sense is needed for for configuring charge and - * termination current. It it is less or equal to zero, configuring charge + * Value resistor_sense is needed for configuring charge and + * termination current. If it is less or equal to zero, configuring charge * and termination current will not be possible. * * For automode support is needed to provide name of power supply device -- cgit v1.2.3 From a070bdbbb06d7787ec7844a4f1e059cf8b55205d Mon Sep 17 00:00:00 2001 From: Michael Walle Date: Sun, 26 Jul 2020 01:23:37 +0200 Subject: gpio: regmap: fix type clash GPIO_REGMAP_ADDR_ZERO() cast to unsigned long but the actual config parameters are unsigned int. We use unsigned int here because that is the type which is used by the underlying regmap. 
Fixes: ebe363197e52 ("gpio: add a reusable generic gpio_chip using regmap") Reported-by: kernel test robot Signed-off-by: Michael Walle Link: https://lore.kernel.org/r/20200725232337.27581-1-michael@walle.cc Signed-off-by: Linus Walleij --- include/linux/gpio/regmap.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/gpio/regmap.h b/include/linux/gpio/regmap.h index 4c1e6b34e824..ad76f3d0a6ba 100644 --- a/include/linux/gpio/regmap.h +++ b/include/linux/gpio/regmap.h @@ -8,7 +8,7 @@ struct gpio_regmap; struct irq_domain; struct regmap; -#define GPIO_REGMAP_ADDR_ZERO ((unsigned long)(-1)) +#define GPIO_REGMAP_ADDR_ZERO ((unsigned int)(-1)) #define GPIO_REGMAP_ADDR(addr) ((addr) ? : GPIO_REGMAP_ADDR_ZERO) /** -- cgit v1.2.3 From 6d4c4479f80141a2a24ac798a86942b1225206df Mon Sep 17 00:00:00 2001 From: Zenghui Yu Date: Tue, 30 Jun 2020 21:41:26 +0800 Subject: irqchip/gic-v3: Remove unused register definition [maz: The GICv3 spec has evolved quite a bit since the draft the Linux driver was written against, and some register definitions are simply gone] As per the GICv3 specification, GIC{D,R}_SEIR are not assigned and the locations (0x0068) are actually Reserved. GICR_MOV{LPI,ALL}R are two IMP DEF registers and might be defined by some specific micro-architecture. As they're not used anywhere in the kernel, just drop all of them. Signed-off-by: Zenghui Yu [maz: added context explaination] Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200630134126.880-1-yuzenghui@huawei.com --- include/linux/irqchip/arm-gic-v3.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'include') diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 6c36b6cc3edf..f6d092fdb93d 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -19,7 +19,6 @@ #define GICD_CLRSPI_NSR 0x0048 #define GICD_SETSPI_SR 0x0050 #define GICD_CLRSPI_SR 0x0058 -#define GICD_SEIR 0x0068 #define GICD_IGROUPR 0x0080 #define GICD_ISENABLER 0x0100 #define GICD_ICENABLER 0x0180 @@ -119,14 +118,11 @@ #define GICR_WAKER 0x0014 #define GICR_SETLPIR 0x0040 #define GICR_CLRLPIR 0x0048 -#define GICR_SEIR GICD_SEIR #define GICR_PROPBASER 0x0070 #define GICR_PENDBASER 0x0078 #define GICR_INVLPIR 0x00A0 #define GICR_INVALLR 0x00B0 #define GICR_SYNCR 0x00C0 -#define GICR_MOVLPIR 0x0100 -#define GICR_MOVALLR 0x0110 #define GICR_IDREGS GICD_IDREGS #define GICR_PIDR2 GICD_PIDR2 -- cgit v1.2.3 From b7640d765dbbde794c49198c9851f6026fb6e43e Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 18 Jul 2020 17:28:53 -0700 Subject: irqchip: irq-bcm2836.h: drop a duplicated word Drop the repeated word "the" in a comment. Signed-off-by: Randy Dunlap Signed-off-by: Marc Zyngier Cc: Thomas Gleixner Cc: Jason Cooper Cc: Marc Zyngier Link: https://lore.kernel.org/r/20200719002853.20419-1-rdunlap@infradead.org --- include/linux/irqchip/irq-bcm2836.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/irqchip/irq-bcm2836.h b/include/linux/irqchip/irq-bcm2836.h index f224d6f9e550..ac5719d8f56b 100644 --- a/include/linux/irqchip/irq-bcm2836.h +++ b/include/linux/irqchip/irq-bcm2836.h @@ -30,7 +30,7 @@ */ #define LOCAL_MAILBOX_INT_CONTROL0 0x050 /* - * The CPU's interrupt status register. Bits are defined by the the + * The CPU's interrupt status register. Bits are defined by the * LOCAL_IRQ_* bits below. 
*/ #define LOCAL_IRQ_PENDING0 0x060 -- cgit v1.2.3 From f8410e626569324cfe831aaecc0504cafc12b471 Mon Sep 17 00:00:00 2001 From: Saravana Kannan Date: Fri, 17 Jul 2020 17:06:34 -0700 Subject: irqchip: Add IRQCHIP_PLATFORM_DRIVER_BEGIN/END and IRQCHIP_MATCH helper macros Compiling an irqchip driver as a platform driver needs to bunch of things to be done right: - Making sure the parent domain is initialized first - Making sure the device can't be unbound from sysfs - Disallowing module unload if it's built as a module - Finding the parent node - Etc. Instead of trying to make sure all future irqchip platform drivers get this right, provide boilerplate macros that take care of all of this. An example use would look something like this. Where acme_foo_init and acme_bar_init are similar to what would be passed to IRQCHIP_DECLARE. IRQCHIP_PLATFORM_DRIVER_BEGIN(acme_irq) IRQCHIP_MATCH("acme,foo", acme_foo_init) IRQCHIP_MATCH("acme,bar", acme_bar_init) IRQCHIP_PLATFORM_DRIVER_END(acme_irq) Signed-off-by: Saravana Kannan Signed-off-by: Marc Zyngier Cc: John Stultz Link: https://lore.kernel.org/r/20200718000637.3632841-2-saravanak@google.com --- drivers/irqchip/irqchip.c | 29 +++++++++++++++++++++++++++++ include/linux/irqchip.h | 23 +++++++++++++++++++++++ 2 files changed, 52 insertions(+) (limited to 'include') diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c index 2b35e68bea82..1bb0e36c2bf3 100644 --- a/drivers/irqchip/irqchip.c +++ b/drivers/irqchip/irqchip.c @@ -10,8 +10,10 @@ #include #include +#include #include #include +#include /* * This special of_device_id is the sentinel at the end of the @@ -29,3 +31,30 @@ void __init irqchip_init(void) of_irq_init(__irqchip_of_table); acpi_probe_device_table(irqchip); } + +int platform_irqchip_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct device_node *par_np = of_irq_find_parent(np); + of_irq_init_cb_t irq_init_cb = of_device_get_match_data(&pdev->dev); + + if (!irq_init_cb) + return -EINVAL; + + if (par_np == np) + par_np = NULL; + + /* + * If there's a parent interrupt controller and none of the parent irq + * domains have been registered, that means the parent interrupt + * controller has not been initialized yet. it's not time for this + * interrupt controller to initialize. So, defer probe of this + * interrupt controller. The actual initialization callback of this + * interrupt controller can check for specific domains as necessary. 
+ */ + if (par_np && !irq_find_matching_host(np, DOMAIN_BUS_ANY)) + return -EPROBE_DEFER; + + return irq_init_cb(np, par_np); +} +EXPORT_SYMBOL_GPL(platform_irqchip_probe); diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h index 447f22880a69..8e754d8b8155 100644 --- a/include/linux/irqchip.h +++ b/include/linux/irqchip.h @@ -13,6 +13,7 @@ #include #include +#include /* * This macro must be used by the different irqchip drivers to declare @@ -26,6 +27,28 @@ */ #define IRQCHIP_DECLARE(name, compat, fn) OF_DECLARE_2(irqchip, name, compat, fn) +extern int platform_irqchip_probe(struct platform_device *pdev); + +#define IRQCHIP_PLATFORM_DRIVER_BEGIN(drv_name) \ +static const struct of_device_id drv_name##_irqchip_match_table[] = { + +#define IRQCHIP_MATCH(compat, fn) { .compatible = compat, .data = fn }, + +#define IRQCHIP_PLATFORM_DRIVER_END(drv_name) \ + {}, \ +}; \ +MODULE_DEVICE_TABLE(of, drv_name##_irqchip_match_table); \ +static struct platform_driver drv_name##_driver = { \ + .probe = platform_irqchip_probe, \ + .driver = { \ + .name = #drv_name, \ + .owner = THIS_MODULE, \ + .of_match_table = drv_name##_irqchip_match_table, \ + .suppress_bind_attrs = true, \ + }, \ +}; \ +builtin_platform_driver(drv_name##_driver) + /* * This macro must be used by the different irqchip drivers to declare * the association between their version and their initialization function. -- cgit v1.2.3 From 762a21fd45e056e9ccd2ef5787b4ee0c5af9bec8 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 26 Jul 2020 11:12:36 +0100 Subject: irqchip: Fix IRQCHIP_PLATFORM_DRIVER_* compilation by including module.h The newly introduced IRQCHIP_PLATFORM_DRIVER_* macros expand into module-related macros, but do so without including module.h. Depending on the driver and/or architecture, this happens to work, or not. Unconditionnaly include linux/module.h to sort it out. Fixes: f3b5e608ed6d ("irqchip: Add IRQCHIP_PLATFORM_DRIVER_BEGIN/END and IRQCHIP_MATCH helper macros") Reported-by: kernel test robot Cc: Saravana Kannan Signed-off-by: Marc Zyngier --- include/linux/irqchip.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h index 8e754d8b8155..67351aac65ef 100644 --- a/include/linux/irqchip.h +++ b/include/linux/irqchip.h @@ -12,6 +12,7 @@ #define _LINUX_IRQCHIP_H #include +#include #include #include -- cgit v1.2.3 From b4a461e72bcb28a512bbdd29a4cb70aede2d68d3 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 21 Jul 2020 16:22:48 +1000 Subject: printk: Make linux/printk.h self-contained As it stands if you include printk.h by itself it will fail to compile because it requires definitions from ratelimit.h. However, simply including ratelimit.h from printk.h does not work due to inclusion loops involving sched.h and kernel.h. This patch solves this by moving bits from ratelimit.h into a new header file which can then be included by printk.h without any worries about header loops. The build bot then revealed some intriguing failures arising out of this patch. On s390 there is an inclusion loop with asm/bug.h and linux/kernel.h that triggers a compile failure, because kernel.h will cause asm-generic/bug.h to be included before s390's own asm/bug.h has finished processing. This has been fixed by not including kernel.h in arch/s390/include/asm/bug.h. 
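The fix follows the familiar types-only leaf header pattern: the plain data definitions move into a header with minimal dependencies so that a widely included header (printk.h here) can pull them in without creating an include cycle, while the helpers that still need sched.h and friends stay behind. A generic sketch of the pattern, with invented foo_* names and an ordinary userspace include standing in for the heavyweight dependency:

/* foo_types.h -- leaf header: the bare type, minimal includes. */
#ifndef _FOO_TYPES_H
#define _FOO_TYPES_H

struct foo_state {
	int interval;
	int burst;
};

#endif /* _FOO_TYPES_H */

/* foo.h -- full API: helpers that drag in heavier headers. */
#ifndef _FOO_H
#define _FOO_H

#include "foo_types.h"
#include <string.h>	/* stand-in for the heavyweight dependency */

static inline void foo_state_init(struct foo_state *s, int interval, int burst)
{
	memset(s, 0, sizeof(*s));
	s->interval = interval;
	s->burst = burst;
}

#endif /* _FOO_H */

In the patch, ratelimit_types.h plays the role of foo_types.h and ratelimit.h keeps the inline helpers, which is why printk.h can now include the former directly.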
Signed-off-by: Herbert Xu Reviewed-by: Andy Shevchenko Reviewed-by: Sergey Senozhatsky Acked-by: Petr Mladek Acked-by: Steven Rostedt (VMware) Signed-off-by: Sergey Senozhatsky Link: https://lore.kernel.org/r/20200721062248.GA18383@gondor.apana.org.au --- arch/s390/include/asm/bug.h | 2 +- include/linux/printk.h | 1 + include/linux/ratelimit.h | 36 +--------------------------------- include/linux/ratelimit_types.h | 43 +++++++++++++++++++++++++++++++++++++++++ 4 files changed, 46 insertions(+), 36 deletions(-) create mode 100644 include/linux/ratelimit_types.h (limited to 'include') diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h index 7725f8006fdf..0b25f28351ed 100644 --- a/arch/s390/include/asm/bug.h +++ b/arch/s390/include/asm/bug.h @@ -2,7 +2,7 @@ #ifndef _ASM_S390_BUG_H #define _ASM_S390_BUG_H -#include +#include #ifdef CONFIG_BUG diff --git a/include/linux/printk.h b/include/linux/printk.h index fc8f03c54543..34c1a7be3e01 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -7,6 +7,7 @@ #include #include #include +#include extern const char linux_banner[]; extern const char linux_proc_banner[]; diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h index 8ddf79e9207a..b17e0cd0a30c 100644 --- a/include/linux/ratelimit.h +++ b/include/linux/ratelimit.h @@ -2,41 +2,10 @@ #ifndef _LINUX_RATELIMIT_H #define _LINUX_RATELIMIT_H -#include +#include #include #include -#define DEFAULT_RATELIMIT_INTERVAL (5 * HZ) -#define DEFAULT_RATELIMIT_BURST 10 - -/* issue num suppressed message on exit */ -#define RATELIMIT_MSG_ON_RELEASE BIT(0) - -struct ratelimit_state { - raw_spinlock_t lock; /* protect the state */ - - int interval; - int burst; - int printed; - int missed; - unsigned long begin; - unsigned long flags; -}; - -#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \ - .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ - .interval = interval_init, \ - .burst = burst_init, \ - } - -#define RATELIMIT_STATE_INIT_DISABLED \ - RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST) - -#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \ - \ - struct ratelimit_state name = \ - RATELIMIT_STATE_INIT(name, interval_init, burst_init) \ - static inline void ratelimit_state_init(struct ratelimit_state *rs, int interval, int burst) { @@ -73,9 +42,6 @@ ratelimit_set_flags(struct ratelimit_state *rs, unsigned long flags) extern struct ratelimit_state printk_ratelimit_state; -extern int ___ratelimit(struct ratelimit_state *rs, const char *func); -#define __ratelimit(state) ___ratelimit(state, __func__) - #ifdef CONFIG_PRINTK #define WARN_ON_RATELIMIT(condition, state) ({ \ diff --git a/include/linux/ratelimit_types.h b/include/linux/ratelimit_types.h new file mode 100644 index 000000000000..b676aa419eef --- /dev/null +++ b/include/linux/ratelimit_types.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_RATELIMIT_TYPES_H +#define _LINUX_RATELIMIT_TYPES_H + +#include +#include +#include + +#define DEFAULT_RATELIMIT_INTERVAL (5 * HZ) +#define DEFAULT_RATELIMIT_BURST 10 + +/* issue num suppressed message on exit */ +#define RATELIMIT_MSG_ON_RELEASE BIT(0) + +struct ratelimit_state { + raw_spinlock_t lock; /* protect the state */ + + int interval; + int burst; + int printed; + int missed; + unsigned long begin; + unsigned long flags; +}; + +#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \ + .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ + .interval = interval_init, \ + .burst = burst_init, \ 
+ } + +#define RATELIMIT_STATE_INIT_DISABLED \ + RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST) + +#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \ + \ + struct ratelimit_state name = \ + RATELIMIT_STATE_INIT(name, interval_init, burst_init) \ + +extern int ___ratelimit(struct ratelimit_state *rs, const char *func); +#define __ratelimit(state) ___ratelimit(state, __func__) + +#endif /* _LINUX_RATELIMIT_TYPES_H */ -- cgit v1.2.3 From d97758e048e5fe91c7d8ff9ce5f030ee88d92161 Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Thu, 23 Jul 2020 03:58:41 +0300 Subject: dmaengine: Introduce min burst length capability Some hardware aside from default 0/1 may have greater minimum burst transactions length constraints. Here we introduce the DMA device and slave capability, which if required can be initialized by the DMA engine driver with the device-specific value. Signed-off-by: Serge Semin Reviewed-by: Andy Shevchenko Link: https://lore.kernel.org/r/20200723005848.31907-4-Sergey.Semin@baikalelectronics.ru Signed-off-by: Vinod Koul --- drivers/dma/dmaengine.c | 1 + include/linux/dmaengine.h | 4 ++++ 2 files changed, 5 insertions(+) (limited to 'include') diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 2b06a7a8629d..2f1a7c0c5446 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -592,6 +592,7 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) caps->src_addr_widths = device->src_addr_widths; caps->dst_addr_widths = device->dst_addr_widths; caps->directions = device->directions; + caps->min_burst = device->min_burst; caps->max_burst = device->max_burst; caps->residue_granularity = device->residue_granularity; caps->descriptor_reuse = device->descriptor_reuse; diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 883e1e087de5..7d6e2aa26980 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -467,6 +467,7 @@ enum dma_residue_granularity { * Since the enum dma_transfer_direction is not defined as bit flag for * each type, the dma controller should set BIT() and same * should be checked by controller as well + * @min_burst: min burst capability per-transfer * @max_burst: max burst capability per-transfer * @cmd_pause: true, if pause is supported (i.e. for reading residue or * for resume later) @@ -480,6 +481,7 @@ struct dma_slave_caps { u32 src_addr_widths; u32 dst_addr_widths; u32 directions; + u32 min_burst; u32 max_burst; bool cmd_pause; bool cmd_resume; @@ -771,6 +773,7 @@ struct dma_filter { * Since the enum dma_transfer_direction is not defined as bit flag for * each type, the dma controller should set BIT() and same * should be checked by controller as well + * @min_burst: min burst capability per-transfer * @max_burst: max burst capability per-transfer * @residue_granularity: granularity of the transfer residue reported * by tx_status @@ -841,6 +844,7 @@ struct dma_device { u32 src_addr_widths; u32 dst_addr_widths; u32 directions; + u32 min_burst; u32 max_burst; bool descriptor_reuse; enum dma_residue_granularity residue_granularity; -- cgit v1.2.3 From b1b40b8fe7e8fb26e33bad1766ce322d2c63a6c7 Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Thu, 23 Jul 2020 03:58:42 +0300 Subject: dmaengine: Introduce max SG burst capability Some devices may lack the support of the hardware accelerated SG list entries automatic walking through and execution. 
In this case a burden of the SG list traversal and DMA engine re-initialization lies on the DMA engine driver (normally implemented by using a DMA transfer completion IRQ to recharge the DMA device with a next SG list entry). But such solution may not be suitable for some DMA consumers. In particular SPI devices need both Tx and Rx DMA channels work synchronously in order to avoid the Rx FIFO overflow. In case if Rx DMA channel is paused for some time while the Tx DMA channel works implicitly pulling data into the Rx FIFO, the later will be eventually overflown, which will cause the data loss. So if SG list entries aren't automatically fetched by the DMA engine, but are one-by-one manually selected for execution in the ISRs/deferred work/etc., such problem will eventually happen due to the non-deterministic latencies of the service execution. In order to let the DMA consumer know about the DMA device capabilities regarding the hardware accelerated SG list traversal we introduce the max_sg_burst capability. It is supposed to be initialized by the DMA engine driver with 0 if there is no limitation of the number of SG entries atomically executed and with non-zero value if there is such constraints, so the upper limit is determined by the number set to the property. Suggested-by: Andy Shevchenko Signed-off-by: Serge Semin Reviewed-by: Andy Shevchenko Link: https://lore.kernel.org/r/20200723005848.31907-5-Sergey.Semin@baikalelectronics.ru Signed-off-by: Vinod Koul --- drivers/dma/dmaengine.c | 1 + include/linux/dmaengine.h | 8 ++++++++ 2 files changed, 9 insertions(+) (limited to 'include') diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 2f1a7c0c5446..8177f78faeda 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -594,6 +594,7 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) caps->directions = device->directions; caps->min_burst = device->min_burst; caps->max_burst = device->max_burst; + caps->max_sg_burst = device->max_sg_burst; caps->residue_granularity = device->residue_granularity; caps->descriptor_reuse = device->descriptor_reuse; caps->cmd_pause = !!device->device_pause; diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 7d6e2aa26980..4cbe09e66db2 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -469,6 +469,9 @@ enum dma_residue_granularity { * should be checked by controller as well * @min_burst: min burst capability per-transfer * @max_burst: max burst capability per-transfer + * @max_sg_burst: max number of SG list entries executed in a single burst + * DMA tansaction with no software intervention for reinitialization. + * Zero value means unlimited number of entries. * @cmd_pause: true, if pause is supported (i.e. for reading residue or * for resume later) * @cmd_resume: true, if resume is supported @@ -483,6 +486,7 @@ struct dma_slave_caps { u32 directions; u32 min_burst; u32 max_burst; + u32 max_sg_burst; bool cmd_pause; bool cmd_resume; bool cmd_terminate; @@ -775,6 +779,9 @@ struct dma_filter { * should be checked by controller as well * @min_burst: min burst capability per-transfer * @max_burst: max burst capability per-transfer + * @max_sg_burst: max number of SG list entries executed in a single burst + * DMA tansaction with no software intervention for reinitialization. + * Zero value means unlimited number of entries. 
* @residue_granularity: granularity of the transfer residue reported * by tx_status * @device_alloc_chan_resources: allocate resources and return the @@ -846,6 +853,7 @@ struct dma_device { u32 directions; u32 min_burst; u32 max_burst; + u32 max_sg_burst; bool descriptor_reuse; enum dma_residue_granularity residue_granularity; -- cgit v1.2.3 From 3b6d694eb3eebd86ec44a119e730943ac8e03a6b Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Thu, 23 Jul 2020 03:58:43 +0300 Subject: dmaengine: Introduce DMA-device device_caps callback There are DMA devices (like ours version of Synopsys DW DMAC) which have DMA capabilities non-uniformly redistributed between the device channels. In order to provide a way of exposing the channel-specific parameters to the DMA engine consumers, we introduce a new DMA-device callback. In case if provided it gets called from the dma_get_slave_caps() method and is able to override the generic DMA-device capabilities. Signed-off-by: Serge Semin Reviewed-by: Andy Shevchenko Link: https://lore.kernel.org/r/20200723005848.31907-6-Sergey.Semin@baikalelectronics.ru Signed-off-by: Vinod Koul --- drivers/dma/dmaengine.c | 10 ++++++++++ include/linux/dmaengine.h | 4 ++++ 2 files changed, 14 insertions(+) (limited to 'include') diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 8177f78faeda..a53e71d2bbd4 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -601,6 +601,16 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) caps->cmd_resume = !!device->device_resume; caps->cmd_terminate = !!device->device_terminate_all; + /* + * DMA engine device might be configured with non-uniformly + * distributed slave capabilities per device channels. In this + * case the corresponding driver may provide the device_caps + * callback to override the generic capabilities with + * channel-specific ones. + */ + if (device->device_caps) + device->device_caps(chan, caps); + return 0; } EXPORT_SYMBOL_GPL(dma_get_slave_caps); diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 4cbe09e66db2..d718671bfd25 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -801,6 +801,8 @@ struct dma_filter { * be called after period_len bytes have been transferred. * @device_prep_interleaved_dma: Transfer expression in a generic way. * @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address + * @device_caps: May be used to override the generic DMA slave capabilities + * with per-channel specific ones * @device_config: Pushes a new configuration to a channel, return 0 or an error * code * @device_pause: Pauses any transfer happening on a channel. Returns @@ -901,6 +903,8 @@ struct dma_device { struct dma_chan *chan, dma_addr_t dst, u64 data, unsigned long flags); + void (*device_caps)(struct dma_chan *chan, + struct dma_slave_caps *caps); int (*device_config)(struct dma_chan *chan, struct dma_slave_config *config); int (*device_pause)(struct dma_chan *chan); -- cgit v1.2.3 From 585d35451e94b2e1b0bf59ef55d3b4a1c8ab3d77 Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Thu, 23 Jul 2020 03:58:46 +0300 Subject: dmaengine: dw: Initialize min and max burst DMA device capability According to the DW APB DMAC data book the minimum burst transaction length is 1 and it's true for any version of the controller since isn't parametrised in the coreAssembler so can't be changed at the IP-core synthesis stage. 
The maximum burst transaction can vary from channel to channel and from controller to controller depending on a IP-core parameter the system engineer activated during the IP-core synthesis. Let's initialise both min_burst and max_burst members of the DMA controller descriptor with extreme values so the DMA clients could use them to properly optimize the DMA requests. The channels and controller-specific max_burst length initialization will be introduced by the follow-up patches. Signed-off-by: Serge Semin Reviewed-by: Andy Shevchenko Link: https://lore.kernel.org/r/20200723005848.31907-9-Sergey.Semin@baikalelectronics.ru Signed-off-by: Vinod Koul --- drivers/dma/dw/core.c | 2 ++ include/linux/platform_data/dma-dw.h | 2 ++ 2 files changed, 4 insertions(+) (limited to 'include') diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index fb95920c429e..afe5a2e465af 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -1223,6 +1223,8 @@ int do_dma_probe(struct dw_dma_chip *chip) dw->dma.device_issue_pending = dwc_issue_pending; /* DMA capabilities */ + dw->dma.min_burst = DW_DMA_MIN_BURST; + dw->dma.max_burst = DW_DMA_MAX_BURST; dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS; dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS; dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) | diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index f3eaf9ec00a1..369e41e9dcc9 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h @@ -12,6 +12,8 @@ #define DW_DMA_MAX_NR_MASTERS 4 #define DW_DMA_MAX_NR_CHANNELS 8 +#define DW_DMA_MIN_BURST 1 +#define DW_DMA_MAX_BURST 256 /** * struct dw_dma_slave - Controller-specific information about a slave -- cgit v1.2.3 From ca7f2851712e7072e8f327882dc4bdaaae3a8079 Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Thu, 23 Jul 2020 03:58:47 +0300 Subject: dmaengine: dw: Introduce max burst length hw config IP core of the DW DMA controller may be synthesized with different max burst length of the transfers per each channel. According to Synopsis having the fixed maximum burst transactions length may provide some performance gain. At the same time setting up the source and destination multi size exceeding the max burst length limitation may cause a serious problems. In our case the DMA transaction just hangs up. In order to fix this lets introduce the max burst length platform config of the DW DMA controller device and don't let the DMA channels configuration code exceed the burst length hardware limitation. Note the maximum burst length parameter can be detected either in runtime from the DWC parameter registers or from the dedicated DT property. Depending on the IP core configuration the maximum value can vary from channel to channel so by overriding the channel slave max_burst capability we make sure a DMA consumer will get the channel-specific max burst length. 
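On the consumer side these capability fields are reachable through dma_get_slave_caps(), so a peripheral driver can bound the burst it asks for before configuring the channel. The following is an illustrative sketch rather than code from an in-tree user; the example_cfg_burst() helper and its parameters are invented, while the dmaengine calls are the existing API.

#include <linux/dmaengine.h>

static int example_cfg_burst(struct dma_chan *chan, dma_addr_t fifo_addr,
			     u32 wanted_burst)
{
	struct dma_slave_caps caps;
	struct dma_slave_config cfg = { };
	int ret;

	ret = dma_get_slave_caps(chan, &caps);
	if (ret)
		return ret;

	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = fifo_addr;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	/*
	 * Stay within the per-channel limits; with this series the dw
	 * driver reports them from the DWC parameter registers or from
	 * the "snps,max-burst-len" property.
	 */
	cfg.dst_maxburst = wanted_burst;
	if (caps.max_burst && cfg.dst_maxburst > caps.max_burst)
		cfg.dst_maxburst = caps.max_burst;
	if (cfg.dst_maxburst < caps.min_burst)
		cfg.dst_maxburst = caps.min_burst;

	return dmaengine_slave_config(chan, &cfg);
}

Without such clamping a client could request a multi-size beyond what the channel supports, which is exactly the hang scenario described above.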
Signed-off-by: Serge Semin Reviewed-by: Andy Shevchenko Link: https://lore.kernel.org/r/20200723005848.31907-10-Sergey.Semin@baikalelectronics.ru Signed-off-by: Vinod Koul --- drivers/dma/dw/core.c | 16 ++++++++++++++++ drivers/dma/dw/of.c | 5 +++++ drivers/dma/dw/regs.h | 2 ++ include/linux/platform_data/dma-dw.h | 3 +++ 4 files changed, 26 insertions(+) (limited to 'include') diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index afe5a2e465af..588b9bae827c 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -791,6 +791,11 @@ static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig) memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); + dwc->dma_sconfig.src_maxburst = + clamp(dwc->dma_sconfig.src_maxburst, 0U, dwc->max_burst); + dwc->dma_sconfig.dst_maxburst = + clamp(dwc->dma_sconfig.dst_maxburst, 0U, dwc->max_burst); + dw->encode_maxburst(dwc, &dwc->dma_sconfig.src_maxburst); dw->encode_maxburst(dwc, &dwc->dma_sconfig.dst_maxburst); @@ -1049,6 +1054,13 @@ static void dwc_free_chan_resources(struct dma_chan *chan) dev_vdbg(chan2dev(chan), "%s: done\n", __func__); } +static void dwc_caps(struct dma_chan *chan, struct dma_slave_caps *caps) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + + caps->max_burst = dwc->max_burst; +} + int do_dma_probe(struct dw_dma_chip *chip) { struct dw_dma *dw = chip->dw; @@ -1189,9 +1201,12 @@ int do_dma_probe(struct dw_dma_chip *chip) dwc->nollp = (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0 || (dwc_params >> DWC_PARAMS_HC_LLP & 0x1) == 1; + dwc->max_burst = + (0x4 << (dwc_params >> DWC_PARAMS_MSIZE & 0x7)); } else { dwc->block_size = pdata->block_size; dwc->nollp = !pdata->multi_block[i]; + dwc->max_burst = pdata->max_burst[i] ?: DW_DMA_MAX_BURST; } } @@ -1214,6 +1229,7 @@ int do_dma_probe(struct dw_dma_chip *chip) dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; + dw->dma.device_caps = dwc_caps; dw->dma.device_config = dwc_config; dw->dma.device_pause = dwc_pause; dw->dma.device_resume = dwc_resume; diff --git a/drivers/dma/dw/of.c b/drivers/dma/dw/of.c index 9e27831dee32..1474b3817ef4 100644 --- a/drivers/dma/dw/of.c +++ b/drivers/dma/dw/of.c @@ -98,6 +98,11 @@ struct dw_dma_platform_data *dw_dma_parse_dt(struct platform_device *pdev) pdata->multi_block[tmp] = 1; } + if (of_property_read_u32_array(np, "snps,max-burst-len", pdata->max_burst, + nr_channels)) { + memset32(pdata->max_burst, DW_DMA_MAX_BURST, nr_channels); + } + if (!of_property_read_u32(np, "snps,dma-protection-control", &tmp)) { if (tmp > CHAN_PROTCTL_MASK) return NULL; diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h index 1ab840b06e79..76654bd13c1a 100644 --- a/drivers/dma/dw/regs.h +++ b/drivers/dma/dw/regs.h @@ -126,6 +126,7 @@ struct dw_dma_regs { /* Bitfields in DWC_PARAMS */ #define DWC_PARAMS_MBLK_EN 11 /* multi block transfer */ #define DWC_PARAMS_HC_LLP 13 /* set LLP register to zero */ +#define DWC_PARAMS_MSIZE 16 /* max group transaction size */ /* bursts size */ enum dw_dma_msize { @@ -284,6 +285,7 @@ struct dw_dma_chan { /* hardware configuration */ unsigned int block_size; bool nollp; + u32 max_burst; /* custom slave configuration */ struct dw_dma_slave dws; diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index 369e41e9dcc9..4f681df85c27 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h @@ -44,6 +44,8 @@ struct dw_dma_slave { * @data_width: Maximum data width supported by 
hardware per AHB master * (in bytes, power of 2) * @multi_block: Multi block transfers supported by hardware per channel. + * @max_burst: Maximum value of burst transaction size supported by hardware + * per channel (in units of CTL.SRC_TR_WIDTH/CTL.DST_TR_WIDTH). * @protctl: Protection control signals setting per channel. */ struct dw_dma_platform_data { @@ -58,6 +60,7 @@ struct dw_dma_platform_data { unsigned char nr_masters; unsigned char data_width[DW_DMA_MAX_NR_MASTERS]; unsigned char multi_block[DW_DMA_MAX_NR_CHANNELS]; + u32 max_burst[DW_DMA_MAX_NR_CHANNELS]; #define CHAN_PROTCTL_PRIVILEGED BIT(0) #define CHAN_PROTCTL_BUFFERABLE BIT(1) #define CHAN_PROTCTL_CACHEABLE BIT(2) -- cgit v1.2.3 From 1a0c02ba643ed05c07ddf14d87c3bec640666836 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Mon, 20 Jul 2020 08:50:58 -0700 Subject: dmaengine: idxd: add missing invalid flags field to completion Add missing "invalid flags" field to completion record struct. Reported-by: Nikhil Rao Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/159526025819.49266.13176787210106133664.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- include/uapi/linux/idxd.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include') diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h index 1f412fbf561b..5d07a3ff9e96 100644 --- a/include/uapi/linux/idxd.h +++ b/include/uapi/linux/idxd.h @@ -178,6 +178,12 @@ struct dsa_completion_record { uint32_t bytes_completed; uint64_t fault_addr; union { + /* common record */ + struct { + uint32_t invalid_flags:24; + uint32_t rsvd2:8; + }; + uint16_t delta_rec_size; uint16_t crc_val; -- cgit v1.2.3 From 6bd0dffa1a6e19e73964ae47c964f57c625cce05 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 21 Jul 2020 16:08:44 +0300 Subject: dmaengine: dw: Don't include unneeded header to platform data header Including device.h is too much for the dma-dw.h platform data header. Replace it with the headers of which dma-dw.h is direct user. 
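For context, the idiom this change relies on is a plain forward declaration for types that are only referenced through pointers, instead of including their full definitions. A generic sketch (the header and structure names below are illustrative, not taken from the patch):

/* example_platform_data.h - illustrative only */
#ifndef _EXAMPLE_PLATFORM_DATA_H
#define _EXAMPLE_PLATFORM_DATA_H

#include <linux/types.h>	/* needed only for the scalar types used below */

struct device;			/* forward declaration: a pointer member needs no full definition */

struct example_slave {
	struct device	*dma_dev;
	u32		max_burst;
};

#endif /* _EXAMPLE_PLATFORM_DATA_H */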
Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20200721130844.64162-1-andriy.shevchenko@linux.intel.com Signed-off-by: Vinod Koul --- include/linux/platform_data/dma-dw.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index 4f681df85c27..fbbeb2f6189b 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h @@ -8,13 +8,16 @@ #ifndef _PLATFORM_DATA_DMA_DW_H #define _PLATFORM_DATA_DMA_DW_H -#include +#include +#include #define DW_DMA_MAX_NR_MASTERS 4 #define DW_DMA_MAX_NR_CHANNELS 8 #define DW_DMA_MIN_BURST 1 #define DW_DMA_MAX_BURST 256 +struct device; + /** * struct dw_dma_slave - Controller-specific information about a slave * -- cgit v1.2.3 From 280c7f95f858b103e62d84cae2d5ed9f5cf54d41 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Mon, 27 Jul 2020 12:14:28 +0200 Subject: Revert "test_firmware: Test platform fw loading on non-EFI systems" This reverts commit 2d38dbf89a06d0f689daec9842c5d3295c49777f as it broke the build in linux-next Reported-by: Stephen Rothwell Fixes: 2d38dbf89a06 ("test_firmware: Test platform fw loading on non-EFI systems") Cc: stable@vger.kernel.org Cc: Scott Branden Cc: Kees Cook Link: https://lore.kernel.org/r/20200727165539.0e8797ab@canb.auug.org.au Signed-off-by: Greg Kroah-Hartman --- drivers/firmware/efi/embedded-firmware.c | 21 +++++---------------- drivers/firmware/efi/embedded-firmware.h | 19 ------------------- include/linux/efi_embedded_fw.h | 13 +++++++++++++ lib/test_firmware.c | 5 ----- 4 files changed, 18 insertions(+), 40 deletions(-) delete mode 100644 drivers/firmware/efi/embedded-firmware.h (limited to 'include') diff --git a/drivers/firmware/efi/embedded-firmware.c b/drivers/firmware/efi/embedded-firmware.c index 0fb03cd0a5a2..a1b199de9006 100644 --- a/drivers/firmware/efi/embedded-firmware.c +++ b/drivers/firmware/efi/embedded-firmware.c @@ -14,22 +14,11 @@ #include #include -#include "embedded-firmware.h" - -#ifdef CONFIG_TEST_FIRMWARE -# define EFI_EMBEDDED_FW_VISIBILITY -#else -# define EFI_EMBEDDED_FW_VISIBILITY static -#endif - -EFI_EMBEDDED_FW_VISIBILITY LIST_HEAD(efi_embedded_fw_list); -EFI_EMBEDDED_FW_VISIBILITY bool efi_embedded_fw_checked; - /* Exported for use by lib/test_firmware.c only */ -#ifdef CONFIG_TEST_FIRMWARE +LIST_HEAD(efi_embedded_fw_list); EXPORT_SYMBOL_GPL(efi_embedded_fw_list); -EXPORT_SYMBOL_GPL(efi_embedded_fw_checked); -#endif + +static bool checked_for_fw; static const struct dmi_system_id * const embedded_fw_table[] = { #ifdef CONFIG_TOUCHSCREEN_DMI @@ -130,14 +119,14 @@ void __init efi_check_for_embedded_firmwares(void) } } - efi_embedded_fw_checked = true; + checked_for_fw = true; } int efi_get_embedded_fw(const char *name, const u8 **data, size_t *size) { struct efi_embedded_fw *iter, *fw = NULL; - if (!efi_embedded_fw_checked) { + if (!checked_for_fw) { pr_warn("Warning %s called while we did not check for embedded fw\n", __func__); return -ENOENT; diff --git a/drivers/firmware/efi/embedded-firmware.h b/drivers/firmware/efi/embedded-firmware.h deleted file mode 100644 index 34113316d068..000000000000 --- a/drivers/firmware/efi/embedded-firmware.h +++ /dev/null @@ -1,19 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _EFI_EMBEDDED_FW_INTERNAL_H_ -#define _EFI_EMBEDDED_FW_INTERNAL_H_ - -/* - * This struct and efi_embedded_fw_list are private to the efi-embedded fw - * implementation they only in separate header for use by 
lib/test_firmware.c. - */ -struct efi_embedded_fw { - struct list_head list; - const char *name; - const u8 *data; - size_t length; -}; - -extern struct list_head efi_embedded_fw_list; -extern bool efi_embedded_fw_checked; - -#endif /* _EFI_EMBEDDED_FW_INTERNAL_H_ */ diff --git a/include/linux/efi_embedded_fw.h b/include/linux/efi_embedded_fw.h index 4ad5db9f5312..57eac5241303 100644 --- a/include/linux/efi_embedded_fw.h +++ b/include/linux/efi_embedded_fw.h @@ -7,6 +7,19 @@ #define EFI_EMBEDDED_FW_PREFIX_LEN 8 +/* + * This struct and efi_embedded_fw_list are private to the efi-embedded fw + * implementation they are in this header for use by lib/test_firmware.c only! + */ +struct efi_embedded_fw { + struct list_head list; + const char *name; + const u8 *data; + size_t length; +}; + +extern struct list_head efi_embedded_fw_list; + /** * struct efi_embedded_fw_desc - This struct is used by the EFI embedded-fw * code to search for embedded firmwares. diff --git a/lib/test_firmware.c b/lib/test_firmware.c index 62af792e151c..9fee2b93a8d1 100644 --- a/lib/test_firmware.c +++ b/lib/test_firmware.c @@ -489,7 +489,6 @@ out: static DEVICE_ATTR_WO(trigger_request); #ifdef CONFIG_EFI_EMBEDDED_FIRMWARE -#include "../drivers/firmware/efi/embedded-firmware.h" static ssize_t trigger_request_platform_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -502,7 +501,6 @@ static ssize_t trigger_request_platform_store(struct device *dev, }; struct efi_embedded_fw efi_embedded_fw; const struct firmware *firmware = NULL; - bool saved_efi_embedded_fw_checked; char *name; int rc; @@ -515,8 +513,6 @@ static ssize_t trigger_request_platform_store(struct device *dev, efi_embedded_fw.data = (void *)test_data; efi_embedded_fw.length = sizeof(test_data); list_add(&efi_embedded_fw.list, &efi_embedded_fw_list); - saved_efi_embedded_fw_checked = efi_embedded_fw_checked; - efi_embedded_fw_checked = true; pr_info("loading '%s'\n", name); rc = firmware_request_platform(&firmware, name, dev); @@ -534,7 +530,6 @@ static ssize_t trigger_request_platform_store(struct device *dev, rc = count; out: - efi_embedded_fw_checked = saved_efi_embedded_fw_checked; release_firmware(firmware); list_del(&efi_embedded_fw.list); kfree(name); -- cgit v1.2.3 From b8fcd0e588fc256bed3d65a4e23017c5582ecf48 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 30 Jun 2020 13:40:59 +0200 Subject: ACPICA: Preserve memory opregion mappings The ACPICA's strategy with respect to the handling of memory mappings associated with memory operation regions is to avoid mapping the entire region at once which may be problematic at least in principle (for example, it may lead to conflicts with overlapping mappings having different attributes created by drivers). It may also be wasteful, because memory opregions on some systems take up vast chunks of address space while the fields in those regions actually accessed by AML are sparsely distributed. For this reason, a one-page "window" is mapped for a given opregion on the first memory access through it and if that "window" does not cover an address range accessed through that opregion subsequently, it is unmapped and a new "window" is mapped to replace it. Next, if the new "window" is not sufficient to acess memory through the opregion in question in the future, it will be replaced with yet another "window" and so on. 
That may lead to a suboptimal sequence of memory mapping and unmapping operations, for example if two fields in one opregion separated from each other by a sufficiently wide chunk of unused address space are accessed in an alternating pattern. The situation may still be suboptimal if the deferred unmapping introduced previously is supported by the OS layer. For instance, the alternating memory access pattern mentioned above may produce a relatively long list of mappings to release with substantial duplication among the entries in it, which could be avoided if acpi_ex_system_memory_space_handler() did not release the mapping used by it previously as soon as the current access was not covered by it. In order to improve that, modify acpi_ex_system_memory_space_handler() to preserve all of the memory mappings created by it until the memory regions associated with them go away. Accordingly, update acpi_ev_system_memory_region_setup() to unmap all memory associated with memory opregions that go away. Reported-by: Dan Williams Tested-by: Xiang Li Signed-off-by: Rafael J. Wysocki --- drivers/acpi/acpica/evrgnini.c | 14 +++++---- drivers/acpi/acpica/exregion.c | 64 ++++++++++++++++++++++++++++++------------ include/acpi/actypes.h | 12 ++++++-- 3 files changed, 63 insertions(+), 27 deletions(-) (limited to 'include') diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c index aefc0145e583..89be3ccdad53 100644 --- a/drivers/acpi/acpica/evrgnini.c +++ b/drivers/acpi/acpica/evrgnini.c @@ -38,6 +38,7 @@ acpi_ev_system_memory_region_setup(acpi_handle handle, union acpi_operand_object *region_desc = (union acpi_operand_object *)handle; struct acpi_mem_space_context *local_region_context; + struct acpi_mem_mapping *mm; ACPI_FUNCTION_TRACE(ev_system_memory_region_setup); @@ -46,13 +47,14 @@ acpi_ev_system_memory_region_setup(acpi_handle handle, local_region_context = (struct acpi_mem_space_context *)*region_context; - /* Delete a cached mapping if present */ + /* Delete memory mappings if present */ - if (local_region_context->mapped_length) { - acpi_os_unmap_memory(local_region_context-> - mapped_logical_address, - local_region_context-> - mapped_length); + while (local_region_context->first_mm) { + mm = local_region_context->first_mm; + local_region_context->first_mm = mm->next_mm; + acpi_os_unmap_memory(mm->logical_address, + mm->length); + ACPI_FREE(mm); } ACPI_FREE(local_region_context); *region_context = NULL; diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c index d15a66de26c0..4914dbc44517 100644 --- a/drivers/acpi/acpica/exregion.c +++ b/drivers/acpi/acpica/exregion.c @@ -41,6 +41,7 @@ acpi_ex_system_memory_space_handler(u32 function, acpi_status status = AE_OK; void *logical_addr_ptr = NULL; struct acpi_mem_space_context *mem_info = region_context; + struct acpi_mem_mapping *mm = mem_info->cur_mm; u32 length; acpi_size map_length; acpi_size page_boundary_map_length; @@ -96,20 +97,37 @@ acpi_ex_system_memory_space_handler(u32 function, * Is 1) Address below the current mapping? OR * 2) Address beyond the current mapping? */ - if ((address < mem_info->mapped_physical_address) || - (((u64) address + length) > ((u64) - mem_info->mapped_physical_address + - mem_info->mapped_length))) { + if (!mm || (address < mm->physical_address) || + ((u64) address + length > (u64) mm->physical_address + mm->length)) { /* - * The request cannot be resolved by the current memory mapping; - * Delete the existing mapping and create a new one. 
+ * The request cannot be resolved by the current memory mapping. + * + * Look for an existing saved mapping covering the address range + * at hand. If found, save it as the current one and carry out + * the access. */ - if (mem_info->mapped_length) { + for (mm = mem_info->first_mm; mm; mm = mm->next_mm) { + if (mm == mem_info->cur_mm) + continue; + + if (address < mm->physical_address) + continue; - /* Valid mapping, delete it */ + if ((u64) address + length > + (u64) mm->physical_address + mm->length) + continue; - acpi_os_unmap_memory(mem_info->mapped_logical_address, - mem_info->mapped_length); + mem_info->cur_mm = mm; + goto access; + } + + /* Create a new mappings list entry */ + mm = ACPI_ALLOCATE_ZEROED(sizeof(*mm)); + if (!mm) { + ACPI_ERROR((AE_INFO, + "Unable to save memory mapping at 0x%8.8X%8.8X, size %u", + ACPI_FORMAT_UINT64(address), length)); + return_ACPI_STATUS(AE_NO_MEMORY); } /* @@ -143,29 +161,39 @@ acpi_ex_system_memory_space_handler(u32 function, /* Create a new mapping starting at the address given */ - mem_info->mapped_logical_address = - acpi_os_map_memory(address, map_length); - if (!mem_info->mapped_logical_address) { + logical_addr_ptr = acpi_os_map_memory(address, map_length); + if (!logical_addr_ptr) { ACPI_ERROR((AE_INFO, "Could not map memory at 0x%8.8X%8.8X, size %u", ACPI_FORMAT_UINT64(address), (u32)map_length)); - mem_info->mapped_length = 0; + ACPI_FREE(mm); return_ACPI_STATUS(AE_NO_MEMORY); } /* Save the physical address and mapping size */ - mem_info->mapped_physical_address = address; - mem_info->mapped_length = map_length; + mm->logical_address = logical_addr_ptr; + mm->physical_address = address; + mm->length = map_length; + + /* + * Add the new entry to the mappigs list and save it as the + * current mapping. + */ + mm->next_mm = mem_info->first_mm; + mem_info->first_mm = mm; + + mem_info->cur_mm = mm; } +access: /* * Generate a logical pointer corresponding to the address we want to * access */ - logical_addr_ptr = mem_info->mapped_logical_address + - ((u64) address - (u64) mem_info->mapped_physical_address); + logical_addr_ptr = mm->logical_address + + ((u64) address - (u64) mm->physical_address); ACPI_DEBUG_PRINT((ACPI_DB_INFO, "System-Memory (width %u) R/W %u Address=%8.8X%8.8X\n", diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index aa236b9e6f24..d005e35ab399 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -1201,12 +1201,18 @@ struct acpi_pci_id { u16 function; }; +struct acpi_mem_mapping { + acpi_physical_address physical_address; + u8 *logical_address; + acpi_size length; + struct acpi_mem_mapping *next_mm; +}; + struct acpi_mem_space_context { u32 length; acpi_physical_address address; - acpi_physical_address mapped_physical_address; - u8 *mapped_logical_address; - acpi_size mapped_length; + struct acpi_mem_mapping *cur_mm; + struct acpi_mem_mapping *first_mm; }; /* -- cgit v1.2.3 From 45e31869cc4fa8d7c7d2b890965b1b3abf0cfd6d Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Fri, 19 Jun 2020 15:24:46 +0300 Subject: btrfs: tracepoints: fix btrfs_trigger_flush symbolic string for flags When tracepoints use __print_symbolic to print textual representation of a value that comes from an ENUM each enum value needs to be exported to user space so that user space tools can convert the binary value data to the trings as user space does not know what those enums are about. 
Doing a trace-cmd record && trace-cmd report currently results in: kworker/u8:1-61 [000] 66.299527: btrfs_flush_space: 5302ee13-c65e-45bb-98ef-8fe3835bd943: state=3(0x3) flags=4(METADATA) num_bytes=2621440 ret=0 I.e state is not translated to its symbolic counterpart. With this patch applied the output is: fio-370 [002] 56.762402: btrfs_trigger_flush: d04cd7ac-38e2-452f-a7f5-8157529fd5f0: preempt: flush=3(BTRFS_RESERVE_FLUSH_ALL) flags=4(METADATA) bytes=655360 See also 190f0b76ca49 ("mm: tracing: Export enums in tracepoints to user space"). Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- include/trace/events/btrfs.h | 34 ++++++++++++++++++++++++++++------ 1 file changed, 28 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 360b0f9d2220..4ccd81fae385 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -1042,11 +1042,33 @@ TRACE_EVENT(btrfs_space_reservation, __entry->bytes) ); -#define show_flush_action(action) \ - __print_symbolic(action, \ - { BTRFS_RESERVE_NO_FLUSH, "BTRFS_RESERVE_NO_FLUSH"}, \ - { BTRFS_RESERVE_FLUSH_LIMIT, "BTRFS_RESERVE_FLUSH_LIMIT"}, \ - { BTRFS_RESERVE_FLUSH_ALL, "BTRFS_RESERVE_FLUSH_ALL"}) +#define FLUSH_ACTIONS \ + EM( BTRFS_RESERVE_NO_FLUSH, "BTRFS_RESERVE_NO_FLUSH") \ + EM( BTRFS_RESERVE_FLUSH_LIMIT, "BTRFS_RESERVE_FLUSH_LIMIT") \ + EM( BTRFS_RESERVE_FLUSH_ALL, "BTRFS_RESERVE_FLUSH_ALL") \ + EMe(BTRFS_RESERVE_FLUSH_ALL_STEAL, "BTRFS_RESERVE_FLUSH_ALL_STEAL") + +/* + * First define the enums in the above macros to be exported to userspace via + * TRACE_DEFINE_ENUM(). + */ + +#undef EM +#undef EMe +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define EMe(a, b) TRACE_DEFINE_ENUM(a); + +FLUSH_ACTIONS + +/* + * Now redefine the EM and EMe macros to map the enums to the strings that will + * be printed in the output + */ + +#undef EM +#undef EMe +#define EM(a, b) {a, b}, +#define EMe(a, b) {a, b} TRACE_EVENT(btrfs_trigger_flush, @@ -1071,7 +1093,7 @@ TRACE_EVENT(btrfs_trigger_flush, TP_printk_btrfs("%s: flush=%d(%s) flags=%llu(%s) bytes=%llu", __get_str(reason), __entry->flush, - show_flush_action(__entry->flush), + __print_symbolic(__entry->flush, FLUSH_ACTIONS), __entry->flags, __print_flags((unsigned long)__entry->flags, "|", BTRFS_GROUP_FLAGS), -- cgit v1.2.3 From 0840dd28b52f0ceba91265ec62bffdcedb82b4a9 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Fri, 19 Jun 2020 15:24:47 +0300 Subject: btrfs: tracepoints: fix extent type symbolic name print extent's type is an enum and this requires that the enum values be exported to user space so that user space tools can correctly map raw binary data to the symbolic name. Currently tracepoints using btrfs__file_extent_item_regular or btrfs__file_extent_item_inline result in the following output: fio-443 [002] 586.609450: btrfs_get_extent_show_fi_regular: f0c3bf8e-0174-4bcc-92aa-6c2d62430420:i root=5(FS_TREE) inode=258 size=2136457216 disk_isize=0 file extent range=[2126946304 2136457216] (num_bytes=9510912 ram_bytes=9510912 disk_bytenr=0 disk_num_bytes=0 extent_offset=0 type=0x1 compression=0 E.g type is 0x1 . 
With this patch applie the output is: disk_bytenr=141348864 disk_num_bytes=4096 extent_offset=0 type=REG compression=0 Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- include/trace/events/btrfs.h | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 4ccd81fae385..71401d4c3ccc 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -67,11 +67,22 @@ TRACE_DEFINE_ENUM(COMMIT_TRANS); (obj >= BTRFS_ROOT_TREE_OBJECTID && \ obj <= BTRFS_QUOTA_TREE_OBJECTID)) ? __show_root_type(obj) : "-" -#define show_fi_type(type) \ - __print_symbolic(type, \ - { BTRFS_FILE_EXTENT_INLINE, "INLINE" }, \ - { BTRFS_FILE_EXTENT_REG, "REG" }, \ - { BTRFS_FILE_EXTENT_PREALLOC, "PREALLOC"}) +#define FI_TYPES \ + EM( BTRFS_FILE_EXTENT_INLINE, "INLINE") \ + EM( BTRFS_FILE_EXTENT_REG, "REG") \ + EMe(BTRFS_FILE_EXTENT_PREALLOC, "PREALLOC") + +#undef EM +#undef EMe +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define EMe(a, b) TRACE_DEFINE_ENUM(a); + +FI_TYPES + +#undef EM +#undef EMe +#define EM(a, b) {a, b}, +#define EMe(a, b) {a, b} #define show_qgroup_rsv_type(type) \ __print_symbolic(type, \ @@ -380,7 +391,7 @@ DECLARE_EVENT_CLASS(btrfs__file_extent_item_regular, __entry->disk_isize, __entry->extent_start, __entry->extent_end, __entry->num_bytes, __entry->ram_bytes, __entry->disk_bytenr, __entry->disk_num_bytes, - __entry->extent_offset, show_fi_type(__entry->extent_type), + __entry->extent_offset, __print_symbolic(__entry->extent_type, FI_TYPES), __entry->compression) ); @@ -421,7 +432,7 @@ DECLARE_EVENT_CLASS( "extent_type=%s compression=%u", show_root_type(__entry->root_obj), __entry->ino, __entry->isize, __entry->disk_isize, __entry->extent_start, - __entry->extent_end, show_fi_type(__entry->extent_type), + __entry->extent_end, __print_symbolic(__entry->extent_type, FI_TYPES), __entry->compression) ); -- cgit v1.2.3 From 5bca2c952c609b128b00a238fadb99cc0d3b65ab Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Fri, 19 Jun 2020 15:24:48 +0300 Subject: btrfs: tracepoints: move FLUSH_ACTIONS define Since all enums used in btrfs' tracepoints are going to be redefined to allow proper parsing of their values by userspace tools let's rearrange when they are defined. This will allow to use only a single set of #define EM/#undef EM sequence. No functional changes. Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- include/trace/events/btrfs.h | 45 +++++++++++++++++--------------------------- 1 file changed, 17 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 71401d4c3ccc..c214622957c4 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -67,18 +67,35 @@ TRACE_DEFINE_ENUM(COMMIT_TRANS); (obj >= BTRFS_ROOT_TREE_OBJECTID && \ obj <= BTRFS_QUOTA_TREE_OBJECTID)) ? 
__show_root_type(obj) : "-" +#define FLUSH_ACTIONS \ + EM( BTRFS_RESERVE_NO_FLUSH, "BTRFS_RESERVE_NO_FLUSH") \ + EM( BTRFS_RESERVE_FLUSH_LIMIT, "BTRFS_RESERVE_FLUSH_LIMIT") \ + EM( BTRFS_RESERVE_FLUSH_ALL, "BTRFS_RESERVE_FLUSH_ALL") \ + EMe(BTRFS_RESERVE_FLUSH_ALL_STEAL, "BTRFS_RESERVE_FLUSH_ALL_STEAL") + #define FI_TYPES \ EM( BTRFS_FILE_EXTENT_INLINE, "INLINE") \ EM( BTRFS_FILE_EXTENT_REG, "REG") \ EMe(BTRFS_FILE_EXTENT_PREALLOC, "PREALLOC") +/* + * First define the enums in the above macros to be exported to userspace via + * TRACE_DEFINE_ENUM(). + */ + #undef EM #undef EMe #define EM(a, b) TRACE_DEFINE_ENUM(a); #define EMe(a, b) TRACE_DEFINE_ENUM(a); +FLUSH_ACTIONS FI_TYPES +/* + * Now redefine the EM and EMe macros to map the enums to the strings that will + * be printed in the output + */ + #undef EM #undef EMe #define EM(a, b) {a, b}, @@ -1053,34 +1070,6 @@ TRACE_EVENT(btrfs_space_reservation, __entry->bytes) ); -#define FLUSH_ACTIONS \ - EM( BTRFS_RESERVE_NO_FLUSH, "BTRFS_RESERVE_NO_FLUSH") \ - EM( BTRFS_RESERVE_FLUSH_LIMIT, "BTRFS_RESERVE_FLUSH_LIMIT") \ - EM( BTRFS_RESERVE_FLUSH_ALL, "BTRFS_RESERVE_FLUSH_ALL") \ - EMe(BTRFS_RESERVE_FLUSH_ALL_STEAL, "BTRFS_RESERVE_FLUSH_ALL_STEAL") - -/* - * First define the enums in the above macros to be exported to userspace via - * TRACE_DEFINE_ENUM(). - */ - -#undef EM -#undef EMe -#define EM(a, b) TRACE_DEFINE_ENUM(a); -#define EMe(a, b) TRACE_DEFINE_ENUM(a); - -FLUSH_ACTIONS - -/* - * Now redefine the EM and EMe macros to map the enums to the strings that will - * be printed in the output - */ - -#undef EM -#undef EMe -#define EM(a, b) {a, b}, -#define EMe(a, b) {a, b} - TRACE_EVENT(btrfs_trigger_flush, TP_PROTO(const struct btrfs_fs_info *fs_info, u64 flags, u64 bytes, -- cgit v1.2.3 From 1cb1f0b2486b0893a3ebf20c42f2df27649ae2b4 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Fri, 19 Jun 2020 15:24:49 +0300 Subject: btrfs: tracepoints: fix qgroup reservation type printing Since qgroup's reservation types are define in a macro they must be exported to user space in order for user space tools to convert raw binary data to symbolic names. Currently trace-cmd report produces the following output: kworker/u8:2-459 [003] 1208.543587: qgroup_update_reserve: 2b742cae-e0e5-4def-9ef7-28a9b34a951e: qgid=5 type=0x2 cur_reserved=54870016 diff=-32768 With this fix the output is: kworker/u8:2-459 [003] 1208.543587: qgroup_update_reserve: 2b742cae-e0e5-4def-9ef7-28a9b34a951e: qgid=5 type=BTRFS_QGROUP_RSV_META_PREALLOC cur_reserved=54870016 diff=-32768 Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- include/trace/events/btrfs.h | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index c214622957c4..f0e95e3f1d1d 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -78,6 +78,11 @@ TRACE_DEFINE_ENUM(COMMIT_TRANS); EM( BTRFS_FILE_EXTENT_REG, "REG") \ EMe(BTRFS_FILE_EXTENT_PREALLOC, "PREALLOC") +#define QGROUP_RSV_TYPES \ + EM( BTRFS_QGROUP_RSV_DATA, "DATA") \ + EM( BTRFS_QGROUP_RSV_META_PERTRANS, "META_PERTRANS") \ + EMe(BTRFS_QGROUP_RSV_META_PREALLOC, "META_PREALLOC") + /* * First define the enums in the above macros to be exported to userspace via * TRACE_DEFINE_ENUM(). 
@@ -90,6 +95,7 @@ TRACE_DEFINE_ENUM(COMMIT_TRANS); FLUSH_ACTIONS FI_TYPES +QGROUP_RSV_TYPES /* * Now redefine the EM and EMe macros to map the enums to the strings that will @@ -101,12 +107,6 @@ FI_TYPES #define EM(a, b) {a, b}, #define EMe(a, b) {a, b} -#define show_qgroup_rsv_type(type) \ - __print_symbolic(type, \ - { BTRFS_QGROUP_RSV_DATA, "DATA" }, \ - { BTRFS_QGROUP_RSV_META_PERTRANS, "META_PERTRANS" }, \ - { BTRFS_QGROUP_RSV_META_PREALLOC, "META_PREALLOC" }) - #define show_extent_io_tree_owner(owner) \ __print_symbolic(owner, \ { IO_TREE_FS_PINNED_EXTENTS, "PINNED_EXTENTS" }, \ @@ -1712,7 +1712,7 @@ TRACE_EVENT(qgroup_update_reserve, ), TP_printk_btrfs("qgid=%llu type=%s cur_reserved=%llu diff=%lld", - __entry->qgid, show_qgroup_rsv_type(__entry->type), + __entry->qgid, __print_symbolic(__entry->type, QGROUP_RSV_TYPES), __entry->cur_reserved, __entry->diff) ); @@ -1736,7 +1736,7 @@ TRACE_EVENT(qgroup_meta_reserve, TP_printk_btrfs("refroot=%llu(%s) type=%s diff=%lld", show_root_type(__entry->refroot), - show_qgroup_rsv_type(__entry->type), __entry->diff) + __print_symbolic(__entry->type, QGROUP_RSV_TYPES), __entry->diff) ); TRACE_EVENT(qgroup_meta_convert, @@ -1757,8 +1757,8 @@ TRACE_EVENT(qgroup_meta_convert, TP_printk_btrfs("refroot=%llu(%s) type=%s->%s diff=%lld", show_root_type(__entry->refroot), - show_qgroup_rsv_type(BTRFS_QGROUP_RSV_META_PREALLOC), - show_qgroup_rsv_type(BTRFS_QGROUP_RSV_META_PERTRANS), + __print_symbolic(BTRFS_QGROUP_RSV_META_PREALLOC, QGROUP_RSV_TYPES), + __print_symbolic(BTRFS_QGROUP_RSV_META_PERTRANS, QGROUP_RSV_TYPES), __entry->diff) ); @@ -1784,7 +1784,7 @@ TRACE_EVENT(qgroup_meta_free_all_pertrans, TP_printk_btrfs("refroot=%llu(%s) type=%s diff=%lld", show_root_type(__entry->refroot), - show_qgroup_rsv_type(__entry->type), __entry->diff) + __print_symbolic(__entry->type, QGROUP_RSV_TYPES), __entry->diff) ); DECLARE_EVENT_CLASS(btrfs__prelim_ref, -- cgit v1.2.3 From c92bb3046ff639df5c39fea6cfedea6e9885e36d Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Fri, 19 Jun 2020 15:24:50 +0300 Subject: btrfs: tracepoints: switch extent_io_tree_owner to using EM macro This fixes correct pint out of the extent io tree owner in btrfs_set_extent_bit/btrfs_clear_extent_bit/btrfs_convert_extent_bit tracepoints. Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- include/trace/events/btrfs.h | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index f0e95e3f1d1d..880a5c0e0f21 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -83,6 +83,18 @@ TRACE_DEFINE_ENUM(COMMIT_TRANS); EM( BTRFS_QGROUP_RSV_META_PERTRANS, "META_PERTRANS") \ EMe(BTRFS_QGROUP_RSV_META_PREALLOC, "META_PREALLOC") +#define IO_TREE_OWNER \ + EM( IO_TREE_FS_PINNED_EXTENTS, "PINNED_EXTENTS") \ + EM( IO_TREE_FS_EXCLUDED_EXTENTS, "EXCLUDED_EXTENTS") \ + EM( IO_TREE_INODE_IO, "INODE_IO") \ + EM( IO_TREE_INODE_IO_FAILURE, "INODE_IO_FAILURE") \ + EM( IO_TREE_RELOC_BLOCKS, "RELOC_BLOCKS") \ + EM( IO_TREE_TRANS_DIRTY_PAGES, "TRANS_DIRTY_PAGES") \ + EM( IO_TREE_ROOT_DIRTY_LOG_PAGES, "ROOT_DIRTY_LOG_PAGES") \ + EM( IO_TREE_INODE_FILE_EXTENT, "INODE_FILE_EXTENT") \ + EM( IO_TREE_LOG_CSUM_RANGE, "LOG_CSUM_RANGE") \ + EMe(IO_TREE_SELFTEST, "SELFTEST") + /* * First define the enums in the above macros to be exported to userspace via * TRACE_DEFINE_ENUM(). 
@@ -96,6 +108,7 @@ TRACE_DEFINE_ENUM(COMMIT_TRANS); FLUSH_ACTIONS FI_TYPES QGROUP_RSV_TYPES +IO_TREE_OWNER /* * Now redefine the EM and EMe macros to map the enums to the strings that will @@ -107,18 +120,6 @@ QGROUP_RSV_TYPES #define EM(a, b) {a, b}, #define EMe(a, b) {a, b} -#define show_extent_io_tree_owner(owner) \ - __print_symbolic(owner, \ - { IO_TREE_FS_PINNED_EXTENTS, "PINNED_EXTENTS" }, \ - { IO_TREE_FS_EXCLUDED_EXTENTS, "EXCLUDED_EXTENTS" }, \ - { IO_TREE_INODE_IO, "INODE_IO" }, \ - { IO_TREE_INODE_IO_FAILURE, "INODE_IO_FAILURE" }, \ - { IO_TREE_RELOC_BLOCKS, "RELOC_BLOCKS" }, \ - { IO_TREE_TRANS_DIRTY_PAGES, "TRANS_DIRTY_PAGES" }, \ - { IO_TREE_ROOT_DIRTY_LOG_PAGES, "ROOT_DIRTY_LOG_PAGES" }, \ - { IO_TREE_INODE_FILE_EXTENT, "INODE_FILE_EXTENT" }, \ - { IO_TREE_LOG_CSUM_RANGE, "LOG_CSUM_RANGE" }, \ - { IO_TREE_SELFTEST, "SELFTEST" }) #define BTRFS_GROUP_FLAGS \ { BTRFS_BLOCK_GROUP_DATA, "DATA"}, \ @@ -1942,7 +1943,7 @@ TRACE_EVENT(btrfs_set_extent_bit, TP_printk_btrfs( "io_tree=%s ino=%llu root=%llu start=%llu len=%llu set_bits=%s", - show_extent_io_tree_owner(__entry->owner), __entry->ino, + __print_symbolic(__entry->owner, IO_TREE_OWNER), __entry->ino, __entry->rootid, __entry->start, __entry->len, __print_flags(__entry->set_bits, "|", EXTENT_FLAGS)) ); @@ -1981,7 +1982,7 @@ TRACE_EVENT(btrfs_clear_extent_bit, TP_printk_btrfs( "io_tree=%s ino=%llu root=%llu start=%llu len=%llu clear_bits=%s", - show_extent_io_tree_owner(__entry->owner), __entry->ino, + __print_symbolic(__entry->owner, IO_TREE_OWNER), __entry->ino, __entry->rootid, __entry->start, __entry->len, __print_flags(__entry->clear_bits, "|", EXTENT_FLAGS)) ); @@ -2022,7 +2023,7 @@ TRACE_EVENT(btrfs_convert_extent_bit, TP_printk_btrfs( "io_tree=%s ino=%llu root=%llu start=%llu len=%llu set_bits=%s clear_bits=%s", - show_extent_io_tree_owner(__entry->owner), __entry->ino, + __print_symbolic(__entry->owner, IO_TREE_OWNER), __entry->ino, __entry->rootid, __entry->start, __entry->len, __print_flags(__entry->set_bits , "|", EXTENT_FLAGS), __print_flags(__entry->clear_bits, "|", EXTENT_FLAGS)) -- cgit v1.2.3 From f0cdd15c219dd0be8cae47f60b773efe1361336e Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Fri, 19 Jun 2020 15:24:51 +0300 Subject: btrfs: tracepoints: convert flush states to using EM macros Only 6 out of all flush states were being printed correctly since only they were exported via the TRACE_DEFINE_ENUM macro. This patch converts all flush states to use the newly introduced EM macro so that they can all be printed correctly. 
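Distilled to its skeleton, the EM()/EMe() idiom this series converges on looks like the sketch below. The enum values STATE_A/STATE_B are placeholders; in the real header the list sits in the tracepoint header processed by the TRACE_DEFINE_ENUM machinery.

/* Illustrative skeleton of the two-pass EM()/EMe() pattern. */
#define EXAMPLE_STATES				\
	EM( STATE_A,	"STATE_A")		\
	EMe(STATE_B,	"STATE_B")

/* Pass 1: emit TRACE_DEFINE_ENUM() so user space learns the numeric values. */
#undef EM
#undef EMe
#define EM(a, b)	TRACE_DEFINE_ENUM(a);
#define EMe(a, b)	TRACE_DEFINE_ENUM(a);

EXAMPLE_STATES

/*
 * Pass 2: re-expand the same list as the { value, "name" } pairs that
 * __print_symbolic() consumes, e.g. in TP_printk():
 *	__print_symbolic(__entry->state, EXAMPLE_STATES)
 */
#undef EM
#undef EMe
#define EM(a, b)	{a, b},
#define EMe(a, b)	{a, b}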
Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- include/trace/events/btrfs.h | 34 ++++++++++++++-------------------- 1 file changed, 14 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 880a5c0e0f21..863335ecb7e8 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -31,13 +31,6 @@ struct extent_io_tree; struct prelim_ref; struct btrfs_space_info; -TRACE_DEFINE_ENUM(FLUSH_DELAYED_ITEMS_NR); -TRACE_DEFINE_ENUM(FLUSH_DELAYED_ITEMS); -TRACE_DEFINE_ENUM(FLUSH_DELALLOC); -TRACE_DEFINE_ENUM(FLUSH_DELALLOC_WAIT); -TRACE_DEFINE_ENUM(ALLOC_CHUNK); -TRACE_DEFINE_ENUM(COMMIT_TRANS); - #define show_ref_type(type) \ __print_symbolic(type, \ { BTRFS_TREE_BLOCK_REF_KEY, "TREE_BLOCK_REF" }, \ @@ -95,6 +88,18 @@ TRACE_DEFINE_ENUM(COMMIT_TRANS); EM( IO_TREE_LOG_CSUM_RANGE, "LOG_CSUM_RANGE") \ EMe(IO_TREE_SELFTEST, "SELFTEST") +#define FLUSH_STATES \ + EM( FLUSH_DELAYED_ITEMS_NR, "FLUSH_DELAYED_ITEMS_NR") \ + EM( FLUSH_DELAYED_ITEMS, "FLUSH_DELAYED_ITEMS") \ + EM( FLUSH_DELALLOC, "FLUSH_DELALLOC") \ + EM( FLUSH_DELALLOC_WAIT, "FLUSH_DELALLOC_WAIT") \ + EM( FLUSH_DELAYED_REFS_NR, "FLUSH_DELAYED_REFS_NR") \ + EM( FLUSH_DELAYED_REFS, "FLUSH_ELAYED_REFS") \ + EM( ALLOC_CHUNK, "ALLOC_CHUNK") \ + EM( ALLOC_CHUNK_FORCE, "ALLOC_CHUNK_FORCE") \ + EM( RUN_DELAYED_IPUTS, "RUN_DELAYED_IPUTS") \ + EMe(COMMIT_TRANS, "COMMIT_TRANS") + /* * First define the enums in the above macros to be exported to userspace via * TRACE_DEFINE_ENUM(). @@ -109,6 +114,7 @@ FLUSH_ACTIONS FI_TYPES QGROUP_RSV_TYPES IO_TREE_OWNER +FLUSH_STATES /* * Now redefine the EM and EMe macros to map the enums to the strings that will @@ -1101,18 +1107,6 @@ TRACE_EVENT(btrfs_trigger_flush, __entry->bytes) ); -#define show_flush_state(state) \ - __print_symbolic(state, \ - { FLUSH_DELAYED_ITEMS_NR, "FLUSH_DELAYED_ITEMS_NR"}, \ - { FLUSH_DELAYED_ITEMS, "FLUSH_DELAYED_ITEMS"}, \ - { FLUSH_DELALLOC, "FLUSH_DELALLOC"}, \ - { FLUSH_DELALLOC_WAIT, "FLUSH_DELALLOC_WAIT"}, \ - { FLUSH_DELAYED_REFS_NR, "FLUSH_DELAYED_REFS_NR"}, \ - { FLUSH_DELAYED_REFS, "FLUSH_ELAYED_REFS"}, \ - { ALLOC_CHUNK, "ALLOC_CHUNK"}, \ - { ALLOC_CHUNK_FORCE, "ALLOC_CHUNK_FORCE"}, \ - { RUN_DELAYED_IPUTS, "RUN_DELAYED_IPUTS"}, \ - { COMMIT_TRANS, "COMMIT_TRANS"}) TRACE_EVENT(btrfs_flush_space, @@ -1137,7 +1131,7 @@ TRACE_EVENT(btrfs_flush_space, TP_printk_btrfs("state=%d(%s) flags=%llu(%s) num_bytes=%llu ret=%d", __entry->state, - show_flush_state(__entry->state), + __print_symbolic(__entry->state, FLUSH_STATES), __entry->flags, __print_flags((unsigned long)__entry->flags, "|", BTRFS_GROUP_FLAGS), -- cgit v1.2.3 From 06f67c47076e5c3ee65276171596479dcc3a3941 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Sun, 28 Jun 2020 13:07:14 +0800 Subject: btrfs: use __u16 for the return value of btrfs_qgroup_level() The qgroup level is limited to u16, so no need to use u64 for it. 
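A quick sanity check on the arithmetic (the qgroupid below is an arbitrary example): the split point is BTRFS_QGROUP_LEVEL_SHIFT (48), so only 64 - 48 = 16 bits are ever available for the level.

	/* Illustrative fragment: qgroup "1/257" encodes as (level << 48) | subvolid. */
	__u64 qgroupid = (1ULL << 48) | 257;
	__u16 level = (__u16)(qgroupid >> BTRFS_QGROUP_LEVEL_SHIFT);	/* == 1, always fits in 16 bits */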
Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/qgroup.c | 2 +- include/uapi/linux/btrfs_tree.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 787128d7e196..229e461dbfc3 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -538,7 +538,7 @@ bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info) if (qgroup->rsv.values[i]) { ret = true; btrfs_warn(fs_info, - "qgroup %llu/%llu has unreleased space, type %d rsv %llu", + "qgroup %hu/%llu has unreleased space, type %d rsv %llu", btrfs_qgroup_level(qgroup->qgroupid), btrfs_qgroup_subvolid(qgroup->qgroupid), i, qgroup->rsv.values[i]); diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h index a3f3975df0de..9ba64ca6b4ac 100644 --- a/include/uapi/linux/btrfs_tree.h +++ b/include/uapi/linux/btrfs_tree.h @@ -913,9 +913,9 @@ struct btrfs_free_space_info { #define BTRFS_FREE_SPACE_USING_BITMAPS (1ULL << 0) #define BTRFS_QGROUP_LEVEL_SHIFT 48 -static inline __u64 btrfs_qgroup_level(__u64 qgroupid) +static inline __u16 btrfs_qgroup_level(__u64 qgroupid) { - return qgroupid >> BTRFS_QGROUP_LEVEL_SHIFT; + return (__u16)(qgroupid >> BTRFS_QGROUP_LEVEL_SHIFT); } /* -- cgit v1.2.3 From 137c541821a83debb63b3fa8abdd1cbc41bdf3a1 Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Mon, 13 Jul 2020 21:28:58 +0900 Subject: btrfs: pass checksum type via BTRFS_IOC_FS_INFO ioctl With the recent addition of filesystem checksum types other than CRC32c, it is not anymore hard-coded which checksum type a btrfs filesystem uses. Up to now there is no good way to read the filesystem checksum, apart from reading the filesystem UUID and then query sysfs for the checksum type. Add a new csum_type and csum_size fields to the BTRFS_IOC_FS_INFO ioctl command which usually is used to query filesystem features. Also add a flags member indicating that the kernel responded with a set csum_type and csum_size field. For compatibility reasons, only return the csum_type and csum_size if the BTRFS_FS_INFO_FLAG_CSUM_INFO flag was passed to the kernel. Also clear any unknown flags so we don't pass false positives to user-space newer than the kernel. To simplify further additions to the ioctl, also switch the padding to a u8 array. Pahole was used to verify the result of this switch: The csum members are added before flags, which might look odd, but this is to keep the alignment requirements and not to introduce holes in the structure. 
$ pahole -C btrfs_ioctl_fs_info_args fs/btrfs/btrfs.ko struct btrfs_ioctl_fs_info_args { __u64 max_id; /* 0 8 */ __u64 num_devices; /* 8 8 */ __u8 fsid[16]; /* 16 16 */ __u32 nodesize; /* 32 4 */ __u32 sectorsize; /* 36 4 */ __u32 clone_alignment; /* 40 4 */ __u16 csum_type; /* 44 2 */ __u16 csum_size; /* 46 2 */ __u64 flags; /* 48 8 */ __u8 reserved[968]; /* 56 968 */ /* size: 1024, cachelines: 16, members: 10 */ }; Fixes: 3951e7f050ac ("btrfs: add xxhash64 to checksumming algorithms") Fixes: 3831bf0094ab ("btrfs: add sha256 to checksumming algorithm") CC: stable@vger.kernel.org # 5.5+ Signed-off-by: Johannes Thumshirn Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ioctl.c | 16 +++++++++++++--- include/uapi/linux/btrfs.h | 14 ++++++++++++-- 2 files changed, 25 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index ab34179d7cbc..2854f4a40787 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -3217,11 +3217,15 @@ static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info, struct btrfs_ioctl_fs_info_args *fi_args; struct btrfs_device *device; struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; + u64 flags_in; int ret = 0; - fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL); - if (!fi_args) - return -ENOMEM; + fi_args = memdup_user(arg, sizeof(*fi_args)); + if (IS_ERR(fi_args)) + return PTR_ERR(fi_args); + + flags_in = fi_args->flags; + memset(fi_args, 0, sizeof(*fi_args)); rcu_read_lock(); fi_args->num_devices = fs_devices->num_devices; @@ -3237,6 +3241,12 @@ static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info, fi_args->sectorsize = fs_info->sectorsize; fi_args->clone_alignment = fs_info->sectorsize; + if (flags_in & BTRFS_FS_INFO_FLAG_CSUM_INFO) { + fi_args->csum_type = btrfs_super_csum_type(fs_info->super_copy); + fi_args->csum_size = btrfs_super_csum_size(fs_info->super_copy); + fi_args->flags |= BTRFS_FS_INFO_FLAG_CSUM_INFO; + } + if (copy_to_user(arg, fi_args, sizeof(*fi_args))) ret = -EFAULT; diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index e6b6cb0f8bc6..24f6848ad78e 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -243,6 +243,13 @@ struct btrfs_ioctl_dev_info_args { __u8 path[BTRFS_DEVICE_PATH_NAME_MAX]; /* out */ }; +/* + * Retrieve information about the filesystem + */ + +/* Request information about checksum type and size */ +#define BTRFS_FS_INFO_FLAG_CSUM_INFO (1 << 0) + struct btrfs_ioctl_fs_info_args { __u64 max_id; /* out */ __u64 num_devices; /* out */ @@ -250,8 +257,11 @@ struct btrfs_ioctl_fs_info_args { __u32 nodesize; /* out */ __u32 sectorsize; /* out */ __u32 clone_alignment; /* out */ - __u32 reserved32; - __u64 reserved[122]; /* pad to 1k */ + /* See BTRFS_FS_INFO_FLAG_* */ + __u16 csum_type; /* out */ + __u16 csum_size; /* out */ + __u64 flags; /* in/out */ + __u8 reserved[968]; /* pad to 1k */ }; /* -- cgit v1.2.3 From 0fb408a558aadbaa58beb75b02c95741e1fbb514 Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Mon, 13 Jul 2020 21:28:59 +0900 Subject: btrfs: add filesystem generation to FS_INFO ioctl Add retrieval of the filesystem's generation to the fsinfo ioctl. This is driven by setting the BTRFS_FS_INFO_FLAG_GENERATION flag in btrfs_ioctl_fs_info_args::flags. 
Reviewed-by: Nikolay Borisov Signed-off-by: Johannes Thumshirn Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ioctl.c | 5 +++++ include/uapi/linux/btrfs.h | 6 +++++- 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 2854f4a40787..55dd20d0f9cb 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -3247,6 +3247,11 @@ static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info, fi_args->flags |= BTRFS_FS_INFO_FLAG_CSUM_INFO; } + if (flags_in & BTRFS_FS_INFO_FLAG_GENERATION) { + fi_args->generation = fs_info->generation; + fi_args->flags |= BTRFS_FS_INFO_FLAG_GENERATION; + } + if (copy_to_user(arg, fi_args, sizeof(*fi_args))) ret = -EFAULT; diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index 24f6848ad78e..9b82e01c191d 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -250,6 +250,9 @@ struct btrfs_ioctl_dev_info_args { /* Request information about checksum type and size */ #define BTRFS_FS_INFO_FLAG_CSUM_INFO (1 << 0) +/* Request information about filesystem generation */ +#define BTRFS_FS_INFO_FLAG_GENERATION (1 << 1) + struct btrfs_ioctl_fs_info_args { __u64 max_id; /* out */ __u64 num_devices; /* out */ @@ -261,7 +264,8 @@ struct btrfs_ioctl_fs_info_args { __u16 csum_type; /* out */ __u16 csum_size; /* out */ __u64 flags; /* in/out */ - __u8 reserved[968]; /* pad to 1k */ + __u64 generation; /* out */ + __u8 reserved[960]; /* pad to 1k */ }; /* -- cgit v1.2.3 From 49bac897683340457a0ab1f76b924a1220bdb604 Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Mon, 13 Jul 2020 21:29:00 +0900 Subject: btrfs: add metadata_uuid to FS_INFO ioctl Add retrieval of the filesystem's metadata UUID to the fsinfo ioctl. This is driven by setting the BTRFS_FS_INFO_FLAG_METADATA_UUID flag in btrfs_ioctl_fs_info_args::flags. 
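Taken together with the two preceding FS_INFO patches, a user-space caller opts in to the new fields by setting the corresponding flags before issuing the ioctl, and then trusts only the fields whose flags the kernel echoes back. A rough sketch, assuming a kernel that carries all three patches (opening the filesystem fd and full error handling are omitted, and the function name is illustrative):

#include <sys/ioctl.h>
#include <string.h>
#include <stdio.h>
#include <linux/btrfs.h>

/* Illustrative user-space sketch: fs_fd is any open fd on the btrfs mount. */
static int example_query_fs_info(int fs_fd)
{
	struct btrfs_ioctl_fs_info_args args;

	memset(&args, 0, sizeof(args));
	args.flags = BTRFS_FS_INFO_FLAG_CSUM_INFO |
		     BTRFS_FS_INFO_FLAG_GENERATION |
		     BTRFS_FS_INFO_FLAG_METADATA_UUID;

	if (ioctl(fs_fd, BTRFS_IOC_FS_INFO, &args) < 0)
		return -1;

	/* Only trust a field if the kernel echoed its flag back. */
	if (args.flags & BTRFS_FS_INFO_FLAG_CSUM_INFO)
		printf("csum type %u, size %u\n", args.csum_type, args.csum_size);
	if (args.flags & BTRFS_FS_INFO_FLAG_GENERATION)
		printf("generation %llu\n", (unsigned long long)args.generation);

	return 0;
}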
Reviewed-by: Nikolay Borisov Signed-off-by: Johannes Thumshirn Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ioctl.c | 6 ++++++ include/uapi/linux/btrfs.h | 5 ++++- 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 55dd20d0f9cb..b4ddf51ae377 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -3252,6 +3252,12 @@ static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info, fi_args->flags |= BTRFS_FS_INFO_FLAG_GENERATION; } + if (flags_in & BTRFS_FS_INFO_FLAG_METADATA_UUID) { + memcpy(&fi_args->metadata_uuid, fs_devices->metadata_uuid, + sizeof(fi_args->metadata_uuid)); + fi_args->flags |= BTRFS_FS_INFO_FLAG_METADATA_UUID; + } + if (copy_to_user(arg, fi_args, sizeof(*fi_args))) ret = -EFAULT; diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index 9b82e01c191d..2c39d15a2beb 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -252,6 +252,8 @@ struct btrfs_ioctl_dev_info_args { /* Request information about filesystem generation */ #define BTRFS_FS_INFO_FLAG_GENERATION (1 << 1) +/* Request information about filesystem metadata UUID */ +#define BTRFS_FS_INFO_FLAG_METADATA_UUID (1 << 2) struct btrfs_ioctl_fs_info_args { __u64 max_id; /* out */ @@ -265,7 +267,8 @@ struct btrfs_ioctl_fs_info_args { __u16 csum_size; /* out */ __u64 flags; /* in/out */ __u64 generation; /* out */ - __u8 reserved[960]; /* pad to 1k */ + __u8 metadata_uuid[BTRFS_FSID_SIZE]; /* out */ + __u8 reserved[944]; /* pad to 1k */ }; /* -- cgit v1.2.3 From 7a82e97a11b91a78e9da06ab3f70545953c07b5c Mon Sep 17 00:00:00 2001 From: Paul Cercueil Date: Thu, 16 Jul 2020 14:42:48 +0200 Subject: PM: core: introduce pm_ptr() macro This macro is analogous to the infamous of_match_ptr(). If CONFIG_PM is enabled, this macro will resolve to its argument, otherwise to NULL. Signed-off-by: Paul Cercueil Reviewed-by: Ulf Hansson Signed-off-by: Rafael J. Wysocki --- include/linux/pm.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include') diff --git a/include/linux/pm.h b/include/linux/pm.h index 121c104a4090..1f227c518db3 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -374,6 +374,12 @@ const struct dev_pm_ops name = { \ SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ } +#ifdef CONFIG_PM +#define pm_ptr(_ptr) (_ptr) +#else +#define pm_ptr(_ptr) NULL +#endif + /* * PM_EVENT_ messages * -- cgit v1.2.3 From 756a64ce349cf2646c60456b110f7f1756bb8699 Mon Sep 17 00:00:00 2001 From: Paul Cercueil Date: Thu, 16 Jul 2020 14:42:49 +0200 Subject: PM: Make *_DEV_PM_OPS macros use __maybe_unused This way, when the dev_pm_ops instance is not referenced anywhere, it will simply be dropped by the compiler without a warning. Signed-off-by: Paul Cercueil Reviewed-by: Ulf Hansson Signed-off-by: Rafael J. Wysocki --- include/linux/pm.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/pm.h b/include/linux/pm.h index 1f227c518db3..a30a4b54df52 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -351,7 +351,7 @@ struct dev_pm_ops { * to RAM and hibernation. */ #define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \ -const struct dev_pm_ops name = { \ +const struct dev_pm_ops __maybe_unused name = { \ SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ } @@ -369,7 +369,7 @@ const struct dev_pm_ops name = { \ * .runtime_resume(), respectively (and analogously for hibernation). 
*/ #define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \ -const struct dev_pm_ops name = { \ +const struct dev_pm_ops __maybe_unused name = { \ SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ } -- cgit v1.2.3 From 0585c1c06a550c2a606c33ad45954892245512f6 Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Mon, 27 Jul 2020 17:29:38 +0800 Subject: ACPI: Use valid link to the ACPI specification Currently, acpi.info is an invalid link to access ACPI specification, the new valid link is https://uefi.org/specifications. Signed-off-by: Tiezhu Yang Signed-off-by: Rafael J. Wysocki --- Documentation/hwmon/acpi_power_meter.rst | 2 +- drivers/acpi/Kconfig | 3 +-- include/linux/tboot.h | 2 +- tools/power/cpupower/man/cpupower-idle-info.1 | 2 +- 4 files changed, 4 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/Documentation/hwmon/acpi_power_meter.rst b/Documentation/hwmon/acpi_power_meter.rst index 4a0941ade0ca..8628c1161015 100644 --- a/Documentation/hwmon/acpi_power_meter.rst +++ b/Documentation/hwmon/acpi_power_meter.rst @@ -9,7 +9,7 @@ Supported systems: Prefix: 'power_meter' - Datasheet: http://acpi.info/, section 10.4. + Datasheet: https://uefi.org/specifications, section 10.4. Author: Darrick J. Wong diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index ce2730d61a8f..7428cd2f9f6d 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -40,8 +40,7 @@ menuconfig ACPI the UEFI Forum and any UEFI member can join the ASWG and contribute to the ACPI specification. The specification is available at: - - + if ACPI diff --git a/include/linux/tboot.h b/include/linux/tboot.h index c7e424766360..5146d2574e85 100644 --- a/include/linux/tboot.h +++ b/include/linux/tboot.h @@ -44,7 +44,7 @@ struct tboot_acpi_generic_address { /* * combines Sx info from FADT and FACS tables per ACPI 2.0+ spec - * (http://www.acpi.info/) + * (https://uefi.org/specifications) */ struct tboot_acpi_sleep_info { struct tboot_acpi_generic_address pm1a_cnt_blk; diff --git a/tools/power/cpupower/man/cpupower-idle-info.1 b/tools/power/cpupower/man/cpupower-idle-info.1 index 80a1311fa747..20b6345c53ad 100644 --- a/tools/power/cpupower/man/cpupower-idle-info.1 +++ b/tools/power/cpupower/man/cpupower-idle-info.1 @@ -75,7 +75,7 @@ By default only values of core zero are displayed. How to display settings of other cores is described in the cpupower(1) manpage in the \-\-cpu option section. .SH REFERENCES -http://www.acpi.info/spec.htm +https://uefi.org/specifications .SH "FILES" .nf \fI/sys/devices/system/cpu/cpu*/cpuidle/state*\fP -- cgit v1.2.3 From 8365a898fe53f85529566501d3b3d88640b3975e Mon Sep 17 00:00:00 2001 From: Sumeet Pawnikar Date: Thu, 16 Jul 2020 23:14:55 +0530 Subject: powercap: Add Power Limit4 support Modern Intel Mobile platforms support power limit4 (PL4), which is the SoC package level maximum power limit (in Watts). It can be used to preemptively limits potential SoC power to prevent power spikes from tripping the power adapter and battery over-current protection. This patch enables this feature by exposing package level peak power capping control to userspace via RAPL sysfs interface. With this, application like DTPF can modify PL4 power limit, the similar way of other package power limit (PL1). As this feature is not tested on previous generations, here it is enabled only for the platform that has been verified to work, for safety concerns. 
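For illustration, the new constraint shows up alongside the existing ones in the package RAPL zone, and user space can program it through the regular powercap attributes. A minimal sketch, assuming the usual /sys/class/powercap layout and that constraint index 2 reports the name "peak_power"; robust code should match the constraint name instead of hard-coding the index, and the 36 W value is a made-up example.

#include <stdio.h>

/* Illustrative only: write a PL4 (peak power) limit in microwatts. */
static int example_set_pl4_uw(long long limit_uw)
{
	const char *path = "/sys/class/powercap/intel-rapl:0/constraint_2_power_limit_uw";
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%lld\n", limit_uw);
	return fclose(f);
}

/* e.g. example_set_pl4_uw(36000000) requests a 36 W peak power cap. */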
Signed-off-by: Sumeet Pawnikar Co-developed-by: Zhang Rui Signed-off-by: Zhang Rui Reviewed-by: Srinivas Pandruvada Tested-by: Srinivas Pandruvada Signed-off-by: Rafael J. Wysocki --- Documentation/power/powercap/powercap.rst | 15 ++++++--- drivers/powercap/intel_rapl_common.c | 54 +++++++++++++++++++++++++++++-- drivers/powercap/intel_rapl_msr.c | 15 +++++++++ include/linux/intel_rapl.h | 5 ++- 4 files changed, 81 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/Documentation/power/powercap/powercap.rst b/Documentation/power/powercap/powercap.rst index 7ae3b44c7624..e75d12596dac 100644 --- a/Documentation/power/powercap/powercap.rst +++ b/Documentation/power/powercap/powercap.rst @@ -167,11 +167,13 @@ For example:: package-0 --------- -The Intel RAPL technology allows two constraints, short term and long term, -with two different time windows to be applied to each power zone. Thus for -each zone there are 2 attributes representing the constraint names, 2 power -limits and 2 attributes representing the sizes of the time windows. Such that, -constraint_j_* attributes correspond to the jth constraint (j = 0,1). +Depending on different power zones, the Intel RAPL technology allows +one or multiple constraints like short term, long term and peak power, +with different time windows to be applied to each power zone. +All the zones contain attributes representing the constraint names, +power limits and the sizes of the time windows. Note that time window +is not applicable to peak power. Here, constraint_j_* attributes +correspond to the jth constraint (j = 0,1,2). For example:: @@ -181,6 +183,9 @@ For example:: constraint_1_name constraint_1_power_limit_uw constraint_1_time_window_us + constraint_2_name + constraint_2_power_limit_uw + constraint_2_time_window_us Power Zone Attributes ===================== diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index b739ce4f390d..6f55aaef8afc 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -39,6 +39,8 @@ #define POWER_HIGH_LOCK BIT_ULL(63) #define POWER_LOW_LOCK BIT(31) +#define POWER_LIMIT4_MASK 0x1FFF + #define TIME_WINDOW1_MASK (0x7FULL<<17) #define TIME_WINDOW2_MASK (0x7FULL<<49) @@ -82,6 +84,7 @@ enum unit_type { static const char pl1_name[] = "long_term"; static const char pl2_name[] = "short_term"; +static const char pl4_name[] = "peak_power"; #define power_zone_to_rapl_domain(_zone) \ container_of(_zone, struct rapl_domain, power_zone) @@ -338,6 +341,9 @@ static int set_power_limit(struct powercap_zone *power_zone, int cid, case PL2_ENABLE: rapl_write_data_raw(rd, POWER_LIMIT2, power_limit); break; + case PL4_ENABLE: + rapl_write_data_raw(rd, POWER_LIMIT4, power_limit); + break; default: ret = -EINVAL; } @@ -372,6 +378,9 @@ static int get_current_power_limit(struct powercap_zone *power_zone, int cid, case PL2_ENABLE: prim = POWER_LIMIT2; break; + case PL4_ENABLE: + prim = POWER_LIMIT4; + break; default: put_online_cpus(); return -EINVAL; @@ -441,6 +450,13 @@ static int get_time_window(struct powercap_zone *power_zone, int cid, case PL2_ENABLE: ret = rapl_read_data_raw(rd, TIME_WINDOW2, true, &val); break; + case PL4_ENABLE: + /* + * Time window parameter is not applicable for PL4 entry + * so assigining '0' as default value. 
+ */ + val = 0; + break; default: put_online_cpus(); return -EINVAL; @@ -484,6 +500,9 @@ static int get_max_power(struct powercap_zone *power_zone, int id, u64 *data) case PL2_ENABLE: prim = MAX_POWER; break; + case PL4_ENABLE: + prim = MAX_POWER; + break; default: put_online_cpus(); return -EINVAL; @@ -493,6 +512,10 @@ static int get_max_power(struct powercap_zone *power_zone, int id, u64 *data) else *data = val; + /* As a generalization rule, PL4 would be around two times PL2. */ + if (rd->rpl[id].prim_id == PL4_ENABLE) + *data = *data * 2; + put_online_cpus(); return ret; @@ -525,12 +548,22 @@ static void rapl_init_domains(struct rapl_package *rp) rd->id = i; rd->rpl[0].prim_id = PL1_ENABLE; rd->rpl[0].name = pl1_name; - /* some domain may support two power limits */ - if (rp->priv->limits[i] == 2) { + + /* + * The PL2 power domain is applicable for limits two + * and limits three + */ + if (rp->priv->limits[i] >= 2) { rd->rpl[1].prim_id = PL2_ENABLE; rd->rpl[1].name = pl2_name; } + /* Enable PL4 domain if the total power limits are three */ + if (rp->priv->limits[i] == 3) { + rd->rpl[2].prim_id = PL4_ENABLE; + rd->rpl[2].name = pl4_name; + } + for (j = 0; j < RAPL_DOMAIN_REG_MAX; j++) rd->regs[j] = rp->priv->regs[i][j]; @@ -599,6 +632,8 @@ static struct rapl_primitive_info rpi[] = { RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0), PRIMITIVE_INFO_INIT(POWER_LIMIT2, POWER_LIMIT2_MASK, 32, RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0), + PRIMITIVE_INFO_INIT(POWER_LIMIT4, POWER_LIMIT4_MASK, 0, + RAPL_DOMAIN_REG_PL4, POWER_UNIT, 0), PRIMITIVE_INFO_INIT(FW_LOCK, POWER_LOW_LOCK, 31, RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0), PRIMITIVE_INFO_INIT(PL1_ENABLE, POWER_LIMIT1_ENABLE, 15, @@ -609,6 +644,8 @@ static struct rapl_primitive_info rpi[] = { RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0), PRIMITIVE_INFO_INIT(PL2_CLAMP, POWER_LIMIT2_CLAMP, 48, RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0), + PRIMITIVE_INFO_INIT(PL4_ENABLE, POWER_LIMIT4_MASK, 0, + RAPL_DOMAIN_REG_PL4, ARBITRARY_UNIT, 0), PRIMITIVE_INFO_INIT(TIME_WINDOW1, TIME_WINDOW1_MASK, 17, RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0), PRIMITIVE_INFO_INIT(TIME_WINDOW2, TIME_WINDOW2_MASK, 49, @@ -1273,6 +1310,7 @@ void rapl_remove_package(struct rapl_package *rp) if (find_nr_power_limit(rd) > 1) { rapl_write_data_raw(rd, PL2_ENABLE, 0); rapl_write_data_raw(rd, PL2_CLAMP, 0); + rapl_write_data_raw(rd, PL4_ENABLE, 0); } if (rd->id == RAPL_DOMAIN_PACKAGE) { rd_package = rd; @@ -1381,6 +1419,13 @@ static void power_limit_state_save(void) if (ret) rd->rpl[i].last_power_limit = 0; break; + case PL4_ENABLE: + ret = rapl_read_data_raw(rd, + POWER_LIMIT4, true, + &rd->rpl[i].last_power_limit); + if (ret) + rd->rpl[i].last_power_limit = 0; + break; } } } @@ -1411,6 +1456,11 @@ static void power_limit_state_restore(void) rapl_write_data_raw(rd, POWER_LIMIT2, rd->rpl[i].last_power_limit); break; + case PL4_ENABLE: + if (rd->rpl[i].last_power_limit) + rapl_write_data_raw(rd, POWER_LIMIT4, + rd->rpl[i].last_power_limit); + break; } } } diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c index d5487965bdfe..d2a2627507a9 100644 --- a/drivers/powercap/intel_rapl_msr.c +++ b/drivers/powercap/intel_rapl_msr.c @@ -28,6 +28,7 @@ /* Local defines */ #define MSR_PLATFORM_POWER_LIMIT 0x0000065C +#define MSR_VR_CURRENT_CONFIG 0x00000601 /* private data for RAPL MSR Interface */ static struct rapl_if_priv rapl_msr_priv = { @@ -123,13 +124,27 @@ static int rapl_msr_write_raw(int cpu, struct reg_action *ra) return ra->err; } +/* List of verified CPUs. 
*/ +static const struct x86_cpu_id pl4_support_ids[] = { + { X86_VENDOR_INTEL, 6, INTEL_FAM6_TIGERLAKE_L, X86_FEATURE_ANY }, + {} +}; + static int rapl_msr_probe(struct platform_device *pdev) { + const struct x86_cpu_id *id = x86_match_cpu(pl4_support_ids); int ret; rapl_msr_priv.read_raw = rapl_msr_read_raw; rapl_msr_priv.write_raw = rapl_msr_write_raw; + if (id) { + rapl_msr_priv.limits[RAPL_DOMAIN_PACKAGE] = 3; + rapl_msr_priv.regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_PL4] = + MSR_VR_CURRENT_CONFIG; + pr_info("PL4 support detected.\n"); + } + rapl_msr_priv.control_type = powercap_register_control_type(NULL, "intel-rapl", NULL); if (IS_ERR(rapl_msr_priv.control_type)) { pr_debug("failed to register powercap control_type.\n"); diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h index efb3ce892c20..3582176a1eca 100644 --- a/include/linux/intel_rapl.h +++ b/include/linux/intel_rapl.h @@ -29,6 +29,7 @@ enum rapl_domain_reg_id { RAPL_DOMAIN_REG_PERF, RAPL_DOMAIN_REG_POLICY, RAPL_DOMAIN_REG_INFO, + RAPL_DOMAIN_REG_PL4, RAPL_DOMAIN_REG_MAX, }; @@ -38,12 +39,14 @@ enum rapl_primitives { ENERGY_COUNTER, POWER_LIMIT1, POWER_LIMIT2, + POWER_LIMIT4, FW_LOCK, PL1_ENABLE, /* power limit 1, aka long term */ PL1_CLAMP, /* allow frequency to go below OS request */ PL2_ENABLE, /* power limit 2, aka short term, instantaneous */ PL2_CLAMP, + PL4_ENABLE, /* power limit 4, aka max peak power */ TIME_WINDOW1, /* long term */ TIME_WINDOW2, /* short term */ @@ -65,7 +68,7 @@ struct rapl_domain_data { unsigned long timestamp; }; -#define NR_POWER_LIMITS (2) +#define NR_POWER_LIMITS (3) struct rapl_power_limit { struct powercap_zone_constraint *constraint; int prim_id; /* primitive ID used to enable */ -- cgit v1.2.3 From 10cfde5dc695856c4fe93f0679d2fdd8e0d2a147 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 20 Jul 2020 10:31:19 -0700 Subject: ACPICA: Replace one-element array with flexible-array ACPICA commit 7ba2f3d91a32f104765961fda0ed78b884ae193d The current codebase makes use of one-element arrays in the following form: struct something { int length; u8 data[1]; }; struct something *instance; instance = kmalloc(sizeof(*instance) + size, GFP_KERNEL); instance->length = size; memcpy(instance->data, source, size); but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the linux codebase from now on. This issue was found with the help of Coccinelle and audited _manually_. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Link: https://github.com/acpica/acpica/commit/7ba2f3d9 Signed-off-by: Gustavo A. R. Silva Signed-off-by: Erik Kaneda Signed-off-by: Bob Moore Signed-off-by: Rafael J. 
Wysocki --- drivers/acpi/acpica/utids.c | 3 +-- include/acpi/actypes.h | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c index 3bb06935a2ad..3e68864ef242 100644 --- a/drivers/acpi/acpica/utids.c +++ b/drivers/acpi/acpica/utids.c @@ -263,8 +263,7 @@ acpi_ut_execute_CID(struct acpi_namespace_node *device_node, * 3) Size of the actual CID strings */ cid_list_size = sizeof(struct acpi_pnp_device_id_list) + - ((count - 1) * sizeof(struct acpi_pnp_device_id)) + - string_area_size; + (count * sizeof(struct acpi_pnp_device_id)) + string_area_size; cid_list = ACPI_ALLOCATE_ZEROED(cid_list_size); if (!cid_list) { diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index aa236b9e6f24..735921c833f8 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -1146,7 +1146,7 @@ struct acpi_pnp_device_id { struct acpi_pnp_device_id_list { u32 count; /* Number of IDs in Ids array */ u32 list_size; /* Size of list, including ID strings */ - struct acpi_pnp_device_id ids[1]; /* ID array */ + struct acpi_pnp_device_id ids[]; /* ID array */ }; /* -- cgit v1.2.3 From 2861ba7a0c6c4ba835347686c19304c32ee15961 Mon Sep 17 00:00:00 2001 From: Bob Moore Date: Mon, 20 Jul 2020 10:31:21 -0700 Subject: ACPICA: Update version to 20200717 ACPICA commit c1adb9a2a775df7a85df0103342ebf090e1b2016 Version 20200717. Link: https://github.com/acpica/acpica/commit/c1adb9a2 Signed-off-by: Bob Moore Signed-off-by: Erik Kaneda Signed-off-by: Rafael J. Wysocki --- include/acpi/acpixf.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 459d6981ca96..9dc816641286 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -12,7 +12,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20200528 +#define ACPI_CA_VERSION 0x20200717 #include #include -- cgit v1.2.3 From 34facb04228b36006a37727fddee59cf069d95d4 Mon Sep 17 00:00:00 2001 From: Stephan Gerhold Date: Mon, 27 Jul 2020 10:25:02 +0200 Subject: ASoC: dt-bindings: q6asm: Add Q6ASM_DAI_{TX_RX, TX, RX} defines Right now the direction of a DAI has to be specified as a literal number in the device tree, e.g.: dai@0 { reg = <0>; direction = <2>; }; but this does not make it immediately clear that this is a playback/RX-only DAI. Actually, q6asm-dai.c has useful defines for this. Move them to the dt-bindings header to allow using them in the dts(i) files. The example above then becomes: dai@0 { reg = <0>; direction = ; }; which is immediately recognizable as playback/RX-only DAI. Signed-off-by: Stephan Gerhold Reviewed-by: Srinivas Kandagatla Cc: Srinivas Kandagatla Link: https://lore.kernel.org/r/20200727082502.2341-1-stephan@gerhold.net Signed-off-by: Mark Brown --- Documentation/devicetree/bindings/sound/qcom,q6asm.txt | 9 +++++---- include/dt-bindings/sound/qcom,q6asm.h | 4 ++++ sound/soc/qcom/qdsp6/q6asm-dai.c | 3 --- 3 files changed, 9 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/Documentation/devicetree/bindings/sound/qcom,q6asm.txt b/Documentation/devicetree/bindings/sound/qcom,q6asm.txt index 6b9a88d0ea3f..8c4883becae9 100644 --- a/Documentation/devicetree/bindings/sound/qcom,q6asm.txt +++ b/Documentation/devicetree/bindings/sound/qcom,q6asm.txt @@ -39,9 +39,9 @@ configuration of each dai. Must contain the following properties. 
Usage: Required for Compress offload dais Value type: Definition: Specifies the direction of the dai stream - 0 for both tx and rx - 1 for only tx (Capture/Encode) - 2 for only rx (Playback/Decode) + Q6ASM_DAI_TX_RX (0) for both tx and rx + Q6ASM_DAI_TX (1) for only tx (Capture/Encode) + Q6ASM_DAI_RX (2) for only rx (Playback/Decode) - is-compress-dai: Usage: Required for Compress offload dais @@ -50,6 +50,7 @@ configuration of each dai. Must contain the following properties. = EXAMPLE +#include apr-service@7 { compatible = "qcom,q6asm"; @@ -62,7 +63,7 @@ apr-service@7 { dai@0 { reg = <0>; - direction = <2>; + direction = ; is-compress-dai; }; }; diff --git a/include/dt-bindings/sound/qcom,q6asm.h b/include/dt-bindings/sound/qcom,q6asm.h index 1eb77d87c2e8..f59d74f14395 100644 --- a/include/dt-bindings/sound/qcom,q6asm.h +++ b/include/dt-bindings/sound/qcom,q6asm.h @@ -19,4 +19,8 @@ #define MSM_FRONTEND_DAI_MULTIMEDIA15 14 #define MSM_FRONTEND_DAI_MULTIMEDIA16 15 +#define Q6ASM_DAI_TX_RX 0 +#define Q6ASM_DAI_TX 1 +#define Q6ASM_DAI_RX 2 + #endif /* __DT_BINDINGS_Q6_ASM_H__ */ diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c index a2acb7564eb8..9b7b218f2a20 100644 --- a/sound/soc/qcom/qdsp6/q6asm-dai.c +++ b/sound/soc/qcom/qdsp6/q6asm-dai.c @@ -37,9 +37,6 @@ #define COMPR_PLAYBACK_MAX_FRAGMENT_SIZE (128 * 1024) #define COMPR_PLAYBACK_MIN_NUM_FRAGMENTS (4) #define COMPR_PLAYBACK_MAX_NUM_FRAGMENTS (16 * 4) -#define Q6ASM_DAI_TX_RX 0 -#define Q6ASM_DAI_TX 1 -#define Q6ASM_DAI_RX 2 #define ALAC_CH_LAYOUT_MONO ((101 << 16) | 1) #define ALAC_CH_LAYOUT_STEREO ((101 << 16) | 2) -- cgit v1.2.3 From 50c8a002bfd43798768ad07833b2b9d4b4d5274f Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 18 Jul 2020 17:29:03 -0700 Subject: platform/x86: ISST: drop a duplicated word in isst_if.h Drop the repeated word "for" in a comment. Signed-off-by: Randy Dunlap Cc: Srinivas Pandruvada Cc: platform-driver-x86@vger.kernel.org Cc: Darren Hart Cc: Andy Shevchenko Acked-by: Srinivas Pandruvada Signed-off-by: Andy Shevchenko --- include/uapi/linux/isst_if.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/linux/isst_if.h b/include/uapi/linux/isst_if.h index 0a52b7b093d3..ba078f8e9add 100644 --- a/include/uapi/linux/isst_if.h +++ b/include/uapi/linux/isst_if.h @@ -69,7 +69,7 @@ struct isst_if_cpu_maps { * @logical_cpu: Logical CPU number to get target PCI device. * @reg: PUNIT register offset * @value: For write operation value to write and for - * for read placeholder read value + * read placeholder read value * * Structure to specify read/write data to PUNIT registers. */ -- cgit v1.2.3 From a233547660a3915973d41e2a9a0923d0cf317a62 Mon Sep 17 00:00:00 2001 From: Pi-Hsun Shih Date: Fri, 24 Jul 2020 16:03:55 +0800 Subject: platform/chrome: cros_ec: Fix host command for regulator control. Since the host command number 0x012B conflicts with other EC host command, add one to all regulator control related host command. Also fix a wrong alignment on struct and sync the comment with the one in ChromeOS EC codebase. 
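[Editor's note: a rough, hypothetical sketch of a kernel driver querying a regulator name with the renumbered opcode. The helper name, error handling and the cros_ec_cmd_xfer_status() transport call are assumptions for illustration; only EC_CMD_REGULATOR_GET_INFO and the request/response layouts come from the header changed in the hunk below.]

#include <linux/kernel.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/slab.h>
#include <linux/string.h>

/*
 * Illustrative use of the renumbered opcode (EC_CMD_REGULATOR_GET_INFO is
 * now 0x012C).  Obtaining @ec_dev is normal driver plumbing and is assumed.
 */
static int example_read_regulator_name(struct cros_ec_device *ec_dev,
				       u32 index, char *buf, size_t buflen)
{
	struct ec_params_regulator_get_info params = { .index = index };
	struct ec_response_regulator_get_info resp;
	struct cros_ec_command *msg;
	int ret;

	msg = kzalloc(sizeof(*msg) + max(sizeof(params), sizeof(resp)),
		      GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->command = EC_CMD_REGULATOR_GET_INFO;
	msg->outsize = sizeof(params);
	msg->insize = sizeof(resp);
	memcpy(msg->data, &params, sizeof(params));

	ret = cros_ec_cmd_xfer_status(ec_dev, msg);
	if (ret >= 0) {
		memcpy(&resp, msg->data, sizeof(resp));
		/* bound the copy by the fixed-size name field in the response */
		strscpy(buf, resp.name, min_t(size_t, buflen, sizeof(resp.name)));
	}

	kfree(msg);
	return ret < 0 ? ret : 0;
}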
Fixes: dff08caf35ec ("platform/chrome: cros_ec: Add command for regulator control.") Signed-off-by: Pi-Hsun Shih Acked-by: Enric Balletbo i Serra Link: https://lore.kernel.org/r/20200724080358.619245-1-pihsun@chromium.org Signed-off-by: Mark Brown --- include/linux/platform_data/cros_ec_commands.h | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index a417b51b5764..91e77f53414d 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -5438,7 +5438,7 @@ struct ec_response_rollback_info { * * Returns the regulator name and supported voltage list in mV. */ -#define EC_CMD_REGULATOR_GET_INFO 0x012B +#define EC_CMD_REGULATOR_GET_INFO 0x012C /* Maximum length of regulator name */ #define EC_REGULATOR_NAME_MAX_LEN 16 @@ -5454,12 +5454,12 @@ struct ec_response_regulator_get_info { char name[EC_REGULATOR_NAME_MAX_LEN]; uint16_t num_voltages; uint16_t voltages_mv[EC_REGULATOR_VOLTAGE_MAX_COUNT]; -} __ec_align1; +} __ec_align2; /* * Configure the regulator as enabled / disabled. */ -#define EC_CMD_REGULATOR_ENABLE 0x012C +#define EC_CMD_REGULATOR_ENABLE 0x012D struct ec_params_regulator_enable { uint32_t index; @@ -5471,7 +5471,7 @@ struct ec_params_regulator_enable { * * Returns 1 if the regulator is enabled, 0 if not. */ -#define EC_CMD_REGULATOR_IS_ENABLED 0x012D +#define EC_CMD_REGULATOR_IS_ENABLED 0x012E struct ec_params_regulator_is_enabled { uint32_t index; @@ -5489,7 +5489,7 @@ struct ec_response_regulator_is_enabled { * Also note that this might be called before the regulator is enabled, and the * setting should be in effect after the regulator is enabled. */ -#define EC_CMD_REGULATOR_SET_VOLTAGE 0x012E +#define EC_CMD_REGULATOR_SET_VOLTAGE 0x012F struct ec_params_regulator_set_voltage { uint32_t index; @@ -5500,9 +5500,10 @@ struct ec_params_regulator_set_voltage { /* * Get the currently configured voltage for the voltage regulator. * - * Note that this might be called before the regulator is enabled. + * Note that this might be called before the regulator is enabled, and this + * should return the configured output voltage if the regulator is enabled. */ -#define EC_CMD_REGULATOR_GET_VOLTAGE 0x012F +#define EC_CMD_REGULATOR_GET_VOLTAGE 0x0130 struct ec_params_regulator_get_voltage { uint32_t index; -- cgit v1.2.3 From cfd97f94d036bf36122fa19d075c5741347aa178 Mon Sep 17 00:00:00 2001 From: Colton Lewis Date: Sat, 25 Jul 2020 05:02:57 +0000 Subject: spi: correct kernel-doc inconsistency Silence documentation build warnings by correcting kernel-doc comment for spi_transfer struct. Signed-off-by: Colton Lewis Link: https://lore.kernel.org/r/20200725050242.279548-1-colton.w.lewis@protonmail.com Signed-off-by: Mark Brown --- include/linux/spi/spi.h | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index f8b721fcd5c6..99380c0825db 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -329,6 +329,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * every chipselect is connected to a slave. * @dma_alignment: SPI controller constraint on DMA buffers alignment. 
* @mode_bits: flags understood by this controller driver + * @buswidth_override_bits: flags to override for this controller driver * @bits_per_word_mask: A mask indicating which values of bits_per_word are * supported by the driver. Bit n indicates that a bits_per_word n+1 is * supported. If set, the SPI core will reject any transfer with an @@ -846,12 +847,7 @@ extern void spi_res_release(struct spi_controller *ctlr, * processed the word, i.e. the "pre" timestamp should be taken before * transmitting the "pre" word, and the "post" timestamp after receiving * transmit confirmation from the controller for the "post" word. - * @timestamped_pre: Set by the SPI controller driver to denote it has acted - * upon the @ptp_sts request. Not set when the SPI core has taken care of - * the task. SPI device drivers are free to print a warning if this comes - * back unset and they need the better resolution. - * @timestamped_post: See above. The reason why both exist is that these - * booleans are also used to keep state in the core SPI logic. + * @timestamped: true if the transfer has been timestamped * @error: Error status logged by spi controller driver. * * SPI transfers always write the same number of bytes as they read. -- cgit v1.2.3 From 2224635938814fc63004e30f7c41943812bd6f1c Mon Sep 17 00:00:00 2001 From: Meir Lichtinger Date: Thu, 16 Jul 2020 13:52:47 +0300 Subject: RDMA/mlx5: Use MLX5_SET macro instead of local structure Use generic mlx5 structure defined in mlx5_ifc.h to represent ConnectX device data structures instead of using structure defined specifically for mlx5_ib module. Link: https://lore.kernel.org/r/20200716105248.1423452-3-leon@kernel.org Signed-off-by: Meir Lichtinger Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/mlx5_ib.h | 9 --------- drivers/infiniband/hw/mlx5/wr.c | 26 ++++++++++++++++---------- include/linux/mlx5/device.h | 1 - 3 files changed, 16 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 5dbe3eb0d9cb..d9dfe38f4160 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -1423,15 +1423,6 @@ static inline void init_query_mad(struct ib_smp *mad) mad->method = IB_MGMT_METHOD_GET; } -static inline u8 convert_access(int acc) -{ - return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) | - (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) | - (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) | - (acc & IB_ACCESS_LOCAL_WRITE ? 
MLX5_PERM_LOCAL_WRITE : 0) | - MLX5_PERM_LOCAL_READ; -} - static inline int is_qp1(enum ib_qp_type qp_type) { return qp_type == MLX5_IB_QPT_HW_GSI; diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c index 2c6df1c43b55..e58ecb46f8e3 100644 --- a/drivers/infiniband/hw/mlx5/wr.c +++ b/drivers/infiniband/hw/mlx5/wr.c @@ -383,20 +383,26 @@ static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, memset(seg, 0, sizeof(*seg)); if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR) - seg->status = MLX5_MKEY_STATUS_FREE; - - seg->flags = convert_access(umrwr->access_flags); + MLX5_SET(mkc, seg, free, 1); + + MLX5_SET(mkc, seg, a, + !!(umrwr->access_flags & IB_ACCESS_REMOTE_ATOMIC)); + MLX5_SET(mkc, seg, rw, + !!(umrwr->access_flags & IB_ACCESS_REMOTE_WRITE)); + MLX5_SET(mkc, seg, rr, !!(umrwr->access_flags & IB_ACCESS_REMOTE_READ)); + MLX5_SET(mkc, seg, lw, !!(umrwr->access_flags & IB_ACCESS_LOCAL_WRITE)); + MLX5_SET(mkc, seg, lr, 1); if (umrwr->pd) - seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn); + MLX5_SET(mkc, seg, pd, to_mpd(umrwr->pd)->pdn); if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION && !umrwr->length) - seg->flags_pd |= cpu_to_be32(MLX5_MKEY_LEN64); + MLX5_SET(mkc, seg, length64, 1); - seg->start_addr = cpu_to_be64(umrwr->virt_addr); - seg->len = cpu_to_be64(umrwr->length); - seg->log2_page_size = umrwr->page_shift; - seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 | - mlx5_mkey_variant(umrwr->mkey)); + MLX5_SET64(mkc, seg, start_addr, umrwr->virt_addr); + MLX5_SET64(mkc, seg, len, umrwr->length); + MLX5_SET(mkc, seg, log_page_size, umrwr->page_shift); + MLX5_SET(mkc, seg, qpn, 0xffffff); + MLX5_SET(mkc, seg, mkey_7_0, mlx5_mkey_variant(umrwr->mkey)); } static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg, diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 2aacf9a8ee4d..d184b579617f 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1007,7 +1007,6 @@ enum { MLX5_MKEY_REMOTE_INVAL = 1 << 24, MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29, MLX5_MKEY_BSF_EN = 1 << 30, - MLX5_MKEY_LEN64 = 1 << 31, }; struct mlx5_mkey_seg { -- cgit v1.2.3 From 896ec9735336f5adb576d372ed7e411bce2fc74c Mon Sep 17 00:00:00 2001 From: Meir Lichtinger Date: Thu, 16 Jul 2020 13:52:48 +0300 Subject: RDMA/mlx5: Set mkey relaxed ordering by UMR with ConnectX-7 Up to ConnectX-7 UMR is not used when user passes relaxed ordering access flag. ConnectX-7 supports setting relaxed ordering read/write mkey attribute by UMR, indicated by new HCA capabilities. With ConnectX-7 driver uses UMR when user set relaxed ordering access flag, in contrast to previous silicon models. Specifically it includes setting relvant flags of mkey context mask in UMR control segment, and relaxed ordering write and read flags in UMR mkey context segment. 
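[Editor's note: the gating logic can be summarised as "offload relaxed ordering to UMR only when the firmware can actually program it through UMR". The predicate below is an illustrative restatement of the checks the patch adds to mlx5_ib_can_use_umr() in the hunk that follows; the helper name itself is hypothetical.]

#include <linux/mlx5/driver.h>
#include <rdma/ib_verbs.h>

/*
 * Illustrative predicate: a UMR WQE may be used for an mkey that requests
 * relaxed ordering only when the matching *_umr capabilities are set.
 */
static bool example_umr_can_set_relaxed_ordering(struct mlx5_core_dev *mdev,
						 int access_flags)
{
	if (!(access_flags & IB_ACCESS_RELAXED_ORDERING))
		return true;		/* nothing RO related to program */

	/* RO on writes is supported, but UMR cannot set it: fall back */
	if (MLX5_CAP_GEN(mdev, relaxed_ordering_write) &&
	    !MLX5_CAP_GEN(mdev, relaxed_ordering_write_umr))
		return false;

	/* same rule for the read direction */
	if (MLX5_CAP_GEN(mdev, relaxed_ordering_read) &&
	    !MLX5_CAP_GEN(mdev, relaxed_ordering_read_umr))
		return false;

	return true;
}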
Link: https://lore.kernel.org/r/20200716105248.1423452-4-leon@kernel.org Signed-off-by: Meir Lichtinger Reviewed-by: Michael Guralnik Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/mlx5_ib.h | 9 ++++++-- drivers/infiniband/hw/mlx5/wr.c | 44 ++++++++++++++++++++++++++++-------- include/linux/mlx5/device.h | 4 +++- 3 files changed, 45 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index d9dfe38f4160..cdd04b3d7d51 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -1524,8 +1524,13 @@ static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev, return false; if (access_flags & IB_ACCESS_RELAXED_ORDERING && - (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) || - MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))) + MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) && + !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr)) + return false; + + if (access_flags & IB_ACCESS_RELAXED_ORDERING && + MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) && + !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr)) return false; return true; diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c index e58ecb46f8e3..4d4f8c22b3e6 100644 --- a/drivers/infiniband/hw/mlx5/wr.c +++ b/drivers/infiniband/hw/mlx5/wr.c @@ -263,7 +263,9 @@ static __be64 get_umr_update_translation_mask(void) return cpu_to_be64(result); } -static __be64 get_umr_update_access_mask(int atomic) +static __be64 get_umr_update_access_mask(int atomic, + int relaxed_ordering_write, + int relaxed_ordering_read) { u64 result; @@ -275,6 +277,12 @@ static __be64 get_umr_update_access_mask(int atomic) if (atomic) result |= MLX5_MKEY_MASK_A; + if (relaxed_ordering_write) + result |= MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE; + + if (relaxed_ordering_read) + result |= MLX5_MKEY_MASK_RELAXED_ORDERING_READ; + return cpu_to_be64(result); } @@ -289,17 +297,28 @@ static __be64 get_umr_update_pd_mask(void) static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask) { - if ((mask & MLX5_MKEY_MASK_PAGE_SIZE && - MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) || - (mask & MLX5_MKEY_MASK_A && - MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))) + if (mask & MLX5_MKEY_MASK_PAGE_SIZE && + MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) + return -EPERM; + + if (mask & MLX5_MKEY_MASK_A && + MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)) + return -EPERM; + + if (mask & MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE && + !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr)) + return -EPERM; + + if (mask & MLX5_MKEY_MASK_RELAXED_ORDERING_READ && + !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr)) return -EPERM; + return 0; } static int set_reg_umr_segment(struct mlx5_ib_dev *dev, struct mlx5_wqe_umr_ctrl_seg *umr, - const struct ib_send_wr *wr, int atomic) + const struct ib_send_wr *wr) { const struct mlx5_umr_wr *umrwr = umr_wr(wr); @@ -325,7 +344,10 @@ static int set_reg_umr_segment(struct mlx5_ib_dev *dev, if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION) umr->mkey_mask |= get_umr_update_translation_mask(); if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) { - umr->mkey_mask |= get_umr_update_access_mask(atomic); + umr->mkey_mask |= get_umr_update_access_mask( + !!(MLX5_CAP_GEN(dev->mdev, atomic)), + !!(MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr)), + !!(MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))); umr->mkey_mask 
|= get_umr_update_pd_mask(); } if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR) @@ -392,6 +414,11 @@ static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, MLX5_SET(mkc, seg, rr, !!(umrwr->access_flags & IB_ACCESS_REMOTE_READ)); MLX5_SET(mkc, seg, lw, !!(umrwr->access_flags & IB_ACCESS_LOCAL_WRITE)); MLX5_SET(mkc, seg, lr, 1); + MLX5_SET(mkc, seg, relaxed_ordering_write, + !!(umrwr->access_flags & IB_ACCESS_RELAXED_ORDERING)); + MLX5_SET(mkc, seg, relaxed_ordering_read, + !!(umrwr->access_flags & IB_ACCESS_RELAXED_ORDERING)); + if (umrwr->pd) MLX5_SET(mkc, seg, pd, to_mpd(umrwr->pd)->pdn); if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION && @@ -1230,8 +1257,7 @@ static int handle_qpt_reg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, qp->sq.wr_data[idx] = MLX5_IB_WR_UMR; (*ctrl)->imm = cpu_to_be32(umr_wr(wr)->mkey); - err = set_reg_umr_segment(dev, *seg, wr, - !!(MLX5_CAP_GEN(dev->mdev, atomic))); + err = set_reg_umr_segment(dev, *seg, wr); if (unlikely(err)) goto out; *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index d184b579617f..4d3376e20f5e 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -276,7 +276,9 @@ enum { MLX5_MKEY_MASK_RW = 1ull << 20, MLX5_MKEY_MASK_A = 1ull << 21, MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23, - MLX5_MKEY_MASK_FREE = 1ull << 29, + MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE = 1ull << 25, + MLX5_MKEY_MASK_FREE = 1ull << 29, + MLX5_MKEY_MASK_RELAXED_ORDERING_READ = 1ull << 47, }; enum { -- cgit v1.2.3 From f0c7baca180046824e07fc5f1326e83a8fd150c7 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 24 Jul 2020 22:44:41 +0200 Subject: genirq/affinity: Make affinity setting if activated opt-in John reported that on a RK3288 system the perf per CPU interrupts are all affine to CPU0 and provided the analysis: "It looks like what happens is that because the interrupts are not per-CPU in the hardware, armpmu_request_irq() calls irq_force_affinity() while the interrupt is deactivated and then request_irq() with IRQF_PERCPU | IRQF_NOBALANCING. Now when irq_startup() runs with IRQ_STARTUP_NORMAL, it calls irq_setup_affinity() which returns early because IRQF_PERCPU and IRQF_NOBALANCING are set, leaving the interrupt on its original CPU." This was broken by the recent commit which blocked interrupt affinity setting in hardware before activation of the interrupt. While this works in general, it does not work for this particular case. As contrary to the initial analysis not all interrupt chip drivers implement an activate callback, the safe cure is to make the deferred interrupt affinity setting at activation time opt-in. Implement the necessary core logic and make the two irqchip implementations for which this is required opt-in. In hindsight this would have been the right thing to do, but ... 
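[Editor's note: a minimal sketch of how an irqchip driver opts in, following the same pattern the GIC-v3 ITS hunk below uses. The domain, chip and the way the base hwirq is derived from @arg are made up for illustration; irqd_set_affinity_on_activate() is the new core helper introduced by this patch.]

#include <linux/irq.h>
#include <linux/irqdomain.h>

static struct irq_chip example_irq_chip = {
	.name = "example",
};

/*
 * Hypothetical ->alloc() callback of a hierarchical irqdomain whose hardware
 * can only honour an affinity change once the interrupt is activated.
 * Flagging each descriptor makes the core defer irq_chip::irq_set_affinity()
 * until activation time instead of calling it on the inactive interrupt.
 */
static int example_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *arg)
{
	irq_hw_number_t hwirq = *(irq_hw_number_t *)arg;  /* base hwirq, driver specific */
	struct irq_data *irqd;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &example_irq_chip, NULL);

		irqd = irq_get_irq_data(virq + i);
		irqd_set_single_target(irqd);
		/* opt in: defer affinity programming until activation */
		irqd_set_affinity_on_activate(irqd);
	}

	return 0;
}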
Fixes: baedb87d1b53 ("genirq/affinity: Handle affinity setting on inactive interrupts correctly") Reported-by: John Keeping Signed-off-by: Thomas Gleixner Tested-by: Marc Zyngier Acked-by: Marc Zyngier Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/87blk4tzgm.fsf@nanos.tec.linutronix.de --- arch/x86/kernel/apic/vector.c | 4 ++++ drivers/irqchip/irq-gic-v3-its.c | 5 ++++- include/linux/irq.h | 13 +++++++++++++ kernel/irq/manage.c | 6 +++++- 4 files changed, 26 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 7649da2478d8..dae32d948bf2 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -560,6 +560,10 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, * as that can corrupt the affinity move state. */ irqd_set_handle_enforce_irqctx(irqd); + + /* Don't invoke affinity setter on deactivated interrupts */ + irqd_set_affinity_on_activate(irqd); + /* * Legacy vectors are already assigned when the IOAPIC * takes them over. They stay on the same vector. This is diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index beac4caefad9..103d850b5595 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -3523,6 +3523,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, msi_alloc_info_t *info = args; struct its_device *its_dev = info->scratchpad[0].ptr; struct its_node *its = its_dev->its; + struct irq_data *irqd; irq_hw_number_t hwirq; int err; int i; @@ -3542,7 +3543,9 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, &its_irq_chip, its_dev); - irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i))); + irqd = irq_get_irq_data(virq + i); + irqd_set_single_target(irqd); + irqd_set_affinity_on_activate(irqd); pr_debug("ID:%d pID:%d vID:%d\n", (int)(hwirq + i - its_dev->event_map.lpi_base), (int)(hwirq + i), virq + i); diff --git a/include/linux/irq.h b/include/linux/irq.h index 8d5bc2c237d7..1b7f4dfee35b 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -213,6 +213,8 @@ struct irq_data { * required * IRQD_HANDLE_ENFORCE_IRQCTX - Enforce that handle_irq_*() is only invoked * from actual interrupt context. + * IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call + * irq_chip::irq_set_affinity() when deactivated. 
*/ enum { IRQD_TRIGGER_MASK = 0xf, @@ -237,6 +239,7 @@ enum { IRQD_CAN_RESERVE = (1 << 26), IRQD_MSI_NOMASK_QUIRK = (1 << 27), IRQD_HANDLE_ENFORCE_IRQCTX = (1 << 28), + IRQD_AFFINITY_ON_ACTIVATE = (1 << 29), }; #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) @@ -421,6 +424,16 @@ static inline bool irqd_msi_nomask_quirk(struct irq_data *d) return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK; } +static inline void irqd_set_affinity_on_activate(struct irq_data *d) +{ + __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE; +} + +static inline bool irqd_affinity_on_activate(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE; +} + #undef __irqd_to_state static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 2a9fec53e159..48c38e09c673 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -320,12 +320,16 @@ static bool irq_set_affinity_deactivated(struct irq_data *data, struct irq_desc *desc = irq_data_to_desc(data); /* + * Handle irq chips which can handle affinity only in activated + * state correctly + * * If the interrupt is not yet activated, just store the affinity * mask and do not call the chip driver at all. On activation the * driver has to make sure anyway that the interrupt is in a * useable state so startup works. */ - if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) || irqd_is_activated(data)) + if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) || + irqd_is_activated(data) || !irqd_affinity_on_activate(data)) return false; cpumask_copy(desc->irq_common_data.affinity, mask); -- cgit v1.2.3 From 229f5879facf96e5640c0385f62b8cb5f27b8a43 Mon Sep 17 00:00:00 2001 From: Kishon Vijay Abraham I Date: Wed, 22 Jul 2020 16:33:05 +0530 Subject: linux/kernel.h: Add PTR_ALIGN_DOWN macro Add a macro for aligning down a pointer. This is useful to get an aligned register address when a device allows only word access and doesn't allow half word or byte access. Link: https://lore.kernel.org/r/20200722110317.4744-4-kishon@ti.com Signed-off-by: Kishon Vijay Abraham I Signed-off-by: Lorenzo Pieralisi Acked-by: Rob Herring --- include/linux/kernel.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 82d91547d122..7339a00c895e 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -34,6 +34,7 @@ #define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a)) #define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask)) #define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) +#define PTR_ALIGN_DOWN(p, a) ((typeof(p))ALIGN_DOWN((unsigned long)(p), (a))) #define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) /* generic data direction definitions */ -- cgit v1.2.3 From b54cecf5e2293d15620f7b3f8d1bf486243d5643 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Sun, 7 Jun 2020 12:10:40 +0300 Subject: fsnotify: pass dir argument to handle_event() callback The 'inode' argument to handle_event(), sometimes referred to as 'to_tell' is somewhat obsolete. It is a remnant from the times when a group could only have an inode mark associated with an event. We now pass an iter_info array to the callback, with all marks associated with an event. Most backends ignore this argument, with two exceptions: 1. dnotify uses it for sanity check that event is on directory 2. fanotify uses it to report fid of directory on directory entry modification events Remove the 'inode' argument and add a 'dir' argument. 
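[Editor's note: a hypothetical backend callback written against the reworked prototype documented in the fsnotify_ops hunk below; the function and ops names are illustrative only.]

#include <linux/fs.h>
#include <linux/fsnotify_backend.h>

/*
 * Sketch of a handle_event() implementation using the new signature:
 * @dir is the watched directory when @file_name names an entry inside it,
 * and NULL for events that carry no name.
 */
static int example_handle_event(struct fsnotify_group *group, u32 mask,
				const void *data, int data_type,
				struct inode *dir,
				const struct qstr *file_name, u32 cookie,
				struct fsnotify_iter_info *iter_info)
{
	/* only care about directory entry events such as create/delete/move */
	if (!dir || !file_name)
		return 0;

	pr_debug("event 0x%x on \"%.*s\" in dir inode %lu\n",
		 mask, (int)file_name->len, file_name->name, dir->i_ino);
	return 0;
}

static const struct fsnotify_ops example_fsnotify_ops = {
	.handle_event	= example_handle_event,
};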
The callback function signature is deliberately changed, because the meaning of the argument has changed and the arguments have been documented. The 'dir' argument is set to when 'file_name' is specified and it is referring to the directory that the 'file_name' entry belongs to. Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- fs/nfsd/filecache.c | 6 +++--- fs/notify/dnotify/dnotify.c | 8 ++++---- fs/notify/fanotify/fanotify.c | 23 +++++++++++------------ fs/notify/fsnotify.c | 26 ++++++++++++-------------- fs/notify/inotify/inotify.h | 6 +++--- fs/notify/inotify/inotify_fsnotify.c | 7 +++---- fs/notify/inotify/inotify_user.c | 4 ++-- include/linux/fsnotify_backend.h | 16 +++++++++++++--- kernel/audit_fsnotify.c | 10 +++++----- kernel/audit_tree.c | 6 +++--- kernel/audit_watch.c | 6 +++--- 11 files changed, 62 insertions(+), 56 deletions(-) (limited to 'include') diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c index ace8e5c30952..bbc7892d2928 100644 --- a/fs/nfsd/filecache.c +++ b/fs/nfsd/filecache.c @@ -598,9 +598,9 @@ static struct notifier_block nfsd_file_lease_notifier = { }; static int -nfsd_file_fsnotify_handle_event(struct fsnotify_group *group, - struct inode *to_tell, - u32 mask, const void *data, int data_type, +nfsd_file_fsnotify_handle_event(struct fsnotify_group *group, u32 mask, + const void *data, int data_type, + struct inode *dir, const struct qstr *file_name, u32 cookie, struct fsnotify_iter_info *iter_info) { diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c index 7a42c2ebe28d..2d2eadfb5186 100644 --- a/fs/notify/dnotify/dnotify.c +++ b/fs/notify/dnotify/dnotify.c @@ -70,9 +70,9 @@ static void dnotify_recalc_inode_mask(struct fsnotify_mark *fsn_mark) * destroy the dnotify struct if it was not registered to receive multiple * events. */ -static int dnotify_handle_event(struct fsnotify_group *group, - struct inode *inode, - u32 mask, const void *data, int data_type, +static int dnotify_handle_event(struct fsnotify_group *group, u32 mask, + const void *data, int data_type, + struct inode *dir, const struct qstr *file_name, u32 cookie, struct fsnotify_iter_info *iter_info) { @@ -84,7 +84,7 @@ static int dnotify_handle_event(struct fsnotify_group *group, __u32 test_mask = mask & ~FS_EVENT_ON_CHILD; /* not a dir, dnotify doesn't care */ - if (!S_ISDIR(inode->i_mode)) + if (!dir && !(mask & FS_ISDIR)) return 0; if (WARN_ON(fsnotify_iter_vfsmount_mark(iter_info))) diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index 41f5fc9a8f19..e417c64c365b 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -335,11 +335,11 @@ out: * FS_ATTRIB reports the child inode even if reported on a watched parent. * FS_CREATE reports the modified dir inode and not the created inode. 
*/ -static struct inode *fanotify_fid_inode(struct inode *to_tell, u32 event_mask, - const void *data, int data_type) +static struct inode *fanotify_fid_inode(u32 event_mask, const void *data, + int data_type, struct inode *dir) { if (event_mask & ALL_FSNOTIFY_DIRENT_EVENTS) - return to_tell; + return dir; return fsnotify_data_inode(data, data_type); } @@ -416,14 +416,14 @@ static struct fanotify_event *fanotify_alloc_name_event(struct inode *id, } static struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group, - struct inode *inode, u32 mask, - const void *data, int data_type, + u32 mask, const void *data, + int data_type, struct inode *dir, const struct qstr *file_name, __kernel_fsid_t *fsid) { struct fanotify_event *event = NULL; gfp_t gfp = GFP_KERNEL_ACCOUNT; - struct inode *id = fanotify_fid_inode(inode, mask, data, data_type); + struct inode *id = fanotify_fid_inode(mask, data, data_type, dir); const struct path *path = fsnotify_data_path(data, data_type); /* @@ -507,9 +507,9 @@ static __kernel_fsid_t fanotify_get_fsid(struct fsnotify_iter_info *iter_info) return fsid; } -static int fanotify_handle_event(struct fsnotify_group *group, - struct inode *inode, - u32 mask, const void *data, int data_type, +static int fanotify_handle_event(struct fsnotify_group *group, u32 mask, + const void *data, int data_type, + struct inode *dir, const struct qstr *file_name, u32 cookie, struct fsnotify_iter_info *iter_info) { @@ -546,8 +546,7 @@ static int fanotify_handle_event(struct fsnotify_group *group, if (!mask) return 0; - pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode, - mask); + pr_debug("%s: group=%p mask=%x\n", __func__, group, mask); if (fanotify_is_perm_event(mask)) { /* @@ -565,7 +564,7 @@ static int fanotify_handle_event(struct fsnotify_group *group, return 0; } - event = fanotify_alloc_event(group, inode, mask, data, data_type, + event = fanotify_alloc_event(group, mask, data, data_type, dir, file_name, &fsid); ret = -ENOMEM; if (unlikely(!event)) { diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index 30628a72ca01..c4ac4d13e10f 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -185,11 +185,9 @@ notify_child: } EXPORT_SYMBOL_GPL(__fsnotify_parent); -static int send_to_group(struct inode *to_tell, - __u32 mask, const void *data, - int data_is, u32 cookie, - const struct qstr *file_name, - struct fsnotify_iter_info *iter_info) +static int send_to_group(__u32 mask, const void *data, int data_type, + struct inode *dir, const struct qstr *file_name, + u32 cookie, struct fsnotify_iter_info *iter_info) { struct fsnotify_group *group = NULL; __u32 test_mask = (mask & ALL_FSNOTIFY_EVENTS); @@ -225,15 +223,14 @@ static int send_to_group(struct inode *to_tell, } } - pr_debug("%s: group=%p to_tell=%p mask=%x marks_mask=%x marks_ignored_mask=%x" - " data=%p data_is=%d cookie=%d\n", - __func__, group, to_tell, mask, marks_mask, marks_ignored_mask, - data, data_is, cookie); + pr_debug("%s: group=%p mask=%x marks_mask=%x marks_ignored_mask=%x data=%p data_type=%d dir=%p cookie=%d\n", + __func__, group, mask, marks_mask, marks_ignored_mask, + data, data_type, dir, cookie); if (!(test_mask & marks_mask & ~marks_ignored_mask)) return 0; - return group->ops->handle_event(group, to_tell, mask, data, data_is, + return group->ops->handle_event(group, mask, data, data_type, dir, file_name, cookie, iter_info); } @@ -317,12 +314,13 @@ static void fsnotify_iter_next(struct fsnotify_iter_info *iter_info) * out to all of the registered fsnotify_group. 
Those groups can then use the * notification event in whatever means they feel necessary. */ -int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, +int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_type, const struct qstr *file_name, u32 cookie) { - const struct path *path = fsnotify_data_path(data, data_is); + const struct path *path = fsnotify_data_path(data, data_type); struct fsnotify_iter_info iter_info = {}; struct super_block *sb = to_tell->i_sb; + struct inode *dir = file_name ? to_tell : NULL; struct mount *mnt = NULL; int ret = 0; __u32 test_mask, marks_mask; @@ -375,8 +373,8 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, * That's why this traversal is so complicated... */ while (fsnotify_iter_select_report_types(&iter_info)) { - ret = send_to_group(to_tell, mask, data, data_is, cookie, - file_name, &iter_info); + ret = send_to_group(mask, data, data_type, dir, file_name, + cookie, &iter_info); if (ret && (mask & ALL_FSNOTIFY_PERM_EVENTS)) goto out; diff --git a/fs/notify/inotify/inotify.h b/fs/notify/inotify/inotify.h index 3f246f7b8a92..4327d0e9c364 100644 --- a/fs/notify/inotify/inotify.h +++ b/fs/notify/inotify/inotify.h @@ -24,9 +24,9 @@ static inline struct inotify_event_info *INOTIFY_E(struct fsnotify_event *fse) extern void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark, struct fsnotify_group *group); -extern int inotify_handle_event(struct fsnotify_group *group, - struct inode *inode, - u32 mask, const void *data, int data_type, +extern int inotify_handle_event(struct fsnotify_group *group, u32 mask, + const void *data, int data_type, + struct inode *dir, const struct qstr *file_name, u32 cookie, struct fsnotify_iter_info *iter_info); diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c index 9b481460a2dc..dfd455798a1b 100644 --- a/fs/notify/inotify/inotify_fsnotify.c +++ b/fs/notify/inotify/inotify_fsnotify.c @@ -55,9 +55,8 @@ static int inotify_merge(struct list_head *list, return event_compare(last_event, event); } -int inotify_handle_event(struct fsnotify_group *group, - struct inode *inode, - u32 mask, const void *data, int data_type, +int inotify_handle_event(struct fsnotify_group *group, u32 mask, + const void *data, int data_type, struct inode *dir, const struct qstr *file_name, u32 cookie, struct fsnotify_iter_info *iter_info) { @@ -82,7 +81,7 @@ int inotify_handle_event(struct fsnotify_group *group, alloc_len += len + 1; } - pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode, + pr_debug("%s: group=%p mark=%p mask=%x\n", __func__, group, inode_mark, mask); i_mark = container_of(inode_mark, struct inotify_inode_mark, diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index f88bbcc9efeb..5385d5817dd9 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -490,8 +490,8 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark, fsn_mark); /* Queue ignore event for the watch */ - inotify_handle_event(group, NULL, FS_IN_IGNORED, NULL, - FSNOTIFY_EVENT_NONE, NULL, 0, &iter_info); + inotify_handle_event(group, FS_IN_IGNORED, NULL, FSNOTIFY_EVENT_NONE, + NULL, NULL, 0, &iter_info); i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark); /* remove this mark from the idr */ diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 97300f3b8ff0..0de130cbf72d 100644 --- a/include/linux/fsnotify_backend.h +++ 
b/include/linux/fsnotify_backend.h @@ -108,6 +108,17 @@ struct mem_cgroup; * these operations for each relevant group. * * handle_event - main call for a group to handle an fs event + * @group: group to notify + * @mask: event type and flags + * @data: object that event happened on + * @data_type: type of object for fanotify_data_XXX() accessors + * @dir: optional directory associated with event - + * if @file_name is not NULL, this is the directory that + * @file_name is relative to + * @file_name: optional file name associated with event + * @cookie: inotify rename cookie + * @iter_info: array of marks from this group that are interested in the event + * * free_group_priv - called when a group refcnt hits 0 to clean up the private union * freeing_mark - called when a mark is being destroyed for some reason. The group * MUST be holding a reference on each mark and that reference must be @@ -115,9 +126,8 @@ struct mem_cgroup; * userspace messages that marks have been removed. */ struct fsnotify_ops { - int (*handle_event)(struct fsnotify_group *group, - struct inode *inode, - u32 mask, const void *data, int data_type, + int (*handle_event)(struct fsnotify_group *group, u32 mask, + const void *data, int data_type, struct inode *dir, const struct qstr *file_name, u32 cookie, struct fsnotify_iter_info *iter_info); void (*free_group_priv)(struct fsnotify_group *group); diff --git a/kernel/audit_fsnotify.c b/kernel/audit_fsnotify.c index 3596448bfdab..30ca239285a3 100644 --- a/kernel/audit_fsnotify.c +++ b/kernel/audit_fsnotify.c @@ -152,11 +152,11 @@ static void audit_autoremove_mark_rule(struct audit_fsnotify_mark *audit_mark) } /* Update mark data in audit rules based on fsnotify events. */ -static int audit_mark_handle_event(struct fsnotify_group *group, - struct inode *to_tell, - u32 mask, const void *data, int data_type, - const struct qstr *dname, u32 cookie, - struct fsnotify_iter_info *iter_info) +static int audit_mark_handle_event(struct fsnotify_group *group, u32 mask, + const void *data, int data_type, + struct inode *dir, + const struct qstr *dname, u32 cookie, + struct fsnotify_iter_info *iter_info) { struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info); struct audit_fsnotify_mark *audit_mark; diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index e49c912f862d..2ce2ac1ce100 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -1037,9 +1037,9 @@ static void evict_chunk(struct audit_chunk *chunk) audit_schedule_prune(); } -static int audit_tree_handle_event(struct fsnotify_group *group, - struct inode *to_tell, - u32 mask, const void *data, int data_type, +static int audit_tree_handle_event(struct fsnotify_group *group, u32 mask, + const void *data, int data_type, + struct inode *dir, const struct qstr *file_name, u32 cookie, struct fsnotify_iter_info *iter_info) { diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index e09c551ae52d..61fd601f1edf 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -464,9 +464,9 @@ void audit_remove_watch_rule(struct audit_krule *krule) } /* Update watch data in audit rules based on fsnotify events. 
*/ -static int audit_watch_handle_event(struct fsnotify_group *group, - struct inode *to_tell, - u32 mask, const void *data, int data_type, +static int audit_watch_handle_event(struct fsnotify_group *group, u32 mask, + const void *data, int data_type, + struct inode *dir, const struct qstr *dname, u32 cookie, struct fsnotify_iter_info *iter_info) { -- cgit v1.2.3 From b4e9c9549f62329d2412f899635fddc5212b9cd4 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 1 Jun 2020 19:42:40 -0400 Subject: introduction of regset ->get() wrappers, switching ELF coredumps to those Two new helpers: given a process and regset, dump into a buffer. regset_get() takes a buffer and size, regset_get_alloc() takes size and allocates a buffer. Return value in both cases is the amount of data actually dumped in case of success or -E... on error. In both cases the size is capped by regset->n * regset->size, so ->get() is called with offset 0 and size no more than what regset expects. binfmt_elf.c callers of ->get() are switched to using those; the other caller (copy_regset_to_user()) will need some preparations to switch. Signed-off-by: Al Viro --- fs/binfmt_elf.c | 54 ++++++++++++++++++++++++-------------------------- include/linux/regset.h | 9 +++++++++ kernel/Makefile | 2 +- kernel/regset.c | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 90 insertions(+), 29 deletions(-) create mode 100644 kernel/regset.c (limited to 'include') diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 9fe3b51c116a..e922a6abdca8 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1821,7 +1821,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t, long signr, size_t *total) { unsigned int i; - unsigned int regset0_size = regset_size(t->task, &view->regsets[0]); + int regset0_size; /* * NT_PRSTATUS is the one special case, because the regset data @@ -1830,8 +1830,10 @@ static int fill_thread_core_info(struct elf_thread_core_info *t, * We assume that regset 0 is NT_PRSTATUS. 
*/ fill_prstatus(&t->prstatus, t->task, signr); - (void) view->regsets[0].get(t->task, &view->regsets[0], 0, regset0_size, - &t->prstatus.pr_reg, NULL); + regset0_size = regset_get(t->task, &view->regsets[0], + sizeof(t->prstatus.pr_reg), &t->prstatus.pr_reg); + if (regset0_size < 0) + return 0; fill_note(&t->notes[0], "CORE", NT_PRSTATUS, PRSTATUS_SIZE(t->prstatus, regset0_size), &t->prstatus); @@ -1846,32 +1848,28 @@ static int fill_thread_core_info(struct elf_thread_core_info *t, */ for (i = 1; i < view->n; ++i) { const struct user_regset *regset = &view->regsets[i]; + int note_type = regset->core_note_type; + bool is_fpreg = note_type == NT_PRFPREG; + void *data; + int ret; + do_thread_regset_writeback(t->task, regset); - if (regset->core_note_type && regset->get && - (!regset->active || regset->active(t->task, regset) > 0)) { - int ret; - size_t size = regset_size(t->task, regset); - void *data = kzalloc(size, GFP_KERNEL); - if (unlikely(!data)) - return 0; - ret = regset->get(t->task, regset, - 0, size, data, NULL); - if (unlikely(ret)) - kfree(data); - else { - if (regset->core_note_type != NT_PRFPREG) - fill_note(&t->notes[i], "LINUX", - regset->core_note_type, - size, data); - else { - SET_PR_FPVALID(&t->prstatus, - 1, regset0_size); - fill_note(&t->notes[i], "CORE", - NT_PRFPREG, size, data); - } - *total += notesize(&t->notes[i]); - } - } + if (!note_type) // not for coredumps + continue; + if (regset->active && regset->active(t->task, regset) <= 0) + continue; + + ret = regset_get_alloc(t->task, regset, ~0U, &data); + if (ret < 0) + continue; + + if (is_fpreg) + SET_PR_FPVALID(&t->prstatus, 1, regset0_size); + + fill_note(&t->notes[i], is_fpreg ? "CORE" : "LINUX", + note_type, ret, data); + + *total += notesize(&t->notes[i]); } return 1; diff --git a/include/linux/regset.h b/include/linux/regset.h index 46d6ae68c455..968a032922d5 100644 --- a/include/linux/regset.h +++ b/include/linux/regset.h @@ -353,6 +353,15 @@ static inline int user_regset_copyin_ignore(unsigned int *pos, return 0; } +extern int regset_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int size, void *data); + +extern int regset_get_alloc(struct task_struct *target, + const struct user_regset *regset, + unsigned int size, + void **data); + /** * copy_regset_to_user - fetch a thread's user_regset data into user memory * @target: thread to be examined diff --git a/kernel/Makefile b/kernel/Makefile index f3218bc5ec69..e6e03380a0f1 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -10,7 +10,7 @@ obj-y = fork.o exec_domain.o panic.o \ extable.o params.o \ kthread.o sys_ni.o nsproxy.o \ notifier.o ksysfs.o cred.o reboot.o \ - async.o range.o smpboot.o ucount.o + async.o range.o smpboot.o ucount.o regset.o obj-$(CONFIG_MODULES) += kmod.o obj-$(CONFIG_MULTIUSER) += groups.o diff --git a/kernel/regset.c b/kernel/regset.c new file mode 100644 index 000000000000..6b39fa0993ec --- /dev/null +++ b/kernel/regset.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include +#include + +static int __regset_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int size, + void **data) +{ + void *p = *data, *to_free = NULL; + int res; + + if (!regset->get) + return -EOPNOTSUPP; + if (size > regset->n * regset->size) + size = regset->n * regset->size; + if (!p) { + to_free = p = kzalloc(size, GFP_KERNEL); + if (!p) + return -ENOMEM; + } + res = regset->get(target, regset, 0, size, p, NULL); + if (unlikely(res < 0)) { + kfree(to_free); + return res; + 
} + *data = p; + if (regset->get_size) { // arm64-only kludge, will go away + unsigned max_size = regset->get_size(target, regset); + if (size > max_size) + size = max_size; + } + return size; +} + +int regset_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int size, + void *data) +{ + return __regset_get(target, regset, size, &data); +} +EXPORT_SYMBOL(regset_get); + +int regset_get_alloc(struct task_struct *target, + const struct user_regset *regset, + unsigned int size, + void **data) +{ + *data = NULL; + return __regset_get(target, regset, size, data); +} +EXPORT_SYMBOL(regset_get_alloc); -- cgit v1.2.3 From 1e6b57d6421f0343dd11619612e5ff8930cddf38 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 11 Jun 2020 11:11:32 -0400 Subject: unexport linux/elfcore.h It's unusable from userland - it uses elf_gregset_t, which is not provided by exported headers. glibc has it in sys/procfs.h, but the same file defines struct elf_prstatus, so linux/elfcore.h can't be included once sys/procfs.h has been pulled. Same goes for uclibc and dietlibc simply doesn't have elf_gregset_t defined anywhere. IOW, no userland source is including that thing. Signed-off-by: Al Viro --- include/linux/elfcore.h | 69 +++++++++++++++++++++++++++-- include/uapi/linux/elfcore.h | 101 ------------------------------------------- scripts/headers_install.sh | 1 - usr/include/Makefile | 1 - 4 files changed, 66 insertions(+), 106 deletions(-) delete mode 100644 include/uapi/linux/elfcore.h (limited to 'include') diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h index 4cad0e784b28..96ab215dad2d 100644 --- a/include/linux/elfcore.h +++ b/include/linux/elfcore.h @@ -5,12 +5,75 @@ #include #include #include - -#include -#include +#include +#include +#include +#include +#include +#include struct coredump_params; +struct elf_siginfo +{ + int si_signo; /* signal number */ + int si_code; /* extra code */ + int si_errno; /* errno */ +}; + +/* + * Definitions to generate Intel SVR4-like core files. + * These mostly have the same names as the SVR4 types with "elf_" + * tacked on the front to prevent clashes with linux definitions, + * and the typedef forms have been avoided. This is mostly like + * the SVR4 structure, but more Linuxy, with things that Linux does + * not support and which gdb doesn't really use excluded. + */ +struct elf_prstatus +{ + struct elf_siginfo pr_info; /* Info associated with signal */ + short pr_cursig; /* Current signal */ + unsigned long pr_sigpend; /* Set of pending signals */ + unsigned long pr_sighold; /* Set of held signals */ + pid_t pr_pid; + pid_t pr_ppid; + pid_t pr_pgrp; + pid_t pr_sid; + struct __kernel_old_timeval pr_utime; /* User time */ + struct __kernel_old_timeval pr_stime; /* System time */ + struct __kernel_old_timeval pr_cutime; /* Cumulative user time */ + struct __kernel_old_timeval pr_cstime; /* Cumulative system time */ + elf_gregset_t pr_reg; /* GP registers */ +#ifdef CONFIG_BINFMT_ELF_FDPIC + /* When using FDPIC, the loadmap addresses need to be communicated + * to GDB in order for GDB to do the necessary relocations. The + * fields (below) used to communicate this information are placed + * immediately after ``pr_reg'', so that the loadmap addresses may + * be viewed as part of the register set if so desired. + */ + unsigned long pr_exec_fdpic_loadmap; + unsigned long pr_interp_fdpic_loadmap; +#endif + int pr_fpvalid; /* True if math co-processor being used. 
*/ +}; + +#define ELF_PRARGSZ (80) /* Number of chars for args */ + +struct elf_prpsinfo +{ + char pr_state; /* numeric process state */ + char pr_sname; /* char for pr_state */ + char pr_zomb; /* zombie */ + char pr_nice; /* nice val */ + unsigned long pr_flag; /* flags */ + __kernel_uid_t pr_uid; + __kernel_gid_t pr_gid; + pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; + /* Lots missing */ + char pr_fname[16]; /* filename of executable */ + char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ +}; + static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *regs) { #ifdef ELF_CORE_COPY_REGS diff --git a/include/uapi/linux/elfcore.h b/include/uapi/linux/elfcore.h deleted file mode 100644 index baf03562306d..000000000000 --- a/include/uapi/linux/elfcore.h +++ /dev/null @@ -1,101 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _UAPI_LINUX_ELFCORE_H -#define _UAPI_LINUX_ELFCORE_H - -#include -#include -#include -#include -#include -#include - -struct elf_siginfo -{ - int si_signo; /* signal number */ - int si_code; /* extra code */ - int si_errno; /* errno */ -}; - - -#ifndef __KERNEL__ -typedef elf_greg_t greg_t; -typedef elf_gregset_t gregset_t; -typedef elf_fpregset_t fpregset_t; -typedef elf_fpxregset_t fpxregset_t; -#define NGREG ELF_NGREG -#endif - -/* - * Definitions to generate Intel SVR4-like core files. - * These mostly have the same names as the SVR4 types with "elf_" - * tacked on the front to prevent clashes with linux definitions, - * and the typedef forms have been avoided. This is mostly like - * the SVR4 structure, but more Linuxy, with things that Linux does - * not support and which gdb doesn't really use excluded. - * Fields present but not used are marked with "XXX". - */ -struct elf_prstatus -{ -#if 0 - long pr_flags; /* XXX Process flags */ - short pr_why; /* XXX Reason for process halt */ - short pr_what; /* XXX More detailed reason */ -#endif - struct elf_siginfo pr_info; /* Info associated with signal */ - short pr_cursig; /* Current signal */ - unsigned long pr_sigpend; /* Set of pending signals */ - unsigned long pr_sighold; /* Set of held signals */ -#if 0 - struct sigaltstack pr_altstack; /* Alternate stack info */ - struct sigaction pr_action; /* Signal action for current sig */ -#endif - pid_t pr_pid; - pid_t pr_ppid; - pid_t pr_pgrp; - pid_t pr_sid; - struct __kernel_old_timeval pr_utime; /* User time */ - struct __kernel_old_timeval pr_stime; /* System time */ - struct __kernel_old_timeval pr_cutime; /* Cumulative user time */ - struct __kernel_old_timeval pr_cstime; /* Cumulative system time */ -#if 0 - long pr_instr; /* Current instruction */ -#endif - elf_gregset_t pr_reg; /* GP registers */ -#ifdef CONFIG_BINFMT_ELF_FDPIC - /* When using FDPIC, the loadmap addresses need to be communicated - * to GDB in order for GDB to do the necessary relocations. The - * fields (below) used to communicate this information are placed - * immediately after ``pr_reg'', so that the loadmap addresses may - * be viewed as part of the register set if so desired. - */ - unsigned long pr_exec_fdpic_loadmap; - unsigned long pr_interp_fdpic_loadmap; -#endif - int pr_fpvalid; /* True if math co-processor being used. 
*/ -}; - -#define ELF_PRARGSZ (80) /* Number of chars for args */ - -struct elf_prpsinfo -{ - char pr_state; /* numeric process state */ - char pr_sname; /* char for pr_state */ - char pr_zomb; /* zombie */ - char pr_nice; /* nice val */ - unsigned long pr_flag; /* flags */ - __kernel_uid_t pr_uid; - __kernel_gid_t pr_gid; - pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; - /* Lots missing */ - char pr_fname[16]; /* filename of executable */ - char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ -}; - -#ifndef __KERNEL__ -typedef struct elf_prstatus prstatus_t; -typedef struct elf_prpsinfo prpsinfo_t; -#define PRARGSZ ELF_PRARGSZ -#endif - - -#endif /* _UAPI_LINUX_ELFCORE_H */ diff --git a/scripts/headers_install.sh b/scripts/headers_install.sh index 955cf3aedf21..9314247bb222 100755 --- a/scripts/headers_install.sh +++ b/scripts/headers_install.sh @@ -86,7 +86,6 @@ arch/x86/include/uapi/asm/auxvec.h:CONFIG_X86_64 arch/x86/include/uapi/asm/mman.h:CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS include/uapi/asm-generic/fcntl.h:CONFIG_64BIT include/uapi/linux/atmdev.h:CONFIG_COMPAT -include/uapi/linux/elfcore.h:CONFIG_BINFMT_ELF_FDPIC include/uapi/linux/eventpoll.h:CONFIG_PM_SLEEP include/uapi/linux/hw_breakpoint.h:CONFIG_HAVE_MIXED_BREAKPOINTS_REGS include/uapi/linux/pktcdvd.h:CONFIG_CDROM_PKTCDVD_WCACHE diff --git a/usr/include/Makefile b/usr/include/Makefile index 55362f3ab393..f6b3c85d900e 100644 --- a/usr/include/Makefile +++ b/usr/include/Makefile @@ -28,7 +28,6 @@ no-header-test += linux/am437x-vpfe.h no-header-test += linux/android/binder.h no-header-test += linux/android/binderfs.h no-header-test += linux/coda.h -no-header-test += linux/elfcore.h no-header-test += linux/errqueue.h no-header-test += linux/fsmap.h no-header-test += linux/hdlc/ioctl.h -- cgit v1.2.3 From 16aead81018ca404efe9bd928786824e7168151f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 14 Jun 2020 09:52:06 -0400 Subject: take fdpic-related parts of elf_prstatus out The only architecture where we might end up using both is arm, and there we definitely don't want fdpic-related fields in elf_prstatus - coredump layout of ELF binaries should not depend upon having the kernel built with the support of ELF_FDPIC ones. 
Just move the fdpic-modified variant into binfmt_elf_fdpic.c (and call it elf_prstatus_fdpic there) [name stolen from nico] Signed-off-by: Al Viro --- fs/binfmt_elf_fdpic.c | 32 +++++++++++++++++++++++++++++--- include/linux/elfcore-compat.h | 4 ---- include/linux/elfcore.h | 10 ---------- 3 files changed, 29 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 0f45521b237c..6e13d8bea32d 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -1189,6 +1189,32 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params, */ #ifdef CONFIG_ELF_CORE +struct elf_prstatus_fdpic +{ + struct elf_siginfo pr_info; /* Info associated with signal */ + short pr_cursig; /* Current signal */ + unsigned long pr_sigpend; /* Set of pending signals */ + unsigned long pr_sighold; /* Set of held signals */ + pid_t pr_pid; + pid_t pr_ppid; + pid_t pr_pgrp; + pid_t pr_sid; + struct __kernel_old_timeval pr_utime; /* User time */ + struct __kernel_old_timeval pr_stime; /* System time */ + struct __kernel_old_timeval pr_cutime; /* Cumulative user time */ + struct __kernel_old_timeval pr_cstime; /* Cumulative system time */ + elf_gregset_t pr_reg; /* GP registers */ + /* When using FDPIC, the loadmap addresses need to be communicated + * to GDB in order for GDB to do the necessary relocations. The + * fields (below) used to communicate this information are placed + * immediately after ``pr_reg'', so that the loadmap addresses may + * be viewed as part of the register set if so desired. + */ + unsigned long pr_exec_fdpic_loadmap; + unsigned long pr_interp_fdpic_loadmap; + int pr_fpvalid; /* True if math co-processor being used. */ +}; + /* * Decide whether a segment is worth dumping; default is yes to be * sure (missing info is worse than too much; etc). @@ -1345,7 +1371,7 @@ static inline void fill_note(struct memelfnote *note, const char *name, int type * fill up all the fields in prstatus from the given task struct, except * registers which need to be filled up separately. 
*/ -static void fill_prstatus(struct elf_prstatus *prstatus, +static void fill_prstatus(struct elf_prstatus_fdpic *prstatus, struct task_struct *p, long signr) { prstatus->pr_info.si_signo = prstatus->pr_cursig = signr; @@ -1428,7 +1454,7 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p, struct elf_thread_status { struct list_head list; - struct elf_prstatus prstatus; /* NT_PRSTATUS */ + struct elf_prstatus_fdpic prstatus; /* NT_PRSTATUS */ elf_fpregset_t fpu; /* NT_PRFPREG */ struct task_struct *thread; #ifdef ELF_CORE_COPY_XFPREGS @@ -1562,7 +1588,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm) loff_t offset = 0, dataoff; int numnote; struct memelfnote *notes = NULL; - struct elf_prstatus *prstatus = NULL; /* NT_PRSTATUS */ + struct elf_prstatus_fdpic *prstatus = NULL; /* NT_PRSTATUS */ struct elf_prpsinfo *psinfo = NULL; /* NT_PRPSINFO */ LIST_HEAD(thread_list); struct list_head *t; diff --git a/include/linux/elfcore-compat.h b/include/linux/elfcore-compat.h index 7a37f4ce9fd2..10485f0c9740 100644 --- a/include/linux/elfcore-compat.h +++ b/include/linux/elfcore-compat.h @@ -32,10 +32,6 @@ struct compat_elf_prstatus struct old_timeval32 pr_cutime; struct old_timeval32 pr_cstime; compat_elf_gregset_t pr_reg; -#ifdef CONFIG_BINFMT_ELF_FDPIC - compat_ulong_t pr_exec_fdpic_loadmap; - compat_ulong_t pr_interp_fdpic_loadmap; -#endif compat_int_t pr_fpvalid; }; diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h index 96ab215dad2d..adb8ee89f3fd 100644 --- a/include/linux/elfcore.h +++ b/include/linux/elfcore.h @@ -44,16 +44,6 @@ struct elf_prstatus struct __kernel_old_timeval pr_cutime; /* Cumulative user time */ struct __kernel_old_timeval pr_cstime; /* Cumulative system time */ elf_gregset_t pr_reg; /* GP registers */ -#ifdef CONFIG_BINFMT_ELF_FDPIC - /* When using FDPIC, the loadmap addresses need to be communicated - * to GDB in order for GDB to do the necessary relocations. The - * fields (below) used to communicate this information are placed - * immediately after ``pr_reg'', so that the loadmap addresses may - * be viewed as part of the register set if so desired. - */ - unsigned long pr_exec_fdpic_loadmap; - unsigned long pr_interp_fdpic_loadmap; -#endif int pr_fpvalid; /* True if math co-processor being used. 
*/ }; -- cgit v1.2.3 From 7a896028adcfbff4552e6748e8fc8d06036c132c Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 13 Jun 2020 00:23:31 -0400 Subject: kill elf_fpxregs_t all uses are conditional upon ELF_CORE_COPY_XFPREGS, which has not been defined on any architecture since 2010 Signed-off-by: Al Viro --- arch/ia64/include/asm/elf.h | 2 -- arch/powerpc/include/asm/elf.h | 2 -- arch/x86/include/asm/elf.h | 2 -- fs/binfmt_elf.c | 30 ------------------------------ fs/binfmt_elf_fdpic.c | 28 ---------------------------- include/linux/elfcore.h | 7 ------- 6 files changed, 71 deletions(-) (limited to 'include') diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h index c70bb9c11f52..6629301a2620 100644 --- a/arch/ia64/include/asm/elf.h +++ b/arch/ia64/include/asm/elf.h @@ -179,8 +179,6 @@ extern void ia64_init_addr_space (void); #define ELF_AR_SSD_OFFSET (56 * sizeof(elf_greg_t)) #define ELF_AR_END_OFFSET (57 * sizeof(elf_greg_t)) -typedef unsigned long elf_fpxregset_t; - typedef unsigned long elf_greg_t; typedef elf_greg_t elf_gregset_t[ELF_NGREG]; diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index 57c229a86f08..53ed2ca40151 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h @@ -53,8 +53,6 @@ static inline void ppc_elf_core_copy_regs(elf_gregset_t elf_regs, } #define ELF_CORE_COPY_REGS(gregs, regs) ppc_elf_core_copy_regs(gregs, regs); -typedef elf_vrregset_t elf_fpxregset_t; - /* ELF_HWCAP yields a mask that user programs can use to figure out what instruction set this cpu supports. This could be done in userspace, but it's not easy, and we've already done it here. */ diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h index 452beed7892b..b9a5d488f1a5 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h @@ -21,8 +21,6 @@ typedef struct user_i387_struct elf_fpregset_t; #ifdef __i386__ -typedef struct user_fxsr_struct elf_fpxregset_t; - #define R_386_NONE 0 #define R_386_32 1 #define R_386_PC32 2 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index e922a6abdca8..13d053982dd7 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -2038,9 +2038,6 @@ struct elf_thread_status struct elf_prstatus prstatus; /* NT_PRSTATUS */ elf_fpregset_t fpu; /* NT_PRFPREG */ struct task_struct *thread; -#ifdef ELF_CORE_COPY_XFPREGS - elf_fpxregset_t xfpu; /* ELF_CORE_XFPREG_TYPE */ -#endif struct memelfnote notes[3]; int num_notes; }; @@ -2071,15 +2068,6 @@ static int elf_dump_thread_status(long signr, struct elf_thread_status *t) t->num_notes++; sz += notesize(&t->notes[1]); } - -#ifdef ELF_CORE_COPY_XFPREGS - if (elf_core_copy_task_xfpregs(p, &t->xfpu)) { - fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE, - sizeof(t->xfpu), &t->xfpu); - t->num_notes++; - sz += notesize(&t->notes[2]); - } -#endif return sz; } @@ -2090,9 +2078,6 @@ struct elf_note_info { struct elf_prpsinfo *psinfo; /* NT_PRPSINFO */ struct list_head thread_list; elf_fpregset_t *fpu; -#ifdef ELF_CORE_COPY_XFPREGS - elf_fpxregset_t *xfpu; -#endif user_siginfo_t csigdata; int thread_status_size; int numnote; @@ -2116,11 +2101,6 @@ static int elf_note_info_init(struct elf_note_info *info) info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL); if (!info->fpu) return 0; -#ifdef ELF_CORE_COPY_XFPREGS - info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL); - if (!info->xfpu) - return 0; -#endif return 1; } @@ -2184,13 +2164,6 @@ static int fill_note_info(struct elfhdr *elf, int phdrs, if (info->prstatus->pr_fpvalid) 
fill_note(info->notes + info->numnote++, "CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu); -#ifdef ELF_CORE_COPY_XFPREGS - if (elf_core_copy_task_xfpregs(current, info->xfpu)) - fill_note(info->notes + info->numnote++, - "LINUX", ELF_CORE_XFPREG_TYPE, - sizeof(*info->xfpu), info->xfpu); -#endif - return 1; } @@ -2243,9 +2216,6 @@ static void free_note_info(struct elf_note_info *info) kfree(info->psinfo); kfree(info->notes); kfree(info->fpu); -#ifdef ELF_CORE_COPY_XFPREGS - kfree(info->xfpu); -#endif } #endif diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 6e13d8bea32d..a6ee92137529 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -1457,9 +1457,6 @@ struct elf_thread_status struct elf_prstatus_fdpic prstatus; /* NT_PRSTATUS */ elf_fpregset_t fpu; /* NT_PRFPREG */ struct task_struct *thread; -#ifdef ELF_CORE_COPY_XFPREGS - elf_fpxregset_t xfpu; /* ELF_CORE_XFPREG_TYPE */ -#endif struct memelfnote notes[3]; int num_notes; }; @@ -1491,15 +1488,6 @@ static int elf_dump_thread_status(long signr, struct elf_thread_status *t) t->num_notes++; sz += notesize(&t->notes[1]); } - -#ifdef ELF_CORE_COPY_XFPREGS - if (elf_core_copy_task_xfpregs(p, &t->xfpu)) { - fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE, - sizeof(t->xfpu), &t->xfpu); - t->num_notes++; - sz += notesize(&t->notes[2]); - } -#endif return sz; } @@ -1593,9 +1581,6 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm) LIST_HEAD(thread_list); struct list_head *t; elf_fpregset_t *fpu = NULL; -#ifdef ELF_CORE_COPY_XFPREGS - elf_fpxregset_t *xfpu = NULL; -#endif int thread_status_size = 0; elf_addr_t *auxv; struct elf_phdr *phdr4note = NULL; @@ -1634,11 +1619,6 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm) fpu = kmalloc(sizeof(*fpu), GFP_KERNEL); if (!fpu) goto end_coredump; -#ifdef ELF_CORE_COPY_XFPREGS - xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL); - if (!xfpu) - goto end_coredump; -#endif for (ct = current->mm->core_state->dumper.next; ct; ct = ct->next) { @@ -1703,11 +1683,6 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm) elf_core_copy_task_fpregs(current, cprm->regs, fpu))) fill_note(notes + numnote++, "CORE", NT_PRFPREG, sizeof(*fpu), fpu); -#ifdef ELF_CORE_COPY_XFPREGS - if (elf_core_copy_task_xfpregs(current, xfpu)) - fill_note(notes + numnote++, - "LINUX", ELF_CORE_XFPREG_TYPE, sizeof(*xfpu), xfpu); -#endif offset += sizeof(*elf); /* Elf header */ offset += segs * sizeof(struct elf_phdr); /* Program headers */ @@ -1828,9 +1803,6 @@ end_coredump: kfree(notes); kfree(fpu); kfree(shdr4extnum); -#ifdef ELF_CORE_COPY_XFPREGS - kfree(xfpu); -#endif return has_dumped; #undef NUM_NOTES } diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h index adb8ee89f3fd..46c3d691f677 100644 --- a/include/linux/elfcore.h +++ b/include/linux/elfcore.h @@ -104,13 +104,6 @@ static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_reg #endif } -#ifdef ELF_CORE_COPY_XFPREGS -static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu) -{ - return ELF_CORE_COPY_XFPREGS(t, xfpu); -} -#endif - /* * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out * extra segments containing the gate DSO contents. Dumping its -- cgit v1.2.3 From dc12d7968f9c9540494deb1285854b18ca4465ec Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 17 Feb 2020 12:25:14 -0500 Subject: copy_regset_to_user(): do all copyout at once. Turn copy_regset_to_user() into regset_get_alloc() + copy_to_user(). 
Now all ->get() calls have a kernel buffer as destination. Note that we'd already eliminated the callers of copy_regset_to_user() with non-zero offset; now that argument is simply unused. Uninlined, while we are at it. Signed-off-by: Al Viro --- include/linux/regset.h | 29 ++++------------------------- kernel/regset.c | 26 ++++++++++++++++++++++++++ 2 files changed, 30 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/include/linux/regset.h b/include/linux/regset.h index 968a032922d5..af57c1db1924 100644 --- a/include/linux/regset.h +++ b/include/linux/regset.h @@ -362,31 +362,10 @@ extern int regset_get_alloc(struct task_struct *target, unsigned int size, void **data); -/** - * copy_regset_to_user - fetch a thread's user_regset data into user memory - * @target: thread to be examined - * @view: &struct user_regset_view describing user thread machine state - * @setno: index in @view->regsets - * @offset: offset into the regset data, in bytes - * @size: amount of data to copy, in bytes - * @data: user-mode pointer to copy into - */ -static inline int copy_regset_to_user(struct task_struct *target, - const struct user_regset_view *view, - unsigned int setno, - unsigned int offset, unsigned int size, - void __user *data) -{ - const struct user_regset *regset = &view->regsets[setno]; - - if (!regset->get) - return -EOPNOTSUPP; - - if (!access_ok(data, size)) - return -EFAULT; - - return regset->get(target, regset, offset, size, NULL, data); -} +extern int copy_regset_to_user(struct task_struct *target, + const struct user_regset_view *view, + unsigned int setno, unsigned int offset, + unsigned int size, void __user *data); /** * copy_regset_from_user - store into thread's user_regset data from user memory diff --git a/kernel/regset.c b/kernel/regset.c index 6b39fa0993ec..0a610983ce43 100644 --- a/kernel/regset.c +++ b/kernel/regset.c @@ -52,3 +52,29 @@ int regset_get_alloc(struct task_struct *target, return __regset_get(target, regset, size, data); } EXPORT_SYMBOL(regset_get_alloc); + +/** + * copy_regset_to_user - fetch a thread's user_regset data into user memory + * @target: thread to be examined + * @view: &struct user_regset_view describing user thread machine state + * @setno: index in @view->regsets + * @offset: offset into the regset data, in bytes + * @size: amount of data to copy, in bytes + * @data: user-mode pointer to copy into + */ +int copy_regset_to_user(struct task_struct *target, + const struct user_regset_view *view, + unsigned int setno, + unsigned int offset, unsigned int size, + void __user *data) +{ + const struct user_regset *regset = &view->regsets[setno]; + void *buf; + int ret; + + ret = regset_get_alloc(target, regset, size, &buf); + if (ret > 0) + ret = copy_to_user(data, buf, ret) ? -EFAULT : 0; + kfree(buf); + return ret; +} -- cgit v1.2.3 From 7717cb9bdd0421faa432a4e0d499fdba6e2394c8 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 20 Feb 2020 20:48:16 -0500 Subject: regset: new method and helpers for it ->regset_get() takes task+regset+buffer, returns the amount of free space left in the buffer on success and -E... on error. buffer is represented as struct membuf - a pair of (kernel) pointer and amount of space left Primitives for writing to such: * membuf_write(buf, data, size) * membuf_zero(buf, size) * membuf_store(buf, value) These are implemented as inlines (in case of membuf_store - a macro). All writes are sequential; they become no-ops when there's no space left. 
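A minimal userspace rendering of that truncation rule (the helper is copied in spirit from the hunk below, with a cast added for portable pointer arithmetic; the buffer size is made up, and the commit's own foo_get() usage example follows):

#include <stdio.h>
#include <string.h>

struct membuf { void *p; size_t left; };

static int membuf_write(struct membuf *s, const void *v, size_t size)
{
	if (s->left) {
		if (size > s->left)
			size = s->left;
		memcpy(s->p, v, size);
		s->p = (char *)s->p + size;
		s->left -= size;
	}
	return s->left;
}

int main(void)
{
	char buf[8];
	struct membuf to = { .p = buf, .left = sizeof(buf) };

	printf("%d\n", membuf_write(&to, "abcd", 4));   /* 4 - space still left      */
	printf("%d\n", membuf_write(&to, "efghij", 6)); /* 0 - write was truncated   */
	printf("%d\n", membuf_write(&to, "klmn", 4));   /* 0 - no-op, buffer is full */
	return 0;
}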
Return value of all primitives is the amount of space left after the operation, so they can be used as return values of ->regset_get(). Example of use: // stores pt_regs of task + 64 bytes worth of zeroes + 32bit PID of task int foo_get(struct task_struct *task, const struct regset *regset, struct membuf to) { membuf_write(&to, task_pt_regs(task), sizeof(struct pt_regs)); membuf_zero(&to, 64); return membuf_store(&to, (u32)task_tgid_vnr(task)); } regset_get()/regset_get_alloc() taught to use that thing if present. By the end of the series all users of ->get() will be converted; then ->get() and ->get_size() can go. Note that unlike ->get() this thing always starts at offset 0 and, since it only writes to kernel buffer, can't fail on copyout. It can, of course, fail for other reasons, but those tend to be less numerous. The caller guarantees that the buffer size won't be bigger than regset->n * regset->size. That simplifies life for quite a few instances. Signed-off-by: Al Viro --- include/linux/regset.h | 51 ++++++++++++++++++++++++++++++++++++++++++++++++++ kernel/regset.c | 12 +++++++++++- 2 files changed, 62 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/regset.h b/include/linux/regset.h index af57c1db1924..f6125a7d949d 100644 --- a/include/linux/regset.h +++ b/include/linux/regset.h @@ -17,6 +17,52 @@ struct task_struct; struct user_regset; +struct membuf { + void *p; + size_t left; +}; + +static inline int membuf_zero(struct membuf *s, size_t size) +{ + if (s->left) { + if (size > s->left) + size = s->left; + memset(s->p, 0, size); + s->p += size; + s->left -= size; + } + return s->left; +} + +static inline int membuf_write(struct membuf *s, const void *v, size_t size) +{ + if (s->left) { + if (size > s->left) + size = s->left; + memcpy(s->p, v, size); + s->p += size; + s->left -= size; + } + return s->left; +} + +/* current s->p must be aligned for v; v must be a scalar */ +#define membuf_store(s, v) \ +({ \ + struct membuf *__s = (s); \ + if (__s->left) { \ + typeof(v) __v = (v); \ + size_t __size = sizeof(__v); \ + if (unlikely(__size > __s->left)) { \ + __size = __s->left; \ + memcpy(__s->p, &__v, __size); \ + } else { \ + *(typeof(__v + 0) *)__s->p = __v; \ + } \ + __s->p += __size; \ + __s->left -= __size; \ + } \ + __s->left;}) /** * user_regset_active_fn - type of @active function in &struct user_regset @@ -57,6 +103,10 @@ typedef int user_regset_get_fn(struct task_struct *target, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf); +typedef int user_regset_get2_fn(struct task_struct *target, + const struct user_regset *regset, + struct membuf to); + /** * user_regset_set_fn - type of @set function in &struct user_regset * @target: thread being examined @@ -186,6 +236,7 @@ typedef unsigned int user_regset_get_size_fn(struct task_struct *target, */ struct user_regset { user_regset_get_fn *get; + user_regset_get2_fn *regset_get; user_regset_set_fn *set; user_regset_active_fn *active; user_regset_writeback_fn *writeback; diff --git a/kernel/regset.c b/kernel/regset.c index 0a610983ce43..eaeaefbbd39e 100644 --- a/kernel/regset.c +++ b/kernel/regset.c @@ -11,7 +11,7 @@ static int __regset_get(struct task_struct *target, void *p = *data, *to_free = NULL; int res; - if (!regset->get) + if (!regset->get && !regset->regset_get) return -EOPNOTSUPP; if (size > regset->n * regset->size) size = regset->n * regset->size; @@ -20,6 +20,16 @@ static int __regset_get(struct task_struct *target, if (!p) return -ENOMEM; } + if (regset->regset_get) 
{ + res = regset->regset_get(target, regset, + (struct membuf){.p = p, .left = size}); + if (res < 0) { + kfree(to_free); + return res; + } + *data = p; + return size - res; + } res = regset->get(target, regset, 0, size, p, NULL); if (unlikely(res < 0)) { kfree(to_free); -- cgit v1.2.3 From 1e6986c9db21265bac1435a344b4446c51a3f4d8 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 16 Jun 2020 15:34:20 -0400 Subject: regset: kill ->get() no instances left Signed-off-by: Al Viro --- include/linux/regset.h | 22 ---------------------- kernel/regset.c | 24 +++++------------------- 2 files changed, 5 insertions(+), 41 deletions(-) (limited to 'include') diff --git a/include/linux/regset.h b/include/linux/regset.h index f6125a7d949d..2a4a555b1617 100644 --- a/include/linux/regset.h +++ b/include/linux/regset.h @@ -82,27 +82,6 @@ static inline int membuf_write(struct membuf *s, const void *v, size_t size) typedef int user_regset_active_fn(struct task_struct *target, const struct user_regset *regset); -/** - * user_regset_get_fn - type of @get function in &struct user_regset - * @target: thread being examined - * @regset: regset being examined - * @pos: offset into the regset data to access, in bytes - * @count: amount of data to copy, in bytes - * @kbuf: if not %NULL, a kernel-space pointer to copy into - * @ubuf: if @kbuf is %NULL, a user-space pointer to copy into - * - * Fetch register values. Return %0 on success; -%EIO or -%ENODEV - * are usual failure returns. The @pos and @count values are in - * bytes, but must be properly aligned. If @kbuf is non-null, that - * buffer is used and @ubuf is ignored. If @kbuf is %NULL, then - * ubuf gives a userland pointer to access directly, and an -%EFAULT - * return value is possible. - */ -typedef int user_regset_get_fn(struct task_struct *target, - const struct user_regset *regset, - unsigned int pos, unsigned int count, - void *kbuf, void __user *ubuf); - typedef int user_regset_get2_fn(struct task_struct *target, const struct user_regset *regset, struct membuf to); @@ -235,7 +214,6 @@ typedef unsigned int user_regset_get_size_fn(struct task_struct *target, * omitted when there is an @active function and it returns zero. 
*/ struct user_regset { - user_regset_get_fn *get; user_regset_get2_fn *regset_get; user_regset_set_fn *set; user_regset_active_fn *active; diff --git a/kernel/regset.c b/kernel/regset.c index eaeaefbbd39e..586823786f39 100644 --- a/kernel/regset.c +++ b/kernel/regset.c @@ -11,7 +11,7 @@ static int __regset_get(struct task_struct *target, void *p = *data, *to_free = NULL; int res; - if (!regset->get && !regset->regset_get) + if (!regset->regset_get) return -EOPNOTSUPP; if (size > regset->n * regset->size) size = regset->n * regset->size; @@ -20,28 +20,14 @@ static int __regset_get(struct task_struct *target, if (!p) return -ENOMEM; } - if (regset->regset_get) { - res = regset->regset_get(target, regset, - (struct membuf){.p = p, .left = size}); - if (res < 0) { - kfree(to_free); - return res; - } - *data = p; - return size - res; - } - res = regset->get(target, regset, 0, size, p, NULL); - if (unlikely(res < 0)) { + res = regset->regset_get(target, regset, + (struct membuf){.p = p, .left = size}); + if (res < 0) { kfree(to_free); return res; } *data = p; - if (regset->get_size) { // arm64-only kludge, will go away - unsigned max_size = regset->get_size(target, regset); - if (size > max_size) - size = max_size; - } - return size; + return size - res; } int regset_get(struct task_struct *target, -- cgit v1.2.3 From c522401e0656b51e6a65ec112489cb078801aa9c Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 17 Jun 2020 09:57:08 -0400 Subject: regset(): kill ->get_size() not used anymore Signed-off-by: Al Viro --- arch/arm64/kernel/ptrace.c | 13 ------------- include/linux/regset.h | 48 +--------------------------------------------- 2 files changed, 1 insertion(+), 60 deletions(-) (limited to 'include') diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 902d987fdd35..5bf737d38b26 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -740,18 +740,6 @@ static unsigned int sve_size_from_header(struct user_sve_header const *header) return ALIGN(header->size, SVE_VQ_BYTES); } -static unsigned int sve_get_size(struct task_struct *target, - const struct user_regset *regset) -{ - struct user_sve_header header; - - if (!system_supports_sve()) - return 0; - - sve_init_header_from_task(&header, target); - return sve_size_from_header(&header); -} - static int sve_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) @@ -1130,7 +1118,6 @@ static const struct user_regset aarch64_regsets[] = { .align = SVE_VQ_BYTES, .regset_get = sve_get, .set = sve_set, - .get_size = sve_get_size, }, #endif #ifdef CONFIG_ARM64_PTR_AUTH diff --git a/include/linux/regset.h b/include/linux/regset.h index 2a4a555b1617..6b951a27bcaf 100644 --- a/include/linux/regset.h +++ b/include/linux/regset.h @@ -132,28 +132,6 @@ typedef int user_regset_writeback_fn(struct task_struct *target, const struct user_regset *regset, int immediate); -/** - * user_regset_get_size_fn - type of @get_size function in &struct user_regset - * @target: thread being examined - * @regset: regset being examined - * - * This call is optional; usually the pointer is %NULL. - * - * When provided, this function must return the current size of regset - * data, as observed by the @get function in &struct user_regset. The - * value returned must be a multiple of @size. The returned size is - * required to be valid only until the next time (if any) @regset is - * modified for @target. - * - * This function is intended for dynamically sized regsets. 
A regset - * that is statically sized does not need to implement it. - * - * This function should not be called directly: instead, callers should - * call regset_size() to determine the current size of a regset. - */ -typedef unsigned int user_regset_get_size_fn(struct task_struct *target, - const struct user_regset *regset); - /** * struct user_regset - accessible thread CPU state * @n: Number of slots (registers). @@ -165,7 +143,6 @@ typedef unsigned int user_regset_get_size_fn(struct task_struct *target, * @set: Function to store values. * @active: Function to report if regset is active, or %NULL. * @writeback: Function to write data back to user memory, or %NULL. - * @get_size: Function to return the regset's size, or %NULL. * * This data structure describes a machine resource we call a register set. * This is part of the state of an individual thread, not necessarily @@ -173,12 +150,7 @@ typedef unsigned int user_regset_get_size_fn(struct task_struct *target, * similar slots, given by @n. Each slot is @size bytes, and aligned to * @align bytes (which is at least @size). For dynamically-sized * regsets, @n must contain the maximum possible number of slots for the - * regset, and @get_size must point to a function that returns the - * current regset size. - * - * Callers that need to know only the current size of the regset and do - * not care about its internal structure should call regset_size() - * instead of inspecting @n or calling @get_size. + * regset. * * For backward compatibility, the @get and @set methods must pad to, or * accept, @n * @size bytes, even if the current regset size is smaller. @@ -218,7 +190,6 @@ struct user_regset { user_regset_set_fn *set; user_regset_active_fn *active; user_regset_writeback_fn *writeback; - user_regset_get_size_fn *get_size; unsigned int n; unsigned int size; unsigned int align; @@ -422,21 +393,4 @@ static inline int copy_regset_from_user(struct task_struct *target, return regset->set(target, regset, offset, size, NULL, data); } -/** - * regset_size - determine the current size of a regset - * @target: thread to be examined - * @regset: regset to be examined - * - * Note that the returned size is valid only until the next time - * (if any) @regset is modified for @target. - */ -static inline unsigned int regset_size(struct task_struct *target, - const struct user_regset *regset) -{ - if (!regset->get_size) - return regset->n * regset->size; - else - return regset->get_size(target, regset); -} - #endif /* */ -- cgit v1.2.3 From ce327e1c54119179066d6f3573a28001febc9265 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 17 Jun 2020 13:40:03 -0400 Subject: regset: kill user_regset_copyout{,_zero}() no callers left Signed-off-by: Al Viro --- include/linux/regset.h | 67 -------------------------------------------------- 1 file changed, 67 deletions(-) (limited to 'include') diff --git a/include/linux/regset.h b/include/linux/regset.h index 6b951a27bcaf..c3403f328257 100644 --- a/include/linux/regset.h +++ b/include/linux/regset.h @@ -238,44 +238,6 @@ struct user_regset_view { */ const struct user_regset_view *task_user_regset_view(struct task_struct *tsk); - -/* - * These are helpers for writing regset get/set functions in arch code. - * Because @start_pos and @end_pos are always compile-time constants, - * these are inlined into very little code though they look large. - * - * Use one or more calls sequentially for each chunk of regset data stored - * contiguously in memory. 
Call with constants for @start_pos and @end_pos, - * giving the range of byte positions in the regset that data corresponds - * to; @end_pos can be -1 if this chunk is at the end of the regset layout. - * Each call updates the arguments to point past its chunk. - */ - -static inline int user_regset_copyout(unsigned int *pos, unsigned int *count, - void **kbuf, - void __user **ubuf, const void *data, - const int start_pos, const int end_pos) -{ - if (*count == 0) - return 0; - BUG_ON(*pos < start_pos); - if (end_pos < 0 || *pos < end_pos) { - unsigned int copy = (end_pos < 0 ? *count - : min(*count, end_pos - *pos)); - data += *pos - start_pos; - if (*kbuf) { - memcpy(*kbuf, data, copy); - *kbuf += copy; - } else if (__copy_to_user(*ubuf, data, copy)) - return -EFAULT; - else - *ubuf += copy; - *pos += copy; - *count -= copy; - } - return 0; -} - static inline int user_regset_copyin(unsigned int *pos, unsigned int *count, const void **kbuf, const void __user **ubuf, void *data, @@ -301,35 +263,6 @@ static inline int user_regset_copyin(unsigned int *pos, unsigned int *count, return 0; } -/* - * These two parallel the two above, but for portions of a regset layout - * that always read as all-zero or for which writes are ignored. - */ -static inline int user_regset_copyout_zero(unsigned int *pos, - unsigned int *count, - void **kbuf, void __user **ubuf, - const int start_pos, - const int end_pos) -{ - if (*count == 0) - return 0; - BUG_ON(*pos < start_pos); - if (end_pos < 0 || *pos < end_pos) { - unsigned int copy = (end_pos < 0 ? *count - : min(*count, end_pos - *pos)); - if (*kbuf) { - memset(*kbuf, 0, copy); - *kbuf += copy; - } else if (clear_user(*ubuf, copy)) - return -EFAULT; - else - *ubuf += copy; - *pos += copy; - *count -= copy; - } - return 0; -} - static inline int user_regset_copyin_ignore(unsigned int *pos, unsigned int *count, const void **kbuf, -- cgit v1.2.3 From 6414e9b09ffd197803f8e86ce2fafdaf1de4e8e4 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 12 Jul 2020 20:09:52 -0700 Subject: fs: define inode flags using bit numbers Define the VFS inode flags using bit numbers instead of hardcoding powers of 2, which has become unwieldy now that we're up to 65536. No change in the actual values. 
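A quick compile-time spot check of that claim (illustrative only, C11; literal values used because fs.h is not included here):

/* old hardcoded constants on the left, new bit-number form on the right */
_Static_assert(1     == (1 << 0),  "S_SYNC unchanged");
_Static_assert(4096  == (1 << 12), "S_NOSEC unchanged");
_Static_assert(65536 == (1 << 16), "S_VERITY unchanged");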
Signed-off-by: Eric Biggers Signed-off-by: Al Viro --- include/linux/fs.h | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/include/linux/fs.h b/include/linux/fs.h index 6c4ab4dc1cd7..9bf7a32f2932 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1982,27 +1982,27 @@ struct super_operations { /* * Inode flags - they have no relation to superblock flags now */ -#define S_SYNC 1 /* Writes are synced at once */ -#define S_NOATIME 2 /* Do not update access times */ -#define S_APPEND 4 /* Append-only file */ -#define S_IMMUTABLE 8 /* Immutable file */ -#define S_DEAD 16 /* removed, but still open directory */ -#define S_NOQUOTA 32 /* Inode is not counted to quota */ -#define S_DIRSYNC 64 /* Directory modifications are synchronous */ -#define S_NOCMTIME 128 /* Do not update file c/mtime */ -#define S_SWAPFILE 256 /* Do not truncate: swapon got its bmaps */ -#define S_PRIVATE 512 /* Inode is fs-internal */ -#define S_IMA 1024 /* Inode has an associated IMA struct */ -#define S_AUTOMOUNT 2048 /* Automount/referral quasi-directory */ -#define S_NOSEC 4096 /* no suid or xattr security attributes */ +#define S_SYNC (1 << 0) /* Writes are synced at once */ +#define S_NOATIME (1 << 1) /* Do not update access times */ +#define S_APPEND (1 << 2) /* Append-only file */ +#define S_IMMUTABLE (1 << 3) /* Immutable file */ +#define S_DEAD (1 << 4) /* removed, but still open directory */ +#define S_NOQUOTA (1 << 5) /* Inode is not counted to quota */ +#define S_DIRSYNC (1 << 6) /* Directory modifications are synchronous */ +#define S_NOCMTIME (1 << 7) /* Do not update file c/mtime */ +#define S_SWAPFILE (1 << 8) /* Do not truncate: swapon got its bmaps */ +#define S_PRIVATE (1 << 9) /* Inode is fs-internal */ +#define S_IMA (1 << 10) /* Inode has an associated IMA struct */ +#define S_AUTOMOUNT (1 << 11) /* Automount/referral quasi-directory */ +#define S_NOSEC (1 << 12) /* no suid or xattr security attributes */ #ifdef CONFIG_FS_DAX -#define S_DAX 8192 /* Direct Access, avoiding the page cache */ +#define S_DAX (1 << 13) /* Direct Access, avoiding the page cache */ #else -#define S_DAX 0 /* Make all the DAX code disappear */ +#define S_DAX 0 /* Make all the DAX code disappear */ #endif -#define S_ENCRYPTED 16384 /* Encrypted file (using fs/crypto/) */ -#define S_CASEFOLD 32768 /* Casefolded file */ -#define S_VERITY 65536 /* Verity file (using fs/verity/) */ +#define S_ENCRYPTED (1 << 14) /* Encrypted file (using fs/crypto/) */ +#define S_CASEFOLD (1 << 15) /* Casefolded file */ +#define S_VERITY (1 << 16) /* Verity file (using fs/verity/) */ /* * Note that nosuid etc flags are inode-specific: setting some file-system -- cgit v1.2.3 From 8f4c0e01789c18674acdf17cae3822b3dc3db715 Mon Sep 17 00:00:00 2001 From: Murali Karicheri Date: Wed, 22 Jul 2020 10:40:16 -0400 Subject: hsr: enhance netlink socket interface to support PRP Parallel Redundancy Protocol (PRP) is another redundancy protocol introduced by IEC 63439 standard. It is similar to HSR in many aspects:- - Use a pair of Ethernet interfaces to created the PRP device - Use a 6 byte redundancy protocol part (RCT, Redundancy Check Trailer) similar to HSR Tag. - Has Link Redundancy Entity (LRE) that works with RCT to implement redundancy. Key difference is that the protocol unit is a trailer instead of a prefix as in HSR. 
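For orientation, a compile-only sketch of the two 6-byte protocol units (field layout per IEC 62439-3, reproduced here from memory for illustration and not the driver's actual definitions; both are big-endian on the wire):

#include <stdint.h>

struct hsr_tag_sketch {                 /* inserted right after the MAC addresses */
	uint16_t ethertype;             /* 0x892F */
	uint16_t path_and_lsdu_size;    /* 4-bit path id, 12-bit LSDU size */
	uint16_t sequence_nr;
};                                      /* the original EtherType follows the tag */

struct prp_rct_sketch {                 /* appended after the payload, before the FCS */
	uint16_t sequence_nr;
	uint16_t lan_id_and_lsdu_size;  /* 4-bit LAN id, 12-bit LSDU size */
	uint16_t prp_suffix;            /* 0x88FB */
};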
That makes it inter-operable with tradition network components such as bridges/switches which treat it as pad bytes, whereas HSR nodes requires some kind of translators (Called redbox) to talk to regular network devices. This features allows regular linux box to be converted to a DAN-P box. DAN-P stands for Dual Attached Node - PRP similar to DAN-H (Dual Attached Node - HSR). Add a comment at the header/source code to explicitly state that the driver files also handles PRP protocol as well. Signed-off-by: Murali Karicheri Signed-off-by: David S. Miller --- include/uapi/linux/hsr_netlink.h | 2 +- include/uapi/linux/if_link.h | 12 +++++++++++- net/hsr/Kconfig | 35 +++++++++++++++++++++++------------ net/hsr/hsr_debugfs.c | 2 +- net/hsr/hsr_device.c | 7 +++++-- net/hsr/hsr_device.h | 2 ++ net/hsr/hsr_forward.c | 2 ++ net/hsr/hsr_forward.h | 2 ++ net/hsr/hsr_framereg.c | 1 + net/hsr/hsr_framereg.h | 2 ++ net/hsr/hsr_main.c | 2 ++ net/hsr/hsr_main.h | 11 ++++++++++- net/hsr/hsr_netlink.c | 38 +++++++++++++++++++++++++++++++------- net/hsr/hsr_netlink.h | 2 ++ net/hsr/hsr_slave.c | 2 ++ net/hsr/hsr_slave.h | 2 ++ 16 files changed, 99 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/hsr_netlink.h b/include/uapi/linux/hsr_netlink.h index c218ef9c35dd..d540ea9bbef4 100644 --- a/include/uapi/linux/hsr_netlink.h +++ b/include/uapi/linux/hsr_netlink.h @@ -17,7 +17,7 @@ /* Generic Netlink HSR family definition */ -/* attributes */ +/* attributes for HSR or PRP node */ enum { HSR_A_UNSPEC, HSR_A_NODE_ADDR, diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index af8f31987526..63af64646358 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -907,7 +907,14 @@ enum { #define IFLA_IPOIB_MAX (__IFLA_IPOIB_MAX - 1) -/* HSR section */ +/* HSR/PRP section, both uses same interface */ + +/* Different redundancy protocols for hsr device */ +enum { + HSR_PROTOCOL_HSR, + HSR_PROTOCOL_PRP, + HSR_PROTOCOL_MAX, +}; enum { IFLA_HSR_UNSPEC, @@ -917,6 +924,9 @@ enum { IFLA_HSR_SUPERVISION_ADDR, /* Supervision frame multicast addr */ IFLA_HSR_SEQ_NR, IFLA_HSR_VERSION, /* HSR version */ + IFLA_HSR_PROTOCOL, /* Indicate different protocol than + * HSR. For example PRP. + */ __IFLA_HSR_MAX, }; diff --git a/net/hsr/Kconfig b/net/hsr/Kconfig index 8095b034e76e..1b048c17b6c8 100644 --- a/net/hsr/Kconfig +++ b/net/hsr/Kconfig @@ -4,24 +4,35 @@ # config HSR - tristate "High-availability Seamless Redundancy (HSR)" + tristate "High-availability Seamless Redundancy (HSR & PRP)" help + This enables IEC 62439 defined High-availability Seamless + Redundancy (HSR) and Parallel Redundancy Protocol (PRP). + If you say Y here, then your Linux box will be able to act as a - DANH ("Doubly attached node implementing HSR"). For this to work, - your Linux box needs (at least) two physical Ethernet interfaces, - and it must be connected as a node in a ring network together with - other HSR capable nodes. + DANH ("Doubly attached node implementing HSR") or DANP ("Doubly + attached node implementing PRP"). For this to work, your Linux box + needs (at least) two physical Ethernet interfaces. + + For DANH, it must be connected as a node in a ring network together + with other HSR capable nodes. All Ethernet frames sent over the HSR + device will be sent in both directions on the ring (over both slave + ports), giving a redundant, instant fail-over network. 
Each HSR node + in the ring acts like a bridge for HSR frames, but filters frames + that have been forwarded earlier. - All Ethernet frames sent over the hsr device will be sent in both - directions on the ring (over both slave ports), giving a redundant, - instant fail-over network. Each HSR node in the ring acts like a - bridge for HSR frames, but filters frames that have been forwarded - earlier. + For DANP, it must be connected as a node connecting to two + separate networks over the two slave interfaces. Like HSR, Ethernet + frames sent over the PRP device will be sent to both networks giving + a redundant, instant fail-over network. Unlike HSR, PRP networks + can have Singly Attached Nodes (SAN) such as PC, printer, bridges + etc and will be able to communicate with DANP nodes. This code is a "best effort" to comply with the HSR standard as described in IEC 62439-3:2010 (HSRv0) and IEC 62439-3:2012 (HSRv1), - but no compliancy tests have been made. Use iproute2 to select - the version you desire. + and PRP standard described in IEC 62439-4:2012 (PRP), but no + compliancy tests have been made. Use iproute2 to select the protocol + you would like to use. You need to perform any and all necessary tests yourself before relying on this code in a safety critical system! diff --git a/net/hsr/hsr_debugfs.c b/net/hsr/hsr_debugfs.c index 9787ef11ca71..c1932c0a15be 100644 --- a/net/hsr/hsr_debugfs.c +++ b/net/hsr/hsr_debugfs.c @@ -1,5 +1,5 @@ /* - * hsr_debugfs code + * debugfs code for HSR & PRP * Copyright (C) 2019 Texas Instruments Incorporated * * Author(s): diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index 8a927b647829..40ac45123a62 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -3,9 +3,8 @@ * * Author(s): * 2011-2014 Arvid Brodin, arvid.brodin@alten.se - * * This file contains device methods for creating, using and destroying - * virtual HSR devices. + * virtual HSR or PRP devices. */ #include @@ -427,6 +426,10 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2], ether_addr_copy(hsr_dev->dev_addr, slave[0]->dev_addr); + /* currently PRP is not supported */ + if (protocol_version == PRP_V1) + return -EPROTONOSUPPORT; + /* Make sure we recognize frames from ourselves in hsr_rcv() */ res = hsr_create_self_node(hsr, hsr_dev->dev_addr, slave[1]->dev_addr); diff --git a/net/hsr/hsr_device.h b/net/hsr/hsr_device.h index b8f9262ed101..868373822ee4 100644 --- a/net/hsr/hsr_device.h +++ b/net/hsr/hsr_device.h @@ -3,6 +3,8 @@ * * Author(s): * 2011-2014 Arvid Brodin, arvid.brodin@alten.se + * + * include file for HSR and PRP. */ #ifndef __HSR_DEVICE_H diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c index ab8dca0c0b65..55adb4dbd235 100644 --- a/net/hsr/hsr_forward.c +++ b/net/hsr/hsr_forward.c @@ -3,6 +3,8 @@ * * Author(s): * 2011-2014 Arvid Brodin, arvid.brodin@alten.se + * + * Frame router for HSR and PRP. */ #include "hsr_forward.h" diff --git a/net/hsr/hsr_forward.h b/net/hsr/hsr_forward.h index 51a69295566c..b2a6fa319d94 100644 --- a/net/hsr/hsr_forward.h +++ b/net/hsr/hsr_forward.h @@ -3,6 +3,8 @@ * * Author(s): * 2011-2014 Arvid Brodin, arvid.brodin@alten.se + * + * include file for HSR and PRP. */ #ifndef __HSR_FORWARD_H diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c index 530de24b1fb5..13b2190e6556 100644 --- a/net/hsr/hsr_framereg.c +++ b/net/hsr/hsr_framereg.c @@ -8,6 +8,7 @@ * interface. A frame is identified by its source MAC address and its HSR * sequence number. 
This code keeps track of senders and their sequence numbers * to allow filtering of duplicate frames, and to detect HSR ring errors. + * Same code handles filtering of duplicates for PRP as well. */ #include diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h index 0f0fa12b4329..c06447780d05 100644 --- a/net/hsr/hsr_framereg.h +++ b/net/hsr/hsr_framereg.h @@ -3,6 +3,8 @@ * * Author(s): * 2011-2014 Arvid Brodin, arvid.brodin@alten.se + * + * include file for HSR and PRP. */ #ifndef __HSR_FRAMEREG_H diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c index 144da15f0a81..2fd1976e5b1c 100644 --- a/net/hsr/hsr_main.c +++ b/net/hsr/hsr_main.c @@ -3,6 +3,8 @@ * * Author(s): * 2011-2014 Arvid Brodin, arvid.brodin@alten.se + * + * Event handling for HSR and PRP devices. */ #include diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h index f74193465bf5..8cf10d67d5f9 100644 --- a/net/hsr/hsr_main.h +++ b/net/hsr/hsr_main.h @@ -3,6 +3,8 @@ * * Author(s): * 2011-2014 Arvid Brodin, arvid.brodin@alten.se + * + * include file for HSR and PRP. */ #ifndef __HSR_PRIVATE_H @@ -131,6 +133,13 @@ struct hsr_port { enum hsr_port_type type; }; +/* used by driver internally to differentiate various protocols */ +enum hsr_version { + HSR_V0 = 0, + HSR_V1, + PRP_V1, +}; + struct hsr_priv { struct rcu_head rcu_head; struct list_head ports; @@ -141,7 +150,7 @@ struct hsr_priv { int announce_count; u16 sequence_nr; u16 sup_sequence_nr; /* For HSRv1 separate seq_nr for supervision */ - u8 prot_version; /* Indicate if HSRv0 or HSRv1. */ + enum hsr_version prot_version; /* Indicate if HSRv0, HSRv1 or PRPv1 */ spinlock_t seqnr_lock; /* locking for sequence_nr */ spinlock_t list_lock; /* locking for node list */ unsigned char sup_multicast_addr[ETH_ALEN]; diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c index 6e14b7d22639..06c3cd988760 100644 --- a/net/hsr/hsr_netlink.c +++ b/net/hsr/hsr_netlink.c @@ -4,7 +4,7 @@ * Author(s): * 2011-2014 Arvid Brodin, arvid.brodin@alten.se * - * Routines for handling Netlink messages for HSR. + * Routines for handling Netlink messages for HSR and PRP. 
*/ #include "hsr_netlink.h" @@ -22,6 +22,7 @@ static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = { [IFLA_HSR_VERSION] = { .type = NLA_U8 }, [IFLA_HSR_SUPERVISION_ADDR] = { .len = ETH_ALEN }, [IFLA_HSR_SEQ_NR] = { .type = NLA_U16 }, + [IFLA_HSR_PROTOCOL] = { .type = NLA_U8 }, }; /* Here, it seems a netdevice has already been allocated for us, and the @@ -31,8 +32,10 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { + enum hsr_version proto_version; + unsigned char multicast_spec; + u8 proto = HSR_PROTOCOL_HSR; struct net_device *link[2]; - unsigned char multicast_spec, hsr_version; if (!data) { NL_SET_ERR_MSG_MOD(extack, "No slave devices specified"); @@ -69,18 +72,34 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev, else multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]); + if (data[IFLA_HSR_PROTOCOL]) + proto = nla_get_u8(data[IFLA_HSR_PROTOCOL]); + + if (proto >= HSR_PROTOCOL_MAX) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol\n"); + return -EINVAL; + } + if (!data[IFLA_HSR_VERSION]) { - hsr_version = 0; + proto_version = HSR_V0; } else { - hsr_version = nla_get_u8(data[IFLA_HSR_VERSION]); - if (hsr_version > 1) { + if (proto == HSR_PROTOCOL_PRP) { + NL_SET_ERR_MSG_MOD(extack, "PRP version unsupported\n"); + return -EINVAL; + } + + proto_version = nla_get_u8(data[IFLA_HSR_VERSION]); + if (proto_version > HSR_V1) { NL_SET_ERR_MSG_MOD(extack, - "Only versions 0..1 are supported"); + "Only HSR version 0/1 supported\n"); return -EINVAL; } } - return hsr_dev_finalize(dev, link, multicast_spec, hsr_version, extack); + if (proto == HSR_PROTOCOL_PRP) + proto_version = PRP_V1; + + return hsr_dev_finalize(dev, link, multicast_spec, proto_version, extack); } static void hsr_dellink(struct net_device *dev, struct list_head *head) @@ -102,6 +121,7 @@ static void hsr_dellink(struct net_device *dev, struct list_head *head) static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct hsr_priv *hsr = netdev_priv(dev); + u8 proto = HSR_PROTOCOL_HSR; struct hsr_port *port; port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A); @@ -120,6 +140,10 @@ static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev) hsr->sup_multicast_addr) || nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr)) goto nla_put_failure; + if (hsr->prot_version == PRP_V1) + proto = HSR_PROTOCOL_PRP; + if (nla_put_u8(skb, IFLA_HSR_PROTOCOL, proto)) + goto nla_put_failure; return 0; diff --git a/net/hsr/hsr_netlink.h b/net/hsr/hsr_netlink.h index 1121bb192a18..501552d9753b 100644 --- a/net/hsr/hsr_netlink.h +++ b/net/hsr/hsr_netlink.h @@ -3,6 +3,8 @@ * * Author(s): * 2011-2014 Arvid Brodin, arvid.brodin@alten.se + * + * include file for HSR and PRP. */ #ifndef __HSR_NETLINK_H diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c index 25b6ffba26cd..b5c0834de338 100644 --- a/net/hsr/hsr_slave.c +++ b/net/hsr/hsr_slave.c @@ -3,6 +3,8 @@ * * Author(s): * 2011-2014 Arvid Brodin, arvid.brodin@alten.se + * + * Frame handler other utility functions for HSR and PRP. */ #include "hsr_slave.h" diff --git a/net/hsr/hsr_slave.h b/net/hsr/hsr_slave.h index 8953ea279ce9..9708a4f0ec09 100644 --- a/net/hsr/hsr_slave.h +++ b/net/hsr/hsr_slave.h @@ -2,6 +2,8 @@ /* Copyright 2011-2014 Autronica Fire and Security AS * * 2011-2014 Arvid Brodin, arvid.brodin@alten.se + * + * include file for HSR and PRP. 
*/ #ifndef __HSR_SLAVE_H -- cgit v1.2.3 From 08b95c338e0c5a96e47f4ca314ea1e7580ecb5d7 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Wed, 8 Jul 2020 14:11:52 +0300 Subject: fanotify: remove event FAN_DIR_MODIFY It was never enabled in uapi and its functionality is about to be superseded by events FAN_CREATE, FAN_DELETE, FAN_MOVE with group flag FAN_REPORT_NAME. Keep a place holder variable name_event instead of removing the name recording code since it will be used by the new events. Link: https://lore.kernel.org/r/20200708111156.24659-17-amir73il@gmail.com Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- fs/notify/fanotify/fanotify.c | 9 ++------- fs/notify/fsnotify.c | 2 +- include/linux/fsnotify.h | 6 ------ include/linux/fsnotify_backend.h | 4 +--- include/uapi/linux/fanotify.h | 1 - 5 files changed, 4 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index e417c64c365b..e6ba605732d7 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -425,6 +425,7 @@ static struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group, gfp_t gfp = GFP_KERNEL_ACCOUNT; struct inode *id = fanotify_fid_inode(mask, data, data_type, dir); const struct path *path = fsnotify_data_path(data, data_type); + bool name_event = false; /* * For queues with unlimited length lost events are not expected and @@ -442,12 +443,7 @@ static struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group, if (fanotify_is_perm_event(mask)) { event = fanotify_alloc_perm_event(path, gfp); - } else if (mask & FAN_DIR_MODIFY && !(WARN_ON_ONCE(!file_name))) { - /* - * For FAN_DIR_MODIFY event, we report the fid of the directory - * and the name of the modified entry. - * Allocate an fanotify_name_event struct and copy the name. - */ + } else if (name_event && file_name) { event = fanotify_alloc_name_event(id, fsid, file_name, gfp); } else if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) { event = fanotify_alloc_fid_event(id, fsid, gfp); @@ -528,7 +524,6 @@ static int fanotify_handle_event(struct fsnotify_group *group, u32 mask, BUILD_BUG_ON(FAN_MOVED_FROM != FS_MOVED_FROM); BUILD_BUG_ON(FAN_CREATE != FS_CREATE); BUILD_BUG_ON(FAN_DELETE != FS_DELETE); - BUILD_BUG_ON(FAN_DIR_MODIFY != FS_DIR_MODIFY); BUILD_BUG_ON(FAN_DELETE_SELF != FS_DELETE_SELF); BUILD_BUG_ON(FAN_MOVE_SELF != FS_MOVE_SELF); BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD); diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index c4ac4d13e10f..f12a554be3f0 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -393,7 +393,7 @@ static __init int fsnotify_init(void) { int ret; - BUILD_BUG_ON(HWEIGHT32(ALL_FSNOTIFY_BITS) != 26); + BUILD_BUG_ON(HWEIGHT32(ALL_FSNOTIFY_BITS) != 25); ret = init_srcu_struct(&fsnotify_mark_srcu); if (ret) diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 316c9b820517..9b2566d273a9 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -30,12 +30,6 @@ static inline void fsnotify_name(struct inode *dir, __u32 mask, const struct qstr *name, u32 cookie) { fsnotify(dir, mask, child, FSNOTIFY_EVENT_INODE, name, cookie); - /* - * Send another flavor of the event without child inode data and - * without the specific event type (e.g. FS_CREATE|FS_IS_DIR). - * The name is relative to the dir inode the event is reported to. 
- */ - fsnotify(dir, FS_DIR_MODIFY, dir, FSNOTIFY_EVENT_INODE, name, 0); } static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry, diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 0de130cbf72d..94a4ff3d5bbe 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -47,7 +47,6 @@ #define FS_OPEN_PERM 0x00010000 /* open event in an permission hook */ #define FS_ACCESS_PERM 0x00020000 /* access event in a permissions hook */ #define FS_OPEN_EXEC_PERM 0x00040000 /* open/exec event in a permission hook */ -#define FS_DIR_MODIFY 0x00080000 /* Directory entry was modified */ #define FS_EXCL_UNLINK 0x04000000 /* do not send events if object is unlinked */ /* This inode cares about things that happen to its children. Always set for @@ -67,8 +66,7 @@ * The watching parent may get an FS_ATTRIB|FS_EVENT_ON_CHILD event * when a directory entry inside a child subdir changes. */ -#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE | \ - FS_DIR_MODIFY) +#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE) #define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM | \ FS_OPEN_EXEC_PERM) diff --git a/include/uapi/linux/fanotify.h b/include/uapi/linux/fanotify.h index a88c7c6d0692..7f2f17eacbf9 100644 --- a/include/uapi/linux/fanotify.h +++ b/include/uapi/linux/fanotify.h @@ -24,7 +24,6 @@ #define FAN_OPEN_PERM 0x00010000 /* File open in perm check */ #define FAN_ACCESS_PERM 0x00020000 /* File accessed in perm check */ #define FAN_OPEN_EXEC_PERM 0x00040000 /* File open/exec in perm check */ -#define FAN_DIR_MODIFY 0x00080000 /* Directory entry was modified */ #define FAN_EVENT_ON_CHILD 0x08000000 /* Interested in child events */ -- cgit v1.2.3 From d809daf1b6add51eec001bf60b17885d697a299d Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Thu, 16 Jul 2020 11:42:12 +0300 Subject: fanotify: generalize test for FAN_REPORT_FID As preparation for new flags that report fids, define a bit set of flags for a group reporting fids, currently containing the only bit FAN_REPORT_FID. Link: https://lore.kernel.org/r/20200716084230.30611-5-amir73il@gmail.com Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- fs/notify/fanotify/fanotify.c | 10 ++++++---- fs/notify/fanotify/fanotify_user.c | 12 ++++++------ include/linux/fanotify.h | 6 ++++-- 3 files changed, 16 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index 3dc71a8e795a..b8c04a6f04c5 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -207,13 +207,14 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group, __u32 test_mask, user_mask = FANOTIFY_OUTGOING_EVENTS | FANOTIFY_EVENT_FLAGS; const struct path *path = fsnotify_data_path(data, data_type); + unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS); struct fsnotify_mark *mark; int type; pr_debug("%s: report_mask=%x mask=%x data=%p data_type=%d\n", __func__, iter_info->report_mask, event_mask, data, data_type); - if (!FAN_GROUP_FLAG(group, FAN_REPORT_FID)) { + if (!fid_mode) { /* Do we have path to open a file descriptor? */ if (!path) return 0; @@ -264,7 +265,7 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group, * fanotify_alloc_event() when group is reporting fid as indication * that event happened on child. 
*/ - if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) { + if (fid_mode) { /* Do not report event flags without any event */ if (!(test_mask & ~FANOTIFY_EVENT_FLAGS)) return 0; @@ -424,6 +425,7 @@ static struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group, gfp_t gfp = GFP_KERNEL_ACCOUNT; struct inode *id = fanotify_fid_inode(mask, data, data_type, dir); const struct path *path = fsnotify_data_path(data, data_type); + unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS); bool name_event = false; /* @@ -444,7 +446,7 @@ static struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group, event = fanotify_alloc_perm_event(path, gfp); } else if (name_event && file_name) { event = fanotify_alloc_name_event(id, fsid, file_name, gfp); - } else if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) { + } else if (fid_mode) { event = fanotify_alloc_fid_event(id, fsid, gfp); } else { event = fanotify_alloc_path_event(path, gfp); @@ -551,7 +553,7 @@ static int fanotify_handle_event(struct fsnotify_group *group, u32 mask, return 0; } - if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) { + if (FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS)) { fsid = fanotify_get_fsid(iter_info); /* Racing with mark destruction or creation? */ if (!fsid.val[0] && !fsid.val[1]) diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index c9a824e5c045..1e04caf8d6ba 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -100,7 +100,7 @@ static struct fanotify_event *get_one_event(struct fsnotify_group *group, if (fsnotify_notify_queue_is_empty(group)) goto out; - if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) { + if (FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS)) { event_size += fanotify_event_info_len( FANOTIFY_E(fsnotify_peek_first_event(group))); } @@ -882,7 +882,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) return -EINVAL; } - if ((flags & FAN_REPORT_FID) && + if ((flags & FANOTIFY_FID_BITS) && (flags & FANOTIFY_CLASS_BITS) != FAN_CLASS_NOTIF) return -EINVAL; @@ -1040,7 +1040,7 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask, __kernel_fsid_t __fsid, *fsid = NULL; u32 valid_mask = FANOTIFY_EVENTS | FANOTIFY_EVENT_FLAGS; unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS; - unsigned int obj_type; + unsigned int obj_type, fid_mode; int ret; pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n", @@ -1113,9 +1113,9 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask, * inode events are not supported on a mount mark, because they do not * carry enough information (i.e. path) to be filtered by mount point. 
*/ + fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS); if (mask & FANOTIFY_INODE_EVENTS && - (!FAN_GROUP_FLAG(group, FAN_REPORT_FID) || - mark_type == FAN_MARK_MOUNT)) + (!fid_mode || mark_type == FAN_MARK_MOUNT)) goto fput_and_out; if (flags & FAN_MARK_FLUSH) { @@ -1140,7 +1140,7 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask, goto path_put_and_out; } - if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) { + if (fid_mode) { ret = fanotify_test_fid(&path, &__fsid); if (ret) goto path_put_and_out; diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h index b79fa9bb7359..bbbee11d2521 100644 --- a/include/linux/fanotify.h +++ b/include/linux/fanotify.h @@ -18,8 +18,10 @@ #define FANOTIFY_CLASS_BITS (FAN_CLASS_NOTIF | FAN_CLASS_CONTENT | \ FAN_CLASS_PRE_CONTENT) -#define FANOTIFY_INIT_FLAGS (FANOTIFY_CLASS_BITS | \ - FAN_REPORT_TID | FAN_REPORT_FID | \ +#define FANOTIFY_FID_BITS (FAN_REPORT_FID) + +#define FANOTIFY_INIT_FLAGS (FANOTIFY_CLASS_BITS | FANOTIFY_FID_BITS | \ + FAN_REPORT_TID | \ FAN_CLOEXEC | FAN_NONBLOCK | \ FAN_UNLIMITED_QUEUE | FAN_UNLIMITED_MARKS) -- cgit v1.2.3 From 6ba8d7107f27c1bde60a80bc5def027979af3e8e Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Thu, 16 Jul 2020 11:42:16 +0300 Subject: fsnotify: add object type "child" to object type iterator The object type iterator is used to collect all the marks of a specific group that have interest in an event. It is used by fanotify to get a single handle_event callback when an event has a match to either of inode/sb/mount marks of the group. The nature of fsnotify events is that they are associated with at most one sb at most one mount and at most one inode. When a parent and child are both watching, two events are sent to backend, one associated to parent inode and one associated to the child inode. This results in duplicate events in fanotify, which usually get merged before user reads them, but this is sub-optimal. It would be better if the same event is sent to backend with an object type iterator that has both the child inode and its parent, and let the backend decide if the event should be reported once (fanotify) or twice (inotify). 
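A rough sketch of what the combined iterator buys a backend (not from this patch; the helper names follow the FSNOTIFY_ITER_FUNCS() pattern visible in the hunk below, and the function shape is deliberately simplified):

/* Hypothetical, simplified backend logic - illustration only. */
static void example_report(struct fsnotify_iter_info *iter_info)
{
	/* Marks of the victim inode and of the watching parent now show up
	 * in a single iteration, under different object types. */
	struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info);
	struct fsnotify_mark *child_mark = fsnotify_iter_child_mark(iter_info);

	if (inode_mark && child_mark) {
		/* fanotify-style: merge both interests into one event */
	} else if (inode_mark || child_mark) {
		/* inotify-style: one event per watching object, possibly two */
	}
}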
Link: https://lore.kernel.org/r/20200716084230.30611-9-amir73il@gmail.com Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- include/linux/fsnotify_backend.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 94a4ff3d5bbe..d22519001027 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -253,6 +253,7 @@ static inline const struct path *fsnotify_data_path(const void *data, enum fsnotify_obj_type { FSNOTIFY_OBJ_TYPE_INODE, + FSNOTIFY_OBJ_TYPE_CHILD, FSNOTIFY_OBJ_TYPE_VFSMOUNT, FSNOTIFY_OBJ_TYPE_SB, FSNOTIFY_OBJ_TYPE_COUNT, @@ -260,6 +261,7 @@ enum fsnotify_obj_type { }; #define FSNOTIFY_OBJ_TYPE_INODE_FL (1U << FSNOTIFY_OBJ_TYPE_INODE) +#define FSNOTIFY_OBJ_TYPE_CHILD_FL (1U << FSNOTIFY_OBJ_TYPE_CHILD) #define FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL (1U << FSNOTIFY_OBJ_TYPE_VFSMOUNT) #define FSNOTIFY_OBJ_TYPE_SB_FL (1U << FSNOTIFY_OBJ_TYPE_SB) #define FSNOTIFY_OBJ_ALL_TYPES_MASK ((1U << FSNOTIFY_OBJ_TYPE_COUNT) - 1) @@ -304,6 +306,7 @@ static inline struct fsnotify_mark *fsnotify_iter_##name##_mark( \ } FSNOTIFY_ITER_FUNCS(inode, INODE) +FSNOTIFY_ITER_FUNCS(child, CHILD) FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT) FSNOTIFY_ITER_FUNCS(sb, SB) -- cgit v1.2.3 From 8b7beaf9f185249f29912b5e2d7bc4147c5c2a6a Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Mon, 27 Jul 2020 13:43:39 -0600 Subject: PCI: Add Intel QuickAssist device IDs Add device IDs for the following Intel QuickAssist devices: DH895XCC, C3XXX and C62X. The defines in this patch are going to be referenced in two independent drivers, qat and vfio-pci. Signed-off-by: Giovanni Cabiddu Acked-by: Bjorn Helgaas Reviewed-by: Fiona Trahe Reviewed-by: Andy Shevchenko Signed-off-by: Alex Williamson --- include/linux/pci_ids.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include') diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 0ad57693f392..f3166b1425ca 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2659,6 +2659,8 @@ #define PCI_DEVICE_ID_INTEL_80332_1 0x0332 #define PCI_DEVICE_ID_INTEL_80333_0 0x0370 #define PCI_DEVICE_ID_INTEL_80333_1 0x0372 +#define PCI_DEVICE_ID_INTEL_QAT_DH895XCC 0x0435 +#define PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF 0x0443 #define PCI_DEVICE_ID_INTEL_82375 0x0482 #define PCI_DEVICE_ID_INTEL_82424 0x0483 #define PCI_DEVICE_ID_INTEL_82378 0x0484 @@ -2708,6 +2710,8 @@ #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI 0x1577 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE 0x1578 #define PCI_DEVICE_ID_INTEL_80960_RP 0x1960 +#define PCI_DEVICE_ID_INTEL_QAT_C3XXX 0x19e2 +#define PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF 0x19e3 #define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21 #define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30 #define PCI_DEVICE_ID_INTEL_IOAT 0x1a38 @@ -2924,6 +2928,8 @@ #define PCI_DEVICE_ID_INTEL_IOAT_JSF7 0x3717 #define PCI_DEVICE_ID_INTEL_IOAT_JSF8 0x3718 #define PCI_DEVICE_ID_INTEL_IOAT_JSF9 0x3719 +#define PCI_DEVICE_ID_INTEL_QAT_C62X 0x37c8 +#define PCI_DEVICE_ID_INTEL_QAT_C62X_VF 0x37c9 #define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14 #define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16 #define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18 -- cgit v1.2.3 From 2c2b0d880f1b4c01f30e14242977b82fa527342d Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Thu, 23 Jul 2020 23:09:57 -0400 Subject: drm/amdkfd: Add thermal throttling SMI event Add support for reporting thermal throttling events through SMI. 
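For context, a tiny userspace parse of one such event line (format per the kfd_smi_events.c hunk further below: event id, throttle bitmask and interrupt count, all hex; how the event fd is obtained via the KFD SMI ioctl is out of scope here, and the sample values are made up):

#include <stdio.h>

int main(void)
{
	/* example line as it might be read() from the SMI event fd */
	const char *line = "2 1:2a\n";
	unsigned int event, bitmask;
	unsigned long long count;

	if (sscanf(line, "%x %x:%llx", &event, &bitmask, &count) == 3)
		printf("event %#x: throttle mask %#x, %llu throttle interrupts so far\n",
		       event, bitmask, count);
	return 0;
}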
Also, add a counter to count the number of throttling interrupts observed and report the count in the SMI event message. Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 4 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 1 + drivers/gpu/drm/amd/amdkfd/kfd_device.c | 7 +++ drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c | 67 +++++++++++++++++++------- drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h | 2 + drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 1 + drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 1 + drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 1 + drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 5 ++ include/uapi/linux/kfd_ioctl.h | 3 +- 10 files changed, 74 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index a0ea663ecdbc..92790db5edc0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -789,4 +789,8 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd) { } + +void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask) +{ +} #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index ffe149aafc39..a10507ecb750 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -270,5 +270,6 @@ int kgd2kfd_resume_mm(struct mm_struct *mm); int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm, struct dma_fence *fence); void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd); +void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask); #endif /* AMDGPU_AMDKFD_H_INCLUDED */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 4bfedaab183f..d5e790f046b4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -29,6 +29,7 @@ #include "cwsr_trap_handler.h" #include "kfd_iommu.h" #include "amdgpu_amdkfd.h" +#include "kfd_smi_events.h" #define MQD_SIZE_ALIGNED 768 @@ -1245,6 +1246,12 @@ void kfd_dec_compute_active(struct kfd_dev *kfd) WARN_ONCE(count < 0, "Compute profile ref. 
count error"); } +void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask) +{ + if (kfd) + kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask); +} + #if defined(CONFIG_DEBUG_FS) /* This function will send a package to HIQ to hang the HWS diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c index 7b348bf9df21..86c2c3e97944 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c @@ -24,6 +24,7 @@ #include #include #include +#include "amdgpu.h" #include "amdgpu_vm.h" #include "kfd_priv.h" #include "kfd_smi_events.h" @@ -148,6 +149,54 @@ static int kfd_smi_ev_release(struct inode *inode, struct file *filep) return 0; } +static void add_event_to_kfifo(struct kfd_dev *dev, unsigned long long smi_event, + char *event_msg, int len) +{ + struct kfd_smi_client *client; + + rcu_read_lock(); + + list_for_each_entry_rcu(client, &dev->smi_clients, list) { + if (!(READ_ONCE(client->events) & smi_event)) + continue; + spin_lock(&client->lock); + if (kfifo_avail(&client->fifo) >= len) { + kfifo_in(&client->fifo, event_msg, len); + wake_up_all(&client->wait_queue); + } else { + pr_debug("smi_event(EventID: %llu): no space left\n", + smi_event); + } + spin_unlock(&client->lock); + } + + rcu_read_unlock(); +} + +void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev, + uint32_t throttle_bitmask) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)dev->kgd; + /* + * ThermalThrottle msg = throttle_bitmask(8): + * thermal_interrupt_count(16): + * 16 bytes event + 1 byte space + 8 byte throttle_bitmask + + * 1 byte : + 16 byte thermal_interupt_counter + 1 byte \n + + * 1 byte \0 = 44 + */ + char fifo_in[44]; + int len; + + if (list_empty(&dev->smi_clients)) + return; + + len = snprintf(fifo_in, 44, "%x %x:%llx\n", + KFD_SMI_EVENT_THERMAL_THROTTLE, throttle_bitmask, + atomic64_read(&adev->smu.throttle_int_counter)); + + add_event_to_kfifo(dev, KFD_SMI_EVENT_THERMAL_THROTTLE, fifo_in, len); +} + void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid) { struct amdgpu_device *adev = (struct amdgpu_device *)dev->kgd; @@ -156,7 +205,6 @@ void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid) /* 16 bytes event + 1 byte space + 25 bytes msg + 1 byte \n = 43 */ char fifo_in[43]; - struct kfd_smi_client *client; int len; if (list_empty(&dev->smi_clients)) @@ -171,22 +219,7 @@ void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid) len = snprintf(fifo_in, 43, "%x %x:%s\n", KFD_SMI_EVENT_VMFAULT, task_info.pid, task_info.task_name); - rcu_read_lock(); - - list_for_each_entry_rcu(client, &dev->smi_clients, list) { - if (!(READ_ONCE(client->events) & KFD_SMI_EVENT_VMFAULT)) - continue; - spin_lock(&client->lock); - if (kfifo_avail(&client->fifo) >= len) { - kfifo_in(&client->fifo, fifo_in, len); - wake_up_all(&client->wait_queue); - } - else - pr_debug("smi_event(vmfault): no space left\n"); - spin_unlock(&client->lock); - } - - rcu_read_unlock(); + add_event_to_kfifo(dev, KFD_SMI_EVENT_VMFAULT, fifo_in, len); } int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h index a9cb218fef96..15537b2cccb5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h @@ -25,5 +25,7 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd); void kfd_smi_event_update_vmfault(struct kfd_dev 
*dev, uint16_t pasid); +void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev, + uint32_t throttle_bitmask); #endif diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 0eeccf3341a3..7d9c40ad5780 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -640,6 +640,7 @@ static int smu_sw_init(void *handle) mutex_init(&smu->message_lock); INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn); + atomic64_set(&smu->throttle_int_counter, 0); smu->watermarks_bitmap = 0; smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index 3b9182c8c53f..f13979687b9e 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -2251,6 +2251,7 @@ static void arcturus_log_thermal_throttling_event(struct smu_context *smu) dev_warn(adev->dev, "WARN: GPU thermal throttling temperature reached, expect performance decrease. %s.\n", log_buf); + kgd2kfd_smi_event_throttle(smu->adev->kfd.dev, throttler_status); } static const struct pptable_funcs arcturus_ppt_funcs = { diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 28312d6dc187..b57b10406390 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -446,6 +446,7 @@ struct smu_context bool dc_controlled_by_gpio; struct work_struct throttling_logging_work; + atomic64_t throttle_int_counter; }; struct i2c_adapter; diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index fd82402065e6..a9453ec01619 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -1311,6 +1311,11 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev, smu_v11_0_ack_ac_dc_interrupt(&adev->smu); break; case 0x7: + /* + * Increment the throttle interrupt counter + */ + atomic64_inc(&smu->throttle_int_counter); + if (!atomic_read(&adev->throttling_logging_enabled)) return 0; diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index f738c3b53f4e..df6c7a43aadc 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -450,7 +450,8 @@ struct kfd_ioctl_import_dmabuf_args { * KFD SMI(System Management Interface) events */ /* Event type (defined by bitmask) */ -#define KFD_SMI_EVENT_VMFAULT 0x0000000000000001 +#define KFD_SMI_EVENT_VMFAULT 0x0000000000000001 +#define KFD_SMI_EVENT_THERMAL_THROTTLE 0x0000000000000002 struct kfd_ioctl_smi_events_args { __u32 gpuid; /* to KFD */ -- cgit v1.2.3 From 82ace1efb3cb1d49a1681cc6e31156047d5ae1f2 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Wed, 22 Jul 2020 15:58:44 +0300 Subject: fsnotify: create helper fsnotify_inode() Simple helper to consolidate boilerplate code. 
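As an illustration of the intended use (the caller name here is made up; only fsnotify_inode() itself comes from this patch), a caller that used to open-code the S_ISDIR check and the FSNOTIFY_EVENT_INODE call now collapses to a single line:

#include <linux/fsnotify.h>

/* Illustrative caller: FS_ISDIR is added by the helper for directories. */
static void example_notify_attrib_change(struct inode *inode)
{
        fsnotify_inode(inode, FS_ATTRIB);
}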
Link: https://lore.kernel.org/r/20200722125849.17418-5-amir73il@gmail.com Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- fs/kernfs/file.c | 6 ++---- fs/notify/fsnotify.c | 2 +- include/linux/fsnotify.h | 26 +++++++++++--------------- kernel/trace/trace.c | 3 +-- 4 files changed, 15 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index 5b1468bc509e..1d185bffc52f 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c @@ -910,10 +910,8 @@ repeat: kernfs_put(parent); } - if (!p_inode) { - fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE, - NULL, 0); - } + if (!p_inode) + fsnotify_inode(inode, FS_MODIFY); iput(inode); } diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index 0b0e01e04349..ba172b742de5 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -74,7 +74,7 @@ static void fsnotify_unmount_inodes(struct super_block *sb) iput(iput_inode); /* for each watch, send FS_UNMOUNT and then remove it */ - fsnotify(inode, FS_UNMOUNT, inode, FSNOTIFY_EVENT_INODE, NULL, 0); + fsnotify_inode(inode, FS_UNMOUNT); fsnotify_inode_delete(inode); diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 9b2566d273a9..f9db7c9b3ef1 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -38,6 +38,14 @@ static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry, fsnotify_name(dir, mask, d_inode(dentry), &dentry->d_name, 0); } +static inline void fsnotify_inode(struct inode *inode, __u32 mask) +{ + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + + fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); +} + /* Notify this dentry's parent about a child's events. */ static inline int fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, int data_type) @@ -105,12 +113,7 @@ static inline int fsnotify_perm(struct file *file, int mask) */ static inline void fsnotify_link_count(struct inode *inode) { - __u32 mask = FS_ATTRIB; - - if (S_ISDIR(inode->i_mode)) - mask |= FS_ISDIR; - - fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); + fsnotify_inode(inode, FS_ATTRIB); } /* @@ -125,7 +128,6 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, u32 fs_cookie = fsnotify_get_cookie(); __u32 old_dir_mask = FS_MOVED_FROM; __u32 new_dir_mask = FS_MOVED_TO; - __u32 mask = FS_MOVE_SELF; const struct qstr *new_name = &moved->d_name; if (old_dir == new_dir) @@ -134,7 +136,6 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, if (isdir) { old_dir_mask |= FS_ISDIR; new_dir_mask |= FS_ISDIR; - mask |= FS_ISDIR; } fsnotify_name(old_dir, old_dir_mask, source, old_name, fs_cookie); @@ -144,7 +145,7 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, fsnotify_link_count(target); if (source) - fsnotify(source, mask, source, FSNOTIFY_EVENT_INODE, NULL, 0); + fsnotify_inode(source, FS_MOVE_SELF); audit_inode_child(new_dir, moved, AUDIT_TYPE_CHILD_CREATE); } @@ -169,12 +170,7 @@ static inline void fsnotify_vfsmount_delete(struct vfsmount *mnt) */ static inline void fsnotify_inoderemove(struct inode *inode) { - __u32 mask = FS_DELETE_SELF; - - if (S_ISDIR(inode->i_mode)) - mask |= FS_ISDIR; - - fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); + fsnotify_inode(inode, FS_DELETE_SELF); __fsnotify_inode_delete(inode); } diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index bb62269724d5..0c655c039506 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1543,8 
+1543,7 @@ static void latency_fsnotify_workfn(struct work_struct *work) { struct trace_array *tr = container_of(work, struct trace_array, fsnotify_work); - fsnotify(tr->d_max_latency->d_inode, FS_MODIFY, - tr->d_max_latency->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0); + fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY); } static void latency_fsnotify_workfn_irq(struct irq_work *iwork) -- cgit v1.2.3 From 40a100d3adc1ad7f0a34875468c499fcecd20ba4 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Wed, 22 Jul 2020 15:58:46 +0300 Subject: fsnotify: pass dir and inode arguments to fsnotify() The arguments of fsnotify() are overloaded and mean different things for different event types. Replace the to_tell argument with separate arguments @dir and @inode, because we may be sending to both dir and child. Using the @data argument to pass the child is not enough, because dirent events pass this argument (for audit), but we do not report to child. Document the new fsnotify() function arguments. Link: https://lore.kernel.org/r/20200722125849.17418-7-amir73il@gmail.com Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- fs/kernfs/file.c | 5 ++-- fs/notify/fsnotify.c | 54 ++++++++++++++++++++++++++++------------ include/linux/fsnotify.h | 9 ++++--- include/linux/fsnotify_backend.h | 10 +++++--- 4 files changed, 52 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index 1d185bffc52f..f277d023ebcd 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c @@ -902,8 +902,9 @@ repeat: if (parent) { p_inode = ilookup(info->sb, kernfs_ino(parent)); if (p_inode) { - fsnotify(p_inode, FS_MODIFY | FS_EVENT_ON_CHILD, - inode, FSNOTIFY_EVENT_INODE, &name, 0); + fsnotify(FS_MODIFY | FS_EVENT_ON_CHILD, + inode, FSNOTIFY_EVENT_INODE, + p_inode, &name, inode, 0); iput(p_inode); } diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index ba172b742de5..4a762c8c4a29 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -152,7 +152,7 @@ int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, { struct inode *inode = d_inode(dentry); struct dentry *parent; - struct inode *p_inode; + struct inode *p_inode = NULL; struct name_snapshot name; struct qstr *file_name = NULL; int ret = 0; @@ -171,14 +171,13 @@ int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, WARN_ON_ONCE(inode != fsnotify_data_inode(data, data_type)); /* Notify both parent and child with child name info */ - inode = p_inode; take_dentry_name_snapshot(&name, dentry); file_name = &name.name; mask |= FS_EVENT_ON_CHILD; } notify: - ret = fsnotify(inode, mask, data, data_type, file_name, 0); + ret = fsnotify(mask, data, data_type, p_inode, file_name, inode, 0); if (file_name) release_dentry_name_snapshot(&name); @@ -312,18 +311,31 @@ static void fsnotify_iter_next(struct fsnotify_iter_info *iter_info) } /* - * This is the main call to fsnotify. The VFS calls into hook specific functions - * in linux/fsnotify.h. Those functions then in turn call here. Here will call - * out to all of the registered fsnotify_group. Those groups can then use the - * notification event in whatever means they feel necessary. + * fsnotify - This is the main call to fsnotify. + * + * The VFS calls into hook specific functions in linux/fsnotify.h. + * Those functions then in turn call here. Here will call out to all of the + * registered fsnotify_group. Those groups can then use the notification event + * in whatever means they feel necessary. 
+ * + * @mask: event type and flags + * @data: object that event happened on + * @data_type: type of object for fanotify_data_XXX() accessors + * @dir: optional directory associated with event - + * if @file_name is not NULL, this is the directory that + * @file_name is relative to + * @file_name: optional file name associated with event + * @inode: optional inode associated with event - + * either @dir or @inode must be non-NULL. + * if both are non-NULL event may be reported to both. + * @cookie: inotify rename cookie */ -int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_type, - const struct qstr *file_name, u32 cookie) +int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir, + const struct qstr *file_name, struct inode *inode, u32 cookie) { const struct path *path = fsnotify_data_path(data, data_type); struct fsnotify_iter_info iter_info = {}; - struct super_block *sb = to_tell->i_sb; - struct inode *dir = file_name ? to_tell : NULL; + struct super_block *sb; struct mount *mnt = NULL; struct inode *child = NULL; int ret = 0; @@ -332,8 +344,18 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_type, if (path) mnt = real_mount(path->mnt); - if (mask & FS_EVENT_ON_CHILD) - child = fsnotify_data_inode(data, data_type); + if (!inode) { + /* Dirent event - report on TYPE_INODE to dir */ + inode = dir; + } else if (mask & FS_EVENT_ON_CHILD) { + /* + * Event on child - report on TYPE_INODE to dir + * and on TYPE_CHILD to child. + */ + child = inode; + inode = dir; + } + sb = inode->i_sb; /* * Optimization: srcu_read_lock() has a memory barrier which can @@ -342,12 +364,12 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_type, * SRCU because we have no references to any objects and do not * need SRCU to keep them "alive". */ - if (!to_tell->i_fsnotify_marks && !sb->s_fsnotify_marks && + if (!inode->i_fsnotify_marks && !sb->s_fsnotify_marks && (!mnt || !mnt->mnt_fsnotify_marks) && (!child || !child->i_fsnotify_marks)) return 0; - marks_mask = to_tell->i_fsnotify_mask | sb->s_fsnotify_mask; + marks_mask = inode->i_fsnotify_mask | sb->s_fsnotify_mask; if (mnt) marks_mask |= mnt->mnt_fsnotify_mask; if (child) @@ -365,7 +387,7 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_type, iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu); iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] = - fsnotify_first_mark(&to_tell->i_fsnotify_marks); + fsnotify_first_mark(&inode->i_fsnotify_marks); iter_info.marks[FSNOTIFY_OBJ_TYPE_SB] = fsnotify_first_mark(&sb->s_fsnotify_marks); if (mnt) { diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index f9db7c9b3ef1..99922cac4fcd 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -23,13 +23,14 @@ * have changed (i.e. renamed over). * * Unlike fsnotify_parent(), the event will be reported regardless of the - * FS_EVENT_ON_CHILD mask on the parent inode. + * FS_EVENT_ON_CHILD mask on the parent inode and will not be reported if only + * the child is interested and not the parent. 
*/ static inline void fsnotify_name(struct inode *dir, __u32 mask, struct inode *child, const struct qstr *name, u32 cookie) { - fsnotify(dir, mask, child, FSNOTIFY_EVENT_INODE, name, cookie); + fsnotify(mask, child, FSNOTIFY_EVENT_INODE, dir, name, NULL, cookie); } static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry, @@ -43,7 +44,7 @@ static inline void fsnotify_inode(struct inode *inode, __u32 mask) if (S_ISDIR(inode->i_mode)) mask |= FS_ISDIR; - fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); + fsnotify(mask, inode, FSNOTIFY_EVENT_INODE, NULL, NULL, inode, 0); } /* Notify this dentry's parent about a child's events. */ @@ -61,7 +62,7 @@ static inline int fsnotify_parent(struct dentry *dentry, __u32 mask, return __fsnotify_parent(dentry, mask, data, data_type); notify_child: - return fsnotify(inode, mask, data, data_type, NULL, 0); + return fsnotify(mask, data, data_type, NULL, NULL, inode, 0); } /* diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index d22519001027..152520635bd3 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -387,8 +387,9 @@ struct fsnotify_mark { /* called from the vfs helpers */ /* main fsnotify call to send events */ -extern int fsnotify(struct inode *to_tell, __u32 mask, const void *data, - int data_type, const struct qstr *name, u32 cookie); +extern int fsnotify(__u32 mask, const void *data, int data_type, + struct inode *dir, const struct qstr *name, + struct inode *inode, u32 cookie); extern int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, int data_type); extern void __fsnotify_inode_delete(struct inode *inode); @@ -545,8 +546,9 @@ static inline void fsnotify_init_event(struct fsnotify_event *event, #else -static inline int fsnotify(struct inode *to_tell, __u32 mask, const void *data, - int data_type, const struct qstr *name, u32 cookie) +static inline int fsnotify(__u32 mask, const void *data, int data_type, + struct inode *dir, const struct qstr *name, + struct inode *inode, u32 cookie) { return 0; } -- cgit v1.2.3 From 9b93f33105f5f9bd3d016ff870eb6000c9d89eff Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Thu, 16 Jul 2020 11:42:23 +0300 Subject: fsnotify: send event with parent/name info to sb/mount/non-dir marks Similar to events "on child" to watching directory, send event with parent/name info if sb/mount/non-dir marks are interested in parent/name info. The FS_EVENT_ON_CHILD flag can be set on sb/mount/non-dir marks to specify interest in parent/name info for events on non-directory inodes. Events on "orphan" children (disconnected dentries) are sent without parent/name info. Events on directories are sent with parent/name info only if the parent directory is watching. After this change, even groups that do not subscribe to events on children could get an event with mark iterator type TYPE_CHILD and without mark iterator type TYPE_INODE if fanotify has marks on the same objects. dnotify and inotify event handlers can already cope with that situation. audit does not subscribe to events that are possible on child, so won't get to this situation. nfsd does not access the marks iterator from its event handler at the moment, so it is not affected. This is a bit too fragile, so we should prepare all groups to cope with mark type TYPE_CHILD preferably using a generic helper. 
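One possible shape for such a generic helper, sketched here purely as a suggestion (the name is hypothetical and nothing like it is added by this patch):

#include <linux/fsnotify_backend.h>

/* Hypothetical helper: fall back to the child mark when no inode mark matched. */
static inline struct fsnotify_mark *
example_iter_inode_or_child_mark(struct fsnotify_iter_info *iter_info)
{
        struct fsnotify_mark *mark = fsnotify_iter_inode_mark(iter_info);

        return mark ? mark : fsnotify_iter_child_mark(iter_info);
}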
Link: https://lore.kernel.org/r/20200716084230.30611-16-amir73il@gmail.com Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- fs/notify/fsnotify.c | 74 +++++++++++++++++++++++++++++++++------- include/linux/fsnotify.h | 10 ++++-- include/linux/fsnotify_backend.h | 32 ++++++++++++++--- 3 files changed, 97 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index 4a762c8c4a29..494d5d70323f 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -142,38 +142,81 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode) spin_unlock(&inode->i_lock); } +/* Are inode/sb/mount interested in parent and name info with this event? */ +static bool fsnotify_event_needs_parent(struct inode *inode, struct mount *mnt, + __u32 mask) +{ + __u32 marks_mask = 0; + + /* We only send parent/name to inode/sb/mount for events on non-dir */ + if (mask & FS_ISDIR) + return false; + + /* Did either inode/sb/mount subscribe for events with parent/name? */ + marks_mask |= fsnotify_parent_needed_mask(inode->i_fsnotify_mask); + marks_mask |= fsnotify_parent_needed_mask(inode->i_sb->s_fsnotify_mask); + if (mnt) + marks_mask |= fsnotify_parent_needed_mask(mnt->mnt_fsnotify_mask); + + /* Did they subscribe for this event with parent/name info? */ + return mask & marks_mask; +} + /* * Notify this dentry's parent about a child's events with child name info - * if parent is watching. - * Notify only the child without name info if parent is not watching. + * if parent is watching or if inode/sb/mount are interested in events with + * parent and name info. + * + * Notify only the child without name info if parent is not watching and + * inode/sb/mount are not interested in events with parent and name info. */ int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, int data_type) { + const struct path *path = fsnotify_data_path(data, data_type); + struct mount *mnt = path ? real_mount(path->mnt) : NULL; struct inode *inode = d_inode(dentry); struct dentry *parent; + bool parent_watched = dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED; + __u32 p_mask; struct inode *p_inode = NULL; struct name_snapshot name; struct qstr *file_name = NULL; int ret = 0; + /* + * Do inode/sb/mount care about parent and name info on non-dir? + * Do they care about any event at all? + */ + if (!inode->i_fsnotify_marks && !inode->i_sb->s_fsnotify_marks && + (!mnt || !mnt->mnt_fsnotify_marks) && !parent_watched) + return 0; + parent = NULL; - if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED)) + if (!parent_watched && !fsnotify_event_needs_parent(inode, mnt, mask)) goto notify; + /* Does parent inode care about events on children? */ parent = dget_parent(dentry); p_inode = parent->d_inode; - - if (unlikely(!fsnotify_inode_watches_children(p_inode))) { + p_mask = fsnotify_inode_watches_children(p_inode); + if (unlikely(parent_watched && !p_mask)) __fsnotify_update_child_dentry_flags(p_inode); - } else if (p_inode->i_fsnotify_mask & mask & ALL_FSNOTIFY_EVENTS) { + + /* + * Include parent/name in notification either if some notification + * groups require parent info (!parent_watched case) or the parent is + * interested in this event. 
+ */ + if (!parent_watched || (mask & p_mask & ALL_FSNOTIFY_EVENTS)) { /* When notifying parent, child should be passed as data */ WARN_ON_ONCE(inode != fsnotify_data_inode(data, data_type)); /* Notify both parent and child with child name info */ take_dentry_name_snapshot(&name, dentry); file_name = &name.name; - mask |= FS_EVENT_ON_CHILD; + if (parent_watched) + mask |= FS_EVENT_ON_CHILD; } notify: @@ -349,8 +392,8 @@ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir, inode = dir; } else if (mask & FS_EVENT_ON_CHILD) { /* - * Event on child - report on TYPE_INODE to dir - * and on TYPE_CHILD to child. + * Event on child - report on TYPE_INODE to dir if it is + * watching children and on TYPE_CHILD to child. */ child = inode; inode = dir; @@ -364,14 +407,17 @@ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir, * SRCU because we have no references to any objects and do not * need SRCU to keep them "alive". */ - if (!inode->i_fsnotify_marks && !sb->s_fsnotify_marks && + if (!sb->s_fsnotify_marks && (!mnt || !mnt->mnt_fsnotify_marks) && + (!inode || !inode->i_fsnotify_marks) && (!child || !child->i_fsnotify_marks)) return 0; - marks_mask = inode->i_fsnotify_mask | sb->s_fsnotify_mask; + marks_mask = sb->s_fsnotify_mask; if (mnt) marks_mask |= mnt->mnt_fsnotify_mask; + if (inode) + marks_mask |= inode->i_fsnotify_mask; if (child) marks_mask |= child->i_fsnotify_mask; @@ -386,14 +432,16 @@ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir, iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu); - iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] = - fsnotify_first_mark(&inode->i_fsnotify_marks); iter_info.marks[FSNOTIFY_OBJ_TYPE_SB] = fsnotify_first_mark(&sb->s_fsnotify_marks); if (mnt) { iter_info.marks[FSNOTIFY_OBJ_TYPE_VFSMOUNT] = fsnotify_first_mark(&mnt->mnt_fsnotify_marks); } + if (inode) { + iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] = + fsnotify_first_mark(&inode->i_fsnotify_marks); + } if (child) { iter_info.marks[FSNOTIFY_OBJ_TYPE_CHILD] = fsnotify_first_mark(&child->i_fsnotify_marks); diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 99922cac4fcd..6e63f7e10da0 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -53,10 +53,16 @@ static inline int fsnotify_parent(struct dentry *dentry, __u32 mask, { struct inode *inode = d_inode(dentry); - if (S_ISDIR(inode->i_mode)) + if (S_ISDIR(inode->i_mode)) { mask |= FS_ISDIR; - if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED)) + /* sb/mount marks are not interested in name of directory */ + if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED)) + goto notify_child; + } + + /* disconnected dentry cannot notify parent */ + if (IS_ROOT(dentry)) goto notify_child; return __fsnotify_parent(dentry, mask, data, data_type); diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 152520635bd3..32104cfc27a5 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -49,8 +49,11 @@ #define FS_OPEN_EXEC_PERM 0x00040000 /* open/exec event in a permission hook */ #define FS_EXCL_UNLINK 0x04000000 /* do not send events if object is unlinked */ -/* This inode cares about things that happen to its children. Always set for - * dnotify and inotify. */ +/* + * Set on inode mark that cares about things that happen to its children. + * Always set for dnotify and inotify. + * Set on inode/sb/mount marks that care about parent/name info. 
+ */ #define FS_EVENT_ON_CHILD 0x08000000 #define FS_DN_RENAME 0x10000000 /* file renamed */ @@ -72,14 +75,22 @@ FS_OPEN_EXEC_PERM) /* - * This is a list of all events that may get sent to a parent based on fs event - * happening to inodes inside that directory. + * This is a list of all events that may get sent to a parent that is watching + * with flag FS_EVENT_ON_CHILD based on fs event on a child of that directory. */ #define FS_EVENTS_POSS_ON_CHILD (ALL_FSNOTIFY_PERM_EVENTS | \ FS_ACCESS | FS_MODIFY | FS_ATTRIB | \ FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | \ FS_OPEN | FS_OPEN_EXEC) +/* + * This is a list of all events that may get sent with the parent inode as the + * @to_tell argument of fsnotify(). + * It may include events that can be sent to an inode/sb/mount mark, but cannot + * be sent to a parent watching children. + */ +#define FS_EVENTS_POSS_TO_PARENT (FS_EVENTS_POSS_ON_CHILD) + /* Events that can be reported to backends */ #define ALL_FSNOTIFY_EVENTS (ALL_FSNOTIFY_DIRENT_EVENTS | \ FS_EVENTS_POSS_ON_CHILD | \ @@ -397,6 +408,19 @@ extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt); extern void fsnotify_sb_delete(struct super_block *sb); extern u32 fsnotify_get_cookie(void); +static inline __u32 fsnotify_parent_needed_mask(__u32 mask) +{ + /* FS_EVENT_ON_CHILD is set on marks that want parent/name info */ + if (!(mask & FS_EVENT_ON_CHILD)) + return 0; + /* + * This object might be watched by a mark that cares about parent/name + * info, does it care about the specific set of events that can be + * reported with parent/name info? + */ + return mask & FS_EVENTS_POSS_TO_PARENT; +} + static inline int fsnotify_inode_watches_children(struct inode *inode) { /* FS_EVENT_ON_CHILD is set if the inode may care */ -- cgit v1.2.3 From 79cb299c7e181fad683bf6191edb8224b2412512 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Thu, 16 Jul 2020 11:42:24 +0300 Subject: fsnotify: remove check that source dentry is positive Remove the unneeded check for positive source dentry in fsnotify_move(). fsnotify_move() hook is mostly called from vfs_rename() under lock_rename() and vfs_rename() starts with may_delete() test that verifies positive source dentry. The only other caller of fsnotify_move() - debugfs_rename() also verifies positive source. Link: https://lore.kernel.org/r/20200716084230.30611-17-amir73il@gmail.com Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- include/linux/fsnotify.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 6e63f7e10da0..f8acddcf54fb 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -150,9 +150,7 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, if (target) fsnotify_link_count(target); - - if (source) - fsnotify_inode(source, FS_MOVE_SELF); + fsnotify_inode(source, FS_MOVE_SELF); audit_inode_child(new_dir, moved, AUDIT_TYPE_CHILD_CREATE); } -- cgit v1.2.3 From 83b7a59896dd24015a34b7f00027f0ff3747972f Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Thu, 16 Jul 2020 11:42:26 +0300 Subject: fanotify: add basic support for FAN_REPORT_DIR_FID For now, the flag is mutually exclusive with FAN_REPORT_FID. Events include a single info record of type FAN_EVENT_INFO_TYPE_DFID with a directory file handle. 
For now, events are only reported for: - Directory modification events - Events on children of a watching directory - Events on directory objects Soon, we will add support for reporting the parent directory fid for events on non-directories with filesystem/mount mark and support for reporting both parent directory fid and child fid. Link: https://lore.kernel.org/r/20200716084230.30611-19-amir73il@gmail.com Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- fs/notify/fanotify/fanotify.c | 34 ++++++++++++++++-- fs/notify/fanotify/fanotify_user.c | 71 ++++++++++++++++++++++++++++++++------ include/linux/fanotify.h | 2 +- include/uapi/linux/fanotify.h | 11 +++--- 4 files changed, 101 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index 3baf93e998c1..fc2e1fab34af 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -223,7 +223,7 @@ out: static u32 fanotify_group_event_mask(struct fsnotify_group *group, struct fsnotify_iter_info *iter_info, u32 event_mask, const void *data, - int data_type) + int data_type, struct inode *dir) { __u32 marks_mask = 0, marks_ignored_mask = 0; __u32 test_mask, user_mask = FANOTIFY_OUTGOING_EVENTS | @@ -243,6 +243,10 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group, /* Path type events are only relevant for files and dirs */ if (!d_is_reg(path->dentry) && !d_can_lookup(path->dentry)) return 0; + } else if (!(fid_mode & FAN_REPORT_FID)) { + /* Do we have a directory inode to report? */ + if (!dir && !(event_mask & FS_ISDIR)) + return 0; } fsnotify_foreach_obj_type(type) { @@ -396,6 +400,28 @@ static struct inode *fanotify_fid_inode(u32 event_mask, const void *data, return fsnotify_data_inode(data, data_type); } +/* + * The inode to use as identifier when reporting dir fid depends on the event. + * Report the modified directory inode on dirent modification events. + * Report the "victim" inode if "victim" is a directory. + * Report the parent inode if "victim" is not a directory and event is + * reported to parent. + * Otherwise, do not report dir fid. + */ +static struct inode *fanotify_dfid_inode(u32 event_mask, const void *data, + int data_type, struct inode *dir) +{ + struct inode *inode = fsnotify_data_inode(data, data_type); + + if (event_mask & ALL_FSNOTIFY_DIRENT_EVENTS) + return dir; + + if (S_ISDIR(inode->i_mode)) + return inode; + + return dir; +} + static struct fanotify_event *fanotify_alloc_path_event(const struct path *path, gfp_t gfp) { @@ -491,10 +517,14 @@ static struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group, struct fanotify_event *event = NULL; gfp_t gfp = GFP_KERNEL_ACCOUNT; struct inode *id = fanotify_fid_inode(mask, data, data_type, dir); + struct inode *dirid = fanotify_dfid_inode(mask, data, data_type, dir); const struct path *path = fsnotify_data_path(data, data_type); unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS); bool name_event = false; + if ((fid_mode & FAN_REPORT_DIR_FID) && dirid) + id = dirid; + /* * For queues with unlimited length lost events are not expected and * can possibly have security implications. 
Avoid losing events when @@ -605,7 +635,7 @@ static int fanotify_handle_event(struct fsnotify_group *group, u32 mask, BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 19); mask = fanotify_group_event_mask(group, iter_info, mask, data, - data_type); + data_type, dir); if (!mask) return 0; diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 3842ef00b52e..e494400711c9 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -216,7 +216,7 @@ static int process_access_response(struct fsnotify_group *group, } static int copy_info_to_user(__kernel_fsid_t *fsid, struct fanotify_fh *fh, - const char *name, size_t name_len, + int info_type, const char *name, size_t name_len, char __user *buf, size_t count) { struct fanotify_event_info_fid info = { }; @@ -229,7 +229,7 @@ static int copy_info_to_user(__kernel_fsid_t *fsid, struct fanotify_fh *fh, pr_debug("%s: fh_len=%zu name_len=%zu, info_len=%zu, count=%zu\n", __func__, fh_len, name_len, info_len, count); - if (!fh_len || (name && !name_len)) + if (!fh_len) return 0; if (WARN_ON_ONCE(len < sizeof(info) || len > count)) @@ -239,8 +239,21 @@ static int copy_info_to_user(__kernel_fsid_t *fsid, struct fanotify_fh *fh, * Copy event info fid header followed by variable sized file handle * and optionally followed by variable sized filename. */ - info.hdr.info_type = name_len ? FAN_EVENT_INFO_TYPE_DFID_NAME : - FAN_EVENT_INFO_TYPE_FID; + switch (info_type) { + case FAN_EVENT_INFO_TYPE_FID: + case FAN_EVENT_INFO_TYPE_DFID: + if (WARN_ON_ONCE(name_len)) + return -EFAULT; + break; + case FAN_EVENT_INFO_TYPE_DFID_NAME: + if (WARN_ON_ONCE(!name || !name_len)) + return -EFAULT; + break; + default: + return -EFAULT; + } + + info.hdr.info_type = info_type; info.hdr.len = len; info.fsid = *fsid; if (copy_to_user(buf, &info, sizeof(info))) @@ -304,8 +317,10 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, struct fanotify_event_metadata metadata; struct path *path = fanotify_event_path(event); struct fanotify_info *info = fanotify_event_info(event); + unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS); struct file *f = NULL; int ret, fd = FAN_NOFD; + int info_type = 0; pr_debug("%s: group=%p event=%p\n", __func__, group, event); @@ -346,9 +361,10 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, /* Event info records order is: dir fid + name, child fid */ if (fanotify_event_dir_fh_len(event)) { + info_type = FAN_EVENT_INFO_TYPE_DFID_NAME; ret = copy_info_to_user(fanotify_event_fsid(event), fanotify_info_dir_fh(info), - fanotify_info_name(info), + info_type, fanotify_info_name(info), info->name_len, buf, count); if (ret < 0) return ret; @@ -358,9 +374,33 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, } if (fanotify_event_object_fh_len(event)) { + if (fid_mode == FAN_REPORT_FID || info_type) { + /* + * With only group flag FAN_REPORT_FID only type FID is + * reported. Second info record type is always FID. + */ + info_type = FAN_EVENT_INFO_TYPE_FID; + } else if ((event->mask & ALL_FSNOTIFY_DIRENT_EVENTS) || + (event->mask & FAN_ONDIR)) { + /* + * With group flag FAN_REPORT_DIR_FID, a single info + * record has type DFID for directory entry modification + * event and for event on a directory. + */ + info_type = FAN_EVENT_INFO_TYPE_DFID; + } else { + /* + * With group flags FAN_REPORT_DIR_FID|FAN_REPORT_FID, + * a single info record has type FID for event on a + * non-directory, when there is no directory to report. 
+ * For example, on FAN_DELETE_SELF event. + */ + info_type = FAN_EVENT_INFO_TYPE_FID; + } + ret = copy_info_to_user(fanotify_event_fsid(event), fanotify_event_object_fh(event), - NULL, 0, buf, count); + info_type, NULL, 0, buf, count); if (ret < 0) return ret; @@ -861,6 +901,8 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) struct fsnotify_group *group; int f_flags, fd; struct user_struct *user; + unsigned int fid_mode = flags & FANOTIFY_FID_BITS; + unsigned int class = flags & FANOTIFY_CLASS_BITS; pr_debug("%s: flags=%x event_f_flags=%x\n", __func__, flags, event_f_flags); @@ -887,10 +929,19 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) return -EINVAL; } - if ((flags & FANOTIFY_FID_BITS) && - (flags & FANOTIFY_CLASS_BITS) != FAN_CLASS_NOTIF) + if (fid_mode && class != FAN_CLASS_NOTIF) return -EINVAL; + /* Reporting either object fid or dir fid */ + switch (fid_mode) { + case 0: + case FAN_REPORT_FID: + case FAN_REPORT_DIR_FID: + break; + default: + return -EINVAL; + } + user = get_current_user(); if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) { free_uid(user); @@ -926,7 +977,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) group->fanotify_data.f_flags = event_f_flags; init_waitqueue_head(&group->fanotify_data.access_waitq); INIT_LIST_HEAD(&group->fanotify_data.access_list); - switch (flags & FANOTIFY_CLASS_BITS) { + switch (class) { case FAN_CLASS_NOTIF: group->priority = FS_PRIO_0; break; @@ -1236,7 +1287,7 @@ COMPAT_SYSCALL_DEFINE6(fanotify_mark, */ static int __init fanotify_user_setup(void) { - BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 8); + BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 9); BUILD_BUG_ON(HWEIGHT32(FANOTIFY_MARK_FLAGS) != 9); fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h index bbbee11d2521..4ddac97b2bf7 100644 --- a/include/linux/fanotify.h +++ b/include/linux/fanotify.h @@ -18,7 +18,7 @@ #define FANOTIFY_CLASS_BITS (FAN_CLASS_NOTIF | FAN_CLASS_CONTENT | \ FAN_CLASS_PRE_CONTENT) -#define FANOTIFY_FID_BITS (FAN_REPORT_FID) +#define FANOTIFY_FID_BITS (FAN_REPORT_FID | FAN_REPORT_DIR_FID) #define FANOTIFY_INIT_FLAGS (FANOTIFY_CLASS_BITS | FANOTIFY_FID_BITS | \ FAN_REPORT_TID | \ diff --git a/include/uapi/linux/fanotify.h b/include/uapi/linux/fanotify.h index 7f2f17eacbf9..21afebf77fd7 100644 --- a/include/uapi/linux/fanotify.h +++ b/include/uapi/linux/fanotify.h @@ -53,6 +53,7 @@ /* Flags to determine fanotify event format */ #define FAN_REPORT_TID 0x00000100 /* event->pid is thread id */ #define FAN_REPORT_FID 0x00000200 /* Report unique file id */ +#define FAN_REPORT_DIR_FID 0x00000400 /* Report unique directory id */ /* Deprecated - do not use this in programs and do not add new flags here! */ #define FAN_ALL_INIT_FLAGS (FAN_CLOEXEC | FAN_NONBLOCK | \ @@ -117,6 +118,7 @@ struct fanotify_event_metadata { #define FAN_EVENT_INFO_TYPE_FID 1 #define FAN_EVENT_INFO_TYPE_DFID_NAME 2 +#define FAN_EVENT_INFO_TYPE_DFID 3 /* Variable length info record following event metadata */ struct fanotify_event_info_header { @@ -126,10 +128,11 @@ struct fanotify_event_info_header { }; /* - * Unique file identifier info record. This is used both for - * FAN_EVENT_INFO_TYPE_FID records and for FAN_EVENT_INFO_TYPE_DFID_NAME - * records. For FAN_EVENT_INFO_TYPE_DFID_NAME there is additionally a null - * terminated name immediately after the file handle. 
+ * Unique file identifier info record. + * This structure is used for records of types FAN_EVENT_INFO_TYPE_FID, + * FAN_EVENT_INFO_TYPE_DFID and FAN_EVENT_INFO_TYPE_DFID_NAME. + * For FAN_EVENT_INFO_TYPE_DFID_NAME there is additionally a null terminated + * name immediately after the file handle. */ struct fanotify_event_info_fid { struct fanotify_event_info_header hdr; -- cgit v1.2.3 From 929943b38daf817f2e6d303ea04401651fc3bc05 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Thu, 16 Jul 2020 11:42:28 +0300 Subject: fanotify: add support for FAN_REPORT_NAME Introduce a new fanotify_init() flag FAN_REPORT_NAME. It requires the flag FAN_REPORT_DIR_FID and there is a constant for setting both flags named FAN_REPORT_DFID_NAME. For a group with flag FAN_REPORT_NAME, the parent fid and name are reported for directory entry modification events (create/delete/move) and for events on non-directory objects. Events on directories themselves are reported with their own fid and "." as the name. The parent fid and name are reported with an info record of type FAN_EVENT_INFO_TYPE_DFID_NAME, similar to the way that parent fid is reported with info type FAN_EVENT_INFO_TYPE_DFID, but with an appended null terminated name string. Link: https://lore.kernel.org/r/20200716084230.30611-21-amir73il@gmail.com Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- fs/notify/fanotify/fanotify.c | 18 ++++++++++++++- fs/notify/fanotify/fanotify_user.c | 45 ++++++++++++++++++++++++++++++-------- include/linux/fanotify.h | 2 +- include/uapi/linux/fanotify.h | 4 ++++ 4 files changed, 58 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index fc2e1fab34af..d793f3e56b26 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -522,9 +522,25 @@ static struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group, unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS); bool name_event = false; - if ((fid_mode & FAN_REPORT_DIR_FID) && dirid) + if ((fid_mode & FAN_REPORT_DIR_FID) && dirid) { id = dirid; + /* + * We record file name only in a group with FAN_REPORT_NAME + * and when we have a directory inode to report. + * + * For directory entry modification event, we record the fid of + * the directory and the name of the modified entry. + * + * For event on non-directory that is reported to parent, we + * record the fid of the parent and the name of the child. + */ + if ((fid_mode & FAN_REPORT_NAME) && + ((mask & ALL_FSNOTIFY_DIRENT_EVENTS) || + !(mask & FAN_ONDIR))) + name_event = true; + } + /* * For queues with unlimited length lost events are not expected and * can possibly have security implications. 
Avoid losing events when diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 7caa64d028ba..6b839790cb42 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -64,18 +64,27 @@ static int fanotify_fid_info_len(int fh_len, int name_len) return roundup(FANOTIFY_INFO_HDR_LEN + info_len, FANOTIFY_EVENT_ALIGN); } -static int fanotify_event_info_len(struct fanotify_event *event) +static int fanotify_event_info_len(unsigned int fid_mode, + struct fanotify_event *event) { struct fanotify_info *info = fanotify_event_info(event); int dir_fh_len = fanotify_event_dir_fh_len(event); int fh_len = fanotify_event_object_fh_len(event); int info_len = 0; + int dot_len = 0; - if (dir_fh_len) + if (dir_fh_len) { info_len += fanotify_fid_info_len(dir_fh_len, info->name_len); + } else if ((fid_mode & FAN_REPORT_NAME) && (event->mask & FAN_ONDIR)) { + /* + * With group flag FAN_REPORT_NAME, if name was not recorded in + * event on a directory, we will report the name ".". + */ + dot_len = 1; + } if (fh_len) - info_len += fanotify_fid_info_len(fh_len, 0); + info_len += fanotify_fid_info_len(fh_len, dot_len); return info_len; } @@ -91,6 +100,7 @@ static struct fanotify_event *get_one_event(struct fsnotify_group *group, { size_t event_size = FAN_EVENT_METADATA_LEN; struct fanotify_event *event = NULL; + unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS); pr_debug("%s: group=%p count=%zd\n", __func__, group, count); @@ -98,8 +108,8 @@ static struct fanotify_event *get_one_event(struct fsnotify_group *group, if (fsnotify_notify_queue_is_empty(group)) goto out; - if (FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS)) { - event_size += fanotify_event_info_len( + if (fid_mode) { + event_size += fanotify_event_info_len(fid_mode, FANOTIFY_E(fsnotify_peek_first_event(group))); } @@ -325,7 +335,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, pr_debug("%s: group=%p event=%p\n", __func__, group, event); metadata.event_len = FAN_EVENT_METADATA_LEN + - fanotify_event_info_len(event); + fanotify_event_info_len(fid_mode, event); metadata.metadata_len = FAN_EVENT_METADATA_LEN; metadata.vers = FANOTIFY_METADATA_VERSION; metadata.reserved = 0; @@ -374,12 +384,25 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, } if (fanotify_event_object_fh_len(event)) { + const char *dot = NULL; + int dot_len = 0; + if (fid_mode == FAN_REPORT_FID || info_type) { /* * With only group flag FAN_REPORT_FID only type FID is * reported. Second info record type is always FID. */ info_type = FAN_EVENT_INFO_TYPE_FID; + } else if ((fid_mode & FAN_REPORT_NAME) && + (event->mask & FAN_ONDIR)) { + /* + * With group flag FAN_REPORT_NAME, if name was not + * recorded in an event on a directory, report the + * name "." with info type DFID_NAME. + */ + info_type = FAN_EVENT_INFO_TYPE_DFID_NAME; + dot = "."; + dot_len = 1; } else if ((event->mask & ALL_FSNOTIFY_DIRENT_EVENTS) || (event->mask & FAN_ONDIR)) { /* @@ -400,7 +423,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, ret = copy_info_to_user(fanotify_event_fsid(event), fanotify_event_object_fh(event), - info_type, NULL, 0, buf, count); + info_type, dot, dot_len, buf, count); if (ret < 0) return ret; @@ -932,11 +955,15 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) if (fid_mode && class != FAN_CLASS_NOTIF) return -EINVAL; - /* Reporting either object fid or dir fid */ + /* + * Reporting either object fid or dir fid. 
+ * Child name is reported with parent fid so requires dir fid. + */ switch (fid_mode) { case 0: case FAN_REPORT_FID: case FAN_REPORT_DIR_FID: + case FAN_REPORT_DFID_NAME: break; default: return -EINVAL; } @@ -1294,7 +1321,7 @@ COMPAT_SYSCALL_DEFINE6(fanotify_mark, */ static int __init fanotify_user_setup(void) { - BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 9); + BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 10); BUILD_BUG_ON(HWEIGHT32(FANOTIFY_MARK_FLAGS) != 9); fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h index 4ddac97b2bf7..3e9c56ee651f 100644 --- a/include/linux/fanotify.h +++ b/include/linux/fanotify.h @@ -18,7 +18,7 @@ #define FANOTIFY_CLASS_BITS (FAN_CLASS_NOTIF | FAN_CLASS_CONTENT | \ FAN_CLASS_PRE_CONTENT) -#define FANOTIFY_FID_BITS (FAN_REPORT_FID | FAN_REPORT_DIR_FID) +#define FANOTIFY_FID_BITS (FAN_REPORT_FID | FAN_REPORT_DFID_NAME) #define FANOTIFY_INIT_FLAGS (FANOTIFY_CLASS_BITS | FANOTIFY_FID_BITS | \ FAN_REPORT_TID | \ diff --git a/include/uapi/linux/fanotify.h b/include/uapi/linux/fanotify.h index 21afebf77fd7..fbf9c5c7dd59 100644 --- a/include/uapi/linux/fanotify.h +++ b/include/uapi/linux/fanotify.h @@ -54,6 +54,10 @@ #define FAN_REPORT_TID 0x00000100 /* event->pid is thread id */ #define FAN_REPORT_FID 0x00000200 /* Report unique file id */ #define FAN_REPORT_DIR_FID 0x00000400 /* Report unique directory id */ +#define FAN_REPORT_NAME 0x00000800 /* Report events with name */ + +/* Convenience macro - FAN_REPORT_NAME requires FAN_REPORT_DIR_FID */ +#define FAN_REPORT_DFID_NAME (FAN_REPORT_DIR_FID | FAN_REPORT_NAME) /* Deprecated - do not use this in programs and do not add new flags here! */ #define FAN_ALL_INIT_FLAGS (FAN_CLOEXEC | FAN_NONBLOCK | \ -- cgit v1.2.3 From b9a1b9772509cbc6f6aa8bcd0b019f6347a2b631 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Wed, 22 Jul 2020 15:58:48 +0300 Subject: fsnotify: create method handle_inode_event() in fsnotify_operations The method handle_event() grew a lot of complexity due to the design of fanotify and merging of ignore masks. Most backends do not care about this complex functionality, so we can hide this complexity from them. Introduce a method handle_inode_event() that serves those backends and passes a single inode mark and fewer arguments. This change converts all backends except fanotify and inotify to use the simplified handle_inode_event() method. In principle, inotify could have also used the new method, but that would require passing more arguments to the simple helper (data, data_type, cookie), so we leave it with the handle_event() method. 
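A minimal backend then boils down to something like the sketch below (the example names are made up; only the handle_inode_event() prototype and the ops field come from this patch). The ops table would still be passed to fsnotify_alloc_group() as before:

#include <linux/fsnotify_backend.h>
#include <linux/printk.h>

/* Sketch of a backend that only needs the simplified callback. */
static int example_handle_inode_event(struct fsnotify_mark *mark, u32 mask,
                                      struct inode *inode, struct inode *dir,
                                      const struct qstr *file_name)
{
        pr_debug("mask=0x%x inode=%lu dir=%lu name=%s\n", mask,
                 inode ? inode->i_ino : 0UL, dir ? dir->i_ino : 0UL,
                 file_name ? (const char *)file_name->name : "(none)");
        return 0;
}

static const struct fsnotify_ops example_fsnotify_ops = {
        .handle_inode_event = example_handle_inode_event,
};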
Link: https://lore.kernel.org/r/20200722125849.17418-9-amir73il@gmail.com Suggested-by: Jan Kara Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- fs/nfsd/filecache.c | 12 ++++------ fs/notify/dnotify/dnotify.c | 38 +++++++---------------------- fs/notify/fsnotify.c | 52 ++++++++++++++++++++++++++++++++++++++-- include/linux/fsnotify_backend.h | 19 ++++++++++++--- kernel/audit_fsnotify.c | 20 +++++++--------- kernel/audit_tree.c | 10 ++++---- kernel/audit_watch.c | 17 ++++++------- 7 files changed, 97 insertions(+), 71 deletions(-) (limited to 'include') diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c index bbc7892d2928..c8b9d2667ee6 100644 --- a/fs/nfsd/filecache.c +++ b/fs/nfsd/filecache.c @@ -598,14 +598,10 @@ static struct notifier_block nfsd_file_lease_notifier = { }; static int -nfsd_file_fsnotify_handle_event(struct fsnotify_group *group, u32 mask, - const void *data, int data_type, - struct inode *dir, - const struct qstr *file_name, u32 cookie, - struct fsnotify_iter_info *iter_info) +nfsd_file_fsnotify_handle_event(struct fsnotify_mark *mark, u32 mask, + struct inode *inode, struct inode *dir, + const struct qstr *name) { - struct inode *inode = fsnotify_data_inode(data, data_type); - trace_nfsd_file_fsnotify_handle_event(inode, mask); /* Should be no marks on non-regular files */ @@ -626,7 +622,7 @@ nfsd_file_fsnotify_handle_event(struct fsnotify_group *group, u32 mask, static const struct fsnotify_ops nfsd_file_fsnotify_ops = { - .handle_event = nfsd_file_fsnotify_handle_event, + .handle_inode_event = nfsd_file_fsnotify_handle_event, .free_mark = nfsd_file_mark_free, }; diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c index ca78d3f78da8..5dcda8f20c04 100644 --- a/fs/notify/dnotify/dnotify.c +++ b/fs/notify/dnotify/dnotify.c @@ -70,8 +70,9 @@ static void dnotify_recalc_inode_mask(struct fsnotify_mark *fsn_mark) * destroy the dnotify struct if it was not registered to receive multiple * events. */ -static void dnotify_one_event(struct fsnotify_group *group, u32 mask, - struct fsnotify_mark *inode_mark) +static int dnotify_handle_event(struct fsnotify_mark *inode_mark, u32 mask, + struct inode *inode, struct inode *dir, + const struct qstr *name) { struct dnotify_mark *dn_mark; struct dnotify_struct *dn; @@ -79,6 +80,10 @@ static void dnotify_one_event(struct fsnotify_group *group, u32 mask, struct fown_struct *fown; __u32 test_mask = mask & ~FS_EVENT_ON_CHILD; + /* not a dir, dnotify doesn't care */ + if (!dir && !(mask & FS_ISDIR)) + return 0; + dn_mark = container_of(inode_mark, struct dnotify_mark, fsn_mark); spin_lock(&inode_mark->lock); @@ -100,33 +105,6 @@ static void dnotify_one_event(struct fsnotify_group *group, u32 mask, } spin_unlock(&inode_mark->lock); -} - -static int dnotify_handle_event(struct fsnotify_group *group, u32 mask, - const void *data, int data_type, - struct inode *dir, - const struct qstr *file_name, u32 cookie, - struct fsnotify_iter_info *iter_info) -{ - struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info); - struct fsnotify_mark *child_mark = fsnotify_iter_child_mark(iter_info); - - /* not a dir, dnotify doesn't care */ - if (!dir && !(mask & FS_ISDIR)) - return 0; - - if (WARN_ON(fsnotify_iter_vfsmount_mark(iter_info))) - return 0; - - /* - * Some events can be sent on both parent dir and subdir marks - * (e.g. DN_ATTRIB). If both parent dir and subdir are watching, - * report the event once to parent dir and once to subdir. 
- */ - if (inode_mark) - dnotify_one_event(group, mask, inode_mark); - if (child_mark) - dnotify_one_event(group, mask, child_mark); return 0; } @@ -143,7 +121,7 @@ static void dnotify_free_mark(struct fsnotify_mark *fsn_mark) } static const struct fsnotify_ops dnotify_fsnotify_ops = { - .handle_event = dnotify_handle_event, + .handle_inode_event = dnotify_handle_event, .free_mark = dnotify_free_mark, }; diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index 494d5d70323f..a960ec3a569a 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -230,6 +230,49 @@ notify: } EXPORT_SYMBOL_GPL(__fsnotify_parent); +static int fsnotify_handle_event(struct fsnotify_group *group, __u32 mask, + const void *data, int data_type, + struct inode *dir, const struct qstr *name, + u32 cookie, struct fsnotify_iter_info *iter_info) +{ + struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info); + struct fsnotify_mark *child_mark = fsnotify_iter_child_mark(iter_info); + struct inode *inode = fsnotify_data_inode(data, data_type); + const struct fsnotify_ops *ops = group->ops; + int ret; + + if (WARN_ON_ONCE(!ops->handle_inode_event)) + return 0; + + if (WARN_ON_ONCE(fsnotify_iter_sb_mark(iter_info)) || + WARN_ON_ONCE(fsnotify_iter_vfsmount_mark(iter_info))) + return 0; + + /* + * An event can be sent on child mark iterator instead of inode mark + * iterator because of other groups that have interest of this inode + * and have marks on both parent and child. We can simplify this case. + */ + if (!inode_mark) { + inode_mark = child_mark; + child_mark = NULL; + dir = NULL; + name = NULL; + } + + ret = ops->handle_inode_event(inode_mark, mask, inode, dir, name); + if (ret || !child_mark) + return ret; + + /* + * Some events can be sent on both parent dir and child marks + * (e.g. FS_ATTRIB). If both parent dir and child are watching, + * report the event once to parent dir with name and once to child + * without name. + */ + return ops->handle_inode_event(child_mark, mask, inode, NULL, NULL); +} + static int send_to_group(__u32 mask, const void *data, int data_type, struct inode *dir, const struct qstr *file_name, u32 cookie, struct fsnotify_iter_info *iter_info) @@ -275,8 +318,13 @@ static int send_to_group(__u32 mask, const void *data, int data_type, if (!(test_mask & marks_mask & ~marks_ignored_mask)) return 0; - return group->ops->handle_event(group, mask, data, data_type, dir, - file_name, cookie, iter_info); + if (group->ops->handle_event) { + return group->ops->handle_event(group, mask, data, data_type, dir, + file_name, cookie, iter_info); + } + + return fsnotify_handle_event(group, mask, data, data_type, dir, + file_name, cookie, iter_info); } static struct fsnotify_mark *fsnotify_first_mark(struct fsnotify_mark_connector **connp) diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 32104cfc27a5..f8529a3a2923 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -128,17 +128,30 @@ struct mem_cgroup; * @cookie: inotify rename cookie * @iter_info: array of marks from this group that are interested in the event * + * handle_inode_event - simple variant of handle_event() for groups that only + * have inode marks and don't have ignore mask + * @mark: mark to notify + * @mask: event type and flags + * @inode: inode that event happened on + * @dir: optional directory associated with event - + * if @file_name is not NULL, this is the directory that + * @file_name is relative to. 
+ * @file_name: optional file name associated with event + * * free_group_priv - called when a group refcnt hits 0 to clean up the private union * freeing_mark - called when a mark is being destroyed for some reason. The group - * MUST be holding a reference on each mark and that reference must be - * dropped in this function. inotify uses this function to send - * userspace messages that marks have been removed. + * MUST be holding a reference on each mark and that reference must be + * dropped in this function. inotify uses this function to send + * userspace messages that marks have been removed. */ struct fsnotify_ops { int (*handle_event)(struct fsnotify_group *group, u32 mask, const void *data, int data_type, struct inode *dir, const struct qstr *file_name, u32 cookie, struct fsnotify_iter_info *iter_info); + int (*handle_inode_event)(struct fsnotify_mark *mark, u32 mask, + struct inode *inode, struct inode *dir, + const struct qstr *file_name); void (*free_group_priv)(struct fsnotify_group *group); void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group); void (*free_event)(struct fsnotify_event *event); diff --git a/kernel/audit_fsnotify.c b/kernel/audit_fsnotify.c index bd3a6b79316a..bfcfcd61adb6 100644 --- a/kernel/audit_fsnotify.c +++ b/kernel/audit_fsnotify.c @@ -152,35 +152,31 @@ static void audit_autoremove_mark_rule(struct audit_fsnotify_mark *audit_mark) } /* Update mark data in audit rules based on fsnotify events. */ -static int audit_mark_handle_event(struct fsnotify_group *group, u32 mask, - const void *data, int data_type, - struct inode *dir, - const struct qstr *dname, u32 cookie, - struct fsnotify_iter_info *iter_info) +static int audit_mark_handle_event(struct fsnotify_mark *inode_mark, u32 mask, + struct inode *inode, struct inode *dir, + const struct qstr *dname) { - struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info); struct audit_fsnotify_mark *audit_mark; - const struct inode *inode = fsnotify_data_inode(data, data_type); audit_mark = container_of(inode_mark, struct audit_fsnotify_mark, mark); - BUG_ON(group != audit_fsnotify_group); - - if (WARN_ON(!inode)) + if (WARN_ON_ONCE(inode_mark->group != audit_fsnotify_group) || + WARN_ON_ONCE(!inode)) return 0; if (mask & (FS_CREATE|FS_MOVED_TO|FS_DELETE|FS_MOVED_FROM)) { if (audit_compare_dname_path(dname, audit_mark->path, AUDIT_NAME_FULL)) return 0; audit_update_mark(audit_mark, inode); - } else if (mask & (FS_DELETE_SELF|FS_UNMOUNT|FS_MOVE_SELF)) + } else if (mask & (FS_DELETE_SELF|FS_UNMOUNT|FS_MOVE_SELF)) { audit_autoremove_mark_rule(audit_mark); + } return 0; } static const struct fsnotify_ops audit_mark_fsnotify_ops = { - .handle_event = audit_mark_handle_event, + .handle_inode_event = audit_mark_handle_event, .free_mark = audit_fsnotify_free_mark, }; diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index 2ce2ac1ce100..025d24abf15d 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -1037,11 +1037,9 @@ static void evict_chunk(struct audit_chunk *chunk) audit_schedule_prune(); } -static int audit_tree_handle_event(struct fsnotify_group *group, u32 mask, - const void *data, int data_type, - struct inode *dir, - const struct qstr *file_name, u32 cookie, - struct fsnotify_iter_info *iter_info) +static int audit_tree_handle_event(struct fsnotify_mark *mark, u32 mask, + struct inode *inode, struct inode *dir, + const struct qstr *file_name) { return 0; } @@ -1070,7 +1068,7 @@ static void audit_tree_freeing_mark(struct fsnotify_mark *mark, } static const 
struct fsnotify_ops audit_tree_ops = { - .handle_event = audit_tree_handle_event, + .handle_inode_event = audit_tree_handle_event, .freeing_mark = audit_tree_freeing_mark, .free_mark = audit_tree_destroy_watch, }; diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index e23d54bcc587..246e5ba704c0 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -464,20 +464,17 @@ void audit_remove_watch_rule(struct audit_krule *krule) } /* Update watch data in audit rules based on fsnotify events. */ -static int audit_watch_handle_event(struct fsnotify_group *group, u32 mask, - const void *data, int data_type, - struct inode *dir, - const struct qstr *dname, u32 cookie, - struct fsnotify_iter_info *iter_info) +static int audit_watch_handle_event(struct fsnotify_mark *inode_mark, u32 mask, + struct inode *inode, struct inode *dir, + const struct qstr *dname) { - struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info); - const struct inode *inode = fsnotify_data_inode(data, data_type); struct audit_parent *parent; parent = container_of(inode_mark, struct audit_parent, mark); - BUG_ON(group != audit_watch_group); - WARN_ON(!inode); + if (WARN_ON_ONCE(inode_mark->group != audit_watch_group) || + WARN_ON_ONCE(!inode)) + return 0; if (mask & (FS_CREATE|FS_MOVED_TO) && inode) audit_update_watch(parent, dname, inode->i_sb->s_dev, inode->i_ino, 0); @@ -490,7 +487,7 @@ static int audit_watch_handle_event(struct fsnotify_group *group, u32 mask, } static const struct fsnotify_ops audit_watch_fsnotify_ops = { - .handle_event = audit_watch_handle_event, + .handle_inode_event = audit_watch_handle_event, .free_mark = audit_watch_free_mark, }; -- cgit v1.2.3 From c1b7b8d42b5422627b0a8268416a60748f8d000f Mon Sep 17 00:00:00 2001 From: 王文虎 Date: Mon, 27 Jul 2020 21:39:51 +0800 Subject: sched: Fix a typo in a comment Change the comment typo: "direcly" -> "directly". Signed-off-by: Wang Wenhu Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/AAcAXwBTDSpsKN-5iyIOtaqk.1.1595857191899.Hmail.wenhu.wang@vivo.com --- include/linux/sched.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index 5033813fecd5..adf0125190d4 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -374,7 +374,7 @@ struct util_est { * For cfs_rq, they are the aggregated values of all runnable and blocked * sched_entities. * - * The load/runnable/util_avg doesn't direcly factor frequency scaling and CPU + * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU * capacity scaling. The scaling is done through the rq_clock_pelt that is used * for computing those signals (see update_rq_clock_pelt()) * -- cgit v1.2.3 From b538304da7855f4e31f91f915f259936eb67a1e6 Mon Sep 17 00:00:00 2001 From: Konrad Dybcio Date: Sun, 26 Jul 2020 13:11:58 +0200 Subject: clk: qcom: gcc-sdm660: Add missing modem reset This will be required in order to support the modem upstream. 
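For context, a consumer would exercise the new GCC_MSS_RESTART line through the standard reset framework. A minimal sketch follows; the "mss_restart" reset-names entry and the surrounding remoteproc-style recovery path are illustrative assumptions, not something defined by this patch:

	#include <linux/delay.h>
	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/reset.h>

	/* Sketch only: pulse the modem subsystem restart line during recovery. */
	static int mss_pulse_restart(struct device *dev)
	{
		/* "mss_restart" is an assumed reset-names entry pointing at
		 * <&gcc GCC_MSS_RESTART> in the board device tree.
		 */
		struct reset_control *rst;
		int ret;

		rst = devm_reset_control_get_exclusive(dev, "mss_restart");
		if (IS_ERR(rst))
			return PTR_ERR(rst);

		ret = reset_control_assert(rst);
		if (ret)
			return ret;

		udelay(10);	/* arbitrary settle delay, for illustration only */

		return reset_control_deassert(rst);
	}

The actual SDM660 modem remoteproc wiring that consumes this reset lands separately; the sketch only shows the generic reset-consumer pattern the new index enables.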
Signed-off-by: Konrad Dybcio Link: https://lore.kernel.org/r/20200726111215.22361-2-konradybcio@gmail.com Fixes: f2a76a2955c0 ("clk: qcom: Add Global Clock controller (GCC) driver for SDM660") Signed-off-by: Stephen Boyd --- drivers/clk/qcom/gcc-sdm660.c | 1 + include/dt-bindings/clock/qcom,gcc-sdm660.h | 1 + 2 files changed, 2 insertions(+) (limited to 'include') diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c index bf5730832ef3..a85283786278 100644 --- a/drivers/clk/qcom/gcc-sdm660.c +++ b/drivers/clk/qcom/gcc-sdm660.c @@ -2402,6 +2402,7 @@ static const struct qcom_reset_map gcc_sdm660_resets[] = { [GCC_USB_20_BCR] = { 0x2f000 }, [GCC_USB_30_BCR] = { 0xf000 }, [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 }, + [GCC_MSS_RESTART] = { 0x79000 }, }; static const struct regmap_config gcc_sdm660_regmap_config = { diff --git a/include/dt-bindings/clock/qcom,gcc-sdm660.h b/include/dt-bindings/clock/qcom,gcc-sdm660.h index 468302282913..df8a6f3d367e 100644 --- a/include/dt-bindings/clock/qcom,gcc-sdm660.h +++ b/include/dt-bindings/clock/qcom,gcc-sdm660.h @@ -152,5 +152,6 @@ #define GCC_USB_20_BCR 6 #define GCC_USB_30_BCR 7 #define GCC_USB_PHY_CFG_AHB2PHY_BCR 8 +#define GCC_MSS_RESTART 9 #endif -- cgit v1.2.3 From af60459a543483c5ad155a92daa8c3a6c00a0829 Mon Sep 17 00:00:00 2001 From: Chunyan Zhang Date: Fri, 24 Jul 2020 20:21:47 +0800 Subject: math64: New DIV_S64_ROUND_CLOSEST helper Provide DIV_S64_ROUND_CLOSEST helper which uses div_s64 to perform division rounded to the closest integer using signed 64bit dividend and signed 32bit divisor. Signed-off-by: Chunyan Zhang Signed-off-by: Sebastian Reichel --- include/linux/math64.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'include') diff --git a/include/linux/math64.h b/include/linux/math64.h index 11a267413e8e..cd0693989436 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h @@ -279,4 +279,23 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor) #define DIV64_U64_ROUND_CLOSEST(dividend, divisor) \ ({ u64 _tmp = (divisor); div64_u64((dividend) + _tmp / 2, _tmp); }) +/* + * DIV_S64_ROUND_CLOSEST - signed 64bit divide with 32bit divisor rounded to nearest integer + * @dividend: signed 64bit dividend + * @divisor: signed 32bit divisor + * + * Divide signed 64bit dividend by signed 32bit divisor + * and round to closest integer. + * + * Return: dividend / divisor rounded to nearest integer + */ +#define DIV_S64_ROUND_CLOSEST(dividend, divisor)( \ +{ \ + s64 __x = (dividend); \ + s32 __d = (divisor); \ + ((__x > 0) == (__d > 0)) ? \ + div_s64((__x + (__d / 2)), __d) : \ + div_s64((__x - (__d / 2)), __d); \ +} \ +) #endif /* _LINUX_MATH64_H */ -- cgit v1.2.3 From 316810e883fbcfd88167fd53858294bf0c2bcd40 Mon Sep 17 00:00:00 2001 From: "周琰杰 (Zhou Yanjie)" Date: Sat, 25 Jul 2020 13:11:34 +0800 Subject: dt-bindings: clock: Add RTC related clocks for Ingenic SoCs. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add RTC related clocks bindings for the JZ4780 SoC, the X1000 SoC, and the X1830 SoC from Ingenic. 
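For context, once a board's device tree wires the new identifier up (e.g. clocks = <&cgu JZ4780_CLK_RTC>), a consumer such as an RTC driver picks it up through the common clk API. A minimal sketch, where the "rtc" clock-names entry is an illustrative assumption rather than part of this patch:

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	/* Sketch only: grab and enable the RTC clock exposed by the CGU. */
	static int ingenic_rtc_clk_setup(struct device *dev)
	{
		struct clk *rtc_clk;
		int ret;

		rtc_clk = devm_clk_get(dev, "rtc");	/* assumed clock-names entry */
		if (IS_ERR(rtc_clk))
			return PTR_ERR(rtc_clk);

		ret = clk_prepare_enable(rtc_clk);
		if (ret)
			return ret;

		dev_dbg(dev, "rtc clock running at %lu Hz\n", clk_get_rate(rtc_clk));
		return 0;
	}

Note that C consumers never reference the numeric binding IDs directly; the identifiers added here only appear in the device tree and in the CGU provider tables.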
Tested-by: 周正 (Zhou Zheng) Signed-off-by: 周琰杰 (Zhou Yanjie) Link: https://lore.kernel.org/r/20200725051136.58220-2-zhouyanjie@wanyeetech.com Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/jz4780-cgu.h | 2 ++ include/dt-bindings/clock/x1000-cgu.h | 2 ++ include/dt-bindings/clock/x1830-cgu.h | 2 ++ 3 files changed, 6 insertions(+) (limited to 'include') diff --git a/include/dt-bindings/clock/jz4780-cgu.h b/include/dt-bindings/clock/jz4780-cgu.h index 1859ce53ee38..cb07a0978301 100644 --- a/include/dt-bindings/clock/jz4780-cgu.h +++ b/include/dt-bindings/clock/jz4780-cgu.h @@ -85,5 +85,7 @@ #define JZ4780_CLK_DES 70 #define JZ4780_CLK_X2D 71 #define JZ4780_CLK_CORE1 72 +#define JZ4780_CLK_EXCLK_DIV512 73 +#define JZ4780_CLK_RTC 74 #endif /* __DT_BINDINGS_CLOCK_JZ4780_CGU_H__ */ diff --git a/include/dt-bindings/clock/x1000-cgu.h b/include/dt-bindings/clock/x1000-cgu.h index 0367c8c02e16..f187e0719fd3 100644 --- a/include/dt-bindings/clock/x1000-cgu.h +++ b/include/dt-bindings/clock/x1000-cgu.h @@ -48,5 +48,7 @@ #define X1000_CLK_SSI 33 #define X1000_CLK_OST 34 #define X1000_CLK_PDMA 35 +#define X1000_CLK_EXCLK_DIV512 36 +#define X1000_CLK_RTC 37 #endif /* __DT_BINDINGS_CLOCK_X1000_CGU_H__ */ diff --git a/include/dt-bindings/clock/x1830-cgu.h b/include/dt-bindings/clock/x1830-cgu.h index 801e1d09c881..88455376a950 100644 --- a/include/dt-bindings/clock/x1830-cgu.h +++ b/include/dt-bindings/clock/x1830-cgu.h @@ -51,5 +51,7 @@ #define X1830_CLK_TCU 36 #define X1830_CLK_DTRNG 37 #define X1830_CLK_OST 38 +#define X1830_CLK_EXCLK_DIV512 39 +#define X1830_CLK_RTC 40 #endif /* __DT_BINDINGS_CLOCK_X1830_CGU_H__ */ -- cgit v1.2.3 From acb3b78de3f7698724e59719fbe50b8c92928e83 Mon Sep 17 00:00:00 2001 From: "周琰杰 (Zhou Yanjie)" Date: Sat, 25 Jul 2020 13:11:35 +0800 Subject: dt-bindings: clock: Add tabs to align code. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The "JZ4780_CLK_LCD0PIXCLK" and the "JZ4780_CLK_LCD1PIXCLK" in the "jz4780.h" and the new added "JZ4780_CLK_EXCLK_DIV512" in the previous patch is too long, add tabs to other lines to align them. 
Tested-by: 周正 (Zhou Zheng) Signed-off-by: 周琰杰 (Zhou Yanjie) Link: https://lore.kernel.org/r/20200725051136.58220-3-zhouyanjie@wanyeetech.com Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/jz4780-cgu.h | 144 ++++++++++++++++----------------- 1 file changed, 72 insertions(+), 72 deletions(-) (limited to 'include') diff --git a/include/dt-bindings/clock/jz4780-cgu.h b/include/dt-bindings/clock/jz4780-cgu.h index cb07a0978301..85cf8eb5081b 100644 --- a/include/dt-bindings/clock/jz4780-cgu.h +++ b/include/dt-bindings/clock/jz4780-cgu.h @@ -12,80 +12,80 @@ #ifndef __DT_BINDINGS_CLOCK_JZ4780_CGU_H__ #define __DT_BINDINGS_CLOCK_JZ4780_CGU_H__ -#define JZ4780_CLK_EXCLK 0 -#define JZ4780_CLK_RTCLK 1 -#define JZ4780_CLK_APLL 2 -#define JZ4780_CLK_MPLL 3 -#define JZ4780_CLK_EPLL 4 -#define JZ4780_CLK_VPLL 5 -#define JZ4780_CLK_OTGPHY 6 -#define JZ4780_CLK_SCLKA 7 -#define JZ4780_CLK_CPUMUX 8 -#define JZ4780_CLK_CPU 9 -#define JZ4780_CLK_L2CACHE 10 -#define JZ4780_CLK_AHB0 11 -#define JZ4780_CLK_AHB2PMUX 12 -#define JZ4780_CLK_AHB2 13 -#define JZ4780_CLK_PCLK 14 -#define JZ4780_CLK_DDR 15 -#define JZ4780_CLK_VPU 16 -#define JZ4780_CLK_I2SPLL 17 -#define JZ4780_CLK_I2S 18 +#define JZ4780_CLK_EXCLK 0 +#define JZ4780_CLK_RTCLK 1 +#define JZ4780_CLK_APLL 2 +#define JZ4780_CLK_MPLL 3 +#define JZ4780_CLK_EPLL 4 +#define JZ4780_CLK_VPLL 5 +#define JZ4780_CLK_OTGPHY 6 +#define JZ4780_CLK_SCLKA 7 +#define JZ4780_CLK_CPUMUX 8 +#define JZ4780_CLK_CPU 9 +#define JZ4780_CLK_L2CACHE 10 +#define JZ4780_CLK_AHB0 11 +#define JZ4780_CLK_AHB2PMUX 12 +#define JZ4780_CLK_AHB2 13 +#define JZ4780_CLK_PCLK 14 +#define JZ4780_CLK_DDR 15 +#define JZ4780_CLK_VPU 16 +#define JZ4780_CLK_I2SPLL 17 +#define JZ4780_CLK_I2S 18 #define JZ4780_CLK_LCD0PIXCLK 19 #define JZ4780_CLK_LCD1PIXCLK 20 -#define JZ4780_CLK_MSCMUX 21 -#define JZ4780_CLK_MSC0 22 -#define JZ4780_CLK_MSC1 23 -#define JZ4780_CLK_MSC2 24 -#define JZ4780_CLK_UHC 25 -#define JZ4780_CLK_SSIPLL 26 -#define JZ4780_CLK_SSI 27 -#define JZ4780_CLK_CIMMCLK 28 -#define JZ4780_CLK_PCMPLL 29 -#define JZ4780_CLK_PCM 30 -#define JZ4780_CLK_GPU 31 -#define JZ4780_CLK_HDMI 32 -#define JZ4780_CLK_BCH 33 -#define JZ4780_CLK_NEMC 34 -#define JZ4780_CLK_OTG0 35 -#define JZ4780_CLK_SSI0 36 -#define JZ4780_CLK_SMB0 37 -#define JZ4780_CLK_SMB1 38 -#define JZ4780_CLK_SCC 39 -#define JZ4780_CLK_AIC 40 -#define JZ4780_CLK_TSSI0 41 -#define JZ4780_CLK_OWI 42 -#define JZ4780_CLK_KBC 43 -#define JZ4780_CLK_SADC 44 -#define JZ4780_CLK_UART0 45 -#define JZ4780_CLK_UART1 46 -#define JZ4780_CLK_UART2 47 -#define JZ4780_CLK_UART3 48 -#define JZ4780_CLK_SSI1 49 -#define JZ4780_CLK_SSI2 50 -#define JZ4780_CLK_PDMA 51 -#define JZ4780_CLK_GPS 52 -#define JZ4780_CLK_MAC 53 -#define JZ4780_CLK_SMB2 54 -#define JZ4780_CLK_CIM 55 -#define JZ4780_CLK_LCD 56 -#define JZ4780_CLK_TVE 57 -#define JZ4780_CLK_IPU 58 -#define JZ4780_CLK_DDR0 59 -#define JZ4780_CLK_DDR1 60 -#define JZ4780_CLK_SMB3 61 -#define JZ4780_CLK_TSSI1 62 -#define JZ4780_CLK_COMPRESS 63 -#define JZ4780_CLK_AIC1 64 -#define JZ4780_CLK_GPVLC 65 -#define JZ4780_CLK_OTG1 66 -#define JZ4780_CLK_UART4 67 -#define JZ4780_CLK_AHBMON 68 -#define JZ4780_CLK_SMB4 69 -#define JZ4780_CLK_DES 70 -#define JZ4780_CLK_X2D 71 -#define JZ4780_CLK_CORE1 72 +#define JZ4780_CLK_MSCMUX 21 +#define JZ4780_CLK_MSC0 22 +#define JZ4780_CLK_MSC1 23 +#define JZ4780_CLK_MSC2 24 +#define JZ4780_CLK_UHC 25 +#define JZ4780_CLK_SSIPLL 26 +#define JZ4780_CLK_SSI 27 +#define JZ4780_CLK_CIMMCLK 28 +#define JZ4780_CLK_PCMPLL 29 +#define JZ4780_CLK_PCM 30 +#define 
JZ4780_CLK_GPU 31 +#define JZ4780_CLK_HDMI 32 +#define JZ4780_CLK_BCH 33 +#define JZ4780_CLK_NEMC 34 +#define JZ4780_CLK_OTG0 35 +#define JZ4780_CLK_SSI0 36 +#define JZ4780_CLK_SMB0 37 +#define JZ4780_CLK_SMB1 38 +#define JZ4780_CLK_SCC 39 +#define JZ4780_CLK_AIC 40 +#define JZ4780_CLK_TSSI0 41 +#define JZ4780_CLK_OWI 42 +#define JZ4780_CLK_KBC 43 +#define JZ4780_CLK_SADC 44 +#define JZ4780_CLK_UART0 45 +#define JZ4780_CLK_UART1 46 +#define JZ4780_CLK_UART2 47 +#define JZ4780_CLK_UART3 48 +#define JZ4780_CLK_SSI1 49 +#define JZ4780_CLK_SSI2 50 +#define JZ4780_CLK_PDMA 51 +#define JZ4780_CLK_GPS 52 +#define JZ4780_CLK_MAC 53 +#define JZ4780_CLK_SMB2 54 +#define JZ4780_CLK_CIM 55 +#define JZ4780_CLK_LCD 56 +#define JZ4780_CLK_TVE 57 +#define JZ4780_CLK_IPU 58 +#define JZ4780_CLK_DDR0 59 +#define JZ4780_CLK_DDR1 60 +#define JZ4780_CLK_SMB3 61 +#define JZ4780_CLK_TSSI1 62 +#define JZ4780_CLK_COMPRESS 63 +#define JZ4780_CLK_AIC1 64 +#define JZ4780_CLK_GPVLC 65 +#define JZ4780_CLK_OTG1 66 +#define JZ4780_CLK_UART4 67 +#define JZ4780_CLK_AHBMON 68 +#define JZ4780_CLK_SMB4 69 +#define JZ4780_CLK_DES 70 +#define JZ4780_CLK_X2D 71 +#define JZ4780_CLK_CORE1 72 #define JZ4780_CLK_EXCLK_DIV512 73 -#define JZ4780_CLK_RTC 74 +#define JZ4780_CLK_RTC 74 #endif /* __DT_BINDINGS_CLOCK_JZ4780_CGU_H__ */ -- cgit v1.2.3 From 62b9825827518843f0f93dec6730ddcde14eb5b2 Mon Sep 17 00:00:00 2001 From: Peter Chen Date: Mon, 17 Feb 2020 09:26:43 +0800 Subject: usb: chipidea: add query_available_role interface The glue layer may need to know current available role to do some setting, eg, the wakeup setting. So we add ci_hdrc_query_available_role for that. Signed-off-by: Peter Chen --- drivers/usb/chipidea/core.c | 27 +++++++++++++++++++++++++++ include/linux/usb/chipidea.h | 2 ++ 2 files changed, 29 insertions(+) (limited to 'include') diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 9a7c53d09ab4..87ae3c8686a7 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -877,6 +877,33 @@ void ci_hdrc_remove_device(struct platform_device *pdev) } EXPORT_SYMBOL_GPL(ci_hdrc_remove_device); +/** + * ci_hdrc_query_available_role: get runtime available operation mode + * + * The glue layer can get current operation mode (host/peripheral/otg) + * This function should be called after ci core device has created. + * + * @pdev: the platform device of ci core. + * + * Return runtime usb_dr_mode. 
+ */ +enum usb_dr_mode ci_hdrc_query_available_role(struct platform_device *pdev) +{ + struct ci_hdrc *ci = platform_get_drvdata(pdev); + + if (!ci) + return USB_DR_MODE_UNKNOWN; + if (ci->roles[CI_ROLE_HOST] && ci->roles[CI_ROLE_GADGET]) + return USB_DR_MODE_OTG; + else if (ci->roles[CI_ROLE_HOST]) + return USB_DR_MODE_HOST; + else if (ci->roles[CI_ROLE_GADGET]) + return USB_DR_MODE_PERIPHERAL; + else + return USB_DR_MODE_UNKNOWN; +} +EXPORT_SYMBOL_GPL(ci_hdrc_query_available_role); + static inline void ci_role_destroy(struct ci_hdrc *ci) { ci_hdrc_gadget_destroy(ci); diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h index 54167a2d28ea..025b41687ce9 100644 --- a/include/linux/usb/chipidea.h +++ b/include/linux/usb/chipidea.h @@ -99,5 +99,7 @@ struct platform_device *ci_hdrc_add_device(struct device *dev, struct ci_hdrc_platform_data *platdata); /* Remove ci hdrc device */ void ci_hdrc_remove_device(struct platform_device *pdev); +/* Get current available role */ +enum usb_dr_mode ci_hdrc_query_available_role(struct platform_device *pdev); #endif -- cgit v1.2.3 From 87b517a80419346a44319dc21cad9472a01b9204 Mon Sep 17 00:00:00 2001 From: Jonathan Marek Date: Thu, 9 Jul 2020 09:52:43 -0400 Subject: dt-bindings: power: Add missing rpmpd rpmh regulator level Add RPMH_REGULATOR_LEVEL_SVS_L0, used by sm8250. Acked-by: Rob Herring Signed-off-by: Jonathan Marek Link: https://lore.kernel.org/r/20200709135251.643-13-jonathan@marek.ca Signed-off-by: Bjorn Andersson --- include/dt-bindings/power/qcom-rpmpd.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h index dc146e44228b..5e61eaf73bdd 100644 --- a/include/dt-bindings/power/qcom-rpmpd.h +++ b/include/dt-bindings/power/qcom-rpmpd.h @@ -55,6 +55,7 @@ #define RPMH_REGULATOR_LEVEL_MIN_SVS 48 #define RPMH_REGULATOR_LEVEL_LOW_SVS 64 #define RPMH_REGULATOR_LEVEL_SVS 128 +#define RPMH_REGULATOR_LEVEL_SVS_L0 144 #define RPMH_REGULATOR_LEVEL_SVS_L1 192 #define RPMH_REGULATOR_LEVEL_SVS_L2 224 #define RPMH_REGULATOR_LEVEL_NOM 256 -- cgit v1.2.3 From cde1a8a992875a7479c4321b2a4a190c2e92ec2a Mon Sep 17 00:00:00 2001 From: Ismael Ferreras Morezuelas Date: Sun, 26 Jul 2020 23:12:28 +0200 Subject: Bluetooth: btusb: Fix and detect most of the Chinese Bluetooth controllers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For some reason they tend to squat on the very first CSR/ Cambridge Silicon Radio VID/PID instead of paying fees. This is an extremely common problem; the issue goes as back as 2013 and these devices are only getting more popular, even rebranded by reputable vendors and sold by retailers everywhere. So, at this point in time there are hundreds of modern dongles reusing the ID of what originally was an early Bluetooth 1.1 controller. Linux is the only place where they don't work due to spotty checks in our detection code. It only covered a minimum subset. So what's the big idea? Take advantage of the fact that all CSR chips report the same internal version as both the LMP sub-version and HCI revision number. It always matches, couple that with the manufacturer code, that rarely lies, and we now have a good idea of who is who. Additionally, by compiling a list of user-reported HCI/lsusb dumps, and searching around for legit CSR dongles in similar product ranges we can find what CSR BlueCore firmware supported which Bluetooth versions. 
That way we can narrow down ranges of fakes for each of them. e.g. Real CSR dongles with LMP subversion 0x73 are old enough that support BT 1.1 only; so it's a dead giveaway when some third-party BT 4.0 dongle reuses it. So, to sum things up; there are multiple classes of fake controllers reusing the same 0A12:0001 VID/PID. This has been broken for a while. Known 'fake' bcdDevices: 0x0100, 0x0134, 0x1915, 0x2520, 0x7558, 0x8891 IC markings on 0x7558: FR3191AHAL 749H15143 (???) https://bugzilla.kernel.org/show_bug.cgi?id=60824 Fixes: 81cac64ba258ae (Deal with USB devices that are faking CSR vendor) Reported-by: Michał Wiśniewski Tested-by: Mike Johnson Tested-by: Ricardo Rodrigues Tested-by: M.Hanny Sabbagh Tested-by: Oussama BEN BRAHIM Tested-by: Ismael Ferreras Morezuelas Signed-off-by: Ismael Ferreras Morezuelas Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 74 +++++++++++++++++++++++++++++++++------ include/net/bluetooth/bluetooth.h | 2 ++ include/net/bluetooth/hci.h | 11 ++++++ net/bluetooth/hci_core.c | 6 ++-- 4 files changed, 81 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index aa0bc9942afd..1f51494f5818 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -1742,6 +1742,7 @@ static int btusb_setup_csr(struct hci_dev *hdev) { struct hci_rp_read_local_version *rp; struct sk_buff *skb; + bool is_fake = false; BT_DBG("%s", hdev->name); @@ -1761,18 +1762,69 @@ static int btusb_setup_csr(struct hci_dev *hdev) rp = (struct hci_rp_read_local_version *)skb->data; - /* Detect controllers which aren't real CSR ones. */ + /* Detect a wide host of Chinese controllers that aren't CSR. + * + * Known fake bcdDevices: 0x0100, 0x0134, 0x1915, 0x2520, 0x7558, 0x8891 + * + * The main thing they have in common is that these are really popular low-cost + * options that support newer Bluetooth versions but rely on heavy VID/PID + * squatting of this poor old Bluetooth 1.1 device. Even sold as such. + * + * We detect actual CSR devices by checking that the HCI manufacturer code + * is Cambridge Silicon Radio (10) and ensuring that LMP sub-version and + * HCI rev values always match. As they both store the firmware number. + */ if (le16_to_cpu(rp->manufacturer) != 10 || - le16_to_cpu(rp->lmp_subver) == 0x0c5c) { + le16_to_cpu(rp->hci_rev) != le16_to_cpu(rp->lmp_subver)) + is_fake = true; + + /* Known legit CSR firmware build numbers and their supported BT versions: + * - 1.1 (0x1) -> 0x0073, 0x020d, 0x033c, 0x034e + * - 1.2 (0x2) -> 0x04d9, 0x0529 + * - 2.0 (0x3) -> 0x07a6, 0x07ad, 0x0c5c + * - 2.1 (0x4) -> 0x149c, 0x1735, 0x1899 (0x1899 is a BlueCore4-External) + * - 4.0 (0x6) -> 0x1d86, 0x2031, 0x22bb + * + * e.g. Real CSR dongles with LMP subversion 0x73 are old enough that + * support BT 1.1 only; so it's a dead giveaway when some + * third-party BT 4.0 dongle reuses it. 
+ */ + else if (le16_to_cpu(rp->lmp_subver) <= 0x034e && + le16_to_cpu(rp->hci_ver) > BLUETOOTH_VER_1_1) + is_fake = true; + + else if (le16_to_cpu(rp->lmp_subver) <= 0x0529 && + le16_to_cpu(rp->hci_ver) > BLUETOOTH_VER_1_2) + is_fake = true; + + else if (le16_to_cpu(rp->lmp_subver) <= 0x0c5c && + le16_to_cpu(rp->hci_ver) > BLUETOOTH_VER_2_0) + is_fake = true; + + else if (le16_to_cpu(rp->lmp_subver) <= 0x1899 && + le16_to_cpu(rp->hci_ver) > BLUETOOTH_VER_2_1) + is_fake = true; + + else if (le16_to_cpu(rp->lmp_subver) <= 0x22bb && + le16_to_cpu(rp->hci_ver) > BLUETOOTH_VER_4_0) + is_fake = true; + + if (is_fake) { + bt_dev_warn(hdev, "CSR: Unbranded CSR clone detected; adding workarounds..."); + + /* Generally these clones have big discrepancies between + * advertised features and what's actually supported. + * Probably will need to be expanded in the future; + * without these the controller will lock up. + */ + set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks); + set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks); + /* Clear the reset quirk since this is not an actual * early Bluetooth 1.1 device from CSR. */ clear_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); - - /* These fake CSR controllers have all a broken - * stored link key handling and so just disable it. - */ - set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks); + clear_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); } kfree_skb(skb); @@ -4070,11 +4122,13 @@ static int btusb_probe(struct usb_interface *intf, if (bcdDevice < 0x117) set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); + /* This must be set first in case we disable it for fakes */ + set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); + /* Fake CSR devices with broken commands */ - if (bcdDevice <= 0x100 || bcdDevice == 0x134) + if (le16_to_cpu(udev->descriptor.idVendor) == 0x0a12 && + le16_to_cpu(udev->descriptor.idProduct) == 0x0001) hdev->setup = btusb_setup_csr; - - set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); } if (id->driver_info & BTUSB_SNIFFER) { diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 7ee8041af803..9125effbf448 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -41,6 +41,8 @@ #define BLUETOOTH_VER_1_1 1 #define BLUETOOTH_VER_1_2 2 #define BLUETOOTH_VER_2_0 3 +#define BLUETOOTH_VER_2_1 4 +#define BLUETOOTH_VER_4_0 6 /* Reserv for core and drivers use */ #define BT_SKB_RESERVE 8 diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 1f18f71363e9..1317dfd8f962 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -227,6 +227,17 @@ enum { * supported. */ HCI_QUIRK_VALID_LE_STATES, + + /* When this quirk is set, then erroneous data reporting + * is ignored. This is mainly due to the fact that the HCI + * Read Default Erroneous Data Reporting command is advertised, + * but not supported; these controllers often reply with unknown + * command and tend to lock up randomly. Needing a hard reset. + * + * This quirk can be set before hci_register_dev is called or + * during the hdev->setup vendor callback. 
+ */ + HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, }; /* HCI device flags */ diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 6509f785dd14..2891e16c1cc1 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -605,7 +605,8 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt) if (hdev->commands[8] & 0x01) hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL); - if (hdev->commands[18] & 0x04) + if (hdev->commands[18] & 0x04 && + !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL); /* Some older Broadcom based Bluetooth 1.2 controllers do not @@ -850,7 +851,8 @@ static int hci_init4_req(struct hci_request *req, unsigned long opt) /* Set erroneous data reporting if supported to the wideband speech * setting value */ - if (hdev->commands[18] & 0x08) { + if (hdev->commands[18] & 0x08 && + !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) { bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED); -- cgit v1.2.3 From e885d5d94793ef342e49d55672baabbc16e32bb1 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 16 Jul 2020 16:36:50 +1000 Subject: lockdep: Move list.h inclusion into lockdep.h Currently lockdep_types.h includes list.h without actually using any of its macros or functions. All it needs are the type definitions which were moved into types.h long ago. This potentially causes inclusion loops because both are included by many core header files. This patch moves the list.h inclusion into lockdep.h. Note that we could probably remove it completely but that could potentially result in compile failures should any end users not include list.h directly and also be unlucky enough to not get list.h via some other header file. Reported-by: Petr Mladek Signed-off-by: Herbert Xu Signed-off-by: Peter Zijlstra (Intel) Tested-by: Petr Mladek Link: https://lkml.kernel.org/r/20200716063649.GA23065@gondor.apana.org.au --- include/linux/lockdep.h | 1 + include/linux/lockdep_types.h | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 3b73cf84f77d..b1ad5c045353 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -21,6 +21,7 @@ extern int lock_stat; #ifdef CONFIG_LOCKDEP #include +#include #include #include diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h index 7b9350624577..bb35b449f533 100644 --- a/include/linux/lockdep_types.h +++ b/include/linux/lockdep_types.h @@ -32,8 +32,6 @@ enum lockdep_wait_type { #ifdef CONFIG_LOCKDEP -#include - /* * We'd rather not expose kernel/lockdep_states.h this wide, but we do need * the total number of states... :-( -- cgit v1.2.3 From d6945242f45d4745a8169fdab7afeb40f4e36f06 Mon Sep 17 00:00:00 2001 From: Eran Ben Elisha Date: Mon, 18 May 2020 11:13:52 +0300 Subject: net/mlx5: Hold pages RB tree per VF Per page request event, FW request to allocated or release pages for a single function. Driver maintains FW pages object per function, so there is no need to hold one global page data-base. Instead, have a page data-base per function, which will improve performance release flow in all cases, especially for "release all pages". As the range of function IDs is large and not sequential, use xarray to store a per function ID page data-base, where the function ID is the key. Upon first allocation of a page to a function ID, create the page data-base per function. 
This data-base will be released only at pagealloc mechanism cleanup. NIC: ConnectX-4 Lx CPU: Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz Test case: 32 VFs, measure release pages on one VF as part of FLR Before: 0.021 Sec After: 0.014 Sec The improvement depends on amount of VFs and memory utilization by them. Time measurements above were taken from idle system. Signed-off-by: Eran Ben Elisha Reviewed-by: Mark Bloch Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/pagealloc.c | 142 +++++++++++++++------ include/linux/mlx5/driver.h | 2 +- 2 files changed, 105 insertions(+), 39 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 5ddd18639a1e..1b20e3397dde 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -35,6 +35,7 @@ #include #include #include +#include #include "mlx5_core.h" #include "lib/eq.h" @@ -73,15 +74,45 @@ enum { MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE, }; +static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func_id) +{ + struct rb_root *root; + int err; + + root = xa_load(&dev->priv.page_root_xa, func_id); + if (root) + return root; + + root = kzalloc(sizeof(*root), GFP_KERNEL); + if (!root) + return ERR_PTR(-ENOMEM); + + err = xa_insert(&dev->priv.page_root_xa, func_id, root, GFP_KERNEL); + if (err) { + kfree(root); + return ERR_PTR(err); + } + + *root = RB_ROOT; + + return root; +} + static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id) { - struct rb_root *root = &dev->priv.page_root; - struct rb_node **new = &root->rb_node; struct rb_node *parent = NULL; + struct rb_root *root; + struct rb_node **new; struct fw_page *nfp; struct fw_page *tfp; int i; + root = page_root_per_func_id(dev, func_id); + if (IS_ERR(root)) + return PTR_ERR(root); + + new = &root->rb_node; + while (*new) { parent = *new; tfp = rb_entry(parent, struct fw_page, rb_node); @@ -111,13 +142,20 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u return 0; } -static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr) +static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr, + u32 func_id) { - struct rb_root *root = &dev->priv.page_root; - struct rb_node *tmp = root->rb_node; struct fw_page *result = NULL; + struct rb_root *root; + struct rb_node *tmp; struct fw_page *tfp; + root = xa_load(&dev->priv.page_root_xa, func_id); + if (WARN_ON_ONCE(!root)) + return NULL; + + tmp = root->rb_node; + while (tmp) { tfp = rb_entry(tmp, struct fw_page, rb_node); if (tfp->addr < addr) { @@ -191,7 +229,13 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id) static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp, bool in_free_list) { - rb_erase(&fwp->rb_node, &dev->priv.page_root); + struct rb_root *root; + + root = xa_load(&dev->priv.page_root_xa, fwp->func_id); + if (WARN_ON_ONCE(!root)) + return; + + rb_erase(&fwp->rb_node, root); if (in_free_list) list_del(&fwp->list); dma_unmap_page(dev->device, fwp->addr & MLX5_U64_4K_PAGE_MASK, @@ -200,12 +244,12 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp, kfree(fwp); } -static void free_4k(struct mlx5_core_dev *dev, u64 addr) +static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id) { struct fw_page *fwp; int n; - fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK); + fwp 
= find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, func_id); if (!fwp) { mlx5_core_warn_rl(dev, "page not found\n"); return; @@ -340,7 +384,7 @@ retry: out_4k: for (i--; i >= 0; i--) - free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i])); + free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), func_id); out_free: kvfree(in); if (notify_fail) @@ -351,16 +395,19 @@ out_free: static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id, bool ec_function) { + struct rb_root *root; struct rb_node *p; int npages = 0; - p = rb_first(&dev->priv.page_root); + root = xa_load(&dev->priv.page_root_xa, func_id); + if (WARN_ON_ONCE(!root)) + return; + + p = rb_first(root); while (p) { struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node); p = rb_next(p); - if (fwp->func_id != func_id) - continue; npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count); free_fwp(dev, fwp, fwp->free_count); } @@ -378,6 +425,7 @@ static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id, static int reclaim_pages_cmd(struct mlx5_core_dev *dev, u32 *in, int in_size, u32 *out, int out_size) { + struct rb_root *root; struct fw_page *fwp; struct rb_node *p; u32 func_id; @@ -391,12 +439,14 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev, npages = MLX5_GET(manage_pages_in, in, input_num_entries); func_id = MLX5_GET(manage_pages_in, in, function_id); - p = rb_first(&dev->priv.page_root); + root = xa_load(&dev->priv.page_root_xa, func_id); + if (WARN_ON_ONCE(!root)) + return -EEXIST; + + p = rb_first(root); while (p && i < npages) { fwp = rb_entry(p, struct fw_page, rb_node); p = rb_next(p); - if (fwp->func_id != func_id) - continue; MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr); i++; @@ -446,7 +496,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, } for (i = 0; i < num_claimed; i++) - free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i])); + free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), func_id); if (nclaimed) *nclaimed = num_claimed; @@ -560,35 +610,49 @@ static int optimal_reclaimed_pages(void) return ret; } -int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) +static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev, + struct rb_root *root, u16 func_id) { unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); - struct fw_page *fwp; - struct rb_node *p; - int nclaimed = 0; - int err = 0; - do { - p = rb_first(&dev->priv.page_root); - if (p) { - fwp = rb_entry(p, struct fw_page, rb_node); - err = reclaim_pages(dev, fwp->func_id, - optimal_reclaimed_pages(), - &nclaimed, mlx5_core_is_ecpf(dev)); - - if (err) { - mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", - err); - return err; - } - if (nclaimed) - end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); + while (!RB_EMPTY_ROOT(root)) { + int nclaimed; + int err; + + err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(), + &nclaimed, mlx5_core_is_ecpf(dev)); + if (err) { + mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n", + err, func_id); + return err; } + + if (nclaimed) + end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); + if (time_after(jiffies, end)) { mlx5_core_warn(dev, "FW did not return all pages. 
giving up...\n"); break; } - } while (p); + } + + return 0; +} + +int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) +{ + struct rb_root *root; + unsigned long id; + void *entry; + + xa_for_each(&dev->priv.page_root_xa, id, entry) { + root = entry; + mlx5_reclaim_root_pages(dev, root, id); + xa_erase(&dev->priv.page_root_xa, id); + kfree(root); + } + + WARN_ON(!xa_empty(&dev->priv.page_root_xa)); WARN(dev->priv.fw_pages, "FW pages counter is %d after reclaiming all pages\n", @@ -605,17 +669,19 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) int mlx5_pagealloc_init(struct mlx5_core_dev *dev) { - dev->priv.page_root = RB_ROOT; INIT_LIST_HEAD(&dev->priv.free_list); dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator"); if (!dev->priv.pg_wq) return -ENOMEM; + xa_init(&dev->priv.page_root_xa); + return 0; } void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev) { + xa_destroy(&dev->priv.page_root_xa); destroy_workqueue(dev->priv.pg_wq); } diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 6a97ad601991..a0fcc4d13e93 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -541,7 +541,7 @@ struct mlx5_priv { /* pages stuff */ struct mlx5_nb pg_nb; struct workqueue_struct *pg_wq; - struct rb_root page_root; + struct xarray page_root_xa; int fw_pages; atomic_t reg_pages; struct list_head free_list; -- cgit v1.2.3 From e1613b5714ee6c186c9628e9958edf65e9d9cddd Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 27 Jul 2020 15:47:15 -0700 Subject: bpf: Fix bpf_ringbuf_output() signature to return long Due to bpf tree fix merge, bpf_ringbuf_output() signature ended up with int as a return type, while all other helpers got converted to returning long. So fix it in bpf-next now. Fixes: b0659d8a950d ("bpf: Fix definition of bpf_ringbuf_output() helper in UAPI comments") Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20200727224715.652037-1-andriin@fb.com --- include/uapi/linux/bpf.h | 2 +- tools/include/uapi/linux/bpf.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index e1ba4ae6a916..eb5e0c38eb2c 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3241,7 +3241,7 @@ union bpf_attr { * Return * The id is returned or 0 in case the id could not be retrieved. * - * int bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags) + * long bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags) * Description * Copy *size* bytes from *data* into a ring buffer *ringbuf*. * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index e1ba4ae6a916..eb5e0c38eb2c 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -3241,7 +3241,7 @@ union bpf_attr { * Return * The id is returned or 0 in case the id could not be retrieved. * - * int bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags) + * long bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags) * Description * Copy *size* bytes from *data* into a ring buffer *ringbuf*. 
* If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification -- cgit v1.2.3 From 7d8365771ffb0edc336f2cd45e96ef8214a83dca Mon Sep 17 00:00:00 2001 From: Paul Menzel Date: Fri, 3 Jul 2020 16:29:38 +0200 Subject: moduleparams: Add hexint type parameter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For bitmasks printing values in hex is more convenient. Prefix with `0x` to make it clear, that it’s a hex value, and pad it out. Using the helper for `amdgpu.ppfeaturemask`, it will look like below. Before: $ more /sys/module/amdgpu/parameters/ppfeaturemask 4294950911 After: $ more /sys/module/amdgpu/parameters/ppfeaturemask 0xffffbfff Cc: linux-kernel@vger.kernel.org Cc: amd-gfx@lists.freedesktop.org Signed-off-by: Paul Menzel Acked-by: Linus Torvalds Reviewed-by: Christian König Signed-off-by: Christian König Link: https://patchwork.freedesktop.org/patch/374726/ --- include/linux/moduleparam.h | 7 ++++++- kernel/params.c | 17 +++++++++-------- 2 files changed, 15 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index 3ef917ff0964..cff7261e98bb 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -118,7 +118,7 @@ struct kparam_array * you can create your own by defining those variables. * * Standard types are: - * byte, short, ushort, int, uint, long, ulong + * byte, hexint, short, ushort, int, uint, long, ulong * charp: a character pointer * bool: a bool, values 0/1, y/n, Y/N. * invbool: the above, only sense-reversed (N = true). @@ -448,6 +448,11 @@ extern int param_set_ullong(const char *val, const struct kernel_param *kp); extern int param_get_ullong(char *buffer, const struct kernel_param *kp); #define param_check_ullong(name, p) __param_check(name, p, unsigned long long) +extern const struct kernel_param_ops param_ops_hexint; +extern int param_set_hexint(const char *val, const struct kernel_param *kp); +extern int param_get_hexint(char *buffer, const struct kernel_param *kp); +#define param_check_hexint(name, p) param_check_uint(name, p) + extern const struct kernel_param_ops param_ops_charp; extern int param_set_charp(const char *val, const struct kernel_param *kp); extern int param_get_charp(char *buffer, const struct kernel_param *kp); diff --git a/kernel/params.c b/kernel/params.c index 111eee82b999..3835fb82c64b 100644 --- a/kernel/params.c +++ b/kernel/params.c @@ -233,14 +233,15 @@ char *parse_args(const char *doing, EXPORT_SYMBOL(param_ops_##name) -STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", kstrtou8); -STANDARD_PARAM_DEF(short, short, "%hi", kstrtos16); -STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", kstrtou16); -STANDARD_PARAM_DEF(int, int, "%i", kstrtoint); -STANDARD_PARAM_DEF(uint, unsigned int, "%u", kstrtouint); -STANDARD_PARAM_DEF(long, long, "%li", kstrtol); -STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", kstrtoul); -STANDARD_PARAM_DEF(ullong, unsigned long long, "%llu", kstrtoull); +STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", kstrtou8); +STANDARD_PARAM_DEF(short, short, "%hi", kstrtos16); +STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", kstrtou16); +STANDARD_PARAM_DEF(int, int, "%i", kstrtoint); +STANDARD_PARAM_DEF(uint, unsigned int, "%u", kstrtouint); +STANDARD_PARAM_DEF(long, long, "%li", kstrtol); +STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", kstrtoul); +STANDARD_PARAM_DEF(ullong, unsigned long long, "%llu", kstrtoull); +STANDARD_PARAM_DEF(hexint, unsigned int, "%#08x", kstrtouint); int 
param_set_charp(const char *val, const struct kernel_param *kp) { -- cgit v1.2.3 From d1718a1b7a86743b9c517bf9521695ba909c734f Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Fri, 19 Jun 2020 09:20:03 +0100 Subject: ACPI/IORT: Make iort_get_device_domain IRQ domain agnostic iort_get_device_domain() is PCI specific but it need not be, since it can be used to retrieve IRQ domain nexus of any kind by adding an irq_domain_bus_token input to it. Make it PCI agnostic by also renaming the requestor ID input to a more generic ID name. Signed-off-by: Lorenzo Pieralisi Acked-by: Bjorn Helgaas # pci/msi.c Cc: Will Deacon Cc: Hanjun Guo Cc: Bjorn Helgaas Cc: Sudeep Holla Cc: Robin Murphy Cc: "Rafael J. Wysocki" Link: https://lore.kernel.org/r/20200619082013.13661-3-lorenzo.pieralisi@arm.com Signed-off-by: Catalin Marinas --- drivers/acpi/arm64/iort.c | 14 +++++++------- drivers/pci/msi.c | 3 ++- include/linux/acpi_iort.h | 7 ++++--- 3 files changed, 13 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 5eee81758184..902e2aaca946 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -550,7 +550,6 @@ static struct acpi_iort_node *iort_find_dev_node(struct device *dev) node = iort_get_iort_node(dev->fwnode); if (node) return node; - /* * if not, then it should be a platform device defined in * DSDT/SSDT (with Named Component node in IORT) @@ -641,13 +640,13 @@ static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base) /** * iort_dev_find_its_id() - Find the ITS identifier for a device * @dev: The device. - * @req_id: Device's requester ID + * @id: Device's ID * @idx: Index of the ITS identifier list. * @its_id: ITS identifier. * * Returns: 0 on success, appropriate error value otherwise */ -static int iort_dev_find_its_id(struct device *dev, u32 req_id, +static int iort_dev_find_its_id(struct device *dev, u32 id, unsigned int idx, int *its_id) { struct acpi_iort_its_group *its; @@ -657,7 +656,7 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id, if (!node) return -ENXIO; - node = iort_node_map_id(node, req_id, NULL, IORT_MSI_TYPE); + node = iort_node_map_id(node, id, NULL, IORT_MSI_TYPE); if (!node) return -ENXIO; @@ -680,19 +679,20 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id, * * Returns: the MSI domain for this device, NULL otherwise */ -struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id) +struct irq_domain *iort_get_device_domain(struct device *dev, u32 id, + enum irq_domain_bus_token bus_token) { struct fwnode_handle *handle; int its_id; - if (iort_dev_find_its_id(dev, req_id, 0, &its_id)) + if (iort_dev_find_its_id(dev, id, 0, &its_id)) return NULL; handle = iort_find_domain_token(its_id); if (!handle) return NULL; - return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI); + return irq_find_matching_fwnode(handle, bus_token); } static void iort_set_device_domain(struct device *dev, diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 6b43a5455c7a..74a91f52ecc0 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -1558,7 +1558,8 @@ struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev) pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid); dom = of_msi_map_get_device_domain(&pdev->dev, rid); if (!dom) - dom = iort_get_device_domain(&pdev->dev, rid); + dom = iort_get_device_domain(&pdev->dev, rid, + DOMAIN_BUS_PCI_MSI); return dom; } #endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */ diff --git 
a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h index 8e7e2ec37f1b..08ec6bd2297f 100644 --- a/include/linux/acpi_iort.h +++ b/include/linux/acpi_iort.h @@ -29,7 +29,8 @@ struct fwnode_handle *iort_find_domain_token(int trans_id); #ifdef CONFIG_ACPI_IORT void acpi_iort_init(void); u32 iort_msi_map_rid(struct device *dev, u32 req_id); -struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id); +struct irq_domain *iort_get_device_domain(struct device *dev, u32 id, + enum irq_domain_bus_token bus_token); void acpi_configure_pmsi_domain(struct device *dev); int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id); /* IOMMU interface */ @@ -40,8 +41,8 @@ int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head); static inline void acpi_iort_init(void) { } static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id) { return req_id; } -static inline struct irq_domain *iort_get_device_domain(struct device *dev, - u32 req_id) +static inline struct irq_domain *iort_get_device_domain( + struct device *dev, u32 id, enum irq_domain_bus_token bus_token) { return NULL; } static inline void acpi_configure_pmsi_domain(struct device *dev) { } /* IOMMU interface */ -- cgit v1.2.3 From 39c3cf566ceafa7c1ae331a5f26fbb685d670001 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Fri, 19 Jun 2020 09:20:04 +0100 Subject: ACPI/IORT: Make iort_msi_map_rid() PCI agnostic There is nothing PCI specific in iort_msi_map_rid(). Rename the function using a bus protocol agnostic name, iort_msi_map_id(), and convert current callers to it. Signed-off-by: Lorenzo Pieralisi Acked-by: Bjorn Helgaas Cc: Will Deacon Cc: Hanjun Guo Cc: Bjorn Helgaas Cc: Sudeep Holla Cc: Robin Murphy Cc: "Rafael J. Wysocki" Link: https://lore.kernel.org/r/20200619082013.13661-4-lorenzo.pieralisi@arm.com Signed-off-by: Catalin Marinas --- drivers/acpi/arm64/iort.c | 12 ++++++------ drivers/pci/msi.c | 2 +- include/linux/acpi_iort.h | 6 +++--- 3 files changed, 10 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 902e2aaca946..53f9ef515089 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -568,22 +568,22 @@ static struct acpi_iort_node *iort_find_dev_node(struct device *dev) } /** - * iort_msi_map_rid() - Map a MSI requester ID for a device + * iort_msi_map_id() - Map a MSI input ID for a device * @dev: The device for which the mapping is to be done. - * @req_id: The device requester ID. + * @input_id: The device input ID. * - * Returns: mapped MSI RID on success, input requester ID otherwise + * Returns: mapped MSI ID on success, input ID otherwise */ -u32 iort_msi_map_rid(struct device *dev, u32 req_id) +u32 iort_msi_map_id(struct device *dev, u32 input_id) { struct acpi_iort_node *node; u32 dev_id; node = iort_find_dev_node(dev); if (!node) - return req_id; + return input_id; - iort_node_map_id(node, req_id, &dev_id, IORT_MSI_TYPE); + iort_node_map_id(node, input_id, &dev_id, IORT_MSI_TYPE); return dev_id; } diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 74a91f52ecc0..77f48b95e277 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -1536,7 +1536,7 @@ u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev) of_node = irq_domain_get_of_node(domain); rid = of_node ? 
of_msi_map_rid(&pdev->dev, of_node, rid) : - iort_msi_map_rid(&pdev->dev, rid); + iort_msi_map_id(&pdev->dev, rid); return rid; } diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h index 08ec6bd2297f..e51425e083da 100644 --- a/include/linux/acpi_iort.h +++ b/include/linux/acpi_iort.h @@ -28,7 +28,7 @@ void iort_deregister_domain_token(int trans_id); struct fwnode_handle *iort_find_domain_token(int trans_id); #ifdef CONFIG_ACPI_IORT void acpi_iort_init(void); -u32 iort_msi_map_rid(struct device *dev, u32 req_id); +u32 iort_msi_map_id(struct device *dev, u32 id); struct irq_domain *iort_get_device_domain(struct device *dev, u32 id, enum irq_domain_bus_token bus_token); void acpi_configure_pmsi_domain(struct device *dev); @@ -39,8 +39,8 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev); int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head); #else static inline void acpi_iort_init(void) { } -static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id) -{ return req_id; } +static inline u32 iort_msi_map_id(struct device *dev, u32 id) +{ return id; } static inline struct irq_domain *iort_get_device_domain( struct device *dev, u32 id, enum irq_domain_bus_token bus_token) { return NULL; } -- cgit v1.2.3 From b8e069a2a8da02137605ba585837a3a0c45df01a Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Fri, 19 Jun 2020 09:20:06 +0100 Subject: ACPI/IORT: Add an input ID to acpi_dma_configure() Some HW devices are created as child devices of proprietary busses, that have a bus specific policy defining how the child devices wires representing the devices ID are translated into IOMMU and IRQ controllers device IDs. Current IORT code provides translations for: - PCI devices, where the device ID is well identified at bus level as the requester ID (RID) - Platform devices that are endpoint devices where the device ID is retrieved from the ACPI object IORT mappings (Named components single mappings). A platform device is represented in IORT as a named component node For devices that are child devices of proprietary busses the IORT firmware represents the bus node as a named component node in IORT and it is up to that named component node to define in/out bus specific ID translations for the bus child devices that are allocated and created in a bus specific manner. In order to make IORT ID translations available for proprietary bus child devices, the current ACPI (and IORT) code must be augmented to provide an additional ID parameter to acpi_dma_configure() representing the child devices input ID. This ID is bus specific and it is retrieved in bus specific code. By adding an ID parameter to acpi_dma_configure(), the IORT code can map the child device ID to an IOMMU stream ID through the IORT named component representing the bus in/out ID mappings. Signed-off-by: Lorenzo Pieralisi Cc: Will Deacon Cc: Hanjun Guo Cc: Sudeep Holla Cc: Robin Murphy Cc: "Rafael J. 
Wysocki" Link: https://lore.kernel.org/r/20200619082013.13661-6-lorenzo.pieralisi@arm.com Signed-off-by: Catalin Marinas --- drivers/acpi/arm64/iort.c | 59 +++++++++++++++++++++++++++++++++++------------ drivers/acpi/scan.c | 8 ++++--- include/acpi/acpi_bus.h | 9 ++++++-- include/linux/acpi.h | 7 ++++++ include/linux/acpi_iort.h | 7 +++--- 5 files changed, 67 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 421c6976ab81..ec782e4a0fe4 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -978,19 +978,54 @@ static void iort_named_component_init(struct device *dev, nc->node_flags); } +static int iort_nc_iommu_map(struct device *dev, struct acpi_iort_node *node) +{ + struct acpi_iort_node *parent; + int err = -ENODEV, i = 0; + u32 streamid = 0; + + do { + + parent = iort_node_map_platform_id(node, &streamid, + IORT_IOMMU_TYPE, + i++); + + if (parent) + err = iort_iommu_xlate(dev, parent, streamid); + } while (parent && !err); + + return err; +} + +static int iort_nc_iommu_map_id(struct device *dev, + struct acpi_iort_node *node, + const u32 *in_id) +{ + struct acpi_iort_node *parent; + u32 streamid; + + parent = iort_node_map_id(node, *in_id, &streamid, IORT_IOMMU_TYPE); + if (parent) + return iort_iommu_xlate(dev, parent, streamid); + + return -ENODEV; +} + + /** - * iort_iommu_configure - Set-up IOMMU configuration for a device. + * iort_iommu_configure_id - Set-up IOMMU configuration for a device. * * @dev: device to configure + * @id_in: optional input id const value pointer * * Returns: iommu_ops pointer on configuration success * NULL on configuration failure */ -const struct iommu_ops *iort_iommu_configure(struct device *dev) +const struct iommu_ops *iort_iommu_configure_id(struct device *dev, + const u32 *id_in) { - struct acpi_iort_node *node, *parent; + struct acpi_iort_node *node; const struct iommu_ops *ops; - u32 streamid = 0; int err = -ENODEV; /* @@ -1019,21 +1054,13 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev) if (fwspec && iort_pci_rc_supports_ats(node)) fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS; } else { - int i = 0; - node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT, iort_match_node_callback, dev); if (!node) return NULL; - do { - parent = iort_node_map_platform_id(node, &streamid, - IORT_IOMMU_TYPE, - i++); - - if (parent) - err = iort_iommu_xlate(dev, parent, streamid); - } while (parent && !err); + err = id_in ? iort_nc_iommu_map_id(dev, node, id_in) : + iort_nc_iommu_map(dev, node); if (!err) iort_named_component_init(dev, node); @@ -1058,6 +1085,7 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev) return ops; } + #else static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev) { return NULL; } @@ -1066,7 +1094,8 @@ static inline int iort_add_device_replay(const struct iommu_ops *ops, { return 0; } int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head) { return 0; } -const struct iommu_ops *iort_iommu_configure(struct device *dev) +const struct iommu_ops *iort_iommu_configure_id(struct device *dev, + const u32 *input_id) { return NULL; } #endif diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 8777faced51a..2142f1554761 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1457,8 +1457,10 @@ int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset, * acpi_dma_configure - Set-up DMA configuration for the device. 
* @dev: The pointer to the device * @attr: device dma attributes + * @input_id: input device id const value pointer */ -int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr) +int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr, + const u32 *input_id) { const struct iommu_ops *iommu; u64 dma_addr = 0, size = 0; @@ -1470,7 +1472,7 @@ int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr) iort_dma_setup(dev, &dma_addr, &size); - iommu = iort_iommu_configure(dev); + iommu = iort_iommu_configure_id(dev, input_id); if (PTR_ERR(iommu) == -EPROBE_DEFER) return -EPROBE_DEFER; @@ -1479,7 +1481,7 @@ int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr) return 0; } -EXPORT_SYMBOL_GPL(acpi_dma_configure); +EXPORT_SYMBOL_GPL(acpi_dma_configure_id); static void acpi_init_coherency(struct acpi_device *adev) { diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 5afb6ceb284f..a3abcc4b7d9f 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -588,8 +588,13 @@ bool acpi_dma_supported(struct acpi_device *adev); enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev); int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset, u64 *size); -int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr); - +int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr, + const u32 *input_id); +static inline int acpi_dma_configure(struct device *dev, + enum dev_dma_attr attr) +{ + return acpi_dma_configure_id(dev, attr, NULL); +} struct acpi_device *acpi_find_child_device(struct acpi_device *parent, u64 address, bool check_children); int acpi_is_root_bridge(acpi_handle); diff --git a/include/linux/acpi.h b/include/linux/acpi.h index d661cd0ee64d..6d2c47489d90 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -905,6 +905,13 @@ static inline int acpi_dma_configure(struct device *dev, return 0; } +static inline int acpi_dma_configure_id(struct device *dev, + enum dev_dma_attr attr, + const u32 *input_id) +{ + return 0; +} + #define ACPI_PTR(_ptr) (NULL) static inline void acpi_device_set_enumerated(struct acpi_device *adev) diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h index e51425e083da..20a32120bb88 100644 --- a/include/linux/acpi_iort.h +++ b/include/linux/acpi_iort.h @@ -35,7 +35,8 @@ void acpi_configure_pmsi_domain(struct device *dev); int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id); /* IOMMU interface */ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *size); -const struct iommu_ops *iort_iommu_configure(struct device *dev); +const struct iommu_ops *iort_iommu_configure_id(struct device *dev, + const u32 *id_in); int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head); #else static inline void acpi_iort_init(void) { } @@ -48,8 +49,8 @@ static inline void acpi_configure_pmsi_domain(struct device *dev) { } /* IOMMU interface */ static inline void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *size) { } -static inline const struct iommu_ops *iort_iommu_configure( - struct device *dev) +static inline const struct iommu_ops *iort_iommu_configure_id( + struct device *dev, const u32 *id_in) { return NULL; } static inline int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head) -- cgit v1.2.3 From 746a71d02b5d15817fcb13c956ba999a87773952 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Fri, 19 Jun 2020 09:20:07 +0100 Subject: of/iommu: Make of_map_rid() PCI agnostic There is nothing PCI 
specific (other than the RID - requester ID) in the of_map_rid() implementation, so the same function can be reused for input/output IDs mapping for other busses just as well. Rename the RID instances/names to a generic "id" tag. No functionality change intended. Signed-off-by: Lorenzo Pieralisi Reviewed-by: Rob Herring Acked-by: Joerg Roedel Cc: Rob Herring Cc: Joerg Roedel Cc: Robin Murphy Cc: Marc Zyngier Link: https://lore.kernel.org/r/20200619082013.13661-7-lorenzo.pieralisi@arm.com Signed-off-by: Catalin Marinas --- drivers/iommu/of_iommu.c | 4 ++-- drivers/of/base.c | 42 +++++++++++++++++++++--------------------- drivers/of/irq.c | 2 +- include/linux/of.h | 4 ++-- 4 files changed, 26 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c index 20738aacac89..016316244737 100644 --- a/drivers/iommu/of_iommu.c +++ b/drivers/iommu/of_iommu.c @@ -129,7 +129,7 @@ static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data) struct of_phandle_args iommu_spec = { .args_count = 1 }; int err; - err = of_map_rid(info->np, alias, "iommu-map", "iommu-map-mask", + err = of_map_id(info->np, alias, "iommu-map", "iommu-map-mask", &iommu_spec.np, iommu_spec.args); if (err) return err == -ENODEV ? NO_IOMMU : err; @@ -145,7 +145,7 @@ static int of_fsl_mc_iommu_init(struct fsl_mc_device *mc_dev, struct of_phandle_args iommu_spec = { .args_count = 1 }; int err; - err = of_map_rid(master_np, mc_dev->icid, "iommu-map", + err = of_map_id(master_np, mc_dev->icid, "iommu-map", "iommu-map-mask", &iommu_spec.np, iommu_spec.args); if (err) diff --git a/drivers/of/base.c b/drivers/of/base.c index ae03b1218b06..ea44fea99813 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -2201,15 +2201,15 @@ int of_find_last_cache_level(unsigned int cpu) } /** - * of_map_rid - Translate a requester ID through a downstream mapping. + * of_map_id - Translate an ID through a downstream mapping. * @np: root complex device node. - * @rid: device requester ID to map. + * @id: device ID to map. * @map_name: property name of the map to use. * @map_mask_name: optional property name of the mask to use. * @target: optional pointer to a target device node. * @id_out: optional pointer to receive the translated ID. * - * Given a device requester ID, look up the appropriate implementation-defined + * Given a device ID, look up the appropriate implementation-defined * platform ID and/or the target device which receives transactions on that * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or * @id_out may be NULL if only the other is required. If @target points to @@ -2219,11 +2219,11 @@ int of_find_last_cache_level(unsigned int cpu) * * Return: 0 on success or a standard error code on failure. 
*/ -int of_map_rid(struct device_node *np, u32 rid, +int of_map_id(struct device_node *np, u32 id, const char *map_name, const char *map_mask_name, struct device_node **target, u32 *id_out) { - u32 map_mask, masked_rid; + u32 map_mask, masked_id; int map_len; const __be32 *map = NULL; @@ -2235,7 +2235,7 @@ int of_map_rid(struct device_node *np, u32 rid, if (target) return -ENODEV; /* Otherwise, no map implies no translation */ - *id_out = rid; + *id_out = id; return 0; } @@ -2255,22 +2255,22 @@ int of_map_rid(struct device_node *np, u32 rid, if (map_mask_name) of_property_read_u32(np, map_mask_name, &map_mask); - masked_rid = map_mask & rid; + masked_id = map_mask & id; for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) { struct device_node *phandle_node; - u32 rid_base = be32_to_cpup(map + 0); + u32 id_base = be32_to_cpup(map + 0); u32 phandle = be32_to_cpup(map + 1); u32 out_base = be32_to_cpup(map + 2); - u32 rid_len = be32_to_cpup(map + 3); + u32 id_len = be32_to_cpup(map + 3); - if (rid_base & ~map_mask) { - pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n", + if (id_base & ~map_mask) { + pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores id-base (0x%x)\n", np, map_name, map_name, - map_mask, rid_base); + map_mask, id_base); return -EFAULT; } - if (masked_rid < rid_base || masked_rid >= rid_base + rid_len) + if (masked_id < id_base || masked_id >= id_base + id_len) continue; phandle_node = of_find_node_by_phandle(phandle); @@ -2288,20 +2288,20 @@ int of_map_rid(struct device_node *np, u32 rid, } if (id_out) - *id_out = masked_rid - rid_base + out_base; + *id_out = masked_id - id_base + out_base; - pr_debug("%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n", - np, map_name, map_mask, rid_base, out_base, - rid_len, rid, masked_rid - rid_base + out_base); + pr_debug("%pOF: %s, using mask %08x, id-base: %08x, out-base: %08x, length: %08x, id: %08x -> %08x\n", + np, map_name, map_mask, id_base, out_base, + id_len, id, masked_id - id_base + out_base); return 0; } - pr_info("%pOF: no %s translation for rid 0x%x on %pOF\n", np, map_name, - rid, target && *target ? *target : NULL); + pr_info("%pOF: no %s translation for id 0x%x on %pOF\n", np, map_name, + id, target && *target ? *target : NULL); /* Bypasses translation */ if (id_out) - *id_out = rid; + *id_out = id; return 0; } -EXPORT_SYMBOL_GPL(of_map_rid); +EXPORT_SYMBOL_GPL(of_map_id); diff --git a/drivers/of/irq.c b/drivers/of/irq.c index a296eaf52a5b..d632bc5b3a2d 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -587,7 +587,7 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np, * "msi-map" property. 
*/ for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent) - if (!of_map_rid(parent_dev->of_node, rid_in, "msi-map", + if (!of_map_id(parent_dev->of_node, rid_in, "msi-map", "msi-map-mask", np, &rid_out)) break; return rid_out; diff --git a/include/linux/of.h b/include/linux/of.h index c669c0a4732f..60abe3f636ad 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -554,7 +554,7 @@ bool of_console_check(struct device_node *dn, char *name, int index); extern int of_cpu_node_to_id(struct device_node *np); -int of_map_rid(struct device_node *np, u32 rid, +int of_map_id(struct device_node *np, u32 id, const char *map_name, const char *map_mask_name, struct device_node **target, u32 *id_out); @@ -978,7 +978,7 @@ static inline int of_cpu_node_to_id(struct device_node *np) return -ENODEV; } -static inline int of_map_rid(struct device_node *np, u32 rid, +static inline int of_map_id(struct device_node *np, u32 id, const char *map_name, const char *map_mask_name, struct device_node **target, u32 *id_out) { -- cgit v1.2.3 From a081bd4af4ce80d845a0bab355ab5d0822db8058 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Fri, 19 Jun 2020 09:20:08 +0100 Subject: of/device: Add input id to of_dma_configure() Devices sitting on proprietary busses have a device ID space that is owned by the respective bus and related firmware bindings. In order to let the generic OF layer handle the input translations to an IOMMU id, for such busses the current of_dma_configure() interface should be extended in order to allow the bus layer to provide the device input id parameter - that is retrieved/assigned in bus specific code and firmware. Augment of_dma_configure() to add an optional input_id parameter, leaving current functionality unchanged. Signed-off-by: Lorenzo Pieralisi Reviewed-by: Rob Herring Cc: Rob Herring Cc: Robin Murphy Cc: Joerg Roedel Cc: Laurentiu Tudor Link: https://lore.kernel.org/r/20200619082013.13661-8-lorenzo.pieralisi@arm.com Signed-off-by: Catalin Marinas --- drivers/bus/fsl-mc/fsl-mc-bus.c | 4 +- drivers/iommu/of_iommu.c | 81 ++++++++++++++++++++++------------------- drivers/of/device.c | 8 ++-- include/linux/of_device.h | 16 +++++++- include/linux/of_iommu.h | 6 ++- 5 files changed, 70 insertions(+), 45 deletions(-) (limited to 'include') diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c index 40526da5c6a6..8ead3f0238f2 100644 --- a/drivers/bus/fsl-mc/fsl-mc-bus.c +++ b/drivers/bus/fsl-mc/fsl-mc-bus.c @@ -118,11 +118,13 @@ static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env) static int fsl_mc_dma_configure(struct device *dev) { struct device *dma_dev = dev; + struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); + u32 input_id = mc_dev->icid; while (dev_is_fsl_mc(dma_dev)) dma_dev = dma_dev->parent; - return of_dma_configure(dev, dma_dev->of_node, 0); + return of_dma_configure_id(dev, dma_dev->of_node, 0, &input_id); } static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c index 016316244737..e505b9130a1c 100644 --- a/drivers/iommu/of_iommu.c +++ b/drivers/iommu/of_iommu.c @@ -118,46 +118,66 @@ static int of_iommu_xlate(struct device *dev, return ret; } -struct of_pci_iommu_alias_info { - struct device *dev; - struct device_node *np; -}; - -static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data) +static int of_iommu_configure_dev_id(struct device_node *master_np, + struct device *dev, + const u32 *id) { - struct 
of_pci_iommu_alias_info *info = data; struct of_phandle_args iommu_spec = { .args_count = 1 }; int err; - err = of_map_id(info->np, alias, "iommu-map", "iommu-map-mask", - &iommu_spec.np, iommu_spec.args); + err = of_map_id(master_np, *id, "iommu-map", + "iommu-map-mask", &iommu_spec.np, + iommu_spec.args); if (err) return err == -ENODEV ? NO_IOMMU : err; - err = of_iommu_xlate(info->dev, &iommu_spec); + err = of_iommu_xlate(dev, &iommu_spec); of_node_put(iommu_spec.np); return err; } -static int of_fsl_mc_iommu_init(struct fsl_mc_device *mc_dev, - struct device_node *master_np) +static int of_iommu_configure_dev(struct device_node *master_np, + struct device *dev) { - struct of_phandle_args iommu_spec = { .args_count = 1 }; - int err; - - err = of_map_id(master_np, mc_dev->icid, "iommu-map", - "iommu-map-mask", &iommu_spec.np, - iommu_spec.args); - if (err) - return err == -ENODEV ? NO_IOMMU : err; + struct of_phandle_args iommu_spec; + int err = NO_IOMMU, idx = 0; + + while (!of_parse_phandle_with_args(master_np, "iommus", + "#iommu-cells", + idx, &iommu_spec)) { + err = of_iommu_xlate(dev, &iommu_spec); + of_node_put(iommu_spec.np); + idx++; + if (err) + break; + } - err = of_iommu_xlate(&mc_dev->dev, &iommu_spec); - of_node_put(iommu_spec.np); return err; } +struct of_pci_iommu_alias_info { + struct device *dev; + struct device_node *np; +}; + +static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data) +{ + struct of_pci_iommu_alias_info *info = data; + u32 input_id = alias; + + return of_iommu_configure_dev_id(info->np, info->dev, &input_id); +} + +static int of_iommu_configure_device(struct device_node *master_np, + struct device *dev, const u32 *id) +{ + return (id) ? of_iommu_configure_dev_id(master_np, dev, id) : + of_iommu_configure_dev(master_np, dev); +} + const struct iommu_ops *of_iommu_configure(struct device *dev, - struct device_node *master_np) + struct device_node *master_np, + const u32 *id) { const struct iommu_ops *ops = NULL; struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); @@ -188,21 +208,8 @@ const struct iommu_ops *of_iommu_configure(struct device *dev, pci_request_acs(); err = pci_for_each_dma_alias(to_pci_dev(dev), of_pci_iommu_init, &info); - } else if (dev_is_fsl_mc(dev)) { - err = of_fsl_mc_iommu_init(to_fsl_mc_device(dev), master_np); } else { - struct of_phandle_args iommu_spec; - int idx = 0; - - while (!of_parse_phandle_with_args(master_np, "iommus", - "#iommu-cells", - idx, &iommu_spec)) { - err = of_iommu_xlate(dev, &iommu_spec); - of_node_put(iommu_spec.np); - idx++; - if (err) - break; - } + err = of_iommu_configure_device(master_np, dev, id); fwspec = dev_iommu_fwspec_get(dev); if (!err && fwspec) diff --git a/drivers/of/device.c b/drivers/of/device.c index 27203bfd0b22..b439c1e05434 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -78,6 +78,7 @@ int of_device_add(struct platform_device *ofdev) * @np: Pointer to OF node having DMA configuration * @force_dma: Whether device is to be set up by of_dma_configure() even if * DMA capability is not explicitly described by firmware. + * @id: Optional const pointer value input id * * Try to get devices's DMA configuration from DT and update it * accordingly. @@ -86,7 +87,8 @@ int of_device_add(struct platform_device *ofdev) * can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events * to fix up DMA configuration. 
*/ -int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma) +int of_dma_configure_id(struct device *dev, struct device_node *np, + bool force_dma, const u32 *id) { u64 dma_addr, paddr, size = 0; int ret; @@ -160,7 +162,7 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma) dev_dbg(dev, "device is%sdma coherent\n", coherent ? " " : " not "); - iommu = of_iommu_configure(dev, np); + iommu = of_iommu_configure(dev, np, id); if (PTR_ERR(iommu) == -EPROBE_DEFER) return -EPROBE_DEFER; @@ -171,7 +173,7 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma) return 0; } -EXPORT_SYMBOL_GPL(of_dma_configure); +EXPORT_SYMBOL_GPL(of_dma_configure_id); int of_device_register(struct platform_device *pdev) { diff --git a/include/linux/of_device.h b/include/linux/of_device.h index 8d31e39dd564..07ca187fc5e4 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h @@ -55,9 +55,15 @@ static inline struct device_node *of_cpu_device_node_get(int cpu) return of_node_get(cpu_dev->of_node); } -int of_dma_configure(struct device *dev, +int of_dma_configure_id(struct device *dev, struct device_node *np, - bool force_dma); + bool force_dma, const u32 *id); +static inline int of_dma_configure(struct device *dev, + struct device_node *np, + bool force_dma) +{ + return of_dma_configure_id(dev, np, force_dma, NULL); +} #else /* CONFIG_OF */ static inline int of_driver_match_device(struct device *dev, @@ -106,6 +112,12 @@ static inline struct device_node *of_cpu_device_node_get(int cpu) return NULL; } +static inline int of_dma_configure_id(struct device *dev, + struct device_node *np, + bool force_dma) +{ + return 0; +} static inline int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma) diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h index f3d40dd7bb66..16f4b3e87f20 100644 --- a/include/linux/of_iommu.h +++ b/include/linux/of_iommu.h @@ -13,7 +13,8 @@ extern int of_get_dma_window(struct device_node *dn, const char *prefix, size_t *size); extern const struct iommu_ops *of_iommu_configure(struct device *dev, - struct device_node *master_np); + struct device_node *master_np, + const u32 *id); #else @@ -25,7 +26,8 @@ static inline int of_get_dma_window(struct device_node *dn, const char *prefix, } static inline const struct iommu_ops *of_iommu_configure(struct device *dev, - struct device_node *master_np) + struct device_node *master_np, + const u32 *id) { return NULL; } -- cgit v1.2.3 From 6f881aba01109a01a43e4f135673c19190f61133 Mon Sep 17 00:00:00 2001 From: Diana Craciun Date: Fri, 19 Jun 2020 09:20:10 +0100 Subject: of/irq: make of_msi_map_get_device_domain() bus agnostic of_msi_map_get_device_domain() is PCI specific but it need not be and can be easily changed to be bus agnostic in order to be used by other busses by adding an IRQ domain bus token as an input parameter. 
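With the bus token now supplied by the caller, a non-PCI bus can reuse the same msi-map walk. As a minimal sketch (assuming an fsl-mc style bus that already knows its ICID; illustrative, not part of the original patch):

	/* Resolve the MSI domain for a bus-specific device ID */
	struct irq_domain *dom;

	dom = of_msi_map_get_device_domain(&mc_dev->dev, mc_dev->icid,
					   DOMAIN_BUS_FSL_MC_MSI);
	if (!dom)
		return -ENODEV;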
Signed-off-by: Diana Craciun Signed-off-by: Lorenzo Pieralisi Reviewed-by: Rob Herring Acked-by: Bjorn Helgaas # pci/msi.c Cc: Bjorn Helgaas Cc: Rob Herring Cc: Marc Zyngier Link: https://lore.kernel.org/r/20200619082013.13661-10-lorenzo.pieralisi@arm.com Signed-off-by: Catalin Marinas --- drivers/of/irq.c | 8 +++++--- drivers/pci/msi.c | 2 +- include/linux/of_irq.h | 5 +++-- 3 files changed, 9 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/of/irq.c b/drivers/of/irq.c index d632bc5b3a2d..1005e4f349ef 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -613,18 +613,20 @@ u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in) * of_msi_map_get_device_domain - Use msi-map to find the relevant MSI domain * @dev: device for which the mapping is to be done. * @rid: Requester ID for the device. + * @bus_token: Bus token * * Walk up the device hierarchy looking for devices with a "msi-map" * property. * * Returns: the MSI domain for this device (or NULL on failure) */ -struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 rid) +struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 id, + u32 bus_token) { struct device_node *np = NULL; - __of_msi_map_rid(dev, &np, rid); - return irq_find_matching_host(np, DOMAIN_BUS_PCI_MSI); + __of_msi_map_rid(dev, &np, id); + return irq_find_matching_host(np, bus_token); } /** diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 77f48b95e277..b4bfe0b03b2d 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -1556,7 +1556,7 @@ struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev) u32 rid = pci_dev_id(pdev); pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid); - dom = of_msi_map_get_device_domain(&pdev->dev, rid); + dom = of_msi_map_get_device_domain(&pdev->dev, rid, DOMAIN_BUS_PCI_MSI); if (!dom) dom = iort_get_device_domain(&pdev->dev, rid, DOMAIN_BUS_PCI_MSI); diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h index 1214cabb2247..7142a3722758 100644 --- a/include/linux/of_irq.h +++ b/include/linux/of_irq.h @@ -52,7 +52,8 @@ extern struct irq_domain *of_msi_get_domain(struct device *dev, struct device_node *np, enum irq_domain_bus_token token); extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev, - u32 rid); + u32 id, + u32 bus_token); extern void of_msi_configure(struct device *dev, struct device_node *np); u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in); #else @@ -85,7 +86,7 @@ static inline struct irq_domain *of_msi_get_domain(struct device *dev, return NULL; } static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev, - u32 rid) + u32 id, u32 bus_token) { return NULL; } -- cgit v1.2.3 From 2bcdd8f2c07f1aa1bfd34fa0dab8e06949e34846 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Fri, 19 Jun 2020 09:20:11 +0100 Subject: of/irq: Make of_msi_map_rid() PCI bus agnostic There is nothing PCI bus specific in the of_msi_map_rid() implementation other than the requester ID tag for the input ID space. Rename requester ID to a more generic ID so that the translation code can be used by all busses that require input/output ID translations. No functional change intended. 
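The renamed helper keeps the same semantics: walk up the device hierarchy, apply any "msi-map" found, and return the translated ID. A rough sketch of a caller (the msi-map values are made up for illustration):

	/*
	 * With an msi-map entry of <0x0 &its 0x10000 0x10000>, an input ID
	 * of 0x42 would translate to 0x10042.
	 */
	u32 dev_id = of_msi_map_id(dev, irq_domain_get_of_node(domain), input_id);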
Signed-off-by: Lorenzo Pieralisi Reviewed-by: Rob Herring Cc: Bjorn Helgaas Cc: Rob Herring Cc: Marc Zyngier Link: https://lore.kernel.org/r/20200619082013.13661-11-lorenzo.pieralisi@arm.com Signed-off-by: Catalin Marinas --- drivers/of/irq.c | 28 ++++++++++++++-------------- drivers/pci/msi.c | 2 +- include/linux/of_irq.h | 8 ++++---- 3 files changed, 19 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 1005e4f349ef..25d17b8a1a1a 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -576,43 +576,43 @@ err: } } -static u32 __of_msi_map_rid(struct device *dev, struct device_node **np, - u32 rid_in) +static u32 __of_msi_map_id(struct device *dev, struct device_node **np, + u32 id_in) { struct device *parent_dev; - u32 rid_out = rid_in; + u32 id_out = id_in; /* * Walk up the device parent links looking for one with a * "msi-map" property. */ for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent) - if (!of_map_id(parent_dev->of_node, rid_in, "msi-map", - "msi-map-mask", np, &rid_out)) + if (!of_map_id(parent_dev->of_node, id_in, "msi-map", + "msi-map-mask", np, &id_out)) break; - return rid_out; + return id_out; } /** - * of_msi_map_rid - Map a MSI requester ID for a device. + * of_msi_map_id - Map a MSI ID for a device. * @dev: device for which the mapping is to be done. * @msi_np: device node of the expected msi controller. - * @rid_in: unmapped MSI requester ID for the device. + * @id_in: unmapped MSI ID for the device. * * Walk up the device hierarchy looking for devices with a "msi-map" - * property. If found, apply the mapping to @rid_in. + * property. If found, apply the mapping to @id_in. * - * Returns the mapped MSI requester ID. + * Returns the mapped MSI ID. */ -u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in) +u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in) { - return __of_msi_map_rid(dev, &msi_np, rid_in); + return __of_msi_map_id(dev, &msi_np, id_in); } /** * of_msi_map_get_device_domain - Use msi-map to find the relevant MSI domain * @dev: device for which the mapping is to be done. - * @rid: Requester ID for the device. + * @id: Device ID. * @bus_token: Bus token * * Walk up the device hierarchy looking for devices with a "msi-map" @@ -625,7 +625,7 @@ struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 id, { struct device_node *np = NULL; - __of_msi_map_rid(dev, &np, id); + __of_msi_map_id(dev, &np, id); return irq_find_matching_host(np, bus_token); } diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index b4bfe0b03b2d..19aeadb22f11 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -1535,7 +1535,7 @@ u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev) pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid); of_node = irq_domain_get_of_node(domain); - rid = of_node ? of_msi_map_rid(&pdev->dev, of_node, rid) : + rid = of_node ? 
of_msi_map_id(&pdev->dev, of_node, rid) : iort_msi_map_id(&pdev->dev, rid); return rid; diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h index 7142a3722758..e8b78139f78c 100644 --- a/include/linux/of_irq.h +++ b/include/linux/of_irq.h @@ -55,7 +55,7 @@ extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 id, u32 bus_token); extern void of_msi_configure(struct device *dev, struct device_node *np); -u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in); +u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in); #else static inline int of_irq_count(struct device_node *dev) { @@ -93,10 +93,10 @@ static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev static inline void of_msi_configure(struct device *dev, struct device_node *np) { } -static inline u32 of_msi_map_rid(struct device *dev, - struct device_node *msi_np, u32 rid_in) +static inline u32 of_msi_map_id(struct device *dev, + struct device_node *msi_np, u32 id_in) { - return rid_in; + return id_in; } #endif -- cgit v1.2.3 From 0516c2f6ae6570a0c4a081189d71a48cfadc34a7 Mon Sep 17 00:00:00 2001 From: Daniel Wagner Date: Tue, 28 Jul 2020 18:20:36 +0200 Subject: block: Remove callback typedefs for blk_mq_ops No need to define typedefs for the callbacks, because there is not a single user except blk_mq_ops. Signed-off-by: Daniel Wagner Signed-off-by: Jens Axboe --- include/linux/blk-mq.h | 50 ++++++++++++++++++-------------------------------- 1 file changed, 18 insertions(+), 32 deletions(-) (limited to 'include') diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 23230c1d031e..9d2d5ad367a4 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -267,27 +267,9 @@ struct blk_mq_queue_data { bool last; }; -typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *, - const struct blk_mq_queue_data *); -typedef void (commit_rqs_fn)(struct blk_mq_hw_ctx *); -typedef bool (get_budget_fn)(struct request_queue *); -typedef void (put_budget_fn)(struct request_queue *); -typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool); -typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); -typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); -typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *, - unsigned int, unsigned int); -typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *, - unsigned int); - typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *, bool); typedef bool (busy_tag_iter_fn)(struct request *, void *, bool); -typedef int (poll_fn)(struct blk_mq_hw_ctx *); -typedef int (map_queues_fn)(struct blk_mq_tag_set *set); -typedef bool (busy_fn)(struct request_queue *); -typedef void (complete_fn)(struct request *); -typedef void (cleanup_rq_fn)(struct request *); /** * struct blk_mq_ops - Callback functions that implements block driver @@ -297,7 +279,8 @@ struct blk_mq_ops { /** * @queue_rq: Queue a new request from block IO. */ - queue_rq_fn *queue_rq; + blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *, + const struct blk_mq_queue_data *); /** * @commit_rqs: If a driver uses bd->last to judge when to submit @@ -306,7 +289,7 @@ struct blk_mq_ops { * purpose of kicking the hardware (which the last request otherwise * would have done). 
*/ - commit_rqs_fn *commit_rqs; + void (*commit_rqs)(struct blk_mq_hw_ctx *); /** * @get_budget: Reserve budget before queue request, once .queue_rq is @@ -314,37 +297,38 @@ struct blk_mq_ops { * reserved budget. Also we have to handle failure case * of .get_budget for avoiding I/O deadlock. */ - get_budget_fn *get_budget; + bool (*get_budget)(struct request_queue *); + /** * @put_budget: Release the reserved budget. */ - put_budget_fn *put_budget; + void (*put_budget)(struct request_queue *); /** * @timeout: Called on request timeout. */ - timeout_fn *timeout; + enum blk_eh_timer_return (*timeout)(struct request *, bool); /** * @poll: Called to poll for completion of a specific tag. */ - poll_fn *poll; + int (*poll)(struct blk_mq_hw_ctx *); /** * @complete: Mark the request as complete. */ - complete_fn *complete; + void (*complete)(struct request *); /** * @init_hctx: Called when the block layer side of a hardware queue has * been set up, allowing the driver to allocate/init matching * structures. */ - init_hctx_fn *init_hctx; + int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int); /** * @exit_hctx: Ditto for exit/teardown. */ - exit_hctx_fn *exit_hctx; + void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); /** * @init_request: Called for every command allocated by the block layer @@ -353,11 +337,13 @@ struct blk_mq_ops { * Tag greater than or equal to queue_depth is for setting up * flush request. */ - init_request_fn *init_request; + int (*init_request)(struct blk_mq_tag_set *set, struct request *, + unsigned int, unsigned int); /** * @exit_request: Ditto for exit/teardown. */ - exit_request_fn *exit_request; + void (*exit_request)(struct blk_mq_tag_set *set, struct request *, + unsigned int); /** * @initialize_rq_fn: Called from inside blk_get_request(). @@ -368,18 +354,18 @@ struct blk_mq_ops { * @cleanup_rq: Called before freeing one request which isn't completed * yet, and usually for freeing the driver private data. */ - cleanup_rq_fn *cleanup_rq; + void (*cleanup_rq)(struct request *); /** * @busy: If set, returns whether or not this queue currently is busy. */ - busy_fn *busy; + bool (*busy)(struct request_queue *); /** * @map_queues: This allows drivers specify their own queue mapping by * overriding the setup-time function that builds the mq_map. */ - map_queues_fn *map_queues; + int (*map_queues)(struct blk_mq_tag_set *set); #ifdef CONFIG_BLK_DEBUG_FS /** -- cgit v1.2.3 From 5143192cd410c4fc83be09a2e73423765aee072b Mon Sep 17 00:00:00 2001 From: Ralph Campbell Date: Thu, 23 Jul 2020 15:30:00 -0700 Subject: mm/migrate: add a flags parameter to migrate_vma The src_owner field in struct migrate_vma is being used for two purposes, it acts as a selection filter for which types of pages are to be migrated and it identifies device private pages owned by the caller. Split this into separate parameters so the src_owner field can be used just to identify device private pages owned by the caller of migrate_vma_setup(). Rename the src_owner field to pgmap_owner to reflect it is now used only to identify which device private pages to migrate. 
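With the selection filter split out, a driver migrating its own device private pages back to system memory now selects them explicitly; a minimal sketch mirroring the nouveau and test_hmm hunks below:

	struct migrate_vma args = {
		.vma         = vma,
		.start       = addr,
		.end         = addr + PAGE_SIZE,
		.src         = &src_pfn,
		.dst         = &dst_pfn,
		.pgmap_owner = drm->dev,	/* matches page->pgmap->owner */
		.flags       = MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
	};

	if (migrate_vma_setup(&args))
		return VM_FAULT_SIGBUS;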
Link: https://lore.kernel.org/r/20200723223004.9586-3-rcampbell@nvidia.com Signed-off-by: Ralph Campbell Reviewed-by: Bharata B Rao Signed-off-by: Jason Gunthorpe --- arch/powerpc/kvm/book3s_hv_uvmem.c | 4 +++- drivers/gpu/drm/nouveau/nouveau_dmem.c | 4 +++- include/linux/migrate.h | 13 +++++++++---- lib/test_hmm.c | 15 ++++----------- mm/migrate.c | 6 ++++-- 5 files changed, 23 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c index 09d8119024db..6850bd04bcb9 100644 --- a/arch/powerpc/kvm/book3s_hv_uvmem.c +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c @@ -400,6 +400,7 @@ kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start, mig.end = end; mig.src = &src_pfn; mig.dst = &dst_pfn; + mig.flags = MIGRATE_VMA_SELECT_SYSTEM; /* * We come here with mmap_lock write lock held just for @@ -577,7 +578,8 @@ kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start, mig.end = end; mig.src = &src_pfn; mig.dst = &dst_pfn; - mig.src_owner = &kvmppc_uvmem_pgmap; + mig.pgmap_owner = &kvmppc_uvmem_pgmap; + mig.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE; mutex_lock(&kvm->arch.uvmem_lock); /* The requested page is already paged-out, nothing to do */ diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index e5c230d9ae24..78b9e3c2a5b3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -182,7 +182,8 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf) .end = vmf->address + PAGE_SIZE, .src = &src, .dst = &dst, - .src_owner = drm->dev, + .pgmap_owner = drm->dev, + .flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE, }; /* @@ -615,6 +616,7 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm, struct migrate_vma args = { .vma = vma, .start = start, + .flags = MIGRATE_VMA_SELECT_SYSTEM, }; unsigned long i; u64 *pfns; diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 3e546cbf03dd..16e03a51e5cf 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -180,6 +180,11 @@ static inline unsigned long migrate_pfn(unsigned long pfn) return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID; } +enum migrate_vma_direction { + MIGRATE_VMA_SELECT_SYSTEM = 1 << 0, + MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1, +}; + struct migrate_vma { struct vm_area_struct *vma; /* @@ -199,11 +204,11 @@ struct migrate_vma { /* * Set to the owner value also stored in page->pgmap->owner for - * migrating out of device private memory. If set only device - * private pages with this owner are migrated. If not set - * device private pages are not migrated at all. + * migrating out of device private memory. The flags also need to + * be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE. */ - void *src_owner; + void *pgmap_owner; + unsigned long flags; }; int migrate_vma_setup(struct migrate_vma *args); diff --git a/lib/test_hmm.c b/lib/test_hmm.c index 9aa577afc269..e78a1414f58e 100644 --- a/lib/test_hmm.c +++ b/lib/test_hmm.c @@ -585,15 +585,6 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args, */ spage = migrate_pfn_to_page(*src); - /* - * Don't migrate device private pages from our own driver or - * others. For our own we would do a device private memory copy - * not a migration and for others, we would need to fault the - * other device's page into system memory first. 
- */ - if (spage && is_zone_device_page(spage)) - continue; - dpage = dmirror_devmem_alloc_page(mdevice); if (!dpage) continue; @@ -702,7 +693,8 @@ static int dmirror_migrate(struct dmirror *dmirror, args.dst = dst_pfns; args.start = addr; args.end = next; - args.src_owner = NULL; + args.pgmap_owner = NULL; + args.flags = MIGRATE_VMA_SELECT_SYSTEM; ret = migrate_vma_setup(&args); if (ret) goto out; @@ -1053,7 +1045,8 @@ static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf) args.end = args.start + PAGE_SIZE; args.src = &src_pfns; args.dst = &dst_pfns; - args.src_owner = dmirror->mdevice; + args.pgmap_owner = dmirror->mdevice; + args.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE; if (migrate_vma_setup(&args)) return VM_FAULT_SIGBUS; diff --git a/mm/migrate.c b/mm/migrate.c index f37729673558..e3ea68e3a08b 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -2287,7 +2287,9 @@ again: goto next; page = device_private_entry_to_page(entry); - if (page->pgmap->owner != migrate->src_owner) + if (!(migrate->flags & + MIGRATE_VMA_SELECT_DEVICE_PRIVATE) || + page->pgmap->owner != migrate->pgmap_owner) goto next; mpfn = migrate_pfn(page_to_pfn(page)) | @@ -2295,7 +2297,7 @@ again: if (is_write_device_private_entry(entry)) mpfn |= MIGRATE_PFN_WRITE; } else { - if (migrate->src_owner) + if (!(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) goto next; pfn = pte_pfn(pte); if (is_zero_pfn(pfn)) { -- cgit v1.2.3 From 998427b3ad2c769082853880cf353557ec0ec77d Mon Sep 17 00:00:00 2001 From: Ralph Campbell Date: Thu, 23 Jul 2020 15:30:01 -0700 Subject: mm/notifier: add migration invalidation type Currently migrate_vma_setup() calls mmu_notifier_invalidate_range_start() which flushes all device private page mappings whether or not a page is being migrated to/from device private memory. In order to not disrupt device mappings that are not being migrated, shift the responsibility for clearing device private mappings to the device driver and leave CPU page table unmapping handled by migrate_vma_setup(). To support this, the caller of migrate_vma_setup() should always set struct migrate_vma::pgmap_owner to a non NULL value that matches the device private page->pgmap->owner. This value is then passed to the struct mmu_notifier_range with a new event type which the driver's invalidation function can use to avoid device MMU invalidations. Link: https://lore.kernel.org/r/20200723223004.9586-4-rcampbell@nvidia.com Signed-off-by: Ralph Campbell Signed-off-by: Jason Gunthorpe --- include/linux/migrate.h | 3 +++ include/linux/mmu_notifier.h | 6 ++++++ mm/migrate.c | 8 +++++++- 3 files changed, 16 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 16e03a51e5cf..540998d9810b 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -206,6 +206,9 @@ struct migrate_vma { * Set to the owner value also stored in page->pgmap->owner for * migrating out of device private memory. The flags also need to * be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE. + * The caller should always set this field when using mmu notifier + * callbacks to avoid device MMU invalidations for device private + * pages that are not being migrated. 
*/ void *pgmap_owner; unsigned long flags; diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index fc68f3570e19..c6f0708195cd 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -38,6 +38,10 @@ struct mmu_interval_notifier; * * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal * that the mm refcount is zero and the range is no longer accessible. + * + * @MMU_NOTIFY_MIGRATE: used during migrate_vma_collect() invalidate to signal + * a device driver to possibly ignore the invalidation if the + * migrate_pgmap_owner field matches the driver's device private pgmap owner. */ enum mmu_notifier_event { MMU_NOTIFY_UNMAP = 0, @@ -46,6 +50,7 @@ enum mmu_notifier_event { MMU_NOTIFY_PROTECTION_PAGE, MMU_NOTIFY_SOFT_DIRTY, MMU_NOTIFY_RELEASE, + MMU_NOTIFY_MIGRATE, }; #define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0) @@ -264,6 +269,7 @@ struct mmu_notifier_range { unsigned long end; unsigned flags; enum mmu_notifier_event event; + void *migrate_pgmap_owner; }; static inline int mm_has_notifiers(struct mm_struct *mm) diff --git a/mm/migrate.c b/mm/migrate.c index e3ea68e3a08b..96e1f41a991e 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -2392,8 +2392,14 @@ static void migrate_vma_collect(struct migrate_vma *migrate) { struct mmu_notifier_range range; - mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, + /* + * Note that the pgmap_owner is passed to the mmu notifier callback so + * that the registered device driver can skip invalidating device + * private page mappings that won't be migrated. + */ + mmu_notifier_range_init(&range, MMU_NOTIFY_MIGRATE, 0, migrate->vma, migrate->vma->vm_mm, migrate->start, migrate->end); + range.migrate_pgmap_owner = migrate->pgmap_owner; mmu_notifier_invalidate_range_start(&range); walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end, -- cgit v1.2.3 From 035bfd051eae5b365368be915dfaf916aa501a52 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 28 Jul 2020 18:38:34 +0200 Subject: net: make sockptr_is_null strict aliasing safe While the kernel in general is not strict aliasing safe we can trivially do that in sockptr_is_null without affecting code generation, so always check the actually assigned union member. Reported-by: Jan Engelhardt Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- include/linux/sockptr.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h index 7d5cdb2b30b5..b13ea1422f93 100644 --- a/include/linux/sockptr.h +++ b/include/linux/sockptr.h @@ -64,7 +64,9 @@ static inline int __must_check init_user_sockptr(sockptr_t *sp, void __user *p) static inline bool sockptr_is_null(sockptr_t sockptr) { - return !sockptr.user && !sockptr.kernel; + if (sockptr_is_kernel(sockptr)) + return !sockptr.kernel; + return !sockptr.user; } static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size) -- cgit v1.2.3 From d3c48151512922dd35f1f393b30b9138e4441d14 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 28 Jul 2020 18:38:35 +0200 Subject: net: remove sockptr_advance sockptr_advance never properly worked. Replace it with _offset variants of copy_from_sockptr and copy_to_sockptr. Fixes: ba423fdaa589 ("net: add a new sockptr_t type") Reported-by: Jason A. Donenfeld Reported-by: Ido Schimmel Signed-off-by: Christoph Hellwig Acked-by: Jason A. Donenfeld Tested-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/crypto/chelsio/chtls/chtls_main.c | 12 ++++++------ include/linux/sockptr.h | 27 +++++++++++++-------------- net/dccp/proto.c | 5 ++--- net/ipv4/netfilter/arp_tables.c | 8 ++++---- net/ipv4/netfilter/ip_tables.c | 8 ++++---- net/ipv4/tcp.c | 5 +++-- net/ipv6/ip6_flowlabel.c | 11 ++++++----- net/ipv6/netfilter/ip6_tables.c | 8 ++++---- net/netfilter/x_tables.c | 7 ++++--- net/tls/tls_main.c | 6 +++--- 10 files changed, 49 insertions(+), 48 deletions(-) (limited to 'include') diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c index c3058dcdb33c..66d247efd561 100644 --- a/drivers/crypto/chelsio/chtls/chtls_main.c +++ b/drivers/crypto/chelsio/chtls/chtls_main.c @@ -525,9 +525,9 @@ static int do_chtls_setsockopt(struct sock *sk, int optname, /* Obtain version and type from previous copy */ crypto_info[0] = tmp_crypto_info; /* Now copy the following data */ - sockptr_advance(optval, sizeof(*crypto_info)); - rc = copy_from_sockptr((char *)crypto_info + sizeof(*crypto_info), - optval, + rc = copy_from_sockptr_offset((char *)crypto_info + + sizeof(*crypto_info), + optval, sizeof(*crypto_info), sizeof(struct tls12_crypto_info_aes_gcm_128) - sizeof(*crypto_info)); @@ -542,9 +542,9 @@ static int do_chtls_setsockopt(struct sock *sk, int optname, } case TLS_CIPHER_AES_GCM_256: { crypto_info[0] = tmp_crypto_info; - sockptr_advance(optval, sizeof(*crypto_info)); - rc = copy_from_sockptr((char *)crypto_info + sizeof(*crypto_info), - optval, + rc = copy_from_sockptr_offset((char *)crypto_info + + sizeof(*crypto_info), + optval, sizeof(*crypto_info), sizeof(struct tls12_crypto_info_aes_gcm_256) - sizeof(*crypto_info)); diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h index b13ea1422f93..9e6c81d474cb 100644 --- a/include/linux/sockptr.h +++ b/include/linux/sockptr.h @@ -69,19 +69,26 @@ static inline bool sockptr_is_null(sockptr_t sockptr) return !sockptr.user; } -static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size) +static inline int copy_from_sockptr_offset(void *dst, sockptr_t src, + size_t offset, size_t size) { if (!sockptr_is_kernel(src)) - return copy_from_user(dst, src.user, size); - memcpy(dst, src.kernel, size); + return copy_from_user(dst, src.user + offset, size); + memcpy(dst, src.kernel + offset, size); return 0; } -static inline int copy_to_sockptr(sockptr_t dst, const void *src, size_t size) +static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size) +{ + return copy_from_sockptr_offset(dst, src, 0, size); +} + +static inline int copy_to_sockptr_offset(sockptr_t dst, size_t offset, + const void *src, size_t size) { if (!sockptr_is_kernel(dst)) - return copy_to_user(dst.user, src, size); - memcpy(dst.kernel, src, size); + return copy_to_user(dst.user + offset, src, size); + memcpy(dst.kernel + offset, src, size); return 0; } @@ -112,14 +119,6 @@ static inline void *memdup_sockptr_nul(sockptr_t src, size_t len) return p; } -static inline void sockptr_advance(sockptr_t sockptr, size_t len) -{ - if (sockptr_is_kernel(sockptr)) - sockptr.kernel += len; - else - sockptr.user += len; -} - static inline long strncpy_from_sockptr(char *dst, sockptr_t src, size_t count) { if (sockptr_is_kernel(src)) { diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 2e9e8449698f..d148ab1530e5 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -426,9 +426,8 @@ static int dccp_setsockopt_service(struct sock *sk, const __be32 service, return -ENOMEM; sl->dccpsl_nr = optlen / sizeof(u32) - 1; - 
sockptr_advance(optval, sizeof(service)); - if (copy_from_sockptr(sl->dccpsl_list, optval, - optlen - sizeof(service)) || + if (copy_from_sockptr_offset(sl->dccpsl_list, optval, + sizeof(service), optlen - sizeof(service)) || dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) { kfree(sl); return -EFAULT; diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 9a1567dbc022..d1e04d2b5170 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -971,8 +971,8 @@ static int do_replace(struct net *net, sockptr_t arg, unsigned int len) return -ENOMEM; loc_cpu_entry = newinfo->entries; - sockptr_advance(arg, sizeof(tmp)); - if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) { + if (copy_from_sockptr_offset(loc_cpu_entry, arg, sizeof(tmp), + tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } @@ -1267,8 +1267,8 @@ static int compat_do_replace(struct net *net, sockptr_t arg, unsigned int len) return -ENOMEM; loc_cpu_entry = newinfo->entries; - sockptr_advance(arg, sizeof(tmp)); - if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) { + if (copy_from_sockptr_offset(loc_cpu_entry, arg, sizeof(tmp), + tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index f2a9680303d8..f15bc21d7301 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -1126,8 +1126,8 @@ do_replace(struct net *net, sockptr_t arg, unsigned int len) return -ENOMEM; loc_cpu_entry = newinfo->entries; - sockptr_advance(arg, sizeof(tmp)); - if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) { + if (copy_from_sockptr_offset(loc_cpu_entry, arg, sizeof(tmp), + tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } @@ -1508,8 +1508,8 @@ compat_do_replace(struct net *net, sockptr_t arg, unsigned int len) return -ENOMEM; loc_cpu_entry = newinfo->entries; - sockptr_advance(arg, sizeof(tmp)); - if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) { + if (copy_from_sockptr_offset(loc_cpu_entry, arg, sizeof(tmp), + tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 27de9380ed14..4afec552f211 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2801,12 +2801,13 @@ static int tcp_repair_options_est(struct sock *sk, sockptr_t optbuf, { struct tcp_sock *tp = tcp_sk(sk); struct tcp_repair_opt opt; + size_t offset = 0; while (len >= sizeof(opt)) { - if (copy_from_sockptr(&opt, optbuf, sizeof(opt))) + if (copy_from_sockptr_offset(&opt, optbuf, offset, sizeof(opt))) return -EFAULT; - sockptr_advance(optbuf, sizeof(opt)); + offset += sizeof(opt); len -= sizeof(opt); switch (opt.opt_code) { diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index 215b6f5e733e..2d655260dedc 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -401,8 +401,8 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, memset(fl->opt, 0, sizeof(*fl->opt)); fl->opt->tot_len = sizeof(*fl->opt) + olen; err = -EFAULT; - sockptr_advance(optval, CMSG_ALIGN(sizeof(*freq))); - if (copy_from_sockptr(fl->opt + 1, optval, olen)) + if (copy_from_sockptr_offset(fl->opt + 1, optval, + CMSG_ALIGN(sizeof(*freq)), olen)) goto done; msg.msg_controllen = olen; @@ -703,9 +703,10 @@ release: goto recheck; if (!freq->flr_label) { - sockptr_advance(optval, - offsetof(struct in6_flowlabel_req, flr_label)); - if (copy_to_sockptr(optval, &fl->label, sizeof(fl->label))) { + size_t offset = offsetof(struct 
in6_flowlabel_req, flr_label); + + if (copy_to_sockptr_offset(optval, offset, &fl->label, + sizeof(fl->label))) { /* Intentionally ignore fault. */ } } diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 1d52957a413f..2e2119bfcf13 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1143,8 +1143,8 @@ do_replace(struct net *net, sockptr_t arg, unsigned int len) return -ENOMEM; loc_cpu_entry = newinfo->entries; - sockptr_advance(arg, sizeof(tmp)); - if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) { + if (copy_from_sockptr_offset(loc_cpu_entry, arg, sizeof(tmp), + tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } @@ -1517,8 +1517,8 @@ compat_do_replace(struct net *net, sockptr_t arg, unsigned int len) return -ENOMEM; loc_cpu_entry = newinfo->entries; - sockptr_advance(arg, sizeof(tmp)); - if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) { + if (copy_from_sockptr_offset(loc_cpu_entry, arg, sizeof(tmp), + tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index b97eb4b538fd..91bf6635ea9e 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -1050,6 +1050,7 @@ EXPORT_SYMBOL_GPL(xt_check_target); void *xt_copy_counters(sockptr_t arg, unsigned int len, struct xt_counters_info *info) { + size_t offset; void *mem; u64 size; @@ -1067,7 +1068,7 @@ void *xt_copy_counters(sockptr_t arg, unsigned int len, memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1); info->num_counters = compat_tmp.num_counters; - sockptr_advance(arg, sizeof(compat_tmp)); + offset = sizeof(compat_tmp); } else #endif { @@ -1078,7 +1079,7 @@ void *xt_copy_counters(sockptr_t arg, unsigned int len, if (copy_from_sockptr(info, arg, sizeof(*info)) != 0) return ERR_PTR(-EFAULT); - sockptr_advance(arg, sizeof(*info)); + offset = sizeof(*info); } info->name[sizeof(info->name) - 1] = '\0'; @@ -1092,7 +1093,7 @@ void *xt_copy_counters(sockptr_t arg, unsigned int len, if (!mem) return ERR_PTR(-ENOMEM); - if (copy_from_sockptr(mem, arg, len) == 0) + if (copy_from_sockptr_offset(mem, arg, offset, len) == 0) return mem; vfree(mem); diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index d77f7d821130..bbc52b088d29 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -522,9 +522,9 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval, goto err_crypto_info; } - sockptr_advance(optval, sizeof(*crypto_info)); - rc = copy_from_sockptr(crypto_info + 1, optval, - optlen - sizeof(*crypto_info)); + rc = copy_from_sockptr_offset(crypto_info + 1, optval, + sizeof(*crypto_info), + optlen - sizeof(*crypto_info)); if (rc) { rc = -EFAULT; goto err_crypto_info; -- cgit v1.2.3 From a31edb2059ed4e498f9aa8230c734b59d0ad797a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 28 Jul 2020 18:38:36 +0200 Subject: net: improve the user pointer check in init_user_sockptr Make sure not just the pointer itself but the whole range lies in the user address space. For that pass the length and then use the access_ok helper to do the check. Fixes: 6d04fe15f78a ("net: optimize the sockptr_t for unified kernel/user address spaces") Reported-by: David Laight Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- include/linux/sockptr.h | 18 ++++++------------ net/ipv4/bpfilter/sockopt.c | 2 +- net/socket.c | 2 +- 3 files changed, 8 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h index 9e6c81d474cb..96840def9d69 100644 --- a/include/linux/sockptr.h +++ b/include/linux/sockptr.h @@ -27,14 +27,6 @@ static inline sockptr_t KERNEL_SOCKPTR(void *p) { return (sockptr_t) { .kernel = p }; } - -static inline int __must_check init_user_sockptr(sockptr_t *sp, void __user *p) -{ - if ((unsigned long)p >= TASK_SIZE) - return -EFAULT; - sp->user = p; - return 0; -} #else /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */ typedef struct { union { @@ -53,14 +45,16 @@ static inline sockptr_t KERNEL_SOCKPTR(void *p) { return (sockptr_t) { .kernel = p, .is_kernel = true }; } +#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */ -static inline int __must_check init_user_sockptr(sockptr_t *sp, void __user *p) +static inline int __must_check init_user_sockptr(sockptr_t *sp, void __user *p, + size_t size) { - sp->user = p; - sp->is_kernel = false; + if (!access_ok(p, size)) + return -EFAULT; + *sp = (sockptr_t) { .user = p }; return 0; } -#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */ static inline bool sockptr_is_null(sockptr_t sockptr) { diff --git a/net/ipv4/bpfilter/sockopt.c b/net/ipv4/bpfilter/sockopt.c index 94f18d2352d0..545b2640f019 100644 --- a/net/ipv4/bpfilter/sockopt.c +++ b/net/ipv4/bpfilter/sockopt.c @@ -65,7 +65,7 @@ int bpfilter_ip_get_sockopt(struct sock *sk, int optname, if (get_user(len, optlen)) return -EFAULT; - err = init_user_sockptr(&optval, user_optval); + err = init_user_sockptr(&optval, user_optval, len); if (err) return err; return bpfilter_mbox_request(sk, optname, optval, len, false); diff --git a/net/socket.c b/net/socket.c index 94ca4547cd7c..aff52e81653c 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2105,7 +2105,7 @@ int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval, if (optlen < 0) return -EINVAL; - err = init_user_sockptr(&optval, user_optval); + err = init_user_sockptr(&optval, user_optval, optlen); if (err) return err; -- cgit v1.2.3 From 2f96593ecc37e98bf99525f0629128080533867f Mon Sep 17 00:00:00 2001 From: Jiaxun Yang Date: Tue, 28 Jul 2020 23:36:55 +0800 Subject: of_address: Add bus type match for pci ranges parser So the parser can be used to parse the range property of an ISA bus. As they all use a PCI-like layout for the range property, there is no need to start a new parser.
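With the bus matched when the parser is initialised, a caller can walk a non-PCI "ranges" property with the existing iterator. A rough sketch (error handling trimmed; assumes the range struct exposes the bus_addr field used in the parser change below):

	struct of_pci_range_parser parser;
	struct of_pci_range range;

	if (of_range_parser_init(&parser, np))
		return -ENOENT;

	for_each_of_pci_range(&parser, &range)
		pr_debug("bus 0x%llx -> cpu 0x%llx size 0x%llx flags 0x%x\n",
			 (unsigned long long)range.bus_addr,
			 (unsigned long long)range.cpu_addr,
			 (unsigned long long)range.size, range.flags);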
Signed-off-by: Jiaxun Yang Reviewed-by: Rob Herring Signed-off-by: Thomas Bogendoerfer --- drivers/of/address.c | 29 +++++++++++++++++------------ include/linux/of_address.h | 4 ++++ 2 files changed, 21 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/drivers/of/address.c b/drivers/of/address.c index 8eea3f6e29a4..813936d419ad 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c @@ -49,6 +49,7 @@ struct of_bus { u64 (*map)(__be32 *addr, const __be32 *range, int na, int ns, int pna); int (*translate)(__be32 *addr, u64 offset, int na); + bool has_flags; unsigned int (*get_flags)(const __be32 *addr); }; @@ -364,6 +365,7 @@ static struct of_bus of_busses[] = { .count_cells = of_bus_pci_count_cells, .map = of_bus_pci_map, .translate = of_bus_pci_translate, + .has_flags = true, .get_flags = of_bus_pci_get_flags, }, #endif /* CONFIG_PCI */ @@ -375,6 +377,7 @@ static struct of_bus of_busses[] = { .count_cells = of_bus_isa_count_cells, .map = of_bus_isa_map, .translate = of_bus_isa_translate, + .has_flags = true, .get_flags = of_bus_isa_get_flags, }, /* Default */ @@ -698,9 +701,10 @@ static int parser_init(struct of_pci_range_parser *parser, parser->node = node; parser->pna = of_n_addr_cells(node); - parser->na = of_bus_n_addr_cells(node); - parser->ns = of_bus_n_size_cells(node); parser->dma = !strcmp(name, "dma-ranges"); + parser->bus = of_match_bus(node); + + parser->bus->count_cells(parser->node, &parser->na, &parser->ns); parser->range = of_get_property(node, name, &rlen); if (parser->range == NULL) @@ -732,6 +736,7 @@ struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser, int na = parser->na; int ns = parser->ns; int np = parser->pna + na + ns; + int busflag_na = 0; if (!range) return NULL; @@ -739,12 +744,13 @@ struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser, if (!parser->range || parser->range + np > parser->end) return NULL; - if (parser->na == 3) - range->flags = of_bus_pci_get_flags(parser->range); - else - range->flags = 0; + range->flags = parser->bus->get_flags(parser->range); + + /* A extra cell for resource flags */ + if (parser->bus->has_flags) + busflag_na = 1; - range->pci_addr = of_read_number(parser->range, na); + range->bus_addr = of_read_number(parser->range + busflag_na, na - busflag_na); if (parser->dma) range->cpu_addr = of_translate_dma_address(parser->node, @@ -759,11 +765,10 @@ struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser, /* Now consume following elements while they are contiguous */ while (parser->range + np <= parser->end) { u32 flags = 0; - u64 pci_addr, cpu_addr, size; + u64 bus_addr, cpu_addr, size; - if (parser->na == 3) - flags = of_bus_pci_get_flags(parser->range); - pci_addr = of_read_number(parser->range, na); + flags = parser->bus->get_flags(parser->range); + bus_addr = of_read_number(parser->range + busflag_na, na - busflag_na); if (parser->dma) cpu_addr = of_translate_dma_address(parser->node, parser->range + na); @@ -774,7 +779,7 @@ struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser, if (flags != range->flags) break; - if (pci_addr != range->pci_addr + range->size || + if (bus_addr != range->bus_addr + range->size || cpu_addr != range->cpu_addr + range->size) break; diff --git a/include/linux/of_address.h b/include/linux/of_address.h index 763022ed3456..88bc943405cd 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h @@ -6,8 +6,11 @@ #include #include +struct of_bus; + struct 
of_pci_range_parser { struct device_node *node; + struct of_bus *bus; const __be32 *range; const __be32 *end; int na; @@ -119,6 +122,7 @@ static inline void __iomem *of_iomap(struct device_node *device, int index) return NULL; } #endif +#define of_range_parser_init of_pci_range_parser_init #if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI) extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, -- cgit v1.2.3 From b8265621f4888af9494e1d685620871ec81bc33d Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 23 Jul 2020 17:21:59 -0700 Subject: Add pldmfw library for PLDM firmware update The pldmfw library is used to implement common logic needed to flash devices based on firmware files using the format described by the PLDM for Firmware Update standard. This library consists of logic to parse the PLDM file format from a firmware file object, as well as common logic for sending the relevant PLDM header data to the device firmware. A simple ops table is provided so that device drivers can implement device specific hardware interactions while keeping the common logic to the pldmfw library. This library will be used by the Intel ice networking driver as part of implementing device flash update via devlink. The library aims to be vendor and device agnostic. For this reason, it has been placed in lib/pldmfw, in the hopes that other devices which use the PLDM firmware file format may benefit from it in the future. However, do note that not all features defined in the PLDM standard have been implemented. Signed-off-by: Jacob Keller Signed-off-by: David S. Miller --- Documentation/driver-api/index.rst | 1 + Documentation/driver-api/pldmfw/driver-ops.rst | 56 ++ Documentation/driver-api/pldmfw/file-format.rst | 203 ++++++ Documentation/driver-api/pldmfw/index.rst | 72 ++ MAINTAINERS | 7 + include/linux/pldmfw.h | 165 +++++ lib/Kconfig | 4 + lib/Makefile | 3 + lib/pldmfw/Makefile | 2 + lib/pldmfw/pldmfw.c | 879 ++++++++++++++++++++++++ lib/pldmfw/pldmfw_private.h | 238 +++++++ 11 files changed, 1630 insertions(+) create mode 100644 Documentation/driver-api/pldmfw/driver-ops.rst create mode 100644 Documentation/driver-api/pldmfw/file-format.rst create mode 100644 Documentation/driver-api/pldmfw/index.rst create mode 100644 include/linux/pldmfw.h create mode 100644 lib/pldmfw/Makefile create mode 100644 lib/pldmfw/pldmfw.c create mode 100644 lib/pldmfw/pldmfw_private.h (limited to 'include') diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst index 6567187e7687..7fc1e0cccae7 100644 --- a/Documentation/driver-api/index.rst +++ b/Documentation/driver-api/index.rst @@ -95,6 +95,7 @@ available subsections can be seen below. phy/index pti_intel_mid pwm + pldmfw/index rfkill serial/index sm501 diff --git a/Documentation/driver-api/pldmfw/driver-ops.rst b/Documentation/driver-api/pldmfw/driver-ops.rst new file mode 100644 index 000000000000..f0654783d3b3 --- /dev/null +++ b/Documentation/driver-api/pldmfw/driver-ops.rst @@ -0,0 +1,56 @@ +.. SPDX-License-Identifier: GPL-2.0-only + +========================= +Driver-specific callbacks +========================= + +The ``pldmfw`` module relies on the device driver for implementing device +specific behavior using the following operations. + +``.match_record`` +----------------- + +The ``.match_record`` operation is used to determine whether a given PLDM +record matches the device being updated. This requires comparing the record +descriptors in the record with information from the device. 
Many record +descriptors are defined by the PLDM standard, but it is also allowed for +devices to implement their own descriptors. + +The ``.match_record`` operation should return true if a given record matches +the device. + +``.send_package_data`` +---------------------- + +The ``.send_package_data`` operation is used to send the device-specific +package data in a record to the device firmware. If the matching record +provides package data, ``pldmfw`` will call the ``.send_package_data`` +function with a pointer to the package data and with the package data +length. The device driver should send this data to firmware. + +``.send_component_table`` +------------------------- + +The ``.send_component_table`` operation is used to forward component +information to the device. It is called once for each applicable component, +that is, for each component indicated by the matching record. The +device driver should send the component information to the device firmware, +and wait for a response. The provided transfer flag indicates whether this +is the first, last, or a middle component, and is expected to be forwarded +to firmware as part of the component table information. The driver should an +error in the case when the firmware indicates that the component cannot be +updated, or return zero if the component can be updated. + +``.flash_component`` +-------------------- + +The ``.flash_component`` operation is used to inform the device driver to +flash a given component. The driver must perform any steps necessary to send +the component data to the device. + +``.finalize_update`` +-------------------- + +The ``.finalize_update`` operation is used by the ``pldmfw`` library in +order to allow the device driver to perform any remaining device specific +logic needed to finish the update. diff --git a/Documentation/driver-api/pldmfw/file-format.rst b/Documentation/driver-api/pldmfw/file-format.rst new file mode 100644 index 000000000000..b7a9cebe09c6 --- /dev/null +++ b/Documentation/driver-api/pldmfw/file-format.rst @@ -0,0 +1,203 @@ +.. SPDX-License-Identifier: GPL-2.0-only + +================================== +PLDM Firmware file format overview +================================== + +A PLDM firmware package is a binary file which contains a header that +describes the contents of the firmware package. This includes an initial +package header, one or more firmware records, and one or more components +describing the actual flash contents to program. + +This diagram provides an overview of the file format:: + + overall file layout + +----------------------+ + | | + | Package Header | + | | + +----------------------+ + | | + | Device Records | + | | + +----------------------+ + | | + | Component Info | + | | + +----------------------+ + | | + | Package Header CRC | + | | + +----------------------+ + | | + | Component Image 1 | + | | + +----------------------+ + | | + | Component Image 2 | + | | + +----------------------+ + | | + | ... | + | | + +----------------------+ + | | + | Component Image N | + | | + +----------------------+ + +Package Header +============== + +The package header begins with the UUID of the PLDM file format, and +contains information about the version of the format that the file uses. It +also includes the total header size, a release date, the size of the +component bitmap, and an overall package version. 
+ +The following diagram provides an overview of the package header:: + + header layout + +-------------------------+ + | PLDM UUID | + +-------------------------+ + | Format Revision | + +-------------------------+ + | Header Size | + +-------------------------+ + | Release Date | + +-------------------------+ + | Component Bitmap Length | + +-------------------------+ + | Package Version Info | + +-------------------------+ + +Device Records +============== + +The device firmware records area starts with a count indicating the total +number of records in the file, followed by each record. A single device +record describes what device matches this record. All valid PLDM firmware +files must contain at least one record, but optionally may contain more than +one record if they support multiple devices. + +Each record will identify the device it supports via TLVs that describe the +device, such as the PCI device and vendor information. It will also indicate +which set of components that are used by this device. It is possible that +only subset of provided components will be used by a given record. A record +may also optionally contain device-specific package data that will be used +by the device firmware during the update process. + +The following diagram provides an overview of the device record area:: + + area layout + +---------------+ + | | + | Record Count | + | | + +---------------+ + | | + | Record 1 | + | | + +---------------+ + | | + | Record 2 | + | | + +---------------+ + | | + | ... | + | | + +---------------+ + | | + | Record N | + | | + +---------------+ + + record layout + +-----------------------+ + | Record Length | + +-----------------------+ + | Descriptor Count | + +-----------------------+ + | Option Flags | + +-----------------------+ + | Version Settings | + +-----------------------+ + | Package Data Length | + +-----------------------+ + | Applicable Components | + +-----------------------+ + | Version String | + +-----------------------+ + | Descriptor TLVs | + +-----------------------+ + | Package Data | + +-----------------------+ + +Component Info +============== + +The component information area begins with a count of the number of +components. Following this count is a description for each component. The +component information points to the location in the file where the component +data is stored, and includes version data used to identify the version of +the component. + +The following diagram provides an overview of the component area:: + + area layout + +-----------------+ + | | + | Component Count | + | | + +-----------------+ + | | + | Component 1 | + | | + +-----------------+ + | | + | Component 2 | + | | + +-----------------+ + | | + | ... | + | | + +-----------------+ + | | + | Component N | + | | + +-----------------+ + + component layout + +------------------------+ + | Classification | + +------------------------+ + | Component Identifier | + +------------------------+ + | Comparison Stamp | + +------------------------+ + | Component Options | + +------------------------+ + | Activation Method | + +------------------------+ + | Location Offset | + +------------------------+ + | Component Size | + +------------------------+ + | Component Version Info | + +------------------------+ + | Package Data | + +------------------------+ + + +Package Header CRC +================== + +Following the component information is a short 4-byte CRC calculated over +the contents of all of the header information. 
+ +Component Images +================ + +The component images follow the package header information in the PLDM +firmware file. Each of these is simply a binary chunk with its start and +size defined by the matching component structure in the component info area. diff --git a/Documentation/driver-api/pldmfw/index.rst b/Documentation/driver-api/pldmfw/index.rst new file mode 100644 index 000000000000..ad2c33ece30f --- /dev/null +++ b/Documentation/driver-api/pldmfw/index.rst @@ -0,0 +1,72 @@ +.. SPDX-License-Identifier: GPL-2.0-only + +================================== +PLDM Firmware Flash Update Library +================================== + +``pldmfw`` implements functionality for updating the flash on a device using +the PLDM for Firmware Update standard +. + +.. toctree:: + :maxdepth: 1 + + file-format + driver-ops + +================================== +Overview of the ``pldmfw`` library +================================== + +The ``pldmfw`` library is intended to be used by device drivers for +implementing device flash update based on firmware files following the PLDM +firwmare file format. + +It is implemented using an ops table that allows device drivers to provide +the underlying device specific functionality. + +``pldmfw`` implements logic to parse the packed binary format of the PLDM +firmware file into data structures, and then uses the provided function +operations to determine if the firmware file is a match for the device. If +so, it sends the record and component data to the firmware using the device +specific implementations provided by device drivers. Once the device +firmware indicates that the update may be performed, the firmware data is +sent to the device for programming. + +Parsing the PLDM file +===================== + +The PLDM file format uses packed binary data, with most multi-byte fields +stored in the Little Endian format. Several pieces of data are variable +length, including version strings and the number of records and components. +Due to this, it is not straight forward to index the record, record +descriptors, or components. + +To avoid proliferating access to the packed binary data, the ``pldmfw`` +library parses and extracts this data into simpler structures for ease of +access. + +In order to safely process the firmware file, care is taken to avoid +unaligned access of multi-byte fields, and to properly convert from Little +Endian to CPU host format. Additionally the records, descriptors, and +components are stored in linked lists. + +Performing a flash update +========================= + +To perform a flash update, the ``pldmfw`` module performs the following +steps + +1. Parse the firmware file for record and component information +2. Scan through the records and determine if the device matches any record + in the file. The first matched record will be used. +3. If the matching record provides package data, send this package data to + the device. +4. For each component that the record indicates, send the component data to + the device. For each component, the firmware may respond with an + indication of whether the update is suitable or not. If any component is + not suitable, the update is canceled. +5. For each component, send the binary data to the device firmware for + updating. +6. After all components are programmed, perform any final device-specific + actions to finalize the update. 
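To make the ops-table flow described above concrete, here is a hedged sketch of how a hypothetical driver could wire itself to the library. Every my_* name is illustrative; only struct pldmfw, struct pldmfw_ops, pldmfw_op_pci_match_record() and pldmfw_flash_image() come from include/linux/pldmfw.h:

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/pldmfw.h>

/* Sketch only: driver-private state embedding the pldmfw context. */
struct my_dev {
	struct pldmfw context;	/* embedded so callbacks can use container_of() */
	/* ... device private state ... */
};

static int my_send_package_data(struct pldmfw *ctx, const u8 *data, u16 length)
{
	/* forward the matching record's package data to device firmware */
	return 0;
}

static int my_send_component_table(struct pldmfw *ctx,
				   struct pldmfw_component *component,
				   u8 transfer_flag)
{
	/* describe the component to firmware; a non-zero return vetoes the update */
	return 0;
}

static int my_flash_component(struct pldmfw *ctx,
			      struct pldmfw_component *component)
{
	/* program component->component_size bytes from component->component_data */
	return 0;
}

static const struct pldmfw_ops my_pldmfw_ops = {
	.match_record		= pldmfw_op_pci_match_record, /* PCI helper; assumes dev is a PCI device */
	.send_package_data	= my_send_package_data,
	.send_component_table	= my_send_component_table,
	.flash_component	= my_flash_component,
	/* .finalize_update is optional and omitted in this sketch */
};

static int my_flash_update(struct my_dev *priv, struct device *dev,
			   const struct firmware *fw)
{
	priv->context.ops = &my_pldmfw_ops;
	priv->context.dev = dev;

	return pldmfw_flash_image(&priv->context, fw);
}

Embedding struct pldmfw inside the driver's private structure lets each callback recover its own state with container_of(), which is the usage model the ops table is designed around.
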
diff --git a/MAINTAINERS b/MAINTAINERS index b61f9063faf2..7bc4360b592f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13604,6 +13604,13 @@ S: Maintained F: Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.yaml F: drivers/iio/chemical/pms7003.c +PLDMFW LIBRARY +M: Jacob Keller +S: Maintained +F: Documentation/driver-api/pldmfw/ +F: include/linux/pldmfw.h +F: lib/pldmfw/ + PLX DMA DRIVER M: Logan Gunthorpe S: Maintained diff --git a/include/linux/pldmfw.h b/include/linux/pldmfw.h new file mode 100644 index 000000000000..0fc831338226 --- /dev/null +++ b/include/linux/pldmfw.h @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2019, Intel Corporation. */ + +#ifndef _PLDMFW_H_ +#define _PLDMFW_H_ + +#include +#include + +#define PLDM_DEVICE_UPDATE_CONTINUE_AFTER_FAIL BIT(0) + +#define PLDM_STRING_TYPE_UNKNOWN 0 +#define PLDM_STRING_TYPE_ASCII 1 +#define PLDM_STRING_TYPE_UTF8 2 +#define PLDM_STRING_TYPE_UTF16 3 +#define PLDM_STRING_TYPE_UTF16LE 4 +#define PLDM_STRING_TYPE_UTF16BE 5 + +struct pldmfw_record { + struct list_head entry; + + /* List of descriptor TLVs */ + struct list_head descs; + + /* Component Set version string*/ + const u8 *version_string; + u8 version_type; + u8 version_len; + + /* Package Data length */ + u16 package_data_len; + + /* Bitfield of Device Update Flags */ + u32 device_update_flags; + + /* Package Data block */ + const u8 *package_data; + + /* Bitmap of components applicable to this record */ + unsigned long *component_bitmap; + u16 component_bitmap_len; +}; + +/* Standard descriptor TLV identifiers */ +#define PLDM_DESC_ID_PCI_VENDOR_ID 0x0000 +#define PLDM_DESC_ID_IANA_ENTERPRISE_ID 0x0001 +#define PLDM_DESC_ID_UUID 0x0002 +#define PLDM_DESC_ID_PNP_VENDOR_ID 0x0003 +#define PLDM_DESC_ID_ACPI_VENDOR_ID 0x0004 +#define PLDM_DESC_ID_PCI_DEVICE_ID 0x0100 +#define PLDM_DESC_ID_PCI_SUBVENDOR_ID 0x0101 +#define PLDM_DESC_ID_PCI_SUBDEV_ID 0x0102 +#define PLDM_DESC_ID_PCI_REVISION_ID 0x0103 +#define PLDM_DESC_ID_PNP_PRODUCT_ID 0x0104 +#define PLDM_DESC_ID_ACPI_PRODUCT_ID 0x0105 +#define PLDM_DESC_ID_VENDOR_DEFINED 0xFFFF + +struct pldmfw_desc_tlv { + struct list_head entry; + + const u8 *data; + u16 type; + u16 size; +}; + +#define PLDM_CLASSIFICATION_UNKNOWN 0x0000 +#define PLDM_CLASSIFICATION_OTHER 0x0001 +#define PLDM_CLASSIFICATION_DRIVER 0x0002 +#define PLDM_CLASSIFICATION_CONFIG_SW 0x0003 +#define PLDM_CLASSIFICATION_APP_SW 0x0004 +#define PLDM_CLASSIFICATION_INSTRUMENTATION 0x0005 +#define PLDM_CLASSIFICATION_BIOS 0x0006 +#define PLDM_CLASSIFICATION_DIAGNOSTIC_SW 0x0007 +#define PLDM_CLASSIFICATION_OS 0x0008 +#define PLDM_CLASSIFICATION_MIDDLEWARE 0x0009 +#define PLDM_CLASSIFICATION_FIRMWARE 0x000A +#define PLDM_CLASSIFICATION_CODE 0x000B +#define PLDM_CLASSIFICATION_SERVICE_PACK 0x000C +#define PLDM_CLASSIFICATION_SOFTWARE_BUNDLE 0x000D + +#define PLDM_ACTIVATION_METHOD_AUTO BIT(0) +#define PLDM_ACTIVATION_METHOD_SELF_CONTAINED BIT(1) +#define PLDM_ACTIVATION_METHOD_MEDIUM_SPECIFIC BIT(2) +#define PLDM_ACTIVATION_METHOD_REBOOT BIT(3) +#define PLDM_ACTIVATION_METHOD_DC_CYCLE BIT(4) +#define PLDM_ACTIVATION_METHOD_AC_CYCLE BIT(5) + +#define PLDMFW_COMPONENT_OPTION_FORCE_UPDATE BIT(0) +#define PLDMFW_COMPONENT_OPTION_USE_COMPARISON_STAMP BIT(1) + +struct pldmfw_component { + struct list_head entry; + + /* component identifier */ + u16 classification; + u16 identifier; + + u16 options; + u16 activation_method; + + u32 comparison_stamp; + + u32 component_size; + const u8 *component_data; + + /* Component version string */ 
+ const u8 *version_string; + u8 version_type; + u8 version_len; + + /* component index */ + u8 index; + +}; + +/* Transfer flag used for sending components to the firmware */ +#define PLDM_TRANSFER_FLAG_START BIT(0) +#define PLDM_TRANSFER_FLAG_MIDDLE BIT(1) +#define PLDM_TRANSFER_FLAG_END BIT(2) + +struct pldmfw_ops; + +/* Main entry point to the PLDM firmware update engine. Device drivers + * should embed this in a private structure and use container_of to obtain + * a pointer to their own data, used to implement the device specific + * operations. + */ +struct pldmfw { + const struct pldmfw_ops *ops; + struct device *dev; +}; + +bool pldmfw_op_pci_match_record(struct pldmfw *context, struct pldmfw_record *record); + +/* Operations invoked by the generic PLDM firmware update engine. Used to + * implement device specific logic. + * + * @match_record: check if the device matches the given record. For + * convenience, a standard implementation is provided for PCI devices. + * + * @send_package_data: send the package data associated with the matching + * record to firmware. + * + * @send_component_table: send the component data associated with a given + * component to firmware. Called once for each applicable component. + * + * @flash_component: Flash the data for a given component to the device. + * Called once for each applicable component, after all component tables have + * been sent. + * + * @finalize_update: (optional) Finish the update. Called after all components + * have been flashed. + */ +struct pldmfw_ops { + bool (*match_record)(struct pldmfw *context, struct pldmfw_record *record); + int (*send_package_data)(struct pldmfw *context, const u8 *data, u16 length); + int (*send_component_table)(struct pldmfw *context, struct pldmfw_component *component, + u8 transfer_flag); + int (*flash_component)(struct pldmfw *context, struct pldmfw_component *component); + int (*finalize_update)(struct pldmfw *context); +}; + +int pldmfw_flash_image(struct pldmfw *context, const struct firmware *fw); + +#endif diff --git a/lib/Kconfig b/lib/Kconfig index df3f3da95990..3ffbca6998e5 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -676,3 +676,7 @@ config GENERIC_LIB_CMPDI2 config GENERIC_LIB_UCMPDI2 bool + +config PLDMFW + bool + default n diff --git a/lib/Makefile b/lib/Makefile index b1c42c10073b..281888ff713b 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -315,6 +315,9 @@ obj-$(CONFIG_GENERIC_LIB_CMPDI2) += cmpdi2.o obj-$(CONFIG_GENERIC_LIB_UCMPDI2) += ucmpdi2.o obj-$(CONFIG_OBJAGG) += objagg.o +# pldmfw library +obj-$(CONFIG_PLDMFW) += pldmfw/ + # KUnit tests obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o diff --git a/lib/pldmfw/Makefile b/lib/pldmfw/Makefile new file mode 100644 index 000000000000..99ad10711abe --- /dev/null +++ b/lib/pldmfw/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_PLDMFW) += pldmfw.o diff --git a/lib/pldmfw/pldmfw.c b/lib/pldmfw/pldmfw.c new file mode 100644 index 000000000000..e5d4b3b2af81 --- /dev/null +++ b/lib/pldmfw/pldmfw.c @@ -0,0 +1,879 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2019, Intel Corporation. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pldmfw_private.h" + +/* Internal structure used to store details about the PLDM image file as it is + * being validated and processed. 
+ */ +struct pldmfw_priv { + struct pldmfw *context; + const struct firmware *fw; + + /* current offset of firmware image */ + size_t offset; + + struct list_head records; + struct list_head components; + + /* PLDM Firmware Package Header */ + const struct __pldm_header *header; + u16 total_header_size; + + /* length of the component bitmap */ + u16 component_bitmap_len; + u16 bitmap_size; + + /* Start of the component image information */ + u16 component_count; + const u8 *component_start; + + /* Start pf the firmware device id records */ + const u8 *record_start; + u8 record_count; + + /* The CRC at the end of the package header */ + u32 header_crc; + + struct pldmfw_record *matching_record; +}; + +/** + * pldm_check_fw_space - Verify that the firmware image has space left + * @data: pointer to private data + * @offset: offset to start from + * @length: length to check for + * + * Verify that the firmware data can hold a chunk of bytes with the specified + * offset and length. + * + * Returns: zero on success, or -EFAULT if the image does not have enough + * space left to fit the expected length. + */ +static int +pldm_check_fw_space(struct pldmfw_priv *data, size_t offset, size_t length) +{ + size_t expected_size = offset + length; + struct device *dev = data->context->dev; + + if (data->fw->size < expected_size) { + dev_dbg(dev, "Firmware file size smaller than expected. Got %zu bytes, needed %zu bytes\n", + data->fw->size, expected_size); + return -EFAULT; + } + + return 0; +} + +/** + * pldm_move_fw_offset - Move the current firmware offset forward + * @data: pointer to private data + * @bytes_to_move: number of bytes to move the offset forward by + * + * Check that there is enough space past the current offset, and then move the + * offset forward by this ammount. + * + * Returns: zero on success, or -EFAULT if the image is too small to fit the + * expected length. + */ +static int +pldm_move_fw_offset(struct pldmfw_priv *data, size_t bytes_to_move) +{ + int err; + + err = pldm_check_fw_space(data, data->offset, bytes_to_move); + if (err) + return err; + + data->offset += bytes_to_move; + + return 0; +} + +/** + * pldm_parse_header - Validate and extract details about the PLDM header + * @data: pointer to private data + * + * Performs initial basic verification of the PLDM image, up to the first + * firmware record. + * + * This includes the following checks and extractions + * + * * Verify that the UUID at the start of the header matches the expected + * value as defined in the DSP0267 PLDM specification + * * Check that the revision is 0x01 + * * Extract the total header_size and verify that the image is large enough + * to contain at least the length of this header + * * Extract the size of the component bitmap length + * * Extract a pointer to the start of the record area + * + * Returns: zero on success, or a negative error code on failure. + */ +static int pldm_parse_header(struct pldmfw_priv *data) +{ + const struct __pldmfw_record_area *record_area; + struct device *dev = data->context->dev; + const struct __pldm_header *header; + size_t header_size; + int err; + + err = pldm_move_fw_offset(data, sizeof(*header)); + if (err) + return err; + + header = (const struct __pldm_header *)data->fw->data; + data->header = header; + + if (!uuid_equal(&header->id, &pldm_firmware_header_id)) { + dev_dbg(dev, "Invalid package header identifier. 
Expected UUID %pUB, but got %pUB\n", + &pldm_firmware_header_id, &header->id); + return -EINVAL; + } + + if (header->revision != PACKAGE_HEADER_FORMAT_REVISION) { + dev_dbg(dev, "Invalid package header revision. Expected revision %u but got %u\n", + PACKAGE_HEADER_FORMAT_REVISION, header->revision); + return -EOPNOTSUPP; + } + + data->total_header_size = get_unaligned_le16(&header->size); + header_size = data->total_header_size - sizeof(*header); + + err = pldm_check_fw_space(data, data->offset, header_size); + if (err) + return err; + + data->component_bitmap_len = + get_unaligned_le16(&header->component_bitmap_len); + + if (data->component_bitmap_len % 8 != 0) { + dev_dbg(dev, "Invalid component bitmap length. The length is %u, which is not a multiple of 8\n", + data->component_bitmap_len); + return -EINVAL; + } + + data->bitmap_size = data->component_bitmap_len / 8; + + err = pldm_move_fw_offset(data, header->version_len); + if (err) + return err; + + /* extract a pointer to the record area, which just follows the main + * PLDM header data. + */ + record_area = (const struct __pldmfw_record_area *)(data->fw->data + + data->offset); + + err = pldm_move_fw_offset(data, sizeof(*record_area)); + if (err) + return err; + + data->record_count = record_area->record_count; + data->record_start = record_area->records; + + return 0; +} + +/** + * pldm_check_desc_tlv_len - Check that the length matches expectation + * @data: pointer to image details + * @type: the descriptor type + * @size: the length from the descriptor header + * + * If the descriptor type is one of the documented descriptor types according + * to the standard, verify that the provided length matches. + * + * If the type is not recognized or is VENDOR_DEFINED, return zero. + * + * Returns: zero on success, or -EINVAL if the specified size of a standard + * TLV does not match the expected value defined for that TLV. + */ +static int +pldm_check_desc_tlv_len(struct pldmfw_priv *data, u16 type, u16 size) +{ + struct device *dev = data->context->dev; + u16 expected_size; + + switch (type) { + case PLDM_DESC_ID_PCI_VENDOR_ID: + case PLDM_DESC_ID_PCI_DEVICE_ID: + case PLDM_DESC_ID_PCI_SUBVENDOR_ID: + case PLDM_DESC_ID_PCI_SUBDEV_ID: + expected_size = 2; + break; + case PLDM_DESC_ID_PCI_REVISION_ID: + expected_size = 1; + break; + case PLDM_DESC_ID_PNP_VENDOR_ID: + expected_size = 3; + break; + case PLDM_DESC_ID_IANA_ENTERPRISE_ID: + case PLDM_DESC_ID_ACPI_VENDOR_ID: + case PLDM_DESC_ID_PNP_PRODUCT_ID: + case PLDM_DESC_ID_ACPI_PRODUCT_ID: + expected_size = 4; + break; + case PLDM_DESC_ID_UUID: + expected_size = 16; + break; + case PLDM_DESC_ID_VENDOR_DEFINED: + return 0; + default: + /* Do not report an error on an unexpected TLV */ + dev_dbg(dev, "Found unrecognized TLV type 0x%04x\n", type); + return 0; + } + + if (size != expected_size) { + dev_dbg(dev, "Found TLV type 0x%04x with unexpected length. Got %u bytes, but expected %u bytes\n", + type, size, expected_size); + return -EINVAL; + } + + return 0; +} + +/** + * pldm_parse_desc_tlvs - Check and skip past a number of TLVs + * @data: pointer to private data + * @record: pointer to the record this TLV belongs too + * @desc_count: descriptor count + * + * From the current offset, read and extract the descriptor TLVs, updating the + * current offset each time. + * + * Returns: zero on success, or a negative error code on failure. 
+ */ +static int +pldm_parse_desc_tlvs(struct pldmfw_priv *data, struct pldmfw_record *record, u8 desc_count) +{ + const struct __pldmfw_desc_tlv *__desc; + const u8 *desc_start; + u8 i; + + desc_start = data->fw->data + data->offset; + + pldm_for_each_desc_tlv(i, __desc, desc_start, desc_count) { + struct pldmfw_desc_tlv *desc; + int err; + u16 type, size; + + err = pldm_move_fw_offset(data, sizeof(*__desc)); + if (err) + return err; + + type = get_unaligned_le16(&__desc->type); + + /* According to DSP0267, this only includes the data field */ + size = get_unaligned_le16(&__desc->size); + + err = pldm_check_desc_tlv_len(data, type, size); + if (err) + return err; + + /* check that we have space and move the offset forward */ + err = pldm_move_fw_offset(data, size); + if (err) + return err; + + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + desc->type = type; + desc->size = size; + desc->data = __desc->data; + + list_add_tail(&desc->entry, &record->descs); + } + + return 0; +} + +/** + * pldm_parse_one_record - Verify size of one PLDM record + * @data: pointer to image details + * @__record: pointer to the record to check + * + * This function checks that the record size does not exceed either the size + * of the firmware file or the total length specified in the header section. + * + * It also verifies that the recorded length of the start of the record + * matches the size calculated by adding the static structure length, the + * component bitmap length, the version string length, the length of all + * descriptor TLVs, and the length of the package data. + * + * Returns: zero on success, or a negative error code on failure. + */ +static int +pldm_parse_one_record(struct pldmfw_priv *data, + const struct __pldmfw_record_info *__record) +{ + struct pldmfw_record *record; + size_t measured_length; + int err; + const u8 *bitmap_ptr; + u16 record_len; + int i; + + /* Make a copy and insert it into the record list */ + record = kzalloc(sizeof(*record), GFP_KERNEL); + if (!record) + return -ENOMEM; + + INIT_LIST_HEAD(&record->descs); + list_add_tail(&record->entry, &data->records); + + /* Then check that we have space and move the offset */ + err = pldm_move_fw_offset(data, sizeof(*__record)); + if (err) + return err; + + record_len = get_unaligned_le16(&__record->record_len); + record->package_data_len = get_unaligned_le16(&__record->package_data_len); + record->version_len = __record->version_len; + record->version_type = __record->version_type; + + bitmap_ptr = data->fw->data + data->offset; + + /* check that we have space for the component bitmap length */ + err = pldm_move_fw_offset(data, data->bitmap_size); + if (err) + return err; + + record->component_bitmap_len = data->component_bitmap_len; + record->component_bitmap = bitmap_zalloc(record->component_bitmap_len, + GFP_KERNEL); + if (!record->component_bitmap) + return -ENOMEM; + + for (i = 0; i < data->bitmap_size; i++) + bitmap_set_value8(record->component_bitmap, bitmap_ptr[i], i * 8); + + record->version_string = data->fw->data + data->offset; + + err = pldm_move_fw_offset(data, record->version_len); + if (err) + return err; + + /* Scan through the descriptor TLVs and find the end */ + err = pldm_parse_desc_tlvs(data, record, __record->descriptor_count); + if (err) + return err; + + record->package_data = data->fw->data + data->offset; + + err = pldm_move_fw_offset(data, record->package_data_len); + if (err) + return err; + + measured_length = data->offset - ((const u8 *)__record - data->fw->data); + 
if (measured_length != record_len) { + dev_dbg(data->context->dev, "Unexpected record length. Measured record length is %zu bytes, expected length is %u bytes\n", + measured_length, record_len); + return -EFAULT; + } + + return 0; +} + +/** + * pldm_parse_records - Locate the start of the component area + * @data: pointer to private data + * + * Extract the record count, and loop through each record, searching for the + * component area. + * + * Returns: zero on success, or a negative error code on failure. + */ +static int pldm_parse_records(struct pldmfw_priv *data) +{ + const struct __pldmfw_component_area *component_area; + const struct __pldmfw_record_info *record; + int err; + u8 i; + + pldm_for_each_record(i, record, data->record_start, data->record_count) { + err = pldm_parse_one_record(data, record); + if (err) + return err; + } + + /* Extract a pointer to the component area, which just follows the + * PLDM device record data. + */ + component_area = (const struct __pldmfw_component_area *)(data->fw->data + data->offset); + + err = pldm_move_fw_offset(data, sizeof(*component_area)); + if (err) + return err; + + data->component_count = + get_unaligned_le16(&component_area->component_image_count); + data->component_start = component_area->components; + + return 0; +} + +/** + * pldm_parse_components - Locate the CRC header checksum + * @data: pointer to private data + * + * Extract the component count, and find the pointer to the component area. + * Scan through each component searching for the end, which should point to + * the package header checksum. + * + * Extract the package header CRC and save it for verification. + * + * Returns: zero on success, or a negative error code on failure. + */ +static int pldm_parse_components(struct pldmfw_priv *data) +{ + const struct __pldmfw_component_info *__component; + struct device *dev = data->context->dev; + const u8 *header_crc_ptr; + int err; + u8 i; + + pldm_for_each_component(i, __component, data->component_start, data->component_count) { + struct pldmfw_component *component; + u32 offset, size; + + err = pldm_move_fw_offset(data, sizeof(*__component)); + if (err) + return err; + + err = pldm_move_fw_offset(data, __component->version_len); + if (err) + return err; + + offset = get_unaligned_le32(&__component->location_offset); + size = get_unaligned_le32(&__component->size); + + err = pldm_check_fw_space(data, offset, size); + if (err) + return err; + + component = kzalloc(sizeof(*component), GFP_KERNEL); + if (!component) + return -ENOMEM; + + component->index = i; + component->classification = get_unaligned_le16(&__component->classification); + component->identifier = get_unaligned_le16(&__component->identifier); + component->comparison_stamp = get_unaligned_le32(&__component->comparison_stamp); + component->options = get_unaligned_le16(&__component->options); + component->activation_method = get_unaligned_le16(&__component->activation_method); + component->version_type = __component->version_type; + component->version_len = __component->version_len; + component->version_string = __component->version_string; + component->component_data = data->fw->data + offset; + component->component_size = size; + + list_add_tail(&component->entry, &data->components); + } + + header_crc_ptr = data->fw->data + data->offset; + + err = pldm_move_fw_offset(data, sizeof(data->header_crc)); + if (err) + return err; + + /* Make sure that we reached the expected offset */ + if (data->offset != data->total_header_size) { + dev_dbg(dev, "Invalid firmware 
header size. Expected %u but got %zu\n", + data->total_header_size, data->offset); + return -EFAULT; + } + + data->header_crc = get_unaligned_le32(header_crc_ptr); + + return 0; +} + +/** + * pldm_verify_header_crc - Verify that the CRC in the header matches + * @data: pointer to private data + * + * Calculates the 32-bit CRC using the standard IEEE 802.3 CRC polynomial and + * compares it to the value stored in the header. + * + * Returns: zero on success if the CRC matches, or -EBADMSG on an invalid CRC. + */ +static int pldm_verify_header_crc(struct pldmfw_priv *data) +{ + struct device *dev = data->context->dev; + u32 calculated_crc; + size_t length; + + /* Calculate the 32-bit CRC of the header header contents up to but + * not including the checksum. Note that the Linux crc32_le function + * does not perform an expected final XOR. + */ + length = data->offset - sizeof(data->header_crc); + calculated_crc = crc32_le(~0, data->fw->data, length) ^ ~0; + + if (calculated_crc != data->header_crc) { + dev_dbg(dev, "Invalid CRC in firmware header. Got 0x%08x but expected 0x%08x\n", + calculated_crc, data->header_crc); + return -EBADMSG; + } + + return 0; +} + +/** + * pldmfw_free_priv - Free memory allocated while parsing the PLDM image + * @data: pointer to the PLDM data structure + * + * Loops through and clears all allocated memory associated with each + * allocated descriptor, record, and component. + */ +static void pldmfw_free_priv(struct pldmfw_priv *data) +{ + struct pldmfw_component *component, *c_safe; + struct pldmfw_record *record, *r_safe; + struct pldmfw_desc_tlv *desc, *d_safe; + + list_for_each_entry_safe(component, c_safe, &data->components, entry) { + list_del(&component->entry); + kfree(component); + } + + list_for_each_entry_safe(record, r_safe, &data->records, entry) { + list_for_each_entry_safe(desc, d_safe, &record->descs, entry) { + list_del(&desc->entry); + kfree(desc); + } + + if (record->component_bitmap) { + bitmap_free(record->component_bitmap); + record->component_bitmap = NULL; + } + + list_del(&record->entry); + kfree(record); + } +} + +/** + * pldm_parse_image - parse and extract details from PLDM image + * @data: pointer to private data + * + * Verify that the firmware file contains valid data for a PLDM firmware + * file. Extract useful pointers and data from the firmware file and store + * them in the data structure. + * + * The PLDM firmware file format is defined in DMTF DSP0267 1.0.0. Care + * should be taken to use get_unaligned_le* when accessing data from the + * pointers in data. + * + * Returns: zero on success, or a negative error code on failure. + */ +static int pldm_parse_image(struct pldmfw_priv *data) +{ + int err; + + if (WARN_ON(!(data->context->dev && data->fw->data && data->fw->size))) + return -EINVAL; + + err = pldm_parse_header(data); + if (err) + return err; + + err = pldm_parse_records(data); + if (err) + return err; + + err = pldm_parse_components(data); + if (err) + return err; + + return pldm_verify_header_crc(data); +} + +/* these are u32 so that we can store PCI_ANY_ID */ +struct pldm_pci_record_id { + int vendor; + int device; + int subsystem_vendor; + int subsystem_device; +}; + +/** + * pldmfw_op_pci_match_record - Check if a PCI device matches the record + * @context: PLDM fw update structure + * @record: list of records extracted from the PLDM image + * + * Determine of the PCI device associated with this device matches the record + * data provided. 
+ * + * Searches the descriptor TLVs and extracts the relevant descriptor data into + * a pldm_pci_record_id. This is then compared against the PCI device ID + * information. + * + * Returns: true if the device matches the record, false otherwise. + */ +bool pldmfw_op_pci_match_record(struct pldmfw *context, struct pldmfw_record *record) +{ + struct pci_dev *pdev = to_pci_dev(context->dev); + struct pldm_pci_record_id id = { + .vendor = PCI_ANY_ID, + .device = PCI_ANY_ID, + .subsystem_vendor = PCI_ANY_ID, + .subsystem_device = PCI_ANY_ID, + }; + struct pldmfw_desc_tlv *desc; + + list_for_each_entry(desc, &record->descs, entry) { + u16 value; + int *ptr; + + switch (desc->type) { + case PLDM_DESC_ID_PCI_VENDOR_ID: + ptr = &id.vendor; + break; + case PLDM_DESC_ID_PCI_DEVICE_ID: + ptr = &id.device; + break; + case PLDM_DESC_ID_PCI_SUBVENDOR_ID: + ptr = &id.subsystem_vendor; + break; + case PLDM_DESC_ID_PCI_SUBDEV_ID: + ptr = &id.subsystem_device; + break; + default: + /* Skip unrelated TLVs */ + continue; + } + + value = get_unaligned_le16(desc->data); + /* A value of zero for one of the descriptors is sometimes + * used when the record should ignore this field when matching + * device. For example if the record applies to any subsystem + * device or vendor. + */ + if (value) + *ptr = (int)value; + else + *ptr = PCI_ANY_ID; + } + + if ((id.vendor == PCI_ANY_ID || id.vendor == pdev->vendor) && + (id.device == PCI_ANY_ID || id.device == pdev->device) && + (id.subsystem_vendor == PCI_ANY_ID || id.subsystem_vendor == pdev->subsystem_vendor) && + (id.subsystem_device == PCI_ANY_ID || id.subsystem_device == pdev->subsystem_device)) + return true; + else + return false; +} +EXPORT_SYMBOL(pldmfw_op_pci_match_record); + +/** + * pldm_find_matching_record - Find the first matching PLDM record + * @data: pointer to private data + * + * Search through PLDM records and find the first matching entry. It is + * expected that only one entry matches. + * + * Store a pointer to the matching record, if found. + * + * Returns: zero on success, or -ENOENT if no matching record is found. + */ +static int pldm_find_matching_record(struct pldmfw_priv *data) +{ + struct pldmfw_record *record; + + list_for_each_entry(record, &data->records, entry) { + if (data->context->ops->match_record(data->context, record)) { + data->matching_record = record; + return 0; + } + } + + return -ENOENT; +} + +/** + * pldm_send_package_data - Send firmware the package data for the record + * @data: pointer to private data + * + * Send the package data associated with the matching record to the firmware, + * using the send_pkg_data operation. + * + * Returns: zero on success, or a negative error code on failure. + */ +static int +pldm_send_package_data(struct pldmfw_priv *data) +{ + struct pldmfw_record *record = data->matching_record; + const struct pldmfw_ops *ops = data->context->ops; + + return ops->send_package_data(data->context, record->package_data, + record->package_data_len); +} + +/** + * pldm_send_component_tables - Send component table information to firmware + * @data: pointer to private data + * + * Loop over each component, sending the applicable components to the firmware + * via the send_component_table operation. + * + * Returns: zero on success, or a negative error code on failure. 
+ */ +static int +pldm_send_component_tables(struct pldmfw_priv *data) +{ + unsigned long *bitmap = data->matching_record->component_bitmap; + struct pldmfw_component *component; + int err; + + list_for_each_entry(component, &data->components, entry) { + u8 index = component->index, transfer_flag = 0; + + /* Skip components which are not intended for this device */ + if (!test_bit(index, bitmap)) + continue; + + /* determine whether this is the start, middle, end, or both + * the start and end of the component tables + */ + if (index == find_first_bit(bitmap, data->component_bitmap_len)) + transfer_flag |= PLDM_TRANSFER_FLAG_START; + if (index == find_last_bit(bitmap, data->component_bitmap_len)) + transfer_flag |= PLDM_TRANSFER_FLAG_END; + if (!transfer_flag) + transfer_flag = PLDM_TRANSFER_FLAG_MIDDLE; + + err = data->context->ops->send_component_table(data->context, + component, + transfer_flag); + if (err) + return err; + } + + return 0; +} + +/** + * pldm_flash_components - Program each component to device flash + * @data: pointer to private data + * + * Loop through each component that is active for the matching device record, + * and send it to the device driver for flashing. + * + * Returns: zero on success, or a negative error code on failure. + */ +static int pldm_flash_components(struct pldmfw_priv *data) +{ + unsigned long *bitmap = data->matching_record->component_bitmap; + struct pldmfw_component *component; + int err; + + list_for_each_entry(component, &data->components, entry) { + u8 index = component->index; + + /* Skip components which are not intended for this device */ + if (!test_bit(index, bitmap)) + continue; + + err = data->context->ops->flash_component(data->context, component); + if (err) + return err; + } + + return 0; +} + +/** + * pldm_finalize_update - Finalize the device flash update + * @data: pointer to private data + * + * Tell the device driver to perform any remaining logic to complete the + * device update. + * + * Returns: zero on success, or a PLFM_FWU error indicating the reason for + * failure. + */ +static int pldm_finalize_update(struct pldmfw_priv *data) +{ + if (data->context->ops->finalize_update) + return data->context->ops->finalize_update(data->context); + + return 0; +} + +/** + * pldmfw_flash_image - Write a PLDM-formatted firmware image to the device + * @context: ops and data for firmware update + * @fw: firmware object pointing to the relevant firmware file to program + * + * Parse the data for a given firmware file, verifying that it is a valid PLDM + * formatted image that matches this device. + * + * Extract the device record Package Data and Component Tables and send them + * to the device firmware. Extract and write the flash data for each of the + * components indicated in the firmware file. + * + * Returns: zero on success, or a negative error code on failure. 
+ */ +int pldmfw_flash_image(struct pldmfw *context, const struct firmware *fw) +{ + struct pldmfw_priv *data; + int err; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + INIT_LIST_HEAD(&data->records); + INIT_LIST_HEAD(&data->components); + + data->fw = fw; + data->context = context; + + err = pldm_parse_image(data); + if (err) + goto out_release_data; + + err = pldm_find_matching_record(data); + if (err) + goto out_release_data; + + err = pldm_send_package_data(data); + if (err) + goto out_release_data; + + err = pldm_send_component_tables(data); + if (err) + goto out_release_data; + + err = pldm_flash_components(data); + if (err) + goto out_release_data; + + err = pldm_finalize_update(data); + +out_release_data: + pldmfw_free_priv(data); + kfree(data); + + return err; +} +EXPORT_SYMBOL(pldmfw_flash_image); + +MODULE_AUTHOR("Jacob Keller "); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("PLDM firmware flash update library"); diff --git a/lib/pldmfw/pldmfw_private.h b/lib/pldmfw/pldmfw_private.h new file mode 100644 index 000000000000..687ef2200692 --- /dev/null +++ b/lib/pldmfw/pldmfw_private.h @@ -0,0 +1,238 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2019, Intel Corporation. */ + +#ifndef _PLDMFW_PRIVATE_H_ +#define _PLDMFW_PRIVATE_H_ + +/* The following data structures define the layout of a firmware binary + * following the "PLDM For Firmware Update Specification", DMTF standard + * #DSP0267. + * + * pldmfw.c uses these structures to implement a simple engine that will parse + * a fw binary file in this format and perform a firmware update for a given + * device. + * + * Due to the variable sized data layout, alignment of fields within these + * structures is not guaranteed when reading. For this reason, all multi-byte + * field accesses should be done using the unaligned access macros. + * Additionally, the standard specifies that multi-byte fields are in + * LittleEndian format. + * + * The structure definitions are not made public, in order to keep direct + * accesses within code that is prepared to deal with the limitation of + * unaligned access. + */ + +/* UUID for PLDM firmware packages: f018878c-cb7d-4943-9800-a02f059aca02 */ +static const uuid_t pldm_firmware_header_id = + UUID_INIT(0xf018878c, 0xcb7d, 0x4943, + 0x98, 0x00, 0xa0, 0x2f, 0x05, 0x9a, 0xca, 0x02); + +/* Revision number of the PLDM header format this code supports */ +#define PACKAGE_HEADER_FORMAT_REVISION 0x01 + +/* timestamp104 structure defined in PLDM Base specification */ +#define PLDM_TIMESTAMP_SIZE 13 +struct __pldm_timestamp { + u8 b[PLDM_TIMESTAMP_SIZE]; +} __packed __aligned(1); + +/* Package Header Information */ +struct __pldm_header { + uuid_t id; /* PackageHeaderIdentifier */ + u8 revision; /* PackageHeaderFormatRevision */ + __le16 size; /* PackageHeaderSize */ + struct __pldm_timestamp release_date; /* PackageReleaseDateTime */ + __le16 component_bitmap_len; /* ComponentBitmapBitLength */ + u8 version_type; /* PackageVersionStringType */ + u8 version_len; /* PackageVersionStringLength */ + + /* + * DSP0267 also includes the following variable length fields at the + * end of this structure: + * + * PackageVersionString, length is version_len. 
+ * + * The total size of this section is + * sizeof(pldm_header) + version_len; + */ + u8 version_string[]; /* PackageVersionString */ +} __packed __aligned(1); + +/* Firmware Device ID Record */ +struct __pldmfw_record_info { + __le16 record_len; /* RecordLength */ + u8 descriptor_count; /* DescriptorCount */ + __le32 device_update_flags; /* DeviceUpdateOptionFlags */ + u8 version_type; /* ComponentImageSetVersionType */ + u8 version_len; /* ComponentImageSetVersionLength */ + __le16 package_data_len; /* FirmwareDevicePackageDataLength */ + + /* + * DSP0267 also includes the following variable length fields at the + * end of this structure: + * + * ApplicableComponents, length is component_bitmap_len from header + * ComponentImageSetVersionString, length is version_len + * RecordDescriptors, a series of TLVs with 16bit type and length + * FirmwareDevicePackageData, length is package_data_len + * + * The total size of each record is + * sizeof(pldmfw_record_info) + + * component_bitmap_len (converted to bytes!) + + * version_len + + * + + * package_data_len + */ + u8 variable_record_data[]; +} __packed __aligned(1); + +/* Firmware Descriptor Definition */ +struct __pldmfw_desc_tlv { + __le16 type; /* DescriptorType */ + __le16 size; /* DescriptorSize */ + u8 data[]; /* DescriptorData */ +} __aligned(1); + +/* Firmware Device Identification Area */ +struct __pldmfw_record_area { + u8 record_count; /* DeviceIDRecordCount */ + /* This is not a struct type because the size of each record varies */ + u8 records[]; +} __aligned(1); + +/* Individual Component Image Information */ +struct __pldmfw_component_info { + __le16 classification; /* ComponentClassfication */ + __le16 identifier; /* ComponentIdentifier */ + __le32 comparison_stamp; /* ComponentComparisonStamp */ + __le16 options; /* componentOptions */ + __le16 activation_method; /* RequestedComponentActivationMethod */ + __le32 location_offset; /* ComponentLocationOffset */ + __le32 size; /* ComponentSize */ + u8 version_type; /* ComponentVersionStringType */ + u8 version_len; /* ComponentVersionStringLength */ + + /* + * DSP0267 also includes the following variable length fields at the + * end of this structure: + * + * ComponentVersionString, length is version_len + * + * The total size of this section is + * sizeof(pldmfw_component_info) + version_len; + */ + u8 version_string[]; /* ComponentVersionString */ +} __packed __aligned(1); + +/* Component Image Information Area */ +struct __pldmfw_component_area { + __le16 component_image_count; + /* This is not a struct type because the component size varies */ + u8 components[]; +} __aligned(1); + +/** + * pldm_first_desc_tlv + * @start: byte offset of the start of the descriptor TLVs + * + * Converts the starting offset of the descriptor TLVs into a pointer to the + * first descriptor. + */ +#define pldm_first_desc_tlv(start) \ + ((const struct __pldmfw_desc_tlv *)(start)) + +/** + * pldm_next_desc_tlv + * @desc: pointer to a descriptor TLV + * + * Finds the pointer to the next descriptor following a given descriptor + */ +#define pldm_next_desc_tlv(desc) \ + ((const struct __pldmfw_desc_tlv *)((desc)->data + \ + get_unaligned_le16(&(desc)->size))) + +/** + * pldm_for_each_desc_tlv + * @i: variable to store descriptor index + * @desc: variable to store descriptor pointer + * @start: byte offset of the start of the descriptors + * @count: the number of descriptors + * + * for loop macro to iterate over all of the descriptors of a given PLDM + * record. 
+ */ +#define pldm_for_each_desc_tlv(i, desc, start, count) \ + for ((i) = 0, (desc) = pldm_first_desc_tlv(start); \ + (i) < (count); \ + (i)++, (desc) = pldm_next_desc_tlv(desc)) + +/** + * pldm_first_record + * @start: byte offset of the start of the PLDM records + * + * Converts a starting offset of the PLDM records into a pointer to the first + * record. + */ +#define pldm_first_record(start) \ + ((const struct __pldmfw_record_info *)(start)) + +/** + * pldm_next_record + * @record: pointer to a PLDM record + * + * Finds a pointer to the next record following a given record + */ +#define pldm_next_record(record) \ + ((const struct __pldmfw_record_info *) \ + ((const u8 *)(record) + get_unaligned_le16(&(record)->record_len))) + +/** + * pldm_for_each_record + * @i: variable to store record index + * @record: variable to store record pointer + * @start: byte offset of the start of the records + * @count: the number of records + * + * for loop macro to iterate over all of the records of a PLDM file. + */ +#define pldm_for_each_record(i, record, start, count) \ + for ((i) = 0, (record) = pldm_first_record(start); \ + (i) < (count); \ + (i)++, (record) = pldm_next_record(record)) + +/** + * pldm_first_component + * @start: byte offset of the start of the PLDM components + * + * Convert a starting offset of the PLDM components into a pointer to the + * first component + */ +#define pldm_first_component(start) \ + ((const struct __pldmfw_component_info *)(start)) + +/** + * pldm_next_component + * @component: pointer to a PLDM component + * + * Finds a pointer to the next component following a given component + */ +#define pldm_next_component(component) \ + ((const struct __pldmfw_component_info *)((component)->version_string + \ + (component)->version_len)) + +/** + * pldm_for_each_component + * @i: variable to store component index + * @component: variable to store component pointer + * @start: byte offset to the start of the first component + * @count: the number of components + * + * for loop macro to iterate over all of the components of a PLDM file. + */ +#define pldm_for_each_component(i, component, start, count) \ + for ((i) = 0, (component) = pldm_first_component(start); \ + (i) < (count); \ + (i)++, (component) = pldm_next_component(component)) + +#endif -- cgit v1.2.3 From 2f3ee5e481ce850b5b51a306b01a5e9187b206ae Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 24 Jul 2020 13:11:42 -0500 Subject: remoteproc: kill IPA notify code The IPA code now uses the generic remoteproc SSR notification mechanism. This makes the original IPA notification code unused and unnecessary, so get rid of it. This is effectively a revert of commit d7f5f3c89c1a ("remoteproc: add IPA notification to q6v5 driver"). 
Reviewed-by: Bjorn Andersson Signed-off-by: Alex Elder Link: https://lore.kernel.org/r/20200724181142.13581-3-elder@linaro.org Signed-off-by: Bjorn Andersson --- drivers/remoteproc/Kconfig | 4 -- drivers/remoteproc/Makefile | 1 - drivers/remoteproc/qcom_q6v5_ipa_notify.c | 85 ------------------------- drivers/remoteproc/qcom_q6v5_mss.c | 38 ----------- include/linux/remoteproc/qcom_q6v5_ipa_notify.h | 82 ------------------------ 5 files changed, 210 deletions(-) delete mode 100644 drivers/remoteproc/qcom_q6v5_ipa_notify.c delete mode 100644 include/linux/remoteproc/qcom_q6v5_ipa_notify.h (limited to 'include') diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index 3e70d972a186..48315dc4a30c 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig @@ -154,7 +154,6 @@ config QCOM_Q6V5_MSS select QCOM_MDT_LOADER select QCOM_PIL_INFO select QCOM_Q6V5_COMMON - select QCOM_Q6V5_IPA_NOTIFY select QCOM_RPROC_COMMON select QCOM_SCM help @@ -196,9 +195,6 @@ config QCOM_Q6V5_WCSS Say y here to support the Qualcomm Peripheral Image Loader for the Hexagon V5 based WCSS remote processors. -config QCOM_Q6V5_IPA_NOTIFY - tristate - config QCOM_SYSMON tristate "Qualcomm sysmon driver" depends on RPMSG diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile index 7ce1b8ef7677..4d4307dc8fa9 100644 --- a/drivers/remoteproc/Makefile +++ b/drivers/remoteproc/Makefile @@ -24,7 +24,6 @@ obj-$(CONFIG_QCOM_Q6V5_ADSP) += qcom_q6v5_adsp.o obj-$(CONFIG_QCOM_Q6V5_MSS) += qcom_q6v5_mss.o obj-$(CONFIG_QCOM_Q6V5_PAS) += qcom_q6v5_pas.o obj-$(CONFIG_QCOM_Q6V5_WCSS) += qcom_q6v5_wcss.o -obj-$(CONFIG_QCOM_Q6V5_IPA_NOTIFY) += qcom_q6v5_ipa_notify.o obj-$(CONFIG_QCOM_SYSMON) += qcom_sysmon.o obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss_pil.o qcom_wcnss_pil-y += qcom_wcnss.o diff --git a/drivers/remoteproc/qcom_q6v5_ipa_notify.c b/drivers/remoteproc/qcom_q6v5_ipa_notify.c deleted file mode 100644 index e1c10a128bfd..000000000000 --- a/drivers/remoteproc/qcom_q6v5_ipa_notify.c +++ /dev/null @@ -1,85 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -/* - * Qualcomm IPA notification subdev support - * - * Copyright (C) 2019 Linaro Ltd. - */ - -#include -#include -#include -#include - -static void -ipa_notify_common(struct rproc_subdev *subdev, enum qcom_rproc_event event) -{ - struct qcom_rproc_ipa_notify *ipa_notify; - qcom_ipa_notify_t notify; - - ipa_notify = container_of(subdev, struct qcom_rproc_ipa_notify, subdev); - notify = ipa_notify->notify; - if (notify) - notify(ipa_notify->data, event); -} - -static int ipa_notify_prepare(struct rproc_subdev *subdev) -{ - ipa_notify_common(subdev, MODEM_STARTING); - - return 0; -} - -static int ipa_notify_start(struct rproc_subdev *subdev) -{ - ipa_notify_common(subdev, MODEM_RUNNING); - - return 0; -} - -static void ipa_notify_stop(struct rproc_subdev *subdev, bool crashed) - -{ - ipa_notify_common(subdev, crashed ? 
MODEM_CRASHED : MODEM_STOPPING); -} - -static void ipa_notify_unprepare(struct rproc_subdev *subdev) -{ - ipa_notify_common(subdev, MODEM_OFFLINE); -} - -static void ipa_notify_removing(struct rproc_subdev *subdev) -{ - ipa_notify_common(subdev, MODEM_REMOVING); -} - -/* Register the IPA notification subdevice with the Q6V5 MSS remoteproc */ -void qcom_add_ipa_notify_subdev(struct rproc *rproc, - struct qcom_rproc_ipa_notify *ipa_notify) -{ - ipa_notify->notify = NULL; - ipa_notify->data = NULL; - ipa_notify->subdev.prepare = ipa_notify_prepare; - ipa_notify->subdev.start = ipa_notify_start; - ipa_notify->subdev.stop = ipa_notify_stop; - ipa_notify->subdev.unprepare = ipa_notify_unprepare; - - rproc_add_subdev(rproc, &ipa_notify->subdev); -} -EXPORT_SYMBOL_GPL(qcom_add_ipa_notify_subdev); - -/* Remove the IPA notification subdevice */ -void qcom_remove_ipa_notify_subdev(struct rproc *rproc, - struct qcom_rproc_ipa_notify *ipa_notify) -{ - struct rproc_subdev *subdev = &ipa_notify->subdev; - - ipa_notify_removing(subdev); - - rproc_remove_subdev(rproc, subdev); - ipa_notify->notify = NULL; /* Make it obvious */ -} -EXPORT_SYMBOL_GPL(qcom_remove_ipa_notify_subdev); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("Qualcomm IPA notification remoteproc subdev"); diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c index ae3739082fdd..68f158931010 100644 --- a/drivers/remoteproc/qcom_q6v5_mss.c +++ b/drivers/remoteproc/qcom_q6v5_mss.c @@ -23,7 +23,6 @@ #include #include #include -#include "linux/remoteproc/qcom_q6v5_ipa_notify.h" #include #include #include @@ -197,7 +196,6 @@ struct q6v5 { struct qcom_rproc_glink glink_subdev; struct qcom_rproc_subdev smd_subdev; struct qcom_rproc_ssr ssr_subdev; - struct qcom_rproc_ipa_notify ipa_notify_subdev; struct qcom_sysmon *sysmon; bool need_mem_protection; bool has_alt_reset; @@ -1607,39 +1605,6 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc) return 0; } -#if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) - -/* Register IPA notification function */ -int qcom_register_ipa_notify(struct rproc *rproc, qcom_ipa_notify_t notify, - void *data) -{ - struct qcom_rproc_ipa_notify *ipa_notify; - struct q6v5 *qproc = rproc->priv; - - if (!notify) - return -EINVAL; - - ipa_notify = &qproc->ipa_notify_subdev; - if (ipa_notify->notify) - return -EBUSY; - - ipa_notify->notify = notify; - ipa_notify->data = data; - - return 0; -} -EXPORT_SYMBOL_GPL(qcom_register_ipa_notify); - -/* Deregister IPA notification function */ -void qcom_deregister_ipa_notify(struct rproc *rproc) -{ - struct q6v5 *qproc = rproc->priv; - - qproc->ipa_notify_subdev.notify = NULL; -} -EXPORT_SYMBOL_GPL(qcom_deregister_ipa_notify); -#endif /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */ - static int q6v5_probe(struct platform_device *pdev) { const struct rproc_hexagon_res *desc; @@ -1766,7 +1731,6 @@ static int q6v5_probe(struct platform_device *pdev) qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss"); qcom_add_smd_subdev(rproc, &qproc->smd_subdev); qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss"); - qcom_add_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev); qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12); if (IS_ERR(qproc->sysmon)) { ret = PTR_ERR(qproc->sysmon); @@ -1782,7 +1746,6 @@ static int q6v5_probe(struct platform_device *pdev) remove_sysmon_subdev: qcom_remove_sysmon_subdev(qproc->sysmon); remove_subdevs: - qcom_remove_ipa_notify_subdev(qproc->rproc, &qproc->ipa_notify_subdev); qcom_remove_ssr_subdev(rproc, 
&qproc->ssr_subdev); qcom_remove_smd_subdev(rproc, &qproc->smd_subdev); qcom_remove_glink_subdev(rproc, &qproc->glink_subdev); @@ -1804,7 +1767,6 @@ static int q6v5_remove(struct platform_device *pdev) rproc_del(rproc); qcom_remove_sysmon_subdev(qproc->sysmon); - qcom_remove_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev); qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev); qcom_remove_smd_subdev(rproc, &qproc->smd_subdev); qcom_remove_glink_subdev(rproc, &qproc->glink_subdev); diff --git a/include/linux/remoteproc/qcom_q6v5_ipa_notify.h b/include/linux/remoteproc/qcom_q6v5_ipa_notify.h deleted file mode 100644 index 0820edc0ab7d..000000000000 --- a/include/linux/remoteproc/qcom_q6v5_ipa_notify.h +++ /dev/null @@ -1,82 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -/* Copyright (C) 2019 Linaro Ltd. */ - -#ifndef __QCOM_Q6V5_IPA_NOTIFY_H__ -#define __QCOM_Q6V5_IPA_NOTIFY_H__ - -#if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) - -#include - -enum qcom_rproc_event { - MODEM_STARTING = 0, /* Modem is about to be started */ - MODEM_RUNNING = 1, /* Startup complete; modem is operational */ - MODEM_STOPPING = 2, /* Modem is about to shut down */ - MODEM_CRASHED = 3, /* Modem has crashed (implies stopping) */ - MODEM_OFFLINE = 4, /* Modem is now offline */ - MODEM_REMOVING = 5, /* Modem is about to be removed */ -}; - -typedef void (*qcom_ipa_notify_t)(void *data, enum qcom_rproc_event event); - -struct qcom_rproc_ipa_notify { - struct rproc_subdev subdev; - - qcom_ipa_notify_t notify; - void *data; -}; - -/** - * qcom_add_ipa_notify_subdev() - Register IPA notification subdevice - * @rproc: rproc handle - * @ipa_notify: IPA notification subdevice handle - * - * Register the @ipa_notify subdevice with the @rproc so modem events - * can be sent to IPA when they occur. - * - * This is defined in "qcom_q6v5_ipa_notify.c". - */ -void qcom_add_ipa_notify_subdev(struct rproc *rproc, - struct qcom_rproc_ipa_notify *ipa_notify); - -/** - * qcom_remove_ipa_notify_subdev() - Remove IPA SSR subdevice - * @rproc: rproc handle - * @ipa_notify: IPA notification subdevice handle - * - * This is defined in "qcom_q6v5_ipa_notify.c". - */ -void qcom_remove_ipa_notify_subdev(struct rproc *rproc, - struct qcom_rproc_ipa_notify *ipa_notify); - -/** - * qcom_register_ipa_notify() - Register IPA notification function - * @rproc: Remote processor handle - * @notify: Non-null IPA notification callback function pointer - * @data: Data supplied to IPA notification callback function - * - * @Return: 0 if successful, or a negative error code otherwise - * - * This is defined in "qcom_q6v5_mss.c". - */ -int qcom_register_ipa_notify(struct rproc *rproc, qcom_ipa_notify_t notify, - void *data); -/** - * qcom_deregister_ipa_notify() - Deregister IPA notification function - * @rproc: Remote processor handle - * - * This is defined in "qcom_q6v5_mss.c". 
- */ -void qcom_deregister_ipa_notify(struct rproc *rproc); - -#else /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */ - -struct qcom_rproc_ipa_notify { /* empty */ }; - -#define qcom_add_ipa_notify_subdev(rproc, ipa_notify) /* no-op */ -#define qcom_remove_ipa_notify_subdev(rproc, ipa_notify) /* no-op */ - -#endif /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */ - -#endif /* !__QCOM_Q6V5_IPA_NOTIFY_H__ */ -- cgit v1.2.3 From b9aaec8f0be518096d1377082e0abe6a85e86ff3 Mon Sep 17 00:00:00 2001 From: Brian Vazquez Date: Sun, 26 Jul 2020 15:48:16 -0700 Subject: fib: use indirect call wrappers in the most common fib_rules_ops This avoids another indirect call per RX packet, which saves us around 20-40 ns. Changelog: v1 -> v2: - Move declarations to fib_rules.h to remove warnings Reported-by: kernel test robot Signed-off-by: Brian Vazquez Signed-off-by: David S. Miller --- include/net/fib_rules.h | 18 ++++++++++++++++++ net/core/fib_rules.c | 18 ++++++++++++++---- net/ipv4/fib_rules.c | 12 ++++++++---- net/ipv6/fib6_rules.c | 12 ++++++++---- 4 files changed, 48 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index a259050f84af..4b10676c69d1 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -10,6 +10,7 @@ #include #include #include +#include struct fib_kuid_range { kuid_t start; @@ -203,4 +204,21 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack); int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack); + +INDIRECT_CALLABLE_DECLARE(int fib6_rule_match(struct fib_rule *rule, + struct flowi *fl, int flags)); +INDIRECT_CALLABLE_DECLARE(int fib4_rule_match(struct fib_rule *rule, + struct flowi *fl, int flags)); + +INDIRECT_CALLABLE_DECLARE(int fib6_rule_action(struct fib_rule *rule, + struct flowi *flp, int flags, + struct fib_lookup_arg *arg)); +INDIRECT_CALLABLE_DECLARE(int fib4_rule_action(struct fib_rule *rule, + struct flowi *flp, int flags, + struct fib_lookup_arg *arg)); + +INDIRECT_CALLABLE_DECLARE(bool fib6_rule_suppress(struct fib_rule *rule, + struct fib_lookup_arg *arg)); +INDIRECT_CALLABLE_DECLARE(bool fib4_rule_suppress(struct fib_rule *rule, + struct fib_lookup_arg *arg)); #endif diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index bd7eba9066f8..e7a8f87b0bb2 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -14,6 +14,7 @@ #include #include #include +#include static const struct fib_kuid_range fib_kuid_range_unset = { KUIDT_INIT(0), @@ -267,7 +268,10 @@ static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops, uid_gt(fl->flowi_uid, rule->uid_range.end)) goto out; - ret = ops->match(rule, fl, flags); + ret = INDIRECT_CALL_INET(ops->match, + fib6_rule_match, + fib4_rule_match, + rule, fl, flags); out: return (rule->flags & FIB_RULE_INVERT) ?
!ret : ret; } @@ -298,9 +302,15 @@ jumped: } else if (rule->action == FR_ACT_NOP) continue; else - err = ops->action(rule, fl, flags, arg); - - if (!err && ops->suppress && ops->suppress(rule, arg)) + err = INDIRECT_CALL_INET(ops->action, + fib6_rule_action, + fib4_rule_action, + rule, fl, flags, arg); + + if (!err && ops->suppress && INDIRECT_CALL_INET(ops->suppress, + fib6_rule_suppress, + fib4_rule_suppress, + rule, arg)) continue; if (err != -EAGAIN) { diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index f99e3bac5cab..ce54a30c2ef1 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -29,6 +29,7 @@ #include #include #include +#include struct fib4_rule { struct fib_rule common; @@ -103,8 +104,9 @@ int __fib_lookup(struct net *net, struct flowi4 *flp, } EXPORT_SYMBOL_GPL(__fib_lookup); -static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp, - int flags, struct fib_lookup_arg *arg) +INDIRECT_CALLABLE_SCOPE int fib4_rule_action(struct fib_rule *rule, + struct flowi *flp, int flags, + struct fib_lookup_arg *arg) { int err = -EAGAIN; struct fib_table *tbl; @@ -138,7 +140,8 @@ static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp, return err; } -static bool fib4_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg) +INDIRECT_CALLABLE_SCOPE bool fib4_rule_suppress(struct fib_rule *rule, + struct fib_lookup_arg *arg) { struct fib_result *result = (struct fib_result *) arg->result; struct net_device *dev = NULL; @@ -169,7 +172,8 @@ suppress_route: return true; } -static int fib4_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) +INDIRECT_CALLABLE_SCOPE int fib4_rule_match(struct fib_rule *rule, + struct flowi *fl, int flags) { struct fib4_rule *r = (struct fib4_rule *) rule; struct flowi4 *fl4 = &fl->u.ip4; diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index 6053ef851555..8f9a83314de7 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -255,8 +256,9 @@ out: return err; } -static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, - int flags, struct fib_lookup_arg *arg) +INDIRECT_CALLABLE_SCOPE int fib6_rule_action(struct fib_rule *rule, + struct flowi *flp, int flags, + struct fib_lookup_arg *arg) { if (arg->lookup_ptr == fib6_table_lookup) return fib6_rule_action_alt(rule, flp, flags, arg); @@ -264,7 +266,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, return __fib6_rule_action(rule, flp, flags, arg); } -static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg) +INDIRECT_CALLABLE_SCOPE bool fib6_rule_suppress(struct fib_rule *rule, + struct fib_lookup_arg *arg) { struct fib6_result *res = arg->result; struct rt6_info *rt = res->rt6; @@ -296,7 +299,8 @@ suppress_route: return true; } -static int fib6_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) +INDIRECT_CALLABLE_SCOPE int fib6_rule_match(struct fib_rule *rule, + struct flowi *fl, int flags) { struct fib6_rule *r = (struct fib6_rule *) rule; struct flowi6 *fl6 = &fl->u.ip6; -- cgit v1.2.3 From 4299f85a67480cdb43a3c4c1840a05259727e83c Mon Sep 17 00:00:00 2001 From: Lars Povlsen Date: Mon, 27 Jul 2020 10:42:08 +0200 Subject: dt-bindings: clock: sparx5: Add bindings include file The Sparx5 supports 9 different clock outputs. This include file has defines for each supported clock ordinal.
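As a rough usage sketch (not part of this patch): the ordinals above are meant to be referenced both from device trees and from the clock provider itself. The hypothetical provider below registers one placeholder fixed-rate clock per ordinal and exposes them through a onecell lookup; the driver, the clock names, and the rate are invented for illustration, and only the CLK_ID_x defines and N_CLOCKS come from the new header.

/* Illustrative sketch only; a real provider derives rates from hardware. */
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/microchip,sparx5.h>

/* Placeholder names per ordinal; a real driver defines its own. */
static const char * const demo_clk_names[N_CLOCKS] = {
	[CLK_ID_CORE] = "core", [CLK_ID_DDR] = "ddr",   [CLK_ID_CPU2] = "cpu2",
	[CLK_ID_ARM2] = "arm2", [CLK_ID_AUX1] = "aux1", [CLK_ID_AUX2] = "aux2",
	[CLK_ID_AUX3] = "aux3", [CLK_ID_AUX4] = "aux4", [CLK_ID_SYNCE] = "synce",
};

static int demo_clk_probe(struct platform_device *pdev)
{
	struct clk_hw_onecell_data *data;
	int i;

	data = devm_kzalloc(&pdev->dev, struct_size(data, hws, N_CLOCKS),
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->num = N_CLOCKS;

	/* One dummy fixed-rate clock per ordinal, indexed by its CLK_ID_x value. */
	for (i = 0; i < N_CLOCKS; i++) {
		data->hws[i] = clk_hw_register_fixed_rate(&pdev->dev,
							  demo_clk_names[i],
							  NULL, 0, 625000000);
		if (IS_ERR(data->hws[i]))
			return PTR_ERR(data->hws[i]);
	}

	/* Consumers can then reference clocks as <&clks CLK_ID_CORE>, etc. */
	return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
					   data);
}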
Signed-off-by: Lars Povlsen Reviewed-by: Stephen Boyd Reviewed-by: Rob Herring Link: https://lore.kernel.org/r/20200727084211.6632-8-lars.povlsen@microchip.com Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/microchip,sparx5.h | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 include/dt-bindings/clock/microchip,sparx5.h (limited to 'include') diff --git a/include/dt-bindings/clock/microchip,sparx5.h b/include/dt-bindings/clock/microchip,sparx5.h new file mode 100644 index 000000000000..4b04dabacec2 --- /dev/null +++ b/include/dt-bindings/clock/microchip,sparx5.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019 Microchip Inc. + * + * Author: Lars Povlsen + */ + +#ifndef _DT_BINDINGS_CLK_SPARX5_H +#define _DT_BINDINGS_CLK_SPARX5_H + +#define CLK_ID_CORE 0 +#define CLK_ID_DDR 1 +#define CLK_ID_CPU2 2 +#define CLK_ID_ARM2 3 +#define CLK_ID_AUX1 4 +#define CLK_ID_AUX2 5 +#define CLK_ID_AUX3 6 +#define CLK_ID_AUX4 7 +#define CLK_ID_SYNCE 8 + +#define N_CLOCKS 9 + +#endif -- cgit v1.2.3 From 48001ea50d17f3eb06a552e9ecf21f7fc01b25da Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 20 Jul 2020 15:08:18 -0700 Subject: PM, libnvdimm: Add runtime firmware activation support Abstract platform specific mechanics for nvdimm firmware activation behind a handful of generic ops. At the bus level ->activate_state() indicates the unified state (idle, busy, armed) of all DIMMs on the bus, and ->capability() indicates the system state expectations for activate. At the DIMM level ->activate_state() indicates the per-DIMM state, ->activate_result() indicates the outcome of the last activation attempt, and ->arm() attempts to transition the DIMM from 'idle' to 'armed'. A new hibernate_quiet_exec() facility is added to support firmware activation in an OS defined system quiesce state. It leverages the fact that the hibernate-freeze state wants to assert that a memory hibernation snapshot can be taken. This is in contrast to a platform firmware defined quiesce state that may forcefully quiet the memory controller independent of whether an individual device-driver properly supports hibernate-freeze. The libnvdimm sysfs interface is extended to support detection of a firmware activate capability. The mechanism supports enumeration and triggering of firmware activate, optionally in the hibernate_quiet_exec() context. [rafael: hibernate_quiet_exec() proposal] [vishal: fix up sparse warning, grammar in Documentation/] Cc: Pavel Machek Cc: Ira Weiny Cc: Len Brown Cc: Jonathan Corbet Cc: Dave Jiang Cc: Vishal Verma Reported-by: kernel test robot Co-developed-by: "Rafael J. Wysocki" Signed-off-by: "Rafael J. 
Wysocki" Signed-off-by: Dan Williams Signed-off-by: Vishal Verma --- Documentation/ABI/testing/sysfs-bus-nvdimm | 2 + .../driver-api/nvdimm/firmware-activate.rst | 86 ++++++++++++ drivers/nvdimm/core.c | 149 +++++++++++++++++++++ drivers/nvdimm/dimm_devs.c | 115 ++++++++++++++++ drivers/nvdimm/nd-core.h | 1 + include/linux/libnvdimm.h | 44 ++++++ include/linux/suspend.h | 6 + kernel/power/hibernate.c | 97 ++++++++++++++ 8 files changed, 500 insertions(+) create mode 100644 Documentation/ABI/testing/sysfs-bus-nvdimm create mode 100644 Documentation/driver-api/nvdimm/firmware-activate.rst (limited to 'include') diff --git a/Documentation/ABI/testing/sysfs-bus-nvdimm b/Documentation/ABI/testing/sysfs-bus-nvdimm new file mode 100644 index 000000000000..d64380262be8 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-nvdimm @@ -0,0 +1,2 @@ +The libnvdimm sub-system implements a common sysfs interface for +platform nvdimm resources. See Documentation/driver-api/nvdimm/. diff --git a/Documentation/driver-api/nvdimm/firmware-activate.rst b/Documentation/driver-api/nvdimm/firmware-activate.rst new file mode 100644 index 000000000000..7ee7decbbdc3 --- /dev/null +++ b/Documentation/driver-api/nvdimm/firmware-activate.rst @@ -0,0 +1,86 @@ +.. SPDX-License-Identifier: GPL-2.0 + +================================== +NVDIMM Runtime Firmware Activation +================================== + +Some persistent memory devices run a firmware locally on the device / +"DIMM" to perform tasks like media management, capacity provisioning, +and health monitoring. The process of updating that firmware typically +involves a reboot because it has implications for in-flight memory +transactions. However, reboots are disruptive and at least the Intel +persistent memory platform implementation, described by the Intel ACPI +DSM specification [1], has added support for activating firmware at +runtime. + +A native sysfs interface is implemented in libnvdimm to allow platform +to advertise and control their local runtime firmware activation +capability. + +The libnvdimm bus object, ndbusX, implements an ndbusX/firmware/activate +attribute that shows the state of the firmware activation as one of 'idle', +'armed', 'overflow', and 'busy'. + +- idle: + No devices are set / armed to activate firmware + +- armed: + At least one device is armed + +- busy: + In the busy state armed devices are in the process of transitioning + back to idle and completing an activation cycle. + +- overflow: + If the platform has a concept of incremental work needed to perform + the activation it could be the case that too many DIMMs are armed for + activation. In that scenario the potential for firmware activation to + timeout is indicated by the 'overflow' state. + +The 'ndbusX/firmware/activate' property can be written with a value of +either 'live', or 'quiesce'. A value of 'quiesce' triggers the kernel to +run firmware activation from within the equivalent of the hibernation +'freeze' state where drivers and applications are notified to stop their +modifications of system memory. A value of 'live' attempts +firmware activation without this hibernation cycle. The +'ndbusX/firmware/activate' property will be elided completely if no +firmware activation capability is detected. + +Another property 'ndbusX/firmware/capability' indicates a value of +'live' or 'quiesce', where 'live' indicates that the firmware +does not require or inflict any quiesce period on the system to update +firmware. 
A capability value of 'quiesce' indicates that firmware does +expect and injects a quiet period for the memory controller, but 'live' +may still be written to 'ndbusX/firmware/activate' as an override to +assume the risk of racing firmware update with in-flight device and +application activity. The 'ndbusX/firmware/capability' property will be +elided completely if no firmware activation capability is detected. + +The libnvdimm memory-device / DIMM object, nmemX, implements +'nmemX/firmware/activate' and 'nmemX/firmware/result' attributes to +communicate the per-device firmware activation state. Similar to the +'ndbusX/firmware/activate' attribute, the 'nmemX/firmware/activate' +attribute indicates 'idle', 'armed', or 'busy'. The state transitions +from 'armed' to 'idle' when the system is prepared to activate firmware, +firmware staged + state set to armed, and 'ndbusX/firmware/activate' is +triggered. After that activation event the nmemX/firmware/result +attribute reflects the state of the last activation as one of: + +- none: + No runtime activation triggered since the last time the device was reset + +- success: + The last runtime activation completed successfully. + +- fail: + The last runtime activation failed for device-specific reasons. + +- not_staged: + The last runtime activation failed due to a sequencing error of the + firmware image not being staged. + +- need_reset: + Runtime firmware activation failed, but the firmware can still be + activated via the legacy method of power-cycling the system. + +[1]: https://docs.pmem.io/persistent-memory/ diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c index fe9bd6febdd2..c21ba0602029 100644 --- a/drivers/nvdimm/core.c +++ b/drivers/nvdimm/core.c @@ -4,6 +4,7 @@ */ #include #include +#include #include #include #include @@ -389,8 +390,156 @@ static const struct attribute_group nvdimm_bus_attribute_group = { .attrs = nvdimm_bus_attributes, }; +static ssize_t capability_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); + struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; + enum nvdimm_fwa_capability cap; + + if (!nd_desc->fw_ops) + return -EOPNOTSUPP; + + nvdimm_bus_lock(dev); + cap = nd_desc->fw_ops->capability(nd_desc); + nvdimm_bus_unlock(dev); + + switch (cap) { + case NVDIMM_FWA_CAP_QUIESCE: + return sprintf(buf, "quiesce\n"); + case NVDIMM_FWA_CAP_LIVE: + return sprintf(buf, "live\n"); + default: + return -EOPNOTSUPP; + } +} + +static DEVICE_ATTR_RO(capability); + +static ssize_t activate_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); + struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; + enum nvdimm_fwa_capability cap; + enum nvdimm_fwa_state state; + + if (!nd_desc->fw_ops) + return -EOPNOTSUPP; + + nvdimm_bus_lock(dev); + cap = nd_desc->fw_ops->capability(nd_desc); + state = nd_desc->fw_ops->activate_state(nd_desc); + nvdimm_bus_unlock(dev); + + if (cap < NVDIMM_FWA_CAP_QUIESCE) + return -EOPNOTSUPP; + + switch (state) { + case NVDIMM_FWA_IDLE: + return sprintf(buf, "idle\n"); + case NVDIMM_FWA_BUSY: + return sprintf(buf, "busy\n"); + case NVDIMM_FWA_ARMED: + return sprintf(buf, "armed\n"); + case NVDIMM_FWA_ARM_OVERFLOW: + return sprintf(buf, "overflow\n"); + default: + return -ENXIO; + } +} + +static int exec_firmware_activate(void *data) +{ + struct nvdimm_bus_descriptor *nd_desc = data; + + return nd_desc->fw_ops->activate(nd_desc); +} + +static 
ssize_t activate_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t len) +{ + struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); + struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; + enum nvdimm_fwa_state state; + bool quiesce; + ssize_t rc; + + if (!nd_desc->fw_ops) + return -EOPNOTSUPP; + + if (sysfs_streq(buf, "live")) + quiesce = false; + else if (sysfs_streq(buf, "quiesce")) + quiesce = true; + else + return -EINVAL; + + nvdimm_bus_lock(dev); + state = nd_desc->fw_ops->activate_state(nd_desc); + + switch (state) { + case NVDIMM_FWA_BUSY: + rc = -EBUSY; + break; + case NVDIMM_FWA_ARMED: + case NVDIMM_FWA_ARM_OVERFLOW: + if (quiesce) + rc = hibernate_quiet_exec(exec_firmware_activate, nd_desc); + else + rc = nd_desc->fw_ops->activate(nd_desc); + break; + case NVDIMM_FWA_IDLE: + default: + rc = -ENXIO; + } + nvdimm_bus_unlock(dev); + + if (rc == 0) + rc = len; + return rc; +} + +static DEVICE_ATTR_ADMIN_RW(activate); + +static umode_t nvdimm_bus_firmware_visible(struct kobject *kobj, struct attribute *a, int n) +{ + struct device *dev = container_of(kobj, typeof(*dev), kobj); + struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); + struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; + enum nvdimm_fwa_capability cap; + + /* + * Both 'activate' and 'capability' disappear when no ops + * detected, or a negative capability is indicated. + */ + if (!nd_desc->fw_ops) + return 0; + + nvdimm_bus_lock(dev); + cap = nd_desc->fw_ops->capability(nd_desc); + nvdimm_bus_unlock(dev); + + if (cap < NVDIMM_FWA_CAP_QUIESCE) + return 0; + + return a->mode; +} +static struct attribute *nvdimm_bus_firmware_attributes[] = { + &dev_attr_activate.attr, + &dev_attr_capability.attr, + NULL, +}; + +static const struct attribute_group nvdimm_bus_firmware_attribute_group = { + .name = "firmware", + .attrs = nvdimm_bus_firmware_attributes, + .is_visible = nvdimm_bus_firmware_visible, +}; + const struct attribute_group *nvdimm_bus_attribute_groups[] = { &nvdimm_bus_attribute_group, + &nvdimm_bus_firmware_attribute_group, NULL, }; diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index b7b77e8d9027..85b53a7f44f2 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -446,9 +446,124 @@ static const struct attribute_group nvdimm_attribute_group = { .is_visible = nvdimm_visible, }; +static ssize_t result_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct nvdimm *nvdimm = to_nvdimm(dev); + enum nvdimm_fwa_result result; + + if (!nvdimm->fw_ops) + return -EOPNOTSUPP; + + nvdimm_bus_lock(dev); + result = nvdimm->fw_ops->activate_result(nvdimm); + nvdimm_bus_unlock(dev); + + switch (result) { + case NVDIMM_FWA_RESULT_NONE: + return sprintf(buf, "none\n"); + case NVDIMM_FWA_RESULT_SUCCESS: + return sprintf(buf, "success\n"); + case NVDIMM_FWA_RESULT_FAIL: + return sprintf(buf, "fail\n"); + case NVDIMM_FWA_RESULT_NOTSTAGED: + return sprintf(buf, "not_staged\n"); + case NVDIMM_FWA_RESULT_NEEDRESET: + return sprintf(buf, "need_reset\n"); + default: + return -ENXIO; + } +} +static DEVICE_ATTR_ADMIN_RO(result); + +static ssize_t activate_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct nvdimm *nvdimm = to_nvdimm(dev); + enum nvdimm_fwa_state state; + + if (!nvdimm->fw_ops) + return -EOPNOTSUPP; + + nvdimm_bus_lock(dev); + state = nvdimm->fw_ops->activate_state(nvdimm); + nvdimm_bus_unlock(dev); + + switch (state) { + case NVDIMM_FWA_IDLE: + return sprintf(buf, "idle\n"); + case 
NVDIMM_FWA_BUSY: + return sprintf(buf, "busy\n"); + case NVDIMM_FWA_ARMED: + return sprintf(buf, "armed\n"); + default: + return -ENXIO; + } +} + +static ssize_t activate_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t len) +{ + struct nvdimm *nvdimm = to_nvdimm(dev); + enum nvdimm_fwa_trigger arg; + int rc; + + if (!nvdimm->fw_ops) + return -EOPNOTSUPP; + + if (sysfs_streq(buf, "arm")) + arg = NVDIMM_FWA_ARM; + else if (sysfs_streq(buf, "disarm")) + arg = NVDIMM_FWA_DISARM; + else + return -EINVAL; + + nvdimm_bus_lock(dev); + rc = nvdimm->fw_ops->arm(nvdimm, arg); + nvdimm_bus_unlock(dev); + + if (rc < 0) + return rc; + return len; +} +static DEVICE_ATTR_ADMIN_RW(activate); + +static struct attribute *nvdimm_firmware_attributes[] = { + &dev_attr_activate.attr, + &dev_attr_result.attr, +}; + +static umode_t nvdimm_firmware_visible(struct kobject *kobj, struct attribute *a, int n) +{ + struct device *dev = container_of(kobj, typeof(*dev), kobj); + struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); + struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; + struct nvdimm *nvdimm = to_nvdimm(dev); + enum nvdimm_fwa_capability cap; + + if (!nd_desc->fw_ops) + return 0; + if (!nvdimm->fw_ops) + return 0; + + nvdimm_bus_lock(dev); + cap = nd_desc->fw_ops->capability(nd_desc); + nvdimm_bus_unlock(dev); + + if (cap < NVDIMM_FWA_CAP_QUIESCE) + return 0; + + return a->mode; +} + +static const struct attribute_group nvdimm_firmware_attribute_group = { + .name = "firmware", + .attrs = nvdimm_firmware_attributes, + .is_visible = nvdimm_firmware_visible, +}; + static const struct attribute_group *nvdimm_attribute_groups[] = { &nd_device_attribute_group, &nvdimm_attribute_group, + &nvdimm_firmware_attribute_group, NULL, }; diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h index ddb9d97d9129..564faa36a3ca 100644 --- a/drivers/nvdimm/nd-core.h +++ b/drivers/nvdimm/nd-core.h @@ -45,6 +45,7 @@ struct nvdimm { struct kernfs_node *overwrite_state; } sec; struct delayed_work dwork; + const struct nvdimm_fw_ops *fw_ops; }; static inline unsigned long nvdimm_security_flags( diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index ad9898ece7d3..15dbcb718316 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -86,6 +86,7 @@ struct nvdimm_bus_descriptor { int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc); int (*clear_to_send)(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned int cmd, void *data); + const struct nvdimm_bus_fw_ops *fw_ops; }; struct nd_cmd_desc { @@ -200,6 +201,49 @@ struct nvdimm_security_ops { int (*query_overwrite)(struct nvdimm *nvdimm); }; +enum nvdimm_fwa_state { + NVDIMM_FWA_INVALID, + NVDIMM_FWA_IDLE, + NVDIMM_FWA_ARMED, + NVDIMM_FWA_BUSY, + NVDIMM_FWA_ARM_OVERFLOW, +}; + +enum nvdimm_fwa_trigger { + NVDIMM_FWA_ARM, + NVDIMM_FWA_DISARM, +}; + +enum nvdimm_fwa_capability { + NVDIMM_FWA_CAP_INVALID, + NVDIMM_FWA_CAP_NONE, + NVDIMM_FWA_CAP_QUIESCE, + NVDIMM_FWA_CAP_LIVE, +}; + +enum nvdimm_fwa_result { + NVDIMM_FWA_RESULT_INVALID, + NVDIMM_FWA_RESULT_NONE, + NVDIMM_FWA_RESULT_SUCCESS, + NVDIMM_FWA_RESULT_NOTSTAGED, + NVDIMM_FWA_RESULT_NEEDRESET, + NVDIMM_FWA_RESULT_FAIL, +}; + +struct nvdimm_bus_fw_ops { + enum nvdimm_fwa_state (*activate_state) + (struct nvdimm_bus_descriptor *nd_desc); + enum nvdimm_fwa_capability (*capability) + (struct nvdimm_bus_descriptor *nd_desc); + int (*activate)(struct nvdimm_bus_descriptor *nd_desc); +}; + +struct nvdimm_fw_ops { + enum 
nvdimm_fwa_state (*activate_state)(struct nvdimm *nvdimm); + enum nvdimm_fwa_result (*activate_result)(struct nvdimm *nvdimm); + int (*arm)(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arg); +}; + void badrange_init(struct badrange *badrange); int badrange_add(struct badrange *badrange, u64 addr, u64 length); void badrange_forget(struct badrange *badrange, phys_addr_t start, diff --git a/include/linux/suspend.h b/include/linux/suspend.h index b960098acfb0..cb9afad82a90 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -453,6 +453,8 @@ extern bool hibernation_available(void); asmlinkage int swsusp_save(void); extern struct pbe *restore_pblist; int pfn_is_nosave(unsigned long pfn); + +int hibernate_quiet_exec(int (*func)(void *data), void *data); #else /* CONFIG_HIBERNATION */ static inline void register_nosave_region(unsigned long b, unsigned long e) {} static inline void register_nosave_region_late(unsigned long b, unsigned long e) {} @@ -464,6 +466,10 @@ static inline void hibernation_set_ops(const struct platform_hibernation_ops *op static inline int hibernate(void) { return -ENOSYS; } static inline bool system_entering_hibernation(void) { return false; } static inline bool hibernation_available(void) { return false; } + +static inline int hibernate_quiet_exec(int (*func)(void *data), void *data) { + return -ENOTSUPP; +} #endif /* CONFIG_HIBERNATION */ #ifdef CONFIG_HIBERNATION_SNAPSHOT_DEV diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 02ec716a4927..e6fab3f09c98 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -795,6 +795,103 @@ int hibernate(void) return error; } +/** + * hibernate_quiet_exec - Execute a function with all devices frozen. + * @func: Function to execute. + * @data: Data pointer to pass to @func. + * + * Return the @func return value or an error code if it cannot be executed. + */ +int hibernate_quiet_exec(int (*func)(void *data), void *data) +{ + int error, nr_calls = 0; + + lock_system_sleep(); + + if (!hibernate_acquire()) { + error = -EBUSY; + goto unlock; + } + + pm_prepare_console(); + + error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls); + if (error) { + nr_calls--; + goto exit; + } + + error = freeze_processes(); + if (error) + goto exit; + + lock_device_hotplug(); + + pm_suspend_clear_flags(); + + error = platform_begin(true); + if (error) + goto thaw; + + error = freeze_kernel_threads(); + if (error) + goto thaw; + + error = dpm_prepare(PMSG_FREEZE); + if (error) + goto dpm_complete; + + suspend_console(); + + error = dpm_suspend(PMSG_FREEZE); + if (error) + goto dpm_resume; + + error = dpm_suspend_end(PMSG_FREEZE); + if (error) + goto dpm_resume; + + error = platform_pre_snapshot(true); + if (error) + goto skip; + + error = func(data); + +skip: + platform_finish(true); + + dpm_resume_start(PMSG_THAW); + +dpm_resume: + dpm_resume(PMSG_THAW); + + resume_console(); + +dpm_complete: + dpm_complete(PMSG_THAW); + + thaw_kernel_threads(); + +thaw: + platform_end(true); + + unlock_device_hotplug(); + + thaw_processes(); + +exit: + __pm_notifier_call_chain(PM_POST_HIBERNATION, nr_calls, NULL); + + pm_restore_console(); + + hibernate_release(); + +unlock: + unlock_system_sleep(); + + return error; +} +EXPORT_SYMBOL_GPL(hibernate_quiet_exec); /** * software_resume - Resume from a saved hibernation image. 
-- cgit v1.2.3 From a1facc1fffc17a65e2c12a8de7434b9325ec0324 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 20 Jul 2020 15:08:24 -0700 Subject: ACPI: NFIT: Add runtime firmware activate support Plumb the platform specific backend for the generic libnvdimm firmware activate interface. Register dimm level operations to arm/disarm activation, and register bus level operations to report the dynamic platform-quiesce time relative to the number of dimms armed for firmware activation. A new nfit-specific bus attribute "firmware_activate_noidle" is added to allow the activation to switch between platform enforced, and OS opportunistic device quiesce. In other words, let the hibernate cycle handle in-flight device-dma rather than the platform attempting to increase PCI-E timeouts and the like. Cc: Dave Jiang Cc: Ira Weiny Cc: Vishal Verma Signed-off-by: Dan Williams Signed-off-by: Vishal Verma --- Documentation/ABI/testing/sysfs-bus-nfit | 19 ++ drivers/acpi/nfit/core.c | 41 +++- drivers/acpi/nfit/intel.c | 386 +++++++++++++++++++++++++++++++ drivers/acpi/nfit/intel.h | 3 + drivers/acpi/nfit/nfit.h | 10 + drivers/nvdimm/dimm_devs.c | 4 +- include/linux/libnvdimm.h | 5 +- 7 files changed, 461 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/Documentation/ABI/testing/sysfs-bus-nfit b/Documentation/ABI/testing/sysfs-bus-nfit index a1cb44dcb908..e4f76e7eab93 100644 --- a/Documentation/ABI/testing/sysfs-bus-nfit +++ b/Documentation/ABI/testing/sysfs-bus-nfit @@ -202,6 +202,25 @@ Description: functions. See the section named 'NVDIMM Root Device _DSMs' in the ACPI specification. +What: /sys/bus/nd/devices/ndbusX/nfit/firmware_activate_noidle +Date: Apr, 2020 +KernelVersion: v5.8 +Contact: linux-nvdimm@lists.01.org +Description: + (RW) The Intel platform implementation of firmware activate + support exposes an option let the platform force idle devices in + the system over the activation event, or trust that the OS will + do it. The safe default is to let the platform force idle + devices since the kernel is already in a suspend state, and on + the chance that a driver does not properly quiesce bus-mastering + after a suspend callback the platform will handle it. However, + the activation might abort if, for example, platform firmware + determines that the activation time exceeds the max PCI-E + completion timeout. Since the platform does not know whether the + OS is running the activation from a suspend context it aborts, + but if the system owner trusts driver suspend callback to be + sufficient then 'firmware_activation_noidle' can be + enabled to bypass the activation abort. What: /sys/bus/nd/devices/regionX/nfit/range_index Date: Jun, 2015 diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 78cc9e2d2aa3..fb775b967c52 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -1392,8 +1392,12 @@ static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n) struct device *dev = container_of(kobj, struct device, kobj); struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); - if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus)) - return 0; + if (a == &dev_attr_scrub.attr) + return ars_supported(nvdimm_bus) ? a->mode : 0; + + if (a == &dev_attr_firmware_activate_noidle.attr) + return intel_fwa_supported(nvdimm_bus) ? 
a->mode : 0; + return a->mode; } @@ -1402,6 +1406,7 @@ static struct attribute *acpi_nfit_attributes[] = { &dev_attr_scrub.attr, &dev_attr_hw_error_scrub.attr, &dev_attr_bus_dsm_mask.attr, + &dev_attr_firmware_activate_noidle.attr, NULL, }; @@ -2019,6 +2024,26 @@ static const struct nvdimm_security_ops *acpi_nfit_get_security_ops(int family) } } +static const struct nvdimm_fw_ops *acpi_nfit_get_fw_ops( + struct nfit_mem *nfit_mem) +{ + unsigned long mask; + struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc; + struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; + + if (!nd_desc->fw_ops) + return NULL; + + if (nfit_mem->family != NVDIMM_FAMILY_INTEL) + return NULL; + + mask = nfit_mem->dsm_mask & NVDIMM_INTEL_FW_ACTIVATE_CMDMASK; + if (mask != NVDIMM_INTEL_FW_ACTIVATE_CMDMASK) + return NULL; + + return intel_fw_ops; +} + static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) { struct nfit_mem *nfit_mem; @@ -2095,7 +2120,8 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) acpi_nfit_dimm_attribute_groups, flags, cmd_mask, flush ? flush->hint_count : 0, nfit_mem->flush_wpq, &nfit_mem->id[0], - acpi_nfit_get_security_ops(nfit_mem->family)); + acpi_nfit_get_security_ops(nfit_mem->family), + acpi_nfit_get_fw_ops(nfit_mem)); if (!nvdimm) return -ENOMEM; @@ -2170,8 +2196,10 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) if (acpi_desc->bus_cmd_force_en) { nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en; mask = &nd_desc->bus_family_mask; - if (acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]) + if (acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]) { set_bit(NVDIMM_BUS_FAMILY_INTEL, mask); + nd_desc->fw_ops = intel_bus_fw_ops; + } } adev = to_acpi_dev(acpi_desc); @@ -2202,6 +2230,11 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) set_bit(i, mask); + + if (*mask == dsm_mask) { + set_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask); + nd_desc->fw_ops = intel_bus_fw_ops; + } } static ssize_t range_index_show(struct device *dev, diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c index 1113b679cd7b..8dd792a55730 100644 --- a/drivers/acpi/nfit/intel.c +++ b/drivers/acpi/nfit/intel.c @@ -7,6 +7,48 @@ #include "intel.h" #include "nfit.h" +static ssize_t firmware_activate_noidle_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); + struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + + return sprintf(buf, "%s\n", acpi_desc->fwa_noidle ? 
"Y" : "N"); +} + +static ssize_t firmware_activate_noidle_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) +{ + struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); + struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + ssize_t rc; + bool val; + + rc = kstrtobool(buf, &val); + if (rc) + return rc; + if (val != acpi_desc->fwa_noidle) + acpi_desc->fwa_cap = NVDIMM_FWA_CAP_INVALID; + acpi_desc->fwa_noidle = val; + return size; +} +DEVICE_ATTR_RW(firmware_activate_noidle); + +bool intel_fwa_supported(struct nvdimm_bus *nvdimm_bus) +{ + struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + unsigned long *mask; + + if (!test_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask)) + return false; + + mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]; + return *mask == NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK; +} + static unsigned long intel_security_flags(struct nvdimm *nvdimm, enum nvdimm_passphrase_type ptype) { @@ -389,3 +431,347 @@ static const struct nvdimm_security_ops __intel_security_ops = { }; const struct nvdimm_security_ops *intel_security_ops = &__intel_security_ops; + +static int intel_bus_fwa_businfo(struct nvdimm_bus_descriptor *nd_desc, + struct nd_intel_bus_fw_activate_businfo *info) +{ + struct { + struct nd_cmd_pkg pkg; + struct nd_intel_bus_fw_activate_businfo cmd; + } nd_cmd = { + .pkg = { + .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO, + .nd_family = NVDIMM_BUS_FAMILY_INTEL, + .nd_size_out = + sizeof(struct nd_intel_bus_fw_activate_businfo), + .nd_fw_size = + sizeof(struct nd_intel_bus_fw_activate_businfo), + }, + }; + int rc; + + rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), + NULL); + *info = nd_cmd.cmd; + return rc; +} + +/* The fw_ops expect to be called with the nvdimm_bus_lock() held */ +static enum nvdimm_fwa_state intel_bus_fwa_state( + struct nvdimm_bus_descriptor *nd_desc) +{ + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + struct nd_intel_bus_fw_activate_businfo info; + struct device *dev = acpi_desc->dev; + enum nvdimm_fwa_state state; + int rc; + + /* + * It should not be possible for platform firmware to return + * busy because activate is a synchronous operation. Treat it + * similar to invalid, i.e. always refresh / poll the status. + */ + switch (acpi_desc->fwa_state) { + case NVDIMM_FWA_INVALID: + case NVDIMM_FWA_BUSY: + break; + default: + /* check if capability needs to be refreshed */ + if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID) + break; + return acpi_desc->fwa_state; + } + + /* Refresh with platform firmware */ + rc = intel_bus_fwa_businfo(nd_desc, &info); + if (rc) + return NVDIMM_FWA_INVALID; + + switch (info.state) { + case ND_INTEL_FWA_IDLE: + state = NVDIMM_FWA_IDLE; + break; + case ND_INTEL_FWA_BUSY: + state = NVDIMM_FWA_BUSY; + break; + case ND_INTEL_FWA_ARMED: + if (info.activate_tmo > info.max_quiesce_tmo) + state = NVDIMM_FWA_ARM_OVERFLOW; + else + state = NVDIMM_FWA_ARMED; + break; + default: + dev_err_once(dev, "invalid firmware activate state %d\n", + info.state); + return NVDIMM_FWA_INVALID; + } + + /* + * Capability data is available in the same payload as state. It + * is expected to be static. 
+ */ + if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID) { + if (info.capability & ND_INTEL_BUS_FWA_CAP_FWQUIESCE) + acpi_desc->fwa_cap = NVDIMM_FWA_CAP_QUIESCE; + else if (info.capability & ND_INTEL_BUS_FWA_CAP_OSQUIESCE) { + /* + * Skip hibernate cycle by default if platform + * indicates that it does not need devices to be + * quiesced. + */ + acpi_desc->fwa_cap = NVDIMM_FWA_CAP_LIVE; + } else + acpi_desc->fwa_cap = NVDIMM_FWA_CAP_NONE; + } + + acpi_desc->fwa_state = state; + + return state; +} + +static enum nvdimm_fwa_capability intel_bus_fwa_capability( + struct nvdimm_bus_descriptor *nd_desc) +{ + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + + if (acpi_desc->fwa_cap > NVDIMM_FWA_CAP_INVALID) + return acpi_desc->fwa_cap; + + if (intel_bus_fwa_state(nd_desc) > NVDIMM_FWA_INVALID) + return acpi_desc->fwa_cap; + + return NVDIMM_FWA_CAP_INVALID; +} + +static int intel_bus_fwa_activate(struct nvdimm_bus_descriptor *nd_desc) +{ + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + struct { + struct nd_cmd_pkg pkg; + struct nd_intel_bus_fw_activate cmd; + } nd_cmd = { + .pkg = { + .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE, + .nd_family = NVDIMM_BUS_FAMILY_INTEL, + .nd_size_in = sizeof(nd_cmd.cmd.iodev_state), + .nd_size_out = + sizeof(struct nd_intel_bus_fw_activate), + .nd_fw_size = + sizeof(struct nd_intel_bus_fw_activate), + }, + /* + * Even though activate is run from a suspended context, + * for safety, still ask platform firmware to force + * quiesce devices by default. Let a module + * parameter override that policy. + */ + .cmd = { + .iodev_state = acpi_desc->fwa_noidle + ? ND_INTEL_BUS_FWA_IODEV_OS_IDLE + : ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE, + }, + }; + int rc; + + switch (intel_bus_fwa_state(nd_desc)) { + case NVDIMM_FWA_ARMED: + case NVDIMM_FWA_ARM_OVERFLOW: + break; + default: + return -ENXIO; + } + + rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), + NULL); + + /* + * Whether the command succeeded, or failed, the agent checking + * for the result needs to query the DIMMs individually. 
+ * Increment the activation count to invalidate all the DIMM + * states at once (it's otherwise not possible to take + * acpi_desc->init_mutex in this context) + */ + acpi_desc->fwa_state = NVDIMM_FWA_INVALID; + acpi_desc->fwa_count++; + + dev_dbg(acpi_desc->dev, "result: %d\n", rc); + + return rc; +} + +static const struct nvdimm_bus_fw_ops __intel_bus_fw_ops = { + .activate_state = intel_bus_fwa_state, + .capability = intel_bus_fwa_capability, + .activate = intel_bus_fwa_activate, +}; + +const struct nvdimm_bus_fw_ops *intel_bus_fw_ops = &__intel_bus_fw_ops; + +static int intel_fwa_dimminfo(struct nvdimm *nvdimm, + struct nd_intel_fw_activate_dimminfo *info) +{ + struct { + struct nd_cmd_pkg pkg; + struct nd_intel_fw_activate_dimminfo cmd; + } nd_cmd = { + .pkg = { + .nd_command = NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO, + .nd_family = NVDIMM_FAMILY_INTEL, + .nd_size_out = + sizeof(struct nd_intel_fw_activate_dimminfo), + .nd_fw_size = + sizeof(struct nd_intel_fw_activate_dimminfo), + }, + }; + int rc; + + rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL); + *info = nd_cmd.cmd; + return rc; +} + +static enum nvdimm_fwa_state intel_fwa_state(struct nvdimm *nvdimm) +{ + struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); + struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc; + struct nd_intel_fw_activate_dimminfo info; + int rc; + + /* + * Similar to the bus state, since activate is synchronous the + * busy state should resolve within the context of 'activate'. + */ + switch (nfit_mem->fwa_state) { + case NVDIMM_FWA_INVALID: + case NVDIMM_FWA_BUSY: + break; + default: + /* If no activations occurred the old state is still valid */ + if (nfit_mem->fwa_count == acpi_desc->fwa_count) + return nfit_mem->fwa_state; + } + + rc = intel_fwa_dimminfo(nvdimm, &info); + if (rc) + return NVDIMM_FWA_INVALID; + + switch (info.state) { + case ND_INTEL_FWA_IDLE: + nfit_mem->fwa_state = NVDIMM_FWA_IDLE; + break; + case ND_INTEL_FWA_BUSY: + nfit_mem->fwa_state = NVDIMM_FWA_BUSY; + break; + case ND_INTEL_FWA_ARMED: + nfit_mem->fwa_state = NVDIMM_FWA_ARMED; + break; + default: + nfit_mem->fwa_state = NVDIMM_FWA_INVALID; + break; + } + + switch (info.result) { + case ND_INTEL_DIMM_FWA_NONE: + nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NONE; + break; + case ND_INTEL_DIMM_FWA_SUCCESS: + nfit_mem->fwa_result = NVDIMM_FWA_RESULT_SUCCESS; + break; + case ND_INTEL_DIMM_FWA_NOTSTAGED: + nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NOTSTAGED; + break; + case ND_INTEL_DIMM_FWA_NEEDRESET: + nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NEEDRESET; + break; + case ND_INTEL_DIMM_FWA_MEDIAFAILED: + case ND_INTEL_DIMM_FWA_ABORT: + case ND_INTEL_DIMM_FWA_NOTSUPP: + case ND_INTEL_DIMM_FWA_ERROR: + default: + nfit_mem->fwa_result = NVDIMM_FWA_RESULT_FAIL; + break; + } + + nfit_mem->fwa_count = acpi_desc->fwa_count; + + return nfit_mem->fwa_state; +} + +static enum nvdimm_fwa_result intel_fwa_result(struct nvdimm *nvdimm) +{ + struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); + struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc; + + if (nfit_mem->fwa_count == acpi_desc->fwa_count + && nfit_mem->fwa_result > NVDIMM_FWA_RESULT_INVALID) + return nfit_mem->fwa_result; + + if (intel_fwa_state(nvdimm) > NVDIMM_FWA_INVALID) + return nfit_mem->fwa_result; + + return NVDIMM_FWA_RESULT_INVALID; +} + +static int intel_fwa_arm(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arm) +{ + struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); + struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc; + struct { + struct 
nd_cmd_pkg pkg; + struct nd_intel_fw_activate_arm cmd; + } nd_cmd = { + .pkg = { + .nd_command = NVDIMM_INTEL_FW_ACTIVATE_ARM, + .nd_family = NVDIMM_FAMILY_INTEL, + .nd_size_in = sizeof(nd_cmd.cmd.activate_arm), + .nd_size_out = + sizeof(struct nd_intel_fw_activate_arm), + .nd_fw_size = + sizeof(struct nd_intel_fw_activate_arm), + }, + .cmd = { + .activate_arm = arm == NVDIMM_FWA_ARM + ? ND_INTEL_DIMM_FWA_ARM + : ND_INTEL_DIMM_FWA_DISARM, + }, + }; + int rc; + + switch (intel_fwa_state(nvdimm)) { + case NVDIMM_FWA_INVALID: + return -ENXIO; + case NVDIMM_FWA_BUSY: + return -EBUSY; + case NVDIMM_FWA_IDLE: + if (arm == NVDIMM_FWA_DISARM) + return 0; + break; + case NVDIMM_FWA_ARMED: + if (arm == NVDIMM_FWA_ARM) + return 0; + break; + default: + return -ENXIO; + } + + /* + * Invalidate the bus-level state, now that we're committed to + * changing the 'arm' state. + */ + acpi_desc->fwa_state = NVDIMM_FWA_INVALID; + nfit_mem->fwa_state = NVDIMM_FWA_INVALID; + + rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL); + + dev_dbg(acpi_desc->dev, "%s result: %d\n", arm == NVDIMM_FWA_ARM + ? "arm" : "disarm", rc); + return rc; +} + +static const struct nvdimm_fw_ops __intel_fw_ops = { + .activate_state = intel_fwa_state, + .activate_result = intel_fwa_result, + .arm = intel_fwa_arm, +}; + +const struct nvdimm_fw_ops *intel_fw_ops = &__intel_fw_ops; diff --git a/drivers/acpi/nfit/intel.h b/drivers/acpi/nfit/intel.h index 49a598623024..b768234ccebc 100644 --- a/drivers/acpi/nfit/intel.h +++ b/drivers/acpi/nfit/intel.h @@ -169,4 +169,7 @@ struct nd_intel_bus_fw_activate { u8 iodev_state; u32 status; } __packed; + +extern const struct nvdimm_fw_ops *intel_fw_ops; +extern const struct nvdimm_bus_fw_ops *intel_bus_fw_ops; #endif diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h index 97c122628975..67b7807ed200 100644 --- a/drivers/acpi/nfit/nfit.h +++ b/drivers/acpi/nfit/nfit.h @@ -220,6 +220,9 @@ struct nfit_mem { struct list_head list; struct acpi_device *adev; struct acpi_nfit_desc *acpi_desc; + enum nvdimm_fwa_state fwa_state; + enum nvdimm_fwa_result fwa_result; + int fwa_count; char id[NFIT_DIMM_ID_LEN+1]; struct resource *flush_wpq; unsigned long dsm_mask; @@ -265,6 +268,11 @@ struct acpi_nfit_desc { unsigned int scrub_tmo; int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, void *iobuf, u64 len, int rw); + enum nvdimm_fwa_state fwa_state; + enum nvdimm_fwa_capability fwa_cap; + int fwa_count; + bool fwa_noidle; + bool fwa_nosuspend; }; enum scrub_mode { @@ -367,4 +375,6 @@ void __acpi_nvdimm_notify(struct device *dev, u32 event); int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc); void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev); +bool intel_fwa_supported(struct nvdimm_bus *nvdimm_bus); +extern struct device_attribute dev_attr_firmware_activate_noidle; #endif /* __NFIT_H__ */ diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index 85b53a7f44f2..2f0815e15986 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -582,7 +582,8 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data, const struct attribute_group **groups, unsigned long flags, unsigned long cmd_mask, int num_flush, struct resource *flush_wpq, const char *dimm_id, - const struct nvdimm_security_ops *sec_ops) + const struct nvdimm_security_ops *sec_ops, + const struct nvdimm_fw_ops *fw_ops) { struct nvdimm 
*nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL); struct device *dev; @@ -612,6 +613,7 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus, dev->devt = MKDEV(nvdimm_major, nvdimm->id); dev->groups = groups; nvdimm->sec.ops = sec_ops; + nvdimm->fw_ops = fw_ops; nvdimm->sec.overwrite_tmo = 0; INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query); /* diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 15dbcb718316..01f251b6e36c 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -269,14 +269,15 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data, const struct attribute_group **groups, unsigned long flags, unsigned long cmd_mask, int num_flush, struct resource *flush_wpq, const char *dimm_id, - const struct nvdimm_security_ops *sec_ops); + const struct nvdimm_security_ops *sec_ops, + const struct nvdimm_fw_ops *fw_ops); static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data, const struct attribute_group **groups, unsigned long flags, unsigned long cmd_mask, int num_flush, struct resource *flush_wpq) { return __nvdimm_create(nvdimm_bus, provider_data, groups, flags, - cmd_mask, num_flush, flush_wpq, NULL, NULL); + cmd_mask, num_flush, flush_wpq, NULL, NULL, NULL); } const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd); -- cgit v1.2.3 From 4e108d4f281609a4f6e413d736be7364671016c5 Mon Sep 17 00:00:00 2001 From: Hou Pu Date: Thu, 16 Jul 2020 06:02:11 -0400 Subject: scsi: target: iscsi: Fix login error when receiving iscsi_target_sk_data_ready() could be invoked indirectly by iscsi_target_do_login_rx() from the workqueue like this: iscsi_target_do_login_rx() iscsi_target_do_login() iscsi_target_do_tx_login_io() iscsit_put_login_tx() iscsi_login_tx_data() tx_data() sock_sendmsg_nosec() tcp_sendmsg() release_sock() sk_backlog_rcv() tcp_v4_do_rcv() tcp_data_ready() iscsi_target_sk_data_ready() At that time LOGIN_FLAGS_READ_ACTIVE is not cleared and iscsi_target_sk_data_ready will not read data from the socket. Some iscsi initiators (libiscsi) will wait forever for a reply. LOGIN_FLAGS_READ_ACTIVE should be cleared early just after doing the receive and before writing to the socket in iscsi_target_do_login_rx. Unfortunately, LOGIN_FLAGS_READ_ACTIVE is also used by sk_state_change to do login cleanup if a socket was closed at login time. It is supposed to be cleared after the login PDU is successfully processed and replied. Introduce another flag, LOGIN_FLAGS_WRITE_ACTIVE, to cover the transmit part. Link: https://lore.kernel.org/r/20200716100212.4237-2-houpu@bytedance.com Reviewed-by: Mike Christie Signed-off-by: Hou Pu Signed-off-by: Martin K. Petersen --- drivers/target/iscsi/iscsi_target_nego.c | 34 ++++++++++++++++++++++++++++---- include/target/iscsi/iscsi_target_core.h | 9 +++++---- 2 files changed, 35 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 685d771b51d4..43154fdba991 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -625,13 +625,37 @@ static void iscsi_target_do_login_rx(struct work_struct *work) pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n", conn, current->comm, current->pid); + /* + * LOGIN_FLAGS_READ_ACTIVE is cleared so that sk_data_ready + * could be triggered again after this. 
+ * + * LOGIN_FLAGS_WRITE_ACTIVE is cleared after we successfully + * process a login PDU, so that sk_state_chage can do login + * cleanup as needed if the socket is closed. If a delayed work is + * ongoing (LOGIN_FLAGS_WRITE_ACTIVE or LOGIN_FLAGS_READ_ACTIVE), + * sk_state_change will leave the cleanup to the delayed work or + * it will schedule a delayed work to do cleanup. + */ + if (conn->sock) { + struct sock *sk = conn->sock->sk; + + write_lock_bh(&sk->sk_callback_lock); + if (!test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags)) { + clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags); + set_bit(LOGIN_FLAGS_WRITE_ACTIVE, &conn->login_flags); + } + write_unlock_bh(&sk->sk_callback_lock); + } + rc = iscsi_target_do_login(conn, login); if (rc < 0) { goto err; } else if (!rc) { - if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_READ_ACTIVE)) + if (iscsi_target_sk_check_and_clear(conn, + LOGIN_FLAGS_WRITE_ACTIVE)) goto err; } else if (rc == 1) { + cancel_delayed_work(&conn->login_work); iscsi_target_nego_release(conn); iscsi_post_login_handler(np, conn, zero_tsih); iscsit_deaccess_np(np, tpg, tpg_np); @@ -640,6 +664,7 @@ static void iscsi_target_do_login_rx(struct work_struct *work) err: iscsi_target_restore_sock_callbacks(conn); + cancel_delayed_work(&conn->login_work); iscsi_target_login_drop(conn, login); iscsit_deaccess_np(np, tpg, tpg_np); } @@ -670,9 +695,10 @@ static void iscsi_target_sk_state_change(struct sock *sk) state = __iscsi_target_sk_check_close(sk); pr_debug("__iscsi_target_sk_close_change: state: %d\n", state); - if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) { - pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change" - " conn: %p\n", conn); + if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags) || + test_bit(LOGIN_FLAGS_WRITE_ACTIVE, &conn->login_flags)) { + pr_debug("Got LOGIN_FLAGS_{READ|WRITE}_ACTIVE=1" + " sk_state_change conn: %p\n", conn); if (state) set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags); write_unlock_bh(&sk->sk_callback_lock); diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h index 4fda324f4b35..1eccb2ac7d02 100644 --- a/include/target/iscsi/iscsi_target_core.h +++ b/include/target/iscsi/iscsi_target_core.h @@ -556,10 +556,11 @@ struct iscsi_conn { struct socket *sock; void (*orig_data_ready)(struct sock *); void (*orig_state_change)(struct sock *); -#define LOGIN_FLAGS_READ_ACTIVE 1 -#define LOGIN_FLAGS_CLOSED 2 -#define LOGIN_FLAGS_READY 4 -#define LOGIN_FLAGS_INITIAL_PDU 8 +#define LOGIN_FLAGS_READY 0 +#define LOGIN_FLAGS_INITIAL_PDU 1 +#define LOGIN_FLAGS_READ_ACTIVE 2 +#define LOGIN_FLAGS_WRITE_ACTIVE 3 +#define LOGIN_FLAGS_CLOSED 4 unsigned long login_flags; struct delayed_work login_work; struct iscsi_login *login; -- cgit v1.2.3 From 2e45a1a9c75d39f85df3f288e205fecb6f788e02 Mon Sep 17 00:00:00 2001 From: Bodo Stroesser Date: Sun, 26 Jul 2020 17:35:04 +0200 Subject: scsi: target: Add tmr_notify backend function Target core is modified to call an optional backend callback function if a TMR is received or commands are aborted implicitly after a PR command was received. The backend function takes as parameters the se_dev, the type of the TMR, and the list of aborted commands. If no commands were aborted, an empty list is supplied. Link: https://lore.kernel.org/r/20200726153510.13077-3-bstroesser@ts.fujitsu.com Reviewed-by: Mike Christie Signed-off-by: Bodo Stroesser Signed-off-by: Martin K. 
Petersen --- drivers/target/target_core_tmr.c | 16 +++++++++++++++- drivers/target/target_core_transport.c | 1 + include/target/target_core_backend.h | 2 ++ include/target/target_core_base.h | 1 + 4 files changed, 19 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 73c4155f3c1e..e4513ef09159 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -116,6 +116,7 @@ void core_tmr_abort_task( struct se_tmr_req *tmr, struct se_session *se_sess) { + LIST_HEAD(aborted_list); struct se_cmd *se_cmd, *next; unsigned long flags; bool rc; @@ -144,7 +145,7 @@ void core_tmr_abort_task( if (!rc) continue; - list_del_init(&se_cmd->state_list); + list_move_tail(&se_cmd->state_list, &aborted_list); se_cmd->state_active = false; spin_unlock_irqrestore(&dev->execute_task_lock, flags); @@ -157,6 +158,11 @@ void core_tmr_abort_task( WARN_ON_ONCE(transport_lookup_tmr_lun(tmr->task_cmd) < 0); + if (dev->transport->tmr_notify) + dev->transport->tmr_notify(dev, TMR_ABORT_TASK, + &aborted_list); + + list_del_init(&se_cmd->state_list); target_put_cmd_and_wait(se_cmd); printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" @@ -167,6 +173,9 @@ void core_tmr_abort_task( } spin_unlock_irqrestore(&dev->execute_task_lock, flags); + if (dev->transport->tmr_notify) + dev->transport->tmr_notify(dev, TMR_ABORT_TASK, &aborted_list); + printk("ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST for ref_tag: %lld\n", tmr->ref_task_tag); tmr->response = TMR_TASK_DOES_NOT_EXIST; @@ -318,6 +327,11 @@ static void core_tmr_drain_state_list( } spin_unlock_irqrestore(&dev->execute_task_lock, flags); + if (dev->transport->tmr_notify) + dev->transport->tmr_notify(dev, preempt_and_abort_list ? 
+ TMR_LUN_RESET_PRO : TMR_LUN_RESET, + &drain_task_list); + while (!list_empty(&drain_task_list)) { cmd = list_entry(drain_task_list.next, struct se_cmd, state_list); list_del_init(&cmd->state_list); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index e6e1fa68de54..9fb0be0aa620 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -2946,6 +2946,7 @@ static const char *target_tmf_name(enum tcm_tmreq_table tmf) case TMR_LUN_RESET: return "LUN_RESET"; case TMR_TARGET_WARM_RESET: return "TARGET_WARM_RESET"; case TMR_TARGET_COLD_RESET: return "TARGET_COLD_RESET"; + case TMR_LUN_RESET_PRO: return "LUN_RESET_PRO"; case TMR_UNKNOWN: break; } return "(?)"; diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index f51452e3b984..6336780d83a7 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -40,6 +40,8 @@ struct target_backend_ops { ssize_t (*show_configfs_dev_params)(struct se_device *, char *); sense_reason_t (*parse_cdb)(struct se_cmd *cmd); + void (*tmr_notify)(struct se_device *se_dev, enum tcm_tmreq_table, + struct list_head *aborted_cmds); u32 (*get_device_type)(struct se_device *); sector_t (*get_blocks)(struct se_device *); sector_t (*get_alignment_offset_lbas)(struct se_device *); diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 18c3f277b770..549947d407cf 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -207,6 +207,7 @@ enum tcm_tmreq_table { TMR_LUN_RESET = 5, TMR_TARGET_WARM_RESET = 6, TMR_TARGET_COLD_RESET = 7, + TMR_LUN_RESET_PRO = 0x80, TMR_UNKNOWN = 0xff, }; -- cgit v1.2.3 From bc2d214af5dbcf1e53be9a23d95905a585657ff4 Mon Sep 17 00:00:00 2001 From: Bodo Stroesser Date: Sun, 26 Jul 2020 17:35:09 +0200 Subject: scsi: target: tcmu: Implement tmr_notify callback This patch implements the tmr_notify callback for tcmu. When the callback is called, tcmu checks the list of aborted commands it received as parameter: - aborted commands in the qfull_queue are removed from the queue and target_complete_command is called - from the cmd_ids of aborted commands currently uncompleted in cmd ring it creates a list of aborted cmd_ids. Finally a TMR notification is written to cmd ring containing TMR type and cmd_id list. If there is no space in ring, the TMR notification is queued on a TMR specific queue. The TMR specific queue 'tmr_queue' can be seen as an extension of the cmd ring. At the end of each execution of tcmu_complete_commands() we check whether tmr_queue contains TMRs and try to move them onto the ring. If tmr_queue is not empty after that, we don't call run_qfull_queue() because commands must not overtake TMRs. This way we guarantee that cmd_ids in TMR notification received by userspace either match an active, not yet completed command or are no longer valid due to userspace having completed some cmd_ids meanwhile. New commands that were assigned to an aborted cmd_id will always appear on the cmd ring _after_ the TMR. Link: https://lore.kernel.org/r/20200726153510.13077-8-bstroesser@ts.fujitsu.com Reviewed-by: Mike Christie Signed-off-by: Bodo Stroesser Signed-off-by: Martin K.
Petersen --- drivers/target/target_core_user.c | 225 ++++++++++++++++++++++++++++++++-- include/uapi/linux/target_core_user.h | 25 ++++ 2 files changed, 241 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index bddd40f07929..cb5a561a46e8 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -137,6 +137,7 @@ struct tcmu_dev { struct mutex cmdr_lock; struct list_head qfull_queue; + struct list_head tmr_queue; uint32_t dbi_max; uint32_t dbi_thresh; @@ -183,6 +184,15 @@ struct tcmu_cmd { #define TCMU_CMD_BIT_EXPIRED 0 unsigned long flags; }; + +struct tcmu_tmr { + struct list_head queue_entry; + + uint8_t tmr_type; + uint32_t tmr_cmd_cnt; + int16_t tmr_cmd_ids[0]; +}; + /* * To avoid dead lock the mutex lock order should always be: * @@ -844,6 +854,9 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd, return false; } + if (!data_needed) + return true; + /* try to check and get the data blocks as needed */ space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh); if ((space * DATA_BLOCK_SIZE) < data_needed) { @@ -1106,6 +1119,60 @@ queue: return 1; } +/** + * queue_tmr_ring - queue tmr info to ring or internally + * @udev: related tcmu_dev + * @tmr: tcmu_tmr containing tmr info to queue + * + * Returns: + * 0 success + * 1 internally queued to wait for ring memory to free. + */ +static int +queue_tmr_ring(struct tcmu_dev *udev, struct tcmu_tmr *tmr) +{ + struct tcmu_tmr_entry *entry; + int cmd_size; + int id_list_sz; + struct tcmu_mailbox *mb = udev->mb_addr; + uint32_t cmd_head; + + if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) + goto out_free; + + id_list_sz = sizeof(tmr->tmr_cmd_ids[0]) * tmr->tmr_cmd_cnt; + cmd_size = round_up(sizeof(*entry) + id_list_sz, TCMU_OP_ALIGN_SIZE); + + if (!list_empty(&udev->tmr_queue) || + !is_ring_space_avail(udev, NULL, cmd_size, 0)) { + list_add_tail(&tmr->queue_entry, &udev->tmr_queue); + pr_debug("adding tmr %p on dev %s to TMR ring space wait queue\n", + tmr, udev->name); + return 1; + } + + cmd_head = ring_insert_padding(udev, cmd_size); + + entry = (void *)mb + CMDR_OFF + cmd_head; + memset(entry, 0, cmd_size); + tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_TMR); + tcmu_hdr_set_len(&entry->hdr.len_op, cmd_size); + entry->tmr_type = tmr->tmr_type; + entry->cmd_cnt = tmr->tmr_cmd_cnt; + memcpy(&entry->cmd_ids[0], &tmr->tmr_cmd_ids[0], id_list_sz); + tcmu_flush_dcache_range(entry, cmd_size); + + UPDATE_HEAD(mb->cmd_head, cmd_size, udev->cmdr_size); + tcmu_flush_dcache_range(mb, sizeof(*mb)); + + uio_event_notify(&udev->uio_info); + +out_free: + kfree(tmr); + + return 0; +} + static sense_reason_t tcmu_queue_cmd(struct se_cmd *se_cmd) { @@ -1141,6 +1208,85 @@ static void tcmu_set_next_deadline(struct list_head *queue, del_timer(timer); } +static int +tcmu_tmr_type(enum tcm_tmreq_table tmf) +{ + switch (tmf) { + case TMR_ABORT_TASK: return TCMU_TMR_ABORT_TASK; + case TMR_ABORT_TASK_SET: return TCMU_TMR_ABORT_TASK_SET; + case TMR_CLEAR_ACA: return TCMU_TMR_CLEAR_ACA; + case TMR_CLEAR_TASK_SET: return TCMU_TMR_CLEAR_TASK_SET; + case TMR_LUN_RESET: return TCMU_TMR_LUN_RESET; + case TMR_TARGET_WARM_RESET: return TCMU_TMR_TARGET_WARM_RESET; + case TMR_TARGET_COLD_RESET: return TCMU_TMR_TARGET_COLD_RESET; + case TMR_LUN_RESET_PRO: return TCMU_TMR_LUN_RESET_PRO; + default: return TCMU_TMR_UNKNOWN; + } +} + +static void +tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf, + struct list_head 
*cmd_list) +{ + int i = 0, cmd_cnt = 0; + bool unqueued = false; + uint16_t *cmd_ids = NULL; + struct tcmu_cmd *cmd; + struct se_cmd *se_cmd; + struct tcmu_tmr *tmr; + struct tcmu_dev *udev = TCMU_DEV(se_dev); + + mutex_lock(&udev->cmdr_lock); + + /* First we check for aborted commands in qfull_queue */ + list_for_each_entry(se_cmd, cmd_list, state_list) { + i++; + if (!se_cmd->priv) + continue; + cmd = se_cmd->priv; + /* Commands on qfull queue have no id yet */ + if (cmd->cmd_id) { + cmd_cnt++; + continue; + } + pr_debug("Removing aborted command %p from queue on dev %s.\n", + cmd, udev->name); + + list_del_init(&cmd->queue_entry); + tcmu_free_cmd(cmd); + target_complete_cmd(se_cmd, SAM_STAT_TASK_ABORTED); + unqueued = true; + } + if (unqueued) + tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); + + pr_debug("TMR event %d on dev %s, aborted cmds %d, afflicted cmd_ids %d\n", + tcmu_tmr_type(tmf), udev->name, i, cmd_cnt); + + tmr = kmalloc(sizeof(*tmr) + cmd_cnt * sizeof(*cmd_ids), GFP_KERNEL); + if (!tmr) + goto unlock; + + tmr->tmr_type = tcmu_tmr_type(tmf); + tmr->tmr_cmd_cnt = cmd_cnt; + + if (cmd_cnt != 0) { + cmd_cnt = 0; + list_for_each_entry(se_cmd, cmd_list, state_list) { + if (!se_cmd->priv) + continue; + cmd = se_cmd->priv; + if (cmd->cmd_id) + tmr->tmr_cmd_ids[cmd_cnt++] = cmd->cmd_id; + } + } + + queue_tmr_ring(udev, tmr); + +unlock: + mutex_unlock(&udev->cmdr_lock); +} + static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry) { struct se_cmd *se_cmd = cmd->se_cmd; @@ -1208,11 +1354,43 @@ out: tcmu_free_cmd(cmd); } +static int tcmu_run_tmr_queue(struct tcmu_dev *udev) +{ + struct tcmu_tmr *tmr, *tmp; + LIST_HEAD(tmrs); + + if (list_empty(&udev->tmr_queue)) + return 1; + + pr_debug("running %s's tmr queue\n", udev->name); + + list_splice_init(&udev->tmr_queue, &tmrs); + + list_for_each_entry_safe(tmr, tmp, &tmrs, queue_entry) { + list_del_init(&tmr->queue_entry); + + pr_debug("removing tmr %p on dev %s from queue\n", + tmr, udev->name); + + if (queue_tmr_ring(udev, tmr)) { + pr_debug("ran out of space during tmr queue run\n"); + /* + * tmr was requeued, so just put all tmrs back in + * the queue + */ + list_splice_tail(&tmrs, &udev->tmr_queue); + return 0; + } + } + + return 1; +} + static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) { struct tcmu_mailbox *mb; struct tcmu_cmd *cmd; - int handled = 0; + bool free_space = false; if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { pr_err("ring broken, not handling completions\n"); @@ -1235,7 +1413,10 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ? 
ring_left : sizeof(*entry)); - if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) { + free_space = true; + + if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD || + tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_TMR) { UPDATE_HEAD(udev->cmdr_last_cleaned, tcmu_hdr_get_len(entry->hdr.len_op), udev->cmdr_size); @@ -1256,9 +1437,9 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) UPDATE_HEAD(udev->cmdr_last_cleaned, tcmu_hdr_get_len(entry->hdr.len_op), udev->cmdr_size); - - handled++; } + if (free_space) + free_space = tcmu_run_tmr_queue(udev); if (atomic_read(&global_db_count) > tcmu_global_max_blocks && idr_is_empty(&udev->commands) && list_empty(&udev->qfull_queue)) { @@ -1271,7 +1452,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) if (udev->cmd_time_out) tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer); - return handled; + return free_space; } static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd) @@ -1381,6 +1562,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) INIT_LIST_HEAD(&udev->node); INIT_LIST_HEAD(&udev->timedout_entry); INIT_LIST_HEAD(&udev->qfull_queue); + INIT_LIST_HEAD(&udev->tmr_queue); INIT_LIST_HEAD(&udev->inflight_queue); idr_init(&udev->commands); @@ -1455,8 +1637,8 @@ static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on) struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); mutex_lock(&udev->cmdr_lock); - tcmu_handle_completions(udev); - run_qfull_queue(udev, false); + if (tcmu_handle_completions(udev)) + run_qfull_queue(udev, false); mutex_unlock(&udev->cmdr_lock); return 0; @@ -1609,6 +1791,16 @@ static void tcmu_blocks_release(struct radix_tree_root *blocks, } } +static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev) +{ + struct tcmu_tmr *tmr, *tmp; + + list_for_each_entry_safe(tmr, tmp, &udev->tmr_queue, queue_entry) { + list_del_init(&tmr->queue_entry); + kfree(tmr); + } +} + static void tcmu_dev_kref_release(struct kref *kref) { struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref); @@ -1631,6 +1823,8 @@ static void tcmu_dev_kref_release(struct kref *kref) if (tcmu_check_and_free_pending_cmd(cmd) != 0) all_expired = false; } + /* There can be left over TMR cmds. Remove them. */ + tcmu_remove_all_queued_tmr(udev); if (!list_empty(&udev->qfull_queue)) all_expired = false; idr_destroy(&udev->commands); @@ -1885,7 +2079,9 @@ static int tcmu_configure_device(struct se_device *dev) /* Initialise the mailbox of the ring buffer */ mb = udev->mb_addr; mb->version = TCMU_MAILBOX_VERSION; - mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC | TCMU_MAILBOX_FLAG_CAP_READ_LEN; + mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC | + TCMU_MAILBOX_FLAG_CAP_READ_LEN | + TCMU_MAILBOX_FLAG_CAP_TMR; mb->cmdr_off = CMDR_OFF; mb->cmdr_size = udev->cmdr_size; @@ -2055,6 +2251,15 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) del_timer(&udev->cmd_timer); + /* + * ring is empty and qfull queue never contains aborted commands. + * So TMRs in tmr queue do not contain relevant cmd_ids. + * After a ring reset userspace should do a fresh start, so + * even LUN RESET message is no longer relevant. 
+ * Therefore remove all TMRs from qfull queue + */ + tcmu_remove_all_queued_tmr(udev); + run_qfull_queue(udev, false); mutex_unlock(&udev->cmdr_lock); @@ -2607,6 +2812,7 @@ static struct target_backend_ops tcmu_ops = { .destroy_device = tcmu_destroy_device, .free_device = tcmu_free_device, .parse_cdb = tcmu_parse_cdb, + .tmr_notify = tcmu_tmr_notify, .set_configfs_dev_params = tcmu_set_configfs_dev_params, .show_configfs_dev_params = tcmu_show_configfs_dev_params, .get_device_type = sbc_get_device_type, @@ -2633,7 +2839,8 @@ static void find_free_blocks(void) } /* Try to complete the finished commands first */ - tcmu_handle_completions(udev); + if (tcmu_handle_completions(udev)) + run_qfull_queue(udev, false); /* Skip the udevs in idle */ if (!udev->dbi_thresh) { diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h index b7b57967d90f..95b1597f16ae 100644 --- a/include/uapi/linux/target_core_user.h +++ b/include/uapi/linux/target_core_user.h @@ -45,6 +45,7 @@ #define ALIGN_SIZE 64 /* Should be enough for most CPUs */ #define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0) /* Out-of-order completions */ #define TCMU_MAILBOX_FLAG_CAP_READ_LEN (1 << 1) /* Read data length */ +#define TCMU_MAILBOX_FLAG_CAP_TMR (1 << 2) /* TMR notifications */ struct tcmu_mailbox { __u16 version; @@ -62,6 +63,7 @@ struct tcmu_mailbox { enum tcmu_opcode { TCMU_OP_PAD = 0, TCMU_OP_CMD, + TCMU_OP_TMR, }; /* @@ -128,6 +130,29 @@ struct tcmu_cmd_entry { } __packed; +struct tcmu_tmr_entry { + struct tcmu_cmd_entry_hdr hdr; + +#define TCMU_TMR_UNKNOWN 0 +#define TCMU_TMR_ABORT_TASK 1 +#define TCMU_TMR_ABORT_TASK_SET 2 +#define TCMU_TMR_CLEAR_ACA 3 +#define TCMU_TMR_CLEAR_TASK_SET 4 +#define TCMU_TMR_LUN_RESET 5 +#define TCMU_TMR_TARGET_WARM_RESET 6 +#define TCMU_TMR_TARGET_COLD_RESET 7 +/* Pseudo reset due to received PR OUT */ +#define TCMU_TMR_LUN_RESET_PRO 128 + __u8 tmr_type; + + __u8 __pad1; + __u16 __pad2; + __u32 cmd_cnt; + __u64 __pad3; + __u64 __pad4; + __u16 cmd_ids[0]; +} __packed; + #define TCMU_OP_ALIGN_SIZE sizeof(__u64) enum tcmu_genl_cmd { -- cgit v1.2.3 From fe5e26a70cc544004b9aa9c3b96cb1d1cc132e09 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 18 Jul 2020 17:30:15 -0700 Subject: nvme-fc: drop a duplicated word in a comment Drop the repeated word "a" in a comment. Signed-off-by: Randy Dunlap Signed-off-by: Christoph Hellwig --- include/linux/nvme-fc-driver.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h index 41e7795a3ee4..2a38f2b477a5 100644 --- a/include/linux/nvme-fc-driver.h +++ b/include/linux/nvme-fc-driver.h @@ -672,7 +672,7 @@ enum { * Values set by the LLDD indicating completion status of the FCP operation. * Must be set prior to calling the done() callback. * @transferred_length: amount of DATA_OUT payload data received by a - * a WRITEDATA operation. If not a WRITEDATA operation, value must + * WRITEDATA operation. If not a WRITEDATA operation, value must * be set to 0. Should equal transfer_length on success. * @fcp_error: status of the FCP operation. Must be 0 on success; on failure * must be a NVME_SC_FC_xxxx value. 
-- cgit v1.2.3 From c1fef73f793b7fd9d2ffcb5ef85807ea55bf7adb Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Fri, 24 Jul 2020 11:25:17 -0600 Subject: nvmet: add passthru code to process commands Add passthru command handling capability for the NVMeOF target and export passthru APIs which are used to integrate passthru code with nvmet-core. The new file passthru.c handles passthru cmd parsing and execution. In the passthru mode, we create a block layer request from the nvmet request and map the data on to the block layer request. Admin commands and features are on an allow list as there are a number of each that don't make too much sense with passthrough. We use an allow list such that new commands can be considered before being blindly passed through. In both cases, vendor specific commands are always allowed. We also reject reservation IO commands as the underlying device cannot differentiate between multiple hosts behind a fabric. Based-on-a-patch-by: Chaitanya Kulkarni Signed-off-by: Logan Gunthorpe Reviewed-by: Keith Busch Reviewed-by: Sagi Grimberg Signed-off-by: Christoph Hellwig --- drivers/nvme/target/Makefile | 1 + drivers/nvme/target/admin-cmd.c | 7 +- drivers/nvme/target/core.c | 3 + drivers/nvme/target/nvmet.h | 39 ++++ drivers/nvme/target/passthru.c | 458 ++++++++++++++++++++++++++++++++++++++++ include/linux/nvme.h | 4 + 6 files changed, 510 insertions(+), 2 deletions(-) create mode 100644 drivers/nvme/target/passthru.c (limited to 'include') diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile index 2b33836f3d3e..ebf91fc4c72e 100644 --- a/drivers/nvme/target/Makefile +++ b/drivers/nvme/target/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_NVME_TARGET_TCP) += nvmet-tcp.o nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \ discovery.o io-cmd-file.o io-cmd-bdev.o +nvmet-$(CONFIG_NVME_TARGET_PASSTHRU) += passthru.o nvme-loop-y += loop.o nvmet-rdma-y += rdma.o nvmet-fc-y += fc.o diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 55918fcef80b..e9fe91786bbb 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -749,7 +749,7 @@ u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask) return 0; } -static void nvmet_execute_set_features(struct nvmet_req *req) +void nvmet_execute_set_features(struct nvmet_req *req) { struct nvmet_subsys *subsys = req->sq->ctrl->subsys; u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); @@ -824,7 +824,7 @@ void nvmet_get_feat_async_event(struct nvmet_req *req) nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled)); } -static void nvmet_execute_get_features(struct nvmet_req *req) +void nvmet_execute_get_features(struct nvmet_req *req) { struct nvmet_subsys *subsys = req->sq->ctrl->subsys; u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); @@ -940,6 +940,9 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req) if (unlikely(ret)) return ret; + if (nvmet_req_passthru_ctrl(req)) + return nvmet_parse_passthru_admin_cmd(req); + switch (cmd->common.opcode) { case nvme_admin_get_log_page: req->execute = nvmet_execute_get_log_page; diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 59621b816f6e..c5a1c82e699b 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -849,6 +849,9 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req) if (unlikely(ret)) return ret; + if (nvmet_req_passthru_ctrl(req)) + return nvmet_parse_passthru_io_cmd(req); + req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid); if (unlikely(!req->ns)) { 
req->error_loc = offsetof(struct nvme_common_command, nsid); diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 49e14111446c..51719dc62083 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -242,6 +242,10 @@ struct nvmet_subsys { struct config_group allowed_hosts_group; struct nvmet_subsys_model __rcu *model; + +#ifdef CONFIG_NVME_TARGET_PASSTHRU + struct nvme_ctrl *passthru_ctrl; +#endif /* CONFIG_NVME_TARGET_PASSTHRU */ }; static inline struct nvmet_subsys *to_subsys(struct config_item *item) @@ -321,6 +325,11 @@ struct nvmet_req { struct bio_vec *bvec; struct work_struct work; } f; + struct { + struct request *rq; + struct work_struct work; + bool use_workqueue; + } p; }; int sg_cnt; int metadata_sg_cnt; @@ -400,6 +409,8 @@ void nvmet_req_complete(struct nvmet_req *req, u16 status); int nvmet_req_alloc_sgls(struct nvmet_req *req); void nvmet_req_free_sgls(struct nvmet_req *req); +void nvmet_execute_set_features(struct nvmet_req *req); +void nvmet_execute_get_features(struct nvmet_req *req); void nvmet_execute_keep_alive(struct nvmet_req *req); void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid, @@ -532,6 +543,34 @@ static inline u32 nvmet_dsm_len(struct nvmet_req *req) sizeof(struct nvme_dsm_range); } +#ifdef CONFIG_NVME_TARGET_PASSTHRU +u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req); +u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req); +static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys) +{ + return subsys->passthru_ctrl; +} +#else /* CONFIG_NVME_TARGET_PASSTHRU */ +static inline u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req) +{ + return 0; +} +static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req) +{ + return 0; +} +static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys) +{ + return NULL; +} +#endif /* CONFIG_NVME_TARGET_PASSTHRU */ + +static inline struct nvme_ctrl * +nvmet_req_passthru_ctrl(struct nvmet_req *req) +{ + return nvmet_passthru_ctrl(req->sq->ctrl->subsys); +} + u16 errno_to_nvme_status(struct nvmet_req *req, int errno); /* Convert a 32-bit number to a 16-bit 0's based number */ diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c new file mode 100644 index 000000000000..b9727553ab30 --- /dev/null +++ b/drivers/nvme/target/passthru.c @@ -0,0 +1,458 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NVMe Over Fabrics Target Passthrough command implementation. + * + * Copyright (c) 2017-2018 Western Digital Corporation or its + * affiliates. + * Copyright (c) 2019-2020, Eideticom Inc. + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include + +#include "../host/nvme.h" +#include "nvmet.h" + +MODULE_IMPORT_NS(NVME_TARGET_PASSTHRU); + +static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl; + u16 status = NVME_SC_SUCCESS; + struct nvme_id_ctrl *id; + u32 max_hw_sectors; + int page_shift; + + id = kzalloc(sizeof(*id), GFP_KERNEL); + if (!id) + return NVME_SC_INTERNAL; + + status = nvmet_copy_from_sgl(req, 0, id, sizeof(*id)); + if (status) + goto out_free; + + id->cntlid = cpu_to_le16(ctrl->cntlid); + id->ver = cpu_to_le32(ctrl->subsys->ver); + + /* + * The passthru NVMe driver may have a limit on the number of segments + * which depends on the host's memory fragementation. To solve this, + * ensure mdts is limited to the pages equal to the number of segments. 
+ */ + max_hw_sectors = min_not_zero(pctrl->max_segments << (PAGE_SHIFT - 9), + pctrl->max_hw_sectors); + + page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12; + + id->mdts = ilog2(max_hw_sectors) + 9 - page_shift; + + id->acl = 3; + /* + * We export aerl limit for the fabrics controller, update this when + * passthru based aerl support is added. + */ + id->aerl = NVMET_ASYNC_EVENTS - 1; + + /* emulate kas as most of the PCIe ctrl don't have a support for kas */ + id->kas = cpu_to_le16(NVMET_KAS); + + /* don't support host memory buffer */ + id->hmpre = 0; + id->hmmin = 0; + + id->sqes = min_t(__u8, ((0x6 << 4) | 0x6), id->sqes); + id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes); + id->maxcmd = cpu_to_le16(NVMET_MAX_CMD); + + /* don't support fuse commands */ + id->fuses = 0; + + id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */ + if (ctrl->ops->flags & NVMF_KEYED_SGLS) + id->sgls |= cpu_to_le32(1 << 2); + if (req->port->inline_data_size) + id->sgls |= cpu_to_le32(1 << 20); + + /* + * When passsthru controller is setup using nvme-loop transport it will + * export the passthru ctrl subsysnqn (PCIe NVMe ctrl) and will fail in + * the nvme/host/core.c in the nvme_init_subsystem()->nvme_active_ctrl() + * code path with duplicate ctr subsynqn. In order to prevent that we + * mask the passthru-ctrl subsysnqn with the target ctrl subsysnqn. + */ + memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn)); + + /* use fabric id-ctrl values */ + id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) + + req->port->inline_data_size) / 16); + id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16); + + id->msdbd = ctrl->ops->msdbd; + + /* Support multipath connections with fabrics */ + id->cmic |= 1 << 1; + + /* Disable reservations, see nvmet_parse_passthru_io_cmd() */ + id->oncs &= cpu_to_le16(~NVME_CTRL_ONCS_RESERVATIONS); + + status = nvmet_copy_to_sgl(req, 0, id, sizeof(struct nvme_id_ctrl)); + +out_free: + kfree(id); + return status; +} + +static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req) +{ + u16 status = NVME_SC_SUCCESS; + struct nvme_id_ns *id; + int i; + + id = kzalloc(sizeof(*id), GFP_KERNEL); + if (!id) + return NVME_SC_INTERNAL; + + status = nvmet_copy_from_sgl(req, 0, id, sizeof(struct nvme_id_ns)); + if (status) + goto out_free; + + for (i = 0; i < (id->nlbaf + 1); i++) + if (id->lbaf[i].ms) + memset(&id->lbaf[i], 0, sizeof(id->lbaf[i])); + + id->flbas = id->flbas & ~(1 << 4); + + /* + * Presently the NVMEof target code does not support sending + * metadata, so we must disable it here. This should be updated + * once target starts supporting metadata. 
+ */ + id->mc = 0; + + status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); + +out_free: + kfree(id); + return status; +} + +static void nvmet_passthru_execute_cmd_work(struct work_struct *w) +{ + struct nvmet_req *req = container_of(w, struct nvmet_req, p.work); + struct request *rq = req->p.rq; + u16 status; + + nvme_execute_passthru_rq(rq); + + status = nvme_req(rq)->status; + if (status == NVME_SC_SUCCESS && + req->cmd->common.opcode == nvme_admin_identify) { + switch (req->cmd->identify.cns) { + case NVME_ID_CNS_CTRL: + nvmet_passthru_override_id_ctrl(req); + break; + case NVME_ID_CNS_NS: + nvmet_passthru_override_id_ns(req); + break; + } + } + + req->cqe->result = nvme_req(rq)->result; + nvmet_req_complete(req, status); + blk_put_request(rq); +} + +static void nvmet_passthru_req_done(struct request *rq, + blk_status_t blk_status) +{ + struct nvmet_req *req = rq->end_io_data; + + req->cqe->result = nvme_req(rq)->result; + nvmet_req_complete(req, nvme_req(rq)->status); + blk_put_request(rq); +} + +static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq) +{ + int sg_cnt = req->sg_cnt; + struct scatterlist *sg; + int op_flags = 0; + struct bio *bio; + int i, ret; + + if (req->cmd->common.opcode == nvme_cmd_flush) + op_flags = REQ_FUA; + else if (nvme_is_write(req->cmd)) + op_flags = REQ_SYNC | REQ_IDLE; + + bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES)); + bio->bi_end_io = bio_put; + bio->bi_opf = req_op(rq) | op_flags; + + for_each_sg(req->sg, sg, req->sg_cnt, i) { + if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length, + sg->offset) < sg->length) { + bio_put(bio); + return -EINVAL; + } + sg_cnt--; + } + + ret = blk_rq_append_bio(rq, &bio); + if (unlikely(ret)) { + bio_put(bio); + return ret; + } + + return 0; +} + +static void nvmet_passthru_execute_cmd(struct nvmet_req *req) +{ + struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req); + struct request_queue *q = ctrl->admin_q; + struct nvme_ns *ns = NULL; + struct request *rq = NULL; + u32 effects; + u16 status; + int ret; + + if (likely(req->sq->qid != 0)) { + u32 nsid = le32_to_cpu(req->cmd->common.nsid); + + ns = nvme_find_get_ns(ctrl, nsid); + if (unlikely(!ns)) { + pr_err("failed to get passthru ns nsid:%u\n", nsid); + status = NVME_SC_INVALID_NS | NVME_SC_DNR; + goto fail_out; + } + + q = ns->queue; + } + + rq = nvme_alloc_request(q, req->cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY); + if (IS_ERR(rq)) { + rq = NULL; + status = NVME_SC_INTERNAL; + goto fail_out; + } + + if (req->sg_cnt) { + ret = nvmet_passthru_map_sg(req, rq); + if (unlikely(ret)) { + status = NVME_SC_INTERNAL; + goto fail_out; + } + } + + /* + * If there are effects for the command we are about to execute, or + * an end_req function we need to use nvme_execute_passthru_rq() + * synchronously in a work item seeing the end_req function and + * nvme_passthru_end() can't be called in the request done callback + * which is typically in interrupt context. + */ + effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode); + if (req->p.use_workqueue || effects) { + INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work); + req->p.rq = rq; + schedule_work(&req->p.work); + } else { + rq->end_io_data = req; + blk_execute_rq_nowait(rq->q, ns ? 
ns->disk : NULL, rq, 0, + nvmet_passthru_req_done); + } + + if (ns) + nvme_put_ns(ns); + + return; + +fail_out: + if (ns) + nvme_put_ns(ns); + nvmet_req_complete(req, status); + blk_put_request(rq); +} + +/* + * We need to emulate set host behaviour to ensure that any requested + * behaviour of the target's host matches the requested behaviour + * of the device's host and fail otherwise. + */ +static void nvmet_passthru_set_host_behaviour(struct nvmet_req *req) +{ + struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req); + struct nvme_feat_host_behavior *host; + u16 status = NVME_SC_INTERNAL; + int ret; + + host = kzalloc(sizeof(*host) * 2, GFP_KERNEL); + if (!host) + goto out_complete_req; + + ret = nvme_get_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0, + host, sizeof(*host), NULL); + if (ret) + goto out_free_host; + + status = nvmet_copy_from_sgl(req, 0, &host[1], sizeof(*host)); + if (status) + goto out_free_host; + + if (memcmp(&host[0], &host[1], sizeof(host[0]))) { + pr_warn("target host has requested different behaviour from the local host\n"); + status = NVME_SC_INTERNAL; + } + +out_free_host: + kfree(host); +out_complete_req: + nvmet_req_complete(req, status); +} + +static u16 nvmet_setup_passthru_command(struct nvmet_req *req) +{ + req->p.use_workqueue = false; + req->execute = nvmet_passthru_execute_cmd; + return NVME_SC_SUCCESS; +} + +u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req) +{ + switch (req->cmd->common.opcode) { + case nvme_cmd_resv_register: + case nvme_cmd_resv_report: + case nvme_cmd_resv_acquire: + case nvme_cmd_resv_release: + /* + * Reservations cannot be supported properly because the + * underlying device has no way of differentiating different + * hosts that connect via fabrics. This could potentially be + * emulated in the future if regular targets grow support for + * this feature. + */ + return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + } + + return nvmet_setup_passthru_command(req); +} + +/* + * Only features that are emulated or specifically allowed in the list are + * passed down to the controller. This function implements the allow list for + * both get and set features. + */ +static u16 nvmet_passthru_get_set_features(struct nvmet_req *req) +{ + switch (le32_to_cpu(req->cmd->features.fid)) { + case NVME_FEAT_ARBITRATION: + case NVME_FEAT_POWER_MGMT: + case NVME_FEAT_LBA_RANGE: + case NVME_FEAT_TEMP_THRESH: + case NVME_FEAT_ERR_RECOVERY: + case NVME_FEAT_VOLATILE_WC: + case NVME_FEAT_WRITE_ATOMIC: + case NVME_FEAT_AUTO_PST: + case NVME_FEAT_TIMESTAMP: + case NVME_FEAT_HCTM: + case NVME_FEAT_NOPSC: + case NVME_FEAT_RRL: + case NVME_FEAT_PLM_CONFIG: + case NVME_FEAT_PLM_WINDOW: + case NVME_FEAT_HOST_BEHAVIOR: + case NVME_FEAT_SANITIZE: + case NVME_FEAT_VENDOR_START ... 
NVME_FEAT_VENDOR_END: + return nvmet_setup_passthru_command(req); + + case NVME_FEAT_ASYNC_EVENT: + /* There is no support for forwarding ASYNC events */ + case NVME_FEAT_IRQ_COALESCE: + case NVME_FEAT_IRQ_CONFIG: + /* The IRQ settings will not apply to the target controller */ + case NVME_FEAT_HOST_MEM_BUF: + /* + * Any HMB that's set will not be passed through and will + * not work as expected + */ + case NVME_FEAT_SW_PROGRESS: + /* + * The Pre-Boot Software Load Count doesn't make much + * sense for a target to export + */ + case NVME_FEAT_RESV_MASK: + case NVME_FEAT_RESV_PERSIST: + /* No reservations, see nvmet_parse_passthru_io_cmd() */ + default: + return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + } +} + +u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req) +{ + /* + * Passthru all vendor specific commands + */ + if (req->cmd->common.opcode >= nvme_admin_vendor_start) + return nvmet_setup_passthru_command(req); + + switch (req->cmd->common.opcode) { + case nvme_admin_async_event: + req->execute = nvmet_execute_async_event; + return NVME_SC_SUCCESS; + case nvme_admin_keep_alive: + /* + * Most PCIe ctrls don't support keep alive cmd, we route keep + * alive to the non-passthru mode. In future please change this + * code when PCIe ctrls with keep alive support available. + */ + req->execute = nvmet_execute_keep_alive; + return NVME_SC_SUCCESS; + case nvme_admin_set_features: + switch (le32_to_cpu(req->cmd->features.fid)) { + case NVME_FEAT_ASYNC_EVENT: + case NVME_FEAT_KATO: + case NVME_FEAT_NUM_QUEUES: + case NVME_FEAT_HOST_ID: + req->execute = nvmet_execute_set_features; + return NVME_SC_SUCCESS; + case NVME_FEAT_HOST_BEHAVIOR: + req->execute = nvmet_passthru_set_host_behaviour; + return NVME_SC_SUCCESS; + default: + return nvmet_passthru_get_set_features(req); + } + break; + case nvme_admin_get_features: + switch (le32_to_cpu(req->cmd->features.fid)) { + case NVME_FEAT_ASYNC_EVENT: + case NVME_FEAT_KATO: + case NVME_FEAT_NUM_QUEUES: + case NVME_FEAT_HOST_ID: + req->execute = nvmet_execute_get_features; + return NVME_SC_SUCCESS; + default: + return nvmet_passthru_get_set_features(req); + } + break; + case nvme_admin_identify: + switch (req->cmd->identify.cns) { + case NVME_ID_CNS_CTRL: + req->execute = nvmet_passthru_execute_cmd; + req->p.use_workqueue = true; + return NVME_SC_SUCCESS; + case NVME_ID_CNS_NS: + req->execute = nvmet_passthru_execute_cmd; + req->p.use_workqueue = true; + return NVME_SC_SUCCESS; + default: + return nvmet_setup_passthru_command(req); + } + case nvme_admin_get_log_page: + return nvmet_setup_passthru_command(req); + default: + /* Reject commands not in the allowlist above */ + return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + } +} diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 1643005d21e3..d92535997687 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -312,6 +312,7 @@ enum { NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1, NVME_CTRL_ONCS_DSM = 1 << 2, NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3, + NVME_CTRL_ONCS_RESERVATIONS = 1 << 5, NVME_CTRL_ONCS_TIMESTAMP = 1 << 6, NVME_CTRL_VWC_PRESENT = 1 << 0, NVME_CTRL_OACS_SEC_SUPP = 1 << 0, @@ -982,6 +983,7 @@ enum nvme_admin_opcode { nvme_admin_security_recv = 0x82, nvme_admin_sanitize_nvm = 0x84, nvme_admin_get_lba_status = 0x86, + nvme_admin_vendor_start = 0xC0, }; #define nvme_admin_opcode_name(opcode) { opcode, #opcode } @@ -1045,6 +1047,8 @@ enum { NVME_FEAT_RESV_MASK = 0x82, NVME_FEAT_RESV_PERSIST = 0x83, NVME_FEAT_WRITE_PROTECT = 0x84, + NVME_FEAT_VENDOR_START = 0xC0, + 
NVME_FEAT_VENDOR_END = 0xFF, NVME_LOG_ERROR = 0x01, NVME_LOG_SMART = 0x02, NVME_LOG_FW_SLOT = 0x03, -- cgit v1.2.3 From 13685c4a08fca9dd76bf53bfcbadc044ab2a08cb Mon Sep 17 00:00:00 2001 From: Qais Yousef Date: Thu, 16 Jul 2020 12:03:45 +0100 Subject: sched/uclamp: Add a new sysctl to control RT default boost value RT tasks by default run at the highest capacity/performance level. When uclamp is selected this default behavior is retained by enforcing the requested uclamp.min (p->uclamp_req[UCLAMP_MIN]) of the RT tasks to be uclamp_none(UCLAMP_MAX), which is SCHED_CAPACITY_SCALE; the maximum value. This is also referred to as 'the default boost value of RT tasks'. See commit 1a00d999971c ("sched/uclamp: Set default clamps for RT tasks"). On battery powered devices, it is desired to control this default (currently hardcoded) behavior at runtime to reduce energy consumed by RT tasks. For example, a mobile device manufacturer where big.LITTLE architecture is dominant, the performance of the little cores varies across SoCs, and on high end ones the big cores could be too power hungry. Given the diversity of SoCs, the new knob allows manufactures to tune the best performance/power for RT tasks for the particular hardware they run on. They could opt to further tune the value when the user selects a different power saving mode or when the device is actively charging. The runtime aspect of it further helps in creating a single kernel image that can be run on multiple devices that require different tuning. Keep in mind that a lot of RT tasks in the system are created by the kernel. On Android for instance I can see over 50 RT tasks, only a handful of which created by the Android framework. To control the default behavior globally by system admins and device integrator, introduce the new sysctl_sched_uclamp_util_min_rt_default to change the default boost value of the RT tasks. I anticipate this to be mostly in the form of modifying the init script of a particular device. To avoid polluting the fast path with unnecessary code, the approach taken is to synchronously do the update by traversing all the existing tasks in the system. This could race with a concurrent fork(), which is dealt with by introducing sched_post_fork() function which will ensure the racy fork will get the right update applied. Tested on Juno-r2 in combination with the RT capacity awareness [1]. By default an RT task will go to the highest capacity CPU and run at the maximum frequency, which is particularly energy inefficient on high end mobile devices because the biggest core[s] are 'huge' and power hungry. With this patch the RT task can be controlled to run anywhere by default, and doesn't cause the frequency to be maximum all the time. Yet any task that really needs to be boosted can easily escape this default behavior by modifying its requested uclamp.min value (p->uclamp_req[UCLAMP_MIN]) via sched_setattr() syscall. 
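As an illustration (not part of this patch), a minimal userspace sketch of such an opt-out is shown below. The struct sched_attr layout and the flag values mirror include/uapi/linux/sched.h at the time of writing, and the raw syscall is used because glibc provides no sched_setattr() wrapper:

    /*
     * Illustrative only: an RT task requesting its own uclamp.min so it is
     * not affected by a lowered sched_util_clamp_min_rt_default.
     */
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    struct sched_attr {
            uint32_t size;
            uint32_t sched_policy;
            uint64_t sched_flags;
            int32_t  sched_nice;
            uint32_t sched_priority;
            uint64_t sched_runtime;
            uint64_t sched_deadline;
            uint64_t sched_period;
            uint32_t sched_util_min;
            uint32_t sched_util_max;
    };

    #define SCHED_FLAG_KEEP_POLICY     0x08
    #define SCHED_FLAG_KEEP_PARAMS     0x10
    #define SCHED_FLAG_UTIL_CLAMP_MIN  0x20

    static int boost_self(uint32_t util_min)
    {
            struct sched_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            /* Keep the current policy and priority, only update the clamp */
            attr.sched_flags = SCHED_FLAG_KEEP_POLICY |
                               SCHED_FLAG_KEEP_PARAMS |
                               SCHED_FLAG_UTIL_CLAMP_MIN;
            attr.sched_util_min = util_min; /* 0..SCHED_CAPACITY_SCALE (1024) */

            /* sched_setattr(pid = 0 means the calling thread) */
            return syscall(SYS_sched_setattr, 0, &attr, 0);
    }

System integrators, by contrast, would tune the global default from an init script, for example by writing to /proc/sys/kernel/sched_util_clamp_min_rt_default.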
[1] 804d402fb6f6: ("sched/rt: Make RT capacity-aware") Signed-off-by: Qais Yousef Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200716110347.19553-2-qais.yousef@arm.com --- include/linux/sched.h | 10 +++- include/linux/sched/sysctl.h | 1 + include/linux/sched/task.h | 1 + kernel/fork.c | 1 + kernel/sched/core.c | 119 ++++++++++++++++++++++++++++++++++++++++--- kernel/sysctl.c | 7 +++ 6 files changed, 131 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index adf0125190d4..a6bf77c34687 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -686,9 +686,15 @@ struct task_struct { struct sched_dl_entity dl; #ifdef CONFIG_UCLAMP_TASK - /* Clamp values requested for a scheduling entity */ + /* + * Clamp values requested for a scheduling entity. + * Must be updated with task_rq_lock() held. + */ struct uclamp_se uclamp_req[UCLAMP_CNT]; - /* Effective clamp values used for a scheduling entity */ + /* + * Effective clamp values used for a scheduling entity. + * Must be updated with task_rq_lock() held. + */ struct uclamp_se uclamp[UCLAMP_CNT]; #endif diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 24be30a40814..3c31ba88aca5 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -67,6 +67,7 @@ extern unsigned int sysctl_sched_dl_period_min; #ifdef CONFIG_UCLAMP_TASK extern unsigned int sysctl_sched_uclamp_util_min; extern unsigned int sysctl_sched_uclamp_util_max; +extern unsigned int sysctl_sched_uclamp_util_min_rt_default; #endif #ifdef CONFIG_CFS_BANDWIDTH diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 38359071236a..e7ddab095baf 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -55,6 +55,7 @@ extern asmlinkage void schedule_tail(struct task_struct *prev); extern void init_idle(struct task_struct *idle, int cpu); extern int sched_fork(unsigned long clone_flags, struct task_struct *p); +extern void sched_post_fork(struct task_struct *p); extern void sched_dead(struct task_struct *p); void __noreturn do_task_dead(void); diff --git a/kernel/fork.c b/kernel/fork.c index efc5493203ae..e75c2e41f3d1 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2304,6 +2304,7 @@ static __latent_entropy struct task_struct *copy_process( write_unlock_irq(&tasklist_lock); proc_fork_connector(p); + sched_post_fork(p); cgroup_post_fork(p, args); perf_event_fork(p); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index e44d83f3e0e6..12e1f3a2cabc 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -889,6 +889,23 @@ unsigned int sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE; /* Max allowed maximum utilization */ unsigned int sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE; +/* + * By default RT tasks run at the maximum performance point/capacity of the + * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to + * SCHED_CAPACITY_SCALE. + * + * This knob allows admins to change the default behavior when uclamp is being + * used. In battery powered devices, particularly, running at the maximum + * capacity and frequency will increase energy consumption and shorten the + * battery life. + * + * This knob only affects RT tasks that their uclamp_se->user_defined == false. + * + * This knob will not override the system default sched_util_clamp_min defined + * above. 
+ */ +unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE; + /* All clamps are required to be less or equal than these values */ static struct uclamp_se uclamp_default[UCLAMP_CNT]; @@ -991,6 +1008,64 @@ unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id, return uclamp_idle_value(rq, clamp_id, clamp_value); } +static void __uclamp_update_util_min_rt_default(struct task_struct *p) +{ + unsigned int default_util_min; + struct uclamp_se *uc_se; + + lockdep_assert_held(&p->pi_lock); + + uc_se = &p->uclamp_req[UCLAMP_MIN]; + + /* Only sync if user didn't override the default */ + if (uc_se->user_defined) + return; + + default_util_min = sysctl_sched_uclamp_util_min_rt_default; + uclamp_se_set(uc_se, default_util_min, false); +} + +static void uclamp_update_util_min_rt_default(struct task_struct *p) +{ + struct rq_flags rf; + struct rq *rq; + + if (!rt_task(p)) + return; + + /* Protect updates to p->uclamp_* */ + rq = task_rq_lock(p, &rf); + __uclamp_update_util_min_rt_default(p); + task_rq_unlock(rq, p, &rf); +} + +static void uclamp_sync_util_min_rt_default(void) +{ + struct task_struct *g, *p; + + /* + * copy_process() sysctl_uclamp + * uclamp_min_rt = X; + * write_lock(&tasklist_lock) read_lock(&tasklist_lock) + * // link thread smp_mb__after_spinlock() + * write_unlock(&tasklist_lock) read_unlock(&tasklist_lock); + * sched_post_fork() for_each_process_thread() + * __uclamp_sync_rt() __uclamp_sync_rt() + * + * Ensures that either sched_post_fork() will observe the new + * uclamp_min_rt or for_each_process_thread() will observe the new + * task. + */ + read_lock(&tasklist_lock); + smp_mb__after_spinlock(); + read_unlock(&tasklist_lock); + + rcu_read_lock(); + for_each_process_thread(g, p) + uclamp_update_util_min_rt_default(p); + rcu_read_unlock(); +} + static inline struct uclamp_se uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id) { @@ -1278,12 +1353,13 @@ int sysctl_sched_uclamp_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { bool update_root_tg = false; - int old_min, old_max; + int old_min, old_max, old_min_rt; int result; mutex_lock(&uclamp_mutex); old_min = sysctl_sched_uclamp_util_min; old_max = sysctl_sched_uclamp_util_max; + old_min_rt = sysctl_sched_uclamp_util_min_rt_default; result = proc_dointvec(table, write, buffer, lenp, ppos); if (result) @@ -1292,7 +1368,9 @@ int sysctl_sched_uclamp_handler(struct ctl_table *table, int write, goto done; if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max || - sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE) { + sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE || + sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) { + result = -EINVAL; goto undo; } @@ -1313,6 +1391,11 @@ int sysctl_sched_uclamp_handler(struct ctl_table *table, int write, uclamp_update_root_tg(); } + if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) { + static_branch_enable(&sched_uclamp_used); + uclamp_sync_util_min_rt_default(); + } + /* * We update all RUNNABLE tasks only when task groups are in use. 
* Otherwise, keep it simple and do just a lazy update at each next @@ -1324,6 +1407,7 @@ int sysctl_sched_uclamp_handler(struct ctl_table *table, int write, undo: sysctl_sched_uclamp_util_min = old_min; sysctl_sched_uclamp_util_max = old_max; + sysctl_sched_uclamp_util_min_rt_default = old_min_rt; done: mutex_unlock(&uclamp_mutex); @@ -1369,17 +1453,20 @@ static void __setscheduler_uclamp(struct task_struct *p, */ for_each_clamp_id(clamp_id) { struct uclamp_se *uc_se = &p->uclamp_req[clamp_id]; - unsigned int clamp_value = uclamp_none(clamp_id); /* Keep using defined clamps across class changes */ if (uc_se->user_defined) continue; - /* By default, RT tasks always get 100% boost */ + /* + * RT by default have a 100% boost value that could be modified + * at runtime. + */ if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN)) - clamp_value = uclamp_none(UCLAMP_MAX); + __uclamp_update_util_min_rt_default(p); + else + uclamp_se_set(uc_se, uclamp_none(clamp_id), false); - uclamp_se_set(uc_se, clamp_value, false); } if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP))) @@ -1400,6 +1487,10 @@ static void uclamp_fork(struct task_struct *p) { enum uclamp_id clamp_id; + /* + * We don't need to hold task_rq_lock() when updating p->uclamp_* here + * as the task is still at its early fork stages. + */ for_each_clamp_id(clamp_id) p->uclamp[clamp_id].active = false; @@ -1412,6 +1503,11 @@ static void uclamp_fork(struct task_struct *p) } } +static void uclamp_post_fork(struct task_struct *p) +{ + uclamp_update_util_min_rt_default(p); +} + static void __init init_uclamp_rq(struct rq *rq) { enum uclamp_id clamp_id; @@ -1462,6 +1558,7 @@ static inline int uclamp_validate(struct task_struct *p, static void __setscheduler_uclamp(struct task_struct *p, const struct sched_attr *attr) { } static inline void uclamp_fork(struct task_struct *p) { } +static inline void uclamp_post_fork(struct task_struct *p) { } static inline void init_uclamp(void) { } #endif /* CONFIG_UCLAMP_TASK */ @@ -3205,6 +3302,11 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) return 0; } +void sched_post_fork(struct task_struct *p) +{ + uclamp_post_fork(p); +} + unsigned long to_ratio(u64 period, u64 runtime) { if (runtime == RUNTIME_INF) @@ -5724,6 +5826,11 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, kattr.sched_nice = task_nice(p); #ifdef CONFIG_UCLAMP_TASK + /* + * This could race with another potential updater, but this is fine + * because it'll correctly read the old or the new value. We don't need + * to guarantee who wins the race as long as it doesn't return garbage. 
+ */ kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value; kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value; #endif diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 4aea67d3d552..1b4d2dc270a5 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -1815,6 +1815,13 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = sysctl_sched_uclamp_handler, }, + { + .procname = "sched_util_clamp_min_rt_default", + .data = &sysctl_sched_uclamp_util_min_rt_default, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = sysctl_sched_uclamp_handler, + }, #endif #ifdef CONFIG_SCHED_AUTOGROUP { -- cgit v1.2.3 From 556c811f24b30cc883733a2eaf9e939817589231 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Wed, 22 Jul 2020 17:03:09 +0300 Subject: RDMA/efa: Expose maximum TX doorbell batch The device reports the maximum number of bytes to be written before ringing the doorbell (zero means unlimited). This patch queries the max batch size and reports it back to the userspace library. Link: https://lore.kernel.org/r/20200722140312.3651-2-galpress@amazon.com Reviewed-by: Daniel Kranzdorf Reviewed-by: Firas JahJah Signed-off-by: Gal Pressman Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/efa/efa_admin_cmds_defs.h | 11 +++++++++++ drivers/infiniband/hw/efa/efa_com_cmd.c | 1 + drivers/infiniband/hw/efa/efa_com_cmd.h | 1 + drivers/infiniband/hw/efa/efa_verbs.c | 1 + include/uapi/rdma/efa-abi.h | 4 +++- 5 files changed, 17 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h index bef2bd291054..03e7388af06e 100644 --- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h +++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h @@ -632,6 +632,17 @@ struct efa_admin_feature_queue_attr_desc { /* Maximum number of SGEs for a single RDMA read WQE */ u16 max_wr_rdma_sges; + + /* + * Maximum number of bytes that can be written to SQ between two + * consecutive doorbells (in units of 64B). Driver must ensure that only + * complete WQEs are written to queue before issuing a doorbell. + * Examples: max_tx_batch=16 and WQE size = 64B, means up to 16 WQEs can + * be written to SQ between two consecutive doorbells. max_tx_batch=11 + * and WQE size = 128B, means up to 5 WQEs can be written to SQ between + * two consecutive doorbells. Zero means unlimited. 
+ */ + u16 max_tx_batch; }; struct efa_admin_feature_aenq_desc { diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c index fabd8df2e78f..53cfde5c43d8 100644 --- a/drivers/infiniband/hw/efa/efa_com_cmd.c +++ b/drivers/infiniband/hw/efa/efa_com_cmd.c @@ -480,6 +480,7 @@ int efa_com_get_device_attr(struct efa_com_dev *edev, result->max_llq_size = resp.u.queue_attr.max_llq_size; result->sub_cqs_per_cq = resp.u.queue_attr.sub_cqs_per_cq; result->max_wr_rdma_sge = resp.u.queue_attr.max_wr_rdma_sges; + result->max_tx_batch = resp.u.queue_attr.max_tx_batch; err = efa_com_get_feature(edev, &resp, EFA_ADMIN_NETWORK_ATTR); if (err) { diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h index 41ce4a476ee6..8df2a26d57d4 100644 --- a/drivers/infiniband/hw/efa/efa_com_cmd.h +++ b/drivers/infiniband/hw/efa/efa_com_cmd.h @@ -127,6 +127,7 @@ struct efa_com_get_device_attr_result { u16 max_sq_sge; u16 max_rq_sge; u16 max_wr_rdma_sge; + u16 max_tx_batch; u8 db_bar; }; diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c index 08313f7c73bc..f49d14cebe4a 100644 --- a/drivers/infiniband/hw/efa/efa_verbs.c +++ b/drivers/infiniband/hw/efa/efa_verbs.c @@ -1525,6 +1525,7 @@ int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata) resp.sub_cqs_per_cq = dev->dev_attr.sub_cqs_per_cq; resp.inline_buf_size = dev->dev_attr.inline_buf_size; resp.max_llq_size = dev->dev_attr.max_llq_size; + resp.max_tx_batch = dev->dev_attr.max_tx_batch; if (udata && udata->outlen) { err = ib_copy_to_udata(udata, &resp, diff --git a/include/uapi/rdma/efa-abi.h b/include/uapi/rdma/efa-abi.h index 53b6e2036a9b..10781763da37 100644 --- a/include/uapi/rdma/efa-abi.h +++ b/include/uapi/rdma/efa-abi.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */ /* - * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved. + * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved. */ #ifndef EFA_ABI_USER_H @@ -31,6 +31,8 @@ struct efa_ibv_alloc_ucontext_resp { __u16 sub_cqs_per_cq; __u16 inline_buf_size; __u32 max_llq_size; /* bytes */ + __u16 max_tx_batch; /* units of 64 bytes */ + __u8 reserved_90[6]; }; struct efa_ibv_alloc_pd_resp { -- cgit v1.2.3 From da2924bdca99768442c5e0ed0a9024ae79d62765 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Wed, 22 Jul 2020 17:03:10 +0300 Subject: RDMA/efa: Expose minimum SQ size The device reports the minimum SQ size required for creation. This patch queries the min SQ size and reports it back to the userspace library. 
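Together with max_tx_batch from the previous patch, this lets a userspace provider size and ring its SQ safely. A hypothetical provider-side sketch follows; the helper and struct names are illustrative and not taken from rdma-core, while the field semantics follow the efa-abi.h comments above (max_tx_batch in units of 64 bytes, zero meaning unlimited):

    #include <stdint.h>

    struct efa_ctx_limits {
            uint32_t tx_batch_bytes;   /* 0 means no doorbell batch limit */
            uint16_t min_sq_wr;
    };

    static void efa_apply_ucontext_limits(struct efa_ctx_limits *lim,
                                          uint16_t max_tx_batch,
                                          uint16_t min_sq_wr)
    {
            /* max_tx_batch is reported in units of 64 bytes */
            lim->tx_batch_bytes = (uint32_t)max_tx_batch * 64;
            lim->min_sq_wr = min_sq_wr;
    }

    static uint32_t efa_clamp_sq_depth(const struct efa_ctx_limits *lim,
                                       uint32_t requested_wr)
    {
            /* Never create an SQ shallower than the device minimum */
            return requested_wr < lim->min_sq_wr ? lim->min_sq_wr : requested_wr;
    }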
Link: https://lore.kernel.org/r/20200722140312.3651-3-galpress@amazon.com Reviewed-by: Firas JahJah Reviewed-by: Shadi Ammouri Signed-off-by: Gal Pressman Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/efa/efa_admin_cmds_defs.h | 4 ++-- drivers/infiniband/hw/efa/efa_com_cmd.c | 1 + drivers/infiniband/hw/efa/efa_com_cmd.h | 1 + drivers/infiniband/hw/efa/efa_verbs.c | 1 + include/uapi/rdma/efa-abi.h | 3 ++- 5 files changed, 7 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h index 03e7388af06e..5484b08bbc5d 100644 --- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h +++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h @@ -606,8 +606,8 @@ struct efa_admin_feature_queue_attr_desc { /* Number of sub-CQs to be created for each CQ */ u16 sub_cqs_per_cq; - /* MBZ */ - u16 reserved; + /* Minimum number of WQEs per SQ */ + u16 min_sq_depth; /* Maximum number of SGEs (buffers) allowed for a single send WQE */ u16 max_wr_send_sges; diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c index 53cfde5c43d8..6ac23627f65a 100644 --- a/drivers/infiniband/hw/efa/efa_com_cmd.c +++ b/drivers/infiniband/hw/efa/efa_com_cmd.c @@ -481,6 +481,7 @@ int efa_com_get_device_attr(struct efa_com_dev *edev, result->sub_cqs_per_cq = resp.u.queue_attr.sub_cqs_per_cq; result->max_wr_rdma_sge = resp.u.queue_attr.max_wr_rdma_sges; result->max_tx_batch = resp.u.queue_attr.max_tx_batch; + result->min_sq_depth = resp.u.queue_attr.min_sq_depth; err = efa_com_get_feature(edev, &resp, EFA_ADMIN_NETWORK_ATTR); if (err) { diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h index 8df2a26d57d4..190bac23f585 100644 --- a/drivers/infiniband/hw/efa/efa_com_cmd.h +++ b/drivers/infiniband/hw/efa/efa_com_cmd.h @@ -128,6 +128,7 @@ struct efa_com_get_device_attr_result { u16 max_rq_sge; u16 max_wr_rdma_sge; u16 max_tx_batch; + u16 min_sq_depth; u8 db_bar; }; diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c index f49d14cebe4a..26102ab333b2 100644 --- a/drivers/infiniband/hw/efa/efa_verbs.c +++ b/drivers/infiniband/hw/efa/efa_verbs.c @@ -1526,6 +1526,7 @@ int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata) resp.inline_buf_size = dev->dev_attr.inline_buf_size; resp.max_llq_size = dev->dev_attr.max_llq_size; resp.max_tx_batch = dev->dev_attr.max_tx_batch; + resp.min_sq_wr = dev->dev_attr.min_sq_depth; if (udata && udata->outlen) { err = ib_copy_to_udata(udata, &resp, diff --git a/include/uapi/rdma/efa-abi.h b/include/uapi/rdma/efa-abi.h index 10781763da37..7ef2306f8dd4 100644 --- a/include/uapi/rdma/efa-abi.h +++ b/include/uapi/rdma/efa-abi.h @@ -32,7 +32,8 @@ struct efa_ibv_alloc_ucontext_resp { __u16 inline_buf_size; __u32 max_llq_size; /* bytes */ __u16 max_tx_batch; /* units of 64 bytes */ - __u8 reserved_90[6]; + __u16 min_sq_wr; + __u8 reserved_a0[4]; }; struct efa_ibv_alloc_pd_resp { -- cgit v1.2.3 From a5d87b698547233321466b2dc91271f5855a4df6 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Wed, 22 Jul 2020 17:03:11 +0300 Subject: RDMA/efa: User/kernel compatibility handshake mechanism Introduce a mechanism that performs an handshake between the userspace provider and kernel driver which verifies that the user supports all required features in order to operate correctly. 
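For illustration, the userspace half of that handshake might look like the following sketch; the command struct and comp-mask bits come from the efa-abi.h additions in this patch, while the helper itself is hypothetical:

    /*
     * Illustrative only: the provider acknowledges every response field it
     * understands before asking the kernel for a context.  The kernel-side
     * check, efa_user_comp_handshake(), appears in the diff below.
     */
    static void efa_fill_alloc_ucontext_cmd(struct efa_ibv_alloc_ucontext_cmd *cmd)
    {
            cmd->comp_mask = EFA_ALLOC_UCONTEXT_CMD_COMP_TX_BATCH |
                             EFA_ALLOC_UCONTEXT_CMD_COMP_MIN_SQ_WR;
    }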
The handshake verifies the needed functionality by comparing the reported device caps and the provider caps. If the device reports a non-zero capability the appropriate comp mask is required from the userspace provider in order to allocate the context. Link: https://lore.kernel.org/r/20200722140312.3651-4-galpress@amazon.com Reviewed-by: Shadi Ammouri Reviewed-by: Yossi Leybovich Signed-off-by: Gal Pressman Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/efa/efa_verbs.c | 40 +++++++++++++++++++++++++++++++++++ include/uapi/rdma/efa-abi.h | 10 +++++++++ 2 files changed, 50 insertions(+) (limited to 'include') diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c index 26102ab333b2..fda175836fb6 100644 --- a/drivers/infiniband/hw/efa/efa_verbs.c +++ b/drivers/infiniband/hw/efa/efa_verbs.c @@ -1501,11 +1501,39 @@ static int efa_dealloc_uar(struct efa_dev *dev, u16 uarn) return efa_com_dealloc_uar(&dev->edev, ¶ms); } +#define EFA_CHECK_USER_COMP(_dev, _comp_mask, _attr, _mask, _attr_str) \ + (_attr_str = (!(_dev)->dev_attr._attr || ((_comp_mask) & (_mask))) ? \ + NULL : #_attr) + +static int efa_user_comp_handshake(const struct ib_ucontext *ibucontext, + const struct efa_ibv_alloc_ucontext_cmd *cmd) +{ + struct efa_dev *dev = to_edev(ibucontext->device); + char *attr_str; + + if (EFA_CHECK_USER_COMP(dev, cmd->comp_mask, max_tx_batch, + EFA_ALLOC_UCONTEXT_CMD_COMP_TX_BATCH, attr_str)) + goto err; + + if (EFA_CHECK_USER_COMP(dev, cmd->comp_mask, min_sq_depth, + EFA_ALLOC_UCONTEXT_CMD_COMP_MIN_SQ_WR, + attr_str)) + goto err; + + return 0; + +err: + ibdev_dbg(&dev->ibdev, "Userspace handshake failed for %s attribute\n", + attr_str); + return -EOPNOTSUPP; +} + int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata) { struct efa_ucontext *ucontext = to_eucontext(ibucontext); struct efa_dev *dev = to_edev(ibucontext->device); struct efa_ibv_alloc_ucontext_resp resp = {}; + struct efa_ibv_alloc_ucontext_cmd cmd = {}; struct efa_com_alloc_uar_result result; int err; @@ -1514,6 +1542,18 @@ int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata) * we will ack input fields in our response. */ + err = ib_copy_from_udata(&cmd, udata, + min(sizeof(cmd), udata->inlen)); + if (err) { + ibdev_dbg(&dev->ibdev, + "Cannot copy udata for alloc_ucontext\n"); + goto err_out; + } + + err = efa_user_comp_handshake(ibucontext, &cmd); + if (err) + goto err_out; + err = efa_com_alloc_uar(&dev->edev, &result); if (err) goto err_out; diff --git a/include/uapi/rdma/efa-abi.h b/include/uapi/rdma/efa-abi.h index 7ef2306f8dd4..507a2862bedb 100644 --- a/include/uapi/rdma/efa-abi.h +++ b/include/uapi/rdma/efa-abi.h @@ -20,6 +20,16 @@ * hex bit offset of the field. 
*/ +enum { + EFA_ALLOC_UCONTEXT_CMD_COMP_TX_BATCH = 1 << 0, + EFA_ALLOC_UCONTEXT_CMD_COMP_MIN_SQ_WR = 1 << 1, +}; + +struct efa_ibv_alloc_ucontext_cmd { + __u32 comp_mask; + __u8 reserved_20[4]; +}; + enum efa_ibv_user_cmds_supp_udata { EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE = 1 << 0, EFA_USER_CMDS_SUPP_UDATA_CREATE_AH = 1 << 1, -- cgit v1.2.3 From c75d42e4c768c403f259f6c7f6217c850cf11be9 Mon Sep 17 00:00:00 2001 From: Alastair D'Silva Date: Wed, 15 Apr 2020 11:23:42 +1000 Subject: ocxl: Remove unnecessary externs Function declarations don't need externs, remove the existing ones so they are consistent with newer code Signed-off-by: Alastair D'Silva Acked-by: Andrew Donnellan Acked-by: Frederic Barrat Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200415012343.919255-2-alastair@d-silva.org --- arch/powerpc/include/asm/pnv-ocxl.h | 40 ++++++++++++++++++------------------- include/misc/ocxl.h | 6 +++--- 2 files changed, 22 insertions(+), 24 deletions(-) (limited to 'include') diff --git a/arch/powerpc/include/asm/pnv-ocxl.h b/arch/powerpc/include/asm/pnv-ocxl.h index 7de82647e761..ee79d2cd9fb6 100644 --- a/arch/powerpc/include/asm/pnv-ocxl.h +++ b/arch/powerpc/include/asm/pnv-ocxl.h @@ -9,28 +9,26 @@ #define PNV_OCXL_TL_BITS_PER_RATE 4 #define PNV_OCXL_TL_RATE_BUF_SIZE ((PNV_OCXL_TL_MAX_TEMPLATE+1) * PNV_OCXL_TL_BITS_PER_RATE / 8) -extern int pnv_ocxl_get_actag(struct pci_dev *dev, u16 *base, u16 *enabled, - u16 *supported); -extern int pnv_ocxl_get_pasid_count(struct pci_dev *dev, int *count); +int pnv_ocxl_get_actag(struct pci_dev *dev, u16 *base, u16 *enabled, u16 *supported); +int pnv_ocxl_get_pasid_count(struct pci_dev *dev, int *count); -extern int pnv_ocxl_get_tl_cap(struct pci_dev *dev, long *cap, +int pnv_ocxl_get_tl_cap(struct pci_dev *dev, long *cap, char *rate_buf, int rate_buf_size); -extern int pnv_ocxl_set_tl_conf(struct pci_dev *dev, long cap, - uint64_t rate_buf_phys, int rate_buf_size); - -extern int pnv_ocxl_get_xsl_irq(struct pci_dev *dev, int *hwirq); -extern void pnv_ocxl_unmap_xsl_regs(void __iomem *dsisr, void __iomem *dar, - void __iomem *tfc, void __iomem *pe_handle); -extern int pnv_ocxl_map_xsl_regs(struct pci_dev *dev, void __iomem **dsisr, - void __iomem **dar, void __iomem **tfc, - void __iomem **pe_handle); - -extern int pnv_ocxl_spa_setup(struct pci_dev *dev, void *spa_mem, int PE_mask, - void **platform_data); -extern void pnv_ocxl_spa_release(void *platform_data); -extern int pnv_ocxl_spa_remove_pe_from_cache(void *platform_data, int pe_handle); - -extern int pnv_ocxl_alloc_xive_irq(u32 *irq, u64 *trigger_addr); -extern void pnv_ocxl_free_xive_irq(u32 irq); +int pnv_ocxl_set_tl_conf(struct pci_dev *dev, long cap, + uint64_t rate_buf_phys, int rate_buf_size); + +int pnv_ocxl_get_xsl_irq(struct pci_dev *dev, int *hwirq); +void pnv_ocxl_unmap_xsl_regs(void __iomem *dsisr, void __iomem *dar, + void __iomem *tfc, void __iomem *pe_handle); +int pnv_ocxl_map_xsl_regs(struct pci_dev *dev, void __iomem **dsisr, + void __iomem **dar, void __iomem **tfc, + void __iomem **pe_handle); + +int pnv_ocxl_spa_setup(struct pci_dev *dev, void *spa_mem, int PE_mask, void **platform_data); +void pnv_ocxl_spa_release(void *platform_data); +int pnv_ocxl_spa_remove_pe_from_cache(void *platform_data, int pe_handle); + +int pnv_ocxl_alloc_xive_irq(u32 *irq, u64 *trigger_addr); +void pnv_ocxl_free_xive_irq(u32 irq); #endif /* _ASM_PNV_OCXL_H */ diff --git a/include/misc/ocxl.h b/include/misc/ocxl.h index 06dd5839e438..0a762e387418 100644 --- a/include/misc/ocxl.h +++ 
b/include/misc/ocxl.h @@ -173,7 +173,7 @@ int ocxl_context_detach(struct ocxl_context *ctx); * * Returns 0 on success, negative on failure */ -extern int ocxl_afu_irq_alloc(struct ocxl_context *ctx, int *irq_id); +int ocxl_afu_irq_alloc(struct ocxl_context *ctx, int *irq_id); /** * Frees an IRQ associated with an AFU context @@ -182,7 +182,7 @@ extern int ocxl_afu_irq_alloc(struct ocxl_context *ctx, int *irq_id); * * Returns 0 on success, negative on failure */ -extern int ocxl_afu_irq_free(struct ocxl_context *ctx, int irq_id); +int ocxl_afu_irq_free(struct ocxl_context *ctx, int irq_id); /** * Gets the address of the trigger page for an IRQ @@ -193,7 +193,7 @@ extern int ocxl_afu_irq_free(struct ocxl_context *ctx, int irq_id); * * returns the trigger page address, or 0 if the IRQ is not valid */ -extern u64 ocxl_afu_irq_get_addr(struct ocxl_context *ctx, int irq_id); +u64 ocxl_afu_irq_get_addr(struct ocxl_context *ctx, int irq_id); /** * Provide a callback to be called when an IRQ is triggered -- cgit v1.2.3 From 3591538a31af37cf6a2d83f1da99e651a822af8b Mon Sep 17 00:00:00 2001 From: Alastair D'Silva Date: Wed, 15 Apr 2020 11:23:43 +1000 Subject: ocxl: Address kernel doc errors & warnings This patch addresses warnings and errors from the kernel doc scripts for the OpenCAPI driver. It also makes minor tweaks to make the docs more consistent. Signed-off-by: Alastair D'Silva Acked-by: Andrew Donnellan Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200415012343.919255-3-alastair@d-silva.org --- drivers/misc/ocxl/config.c | 24 +++++----- drivers/misc/ocxl/ocxl_internal.h | 9 ++-- include/misc/ocxl.h | 96 ++++++++++++++++----------------------- 3 files changed, 55 insertions(+), 74 deletions(-) (limited to 'include') diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c index 42f7a1298775..4d490b92d951 100644 --- a/drivers/misc/ocxl/config.c +++ b/drivers/misc/ocxl/config.c @@ -344,16 +344,16 @@ static int read_afu_info(struct pci_dev *dev, struct ocxl_fn_config *fn, } /** - * Read the template version from the AFU - * dev: the device for the AFU - * fn: the AFU offsets - * len: outputs the template length - * version: outputs the major<<8,minor version + * read_template_version() - Read the template version from the AFU + * @dev: the device for the AFU + * @fn: the AFU offsets + * @len: outputs the template length + * @version: outputs the major<<8,minor version * * Returns 0 on success, negative on failure */ static int read_template_version(struct pci_dev *dev, struct ocxl_fn_config *fn, - u16 *len, u16 *version) + u16 *len, u16 *version) { u32 val32; u8 major, minor; @@ -547,16 +547,16 @@ static int validate_afu(struct pci_dev *dev, struct ocxl_afu_config *afu) } /** - * Populate AFU metadata regarding LPC memory - * dev: the device for the AFU - * fn: the AFU offsets - * afu: the AFU struct to populate the LPC metadata into + * read_afu_lpc_memory_info() - Populate AFU metadata regarding LPC memory + * @dev: the device for the AFU + * @fn: the AFU offsets + * @afu: the AFU struct to populate the LPC metadata into * * Returns 0 on success, negative on failure */ static int read_afu_lpc_memory_info(struct pci_dev *dev, - struct ocxl_fn_config *fn, - struct ocxl_afu_config *afu) + struct ocxl_fn_config *fn, + struct ocxl_afu_config *afu) { int rc; u32 val32; diff --git a/drivers/misc/ocxl/ocxl_internal.h b/drivers/misc/ocxl/ocxl_internal.h index af9a84aeee6f..0bad0a123af6 100644 --- a/drivers/misc/ocxl/ocxl_internal.h +++ b/drivers/misc/ocxl/ocxl_internal.h 
@@ -128,11 +128,12 @@ int ocxl_config_check_afu_index(struct pci_dev *dev, struct ocxl_fn_config *fn, int afu_idx); /** - * Update values within a Process Element + * ocxl_link_update_pe() - Update values within a Process Element + * @link_handle: the link handle associated with the process element + * @pasid: the PASID for the AFU context + * @tid: the new thread id for the process element * - * link_handle: the link handle associated with the process element - * pasid: the PASID for the AFU context - * tid: the new thread id for the process element + * Returns 0 on success */ int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid); diff --git a/include/misc/ocxl.h b/include/misc/ocxl.h index 0a762e387418..357ef1aadbc0 100644 --- a/include/misc/ocxl.h +++ b/include/misc/ocxl.h @@ -62,8 +62,7 @@ struct ocxl_context; // Device detection & initialisation /** - * Open an OpenCAPI function on an OpenCAPI device - * + * ocxl_function_open() - Open an OpenCAPI function on an OpenCAPI device * @dev: The PCI device that contains the function * * Returns an opaque pointer to the function, or an error pointer (check with IS_ERR) @@ -71,8 +70,7 @@ struct ocxl_context; struct ocxl_fn *ocxl_function_open(struct pci_dev *dev); /** - * Get the list of AFUs associated with a PCI function device - * + * ocxl_function_afu_list() - Get the list of AFUs associated with a PCI function device * Returns a list of struct ocxl_afu * * * @fn: The OpenCAPI function containing the AFUs @@ -80,8 +78,7 @@ struct ocxl_fn *ocxl_function_open(struct pci_dev *dev); struct list_head *ocxl_function_afu_list(struct ocxl_fn *fn); /** - * Fetch an AFU instance from an OpenCAPI function - * + * ocxl_function_fetch_afu() - Fetch an AFU instance from an OpenCAPI function * @fn: The OpenCAPI function to get the AFU from * @afu_idx: The index of the AFU to get * @@ -92,23 +89,20 @@ struct list_head *ocxl_function_afu_list(struct ocxl_fn *fn); struct ocxl_afu *ocxl_function_fetch_afu(struct ocxl_fn *fn, u8 afu_idx); /** - * Take a reference to an AFU - * + * ocxl_afu_get() - Take a reference to an AFU * @afu: The AFU to increment the reference count on */ void ocxl_afu_get(struct ocxl_afu *afu); /** - * Release a reference to an AFU - * + * ocxl_afu_put() - Release a reference to an AFU * @afu: The AFU to decrement the reference count on */ void ocxl_afu_put(struct ocxl_afu *afu); /** - * Get the configuration information for an OpenCAPI function - * + * ocxl_function_config() - Get the configuration information for an OpenCAPI function * @fn: The OpenCAPI function to get the config for * * Returns the function config, or NULL on error @@ -116,8 +110,7 @@ void ocxl_afu_put(struct ocxl_afu *afu); const struct ocxl_fn_config *ocxl_function_config(struct ocxl_fn *fn); /** - * Close an OpenCAPI function - * + * ocxl_function_close() - Close an OpenCAPI function * This will free any AFUs previously retrieved from the function, and * detach and associated contexts. The contexts must by freed by the caller. 
* @@ -129,8 +122,7 @@ void ocxl_function_close(struct ocxl_fn *fn); // Context allocation /** - * Allocate an OpenCAPI context - * + * ocxl_context_alloc() - Allocate an OpenCAPI context * @context: The OpenCAPI context to allocate, must be freed with ocxl_context_free * @afu: The AFU the context belongs to * @mapping: The mapping to unmap when the context is closed (may be NULL) @@ -139,14 +131,13 @@ int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu, struct address_space *mapping); /** - * Free an OpenCAPI context - * + * ocxl_context_free() - Free an OpenCAPI context * @ctx: The OpenCAPI context to free */ void ocxl_context_free(struct ocxl_context *ctx); /** - * Grant access to an MM to an OpenCAPI context + * ocxl_context_attach() - Grant access to an MM to an OpenCAPI context * @ctx: The OpenCAPI context to attach * @amr: The value of the AMR register to restrict access * @mm: The mm to attach to the context @@ -157,7 +148,7 @@ int ocxl_context_attach(struct ocxl_context *ctx, u64 amr, struct mm_struct *mm); /** - * Detach an MM from an OpenCAPI context + * ocxl_context_detach() - Detach an MM from an OpenCAPI context * @ctx: The OpenCAPI context to attach * * Returns 0 on success, negative on failure @@ -167,7 +158,7 @@ int ocxl_context_detach(struct ocxl_context *ctx); // AFU IRQs /** - * Allocate an IRQ associated with an AFU context + * ocxl_afu_irq_alloc() - Allocate an IRQ associated with an AFU context * @ctx: the AFU context * @irq_id: out, the IRQ ID * @@ -176,7 +167,7 @@ int ocxl_context_detach(struct ocxl_context *ctx); int ocxl_afu_irq_alloc(struct ocxl_context *ctx, int *irq_id); /** - * Frees an IRQ associated with an AFU context + * ocxl_afu_irq_free() - Frees an IRQ associated with an AFU context * @ctx: the AFU context * @irq_id: the IRQ ID * @@ -185,7 +176,7 @@ int ocxl_afu_irq_alloc(struct ocxl_context *ctx, int *irq_id); int ocxl_afu_irq_free(struct ocxl_context *ctx, int irq_id); /** - * Gets the address of the trigger page for an IRQ + * ocxl_afu_irq_get_addr() - Gets the address of the trigger page for an IRQ * This can then be provided to an AFU which will write to that * page to trigger the IRQ. * @ctx: The AFU context that the IRQ is associated with @@ -196,7 +187,7 @@ int ocxl_afu_irq_free(struct ocxl_context *ctx, int irq_id); u64 ocxl_afu_irq_get_addr(struct ocxl_context *ctx, int irq_id); /** - * Provide a callback to be called when an IRQ is triggered + * ocxl_irq_set_handler() - Provide a callback to be called when an IRQ is triggered * @ctx: The AFU context that the IRQ is associated with * @irq_id: The IRQ ID * @handler: the callback to be called when the IRQ is triggered @@ -213,8 +204,7 @@ int ocxl_irq_set_handler(struct ocxl_context *ctx, int irq_id, // AFU Metadata /** - * Get a pointer to the config for an AFU - * + * ocxl_afu_config() - Get a pointer to the config for an AFU * @afu: a pointer to the AFU to get the config for * * Returns a pointer to the AFU config @@ -222,27 +212,24 @@ int ocxl_irq_set_handler(struct ocxl_context *ctx, int irq_id, struct ocxl_afu_config *ocxl_afu_config(struct ocxl_afu *afu); /** - * Assign opaque hardware specific information to an OpenCAPI AFU. - * - * @dev: The PCI device associated with the OpenCAPI device + * ocxl_afu_set_private() - Assign opaque hardware specific information to an OpenCAPI AFU. 
+ * @afu: The OpenCAPI AFU * @private: the opaque hardware specific information to assign to the driver */ void ocxl_afu_set_private(struct ocxl_afu *afu, void *private); /** - * Fetch the hardware specific information associated with an external OpenCAPI - * AFU. This may be consumed by an external OpenCAPI driver. - * - * @afu: The AFU + * ocxl_afu_get_private() - Fetch the hardware specific information associated with + * an external OpenCAPI AFU. This may be consumed by an external OpenCAPI driver. + * @afu: The OpenCAPI AFU * * Returns the opaque pointer associated with the device, or NULL if not set */ -void *ocxl_afu_get_private(struct ocxl_afu *dev); +void *ocxl_afu_get_private(struct ocxl_afu *afu); // Global MMIO /** - * Read a 32 bit value from global MMIO - * + * ocxl_global_mmio_read32() - Read a 32 bit value from global MMIO * @afu: The AFU * @offset: The Offset from the start of MMIO * @endian: the endianness that the MMIO data is in @@ -251,11 +238,10 @@ void *ocxl_afu_get_private(struct ocxl_afu *dev); * Returns 0 for success, negative on error */ int ocxl_global_mmio_read32(struct ocxl_afu *afu, size_t offset, - enum ocxl_endian endian, u32 *val); + enum ocxl_endian endian, u32 *val); /** - * Read a 64 bit value from global MMIO - * + * ocxl_global_mmio_read64() - Read a 64 bit value from global MMIO * @afu: The AFU * @offset: The Offset from the start of MMIO * @endian: the endianness that the MMIO data is in @@ -264,11 +250,10 @@ int ocxl_global_mmio_read32(struct ocxl_afu *afu, size_t offset, * Returns 0 for success, negative on error */ int ocxl_global_mmio_read64(struct ocxl_afu *afu, size_t offset, - enum ocxl_endian endian, u64 *val); + enum ocxl_endian endian, u64 *val); /** - * Write a 32 bit value to global MMIO - * + * ocxl_global_mmio_write32() - Write a 32 bit value to global MMIO * @afu: The AFU * @offset: The Offset from the start of MMIO * @endian: the endianness that the MMIO data is in @@ -277,11 +262,10 @@ int ocxl_global_mmio_read64(struct ocxl_afu *afu, size_t offset, * Returns 0 for success, negative on error */ int ocxl_global_mmio_write32(struct ocxl_afu *afu, size_t offset, - enum ocxl_endian endian, u32 val); + enum ocxl_endian endian, u32 val); /** - * Write a 64 bit value to global MMIO - * + * ocxl_global_mmio_write64() - Write a 64 bit value to global MMIO * @afu: The AFU * @offset: The Offset from the start of MMIO * @endian: the endianness that the MMIO data is in @@ -290,11 +274,10 @@ int ocxl_global_mmio_write32(struct ocxl_afu *afu, size_t offset, * Returns 0 for success, negative on error */ int ocxl_global_mmio_write64(struct ocxl_afu *afu, size_t offset, - enum ocxl_endian endian, u64 val); + enum ocxl_endian endian, u64 val); /** - * Set bits in a 32 bit global MMIO register - * + * ocxl_global_mmio_set32() - Set bits in a 32 bit global MMIO register * @afu: The AFU * @offset: The Offset from the start of MMIO * @endian: the endianness that the MMIO data is in @@ -303,11 +286,10 @@ int ocxl_global_mmio_write64(struct ocxl_afu *afu, size_t offset, * Returns 0 for success, negative on error */ int ocxl_global_mmio_set32(struct ocxl_afu *afu, size_t offset, - enum ocxl_endian endian, u32 mask); + enum ocxl_endian endian, u32 mask); /** - * Set bits in a 64 bit global MMIO register - * + * ocxl_global_mmio_set64() - Set bits in a 64 bit global MMIO register * @afu: The AFU * @offset: The Offset from the start of MMIO * @endian: the endianness that the MMIO data is in @@ -316,11 +298,10 @@ int ocxl_global_mmio_set32(struct ocxl_afu *afu, 
size_t offset, * Returns 0 for success, negative on error */ int ocxl_global_mmio_set64(struct ocxl_afu *afu, size_t offset, - enum ocxl_endian endian, u64 mask); + enum ocxl_endian endian, u64 mask); /** - * Set bits in a 32 bit global MMIO register - * + * ocxl_global_mmio_clear32() - Set bits in a 32 bit global MMIO register * @afu: The AFU * @offset: The Offset from the start of MMIO * @endian: the endianness that the MMIO data is in @@ -329,11 +310,10 @@ int ocxl_global_mmio_set64(struct ocxl_afu *afu, size_t offset, * Returns 0 for success, negative on error */ int ocxl_global_mmio_clear32(struct ocxl_afu *afu, size_t offset, - enum ocxl_endian endian, u32 mask); + enum ocxl_endian endian, u32 mask); /** - * Set bits in a 64 bit global MMIO register - * + * ocxl_global_mmio_clear64() - Set bits in a 64 bit global MMIO register * @afu: The AFU * @offset: The Offset from the start of MMIO * @endian: the endianness that the MMIO data is in @@ -342,7 +322,7 @@ int ocxl_global_mmio_clear32(struct ocxl_afu *afu, size_t offset, * Returns 0 for success, negative on error */ int ocxl_global_mmio_clear64(struct ocxl_afu *afu, size_t offset, - enum ocxl_endian endian, u64 mask); + enum ocxl_endian endian, u64 mask); // Functions left here are for compatibility with the cxlflash driver -- cgit v1.2.3 From f891f19736bdf404845f97d8038054be37160ea8 Mon Sep 17 00:00:00 2001 From: Hari Bathini Date: Wed, 29 Jul 2020 17:09:19 +0530 Subject: kexec_file: Allow archs to handle special regions while locating memory hole Some architectures may have special memory regions, within the given memory range, which can't be used for the buffer in a kexec segment. Implement weak arch_kexec_locate_mem_hole() definition which arch code may override, to take care of special regions, while trying to locate a memory hole. Also, add the missing declarations for arch overridable functions and and drop the __weak descriptors in the declarations to avoid non-weak definitions from becoming weak. 
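As a rough illustration of the override hook this commit introduces, an architecture could supply its own arch_kexec_locate_mem_hole() along the following lines. This is a minimal sketch only: the no-go window and the post-hoc overlap check are invented for this example; a real implementation would typically exclude such regions up front while walking the memory ranges rather than rejecting a candidate after the fact.

	#include <linux/kexec.h>
	#include <linux/errno.h>

	/* Hypothetical arch-specific no-go window, for illustration only. */
	#define ARCH_KEXEC_NOGO_START	0x10000000UL
	#define ARCH_KEXEC_NOGO_END	0x20000000UL

	int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
	{
		int ret;

		/* Reuse the generic walker to find a candidate hole. */
		ret = kexec_locate_mem_hole(kbuf);
		if (ret)
			return ret;

		/* Reject a candidate that overlaps the arch-specific region. */
		if (kbuf->mem < ARCH_KEXEC_NOGO_END &&
		    kbuf->mem + kbuf->memsz > ARCH_KEXEC_NOGO_START)
			return -EADDRNOTAVAIL;

		return 0;
	}
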
Signed-off-by: Hari Bathini Tested-by: Pingfan Liu Reviewed-by: Thiago Jung Bauermann Acked-by: Dave Young Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/159602273603.575379.17665852963340380839.stgit@hbathini --- include/linux/kexec.h | 29 ++++++++++++++++++----------- kernel/kexec_file.c | 16 ++++++++++++++-- 2 files changed, 32 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/include/linux/kexec.h b/include/linux/kexec.h index ea67910ae6b7..9e93bef52968 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -183,17 +183,24 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name, bool get_value); void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name); -int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf, - unsigned long buf_len); -void * __weak arch_kexec_kernel_image_load(struct kimage *image); -int __weak arch_kexec_apply_relocations_add(struct purgatory_info *pi, - Elf_Shdr *section, - const Elf_Shdr *relsec, - const Elf_Shdr *symtab); -int __weak arch_kexec_apply_relocations(struct purgatory_info *pi, - Elf_Shdr *section, - const Elf_Shdr *relsec, - const Elf_Shdr *symtab); +/* Architectures may override the below functions */ +int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, + unsigned long buf_len); +void *arch_kexec_kernel_image_load(struct kimage *image); +int arch_kexec_apply_relocations_add(struct purgatory_info *pi, + Elf_Shdr *section, + const Elf_Shdr *relsec, + const Elf_Shdr *symtab); +int arch_kexec_apply_relocations(struct purgatory_info *pi, + Elf_Shdr *section, + const Elf_Shdr *relsec, + const Elf_Shdr *symtab); +int arch_kimage_file_post_load_cleanup(struct kimage *image); +#ifdef CONFIG_KEXEC_SIG +int arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, + unsigned long buf_len); +#endif +int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf); extern int kexec_add_buffer(struct kexec_buf *kbuf); int kexec_locate_mem_hole(struct kexec_buf *kbuf); diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index bb05fd52de85..eb42d2efa16a 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c @@ -657,6 +657,19 @@ int kexec_locate_mem_hole(struct kexec_buf *kbuf) return ret == 1 ? 0 : -EADDRNOTAVAIL; } +/** + * arch_kexec_locate_mem_hole - Find free memory to place the segments. + * @kbuf: Parameters for the memory search. + * + * On success, kbuf->mem will have the start address of the memory region found. + * + * Return: 0 on success, negative errno on error. + */ +int __weak arch_kexec_locate_mem_hole(struct kexec_buf *kbuf) +{ + return kexec_locate_mem_hole(kbuf); +} + /** * kexec_add_buffer - place a buffer in a kexec segment * @kbuf: Buffer contents and memory parameters. @@ -669,7 +682,6 @@ int kexec_locate_mem_hole(struct kexec_buf *kbuf) */ int kexec_add_buffer(struct kexec_buf *kbuf) { - struct kexec_segment *ksegment; int ret; @@ -697,7 +709,7 @@ int kexec_add_buffer(struct kexec_buf *kbuf) kbuf->buf_align = max(kbuf->buf_align, PAGE_SIZE); /* Walk the RAM ranges and allocate a suitable range for the buffer */ - ret = kexec_locate_mem_hole(kbuf); + ret = arch_kexec_locate_mem_hole(kbuf); if (ret) return ret; -- cgit v1.2.3 From 7ca8cf5347f720b07a0b32a924b768f5710547e7 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 29 Jul 2020 22:31:05 +1000 Subject: locking/atomic: Move ATOMIC_INIT into linux/types.h This patch moves ATOMIC_INIT from asm/atomic.h into linux/types.h. 
This allows users of atomic_t to use ATOMIC_INIT without having to include atomic.h as that way may lead to header loops. Signed-off-by: Herbert Xu Signed-off-by: Peter Zijlstra (Intel) Acked-by: Waiman Long Link: https://lkml.kernel.org/r/20200729123105.GB7047@gondor.apana.org.au --- arch/alpha/include/asm/atomic.h | 1 - arch/arc/include/asm/atomic.h | 2 -- arch/arm/include/asm/atomic.h | 2 -- arch/arm64/include/asm/atomic.h | 2 -- arch/h8300/include/asm/atomic.h | 2 -- arch/hexagon/include/asm/atomic.h | 2 -- arch/ia64/include/asm/atomic.h | 1 - arch/m68k/include/asm/atomic.h | 2 -- arch/mips/include/asm/atomic.h | 1 - arch/parisc/include/asm/atomic.h | 2 -- arch/powerpc/include/asm/atomic.h | 2 -- arch/riscv/include/asm/atomic.h | 2 -- arch/s390/include/asm/atomic.h | 2 -- arch/sh/include/asm/atomic.h | 2 -- arch/sparc/include/asm/atomic_32.h | 2 -- arch/sparc/include/asm/atomic_64.h | 1 - arch/x86/include/asm/atomic.h | 2 -- arch/xtensa/include/asm/atomic.h | 2 -- include/asm-generic/atomic.h | 2 -- include/linux/types.h | 2 ++ 20 files changed, 2 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h index 2144530d1428..e2093994fd0d 100644 --- a/arch/alpha/include/asm/atomic.h +++ b/arch/alpha/include/asm/atomic.h @@ -24,7 +24,6 @@ #define __atomic_acquire_fence() #define __atomic_post_full_fence() -#define ATOMIC_INIT(i) { (i) } #define ATOMIC64_INIT(i) { (i) } #define atomic_read(v) READ_ONCE((v)->counter) diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h index 7298ce84762e..c614857eb209 100644 --- a/arch/arc/include/asm/atomic.h +++ b/arch/arc/include/asm/atomic.h @@ -14,8 +14,6 @@ #include #include -#define ATOMIC_INIT(i) { (i) } - #ifndef CONFIG_ARC_PLAT_EZNPS #define atomic_read(v) READ_ONCE((v)->counter) diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index 75bb2c543e59..455eb19a5ac1 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -15,8 +15,6 @@ #include #include -#define ATOMIC_INIT(i) { (i) } - #ifdef __KERNEL__ /* diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h index a08890da696c..015ddffaf6ca 100644 --- a/arch/arm64/include/asm/atomic.h +++ b/arch/arm64/include/asm/atomic.h @@ -99,8 +99,6 @@ static inline long arch_atomic64_dec_if_positive(atomic64_t *v) return __lse_ll_sc_body(atomic64_dec_if_positive, v); } -#define ATOMIC_INIT(i) { (i) } - #define arch_atomic_read(v) __READ_ONCE((v)->counter) #define arch_atomic_set(v, i) __WRITE_ONCE(((v)->counter), (i)) diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h index c6b6a06231b2..a990d151f163 100644 --- a/arch/h8300/include/asm/atomic.h +++ b/arch/h8300/include/asm/atomic.h @@ -12,8 +12,6 @@ * resource counting etc.. 
*/ -#define ATOMIC_INIT(i) { (i) } - #define atomic_read(v) READ_ONCE((v)->counter) #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h index 0231d69c8bf2..4ab895d7111f 100644 --- a/arch/hexagon/include/asm/atomic.h +++ b/arch/hexagon/include/asm/atomic.h @@ -12,8 +12,6 @@ #include #include -#define ATOMIC_INIT(i) { (i) } - /* Normal writes in our arch don't clear lock reservations */ static inline void atomic_set(atomic_t *v, int new) diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h index 50440f3ddc43..f267d956458f 100644 --- a/arch/ia64/include/asm/atomic.h +++ b/arch/ia64/include/asm/atomic.h @@ -19,7 +19,6 @@ #include -#define ATOMIC_INIT(i) { (i) } #define ATOMIC64_INIT(i) { (i) } #define atomic_read(v) READ_ONCE((v)->counter) diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h index 47228b0d4163..756c5cc58f94 100644 --- a/arch/m68k/include/asm/atomic.h +++ b/arch/m68k/include/asm/atomic.h @@ -16,8 +16,6 @@ * We do not have SMP m68k systems, so we don't have to deal with that. */ -#define ATOMIC_INIT(i) { (i) } - #define atomic_read(v) READ_ONCE((v)->counter) #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index e5ac88392d1f..f904084fcb1f 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -45,7 +45,6 @@ static __always_inline type pfx##_xchg(pfx##_t *v, type n) \ return xchg(&v->counter, n); \ } -#define ATOMIC_INIT(i) { (i) } ATOMIC_OPS(atomic, int) #ifdef CONFIG_64BIT diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 118953d41763..f960e2f32b1b 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -136,8 +136,6 @@ ATOMIC_OPS(xor, ^=) #undef ATOMIC_OP_RETURN #undef ATOMIC_OP -#define ATOMIC_INIT(i) { (i) } - #ifdef CONFIG_64BIT #define ATOMIC64_INIT(i) { (i) } diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index 498785ffc25f..0311c3c42960 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -11,8 +11,6 @@ #include #include -#define ATOMIC_INIT(i) { (i) } - /* * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with * a "bne-" instruction at the end, so an isync is enough as a acquire barrier diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h index 96f95c9ebd97..400a8c8b6de7 100644 --- a/arch/riscv/include/asm/atomic.h +++ b/arch/riscv/include/asm/atomic.h @@ -19,8 +19,6 @@ #include #include -#define ATOMIC_INIT(i) { (i) } - #define __atomic_acquire_fence() \ __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory") diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index 491ad53a0d4e..cae473a7b6f7 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h @@ -15,8 +15,6 @@ #include #include -#define ATOMIC_INIT(i) { (i) } - static inline int atomic_read(const atomic_t *v) { int c; diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h index f37b95a80232..7c2a8a703b9a 100644 --- a/arch/sh/include/asm/atomic.h +++ b/arch/sh/include/asm/atomic.h @@ -19,8 +19,6 @@ #include #include -#define ATOMIC_INIT(i) { (i) } - #define atomic_read(v) READ_ONCE((v)->counter) #define atomic_set(v,i) WRITE_ONCE((v)->counter, (i)) diff --git a/arch/sparc/include/asm/atomic_32.h 
b/arch/sparc/include/asm/atomic_32.h index 94c930f0bc62..efad5532f169 100644 --- a/arch/sparc/include/asm/atomic_32.h +++ b/arch/sparc/include/asm/atomic_32.h @@ -18,8 +18,6 @@ #include #include -#define ATOMIC_INIT(i) { (i) } - int atomic_add_return(int, atomic_t *); int atomic_fetch_add(int, atomic_t *); int atomic_fetch_and(int, atomic_t *); diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h index b60448397d4f..6b235d3d1d9d 100644 --- a/arch/sparc/include/asm/atomic_64.h +++ b/arch/sparc/include/asm/atomic_64.h @@ -12,7 +12,6 @@ #include #include -#define ATOMIC_INIT(i) { (i) } #define ATOMIC64_INIT(i) { (i) } #define atomic_read(v) READ_ONCE((v)->counter) diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h index bf35e476a776..b6cac6e9bb70 100644 --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h @@ -14,8 +14,6 @@ * resource counting etc.. */ -#define ATOMIC_INIT(i) { (i) } - /** * arch_atomic_read - read atomic variable * @v: pointer of type atomic_t diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h index 3e7c6134ed32..744c2f463845 100644 --- a/arch/xtensa/include/asm/atomic.h +++ b/arch/xtensa/include/asm/atomic.h @@ -19,8 +19,6 @@ #include #include -#define ATOMIC_INIT(i) { (i) } - /* * This Xtensa implementation assumes that the right mechanism * for exclusion is for locking interrupts to level EXCM_LEVEL. diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index 286867f593d2..11f96f40f4a7 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -159,8 +159,6 @@ ATOMIC_OP(xor, ^) * resource counting etc.. */ -#define ATOMIC_INIT(i) { (i) } - /** * atomic_read - read atomic variable * @v: pointer of type atomic_t diff --git a/include/linux/types.h b/include/linux/types.h index d3021c879179..a147977602b5 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -167,6 +167,8 @@ typedef struct { int counter; } atomic_t; +#define ATOMIC_INIT(i) { (i) } + #ifdef CONFIG_64BIT typedef struct { s64 counter; -- cgit v1.2.3 From 459e39538e612b8dd130d34b93c9bfc89ecc836c Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 29 Jul 2020 22:33:16 +1000 Subject: locking/qspinlock: Do not include atomic.h from qspinlock_types.h This patch breaks a header loop involving qspinlock_types.h. The issue is that qspinlock_types.h includes atomic.h, which then eventually includes kernel.h which could lead back to the original file via spinlock_types.h. As ATOMIC_INIT is now defined by linux/types.h, there is no longer any need to include atomic.h from qspinlock_types.h. This also allows the CONFIG_PARAVIRT hack to be removed since it was trying to prevent exactly this loop. Signed-off-by: Herbert Xu Signed-off-by: Peter Zijlstra (Intel) Acked-by: Waiman Long Link: https://lkml.kernel.org/r/20200729123316.GC7047@gondor.apana.org.au --- include/asm-generic/qspinlock.h | 1 + include/asm-generic/qspinlock_types.h | 8 -------- 2 files changed, 1 insertion(+), 8 deletions(-) (limited to 'include') diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h index fde943d180e0..2b26cd729b94 100644 --- a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h @@ -11,6 +11,7 @@ #define __ASM_GENERIC_QSPINLOCK_H #include +#include /** * queued_spin_is_locked - is the spinlock locked? 
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h index 56d1309d32f8..2fd1fb89ec36 100644 --- a/include/asm-generic/qspinlock_types.h +++ b/include/asm-generic/qspinlock_types.h @@ -9,15 +9,7 @@ #ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H #define __ASM_GENERIC_QSPINLOCK_TYPES_H -/* - * Including atomic.h with PARAVIRT on will cause compilation errors because - * of recursive header file incluson via paravirt_types.h. So don't include - * it if PARAVIRT is on. - */ -#ifndef CONFIG_PARAVIRT #include -#include -#endif typedef struct qspinlock { union { -- cgit v1.2.3 From 0d24f65e933ca89d55d17f6dbdb2a72ca88f0992 Mon Sep 17 00:00:00 2001 From: "Ahmed S. Darwish" Date: Mon, 20 Jul 2020 17:55:07 +0200 Subject: Documentation: locking: Describe seqlock design and usage Proper documentation for the design and usage of sequence counters and sequential locks does not exist. Complete the seqlock.h documentation as follows: - Divide all documentation on a seqcount_t vs. seqlock_t basis. The description for both mechanisms was intermingled, which is incorrect since the usage constrains for each type are vastly different. - Add an introductory paragraph describing the internal design of, and rationale for, sequence counters. - Document seqcount_t writer non-preemptibility requirement, which was not previously documented anywhere, and provide a clear rationale. - Provide template code for seqcount_t and seqlock_t initialization and reader/writer critical sections. - Recommend using seqlock_t by default. It implicitly handles the serialization and non-preemptibility requirements of writers. At seqlock.h: - Remove references to brlocks as they've long been removed from the kernel. - Remove references to gcc-3.x since the kernel's minimum supported gcc version is 4.9. References: 0f6ed63b1707 ("no need to keep brlock macros anymore...") References: 6ec4476ac825 ("Raise gcc version requirement to 4.9") Signed-off-by: Ahmed S. Darwish Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200720155530.1173732-2-a.darwish@linutronix.de --- Documentation/locking/index.rst | 1 + Documentation/locking/seqlock.rst | 170 ++++++++++++++++++++++++++++++++++++++ include/linux/seqlock.h | 85 +++++++++---------- 3 files changed, 211 insertions(+), 45 deletions(-) create mode 100644 Documentation/locking/seqlock.rst (limited to 'include') diff --git a/Documentation/locking/index.rst b/Documentation/locking/index.rst index d785878cad65..7003bd5aeff4 100644 --- a/Documentation/locking/index.rst +++ b/Documentation/locking/index.rst @@ -14,6 +14,7 @@ locking mutex-design rt-mutex-design rt-mutex + seqlock spinlocks ww-mutex-design preempt-locking diff --git a/Documentation/locking/seqlock.rst b/Documentation/locking/seqlock.rst new file mode 100644 index 000000000000..366dd368d90a --- /dev/null +++ b/Documentation/locking/seqlock.rst @@ -0,0 +1,170 @@ +====================================== +Sequence counters and sequential locks +====================================== + +Introduction +============ + +Sequence counters are a reader-writer consistency mechanism with +lockless readers (read-only retry loops), and no writer starvation. They +are used for data that's rarely written to (e.g. system time), where the +reader wants a consistent set of information and is willing to retry if +that information changes. 
+ +A data set is consistent when the sequence count at the beginning of the +read side critical section is even and the same sequence count value is +read again at the end of the critical section. The data in the set must +be copied out inside the read side critical section. If the sequence +count has changed between the start and the end of the critical section, +the reader must retry. + +Writers increment the sequence count at the start and the end of their +critical section. After starting the critical section the sequence count +is odd and indicates to the readers that an update is in progress. At +the end of the write side critical section the sequence count becomes +even again which lets readers make progress. + +A sequence counter write side critical section must never be preempted +or interrupted by read side sections. Otherwise the reader will spin for +the entire scheduler tick due to the odd sequence count value and the +interrupted writer. If that reader belongs to a real-time scheduling +class, it can spin forever and the kernel will livelock. + +This mechanism cannot be used if the protected data contains pointers, +as the writer can invalidate a pointer that the reader is following. + + +.. _seqcount_t: + +Sequence counters (``seqcount_t``) +================================== + +This is the the raw counting mechanism, which does not protect against +multiple writers. Write side critical sections must thus be serialized +by an external lock. + +If the write serialization primitive is not implicitly disabling +preemption, preemption must be explicitly disabled before entering the +write side section. If the read section can be invoked from hardirq or +softirq contexts, interrupts or bottom halves must also be respectively +disabled before entering the write section. + +If it's desired to automatically handle the sequence counter +requirements of writer serialization and non-preemptibility, use +:ref:`seqlock_t` instead. + +Initialization:: + + /* dynamic */ + seqcount_t foo_seqcount; + seqcount_init(&foo_seqcount); + + /* static */ + static seqcount_t foo_seqcount = SEQCNT_ZERO(foo_seqcount); + + /* C99 struct init */ + struct { + .seq = SEQCNT_ZERO(foo.seq), + } foo; + +Write path:: + + /* Serialized context with disabled preemption */ + + write_seqcount_begin(&foo_seqcount); + + /* ... [[write-side critical section]] ... */ + + write_seqcount_end(&foo_seqcount); + +Read path:: + + do { + seq = read_seqcount_begin(&foo_seqcount); + + /* ... [[read-side critical section]] ... */ + + } while (read_seqcount_retry(&foo_seqcount, seq)); + + +.. _seqlock_t: + +Sequential locks (``seqlock_t``) +================================ + +This contains the :ref:`seqcount_t` mechanism earlier discussed, plus an +embedded spinlock for writer serialization and non-preemptibility. + +If the read side section can be invoked from hardirq or softirq context, +use the write side function variants which disable interrupts or bottom +halves respectively. + +Initialization:: + + /* dynamic */ + seqlock_t foo_seqlock; + seqlock_init(&foo_seqlock); + + /* static */ + static DEFINE_SEQLOCK(foo_seqlock); + + /* C99 struct init */ + struct { + .seql = __SEQLOCK_UNLOCKED(foo.seql) + } foo; + +Write path:: + + write_seqlock(&foo_seqlock); + + /* ... [[write-side critical section]] ... */ + + write_sequnlock(&foo_seqlock); + +Read path, three categories: + +1. Normal Sequence readers which never block a writer but they must + retry if a writer is in progress by detecting change in the sequence + number. 
Writers do not wait for a sequence reader:: + + do { + seq = read_seqbegin(&foo_seqlock); + + /* ... [[read-side critical section]] ... */ + + } while (read_seqretry(&foo_seqlock, seq)); + +2. Locking readers which will wait if a writer or another locking reader + is in progress. A locking reader in progress will also block a writer + from entering its critical section. This read lock is + exclusive. Unlike rwlock_t, only one locking reader can acquire it:: + + read_seqlock_excl(&foo_seqlock); + + /* ... [[read-side critical section]] ... */ + + read_sequnlock_excl(&foo_seqlock); + +3. Conditional lockless reader (as in 1), or locking reader (as in 2), + according to a passed marker. This is used to avoid lockless readers + starvation (too much retry loops) in case of a sharp spike in write + activity. First, a lockless read is tried (even marker passed). If + that trial fails (odd sequence counter is returned, which is used as + the next iteration marker), the lockless read is transformed to a + full locking read and no retry loop is necessary:: + + /* marker; even initialization */ + int seq = 0; + do { + read_seqbegin_or_lock(&foo_seqlock, &seq); + + /* ... [[read-side critical section]] ... */ + + } while (need_seqretry(&foo_seqlock, seq)); + done_seqretry(&foo_seqlock, seq); + + +API documentation +================= + +.. kernel-doc:: include/linux/seqlock.h diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 8b97204f35a7..299d68f10325 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -1,36 +1,15 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_SEQLOCK_H #define __LINUX_SEQLOCK_H + /* - * Reader/writer consistent mechanism without starving writers. This type of - * lock for data where the reader wants a consistent set of information - * and is willing to retry if the information changes. There are two types - * of readers: - * 1. Sequence readers which never block a writer but they may have to retry - * if a writer is in progress by detecting change in sequence number. - * Writers do not wait for a sequence reader. - * 2. Locking readers which will wait if a writer or another locking reader - * is in progress. A locking reader in progress will also block a writer - * from going forward. Unlike the regular rwlock, the read lock here is - * exclusive so that only one locking reader can get it. - * - * This is not as cache friendly as brlock. Also, this may not work well - * for data that contains pointers, because any writer could - * invalidate a pointer that a reader was following. - * - * Expected non-blocking reader usage: - * do { - * seq = read_seqbegin(&foo); - * ... - * } while (read_seqretry(&foo, seq)); - * - * - * On non-SMP the spin locks disappear but the writer still needs - * to increment the sequence variables because an interrupt routine could - * change the state of the data. - * - * Based on x86_64 vsyscall gettimeofday - * by Keith Owens and Andrea Arcangeli + * seqcount_t / seqlock_t - a reader-writer consistency mechanism with + * lockless readers (read-only retry loops), and no writer starvation. + * + * See Documentation/locking/seqlock.rst + * + * Copyrights: + * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli */ #include @@ -41,8 +20,8 @@ #include /* - * The seqlock interface does not prescribe a precise sequence of read - * begin/retry/end. For readers, typically there is a call to + * The seqlock seqcount_t interface does not prescribe a precise sequence of + * read begin/retry/end. 
For readers, typically there is a call to * read_seqcount_begin() and read_seqcount_retry(), however, there are more * esoteric cases which do not follow this pattern. * @@ -50,16 +29,30 @@ * via seqcount_t under KCSAN: upon beginning a seq-reader critical section, * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as * atomics; if there is a matching read_seqcount_retry() call, no following - * memory operations are considered atomic. Usage of seqlocks via seqlock_t - * interface is not affected. + * memory operations are considered atomic. Usage of the seqlock_t interface + * is not affected. */ #define KCSAN_SEQLOCK_REGION_MAX 1000 /* - * Version using sequence counter only. - * This can be used when code has its own mutex protecting the - * updating starting before the write_seqcountbeqin() and ending - * after the write_seqcount_end(). + * Sequence counters (seqcount_t) + * + * This is the raw counting mechanism, without any writer protection. + * + * Write side critical sections must be serialized and non-preemptible. + * + * If readers can be invoked from hardirq or softirq contexts, + * interrupts or bottom halves must also be respectively disabled before + * entering the write section. + * + * This mechanism can't be used if the protected data contains pointers, + * as the writer can invalidate a pointer that a reader is following. + * + * If it's desired to automatically handle the sequence counter writer + * serialization and non-preemptibility requirements, use a sequential + * lock (seqlock_t) instead. + * + * See Documentation/locking/seqlock.rst */ typedef struct seqcount { unsigned sequence; @@ -398,10 +391,6 @@ static inline void raw_write_seqcount_latch(seqcount_t *s) smp_wmb(); /* increment "sequence" before following stores */ } -/* - * Sequence counter only version assumes that callers are using their - * own mutexing. - */ static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass) { raw_write_seqcount_begin(s); @@ -434,15 +423,21 @@ static inline void write_seqcount_invalidate(seqcount_t *s) kcsan_nestable_atomic_end(); } +/* + * Sequential locks (seqlock_t) + * + * Sequence counters with an embedded spinlock for writer serialization + * and non-preemptibility. + * + * For more info, see: + * - Comments on top of seqcount_t + * - Documentation/locking/seqlock.rst + */ typedef struct { struct seqcount seqcount; spinlock_t lock; } seqlock_t; -/* - * These macros triggered gcc-3.x compile-time problems. We think these are - * OK now. Be cautious. - */ #define __SEQLOCK_UNLOCKED(lockname) \ { \ .seqcount = SEQCNT_ZERO(lockname), \ -- cgit v1.2.3 From 15cbe67bbd3adeb4854c42713dbeaf2ff876beee Mon Sep 17 00:00:00 2001 From: "Ahmed S. Darwish" Date: Mon, 20 Jul 2020 17:55:08 +0200 Subject: seqlock: Properly format kernel-doc code samples Align the code samples and note sections inside kernel-doc comments with tabs. This way they can be properly parsed and rendered by Sphinx. It also makes the code samples easier to read from text editors. Signed-off-by: Ahmed S. 
Darwish Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200720155530.1173732-3-a.darwish@linutronix.de --- include/linux/seqlock.h | 108 +++++++++++++++++++++++++----------------------- 1 file changed, 56 insertions(+), 52 deletions(-) (limited to 'include') diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 299d68f10325..6c4f68ef1393 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -263,32 +263,32 @@ static inline void raw_write_seqcount_end(seqcount_t *s) * atomically, avoiding compiler optimizations; b) to document which writes are * meant to propagate to the reader critical section. This is necessary because * neither writes before and after the barrier are enclosed in a seq-writer - * critical section that would ensure readers are aware of ongoing writes. + * critical section that would ensure readers are aware of ongoing writes:: * - * seqcount_t seq; - * bool X = true, Y = false; + * seqcount_t seq; + * bool X = true, Y = false; * - * void read(void) - * { - * bool x, y; + * void read(void) + * { + * bool x, y; * - * do { - * int s = read_seqcount_begin(&seq); + * do { + * int s = read_seqcount_begin(&seq); * - * x = X; y = Y; + * x = X; y = Y; * - * } while (read_seqcount_retry(&seq, s)); + * } while (read_seqcount_retry(&seq, s)); * - * BUG_ON(!x && !y); + * BUG_ON(!x && !y); * } * * void write(void) * { - * WRITE_ONCE(Y, true); + * WRITE_ONCE(Y, true); * - * raw_write_seqcount_barrier(seq); + * raw_write_seqcount_barrier(seq); * - * WRITE_ONCE(X, false); + * WRITE_ONCE(X, false); * } */ static inline void raw_write_seqcount_barrier(seqcount_t *s) @@ -325,64 +325,68 @@ static inline int raw_read_seqcount_latch(seqcount_t *s) * Very simply put: we first modify one copy and then the other. This ensures * there is always one copy in a stable state, ready to give us an answer. * - * The basic form is a data structure like: + * The basic form is a data structure like:: * - * struct latch_struct { - * seqcount_t seq; - * struct data_struct data[2]; - * }; + * struct latch_struct { + * seqcount_t seq; + * struct data_struct data[2]; + * }; * * Where a modification, which is assumed to be externally serialized, does the - * following: + * following:: * - * void latch_modify(struct latch_struct *latch, ...) - * { - * smp_wmb(); <- Ensure that the last data[1] update is visible - * latch->seq++; - * smp_wmb(); <- Ensure that the seqcount update is visible + * void latch_modify(struct latch_struct *latch, ...) + * { + * smp_wmb(); // Ensure that the last data[1] update is visible + * latch->seq++; + * smp_wmb(); // Ensure that the seqcount update is visible * - * modify(latch->data[0], ...); + * modify(latch->data[0], ...); * - * smp_wmb(); <- Ensure that the data[0] update is visible - * latch->seq++; - * smp_wmb(); <- Ensure that the seqcount update is visible + * smp_wmb(); // Ensure that the data[0] update is visible + * latch->seq++; + * smp_wmb(); // Ensure that the seqcount update is visible * - * modify(latch->data[1], ...); - * } + * modify(latch->data[1], ...); + * } * - * The query will have a form like: + * The query will have a form like:: * - * struct entry *latch_query(struct latch_struct *latch, ...) - * { - * struct entry *entry; - * unsigned seq, idx; + * struct entry *latch_query(struct latch_struct *latch, ...) 
+ * { + * struct entry *entry; + * unsigned seq, idx; * - * do { - * seq = raw_read_seqcount_latch(&latch->seq); + * do { + * seq = raw_read_seqcount_latch(&latch->seq); * - * idx = seq & 0x01; - * entry = data_query(latch->data[idx], ...); + * idx = seq & 0x01; + * entry = data_query(latch->data[idx], ...); * - * smp_rmb(); - * } while (seq != latch->seq); + * smp_rmb(); + * } while (seq != latch->seq); * - * return entry; - * } + * return entry; + * } * * So during the modification, queries are first redirected to data[1]. Then we * modify data[0]. When that is complete, we redirect queries back to data[0] * and we can modify data[1]. * - * NOTE: The non-requirement for atomic modifications does _NOT_ include - * the publishing of new entries in the case where data is a dynamic - * data structure. + * NOTE: + * + * The non-requirement for atomic modifications does _NOT_ include + * the publishing of new entries in the case where data is a dynamic + * data structure. + * + * An iteration might start in data[0] and get suspended long enough + * to miss an entire modification sequence, once it resumes it might + * observe the new entry. * - * An iteration might start in data[0] and get suspended long enough - * to miss an entire modification sequence, once it resumes it might - * observe the new entry. + * NOTE: * - * NOTE: When data is a dynamic data structure; one should use regular RCU - * patterns to manage the lifetimes of the objects within. + * When data is a dynamic data structure; one should use regular RCU + * patterns to manage the lifetimes of the objects within. */ static inline void raw_write_seqcount_latch(seqcount_t *s) { -- cgit v1.2.3 From d3b35b87f436c1b226a8061bee9c8875ba6658bd Mon Sep 17 00:00:00 2001 From: "Ahmed S. Darwish" Date: Mon, 20 Jul 2020 17:55:09 +0200 Subject: seqlock: seqcount_t latch: End read sections with read_seqcount_retry() The seqcount_t latch reader example at the raw_write_seqcount_latch() kernel-doc comment ends the latch read section with a manual smp memory barrier and sequence counter comparison. This is technically correct, but it is suboptimal: read_seqcount_retry() already contains the same logic of an smp memory barrier and sequence counter comparison. End the latch read critical section example with read_seqcount_retry(). Signed-off-by: Ahmed S. Darwish Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200720155530.1173732-4-a.darwish@linutronix.de --- include/linux/seqlock.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 6c4f68ef1393..d724b5e5408d 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -363,8 +363,8 @@ static inline int raw_read_seqcount_latch(seqcount_t *s) * idx = seq & 0x01; * entry = data_query(latch->data[idx], ...); * - * smp_rmb(); - * } while (seq != latch->seq); + * // read_seqcount_retry() includes needed smp_rmb() + * } while (read_seqcount_retry(&latch->seq, seq)); * * return entry; * } -- cgit v1.2.3 From f4a27cbcec90ac04ee60e04b222e1449dcdba0bd Mon Sep 17 00:00:00 2001 From: "Ahmed S. Darwish" Date: Mon, 20 Jul 2020 17:55:10 +0200 Subject: seqlock: Reorder seqcount_t and seqlock_t API definitions The seqlock.h seqcount_t and seqlock_t API definitions are presented in the chronological order of their development rather than the order that makes most sense to readers. This makes it hard to follow and understand the header file code. 
Group and reorder all of the exported seqlock.h functions according to their function. First, group together the seqcount_t standard read path functions: - __read_seqcount_begin() - raw_read_seqcount_begin() - read_seqcount_begin() since each function is implemented exactly in terms of the one above it. Then, group the special-case seqcount_t readers on their own as: - raw_read_seqcount() - raw_seqcount_begin() since the only difference between the two functions is that the second one masks the sequence counter LSB while the first one does not. Note that raw_seqcount_begin() can actually be implemented in terms of raw_read_seqcount(), which will be done in a follow-up commit. Then, group the seqcount_t write path functions, instead of injecting unrelated seqcount_t latch functions between them, and order them as: - raw_write_seqcount_begin() - raw_write_seqcount_end() - write_seqcount_begin_nested() - write_seqcount_begin() - write_seqcount_end() - raw_write_seqcount_barrier() - write_seqcount_invalidate() which is the expected natural order. This also isolates the seqcount_t latch functions into their own area, at the end of the sequence counters section, and before jumping to the next one: sequential locks (seqlock_t). Do a similar grouping and reordering for seqlock_t "locking" readers vs. the "conditionally locking or lockless" ones. No implementation code was changed in any of the reordering above. Signed-off-by: Ahmed S. Darwish Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200720155530.1173732-5-a.darwish@linutronix.de --- include/linux/seqlock.h | 158 ++++++++++++++++++++++++------------------------ 1 file changed, 78 insertions(+), 80 deletions(-) (limited to 'include') diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index d724b5e5408d..4c1456008d89 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -128,23 +128,6 @@ repeat: return ret; } -/** - * raw_read_seqcount - Read the raw seqcount - * @s: pointer to seqcount_t - * Returns: count to be passed to read_seqcount_retry - * - * raw_read_seqcount opens a read critical section of the given - * seqcount without any lockdep checking and without checking or - * masking the LSB. Calling code is responsible for handling that. - */ -static inline unsigned raw_read_seqcount(const seqcount_t *s) -{ - unsigned ret = READ_ONCE(s->sequence); - smp_rmb(); - kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); - return ret; -} - /** * raw_read_seqcount_begin - start seq-read critical section w/o lockdep * @s: pointer to seqcount_t @@ -176,6 +159,23 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s) return raw_read_seqcount_begin(s); } +/** + * raw_read_seqcount - Read the raw seqcount + * @s: pointer to seqcount_t + * Returns: count to be passed to read_seqcount_retry + * + * raw_read_seqcount opens a read critical section of the given + * seqcount without any lockdep checking and without checking or + * masking the LSB. Calling code is responsible for handling that. 
+ */ +static inline unsigned raw_read_seqcount(const seqcount_t *s) +{ + unsigned ret = READ_ONCE(s->sequence); + smp_rmb(); + kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); + return ret; +} + /** * raw_seqcount_begin - begin a seq-read critical section * @s: pointer to seqcount_t @@ -234,8 +234,6 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) return __read_seqcount_retry(s, start); } - - static inline void raw_write_seqcount_begin(seqcount_t *s) { kcsan_nestable_atomic_begin(); @@ -250,6 +248,23 @@ static inline void raw_write_seqcount_end(seqcount_t *s) kcsan_nestable_atomic_end(); } +static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass) +{ + raw_write_seqcount_begin(s); + seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_); +} + +static inline void write_seqcount_begin(seqcount_t *s) +{ + write_seqcount_begin_nested(s, 0); +} + +static inline void write_seqcount_end(seqcount_t *s) +{ + seqcount_release(&s->dep_map, _RET_IP_); + raw_write_seqcount_end(s); +} + /** * raw_write_seqcount_barrier - do a seq write barrier * @s: pointer to seqcount_t @@ -300,6 +315,21 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s) kcsan_nestable_atomic_end(); } +/** + * write_seqcount_invalidate - invalidate in-progress read-side seq operations + * @s: pointer to seqcount_t + * + * After write_seqcount_invalidate, no read-side seq operations will complete + * successfully and see data older than this. + */ +static inline void write_seqcount_invalidate(seqcount_t *s) +{ + smp_wmb(); + kcsan_nestable_atomic_begin(); + s->sequence+=2; + kcsan_nestable_atomic_end(); +} + static inline int raw_read_seqcount_latch(seqcount_t *s) { /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */ @@ -395,38 +425,6 @@ static inline void raw_write_seqcount_latch(seqcount_t *s) smp_wmb(); /* increment "sequence" before following stores */ } -static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass) -{ - raw_write_seqcount_begin(s); - seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_); -} - -static inline void write_seqcount_begin(seqcount_t *s) -{ - write_seqcount_begin_nested(s, 0); -} - -static inline void write_seqcount_end(seqcount_t *s) -{ - seqcount_release(&s->dep_map, _RET_IP_); - raw_write_seqcount_end(s); -} - -/** - * write_seqcount_invalidate - invalidate in-progress read-side seq operations - * @s: pointer to seqcount_t - * - * After write_seqcount_invalidate, no read-side seq operations will complete - * successfully and see data older than this. - */ -static inline void write_seqcount_invalidate(seqcount_t *s) -{ - smp_wmb(); - kcsan_nestable_atomic_begin(); - s->sequence+=2; - kcsan_nestable_atomic_end(); -} - /* * Sequential locks (seqlock_t) * @@ -555,35 +553,6 @@ static inline void read_sequnlock_excl(seqlock_t *sl) spin_unlock(&sl->lock); } -/** - * read_seqbegin_or_lock - begin a sequence number check or locking block - * @lock: sequence lock - * @seq : sequence number to be checked - * - * First try it once optimistically without taking the lock. If that fails, - * take the lock. The sequence number is also used as a marker for deciding - * whether to be a reader (even) or writer (odd). - * N.B. seq must be initialized to an even number to begin with. 
- */ -static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq) -{ - if (!(*seq & 1)) /* Even */ - *seq = read_seqbegin(lock); - else /* Odd */ - read_seqlock_excl(lock); -} - -static inline int need_seqretry(seqlock_t *lock, int seq) -{ - return !(seq & 1) && read_seqretry(lock, seq); -} - -static inline void done_seqretry(seqlock_t *lock, int seq) -{ - if (seq & 1) - read_sequnlock_excl(lock); -} - static inline void read_seqlock_excl_bh(seqlock_t *sl) { spin_lock_bh(&sl->lock); @@ -621,6 +590,35 @@ read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags) spin_unlock_irqrestore(&sl->lock, flags); } +/** + * read_seqbegin_or_lock - begin a sequence number check or locking block + * @lock: sequence lock + * @seq : sequence number to be checked + * + * First try it once optimistically without taking the lock. If that fails, + * take the lock. The sequence number is also used as a marker for deciding + * whether to be a reader (even) or writer (odd). + * N.B. seq must be initialized to an even number to begin with. + */ +static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq) +{ + if (!(*seq & 1)) /* Even */ + *seq = read_seqbegin(lock); + else /* Odd */ + read_seqlock_excl(lock); +} + +static inline int need_seqretry(seqlock_t *lock, int seq) +{ + return !(seq & 1) && read_seqretry(lock, seq); +} + +static inline void done_seqretry(seqlock_t *lock, int seq) +{ + if (seq & 1) + read_sequnlock_excl(lock); +} + static inline unsigned long read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq) { -- cgit v1.2.3 From 89b88845e05752b3d684eaf147f457c8dfa99c5f Mon Sep 17 00:00:00 2001 From: "Ahmed S. Darwish" Date: Mon, 20 Jul 2020 17:55:11 +0200 Subject: seqlock: Add kernel-doc for seqcount_t and seqlock_t APIs seqlock.h is now included by kernel's RST documentation, but a small number of the the exported seqlock.h functions are kernel-doc annotated. Add kernel-doc for all seqlock.h exported APIs. Signed-off-by: Ahmed S. Darwish Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200720155530.1173732-6-a.darwish@linutronix.de --- include/linux/seqlock.h | 425 +++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 348 insertions(+), 77 deletions(-) (limited to 'include') diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 4c1456008d89..85fb3ac93ffb 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -75,6 +75,10 @@ static inline void __seqcount_init(seqcount_t *s, const char *name, # define SEQCOUNT_DEP_MAP_INIT(lockname) \ .dep_map = { .name = #lockname } \ +/** + * seqcount_init() - runtime initializer for seqcount_t + * @s: Pointer to the seqcount_t instance + */ # define seqcount_init(s) \ do { \ static struct lock_class_key __key; \ @@ -98,13 +102,15 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) # define seqcount_lockdep_reader_access(x) #endif -#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)} - +/** + * SEQCNT_ZERO() - static initializer for seqcount_t + * @name: Name of the seqcount_t instance + */ +#define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) } /** - * __read_seqcount_begin - begin a seq-read critical section (without barrier) - * @s: pointer to seqcount_t - * Returns: count to be passed to read_seqcount_retry + * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier + * @s: Pointer to seqcount_t * * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb() * barrier. 
Callers should ensure that smp_rmb() or equivalent ordering is @@ -113,6 +119,8 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) * * Use carefully, only in critical code, and comment how the barrier is * provided. + * + * Return: count to be passed to read_seqcount_retry() */ static inline unsigned __read_seqcount_begin(const seqcount_t *s) { @@ -129,13 +137,10 @@ repeat: } /** - * raw_read_seqcount_begin - start seq-read critical section w/o lockdep - * @s: pointer to seqcount_t - * Returns: count to be passed to read_seqcount_retry + * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep + * @s: Pointer to seqcount_t * - * raw_read_seqcount_begin opens a read critical section of the given - * seqcount, but without any lockdep checking. Validity of the critical - * section is tested by checking read_seqcount_retry function. + * Return: count to be passed to read_seqcount_retry() */ static inline unsigned raw_read_seqcount_begin(const seqcount_t *s) { @@ -145,13 +150,10 @@ static inline unsigned raw_read_seqcount_begin(const seqcount_t *s) } /** - * read_seqcount_begin - begin a seq-read critical section - * @s: pointer to seqcount_t - * Returns: count to be passed to read_seqcount_retry + * read_seqcount_begin() - begin a seqcount_t read critical section + * @s: Pointer to seqcount_t * - * read_seqcount_begin opens a read critical section of the given seqcount. - * Validity of the critical section is tested by checking read_seqcount_retry - * function. + * Return: count to be passed to read_seqcount_retry() */ static inline unsigned read_seqcount_begin(const seqcount_t *s) { @@ -160,13 +162,15 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s) } /** - * raw_read_seqcount - Read the raw seqcount - * @s: pointer to seqcount_t - * Returns: count to be passed to read_seqcount_retry + * raw_read_seqcount() - read the raw seqcount_t counter value + * @s: Pointer to seqcount_t * * raw_read_seqcount opens a read critical section of the given - * seqcount without any lockdep checking and without checking or - * masking the LSB. Calling code is responsible for handling that. + * seqcount_t, without any lockdep checking, and without checking or + * masking the sequence counter LSB. Calling code is responsible for + * handling that. + * + * Return: count to be passed to read_seqcount_retry() */ static inline unsigned raw_read_seqcount(const seqcount_t *s) { @@ -177,18 +181,21 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s) } /** - * raw_seqcount_begin - begin a seq-read critical section - * @s: pointer to seqcount_t - * Returns: count to be passed to read_seqcount_retry + * raw_seqcount_begin() - begin a seqcount_t read critical section w/o + * lockdep and w/o counter stabilization + * @s: Pointer to seqcount_t * - * raw_seqcount_begin opens a read critical section of the given seqcount. - * Validity of the critical section is tested by checking read_seqcount_retry - * function. + * raw_seqcount_begin opens a read critical section of the given + * seqcount_t. Unlike read_seqcount_begin(), this function will not wait + * for the count to stabilize. If a writer is active when it begins, it + * will fail the read_seqcount_retry() at the end of the read critical + * section instead of stabilizing at the beginning of it. * - * Unlike read_seqcount_begin(), this function will not wait for the count - * to stabilize. 
If a writer is active when we begin, we will fail the - * read_seqcount_retry() instead of stabilizing at the beginning of the - * critical section. + * Use this only in special kernel hot paths where the read section is + * small and has a high probability of success through other external + * means. It will save a single branching instruction. + * + * Return: count to be passed to read_seqcount_retry() */ static inline unsigned raw_seqcount_begin(const seqcount_t *s) { @@ -199,10 +206,9 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s) } /** - * __read_seqcount_retry - end a seq-read critical section (without barrier) - * @s: pointer to seqcount_t - * @start: count, from read_seqcount_begin - * Returns: 1 if retry is required, else 0 + * __read_seqcount_retry() - end a seqcount_t read section w/o barrier + * @s: Pointer to seqcount_t + * @start: count, from read_seqcount_begin() * * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb() * barrier. Callers should ensure that smp_rmb() or equivalent ordering is @@ -211,6 +217,8 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s) * * Use carefully, only in critical code, and comment how the barrier is * provided. + * + * Return: true if a read section retry is required, else false */ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start) { @@ -219,14 +227,15 @@ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start) } /** - * read_seqcount_retry - end a seq-read critical section - * @s: pointer to seqcount_t - * @start: count, from read_seqcount_begin - * Returns: 1 if retry is required, else 0 + * read_seqcount_retry() - end a seqcount_t read critical section + * @s: Pointer to seqcount_t + * @start: count, from read_seqcount_begin() * - * read_seqcount_retry closes a read critical section of the given seqcount. - * If the critical section was invalid, it must be ignored (and typically - * retried). + * read_seqcount_retry closes the read critical section of given + * seqcount_t. If the critical section was invalid, it must be ignored + * (and typically retried). 
+ * + * Return: true if a read section retry is required, else false */ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) { @@ -234,6 +243,10 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) return __read_seqcount_retry(s, start); } +/** + * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep + * @s: Pointer to seqcount_t + */ static inline void raw_write_seqcount_begin(seqcount_t *s) { kcsan_nestable_atomic_begin(); @@ -241,6 +254,10 @@ static inline void raw_write_seqcount_begin(seqcount_t *s) smp_wmb(); } +/** + * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep + * @s: Pointer to seqcount_t + */ static inline void raw_write_seqcount_end(seqcount_t *s) { smp_wmb(); @@ -248,17 +265,42 @@ static inline void raw_write_seqcount_end(seqcount_t *s) kcsan_nestable_atomic_end(); } +/** + * write_seqcount_begin_nested() - start a seqcount_t write section with + * custom lockdep nesting level + * @s: Pointer to seqcount_t + * @subclass: lockdep nesting level + * + * See Documentation/locking/lockdep-design.rst + */ static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass) { raw_write_seqcount_begin(s); seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_); } +/** + * write_seqcount_begin() - start a seqcount_t write side critical section + * @s: Pointer to seqcount_t + * + * write_seqcount_begin opens a write side critical section of the given + * seqcount_t. + * + * Context: seqcount_t write side critical sections must be serialized and + * non-preemptible. If readers can be invoked from hardirq or softirq + * context, interrupts or bottom halves must be respectively disabled. + */ static inline void write_seqcount_begin(seqcount_t *s) { write_seqcount_begin_nested(s, 0); } +/** + * write_seqcount_end() - end a seqcount_t write side critical section + * @s: Pointer to seqcount_t + * + * The write section must've been opened with write_seqcount_begin(). + */ static inline void write_seqcount_end(seqcount_t *s) { seqcount_release(&s->dep_map, _RET_IP_); @@ -266,12 +308,12 @@ static inline void write_seqcount_end(seqcount_t *s) } /** - * raw_write_seqcount_barrier - do a seq write barrier - * @s: pointer to seqcount_t + * raw_write_seqcount_barrier() - do a seqcount_t write barrier + * @s: Pointer to seqcount_t * - * This can be used to provide an ordering guarantee instead of the - * usual consistency guarantee. It is one wmb cheaper, because we can - * collapse the two back-to-back wmb()s. + * This can be used to provide an ordering guarantee instead of the usual + * consistency guarantee. It is one wmb cheaper, because it can collapse + * the two back-to-back wmb()s. * * Note that writes surrounding the barrier should be declared atomic (e.g. * via WRITE_ONCE): a) to ensure the writes become visible to other threads @@ -316,11 +358,12 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s) } /** - * write_seqcount_invalidate - invalidate in-progress read-side seq operations - * @s: pointer to seqcount_t + * write_seqcount_invalidate() - invalidate in-progress seqcount_t read + * side operations + * @s: Pointer to seqcount_t * - * After write_seqcount_invalidate, no read-side seq operations will complete - * successfully and see data older than this. + * After write_seqcount_invalidate, no seqcount_t read side operations + * will complete successfully and see data older than this. 
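As a sketch of the write-side requirements just documented (serialized, non-preemptible writers), here is one hedged way a plain seqcount_t is typically paired with an external spinlock; stats_lock, stats_seq and the stats_* fields are illustrative names only:

#include <linux/spinlock.h>
#include <linux/seqlock.h>

static DEFINE_SPINLOCK(stats_lock);
static seqcount_t stats_seq = SEQCNT_ZERO(stats_seq);
static u64 stats_a, stats_b;

static void stats_update(u64 a, u64 b)
{
	spin_lock(&stats_lock);			/* serializes writers, non-preemptible */
	write_seqcount_begin(&stats_seq);
	stats_a = a;
	stats_b = b;
	write_seqcount_end(&stats_seq);
	spin_unlock(&stats_lock);
}

static u64 stats_read(void)
{
	unsigned int seq;
	u64 sum;

	do {
		seq = read_seqcount_begin(&stats_seq);
		sum = stats_a + stats_b;
	} while (read_seqcount_retry(&stats_seq, seq));

	return sum;
}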
*/ static inline void write_seqcount_invalidate(seqcount_t *s) { @@ -330,6 +373,21 @@ static inline void write_seqcount_invalidate(seqcount_t *s) kcsan_nestable_atomic_end(); } +/** + * raw_read_seqcount_latch() - pick even/odd seqcount_t latch data copy + * @s: Pointer to seqcount_t + * + * Use seqcount_t latching to switch between two storage places protected + * by a sequence counter. Doing so allows having interruptible, preemptible, + * seqcount_t write side critical sections. + * + * Check raw_write_seqcount_latch() for more details and a full reader and + * writer usage example. + * + * Return: sequence counter raw value. Use the lowest bit as an index for + * picking which data copy to read. The full counter value must then be + * checked with read_seqcount_retry(). + */ static inline int raw_read_seqcount_latch(seqcount_t *s) { /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */ @@ -338,8 +396,8 @@ static inline int raw_read_seqcount_latch(seqcount_t *s) } /** - * raw_write_seqcount_latch - redirect readers to even/odd copy - * @s: pointer to seqcount_t + * raw_write_seqcount_latch() - redirect readers to even/odd copy + * @s: Pointer to seqcount_t * * The latch technique is a multiversion concurrency control method that allows * queries during non-atomic modifications. If you can guarantee queries never @@ -446,17 +504,28 @@ typedef struct { .lock = __SPIN_LOCK_UNLOCKED(lockname) \ } -#define seqlock_init(x) \ +/** + * seqlock_init() - dynamic initializer for seqlock_t + * @sl: Pointer to the seqlock_t instance + */ +#define seqlock_init(sl) \ do { \ - seqcount_init(&(x)->seqcount); \ - spin_lock_init(&(x)->lock); \ + seqcount_init(&(sl)->seqcount); \ + spin_lock_init(&(sl)->lock); \ } while (0) -#define DEFINE_SEQLOCK(x) \ - seqlock_t x = __SEQLOCK_UNLOCKED(x) +/** + * DEFINE_SEQLOCK() - Define a statically allocated seqlock_t + * @sl: Name of the seqlock_t instance + */ +#define DEFINE_SEQLOCK(sl) \ + seqlock_t sl = __SEQLOCK_UNLOCKED(sl) -/* - * Read side functions for starting and finalizing a read side section. +/** + * read_seqbegin() - start a seqlock_t read side critical section + * @sl: Pointer to seqlock_t + * + * Return: count, to be passed to read_seqretry() */ static inline unsigned read_seqbegin(const seqlock_t *sl) { @@ -467,6 +536,17 @@ static inline unsigned read_seqbegin(const seqlock_t *sl) return ret; } +/** + * read_seqretry() - end a seqlock_t read side section + * @sl: Pointer to seqlock_t + * @start: count, from read_seqbegin() + * + * read_seqretry closes the read side critical section of given seqlock_t. + * If the critical section was invalid, it must be ignored (and typically + * retried). + * + * Return: true if a read section retry is required, else false + */ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) { /* @@ -478,10 +558,18 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) return read_seqcount_retry(&sl->seqcount, start); } -/* - * Lock out other writers and update the count. - * Acts like a normal spin_lock/unlock. - * Don't need preempt_disable() because that is in the spin_lock already. +/** + * write_seqlock() - start a seqlock_t write side critical section + * @sl: Pointer to seqlock_t + * + * write_seqlock opens a write side critical section for the given + * seqlock_t. It also implicitly acquires the spinlock_t embedded inside + * that sequential lock. All seqlock_t write side sections are thus + * automatically serialized and non-preemptible. 
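A hedged sketch of the seqlock_t write side described here, including the _irqsave variant for when read sections may run in hardirq context; bar_lock, bar_lo and bar_hi are hypothetical names:

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(bar_lock);
static u64 bar_lo, bar_hi;

static void bar_set(u64 lo, u64 hi)
{
	write_seqlock(&bar_lock);	/* takes the embedded spinlock_t */
	bar_lo = lo;
	bar_hi = hi;
	write_sequnlock(&bar_lock);
}

static void bar_set_irqsafe(u64 lo, u64 hi)
{
	unsigned long flags;

	write_seqlock_irqsave(&bar_lock, flags);
	bar_lo = lo;
	bar_hi = hi;
	write_sequnlock_irqrestore(&bar_lock, flags);
}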
+ * + * Context: if the seqlock_t read section, or other write side critical + * sections, can be invoked from hardirq or softirq contexts, use the + * _irqsave or _bh variants of this function instead. */ static inline void write_seqlock(seqlock_t *sl) { @@ -489,30 +577,66 @@ static inline void write_seqlock(seqlock_t *sl) write_seqcount_begin(&sl->seqcount); } +/** + * write_sequnlock() - end a seqlock_t write side critical section + * @sl: Pointer to seqlock_t + * + * write_sequnlock closes the (serialized and non-preemptible) write side + * critical section of given seqlock_t. + */ static inline void write_sequnlock(seqlock_t *sl) { write_seqcount_end(&sl->seqcount); spin_unlock(&sl->lock); } +/** + * write_seqlock_bh() - start a softirqs-disabled seqlock_t write section + * @sl: Pointer to seqlock_t + * + * _bh variant of write_seqlock(). Use only if the read side section, or + * other write side sections, can be invoked from softirq contexts. + */ static inline void write_seqlock_bh(seqlock_t *sl) { spin_lock_bh(&sl->lock); write_seqcount_begin(&sl->seqcount); } +/** + * write_sequnlock_bh() - end a softirqs-disabled seqlock_t write section + * @sl: Pointer to seqlock_t + * + * write_sequnlock_bh closes the serialized, non-preemptible, and + * softirqs-disabled, seqlock_t write side critical section opened with + * write_seqlock_bh(). + */ static inline void write_sequnlock_bh(seqlock_t *sl) { write_seqcount_end(&sl->seqcount); spin_unlock_bh(&sl->lock); } +/** + * write_seqlock_irq() - start a non-interruptible seqlock_t write section + * @sl: Pointer to seqlock_t + * + * _irq variant of write_seqlock(). Use only if the read side section, or + * other write sections, can be invoked from hardirq contexts. + */ static inline void write_seqlock_irq(seqlock_t *sl) { spin_lock_irq(&sl->lock); write_seqcount_begin(&sl->seqcount); } +/** + * write_sequnlock_irq() - end a non-interruptible seqlock_t write section + * @sl: Pointer to seqlock_t + * + * write_sequnlock_irq closes the serialized and non-interruptible + * seqlock_t write side section opened with write_seqlock_irq(). + */ static inline void write_sequnlock_irq(seqlock_t *sl) { write_seqcount_end(&sl->seqcount); @@ -528,9 +652,28 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) return flags; } +/** + * write_seqlock_irqsave() - start a non-interruptible seqlock_t write + * section + * @lock: Pointer to seqlock_t + * @flags: Stack-allocated storage for saving caller's local interrupt + * state, to be passed to write_sequnlock_irqrestore(). + * + * _irqsave variant of write_seqlock(). Use it only if the read side + * section, or other write sections, can be invoked from hardirq context. + */ #define write_seqlock_irqsave(lock, flags) \ do { flags = __write_seqlock_irqsave(lock); } while (0) +/** + * write_sequnlock_irqrestore() - end non-interruptible seqlock_t write + * section + * @sl: Pointer to seqlock_t + * @flags: Caller's saved interrupt state, from write_seqlock_irqsave() + * + * write_sequnlock_irqrestore closes the serialized and non-interruptible + * seqlock_t write section previously opened with write_seqlock_irqsave(). + */ static inline void write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) { @@ -538,36 +681,79 @@ write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) spin_unlock_irqrestore(&sl->lock, flags); } -/* - * A locking reader exclusively locks out other writers and locking readers, - * but doesn't update the sequence number. Acts like a normal spin_lock/unlock. 
- * Don't need preempt_disable() because that is in the spin_lock already. +/** + * read_seqlock_excl() - begin a seqlock_t locking reader section + * @sl: Pointer to seqlock_t + * + * read_seqlock_excl opens a seqlock_t locking reader critical section. A + * locking reader exclusively locks out *both* other writers *and* other + * locking readers, but it does not update the embedded sequence number. + * + * Locking readers act like a normal spin_lock()/spin_unlock(). + * + * Context: if the seqlock_t write section, *or other read sections*, can + * be invoked from hardirq or softirq contexts, use the _irqsave or _bh + * variant of this function instead. + * + * The opened read section must be closed with read_sequnlock_excl(). */ static inline void read_seqlock_excl(seqlock_t *sl) { spin_lock(&sl->lock); } +/** + * read_sequnlock_excl() - end a seqlock_t locking reader critical section + * @sl: Pointer to seqlock_t + */ static inline void read_sequnlock_excl(seqlock_t *sl) { spin_unlock(&sl->lock); } +/** + * read_seqlock_excl_bh() - start a seqlock_t locking reader section with + * softirqs disabled + * @sl: Pointer to seqlock_t + * + * _bh variant of read_seqlock_excl(). Use this variant only if the + * seqlock_t write side section, *or other read sections*, can be invoked + * from softirq contexts. + */ static inline void read_seqlock_excl_bh(seqlock_t *sl) { spin_lock_bh(&sl->lock); } +/** + * read_sequnlock_excl_bh() - stop a seqlock_t softirq-disabled locking + * reader section + * @sl: Pointer to seqlock_t + */ static inline void read_sequnlock_excl_bh(seqlock_t *sl) { spin_unlock_bh(&sl->lock); } +/** + * read_seqlock_excl_irq() - start a non-interruptible seqlock_t locking + * reader section + * @sl: Pointer to seqlock_t + * + * _irq variant of read_seqlock_excl(). Use this only if the seqlock_t + * write side section, *or other read sections*, can be invoked from a + * hardirq context. + */ static inline void read_seqlock_excl_irq(seqlock_t *sl) { spin_lock_irq(&sl->lock); } +/** + * read_sequnlock_excl_irq() - end an interrupts-disabled seqlock_t + * locking reader section + * @sl: Pointer to seqlock_t + */ static inline void read_sequnlock_excl_irq(seqlock_t *sl) { spin_unlock_irq(&sl->lock); @@ -581,9 +767,26 @@ static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl) return flags; } +/** + * read_seqlock_excl_irqsave() - start a non-interruptible seqlock_t + * locking reader section + * @lock: Pointer to seqlock_t + * @flags: Stack-allocated storage for saving caller's local interrupt + * state, to be passed to read_sequnlock_excl_irqrestore(). + * + * _irqsave variant of read_seqlock_excl(). Use this only if the seqlock_t + * write side section, *or other read sections*, can be invoked from a + * hardirq context. + */ #define read_seqlock_excl_irqsave(lock, flags) \ do { flags = __read_seqlock_excl_irqsave(lock); } while (0) +/** + * read_sequnlock_excl_irqrestore() - end non-interruptible seqlock_t + * locking reader section + * @sl: Pointer to seqlock_t + * @flags: Caller saved interrupt state, from read_seqlock_excl_irqsave() + */ static inline void read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags) { @@ -591,14 +794,35 @@ read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags) } /** - * read_seqbegin_or_lock - begin a sequence number check or locking block - * @lock: sequence lock - * @seq : sequence number to be checked - * - * First try it once optimistically without taking the lock. If that fails, - * take the lock. 
The sequence number is also used as a marker for deciding - * whether to be a reader (even) or writer (odd). - * N.B. seq must be initialized to an even number to begin with. + * read_seqbegin_or_lock() - begin a seqlock_t lockless or locking reader + * @lock: Pointer to seqlock_t + * @seq : Marker and return parameter. If the passed value is even, the + * reader will become a *lockless* seqlock_t reader as in read_seqbegin(). + * If the passed value is odd, the reader will become a *locking* reader + * as in read_seqlock_excl(). In the first call to this function, the + * caller *must* initialize and pass an even value to @seq; this way, a + * lockless read can be optimistically tried first. + * + * read_seqbegin_or_lock is an API designed to optimistically try a normal + * lockless seqlock_t read section first. If an odd counter is found, the + * lockless read trial has failed, and the next read iteration transforms + * itself into a full seqlock_t locking reader. + * + * This is typically used to avoid seqlock_t lockless readers starvation + * (too much retry loops) in the case of a sharp spike in write side + * activity. + * + * Context: if the seqlock_t write section, *or other read sections*, can + * be invoked from hardirq or softirq contexts, use the _irqsave or _bh + * variant of this function instead. + * + * Check Documentation/locking/seqlock.rst for template example code. + * + * Return: the encountered sequence counter value, through the @seq + * parameter, which is overloaded as a return parameter. This returned + * value must be checked with need_seqretry(). If the read section need to + * be retried, this returned value must also be passed as the @seq + * parameter of the next read_seqbegin_or_lock() iteration. */ static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq) { @@ -608,17 +832,52 @@ static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq) read_seqlock_excl(lock); } +/** + * need_seqretry() - validate seqlock_t "locking or lockless" read section + * @lock: Pointer to seqlock_t + * @seq: sequence count, from read_seqbegin_or_lock() + * + * Return: true if a read section retry is required, false otherwise + */ static inline int need_seqretry(seqlock_t *lock, int seq) { return !(seq & 1) && read_seqretry(lock, seq); } +/** + * done_seqretry() - end seqlock_t "locking or lockless" reader section + * @lock: Pointer to seqlock_t + * @seq: count, from read_seqbegin_or_lock() + * + * done_seqretry finishes the seqlock_t read side critical section started + * with read_seqbegin_or_lock() and validated by need_seqretry(). + */ static inline void done_seqretry(seqlock_t *lock, int seq) { if (seq & 1) read_sequnlock_excl(lock); } +/** + * read_seqbegin_or_lock_irqsave() - begin a seqlock_t lockless reader, or + * a non-interruptible locking reader + * @lock: Pointer to seqlock_t + * @seq: Marker and return parameter. Check read_seqbegin_or_lock(). + * + * This is the _irqsave variant of read_seqbegin_or_lock(). Use it only if + * the seqlock_t write section, *or other read sections*, can be invoked + * from hardirq context. + * + * Note: Interrupts will be disabled only for "locking reader" mode. + * + * Return: + * + * 1. The saved local interrupts state in case of a locking reader, to + * be passed to done_seqretry_irqrestore(). + * + * 2. The encountered sequence counter value, returned through @seq + * overloaded as a return parameter. Check read_seqbegin_or_lock(). 
+ */ static inline unsigned long read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq) { @@ -632,6 +891,18 @@ read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq) return flags; } +/** + * done_seqretry_irqrestore() - end a seqlock_t lockless reader, or a + * non-interruptible locking reader section + * @lock: Pointer to seqlock_t + * @seq: Count, from read_seqbegin_or_lock_irqsave() + * @flags: Caller's saved local interrupt state in case of a locking + * reader, also from read_seqbegin_or_lock_irqsave() + * + * This is the _irqrestore variant of done_seqretry(). The read section + * must've been opened with read_seqbegin_or_lock_irqsave(), and validated + * by need_seqretry(). + */ static inline void done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags) { -- cgit v1.2.3 From 932e46365226324d2cf26d8bdec8b51ceb296948 Mon Sep 17 00:00:00 2001 From: "Ahmed S. Darwish" Date: Mon, 20 Jul 2020 17:55:12 +0200 Subject: seqlock: Implement raw_seqcount_begin() in terms of raw_read_seqcount() raw_seqcount_begin() has the same code as raw_read_seqcount(), with the exception of masking the sequence counter's LSB before returning it to the caller. Note, raw_seqcount_begin() masks the counter's LSB before returning it to the caller so that read_seqcount_retry() can fail if the counter is odd -- without the overhead of an extra branching instruction. Signed-off-by: Ahmed S. Darwish Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200720155530.1173732-7-a.darwish@linutronix.de --- include/linux/seqlock.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 85fb3ac93ffb..e885702d8b82 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -199,10 +199,11 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s) */ static inline unsigned raw_seqcount_begin(const seqcount_t *s) { - unsigned ret = READ_ONCE(s->sequence); - smp_rmb(); - kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); - return ret & ~1; + /* + * If the counter is odd, let read_seqcount_retry() fail + * by decrementing the counter. + */ + return raw_read_seqcount(s) & ~1; } /** -- cgit v1.2.3 From 8fd8ad5c5dfcb09cf62abadd4043eaf1afbbd0ce Mon Sep 17 00:00:00 2001 From: "Ahmed S. Darwish" Date: Mon, 20 Jul 2020 17:55:13 +0200 Subject: lockdep: Add preemption enabled/disabled assertion APIs Asserting that preemption is enabled or disabled is a critical sanity check. Developers are usually reluctant to add such a check in a fastpath as reading the preemption count can be costly. Extend the lockdep API with macros asserting that preemption is disabled or enabled. If lockdep is disabled, or if the underlying architecture does not support kernel preemption, this assert has no runtime overhead. References: f54bb2ec02c8 ("locking/lockdep: Add IRQs disabled/enabled assertion APIs: ...") Signed-off-by: Ahmed S. 
Darwish Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200720155530.1173732-8-a.darwish@linutronix.de --- include/linux/lockdep.h | 19 +++++++++++++++++++ lib/Kconfig.debug | 1 + 2 files changed, 20 insertions(+) (limited to 'include') diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 7aafba0ddcf9..39a35699d0d6 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -549,6 +549,22 @@ do { \ WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirq_context)); \ } while (0) +#define lockdep_assert_preemption_enabled() \ +do { \ + WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \ + debug_locks && \ + (preempt_count() != 0 || \ + !this_cpu_read(hardirqs_enabled))); \ +} while (0) + +#define lockdep_assert_preemption_disabled() \ +do { \ + WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \ + debug_locks && \ + (preempt_count() == 0 && \ + this_cpu_read(hardirqs_enabled))); \ +} while (0) + #else # define might_lock(lock) do { } while (0) # define might_lock_read(lock) do { } while (0) @@ -557,6 +573,9 @@ do { \ # define lockdep_assert_irqs_enabled() do { } while (0) # define lockdep_assert_irqs_disabled() do { } while (0) # define lockdep_assert_in_irq() do { } while (0) + +# define lockdep_assert_preemption_enabled() do { } while (0) +# define lockdep_assert_preemption_disabled() do { } while (0) #endif #ifdef CONFIG_PROVE_RAW_LOCK_NESTING diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 9ad9210d70a1..5379931ba3b5 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1117,6 +1117,7 @@ config PROVE_LOCKING select DEBUG_RWSEMS select DEBUG_WW_MUTEX_SLOWPATH select DEBUG_LOCK_ALLOC + select PREEMPT_COUNT if !ARCH_NO_PREEMPT select TRACE_IRQFLAGS default n help -- cgit v1.2.3 From 859247d39fb008ea812e8f0c398a58a20c12899e Mon Sep 17 00:00:00 2001 From: "Ahmed S. Darwish" Date: Mon, 20 Jul 2020 17:55:14 +0200 Subject: seqlock: lockdep assert non-preemptibility on seqcount_t write Preemption must be disabled before entering a sequence count write side critical section. Failing to do so, the seqcount read side can preempt the write side section and spin for the entire scheduler tick. If that reader belongs to a real-time scheduling class, it can spin forever and the kernel will livelock. Assert through lockdep that preemption is disabled for seqcount writers. Signed-off-by: Ahmed S. 
Darwish Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200720155530.1173732-9-a.darwish@linutronix.de --- include/linux/seqlock.h | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index e885702d8b82..54bc20496392 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -266,6 +266,12 @@ static inline void raw_write_seqcount_end(seqcount_t *s) kcsan_nestable_atomic_end(); } +static inline void __write_seqcount_begin_nested(seqcount_t *s, int subclass) +{ + raw_write_seqcount_begin(s); + seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_); +} + /** * write_seqcount_begin_nested() - start a seqcount_t write section with * custom lockdep nesting level @@ -276,8 +282,19 @@ static inline void raw_write_seqcount_end(seqcount_t *s) */ static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass) { - raw_write_seqcount_begin(s); - seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_); + lockdep_assert_preemption_disabled(); + __write_seqcount_begin_nested(s, subclass); +} + +/* + * A write_seqcount_begin() variant w/o lockdep non-preemptibility checks. + * + * Use for internal seqlock.h code where it's known that preemption is + * already disabled. For example, seqlock_t write side functions. + */ +static inline void __write_seqcount_begin(seqcount_t *s) +{ + __write_seqcount_begin_nested(s, 0); } /** @@ -575,7 +592,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) static inline void write_seqlock(seqlock_t *sl) { spin_lock(&sl->lock); - write_seqcount_begin(&sl->seqcount); + __write_seqcount_begin(&sl->seqcount); } /** @@ -601,7 +618,7 @@ static inline void write_sequnlock(seqlock_t *sl) static inline void write_seqlock_bh(seqlock_t *sl) { spin_lock_bh(&sl->lock); - write_seqcount_begin(&sl->seqcount); + __write_seqcount_begin(&sl->seqcount); } /** @@ -628,7 +645,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl) static inline void write_seqlock_irq(seqlock_t *sl) { spin_lock_irq(&sl->lock); - write_seqcount_begin(&sl->seqcount); + __write_seqcount_begin(&sl->seqcount); } /** @@ -649,7 +666,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) unsigned long flags; spin_lock_irqsave(&sl->lock, flags); - write_seqcount_begin(&sl->seqcount); + __write_seqcount_begin(&sl->seqcount); return flags; } -- cgit v1.2.3 From 55f3560df975f557c48aa6afc636808f31ecb87a Mon Sep 17 00:00:00 2001 From: "Ahmed S. Darwish" Date: Mon, 20 Jul 2020 17:55:15 +0200 Subject: seqlock: Extend seqcount API with associated locks A sequence counter write side critical section must be protected by some form of locking to serialize writers. If the serialization primitive is not disabling preemption implicitly, preemption has to be explicitly disabled before entering the write side critical section. There is no built-in debugging mechanism to verify that the lock used for writer serialization is held and preemption is disabled. Some usage sites like dma-buf have explicit lockdep checks for the writer-side lock, but this covers only a small portion of the sequence counter usage in the kernel. Add new sequence counter types which allows to associate a lock to the sequence counter at initialization time. The seqcount API functions are extended to provide appropriate lockdep assertions depending on the seqcount/lock type. 
For sequence counters with associated locks that do not implicitly disable preemption, preemption protection is enforced in the sequence counter write side functions. This removes the need to explicitly add preempt_disable/enable() around the write side critical sections: the write_begin/end() functions for these new sequence counter types automatically do this. Introduce the following seqcount types with associated locks: seqcount_spinlock_t seqcount_raw_spinlock_t seqcount_rwlock_t seqcount_mutex_t seqcount_ww_mutex_t Extend the seqcount read and write functions to branch out to the specific seqcount_LOCKTYPE_t implementation at compile-time. This avoids kernel API explosion per each new seqcount_LOCKTYPE_t added. Add such compile-time type detection logic into a new, internal, seqlock header. Document the proper seqcount_LOCKTYPE_t usage, and rationale, at Documentation/locking/seqlock.rst. If lockdep is disabled, this lock association is compiled out and has neither storage size nor runtime overhead. Signed-off-by: Ahmed S. Darwish Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200720155530.1173732-10-a.darwish@linutronix.de --- Documentation/locking/seqlock.rst | 52 +++++ include/linux/seqlock.h | 464 ++++++++++++++++++++++++++++++++------ 2 files changed, 447 insertions(+), 69 deletions(-) (limited to 'include') diff --git a/Documentation/locking/seqlock.rst b/Documentation/locking/seqlock.rst index 366dd368d90a..62c5ad98c11c 100644 --- a/Documentation/locking/seqlock.rst +++ b/Documentation/locking/seqlock.rst @@ -87,6 +87,58 @@ Read path:: } while (read_seqcount_retry(&foo_seqcount, seq)); +.. _seqcount_locktype_t: + +Sequence counters with associated locks (``seqcount_LOCKTYPE_t``) +----------------------------------------------------------------- + +As discussed at :ref:`seqcount_t`, sequence count write side critical +sections must be serialized and non-preemptible. This variant of +sequence counters associate the lock used for writer serialization at +initialization time, which enables lockdep to validate that the write +side critical sections are properly serialized. + +This lock association is a NOOP if lockdep is disabled and has neither +storage nor runtime overhead. If lockdep is enabled, the lock pointer is +stored in struct seqcount and lockdep's "lock is held" assertions are +injected at the beginning of the write side critical section to validate +that it is properly protected. + +For lock types which do not implicitly disable preemption, preemption +protection is enforced in the write side function. + +The following sequence counters with associated locks are defined: + + - ``seqcount_spinlock_t`` + - ``seqcount_raw_spinlock_t`` + - ``seqcount_rwlock_t`` + - ``seqcount_mutex_t`` + - ``seqcount_ww_mutex_t`` + +The plain seqcount read and write APIs branch out to the specific +seqcount_LOCKTYPE_t implementation at compile-time. This avoids kernel +API explosion per each new seqcount LOCKTYPE. + +Initialization (replace "LOCKTYPE" with one of the supported locks):: + + /* dynamic */ + seqcount_LOCKTYPE_t foo_seqcount; + seqcount_LOCKTYPE_init(&foo_seqcount, &lock); + + /* static */ + static seqcount_LOCKTYPE_t foo_seqcount = + SEQCNT_LOCKTYPE_ZERO(foo_seqcount, &lock); + + /* C99 struct init */ + struct { + .seq = SEQCNT_LOCKTYPE_ZERO(foo.seq, &lock), + } foo; + +Write path: same as in :ref:`seqcount_t`, while running from a context +with the associated LOCKTYPE lock acquired. + +Read path: same as in :ref:`seqcount_t`. + .. 
_seqlock_t: Sequential locks (``seqlock_t``) diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 54bc20496392..8c16a494c968 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -10,13 +10,17 @@ * * Copyrights: * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli + * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH */ -#include -#include -#include #include #include +#include +#include +#include +#include +#include + #include /* @@ -48,6 +52,10 @@ * This mechanism can't be used if the protected data contains pointers, * as the writer can invalidate a pointer that a reader is following. * + * If the write serialization mechanism is one of the common kernel + * locking primitives, use a sequence counter with associated lock + * (seqcount_LOCKTYPE_t) instead. + * * If it's desired to automatically handle the sequence counter writer * serialization and non-preemptibility requirements, use a sequential * lock (seqlock_t) instead. @@ -108,9 +116,267 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) */ #define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) } +/* + * Sequence counters with associated locks (seqcount_LOCKTYPE_t) + * + * A sequence counter which associates the lock used for writer + * serialization at initialization time. This enables lockdep to validate + * that the write side critical section is properly serialized. + * + * For associated locks which do not implicitly disable preemption, + * preemption protection is enforced in the write side function. + * + * Lockdep is never used in any for the raw write variants. + * + * See Documentation/locking/seqlock.rst + */ + +#ifdef CONFIG_LOCKDEP +#define __SEQ_LOCKDEP(expr) expr +#else +#define __SEQ_LOCKDEP(expr) +#endif + +#define SEQCOUNT_LOCKTYPE_ZERO(seq_name, assoc_lock) { \ + .seqcount = SEQCNT_ZERO(seq_name.seqcount), \ + __SEQ_LOCKDEP(.lock = (assoc_lock)) \ +} + +#define seqcount_locktype_init(s, assoc_lock) \ +do { \ + seqcount_init(&(s)->seqcount); \ + __SEQ_LOCKDEP((s)->lock = (assoc_lock)); \ +} while (0) + +/** + * typedef seqcount_spinlock_t - sequence counter with spinlock associated + * @seqcount: The real sequence counter + * @lock: Pointer to the associated spinlock + * + * A plain sequence counter with external writer synchronization by a + * spinlock. The spinlock is associated to the sequence count in the + * static initializer or init function. This enables lockdep to validate + * that the write side critical section is properly serialized. + */ +typedef struct seqcount_spinlock { + seqcount_t seqcount; + __SEQ_LOCKDEP(spinlock_t *lock); +} seqcount_spinlock_t; + +/** + * SEQCNT_SPINLOCK_ZERO - static initializer for seqcount_spinlock_t + * @name: Name of the seqcount_spinlock_t instance + * @lock: Pointer to the associated spinlock + */ +#define SEQCNT_SPINLOCK_ZERO(name, lock) \ + SEQCOUNT_LOCKTYPE_ZERO(name, lock) + +/** + * seqcount_spinlock_init - runtime initializer for seqcount_spinlock_t + * @s: Pointer to the seqcount_spinlock_t instance + * @lock: Pointer to the associated spinlock + */ +#define seqcount_spinlock_init(s, lock) \ + seqcount_locktype_init(s, lock) + +/** + * typedef seqcount_raw_spinlock_t - sequence count with raw spinlock associated + * @seqcount: The real sequence counter + * @lock: Pointer to the associated raw spinlock + * + * A plain sequence counter with external writer synchronization by a + * raw spinlock. 
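To make the new associated-lock variant concrete, a hedged usage sketch of seqcount_spinlock_t; struct foo_dev and its members are illustrative names, not part of this patch:

#include <linux/spinlock.h>
#include <linux/seqlock.h>

struct foo_dev {
	spinlock_t		lock;
	seqcount_spinlock_t	seq;	/* associated with ->lock */
	u64			stat_a, stat_b;
};

static void foo_dev_init(struct foo_dev *dev)
{
	spin_lock_init(&dev->lock);
	seqcount_spinlock_init(&dev->seq, &dev->lock);
}

static void foo_dev_update(struct foo_dev *dev, u64 a, u64 b)
{
	spin_lock(&dev->lock);		/* the associated lock must be held */
	write_seqcount_begin(&dev->seq);/* lockdep asserts dev->lock is held */
	dev->stat_a = a;
	dev->stat_b = b;
	write_seqcount_end(&dev->seq);
	spin_unlock(&dev->lock);
}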
The raw spinlock is associated to the sequence count in + * the static initializer or init function. This enables lockdep to + * validate that the write side critical section is properly serialized. + */ +typedef struct seqcount_raw_spinlock { + seqcount_t seqcount; + __SEQ_LOCKDEP(raw_spinlock_t *lock); +} seqcount_raw_spinlock_t; + +/** + * SEQCNT_RAW_SPINLOCK_ZERO - static initializer for seqcount_raw_spinlock_t + * @name: Name of the seqcount_raw_spinlock_t instance + * @lock: Pointer to the associated raw_spinlock + */ +#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) \ + SEQCOUNT_LOCKTYPE_ZERO(name, lock) + +/** + * seqcount_raw_spinlock_init - runtime initializer for seqcount_raw_spinlock_t + * @s: Pointer to the seqcount_raw_spinlock_t instance + * @lock: Pointer to the associated raw_spinlock + */ +#define seqcount_raw_spinlock_init(s, lock) \ + seqcount_locktype_init(s, lock) + +/** + * typedef seqcount_rwlock_t - sequence count with rwlock associated + * @seqcount: The real sequence counter + * @lock: Pointer to the associated rwlock + * + * A plain sequence counter with external writer synchronization by a + * rwlock. The rwlock is associated to the sequence count in the static + * initializer or init function. This enables lockdep to validate that + * the write side critical section is properly serialized. + */ +typedef struct seqcount_rwlock { + seqcount_t seqcount; + __SEQ_LOCKDEP(rwlock_t *lock); +} seqcount_rwlock_t; + +/** + * SEQCNT_RWLOCK_ZERO - static initializer for seqcount_rwlock_t + * @name: Name of the seqcount_rwlock_t instance + * @lock: Pointer to the associated rwlock + */ +#define SEQCNT_RWLOCK_ZERO(name, lock) \ + SEQCOUNT_LOCKTYPE_ZERO(name, lock) + +/** + * seqcount_rwlock_init - runtime initializer for seqcount_rwlock_t + * @s: Pointer to the seqcount_rwlock_t instance + * @lock: Pointer to the associated rwlock + */ +#define seqcount_rwlock_init(s, lock) \ + seqcount_locktype_init(s, lock) + +/** + * typedef seqcount_mutex_t - sequence count with mutex associated + * @seqcount: The real sequence counter + * @lock: Pointer to the associated mutex + * + * A plain sequence counter with external writer synchronization by a + * mutex. The mutex is associated to the sequence counter in the static + * initializer or init function. This enables lockdep to validate that + * the write side critical section is properly serialized. + * + * The write side API functions write_seqcount_begin()/end() automatically + * disable and enable preemption when used with seqcount_mutex_t. + */ +typedef struct seqcount_mutex { + seqcount_t seqcount; + __SEQ_LOCKDEP(struct mutex *lock); +} seqcount_mutex_t; + +/** + * SEQCNT_MUTEX_ZERO - static initializer for seqcount_mutex_t + * @name: Name of the seqcount_mutex_t instance + * @lock: Pointer to the associated mutex + */ +#define SEQCNT_MUTEX_ZERO(name, lock) \ + SEQCOUNT_LOCKTYPE_ZERO(name, lock) + +/** + * seqcount_mutex_init - runtime initializer for seqcount_mutex_t + * @s: Pointer to the seqcount_mutex_t instance + * @lock: Pointer to the associated mutex + */ +#define seqcount_mutex_init(s, lock) \ + seqcount_locktype_init(s, lock) + +/** + * typedef seqcount_ww_mutex_t - sequence count with ww_mutex associated + * @seqcount: The real sequence counter + * @lock: Pointer to the associated ww_mutex + * + * A plain sequence counter with external writer synchronization by a + * ww_mutex. The ww_mutex is associated to the sequence counter in the static + * initializer or init function. 
This enables lockdep to validate that + * the write side critical section is properly serialized. + * + * The write side API functions write_seqcount_begin()/end() automatically + * disable and enable preemption when used with seqcount_ww_mutex_t. + */ +typedef struct seqcount_ww_mutex { + seqcount_t seqcount; + __SEQ_LOCKDEP(struct ww_mutex *lock); +} seqcount_ww_mutex_t; + +/** + * SEQCNT_WW_MUTEX_ZERO - static initializer for seqcount_ww_mutex_t + * @name: Name of the seqcount_ww_mutex_t instance + * @lock: Pointer to the associated ww_mutex + */ +#define SEQCNT_WW_MUTEX_ZERO(name, lock) \ + SEQCOUNT_LOCKTYPE_ZERO(name, lock) + +/** + * seqcount_ww_mutex_init - runtime initializer for seqcount_ww_mutex_t + * @s: Pointer to the seqcount_ww_mutex_t instance + * @lock: Pointer to the associated ww_mutex + */ +#define seqcount_ww_mutex_init(s, lock) \ + seqcount_locktype_init(s, lock) + +/* + * @preempt: Is the associated write serialization lock preemtpible? + */ +#define SEQCOUNT_LOCKTYPE(locktype, preempt, lockmember) \ +static inline seqcount_t * \ +__seqcount_##locktype##_ptr(seqcount_##locktype##_t *s) \ +{ \ + return &s->seqcount; \ +} \ + \ +static inline bool \ +__seqcount_##locktype##_preemptible(seqcount_##locktype##_t *s) \ +{ \ + return preempt; \ +} \ + \ +static inline void \ +__seqcount_##locktype##_assert(seqcount_##locktype##_t *s) \ +{ \ + __SEQ_LOCKDEP(lockdep_assert_held(lockmember)); \ +} + +/* + * Similar hooks, but for plain seqcount_t + */ + +static inline seqcount_t *__seqcount_ptr(seqcount_t *s) +{ + return s; +} + +static inline bool __seqcount_preemptible(seqcount_t *s) +{ + return false; +} + +static inline void __seqcount_assert(seqcount_t *s) +{ + lockdep_assert_preemption_disabled(); +} + +/* + * @s: Pointer to seqcount_locktype_t, generated hooks first parameter. + */ +SEQCOUNT_LOCKTYPE(raw_spinlock, false, s->lock) +SEQCOUNT_LOCKTYPE(spinlock, false, s->lock) +SEQCOUNT_LOCKTYPE(rwlock, false, s->lock) +SEQCOUNT_LOCKTYPE(mutex, true, s->lock) +SEQCOUNT_LOCKTYPE(ww_mutex, true, &s->lock->base) + +#define __seqprop_case(s, locktype, prop) \ + seqcount_##locktype##_t: __seqcount_##locktype##_##prop((void *)(s)) + +#define __seqprop(s, prop) _Generic(*(s), \ + seqcount_t: __seqcount_##prop((void *)(s)), \ + __seqprop_case((s), raw_spinlock, prop), \ + __seqprop_case((s), spinlock, prop), \ + __seqprop_case((s), rwlock, prop), \ + __seqprop_case((s), mutex, prop), \ + __seqprop_case((s), ww_mutex, prop)) + +#define __to_seqcount_t(s) __seqprop(s, ptr) +#define __associated_lock_exists_and_is_preemptible(s) __seqprop(s, preemptible) +#define __assert_write_section_is_protected(s) __seqprop(s, assert) + /** * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb() * barrier. 
Callers should ensure that smp_rmb() or equivalent ordering is @@ -122,7 +388,10 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) * * Return: count to be passed to read_seqcount_retry() */ -static inline unsigned __read_seqcount_begin(const seqcount_t *s) +#define __read_seqcount_begin(s) \ + __read_seqcount_t_begin(__to_seqcount_t(s)) + +static inline unsigned __read_seqcount_t_begin(const seqcount_t *s) { unsigned ret; @@ -138,32 +407,38 @@ repeat: /** * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * Return: count to be passed to read_seqcount_retry() */ -static inline unsigned raw_read_seqcount_begin(const seqcount_t *s) +#define raw_read_seqcount_begin(s) \ + raw_read_seqcount_t_begin(__to_seqcount_t(s)) + +static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s) { - unsigned ret = __read_seqcount_begin(s); + unsigned ret = __read_seqcount_t_begin(s); smp_rmb(); return ret; } /** * read_seqcount_begin() - begin a seqcount_t read critical section - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * Return: count to be passed to read_seqcount_retry() */ -static inline unsigned read_seqcount_begin(const seqcount_t *s) +#define read_seqcount_begin(s) \ + read_seqcount_t_begin(__to_seqcount_t(s)) + +static inline unsigned read_seqcount_t_begin(const seqcount_t *s) { seqcount_lockdep_reader_access(s); - return raw_read_seqcount_begin(s); + return raw_read_seqcount_t_begin(s); } /** * raw_read_seqcount() - read the raw seqcount_t counter value - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * raw_read_seqcount opens a read critical section of the given * seqcount_t, without any lockdep checking, and without checking or @@ -172,7 +447,10 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s) * * Return: count to be passed to read_seqcount_retry() */ -static inline unsigned raw_read_seqcount(const seqcount_t *s) +#define raw_read_seqcount(s) \ + raw_read_seqcount_t(__to_seqcount_t(s)) + +static inline unsigned raw_read_seqcount_t(const seqcount_t *s) { unsigned ret = READ_ONCE(s->sequence); smp_rmb(); @@ -183,7 +461,7 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s) /** * raw_seqcount_begin() - begin a seqcount_t read critical section w/o * lockdep and w/o counter stabilization - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * raw_seqcount_begin opens a read critical section of the given * seqcount_t. Unlike read_seqcount_begin(), this function will not wait @@ -197,18 +475,21 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s) * * Return: count to be passed to read_seqcount_retry() */ -static inline unsigned raw_seqcount_begin(const seqcount_t *s) +#define raw_seqcount_begin(s) \ + raw_seqcount_t_begin(__to_seqcount_t(s)) + +static inline unsigned raw_seqcount_t_begin(const seqcount_t *s) { /* * If the counter is odd, let read_seqcount_retry() fail * by decrementing the counter. 
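As a hedged read-side sketch, reusing the hypothetical struct foo_dev from the earlier example: the _Generic() dispatch above lets the same read API operate on both seqcount_t and the seqcount_LOCKTYPE_t variants without any caller changes:

static u64 foo_dev_read(struct foo_dev *dev)
{
	unsigned int seq;
	u64 sum;

	do {
		/* same call as for a plain seqcount_t; _Generic() picks the hook */
		seq = read_seqcount_begin(&dev->seq);
		sum = dev->stat_a + dev->stat_b;
	} while (read_seqcount_retry(&dev->seq, seq));

	return sum;
}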
*/ - return raw_read_seqcount(s) & ~1; + return raw_read_seqcount_t(s) & ~1; } /** * __read_seqcount_retry() - end a seqcount_t read section w/o barrier - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * @start: count, from read_seqcount_begin() * * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb() @@ -221,7 +502,10 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s) * * Return: true if a read section retry is required, else false */ -static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start) +#define __read_seqcount_retry(s, start) \ + __read_seqcount_t_retry(__to_seqcount_t(s), start) + +static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start) { kcsan_atomic_next(0); return unlikely(READ_ONCE(s->sequence) != start); @@ -229,7 +513,7 @@ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start) /** * read_seqcount_retry() - end a seqcount_t read critical section - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * @start: count, from read_seqcount_begin() * * read_seqcount_retry closes the read critical section of given @@ -238,17 +522,28 @@ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start) * * Return: true if a read section retry is required, else false */ -static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) +#define read_seqcount_retry(s, start) \ + read_seqcount_t_retry(__to_seqcount_t(s), start) + +static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start) { smp_rmb(); - return __read_seqcount_retry(s, start); + return __read_seqcount_t_retry(s, start); } /** * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants */ -static inline void raw_write_seqcount_begin(seqcount_t *s) +#define raw_write_seqcount_begin(s) \ +do { \ + if (__associated_lock_exists_and_is_preemptible(s)) \ + preempt_disable(); \ + \ + raw_write_seqcount_t_begin(__to_seqcount_t(s)); \ +} while (0) + +static inline void raw_write_seqcount_t_begin(seqcount_t *s) { kcsan_nestable_atomic_begin(); s->sequence++; @@ -257,49 +552,50 @@ static inline void raw_write_seqcount_begin(seqcount_t *s) /** * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants */ -static inline void raw_write_seqcount_end(seqcount_t *s) +#define raw_write_seqcount_end(s) \ +do { \ + raw_write_seqcount_t_end(__to_seqcount_t(s)); \ + \ + if (__associated_lock_exists_and_is_preemptible(s)) \ + preempt_enable(); \ +} while (0) + +static inline void raw_write_seqcount_t_end(seqcount_t *s) { smp_wmb(); s->sequence++; kcsan_nestable_atomic_end(); } -static inline void __write_seqcount_begin_nested(seqcount_t *s, int subclass) -{ - raw_write_seqcount_begin(s); - seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_); -} - /** * write_seqcount_begin_nested() - start a seqcount_t write section with * custom lockdep nesting level - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * @subclass: lockdep nesting level * * See Documentation/locking/lockdep-design.rst */ -static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass) -{ - lockdep_assert_preemption_disabled(); - 
__write_seqcount_begin_nested(s, subclass); -} - -/* - * A write_seqcount_begin() variant w/o lockdep non-preemptibility checks. - * - * Use for internal seqlock.h code where it's known that preemption is - * already disabled. For example, seqlock_t write side functions. - */ -static inline void __write_seqcount_begin(seqcount_t *s) +#define write_seqcount_begin_nested(s, subclass) \ +do { \ + __assert_write_section_is_protected(s); \ + \ + if (__associated_lock_exists_and_is_preemptible(s)) \ + preempt_disable(); \ + \ + write_seqcount_t_begin_nested(__to_seqcount_t(s), subclass); \ +} while (0) + +static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass) { - __write_seqcount_begin_nested(s, 0); + raw_write_seqcount_t_begin(s); + seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_); } /** * write_seqcount_begin() - start a seqcount_t write side critical section - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * write_seqcount_begin opens a write side critical section of the given * seqcount_t. @@ -308,26 +604,44 @@ static inline void __write_seqcount_begin(seqcount_t *s) * non-preemptible. If readers can be invoked from hardirq or softirq * context, interrupts or bottom halves must be respectively disabled. */ -static inline void write_seqcount_begin(seqcount_t *s) +#define write_seqcount_begin(s) \ +do { \ + __assert_write_section_is_protected(s); \ + \ + if (__associated_lock_exists_and_is_preemptible(s)) \ + preempt_disable(); \ + \ + write_seqcount_t_begin(__to_seqcount_t(s)); \ +} while (0) + +static inline void write_seqcount_t_begin(seqcount_t *s) { - write_seqcount_begin_nested(s, 0); + write_seqcount_t_begin_nested(s, 0); } /** * write_seqcount_end() - end a seqcount_t write side critical section - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * The write section must've been opened with write_seqcount_begin(). */ -static inline void write_seqcount_end(seqcount_t *s) +#define write_seqcount_end(s) \ +do { \ + write_seqcount_t_end(__to_seqcount_t(s)); \ + \ + if (__associated_lock_exists_and_is_preemptible(s)) \ + preempt_enable(); \ +} while (0) + +static inline void write_seqcount_t_end(seqcount_t *s) { seqcount_release(&s->dep_map, _RET_IP_); - raw_write_seqcount_end(s); + raw_write_seqcount_t_end(s); } /** * raw_write_seqcount_barrier() - do a seqcount_t write barrier - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * This can be used to provide an ordering guarantee instead of the usual * consistency guarantee. It is one wmb cheaper, because it can collapse @@ -366,7 +680,10 @@ static inline void write_seqcount_end(seqcount_t *s) * WRITE_ONCE(X, false); * } */ -static inline void raw_write_seqcount_barrier(seqcount_t *s) +#define raw_write_seqcount_barrier(s) \ + raw_write_seqcount_t_barrier(__to_seqcount_t(s)) + +static inline void raw_write_seqcount_t_barrier(seqcount_t *s) { kcsan_nestable_atomic_begin(); s->sequence++; @@ -378,12 +695,15 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s) /** * write_seqcount_invalidate() - invalidate in-progress seqcount_t read * side operations - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * After write_seqcount_invalidate, no seqcount_t read side operations * will complete successfully and see data older than this. 
*/ -static inline void write_seqcount_invalidate(seqcount_t *s) +#define write_seqcount_invalidate(s) \ + write_seqcount_t_invalidate(__to_seqcount_t(s)) + +static inline void write_seqcount_t_invalidate(seqcount_t *s) { smp_wmb(); kcsan_nestable_atomic_begin(); @@ -393,7 +713,7 @@ static inline void write_seqcount_invalidate(seqcount_t *s) /** * raw_read_seqcount_latch() - pick even/odd seqcount_t latch data copy - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * Use seqcount_t latching to switch between two storage places protected * by a sequence counter. Doing so allows having interruptible, preemptible, @@ -406,7 +726,10 @@ static inline void write_seqcount_invalidate(seqcount_t *s) * picking which data copy to read. The full counter value must then be * checked with read_seqcount_retry(). */ -static inline int raw_read_seqcount_latch(seqcount_t *s) +#define raw_read_seqcount_latch(s) \ + raw_read_seqcount_t_latch(__to_seqcount_t(s)) + +static inline int raw_read_seqcount_t_latch(seqcount_t *s) { /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */ int seq = READ_ONCE(s->sequence); /* ^^^ */ @@ -415,7 +738,7 @@ static inline int raw_read_seqcount_latch(seqcount_t *s) /** * raw_write_seqcount_latch() - redirect readers to even/odd copy - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * The latch technique is a multiversion concurrency control method that allows * queries during non-atomic modifications. If you can guarantee queries never @@ -494,7 +817,10 @@ static inline int raw_read_seqcount_latch(seqcount_t *s) * When data is a dynamic data structure; one should use regular RCU * patterns to manage the lifetimes of the objects within. 
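A hedged sketch of the latch technique referenced above, assuming a hypothetical two-copy statistics structure (latched_stats and its members are illustrative names): the writer flips readers between the two copies, and readers pick a copy by the sequence LSB and retry if the full counter changed:

#include <linux/seqlock.h>

struct latched_stats {
	seqcount_t	seq;
	u64		copy[2];
};

static struct latched_stats lstats = {
	.seq = SEQCNT_ZERO(lstats.seq),
};

static void latched_stats_update(u64 val)
{
	raw_write_seqcount_latch(&lstats.seq);
	lstats.copy[0] = val;		/* readers now use copy[1] */
	raw_write_seqcount_latch(&lstats.seq);
	lstats.copy[1] = val;		/* readers now use copy[0] */
}

static u64 latched_stats_read(void)
{
	unsigned int seq, idx;
	u64 val;

	do {
		seq = raw_read_seqcount_latch(&lstats.seq);
		idx = seq & 0x1;
		val = READ_ONCE(lstats.copy[idx]);
	} while (read_seqcount_retry(&lstats.seq, seq));

	return val;
}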
*/ -static inline void raw_write_seqcount_latch(seqcount_t *s) +#define raw_write_seqcount_latch(s) \ + raw_write_seqcount_t_latch(__to_seqcount_t(s)) + +static inline void raw_write_seqcount_t_latch(seqcount_t *s) { smp_wmb(); /* prior stores before incrementing "sequence" */ s->sequence++; @@ -592,7 +918,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) static inline void write_seqlock(seqlock_t *sl) { spin_lock(&sl->lock); - __write_seqcount_begin(&sl->seqcount); + write_seqcount_t_begin(&sl->seqcount); } /** @@ -604,7 +930,7 @@ static inline void write_seqlock(seqlock_t *sl) */ static inline void write_sequnlock(seqlock_t *sl) { - write_seqcount_end(&sl->seqcount); + write_seqcount_t_end(&sl->seqcount); spin_unlock(&sl->lock); } @@ -618,7 +944,7 @@ static inline void write_sequnlock(seqlock_t *sl) static inline void write_seqlock_bh(seqlock_t *sl) { spin_lock_bh(&sl->lock); - __write_seqcount_begin(&sl->seqcount); + write_seqcount_t_begin(&sl->seqcount); } /** @@ -631,7 +957,7 @@ static inline void write_seqlock_bh(seqlock_t *sl) */ static inline void write_sequnlock_bh(seqlock_t *sl) { - write_seqcount_end(&sl->seqcount); + write_seqcount_t_end(&sl->seqcount); spin_unlock_bh(&sl->lock); } @@ -645,7 +971,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl) static inline void write_seqlock_irq(seqlock_t *sl) { spin_lock_irq(&sl->lock); - __write_seqcount_begin(&sl->seqcount); + write_seqcount_t_begin(&sl->seqcount); } /** @@ -657,7 +983,7 @@ static inline void write_seqlock_irq(seqlock_t *sl) */ static inline void write_sequnlock_irq(seqlock_t *sl) { - write_seqcount_end(&sl->seqcount); + write_seqcount_t_end(&sl->seqcount); spin_unlock_irq(&sl->lock); } @@ -666,7 +992,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) unsigned long flags; spin_lock_irqsave(&sl->lock, flags); - __write_seqcount_begin(&sl->seqcount); + write_seqcount_t_begin(&sl->seqcount); return flags; } @@ -695,13 +1021,13 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) static inline void write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) { - write_seqcount_end(&sl->seqcount); + write_seqcount_t_end(&sl->seqcount); spin_unlock_irqrestore(&sl->lock, flags); } /** * read_seqlock_excl() - begin a seqlock_t locking reader section - * @sl: Pointer to seqlock_t + * @sl: Pointer to seqlock_t * * read_seqlock_excl opens a seqlock_t locking reader critical section. A * locking reader exclusively locks out *both* other writers *and* other -- cgit v1.2.3 From ec8702da570ebb59f38471007bf71359c51b027b Mon Sep 17 00:00:00 2001 From: "Ahmed S. Darwish" Date: Mon, 20 Jul 2020 17:55:16 +0200 Subject: seqlock: Align multi-line macros newline escapes at 72 columns Parent commit, "seqlock: Extend seqcount API with associated locks", introduced a big number of multi-line macros that are newline-escaped at 72 columns. For overall cohesion, align the earlier-existing macros similarly. Signed-off-by: Ahmed S. 
Darwish Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200720155530.1173732-11-a.darwish@linutronix.de --- include/linux/seqlock.h | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 8c16a494c968..b48729988325 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -80,17 +80,18 @@ static inline void __seqcount_init(seqcount_t *s, const char *name, } #ifdef CONFIG_DEBUG_LOCK_ALLOC -# define SEQCOUNT_DEP_MAP_INIT(lockname) \ - .dep_map = { .name = #lockname } \ + +# define SEQCOUNT_DEP_MAP_INIT(lockname) \ + .dep_map = { .name = #lockname } /** * seqcount_init() - runtime initializer for seqcount_t * @s: Pointer to the seqcount_t instance */ -# define seqcount_init(s) \ - do { \ - static struct lock_class_key __key; \ - __seqcount_init((s), #s, &__key); \ +# define seqcount_init(s) \ + do { \ + static struct lock_class_key __key; \ + __seqcount_init((s), #s, &__key); \ } while (0) static inline void seqcount_lockdep_reader_access(const seqcount_t *s) @@ -842,20 +843,20 @@ typedef struct { spinlock_t lock; } seqlock_t; -#define __SEQLOCK_UNLOCKED(lockname) \ - { \ - .seqcount = SEQCNT_ZERO(lockname), \ - .lock = __SPIN_LOCK_UNLOCKED(lockname) \ +#define __SEQLOCK_UNLOCKED(lockname) \ + { \ + .seqcount = SEQCNT_ZERO(lockname), \ + .lock = __SPIN_LOCK_UNLOCKED(lockname) \ } /** * seqlock_init() - dynamic initializer for seqlock_t * @sl: Pointer to the seqlock_t instance */ -#define seqlock_init(sl) \ - do { \ - seqcount_init(&(sl)->seqcount); \ - spin_lock_init(&(sl)->lock); \ +#define seqlock_init(sl) \ + do { \ + seqcount_init(&(sl)->seqcount); \ + spin_lock_init(&(sl)->lock); \ } while (0) /** -- cgit v1.2.3 From 318ce71f3e3ae4108c1665f3860afa8a2a4c9f02 Mon Sep 17 00:00:00 2001 From: "Ahmed S. Darwish" Date: Mon, 20 Jul 2020 17:55:17 +0200 Subject: dma-buf: Remove custom seqcount lockdep class key Commit 3c3b177a9369 ("reservation: add support for read-only access using rcu") introduced a sequence counter to manage updates to reservations. Back then, the reservation object initializer reservation_object_init() was always inlined. Having the sequence counter initialization inlined meant that each of the call sites would have a different lockdep class key, which would've broken lockdep's deadlock detection. The aforementioned commit thus introduced, and exported, a custom seqcount lockdep class key and name. The commit 8735f16803f00 ("dma-buf: cleanup reservation_object_init...") transformed the reservation object initializer to a normal non-inlined C function. seqcount_init(), which automatically defines the seqcount lockdep class key and must be called non-inlined, can now be safely used. Remove the seqcount custom lockdep class key, name, and export. Use seqcount_init() inside the dma reservation object initializer. Signed-off-by: Ahmed S. 
Darwish Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Sebastian Andrzej Siewior Acked-by: Daniel Vetter Link: https://lkml.kernel.org/r/20200720155530.1173732-12-a.darwish@linutronix.de --- drivers/dma-buf/dma-resv.c | 9 +-------- include/linux/dma-resv.h | 2 -- 2 files changed, 1 insertion(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index b45f8514dc82..15efa0c2dacb 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -51,12 +51,6 @@ DEFINE_WD_CLASS(reservation_ww_class); EXPORT_SYMBOL(reservation_ww_class); -struct lock_class_key reservation_seqcount_class; -EXPORT_SYMBOL(reservation_seqcount_class); - -const char reservation_seqcount_string[] = "reservation_seqcount"; -EXPORT_SYMBOL(reservation_seqcount_string); - /** * dma_resv_list_alloc - allocate fence list * @shared_max: number of fences we need space for @@ -135,9 +129,8 @@ subsys_initcall(dma_resv_lockdep); void dma_resv_init(struct dma_resv *obj) { ww_mutex_init(&obj->lock, &reservation_ww_class); + seqcount_init(&obj->seq); - __seqcount_init(&obj->seq, reservation_seqcount_string, - &reservation_seqcount_class); RCU_INIT_POINTER(obj->fence, NULL); RCU_INIT_POINTER(obj->fence_excl, NULL); } diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index ee50d10f052b..a6538ae7d93f 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -46,8 +46,6 @@ #include extern struct ww_class reservation_ww_class; -extern struct lock_class_key reservation_seqcount_class; -extern const char reservation_seqcount_string[]; /** * struct dma_resv_list - a list of shared fences -- cgit v1.2.3 From cd29f22019ec4ab998d2e1e8c831c7c42db4aa7d Mon Sep 17 00:00:00 2001 From: "Ahmed S. Darwish" Date: Mon, 20 Jul 2020 17:55:18 +0200 Subject: dma-buf: Use sequence counter with associated wound/wait mutex A sequence counter write side critical section must be protected by some form of locking to serialize writers. If the serialization primitive is not disabling preemption implicitly, preemption has to be explicitly disabled before entering the sequence counter write side critical section. The dma-buf reservation subsystem uses plain sequence counters to manage updates to reservations. Writer serialization is accomplished through a wound/wait mutex. Acquiring a wound/wait mutex does not disable preemption, so this needs to be done manually before and after the write side critical section. Use the newly-added seqcount_ww_mutex_t instead: - It associates the ww_mutex with the sequence count, which enables lockdep to validate that the write side critical section is properly serialized. - It removes the need to explicitly add preempt_disable/enable() around the write side critical section because the write_begin/end() functions for this new data type automatically do this. If lockdep is disabled this ww_mutex lock association is compiled out and has neither storage size nor runtime overhead. Signed-off-by: Ahmed S. 
Darwish Signed-off-by: Peter Zijlstra (Intel) Acked-by: Daniel Vetter Link: https://lkml.kernel.org/r/20200720155530.1173732-13-a.darwish@linutronix.de --- drivers/dma-buf/dma-resv.c | 8 +------- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 2 -- include/linux/dma-resv.h | 2 +- 3 files changed, 2 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 15efa0c2dacb..a7631352a486 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -129,7 +129,7 @@ subsys_initcall(dma_resv_lockdep); void dma_resv_init(struct dma_resv *obj) { ww_mutex_init(&obj->lock, &reservation_ww_class); - seqcount_init(&obj->seq); + seqcount_ww_mutex_init(&obj->seq, &obj->lock); RCU_INIT_POINTER(obj->fence, NULL); RCU_INIT_POINTER(obj->fence_excl, NULL); @@ -260,7 +260,6 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence) fobj = dma_resv_get_list(obj); count = fobj->shared_count; - preempt_disable(); write_seqcount_begin(&obj->seq); for (i = 0; i < count; ++i) { @@ -282,7 +281,6 @@ replace: smp_store_mb(fobj->shared_count, count); write_seqcount_end(&obj->seq); - preempt_enable(); dma_fence_put(old); } EXPORT_SYMBOL(dma_resv_add_shared_fence); @@ -309,14 +307,12 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) if (fence) dma_fence_get(fence); - preempt_disable(); write_seqcount_begin(&obj->seq); /* write_seqcount_begin provides the necessary memory barrier */ RCU_INIT_POINTER(obj->fence_excl, fence); if (old) old->shared_count = 0; write_seqcount_end(&obj->seq); - preempt_enable(); /* inplace update, no shared fences */ while (i--) @@ -394,13 +390,11 @@ retry: src_list = dma_resv_get_list(dst); old = dma_resv_get_excl(dst); - preempt_disable(); write_seqcount_begin(&dst->seq); /* write_seqcount_begin provides the necessary memory barrier */ RCU_INIT_POINTER(dst->fence_excl, new); RCU_INIT_POINTER(dst->fence, dst_list); write_seqcount_end(&dst->seq); - preempt_enable(); dma_resv_list_free(src_list); dma_fence_put(old); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index b91b5171270f..ff4b583cb96a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -258,11 +258,9 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, new->shared_count = k; /* Install the new fence list, seqcount provides the barriers */ - preempt_disable(); write_seqcount_begin(&resv->seq); RCU_INIT_POINTER(resv->fence, new); write_seqcount_end(&resv->seq); - preempt_enable(); /* Drop the references to the removed fences or move them to ef_list */ for (i = j, k = 0; i < old->shared_count; ++i) { diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index a6538ae7d93f..d44a77e8a7e3 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -69,7 +69,7 @@ struct dma_resv_list { */ struct dma_resv { struct ww_mutex lock; - seqcount_t seq; + seqcount_ww_mutex_t seq; struct dma_fence __rcu *fence_excl; struct dma_resv_list __rcu *fence; -- cgit v1.2.3 From b75058614fdd3140074a640b514f6a0b4d485a2d Mon Sep 17 00:00:00 2001 From: "Ahmed S. Darwish" Date: Mon, 20 Jul 2020 17:55:19 +0200 Subject: sched: tasks: Use sequence counter with associated spinlock A sequence counter write side critical section must be protected by some form of locking to serialize writers. 
A plain seqcount_t does not contain the information of which lock must be held when entering a write side critical section. Use the new seqcount_spinlock_t data type, which allows to associate a spinlock with the sequence counter. This enables lockdep to verify that the spinlock used for writer serialization is held when the write side critical section is entered. If lockdep is disabled this lock association is compiled out and has neither storage size nor runtime overhead. Signed-off-by: Ahmed S. Darwish Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200720155530.1173732-14-a.darwish@linutronix.de --- include/linux/sched.h | 2 +- init/init_task.c | 3 ++- kernel/fork.c | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index 8d1de021b315..9a9d8263962d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1050,7 +1050,7 @@ struct task_struct { /* Protected by ->alloc_lock: */ nodemask_t mems_allowed; /* Seqence number to catch updates: */ - seqcount_t mems_allowed_seq; + seqcount_spinlock_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; #endif diff --git a/init/init_task.c b/init/init_task.c index 15089d15010a..94fe3ba1bb60 100644 --- a/init/init_task.c +++ b/init/init_task.c @@ -154,7 +154,8 @@ struct task_struct init_task .trc_holdout_list = LIST_HEAD_INIT(init_task.trc_holdout_list), #endif #ifdef CONFIG_CPUSETS - .mems_allowed_seq = SEQCNT_ZERO(init_task.mems_allowed_seq), + .mems_allowed_seq = SEQCNT_SPINLOCK_ZERO(init_task.mems_allowed_seq, + &init_task.alloc_lock), #endif #ifdef CONFIG_RT_MUTEXES .pi_waiters = RB_ROOT_CACHED, diff --git a/kernel/fork.c b/kernel/fork.c index 70d9d0a4de2a..fc72f09a61b2 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2032,7 +2032,7 @@ static __latent_entropy struct task_struct *copy_process( #ifdef CONFIG_CPUSETS p->cpuset_mem_spread_rotor = NUMA_NO_NODE; p->cpuset_slab_spread_rotor = NUMA_NO_NODE; - seqcount_init(&p->mems_allowed_seq); + seqcount_spinlock_init(&p->mems_allowed_seq, &p->alloc_lock); #endif #ifdef CONFIG_TRACE_IRQFLAGS p->irq_events = 0; -- cgit v1.2.3 From 8201d923f492703a7d6c980cff3034759a452b86 Mon Sep 17 00:00:00 2001 From: "Ahmed S. Darwish" Date: Mon, 20 Jul 2020 17:55:20 +0200 Subject: netfilter: conntrack: Use sequence counter with associated spinlock A sequence counter write side critical section must be protected by some form of locking to serialize writers. A plain seqcount_t does not contain the information of which lock must be held when entering a write side critical section. Use the new seqcount_spinlock_t data type, which allows to associate a spinlock with the sequence counter. This enables lockdep to verify that the spinlock used for writer serialization is held when the write side critical section is entered. If lockdep is disabled this lock association is compiled out and has neither storage size nor runtime overhead. Signed-off-by: Ahmed S. 
Darwish Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200720155530.1173732-15-a.darwish@linutronix.de --- include/net/netfilter/nf_conntrack.h | 2 +- net/netfilter/nf_conntrack_core.c | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 90690e37a56f..ea4e2010b246 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -286,7 +286,7 @@ int nf_conntrack_hash_resize(unsigned int hashsize); extern struct hlist_nulls_head *nf_conntrack_hash; extern unsigned int nf_conntrack_htable_size; -extern seqcount_t nf_conntrack_generation; +extern seqcount_spinlock_t nf_conntrack_generation; extern unsigned int nf_conntrack_max; /* must be called with rcu read lock held */ diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index f33d72c5b06e..b597b5b16ba1 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size); unsigned int nf_conntrack_max __read_mostly; EXPORT_SYMBOL_GPL(nf_conntrack_max); -seqcount_t nf_conntrack_generation __read_mostly; +seqcount_spinlock_t nf_conntrack_generation __read_mostly; static unsigned int nf_conntrack_hash_rnd __read_mostly; static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, @@ -2600,7 +2600,8 @@ int nf_conntrack_init_start(void) /* struct nf_ct_ext uses u8 to store offsets/size */ BUILD_BUG_ON(total_extension_size() > 255u); - seqcount_init(&nf_conntrack_generation); + seqcount_spinlock_init(&nf_conntrack_generation, + &nf_conntrack_locks_all_lock); for (i = 0; i < CONNTRACK_LOCKS; i++) spin_lock_init(&nf_conntrack_locks[i]); -- cgit v1.2.3 From 26475371976c69489d3a8e6c8bbf35afbbc25055 Mon Sep 17 00:00:00 2001 From: "Ahmed S. Darwish" Date: Mon, 20 Jul 2020 17:55:24 +0200 Subject: vfs: Use sequence counter with associated spinlock A sequence counter write side critical section must be protected by some form of locking to serialize writers. A plain seqcount_t does not contain the information of which lock must be held when entering a write side critical section. Use the new seqcount_spinlock_t data type, which allows to associate a spinlock with the sequence counter. This enables lockdep to verify that the spinlock used for writer serialization is held when the write side critical section is entered. If lockdep is disabled this lock association is compiled out and has neither storage size nor runtime overhead. Signed-off-by: Ahmed S. 
Darwish Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200720155530.1173732-19-a.darwish@linutronix.de --- fs/dcache.c | 2 +- fs/fs_struct.c | 4 ++-- include/linux/dcache.h | 2 +- include/linux/fs_struct.h | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/fs/dcache.c b/fs/dcache.c index 361ea7ab30ea..ea0485861d93 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1746,7 +1746,7 @@ static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) dentry->d_lockref.count = 1; dentry->d_flags = 0; spin_lock_init(&dentry->d_lock); - seqcount_init(&dentry->d_seq); + seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock); dentry->d_inode = NULL; dentry->d_parent = dentry; dentry->d_sb = sb; diff --git a/fs/fs_struct.c b/fs/fs_struct.c index ca639ed967b7..04b3f5b9c629 100644 --- a/fs/fs_struct.c +++ b/fs/fs_struct.c @@ -117,7 +117,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old) fs->users = 1; fs->in_exec = 0; spin_lock_init(&fs->lock); - seqcount_init(&fs->seq); + seqcount_spinlock_init(&fs->seq, &fs->lock); fs->umask = old->umask; spin_lock(&old->lock); @@ -163,6 +163,6 @@ EXPORT_SYMBOL(current_umask); struct fs_struct init_fs = { .users = 1, .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock), - .seq = SEQCNT_ZERO(init_fs.seq), + .seq = SEQCNT_SPINLOCK_ZERO(init_fs.seq, &init_fs.lock), .umask = 0022, }; diff --git a/include/linux/dcache.h b/include/linux/dcache.h index a81f0c3cf352..65d975bf9390 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -89,7 +89,7 @@ extern struct dentry_stat_t dentry_stat; struct dentry { /* RCU lookup touched fields */ unsigned int d_flags; /* protected by d_lock */ - seqcount_t d_seq; /* per dentry seqlock */ + seqcount_spinlock_t d_seq; /* per dentry seqlock */ struct hlist_bl_node d_hash; /* lookup hash list */ struct dentry *d_parent; /* parent directory */ struct qstr d_name; diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h index cf1015abfbf2..783b48dedb72 100644 --- a/include/linux/fs_struct.h +++ b/include/linux/fs_struct.h @@ -9,7 +9,7 @@ struct fs_struct { int users; spinlock_t lock; - seqcount_t seq; + seqcount_spinlock_t seq; int umask; int in_exec; struct path root, pwd; -- cgit v1.2.3 From 5c73b9a2b1b4ecc809a914aa64970157b3d8c936 Mon Sep 17 00:00:00 2001 From: "Ahmed S. Darwish" Date: Mon, 20 Jul 2020 17:55:29 +0200 Subject: kvm/eventfd: Use sequence counter with associated spinlock A sequence counter write side critical section must be protected by some form of locking to serialize writers. A plain seqcount_t does not contain the information of which lock must be held when entering a write side critical section. Use the new seqcount_spinlock_t data type, which allows to associate a spinlock with the sequence counter. This enables lockdep to verify that the spinlock used for writer serialization is held when the write side critical section is entered. If lockdep is disabled this lock association is compiled out and has neither storage size nor runtime overhead. Signed-off-by: Ahmed S. 
Darwish Signed-off-by: Peter Zijlstra (Intel) Acked-by: Paolo Bonzini Link: https://lkml.kernel.org/r/20200720155530.1173732-24-a.darwish@linutronix.de --- include/linux/kvm_irqfd.h | 2 +- virt/kvm/eventfd.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h index dc1da020305b..dac047abdba7 100644 --- a/include/linux/kvm_irqfd.h +++ b/include/linux/kvm_irqfd.h @@ -42,7 +42,7 @@ struct kvm_kernel_irqfd { wait_queue_entry_t wait; /* Update side is protected by irqfds.lock */ struct kvm_kernel_irq_routing_entry irq_entry; - seqcount_t irq_entry_sc; + seqcount_spinlock_t irq_entry_sc; /* Used for level IRQ fast-path */ int gsi; struct work_struct inject; diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index ef7ed916ad4a..d6408bb497dc 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c @@ -303,7 +303,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args) INIT_LIST_HEAD(&irqfd->list); INIT_WORK(&irqfd->inject, irqfd_inject); INIT_WORK(&irqfd->shutdown, irqfd_shutdown); - seqcount_init(&irqfd->irq_entry_sc); + seqcount_spinlock_init(&irqfd->irq_entry_sc, &kvm->irqfds.lock); f = fdget(args->fd); if (!f.file) { -- cgit v1.2.3 From af5a06b582ec3d7b0160b4faaa65f73d8dcf989f Mon Sep 17 00:00:00 2001 From: "Ahmed S. Darwish" Date: Mon, 20 Jul 2020 17:55:30 +0200 Subject: hrtimer: Use sequence counter with associated raw spinlock A sequence counter write side critical section must be protected by some form of locking to serialize writers. A plain seqcount_t does not contain the information of which lock must be held when entering a write side critical section. Use the new seqcount_raw_spinlock_t data type, which allows to associate a raw spinlock with the sequence counter. This enables lockdep to verify that the raw spinlock used for writer serialization is held when the write side critical section is entered. If lockdep is disabled this lock association is compiled out and has neither storage size nor runtime overhead. Signed-off-by: Ahmed S. 
Darwish Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200720155530.1173732-25-a.darwish@linutronix.de --- include/linux/hrtimer.h | 2 +- kernel/time/hrtimer.c | 13 ++++++++++--- 2 files changed, 11 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 15c8ac313678..25993b86ac5c 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -159,7 +159,7 @@ struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; unsigned int index; clockid_t clockid; - seqcount_t seq; + seqcount_raw_spinlock_t seq; struct hrtimer *running; struct timerqueue_head active; ktime_t (*get_time)(void); diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index d89da1c7e005..c4038511d5c9 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -135,7 +135,11 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = { * timer->base->cpu_base */ static struct hrtimer_cpu_base migration_cpu_base = { - .clock_base = { { .cpu_base = &migration_cpu_base, }, }, + .clock_base = { { + .cpu_base = &migration_cpu_base, + .seq = SEQCNT_RAW_SPINLOCK_ZERO(migration_cpu_base.seq, + &migration_cpu_base.lock), + }, }, }; #define migration_base migration_cpu_base.clock_base[0] @@ -1998,8 +2002,11 @@ int hrtimers_prepare_cpu(unsigned int cpu) int i; for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { - cpu_base->clock_base[i].cpu_base = cpu_base; - timerqueue_init_head(&cpu_base->clock_base[i].active); + struct hrtimer_clock_base *clock_b = &cpu_base->clock_base[i]; + + clock_b->cpu_base = cpu_base; + seqcount_raw_spinlock_init(&clock_b->seq, &cpu_base->lock); + timerqueue_init_head(&clock_b->active); } cpu_base->cpu = cpu; -- cgit v1.2.3 From e55687fe5c1e4849e5559a0a49199c9ca3fff36e Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 23 Jul 2020 11:56:22 +0200 Subject: seqlock: s/__SEQ_LOCKDEP/__SEQ_LOCK/g __SEQ_LOCKDEP() is an expression gate for the seqcount_LOCKNAME_t::lock member. Rename it to be about the member, not the gate condition. Later (PREEMPT_RT) patches will make the member available for !LOCKDEP configs. 
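For readers following the conversions above, here is a minimal usage sketch of the seqcount_spinlock_t API (the foo structure and helpers are invented for illustration and do not appear in any of these patches; the usual <linux/seqlock.h> and <linux/spinlock.h> includes are assumed):

struct foo {
	spinlock_t		lock;
	seqcount_spinlock_t	seq;	/* write side serialized by ->lock */
	u64			a, b;
};

static void foo_init(struct foo *f)
{
	spin_lock_init(&f->lock);
	seqcount_spinlock_init(&f->seq, &f->lock);
}

static void foo_update(struct foo *f, u64 a, u64 b)
{
	spin_lock(&f->lock);
	write_seqcount_begin(&f->seq);	/* lockdep asserts ->lock is held */
	f->a = a;
	f->b = b;
	write_seqcount_end(&f->seq);
	spin_unlock(&f->lock);
}

static u64 foo_read(struct foo *f)
{
	unsigned int seq;
	u64 sum;

	do {
		seq = read_seqcount_begin(&f->seq);
		sum = f->a + f->b;
	} while (read_seqcount_retry(&f->seq, seq));

	return sum;
}

Since a spinlock already disables preemption, write_seqcount_begin() only performs the lockdep assertion here; for the mutex and ww_mutex variants it additionally disables preemption, as described in the commits above.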
Signed-off-by: Peter Zijlstra (Intel) --- include/linux/seqlock.h | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index b48729988325..c689abab06c8 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -133,20 +133,20 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) */ #ifdef CONFIG_LOCKDEP -#define __SEQ_LOCKDEP(expr) expr +#define __SEQ_LOCK(expr) expr #else -#define __SEQ_LOCKDEP(expr) +#define __SEQ_LOCK(expr) #endif #define SEQCOUNT_LOCKTYPE_ZERO(seq_name, assoc_lock) { \ .seqcount = SEQCNT_ZERO(seq_name.seqcount), \ - __SEQ_LOCKDEP(.lock = (assoc_lock)) \ + __SEQ_LOCK(.lock = (assoc_lock)) \ } #define seqcount_locktype_init(s, assoc_lock) \ do { \ seqcount_init(&(s)->seqcount); \ - __SEQ_LOCKDEP((s)->lock = (assoc_lock)); \ + __SEQ_LOCK((s)->lock = (assoc_lock)); \ } while (0) /** @@ -161,7 +161,7 @@ do { \ */ typedef struct seqcount_spinlock { seqcount_t seqcount; - __SEQ_LOCKDEP(spinlock_t *lock); + __SEQ_LOCK(spinlock_t *lock); } seqcount_spinlock_t; /** @@ -192,7 +192,7 @@ typedef struct seqcount_spinlock { */ typedef struct seqcount_raw_spinlock { seqcount_t seqcount; - __SEQ_LOCKDEP(raw_spinlock_t *lock); + __SEQ_LOCK(raw_spinlock_t *lock); } seqcount_raw_spinlock_t; /** @@ -223,7 +223,7 @@ typedef struct seqcount_raw_spinlock { */ typedef struct seqcount_rwlock { seqcount_t seqcount; - __SEQ_LOCKDEP(rwlock_t *lock); + __SEQ_LOCK(rwlock_t *lock); } seqcount_rwlock_t; /** @@ -257,7 +257,7 @@ typedef struct seqcount_rwlock { */ typedef struct seqcount_mutex { seqcount_t seqcount; - __SEQ_LOCKDEP(struct mutex *lock); + __SEQ_LOCK(struct mutex *lock); } seqcount_mutex_t; /** @@ -291,7 +291,7 @@ typedef struct seqcount_mutex { */ typedef struct seqcount_ww_mutex { seqcount_t seqcount; - __SEQ_LOCKDEP(struct ww_mutex *lock); + __SEQ_LOCK(struct ww_mutex *lock); } seqcount_ww_mutex_t; /** @@ -329,7 +329,7 @@ __seqcount_##locktype##_preemptible(seqcount_##locktype##_t *s) \ static inline void \ __seqcount_##locktype##_assert(seqcount_##locktype##_t *s) \ { \ - __SEQ_LOCKDEP(lockdep_assert_held(lockmember)); \ + __SEQ_LOCK(lockdep_assert_held(lockmember)); \ } /* -- cgit v1.2.3 From a8772dccb2ec7b139db1b3ba782ecb12ed92d7c3 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 23 Jul 2020 11:56:49 +0200 Subject: seqlock: Fold seqcount_LOCKNAME_t definition Manual repetition is boring and error prone. Signed-off-by: Peter Zijlstra (Intel) --- include/linux/seqlock.h | 142 +++++++++++++----------------------------------- 1 file changed, 39 insertions(+), 103 deletions(-) (limited to 'include') diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index c689abab06c8..4b259bb4d4b9 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -149,21 +149,6 @@ do { \ __SEQ_LOCK((s)->lock = (assoc_lock)); \ } while (0) -/** - * typedef seqcount_spinlock_t - sequence counter with spinlock associated - * @seqcount: The real sequence counter - * @lock: Pointer to the associated spinlock - * - * A plain sequence counter with external writer synchronization by a - * spinlock. The spinlock is associated to the sequence count in the - * static initializer or init function. This enables lockdep to validate - * that the write side critical section is properly serialized. 
- */ -typedef struct seqcount_spinlock { - seqcount_t seqcount; - __SEQ_LOCK(spinlock_t *lock); -} seqcount_spinlock_t; - /** * SEQCNT_SPINLOCK_ZERO - static initializer for seqcount_spinlock_t * @name: Name of the seqcount_spinlock_t instance @@ -180,21 +165,6 @@ typedef struct seqcount_spinlock { #define seqcount_spinlock_init(s, lock) \ seqcount_locktype_init(s, lock) -/** - * typedef seqcount_raw_spinlock_t - sequence count with raw spinlock associated - * @seqcount: The real sequence counter - * @lock: Pointer to the associated raw spinlock - * - * A plain sequence counter with external writer synchronization by a - * raw spinlock. The raw spinlock is associated to the sequence count in - * the static initializer or init function. This enables lockdep to - * validate that the write side critical section is properly serialized. - */ -typedef struct seqcount_raw_spinlock { - seqcount_t seqcount; - __SEQ_LOCK(raw_spinlock_t *lock); -} seqcount_raw_spinlock_t; - /** * SEQCNT_RAW_SPINLOCK_ZERO - static initializer for seqcount_raw_spinlock_t * @name: Name of the seqcount_raw_spinlock_t instance @@ -211,21 +181,6 @@ typedef struct seqcount_raw_spinlock { #define seqcount_raw_spinlock_init(s, lock) \ seqcount_locktype_init(s, lock) -/** - * typedef seqcount_rwlock_t - sequence count with rwlock associated - * @seqcount: The real sequence counter - * @lock: Pointer to the associated rwlock - * - * A plain sequence counter with external writer synchronization by a - * rwlock. The rwlock is associated to the sequence count in the static - * initializer or init function. This enables lockdep to validate that - * the write side critical section is properly serialized. - */ -typedef struct seqcount_rwlock { - seqcount_t seqcount; - __SEQ_LOCK(rwlock_t *lock); -} seqcount_rwlock_t; - /** * SEQCNT_RWLOCK_ZERO - static initializer for seqcount_rwlock_t * @name: Name of the seqcount_rwlock_t instance @@ -242,24 +197,6 @@ typedef struct seqcount_rwlock { #define seqcount_rwlock_init(s, lock) \ seqcount_locktype_init(s, lock) -/** - * typedef seqcount_mutex_t - sequence count with mutex associated - * @seqcount: The real sequence counter - * @lock: Pointer to the associated mutex - * - * A plain sequence counter with external writer synchronization by a - * mutex. The mutex is associated to the sequence counter in the static - * initializer or init function. This enables lockdep to validate that - * the write side critical section is properly serialized. - * - * The write side API functions write_seqcount_begin()/end() automatically - * disable and enable preemption when used with seqcount_mutex_t. - */ -typedef struct seqcount_mutex { - seqcount_t seqcount; - __SEQ_LOCK(struct mutex *lock); -} seqcount_mutex_t; - /** * SEQCNT_MUTEX_ZERO - static initializer for seqcount_mutex_t * @name: Name of the seqcount_mutex_t instance @@ -276,24 +213,6 @@ typedef struct seqcount_mutex { #define seqcount_mutex_init(s, lock) \ seqcount_locktype_init(s, lock) -/** - * typedef seqcount_ww_mutex_t - sequence count with ww_mutex associated - * @seqcount: The real sequence counter - * @lock: Pointer to the associated ww_mutex - * - * A plain sequence counter with external writer synchronization by a - * ww_mutex. The ww_mutex is associated to the sequence counter in the static - * initializer or init function. This enables lockdep to validate that - * the write side critical section is properly serialized. 
- * - * The write side API functions write_seqcount_begin()/end() automatically - * disable and enable preemption when used with seqcount_ww_mutex_t. - */ -typedef struct seqcount_ww_mutex { - seqcount_t seqcount; - __SEQ_LOCK(struct ww_mutex *lock); -} seqcount_ww_mutex_t; - /** * SEQCNT_WW_MUTEX_ZERO - static initializer for seqcount_ww_mutex_t * @name: Name of the seqcount_ww_mutex_t instance @@ -310,30 +229,50 @@ typedef struct seqcount_ww_mutex { #define seqcount_ww_mutex_init(s, lock) \ seqcount_locktype_init(s, lock) -/* - * @preempt: Is the associated write serialization lock preemtpible? +/** + * typedef seqcount_LOCKNAME_t - sequence counter with spinlock associated + * @seqcount: The real sequence counter + * @lock: Pointer to the associated spinlock + * + * A plain sequence counter with external writer synchronization by a + * spinlock. The spinlock is associated to the sequence count in the + * static initializer or init function. This enables lockdep to validate + * that the write side critical section is properly serialized. */ -#define SEQCOUNT_LOCKTYPE(locktype, preempt, lockmember) \ -static inline seqcount_t * \ -__seqcount_##locktype##_ptr(seqcount_##locktype##_t *s) \ + +/* + * SEQCOUNT_LOCKTYPE() - Instantiate seqcount_LOCKNAME_t and helpers + * @locktype: actual typename + * @lockname: name + * @preemptible: preemptibility of above locktype + * @lockmember: argument for lockdep_assert_held() + */ +#define SEQCOUNT_LOCKTYPE(locktype, lockname, preemptible, lockmember) \ +typedef struct seqcount_##lockname { \ + seqcount_t seqcount; \ + __SEQ_LOCK(locktype *lock); \ +} seqcount_##lockname##_t; \ + \ +static __always_inline seqcount_t * \ +__seqcount_##lockname##_ptr(seqcount_##lockname##_t *s) \ { \ return &s->seqcount; \ } \ \ -static inline bool \ -__seqcount_##locktype##_preemptible(seqcount_##locktype##_t *s) \ +static __always_inline bool \ +__seqcount_##lockname##_preemptible(seqcount_##lockname##_t *s) \ { \ - return preempt; \ + return preemptible; \ } \ \ -static inline void \ -__seqcount_##locktype##_assert(seqcount_##locktype##_t *s) \ +static __always_inline void \ +__seqcount_##lockname##_assert(seqcount_##lockname##_t *s) \ { \ __SEQ_LOCK(lockdep_assert_held(lockmember)); \ } /* - * Similar hooks, but for plain seqcount_t + * __seqprop() for seqcount_t */ static inline seqcount_t *__seqcount_ptr(seqcount_t *s) @@ -351,17 +290,14 @@ static inline void __seqcount_assert(seqcount_t *s) lockdep_assert_preemption_disabled(); } -/* - * @s: Pointer to seqcount_locktype_t, generated hooks first parameter. 
- */ -SEQCOUNT_LOCKTYPE(raw_spinlock, false, s->lock) -SEQCOUNT_LOCKTYPE(spinlock, false, s->lock) -SEQCOUNT_LOCKTYPE(rwlock, false, s->lock) -SEQCOUNT_LOCKTYPE(mutex, true, s->lock) -SEQCOUNT_LOCKTYPE(ww_mutex, true, &s->lock->base) - -#define __seqprop_case(s, locktype, prop) \ - seqcount_##locktype##_t: __seqcount_##locktype##_##prop((void *)(s)) +SEQCOUNT_LOCKTYPE(raw_spinlock_t, raw_spinlock, false, s->lock) +SEQCOUNT_LOCKTYPE(spinlock_t, spinlock, false, s->lock) +SEQCOUNT_LOCKTYPE(rwlock_t, rwlock, false, s->lock) +SEQCOUNT_LOCKTYPE(struct mutex, mutex, true, s->lock) +SEQCOUNT_LOCKTYPE(struct ww_mutex, ww_mutex, true, &s->lock->base) + +#define __seqprop_case(s, lockname, prop) \ + seqcount_##lockname##_t: __seqcount_##lockname##_##prop((void *)(s)) #define __seqprop(s, prop) _Generic(*(s), \ seqcount_t: __seqcount_##prop((void *)(s)), \ -- cgit v1.2.3 From e4e9ab3f9f91ad3b88d12363f890e8ad9b59b645 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 23 Jul 2020 12:00:53 +0200 Subject: seqlock: Fold seqcount_LOCKNAME_init() definition Manual repetition is boring and error prone. Signed-off-by: Peter Zijlstra (Intel) --- include/linux/seqlock.h | 61 ++++++++++++------------------------------------- 1 file changed, 14 insertions(+), 47 deletions(-) (limited to 'include') diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 4b259bb4d4b9..501ff47d1e8e 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -143,12 +143,6 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) __SEQ_LOCK(.lock = (assoc_lock)) \ } -#define seqcount_locktype_init(s, assoc_lock) \ -do { \ - seqcount_init(&(s)->seqcount); \ - __SEQ_LOCK((s)->lock = (assoc_lock)); \ -} while (0) - /** * SEQCNT_SPINLOCK_ZERO - static initializer for seqcount_spinlock_t * @name: Name of the seqcount_spinlock_t instance @@ -157,14 +151,6 @@ do { \ #define SEQCNT_SPINLOCK_ZERO(name, lock) \ SEQCOUNT_LOCKTYPE_ZERO(name, lock) -/** - * seqcount_spinlock_init - runtime initializer for seqcount_spinlock_t - * @s: Pointer to the seqcount_spinlock_t instance - * @lock: Pointer to the associated spinlock - */ -#define seqcount_spinlock_init(s, lock) \ - seqcount_locktype_init(s, lock) - /** * SEQCNT_RAW_SPINLOCK_ZERO - static initializer for seqcount_raw_spinlock_t * @name: Name of the seqcount_raw_spinlock_t instance @@ -173,14 +159,6 @@ do { \ #define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) \ SEQCOUNT_LOCKTYPE_ZERO(name, lock) -/** - * seqcount_raw_spinlock_init - runtime initializer for seqcount_raw_spinlock_t - * @s: Pointer to the seqcount_raw_spinlock_t instance - * @lock: Pointer to the associated raw_spinlock - */ -#define seqcount_raw_spinlock_init(s, lock) \ - seqcount_locktype_init(s, lock) - /** * SEQCNT_RWLOCK_ZERO - static initializer for seqcount_rwlock_t * @name: Name of the seqcount_rwlock_t instance @@ -189,14 +167,6 @@ do { \ #define SEQCNT_RWLOCK_ZERO(name, lock) \ SEQCOUNT_LOCKTYPE_ZERO(name, lock) -/** - * seqcount_rwlock_init - runtime initializer for seqcount_rwlock_t - * @s: Pointer to the seqcount_rwlock_t instance - * @lock: Pointer to the associated rwlock - */ -#define seqcount_rwlock_init(s, lock) \ - seqcount_locktype_init(s, lock) - /** * SEQCNT_MUTEX_ZERO - static initializer for seqcount_mutex_t * @name: Name of the seqcount_mutex_t instance @@ -205,14 +175,6 @@ do { \ #define SEQCNT_MUTEX_ZERO(name, lock) \ SEQCOUNT_LOCKTYPE_ZERO(name, lock) -/** - * seqcount_mutex_init - runtime initializer for seqcount_mutex_t - * @s: Pointer to the seqcount_mutex_t 
instance - * @lock: Pointer to the associated mutex - */ -#define seqcount_mutex_init(s, lock) \ - seqcount_locktype_init(s, lock) - /** * SEQCNT_WW_MUTEX_ZERO - static initializer for seqcount_ww_mutex_t * @name: Name of the seqcount_ww_mutex_t instance @@ -222,15 +184,7 @@ do { \ SEQCOUNT_LOCKTYPE_ZERO(name, lock) /** - * seqcount_ww_mutex_init - runtime initializer for seqcount_ww_mutex_t - * @s: Pointer to the seqcount_ww_mutex_t instance - * @lock: Pointer to the associated ww_mutex - */ -#define seqcount_ww_mutex_init(s, lock) \ - seqcount_locktype_init(s, lock) - -/** - * typedef seqcount_LOCKNAME_t - sequence counter with spinlock associated + * typedef seqcount_LOCKNAME_t - sequence counter with LOCKTYPR associated * @seqcount: The real sequence counter * @lock: Pointer to the associated spinlock * @@ -240,6 +194,12 @@ do { \ * that the write side critical section is properly serialized. */ +/** + * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t + * @s: Pointer to the seqcount_LOCKNAME_t instance + * @lock: Pointer to the associated LOCKTYPE + */ + /* * SEQCOUNT_LOCKTYPE() - Instantiate seqcount_LOCKNAME_t and helpers * @locktype: actual typename @@ -253,6 +213,13 @@ typedef struct seqcount_##lockname { \ __SEQ_LOCK(locktype *lock); \ } seqcount_##lockname##_t; \ \ +static __always_inline void \ +seqcount_##lockname##_init(seqcount_##lockname##_t *s, locktype *lock) \ +{ \ + seqcount_init(&s->seqcount); \ + __SEQ_LOCK(s->lock = lock); \ +} \ + \ static __always_inline seqcount_t * \ __seqcount_##lockname##_ptr(seqcount_##lockname##_t *s) \ { \ -- cgit v1.2.3 From 0efc94c5d15c3da0a69543d86ad2180f39256ed6 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 23 Jul 2020 12:03:13 +0200 Subject: seqcount: Compress SEQCNT_LOCKNAME_ZERO() Less is more. 
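A minimal sketch of the compressed static initializer in use (the example object is invented; compare the init_fs and init_task conversions earlier in this series):

static struct example {
	spinlock_t		lock;
	seqcount_spinlock_t	seq;
} example = {
	.lock	= __SPIN_LOCK_UNLOCKED(example.lock),
	.seq	= SEQCNT_SPINLOCK_ZERO(example.seq, &example.lock),
};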
Signed-off-by: Peter Zijlstra (Intel) --- include/linux/seqlock.h | 63 ++++++++++++++----------------------------------- 1 file changed, 18 insertions(+), 45 deletions(-) (limited to 'include') diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 501ff47d1e8e..251dcd6f5cd8 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -138,51 +138,6 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) #define __SEQ_LOCK(expr) #endif -#define SEQCOUNT_LOCKTYPE_ZERO(seq_name, assoc_lock) { \ - .seqcount = SEQCNT_ZERO(seq_name.seqcount), \ - __SEQ_LOCK(.lock = (assoc_lock)) \ -} - -/** - * SEQCNT_SPINLOCK_ZERO - static initializer for seqcount_spinlock_t - * @name: Name of the seqcount_spinlock_t instance - * @lock: Pointer to the associated spinlock - */ -#define SEQCNT_SPINLOCK_ZERO(name, lock) \ - SEQCOUNT_LOCKTYPE_ZERO(name, lock) - -/** - * SEQCNT_RAW_SPINLOCK_ZERO - static initializer for seqcount_raw_spinlock_t - * @name: Name of the seqcount_raw_spinlock_t instance - * @lock: Pointer to the associated raw_spinlock - */ -#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) \ - SEQCOUNT_LOCKTYPE_ZERO(name, lock) - -/** - * SEQCNT_RWLOCK_ZERO - static initializer for seqcount_rwlock_t - * @name: Name of the seqcount_rwlock_t instance - * @lock: Pointer to the associated rwlock - */ -#define SEQCNT_RWLOCK_ZERO(name, lock) \ - SEQCOUNT_LOCKTYPE_ZERO(name, lock) - -/** - * SEQCNT_MUTEX_ZERO - static initializer for seqcount_mutex_t - * @name: Name of the seqcount_mutex_t instance - * @lock: Pointer to the associated mutex - */ -#define SEQCNT_MUTEX_ZERO(name, lock) \ - SEQCOUNT_LOCKTYPE_ZERO(name, lock) - -/** - * SEQCNT_WW_MUTEX_ZERO - static initializer for seqcount_ww_mutex_t - * @name: Name of the seqcount_ww_mutex_t instance - * @lock: Pointer to the associated ww_mutex - */ -#define SEQCNT_WW_MUTEX_ZERO(name, lock) \ - SEQCOUNT_LOCKTYPE_ZERO(name, lock) - /** * typedef seqcount_LOCKNAME_t - sequence counter with LOCKTYPR associated * @seqcount: The real sequence counter @@ -263,6 +218,24 @@ SEQCOUNT_LOCKTYPE(rwlock_t, rwlock, false, s->lock) SEQCOUNT_LOCKTYPE(struct mutex, mutex, true, s->lock) SEQCOUNT_LOCKTYPE(struct ww_mutex, ww_mutex, true, &s->lock->base) +/** + * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t + * @name: Name of the seqcount_LOCKNAME_t instance + * @lock: Pointer to the associated LOCKTYPE + */ + +#define SEQCOUNT_LOCKTYPE_ZERO(seq_name, assoc_lock) { \ + .seqcount = SEQCNT_ZERO(seq_name.seqcount), \ + __SEQ_LOCK(.lock = (assoc_lock)) \ +} + +#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock) +#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock) +#define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock) +#define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock) +#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock) + + #define __seqprop_case(s, lockname, prop) \ seqcount_##lockname##_t: __seqcount_##lockname##_##prop((void *)(s)) -- cgit v1.2.3 From b5e6a027bd327daa679ca55182a920659e2cbb90 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 23 Jul 2020 12:11:49 +0200 Subject: seqcount: More consistent seqprop names Attempt uniformity and brevity. 
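To spell out what the "preemptible" property controls, a small sketch with one of the preemptible lock types (the bar structure is invented): with a seqcount_mutex_t the write_seqcount_begin()/end() helpers disable and re-enable preemption themselves, so the writer adds no explicit preempt_disable()/preempt_enable() around the critical section.

struct bar {
	struct mutex		lock;
	seqcount_mutex_t	seq;	/* write side serialized by ->lock */
	int			val;
};

static void bar_update(struct bar *b, int val)
{
	mutex_lock(&b->lock);
	write_seqcount_begin(&b->seq);	/* disables preemption internally */
	b->val = val;
	write_seqcount_end(&b->seq);	/* re-enables preemption */
	mutex_unlock(&b->lock);
}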
Signed-off-by: Peter Zijlstra (Intel) --- include/linux/seqlock.h | 52 ++++++++++++++++++++++++------------------------- 1 file changed, 26 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 251dcd6f5cd8..a076f783aa36 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -247,9 +247,9 @@ SEQCOUNT_LOCKTYPE(struct ww_mutex, ww_mutex, true, &s->lock->base) __seqprop_case((s), mutex, prop), \ __seqprop_case((s), ww_mutex, prop)) -#define __to_seqcount_t(s) __seqprop(s, ptr) -#define __associated_lock_exists_and_is_preemptible(s) __seqprop(s, preemptible) -#define __assert_write_section_is_protected(s) __seqprop(s, assert) +#define __seqcount_ptr(s) __seqprop(s, ptr) +#define __seqcount_lock_preemptible(s) __seqprop(s, preemptible) +#define __seqcount_assert_lock_held(s) __seqprop(s, assert) /** * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier @@ -266,7 +266,7 @@ SEQCOUNT_LOCKTYPE(struct ww_mutex, ww_mutex, true, &s->lock->base) * Return: count to be passed to read_seqcount_retry() */ #define __read_seqcount_begin(s) \ - __read_seqcount_t_begin(__to_seqcount_t(s)) + __read_seqcount_t_begin(__seqcount_ptr(s)) static inline unsigned __read_seqcount_t_begin(const seqcount_t *s) { @@ -289,7 +289,7 @@ repeat: * Return: count to be passed to read_seqcount_retry() */ #define raw_read_seqcount_begin(s) \ - raw_read_seqcount_t_begin(__to_seqcount_t(s)) + raw_read_seqcount_t_begin(__seqcount_ptr(s)) static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s) { @@ -305,7 +305,7 @@ static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s) * Return: count to be passed to read_seqcount_retry() */ #define read_seqcount_begin(s) \ - read_seqcount_t_begin(__to_seqcount_t(s)) + read_seqcount_t_begin(__seqcount_ptr(s)) static inline unsigned read_seqcount_t_begin(const seqcount_t *s) { @@ -325,7 +325,7 @@ static inline unsigned read_seqcount_t_begin(const seqcount_t *s) * Return: count to be passed to read_seqcount_retry() */ #define raw_read_seqcount(s) \ - raw_read_seqcount_t(__to_seqcount_t(s)) + raw_read_seqcount_t(__seqcount_ptr(s)) static inline unsigned raw_read_seqcount_t(const seqcount_t *s) { @@ -353,7 +353,7 @@ static inline unsigned raw_read_seqcount_t(const seqcount_t *s) * Return: count to be passed to read_seqcount_retry() */ #define raw_seqcount_begin(s) \ - raw_seqcount_t_begin(__to_seqcount_t(s)) + raw_seqcount_t_begin(__seqcount_ptr(s)) static inline unsigned raw_seqcount_t_begin(const seqcount_t *s) { @@ -380,7 +380,7 @@ static inline unsigned raw_seqcount_t_begin(const seqcount_t *s) * Return: true if a read section retry is required, else false */ #define __read_seqcount_retry(s, start) \ - __read_seqcount_t_retry(__to_seqcount_t(s), start) + __read_seqcount_t_retry(__seqcount_ptr(s), start) static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start) { @@ -400,7 +400,7 @@ static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start) * Return: true if a read section retry is required, else false */ #define read_seqcount_retry(s, start) \ - read_seqcount_t_retry(__to_seqcount_t(s), start) + read_seqcount_t_retry(__seqcount_ptr(s), start) static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start) { @@ -414,10 +414,10 @@ static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start) */ #define raw_write_seqcount_begin(s) \ do { \ - if (__associated_lock_exists_and_is_preemptible(s)) \ + if 
(__seqcount_lock_preemptible(s)) \ preempt_disable(); \ \ - raw_write_seqcount_t_begin(__to_seqcount_t(s)); \ + raw_write_seqcount_t_begin(__seqcount_ptr(s)); \ } while (0) static inline void raw_write_seqcount_t_begin(seqcount_t *s) @@ -433,9 +433,9 @@ static inline void raw_write_seqcount_t_begin(seqcount_t *s) */ #define raw_write_seqcount_end(s) \ do { \ - raw_write_seqcount_t_end(__to_seqcount_t(s)); \ + raw_write_seqcount_t_end(__seqcount_ptr(s)); \ \ - if (__associated_lock_exists_and_is_preemptible(s)) \ + if (__seqcount_lock_preemptible(s)) \ preempt_enable(); \ } while (0) @@ -456,12 +456,12 @@ static inline void raw_write_seqcount_t_end(seqcount_t *s) */ #define write_seqcount_begin_nested(s, subclass) \ do { \ - __assert_write_section_is_protected(s); \ + __seqcount_assert_lock_held(s); \ \ - if (__associated_lock_exists_and_is_preemptible(s)) \ + if (__seqcount_lock_preemptible(s)) \ preempt_disable(); \ \ - write_seqcount_t_begin_nested(__to_seqcount_t(s), subclass); \ + write_seqcount_t_begin_nested(__seqcount_ptr(s), subclass); \ } while (0) static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass) @@ -483,12 +483,12 @@ static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass) */ #define write_seqcount_begin(s) \ do { \ - __assert_write_section_is_protected(s); \ + __seqcount_assert_lock_held(s); \ \ - if (__associated_lock_exists_and_is_preemptible(s)) \ + if (__seqcount_lock_preemptible(s)) \ preempt_disable(); \ \ - write_seqcount_t_begin(__to_seqcount_t(s)); \ + write_seqcount_t_begin(__seqcount_ptr(s)); \ } while (0) static inline void write_seqcount_t_begin(seqcount_t *s) @@ -504,9 +504,9 @@ static inline void write_seqcount_t_begin(seqcount_t *s) */ #define write_seqcount_end(s) \ do { \ - write_seqcount_t_end(__to_seqcount_t(s)); \ + write_seqcount_t_end(__seqcount_ptr(s)); \ \ - if (__associated_lock_exists_and_is_preemptible(s)) \ + if (__seqcount_lock_preemptible(s)) \ preempt_enable(); \ } while (0) @@ -558,7 +558,7 @@ static inline void write_seqcount_t_end(seqcount_t *s) * } */ #define raw_write_seqcount_barrier(s) \ - raw_write_seqcount_t_barrier(__to_seqcount_t(s)) + raw_write_seqcount_t_barrier(__seqcount_ptr(s)) static inline void raw_write_seqcount_t_barrier(seqcount_t *s) { @@ -578,7 +578,7 @@ static inline void raw_write_seqcount_t_barrier(seqcount_t *s) * will complete successfully and see data older than this. */ #define write_seqcount_invalidate(s) \ - write_seqcount_t_invalidate(__to_seqcount_t(s)) + write_seqcount_t_invalidate(__seqcount_ptr(s)) static inline void write_seqcount_t_invalidate(seqcount_t *s) { @@ -604,7 +604,7 @@ static inline void write_seqcount_t_invalidate(seqcount_t *s) * checked with read_seqcount_retry(). */ #define raw_read_seqcount_latch(s) \ - raw_read_seqcount_t_latch(__to_seqcount_t(s)) + raw_read_seqcount_t_latch(__seqcount_ptr(s)) static inline int raw_read_seqcount_t_latch(seqcount_t *s) { @@ -695,7 +695,7 @@ static inline int raw_read_seqcount_t_latch(seqcount_t *s) * patterns to manage the lifetimes of the objects within. 
*/ #define raw_write_seqcount_latch(s) \ - raw_write_seqcount_t_latch(__to_seqcount_t(s)) + raw_write_seqcount_t_latch(__seqcount_ptr(s)) static inline void raw_write_seqcount_t_latch(seqcount_t *s) { -- cgit v1.2.3 From 5037d368b2c2c90e9432d477e5562dce1c33d5c9 Mon Sep 17 00:00:00 2001 From: Andreas Färber Date: Wed, 22 Jul 2020 11:06:57 +0100 Subject: nvmem: core: Add nvmem_cell_read_u8() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Complement the u16, u32 and u64 helpers with a u8 variant to ease accessing byte-sized values. This helper will be useful for Realtek Digital Home Center platforms, which store some byte and sub-byte sized values in non-volatile memory. Signed-off-by: Andreas Färber Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20200722100705.7772-7-srinivas.kandagatla@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/nvmem/core.c | 15 +++++++++++++++ include/linux/nvmem-consumer.h | 1 + 2 files changed, 16 insertions(+) (limited to 'include') diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 95bed31391cd..d6bacc878500 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -1374,6 +1374,21 @@ static int nvmem_cell_read_common(struct device *dev, const char *cell_id, return 0; } +/** + * nvmem_cell_read_u8() - Read a cell value as a u8 + * + * @dev: Device that requests the nvmem cell. + * @cell_id: Name of nvmem cell to read. + * @val: pointer to output value. + * + * Return: 0 on success or negative errno. + */ +int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val) +{ + return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val)); +} +EXPORT_SYMBOL_GPL(nvmem_cell_read_u8); + /** * nvmem_cell_read_u16() - Read a cell value as a u16 * diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h index 1b311d27c9b8..052293f4cbdb 100644 --- a/include/linux/nvmem-consumer.h +++ b/include/linux/nvmem-consumer.h @@ -61,6 +61,7 @@ void nvmem_cell_put(struct nvmem_cell *cell); void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell); void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len); int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len); +int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val); int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val); int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val); int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val); -- cgit v1.2.3 From 731aa3fae8137ebca83a01d20fbf3effb4798fc8 Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Wed, 22 Jul 2020 11:06:58 +0100 Subject: nvmem: core: add support to auto devid For nvmem providers which have multiple instances, it is required to suffix the provider name with a proper id, so that they do not conflict over the same name. Currently the core does not handle this case properly even though the core already has logic to generate the id. This patch adds a new devid type NVMEM_DEVID_AUTO for providers to be able to let the core assign the id and append it to the provider name.
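As a sketch of the intended use (the driver and device names are made up, and the nvmem_config fields not relevant here, such as reg_read and size, are omitted), a provider with several instances would register like this and end up with devices named "myeeprom0", "myeeprom1", and so on:

static struct nvmem_config econfig = {
	.name	= "myeeprom",
	.id	= NVMEM_DEVID_AUTO,	/* let the core pick and append the id */
};

static int myeeprom_probe(struct platform_device *pdev)
{
	struct nvmem_device *nvmem;

	econfig.dev = &pdev->dev;
	nvmem = devm_nvmem_register(&pdev->dev, &econfig);

	return PTR_ERR_OR_ZERO(nvmem);
}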
Reported-by: Shawn Guo Signed-off-by: Srinivas Kandagatla Tested-by: Shawn Guo Link: https://lore.kernel.org/r/20200722100705.7772-8-srinivas.kandagatla@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/nvmem/core.c | 10 ++++++++-- include/linux/nvmem-provider.h | 3 +++ 2 files changed, 11 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index d6bacc878500..6cd3edb2eaf6 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -635,12 +635,18 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) if (!config->no_of_node) nvmem->dev.of_node = config->dev->of_node; - if (config->id == -1 && config->name) { + switch (config->id) { + case NVMEM_DEVID_NONE: dev_set_name(&nvmem->dev, "%s", config->name); - } else { + break; + case NVMEM_DEVID_AUTO: + dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id); + break; + default: dev_set_name(&nvmem->dev, "%s%d", config->name ? : "nvmem", config->name ? config->id : nvmem->id); + break; } nvmem->read_only = device_property_present(config->dev, "read-only") || diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index 6d6f8e5d24c9..06409a6c40bc 100644 --- a/include/linux/nvmem-provider.h +++ b/include/linux/nvmem-provider.h @@ -27,6 +27,9 @@ enum nvmem_type { NVMEM_TYPE_BATTERY_BACKED, }; +#define NVMEM_DEVID_NONE (-1) +#define NVMEM_DEVID_AUTO (-2) + /** * struct nvmem_config - NVMEM device configuration * -- cgit v1.2.3 From 868f3ee6e452bc2b89e68183a1700fcbbe0807b1 Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Thu, 23 Jul 2020 03:33:54 +0300 Subject: serial: 8250: Add 8250 port clock update method Some platforms are designed in such a way that the UART port reference clock may change asynchronously at some point. In the Baikal-T1 SoC this may happen because the reference clock is shared between two UART ports; on the Allwinner SoC the reference clock is derived from the CPU clock, so any CPU frequency change needs to be reflected in the UART controller as well. But it's not enough to just update the uart_port->uartclk field of the corresponding UART port; the 8250 controller reference clock divisor must also be altered so as to preserve the current baud rate setting. All of this is done in a coherent way by calling the serial8250_update_uartclk() method provided in this patch. Note though that it isn't supposed to be called from within the UART port callbacks, because the locks used to protect the UART port data are already taken there. Signed-off-by: Serge Semin Link: https://lore.kernel.org/r/20200723003357.26897-2-Sergey.Semin@baikalelectronics.ru Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_port.c | 40 +++++++++++++++++++++++++++++++++++++ include/linux/serial_8250.h | 2 ++ 2 files changed, 42 insertions(+) (limited to 'include') diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index d64ca77d9cfa..09475695effd 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -2632,6 +2632,46 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port, (port->uartclk + tolerance) / 16); } +/* + * Note in order to avoid the tty port mutex deadlock don't use the next method + * within the uart port callbacks. Primarily it's supposed to be utilized to + * handle a sudden reference clock rate change.
+ */ +void serial8250_update_uartclk(struct uart_port *port, unsigned int uartclk) +{ + struct uart_8250_port *up = up_to_u8250p(port); + unsigned int baud, quot, frac = 0; + struct ktermios *termios; + unsigned long flags; + + mutex_lock(&port->state->port.mutex); + + if (port->uartclk == uartclk) + goto out_lock; + + port->uartclk = uartclk; + termios = &port->state->port.tty->termios; + + baud = serial8250_get_baud_rate(port, termios, NULL); + quot = serial8250_get_divisor(port, baud, &frac); + + serial8250_rpm_get(up); + spin_lock_irqsave(&port->lock, flags); + + uart_update_timeout(port, termios->c_cflag, baud); + + serial8250_set_divisor(port, baud, quot, frac); + serial_port_out(port, UART_LCR, up->lcr); + serial8250_out_MCR(up, UART_MCR_DTR | UART_MCR_RTS); + + spin_unlock_irqrestore(&port->lock, flags); + serial8250_rpm_put(up); + +out_lock: + mutex_unlock(&port->state->port.mutex); +} +EXPORT_SYMBOL_GPL(serial8250_update_uartclk); + void serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 6545f8cfc8fa..2b70f736b091 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -155,6 +155,8 @@ extern int early_serial_setup(struct uart_port *port); extern int early_serial8250_setup(struct earlycon_device *device, const char *options); +extern void serial8250_update_uartclk(struct uart_port *port, + unsigned int uartclk); extern void serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old); extern void serial8250_do_set_ldisc(struct uart_port *port, -- cgit v1.2.3 From efe9711214e6138a5a2a46ca4068bfce50c03444 Mon Sep 17 00:00:00 2001 From: Neal Liu Date: Mon, 27 Jul 2020 11:25:46 +0800 Subject: cpuidle: change enter_s2idle() prototype Control Flow Integrity (CFI) is a security mechanism that disallows changes to the original control flow graph of a compiled binary, making it significantly harder to perform control-flow hijacking attacks. init_state_node() assigns the same callback to function pointers with different declarations. static int init_state_node(struct cpuidle_state *idle_state, const struct of_device_id *matches, struct device_node *state_node) { ... idle_state->enter = match_id->data; ... idle_state->enter_s2idle = match_id->data; } Function declarations: struct cpuidle_state { ... int (*enter) (struct cpuidle_device *dev, struct cpuidle_driver *drv, int index); void (*enter_s2idle) (struct cpuidle_device *dev, struct cpuidle_driver *drv, int index); }; In this case, either enter() or enter_s2idle() would cause a CFI check failure since they use the same callee. Align enter_s2idle() with the prototype of enter(), since enter() needs its return value for some use cases. The return value of enter_s2idle() is not needed currently. Signed-off-by: Neal Liu Reviewed-by: Sami Tolvanen Signed-off-by: Rafael J.
--- drivers/acpi/processor_idle.c | 6 ++++-- drivers/cpuidle/cpuidle-tegra.c | 8 +++++--- drivers/idle/intel_idle.c | 6 ++++-- include/linux/cpuidle.h | 9 ++++++--- 4 files changed, 19 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 75534c5b5433..6ffb6c99c2de 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -655,8 +655,8 @@ static int acpi_idle_enter(struct cpuidle_device *dev, return index; } -static void acpi_idle_enter_s2idle(struct cpuidle_device *dev, - struct cpuidle_driver *drv, int index) +static int acpi_idle_enter_s2idle(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) { struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); @@ -674,6 +674,8 @@ static void acpi_idle_enter_s2idle(struct cpuidle_device *dev, } } acpi_idle_do_entry(cx); + + return 0; } static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr, diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c index 150045849d78..a12fb141875a 100644 --- a/drivers/cpuidle/cpuidle-tegra.c +++ b/drivers/cpuidle/cpuidle-tegra.c @@ -253,11 +253,13 @@ static int tegra_cpuidle_enter(struct cpuidle_device *dev, return err ? -1 : index; } -static void tegra114_enter_s2idle(struct cpuidle_device *dev, - struct cpuidle_driver *drv, - int index) +static int tegra114_enter_s2idle(struct cpuidle_device *dev, + struct cpuidle_driver *drv, + int index) { tegra_cpuidle_enter(dev, drv, index); + + return 0; } /* diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index fa23a7ea01ac..3f86f36dab2b 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -173,13 +173,15 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev, * Invoked as a suspend-to-idle callback routine with frozen user space, frozen * scheduler tick and suspended scheduler clock on the target CPU. */ -static __cpuidle void intel_idle_s2idle(struct cpuidle_device *dev, - struct cpuidle_driver *drv, int index) +static __cpuidle int intel_idle_s2idle(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) { unsigned long eax = flg2MWAIT(drv->states[index].flags); unsigned long ecx = 1; /* break on interrupt flag */ mwait_idle_with_hints(eax, ecx); + + return 0; } /* diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index ec2ef63771f0..b65909ae4e20 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -65,10 +65,13 @@ struct cpuidle_state { * CPUs execute ->enter_s2idle with the local tick or entire timekeeping * suspended, so it must not re-enable interrupts at any point (even * temporarily) or attempt to change states of clock event devices. + * + * This callback may point to the same function as ->enter if all of + * the above requirements are met by it. */ - void (*enter_s2idle) (struct cpuidle_device *dev, - struct cpuidle_driver *drv, - int index); + int (*enter_s2idle)(struct cpuidle_device *dev, - struct cpuidle_driver *drv, - int index); + struct cpuidle_driver *drv, + int index); }; /* Idle State Flags */ -- cgit v1.2.3 From 6bf9d8f6f0df3f7aa852dc111c960bc04578c7c5 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Sun, 19 Jul 2020 10:25:21 +0300 Subject: RDMA/include: Replace license text with SPDX tags The header files in the RDMA subsystem are dual licensed and can be described by a simple SPDX tag, so replace all of them at once and, while at it, make them use the same coding style for header guard defines.
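Concretely, every converted header ends up opening with a one-line SPDX tag (either GPL-2.0 OR Linux-OpenIB or GPL-2.0 OR BSD-3-Clause, depending on the file) followed by a plain #ifndef guard. A generic sketch of the resulting style is shown below; the guard symbol and copyright line are placeholders, not text from any particular file in this patch.

/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 20xx Example Corporation. All rights reserved.
 */

#ifndef _EXAMPLE_RDMA_HEADER_H
#define _EXAMPLE_RDMA_HEADER_H

/* declarations go here */

#endif /* _EXAMPLE_RDMA_HEADER_H */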
Link: https://lore.kernel.org/r/20200719072521.135260-1-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- include/rdma/ib.h | 31 ++--------------------- include/rdma/ib_addr.h | 31 ++--------------------- include/rdma/ib_cache.h | 29 +-------------------- include/rdma/ib_cm.h | 1 + include/rdma/ib_hdrs.h | 44 +------------------------------- include/rdma/ib_mad.h | 31 ++--------------------- include/rdma/ib_marshall.h | 31 ++--------------------- include/rdma/ib_pack.h | 29 +-------------------- include/rdma/ib_pma.h | 31 ++--------------------- include/rdma/ib_sa.h | 29 +-------------------- include/rdma/ib_smi.h | 31 ++--------------------- include/rdma/ib_umem.h | 29 +-------------------- include/rdma/ib_umem_odp.h | 29 +-------------------- include/rdma/ib_verbs.h | 31 ++--------------------- include/rdma/iw_cm.h | 30 ++-------------------- include/rdma/iw_portmap.h | 30 ++-------------------- include/rdma/opa_addr.h | 44 +------------------------------- include/rdma/opa_port_info.h | 31 ++--------------------- include/rdma/opa_smi.h | 31 ++--------------------- include/rdma/opa_vnic.h | 49 +++--------------------------------- include/rdma/rdma_cm.h | 31 ++--------------------- include/rdma/rdma_cm_ib.h | 31 ++--------------------- include/rdma/rdma_netlink.h | 2 +- include/rdma/rdma_vt.h | 50 +++--------------------------------- include/rdma/rdmavt_cq.h | 53 +++------------------------------------ include/rdma/rdmavt_mr.h | 50 +++--------------------------------- include/rdma/rdmavt_qp.h | 50 +++--------------------------------- include/rdma/uverbs_ioctl.h | 29 +-------------------- include/rdma/uverbs_named_ioctl.h | 29 +-------------------- include/rdma/uverbs_std_types.h | 29 +-------------------- include/rdma/uverbs_types.h | 29 +-------------------- 31 files changed, 59 insertions(+), 946 deletions(-) (limited to 'include') diff --git a/include/rdma/ib.h b/include/rdma/ib.h index fe2fc9e91588..83139b9ce409 100644 --- a/include/rdma/ib.h +++ b/include/rdma/ib.h @@ -1,36 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* * Copyright (c) 2010 Intel Corporation. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
*/ -#if !defined(_RDMA_IB_H) +#ifndef _RDMA_IB_H #define _RDMA_IB_H #include diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h index 2734c895c1bf..b0e636ac6690 100644 --- a/include/rdma/ib_addr.h +++ b/include/rdma/ib_addr.h @@ -1,37 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* * Copyright (c) 2005 Voltaire Inc. All rights reserved. * Copyright (c) 2005 Intel Corporation. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ -#if !defined(IB_ADDR_H) +#ifndef IB_ADDR_H #define IB_ADDR_H #include diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h index e06d13388ae7..66a8f369a2fa 100644 --- a/include/rdma/ib_cache.h +++ b/include/rdma/ib_cache.h @@ -1,35 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Intel Corporation. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _IB_CACHE_H diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h index 0f1ea5f2d01c..382427add677 100644 --- a/include/rdma/ib_cm.h +++ b/include/rdma/ib_cm.h @@ -6,6 +6,7 @@ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved. */ + #ifndef IB_CM_H #define IB_CM_H diff --git a/include/rdma/ib_hdrs.h b/include/rdma/ib_hdrs.h index 9a90bd031e8c..57c1ac881d08 100644 --- a/include/rdma/ib_hdrs.h +++ b/include/rdma/ib_hdrs.h @@ -1,48 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2016 - 2018 Intel Corporation. - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * */ #ifndef IB_HDRS_H diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h index 8c093fc1bb9f..8dfb1ddf345a 100644 --- a/include/rdma/ib_mad.h +++ b/include/rdma/ib_mad.h @@ -1,40 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. * Copyright (c) 2004 Infinicon Corporation. All rights reserved. * Copyright (c) 2004 Intel Corporation. All rights reserved. 
* Copyright (c) 2004 Topspin Corporation. All rights reserved. * Copyright (c) 2004-2006 Voltaire Corporation. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ -#if !defined(IB_MAD_H) +#ifndef IB_MAD_H #define IB_MAD_H #include diff --git a/include/rdma/ib_marshall.h b/include/rdma/ib_marshall.h index 8ebf84ae9ed1..1838869aad28 100644 --- a/include/rdma/ib_marshall.h +++ b/include/rdma/ib_marshall.h @@ -1,36 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* * Copyright (c) 2005-2006 Intel Corporation. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
*/ -#if !defined(IB_USER_MARSHALL_H) +#ifndef IB_USER_MARSHALL_H #define IB_USER_MARSHALL_H #include diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h index 7ea1382ad0e5..a9162f25beaf 100644 --- a/include/rdma/ib_pack.h +++ b/include/rdma/ib_pack.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* * Copyright (c) 2004 Topspin Corporation. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef IB_PACK_H diff --git a/include/rdma/ib_pma.h b/include/rdma/ib_pma.h index 2f8a65c1fca7..44c618203785 100644 --- a/include/rdma/ib_pma.h +++ b/include/rdma/ib_pma.h @@ -1,38 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. * All rights reserved. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
*/ -#if !defined(IB_PMA_H) +#ifndef IB_PMA_H #define IB_PMA_H #include diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h index 19520979b84c..693285e76f13 100644 --- a/include/rdma/ib_sa.h +++ b/include/rdma/ib_sa.h @@ -1,35 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Voltaire, Inc. All rights reserved. * Copyright (c) 2006 Intel Corporation. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef IB_SA_H diff --git a/include/rdma/ib_smi.h b/include/rdma/ib_smi.h index 7be0028f155c..fdb8633cbaff 100644 --- a/include/rdma/ib_smi.h +++ b/include/rdma/ib_smi.h @@ -1,40 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. * Copyright (c) 2004 Infinicon Corporation. All rights reserved. * Copyright (c) 2004 Intel Corporation. All rights reserved. * Copyright (c) 2004 Topspin Corporation. All rights reserved. * Copyright (c) 2004 Voltaire Corporation. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ -#if !defined(IB_SMI_H) +#ifndef IB_SMI_H #define IB_SMI_H #include diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h index e3518fd6b95b..71f573a418bf 100644 --- a/include/rdma/ib_umem.h +++ b/include/rdma/ib_umem.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* * Copyright (c) 2007 Cisco Systems. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef IB_UMEM_H diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h index 64314ff76612..d16d2c17e733 100644 --- a/include/rdma/ib_umem_odp.h +++ b/include/rdma/ib_umem_odp.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* * Copyright (c) 2014 Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef IB_UMEM_ODP_H diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 087f001fd020..c0b2fa7e9b95 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. * Copyright (c) 2004 Infinicon Corporation. All rights reserved. @@ -6,37 +7,9 @@ * Copyright (c) 2004 Voltaire Corporation. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ -#if !defined(IB_VERBS_H) +#ifndef IB_VERBS_H #define IB_VERBS_H #include diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h index 5aa8a9c76aa0..91975400e1b3 100644 --- a/include/rdma/iw_cm.h +++ b/include/rdma/iw_cm.h @@ -1,35 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* * Copyright (c) 2005 Network Appliance, Inc. All rights reserved. * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #ifndef IW_CM_H #define IW_CM_H diff --git a/include/rdma/iw_portmap.h b/include/rdma/iw_portmap.h index c89535047c42..6dbc1a235974 100644 --- a/include/rdma/iw_portmap.h +++ b/include/rdma/iw_portmap.h @@ -1,35 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* * Copyright (c) 2014 Intel Corporation. All rights reserved. * Copyright (c) 2014 Chelsio, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #ifndef _IW_PORTMAP_H #define _IW_PORTMAP_H diff --git a/include/rdma/opa_addr.h b/include/rdma/opa_addr.h index 66d4393d339c..c6bfa2640bac 100644 --- a/include/rdma/opa_addr.h +++ b/include/rdma/opa_addr.h @@ -1,48 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2017 Intel Corporation. - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * */ #ifndef OPA_ADDR_H diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h index 0d9e6d74c385..73bcac90a048 100644 --- a/include/rdma/opa_port_info.h +++ b/include/rdma/opa_port_info.h @@ -1,36 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* * Copyright (c) 2014-2020 Intel Corporation. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ -#if !defined(OPA_PORT_INFO_H) +#ifndef OPA_PORT_INFO_H #define OPA_PORT_INFO_H #include diff --git a/include/rdma/opa_smi.h b/include/rdma/opa_smi.h index c7b2ef12792d..a9f1b5700e98 100644 --- a/include/rdma/opa_smi.h +++ b/include/rdma/opa_smi.h @@ -1,36 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* * Copyright (c) 2014 Intel Corporation. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ -#if !defined(OPA_SMI_H) +#ifndef OPA_SMI_H #define OPA_SMI_H #include diff --git a/include/rdma/opa_vnic.h b/include/rdma/opa_vnic.h index 6f244e759b4f..cbe3c2811455 100644 --- a/include/rdma/opa_vnic.h +++ b/include/rdma/opa_vnic.h @@ -1,52 +1,11 @@ -#ifndef _OPA_VNIC_H -#define _OPA_VNIC_H +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2017 - 2020 Intel Corporation. - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * */ +#ifndef _OPA_VNIC_H +#define _OPA_VNIC_H + /* * This file contains Intel Omni-Path (OPA) Virtual Network Interface * Controller (VNIC) specific declarations. diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h index 939d7abe026f..cf5da2ae49bf 100644 --- a/include/rdma/rdma_cm.h +++ b/include/rdma/rdma_cm.h @@ -1,37 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* * Copyright (c) 2005 Voltaire Inc. All rights reserved. * Copyright (c) 2005 Intel Corporation. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ -#if !defined(RDMA_CM_H) +#ifndef RDMA_CM_H #define RDMA_CM_H #include diff --git a/include/rdma/rdma_cm_ib.h b/include/rdma/rdma_cm_ib.h index 6a69d71a21a5..8354e7de7815 100644 --- a/include/rdma/rdma_cm_ib.h +++ b/include/rdma/rdma_cm_ib.h @@ -1,36 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* * Copyright (c) 2006 Intel Corporation. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. 
- * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ -#if !defined(RDMA_CM_IB_H) +#ifndef RDMA_CM_IB_H #define RDMA_CM_IB_H #include diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h index ab22759de7ea..2758d9df71ee 100644 --- a/include/rdma/rdma_netlink.h +++ b/include/rdma/rdma_netlink.h @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ + #ifndef _RDMA_NETLINK_H #define _RDMA_NETLINK_H - #include #include diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h index ac5a9430abb6..9fd217b24916 100644 --- a/include/rdma/rdma_vt.h +++ b/include/rdma/rdma_vt.h @@ -1,53 +1,11 @@ -#ifndef DEF_RDMA_VT_H -#define DEF_RDMA_VT_H - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2016 - 2019 Intel Corporation. - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * */ +#ifndef DEF_RDMA_VT_H +#define DEF_RDMA_VT_H + /* * Structure that low level drivers will populate in order to register with the * rdmavt layer. diff --git a/include/rdma/rdmavt_cq.h b/include/rdma/rdmavt_cq.h index 574eb7278f46..1fe2bb5a63b0 100644 --- a/include/rdma/rdmavt_cq.h +++ b/include/rdma/rdmavt_cq.h @@ -1,56 +1,11 @@ -#ifndef DEF_RDMAVT_INCCQ_H -#define DEF_RDMAVT_INCCQ_H - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * * Copyright(c) 2016 - 2018 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Copyright(c) 2015 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * */ +#ifndef DEF_RDMAVT_INCCQ_H +#define DEF_RDMAVT_INCCQ_H + #include #include #include diff --git a/include/rdma/rdmavt_mr.h b/include/rdma/rdmavt_mr.h index ce6c888f7fe7..c3367e9833ef 100644 --- a/include/rdma/rdmavt_mr.h +++ b/include/rdma/rdmavt_mr.h @@ -1,53 +1,11 @@ -#ifndef DEF_RDMAVT_INCMR_H -#define DEF_RDMAVT_INCMR_H - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2016 Intel Corporation. - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * */ +#ifndef DEF_RDMAVT_INCMR_H +#define DEF_RDMAVT_INCMR_H + /* * For Memory Regions. This stuff should probably be moved into rdmavt/mr.h once * drivers no longer need access to the MR directly. diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index c4369a6c2951..f88392204ae9 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -1,53 +1,11 @@ -#ifndef DEF_RDMAVT_INCQP_H -#define DEF_RDMAVT_INCQP_H - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2016 - 2020 Intel Corporation. - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * */ +#ifndef DEF_RDMAVT_INCQP_H +#define DEF_RDMAVT_INCQP_H + #include #include #include diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h index db419c8dbd10..b00270c72740 100644 --- a/include/rdma/uverbs_ioctl.h +++ b/include/rdma/uverbs_ioctl.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
*/ #ifndef _UVERBS_IOCTL_ diff --git a/include/rdma/uverbs_named_ioctl.h b/include/rdma/uverbs_named_ioctl.h index 6ae6cf8e4c2e..f04f5126f61e 100644 --- a/include/rdma/uverbs_named_ioctl.h +++ b/include/rdma/uverbs_named_ioctl.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _UVERBS_NAMED_IOCTL_ diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h index 8451b19103ee..fe0512116958 100644 --- a/include/rdma/uverbs_std_types.h +++ b/include/rdma/uverbs_std_types.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
*/ #ifndef _UVERBS_STD_TYPES__ diff --git a/include/rdma/uverbs_types.h b/include/rdma/uverbs_types.h index c15b298aa62f..06db27e35f40 100644 --- a/include/rdma/uverbs_types.h +++ b/include/rdma/uverbs_types.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _UVERBS_TYPES_ -- cgit v1.2.3 From 50935339c394adfb3d7253055e3bc10ee70264b0 Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Sat, 25 Jul 2020 19:02:25 +0200 Subject: netfilter: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. 
Klimov Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/xt_connmark.h | 2 +- net/decnet/netfilter/dn_rtmsg.c | 2 +- net/netfilter/Kconfig | 2 +- net/netfilter/nfnetlink_acct.c | 2 +- net/netfilter/nft_set_pipapo.c | 4 ++-- net/netfilter/xt_CONNSECMARK.c | 2 +- net/netfilter/xt_connmark.c | 2 +- net/netfilter/xt_nfacct.c | 2 +- net/netfilter/xt_time.c | 2 +- 9 files changed, 10 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/netfilter/xt_connmark.h b/include/uapi/linux/netfilter/xt_connmark.h index 1aa5c955ee1e..f01c19b83a2b 100644 --- a/include/uapi/linux/netfilter/xt_connmark.h +++ b/include/uapi/linux/netfilter/xt_connmark.h @@ -4,7 +4,7 @@ #include -/* Copyright (C) 2002,2004 MARA Systems AB +/* Copyright (C) 2002,2004 MARA Systems AB * by Henrik Nordstrom * * This program is free software; you can redistribute it and/or modify diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c index dc705769acc9..26a9193df783 100644 --- a/net/decnet/netfilter/dn_rtmsg.c +++ b/net/decnet/netfilter/dn_rtmsg.c @@ -6,7 +6,7 @@ * * DECnet Routing Message Grabulator * - * (C) 2000 ChyGwyn Limited - http://www.chygwyn.com/ + * (C) 2000 ChyGwyn Limited - https://www.chygwyn.com/ * * Author: Steven Whitehouse */ diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 0ffe2b8723c4..25313c29d799 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -447,7 +447,7 @@ config NF_TABLES replace the existing {ip,ip6,arp,eb}_tables infrastructure. It provides a pseudo-state machine with an extensible instruction-set (also known as expressions) that the userspace 'nft' utility - (http://www.netfilter.org/projects/nftables) uses to build the + (https://www.netfilter.org/projects/nftables) uses to build the rule-set. It also comes with the generic set infrastructure that allows you to construct mappings between matchings and actions for performance lookups. diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c index 5827117f2635..5bfec829c12f 100644 --- a/net/netfilter/nfnetlink_acct.c +++ b/net/netfilter/nfnetlink_acct.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* * (C) 2011 Pablo Neira Ayuso - * (C) 2011 Intra2net AG + * (C) 2011 Intra2net AG */ #include #include diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c index cc6082a5f7ad..9944523f5c2c 100644 --- a/net/netfilter/nft_set_pipapo.c +++ b/net/netfilter/nft_set_pipapo.c @@ -312,7 +312,7 @@ * Jay Ligatti, Josh Kuhn, and Chris Gage. * Proceedings of the IEEE International Conference on Computer * Communication Networks (ICCCN), August 2010. - * http://www.cse.usf.edu/~ligatti/papers/grouper-conf.pdf + * https://www.cse.usf.edu/~ligatti/papers/grouper-conf.pdf * * [Rottenstreich 2010] * Worst-Case TCAM Rule Expansion @@ -325,7 +325,7 @@ * Kirill Kogan, Sergey Nikolenko, Ori Rottenstreich, William Culhane, * and Patrick Eugster. * Proceedings of the 2014 ACM conference on SIGCOMM, August 2014. - * http://www.sigcomm.org/sites/default/files/ccr/papers/2014/August/2619239-2626294.pdf + * https://www.sigcomm.org/sites/default/files/ccr/papers/2014/August/2619239-2626294.pdf */ #include diff --git a/net/netfilter/xt_CONNSECMARK.c b/net/netfilter/xt_CONNSECMARK.c index a5c8b653476a..76acecf3e757 100644 --- a/net/netfilter/xt_CONNSECMARK.c +++ b/net/netfilter/xt_CONNSECMARK.c @@ -6,7 +6,7 @@ * with the SECMARK target and state match. 
* * Based somewhat on CONNMARK: - * Copyright (C) 2002,2004 MARA Systems AB + * Copyright (C) 2002,2004 MARA Systems AB * by Henrik Nordstrom * * (C) 2006,2008 Red Hat, Inc., James Morris diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c index eec2f3a88d73..e5ebc0810675 100644 --- a/net/netfilter/xt_connmark.c +++ b/net/netfilter/xt_connmark.c @@ -2,7 +2,7 @@ /* * xt_connmark - Netfilter module to operate on connection marks * - * Copyright (C) 2002,2004 MARA Systems AB + * Copyright (C) 2002,2004 MARA Systems AB * by Henrik Nordstrom * Copyright © CC Computer Consultants GmbH, 2007 - 2008 * Jan Engelhardt diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c index 5aab6df74e0f..a97c2259bbc8 100644 --- a/net/netfilter/xt_nfacct.c +++ b/net/netfilter/xt_nfacct.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* * (C) 2011 Pablo Neira Ayuso - * (C) 2011 Intra2net AG + * (C) 2011 Intra2net AG */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c index 67cb98489415..6aa12d0f54e2 100644 --- a/net/netfilter/xt_time.c +++ b/net/netfilter/xt_time.c @@ -5,7 +5,7 @@ * based on ipt_time by Fabrice MARIE * This is a module which is used for time matching * It is using some modified code from dietlibc (localtime() function) - * that you can find at http://www.fefe.de/dietlibc/ + * that you can find at https://www.fefe.de/dietlibc/ * This file is distributed under the terms of the GNU General Public * License (GPL). Copies of the GPL can be obtained from gnu.org/gpl. */ -- cgit v1.2.3 From 2167c40657981f8a3d1ff1f04d4bda6b4a64f8e2 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 28 Jul 2020 23:03:41 +0200 Subject: PCI: Remove unused pci_lost_interrupt() 388c8c16abaf ("PCI: add routines for debugging and handling lost interrupts") added pci_lost_interrupt() that apparently never has had a single user. Remove it. Link: https://lore.kernel.org/r/e328d059-3068-6a40-28df-f81f616d15a0@gmail.com Signed-off-by: Heiner Kallweit Signed-off-by: Bjorn Helgaas --- drivers/pci/irq.c | 50 -------------------------------------------------- include/linux/pci.h | 7 ------- 2 files changed, 57 deletions(-) (limited to 'include') diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c index a1de501a2729..12ecd0aaa28d 100644 --- a/drivers/pci/irq.c +++ b/drivers/pci/irq.c @@ -6,61 +6,11 @@ * Copyright (C) 2017 Christoph Hellwig. */ -#include #include #include #include #include -static void pci_note_irq_problem(struct pci_dev *pdev, const char *reason) -{ - struct pci_dev *parent = to_pci_dev(pdev->dev.parent); - - pci_err(pdev, "Potentially misrouted IRQ (Bridge %s %04x:%04x)\n", - dev_name(&parent->dev), parent->vendor, parent->device); - pci_err(pdev, "%s\n", reason); - pci_err(pdev, "Please report to linux-kernel@vger.kernel.org\n"); - WARN_ON(1); -} - -/** - * pci_lost_interrupt - reports a lost PCI interrupt - * @pdev: device whose interrupt is lost - * - * The primary function of this routine is to report a lost interrupt - * in a standard way which users can recognise (instead of blaming the - * driver). - * - * Returns: - * a suggestion for fixing it (although the driver is not required to - * act on this). 
- */ -enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *pdev) -{ - if (pdev->msi_enabled || pdev->msix_enabled) { - enum pci_lost_interrupt_reason ret; - - if (pdev->msix_enabled) { - pci_note_irq_problem(pdev, "MSIX routing failure"); - ret = PCI_LOST_IRQ_DISABLE_MSIX; - } else { - pci_note_irq_problem(pdev, "MSI routing failure"); - ret = PCI_LOST_IRQ_DISABLE_MSI; - } - return ret; - } -#ifdef CONFIG_ACPI - if (!(acpi_disabled || acpi_noirq)) { - pci_note_irq_problem(pdev, "Potential ACPI misrouting please reboot with acpi=noirq"); - /* currently no way to fix acpi on the fly */ - return PCI_LOST_IRQ_DISABLE_ACPI; - } -#endif - pci_note_irq_problem(pdev, "unknown cause (not MSI or ACPI)"); - return PCI_LOST_IRQ_NO_INFORMATION; -} -EXPORT_SYMBOL(pci_lost_interrupt); - /** * pci_request_irq - allocate an interrupt line for a PCI device * @dev: PCI device to operate on diff --git a/include/linux/pci.h b/include/linux/pci.h index c79d83304e52..2dcd67f509a8 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1053,13 +1053,6 @@ void pci_sort_breadthfirst(void); /* Generic PCI functions exported to card drivers */ -enum pci_lost_interrupt_reason { - PCI_LOST_IRQ_NO_INFORMATION = 0, - PCI_LOST_IRQ_DISABLE_MSI, - PCI_LOST_IRQ_DISABLE_MSIX, - PCI_LOST_IRQ_DISABLE_ACPI, -}; -enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *dev); int pci_find_capability(struct pci_dev *dev, int cap); int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap); int pci_find_ext_capability(struct pci_dev *dev, int cap); -- cgit v1.2.3 From 6f24ff97e3231a5303841c5196a6f460f8485eb4 Mon Sep 17 00:00:00 2001 From: Dan Murphy Date: Wed, 29 Jul 2020 13:31:43 -0500 Subject: power: supply: bq27xxx_battery: Add the BQ27Z561 Battery monitor Add the Texas Instruments BQ27Z561 battery monitor. The register address map is laid out the same as compared to other devices within the file. The battery status register has differing bits to determine if the battery is full, discharging or dead. 
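For reference, a minimal sketch (not part of this patch, helper name made up) of how the new Flags bits map to a power-supply status; the bit positions are the ones the diff below defines for the 0x0a Flags register:

    /* Bit positions from the BQ27Z561 Flags register (0x0a), per this patch */
    #define BQ27Z561_FLAG_FDC	BIT(4)	/* battery fully discharged ("dead") */
    #define BQ27Z561_FLAG_FC	BIT(5)	/* battery fully charged */
    #define BQ27Z561_FLAG_DIS_CH	BIT(6)	/* battery is discharging */

    /* Hypothetical helper, mirroring the bq27xxx_battery_status() hunk below */
    static int bq27z561_flags_to_status(u16 flags)
    {
    	if (flags & BQ27Z561_FLAG_FC)
    		return POWER_SUPPLY_STATUS_FULL;
    	if (flags & BQ27Z561_FLAG_DIS_CH)
    		return POWER_SUPPLY_STATUS_DISCHARGING;
    	return POWER_SUPPLY_STATUS_CHARGING;
    }

BQ27Z561_FLAG_FDC is used the same way to report the "dead" battery as a critical capacity level.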
Signed-off-by: Dan Murphy Signed-off-by: Sebastian Reichel --- drivers/power/supply/bq27xxx_battery.c | 68 +++++++++++++++++++++++++++++- drivers/power/supply/bq27xxx_battery_i2c.c | 2 + include/linux/power/bq27xxx_battery.h | 1 + 3 files changed, 70 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c index acaafed037be..a05b9a2d112d 100644 --- a/drivers/power/supply/bq27xxx_battery.c +++ b/drivers/power/supply/bq27xxx_battery.c @@ -43,6 +43,7 @@ * https://www.ti.com/product/bq27411-g1 * https://www.ti.com/product/bq27441-g1 * https://www.ti.com/product/bq27621-g1 + * https://www.ti.com/product/bq27z561 */ #include @@ -79,6 +80,11 @@ #define BQ27000_FLAG_FC BIT(5) #define BQ27000_FLAG_CHGS BIT(7) /* Charge state flag */ +/* BQ27Z561 has different layout for Flags register */ +#define BQ27Z561_FLAG_FDC BIT(4) /* Battery fully discharged */ +#define BQ27Z561_FLAG_FC BIT(5) /* Battery fully charged */ +#define BQ27Z561_FLAG_DIS_CH BIT(6) /* Battery is discharging */ + /* control register params */ #define BQ27XXX_SEALED 0x20 #define BQ27XXX_SET_CFGUPDATE 0x13 @@ -431,12 +437,32 @@ static u8 [BQ27XXX_REG_DCAP] = 0x3c, [BQ27XXX_REG_AP] = 0x18, BQ27XXX_DM_REG_ROWS, - }; + }, #define bq27411_regs bq27421_regs #define bq27425_regs bq27421_regs #define bq27426_regs bq27421_regs #define bq27441_regs bq27421_regs #define bq27621_regs bq27421_regs + bq27z561_regs[BQ27XXX_REG_MAX] = { + [BQ27XXX_REG_CTRL] = 0x00, + [BQ27XXX_REG_TEMP] = 0x06, + [BQ27XXX_REG_INT_TEMP] = INVALID_REG_ADDR, + [BQ27XXX_REG_VOLT] = 0x08, + [BQ27XXX_REG_AI] = 0x14, + [BQ27XXX_REG_FLAGS] = 0x0a, + [BQ27XXX_REG_TTE] = 0x16, + [BQ27XXX_REG_TTF] = 0x18, + [BQ27XXX_REG_TTES] = INVALID_REG_ADDR, + [BQ27XXX_REG_TTECP] = INVALID_REG_ADDR, + [BQ27XXX_REG_NAC] = INVALID_REG_ADDR, + [BQ27XXX_REG_FCC] = 0x12, + [BQ27XXX_REG_CYCT] = 0x2a, + [BQ27XXX_REG_AE] = 0x22, + [BQ27XXX_REG_SOC] = 0x2c, + [BQ27XXX_REG_DCAP] = 0x3c, + [BQ27XXX_REG_AP] = 0x22, + BQ27XXX_DM_REG_ROWS, + }; static enum power_supply_property bq27000_props[] = { POWER_SUPPLY_PROP_STATUS, @@ -672,6 +698,25 @@ static enum power_supply_property bq27421_props[] = { #define bq27441_props bq27421_props #define bq27621_props bq27421_props +static enum power_supply_property bq27z561_props[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_CAPACITY_LEVEL, + POWER_SUPPLY_PROP_TEMP, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, + POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, + POWER_SUPPLY_PROP_TECHNOLOGY, + POWER_SUPPLY_PROP_CHARGE_FULL, + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, + POWER_SUPPLY_PROP_CYCLE_COUNT, + POWER_SUPPLY_PROP_POWER_AVG, + POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_MANUFACTURER, +}; + struct bq27xxx_dm_reg { u8 subclass_id; u8 offset; @@ -767,11 +812,14 @@ static struct bq27xxx_dm_reg bq27621_dm_regs[] = { #define bq27621_dm_regs 0 #endif +#define bq27z561_dm_regs 0 + #define BQ27XXX_O_ZERO 0x00000001 #define BQ27XXX_O_OTDC 0x00000002 /* has OTC/OTD overtemperature flags */ #define BQ27XXX_O_UTOT 0x00000004 /* has OT overtemperature flag */ #define BQ27XXX_O_CFGUP 0x00000008 #define BQ27XXX_O_RAM 0x00000010 +#define BQ27Z561_O_BITS 0x00000020 #define BQ27XXX_DATA(ref, key, opt) { \ .opts = (opt), \ @@ -816,6 +864,7 @@ static struct { [BQ27426] = BQ27XXX_DATA(bq27426, 0x80008000, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP | BQ27XXX_O_RAM), [BQ27441] = BQ27XXX_DATA(bq27441, 
0x80008000, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP | BQ27XXX_O_RAM), [BQ27621] = BQ27XXX_DATA(bq27621, 0x80008000, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP | BQ27XXX_O_RAM), + [BQ27Z561] = BQ27XXX_DATA(bq27z561, 0 , BQ27Z561_O_BITS), }; static DEFINE_MUTEX(bq27xxx_list_lock); @@ -1551,6 +1600,8 @@ static bool bq27xxx_battery_dead(struct bq27xxx_device_info *di, u16 flags) { if (di->opts & BQ27XXX_O_ZERO) return flags & (BQ27000_FLAG_EDV1 | BQ27000_FLAG_EDVF); + else if (di->opts & BQ27Z561_O_BITS) + return flags & BQ27Z561_FLAG_FDC; else return flags & (BQ27XXX_FLAG_SOC1 | BQ27XXX_FLAG_SOCF); } @@ -1595,6 +1646,7 @@ void bq27xxx_battery_update(struct bq27xxx_device_info *di) cache.time_to_empty_avg = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTECP); if (di->regs[BQ27XXX_REG_TTF] != INVALID_REG_ADDR) cache.time_to_full = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTF); + cache.charge_full = bq27xxx_battery_read_fcc(di); cache.capacity = bq27xxx_battery_read_soc(di); if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR) @@ -1682,6 +1734,13 @@ static int bq27xxx_battery_status(struct bq27xxx_device_info *di, status = POWER_SUPPLY_STATUS_NOT_CHARGING; else status = POWER_SUPPLY_STATUS_DISCHARGING; + } else if (di->opts & BQ27Z561_O_BITS) { + if (di->cache.flags & BQ27Z561_FLAG_FC) + status = POWER_SUPPLY_STATUS_FULL; + else if (di->cache.flags & BQ27Z561_FLAG_DIS_CH) + status = POWER_SUPPLY_STATUS_DISCHARGING; + else + status = POWER_SUPPLY_STATUS_CHARGING; } else { if (di->cache.flags & BQ27XXX_FLAG_FC) status = POWER_SUPPLY_STATUS_FULL; @@ -1710,6 +1769,13 @@ static int bq27xxx_battery_capacity_level(struct bq27xxx_device_info *di, level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; else level = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; + } else if (di->opts & BQ27Z561_O_BITS) { + if (di->cache.flags & BQ27Z561_FLAG_FC) + level = POWER_SUPPLY_CAPACITY_LEVEL_FULL; + else if (di->cache.flags & BQ27Z561_FLAG_FDC) + level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; + else + level = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; } else { if (di->cache.flags & BQ27XXX_FLAG_FC) level = POWER_SUPPLY_CAPACITY_LEVEL_FULL; diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c index 8e114a7abfc9..15f4e75786ab 100644 --- a/drivers/power/supply/bq27xxx_battery_i2c.c +++ b/drivers/power/supply/bq27xxx_battery_i2c.c @@ -253,6 +253,7 @@ static const struct i2c_device_id bq27xxx_i2c_id_table[] = { { "bq27426", BQ27426 }, { "bq27441", BQ27441 }, { "bq27621", BQ27621 }, + { "bq27z561", BQ27Z561 }, {}, }; MODULE_DEVICE_TABLE(i2c, bq27xxx_i2c_id_table); @@ -286,6 +287,7 @@ static const struct of_device_id bq27xxx_battery_i2c_of_match_table[] = { { .compatible = "ti,bq27426" }, { .compatible = "ti,bq27441" }, { .compatible = "ti,bq27621" }, + { .compatible = "ti,bq27z561" }, {}, }; MODULE_DEVICE_TABLE(of, bq27xxx_battery_i2c_of_match_table); diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h index 507c5e214c42..1f6ea5d5063d 100644 --- a/include/linux/power/bq27xxx_battery.h +++ b/include/linux/power/bq27xxx_battery.h @@ -30,6 +30,7 @@ enum bq27xxx_chip { BQ27426, BQ27441, BQ27621, + BQ27Z561, }; struct bq27xxx_device_info; -- cgit v1.2.3 From 707d678a5c7c5e80d1caac6c6b021171f5ecde58 Mon Sep 17 00:00:00 2001 From: Dan Murphy Date: Wed, 29 Jul 2020 13:31:45 -0500 Subject: power: supply: bq27xxx_battery: Add the BQ28z610 Battery monitor Add the Texas Instruments BQ28z610 battery monitor. 
The register address map is laid out the same as compared to other devices within the file. The battery status register bits are similar to the bq27z561 but they are different compared to other fuel gauge devices within this file. Signed-off-by: Dan Murphy Signed-off-by: Sebastian Reichel --- drivers/power/supply/bq27xxx_battery.c | 42 ++++++++++++++++++++++++++++++ drivers/power/supply/bq27xxx_battery_i2c.c | 2 ++ include/linux/power/bq27xxx_battery.h | 1 + 3 files changed, 45 insertions(+) (limited to 'include') diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c index a05b9a2d112d..a123f6e21f08 100644 --- a/drivers/power/supply/bq27xxx_battery.c +++ b/drivers/power/supply/bq27xxx_battery.c @@ -44,6 +44,7 @@ * https://www.ti.com/product/bq27441-g1 * https://www.ti.com/product/bq27621-g1 * https://www.ti.com/product/bq27z561 + * https://www.ti.com/product/bq28z610 */ #include @@ -462,6 +463,26 @@ static u8 [BQ27XXX_REG_DCAP] = 0x3c, [BQ27XXX_REG_AP] = 0x22, BQ27XXX_DM_REG_ROWS, + }, + bq28z610_regs[BQ27XXX_REG_MAX] = { + [BQ27XXX_REG_CTRL] = 0x00, + [BQ27XXX_REG_TEMP] = 0x06, + [BQ27XXX_REG_INT_TEMP] = INVALID_REG_ADDR, + [BQ27XXX_REG_VOLT] = 0x08, + [BQ27XXX_REG_AI] = 0x14, + [BQ27XXX_REG_FLAGS] = 0x0a, + [BQ27XXX_REG_TTE] = 0x16, + [BQ27XXX_REG_TTF] = 0x18, + [BQ27XXX_REG_TTES] = INVALID_REG_ADDR, + [BQ27XXX_REG_TTECP] = INVALID_REG_ADDR, + [BQ27XXX_REG_NAC] = INVALID_REG_ADDR, + [BQ27XXX_REG_FCC] = 0x12, + [BQ27XXX_REG_CYCT] = 0x2a, + [BQ27XXX_REG_AE] = 0x22, + [BQ27XXX_REG_SOC] = 0x2c, + [BQ27XXX_REG_DCAP] = 0x3c, + [BQ27XXX_REG_AP] = 0x22, + BQ27XXX_DM_REG_ROWS, }; static enum power_supply_property bq27000_props[] = { @@ -717,6 +738,25 @@ static enum power_supply_property bq27z561_props[] = { POWER_SUPPLY_PROP_MANUFACTURER, }; +static enum power_supply_property bq28z610_props[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_CAPACITY_LEVEL, + POWER_SUPPLY_PROP_TEMP, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, + POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, + POWER_SUPPLY_PROP_TECHNOLOGY, + POWER_SUPPLY_PROP_CHARGE_FULL, + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, + POWER_SUPPLY_PROP_CYCLE_COUNT, + POWER_SUPPLY_PROP_POWER_AVG, + POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_MANUFACTURER, +}; + struct bq27xxx_dm_reg { u8 subclass_id; u8 offset; @@ -813,6 +853,7 @@ static struct bq27xxx_dm_reg bq27621_dm_regs[] = { #endif #define bq27z561_dm_regs 0 +#define bq28z610_dm_regs 0 #define BQ27XXX_O_ZERO 0x00000001 #define BQ27XXX_O_OTDC 0x00000002 /* has OTC/OTD overtemperature flags */ @@ -865,6 +906,7 @@ static struct { [BQ27441] = BQ27XXX_DATA(bq27441, 0x80008000, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP | BQ27XXX_O_RAM), [BQ27621] = BQ27XXX_DATA(bq27621, 0x80008000, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP | BQ27XXX_O_RAM), [BQ27Z561] = BQ27XXX_DATA(bq27z561, 0 , BQ27Z561_O_BITS), + [BQ28Z610] = BQ27XXX_DATA(bq28z610, 0 , BQ27Z561_O_BITS), }; static DEFINE_MUTEX(bq27xxx_list_lock); diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c index 15f4e75786ab..ab02456d69e5 100644 --- a/drivers/power/supply/bq27xxx_battery_i2c.c +++ b/drivers/power/supply/bq27xxx_battery_i2c.c @@ -254,6 +254,7 @@ static const struct i2c_device_id bq27xxx_i2c_id_table[] = { { "bq27441", BQ27441 }, { "bq27621", BQ27621 }, { "bq27z561", BQ27Z561 }, + { "bq28z610", BQ28Z610 }, {}, }; MODULE_DEVICE_TABLE(i2c, bq27xxx_i2c_id_table); @@ 
-288,6 +289,7 @@ static const struct of_device_id bq27xxx_battery_i2c_of_match_table[] = { { .compatible = "ti,bq27441" }, { .compatible = "ti,bq27621" }, { .compatible = "ti,bq27z561" }, + { .compatible = "ti,bq28z610" }, {}, }; MODULE_DEVICE_TABLE(of, bq27xxx_battery_i2c_of_match_table); diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h index 1f6ea5d5063d..987d9652aa4e 100644 --- a/include/linux/power/bq27xxx_battery.h +++ b/include/linux/power/bq27xxx_battery.h @@ -31,6 +31,7 @@ enum bq27xxx_chip { BQ27441, BQ27621, BQ27Z561, + BQ28Z610, }; struct bq27xxx_device_info; -- cgit v1.2.3 From 3ae1f39aef08c5c584b0d0ce2186273be3f14d16 Mon Sep 17 00:00:00 2001 From: Sibi Sankar Date: Sat, 6 Jun 2020 03:03:30 +0530 Subject: OPP: Add and export helper to set bandwidth Add and export 'dev_pm_opp_set_bw' to set the bandwidth levels associated with an OPP. Signed-off-by: Sibi Sankar Signed-off-by: Viresh Kumar --- drivers/opp/core.c | 31 +++++++++++++++++++++++++++++++ include/linux/pm_opp.h | 6 ++++++ 2 files changed, 37 insertions(+) (limited to 'include') diff --git a/drivers/opp/core.c b/drivers/opp/core.c index dfbd3d10410c..6937bf45f497 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -831,6 +831,37 @@ static int _set_required_opps(struct device *dev, return ret; } +/** + * dev_pm_opp_set_bw() - sets bandwidth levels corresponding to an opp + * @dev: device for which we do this operation + * @opp: opp based on which the bandwidth levels are to be configured + * + * This configures the bandwidth to the levels specified by the OPP. However + * if the OPP specified is NULL the bandwidth levels are cleared out. + * + * Return: 0 on success or a negative error value. + */ +int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp) +{ + struct opp_table *opp_table; + int ret; + + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) { + dev_err(dev, "%s: device opp table doesn't exist\n", __func__); + return PTR_ERR(opp_table); + } + + if (opp) + ret = _set_opp_bw(opp_table, opp, dev, false); + else + ret = _set_opp_bw(opp_table, NULL, dev, true); + + dev_pm_opp_put_opp_table(opp_table); + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_set_bw); + /** * dev_pm_opp_set_rate() - Configure new OPP based on frequency * @dev: device for which we do this operation diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index d5c4a329321d..ae68417c0ae0 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -151,6 +151,7 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names void dev_pm_opp_detach_genpd(struct opp_table *opp_table); int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate); int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); +int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp); int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask); int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); void dev_pm_opp_remove_table(struct device *dev); @@ -342,6 +343,11 @@ static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_f return -ENOTSUPP; } +static inline int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp) +{ + return -EOPNOTSUPP; +} + static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask) { return -ENOTSUPP; -- cgit v1.2.3 From 
292072c38768bb2321cf643b27cdf8fd8282d028 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 30 Jul 2020 08:59:40 +0530 Subject: cpufreq: cached_resolved_idx can not be negative It is not possible for cached_resolved_idx to be invalid here as the cpufreq core always sets index to a positive value. Change its type to unsigned int and fix qcom usage a bit. Signed-off-by: Viresh Kumar --- drivers/cpufreq/cpufreq.c | 2 +- drivers/cpufreq/qcom-cpufreq-hw.c | 5 +---- include/linux/cpufreq.h | 2 +- 3 files changed, 3 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 0128de3603df..053d72e52a31 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -538,7 +538,7 @@ unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy, policy->cached_target_freq = target_freq; if (cpufreq_driver->target_index) { - int idx; + unsigned int idx; idx = cpufreq_frequency_table_target(policy, target_freq, CPUFREQ_RELATION_L); diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c index fa68fa8ebd95..599818d38717 100644 --- a/drivers/cpufreq/qcom-cpufreq-hw.c +++ b/drivers/cpufreq/qcom-cpufreq-hw.c @@ -112,13 +112,10 @@ static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy, unsigned int target_freq) { void __iomem *perf_state_reg = policy->driver_data; - int index; + unsigned int index; unsigned long freq; index = policy->cached_resolved_idx; - if (index < 0) - return 0; - writel_relaxed(index, perf_state_reg); freq = policy->freq_table[index].frequency; diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 3494f6763597..540c3ea4eb3c 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -127,7 +127,7 @@ struct cpufreq_policy { /* Cached frequency lookup from cpufreq_driver_resolve_freq. */ unsigned int cached_target_freq; - int cached_resolved_idx; + unsigned int cached_resolved_idx; /* Synchronization for frequency transitions */ bool transition_ongoing; /* Tracks transition status */ -- cgit v1.2.3 From c8376994c86c4eb02b9a1032cd3a8d44c911d671 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 4 Jun 2020 10:23:14 +0200 Subject: initrd: remove support for multiple floppies Remove the special handling for multiple floppies in the initrd code. No one should be using floppies for booting these days. (famous last words..) Includes a spelling fix from Colin Ian King . 
Signed-off-by: Christoph Hellwig Acked-by: Linus Torvalds --- arch/arm/kernel/atags_parse.c | 2 -- arch/sh/kernel/setup.c | 2 -- arch/sparc/kernel/setup_32.c | 2 -- arch/sparc/kernel/setup_64.c | 2 -- arch/x86/kernel/setup.c | 2 -- include/linux/initrd.h | 6 ---- init/do_mounts.c | 69 +++++-------------------------------------- init/do_mounts.h | 1 - init/do_mounts_rd.c | 20 ++++--------- 9 files changed, 12 insertions(+), 94 deletions(-) (limited to 'include') diff --git a/arch/arm/kernel/atags_parse.c b/arch/arm/kernel/atags_parse.c index ce02f92f4ab2..6c12d9fe694e 100644 --- a/arch/arm/kernel/atags_parse.c +++ b/arch/arm/kernel/atags_parse.c @@ -91,8 +91,6 @@ __tagtable(ATAG_VIDEOTEXT, parse_tag_videotext); static int __init parse_tag_ramdisk(const struct tag *tag) { rd_image_start = tag->u.ramdisk.start; - rd_doload = (tag->u.ramdisk.flags & 1) == 0; - rd_prompt = (tag->u.ramdisk.flags & 2) == 0; if (tag->u.ramdisk.size) rd_size = tag->u.ramdisk.size; diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index 67f5a3b44c2e..4144be650d41 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -290,8 +290,6 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_BLK_DEV_RAM rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK; - rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0); - rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0); #endif if (!MOUNT_ROOT_RDONLY) diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c index 6d07b85b9e24..eea43a1aef1b 100644 --- a/arch/sparc/kernel/setup_32.c +++ b/arch/sparc/kernel/setup_32.c @@ -353,8 +353,6 @@ void __init setup_arch(char **cmdline_p) ROOT_DEV = old_decode_dev(root_dev); #ifdef CONFIG_BLK_DEV_RAM rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK; - rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0); - rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0); #endif prom_setsync(prom_sync_me); diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index f765fda871eb..d87244197d5c 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c @@ -659,8 +659,6 @@ void __init setup_arch(char **cmdline_p) ROOT_DEV = old_decode_dev(root_dev); #ifdef CONFIG_BLK_DEV_RAM rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK; - rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0); - rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0); #endif task_thread_info(&init_task)->kregs = &fake_swapper_regs; diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index a3767e74c758..b9a68d8e06d8 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -870,8 +870,6 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_BLK_DEV_RAM rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK; - rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0); - rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0); #endif #ifdef CONFIG_EFI if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature, diff --git a/include/linux/initrd.h b/include/linux/initrd.h index aa5914355728..8db6f8c8030b 100644 --- a/include/linux/initrd.h +++ b/include/linux/initrd.h @@ -2,12 +2,6 @@ #define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... 
*/ -/* 1 = load ramdisk, 0 = don't load */ -extern int rd_doload; - -/* 1 = prompt for ramdisk, 0 = don't prompt */ -extern int rd_prompt; - /* starting block # of image */ extern int rd_image_start; diff --git a/init/do_mounts.c b/init/do_mounts.c index 1a4dfa17fb28..a7f22cf58c7e 100644 --- a/init/do_mounts.c +++ b/init/do_mounts.c @@ -28,8 +28,6 @@ #include "do_mounts.h" -int __initdata rd_doload; /* 1 = load RAM disk, 0 = don't load */ - int root_mountflags = MS_RDONLY | MS_SILENT; static char * __initdata root_device_name; static char __initdata saved_root_name[64]; @@ -39,7 +37,7 @@ dev_t ROOT_DEV; static int __init load_ramdisk(char *str) { - rd_doload = simple_strtol(str,NULL,0) & 3; + pr_warn("ignoring the deprecated load_ramdisk= option\n"); return 1; } __setup("load_ramdisk=", load_ramdisk); @@ -553,66 +551,20 @@ static int __init mount_cifs_root(void) } #endif -#if defined(CONFIG_BLK_DEV_RAM) || defined(CONFIG_BLK_DEV_FD) -void __init change_floppy(char *fmt, ...) -{ - struct termios termios; - char buf[80]; - char c; - int fd; - va_list args; - va_start(args, fmt); - vsprintf(buf, fmt, args); - va_end(args); - fd = ksys_open("/dev/root", O_RDWR | O_NDELAY, 0); - if (fd >= 0) { - ksys_ioctl(fd, FDEJECT, 0); - ksys_close(fd); - } - printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf); - fd = ksys_open("/dev/console", O_RDWR, 0); - if (fd >= 0) { - ksys_ioctl(fd, TCGETS, (long)&termios); - termios.c_lflag &= ~ICANON; - ksys_ioctl(fd, TCSETSF, (long)&termios); - ksys_read(fd, &c, 1); - termios.c_lflag |= ICANON; - ksys_ioctl(fd, TCSETSF, (long)&termios); - ksys_close(fd); - } -} -#endif - void __init mount_root(void) { #ifdef CONFIG_ROOT_NFS if (ROOT_DEV == Root_NFS) { - if (mount_nfs_root()) - return; - - printk(KERN_ERR "VFS: Unable to mount root fs via NFS, trying floppy.\n"); - ROOT_DEV = Root_FD0; + if (!mount_nfs_root()) + printk(KERN_ERR "VFS: Unable to mount root fs via NFS.\n"); + return; } #endif #ifdef CONFIG_CIFS_ROOT if (ROOT_DEV == Root_CIFS) { - if (mount_cifs_root()) - return; - - printk(KERN_ERR "VFS: Unable to mount root fs via SMB, trying floppy.\n"); - ROOT_DEV = Root_FD0; - } -#endif -#ifdef CONFIG_BLK_DEV_FD - if (MAJOR(ROOT_DEV) == FLOPPY_MAJOR) { - /* rd_doload is 2 for a dual initrd/ramload setup */ - if (rd_doload==2) { - if (rd_load_disk(1)) { - ROOT_DEV = Root_RAM1; - root_device_name = NULL; - } - } else - change_floppy("root floppy"); + if (!mount_cifs_root()) + printk(KERN_ERR "VFS: Unable to mount root fs via SMB.\n"); + return; } #endif #ifdef CONFIG_BLOCK @@ -631,8 +583,6 @@ void __init mount_root(void) */ void __init prepare_namespace(void) { - int is_floppy; - if (root_delay) { printk(KERN_INFO "Waiting %d sec before mounting root device...\n", root_delay); @@ -675,11 +625,6 @@ void __init prepare_namespace(void) async_synchronize_full(); } - is_floppy = MAJOR(ROOT_DEV) == FLOPPY_MAJOR; - - if (is_floppy && rd_doload && rd_load_disk(0)) - ROOT_DEV = Root_RAM0; - mount_root(); out: devtmpfs_mount(); diff --git a/init/do_mounts.h b/init/do_mounts.h index 50d6c8941e15..c855b3f0e06d 100644 --- a/init/do_mounts.h +++ b/init/do_mounts.h @@ -9,7 +9,6 @@ #include #include -void change_floppy(char *fmt, ...); void mount_block_root(char *name, int flags); void mount_root(void); extern int root_mountflags; diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c index 32fb049d18f9..0a5c3ebee61d 100644 --- a/init/do_mounts_rd.c +++ b/init/do_mounts_rd.c @@ -15,11 +15,9 @@ #include -int __initdata rd_prompt = 1;/* 1 = prompt for RAM disk, 0 = don't 
prompt */ - static int __init prompt_ramdisk(char *str) { - rd_prompt = simple_strtol(str,NULL,0) & 1; + pr_warn("ignoring the deprecated prompt_ramdisk= option\n"); return 1; } __setup("prompt_ramdisk=", prompt_ramdisk); @@ -178,7 +176,7 @@ int __init rd_load_image(char *from) int res = 0; int in_fd, out_fd; unsigned long rd_blocks, devblocks; - int nblocks, i, disk; + int nblocks, i; char *buf = NULL; unsigned short rotate = 0; decompress_fn decompressor = NULL; @@ -243,21 +241,15 @@ int __init rd_load_image(char *from) printk(KERN_NOTICE "RAMDISK: Loading %dKiB [%ld disk%s] into ram disk... ", nblocks, ((nblocks-1)/devblocks)+1, nblocks>devblocks ? "s" : ""); - for (i = 0, disk = 1; i < nblocks; i++) { + for (i = 0; i < nblocks; i++) { if (i && (i % devblocks == 0)) { - pr_cont("done disk #%d.\n", disk++); + pr_cont("done disk #1.\n"); rotate = 0; if (ksys_close(in_fd)) { printk("Error closing the disk.\n"); goto noclose_input; } - change_floppy("disk #%d", disk); - in_fd = ksys_open(from, O_RDONLY, 0); - if (in_fd < 0) { - printk("Error opening disk.\n"); - goto noclose_input; - } - printk("Loading disk #%d... ", disk); + break; } ksys_read(in_fd, buf, BLOCK_SIZE); ksys_write(out_fd, buf, BLOCK_SIZE); @@ -284,8 +276,6 @@ out: int __init rd_load_disk(int n) { - if (rd_prompt) - change_floppy("root floppy disk to be loaded into RAM disk"); create_dev("/dev/root", ROOT_DEV); create_dev("/dev/ram", MKDEV(RAMDISK_MAJOR, n)); return rd_load_image("/dev/root"); -- cgit v1.2.3 From bef173299613404f55b11180d9a865861637f31d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 6 Jun 2020 14:49:58 +0200 Subject: initrd: switch initrd loading to struct file based APIs There is no good reason to mess with file descriptors from in-kernel code, switch the initrd loading to struct file based read and writes instead. Also Pass an explicit offset instead of ->f_pos, and to make that easier, use file scope file structs and offsets everywhere except for identify_ramdisk_image instead of the current strange mix. 
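To make the new pattern concrete, a hedged sketch (not taken from this patch, function name made up) of the struct-file style the series switches to; kernel_read()/kernel_write() take a caller-owned offset, so neither file descriptors nor ->f_pos are involved:

    static int __init read_first_block(const char *from, void *buf)
    {
    	struct file *file = filp_open(from, O_RDONLY, 0);
    	loff_t pos = 0;		/* explicit offset owned by the caller */
    	ssize_t ret;

    	if (IS_ERR(file))
    		return PTR_ERR(file);

    	ret = kernel_read(file, buf, BLOCK_SIZE, &pos);	/* pos advances here */
    	fput(file);

    	return ret < 0 ? ret : 0;
    }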
Signed-off-by: Christoph Hellwig Acked-by: Linus Torvalds --- fs/read_write.c | 2 +- include/linux/syscalls.h | 1 - init/do_mounts_rd.c | 79 ++++++++++++++++++++++++------------------------ 3 files changed, 40 insertions(+), 42 deletions(-) (limited to 'include') diff --git a/fs/read_write.c b/fs/read_write.c index 4fb797822567..5db58b8c78d0 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -301,7 +301,7 @@ loff_t vfs_llseek(struct file *file, loff_t offset, int whence) } EXPORT_SYMBOL(vfs_llseek); -off_t ksys_lseek(unsigned int fd, off_t offset, unsigned int whence) +static off_t ksys_lseek(unsigned int fd, off_t offset, unsigned int whence) { off_t retval; struct fd f = fdget_pos(fd); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index b951a87da987..10843a6adb77 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1246,7 +1246,6 @@ int ksys_fchown(unsigned int fd, uid_t user, gid_t group); int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent, unsigned int count); int ksys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg); -off_t ksys_lseek(unsigned int fd, off_t offset, unsigned int whence); ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count); void ksys_sync(void); int ksys_unshare(unsigned long unshare_flags); diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c index 0a5c3ebee61d..d4255c10432a 100644 --- a/init/do_mounts_rd.c +++ b/init/do_mounts_rd.c @@ -14,6 +14,8 @@ #include +static struct file *in_file, *out_file; +static loff_t in_pos, out_pos; static int __init prompt_ramdisk(char *str) { @@ -31,7 +33,7 @@ static int __init ramdisk_start_setup(char *str) } __setup("ramdisk_start=", ramdisk_start_setup); -static int __init crd_load(int in_fd, int out_fd, decompress_fn deco); +static int __init crd_load(decompress_fn deco); /* * This routine tries to find a RAM disk image to load, and returns the @@ -53,7 +55,8 @@ static int __init crd_load(int in_fd, int out_fd, decompress_fn deco); * lz4 */ static int __init -identify_ramdisk_image(int fd, int start_block, decompress_fn *decompressor) +identify_ramdisk_image(struct file *file, loff_t pos, + decompress_fn *decompressor) { const int size = 512; struct minix_super_block *minixsb; @@ -64,6 +67,7 @@ identify_ramdisk_image(int fd, int start_block, decompress_fn *decompressor) unsigned char *buf; const char *compress_name; unsigned long n; + int start_block = rd_image_start; buf = kmalloc(size, GFP_KERNEL); if (!buf) @@ -78,8 +82,8 @@ identify_ramdisk_image(int fd, int start_block, decompress_fn *decompressor) /* * Read block 0 to test for compressed kernel */ - ksys_lseek(fd, start_block * BLOCK_SIZE, 0); - ksys_read(fd, buf, size); + pos = start_block * BLOCK_SIZE; + kernel_read(file, buf, size, &pos); *decompressor = decompress_method(buf, size, &compress_name); if (compress_name) { @@ -124,8 +128,8 @@ identify_ramdisk_image(int fd, int start_block, decompress_fn *decompressor) /* * Read 512 bytes further to check if cramfs is padded */ - ksys_lseek(fd, start_block * BLOCK_SIZE + 0x200, 0); - ksys_read(fd, buf, size); + pos = start_block * BLOCK_SIZE + 0x200; + kernel_read(file, buf, size, &pos); if (cramfsb->magic == CRAMFS_MAGIC) { printk(KERN_NOTICE @@ -138,8 +142,8 @@ identify_ramdisk_image(int fd, int start_block, decompress_fn *decompressor) /* * Read block 1 to test for minix and ext2 superblock */ - ksys_lseek(fd, (start_block+1) * BLOCK_SIZE, 0); - ksys_read(fd, buf, size); + pos = (start_block + 1) * BLOCK_SIZE; + kernel_read(file, 
buf, size, &pos); /* Try minix */ if (minixsb->s_magic == MINIX_SUPER_MAGIC || @@ -166,15 +170,22 @@ identify_ramdisk_image(int fd, int start_block, decompress_fn *decompressor) start_block); done: - ksys_lseek(fd, start_block * BLOCK_SIZE, 0); kfree(buf); return nblocks; } +static unsigned long nr_blocks(struct file *file) +{ + struct inode *inode = file->f_mapping->host; + + if (!S_ISBLK(inode->i_mode)) + return 0; + return i_size_read(inode) >> 10; +} + int __init rd_load_image(char *from) { int res = 0; - int in_fd, out_fd; unsigned long rd_blocks, devblocks; int nblocks, i; char *buf = NULL; @@ -184,20 +195,21 @@ int __init rd_load_image(char *from) char rotator[4] = { '|' , '/' , '-' , '\\' }; #endif - out_fd = ksys_open("/dev/ram", O_RDWR, 0); - if (out_fd < 0) + out_file = filp_open("/dev/ram", O_RDWR, 0); + if (IS_ERR(out_file)) goto out; - in_fd = ksys_open(from, O_RDONLY, 0); - if (in_fd < 0) + in_file = filp_open(from, O_RDONLY, 0); + if (IS_ERR(in_file)) goto noclose_input; - nblocks = identify_ramdisk_image(in_fd, rd_image_start, &decompressor); + in_pos = rd_image_start * BLOCK_SIZE; + nblocks = identify_ramdisk_image(in_file, in_pos, &decompressor); if (nblocks < 0) goto done; if (nblocks == 0) { - if (crd_load(in_fd, out_fd, decompressor) == 0) + if (crd_load(decompressor) == 0) goto successful_load; goto done; } @@ -206,11 +218,7 @@ int __init rd_load_image(char *from) * NOTE NOTE: nblocks is not actually blocks but * the number of kibibytes of data to load into a ramdisk. */ - if (ksys_ioctl(out_fd, BLKGETSIZE, (unsigned long)&rd_blocks) < 0) - rd_blocks = 0; - else - rd_blocks >>= 1; - + rd_blocks = nr_blocks(out_file); if (nblocks > rd_blocks) { printk("RAMDISK: image too big! (%dKiB/%ldKiB)\n", nblocks, rd_blocks); @@ -220,13 +228,10 @@ int __init rd_load_image(char *from) /* * OK, time to copy in the data */ - if (ksys_ioctl(in_fd, BLKGETSIZE, (unsigned long)&devblocks) < 0) - devblocks = 0; - else - devblocks >>= 1; - if (strcmp(from, "/initrd.image") == 0) devblocks = nblocks; + else + devblocks = nr_blocks(in_file); if (devblocks == 0) { printk(KERN_ERR "RAMDISK: could not determine device size\n"); @@ -245,14 +250,11 @@ int __init rd_load_image(char *from) if (i && (i % devblocks == 0)) { pr_cont("done disk #1.\n"); rotate = 0; - if (ksys_close(in_fd)) { - printk("Error closing the disk.\n"); - goto noclose_input; - } + fput(in_file); break; } - ksys_read(in_fd, buf, BLOCK_SIZE); - ksys_write(out_fd, buf, BLOCK_SIZE); + kernel_read(in_file, buf, BLOCK_SIZE, &in_pos); + kernel_write(out_file, buf, BLOCK_SIZE, &out_pos); #if !defined(CONFIG_S390) if (!(i % 16)) { pr_cont("%c\b", rotator[rotate & 0x3]); @@ -265,9 +267,9 @@ int __init rd_load_image(char *from) successful_load: res = 1; done: - ksys_close(in_fd); + fput(in_file); noclose_input: - ksys_close(out_fd); + fput(out_file); out: kfree(buf); ksys_unlink("/dev/ram"); @@ -283,11 +285,10 @@ int __init rd_load_disk(int n) static int exit_code; static int decompress_error; -static int crd_infd, crd_outfd; static long __init compr_fill(void *buf, unsigned long len) { - long r = ksys_read(crd_infd, buf, len); + long r = kernel_read(in_file, buf, len, &in_pos); if (r < 0) printk(KERN_ERR "RAMDISK: error while reading compressed data"); else if (r == 0) @@ -297,7 +298,7 @@ static long __init compr_fill(void *buf, unsigned long len) static long __init compr_flush(void *window, unsigned long outcnt) { - long written = ksys_write(crd_outfd, window, outcnt); + long written = kernel_write(out_file, window, outcnt, 
&out_pos); if (written != outcnt) { if (decompress_error == 0) printk(KERN_ERR @@ -316,11 +317,9 @@ static void __init error(char *x) decompress_error = 1; } -static int __init crd_load(int in_fd, int out_fd, decompress_fn deco) +static int __init crd_load(decompress_fn deco) { int result; - crd_infd = in_fd; - crd_outfd = out_fd; if (!deco) { pr_emerg("Invalid ramdisk decompression routine. " -- cgit v1.2.3 From a787e5400a1ceeb0ef92d71ec43aeb35b1fa1334 Mon Sep 17 00:00:00 2001 From: Andrzej Hajda Date: Mon, 13 Jul 2020 16:43:21 +0200 Subject: driver core: add device probe log helper During probe every time driver gets resource it should usually check for error printk some message if it is not -EPROBE_DEFER and return the error. This pattern is simple but requires adding few lines after any resource acquisition code, as a result it is often omitted or implemented only partially. dev_err_probe helps to replace such code sequences with simple call, so code: if (err != -EPROBE_DEFER) dev_err(dev, ...); return err; becomes: return dev_err_probe(dev, err, ...); Signed-off-by: Andrzej Hajda Reviewed-by: Rafael J. Wysocki Reviewed-by: Mark Brown Reviewed-by: Andy Shevchenko Link: https://lore.kernel.org/r/20200713144324.23654-2-a.hajda@samsung.com Signed-off-by: Greg Kroah-Hartman --- drivers/base/core.c | 42 ++++++++++++++++++++++++++++++++++++++++++ include/linux/device.h | 3 +++ 2 files changed, 45 insertions(+) (limited to 'include') diff --git a/drivers/base/core.c b/drivers/base/core.c index 4d05868d9356..1606488ed741 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -4203,6 +4203,48 @@ define_dev_printk_level(_dev_info, KERN_INFO); #endif +/** + * dev_err_probe - probe error check and log helper + * @dev: the pointer to the struct device + * @err: error value to test + * @fmt: printf-style format string + * @...: arguments as specified in the format string + * + * This helper implements common pattern present in probe functions for error + * checking: print debug or error message depending if the error value is + * -EPROBE_DEFER and propagate error upwards. + * It replaces code sequence: + * if (err != -EPROBE_DEFER) + * dev_err(dev, ...); + * else + * dev_dbg(dev, ...); + * return err; + * with + * return dev_err_probe(dev, err, ...); + * + * Returns @err. + * + */ +int dev_err_probe(const struct device *dev, int err, const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; + + if (err != -EPROBE_DEFER) + dev_err(dev, "error %d: %pV", err, &vaf); + else + dev_dbg(dev, "error %d: %pV", err, &vaf); + + va_end(args); + + return err; +} +EXPORT_SYMBOL_GPL(dev_err_probe); + static inline bool fwnode_is_primary(struct fwnode_handle *fwnode) { return fwnode && !IS_ERR(fwnode->secondary); diff --git a/include/linux/device.h b/include/linux/device.h index a120657d7587..065c24008f8b 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -962,6 +962,9 @@ void device_link_remove(void *consumer, struct device *supplier); void device_links_supplier_sync_state_pause(void); void device_links_supplier_sync_state_resume(void); +extern __printf(3, 4) +int dev_err_probe(const struct device *dev, int err, const char *fmt, ...); + /* Create alias, so I can be autoloaded. 
*/ #define MODULE_ALIAS_CHARDEV(major,minor) \ MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor)) -- cgit v1.2.3 From 6540351e6f27ef718e3cf5b46349633f3ec57859 Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Thu, 23 Jul 2020 18:08:56 +0530 Subject: Bluetooth: Translate additional address type correctly When using controller based address resolution, then the new address types 0x02 and 0x03 are used. These types need to be converted back into either public address or random address types. Signed-off-by: Marcel Holtmann Signed-off-by: Sathish Narsimman Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci.h | 6 ++++-- net/bluetooth/hci_core.c | 9 +++++++++ 2 files changed, 13 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 1317dfd8f962..c36dccd6718e 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -2279,8 +2279,10 @@ struct hci_ev_le_conn_complete { #define LE_EXT_ADV_SCAN_RSP 0x0008 #define LE_EXT_ADV_LEGACY_PDU 0x0010 -#define ADDR_LE_DEV_PUBLIC 0x00 -#define ADDR_LE_DEV_RANDOM 0x01 +#define ADDR_LE_DEV_PUBLIC 0x00 +#define ADDR_LE_DEV_RANDOM 0x01 +#define ADDR_LE_DEV_PUBLIC_RESOLVED 0x02 +#define ADDR_LE_DEV_RANDOM_RESOLVED 0x03 #define HCI_EV_LE_ADVERTISING_REPORT 0x02 struct hci_ev_le_advertising_info { diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 4ba23b821cbf..3f89bd639860 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3292,6 +3292,15 @@ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list, { struct hci_conn_params *param; + switch (addr_type) { + case ADDR_LE_DEV_PUBLIC_RESOLVED: + addr_type = ADDR_LE_DEV_PUBLIC; + break; + case ADDR_LE_DEV_RANDOM_RESOLVED: + addr_type = ADDR_LE_DEV_RANDOM; + break; + } + list_for_each_entry(param, list, action) { if (bacmp(¶m->addr, addr) == 0 && param->addr_type == addr_type) -- cgit v1.2.3 From e1d572357599d142df5764b39731b6eb55a22beb Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Thu, 23 Jul 2020 18:08:57 +0530 Subject: Bluetooth: Configure controller address resolution if available When the LL Privacy support is available, then as part of enabling or disabling passive background scanning, it is required to set up the controller based address resolution as well. Since only passive background scanning is utilizing the whitelist, the address resolution is now bound to the whitelist and passive background scanning. All other resolution can be easily done by the host stack. 
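The ordering this creates can be summarised with a hedged sketch (the caller is illustrative only; the two request helpers are the real ones touched by this patch):

    /*
     * With LL Privacy, hci_req_add_le_scan_disable() also turns controller
     * based address resolution off, and hci_req_add_le_passive_scan() turns
     * it back on, so any list reconfiguration has to sit between the two.
     */
    static void background_scan_reconfigure(struct hci_request *req)
    {
    	hci_req_add_le_scan_disable(req);	/* scan + resolution off */

    	/* ... update white list / resolving list here ... */

    	hci_req_add_le_passive_scan(req);	/* scan + resolution back on */
    }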
Signed-off-by: Marcel Holtmann Signed-off-by: Sathish Narsimman Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci_core.h | 3 +++ net/bluetooth/hci_request.c | 26 +++++++++++++++++++++++--- 2 files changed, 26 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index bee1b4778ccc..8caac20556b4 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1359,6 +1359,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED)) +/* Use LL Privacy based address resolution if supported */ +#define use_ll_privacy(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY) + /* Use ext scanning if set ext scan param and ext scan enable is supported */ #define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \ ((dev)->commands[37] & 0x40)) diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 7c0c2fda04ad..7d0ba53ffed0 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -675,6 +675,12 @@ void hci_req_add_le_scan_disable(struct hci_request *req) cp.enable = LE_SCAN_DISABLE; hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); } + + if (use_ll_privacy(hdev) && + hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) { + __u8 enable = 0x00; + hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); + } } static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr, @@ -816,7 +822,8 @@ static bool scan_use_rpa(struct hci_dev *hdev) } static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval, - u16 window, u8 own_addr_type, u8 filter_policy) + u16 window, u8 own_addr_type, u8 filter_policy, + bool addr_resolv) { struct hci_dev *hdev = req->hdev; @@ -825,6 +832,11 @@ static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval, return; } + if (use_ll_privacy(hdev) && addr_resolv) { + u8 enable = 0x01; + hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); + } + /* Use ext scanning if set ext scan param and ext scan enable is * supported */ @@ -898,12 +910,18 @@ static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval, } } +/* Ensure to call hci_req_add_le_scan_disable() first to disable the + * controller based address resolution to be able to reconfigure + * resolving list. 
+ */ void hci_req_add_le_passive_scan(struct hci_request *req) { struct hci_dev *hdev = req->hdev; u8 own_addr_type; u8 filter_policy; u16 window, interval; + /* Background scanning should run with address resolution */ + bool addr_resolv = true; if (hdev->scanning_paused) { bt_dev_dbg(hdev, "Scanning is paused for suspend"); @@ -949,7 +967,7 @@ void hci_req_add_le_passive_scan(struct hci_request *req) bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy); hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window, - own_addr_type, filter_policy); + own_addr_type, filter_policy, addr_resolv); } static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance) @@ -2789,6 +2807,8 @@ static int active_scan(struct hci_request *req, unsigned long opt) u8 own_addr_type; /* White list is not used for discovery */ u8 filter_policy = 0x00; + /* Discovery doesn't require controller address resolution */ + bool addr_resolv = false; int err; BT_DBG("%s", hdev->name); @@ -2811,7 +2831,7 @@ static int active_scan(struct hci_request *req, unsigned long opt) hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, hdev->le_scan_window_discovery, own_addr_type, - filter_policy); + filter_policy, addr_resolv); return 0; } -- cgit v1.2.3 From b2cc23398e8166b38f8715026273503b081c2a7a Mon Sep 17 00:00:00 2001 From: Sathish Narasimman Date: Thu, 23 Jul 2020 18:09:02 +0530 Subject: Bluetooth: Enable RPA Timeout Enable RPA timeout during bluetooth initialization. The RPA timeout value is used from hdev, which initialized from debug_fs Signed-off-by: Sathish Narasimman Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci.h | 2 ++ net/bluetooth/hci_core.c | 8 ++++++++ 2 files changed, 10 insertions(+) (limited to 'include') diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index c36dccd6718e..dd82cce77a7a 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -1648,6 +1648,8 @@ struct hci_rp_le_read_resolv_list_size { #define HCI_OP_LE_SET_ADDR_RESOLV_ENABLE 0x202d +#define HCI_OP_LE_SET_RPA_TIMEOUT 0x202e + #define HCI_OP_LE_READ_MAX_DATA_LEN 0x202f struct hci_rp_le_read_max_data_len { __u8 status; diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 3f89bd639860..68bfe57b6625 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -763,6 +763,14 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt) hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL); } + if (hdev->commands[35] & 0x40) { + __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout); + + /* Set RPA timeout */ + hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2, + &rpa_timeout); + } + if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) { /* Read LE Maximum Data Length */ hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL); -- cgit v1.2.3 From 4dc3bab8687f1ea11322611de6d4138b43eccdcd Mon Sep 17 00:00:00 2001 From: Chanwoo Choi Date: Thu, 2 Jul 2020 20:41:28 +0900 Subject: PM / devfreq: Add support delayed timer for polling mode Until now, the devfreq driver using polling mode like simple_ondemand governor have used only deferrable timer for reducing the redundant power consumption. It reduces the CPU wake-up from idle due to polling mode which check the status of Non-CPU device. But, it has a problem for Non-CPU device like DMC device with DMA operation. Some Non-CPU device need to do monitor continuously regardless of CPU state in order to decide the proper next status of Non-CPU device. 
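As a concrete (purely hypothetical) consumer, a DMC driver could declare its profile as sketched below; the .timer field and the DEVFREQ_TIMER_DELAYED value are what the rest of this message adds, and the handler names are made up:

    static struct devfreq_dev_profile dmc_devfreq_profile = {
    	.polling_ms	= 100,				/* keep sampling every 100 ms */
    	.timer		= DEVFREQ_TIMER_DELAYED,	/* fires even while CPUs are idle */
    	.target		= dmc_devfreq_target,		/* hypothetical */
    	.get_dev_status	= dmc_devfreq_get_dev_status,	/* hypothetical */
    };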
So, add support the delayed timer for polling mode to support the repetitive monitoring. The devfreq driver and user can select the kind of timer on either deferrable and delayed timer. For example, change the timer type of DMC device based on Exynos5422-based Odroid-XU3 as following: - If want to use deferrable timer as following: echo deferrable > /sys/class/devfreq/10c20000.memory-controller/timer - If want to use delayed timer as following: echo delayed > /sys/class/devfreq/10c20000.memory-controller/timer Reviewed-by: Bartlomiej Zolnierkiewicz Reviewed-by: Lukasz Luba Signed-off-by: Chanwoo Choi --- Documentation/ABI/testing/sysfs-class-devfreq | 12 ++++ drivers/devfreq/devfreq.c | 86 ++++++++++++++++++++++++++- include/linux/devfreq.h | 9 +++ 3 files changed, 106 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/Documentation/ABI/testing/sysfs-class-devfreq b/Documentation/ABI/testing/sysfs-class-devfreq index 9758eb85ade3..deefffb3bbe4 100644 --- a/Documentation/ABI/testing/sysfs-class-devfreq +++ b/Documentation/ABI/testing/sysfs-class-devfreq @@ -108,3 +108,15 @@ Description: frequency requested by governors and min_freq. The max_freq overrides min_freq because max_freq may be used to throttle devices to avoid overheating. + +What: /sys/class/devfreq/.../timer +Date: July 2020 +Contact: Chanwoo Choi +Description: + This ABI shows and stores the kind of work timer by users. + This work timer is used by devfreq workqueue in order to + monitor the device status such as utilization. The user + can change the work timer on runtime according to their demand + as following: + echo deferrable > /sys/class/devfreq/.../timer + echo delayed > /sys/class/devfreq/.../timer diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 52b9c3e141f3..5320c3b37f35 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -49,6 +49,11 @@ static LIST_HEAD(devfreq_governor_list); static LIST_HEAD(devfreq_list); static DEFINE_MUTEX(devfreq_list_lock); +static const char timer_name[][DEVFREQ_NAME_LEN] = { + [DEVFREQ_TIMER_DEFERRABLE] = { "deferrable" }, + [DEVFREQ_TIMER_DELAYED] = { "delayed" }, +}; + /** * find_device_devfreq() - find devfreq struct using device pointer * @dev: device pointer used to lookup device devfreq. 
@@ -454,7 +459,17 @@ void devfreq_monitor_start(struct devfreq *devfreq) if (devfreq->governor->interrupt_driven) return; - INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor); + switch (devfreq->profile->timer) { + case DEVFREQ_TIMER_DEFERRABLE: + INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor); + break; + case DEVFREQ_TIMER_DELAYED: + INIT_DELAYED_WORK(&devfreq->work, devfreq_monitor); + break; + default: + return; + } + if (devfreq->profile->polling_ms) queue_delayed_work(devfreq_wq, &devfreq->work, msecs_to_jiffies(devfreq->profile->polling_ms)); @@ -771,6 +786,11 @@ struct devfreq *devfreq_add_device(struct device *dev, devfreq->data = data; devfreq->nb.notifier_call = devfreq_notifier_call; + if (devfreq->profile->timer < 0 + || devfreq->profile->timer >= DEVFREQ_TIMER_NUM) { + goto err_out; + } + if (!devfreq->profile->max_state && !devfreq->profile->freq_table) { mutex_unlock(&devfreq->lock); err = set_freq_table(devfreq); @@ -1625,6 +1645,69 @@ static ssize_t trans_stat_store(struct device *dev, } static DEVICE_ATTR_RW(trans_stat); +static ssize_t timer_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct devfreq *df = to_devfreq(dev); + + if (!df->profile) + return -EINVAL; + + return sprintf(buf, "%s\n", timer_name[df->profile->timer]); +} + +static ssize_t timer_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct devfreq *df = to_devfreq(dev); + char str_timer[DEVFREQ_NAME_LEN + 1]; + int timer = -1; + int ret = 0, i; + + if (!df->governor || !df->profile) + return -EINVAL; + + ret = sscanf(buf, "%16s", str_timer); + if (ret != 1) + return -EINVAL; + + for (i = 0; i < DEVFREQ_TIMER_NUM; i++) { + if (!strncmp(timer_name[i], str_timer, DEVFREQ_NAME_LEN)) { + timer = i; + break; + } + } + + if (timer < 0) { + ret = -EINVAL; + goto out; + } + + if (df->profile->timer == timer) { + ret = 0; + goto out; + } + + mutex_lock(&df->lock); + df->profile->timer = timer; + mutex_unlock(&df->lock); + + ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL); + if (ret) { + dev_warn(dev, "%s: Governor %s not stopped(%d)\n", + __func__, df->governor->name, ret); + goto out; + } + + ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL); + if (ret) + dev_warn(dev, "%s: Governor %s not started(%d)\n", + __func__, df->governor->name, ret); +out: + return ret ? ret : count; +} +static DEVICE_ATTR_RW(timer); + static struct attribute *devfreq_attrs[] = { &dev_attr_name.attr, &dev_attr_governor.attr, @@ -1636,6 +1719,7 @@ static struct attribute *devfreq_attrs[] = { &dev_attr_min_freq.attr, &dev_attr_max_freq.attr, &dev_attr_trans_stat.attr, + &dev_attr_timer.attr, NULL, }; ATTRIBUTE_GROUPS(devfreq); diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index 57e871a559a9..12782fbb4c25 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -31,6 +31,13 @@ #define DEVFREQ_PRECHANGE (0) #define DEVFREQ_POSTCHANGE (1) +/* DEVFREQ work timers */ +enum devfreq_timer { + DEVFREQ_TIMER_DEFERRABLE = 0, + DEVFREQ_TIMER_DELAYED, + DEVFREQ_TIMER_NUM, +}; + struct devfreq; struct devfreq_governor; @@ -70,6 +77,7 @@ struct devfreq_dev_status { * @initial_freq: The operating frequency when devfreq_add_device() is * called. * @polling_ms: The polling interval in ms. 0 disables polling. + * @timer: Timer type is either deferrable or delayed timer. * @target: The device should set its operating frequency at * freq or lowest-upper-than-freq value. 
If freq is * higher than any operable frequency, set maximum. @@ -96,6 +104,7 @@ struct devfreq_dev_status { struct devfreq_dev_profile { unsigned long initial_freq; unsigned int polling_ms; + enum devfreq_timer timer; int (*target)(struct device *dev, unsigned long *freq, u32 flags); int (*get_dev_status)(struct device *dev, -- cgit v1.2.3 From cbbdfa6f331980c6786b4ca5df53c37b90df3246 Mon Sep 17 00:00:00 2001 From: Sathish Narasimman Date: Thu, 23 Jul 2020 18:09:03 +0530 Subject: Bluetooth: Enable controller RPA resolution using Experimental feature This patch adds support to enable the use of RPA Address resolution using expermental feature mgmt command. Signed-off-by: Sathish Narasimman Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci.h | 1 + net/bluetooth/hci_event.c | 1 + net/bluetooth/hci_request.c | 7 ++- net/bluetooth/mgmt.c | 142 +++++++++++++++++++++++++++++++++++++++++++- 4 files changed, 148 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index dd82cce77a7a..c8e67042a3b1 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -318,6 +318,7 @@ enum { HCI_FORCE_BREDR_SMP, HCI_FORCE_STATIC_ADDR, HCI_LL_RPA_RESOLUTION, + HCI_ENABLE_LL_PRIVACY, HCI_CMD_PENDING, HCI_FORCE_NO_MITM, diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 628831b15c0a..33d8458fdd4a 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -5230,6 +5230,7 @@ static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, le16_to_cpu(ev->supervision_timeout)); if (use_ll_privacy(hdev) && + hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) hci_req_disable_address_resolution(hdev); } diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 70e077cc7dfa..435400a43a78 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -678,8 +678,10 @@ void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn) /* Disable address resolution */ if (use_ll_privacy(hdev) && + hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) { __u8 enable = 0x00; + hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); } } @@ -870,8 +872,11 @@ static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval, return; } - if (use_ll_privacy(hdev) && addr_resolv) { + if (use_ll_privacy(hdev) && + hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && + addr_resolv) { u8 enable = 0x01; + hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); } diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 47bcfe2fb14c..4ec0fee80344 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -795,10 +795,15 @@ static u32 get_supported_settings(struct hci_dev *hdev) if (lmp_le_capable(hdev)) { settings |= MGMT_SETTING_LE; - settings |= MGMT_SETTING_ADVERTISING; settings |= MGMT_SETTING_SECURE_CONN; settings |= MGMT_SETTING_PRIVACY; settings |= MGMT_SETTING_STATIC_ADDRESS; + + /* When the experimental feature for LL Privacy support is + * enabled, then advertising is no longer supported. 
+ */ + if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) + settings |= MGMT_SETTING_ADVERTISING; } if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || @@ -3759,10 +3764,16 @@ static const u8 simult_central_periph_uuid[16] = { 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67, }; +/* 15c0a148-c273-11ea-b3de-0242ac130004 */ +static const u8 rpa_resolution_uuid[16] = { + 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3, + 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15, +}; + static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { - char buf[44]; + char buf[62]; /* Enough space for 3 features */ struct mgmt_rp_read_exp_features_info *rp = (void *)buf; u16 idx = 0; u32 flags; @@ -3795,6 +3806,17 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, idx++; } + if (hdev && use_ll_privacy(hdev)) { + if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) + flags = BIT(0) | BIT(1); + else + flags = BIT(1); + + memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16); + rp->features[idx].flags = cpu_to_le32(flags); + idx++; + } + rp->feature_count = cpu_to_le16(idx); /* After reading the experimental features information, enable @@ -3807,6 +3829,21 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, 0, rp, sizeof(*rp) + (20 * idx)); } +static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev, + struct sock *skip) +{ + struct mgmt_ev_exp_feature_changed ev; + + memset(&ev, 0, sizeof(ev)); + memcpy(ev.uuid, rpa_resolution_uuid, 16); + ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1)); + + return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev, + &ev, sizeof(ev), + HCI_MGMT_EXP_FEATURE_EVENTS, skip); + +} + #ifdef CONFIG_BT_FEATURE_DEBUG static int exp_debug_feature_changed(bool enabled, struct sock *skip) { @@ -3845,6 +3882,16 @@ static int set_exp_feature(struct sock *sk, struct hci_dev *hdev, } #endif + if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) { + bool changed = hci_dev_test_flag(hdev, + HCI_ENABLE_LL_PRIVACY); + + hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY); + + if (changed) + exp_ll_privacy_feature_changed(false, hdev, sk); + } + hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); return mgmt_cmd_complete(sk, hdev ? 
hdev->id : MGMT_INDEX_NONE, @@ -3895,6 +3942,69 @@ static int set_exp_feature(struct sock *sk, struct hci_dev *hdev, } #endif + if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) { + bool val, changed; + int err; + u32 flags; + + /* Command requires to use the controller index */ + if (!hdev) + return mgmt_cmd_status(sk, MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_INDEX); + + /* Changes can only be made when controller is powered down */ + if (hdev_is_powered(hdev)) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_NOT_POWERED); + + /* Parameters are limited to a single octet */ + if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); + + /* Only boolean on/off is supported */ + if (cp->param[0] != 0x00 && cp->param[0] != 0x01) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); + + val = !!cp->param[0]; + + if (val) { + changed = !hci_dev_test_flag(hdev, + HCI_ENABLE_LL_PRIVACY); + hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY); + hci_dev_clear_flag(hdev, HCI_ADVERTISING); + + /* Enable LL privacy + supported settings changed */ + flags = BIT(0) | BIT(1); + } else { + changed = hci_dev_test_flag(hdev, + HCI_ENABLE_LL_PRIVACY); + hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY); + + /* Disable LL privacy + supported settings changed */ + flags = BIT(1); + } + + memcpy(rp.uuid, rpa_resolution_uuid, 16); + rp.flags = cpu_to_le32(flags); + + hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); + + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, 0, + &rp, sizeof(rp)); + + if (changed) + exp_ll_privacy_feature_changed(val, hdev, sk); + + return err; + } + return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_NOT_SUPPORTED); @@ -5040,6 +5150,13 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data, return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, status); + /* Enabling the experimental LL Privay support disables support for + * advertising. + */ + if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, + MGMT_STATUS_NOT_SUPPORTED); + if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, MGMT_STATUS_INVALID_PARAMS); @@ -7112,6 +7229,13 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev, return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES, MGMT_STATUS_REJECTED); + /* Enabling the experimental LL Privay support disables support for + * advertising. + */ + if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, + MGMT_STATUS_NOT_SUPPORTED); + hci_dev_lock(hdev); rp_len = sizeof(*rp) + hdev->adv_instance_cnt; @@ -7315,6 +7439,13 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev, return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, status); + /* Enabling the experimental LL Privay support disables support for + * advertising. 
+ */ + if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, + MGMT_STATUS_NOT_SUPPORTED); + if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, MGMT_STATUS_INVALID_PARAMS); @@ -7479,6 +7610,13 @@ static int remove_advertising(struct sock *sk, struct hci_dev *hdev, bt_dev_dbg(hdev, "sock %p", sk); + /* Enabling the experimental LL Privay support disables support for + * advertising. + */ + if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, + MGMT_STATUS_NOT_SUPPORTED); + hci_dev_lock(hdev); if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) { -- cgit v1.2.3 From 1ccf2fe35c30f79102ad129c5aa71059daaaed7f Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Thu, 30 Jul 2020 11:44:41 +0200 Subject: KVM: arm: Add trace name for ARM_NISV Commit c726200dd106d ("KVM: arm/arm64: Allow reporting non-ISV data aborts to userspace") introduced a mechanism to deflect MMIO traffic the kernel can not handle to user space. For that, it introduced a new exit reason. However, it did not update the trace point array that gives human readable names to these exit reasons inside the trace log. Let's fix that up after the fact, so that trace logs are pretty even when we get user space MMIO traps on ARM. Fixes: c726200dd106d ("KVM: arm/arm64: Allow reporting non-ISV data aborts to userspace") Signed-off-by: Alexander Graf Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200730094441.18231-1-graf@amazon.com --- include/trace/events/kvm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h index 2c735a3e6613..9417a34aad08 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h @@ -17,7 +17,7 @@ ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL), \ ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH), ERSN(EPR),\ ERSN(SYSTEM_EVENT), ERSN(S390_STSI), ERSN(IOAPIC_EOI), \ - ERSN(HYPERV) + ERSN(HYPERV), ERSN(ARM_NISV) TRACE_EVENT(kvm_userspace_exit, TP_PROTO(__u32 reason, int errno), -- cgit v1.2.3 From 0de967f24e6c26fca3845e5a5970866ae59ac766 Mon Sep 17 00:00:00 2001 From: Lukasz Luba Date: Thu, 30 Jul 2020 17:51:17 +0100 Subject: thermal: Update power allocator and devfreq cooling to SPDX licensing Update the license to the SPDX licensing format. Signed-off-by: Lukasz Luba Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20200730165117.13998-1-lukasz.luba@arm.com --- drivers/thermal/devfreq_cooling.c | 10 +--------- drivers/thermal/gov_power_allocator.c | 9 +-------- include/linux/devfreq_cooling.h | 9 +-------- 3 files changed, 3 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c index f7f32e98331b..a12d29096229 100644 --- a/drivers/thermal/devfreq_cooling.c +++ b/drivers/thermal/devfreq_cooling.c @@ -1,18 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 /* * devfreq_cooling: Thermal cooling device implementation for devices using * devfreq * * Copyright (C) 2014-2015 ARM Limited * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * TODO: * - If OPPs are added or removed after devfreq cooling has * registered, the devfreq cooling won't react to it. diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c index 44636475b2a3..5cb518d8f156 100644 --- a/drivers/thermal/gov_power_allocator.c +++ b/drivers/thermal/gov_power_allocator.c @@ -1,16 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * A power allocator to manage temperature * * Copyright (C) 2014 ARM Ltd. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #define pr_fmt(fmt) "Power allocator: " fmt diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h index 79a6e37a1d6f..9df2dfca68dd 100644 --- a/include/linux/devfreq_cooling.h +++ b/include/linux/devfreq_cooling.h @@ -1,17 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * devfreq_cooling: Thermal cooling device implementation for devices using * devfreq * * Copyright (C) 2014-2015 ARM Limited * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #ifndef __DEVFREQ_COOLING_H__ -- cgit v1.2.3 From b13fecb1c3a603c4b8e99b306fecf4f668c11b32 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 13 Jul 2020 15:01:26 -0700 Subject: treewide: Replace DECLARE_TASKLET() with DECLARE_TASKLET_OLD() This converts all the existing DECLARE_TASKLET() (and ...DISABLED) macros with DECLARE_TASKLET_OLD() in preparation for refactoring the tasklet callback type. All existing DECLARE_TASKLET() users had a "0" data argument, it has been removed here as well. 
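(For illustration only, a minimal before/after sketch of this mechanical conversion; the foo_* names are hypothetical and <linux/interrupt.h> is assumed to be included:)

	/* Handler keeps the legacy unsigned long prototype for now. */
	static void foo_poll(unsigned long unused);

	/* Before: the unused "data" value had to be spelled out. */
	static DECLARE_TASKLET(foo_tasklet, foo_poll, 0);

	/* After: identical initialization, minus the redundant argument. */
	static DECLARE_TASKLET_OLD(foo_tasklet, foo_poll);
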
Reviewed-by: Greg Kroah-Hartman Acked-by: Thomas Gleixner Signed-off-by: Kees Cook --- drivers/input/keyboard/omap-keypad.c | 2 +- drivers/input/serio/hil_mlc.c | 2 +- drivers/net/wan/farsync.c | 4 ++-- drivers/s390/crypto/ap_bus.c | 2 +- drivers/staging/most/dim2/dim2.c | 2 +- drivers/staging/octeon/ethernet-tx.c | 2 +- drivers/tty/vt/keyboard.c | 2 +- drivers/usb/gadget/udc/snps_udc_core.c | 2 +- drivers/usb/host/fhci-sched.c | 2 +- include/linux/interrupt.h | 15 ++++++++++----- kernel/backtracetest.c | 2 +- kernel/debug/debug_core.c | 2 +- kernel/irq/resend.c | 2 +- net/atm/pppoatm.c | 2 +- net/iucv/iucv.c | 2 +- sound/drivers/pcsp/pcsp_lib.c | 2 +- 16 files changed, 26 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c index 5fe7a5633e33..dbe836c7ff47 100644 --- a/drivers/input/keyboard/omap-keypad.c +++ b/drivers/input/keyboard/omap-keypad.c @@ -46,7 +46,7 @@ struct omap_kp { unsigned short keymap[]; }; -static DECLARE_TASKLET_DISABLED(kp_tasklet, omap_kp_tasklet, 0); +static DECLARE_TASKLET_DISABLED_OLD(kp_tasklet, omap_kp_tasklet); static unsigned int *row_gpios; static unsigned int *col_gpios; diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c index e1423f7648d6..65f4e9d62a67 100644 --- a/drivers/input/serio/hil_mlc.c +++ b/drivers/input/serio/hil_mlc.c @@ -77,7 +77,7 @@ static struct timer_list hil_mlcs_kicker; static int hil_mlcs_probe; static void hil_mlcs_process(unsigned long unused); -static DECLARE_TASKLET_DISABLED(hil_mlcs_tasklet, hil_mlcs_process, 0); +static DECLARE_TASKLET_DISABLED_OLD(hil_mlcs_tasklet, hil_mlcs_process); /* #define HIL_MLC_DEBUG */ diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index 7916efce7188..f5198a391417 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c @@ -569,8 +569,8 @@ static void do_bottom_half_rx(struct fst_card_info *card); static void fst_process_tx_work_q(unsigned long work_q); static void fst_process_int_work_q(unsigned long work_q); -static DECLARE_TASKLET(fst_tx_task, fst_process_tx_work_q, 0); -static DECLARE_TASKLET(fst_int_task, fst_process_int_work_q, 0); +static DECLARE_TASKLET_OLD(fst_tx_task, fst_process_tx_work_q); +static DECLARE_TASKLET_OLD(fst_int_task, fst_process_int_work_q); static struct fst_card_info *fst_card_array[FST_MAX_CARDS]; static spinlock_t fst_work_q_lock; diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index e71ca4a719a5..2589ccd257e3 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -93,7 +93,7 @@ static DECLARE_WORK(ap_scan_work, ap_scan_bus); * Tasklet & timer for AP request polling and interrupts */ static void ap_tasklet_fn(unsigned long); -static DECLARE_TASKLET(ap_tasklet, ap_tasklet_fn, 0); +static DECLARE_TASKLET_OLD(ap_tasklet, ap_tasklet_fn); static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait); static struct task_struct *ap_poll_kthread; static DEFINE_MUTEX(ap_poll_thread_mutex); diff --git a/drivers/staging/most/dim2/dim2.c b/drivers/staging/most/dim2/dim2.c index 8e0f27e61652..509c8012d20b 100644 --- a/drivers/staging/most/dim2/dim2.c +++ b/drivers/staging/most/dim2/dim2.c @@ -46,7 +46,7 @@ MODULE_PARM_DESC(fcnt, "Num of frames per sub-buffer for sync channels as a powe static DEFINE_SPINLOCK(dim_lock); static void dim2_tasklet_fn(unsigned long data); -static DECLARE_TASKLET(dim2_tasklet, dim2_tasklet_fn, 0); +static DECLARE_TASKLET_OLD(dim2_tasklet, dim2_tasklet_fn); /** * struct hdm_channel - 
private structure to keep channel specific data diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c index ab7dd8216006..9c71ad5af7b9 100644 --- a/drivers/staging/octeon/ethernet-tx.c +++ b/drivers/staging/octeon/ethernet-tx.c @@ -41,7 +41,7 @@ #endif static void cvm_oct_tx_do_cleanup(unsigned long arg); -static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0); +static DECLARE_TASKLET_OLD(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup); /* Maximum number of SKBs to try to free per xmit packet. */ #define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2) diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c index 568b2171f335..f80199984ee0 100644 --- a/drivers/tty/vt/keyboard.c +++ b/drivers/tty/vt/keyboard.c @@ -1236,7 +1236,7 @@ static void kbd_bh(unsigned long dummy) } } -DECLARE_TASKLET_DISABLED(keyboard_tasklet, kbd_bh, 0); +DECLARE_TASKLET_DISABLED_OLD(keyboard_tasklet, kbd_bh); #if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_ALPHA) ||\ defined(CONFIG_MIPS) || defined(CONFIG_PPC) || defined(CONFIG_SPARC) ||\ diff --git a/drivers/usb/gadget/udc/snps_udc_core.c b/drivers/usb/gadget/udc/snps_udc_core.c index afdd28f332ce..e76f1a50b0fc 100644 --- a/drivers/usb/gadget/udc/snps_udc_core.c +++ b/drivers/usb/gadget/udc/snps_udc_core.c @@ -96,7 +96,7 @@ static int stop_pollstall_timer; static DECLARE_COMPLETION(on_pollstall_exit); /* tasklet for usb disconnect */ -static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect, 0); +static DECLARE_TASKLET_OLD(disconnect_tasklet, udc_tasklet_disconnect); /* endpoint names used for print */ static const char ep0_string[] = "ep0in"; diff --git a/drivers/usb/host/fhci-sched.c b/drivers/usb/host/fhci-sched.c index 3235d5307403..5c423f240a1f 100644 --- a/drivers/usb/host/fhci-sched.c +++ b/drivers/usb/host/fhci-sched.c @@ -677,7 +677,7 @@ static void process_done_list(unsigned long data) enable_irq(fhci_to_hcd(fhci)->irq); } -DECLARE_TASKLET(fhci_tasklet, process_done_list, 0); +DECLARE_TASKLET_OLD(fhci_tasklet, process_done_list); /* transfer complted callback */ u32 fhci_transfer_confirm_callback(struct fhci_hcd *fhci) diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 5db970b6615a..b911196f03eb 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -612,12 +612,17 @@ struct tasklet_struct unsigned long data; }; -#define DECLARE_TASKLET(name, func, data) \ -struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data } - -#define DECLARE_TASKLET_DISABLED(name, func, data) \ -struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data } +#define DECLARE_TASKLET_OLD(name, _func) \ +struct tasklet_struct name = { \ + .count = ATOMIC_INIT(0), \ + .func = _func, \ +} +#define DECLARE_TASKLET_DISABLED_OLD(name, _func) \ +struct tasklet_struct name = { \ + .count = ATOMIC_INIT(1), \ + .func = _func, \ +} enum { diff --git a/kernel/backtracetest.c b/kernel/backtracetest.c index a2a97fa3071b..370217dd7e39 100644 --- a/kernel/backtracetest.c +++ b/kernel/backtracetest.c @@ -29,7 +29,7 @@ static void backtrace_test_irq_callback(unsigned long data) complete(&backtrace_work); } -static DECLARE_TASKLET(backtrace_tasklet, &backtrace_test_irq_callback, 0); +static DECLARE_TASKLET_OLD(backtrace_tasklet, &backtrace_test_irq_callback); static void backtrace_test_irq(void) { diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index 9e5934780f41..b16dbc1bf056 100644 --- a/kernel/debug/debug_core.c +++ 
b/kernel/debug/debug_core.c @@ -1068,7 +1068,7 @@ static void kgdb_tasklet_bpt(unsigned long ing) atomic_set(&kgdb_break_tasklet_var, 0); } -static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0); +static DECLARE_TASKLET_OLD(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt); void kgdb_schedule_breakpoint(void) { diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index 27634f4022d0..c48ce19a257f 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c @@ -45,7 +45,7 @@ static void resend_irqs(unsigned long arg) } /* Tasklet to handle resend: */ -static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0); +static DECLARE_TASKLET_OLD(resend_tasklet, resend_irqs); static int irq_sw_resend(struct irq_desc *desc) { diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c index 45d8e1d5d033..579b66da1d95 100644 --- a/net/atm/pppoatm.c +++ b/net/atm/pppoatm.c @@ -393,7 +393,7 @@ static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg) * Each PPPoATM instance has its own tasklet - this is just a * prototypical one used to initialize them */ - static const DECLARE_TASKLET(tasklet_proto, pppoatm_wakeup_sender, 0); + static const DECLARE_TASKLET_OLD(tasklet_proto, pppoatm_wakeup_sender); if (copy_from_user(&be, arg, sizeof be)) return -EFAULT; if (be.encaps != PPPOATM_ENCAPS_AUTODETECT && diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 19250a0c85d3..cd2e468852e7 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c @@ -105,7 +105,7 @@ static LIST_HEAD(iucv_task_queue); * The tasklet for fast delivery of iucv interrupts. */ static void iucv_tasklet_fn(unsigned long); -static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_fn,0); +static DECLARE_TASKLET_OLD(iucv_tasklet, iucv_tasklet_fn); /* * Queue of interrupt buffers for delivery via a work queue diff --git a/sound/drivers/pcsp/pcsp_lib.c b/sound/drivers/pcsp/pcsp_lib.c index 05244b11ed5e..4e79293d7f11 100644 --- a/sound/drivers/pcsp/pcsp_lib.c +++ b/sound/drivers/pcsp/pcsp_lib.c @@ -36,7 +36,7 @@ static void pcsp_call_pcm_elapsed(unsigned long priv) } } -static DECLARE_TASKLET(pcsp_pcm_tasklet, pcsp_call_pcm_elapsed, 0); +static DECLARE_TASKLET_OLD(pcsp_pcm_tasklet, pcsp_call_pcm_elapsed); /* write the port and returns the next expire time in ns; * called at the trigger-start and in hrtimer callback -- cgit v1.2.3 From 12cc923f1ccc1df467e046b02a72c2b3b321b6a2 Mon Sep 17 00:00:00 2001 From: Romain Perier Date: Sun, 29 Sep 2019 18:30:13 +0200 Subject: tasklet: Introduce new initialization API Nowadays, modern kernel subsystems that use callbacks pass the data structure associated with a given callback as argument to the callback. The tasklet subsystem remains one which passes an arbitrary unsigned long to the callback function. This has several problems: - This keeps an extra field for storing the argument in each tasklet data structure, it bloats the tasklet_struct structure with a redundant .data field - No type checking can be performed on this argument. Instead of using container_of() like other callback subsystems, it forces callbacks to do explicit type cast of the unsigned long argument into the required object type. - Buffer overflows can overwrite the .func and the .data field, so an attacker can easily overwrite the function and its first argument to whatever it wants. Add a new tasklet initialization API, via DECLARE_TASKLET() and tasklet_setup(), which will replace the existing ones. 
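(Sketch of how a converted user is expected to look; the foo_* names are hypothetical, while tasklet_setup() and from_tasklet() are the helpers introduced by the diff below:)

	#include <linux/interrupt.h>

	struct foo_dev {
		struct tasklet_struct rx_tasklet;
		/* ... other driver state ... */
	};

	/* New-style callback: receives the tasklet itself, no unsigned long cast. */
	static void foo_rx_complete(struct tasklet_struct *t)
	{
		struct foo_dev *foo = from_tasklet(foo, t, rx_tasklet);

		/* process completed RX work for foo ... */
	}

	static void foo_init(struct foo_dev *foo)
	{
		tasklet_setup(&foo->rx_tasklet, foo_rx_complete);
	}
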
This work is greatly inspired by the timer_struct conversion series, see commit e99e88a9d2b0 ("treewide: setup_timer() -> timer_setup()") To avoid problems with both -Wcast-function-type (which is enabled in the kernel via -Wextra is several subsystems), and with mismatched function prototypes when build with Control Flow Integrity enabled, this adds the "use_callback" member to let the tasklet caller choose which union member to call through. Once all old API uses are removed, this and the .data member will be removed as well. (On 64-bit this does not grow the struct size as the new member fills the hole after atomic_t, which is also "int" sized.) Signed-off-by: Romain Perier Co-developed-by: Allen Pais Signed-off-by: Allen Pais Reviewed-by: Greg Kroah-Hartman Acked-by: Thomas Gleixner Co-developed-by: Kees Cook Signed-off-by: Kees Cook --- include/linux/interrupt.h | 28 +++++++++++++++++++++++++++- kernel/softirq.c | 18 +++++++++++++++++- 2 files changed, 44 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index b911196f03eb..f9aee3538461 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -585,6 +585,9 @@ static inline struct task_struct *this_cpu_ksoftirqd(void) /* Tasklets --- multithreaded analogue of BHs. + This API is deprecated. Please consider using threaded IRQs instead: + https://lore.kernel.org/lkml/20200716081538.2sivhkj4hcyrusem@linutronix.de + Main feature differing them of generic softirqs: tasklet is running only on one CPU simultaneously. @@ -608,10 +611,31 @@ struct tasklet_struct struct tasklet_struct *next; unsigned long state; atomic_t count; - void (*func)(unsigned long); + bool use_callback; + union { + void (*func)(unsigned long data); + void (*callback)(struct tasklet_struct *t); + }; unsigned long data; }; +#define DECLARE_TASKLET(name, _callback) \ +struct tasklet_struct name = { \ + .count = ATOMIC_INIT(0), \ + .callback = _callback, \ + .use_callback = true, \ +} + +#define DECLARE_TASKLET_DISABLED(name, _callback) \ +struct tasklet_struct name = { \ + .count = ATOMIC_INIT(1), \ + .callback = _callback, \ + .use_callback = true, \ +} + +#define from_tasklet(var, callback_tasklet, tasklet_fieldname) \ + container_of(callback_tasklet, typeof(*var), tasklet_fieldname) + #define DECLARE_TASKLET_OLD(name, _func) \ struct tasklet_struct name = { \ .count = ATOMIC_INIT(0), \ @@ -691,6 +715,8 @@ extern void tasklet_kill(struct tasklet_struct *t); extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); extern void tasklet_init(struct tasklet_struct *t, void (*func)(unsigned long), unsigned long data); +extern void tasklet_setup(struct tasklet_struct *t, + void (*callback)(struct tasklet_struct *)); /* * Autoprobing for irqs: diff --git a/kernel/softirq.c b/kernel/softirq.c index c4201b7f42b1..292e7c2d2333 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -547,7 +547,10 @@ static void tasklet_action_common(struct softirq_action *a, if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) BUG(); - t->func(t->data); + if (t->use_callback) + t->callback(t); + else + t->func(t->data); tasklet_unlock(t); continue; } @@ -573,6 +576,18 @@ static __latent_entropy void tasklet_hi_action(struct softirq_action *a) tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ); } +void tasklet_setup(struct tasklet_struct *t, + void (*callback)(struct tasklet_struct *)) +{ + t->next = NULL; + t->state = 0; + atomic_set(&t->count, 0); + t->callback = 
callback; + t->use_callback = true; + t->data = 0; +} +EXPORT_SYMBOL(tasklet_setup); + void tasklet_init(struct tasklet_struct *t, void (*func)(unsigned long), unsigned long data) { @@ -580,6 +595,7 @@ void tasklet_init(struct tasklet_struct *t, t->state = 0; atomic_set(&t->count, 0); t->func = func; + t->use_callback = false; t->data = data; } EXPORT_SYMBOL(tasklet_init); -- cgit v1.2.3 From 63bb76de4aeec833ae7ba05d85a7819ac4558126 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Wed, 15 Jul 2020 08:33:39 +0300 Subject: mm: pgtable: Make generic pgprot_* macros available for no-MMU The header defines some generic pgprot_* implementations, but they are only available when CONFIG_MMU is enabled. The RISC-V architecture, for example, therefore defines some of these pgprot_* macros for !NOMMU. Let's make the pgprot_* generic available even for !NOMMU so we can remove the RISC-V specific definitions. Compile-tested with x86 defconfig, and riscv defconfig and !MMU defconfig. Suggested-by: Palmer Dabbelt Reviewed-by: Mike Rapoport Acked-by: David Rientjes Signed-off-by: Pekka Enberg Signed-off-by: Palmer Dabbelt --- include/linux/pgtable.h | 71 ++++++++++++++++++++++++++----------------------- 1 file changed, 37 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 56c1e8eb7bb0..53e97da1e8e2 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -647,40 +647,6 @@ static inline int arch_unmap_one(struct mm_struct *mm, #define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address) #endif -#ifndef pgprot_nx -#define pgprot_nx(prot) (prot) -#endif - -#ifndef pgprot_noncached -#define pgprot_noncached(prot) (prot) -#endif - -#ifndef pgprot_writecombine -#define pgprot_writecombine pgprot_noncached -#endif - -#ifndef pgprot_writethrough -#define pgprot_writethrough pgprot_noncached -#endif - -#ifndef pgprot_device -#define pgprot_device pgprot_noncached -#endif - -#ifndef pgprot_modify -#define pgprot_modify pgprot_modify -static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) -{ - if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot))) - newprot = pgprot_noncached(newprot); - if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot))) - newprot = pgprot_writecombine(newprot); - if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot))) - newprot = pgprot_device(newprot); - return newprot; -} -#endif - /* * When walking page tables, get the address of the next boundary, * or the end address of the range if that comes earlier. Although no @@ -840,6 +806,43 @@ static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, * No-op macros that just return the current protection value. Defined here * because these macros can be used used even if CONFIG_MMU is not defined. 
*/ + +#ifndef pgprot_nx +#define pgprot_nx(prot) (prot) +#endif + +#ifndef pgprot_noncached +#define pgprot_noncached(prot) (prot) +#endif + +#ifndef pgprot_writecombine +#define pgprot_writecombine pgprot_noncached +#endif + +#ifndef pgprot_writethrough +#define pgprot_writethrough pgprot_noncached +#endif + +#ifndef pgprot_device +#define pgprot_device pgprot_noncached +#endif + +#ifdef CONFIG_MMU +#ifndef pgprot_modify +#define pgprot_modify pgprot_modify +static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) +{ + if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot))) + newprot = pgprot_noncached(newprot); + if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot))) + newprot = pgprot_writecombine(newprot); + if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot))) + newprot = pgprot_device(newprot); + return newprot; +} +#endif +#endif /* CONFIG_MMU */ + #ifndef pgprot_encrypted #define pgprot_encrypted(prot) (prot) #endif -- cgit v1.2.3 From fb2da16cd70a5140acdd7a102e5cd3b697c3404f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 14 Jul 2020 09:02:07 +0200 Subject: fs: remove ksys_getdents64 Just open code it in the only caller. Signed-off-by: Christoph Hellwig Acked-by: Linus Torvalds --- fs/readdir.c | 11 ++--------- include/linux/syscalls.h | 2 -- 2 files changed, 2 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/fs/readdir.c b/fs/readdir.c index a49f07c11cfb..19434b3c982c 100644 --- a/fs/readdir.c +++ b/fs/readdir.c @@ -348,8 +348,8 @@ efault: return -EFAULT; } -int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent, - unsigned int count) +SYSCALL_DEFINE3(getdents64, unsigned int, fd, + struct linux_dirent64 __user *, dirent, unsigned int, count) { struct fd f; struct getdents_callback64 buf = { @@ -380,13 +380,6 @@ int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent, return error; } - -SYSCALL_DEFINE3(getdents64, unsigned int, fd, - struct linux_dirent64 __user *, dirent, unsigned int, count) -{ - return ksys_getdents64(fd, dirent, count); -} - #ifdef CONFIG_COMPAT struct compat_old_linux_dirent { compat_ulong_t d_ino; diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 10843a6adb77..a998651629c7 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1243,8 +1243,6 @@ ssize_t ksys_write(unsigned int fd, const char __user *buf, size_t count); int ksys_chdir(const char __user *filename); int ksys_fchmod(unsigned int fd, umode_t mode); int ksys_fchown(unsigned int fd, uid_t user, gid_t group); -int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent, - unsigned int count); int ksys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg); ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count); void ksys_sync(void); -- cgit v1.2.3 From 166e07c37c6417c9713666268fc0eb89a9ce48b9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 6 Jun 2020 15:03:21 +0200 Subject: fs: remove ksys_open Just open code it in the two callers. 
Signed-off-by: Christoph Hellwig Acked-by: Linus Torvalds --- fs/open.c | 11 ++++++++--- include/linux/syscalls.h | 11 ----------- 2 files changed, 8 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/fs/open.c b/fs/open.c index 75166f071d28..ab3671af8a97 100644 --- a/fs/open.c +++ b/fs/open.c @@ -1208,7 +1208,9 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode) SYSCALL_DEFINE3(open, const char __user *, filename, int, flags, umode_t, mode) { - return ksys_open(filename, flags, mode); + if (force_o_largefile()) + flags |= O_LARGEFILE; + return do_sys_open(AT_FDCWD, filename, flags, mode); } SYSCALL_DEFINE4(openat, int, dfd, const char __user *, filename, int, flags, @@ -1270,9 +1272,12 @@ COMPAT_SYSCALL_DEFINE4(openat, int, dfd, const char __user *, filename, int, fla */ SYSCALL_DEFINE2(creat, const char __user *, pathname, umode_t, mode) { - return ksys_open(pathname, O_CREAT | O_WRONLY | O_TRUNC, mode); -} + int flags = O_CREAT | O_WRONLY | O_TRUNC; + if (force_o_largefile()) + flags |= O_LARGEFILE; + return do_sys_open(AT_FDCWD, pathname, flags, mode); +} #endif /* diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index a998651629c7..363baaadf8e1 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1374,17 +1374,6 @@ static inline int ksys_close(unsigned int fd) return __close_fd(current->files, fd); } -extern long do_sys_open(int dfd, const char __user *filename, int flags, - umode_t mode); - -static inline long ksys_open(const char __user *filename, int flags, - umode_t mode) -{ - if (force_o_largefile()) - flags |= O_LARGEFILE; - return do_sys_open(AT_FDCWD, filename, flags, mode); -} - extern long do_sys_truncate(const char __user *pathname, loff_t length); static inline long ksys_truncate(const char __user *pathname, loff_t length) -- cgit v1.2.3 From bc1cd99a9ad7e2c768e7ea92ae9c6ad4a4e0f7f7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 14 Jul 2020 08:58:49 +0200 Subject: fs: remove ksys_dup Fold it into the only remaining caller. Signed-off-by: Christoph Hellwig Acked-by: Linus Torvalds --- fs/file.c | 7 +------ include/linux/syscalls.h | 1 - 2 files changed, 1 insertion(+), 7 deletions(-) (limited to 'include') diff --git a/fs/file.c b/fs/file.c index abb8b7081d7a..85b7993165dd 100644 --- a/fs/file.c +++ b/fs/file.c @@ -985,7 +985,7 @@ SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd) return ksys_dup3(oldfd, newfd, 0); } -int ksys_dup(unsigned int fildes) +SYSCALL_DEFINE1(dup, unsigned int, fildes) { int ret = -EBADF; struct file *file = fget_raw(fildes); @@ -1000,11 +1000,6 @@ int ksys_dup(unsigned int fildes) return ret; } -SYSCALL_DEFINE1(dup, unsigned int, fildes) -{ - return ksys_dup(fildes); -} - int f_dupfd(unsigned int from, struct file *file, unsigned flags) { int err; diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 363baaadf8e1..b6d900574762 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1237,7 +1237,6 @@ asmlinkage long sys_ni_syscall(void); */ int ksys_umount(char __user *name, int flags); -int ksys_dup(unsigned int fildes); int ksys_chroot(const char __user *filename); ssize_t ksys_write(unsigned int fd, const char __user *buf, size_t count); int ksys_chdir(const char __user *filename); -- cgit v1.2.3 From b25ba7c3c9acdc4cf69f5bd69989819cabfc4e3b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 14 Jul 2020 08:59:57 +0200 Subject: fs: remove ksys_fchmod Fold it into the only remaining caller. 
Signed-off-by: Christoph Hellwig Acked-by: Linus Torvalds --- fs/open.c | 7 +------ include/linux/syscalls.h | 1 - 2 files changed, 1 insertion(+), 7 deletions(-) (limited to 'include') diff --git a/fs/open.c b/fs/open.c index ab3671af8a97..b316dd6a86a8 100644 --- a/fs/open.c +++ b/fs/open.c @@ -608,7 +608,7 @@ int vfs_fchmod(struct file *file, umode_t mode) return chmod_common(&file->f_path, mode); } -int ksys_fchmod(unsigned int fd, umode_t mode) +SYSCALL_DEFINE2(fchmod, unsigned int, fd, umode_t, mode) { struct fd f = fdget(fd); int err = -EBADF; @@ -620,11 +620,6 @@ int ksys_fchmod(unsigned int fd, umode_t mode) return err; } -SYSCALL_DEFINE2(fchmod, unsigned int, fd, umode_t, mode) -{ - return ksys_fchmod(fd, mode); -} - int do_fchmodat(int dfd, const char __user *filename, umode_t mode) { struct path path; diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index b6d900574762..39ff738997a1 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1240,7 +1240,6 @@ int ksys_umount(char __user *name, int flags); int ksys_chroot(const char __user *filename); ssize_t ksys_write(unsigned int fd, const char __user *buf, size_t count); int ksys_chdir(const char __user *filename); -int ksys_fchmod(unsigned int fd, umode_t mode); int ksys_fchown(unsigned int fd, uid_t user, gid_t group); int ksys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg); ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count); -- cgit v1.2.3 From 863b67e15177a7cd0c27b3e36e42fe7907dec9bd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 14 Jul 2020 09:00:51 +0200 Subject: fs: remove ksys_ioctl Fold it into the only remaining caller. Signed-off-by: Christoph Hellwig Acked-by: Linus Torvalds --- fs/ioctl.c | 7 +------ include/linux/syscalls.h | 1 - 2 files changed, 1 insertion(+), 7 deletions(-) (limited to 'include') diff --git a/fs/ioctl.c b/fs/ioctl.c index d69786d1dd91..4e6cc0a7d69c 100644 --- a/fs/ioctl.c +++ b/fs/ioctl.c @@ -736,7 +736,7 @@ static int do_vfs_ioctl(struct file *filp, unsigned int fd, return -ENOIOCTLCMD; } -int ksys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg) +SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, unsigned long, arg) { struct fd f = fdget(fd); int error; @@ -757,11 +757,6 @@ out: return error; } -SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, unsigned long, arg) -{ - return ksys_ioctl(fd, cmd, arg); -} - #ifdef CONFIG_COMPAT /** * compat_ptr_ioctl - generic implementation of .compat_ioctl file operation diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 39ff738997a1..5b0f1fca4cfb 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1241,7 +1241,6 @@ int ksys_chroot(const char __user *filename); ssize_t ksys_write(unsigned int fd, const char __user *buf, size_t count); int ksys_chdir(const char __user *filename); int ksys_fchown(unsigned int fd, uid_t user, gid_t group); -int ksys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg); ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count); void ksys_sync(void); int ksys_unshare(unsigned long unshare_flags); -- cgit v1.2.3 From fd5ad30c782351ab4d4a15941fc61e743a1bd66c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 15 Jul 2020 08:19:55 +0200 Subject: fs: expose utimes_common Rename utimes_common to vfs_utimes and make it available outside of utimes.c. This will be used by the initramfs unpacking code. 
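(As a sketch of the intended consumer, not part of this patch, an early-init helper built on the now-exported function might look like the following; the init_utimes() name is hypothetical, and treating a NULL times argument as "set both times to now" is an assumption based on how utimes_common() handles it:)

	#include <linux/fs.h>
	#include <linux/namei.h>

	static int __init init_utimes(const char *pathname, struct timespec64 *t)
	{
		struct path path;
		int ret;

		ret = kern_path(pathname, 0, &path);
		if (ret)
			return ret;
		/* t == NULL is taken to mean "touch to the current time". */
		ret = vfs_utimes(&path, t);
		path_put(&path);
		return ret;
	}
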
Signed-off-by: Christoph Hellwig Acked-by: Linus Torvalds --- fs/utimes.c | 6 +++--- include/linux/fs.h | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/fs/utimes.c b/fs/utimes.c index bfd86e81c590..fd3cc4226224 100644 --- a/fs/utimes.c +++ b/fs/utimes.c @@ -16,7 +16,7 @@ static bool nsec_valid(long nsec) return nsec >= 0 && nsec <= 999999999; } -static int utimes_common(const struct path *path, struct timespec64 *times) +int vfs_utimes(const struct path *path, struct timespec64 *times) { int error; struct iattr newattrs; @@ -94,7 +94,7 @@ retry: if (error) return error; - error = utimes_common(&path, times); + error = vfs_utimes(&path, times); path_put(&path); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; @@ -115,7 +115,7 @@ static int do_utimes_fd(int fd, struct timespec64 *times, int flags) f = fdget(fd); if (!f.file) return -EBADF; - error = utimes_common(&f.file->f_path, times); + error = vfs_utimes(&f.file->f_path, times); fdput(f); return error; } diff --git a/include/linux/fs.h b/include/linux/fs.h index 635086726f20..a1d2685a4878 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1746,6 +1746,7 @@ int vfs_mkobj(struct dentry *, umode_t, int vfs_fchown(struct file *file, uid_t user, gid_t group); int vfs_fchmod(struct file *file, umode_t mode); +int vfs_utimes(const struct path *path, struct timespec64 *times); extern long vfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); -- cgit v1.2.3 From e24ab0ef689de43649327f54cd1088f3dad25bb3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 21 Jul 2020 10:48:15 +0200 Subject: fs: push the getname from do_rmdir into the callers This mirrors do_unlinkat and will make life a little easier for the init code to reuse the whole function with a kernel filename. 
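(Illustrative sketch of the kind of caller this enables; the init_rmdir() name is hypothetical, while do_rmdir() taking a struct filename and getname_kernel() are what the change below relies on:)

	static int __init init_rmdir(const char *pathname)
	{
		/* Kernel-space path, so use getname_kernel() rather than getname(). */
		return do_rmdir(AT_FDCWD, getname_kernel(pathname));
	}
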
Signed-off-by: Christoph Hellwig --- fs/internal.h | 2 +- fs/namei.c | 10 ++++------ include/linux/syscalls.h | 4 ++-- 3 files changed, 7 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/fs/internal.h b/fs/internal.h index 9b863a7bd708..e903d5aae139 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -65,7 +65,7 @@ extern int vfs_path_lookup(struct dentry *, struct vfsmount *, long do_mknodat(int dfd, const char __user *filename, umode_t mode, unsigned int dev); long do_mkdirat(int dfd, const char __user *pathname, umode_t mode); -long do_rmdir(int dfd, const char __user *pathname); +long do_rmdir(int dfd, struct filename *name); long do_unlinkat(int dfd, struct filename *name); long do_symlinkat(const char __user *oldname, int newdfd, const char __user *newname); diff --git a/fs/namei.c b/fs/namei.c index 72d4219c93ac..d75a6039ae39 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -3720,17 +3720,16 @@ out: } EXPORT_SYMBOL(vfs_rmdir); -long do_rmdir(int dfd, const char __user *pathname) +long do_rmdir(int dfd, struct filename *name) { int error = 0; - struct filename *name; struct dentry *dentry; struct path path; struct qstr last; int type; unsigned int lookup_flags = 0; retry: - name = filename_parentat(dfd, getname(pathname), lookup_flags, + name = filename_parentat(dfd, name, lookup_flags, &path, &last, &type); if (IS_ERR(name)) return PTR_ERR(name); @@ -3781,7 +3780,7 @@ exit1: SYSCALL_DEFINE1(rmdir, const char __user *, pathname) { - return do_rmdir(AT_FDCWD, pathname); + return do_rmdir(AT_FDCWD, getname(pathname)); } /** @@ -3926,8 +3925,7 @@ SYSCALL_DEFINE3(unlinkat, int, dfd, const char __user *, pathname, int, flag) return -EINVAL; if (flag & AT_REMOVEDIR) - return do_rmdir(dfd, pathname); - + return do_rmdir(dfd, getname(pathname)); return do_unlinkat(dfd, getname(pathname)); } diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 5b0f1fca4cfb..e43816198e60 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1281,11 +1281,11 @@ static inline long ksys_unlink(const char __user *pathname) return do_unlinkat(AT_FDCWD, getname(pathname)); } -extern long do_rmdir(int dfd, const char __user *pathname); +long do_rmdir(int dfd, struct filename *name); static inline long ksys_rmdir(const char __user *pathname) { - return do_rmdir(AT_FDCWD, pathname); + return do_rmdir(AT_FDCWD, getname(pathname)); } extern long do_mkdirat(int dfd, const char __user *pathname, umode_t mode); -- cgit v1.2.3 From c60166f04283ffba7b88b45d824bbfb2bfccee24 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 21 Jul 2020 11:12:08 +0200 Subject: init: add an init_mount helper Like do_mount, but takes a kernel pointer for the destination path. Switch over the mounts in the init code and devtmpfs to it, which just happen to work due to the implicit set_fs(KERNEL_DS) during early init right now. 
Signed-off-by: Christoph Hellwig --- drivers/base/devtmpfs.c | 5 +++-- fs/Makefile | 2 +- fs/init.c | 25 +++++++++++++++++++++++++ fs/internal.h | 4 ++++ fs/namespace.c | 2 +- include/linux/init_syscalls.h | 4 ++++ init/do_mounts.c | 8 ++++---- init/do_mounts.h | 1 + init/do_mounts_initrd.c | 6 +++--- 9 files changed, 46 insertions(+), 11 deletions(-) create mode 100644 fs/init.c create mode 100644 include/linux/init_syscalls.h (limited to 'include') diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index d697634bc0d4..32af6cb987b4 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include "base.h" @@ -359,7 +360,7 @@ int __init devtmpfs_mount(void) if (!thread) return 0; - err = do_mount("devtmpfs", "dev", "devtmpfs", MS_SILENT, NULL); + err = init_mount("devtmpfs", "dev", "devtmpfs", MS_SILENT, NULL); if (err) printk(KERN_INFO "devtmpfs: error mounting %i\n", err); else @@ -408,7 +409,7 @@ static int __init devtmpfs_setup(void *p) err = ksys_unshare(CLONE_NEWNS); if (err) goto out; - err = do_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, NULL); + err = init_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, NULL); if (err) goto out; ksys_chdir("/.."); /* will traverse into overmounted root */ diff --git a/fs/Makefile b/fs/Makefile index 2ce5112b02c8..1c7b0e3f6daa 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -13,7 +13,7 @@ obj-y := open.o read_write.o file_table.o super.o \ seq_file.o xattr.o libfs.o fs-writeback.o \ pnode.o splice.o sync.o utimes.o d_path.o \ stack.o fs_struct.o statfs.o fs_pin.o nsfs.o \ - fs_types.o fs_context.o fs_parser.o fsopen.o + fs_types.o fs_context.o fs_parser.o fsopen.o init.o ifeq ($(CONFIG_BLOCK),y) obj-y += buffer.o block_dev.o direct-io.o mpage.o diff --git a/fs/init.c b/fs/init.c new file mode 100644 index 000000000000..c6eb724e1c7b --- /dev/null +++ b/fs/init.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Routines that mimic syscalls, but don't use the user address space or file + * descriptors. Only for init/ and related early init code. + */ +#include +#include +#include +#include +#include +#include "internal.h" + +int __init init_mount(const char *dev_name, const char *dir_name, + const char *type_page, unsigned long flags, void *data_page) +{ + struct path path; + int ret; + + ret = kern_path(dir_name, LOOKUP_FOLLOW, &path); + if (ret) + return ret; + ret = path_mount(dev_name, &path, type_page, flags, data_page); + path_put(&path); + return ret; +} diff --git a/fs/internal.h b/fs/internal.h index e903d5aae139..72ea0b6f7435 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -89,6 +89,10 @@ extern int __mnt_want_write_file(struct file *); extern void __mnt_drop_write_file(struct file *); extern void dissolve_on_fput(struct vfsmount *); + +int path_mount(const char *dev_name, struct path *path, + const char *type_page, unsigned long flags, void *data_page); + /* * fs_struct.c */ diff --git a/fs/namespace.c b/fs/namespace.c index 43834b59eff6..2c4d75920974 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -3111,7 +3111,7 @@ char *copy_mount_string(const void __user *data) * Therefore, if this magic number is present, it carries no information * and must be discarded. 
*/ -static int path_mount(const char *dev_name, struct path *path, +int path_mount(const char *dev_name, struct path *path, const char *type_page, unsigned long flags, void *data_page) { unsigned int mnt_flags = 0, sb_flags; diff --git a/include/linux/init_syscalls.h b/include/linux/init_syscalls.h new file mode 100644 index 000000000000..af9ea88a60e0 --- /dev/null +++ b/include/linux/init_syscalls.h @@ -0,0 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +int __init init_mount(const char *dev_name, const char *dir_name, + const char *type_page, unsigned long flags, void *data_page); diff --git a/init/do_mounts.c b/init/do_mounts.c index a7f22cf58c7e..83db87b6e5d1 100644 --- a/init/do_mounts.c +++ b/init/do_mounts.c @@ -395,16 +395,16 @@ static int __init do_mount_root(const char *name, const char *fs, int ret; if (data) { - /* do_mount() requires a full page as fifth argument */ + /* init_mount() requires a full page as fifth argument */ p = alloc_page(GFP_KERNEL); if (!p) return -ENOMEM; data_page = page_address(p); - /* zero-pad. do_mount() will make sure it's terminated */ + /* zero-pad. init_mount() will make sure it's terminated */ strncpy(data_page, data, PAGE_SIZE); } - ret = do_mount(name, "/root", fs, flags, data_page); + ret = init_mount(name, "/root", fs, flags, data_page); if (ret) goto out; @@ -628,7 +628,7 @@ void __init prepare_namespace(void) mount_root(); out: devtmpfs_mount(); - do_mount(".", "/", NULL, MS_MOVE, NULL); + init_mount(".", "/", NULL, MS_MOVE, NULL); ksys_chroot("."); } diff --git a/init/do_mounts.h b/init/do_mounts.h index 021e2f60223e..20e7fec8cb49 100644 --- a/init/do_mounts.h +++ b/init/do_mounts.h @@ -8,6 +8,7 @@ #include #include #include +#include void mount_block_root(char *name, int flags); void mount_root(void); diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c index e08669187d63..1f9336209ad9 100644 --- a/init/do_mounts_initrd.c +++ b/init/do_mounts_initrd.c @@ -62,7 +62,7 @@ static int __init init_linuxrc(struct subprocess_info *info, struct cred *new) console_on_rootfs(); /* move initrd over / and chdir/chroot in initrd root */ ksys_chdir("/root"); - do_mount(".", "/", NULL, MS_MOVE, NULL); + init_mount(".", "/", NULL, MS_MOVE, NULL); ksys_chroot("."); ksys_setsid(); return 0; @@ -99,7 +99,7 @@ static void __init handle_initrd(void) current->flags &= ~PF_FREEZER_SKIP; /* move initrd to rootfs' /old */ - do_mount("..", ".", NULL, MS_MOVE, NULL); + init_mount("..", ".", NULL, MS_MOVE, NULL); /* switch root and cwd back to / of rootfs */ ksys_chroot(".."); @@ -113,7 +113,7 @@ static void __init handle_initrd(void) mount_root(); printk(KERN_NOTICE "Trying to move old root to /initrd ... "); - error = do_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL); + error = init_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL); if (!error) printk("okay\n"); else { -- cgit v1.2.3 From 09267defa36aaff6ff829bd2fc8b043ec151cc3e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:23:08 +0200 Subject: init: add an init_umount helper Like ksys_umount, but takes a kernel pointer for the destination path. Switch over the umount in the init code, which just happen to work due to the implicit set_fs(KERNEL_DS) during early init right now. 
Signed-off-by: Christoph Hellwig --- fs/init.c | 14 ++++++++++++++ fs/internal.h | 1 + fs/namespace.c | 4 ++-- include/linux/init_syscalls.h | 1 + include/linux/syscalls.h | 1 - init/do_mounts_initrd.c | 2 +- 6 files changed, 19 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/fs/init.c b/fs/init.c index c6eb724e1c7b..9c8e31fdb048 100644 --- a/fs/init.c +++ b/fs/init.c @@ -23,3 +23,17 @@ int __init init_mount(const char *dev_name, const char *dir_name, path_put(&path); return ret; } + +int __init init_umount(const char *name, int flags) +{ + int lookup_flags = LOOKUP_MOUNTPOINT; + struct path path; + int ret; + + if (!(flags & UMOUNT_NOFOLLOW)) + lookup_flags |= LOOKUP_FOLLOW; + ret = kern_path(name, lookup_flags, &path); + if (ret) + return ret; + return path_umount(&path, flags); +} diff --git a/fs/internal.h b/fs/internal.h index 72ea0b6f7435..491d1e63809b 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -92,6 +92,7 @@ extern void dissolve_on_fput(struct vfsmount *); int path_mount(const char *dev_name, struct path *path, const char *type_page, unsigned long flags, void *data_page); +int path_umount(struct path *path, int flags); /* * fs_struct.c diff --git a/fs/namespace.c b/fs/namespace.c index 2c4d75920974..a7301790abb2 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1706,7 +1706,7 @@ static inline bool may_mandlock(void) } #endif -static int path_umount(struct path *path, int flags) +int path_umount(struct path *path, int flags) { struct mount *mnt; int retval; @@ -1736,7 +1736,7 @@ dput_and_out: return retval; } -int ksys_umount(char __user *name, int flags) +static int ksys_umount(char __user *name, int flags) { int lookup_flags = LOOKUP_MOUNTPOINT; struct path path; diff --git a/include/linux/init_syscalls.h b/include/linux/init_syscalls.h index af9ea88a60e0..a5a2e7f19916 100644 --- a/include/linux/init_syscalls.h +++ b/include/linux/init_syscalls.h @@ -2,3 +2,4 @@ int __init init_mount(const char *dev_name, const char *dir_name, const char *type_page, unsigned long flags, void *data_page); +int __init init_umount(const char *name, int flags); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index e43816198e60..1a4f5d8ee704 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1236,7 +1236,6 @@ asmlinkage long sys_ni_syscall(void); * the ksys_xyzyyz() functions prototyped below. */ -int ksys_umount(char __user *name, int flags); int ksys_chroot(const char __user *filename); ssize_t ksys_write(unsigned int fd, const char __user *buf, size_t count); int ksys_chdir(const char __user *filename); diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c index 1f9336209ad9..6b020a069902 100644 --- a/init/do_mounts_initrd.c +++ b/init/do_mounts_initrd.c @@ -122,7 +122,7 @@ static void __init handle_initrd(void) else printk("failed\n"); printk(KERN_NOTICE "Unmounting old root\n"); - ksys_umount("/old", MNT_DETACH); + init_umount("/old", MNT_DETACH); } } -- cgit v1.2.3 From 8fb9f73e5a539ab3aa4785f30fb52c65fa98600c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:23:40 +0200 Subject: init: add an init_unlink helper Add a simple helper to unlink with a kernel space file name and switch the early init code over to it. Remove the now unused ksys_unlink. 
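A condensed sketch of the resulting call site (path as in initrd_load() below; the wrapper name is illustrative). Unlike ksys_unlink(), the argument is an ordinary kernel string, so it no longer depends on the early-init set_fs(KERNEL_DS):

#include <linux/init.h>
#include <linux/init_syscalls.h>

static void __init discard_initrd_image(void)
{
        /* returns 0 or a negative errno; the early init code ignores failures here */
        init_unlink("/initrd.image");
}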
Signed-off-by: Christoph Hellwig --- fs/init.c | 5 +++++ include/linux/init_syscalls.h | 1 + include/linux/syscalls.h | 7 ------- init/do_mounts.h | 2 +- init/do_mounts_initrd.c | 4 ++-- init/do_mounts_rd.c | 2 +- init/initramfs.c | 3 ++- 7 files changed, 12 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/fs/init.c b/fs/init.c index 9c8e31fdb048..507ffbb5d146 100644 --- a/fs/init.c +++ b/fs/init.c @@ -37,3 +37,8 @@ int __init init_umount(const char *name, int flags) return ret; return path_umount(&path, flags); } + +int __init init_unlink(const char *pathname) +{ + return do_unlinkat(AT_FDCWD, getname_kernel(pathname)); +} diff --git a/include/linux/init_syscalls.h b/include/linux/init_syscalls.h index a5a2e7f19916..00d597249549 100644 --- a/include/linux/init_syscalls.h +++ b/include/linux/init_syscalls.h @@ -3,3 +3,4 @@ int __init init_mount(const char *dev_name, const char *dir_name, const char *type_page, unsigned long flags, void *data_page); int __init init_umount(const char *name, int flags); +int __init init_unlink(const char *pathname); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 1a4f5d8ee704..26f9738e5ab8 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1273,13 +1273,6 @@ int compat_ksys_ipc(u32 call, int first, int second, * The following kernel syscall equivalents are just wrappers to fs-internal * functions. Therefore, provide stubs to be inlined at the callsites. */ -extern long do_unlinkat(int dfd, struct filename *name); - -static inline long ksys_unlink(const char __user *pathname) -{ - return do_unlinkat(AT_FDCWD, getname(pathname)); -} - long do_rmdir(int dfd, struct filename *name); static inline long ksys_rmdir(const char __user *pathname) diff --git a/init/do_mounts.h b/init/do_mounts.h index 20e7fec8cb49..104d8431725a 100644 --- a/init/do_mounts.h +++ b/init/do_mounts.h @@ -16,7 +16,7 @@ extern int root_mountflags; static inline __init int create_dev(char *name, dev_t dev) { - ksys_unlink(name); + init_unlink(name); return ksys_mknod(name, S_IFBLK|0600, new_encode_dev(dev)); } diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c index 6b020a069902..8b44dd017842 100644 --- a/init/do_mounts_initrd.c +++ b/init/do_mounts_initrd.c @@ -137,11 +137,11 @@ bool __init initrd_load(void) * mounted in the normal path. 
*/ if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) { - ksys_unlink("/initrd.image"); + init_unlink("/initrd.image"); handle_initrd(); return true; } } - ksys_unlink("/initrd.image"); + init_unlink("/initrd.image"); return false; } diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c index d4255c10432a..ac021ae6e6fa 100644 --- a/init/do_mounts_rd.c +++ b/init/do_mounts_rd.c @@ -272,7 +272,7 @@ noclose_input: fput(out_file); out: kfree(buf); - ksys_unlink("/dev/ram"); + init_unlink("/dev/ram"); return res; } diff --git a/init/initramfs.c b/init/initramfs.c index 9820fca4d4e3..eb58cee6dadb 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -12,6 +12,7 @@ #include #include #include +#include static ssize_t __init xwrite(struct file *file, const char *p, size_t count, loff_t *pos) @@ -301,7 +302,7 @@ static void __init clean_path(char *path, umode_t fmode) if (S_ISDIR(st.mode)) ksys_rmdir(path); else - ksys_unlink(path); + init_unlink(path); } } -- cgit v1.2.3 From 20cce026c3e0972017b9cb4a7cccfb8cacf187d5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 22 Jul 2020 11:11:45 +0200 Subject: init: add an init_rmdir helper Add a simple helper to rmdir with a kernel space file name and switch the early init code over to it. Remove the now unused ksys_rmdir. Signed-off-by: Christoph Hellwig --- fs/init.c | 5 +++++ include/linux/init_syscalls.h | 1 + include/linux/syscalls.h | 7 ------- init/initramfs.c | 2 +- 4 files changed, 7 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/fs/init.c b/fs/init.c index 507ffbb5d146..eabd9ed2b510 100644 --- a/fs/init.c +++ b/fs/init.c @@ -42,3 +42,8 @@ int __init init_unlink(const char *pathname) { return do_unlinkat(AT_FDCWD, getname_kernel(pathname)); } + +int __init init_rmdir(const char *pathname) +{ + return do_rmdir(AT_FDCWD, getname_kernel(pathname)); +} diff --git a/include/linux/init_syscalls.h b/include/linux/init_syscalls.h index 00d597249549..abf3af563c0b 100644 --- a/include/linux/init_syscalls.h +++ b/include/linux/init_syscalls.h @@ -4,3 +4,4 @@ int __init init_mount(const char *dev_name, const char *dir_name, const char *type_page, unsigned long flags, void *data_page); int __init init_umount(const char *name, int flags); int __init init_unlink(const char *pathname); +int __init init_rmdir(const char *pathname); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 26f9738e5ab8..a7b14258d245 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1273,13 +1273,6 @@ int compat_ksys_ipc(u32 call, int first, int second, * The following kernel syscall equivalents are just wrappers to fs-internal * functions. Therefore, provide stubs to be inlined at the callsites. 
*/ -long do_rmdir(int dfd, struct filename *name); - -static inline long ksys_rmdir(const char __user *pathname) -{ - return do_rmdir(AT_FDCWD, getname(pathname)); -} - extern long do_mkdirat(int dfd, const char __user *pathname, umode_t mode); static inline long ksys_mkdir(const char __user *pathname, umode_t mode) diff --git a/init/initramfs.c b/init/initramfs.c index eb58cee6dadb..1a9159bf452f 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -300,7 +300,7 @@ static void __init clean_path(char *path, umode_t fmode) if (!vfs_lstat(path, &st) && (st.mode ^ fmode) & S_IFMT) { if (S_ISDIR(st.mode)) - ksys_rmdir(path); + init_rmdir(path); else init_unlink(path); } -- cgit v1.2.3 From db63f1e315384590b979f8f74abd1b5363b69894 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 22 Jul 2020 11:25:21 +0200 Subject: init: add an init_chdir helper Add a simple helper to chdir with a kernel space file name and switch the early init code over to it. Remove the now unused ksys_chdir. Signed-off-by: Christoph Hellwig --- drivers/base/devtmpfs.c | 2 +- fs/init.c | 16 ++++++++++++++++ fs/open.c | 7 +------ include/linux/init_syscalls.h | 1 + include/linux/syscalls.h | 1 - init/do_mounts.c | 2 +- init/do_mounts_initrd.c | 8 ++++---- 7 files changed, 24 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index 32af6cb987b4..e48aaba3166b 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -412,7 +412,7 @@ static int __init devtmpfs_setup(void *p) err = init_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, NULL); if (err) goto out; - ksys_chdir("/.."); /* will traverse into overmounted root */ + init_chdir("/.."); /* will traverse into overmounted root */ ksys_chroot("."); out: *(int *)p = err; diff --git a/fs/init.c b/fs/init.c index eabd9ed2b510..64d4e12eba93 100644 --- a/fs/init.c +++ b/fs/init.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include "internal.h" @@ -38,6 +39,21 @@ int __init init_umount(const char *name, int flags) return path_umount(&path, flags); } +int __init init_chdir(const char *filename) +{ + struct path path; + int error; + + error = kern_path(filename, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path); + if (error) + return error; + error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR); + if (!error) + set_fs_pwd(current->fs, &path); + path_put(&path); + return error; +} + int __init init_unlink(const char *pathname) { return do_unlinkat(AT_FDCWD, getname_kernel(pathname)); diff --git a/fs/open.c b/fs/open.c index b316dd6a86a8..723e0ac89893 100644 --- a/fs/open.c +++ b/fs/open.c @@ -482,7 +482,7 @@ SYSCALL_DEFINE2(access, const char __user *, filename, int, mode) return do_faccessat(AT_FDCWD, filename, mode, 0); } -int ksys_chdir(const char __user *filename) +SYSCALL_DEFINE1(chdir, const char __user *, filename) { struct path path; int error; @@ -508,11 +508,6 @@ out: return error; } -SYSCALL_DEFINE1(chdir, const char __user *, filename) -{ - return ksys_chdir(filename); -} - SYSCALL_DEFINE1(fchdir, unsigned int, fd) { struct fd f = fdget_raw(fd); diff --git a/include/linux/init_syscalls.h b/include/linux/init_syscalls.h index abf3af563c0b..1e845910ae56 100644 --- a/include/linux/init_syscalls.h +++ b/include/linux/init_syscalls.h @@ -3,5 +3,6 @@ int __init init_mount(const char *dev_name, const char *dir_name, const char *type_page, unsigned long flags, void *data_page); int __init init_umount(const char *name, int flags); +int __init init_chdir(const char 
*filename); int __init init_unlink(const char *pathname); int __init init_rmdir(const char *pathname); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index a7b14258d245..31fa67fb9894 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1238,7 +1238,6 @@ asmlinkage long sys_ni_syscall(void); int ksys_chroot(const char __user *filename); ssize_t ksys_write(unsigned int fd, const char __user *buf, size_t count); -int ksys_chdir(const char __user *filename); int ksys_fchown(unsigned int fd, uid_t user, gid_t group); ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count); void ksys_sync(void); diff --git a/init/do_mounts.c b/init/do_mounts.c index 83db87b6e5d1..a7581c6e85f2 100644 --- a/init/do_mounts.c +++ b/init/do_mounts.c @@ -408,7 +408,7 @@ static int __init do_mount_root(const char *name, const char *fs, if (ret) goto out; - ksys_chdir("/root"); + init_chdir("/root"); s = current->fs->pwd.dentry->d_sb; ROOT_DEV = s->s_dev; printk(KERN_INFO diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c index 8b44dd017842..04627fd22a92 100644 --- a/init/do_mounts_initrd.c +++ b/init/do_mounts_initrd.c @@ -61,7 +61,7 @@ static int __init init_linuxrc(struct subprocess_info *info, struct cred *new) ksys_unshare(CLONE_FS | CLONE_FILES); console_on_rootfs(); /* move initrd over / and chdir/chroot in initrd root */ - ksys_chdir("/root"); + init_chdir("/root"); init_mount(".", "/", NULL, MS_MOVE, NULL); ksys_chroot("."); ksys_setsid(); @@ -82,7 +82,7 @@ static void __init handle_initrd(void) /* mount initrd on rootfs' /root */ mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY); ksys_mkdir("/old", 0700); - ksys_chdir("/old"); + init_chdir("/old"); /* * In case that a resume from disk is carried out by linuxrc or one of @@ -104,11 +104,11 @@ static void __init handle_initrd(void) ksys_chroot(".."); if (new_decode_dev(real_root_dev) == Root_RAM0) { - ksys_chdir("/old"); + init_chdir("/old"); return; } - ksys_chdir("/"); + init_chdir("/"); ROOT_DEV = new_decode_dev(real_root_dev); mount_root(); -- cgit v1.2.3 From 4b7ca5014cbef51cdb99fd644eae4f3773747a05 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 22 Jul 2020 11:26:13 +0200 Subject: init: add an init_chroot helper Add a simple helper to chroot with a kernel space file name and switch the early init code over to it. Remove the now unused ksys_chroot. 
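A sketch of the chdir-then-chroot pivot the devtmpfs setup thread ends up using (assumes the devtmpfs mount on "/" already succeeded; error handling trimmed, wrapper name illustrative):

#include <linux/init.h>
#include <linux/init_syscalls.h>

static void __init pivot_into_devtmpfs(void)
{
        init_chdir("/..");      /* traverse into the overmounted root */
        init_chroot(".");       /* and make it this thread's root */
}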
Signed-off-by: Christoph Hellwig --- drivers/base/devtmpfs.c | 2 +- fs/init.c | 24 ++++++++++++++++++++++++ fs/open.c | 7 +------ include/linux/init_syscalls.h | 1 + include/linux/syscalls.h | 2 -- init/do_mounts.c | 2 +- init/do_mounts_initrd.c | 4 ++-- 7 files changed, 30 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index e48aaba3166b..eac184e6d657 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -413,7 +413,7 @@ static int __init devtmpfs_setup(void *p) if (err) goto out; init_chdir("/.."); /* will traverse into overmounted root */ - ksys_chroot("."); + init_chroot("."); out: *(int *)p = err; complete(&setup_done); diff --git a/fs/init.c b/fs/init.c index 64d4e12eba93..2c78f24814dd 100644 --- a/fs/init.c +++ b/fs/init.c @@ -9,6 +9,7 @@ #include #include #include +#include #include "internal.h" int __init init_mount(const char *dev_name, const char *dir_name, @@ -54,6 +55,29 @@ int __init init_chdir(const char *filename) return error; } +int __init init_chroot(const char *filename) +{ + struct path path; + int error; + + error = kern_path(filename, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path); + if (error) + return error; + error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR); + if (error) + goto dput_and_out; + error = -EPERM; + if (!ns_capable(current_user_ns(), CAP_SYS_CHROOT)) + goto dput_and_out; + error = security_path_chroot(&path); + if (error) + goto dput_and_out; + set_fs_root(current->fs, &path); +dput_and_out: + path_put(&path); + return error; +} + int __init init_unlink(const char *pathname) { return do_unlinkat(AT_FDCWD, getname_kernel(pathname)); diff --git a/fs/open.c b/fs/open.c index 723e0ac89893..f62f4752bb43 100644 --- a/fs/open.c +++ b/fs/open.c @@ -530,7 +530,7 @@ out: return error; } -int ksys_chroot(const char __user *filename) +SYSCALL_DEFINE1(chroot, const char __user *, filename) { struct path path; int error; @@ -563,11 +563,6 @@ out: return error; } -SYSCALL_DEFINE1(chroot, const char __user *, filename) -{ - return ksys_chroot(filename); -} - static int chmod_common(const struct path *path, umode_t mode) { struct inode *inode = path->dentry->d_inode; diff --git a/include/linux/init_syscalls.h b/include/linux/init_syscalls.h index 1e845910ae56..e07099a14b91 100644 --- a/include/linux/init_syscalls.h +++ b/include/linux/init_syscalls.h @@ -4,5 +4,6 @@ int __init init_mount(const char *dev_name, const char *dir_name, const char *type_page, unsigned long flags, void *data_page); int __init init_umount(const char *name, int flags); int __init init_chdir(const char *filename); +int __init init_chroot(const char *filename); int __init init_unlink(const char *pathname); int __init init_rmdir(const char *pathname); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 31fa67fb9894..e89d62e944dc 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1235,8 +1235,6 @@ asmlinkage long sys_ni_syscall(void); * Instead, use one of the functions which work equivalently, such as * the ksys_xyzyyz() functions prototyped below. 
*/ - -int ksys_chroot(const char __user *filename); ssize_t ksys_write(unsigned int fd, const char __user *buf, size_t count); int ksys_fchown(unsigned int fd, uid_t user, gid_t group); ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count); diff --git a/init/do_mounts.c b/init/do_mounts.c index a7581c6e85f2..b5f9604d0c98 100644 --- a/init/do_mounts.c +++ b/init/do_mounts.c @@ -629,7 +629,7 @@ void __init prepare_namespace(void) out: devtmpfs_mount(); init_mount(".", "/", NULL, MS_MOVE, NULL); - ksys_chroot("."); + init_chroot("."); } static bool is_tmpfs; diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c index 04627fd22a92..a6b447b191db 100644 --- a/init/do_mounts_initrd.c +++ b/init/do_mounts_initrd.c @@ -63,7 +63,7 @@ static int __init init_linuxrc(struct subprocess_info *info, struct cred *new) /* move initrd over / and chdir/chroot in initrd root */ init_chdir("/root"); init_mount(".", "/", NULL, MS_MOVE, NULL); - ksys_chroot("."); + init_chroot("."); ksys_setsid(); return 0; } @@ -101,7 +101,7 @@ static void __init handle_initrd(void) /* move initrd to rootfs' /old */ init_mount("..", ".", NULL, MS_MOVE, NULL); /* switch root and cwd back to / of rootfs */ - ksys_chroot(".."); + init_chroot(".."); if (new_decode_dev(real_root_dev) == Root_RAM0) { init_chdir("/old"); -- cgit v1.2.3 From b873498f99c77e7b5be3aa5ffe9ca67437232fe0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 22 Jul 2020 11:13:26 +0200 Subject: init: add an init_chown helper Add a simple helper to chown with a kernel space file name and switch the early init code over to it. Signed-off-by: Christoph Hellwig --- fs/init.c | 18 ++++++++++++++++++ fs/internal.h | 2 +- fs/open.c | 2 +- include/linux/init_syscalls.h | 1 + init/initramfs.c | 6 +++--- 5 files changed, 24 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/fs/init.c b/fs/init.c index 2c78f24814dd..edd024465595 100644 --- a/fs/init.c +++ b/fs/init.c @@ -78,6 +78,24 @@ dput_and_out: return error; } +int __init init_chown(const char *filename, uid_t user, gid_t group, int flags) +{ + int lookup_flags = (flags & AT_SYMLINK_NOFOLLOW) ? 
0 : LOOKUP_FOLLOW; + struct path path; + int error; + + error = kern_path(filename, lookup_flags, &path); + if (error) + return error; + error = mnt_want_write(path.mnt); + if (!error) { + error = chown_common(&path, user, group); + mnt_drop_write(path.mnt); + } + path_put(&path); + return error; +} + int __init init_unlink(const char *pathname) { return do_unlinkat(AT_FDCWD, getname_kernel(pathname)); diff --git a/fs/internal.h b/fs/internal.h index 491d1e63809b..e81b9e23c3ea 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -134,7 +134,7 @@ long do_sys_ftruncate(unsigned int fd, loff_t length, int small); int do_fchmodat(int dfd, const char __user *filename, umode_t mode); int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group, int flag); - +int chown_common(const struct path *path, uid_t user, gid_t group); extern int vfs_open(const struct path *, struct file *); /* diff --git a/fs/open.c b/fs/open.c index f62f4752bb43..49960a1248f1 100644 --- a/fs/open.c +++ b/fs/open.c @@ -639,7 +639,7 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, umode_t, mode) return do_fchmodat(AT_FDCWD, filename, mode); } -static int chown_common(const struct path *path, uid_t user, gid_t group) +int chown_common(const struct path *path, uid_t user, gid_t group) { struct inode *inode = path->dentry->d_inode; struct inode *delegated_inode = NULL; diff --git a/include/linux/init_syscalls.h b/include/linux/init_syscalls.h index e07099a14b91..0da59d76133e 100644 --- a/include/linux/init_syscalls.h +++ b/include/linux/init_syscalls.h @@ -5,5 +5,6 @@ int __init init_mount(const char *dev_name, const char *dir_name, int __init init_umount(const char *name, int flags); int __init init_chdir(const char *filename); int __init init_chroot(const char *filename); +int __init init_chown(const char *filename, uid_t user, gid_t group, int flags); int __init init_unlink(const char *pathname); int __init init_rmdir(const char *pathname); diff --git a/init/initramfs.c b/init/initramfs.c index 1a9159bf452f..358dcd93cb9d 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -349,14 +349,14 @@ static int __init do_name(void) } } else if (S_ISDIR(mode)) { ksys_mkdir(collected, mode); - ksys_chown(collected, uid, gid); + init_chown(collected, uid, gid, 0); ksys_chmod(collected, mode); dir_add(collected, mtime); } else if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) { if (maybe_link() == 0) { ksys_mknod(collected, mode, rdev); - ksys_chown(collected, uid, gid); + init_chown(collected, uid, gid, 0); ksys_chmod(collected, mode); do_utime(collected, mtime); } @@ -393,7 +393,7 @@ static int __init do_symlink(void) collected[N_ALIGN(name_len) + body_len] = '\0'; clean_path(collected, 0); ksys_symlink(collected + N_ALIGN(name_len), collected); - ksys_lchown(collected, uid, gid); + init_chown(collected, uid, gid, AT_SYMLINK_NOFOLLOW); do_utime(collected, mtime); state = SkipIt; next_state = Reset; -- cgit v1.2.3 From 1097742efc643ffc667c5c6684635b2663145a7d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 22 Jul 2020 11:41:02 +0200 Subject: init: add an init_chmod helper Add a simple helper to chmod with a kernel space file name and switch the early init code over to it. 
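The initramfs unpacker below pairs it with init_chown(); a condensed sketch of that sequence (the wrapper name and its parameters are illustrative, mirroring what do_name() extracts from a cpio header):

#include <linux/init.h>
#include <linux/init_syscalls.h>
#include <linux/types.h>

static void __init apply_cpio_metadata(const char *name, uid_t uid, gid_t gid,
                                        umode_t mode)
{
        init_chown(name, uid, gid, 0);  /* flags 0: follow symlinks, as for regular files */
        init_chmod(name, mode);
}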
Signed-off-by: Christoph Hellwig --- fs/init.c | 13 +++++++++++++ fs/internal.h | 2 +- fs/open.c | 4 ++-- include/linux/init_syscalls.h | 1 + include/linux/syscalls.h | 7 ------- init/initramfs.c | 4 ++-- 6 files changed, 19 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/fs/init.c b/fs/init.c index edd024465595..a66032d128b6 100644 --- a/fs/init.c +++ b/fs/init.c @@ -96,6 +96,19 @@ int __init init_chown(const char *filename, uid_t user, gid_t group, int flags) return error; } +int __init init_chmod(const char *filename, umode_t mode) +{ + struct path path; + int error; + + error = kern_path(filename, LOOKUP_FOLLOW, &path); + if (error) + return error; + error = chmod_common(&path, mode); + path_put(&path); + return error; +} + int __init init_unlink(const char *pathname) { return do_unlinkat(AT_FDCWD, getname_kernel(pathname)); diff --git a/fs/internal.h b/fs/internal.h index e81b9e23c3ea..6d82681c7d83 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -131,7 +131,7 @@ extern struct open_how build_open_how(int flags, umode_t mode); extern int build_open_flags(const struct open_how *how, struct open_flags *op); long do_sys_ftruncate(unsigned int fd, loff_t length, int small); -int do_fchmodat(int dfd, const char __user *filename, umode_t mode); +int chmod_common(const struct path *path, umode_t mode); int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group, int flag); int chown_common(const struct path *path, uid_t user, gid_t group); diff --git a/fs/open.c b/fs/open.c index 49960a1248f1..7ba89eae46c5 100644 --- a/fs/open.c +++ b/fs/open.c @@ -563,7 +563,7 @@ out: return error; } -static int chmod_common(const struct path *path, umode_t mode) +int chmod_common(const struct path *path, umode_t mode) { struct inode *inode = path->dentry->d_inode; struct inode *delegated_inode = NULL; @@ -610,7 +610,7 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd, umode_t, mode) return err; } -int do_fchmodat(int dfd, const char __user *filename, umode_t mode) +static int do_fchmodat(int dfd, const char __user *filename, umode_t mode) { struct path path; int error; diff --git a/include/linux/init_syscalls.h b/include/linux/init_syscalls.h index 0da59d76133e..2b1b4dc58682 100644 --- a/include/linux/init_syscalls.h +++ b/include/linux/init_syscalls.h @@ -6,5 +6,6 @@ int __init init_umount(const char *name, int flags); int __init init_chdir(const char *filename); int __init init_chroot(const char *filename); int __init init_chown(const char *filename, uid_t user, gid_t group, int flags); +int __init init_chmod(const char *filename, umode_t mode); int __init init_unlink(const char *pathname); int __init init_rmdir(const char *pathname); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index e89d62e944dc..8b71fa321ca2 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1304,13 +1304,6 @@ static inline long ksys_link(const char __user *oldname, return do_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0); } -extern int do_fchmodat(int dfd, const char __user *filename, umode_t mode); - -static inline int ksys_chmod(const char __user *filename, umode_t mode) -{ - return do_fchmodat(AT_FDCWD, filename, mode); -} - long do_faccessat(int dfd, const char __user *filename, int mode, int flags); static inline long ksys_access(const char __user *filename, int mode) diff --git a/init/initramfs.c b/init/initramfs.c index 358dcd93cb9d..efa9fb70038d 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -350,14 +350,14 @@ static int __init do_name(void) } 
else if (S_ISDIR(mode)) { ksys_mkdir(collected, mode); init_chown(collected, uid, gid, 0); - ksys_chmod(collected, mode); + init_chmod(collected, mode); dir_add(collected, mtime); } else if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) { if (maybe_link() == 0) { ksys_mknod(collected, mode, rdev); init_chown(collected, uid, gid, 0); - ksys_chmod(collected, mode); + init_chmod(collected, mode); do_utime(collected, mtime); } } -- cgit v1.2.3 From eb9d7d390e51108b4c6a9a7993ed9be92548c8f7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 22 Jul 2020 11:14:02 +0200 Subject: init: add an init_eaccess helper Add a simple helper to check if a file exists based on kernel space file name and switch the early init code over to it. Note that this theoretically changes behavior as it always is based on the effective permissions. But during early init that doesn't make a difference. Signed-off-by: Christoph Hellwig --- fs/init.c | 13 +++++++++++++ fs/open.c | 2 +- include/linux/init_syscalls.h | 1 + include/linux/syscalls.h | 7 ------- init/main.c | 4 ++-- 5 files changed, 17 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/fs/init.c b/fs/init.c index a66032d128b6..6d9af40d2897 100644 --- a/fs/init.c +++ b/fs/init.c @@ -109,6 +109,19 @@ int __init init_chmod(const char *filename, umode_t mode) return error; } +int __init init_eaccess(const char *filename) +{ + struct path path; + int error; + + error = kern_path(filename, LOOKUP_FOLLOW, &path); + if (error) + return error; + error = inode_permission(d_inode(path.dentry), MAY_ACCESS); + path_put(&path); + return error; +} + int __init init_unlink(const char *pathname) { return do_unlinkat(AT_FDCWD, getname_kernel(pathname)); diff --git a/fs/open.c b/fs/open.c index 7ba89eae46c5..aafecd1f7ba1 100644 --- a/fs/open.c +++ b/fs/open.c @@ -394,7 +394,7 @@ static const struct cred *access_override_creds(void) return old_cred; } -long do_faccessat(int dfd, const char __user *filename, int mode, int flags) +static long do_faccessat(int dfd, const char __user *filename, int mode, int flags) { struct path path; struct inode *inode; diff --git a/include/linux/init_syscalls.h b/include/linux/init_syscalls.h index 2b1b4dc58682..7031c0934bee 100644 --- a/include/linux/init_syscalls.h +++ b/include/linux/init_syscalls.h @@ -7,5 +7,6 @@ int __init init_chdir(const char *filename); int __init init_chroot(const char *filename); int __init init_chown(const char *filename, uid_t user, gid_t group, int flags); int __init init_chmod(const char *filename, umode_t mode); +int __init init_eaccess(const char *filename); int __init init_unlink(const char *pathname); int __init init_rmdir(const char *pathname); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 8b71fa321ca2..a2779638e414 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1304,13 +1304,6 @@ static inline long ksys_link(const char __user *oldname, return do_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0); } -long do_faccessat(int dfd, const char __user *filename, int mode, int flags); - -static inline long ksys_access(const char __user *filename, int mode) -{ - return do_faccessat(AT_FDCWD, filename, mode, 0); -} - extern int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group, int flag); diff --git a/init/main.c b/init/main.c index 47698427b15f..1c710d3e1d46 100644 --- a/init/main.c +++ b/init/main.c @@ -96,6 +96,7 @@ #include #include #include +#include #include #include @@ -1514,8 +1515,7 @@ static 
noinline void __init kernel_init_freeable(void) * check if there is an early userspace init. If yes, let it do all * the work */ - if (ksys_access((const char __user *) - ramdisk_execute_command, 0) != 0) { + if (init_eaccess(ramdisk_execute_command) != 0) { ramdisk_execute_command = NULL; prepare_namespace(); } -- cgit v1.2.3 From 812931d693da58cc24d2bb8dec01c2b4a7f4668f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 22 Jul 2020 11:14:19 +0200 Subject: init: add an init_link helper Add a simple helper to link with a kernel space file name and switch the early init code over to it. Remove the now unused ksys_link. Signed-off-by: Christoph Hellwig --- fs/init.c | 33 +++++++++++++++++++++++++++++++++ fs/internal.h | 3 +-- fs/namei.c | 4 ++-- include/linux/init_syscalls.h | 1 + include/linux/syscalls.h | 9 --------- init/initramfs.c | 2 +- 6 files changed, 38 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/fs/init.c b/fs/init.c index 6d9af40d2897..5db9d9f74868 100644 --- a/fs/init.c +++ b/fs/init.c @@ -122,6 +122,39 @@ int __init init_eaccess(const char *filename) return error; } +int __init init_link(const char *oldname, const char *newname) +{ + struct dentry *new_dentry; + struct path old_path, new_path; + int error; + + error = kern_path(oldname, 0, &old_path); + if (error) + return error; + + new_dentry = kern_path_create(AT_FDCWD, newname, &new_path, 0); + error = PTR_ERR(new_dentry); + if (IS_ERR(new_dentry)) + goto out; + + error = -EXDEV; + if (old_path.mnt != new_path.mnt) + goto out_dput; + error = may_linkat(&old_path); + if (unlikely(error)) + goto out_dput; + error = security_path_link(old_path.dentry, &new_path, new_dentry); + if (error) + goto out_dput; + error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, + NULL); +out_dput: + done_path_create(&new_path, new_dentry); +out: + path_put(&old_path); + return error; +} + int __init init_unlink(const char *pathname) { return do_unlinkat(AT_FDCWD, getname_kernel(pathname)); diff --git a/fs/internal.h b/fs/internal.h index 6d82681c7d83..58451b033d26 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -69,8 +69,7 @@ long do_rmdir(int dfd, struct filename *name); long do_unlinkat(int dfd, struct filename *name); long do_symlinkat(const char __user *oldname, int newdfd, const char __user *newname); -int do_linkat(int olddfd, const char __user *oldname, int newdfd, - const char __user *newname, int flags); +int may_linkat(struct path *link); /* * namespace.c diff --git a/fs/namei.c b/fs/namei.c index d75a6039ae39..13de64c6be76 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1024,7 +1024,7 @@ static bool safe_hardlink_source(struct inode *inode) * * Returns 0 if successful, -ve on error. */ -static int may_linkat(struct path *link) +int may_linkat(struct path *link) { struct inode *inode = link->dentry->d_inode; @@ -4086,7 +4086,7 @@ EXPORT_SYMBOL(vfs_link); * with linux 2.0, and to avoid hard-linking to directories * and other special files. 
--ADM */ -int do_linkat(int olddfd, const char __user *oldname, int newdfd, +static int do_linkat(int olddfd, const char __user *oldname, int newdfd, const char __user *newname, int flags) { struct dentry *new_dentry; diff --git a/include/linux/init_syscalls.h b/include/linux/init_syscalls.h index 7031c0934bee..5ca15a5b55b7 100644 --- a/include/linux/init_syscalls.h +++ b/include/linux/init_syscalls.h @@ -8,5 +8,6 @@ int __init init_chroot(const char *filename); int __init init_chown(const char *filename, uid_t user, gid_t group, int flags); int __init init_chmod(const char *filename, umode_t mode); int __init init_eaccess(const char *filename); +int __init init_link(const char *oldname, const char *newname); int __init init_unlink(const char *pathname); int __init init_rmdir(const char *pathname); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index a2779638e414..4b18b91ce465 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1295,15 +1295,6 @@ static inline long ksys_mknod(const char __user *filename, umode_t mode, return do_mknodat(AT_FDCWD, filename, mode, dev); } -extern int do_linkat(int olddfd, const char __user *oldname, int newdfd, - const char __user *newname, int flags); - -static inline long ksys_link(const char __user *oldname, - const char __user *newname) -{ - return do_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0); -} - extern int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group, int flag); diff --git a/init/initramfs.c b/init/initramfs.c index efa9fb70038d..516a66900d7a 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -312,7 +312,7 @@ static int __init maybe_link(void) char *old = find_link(major, minor, ino, mode, collected); if (old) { clean_path(collected, 0); - return (ksys_link(old, collected) < 0) ? -1 : 1; + return (init_link(old, collected) < 0) ? -1 : 1; } } return 0; -- cgit v1.2.3 From cd3acb6a79349f346714ab3d26d203a0c6ca5ab0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 22 Jul 2020 11:14:36 +0200 Subject: init: add an init_symlink helper Add a simple helper to symlink with a kernel space file name and switch the early init code over to it. Remove the now unused ksys_symlink. 
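Condensed from the do_symlink() change below; the argument order is target first, then the new link name, as with symlink(2) (the wrapper name is illustrative):

#include <linux/fcntl.h>        /* AT_SYMLINK_NOFOLLOW */
#include <linux/init.h>
#include <linux/init_syscalls.h>
#include <linux/types.h>

static void __init make_cpio_symlink(const char *target, const char *linkname,
                                      uid_t uid, gid_t gid)
{
        init_symlink(target, linkname);
        /* chown the link itself, not what it points to */
        init_chown(linkname, uid, gid, AT_SYMLINK_NOFOLLOW);
}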
Signed-off-by: Christoph Hellwig --- fs/init.c | 16 ++++++++++++++++ fs/internal.h | 2 -- fs/namei.c | 2 +- include/linux/init_syscalls.h | 1 + include/linux/syscalls.h | 9 --------- init/initramfs.c | 2 +- 6 files changed, 19 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/fs/init.c b/fs/init.c index 5db9d9f74868..09ef2b58d48c 100644 --- a/fs/init.c +++ b/fs/init.c @@ -155,6 +155,22 @@ out: return error; } +int __init init_symlink(const char *oldname, const char *newname) +{ + struct dentry *dentry; + struct path path; + int error; + + dentry = kern_path_create(AT_FDCWD, newname, &path, 0); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + error = security_path_symlink(&path, dentry, oldname); + if (!error) + error = vfs_symlink(path.dentry->d_inode, dentry, oldname); + done_path_create(&path, dentry); + return error; +} + int __init init_unlink(const char *pathname) { return do_unlinkat(AT_FDCWD, getname_kernel(pathname)); diff --git a/fs/internal.h b/fs/internal.h index 58451b033d26..40b50a222d7a 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -67,8 +67,6 @@ long do_mknodat(int dfd, const char __user *filename, umode_t mode, long do_mkdirat(int dfd, const char __user *pathname, umode_t mode); long do_rmdir(int dfd, struct filename *name); long do_unlinkat(int dfd, struct filename *name); -long do_symlinkat(const char __user *oldname, int newdfd, - const char __user *newname); int may_linkat(struct path *link); /* diff --git a/fs/namei.c b/fs/namei.c index 13de64c6be76..2f6fa53eb3da 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -3955,7 +3955,7 @@ int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname) } EXPORT_SYMBOL(vfs_symlink); -long do_symlinkat(const char __user *oldname, int newdfd, +static long do_symlinkat(const char __user *oldname, int newdfd, const char __user *newname) { int error; diff --git a/include/linux/init_syscalls.h b/include/linux/init_syscalls.h index 5ca15a5b55b7..125f55ae3f80 100644 --- a/include/linux/init_syscalls.h +++ b/include/linux/init_syscalls.h @@ -9,5 +9,6 @@ int __init init_chown(const char *filename, uid_t user, gid_t group, int flags); int __init init_chmod(const char *filename, umode_t mode); int __init init_eaccess(const char *filename); int __init init_link(const char *oldname, const char *newname); +int __init init_symlink(const char *oldname, const char *newname); int __init init_unlink(const char *pathname); int __init init_rmdir(const char *pathname); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 4b18b91ce465..7cdc0d749a04 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1277,15 +1277,6 @@ static inline long ksys_mkdir(const char __user *pathname, umode_t mode) return do_mkdirat(AT_FDCWD, pathname, mode); } -extern long do_symlinkat(const char __user *oldname, int newdfd, - const char __user *newname); - -static inline long ksys_symlink(const char __user *oldname, - const char __user *newname) -{ - return do_symlinkat(oldname, AT_FDCWD, newname); -} - extern long do_mknodat(int dfd, const char __user *filename, umode_t mode, unsigned int dev); diff --git a/init/initramfs.c b/init/initramfs.c index 516a66900d7a..c91fc9a51d2a 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -392,7 +392,7 @@ static int __init do_symlink(void) { collected[N_ALIGN(name_len) + body_len] = '\0'; clean_path(collected, 0); - ksys_symlink(collected + N_ALIGN(name_len), collected); + init_symlink(collected + N_ALIGN(name_len), collected); init_chown(collected, uid, gid, 
AT_SYMLINK_NOFOLLOW); do_utime(collected, mtime); state = SkipIt; -- cgit v1.2.3 From 83ff98c3e9cd2b82b4289e185f2ce7d635a9cbd3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 22 Jul 2020 11:14:59 +0200 Subject: init: add an init_mkdir helper Add a simple helper to mkdir with a kernel space file name and switch the early init code over to it. Remove the now unused ksys_mkdir. Signed-off-by: Christoph Hellwig --- fs/init.c | 18 ++++++++++++++++++ fs/internal.h | 1 - fs/namei.c | 2 +- include/linux/init_syscalls.h | 1 + include/linux/syscalls.h | 7 ------- init/do_mounts_initrd.c | 2 +- init/initramfs.c | 2 +- init/noinitramfs.c | 5 +++-- 8 files changed, 25 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/fs/init.c b/fs/init.c index 09ef2b58d48c..127033d08426 100644 --- a/fs/init.c +++ b/fs/init.c @@ -176,6 +176,24 @@ int __init init_unlink(const char *pathname) return do_unlinkat(AT_FDCWD, getname_kernel(pathname)); } +int __init init_mkdir(const char *pathname, umode_t mode) +{ + struct dentry *dentry; + struct path path; + int error; + + dentry = kern_path_create(AT_FDCWD, pathname, &path, LOOKUP_DIRECTORY); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + if (!IS_POSIXACL(path.dentry->d_inode)) + mode &= ~current_umask(); + error = security_path_mkdir(&path, dentry, mode); + if (!error) + error = vfs_mkdir(path.dentry->d_inode, dentry, mode); + done_path_create(&path, dentry); + return error; +} + int __init init_rmdir(const char *pathname) { return do_rmdir(AT_FDCWD, getname_kernel(pathname)); diff --git a/fs/internal.h b/fs/internal.h index 40b50a222d7a..4741e591e923 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -64,7 +64,6 @@ extern int vfs_path_lookup(struct dentry *, struct vfsmount *, const char *, unsigned int, struct path *); long do_mknodat(int dfd, const char __user *filename, umode_t mode, unsigned int dev); -long do_mkdirat(int dfd, const char __user *pathname, umode_t mode); long do_rmdir(int dfd, struct filename *name); long do_unlinkat(int dfd, struct filename *name); int may_linkat(struct path *link); diff --git a/fs/namei.c b/fs/namei.c index 2f6fa53eb3da..d6b25dd32f4d 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -3645,7 +3645,7 @@ int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) } EXPORT_SYMBOL(vfs_mkdir); -long do_mkdirat(int dfd, const char __user *pathname, umode_t mode) +static long do_mkdirat(int dfd, const char __user *pathname, umode_t mode) { struct dentry *dentry; struct path path; diff --git a/include/linux/init_syscalls.h b/include/linux/init_syscalls.h index 125f55ae3f80..d808985231f8 100644 --- a/include/linux/init_syscalls.h +++ b/include/linux/init_syscalls.h @@ -11,4 +11,5 @@ int __init init_eaccess(const char *filename); int __init init_link(const char *oldname, const char *newname); int __init init_symlink(const char *oldname, const char *newname); int __init init_unlink(const char *pathname); +int __init init_mkdir(const char *pathname, umode_t mode); int __init init_rmdir(const char *pathname); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 7cdc0d749a04..5ef77a91382a 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1270,13 +1270,6 @@ int compat_ksys_ipc(u32 call, int first, int second, * The following kernel syscall equivalents are just wrappers to fs-internal * functions. Therefore, provide stubs to be inlined at the callsites. 
*/ -extern long do_mkdirat(int dfd, const char __user *pathname, umode_t mode); - -static inline long ksys_mkdir(const char __user *pathname, umode_t mode) -{ - return do_mkdirat(AT_FDCWD, pathname, mode); -} - extern long do_mknodat(int dfd, const char __user *filename, umode_t mode, unsigned int dev); diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c index a6b447b191db..3f5ac81913dd 100644 --- a/init/do_mounts_initrd.c +++ b/init/do_mounts_initrd.c @@ -81,7 +81,7 @@ static void __init handle_initrd(void) create_dev("/dev/root.old", Root_RAM0); /* mount initrd on rootfs' /root */ mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY); - ksys_mkdir("/old", 0700); + init_mkdir("/old", 0700); init_chdir("/old"); /* diff --git a/init/initramfs.c b/init/initramfs.c index c91fc9a51d2a..0489eb65b3b8 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -348,7 +348,7 @@ static int __init do_name(void) state = CopyFile; } } else if (S_ISDIR(mode)) { - ksys_mkdir(collected, mode); + init_mkdir(collected, mode); init_chown(collected, uid, gid, 0); init_chmod(collected, mode); dir_add(collected, mtime); diff --git a/init/noinitramfs.c b/init/noinitramfs.c index fa9cdfa7101d..94cc4df74b11 100644 --- a/init/noinitramfs.c +++ b/init/noinitramfs.c @@ -9,6 +9,7 @@ #include #include #include +#include /* * Create a simple rootfs that is similar to the default initramfs @@ -17,7 +18,7 @@ static int __init default_rootfs(void) { int err; - err = ksys_mkdir((const char __user __force *) "/dev", 0755); + err = init_mkdir("/dev", 0755); if (err < 0) goto out; @@ -27,7 +28,7 @@ static int __init default_rootfs(void) if (err < 0) goto out; - err = ksys_mkdir((const char __user __force *) "/root", 0700); + err = init_mkdir("/root", 0700); if (err < 0) goto out; -- cgit v1.2.3 From 5fee64fcde0770c41e926ff981022eaa512d8980 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 22 Jul 2020 11:41:20 +0200 Subject: init: add an init_mknod helper Add a simple helper to mknod with a kernel space file name and switch the early init code over to it. Remove the now unused ksys_mknod. 
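With this in place, create_dev() in init/do_mounts.h reduces to the following (shown under the illustrative name make_block_node(); dev is a dev_t such as Root_RAM0):

#include <linux/init.h>
#include <linux/init_syscalls.h>
#include <linux/kdev_t.h>       /* new_encode_dev() */
#include <linux/stat.h>         /* S_IFBLK */

static int __init make_block_node(char *name, dev_t dev)
{
        init_unlink(name);      /* ignore errors: the node may not exist yet */
        return init_mknod(name, S_IFBLK | 0600, new_encode_dev(dev));
}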
Signed-off-by: Christoph Hellwig --- fs/init.c | 25 +++++++++++++++++++++++++ fs/internal.h | 2 -- fs/namei.c | 2 +- include/linux/init_syscalls.h | 1 + include/linux/syscalls.h | 9 --------- init/do_mounts.h | 2 +- init/initramfs.c | 2 +- init/noinitramfs.c | 3 +-- 8 files changed, 30 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/fs/init.c b/fs/init.c index 127033d08426..145fb31b7a5f 100644 --- a/fs/init.c +++ b/fs/init.c @@ -122,6 +122,31 @@ int __init init_eaccess(const char *filename) return error; } +int __init init_mknod(const char *filename, umode_t mode, unsigned int dev) +{ + struct dentry *dentry; + struct path path; + int error; + + if (S_ISFIFO(mode) || S_ISSOCK(mode)) + dev = 0; + else if (!(S_ISBLK(mode) || S_ISCHR(mode))) + return -EINVAL; + + dentry = kern_path_create(AT_FDCWD, filename, &path, 0); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + + if (!IS_POSIXACL(path.dentry->d_inode)) + mode &= ~current_umask(); + error = security_path_mknod(&path, dentry, mode, dev); + if (!error) + error = vfs_mknod(path.dentry->d_inode, dentry, mode, + new_decode_dev(dev)); + done_path_create(&path, dentry); + return error; +} + int __init init_link(const char *oldname, const char *newname) { struct dentry *new_dentry; diff --git a/fs/internal.h b/fs/internal.h index 4741e591e923..07e145b2f88c 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -62,8 +62,6 @@ extern int filename_lookup(int dfd, struct filename *name, unsigned flags, struct path *path, struct path *root); extern int vfs_path_lookup(struct dentry *, struct vfsmount *, const char *, unsigned int, struct path *); -long do_mknodat(int dfd, const char __user *filename, umode_t mode, - unsigned int dev); long do_rmdir(int dfd, struct filename *name); long do_unlinkat(int dfd, struct filename *name); int may_linkat(struct path *link); diff --git a/fs/namei.c b/fs/namei.c index d6b25dd32f4d..fde8fe086c09 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -3564,7 +3564,7 @@ static int may_mknod(umode_t mode) } } -long do_mknodat(int dfd, const char __user *filename, umode_t mode, +static long do_mknodat(int dfd, const char __user *filename, umode_t mode, unsigned int dev) { struct dentry *dentry; diff --git a/include/linux/init_syscalls.h b/include/linux/init_syscalls.h index d808985231f8..fa1fe7a87779 100644 --- a/include/linux/init_syscalls.h +++ b/include/linux/init_syscalls.h @@ -8,6 +8,7 @@ int __init init_chroot(const char *filename); int __init init_chown(const char *filename, uid_t user, gid_t group, int flags); int __init init_chmod(const char *filename, umode_t mode); int __init init_eaccess(const char *filename); +int __init init_mknod(const char *filename, umode_t mode, unsigned int dev); int __init init_link(const char *oldname, const char *newname); int __init init_symlink(const char *oldname, const char *newname); int __init init_unlink(const char *pathname); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 5ef77a91382a..63046c5e9fc5 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1270,15 +1270,6 @@ int compat_ksys_ipc(u32 call, int first, int second, * The following kernel syscall equivalents are just wrappers to fs-internal * functions. Therefore, provide stubs to be inlined at the callsites. 
*/ -extern long do_mknodat(int dfd, const char __user *filename, umode_t mode, - unsigned int dev); - -static inline long ksys_mknod(const char __user *filename, umode_t mode, - unsigned int dev) -{ - return do_mknodat(AT_FDCWD, filename, mode, dev); -} - extern int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group, int flag); diff --git a/init/do_mounts.h b/init/do_mounts.h index 104d8431725a..7a29ac3e427b 100644 --- a/init/do_mounts.h +++ b/init/do_mounts.h @@ -17,7 +17,7 @@ extern int root_mountflags; static inline __init int create_dev(char *name, dev_t dev) { init_unlink(name); - return ksys_mknod(name, S_IFBLK|0600, new_encode_dev(dev)); + return init_mknod(name, S_IFBLK | 0600, new_encode_dev(dev)); } #ifdef CONFIG_BLK_DEV_RAM diff --git a/init/initramfs.c b/init/initramfs.c index 0489eb65b3b8..425addaf7c69 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -355,7 +355,7 @@ static int __init do_name(void) } else if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) { if (maybe_link() == 0) { - ksys_mknod(collected, mode, rdev); + init_mknod(collected, mode, rdev); init_chown(collected, uid, gid, 0); init_chmod(collected, mode); do_utime(collected, mtime); diff --git a/init/noinitramfs.c b/init/noinitramfs.c index 94cc4df74b11..3d62b07f3bb9 100644 --- a/init/noinitramfs.c +++ b/init/noinitramfs.c @@ -22,8 +22,7 @@ static int __init default_rootfs(void) if (err < 0) goto out; - err = ksys_mknod((const char __user __force *) "/dev/console", - S_IFCHR | S_IRUSR | S_IWUSR, + err = init_mknod("/dev/console", S_IFCHR | S_IRUSR | S_IWUSR, new_encode_dev(MKDEV(5, 1))); if (err < 0) goto out; -- cgit v1.2.3 From 716308a5331bf907b819f9db8dc942b19568f925 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 22 Jul 2020 11:15:40 +0200 Subject: init: add an init_stat helper Add a simple helper to stat with a kernel space file name and switch the early init code over to it. Signed-off-by: Christoph Hellwig --- drivers/md/md-autodetect.c | 3 ++- fs/init.c | 15 +++++++++++++++ include/linux/init_syscalls.h | 1 + init/initramfs.c | 3 ++- 4 files changed, 20 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/md/md-autodetect.c b/drivers/md/md-autodetect.c index 14b6e86814c0..6bbec89976a7 100644 --- a/drivers/md/md-autodetect.c +++ b/drivers/md/md-autodetect.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -151,7 +152,7 @@ static void __init md_setup_drive(struct md_setup_args *args) if (strncmp(devname, "/dev/", 5) == 0) devname += 5; snprintf(comp_name, 63, "/dev/%s", devname); - if (vfs_stat(comp_name, &stat) == 0 && S_ISBLK(stat.mode)) + if (init_stat(comp_name, &stat, 0) == 0 && S_ISBLK(stat.mode)) dev = new_decode_dev(stat.rdev); if (!dev) { pr_warn("md: Unknown device name: %s\n", devname); diff --git a/fs/init.c b/fs/init.c index 145fb31b7a5f..51646ba38099 100644 --- a/fs/init.c +++ b/fs/init.c @@ -122,6 +122,21 @@ int __init init_eaccess(const char *filename) return error; } +int __init init_stat(const char *filename, struct kstat *stat, int flags) +{ + int lookup_flags = (flags & AT_SYMLINK_NOFOLLOW) ? 
0 : LOOKUP_FOLLOW; + struct path path; + int error; + + error = kern_path(filename, lookup_flags, &path); + if (error) + return error; + error = vfs_getattr(&path, stat, STATX_BASIC_STATS, + flags | AT_NO_AUTOMOUNT); + path_put(&path); + return error; +} + int __init init_mknod(const char *filename, umode_t mode, unsigned int dev) { struct dentry *dentry; diff --git a/include/linux/init_syscalls.h b/include/linux/init_syscalls.h index fa1fe7a87779..b2fda50daca6 100644 --- a/include/linux/init_syscalls.h +++ b/include/linux/init_syscalls.h @@ -8,6 +8,7 @@ int __init init_chroot(const char *filename); int __init init_chown(const char *filename, uid_t user, gid_t group, int flags); int __init init_chmod(const char *filename, umode_t mode); int __init init_eaccess(const char *filename); +int __init init_stat(const char *filename, struct kstat *stat, int flags); int __init init_mknod(const char *filename, umode_t mode, unsigned int dev); int __init init_link(const char *oldname, const char *newname); int __init init_symlink(const char *oldname, const char *newname); diff --git a/init/initramfs.c b/init/initramfs.c index 425addaf7c69..744e111baba4 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -298,7 +298,8 @@ static void __init clean_path(char *path, umode_t fmode) { struct kstat st; - if (!vfs_lstat(path, &st) && (st.mode ^ fmode) & S_IFMT) { + if (init_stat(path, &st, AT_SYMLINK_NOFOLLOW) && + (st.mode ^ fmode) & S_IFMT) { if (S_ISDIR(st.mode)) init_rmdir(path); else -- cgit v1.2.3 From 235e57935bf328c4cce371ffc4dd1d8fab4885cd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 21 Jul 2020 16:05:31 +0200 Subject: init: add an init_utimes helper Add a simple helper to set timestamps with a kernel space file name and switch the early init code over to it. 
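The helper takes a two-element timespec64 array (access time, then modification time), which is how the do_utime() conversion below uses it; a minimal sketch with an illustrative wrapper name:

#include <linux/init.h>
#include <linux/init_syscalls.h>
#include <linux/time64.h>

static int __init set_both_times(char *filename, time64_t mtime)
{
        struct timespec64 t[2];

        t[0].tv_sec = mtime;    /* atime */
        t[0].tv_nsec = 0;
        t[1].tv_sec = mtime;    /* mtime */
        t[1].tv_nsec = 0;
        return init_utimes(filename, t);
}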
Signed-off-by: Christoph Hellwig --- fs/init.c | 13 +++++++++++++ include/linux/init_syscalls.h | 1 + init/initramfs.c | 3 +-- 3 files changed, 15 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/fs/init.c b/fs/init.c index 51646ba38099..db5c48a85644 100644 --- a/fs/init.c +++ b/fs/init.c @@ -238,3 +238,16 @@ int __init init_rmdir(const char *pathname) { return do_rmdir(AT_FDCWD, getname_kernel(pathname)); } + +int __init init_utimes(char *filename, struct timespec64 *ts) +{ + struct path path; + int error; + + error = kern_path(filename, 0, &path); + if (error) + return error; + error = vfs_utimes(&path, ts); + path_put(&path); + return error; +} diff --git a/include/linux/init_syscalls.h b/include/linux/init_syscalls.h index b2fda50daca6..3654b525ac0b 100644 --- a/include/linux/init_syscalls.h +++ b/include/linux/init_syscalls.h @@ -15,3 +15,4 @@ int __init init_symlink(const char *oldname, const char *newname); int __init init_unlink(const char *pathname); int __init init_mkdir(const char *pathname, umode_t mode); int __init init_rmdir(const char *pathname); +int __init init_utimes(char *filename, struct timespec64 *ts); diff --git a/init/initramfs.c b/init/initramfs.c index 744e111baba4..e6dbfb767057 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -111,8 +111,7 @@ static long __init do_utime(char *filename, time64_t mtime) t[0].tv_nsec = 0; t[1].tv_sec = mtime; t[1].tv_nsec = 0; - - return do_utimes(AT_FDCWD, filename, t, AT_SYMLINK_NOFOLLOW); + return init_utimes(filename, t); } static __initdata LIST_HEAD(dir_list); -- cgit v1.2.3 From df78a0c0b67de58934877aad61e0431a2bd0caf1 Mon Sep 17 00:00:00 2001 From: Thomas Pedersen Date: Mon, 1 Jun 2020 23:22:47 -0700 Subject: nl80211: S1G band and channel definitions Gives drivers the definitions needed to advertise support for S1G bands. 
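A sketch of what a driver advertising S1G support might register once these definitions exist (all names and values here are illustrative; a real driver fills the channel list and capability bytes from its hardware):

#include <linux/kernel.h>
#include <net/cfg80211.h>

static struct ieee80211_channel s1g_channels[1];        /* populated at probe time */

static struct ieee80211_supported_band s1g_band = {
        .band       = NL80211_BAND_S1GHZ,
        .channels   = s1g_channels,
        .n_channels = ARRAY_SIZE(s1g_channels),
        .s1g_cap    = {
                .s1g = true,
                /* .cap[] and .nss_mcs[] come from the device's S1G capabilities */
        },
};

Per the cfg80211 change below, S1G channel numbers map to frequencies as 902000 kHz plus 500 kHz per channel, so for example channel 5 sits at 904.5 MHz.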
Signed-off-by: Thomas Pedersen Link: https://lore.kernel.org/r/20200602062247.23212-1-thomas@adapt-ip.com Link: https://lore.kernel.org/r/20200731055636.795173-1-thomas@adapt-ip.com Signed-off-by: Johannes Berg --- drivers/net/wireless/ath/ath10k/mac.c | 9 ++------- include/net/cfg80211.h | 17 +++++++++++++++++ include/uapi/linux/nl80211.h | 16 ++++++++++++++++ net/mac80211/chan.c | 7 ++++++- net/mac80211/scan.c | 1 + net/mac80211/tx.c | 1 + net/mac80211/util.c | 5 +++++ net/wireless/chan.c | 35 +++++++++++++++++++++++++++++++++++ net/wireless/core.c | 5 +++-- net/wireless/util.c | 8 ++++++++ 10 files changed, 94 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 919d15584d4a..3c0c33a9f30c 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -568,11 +568,7 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef) case NL80211_CHAN_WIDTH_40: phymode = MODE_11NG_HT40; break; - case NL80211_CHAN_WIDTH_5: - case NL80211_CHAN_WIDTH_10: - case NL80211_CHAN_WIDTH_80: - case NL80211_CHAN_WIDTH_80P80: - case NL80211_CHAN_WIDTH_160: + default: phymode = MODE_UNKNOWN; break; } @@ -597,8 +593,7 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef) case NL80211_CHAN_WIDTH_80P80: phymode = MODE_11AC_VHT80_80; break; - case NL80211_CHAN_WIDTH_5: - case NL80211_CHAN_WIDTH_10: + default: phymode = MODE_UNKNOWN; break; } diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index fc7e8807838d..ac6e58193426 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -417,6 +417,22 @@ struct ieee80211_edmg { enum ieee80211_edmg_bw_config bw_config; }; +/** + * struct ieee80211_sta_s1g_cap - STA's S1G capabilities + * + * This structure describes most essential parameters needed + * to describe 802.11ah S1G capabilities for a STA. 
+ * + * @s1g_supported: is STA an S1G STA + * @cap: S1G capabilities information + * @nss_mcs: Supported NSS MCS set + */ +struct ieee80211_sta_s1g_cap { + bool s1g; + u8 cap[10]; /* use S1G_CAPAB_ */ + u8 nss_mcs[5]; +}; + /** * struct ieee80211_supported_band - frequency band definition * @@ -448,6 +464,7 @@ struct ieee80211_supported_band { int n_bitrates; struct ieee80211_sta_ht_cap ht_cap; struct ieee80211_sta_vht_cap vht_cap; + struct ieee80211_sta_s1g_cap s1g_cap; struct ieee80211_edmg edmg_cap; u16 n_iftype_data; const struct ieee80211_sband_iftype_data *iftype_data; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 4e6339ab1fce..ad183469f9af 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -4437,6 +4437,11 @@ enum nl80211_key_mode { * attribute must be provided as well * @NL80211_CHAN_WIDTH_5: 5 MHz OFDM channel * @NL80211_CHAN_WIDTH_10: 10 MHz OFDM channel + * @NL80211_CHAN_WIDTH_1: 1 MHz OFDM channel + * @NL80211_CHAN_WIDTH_2: 2 MHz OFDM channel + * @NL80211_CHAN_WIDTH_4: 4 MHz OFDM channel + * @NL80211_CHAN_WIDTH_8: 8 MHz OFDM channel + * @NL80211_CHAN_WIDTH_16: 16 MHz OFDM channel */ enum nl80211_chan_width { NL80211_CHAN_WIDTH_20_NOHT, @@ -4447,6 +4452,11 @@ enum nl80211_chan_width { NL80211_CHAN_WIDTH_160, NL80211_CHAN_WIDTH_5, NL80211_CHAN_WIDTH_10, + NL80211_CHAN_WIDTH_1, + NL80211_CHAN_WIDTH_2, + NL80211_CHAN_WIDTH_4, + NL80211_CHAN_WIDTH_8, + NL80211_CHAN_WIDTH_16, }; /** @@ -4457,11 +4467,15 @@ enum nl80211_chan_width { * @NL80211_BSS_CHAN_WIDTH_20: control channel is 20 MHz wide or compatible * @NL80211_BSS_CHAN_WIDTH_10: control channel is 10 MHz wide * @NL80211_BSS_CHAN_WIDTH_5: control channel is 5 MHz wide + * @NL80211_BSS_CHAN_WIDTH_1: control channel is 1 MHz wide + * @NL80211_BSS_CHAN_WIDTH_2: control channel is 2 MHz wide */ enum nl80211_bss_scan_width { NL80211_BSS_CHAN_WIDTH_20, NL80211_BSS_CHAN_WIDTH_10, NL80211_BSS_CHAN_WIDTH_5, + NL80211_BSS_CHAN_WIDTH_1, + NL80211_BSS_CHAN_WIDTH_2, }; /** @@ -4740,6 +4754,7 @@ enum nl80211_txrate_gi { * @NL80211_BAND_5GHZ: around 5 GHz band (4.9 - 5.7 GHz) * @NL80211_BAND_60GHZ: around 60 GHz band (58.32 - 69.12 GHz) * @NL80211_BAND_6GHZ: around 6 GHz band (5.9 - 7.2 GHz) + * @NL80211_BAND_S1GHZ: around 900MHz, supported by S1G PHYs * @NUM_NL80211_BANDS: number of bands, avoid using this in userspace * since newer kernel versions may support more bands */ @@ -4748,6 +4763,7 @@ enum nl80211_band { NL80211_BAND_5GHZ, NL80211_BAND_60GHZ, NL80211_BAND_6GHZ, + NL80211_BAND_S1GHZ, NUM_NL80211_BANDS, }; diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index e6e192f53e4e..08cf9da9c1e3 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -313,9 +313,14 @@ void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, lockdep_assert_held(&local->chanctx_mtx); - /* don't optimize 5MHz, 10MHz, and radar_enabled confs */ + /* don't optimize non-20MHz based and radar_enabled confs */ if (ctx->conf.def.width == NL80211_CHAN_WIDTH_5 || ctx->conf.def.width == NL80211_CHAN_WIDTH_10 || + ctx->conf.def.width == NL80211_CHAN_WIDTH_1 || + ctx->conf.def.width == NL80211_CHAN_WIDTH_2 || + ctx->conf.def.width == NL80211_CHAN_WIDTH_4 || + ctx->conf.def.width == NL80211_CHAN_WIDTH_8 || + ctx->conf.def.width == NL80211_CHAN_WIDTH_16 || ctx->conf.radar_enabled) { ctx->conf.min_def = ctx->conf.def; return; diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index ad90bbe57457..8003be6dae8a 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -913,6 +913,7 @@ 
static void ieee80211_scan_state_set_channel(struct ieee80211_local *local, case NL80211_BSS_CHAN_WIDTH_10: local->scan_chandef.width = NL80211_CHAN_WIDTH_10; break; + default: case NL80211_BSS_CHAN_WIDTH_20: /* If scanning on oper channel, use whatever channel-type * is currently in use. diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 1a2941e5244f..ee30ef441f4a 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -166,6 +166,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, if (r->flags & IEEE80211_RATE_MANDATORY_A) mrate = r->bitrate; break; + case NL80211_BAND_S1GHZ: case NL80211_BAND_60GHZ: /* TODO, for now fall through */ case NUM_NL80211_BANDS: diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 21c94094a699..64a83ecd0a73 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -3730,6 +3730,11 @@ u32 ieee80211_chandef_downgrade(struct cfg80211_chan_def *c) c->width = NL80211_CHAN_WIDTH_20_NOHT; ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT; break; + case NL80211_CHAN_WIDTH_1: + case NL80211_CHAN_WIDTH_2: + case NL80211_CHAN_WIDTH_4: + case NL80211_CHAN_WIDTH_8: + case NL80211_CHAN_WIDTH_16: case NL80211_CHAN_WIDTH_5: case NL80211_CHAN_WIDTH_10: WARN_ON_ONCE(1); diff --git a/net/wireless/chan.c b/net/wireless/chan.c index cddf92c5d09e..90f0f82cd9ca 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -153,6 +153,11 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef) control_freq = chandef->chan->center_freq; switch (chandef->width) { + case NL80211_CHAN_WIDTH_1: + case NL80211_CHAN_WIDTH_2: + case NL80211_CHAN_WIDTH_4: + case NL80211_CHAN_WIDTH_8: + case NL80211_CHAN_WIDTH_16: case NL80211_CHAN_WIDTH_5: case NL80211_CHAN_WIDTH_10: case NL80211_CHAN_WIDTH_20: @@ -263,6 +268,21 @@ static int cfg80211_chandef_get_width(const struct cfg80211_chan_def *c) int width; switch (c->width) { + case NL80211_CHAN_WIDTH_1: + width = 1; + break; + case NL80211_CHAN_WIDTH_2: + width = 2; + break; + case NL80211_CHAN_WIDTH_4: + width = 4; + break; + case NL80211_CHAN_WIDTH_8: + width = 8; + break; + case NL80211_CHAN_WIDTH_16: + width = 16; + break; case NL80211_CHAN_WIDTH_5: width = 5; break; @@ -911,6 +931,21 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy, control_freq = chandef->chan->center_freq; switch (chandef->width) { + case NL80211_CHAN_WIDTH_1: + width = 1; + break; + case NL80211_CHAN_WIDTH_2: + width = 2; + break; + case NL80211_CHAN_WIDTH_4: + width = 4; + break; + case NL80211_CHAN_WIDTH_8: + width = 8; + break; + case NL80211_CHAN_WIDTH_16: + width = 16; + break; case NL80211_CHAN_WIDTH_5: width = 5; break; diff --git a/net/wireless/core.c b/net/wireless/core.c index c623d9bf5096..1971d7e6eb55 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -803,10 +803,11 @@ int wiphy_register(struct wiphy *wiphy) if (WARN_ON(!sband->n_channels)) return -EINVAL; /* - * on 60GHz band, there are no legacy rates, so + * on 60GHz or sub-1Ghz band, there are no legacy rates, so * n_bitrates is 0 */ - if (WARN_ON(band != NL80211_BAND_60GHZ && + if (WARN_ON((band != NL80211_BAND_60GHZ && + band != NL80211_BAND_S1GHZ) && !sband->n_bitrates)) return -EINVAL; diff --git a/net/wireless/util.c b/net/wireless/util.c index 4d3b76f94f55..26a977343c3b 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -102,6 +102,8 @@ u32 ieee80211_channel_to_freq_khz(int chan, enum nl80211_band band) if (chan < 7) return MHZ_TO_KHZ(56160 + chan * 2160); break; + case NL80211_BAND_S1GHZ: + return 902000 + chan * 500; 
default: ; } @@ -210,6 +212,12 @@ static void set_mandatory_flags_band(struct ieee80211_supported_band *sband) WARN_ON(!sband->ht_cap.ht_supported); WARN_ON((sband->ht_cap.mcs.rx_mask[0] & 0x1e) != 0x1e); break; + case NL80211_BAND_S1GHZ: + /* Figure 9-589bd: 3 means unsupported, so != 3 means at least + * mandatory is ok. + */ + WARN_ON((sband->s1g_cap.nss_mcs[0] & 0x3) == 0x3); + break; case NUM_NL80211_BANDS: default: WARN_ON(1); -- cgit v1.2.3 From 987021726f9f41a1daf335c57cd7b6261109cdb2 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 09:43:21 -0700 Subject: net/wireless: nl80211.h: drop duplicate words in comments Drop doubled words in several comments. Signed-off-by: Randy Dunlap Cc: netdev@vger.kernel.org Cc: Kalle Valo Cc: linux-wireless@vger.kernel.org Cc: Johannes Berg Link: https://lore.kernel.org/r/20200715164325.9109-1-rdunlap@infradead.org Signed-off-by: Johannes Berg --- include/uapi/linux/nl80211.h | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index ad183469f9af..f47a7a8d0216 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -363,7 +363,7 @@ * @NL80211_CMD_SET_STATION: Set station attributes for station identified by * %NL80211_ATTR_MAC on the interface identified by %NL80211_ATTR_IFINDEX. * @NL80211_CMD_NEW_STATION: Add a station with given attributes to the - * the interface identified by %NL80211_ATTR_IFINDEX. + * interface identified by %NL80211_ATTR_IFINDEX. * @NL80211_CMD_DEL_STATION: Remove a station identified by %NL80211_ATTR_MAC * or, if no MAC address given, all stations, on the interface identified * by %NL80211_ATTR_IFINDEX. %NL80211_ATTR_MGMT_SUBTYPE and @@ -383,7 +383,7 @@ * @NL80211_CMD_DEL_MPATH: Delete a mesh path to the destination given by * %NL80211_ATTR_MAC. * @NL80211_CMD_NEW_PATH: Add a mesh path with given attributes to the - * the interface identified by %NL80211_ATTR_IFINDEX. + * interface identified by %NL80211_ATTR_IFINDEX. * @NL80211_CMD_DEL_PATH: Remove a mesh path identified by %NL80211_ATTR_MAC * or, if no MAC address given, all mesh paths, on the interface identified * by %NL80211_ATTR_IFINDEX. @@ -934,7 +934,7 @@ * @NL80211_CMD_SET_COALESCE: Configure coalesce rules or clear existing rules. * * @NL80211_CMD_CHANNEL_SWITCH: Perform a channel switch by announcing the - * the new channel information (Channel Switch Announcement - CSA) + * new channel information (Channel Switch Announcement - CSA) * in the beacon for some time (as defined in the * %NL80211_ATTR_CH_SWITCH_COUNT parameter) and then change to the * new channel. Userspace provides the new channel information (using @@ -1113,7 +1113,7 @@ * randomization may be enabled and configured by specifying the * %NL80211_ATTR_MAC and %NL80211_ATTR_MAC_MASK attributes. * If a timeout is requested, use the %NL80211_ATTR_TIMEOUT attribute. - * A u64 cookie for further %NL80211_ATTR_COOKIE use is is returned in + * A u64 cookie for further %NL80211_ATTR_COOKIE use is returned in * the netlink extended ack message. * * To cancel a measurement, close the socket that requested it. @@ -1511,7 +1511,7 @@ enum nl80211_commands { * rates as defined by IEEE 802.11 7.3.2.2 but without the length * restriction (at most %NL80211_MAX_SUPP_RATES). * @NL80211_ATTR_STA_VLAN: interface index of VLAN interface to move station - * to, or the AP interface the station was originally added to to. 
+ * to, or the AP interface the station was originally added to. * @NL80211_ATTR_STA_INFO: information about a station, part of station info * given for %NL80211_CMD_GET_STATION, nested attribute containing * info as possible, see &enum nl80211_sta_info. @@ -2084,7 +2084,7 @@ enum nl80211_commands { * @NL80211_ATTR_STA_SUPPORTED_CHANNELS: array of supported channels. * * @NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES: array of supported - * supported operating classes. + * operating classes. * * @NL80211_ATTR_HANDLE_DFS: A flag indicating whether user space * controls DFS operation in IBSS mode. If the flag is included in @@ -2395,7 +2395,7 @@ enum nl80211_commands { * nl80211_txq_stats) * @NL80211_ATTR_TXQ_LIMIT: Total packet limit for the TXQ queues for this phy. * The smaller of this and the memory limit is enforced. - * @NL80211_ATTR_TXQ_MEMORY_LIMIT: Total memory memory limit (in bytes) for the + * @NL80211_ATTR_TXQ_MEMORY_LIMIT: Total memory limit (in bytes) for the * TXQ queues for this phy. The smaller of this and the packet limit is * enforced. * @NL80211_ATTR_TXQ_QUANTUM: TXQ scheduler quantum (bytes). Number of bytes @@ -5652,7 +5652,7 @@ enum nl80211_feature_flags { * enum nl80211_ext_feature_index - bit index of extended features. * @NL80211_EXT_FEATURE_VHT_IBSS: This driver supports IBSS with VHT datarates. * @NL80211_EXT_FEATURE_RRM: This driver supports RRM. When featured, user can - * can request to use RRM (see %NL80211_ATTR_USE_RRM) with + * request to use RRM (see %NL80211_ATTR_USE_RRM) with * %NL80211_CMD_ASSOCIATE and %NL80211_CMD_CONNECT requests, which will set * the ASSOC_REQ_USE_RRM flag in the association request even if * NL80211_FEATURE_QUIET is not advertized. @@ -6061,7 +6061,7 @@ enum nl80211_dfs_state { }; /** - * enum enum nl80211_protocol_features - nl80211 protocol features + * enum nl80211_protocol_features - nl80211 protocol features * @NL80211_PROTOCOL_FEATURE_SPLIT_WIPHY_DUMP: nl80211 supports splitting * wiphy dumps (if requested by the application with the attribute * %NL80211_ATTR_SPLIT_WIPHY_DUMP. Also supported is filtering the -- cgit v1.2.3 From 0f55c0c500f2bbfc5cc5590cdf6973b3f64dc195 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 09:43:22 -0700 Subject: net/wireless: wireless.h: drop duplicate word in comments Drop doubled word "threshold" in a comment. 
Signed-off-by: Randy Dunlap Cc: netdev@vger.kernel.org Cc: Kalle Valo Cc: linux-wireless@vger.kernel.org Cc: Johannes Berg Link: https://lore.kernel.org/r/20200715164325.9109-2-rdunlap@infradead.org Signed-off-by: Johannes Berg --- include/uapi/linux/wireless.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/linux/wireless.h b/include/uapi/linux/wireless.h index 24f3371ad826..08967b3f19c8 100644 --- a/include/uapi/linux/wireless.h +++ b/include/uapi/linux/wireless.h @@ -914,7 +914,7 @@ union iwreq_data { struct iw_param sens; /* signal level threshold */ struct iw_param bitrate; /* default bit rate */ struct iw_param txpower; /* default transmit power */ - struct iw_param rts; /* RTS threshold threshold */ + struct iw_param rts; /* RTS threshold */ struct iw_param frag; /* Fragmentation threshold */ __u32 mode; /* Operation mode */ struct iw_param retry; /* Retry limits & lifetime */ -- cgit v1.2.3 From 085a6c109b9dbcb6dfc0c7d1001f554a6d513342 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 09:43:23 -0700 Subject: net/wireless: cfg80211.h: drop duplicate words in comments Drop doubled word "by" in a comment. Change "operate in in" to "operate with in" as is used below. Signed-off-by: Randy Dunlap Cc: netdev@vger.kernel.org Cc: Kalle Valo Cc: linux-wireless@vger.kernel.org Cc: Johannes Berg Link: https://lore.kernel.org/r/20200715164325.9109-3-rdunlap@infradead.org Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index ac6e58193426..5c7ea0b5dc50 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -439,7 +439,7 @@ struct ieee80211_sta_s1g_cap { * This structure describes a frequency band a wiphy * is able to operate in. * - * @channels: Array of channels the hardware can operate in + * @channels: Array of channels the hardware can operate with * in this band. * @band: the band this structure represents * @n_channels: Number of channels in @channels @@ -5527,7 +5527,7 @@ static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, * * @skb: The input A-MSDU frame without any headers. * @list: The output list of 802.3 frames. It must be allocated and - * initialized by by the caller. + * initialized by the caller. * @addr: The device MAC address. * @iftype: The device interface type. * @extra_headroom: The hardware extra headroom for SKBs in the @list. -- cgit v1.2.3 From 66b239d28c7524324d2474b3faf206d00eadcd78 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 09:43:24 -0700 Subject: net/wireless: mac80211.h: drop duplicate words in comments Drop doubled words "are" and "by" in comments. Change doubled "to to" to "to the". Signed-off-by: Randy Dunlap Cc: netdev@vger.kernel.org Cc: Kalle Valo Cc: linux-wireless@vger.kernel.org Cc: Johannes Berg Link: https://lore.kernel.org/r/20200715164325.9109-4-rdunlap@infradead.org Signed-off-by: Johannes Berg --- include/net/mac80211.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 11d5610d2ad5..c0a597167f14 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -2727,7 +2727,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); * for devices that support offload of data packets (e.g. ARP responses). 
* * Mac80211 drivers should set the @NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 flag - * when they are able to replace in-use PTK keys according to to following + * when they are able to replace in-use PTK keys according to the following * requirements: * 1) They do not hand over frames decrypted with the old key to mac80211 once the call to set_key() with command %DISABLE_KEY has been @@ -4709,7 +4709,7 @@ void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, * * Call this function for all transmitted data frames after their transmit * completion. This callback should only be called for data frames which - * are are using driver's (or hardware's) offload capability of encap/decap + * are using driver's (or hardware's) offload capability of encap/decap * 802.11 frames. * * This function may not be called in IRQ context. Calls to this function @@ -6344,7 +6344,7 @@ void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid); * * Note that this must be called in an rcu_read_lock() critical section, * which can only be released after the SKB was handled. Some pointers in - * skb->cb, e.g. the key pointer, are protected by by RCU and thus the + * skb->cb, e.g. the key pointer, are protected by RCU and thus the * critical section must persist not just for the duration of this call * but for the duration of the frame handling. * However, also note that while in the wake_tx_queue() method, -- cgit v1.2.3 From dec4ca931244632eeca46f406b2ce7f5a1fe723f Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 09:43:25 -0700 Subject: net/wireless: regulatory.h: drop duplicate word in comment Drop doubled word "of" in a comment. Signed-off-by: Randy Dunlap Cc: netdev@vger.kernel.org Cc: Kalle Valo Cc: linux-wireless@vger.kernel.org Cc: Johannes Berg Link: https://lore.kernel.org/r/20200715164325.9109-5-rdunlap@infradead.org Signed-off-by: Johannes Berg --- include/net/regulatory.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/regulatory.h b/include/net/regulatory.h index 09a3099886e5..47f06f6f5a67 100644 --- a/include/net/regulatory.h +++ b/include/net/regulatory.h @@ -44,7 +44,7 @@ enum environment_cap { * and potentially inform users of which devices specifically * cased the conflicts. * @initiator: indicates who sent this request, could be any of - * of those set in nl80211_reg_initiator (%NL80211_REGDOM_SET_BY_*) + * those set in nl80211_reg_initiator (%NL80211_REGDOM_SET_BY_*) * @alpha2: the ISO / IEC 3166 alpha2 country code of the requested * regulatory domain. We have a few special codes: * 00 - World regulatory domain -- cgit v1.2.3 From 2f1805ea209a146669e0b660633ed22f49e1dd49 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Thu, 25 Jun 2020 14:15:24 +0300 Subject: cfg80211: allow the low level driver to flush the BSS table The low level driver adds its own opaque information in the BSS table in the cfg80211_bss structure. The low level driver may need to signal that this information is no longer relevant and needs to be recreated. Add an API to allow the low level driver to do that. iwlwifi needs this because it keeps information there about the firmware's internal clock. This is kept in mac80211's struct ieee80211_bss::sync_device_ts. This information is populated while we scan: we add the internal firmware's clock to each beacon, which allows us to program the firmware correctly after association so that it'll know when (in terms of its internal clock) the DTIM and TBTT will happen.
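In driver terms the expected use is a single call once the firmware has been (re)started; a minimal sketch, with hypothetical mydrv_* names standing in for the driver's own structures:

/* Hedged sketch: drop all cached BSS entries once the firmware (and with
 * it its internal clock) has been restarted; the mydrv_* names are made
 * up for illustration only.
 */
struct mydrv_priv {
	struct ieee80211_hw *hw;
};

static void mydrv_fw_started(struct mydrv_priv *priv)
{
	/* per-BSS data derived from the old firmware clock is stale now */
	cfg80211_bss_flush(priv->hw->wiphy);
}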
When the firmware is reset this internal clock is reset as well and ieee80211_bss::sync_device_ts is no longer accurate. iwlwifi will call this new API any time the firmware is started. Signed-off-by: Emmanuel Grumbach Link: https://lore.kernel.org/r/20200625111524.3992-1-emmanuel.grumbach@intel.com Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 6 ++++++ net/wireless/scan.c | 10 ++++++++++ 2 files changed, 16 insertions(+) (limited to 'include') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 5c7ea0b5dc50..fa4d5627397f 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -7899,4 +7899,10 @@ void cfg80211_update_owe_info_event(struct net_device *netdev, struct cfg80211_update_owe_info *owe_info, gfp_t gfp); +/** + * cfg80211_bss_flush - resets all the scan entries + * @wiphy: the wiphy + */ +void cfg80211_bss_flush(struct wiphy *wiphy); + #endif /* __NET_CFG80211_H */ diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 74ea4cfb39fb..e67a74488bbe 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -712,6 +712,16 @@ void cfg80211_bss_expire(struct cfg80211_registered_device *rdev) __cfg80211_bss_expire(rdev, jiffies - IEEE80211_SCAN_RESULT_EXPIRE); } +void cfg80211_bss_flush(struct wiphy *wiphy) +{ + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); + + spin_lock_bh(&rdev->bss_lock); + __cfg80211_bss_expire(rdev, jiffies); + spin_unlock_bh(&rdev->bss_lock); +} +EXPORT_SYMBOL(cfg80211_bss_flush); + const struct element * cfg80211_find_elem_match(u8 eid, const u8 *ies, unsigned int len, const u8 *match, unsigned int match_len, -- cgit v1.2.3 From e3718a611470d311a92c60d4eb535270b49a7108 Mon Sep 17 00:00:00 2001 From: Linus Lüssing Date: Wed, 17 Jun 2020 09:30:33 +0200 Subject: cfg80211/mac80211: add mesh_param "mesh_nolearn" to skip path discovery MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, before being able to forward a packet between two 802.11s nodes, first a PLINK handshake is performed upon receiving a beacon and then later a PREQ/PREP exchange for path discovery is performed on demand upon receiving a data frame to forward. When running a mesh protocol on top of an 802.11s interface, like batman-adv, we do not need the multi-hop mesh routing capabilities of 802.11s and usually set mesh_fwding=0. However, even with mesh_fwding=0 the PREQ/PREP path discovery is still performed on demand. Even though in this scenario the next hop that PREQ/PREP will determine is always the direct 11s neighbor node. The new mesh_nolearn parameter allows skipping the PREQ/PREP exchange in this scenario, leading to reduced delay and packet buffering and simplifying HWMP in general. mesh_nolearn is still rather conservative in that if the packet destination is not a direct 11s neighbor, it will fall back to PREQ/PREP path discovery. For normal, multi-hop 802.11s mesh routing it is usually not advisable to enable mesh_nolearn as a transmission to a direct but distant neighbor might be worse than reaching that same node via a more robust / higher throughput etc. multi-hop path.
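The resulting forwarding decision can be summarized with the simplified sketch below; the real code is mesh_nexthop_lookup() together with the new mesh_nexthop_lookup_nolearn() in net/mac80211/mesh_hwmp.c (see the hunks further down), and hwmp_lookup_via_path_discovery() here is only a hypothetical stand-in for the normal PREQ/PREP-based lookup:

/* Simplified sketch of the nolearn short-cut, not the actual kernel code.
 * With dot11MeshNolearn set, try the direct 11s neighbor first and only
 * fall back to HWMP PREQ/PREP path discovery when that fails.
 */
static int nexthop_sketch(struct ieee80211_sub_if_data *sdata,
			  struct sk_buff *skb)
{
	if (sdata->u.mesh.mshcfg.dot11MeshNolearn &&
	    !mesh_nexthop_lookup_nolearn(sdata, skb))
		return 0;	/* meshDA is an established peer, RA set to it */

	/* hypothetical stand-in for the regular on-demand path lookup */
	return hwmp_lookup_via_path_discovery(sdata, skb);
}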
Cc: Sven Eckelmann Cc: Simon Wunderlich Signed-off-by: Linus Lüssing Link: https://lore.kernel.org/r/20200617073034.26149-1-linus.luessing@c0d3.blue [fix nl80211 policy to range 0/1 only] Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 6 ++++++ include/uapi/linux/nl80211.h | 7 +++++++ net/mac80211/cfg.c | 2 ++ net/mac80211/debugfs_netdev.c | 2 ++ net/mac80211/mesh_hwmp.c | 39 +++++++++++++++++++++++++++++++++++++++ net/wireless/mesh.c | 1 + net/wireless/nl80211.c | 7 ++++++- net/wireless/trace.h | 4 +++- 8 files changed, 66 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index fa4d5627397f..78b220950942 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1870,6 +1870,11 @@ struct bss_parameters { * connected to a mesh gate in mesh formation info. If false, the * value in mesh formation is determined by the presence of root paths * in the mesh path table + * @dot11MeshNolearn: Try to avoid multi-hop path discovery (e.g. PREQ/PREP + * for HWMP) if the destination is a direct neighbor. Note that this might + * not be the optimal decision as a multi-hop route might be better. So + * if using this setting you will likely also want to disable + * dot11MeshForwarding and use another mesh routing protocol on top. */ struct mesh_config { u16 dot11MeshRetryTimeout; @@ -1901,6 +1906,7 @@ struct mesh_config { enum nl80211_mesh_power_mode power_mode; u16 dot11MeshAwakeWindowDuration; u32 plink_timeout; + bool dot11MeshNolearn; }; /** diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index f47a7a8d0216..a83d8faf88ac 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -4236,6 +4236,12 @@ enum nl80211_mesh_power_mode { * field. If left unset then the mesh formation field will only * advertise such if there is an active root mesh path. * + * @NL80211_MESHCONF_NOLEARN: Try to avoid multi-hop path discovery (e.g. + * PREQ/PREP for HWMP) if the destination is a direct neighbor. Note that + * this might not be the optimal decision as a multi-hop route might be + * better. So if using this setting you will likely also want to disable + * dot11MeshForwarding and use another mesh routing protocol on top. 
+ * * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use */ enum nl80211_meshconf_params { @@ -4269,6 +4275,7 @@ enum nl80211_meshconf_params { NL80211_MESHCONF_AWAKE_WINDOW, NL80211_MESHCONF_PLINK_TIMEOUT, NL80211_MESHCONF_CONNECTED_TO_GATE, + NL80211_MESHCONF_NOLEARN, /* keep last */ __NL80211_MESHCONF_ATTR_AFTER_LAST, diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index b4a74064675e..9af56b848544 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2126,6 +2126,8 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy, if (_chg_mesh_attr(NL80211_MESHCONF_CONNECTED_TO_GATE, mask)) conf->dot11MeshConnectedToMeshGate = nconf->dot11MeshConnectedToMeshGate; + if (_chg_mesh_attr(NL80211_MESHCONF_NOLEARN, mask)) + conf->dot11MeshNolearn = nconf->dot11MeshNolearn; ieee80211_mbss_info_change_notify(sdata, BSS_CHANGED_BEACON); return 0; } diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index d7e955127d5c..09eab2c3f380 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -638,6 +638,7 @@ IEEE80211_IF_FILE(dot11MeshAwakeWindowDuration, u.mesh.mshcfg.dot11MeshAwakeWindowDuration, DEC); IEEE80211_IF_FILE(dot11MeshConnectedToMeshGate, u.mesh.mshcfg.dot11MeshConnectedToMeshGate, DEC); +IEEE80211_IF_FILE(dot11MeshNolearn, u.mesh.mshcfg.dot11MeshNolearn, DEC); #endif #define DEBUGFS_ADD_MODE(name, mode) \ @@ -762,6 +763,7 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata) MESHPARAMS_ADD(power_mode); MESHPARAMS_ADD(dot11MeshAwakeWindowDuration); MESHPARAMS_ADD(dot11MeshConnectedToMeshGate); + MESHPARAMS_ADD(dot11MeshNolearn); #undef MESHPARAMS_ADD } #endif diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index bae3a3e15b88..bec23d2eee7a 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -1172,6 +1172,40 @@ int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata, return -ENOENT; } +/** + * mesh_nexthop_lookup_nolearn - try to set next hop without path discovery + * @skb: 802.11 frame to be sent + * @sdata: network subif the frame will be sent through + * + * Check if the meshDA (addr3) of a unicast frame is a direct neighbor. + * And if so, set the RA (addr1) to it to transmit to this node directly, + * avoiding PREQ/PREP path discovery. + * + * Returns: 0 if the next hop was found and -ENOENT otherwise. + */ +static int mesh_nexthop_lookup_nolearn(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct sta_info *sta; + + if (is_multicast_ether_addr(hdr->addr1)) + return -ENOENT; + + rcu_read_lock(); + sta = sta_info_get(sdata, hdr->addr3); + + if (!sta || sta->mesh->plink_state != NL80211_PLINK_ESTAB) { + rcu_read_unlock(); + return -ENOENT; + } + rcu_read_unlock(); + + memcpy(hdr->addr1, hdr->addr3, ETH_ALEN); + memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); + return 0; +} + /** * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame. 
Calling * this function is considered "using" the associated mpath, so preempt a path @@ -1185,11 +1219,16 @@ int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata, int mesh_nexthop_lookup(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct mesh_path *mpath; struct sta_info *next_hop; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; u8 *target_addr = hdr->addr3; + if (ifmsh->mshcfg.dot11MeshNolearn && + !mesh_nexthop_lookup_nolearn(sdata, skb)) + return 0; + mpath = mesh_path_lookup(sdata, target_addr); if (!mpath || !(mpath->flags & MESH_PATH_ACTIVE)) return -ENOENT; diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c index eac5aa1419fc..e4e363138279 100644 --- a/net/wireless/mesh.c +++ b/net/wireless/mesh.c @@ -78,6 +78,7 @@ const struct mesh_config default_mesh_config = { .power_mode = NL80211_MESH_POWER_ACTIVE, .dot11MeshAwakeWindowDuration = MESH_DEFAULT_AWAKE_WINDOW, .plink_timeout = MESH_DEFAULT_PLINK_TIMEOUT, + .dot11MeshNolearn = false, }; const struct mesh_setup default_mesh_setup = { diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 6fdf818f66cf..257c06315464 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -6885,7 +6885,9 @@ static int nl80211_get_mesh_config(struct sk_buff *skb, nla_put_u32(msg, NL80211_MESHCONF_PLINK_TIMEOUT, cur_params.plink_timeout) || nla_put_u8(msg, NL80211_MESHCONF_CONNECTED_TO_GATE, - cur_params.dot11MeshConnectedToMeshGate)) + cur_params.dot11MeshConnectedToMeshGate) || + nla_put_u8(msg, NL80211_MESHCONF_NOLEARN, + cur_params.dot11MeshNolearn)) goto nla_put_failure; nla_nest_end(msg, pinfoattr); genlmsg_end(msg, hdr); @@ -6943,6 +6945,7 @@ nl80211_meshconf_params_policy[NL80211_MESHCONF_ATTR_MAX+1] = { [NL80211_MESHCONF_AWAKE_WINDOW] = { .type = NLA_U16 }, [NL80211_MESHCONF_PLINK_TIMEOUT] = { .type = NLA_U32 }, [NL80211_MESHCONF_CONNECTED_TO_GATE] = NLA_POLICY_RANGE(NLA_U8, 0, 1), + [NL80211_MESHCONF_NOLEARN] = NLA_POLICY_RANGE(NLA_U8, 0, 1), }; static const struct nla_policy @@ -7094,6 +7097,8 @@ do { \ NL80211_MESHCONF_AWAKE_WINDOW, nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, plink_timeout, mask, NL80211_MESHCONF_PLINK_TIMEOUT, nla_get_u32); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNolearn, mask, + NL80211_MESHCONF_NOLEARN, nla_get_u8); if (mask_out) *mask_out = mask; diff --git a/net/wireless/trace.h b/net/wireless/trace.h index b23cab016521..6e218a0acd4e 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -68,7 +68,8 @@ __field(u16, ht_opmode) \ __field(u32, dot11MeshHWMPactivePathToRootTimeout) \ __field(u16, dot11MeshHWMProotInterval) \ - __field(u16, dot11MeshHWMPconfirmationInterval) + __field(u16, dot11MeshHWMPconfirmationInterval) \ + __field(bool, dot11MeshNolearn) #define MESH_CFG_ASSIGN \ do { \ __entry->dot11MeshRetryTimeout = conf->dot11MeshRetryTimeout; \ @@ -109,6 +110,7 @@ conf->dot11MeshHWMProotInterval; \ __entry->dot11MeshHWMPconfirmationInterval = \ conf->dot11MeshHWMPconfirmationInterval; \ + __entry->dot11MeshNolearn = conf->dot11MeshNolearn; \ } while (0) #define CHAN_ENTRY __field(enum nl80211_band, band) \ -- cgit v1.2.3 From 184eebe664f0e11c485f6d309fe56297b3f75e9e Mon Sep 17 00:00:00 2001 From: Markus Theil Date: Thu, 11 Jun 2020 16:02:37 +0200 Subject: cfg80211/mac80211: add connected to auth server to meshconf Besides information about num of peerings and gate connectivity, the mesh formation byte also contains a flag for authentication server connectivity, that currently 
cannot be set in the mesh conf. This patch adds this capability, which is necessary to implement 802.1X authentication in mesh mode. Signed-off-by: Markus Theil Link: https://lore.kernel.org/r/20200611140238.427461-1-markus.theil@tu-ilmenau.de Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 1 + include/uapi/linux/nl80211.h | 5 +++++ net/mac80211/cfg.c | 3 +++ net/mac80211/debugfs_netdev.c | 3 +++ net/mac80211/mesh.c | 5 ++++- net/wireless/nl80211.c | 8 +++++++- 6 files changed, 23 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 78b220950942..8d5071f84ffe 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1895,6 +1895,7 @@ struct mesh_config { u16 dot11MeshHWMPnetDiameterTraversalTime; u8 dot11MeshHWMPRootMode; bool dot11MeshConnectedToMeshGate; + bool dot11MeshConnectedToAuthServer; u16 dot11MeshHWMPRannInterval; bool dot11MeshGateAnnouncementProtocol; bool dot11MeshForwarding; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index a83d8faf88ac..f1770e3756f4 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -4242,6 +4242,10 @@ enum nl80211_mesh_power_mode { * better. So if using this setting you will likely also want to disable * dot11MeshForwarding and use another mesh routing protocol on top. * + * @NL80211_MESHCONF_CONNECTED_TO_AS: If set to true then this mesh STA + * will advertise that it is connected to a authentication server + * in the mesh formation field. + * * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use */ enum nl80211_meshconf_params { @@ -4276,6 +4280,7 @@ enum nl80211_meshconf_params { NL80211_MESHCONF_PLINK_TIMEOUT, NL80211_MESHCONF_CONNECTED_TO_GATE, NL80211_MESHCONF_NOLEARN, + NL80211_MESHCONF_CONNECTED_TO_AS, /* keep last */ __NL80211_MESHCONF_ATTR_AFTER_LAST, diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 9af56b848544..6a6531a50e54 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2128,6 +2128,9 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy, nconf->dot11MeshConnectedToMeshGate; if (_chg_mesh_attr(NL80211_MESHCONF_NOLEARN, mask)) conf->dot11MeshNolearn = nconf->dot11MeshNolearn; + if (_chg_mesh_attr(NL80211_MESHCONF_CONNECTED_TO_AS, mask)) + conf->dot11MeshConnectedToAuthServer = + nconf->dot11MeshConnectedToAuthServer; ieee80211_mbss_info_change_notify(sdata, BSS_CHANGED_BEACON); return 0; } diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index 09eab2c3f380..fe8a7a87e513 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -639,6 +639,8 @@ IEEE80211_IF_FILE(dot11MeshAwakeWindowDuration, IEEE80211_IF_FILE(dot11MeshConnectedToMeshGate, u.mesh.mshcfg.dot11MeshConnectedToMeshGate, DEC); IEEE80211_IF_FILE(dot11MeshNolearn, u.mesh.mshcfg.dot11MeshNolearn, DEC); +IEEE80211_IF_FILE(dot11MeshConnectedToAuthServer, + u.mesh.mshcfg.dot11MeshConnectedToAuthServer, DEC); #endif #define DEBUGFS_ADD_MODE(name, mode) \ @@ -764,6 +766,7 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata) MESHPARAMS_ADD(dot11MeshAwakeWindowDuration); MESHPARAMS_ADD(dot11MeshConnectedToMeshGate); MESHPARAMS_ADD(dot11MeshNolearn); + MESHPARAMS_ADD(dot11MeshConnectedToAuthServer); #undef MESHPARAMS_ADD } #endif diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 96f0323c0a3d..d0db6af16427 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -260,6 +260,7 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata, bool 
is_connected_to_gate = ifmsh->num_gates > 0 || ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol || ifmsh->mshcfg.dot11MeshConnectedToMeshGate; + bool is_connected_to_as = ifmsh->mshcfg.dot11MeshConnectedToAuthServer; if (skb_tailroom(skb) < 2 + meshconf_len) return -ENOMEM; @@ -284,7 +285,9 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata, /* Mesh Formation Info - number of neighbors */ neighbors = atomic_read(&ifmsh->estab_plinks); neighbors = min_t(int, neighbors, IEEE80211_MAX_MESH_PEERINGS); - *pos++ = (neighbors << 1) | is_connected_to_gate; + *pos++ = (is_connected_to_as << 7) | + (neighbors << 1) | + is_connected_to_gate; /* Mesh capability */ *pos = 0x00; *pos |= ifmsh->mshcfg.dot11MeshForwarding ? diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 257c06315464..434fd06dc5cf 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -6887,7 +6887,9 @@ static int nl80211_get_mesh_config(struct sk_buff *skb, nla_put_u8(msg, NL80211_MESHCONF_CONNECTED_TO_GATE, cur_params.dot11MeshConnectedToMeshGate) || nla_put_u8(msg, NL80211_MESHCONF_NOLEARN, - cur_params.dot11MeshNolearn)) + cur_params.dot11MeshNolearn) || + nla_put_u8(msg, NL80211_MESHCONF_CONNECTED_TO_AS, + cur_params.dot11MeshConnectedToAuthServer)) goto nla_put_failure; nla_nest_end(msg, pinfoattr); genlmsg_end(msg, hdr); @@ -6946,6 +6948,7 @@ nl80211_meshconf_params_policy[NL80211_MESHCONF_ATTR_MAX+1] = { [NL80211_MESHCONF_PLINK_TIMEOUT] = { .type = NLA_U32 }, [NL80211_MESHCONF_CONNECTED_TO_GATE] = NLA_POLICY_RANGE(NLA_U8, 0, 1), [NL80211_MESHCONF_NOLEARN] = NLA_POLICY_RANGE(NLA_U8, 0, 1), + [NL80211_MESHCONF_CONNECTED_TO_AS] = NLA_POLICY_RANGE(NLA_U8, 0, 1), }; static const struct nla_policy @@ -7058,6 +7061,9 @@ do { \ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConnectedToMeshGate, mask, NL80211_MESHCONF_CONNECTED_TO_GATE, nla_get_u8); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConnectedToAuthServer, mask, + NL80211_MESHCONF_CONNECTED_TO_AS, + nla_get_u8); /* * Check HT operation mode based on * IEEE 802.11-2016 9.4.2.57 HT Operation element. -- cgit v1.2.3 From 1303a51c24100b3b1915d6f9072fe5ae5bb4c5f6 Mon Sep 17 00:00:00 2001 From: Markus Theil Date: Thu, 11 Jun 2020 16:02:38 +0200 Subject: cfg80211/mac80211: add connected to auth server to station info This patch adds the necessary bits to later query the auth server flag for every peer from iw. Signed-off-by: Markus Theil Link: https://lore.kernel.org/r/20200611140238.427461-2-markus.theil@tu-ilmenau.de Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 3 +++ include/uapi/linux/nl80211.h | 3 +++ net/mac80211/sta_info.c | 4 +++- net/mac80211/sta_info.h | 2 ++ net/wireless/nl80211.c | 1 + 5 files changed, 12 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 8d5071f84ffe..39fe21edd2c5 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1598,6 +1598,7 @@ struct cfg80211_tid_stats { * an FCS error. This counter should be incremented only when TA of the * received packet with an FCS error matches the peer MAC address. * @airtime_link_metric: mesh airtime link metric. 
+ * @connected_to_as: true if mesh STA has a path to authentication server */ struct station_info { u64 filled; @@ -1655,6 +1656,8 @@ struct station_info { u32 fcs_err_count; u32 airtime_link_metric; + + u8 connected_to_as; }; #if IS_ENABLED(CONFIG_CFG80211) diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index f1770e3756f4..d6b6599a6001 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -3370,6 +3370,8 @@ enum nl80211_sta_bss_param { * @NL80211_STA_INFO_AIRTIME_LINK_METRIC: airtime link metric for mesh station * @NL80211_STA_INFO_ASSOC_AT_BOOTTIME: Timestamp (CLOCK_BOOTTIME, nanoseconds) * of STA's association + * @NL80211_STA_INFO_CONNECTED_TO_AS: set to true if STA has a path to a + * authentication server (u8, 0 or 1) * @__NL80211_STA_INFO_AFTER_LAST: internal * @NL80211_STA_INFO_MAX: highest possible station info attribute */ @@ -3417,6 +3419,7 @@ enum nl80211_sta_info { NL80211_STA_INFO_AIRTIME_WEIGHT, NL80211_STA_INFO_AIRTIME_LINK_METRIC, NL80211_STA_INFO_ASSOC_AT_BOOTTIME, + NL80211_STA_INFO_CONNECTED_TO_AS, /* keep last */ __NL80211_STA_INFO_AFTER_LAST, diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index cd8487bc6fc2..a39773b40457 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -2426,7 +2426,8 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, BIT_ULL(NL80211_STA_INFO_LOCAL_PM) | BIT_ULL(NL80211_STA_INFO_PEER_PM) | BIT_ULL(NL80211_STA_INFO_NONPEER_PM) | - BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_GATE); + BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_GATE) | + BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_AS); sinfo->llid = sta->mesh->llid; sinfo->plid = sta->mesh->plid; @@ -2439,6 +2440,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, sinfo->peer_pm = sta->mesh->peer_pm; sinfo->nonpeer_pm = sta->mesh->nonpeer_pm; sinfo->connected_to_gate = sta->mesh->connected_to_gate; + sinfo->connected_to_as = sta->mesh->connected_to_as; #endif } diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 49728047dfad..9d398c9daa4c 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -385,6 +385,7 @@ DECLARE_EWMA(mesh_tx_rate_avg, 8, 16) * @processed_beacon: set to true after peer rates and capabilities are * processed * @connected_to_gate: true if mesh STA has a path to a mesh gate + * @connected_to_as: true if mesh STA has a path to a authentication server * @fail_avg: moving percentage of failed MSDUs * @tx_rate_avg: moving average of tx bitrate */ @@ -404,6 +405,7 @@ struct mesh_sta { bool processed_beacon; bool connected_to_gate; + bool connected_to_as; enum nl80211_plink_state plink_state; u32 plink_timeout; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 434fd06dc5cf..13a38aab1565 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -5395,6 +5395,7 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, PUT_SINFO(PEER_PM, peer_pm, u32); PUT_SINFO(NONPEER_PM, nonpeer_pm, u32); PUT_SINFO(CONNECTED_TO_GATE, connected_to_gate, u8); + PUT_SINFO(CONNECTED_TO_AS, connected_to_as, u8); if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_BSS_PARAM)) { bss_param = nla_nest_start_noflag(msg, -- cgit v1.2.3 From 48a54f6bc456859dabbd1fbd805e233d260754cf Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sun, 26 Jul 2020 15:09:46 +0200 Subject: net/fq_impl: use skb_get_hash instead of skb_get_hash_perturb This avoids unnecessarily regenerating the skb flow hash Signed-off-by: Felix Fietkau Link: 
https://lore.kernel.org/r/20200726130947.88145-1-nbd@nbd.name [small commit message fixup] Signed-off-by: Johannes Berg --- include/net/fq.h | 1 - include/net/fq_impl.h | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) (limited to 'include') diff --git a/include/net/fq.h b/include/net/fq.h index 2ad85e683041..e39f3f8d5f8a 100644 --- a/include/net/fq.h +++ b/include/net/fq.h @@ -69,7 +69,6 @@ struct fq { struct list_head backlogs; spinlock_t lock; u32 flows_cnt; - siphash_key_t perturbation; u32 limit; u32 memory_limit; u32 memory_usage; diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h index 38a9a3d1222b..e73d74d2fabf 100644 --- a/include/net/fq_impl.h +++ b/include/net/fq_impl.h @@ -108,7 +108,7 @@ begin: static u32 fq_flow_idx(struct fq *fq, struct sk_buff *skb) { - u32 hash = skb_get_hash_perturb(skb, &fq->perturbation); + u32 hash = skb_get_hash(skb); return reciprocal_scale(hash, fq->flows_cnt); } @@ -308,7 +308,6 @@ static int fq_init(struct fq *fq, int flows_cnt) INIT_LIST_HEAD(&fq->backlogs); spin_lock_init(&fq->lock); fq->flows_cnt = max_t(u32, flows_cnt, 1); - get_random_bytes(&fq->perturbation, sizeof(fq->perturbation)); fq->quantum = 300; fq->limit = 8192; fq->memory_limit = 16 << 20; /* 16 MBytes */ -- cgit v1.2.3 From fd17dba1c860d39f655a3a08387c21e3ceca8c55 Mon Sep 17 00:00:00 2001 From: Veerendranath Jakkam Date: Mon, 20 Jul 2020 13:12:25 +0530 Subject: cfg80211: Add support to advertize OCV support Add a new feature flag that drivers can use to advertize support for Operating Channel Validation (OCV) when using driver's SME for RSNA handshakes. Signed-off-by: Veerendranath Jakkam Link: https://lore.kernel.org/r/20200720074225.8990-1-vjakkam@codeaurora.org Signed-off-by: Johannes Berg --- include/uapi/linux/nl80211.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index d6b6599a6001..a3ae2b060a55 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -5804,6 +5804,9 @@ enum nl80211_feature_flags { * @NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211_TX_STATUS: The driver * can report tx status for control port over nl80211 tx operations. * + * @NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION: Driver supports Operating + * Channel Validation (OCV) when using driver's SME for RSNA handshakes. + * * @NUM_NL80211_EXT_FEATURES: number of extended features. * @MAX_NL80211_EXT_FEATURES: highest extended feature index. */ @@ -5859,6 +5862,7 @@ enum nl80211_ext_feature_index { NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT, NL80211_EXT_FEATURE_SCAN_FREQ_KHZ, NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211_TX_STATUS, + NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION, /* add new features before the definition below */ NUM_NL80211_EXT_FEATURES, -- cgit v1.2.3 From e02281e7a5c524aaf6a52bb01afc4cc49addb908 Mon Sep 17 00:00:00 2001 From: Mathy Vanhoef Date: Thu, 23 Jul 2020 14:01:49 +0400 Subject: mac80211: add radiotap flag to prevent sequence number overwrite The radiotap specification contains a flag to indicate that the sequence number of an injected frame should not be overwritten. Parse this flag and define and set a corresponding Tx control flag. 
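For userspace, the new bit is carried in the radiotap TX-flags field of an injected frame. The following is a minimal, illustrative injection sketch rather than a reference implementation: the monitor interface name, the dummy frame contents and the locally defined constants (mirroring IEEE80211_RADIOTAP_TX_FLAGS, bit 15, and IEEE80211_RADIOTAP_F_TX_NOSEQNO, 0x0010) are assumptions for the example, and error handling is omitted.

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <endian.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <net/if.h>
#include <arpa/inet.h>

#define RADIOTAP_TX_FLAGS_BIT	(1U << 15)	/* IEEE80211_RADIOTAP_TX_FLAGS */
#define TX_FLAG_NOSEQNO		0x0010		/* IEEE80211_RADIOTAP_F_TX_NOSEQNO */

int main(void)
{
	uint8_t rtap[10] = { 0 };		/* 8-byte header + 2-byte TX flags */
	uint32_t present = htole32(RADIOTAP_TX_FLAGS_BIT);
	uint16_t txflags = htole16(TX_FLAG_NOSEQNO);
	uint8_t hdr80211[24] = { 0x08, 0x00 };	/* frame control: data frame */
	uint8_t pkt[sizeof(rtap) + sizeof(hdr80211)];
	struct sockaddr_ll ll = { 0 };
	int fd;

	rtap[2] = sizeof(rtap);			/* it_len, little endian */
	memcpy(&rtap[4], &present, 4);		/* it_present word */
	memcpy(&rtap[8], &txflags, 2);		/* TX flags: keep our seqno */

	hdr80211[22] = 0x10;			/* seq ctrl: fragment 0, seq 1 */
	/* addresses are left zeroed purely for brevity */

	memcpy(pkt, rtap, sizeof(rtap));
	memcpy(pkt + sizeof(rtap), hdr80211, sizeof(hdr80211));

	fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	ll.sll_family = AF_PACKET;
	ll.sll_ifindex = if_nametoindex("mon0");	/* hypothetical monitor vif */
	ll.sll_protocol = htons(ETH_P_ALL);
	bind(fd, (struct sockaddr *)&ll, sizeof(ll));
	send(fd, pkt, sizeof(pkt), 0);
	close(fd);
	return 0;
}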
Signed-off-by: Mathy Vanhoef Link: https://lore.kernel.org/r/20200723100153.31631-2-Mathy.Vanhoef@kuleuven.be Signed-off-by: Johannes Berg --- include/net/ieee80211_radiotap.h | 1 + include/net/mac80211.h | 3 +++ net/mac80211/tx.c | 2 ++ 3 files changed, 6 insertions(+) (limited to 'include') diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h index 459d355f6506..19c00d100096 100644 --- a/include/net/ieee80211_radiotap.h +++ b/include/net/ieee80211_radiotap.h @@ -117,6 +117,7 @@ enum ieee80211_radiotap_tx_flags { IEEE80211_RADIOTAP_F_TX_CTS = 0x0002, IEEE80211_RADIOTAP_F_TX_RTS = 0x0004, IEEE80211_RADIOTAP_F_TX_NOACK = 0x0008, + IEEE80211_RADIOTAP_F_TX_NOSEQNO = 0x0010, }; /* for IEEE80211_RADIOTAP_MCS "have" flags */ diff --git a/include/net/mac80211.h b/include/net/mac80211.h index c0a597167f14..21ce821a25e7 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -825,6 +825,8 @@ enum mac80211_tx_info_flags { * @IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP: This frame skips mesh path lookup * @IEEE80211_TX_CTRL_HW_80211_ENCAP: This frame uses hardware encapsulation * (header conversion) + * @IEEE80211_TX_CTRL_NO_SEQNO: Do not overwrite the sequence number that + * has already been assigned to this frame. * * These flags are used in tx_info->control.flags. */ @@ -836,6 +838,7 @@ enum mac80211_tx_control_flags { IEEE80211_TX_CTRL_FAST_XMIT = BIT(4), IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP = BIT(5), IEEE80211_TX_CTRL_HW_80211_ENCAP = BIT(6), + IEEE80211_TX_CTRL_NO_SEQNO = BIT(7), }; /* diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 007e070227fd..413345056445 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -2085,6 +2085,8 @@ static bool ieee80211_parse_tx_radiotap(struct ieee80211_local *local, txflags = get_unaligned_le16(iterator.this_arg); if (txflags & IEEE80211_RADIOTAP_F_TX_NOACK) info->flags |= IEEE80211_TX_CTL_NO_ACK; + if (txflags & IEEE80211_RADIOTAP_F_TX_NOSEQNO) + info->control.flags |= IEEE80211_TX_CTRL_NO_SEQNO; break; case IEEE80211_RADIOTAP_RATE: -- cgit v1.2.3 From cb17ed29a7a5fea8c9bf70e8a05757d71650e025 Mon Sep 17 00:00:00 2001 From: Mathy Vanhoef Date: Thu, 23 Jul 2020 14:01:53 +0400 Subject: mac80211: parse radiotap header when selecting Tx queue Already parse the radiotap header in ieee80211_monitor_select_queue. In a subsequent commit this will allow us to add a radiotap flag that influences the queue on which injected packets will be sent. This also fixes the incomplete validation of the injected frame in ieee80211_monitor_select_queue: currently an out of bounds memory access may occur in the called function ieee80211_select_queue_80211 if the 802.11 header is too small. Note that in ieee80211_monitor_start_xmit the radiotap header is parsed again, which is necessary because ieee80211_monitor_select_queue is not always called beforehand.
Signed-off-by: Mathy Vanhoef Link: https://lore.kernel.org/r/20200723100153.31631-6-Mathy.Vanhoef@kuleuven.be Signed-off-by: Johannes Berg --- include/net/mac80211.h | 8 ++++++++ net/mac80211/iface.c | 15 ++++++++++---- net/mac80211/tx.c | 54 ++++++++++++++++++++++---------------------------- 3 files changed, 43 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 21ce821a25e7..6e26f0ba6fd0 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -6238,6 +6238,14 @@ bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct sk_buff *skb, int band, struct ieee80211_sta **sta); +/** + * Sanity-check and parse the radiotap header of injected frames + * @skb: packet injected by userspace + * @dev: the &struct device of this 802.11 device + */ +bool ieee80211_parse_tx_radiotap(struct sk_buff *skb, + struct net_device *dev); + /** * struct ieee80211_noa_data - holds temporary data for tracking P2P NoA state * diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 570f818384e8..9740ae8fa697 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1183,17 +1183,24 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev, { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr; - struct ieee80211_radiotap_header *rtap = (void *)skb->data; + int len_rthdr; if (local->hw.queues < IEEE80211_NUM_ACS) return 0; - if (skb->len < 4 || - skb->len < le16_to_cpu(rtap->it_len) + 2 /* frame control */) + /* reset flags and info before parsing radiotap header */ + memset(info, 0, sizeof(*info)); + + if (!ieee80211_parse_tx_radiotap(skb, dev)) return 0; /* doesn't matter, frame will be dropped */ - hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len)); + len_rthdr = ieee80211_get_radiotap_len(skb->data); + hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr); + if (skb->len < len_rthdr + 2 || + skb->len < len_rthdr + ieee80211_hdrlen(hdr->frame_control)) + return 0; /* doesn't matter, frame will be dropped */ return ieee80211_select_queue_80211(sdata, skb, hdr); } diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 7fb4afaa9410..ec35a92df8ed 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -2015,9 +2015,10 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, ieee80211_tx(sdata, sta, skb, false); } -static bool ieee80211_parse_tx_radiotap(struct ieee80211_local *local, - struct sk_buff *skb) +bool ieee80211_parse_tx_radiotap(struct sk_buff *skb, + struct net_device *dev) { + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); struct ieee80211_radiotap_iterator iterator; struct ieee80211_radiotap_header *rthdr = (struct ieee80211_radiotap_header *) skb->data; @@ -2036,6 +2037,18 @@ static bool ieee80211_parse_tx_radiotap(struct ieee80211_local *local, u8 vht_mcs = 0, vht_nss = 0; int i; + /* check for not even having the fixed radiotap header part */ + if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header))) + return false; /* too short to be possibly valid */ + + /* is it a header version we can trust to find length from? */ + if (unlikely(rthdr->it_version)) + return false; /* only version 0 is supported */ + + /* does the skb contain enough to deliver on the alleged length? 
*/ + if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data))) + return false; /* skb too short for claimed rt header extent */ + info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT | IEEE80211_TX_CTL_DONTFRAG; @@ -2189,13 +2202,6 @@ static bool ieee80211_parse_tx_radiotap(struct ieee80211_local *local, local->hw.max_rate_tries); } - /* - * remove the radiotap header - * iterator->_max_length was sanity-checked against - * skb->len by iterator init - */ - skb_pull(skb, iterator._max_length); - return true; } @@ -2204,8 +2210,6 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, { struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); struct ieee80211_chanctx_conf *chanctx_conf; - struct ieee80211_radiotap_header *prthdr = - (struct ieee80211_radiotap_header *)skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr; struct ieee80211_sub_if_data *tmp_sdata, *sdata; @@ -2213,21 +2217,17 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, u16 len_rthdr; int hdrlen; - /* check for not even having the fixed radiotap header part */ - if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header))) - goto fail; /* too short to be possibly valid */ + memset(info, 0, sizeof(*info)); + info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS | + IEEE80211_TX_CTL_INJECTED; - /* is it a header version we can trust to find length from? */ - if (unlikely(prthdr->it_version)) - goto fail; /* only version 0 is supported */ + /* Sanity-check and process the injection radiotap header */ + if (!ieee80211_parse_tx_radiotap(skb, dev)) + goto fail; - /* then there must be a radiotap header with a length we can use */ + /* we now know there is a radiotap header with a length we can use */ len_rthdr = ieee80211_get_radiotap_len(skb->data); - /* does the skb contain enough to deliver on the alleged length? */ - if (unlikely(skb->len < len_rthdr)) - goto fail; /* skb too short for claimed rt header extent */ - /* * fix up the pointers accounting for the radiotap * header still being in there. We are being given @@ -2273,11 +2273,6 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK; } - memset(info, 0, sizeof(*info)); - - info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS | - IEEE80211_TX_CTL_INJECTED; - rcu_read_lock(); /* @@ -2343,9 +2338,8 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, info->band = chandef->chan->band; - /* process and remove the injection radiotap header */ - if (!ieee80211_parse_tx_radiotap(local, skb)) - goto fail_rcu; + /* remove the injection radiotap header */ + skb_pull(skb, len_rthdr); ieee80211_xmit(sdata, NULL, skb); rcu_read_unlock(); -- cgit v1.2.3 From c5d1686b314ea44c5f210990dee05caf98cb068f Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sun, 26 Jul 2020 13:06:11 +0200 Subject: mac80211: add a function for running rx without passing skbs to the stack This can be used to run mac80211 rx processing on a batch of frames in NAPI poll before passing them to the network stack in a large batch. This can improve icache footprint, or it can be used to pass frames via netif_receive_skb_list. 
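A driver-side sketch of the intended usage pattern, collecting frames in the NAPI poll routine and handing the whole batch to the stack at once; the mydrv_* structure and fetch helper are hypothetical, only the mac80211 and networking calls are real:

/* Hedged sketch of a NAPI poll loop using ieee80211_rx_list(); NAPI poll
 * runs in softirq context, so BHs are already disabled as required.
 */
#include <net/mac80211.h>

struct mydrv_dev {
	struct ieee80211_hw *hw;
	struct napi_struct napi;
};

/* hypothetical helper that pulls one received frame off the hardware ring */
struct sk_buff *mydrv_get_rx_frame(struct mydrv_dev *dev);

static int mydrv_napi_poll(struct napi_struct *napi, int budget)
{
	struct mydrv_dev *dev = container_of(napi, struct mydrv_dev, napi);
	struct sk_buff *skb;
	LIST_HEAD(rx_list);
	int done = 0;

	rcu_read_lock();	/* ieee80211_rx_list() requires the RCU read lock */
	while (done < budget && (skb = mydrv_get_rx_frame(dev))) {
		ieee80211_rx_list(dev->hw, NULL, skb, &rx_list);
		done++;
	}
	rcu_read_unlock();

	/* hand the processed frames to the network stack in one batch */
	if (!list_empty(&rx_list))
		netif_receive_skb_list(&rx_list);

	if (done < budget)
		napi_complete_done(napi, done);

	return done;
}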
Signed-off-by: Felix Fietkau Link: https://lore.kernel.org/r/20200726110611.46886-1-nbd@nbd.name Signed-off-by: Johannes Berg --- include/net/mac80211.h | 25 +++++++++++++++++++ net/mac80211/ieee80211_i.h | 2 +- net/mac80211/rx.c | 60 +++++++++++++++++++++++++++++----------------- 3 files changed, 64 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 6e26f0ba6fd0..66e2bfd165e8 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -4360,6 +4360,31 @@ void ieee80211_free_hw(struct ieee80211_hw *hw); */ void ieee80211_restart_hw(struct ieee80211_hw *hw); +/** + * ieee80211_rx_list - receive frame and store processed skbs in a list + * + * Use this function to hand received frames to mac80211. The receive + * buffer in @skb must start with an IEEE 802.11 header. In case of a + * paged @skb is used, the driver is recommended to put the ieee80211 + * header of the frame on the linear part of the @skb to avoid memory + * allocation and/or memcpy by the stack. + * + * This function may not be called in IRQ context. Calls to this function + * for a single hardware must be synchronized against each other. Calls to + * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be + * mixed for a single hardware. Must not run concurrently with + * ieee80211_tx_status() or ieee80211_tx_status_ni(). + * + * This function must be called with BHs disabled and RCU read lock + * + * @hw: the hardware this frame came in on + * @sta: the station the frame was received from, or %NULL + * @skb: the buffer to receive, owned by mac80211 after this call + * @list: the destination list + */ +void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *sta, + struct sk_buff *skb, struct list_head *list); + /** * ieee80211_rx_napi - receive frame from NAPI context * diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 1760ce77d89f..0b1eaec6649f 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -217,7 +217,7 @@ enum ieee80211_rx_flags { }; struct ieee80211_rx_data { - struct napi_struct *napi; + struct list_head *list; struct sk_buff *skb; struct ieee80211_local *local; struct ieee80211_sub_if_data *sdata; diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 9ac3eaa00ce6..836cde516a18 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2578,8 +2578,8 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb, memset(skb->cb, 0, sizeof(skb->cb)); /* deliver to local stack */ - if (rx->napi) - napi_gro_receive(rx->napi, skb); + if (rx->list) + list_add_tail(&skb->list, rx->list); else netif_receive_skb(skb); } @@ -3869,7 +3869,6 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) /* This is OK -- must be QoS data frame */ .security_idx = tid, .seqno_idx = tid, - .napi = NULL, /* must be NULL to not have races */ }; struct tid_ampdu_rx *tid_agg_rx; @@ -4479,8 +4478,8 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, /* deliver to local stack */ skb->protocol = eth_type_trans(skb, fast_rx->dev); memset(skb->cb, 0, sizeof(skb->cb)); - if (rx->napi) - napi_gro_receive(rx->napi, skb); + if (rx->list) + list_add_tail(&skb->list, rx->list); else netif_receive_skb(skb); @@ -4547,7 +4546,7 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx, static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, struct sk_buff *skb, - struct napi_struct *napi) + struct 
list_head *list) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_sub_if_data *sdata; @@ -4562,7 +4561,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, memset(&rx, 0, sizeof(rx)); rx.skb = skb; rx.local = local; - rx.napi = napi; + rx.list = list; if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc)) I802_DEBUG_INC(local->dot11ReceivedFragmentCount); @@ -4670,8 +4669,8 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, * This is the receive path handler. It is called by a low level driver when an * 802.11 MPDU is received from the hardware. */ -void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, - struct sk_buff *skb, struct napi_struct *napi) +void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, + struct sk_buff *skb, struct list_head *list) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_rate *rate = NULL; @@ -4762,13 +4761,6 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, status->rx_flags = 0; - /* - * key references and virtual interfaces are protected using RCU - * and this requires that we are in a read-side RCU section during - * receive processing - */ - rcu_read_lock(); - /* * Frames with failed FCS/PLCP checksum are not returned, * all other frames are returned without radiotap header @@ -4776,23 +4768,47 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, * Also, frames with less than 16 bytes are dropped. */ skb = ieee80211_rx_monitor(local, skb, rate); - if (!skb) { - rcu_read_unlock(); + if (!skb) return; - } ieee80211_tpt_led_trig_rx(local, ((struct ieee80211_hdr *)skb->data)->frame_control, skb->len); - __ieee80211_rx_handle_packet(hw, pubsta, skb, napi); - - rcu_read_unlock(); + __ieee80211_rx_handle_packet(hw, pubsta, skb, list); return; drop: kfree_skb(skb); } +EXPORT_SYMBOL(ieee80211_rx_list); + +void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, + struct sk_buff *skb, struct napi_struct *napi) +{ + struct sk_buff *tmp; + LIST_HEAD(list); + + + /* + * key references and virtual interfaces are protected using RCU + * and this requires that we are in a read-side RCU section during + * receive processing + */ + rcu_read_lock(); + ieee80211_rx_list(hw, pubsta, skb, &list); + rcu_read_unlock(); + + if (!napi) { + netif_receive_skb_list(&list); + return; + } + + list_for_each_entry_safe(skb, tmp, &list, list) { + skb_list_del_init(skb); + napi_gro_receive(napi, skb); + } +} EXPORT_SYMBOL(ieee80211_rx_napi); /* This is a version of the rx handler that can be called from hard irq -- cgit v1.2.3 From 75e6b594bbaeeb3f8287a2e6eb8811384b8c7195 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 30 Jul 2020 13:00:52 +0200 Subject: cfg80211: invert HE BSS color 'disabled' to 'enabled' This is in fact 'disabled' in the spec, but there it's in a place where that actually makes sense. In our internal data structures, it doesn't really make sense, and in fact the previous commit just fixed a bug in that area. Make this safer by inverting the polarity from 'disabled' to 'enabled'. 
Link: https://lore.kernel.org/r/20200730130051.5d8399545bd9.Ie62fdcd1a6cd9c969315bc124084a494ca6c8df3@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/ath/ath11k/mac.c | 2 +- include/net/cfg80211.h | 4 ++-- net/mac80211/cfg.c | 2 +- net/mac80211/mlme.c | 8 ++++---- net/wireless/nl80211.c | 7 ++----- 5 files changed, 10 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 07d3e031c75a..94ae2b9ea663 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -2072,7 +2072,7 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw, ret = ath11k_wmi_send_obss_color_collision_cfg_cmd( ar, arvif->vdev_id, info->he_bss_color.color, ATH11K_BSS_COLOR_COLLISION_DETECTION_AP_PERIOD_MS, - !info->he_bss_color.disabled); + info->he_bss_color.enabled); if (ret) ath11k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n", arvif->vdev_id, ret); diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 39fe21edd2c5..d9e6b9fbd95b 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -267,12 +267,12 @@ struct ieee80211_he_obss_pd { * struct cfg80211_he_bss_color - AP settings for BSS coloring * * @color: the current color. - * @disabled: is the feature disabled. + * @enabled: HE BSS color is used * @partial: define the AID equation. */ struct cfg80211_he_bss_color { u8 color; - bool disabled; + bool enabled; bool partial; }; diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 23ef0ebaf180..24e9e00decb8 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1019,7 +1019,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK); changed |= BSS_CHANGED_HE_OBSS_PD; - if (!params->he_bss_color.disabled) + if (params->he_bss_color.enabled) changed |= BSS_CHANGED_HE_BSS_COLOR; } diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 8a92a62dc54d..839d0367446c 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -3460,11 +3460,11 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, bss_conf->he_bss_color.partial = le32_get_bits(elems->he_operation->he_oper_params, IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR); - bss_conf->he_bss_color.disabled = - le32_get_bits(elems->he_operation->he_oper_params, - IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED); + bss_conf->he_bss_color.enabled = + !le32_get_bits(elems->he_operation->he_oper_params, + IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED); - if (!bss_conf->he_bss_color.disabled) + if (bss_conf->he_bss_color.enabled) changed |= BSS_CHANGED_HE_BSS_COLOR; bss_conf->htc_trig_based_pkt_ext = diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index b4048f3c5134..8d78a6fc59a3 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -4713,8 +4713,8 @@ static int nl80211_parse_he_bss_color(struct nlattr *attrs, he_bss_color->color = nla_get_u8(tb[NL80211_HE_BSS_COLOR_ATTR_COLOR]); - he_bss_color->disabled = - nla_get_flag(tb[NL80211_HE_BSS_COLOR_ATTR_DISABLED]); + he_bss_color->enabled = + !nla_get_flag(tb[NL80211_HE_BSS_COLOR_ATTR_DISABLED]); he_bss_color->partial = nla_get_flag(tb[NL80211_HE_BSS_COLOR_ATTR_PARTIAL]); @@ -4865,9 +4865,6 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) memset(¶ms, 0, sizeof(params)); - /* disable BSS color by default */ - params.he_bss_color.disabled = true; - /* these are required for START_AP */ if 
(!info->attrs[NL80211_ATTR_BEACON_INTERVAL] || !info->attrs[NL80211_ATTR_DTIM_PERIOD] || -- cgit v1.2.3 From f96622749a67d40ad5efe8a58d5fc95313097aa0 Mon Sep 17 00:00:00 2001 From: Chung-Hsien Hsu Date: Tue, 23 Jun 2020 08:49:35 -0500 Subject: nl80211: support 4-way handshake offloading for WPA/WPA2-PSK in AP mode Let drivers advertise support for AP-mode WPA/WPA2-PSK 4-way handshake offloading with a new NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK flag. Extend use of NL80211_ATTR_PMK attribute indicating it might be passed as part of NL80211_CMD_START_AP command, and contain the PSK (which is the PMK, hence the name). The driver is assumed to handle the 4-way handshake by itself in this case, instead of relying on userspace. Signed-off-by: Chung-Hsien Hsu Signed-off-by: Chi-Hsien Lin Link: https://lore.kernel.org/r/20200623134938.39997-2-chi-hsien.lin@cypress.com Signed-off-by: Johannes Berg --- include/uapi/linux/nl80211.h | 41 ++++++++++++++++++++++++++++------------- net/wireless/nl80211.c | 4 +++- 2 files changed, 31 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index a3ae2b060a55..631f3a997b3c 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -183,18 +183,27 @@ * * By setting @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK flag drivers * can indicate they support offloading EAPOL handshakes for WPA/WPA2 - * preshared key authentication. In %NL80211_CMD_CONNECT the preshared - * key should be specified using %NL80211_ATTR_PMK. Drivers supporting - * this offload may reject the %NL80211_CMD_CONNECT when no preshared - * key material is provided, for example when that driver does not - * support setting the temporal keys through %CMD_NEW_KEY. + * preshared key authentication in station mode. In %NL80211_CMD_CONNECT + * the preshared key should be specified using %NL80211_ATTR_PMK. Drivers + * supporting this offload may reject the %NL80211_CMD_CONNECT when no + * preshared key material is provided, for example when that driver does + * not support setting the temporal keys through %NL80211_CMD_NEW_KEY. * * Similarly @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X flag can be * set by drivers indicating offload support of the PTK/GTK EAPOL - * handshakes during 802.1X authentication. In order to use the offload - * the %NL80211_CMD_CONNECT should have %NL80211_ATTR_WANT_1X_4WAY_HS - * attribute flag. Drivers supporting this offload may reject the - * %NL80211_CMD_CONNECT when the attribute flag is not present. + * handshakes during 802.1X authentication in station mode. In order to + * use the offload the %NL80211_CMD_CONNECT should have + * %NL80211_ATTR_WANT_1X_4WAY_HS attribute flag. Drivers supporting this + * offload may reject the %NL80211_CMD_CONNECT when the attribute flag is + * not present. + * + * By setting @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK flag drivers + * can indicate they support offloading EAPOL handshakes for WPA/WPA2 + * preshared key authentication in AP mode. In %NL80211_CMD_START_AP + * the preshared key should be specified using %NL80211_ATTR_PMK. Drivers + * supporting this offload may reject the %NL80211_CMD_START_AP when no + * preshared key material is provided, for example when that driver does + * not support setting the temporal keys through %NL80211_CMD_NEW_KEY. * * For 802.1X the PMK or PMK-R0 are set by providing %NL80211_ATTR_PMK * using %NL80211_CMD_SET_PMK. 
For offloaded FT support also @@ -2362,10 +2371,11 @@ enum nl80211_commands { * * @NL80211_ATTR_PMK: attribute for passing PMK key material. Used with * %NL80211_CMD_SET_PMKSA for the PMKSA identified by %NL80211_ATTR_PMKID. - * For %NL80211_CMD_CONNECT it is used to provide PSK for offloading 4-way - * handshake for WPA/WPA2-PSK networks. For 802.1X authentication it is - * used with %NL80211_CMD_SET_PMK. For offloaded FT support this attribute - * specifies the PMK-R0 if NL80211_ATTR_PMKR0_NAME is included as well. + * For %NL80211_CMD_CONNECT and %NL80211_CMD_START_AP it is used to provide + * PSK for offloading 4-way handshake for WPA/WPA2-PSK networks. For 802.1X + * authentication it is used with %NL80211_CMD_SET_PMK. For offloaded FT + * support this attribute specifies the PMK-R0 if NL80211_ATTR_PMKR0_NAME + * is included as well. * * @NL80211_ATTR_SCHED_SCAN_MULTI: flag attribute which user-space shall use to * indicate that it supports multiple active scheduled scan requests. @@ -5807,6 +5817,10 @@ enum nl80211_feature_flags { * @NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION: Driver supports Operating * Channel Validation (OCV) when using driver's SME for RSNA handshakes. * + * @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK: Device wants to do 4-way + * handshake with PSK in AP mode (PSK is passed as part of the start AP + * command). + * * @NUM_NL80211_EXT_FEATURES: number of extended features. * @MAX_NL80211_EXT_FEATURES: highest extended feature index. */ @@ -5863,6 +5877,7 @@ enum nl80211_ext_feature_index { NL80211_EXT_FEATURE_SCAN_FREQ_KHZ, NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211_TX_STATUS, NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION, + NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK, /* add new features before the definition below */ NUM_NL80211_EXT_FEATURES, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 8d78a6fc59a3..a096682ec0ad 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -9442,7 +9442,9 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev, if (nla_len(info->attrs[NL80211_ATTR_PMK]) != WLAN_PMK_LEN) return -EINVAL; if (!wiphy_ext_feature_isset(&rdev->wiphy, - NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK)) + NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK) && + !wiphy_ext_feature_isset(&rdev->wiphy, + NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK)) return -EINVAL; settings->psk = nla_data(info->attrs[NL80211_ATTR_PMK]); } -- cgit v1.2.3 From 4278e9d99e38938a7611b927fa4d73e6c86cb4fc Mon Sep 17 00:00:00 2001 From: Marcelo Henrique Cerri Date: Mon, 20 Jul 2020 19:08:09 +0200 Subject: lib/mpi: Add mpi_sub_ui() Add mpi_sub_ui() based on Gnu MP mpz_sub_ui() function from file mpz/aors_ui.h[1] from change id 510b83519d1c adapting the code to the kernel's data structures, helper functions and coding style and also removing the defines used to produce mpz_sub_ui() and mpz_add_ui() from the same code. 
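The new helper computes w = u - v for an unsigned long v and returns 0 on success or -ENOMEM if the result MPI cannot be resized. A hedged usage sketch (the p - 1 sanity check below is purely illustrative and not taken from this series):

	/* Illustrative only: reject a modulus p for which p - 1 <= 1. */
	MPI p_min_1 = mpi_alloc(0);
	int ret;

	if (!p_min_1)
		return -ENOMEM;

	ret = mpi_sub_ui(p_min_1, p, 1);	/* p_min_1 = p - 1 */
	if (!ret && mpi_cmp_ui(p_min_1, 1) <= 0)
		ret = -EINVAL;

	mpi_free(p_min_1);
	return ret;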
[1] https://gmplib.org/repo/gmp-6.2/file/510b83519d1c/mpz/aors.h Signed-off-by: Marcelo Henrique Cerri Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu --- include/linux/mpi.h | 3 ++ lib/mpi/Makefile | 1 + lib/mpi/mpi-sub-ui.c | 78 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 82 insertions(+) create mode 100644 lib/mpi/mpi-sub-ui.c (limited to 'include') diff --git a/include/linux/mpi.h b/include/linux/mpi.h index 7bd6d8af0004..5d906dfbf3ed 100644 --- a/include/linux/mpi.h +++ b/include/linux/mpi.h @@ -63,6 +63,9 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod); int mpi_cmp_ui(MPI u, ulong v); int mpi_cmp(MPI u, MPI v); +/*-- mpi-sub-ui.c --*/ +int mpi_sub_ui(MPI w, MPI u, unsigned long vval); + /*-- mpi-bit.c --*/ void mpi_normalize(MPI a); unsigned mpi_get_nbits(MPI a); diff --git a/lib/mpi/Makefile b/lib/mpi/Makefile index d5874a7f5ff9..43b8fce14079 100644 --- a/lib/mpi/Makefile +++ b/lib/mpi/Makefile @@ -16,6 +16,7 @@ mpi-y = \ mpicoder.o \ mpi-bit.o \ mpi-cmp.o \ + mpi-sub-ui.o \ mpih-cmp.o \ mpih-div.o \ mpih-mul.o \ diff --git a/lib/mpi/mpi-sub-ui.c b/lib/mpi/mpi-sub-ui.c new file mode 100644 index 000000000000..b41b082b5f3e --- /dev/null +++ b/lib/mpi/mpi-sub-ui.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* mpi-sub-ui.c - Subtract an unsigned integer from an MPI. + * + * Copyright 1991, 1993, 1994, 1996, 1999-2002, 2004, 2012, 2013, 2015 + * Free Software Foundation, Inc. + * + * This file was based on the GNU MP Library source file: + * https://gmplib.org/repo/gmp-6.2/file/510b83519d1c/mpz/aors_ui.h + * + * The GNU MP Library is free software; you can redistribute it and/or modify + * it under the terms of either: + * + * * the GNU Lesser General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your + * option) any later version. + * + * or + * + * * the GNU General Public License as published by the Free Software + * Foundation; either version 2 of the License, or (at your option) any + * later version. + * + * or both in parallel, as here. + * + * The GNU MP Library is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received copies of the GNU General Public License and the + * GNU Lesser General Public License along with the GNU MP Library. If not, + * see https://www.gnu.org/licenses/. + */ + +#include "mpi-internal.h" + +int mpi_sub_ui(MPI w, MPI u, unsigned long vval) +{ + if (u->nlimbs == 0) { + if (mpi_resize(w, 1) < 0) + return -ENOMEM; + w->d[0] = vval; + w->nlimbs = (vval != 0); + w->sign = (vval != 0); + return 0; + } + + /* If not space for W (and possible carry), increase space. */ + if (mpi_resize(w, u->nlimbs + 1)) + return -ENOMEM; + + if (u->sign) { + mpi_limb_t cy; + + cy = mpihelp_add_1(w->d, u->d, u->nlimbs, (mpi_limb_t) vval); + w->d[u->nlimbs] = cy; + w->nlimbs = u->nlimbs + cy; + w->sign = 1; + } else { + /* The signs are different. Need exact comparison to determine + * which operand to subtract from which. + */ + if (u->nlimbs == 1 && u->d[0] < vval) { + w->d[0] = vval - u->d[0]; + w->nlimbs = 1; + w->sign = 1; + } else { + mpihelp_sub_1(w->d, u->d, u->nlimbs, (mpi_limb_t) vval); + /* Size can decrease with at most one limb. 
*/ + w->nlimbs = (u->nlimbs - (w->d[u->nlimbs - 1] == 0)); + w->sign = 0; + } + } + + mpi_normalize(w); + return 0; +} +EXPORT_SYMBOL_GPL(mpi_sub_ui); -- cgit v1.2.3 From 4963bb2b89884bbdb7e33e6a09c159551e9627aa Mon Sep 17 00:00:00 2001 From: Nick Terrell Date: Thu, 30 Jul 2020 12:08:35 -0700 Subject: lib: Add zstd support to decompress - Add unzstd() and the zstd decompress interface. - Add zstd support to decompress_method(). The decompress_method() and unzstd() functions are used to decompress the initramfs and the initrd. The __decompress() function is used in the preboot environment to decompress a zstd compressed kernel. The zstd decompression function allows the input and output buffers to overlap because that is used by x86 kernel decompression. Signed-off-by: Nick Terrell Signed-off-by: Ingo Molnar Tested-by: Sedat Dilek Reviewed-by: Kees Cook Link: https://lore.kernel.org/r/20200730190841.2071656-3-nickrterrell@gmail.com --- include/linux/decompress/unzstd.h | 11 ++ lib/Kconfig | 4 + lib/Makefile | 1 + lib/decompress.c | 5 + lib/decompress_unzstd.c | 345 ++++++++++++++++++++++++++++++++++++++ 5 files changed, 366 insertions(+) create mode 100644 include/linux/decompress/unzstd.h create mode 100644 lib/decompress_unzstd.c (limited to 'include') diff --git a/include/linux/decompress/unzstd.h b/include/linux/decompress/unzstd.h new file mode 100644 index 000000000000..56d539ae880f --- /dev/null +++ b/include/linux/decompress/unzstd.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_DECOMPRESS_UNZSTD_H +#define LINUX_DECOMPRESS_UNZSTD_H + +int unzstd(unsigned char *inbuf, long len, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), + unsigned char *output, + long *pos, + void (*error_fn)(char *x)); +#endif diff --git a/lib/Kconfig b/lib/Kconfig index df3f3da95990..a5d6f23c4cab 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -342,6 +342,10 @@ config DECOMPRESS_LZ4 select LZ4_DECOMPRESS tristate +config DECOMPRESS_ZSTD + select ZSTD_DECOMPRESS + tristate + # # Generic allocator support is selected if needed # diff --git a/lib/Makefile b/lib/Makefile index b1c42c10073b..2ba9642a3a87 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -170,6 +170,7 @@ lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o lib-$(CONFIG_DECOMPRESS_XZ) += decompress_unxz.o lib-$(CONFIG_DECOMPRESS_LZO) += decompress_unlzo.o lib-$(CONFIG_DECOMPRESS_LZ4) += decompress_unlz4.o +lib-$(CONFIG_DECOMPRESS_ZSTD) += decompress_unzstd.o obj-$(CONFIG_TEXTSEARCH) += textsearch.o obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o diff --git a/lib/decompress.c b/lib/decompress.c index 857ab1af1ef3..ab3fc90ffc64 100644 --- a/lib/decompress.c +++ b/lib/decompress.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -37,6 +38,9 @@ #ifndef CONFIG_DECOMPRESS_LZ4 # define unlz4 NULL #endif +#ifndef CONFIG_DECOMPRESS_ZSTD +# define unzstd NULL +#endif struct compress_format { unsigned char magic[2]; @@ -52,6 +56,7 @@ static const struct compress_format compressed_formats[] __initconst = { { {0xfd, 0x37}, "xz", unxz }, { {0x89, 0x4c}, "lzo", unlzo }, { {0x02, 0x21}, "lz4", unlz4 }, + { {0x28, 0xb5}, "zstd", unzstd }, { {0, 0}, NULL, NULL } }; diff --git a/lib/decompress_unzstd.c b/lib/decompress_unzstd.c new file mode 100644 index 000000000000..0ad2c15479ed --- /dev/null +++ b/lib/decompress_unzstd.c @@ -0,0 +1,345 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Important notes about in-place decompression + * + * At least on x86, the kernel is decompressed in place: the 
compressed data + * is placed to the end of the output buffer, and the decompressor overwrites + * most of the compressed data. There must be enough safety margin to + * guarantee that the write position is always behind the read position. + * + * The safety margin for ZSTD with a 128 KB block size is calculated below. + * Note that the margin with ZSTD is bigger than with GZIP or XZ! + * + * The worst case for in-place decompression is that the beginning of + * the file is compressed extremely well, and the rest of the file is + * uncompressible. Thus, we must look for worst-case expansion when the + * compressor is encoding uncompressible data. + * + * The structure of the .zst file in case of a compresed kernel is as follows. + * Maximum sizes (as bytes) of the fields are in parenthesis. + * + * Frame Header: (18) + * Blocks: (N) + * Checksum: (4) + * + * The frame header and checksum overhead is at most 22 bytes. + * + * ZSTD stores the data in blocks. Each block has a header whose size is + * a 3 bytes. After the block header, there is up to 128 KB of payload. + * The maximum uncompressed size of the payload is 128 KB. The minimum + * uncompressed size of the payload is never less than the payload size + * (excluding the block header). + * + * The assumption, that the uncompressed size of the payload is never + * smaller than the payload itself, is valid only when talking about + * the payload as a whole. It is possible that the payload has parts where + * the decompressor consumes more input than it produces output. Calculating + * the worst case for this would be tricky. Instead of trying to do that, + * let's simply make sure that the decompressor never overwrites any bytes + * of the payload which it is currently reading. + * + * Now we have enough information to calculate the safety margin. We need + * - 22 bytes for the .zst file format headers; + * - 3 bytes per every 128 KiB of uncompressed size (one block header per + * block); and + * - 128 KiB (biggest possible zstd block size) to make sure that the + * decompressor never overwrites anything from the block it is currently + * reading. + * + * We get the following formula: + * + * safety_margin = 22 + uncompressed_size * 3 / 131072 + 131072 + * <= 22 + (uncompressed_size >> 15) + 131072 + */ + +/* + * Preboot environments #include "path/to/decompress_unzstd.c". + * All of the source files we depend on must be #included. + * zstd's only source dependeny is xxhash, which has no source + * dependencies. + * + * When UNZSTD_PREBOOT is defined we declare __decompress(), which is + * used for kernel decompression, instead of unzstd(). + * + * Define __DISABLE_EXPORTS in preboot environments to prevent symbols + * from xxhash and zstd from being exported by the EXPORT_SYMBOL macro. + */ +#ifdef STATIC +# define UNZSTD_PREBOOT +# include "xxhash.c" +# include "zstd/entropy_common.c" +# include "zstd/fse_decompress.c" +# include "zstd/huf_decompress.c" +# include "zstd/zstd_common.c" +# include "zstd/decompress.c" +#endif + +#include +#include +#include + +/* 128MB is the maximum window size supported by zstd. */ +#define ZSTD_WINDOWSIZE_MAX (1 << ZSTD_WINDOWLOG_MAX) +/* + * Size of the input and output buffers in multi-call mode. + * Pick a larger size because it isn't used during kernel decompression, + * since that is single pass, and we have to allocate a large buffer for + * zstd's window anyway. The larger size speeds up initramfs decompression. 
+ */ +#define ZSTD_IOBUF_SIZE (1 << 17) + +static int INIT handle_zstd_error(size_t ret, void (*error)(char *x)) +{ + const int err = ZSTD_getErrorCode(ret); + + if (!ZSTD_isError(ret)) + return 0; + + switch (err) { + case ZSTD_error_memory_allocation: + error("ZSTD decompressor ran out of memory"); + break; + case ZSTD_error_prefix_unknown: + error("Input is not in the ZSTD format (wrong magic bytes)"); + break; + case ZSTD_error_dstSize_tooSmall: + case ZSTD_error_corruption_detected: + case ZSTD_error_checksum_wrong: + error("ZSTD-compressed data is corrupt"); + break; + default: + error("ZSTD-compressed data is probably corrupt"); + break; + } + return -1; +} + +/* + * Handle the case where we have the entire input and output in one segment. + * We can allocate less memory (no circular buffer for the sliding window), + * and avoid some memcpy() calls. + */ +static int INIT decompress_single(const u8 *in_buf, long in_len, u8 *out_buf, + long out_len, long *in_pos, + void (*error)(char *x)) +{ + const size_t wksp_size = ZSTD_DCtxWorkspaceBound(); + void *wksp = large_malloc(wksp_size); + ZSTD_DCtx *dctx = ZSTD_initDCtx(wksp, wksp_size); + int err; + size_t ret; + + if (dctx == NULL) { + error("Out of memory while allocating ZSTD_DCtx"); + err = -1; + goto out; + } + /* + * Find out how large the frame actually is, there may be junk at + * the end of the frame that ZSTD_decompressDCtx() can't handle. + */ + ret = ZSTD_findFrameCompressedSize(in_buf, in_len); + err = handle_zstd_error(ret, error); + if (err) + goto out; + in_len = (long)ret; + + ret = ZSTD_decompressDCtx(dctx, out_buf, out_len, in_buf, in_len); + err = handle_zstd_error(ret, error); + if (err) + goto out; + + if (in_pos != NULL) + *in_pos = in_len; + + err = 0; +out: + if (wksp != NULL) + large_free(wksp); + return err; +} + +static int INIT __unzstd(unsigned char *in_buf, long in_len, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), + unsigned char *out_buf, long out_len, + long *in_pos, + void (*error)(char *x)) +{ + ZSTD_inBuffer in; + ZSTD_outBuffer out; + ZSTD_frameParams params; + void *in_allocated = NULL; + void *out_allocated = NULL; + void *wksp = NULL; + size_t wksp_size; + ZSTD_DStream *dstream; + int err; + size_t ret; + + if (out_len == 0) + out_len = LONG_MAX; /* no limit */ + + if (fill == NULL && flush == NULL) + /* + * We can decompress faster and with less memory when we have a + * single chunk. + */ + return decompress_single(in_buf, in_len, out_buf, out_len, + in_pos, error); + + /* + * If in_buf is not provided, we must be using fill(), so allocate + * a large enough buffer. If it is provided, it must be at least + * ZSTD_IOBUF_SIZE large. + */ + if (in_buf == NULL) { + in_allocated = large_malloc(ZSTD_IOBUF_SIZE); + if (in_allocated == NULL) { + error("Out of memory while allocating input buffer"); + err = -1; + goto out; + } + in_buf = in_allocated; + in_len = 0; + } + /* Read the first chunk, since we need to decode the frame header. */ + if (fill != NULL) + in_len = fill(in_buf, ZSTD_IOBUF_SIZE); + if (in_len < 0) { + error("ZSTD-compressed data is truncated"); + err = -1; + goto out; + } + /* Set the first non-empty input buffer. */ + in.src = in_buf; + in.pos = 0; + in.size = in_len; + /* Allocate the output buffer if we are using flush(). 
*/ + if (flush != NULL) { + out_allocated = large_malloc(ZSTD_IOBUF_SIZE); + if (out_allocated == NULL) { + error("Out of memory while allocating output buffer"); + err = -1; + goto out; + } + out_buf = out_allocated; + out_len = ZSTD_IOBUF_SIZE; + } + /* Set the output buffer. */ + out.dst = out_buf; + out.pos = 0; + out.size = out_len; + + /* + * We need to know the window size to allocate the ZSTD_DStream. + * Since we are streaming, we need to allocate a buffer for the sliding + * window. The window size varies from 1 KB to ZSTD_WINDOWSIZE_MAX + * (8 MB), so it is important to use the actual value so as not to + * waste memory when it is smaller. + */ + ret = ZSTD_getFrameParams(¶ms, in.src, in.size); + err = handle_zstd_error(ret, error); + if (err) + goto out; + if (ret != 0) { + error("ZSTD-compressed data has an incomplete frame header"); + err = -1; + goto out; + } + if (params.windowSize > ZSTD_WINDOWSIZE_MAX) { + error("ZSTD-compressed data has too large a window size"); + err = -1; + goto out; + } + + /* + * Allocate the ZSTD_DStream now that we know how much memory is + * required. + */ + wksp_size = ZSTD_DStreamWorkspaceBound(params.windowSize); + wksp = large_malloc(wksp_size); + dstream = ZSTD_initDStream(params.windowSize, wksp, wksp_size); + if (dstream == NULL) { + error("Out of memory while allocating ZSTD_DStream"); + err = -1; + goto out; + } + + /* + * Decompression loop: + * Read more data if necessary (error if no more data can be read). + * Call the decompression function, which returns 0 when finished. + * Flush any data produced if using flush(). + */ + if (in_pos != NULL) + *in_pos = 0; + do { + /* + * If we need to reload data, either we have fill() and can + * try to get more data, or we don't and the input is truncated. + */ + if (in.pos == in.size) { + if (in_pos != NULL) + *in_pos += in.pos; + in_len = fill ? fill(in_buf, ZSTD_IOBUF_SIZE) : -1; + if (in_len < 0) { + error("ZSTD-compressed data is truncated"); + err = -1; + goto out; + } + in.pos = 0; + in.size = in_len; + } + /* Returns zero when the frame is complete. */ + ret = ZSTD_decompressStream(dstream, &out, &in); + err = handle_zstd_error(ret, error); + if (err) + goto out; + /* Flush all of the data produced if using flush(). 
*/ + if (flush != NULL && out.pos > 0) { + if (out.pos != flush(out.dst, out.pos)) { + error("Failed to flush()"); + err = -1; + goto out; + } + out.pos = 0; + } + } while (ret != 0); + + if (in_pos != NULL) + *in_pos += in.pos; + + err = 0; +out: + if (in_allocated != NULL) + large_free(in_allocated); + if (out_allocated != NULL) + large_free(out_allocated); + if (wksp != NULL) + large_free(wksp); + return err; +} + +#ifndef UNZSTD_PREBOOT +STATIC int INIT unzstd(unsigned char *buf, long len, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), + unsigned char *out_buf, + long *pos, + void (*error)(char *x)) +{ + return __unzstd(buf, len, fill, flush, out_buf, 0, pos, error); +} +#else +STATIC int INIT __decompress(unsigned char *buf, long len, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), + unsigned char *out_buf, long out_len, + long *pos, + void (*error)(char *x)) +{ + return __unzstd(buf, len, fill, flush, out_buf, out_len, pos, error); +} +#endif -- cgit v1.2.3 From 0584df9c12f449124d0bfef9899e5365604ee7a9 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Wed, 29 Jul 2020 13:09:15 +0200 Subject: lockdep: Refactor IRQ trace events fields into struct Refactor the IRQ trace events fields, used for printing information about the IRQ trace events, into a separate struct 'irqtrace_events'. This improves readability by separating the information only used in reporting, as well as enables (simplified) storing/restoring of irqtrace_events snapshots. No functional change intended. Signed-off-by: Marco Elver Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20200729110916.3920464-1-elver@google.com Signed-off-by: Ingo Molnar --- include/linux/irqflags.h | 13 +++++++++++ include/linux/sched.h | 11 ++------- kernel/fork.c | 16 +++++-------- kernel/locking/lockdep.c | 58 +++++++++++++++++++++++++----------------------- 4 files changed, 50 insertions(+), 48 deletions(-) (limited to 'include') diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index 5811ee8a5cd8..bd5c55755447 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -33,6 +33,19 @@ #ifdef CONFIG_TRACE_IRQFLAGS +/* Per-task IRQ trace events information. 
*/ +struct irqtrace_events { + unsigned int irq_events; + unsigned long hardirq_enable_ip; + unsigned long hardirq_disable_ip; + unsigned int hardirq_enable_event; + unsigned int hardirq_disable_event; + unsigned long softirq_disable_ip; + unsigned long softirq_enable_ip; + unsigned int softirq_disable_event; + unsigned int softirq_enable_event; +}; + DECLARE_PER_CPU(int, hardirqs_enabled); DECLARE_PER_CPU(int, hardirq_context); diff --git a/include/linux/sched.h b/include/linux/sched.h index 8d1de021b315..52e0fdd6a555 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -980,17 +981,9 @@ struct task_struct { #endif #ifdef CONFIG_TRACE_IRQFLAGS - unsigned int irq_events; + struct irqtrace_events irqtrace; unsigned int hardirq_threaded; - unsigned long hardirq_enable_ip; - unsigned long hardirq_disable_ip; - unsigned int hardirq_enable_event; - unsigned int hardirq_disable_event; u64 hardirq_chain_key; - unsigned long softirq_disable_ip; - unsigned long softirq_enable_ip; - unsigned int softirq_disable_event; - unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; int irq_config; diff --git a/kernel/fork.c b/kernel/fork.c index 70d9d0a4de2a..56a640799680 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2035,17 +2035,11 @@ static __latent_entropy struct task_struct *copy_process( seqcount_init(&p->mems_allowed_seq); #endif #ifdef CONFIG_TRACE_IRQFLAGS - p->irq_events = 0; - p->hardirq_enable_ip = 0; - p->hardirq_enable_event = 0; - p->hardirq_disable_ip = _THIS_IP_; - p->hardirq_disable_event = 0; - p->softirqs_enabled = 1; - p->softirq_enable_ip = _THIS_IP_; - p->softirq_enable_event = 0; - p->softirq_disable_ip = 0; - p->softirq_disable_event = 0; - p->softirq_context = 0; + memset(&p->irqtrace, 0, sizeof(p->irqtrace)); + p->irqtrace.hardirq_disable_ip = _THIS_IP_; + p->irqtrace.softirq_enable_ip = _THIS_IP_; + p->softirqs_enabled = 1; + p->softirq_context = 0; #endif p->pagefault_disabled = 0; diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index c9ea05edce25..7b5800374c40 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -3484,19 +3484,21 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this, void print_irqtrace_events(struct task_struct *curr) { - printk("irq event stamp: %u\n", curr->irq_events); + const struct irqtrace_events *trace = &curr->irqtrace; + + printk("irq event stamp: %u\n", trace->irq_events); printk("hardirqs last enabled at (%u): [<%px>] %pS\n", - curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip, - (void *)curr->hardirq_enable_ip); + trace->hardirq_enable_event, (void *)trace->hardirq_enable_ip, + (void *)trace->hardirq_enable_ip); printk("hardirqs last disabled at (%u): [<%px>] %pS\n", - curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip, - (void *)curr->hardirq_disable_ip); + trace->hardirq_disable_event, (void *)trace->hardirq_disable_ip, + (void *)trace->hardirq_disable_ip); printk("softirqs last enabled at (%u): [<%px>] %pS\n", - curr->softirq_enable_event, (void *)curr->softirq_enable_ip, - (void *)curr->softirq_enable_ip); + trace->softirq_enable_event, (void *)trace->softirq_enable_ip, + (void *)trace->softirq_enable_ip); printk("softirqs last disabled at (%u): [<%px>] %pS\n", - curr->softirq_disable_event, (void *)curr->softirq_disable_ip, - (void *)curr->softirq_disable_ip); + trace->softirq_disable_event, (void *)trace->softirq_disable_ip, + (void 
*)trace->softirq_disable_ip); } static int HARDIRQ_verbose(struct lock_class *class) @@ -3699,7 +3701,7 @@ EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare); void noinstr lockdep_hardirqs_on(unsigned long ip) { - struct task_struct *curr = current; + struct irqtrace_events *trace = ¤t->irqtrace; if (unlikely(!debug_locks)) return; @@ -3752,8 +3754,8 @@ void noinstr lockdep_hardirqs_on(unsigned long ip) skip_checks: /* we'll do an OFF -> ON transition: */ this_cpu_write(hardirqs_enabled, 1); - curr->hardirq_enable_ip = ip; - curr->hardirq_enable_event = ++curr->irq_events; + trace->hardirq_enable_ip = ip; + trace->hardirq_enable_event = ++trace->irq_events; debug_atomic_inc(hardirqs_on_events); } EXPORT_SYMBOL_GPL(lockdep_hardirqs_on); @@ -3763,8 +3765,6 @@ EXPORT_SYMBOL_GPL(lockdep_hardirqs_on); */ void noinstr lockdep_hardirqs_off(unsigned long ip) { - struct task_struct *curr = current; - if (unlikely(!debug_locks)) return; @@ -3784,12 +3784,14 @@ void noinstr lockdep_hardirqs_off(unsigned long ip) return; if (lockdep_hardirqs_enabled()) { + struct irqtrace_events *trace = ¤t->irqtrace; + /* * We have done an ON -> OFF transition: */ this_cpu_write(hardirqs_enabled, 0); - curr->hardirq_disable_ip = ip; - curr->hardirq_disable_event = ++curr->irq_events; + trace->hardirq_disable_ip = ip; + trace->hardirq_disable_event = ++trace->irq_events; debug_atomic_inc(hardirqs_off_events); } else { debug_atomic_inc(redundant_hardirqs_off); @@ -3802,7 +3804,7 @@ EXPORT_SYMBOL_GPL(lockdep_hardirqs_off); */ void lockdep_softirqs_on(unsigned long ip) { - struct task_struct *curr = current; + struct irqtrace_events *trace = ¤t->irqtrace; if (unlikely(!debug_locks || current->lockdep_recursion)) return; @@ -3814,7 +3816,7 @@ void lockdep_softirqs_on(unsigned long ip) if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) return; - if (curr->softirqs_enabled) { + if (current->softirqs_enabled) { debug_atomic_inc(redundant_softirqs_on); return; } @@ -3823,9 +3825,9 @@ void lockdep_softirqs_on(unsigned long ip) /* * We'll do an OFF -> ON transition: */ - curr->softirqs_enabled = 1; - curr->softirq_enable_ip = ip; - curr->softirq_enable_event = ++curr->irq_events; + current->softirqs_enabled = 1; + trace->softirq_enable_ip = ip; + trace->softirq_enable_event = ++trace->irq_events; debug_atomic_inc(softirqs_on_events); /* * We are going to turn softirqs on, so set the @@ -3833,7 +3835,7 @@ void lockdep_softirqs_on(unsigned long ip) * enabled too: */ if (lockdep_hardirqs_enabled()) - mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ); + mark_held_locks(current, LOCK_ENABLED_SOFTIRQ); lockdep_recursion_finish(); } @@ -3842,8 +3844,6 @@ void lockdep_softirqs_on(unsigned long ip) */ void lockdep_softirqs_off(unsigned long ip) { - struct task_struct *curr = current; - if (unlikely(!debug_locks || current->lockdep_recursion)) return; @@ -3853,13 +3853,15 @@ void lockdep_softirqs_off(unsigned long ip) if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) return; - if (curr->softirqs_enabled) { + if (current->softirqs_enabled) { + struct irqtrace_events *trace = ¤t->irqtrace; + /* * We have done an ON -> OFF transition: */ - curr->softirqs_enabled = 0; - curr->softirq_disable_ip = ip; - curr->softirq_disable_event = ++curr->irq_events; + current->softirqs_enabled = 0; + trace->softirq_disable_ip = ip; + trace->softirq_disable_event = ++trace->irq_events; debug_atomic_inc(softirqs_off_events); /* * Whoops, we wanted softirqs off, so why aren't they? 
-- cgit v1.2.3 From 92c209ac6d3d35783c16c8a717547183e6e11162 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Wed, 29 Jul 2020 13:09:16 +0200 Subject: kcsan: Improve IRQ state trace reporting To improve the general usefulness of the IRQ state trace events with KCSAN enabled, save and restore the trace information when entering and exiting the KCSAN runtime as well as when generating a KCSAN report. Without this, reporting the IRQ trace events (whether via a KCSAN report or outside of KCSAN via a lockdep report) is rather useless due to continuously being touched by KCSAN. This is because if KCSAN is enabled, every instrumented memory access causes changes to IRQ trace events (either by KCSAN disabling/enabling interrupts or taking report_lock when generating a report). Before "lockdep: Prepare for NMI IRQ state tracking", KCSAN avoided touching the IRQ trace events via raw_local_irq_save/restore() and lockdep_off/on(). Fixes: 248591f5d257 ("kcsan: Make KCSAN compatible with new IRQ state tracking") Signed-off-by: Marco Elver Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20200729110916.3920464-2-elver@google.com Signed-off-by: Ingo Molnar --- include/linux/sched.h | 4 ++++ kernel/kcsan/core.c | 23 +++++++++++++++++++++++ kernel/kcsan/kcsan.h | 7 +++++++ kernel/kcsan/report.c | 3 +++ 4 files changed, 37 insertions(+) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index 52e0fdd6a555..060e9214c8b5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1184,8 +1184,12 @@ struct task_struct { #ifdef CONFIG_KASAN unsigned int kasan_depth; #endif + #ifdef CONFIG_KCSAN struct kcsan_ctx kcsan_ctx; +#ifdef CONFIG_TRACE_IRQFLAGS + struct irqtrace_events kcsan_save_irqtrace; +#endif #endif #ifdef CONFIG_FUNCTION_GRAPH_TRACER diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index 732623c30359..0fe068192781 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -291,6 +291,20 @@ static inline unsigned int get_delay(void) 0); } +void kcsan_save_irqtrace(struct task_struct *task) +{ +#ifdef CONFIG_TRACE_IRQFLAGS + task->kcsan_save_irqtrace = task->irqtrace; +#endif +} + +void kcsan_restore_irqtrace(struct task_struct *task) +{ +#ifdef CONFIG_TRACE_IRQFLAGS + task->irqtrace = task->kcsan_save_irqtrace; +#endif +} + /* * Pull everything together: check_access() below contains the performance * critical operations; the fast-path (including check_access) functions should @@ -336,9 +350,11 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr, flags = user_access_save(); if (consumed) { + kcsan_save_irqtrace(current); kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_MAYBE, KCSAN_REPORT_CONSUMED_WATCHPOINT, watchpoint - watchpoints); + kcsan_restore_irqtrace(current); } else { /* * The other thread may not print any diagnostics, as it has @@ -396,6 +412,12 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) goto out; } + /* + * Save and restore the IRQ state trace touched by KCSAN, since KCSAN's + * runtime is entered for every memory access, and potentially useful + * information is lost if dirtied by KCSAN. 
+ */ + kcsan_save_irqtrace(current); if (!kcsan_interrupt_watcher) local_irq_save(irq_flags); @@ -539,6 +561,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) out_unlock: if (!kcsan_interrupt_watcher) local_irq_restore(irq_flags); + kcsan_restore_irqtrace(current); out: user_access_restore(ua_flags); } diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h index 763d6d08d94b..29480010dc30 100644 --- a/kernel/kcsan/kcsan.h +++ b/kernel/kcsan/kcsan.h @@ -9,6 +9,7 @@ #define _KERNEL_KCSAN_KCSAN_H #include +#include /* The number of adjacent watchpoints to check. */ #define KCSAN_CHECK_ADJACENT 1 @@ -22,6 +23,12 @@ extern unsigned int kcsan_udelay_interrupt; */ extern bool kcsan_enabled; +/* + * Save/restore IRQ flags state trace dirtied by KCSAN. + */ +void kcsan_save_irqtrace(struct task_struct *task); +void kcsan_restore_irqtrace(struct task_struct *task); + /* * Initialize debugfs file. */ diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c index 6b2fb1a6d8cd..9d07e175de0f 100644 --- a/kernel/kcsan/report.c +++ b/kernel/kcsan/report.c @@ -308,6 +308,9 @@ static void print_verbose_info(struct task_struct *task) if (!task) return; + /* Restore IRQ state trace for printing. */ + kcsan_restore_irqtrace(task); + pr_err("\n"); debug_show_held_locks(task); print_irqtrace_events(task); -- cgit v1.2.3 From 46cbd0b05799e8234b719d18f3a4b27679c4c92e Mon Sep 17 00:00:00 2001 From: Crag Wang Date: Thu, 30 Jul 2020 11:26:09 +0800 Subject: power: supply: wilco_ec: Add long life charging mode This is a long life mode set in the factory for extended warranty battery, the power charging rate is customized so that battery at work last longer. Presently switching to a different battery charging mode is through EC PID 0x0710 to configure the battery firmware, this operation will be blocked by EC with failure code 0x01 when PLL mode is already in use. Signed-off-by: Crag Wang Reviewed-by: Mario Limonciello Signed-off-by: Sebastian Reichel --- Documentation/ABI/testing/sysfs-class-power-wilco | 4 ++++ drivers/power/supply/power_supply_sysfs.c | 1 + drivers/power/supply/wilco-charger.c | 5 +++++ include/linux/power_supply.h | 1 + 4 files changed, 11 insertions(+) (limited to 'include') diff --git a/Documentation/ABI/testing/sysfs-class-power-wilco b/Documentation/ABI/testing/sysfs-class-power-wilco index da1d6ffe5e3c..84fde1d0ada0 100644 --- a/Documentation/ABI/testing/sysfs-class-power-wilco +++ b/Documentation/ABI/testing/sysfs-class-power-wilco @@ -14,6 +14,10 @@ Description: Charging begins when level drops below charge_control_start_threshold, and ceases when level is above charge_control_end_threshold. + Long Life: Customized charge rate for last longer battery life. + On Wilco device this mode is pre-configured in the factory + through EC's private PID. Swiching to a different mode will + be denied by Wilco EC when Long Life mode is enabled. 
What: /sys/class/power_supply/wilco-charger/charge_control_start_threshold Date: April 2019 diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index b903cb4dca2b..3d383086018c 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -87,6 +87,7 @@ static const char * const POWER_SUPPLY_CHARGE_TYPE_TEXT[] = { [POWER_SUPPLY_CHARGE_TYPE_STANDARD] = "Standard", [POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE] = "Adaptive", [POWER_SUPPLY_CHARGE_TYPE_CUSTOM] = "Custom", + [POWER_SUPPLY_CHARGE_TYPE_LONGLIFE] = "Long Life", }; static const char * const POWER_SUPPLY_HEALTH_TEXT[] = { diff --git a/drivers/power/supply/wilco-charger.c b/drivers/power/supply/wilco-charger.c index b3c6d7cdd731..98ade073ef05 100644 --- a/drivers/power/supply/wilco-charger.c +++ b/drivers/power/supply/wilco-charger.c @@ -27,6 +27,7 @@ enum charge_mode { CHARGE_MODE_AC = 3, /* Mostly AC use, used for Trickle */ CHARGE_MODE_AUTO = 4, /* Used for Adaptive */ CHARGE_MODE_CUSTOM = 5, /* Used for Custom */ + CHARGE_MODE_LONGLIFE = 6, /* Used for Long Life */ }; #define CHARGE_LOWER_LIMIT_MIN 50 @@ -48,6 +49,8 @@ static int psp_val_to_charge_mode(int psp_val) return CHARGE_MODE_AUTO; case POWER_SUPPLY_CHARGE_TYPE_CUSTOM: return CHARGE_MODE_CUSTOM; + case POWER_SUPPLY_CHARGE_TYPE_LONGLIFE: + return CHARGE_MODE_LONGLIFE; default: return -EINVAL; } @@ -67,6 +70,8 @@ static int charge_mode_to_psp_val(enum charge_mode mode) return POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE; case CHARGE_MODE_CUSTOM: return POWER_SUPPLY_CHARGE_TYPE_CUSTOM; + case CHARGE_MODE_LONGLIFE: + return POWER_SUPPLY_CHARGE_TYPE_LONGLIFE; default: return -EINVAL; } diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index b5ee35d3c304..97cc4b85bf61 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -48,6 +48,7 @@ enum { POWER_SUPPLY_CHARGE_TYPE_STANDARD, /* normal speed */ POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE, /* dynamically adjusted speed */ POWER_SUPPLY_CHARGE_TYPE_CUSTOM, /* use CHARGE_CONTROL_* props */ + POWER_SUPPLY_CHARGE_TYPE_LONGLIFE, /* slow speed, longer life */ }; enum { -- cgit v1.2.3 From be1213a341a289afc51f89181c310e368fba0b66 Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 21 Jul 2020 09:58:13 +0200 Subject: drm/ttm: remove TTM_MEMTYPE_FLAG_FIXED v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead use a boolean field in the memory manager structure. Also invert the meaning of the field since the use of a TT structure is the special case here. v2: cleanup zero init. 
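Condensed, the driver-side pattern after this change (abridged from the amdgpu and radeon hunks below) is that domains backed by a struct ttm_tt set the new field explicitly, while fixed on-card VRAM simply leaves the zero-initialized field false instead of setting TTM_MEMTYPE_FLAG_FIXED:

	switch (type) {
	case TTM_PL_TT:		/* GTT: backed by a ttm_tt */
		man->use_tt = true;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:	/* fixed on-card memory: use_tt stays false */
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	}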
Signed-off-by: Christian König Reviewed-by: Daniel Vetter Reviewed-by: Thomas Zimmermann Reviewed-by: Alex Deucher Link: https://patchwork.freedesktop.org/patch/382079/ --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 4 +--- drivers/gpu/drm/drm_gem_vram_helper.c | 1 - drivers/gpu/drm/nouveau/nouveau_bo.c | 4 +--- drivers/gpu/drm/qxl/qxl_ttm.c | 1 - drivers/gpu/drm/radeon/radeon_ttm.c | 3 +-- drivers/gpu/drm/ttm/ttm_bo.c | 19 ++++++++++--------- drivers/gpu/drm/ttm/ttm_bo_util.c | 12 ++++++------ drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 3 ++- include/drm/ttm/ttm_bo_driver.h | 4 +--- 9 files changed, 22 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index e57c49a91b73..406bcb03df48 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -87,15 +87,14 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, break; case TTM_PL_TT: /* GTT memory */ + man->use_tt = true; man->func = &amdgpu_gtt_mgr_func; man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; - man->flags = 0; break; case TTM_PL_VRAM: /* "On-card" video ram */ man->func = &amdgpu_vram_mgr_func; - man->flags = TTM_MEMTYPE_FLAG_FIXED; man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; man->default_caching = TTM_PL_FLAG_WC; break; @@ -104,7 +103,6 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, case AMDGPU_PL_OA: /* On-chip GDS memory*/ man->func = &ttm_bo_manager_func; - man->flags = TTM_MEMTYPE_FLAG_FIXED; man->available_caching = TTM_PL_FLAG_UNCACHED; man->default_caching = TTM_PL_FLAG_UNCACHED; break; diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index be177afdeb9a..801a14c6e9e0 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -1012,7 +1012,6 @@ static int bo_driver_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, break; case TTM_PL_VRAM: man->func = &ttm_bo_manager_func; - man->flags = TTM_MEMTYPE_FLAG_FIXED; man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; man->default_caching = TTM_PL_FLAG_WC; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 53af25020bb2..a3ad66ad3817 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -657,7 +657,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, case TTM_PL_SYSTEM: break; case TTM_PL_VRAM: - man->flags = TTM_MEMTYPE_FLAG_FIXED; man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; man->default_caching = TTM_PL_FLAG_WC; @@ -685,13 +684,12 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, else man->func = &ttm_bo_manager_func; + man->use_tt = true; if (drm->agp.bridge) { - man->flags = 0; man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; man->default_caching = TTM_PL_FLAG_WC; } else { - man->flags = 0; man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; } diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index e9b8c921c1f0..abb9fa4d80cf 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -59,7 +59,6 @@ static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, case TTM_PL_PRIV: /* "On-card" video ram */ man->func = &ttm_bo_manager_func; - man->flags = TTM_MEMTYPE_FLAG_FIXED; 
man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; break; diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index b4cb75361577..9aba18a143e7 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -81,7 +81,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, man->func = &ttm_bo_manager_func; man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; - man->flags = 0; + man->use_tt = true; #if IS_ENABLED(CONFIG_AGP) if (rdev->flags & RADEON_IS_AGP) { if (!rdev->ddev->agp) { @@ -98,7 +98,6 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, case TTM_PL_VRAM: /* "On-card" video ram */ man->func = &ttm_bo_manager_func; - man->flags = TTM_MEMTYPE_FLAG_FIXED; man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; man->default_caching = TTM_PL_FLAG_WC; break; diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index b5608a0087a9..9d316f33e6a6 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -84,7 +84,7 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, struct drm_printer *p drm_printf(p, " has_type: %d\n", man->has_type); drm_printf(p, " use_type: %d\n", man->use_type); - drm_printf(p, " flags: 0x%08X\n", man->flags); + drm_printf(p, " use_tt: %d\n", man->use_tt); drm_printf(p, " size: %llu\n", man->size); drm_printf(p, " available_caching: 0x%08X\n", man->available_caching); drm_printf(p, " default_caching: 0x%08X\n", man->default_caching); @@ -159,7 +159,7 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo, man = &bdev->man[mem->mem_type]; list_add_tail(&bo->lru, &man->lru[bo->priority]); - if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm && + if (man->use_tt && bo->ttm && !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) { list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]); @@ -286,10 +286,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, * Create and bind a ttm if required. */ - if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) { - bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED); - - ret = ttm_tt_create(bo, zero); + if (new_man->use_tt) { + /* Zero init the new TTM structure if the old location should + * have used one as well. + */ + ret = ttm_tt_create(bo, old_man->use_tt); if (ret) goto out_err; @@ -314,8 +315,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, if (bdev->driver->move_notify) bdev->driver->move_notify(bo, evict, mem); - if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && - !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) + if (old_man->use_tt && new_man->use_tt) ret = ttm_bo_move_ttm(bo, ctx, mem); else if (bdev->driver->move) ret = bdev->driver->move(bo, evict, ctx, mem); @@ -340,7 +340,7 @@ moved: out_err: new_man = &bdev->man[bo->mem.mem_type]; - if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) { + if (!new_man->use_tt) { ttm_tt_destroy(bo->ttm); bo->ttm = NULL; } @@ -1673,6 +1673,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, * Initialize the system memory buffer type. * Other types need to be driver / IOCTL initialized. 
*/ + bdev->man[TTM_PL_SYSTEM].use_tt = true; bdev->man[TTM_PL_SYSTEM].available_caching = TTM_PL_MASK_CACHING; bdev->man[TTM_PL_SYSTEM].default_caching = TTM_PL_FLAG_CACHED; ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0); diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 7fb3e0bcbab4..1f502be0b646 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -384,7 +384,7 @@ out2: *old_mem = *new_mem; new_mem->mm_node = NULL; - if (man->flags & TTM_MEMTYPE_FLAG_FIXED) { + if (!man->use_tt) { ttm_tt_destroy(ttm); bo->ttm = NULL; } @@ -645,7 +645,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, if (ret) return ret; - if (man->flags & TTM_MEMTYPE_FLAG_FIXED) { + if (!man->use_tt) { ttm_tt_destroy(bo->ttm); bo->ttm = NULL; } @@ -674,7 +674,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, * bo to be unbound and destroyed. */ - if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) + if (man->use_tt) ghost_obj->ttm = NULL; else bo->ttm = NULL; @@ -730,7 +730,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, * bo to be unbound and destroyed. */ - if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED)) + if (to->use_tt) ghost_obj->ttm = NULL; else bo->ttm = NULL; @@ -738,7 +738,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, dma_resv_unlock(&ghost_obj->base._resv); ttm_bo_put(ghost_obj); - } else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) { + } else if (!from->use_tt) { /** * BO doesn't have a TTM we need to bind/unbind. Just remember @@ -768,7 +768,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, if (ret) return ret; - if (to->flags & TTM_MEMTYPE_FLAG_FIXED) { + if (!to->use_tt) { ttm_tt_destroy(bo->ttm); bo->ttm = NULL; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index b2761a4b4992..d00748ecaf20 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -747,7 +747,6 @@ static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, case TTM_PL_VRAM: /* "On-card" video ram */ man->func = &vmw_thp_func; - man->flags = TTM_MEMTYPE_FLAG_FIXED; man->available_caching = TTM_PL_FLAG_CACHED; man->default_caching = TTM_PL_FLAG_CACHED; break; @@ -761,6 +760,8 @@ static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, man->func = &vmw_gmrid_manager_func; man->available_caching = TTM_PL_FLAG_CACHED; man->default_caching = TTM_PL_FLAG_CACHED; + /* TODO: This is most likely not correct */ + man->use_tt = true; break; default: DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 9b251853afe2..adac4cd0ba23 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -45,8 +45,6 @@ #define TTM_MAX_BO_PRIORITY 4U -#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */ - struct ttm_mem_type_manager; struct ttm_mem_type_manager_func { @@ -173,7 +171,7 @@ struct ttm_mem_type_manager { bool has_type; bool use_type; - uint32_t flags; + bool use_tt; uint64_t size; uint32_t available_caching; uint32_t default_caching; -- cgit v1.2.3 From 1a3fb590856a9d7e8392d970fc07791b6703de94 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 23 Jul 2020 17:13:47 +0200 Subject: drm/ttm: remove the init_mem_type callback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It is a very strange concept to call a function which 
just calls back the caller for the functions parameters. Signed-off-by: Christian König Reviewed-by: Thomas Zimmermann Reviewed-by: Alex Deucher Link: https://patchwork.freedesktop.org/patch/382085/ --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 7 ------- drivers/gpu/drm/drm_gem_vram_helper.c | 7 ------- drivers/gpu/drm/nouveau/nouveau_bo.c | 8 -------- drivers/gpu/drm/qxl/qxl_ttm.c | 7 ------- drivers/gpu/drm/radeon/radeon_ttm.c | 7 ------- drivers/gpu/drm/ttm/ttm_bo.c | 4 ---- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 7 ------- include/drm/ttm/ttm_bo_driver.h | 6 ------ 8 files changed, 53 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 98a77fc4a90c..da6434ea07f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -62,12 +62,6 @@ #define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128 -static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, - struct ttm_mem_type_manager *man) -{ - return 0; -} - static int amdgpu_ttm_init_vram(struct amdgpu_device *adev) { @@ -1727,7 +1721,6 @@ static struct ttm_bo_driver amdgpu_bo_driver = { .ttm_tt_create = &amdgpu_ttm_tt_create, .ttm_tt_populate = &amdgpu_ttm_tt_populate, .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate, - .init_mem_type = &amdgpu_init_mem_type, .eviction_valuable = amdgpu_ttm_bo_eviction_valuable, .evict_flags = &amdgpu_evict_flags, .move = &amdgpu_bo_move, diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index f7f93a49cd7f..5f03c6137ef9 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -1004,12 +1004,6 @@ err_ttm_tt_init: return NULL; } -static int bo_driver_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, - struct ttm_mem_type_manager *man) -{ - return 0; -} - static void bo_driver_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *placement) { @@ -1069,7 +1063,6 @@ static struct ttm_bo_driver bo_driver = { .ttm_tt_create = bo_driver_ttm_tt_create, .ttm_tt_populate = ttm_pool_populate, .ttm_tt_unpopulate = ttm_pool_unpopulate, - .init_mem_type = bo_driver_init_mem_type, .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = bo_driver_evict_flags, .move_notify = bo_driver_move_notify, diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 23ef9b1aaabc..5efc572c14cc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -646,13 +646,6 @@ nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) return nouveau_sgdma_create_ttm(bo, page_flags); } -static int -nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, - struct ttm_mem_type_manager *man) -{ - return 0; -} - static void nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) { @@ -1643,7 +1636,6 @@ struct ttm_bo_driver nouveau_bo_driver = { .ttm_tt_create = &nouveau_ttm_tt_create, .ttm_tt_populate = &nouveau_ttm_tt_populate, .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate, - .init_mem_type = nouveau_bo_init_mem_type, .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = nouveau_bo_evict_flags, .move_notify = nouveau_bo_move_ntfy, diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 852089d7f783..32069e4799f3 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -48,12 +48,6 @@ static struct qxl_device 
*qxl_get_qdev(struct ttm_bo_device *bdev) return qdev; } -static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, - struct ttm_mem_type_manager *man) -{ - return 0; -} - static void qxl_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *placement) { @@ -215,7 +209,6 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo, static struct ttm_bo_driver qxl_bo_driver = { .ttm_tt_create = &qxl_ttm_tt_create, - .init_mem_type = &qxl_init_mem_type, .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = &qxl_evict_flags, .move = &qxl_bo_move, diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index b0b59c553785..f499d02917ac 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -66,12 +66,6 @@ struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev) return rdev; } -static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, - struct ttm_mem_type_manager *man) -{ - return 0; -} - static int radeon_ttm_init_vram(struct radeon_device *rdev) { struct ttm_mem_type_manager *man = &rdev->mman.bdev.man[TTM_PL_VRAM]; @@ -753,7 +747,6 @@ static struct ttm_bo_driver radeon_bo_driver = { .ttm_tt_create = &radeon_ttm_tt_create, .ttm_tt_populate = &radeon_ttm_tt_populate, .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate, - .init_mem_type = &radeon_init_mem_type, .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = &radeon_evict_flags, .move = &radeon_bo_move, diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 9d316f33e6a6..6c02a336a587 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1524,10 +1524,6 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, mutex_init(&man->io_reserve_mutex); spin_lock_init(&man->move_lock); INIT_LIST_HEAD(&man->io_reserve_lru); - - ret = bdev->driver->init_mem_type(bdev, type, man); - if (ret) - return ret; man->bdev = bdev; if (type != TTM_PL_SYSTEM) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index db4b2e2e4edb..0e2897895327 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -736,12 +736,6 @@ out_no_init: return NULL; } -static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, - struct ttm_mem_type_manager *man) -{ - return 0; -} - static void vmw_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *placement) { @@ -817,7 +811,6 @@ struct ttm_bo_driver vmw_bo_driver = { .ttm_tt_create = &vmw_ttm_tt_create, .ttm_tt_populate = &vmw_ttm_populate, .ttm_tt_unpopulate = &vmw_ttm_unpopulate, - .init_mem_type = vmw_init_mem_type, .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = vmw_evict_flags, .move = NULL, diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index adac4cd0ba23..f76f1332fdc5 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -157,7 +157,6 @@ struct ttm_mem_type_manager_func { * @move: The fence of the last pipelined move operation. * * This structure is used to identify and manage memory types for a device. - * It's set up by the ttm_bo_driver::init_mem_type method. */ @@ -203,8 +202,6 @@ struct ttm_mem_type_manager { * struct ttm_bo_driver * * @create_ttm_backend_entry: Callback to create a struct ttm_backend. - * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager - * structure. 
* @evict_flags: Callback to obtain placement flags when a buffer is evicted. * @move: Callback for a driver to hook in accelerated functions to * move a buffer. @@ -247,9 +244,6 @@ struct ttm_bo_driver { */ void (*ttm_tt_unpopulate)(struct ttm_tt *ttm); - int (*init_mem_type)(struct ttm_bo_device *bdev, uint32_t type, - struct ttm_mem_type_manager *man); - /** * struct ttm_bo_driver member eviction_valuable * -- cgit v1.2.3 From ffe8923f109b7ea92c0842c89e61300eefa11c94 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 24 Jul 2020 13:34:46 +0200 Subject: netfilter: nft_compat: make sure xtables destructors have run Pablo Neira found that after recent update of xt_IDLETIMER the iptables-nft tests sometimes show an error. He tracked this down to the delayed cleanup used by nf_tables core: del rule (transaction A) add rule (transaction B) Its possible that by time transaction B (both in same netns) runs, the xt target destructor has not been invoked yet. For native nft expressions this is no problem because all expressions that have such side effects make sure these are handled from the commit phase, rather than async cleanup. For nft_compat however this isn't true. Instead of forcing synchronous behaviour for nft_compat, keep track of the number of outstanding destructor calls. When we attempt to create a new expression, flush the cleanup worker to make sure destructors have completed. With lots of help from Pablo Neira. Reported-by: Pablo Neira Ayso Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tables.h | 2 ++ net/netfilter/nf_tables_api.c | 10 ++++++++-- net/netfilter/nft_compat.c | 36 ++++++++++++++++++++++++++++++++---- 3 files changed, 42 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 6f0f6fca9ac3..2571c09be8bb 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -1498,4 +1498,6 @@ void nft_chain_filter_fini(void); void __init nft_chain_route_init(void); void nft_chain_route_fini(void); + +void nf_tables_trans_destroy_flush_work(void); #endif /* _NET_NF_TABLES_H */ diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 88325b264737..79e4db3cadec 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -7290,6 +7290,12 @@ static void nf_tables_trans_destroy_work(struct work_struct *w) } } +void nf_tables_trans_destroy_flush_work(void) +{ + flush_work(&trans_destroy_work); +} +EXPORT_SYMBOL_GPL(nf_tables_trans_destroy_flush_work); + static int nf_tables_commit_chain_prepare(struct net *net, struct nft_chain *chain) { struct nft_rule *rule; @@ -7472,9 +7478,9 @@ static void nf_tables_commit_release(struct net *net) spin_unlock(&nf_tables_destroy_list_lock); nf_tables_module_autoload_cleanup(net); - mutex_unlock(&net->nft.commit_mutex); - schedule_work(&trans_destroy_work); + + mutex_unlock(&net->nft.commit_mutex); } static int nf_tables_commit(struct net *net, struct sk_buff *skb) diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index aa1a066cb74b..6428856ccbec 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c @@ -27,6 +27,8 @@ struct nft_xt_match_priv { void *info; }; +static refcount_t nft_compat_pending_destroy = REFCOUNT_INIT(1); + static int nft_compat_chain_validate_dependency(const struct nft_ctx *ctx, const char *tablename) { @@ -236,6 +238,15 @@ nft_target_init(const struct nft_ctx *ctx, const struct 
nft_expr *expr, nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv); + /* xtables matches or targets can have side effects, e.g. + * creation/destruction of /proc files. + * The xt ->destroy functions are run asynchronously from + * work queue. If we have pending invocations we thus + * need to wait for those to finish. + */ + if (refcount_read(&nft_compat_pending_destroy) > 1) + nf_tables_trans_destroy_flush_work(); + ret = xt_check_target(&par, size, proto, inv); if (ret < 0) return ret; @@ -247,6 +258,13 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, return 0; } +static void __nft_mt_tg_destroy(struct module *me, const struct nft_expr *expr) +{ + refcount_dec(&nft_compat_pending_destroy); + module_put(me); + kfree(expr->ops); +} + static void nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) { @@ -262,8 +280,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) if (par.target->destroy != NULL) par.target->destroy(&par); - module_put(me); - kfree(expr->ops); + __nft_mt_tg_destroy(me, expr); } static int nft_extension_dump_info(struct sk_buff *skb, int attr, @@ -494,8 +511,7 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr, if (par.match->destroy != NULL) par.match->destroy(&par); - module_put(me); - kfree(expr->ops); + __nft_mt_tg_destroy(me, expr); } static void @@ -700,6 +716,14 @@ static const struct nfnetlink_subsystem nfnl_compat_subsys = { static struct nft_expr_type nft_match_type; +static void nft_mt_tg_deactivate(const struct nft_ctx *ctx, + const struct nft_expr *expr, + enum nft_trans_phase phase) +{ + if (phase == NFT_TRANS_COMMIT) + refcount_inc(&nft_compat_pending_destroy); +} + static const struct nft_expr_ops * nft_match_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) @@ -738,6 +762,7 @@ nft_match_select_ops(const struct nft_ctx *ctx, ops->type = &nft_match_type; ops->eval = nft_match_eval; ops->init = nft_match_init; + ops->deactivate = nft_mt_tg_deactivate, ops->destroy = nft_match_destroy; ops->dump = nft_match_dump; ops->validate = nft_match_validate; @@ -828,6 +853,7 @@ nft_target_select_ops(const struct nft_ctx *ctx, ops->size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize)); ops->init = nft_target_init; ops->destroy = nft_target_destroy; + ops->deactivate = nft_mt_tg_deactivate, ops->dump = nft_target_dump; ops->validate = nft_target_validate; ops->data = target; @@ -891,6 +917,8 @@ static void __exit nft_compat_module_exit(void) nfnetlink_subsys_unregister(&nfnl_compat_subsys); nft_unregister_expr(&nft_target_type); nft_unregister_expr(&nft_match_type); + + WARN_ON_ONCE(refcount_read(&nft_compat_pending_destroy) != 1); } MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFT_COMPAT); -- cgit v1.2.3 From 08ff7209faf21daa01bf66c91c321ce52d4b4bdb Mon Sep 17 00:00:00 2001 From: Cezary Rojewski Date: Fri, 31 Jul 2020 16:41:44 +0200 Subject: ASoC: core: Relocate and expose snd_soc_component_initialize To allow for two-step component registration, expose snd_soc_component_initialize function and move it back to soc-core.c. 
Signed-off-by: Cezary Rojewski Link: https://lore.kernel.org/r/20200731144146.6678-2-cezary.rojewski@intel.com Signed-off-by: Mark Brown --- include/sound/soc-component.h | 3 --- include/sound/soc.h | 3 +++ sound/soc/soc-component.c | 16 ---------------- sound/soc/soc-core.c | 17 +++++++++++++++++ 4 files changed, 20 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h index 8917b15eccae..089ea9441fd1 100644 --- a/include/sound/soc-component.h +++ b/include/sound/soc-component.h @@ -325,9 +325,6 @@ static inline int snd_soc_component_cache_sync( return regcache_sync(component->regmap); } -int snd_soc_component_initialize(struct snd_soc_component *component, - const struct snd_soc_component_driver *driver, - struct device *dev, const char *name); void snd_soc_component_set_aux(struct snd_soc_component *component, struct snd_soc_aux_dev *aux); int snd_soc_component_init(struct snd_soc_component *component); diff --git a/include/sound/soc.h b/include/sound/soc.h index acbb5efb28ef..77a304d36c61 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -414,6 +414,9 @@ static inline int snd_soc_resume(struct device *dev) } #endif int snd_soc_poweroff(struct device *dev); +int snd_soc_component_initialize(struct snd_soc_component *component, + const struct snd_soc_component_driver *driver, + struct device *dev, const char *name); int snd_soc_add_component(struct device *dev, struct snd_soc_component *component, const struct snd_soc_component_driver *component_driver, diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c index dcc89fa8913a..f0b4f4bc44a4 100644 --- a/sound/soc/soc-component.c +++ b/sound/soc/soc-component.c @@ -33,22 +33,6 @@ static inline int _soc_component_ret(struct snd_soc_component *component, return ret; } -int snd_soc_component_initialize(struct snd_soc_component *component, - const struct snd_soc_component_driver *driver, - struct device *dev, const char *name) -{ - INIT_LIST_HEAD(&component->dai_list); - INIT_LIST_HEAD(&component->dobj_list); - INIT_LIST_HEAD(&component->card_list); - mutex_init(&component->io_mutex); - - component->name = name; - component->dev = dev; - component->driver = driver; - - return 0; -} - void snd_soc_component_set_aux(struct snd_soc_component *component, struct snd_soc_aux_dev *aux) { diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index defd96b14c28..36eba1bb1ce1 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -2438,6 +2438,23 @@ static void snd_soc_del_component_unlocked(struct snd_soc_component *component) list_del(&component->list); } +int snd_soc_component_initialize(struct snd_soc_component *component, + const struct snd_soc_component_driver *driver, + struct device *dev, const char *name) +{ + INIT_LIST_HEAD(&component->dai_list); + INIT_LIST_HEAD(&component->dobj_list); + INIT_LIST_HEAD(&component->card_list); + mutex_init(&component->io_mutex); + + component->name = name; + component->dev = dev; + component->driver = driver; + + return 0; +} +EXPORT_SYMBOL_GPL(snd_soc_component_initialize); + int snd_soc_add_component(struct device *dev, struct snd_soc_component *component, const struct snd_soc_component_driver *component_driver, -- cgit v1.2.3 From 7274d4cd8506bbff9bf2d7c2f73b2febff99abef Mon Sep 17 00:00:00 2001 From: Cezary Rojewski Date: Fri, 31 Jul 2020 16:41:45 +0200 Subject: ASoC: core: Simplify snd_soc_component_initialize declaration Move 'name' field initialization responsibility back to 
snd_soc_component_initialize to prepare snd_soc_add_component function for being called separatelly as a second registration step. Signed-off-by: Cezary Rojewski Link: https://lore.kernel.org/r/20200731144146.6678-3-cezary.rojewski@intel.com Signed-off-by: Mark Brown --- include/sound/soc.h | 2 +- sound/soc/soc-core.c | 18 ++++++++---------- 2 files changed, 9 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/sound/soc.h b/include/sound/soc.h index 77a304d36c61..787374362f83 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -416,7 +416,7 @@ static inline int snd_soc_resume(struct device *dev) int snd_soc_poweroff(struct device *dev); int snd_soc_component_initialize(struct snd_soc_component *component, const struct snd_soc_component_driver *driver, - struct device *dev, const char *name); + struct device *dev); int snd_soc_add_component(struct device *dev, struct snd_soc_component *component, const struct snd_soc_component_driver *component_driver, diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 36eba1bb1ce1..d8155402c5e1 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -2440,14 +2440,19 @@ static void snd_soc_del_component_unlocked(struct snd_soc_component *component) int snd_soc_component_initialize(struct snd_soc_component *component, const struct snd_soc_component_driver *driver, - struct device *dev, const char *name) + struct device *dev) { INIT_LIST_HEAD(&component->dai_list); INIT_LIST_HEAD(&component->dobj_list); INIT_LIST_HEAD(&component->card_list); mutex_init(&component->io_mutex); - component->name = name; + component->name = fmt_single_name(dev, &component->id); + if (!component->name) { + dev_err(dev, "ASoC: Failed to allocate name\n"); + return -ENOMEM; + } + component->dev = dev; component->driver = driver; @@ -2461,19 +2466,12 @@ int snd_soc_add_component(struct device *dev, struct snd_soc_dai_driver *dai_drv, int num_dai) { - const char *name = fmt_single_name(dev, &component->id); int ret; int i; - if (!name) { - dev_err(dev, "ASoC: Failed to allocate name\n"); - return -ENOMEM; - } - mutex_lock(&client_mutex); - ret = snd_soc_component_initialize(component, component_driver, - dev, name); + ret = snd_soc_component_initialize(component, component_driver, dev); if (ret) goto err_free; -- cgit v1.2.3 From ea029dd8d0124fcd5db1c7003e87a7bd4ddb3bad Mon Sep 17 00:00:00 2001 From: Cezary Rojewski Date: Fri, 31 Jul 2020 16:41:46 +0200 Subject: ASoC: core: Two step component registration Modify snd_soc_add_component so it calls snd_soc_component_initialize no longer and thus providing true two-step registration. Drivers may choose to change component's fields before actually adding it to ASoC subsystem. 
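For illustration, a hypothetical driver probe using the resulting two-step flow could look like the sketch below; the foo_* names are invented, and the pattern simply mirrors the stm32_adfsdm.c and dmaengine-pcm changes in this patch:

static int foo_platform_probe(struct platform_device *pdev)
{
	struct snd_soc_component *component;
	int ret;

	component = devm_kzalloc(&pdev->dev, sizeof(*component), GFP_KERNEL);
	if (!component)
		return -ENOMEM;

	/* step 1: initialize the component, nothing is registered yet */
	ret = snd_soc_component_initialize(component, &foo_component_driver,
					   &pdev->dev);
	if (ret < 0)
		return ret;

	/* fields may be customized between the two steps */
#ifdef CONFIG_DEBUG_FS
	component->debugfs_prefix = "foo";
#endif

	/* step 2: add the component (no DAIs in this sketch) to ASoC */
	return snd_soc_add_component(component, NULL, 0);
}
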
Signed-off-by: Cezary Rojewski Link: https://lore.kernel.org/r/20200731144146.6678-4-cezary.rojewski@intel.com Signed-off-by: Mark Brown --- include/sound/soc.h | 8 +++----- sound/soc/soc-core.c | 27 +++++++++++++-------------- sound/soc/soc-generic-dmaengine-pcm.c | 14 +++++++++----- sound/soc/stm/stm32_adfsdm.c | 9 +++++++-- 4 files changed, 32 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/include/sound/soc.h b/include/sound/soc.h index 787374362f83..5e3919ffb00c 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -417,11 +417,9 @@ int snd_soc_poweroff(struct device *dev); int snd_soc_component_initialize(struct snd_soc_component *component, const struct snd_soc_component_driver *driver, struct device *dev); -int snd_soc_add_component(struct device *dev, - struct snd_soc_component *component, - const struct snd_soc_component_driver *component_driver, - struct snd_soc_dai_driver *dai_drv, - int num_dai); +int snd_soc_add_component(struct snd_soc_component *component, + struct snd_soc_dai_driver *dai_drv, + int num_dai); int snd_soc_register_component(struct device *dev, const struct snd_soc_component_driver *component_driver, struct snd_soc_dai_driver *dai_drv, int num_dai); diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index d8155402c5e1..fe23e936e2d1 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -2460,22 +2460,16 @@ int snd_soc_component_initialize(struct snd_soc_component *component, } EXPORT_SYMBOL_GPL(snd_soc_component_initialize); -int snd_soc_add_component(struct device *dev, - struct snd_soc_component *component, - const struct snd_soc_component_driver *component_driver, - struct snd_soc_dai_driver *dai_drv, - int num_dai) +int snd_soc_add_component(struct snd_soc_component *component, + struct snd_soc_dai_driver *dai_drv, + int num_dai) { int ret; int i; mutex_lock(&client_mutex); - ret = snd_soc_component_initialize(component, component_driver, dev); - if (ret) - goto err_free; - - if (component_driver->endianness) { + if (component->driver->endianness) { for (i = 0; i < num_dai; i++) { convert_endianness_formats(&dai_drv[i].playback); convert_endianness_formats(&dai_drv[i].capture); @@ -2484,7 +2478,8 @@ int snd_soc_add_component(struct device *dev, ret = snd_soc_register_dais(component, dai_drv, num_dai); if (ret < 0) { - dev_err(dev, "ASoC: Failed to register DAIs: %d\n", ret); + dev_err(component->dev, "ASoC: Failed to register DAIs: %d\n", + ret); goto err_cleanup; } @@ -2502,7 +2497,7 @@ int snd_soc_add_component(struct device *dev, err_cleanup: if (ret < 0) snd_soc_del_component_unlocked(component); -err_free: + mutex_unlock(&client_mutex); if (ret == 0) @@ -2518,13 +2513,17 @@ int snd_soc_register_component(struct device *dev, int num_dai) { struct snd_soc_component *component; + int ret; component = devm_kzalloc(dev, sizeof(*component), GFP_KERNEL); if (!component) return -ENOMEM; - return snd_soc_add_component(dev, component, component_driver, - dai_drv, num_dai); + ret = snd_soc_component_initialize(component, component_driver, dev); + if (ret < 0) + return ret; + + return snd_soc_add_component(component, dai_drv, num_dai); } EXPORT_SYMBOL_GPL(snd_soc_register_component); diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c index d17b4bf1dbe3..fb95c1464e66 100644 --- a/sound/soc/soc-generic-dmaengine-pcm.c +++ b/sound/soc/soc-generic-dmaengine-pcm.c @@ -424,6 +424,7 @@ static void dmaengine_pcm_release_chan(struct dmaengine_pcm *pcm) int snd_dmaengine_pcm_register(struct 
device *dev, const struct snd_dmaengine_pcm_config *config, unsigned int flags) { + const struct snd_soc_component_driver *driver; struct dmaengine_pcm *pcm; int ret; @@ -442,12 +443,15 @@ int snd_dmaengine_pcm_register(struct device *dev, goto err_free_dma; if (config && config->process) - ret = snd_soc_add_component(dev, &pcm->component, - &dmaengine_pcm_component_process, - NULL, 0); + driver = &dmaengine_pcm_component_process; else - ret = snd_soc_add_component(dev, &pcm->component, - &dmaengine_pcm_component, NULL, 0); + driver = &dmaengine_pcm_component; + + ret = snd_soc_component_initialize(&pcm->component, driver, dev); + if (ret) + goto err_free_dma; + + ret = snd_soc_add_component(&pcm->component, NULL, 0); if (ret) goto err_free_dma; diff --git a/sound/soc/stm/stm32_adfsdm.c b/sound/soc/stm/stm32_adfsdm.c index c1433c20b08b..ec27c13af04f 100644 --- a/sound/soc/stm/stm32_adfsdm.c +++ b/sound/soc/stm/stm32_adfsdm.c @@ -344,12 +344,17 @@ static int stm32_adfsdm_probe(struct platform_device *pdev) component = devm_kzalloc(&pdev->dev, sizeof(*component), GFP_KERNEL); if (!component) return -ENOMEM; + + ret = snd_soc_component_initialize(component, + &stm32_adfsdm_soc_platform, + &pdev->dev); + if (ret < 0) + return ret; #ifdef CONFIG_DEBUG_FS component->debugfs_prefix = "pcm"; #endif - ret = snd_soc_add_component(&pdev->dev, component, - &stm32_adfsdm_soc_platform, NULL, 0); + ret = snd_soc_add_component(component, NULL, 0); if (ret < 0) dev_err(&pdev->dev, "%s: Failed to register PCM platform\n", __func__); -- cgit v1.2.3 From f8ace8d915b88bd1bbaac695de94650dbb25c7b4 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 30 Jul 2020 21:25:50 +0200 Subject: tcp: rename request_sock cookie_ts bit to syncookie Nowadays output function has a 'synack_type' argument that tells us when the syn/ack is emitted via syncookies. The request already tells us when timestamps are supported, so check both to detect special timestamp for tcp option encoding is needed. We could remove cookie_ts altogether, but a followup patch would otherwise need to adjust function signatures to pass 'want_cookie' to mptcp core. This way, the 'existing' bit can be used. Suggested-by: Eric Dumazet Signed-off-by: Florian Westphal Signed-off-by: David S. 
Miller --- drivers/crypto/chelsio/chtls/chtls_cm.c | 2 +- include/net/request_sock.h | 2 +- net/ipv4/tcp_input.c | 3 +-- net/ipv4/tcp_output.c | 2 +- 4 files changed, 4 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c index f924c335a195..05520dccd906 100644 --- a/drivers/crypto/chelsio/chtls/chtls_cm.c +++ b/drivers/crypto/chelsio/chtls/chtls_cm.c @@ -1348,7 +1348,7 @@ static void chtls_pass_accept_request(struct sock *sk, oreq->rsk_rcv_wnd = 0; oreq->rsk_window_clamp = 0; - oreq->cookie_ts = 0; + oreq->syncookie = 0; oreq->mss = 0; oreq->ts_recent = 0; diff --git a/include/net/request_sock.h b/include/net/request_sock.h index cf8b33213bbc..b2eb8b4ba697 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h @@ -54,7 +54,7 @@ struct request_sock { struct request_sock *dl_next; u16 mss; u8 num_retrans; /* number of retransmits */ - u8 cookie_ts:1; /* syncookie: encode tcpopts in timestamp */ + u8 syncookie:1; /* syncookie: encode tcpopts in timestamp */ u8 num_timeout:7; /* number of timeouts */ u32 ts_recent; struct timer_list rsk_timer; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index a018bafd7bdf..11a6f128e51c 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -6519,7 +6519,6 @@ static void tcp_openreq_init(struct request_sock *req, struct inet_request_sock *ireq = inet_rsk(req); req->rsk_rcv_wnd = 0; /* So that tcp_send_synack() knows! */ - req->cookie_ts = 0; tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq; tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; tcp_rsk(req)->snt_synack = 0; @@ -6674,6 +6673,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, if (!req) goto drop; + req->syncookie = want_cookie; tcp_rsk(req)->af_specific = af_ops; tcp_rsk(req)->ts_off = 0; #if IS_ENABLED(CONFIG_MPTCP) @@ -6739,7 +6739,6 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, if (want_cookie) { isn = cookie_init_sequence(af_ops, sk, skb, &req->mss); - req->cookie_ts = tmp_opt.tstamp_ok; if (!tmp_opt.tstamp_ok) inet_rsk(req)->ecn_ok = 0; } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index d8f16f6a9b02..85ff417bda7f 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -3393,7 +3393,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, memset(&opts, 0, sizeof(opts)); now = tcp_clock_ns(); #ifdef CONFIG_SYN_COOKIES - if (unlikely(req->cookie_ts)) + if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok)) skb->skb_mstamp_ns = cookie_init_timestamp(req, now); else #endif -- cgit v1.2.3 From 08b8d080982fec354173d3fd28a3106a719b8950 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 30 Jul 2020 21:25:53 +0200 Subject: mptcp: rename and export mptcp_subflow_request_sock_ops syncookie code path needs to create an mptcp request sock. Prepare for this and add mptcp prefix plus needed export of ops struct. Signed-off-by: Florian Westphal Reviewed-by: Mat Martineau Signed-off-by: David S. 
Miller --- include/net/mptcp.h | 1 + net/mptcp/subflow.c | 11 ++++++----- 2 files changed, 7 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/net/mptcp.h b/include/net/mptcp.h index 02158c257bd4..76eb915bf91c 100644 --- a/include/net/mptcp.h +++ b/include/net/mptcp.h @@ -58,6 +58,7 @@ struct mptcp_out_options { }; #ifdef CONFIG_MPTCP +extern struct request_sock_ops mptcp_subflow_request_sock_ops; void mptcp_init(void); diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 091e305a81c8..9b11d2b6ff4d 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -284,7 +284,8 @@ do_reset: tcp_done(sk); } -static struct request_sock_ops subflow_request_sock_ops; +struct request_sock_ops mptcp_subflow_request_sock_ops; +EXPORT_SYMBOL_GPL(mptcp_subflow_request_sock_ops); static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops; static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb) @@ -297,7 +298,7 @@ static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb) if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) goto drop; - return tcp_conn_request(&subflow_request_sock_ops, + return tcp_conn_request(&mptcp_subflow_request_sock_ops, &subflow_request_sock_ipv4_ops, sk, skb); drop: @@ -322,7 +323,7 @@ static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb) if (!ipv6_unicast_destination(skb)) goto drop; - return tcp_conn_request(&subflow_request_sock_ops, + return tcp_conn_request(&mptcp_subflow_request_sock_ops, &subflow_request_sock_ipv6_ops, sk, skb); drop: @@ -1311,8 +1312,8 @@ static int subflow_ops_init(struct request_sock_ops *subflow_ops) void __init mptcp_subflow_init(void) { - subflow_request_sock_ops = tcp_request_sock_ops; - if (subflow_ops_init(&subflow_request_sock_ops) != 0) + mptcp_subflow_request_sock_ops = tcp_request_sock_ops; + if (subflow_ops_init(&mptcp_subflow_request_sock_ops) != 0) panic("MPTCP: failed to init subflow request sock ops\n"); subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops; -- cgit v1.2.3 From c83a47e50d8fd3825a4758158e9edd5acdc74185 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 30 Jul 2020 21:25:54 +0200 Subject: mptcp: subflow: add mptcp_subflow_init_cookie_req helper Will be used to initialize the mptcp request socket when a MP_CAPABLE request was handled in syncookie mode, i.e. when a TCP ACK containing a MP_CAPABLE option is a valid syncookie value. Normally (non-cookie case), MPTCP will generate a unique 32 bit connection ID and stores it in the MPTCP token storage to be able to retrieve the mptcp socket for subflow joining. In syncookie case, we do not want to store any state, so just generate the unique ID and use it in the reply. This means there is a small window where another connection could generate the same token. When Cookie ACK comes back, we check that the token has not been registered in the mean time. If it was, the connection needs to fall back to TCP. Changes in v2: - use req->syncookie instead of passing 'want_cookie' arg to ->init_req() (Eric Dumazet) Signed-off-by: Florian Westphal Reviewed-by: Mat Martineau Signed-off-by: David S. 
Miller --- include/net/mptcp.h | 10 ++++++++++ net/mptcp/protocol.h | 1 + net/mptcp/subflow.c | 50 +++++++++++++++++++++++++++++++++++++++++++++++++- net/mptcp/token.c | 26 ++++++++++++++++++++++++++ 4 files changed, 86 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/mptcp.h b/include/net/mptcp.h index 76eb915bf91c..3525d2822abe 100644 --- a/include/net/mptcp.h +++ b/include/net/mptcp.h @@ -131,6 +131,9 @@ static inline bool mptcp_skb_can_collapse(const struct sk_buff *to, } void mptcp_seq_show(struct seq_file *seq); +int mptcp_subflow_init_cookie_req(struct request_sock *req, + const struct sock *sk_listener, + struct sk_buff *skb); #else static inline void mptcp_init(void) @@ -200,6 +203,13 @@ static inline bool mptcp_skb_can_collapse(const struct sk_buff *to, static inline void mptcp_space(const struct sock *ssk, int *s, int *fs) { } static inline void mptcp_seq_show(struct seq_file *seq) { } + +static inline int mptcp_subflow_init_cookie_req(struct request_sock *req, + const struct sock *sk_listener, + struct sk_buff *skb) +{ + return 0; /* TCP fallback */ +} #endif /* CONFIG_MPTCP */ #if IS_ENABLED(CONFIG_MPTCP_IPV6) diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index beb34b8a5363..d76d3b40d69e 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -400,6 +400,7 @@ void mptcp_token_destroy_request(struct request_sock *req); int mptcp_token_new_connect(struct sock *sk); void mptcp_token_accept(struct mptcp_subflow_request_sock *r, struct mptcp_sock *msk); +bool mptcp_token_exists(u32 token); struct mptcp_sock *mptcp_token_get_sock(u32 token); struct mptcp_sock *mptcp_token_iter_next(const struct net *net, long *s_slot, long *s_num); diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 9b11d2b6ff4d..3d346572d4c9 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -140,18 +140,31 @@ static void subflow_init_req(struct request_sock *req, if (mp_opt.mp_capable && listener->request_mptcp) { int err, retries = 4; + subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq; again: do { get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key)); } while (subflow_req->local_key == 0); + if (unlikely(req->syncookie)) { + mptcp_crypto_key_sha(subflow_req->local_key, + &subflow_req->token, + &subflow_req->idsn); + if (mptcp_token_exists(subflow_req->token)) { + if (retries-- > 0) + goto again; + } else { + subflow_req->mp_capable = 1; + } + return; + } + err = mptcp_token_new_request(req); if (err == 0) subflow_req->mp_capable = 1; else if (retries-- > 0) goto again; - subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq; } else if (mp_opt.mp_join && listener->request_mptcp) { subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq; subflow_req->mp_join = 1; @@ -165,6 +178,41 @@ again: } } +int mptcp_subflow_init_cookie_req(struct request_sock *req, + const struct sock *sk_listener, + struct sk_buff *skb) +{ + struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener); + struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); + struct mptcp_options_received mp_opt; + int err; + + err = __subflow_init_req(req, sk_listener); + if (err) + return err; + + mptcp_get_options(skb, &mp_opt); + + if (mp_opt.mp_capable && mp_opt.mp_join) + return -EINVAL; + + if (mp_opt.mp_capable && listener->request_mptcp) { + if (mp_opt.sndr_key == 0) + return -EINVAL; + + subflow_req->local_key = mp_opt.rcvr_key; + err = mptcp_token_new_request(req); + if (err) + return err; + + subflow_req->mp_capable = 1; + subflow_req->ssn_offset = 
TCP_SKB_CB(skb)->seq - 1; + } + + return 0; +} +EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req); + static void subflow_v4_init_req(struct request_sock *req, const struct sock *sk_listener, struct sk_buff *skb) diff --git a/net/mptcp/token.c b/net/mptcp/token.c index f82410c54653..8b47c4bb1c6b 100644 --- a/net/mptcp/token.c +++ b/net/mptcp/token.c @@ -204,6 +204,32 @@ void mptcp_token_accept(struct mptcp_subflow_request_sock *req, spin_unlock_bh(&bucket->lock); } +bool mptcp_token_exists(u32 token) +{ + struct hlist_nulls_node *pos; + struct token_bucket *bucket; + struct mptcp_sock *msk; + struct sock *sk; + + rcu_read_lock(); + bucket = token_bucket(token); + +again: + sk_nulls_for_each_rcu(sk, pos, &bucket->msk_chain) { + msk = mptcp_sk(sk); + if (READ_ONCE(msk->token) == token) + goto found; + } + if (get_nulls_value(pos) != (token & token_mask)) + goto again; + + rcu_read_unlock(); + return false; +found: + rcu_read_unlock(); + return true; +} + /** * mptcp_token_get_sock - retrieve mptcp connection sock using its token * @token: token of the mptcp connection to retrieve -- cgit v1.2.3 From 6fc8c827dd4fa615965c4eac9bbfd465f6eb8fb4 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 30 Jul 2020 21:25:55 +0200 Subject: tcp: syncookies: create mptcp request socket for ACK cookies with MPTCP option If SYN packet contains MP_CAPABLE option, keep it enabled. Syncokie validation and cookie-based socket creation is changed to instantiate an mptcp request sockets if the ACK contains an MPTCP connection request. Rather than extend both cookie_v4/6_check, add a common helper to create the (mp)tcp request socket. Suggested-by: Paolo Abeni Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- include/net/tcp.h | 2 ++ net/ipv4/syncookies.c | 38 ++++++++++++++++++++++++++++++++++---- net/ipv4/tcp_input.c | 3 --- net/ipv6/syncookies.c | 5 +---- 4 files changed, 37 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/net/tcp.h b/include/net/tcp.h index e0c35d56091f..dbf5c791a6eb 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -469,6 +469,8 @@ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb, int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th, u32 cookie); struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb); +struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops, + struct sock *sk, struct sk_buff *skb); #ifdef CONFIG_SYN_COOKIES /* Syncookies use a monotonic timer which increments every 60 seconds. diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 9a4f6b16c9bc..54838ee2e8d4 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -276,6 +276,39 @@ bool cookie_ecn_ok(const struct tcp_options_received *tcp_opt, } EXPORT_SYMBOL(cookie_ecn_ok); +struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops, + struct sock *sk, + struct sk_buff *skb) +{ + struct tcp_request_sock *treq; + struct request_sock *req; + +#ifdef CONFIG_MPTCP + if (sk_is_mptcp(sk)) + ops = &mptcp_subflow_request_sock_ops; +#endif + + req = inet_reqsk_alloc(ops, sk, false); + if (!req) + return NULL; + +#if IS_ENABLED(CONFIG_MPTCP) + treq = tcp_rsk(req); + treq->is_mptcp = sk_is_mptcp(sk); + if (treq->is_mptcp) { + int err = mptcp_subflow_init_cookie_req(req, sk, skb); + + if (err) { + reqsk_free(req); + return NULL; + } + } +#endif + + return req; +} +EXPORT_SYMBOL_GPL(cookie_tcp_reqsk_alloc); + /* On input, sk is a listener. 
* Output is listener if incoming packet would not create a child * NULL if memory could not be allocated. @@ -326,7 +359,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) goto out; ret = NULL; - req = inet_reqsk_alloc(&tcp_request_sock_ops, sk, false); /* for safety */ + req = cookie_tcp_reqsk_alloc(&tcp_request_sock_ops, sk, skb); if (!req) goto out; @@ -350,9 +383,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) treq->snt_synack = 0; treq->tfo_listener = false; - if (IS_ENABLED(CONFIG_MPTCP)) - treq->is_mptcp = 0; - if (IS_ENABLED(CONFIG_SMC)) ireq->smc_ok = 0; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 11a6f128e51c..739da25b0c23 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -6701,9 +6701,6 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, af_ops->init_req(req, sk, skb); - if (IS_ENABLED(CONFIG_MPTCP) && want_cookie) - tcp_rsk(req)->is_mptcp = 0; - if (security_inet_conn_request(sk, skb, req)) goto drop_and_free; diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 13235a012388..e796a64be308 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -170,7 +170,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) goto out; ret = NULL; - req = inet_reqsk_alloc(&tcp6_request_sock_ops, sk, false); + req = cookie_tcp_reqsk_alloc(&tcp6_request_sock_ops, sk, skb); if (!req) goto out; @@ -178,9 +178,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) treq = tcp_rsk(req); treq->tfo_listener = false; - if (IS_ENABLED(CONFIG_MPTCP)) - treq->is_mptcp = 0; - if (security_inet_conn_request(sk, skb, req)) goto out_free; -- cgit v1.2.3 From 48040793fa6003d211f021c6ad273477bcd90d91 Mon Sep 17 00:00:00 2001 From: Yousuk Seung Date: Thu, 30 Jul 2020 15:44:40 -0700 Subject: tcp: add earliest departure time to SCM_TIMESTAMPING_OPT_STATS This change adds TCP_NLA_EDT to SCM_TIMESTAMPING_OPT_STATS that reports the earliest departure time(EDT) of the timestamped skb. By tracking EDT values of the skb from different timestamps, we can observe when and how much the value changed. This allows to measure the precise delay injected on the sender host e.g. by a bpf-base throttler. Signed-off-by: Yousuk Seung Signed-off-by: Eric Dumazet Acked-by: Neal Cardwell Acked-by: Soheil Hassas Yeganeh Acked-by: Yuchung Cheng Signed-off-by: David S. 
Miller --- include/linux/tcp.h | 3 ++- include/uapi/linux/tcp.h | 1 + net/core/skbuff.c | 2 +- net/ipv4/tcp.c | 6 +++++- 4 files changed, 9 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 527d668a5275..14b62d7df942 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -484,7 +484,8 @@ static inline void tcp_saved_syn_free(struct tcp_sock *tp) tp->saved_syn = NULL; } -struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk); +struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, + const struct sk_buff *orig_skb); static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss) { diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index f2acb2566333..cfcb10b75483 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -313,6 +313,7 @@ enum { TCP_NLA_SRTT, /* smoothed RTT in usecs */ TCP_NLA_TIMEOUT_REHASH, /* Timeout-triggered rehash attempts */ TCP_NLA_BYTES_NOTSENT, /* Bytes in write queue not yet sent */ + TCP_NLA_EDT, /* Earliest departure time (CLOCK_MONOTONIC) */ }; /* for TCP_MD5SIG socket option */ diff --git a/net/core/skbuff.c b/net/core/skbuff.c index b8afefe6f6b6..4e2edfbe0e19 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -4692,7 +4692,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) && sk->sk_protocol == IPPROTO_TCP && sk->sk_type == SOCK_STREAM) { - skb = tcp_get_timestamping_opt_stats(sk); + skb = tcp_get_timestamping_opt_stats(sk, orig_skb); opt_stats = true; } else #endif diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 4afec552f211..c06d2bfd2ec4 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3501,10 +3501,12 @@ static size_t tcp_opt_stats_get_size(void) nla_total_size(sizeof(u32)) + /* TCP_NLA_SRTT */ nla_total_size(sizeof(u16)) + /* TCP_NLA_TIMEOUT_REHASH */ nla_total_size(sizeof(u32)) + /* TCP_NLA_BYTES_NOTSENT */ + nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_EDT */ 0; } -struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk) +struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, + const struct sk_buff *orig_skb) { const struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *stats; @@ -3558,6 +3560,8 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk) nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, tp->timeout_rehash); nla_put_u32(stats, TCP_NLA_BYTES_NOTSENT, max_t(int, 0, tp->write_seq - tp->snd_nxt)); + nla_put_u64_64bit(stats, TCP_NLA_EDT, orig_skb->skb_mstamp_ns, + TCP_NLA_PAD); return stats; } -- cgit v1.2.3 From 829eb208e80d6db95c0201cb8fa00c2f9ad87faf Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Fri, 31 Jul 2020 17:34:01 -0700 Subject: rtnetlink: add support for protodown reason netdev protodown is a mechanism that allows protocols to hold an interface down. It was initially introduced in the kernel to hold links down by a multihoming protocol. There was also an attempt to introduce protodown reason at the time but was rejected. protodown and protodown reason is supported by almost every switching and routing platform. It was ok for a while to live without a protodown reason. But, its become more critical now given more than one protocol may need to keep a link down on a system at the same time. eg: vrrp peer node, port security, multihoming protocol. 
Its common for Network operators and protocol developers to look for such a reason on a networking box (Its also known as errDisable by most networking operators) This patch adds support for link protodown reason attribute. There are two ways to maintain protodown reasons. (a) enumerate every possible reason code in kernel - A protocol developer has to make a request and have that appear in a certain kernel version (b) provide the bits in the kernel, and allow user-space (sysadmin or NOS distributions) to manage the bit-to-reasonname map. - This makes extending reason codes easier (kind of like the iproute2 table to vrf-name map /etc/iproute2/rt_tables.d/) This patch takes approach (b). a few things about the patch: - It treats the protodown reason bits as counter to indicate active protodown users - Since protodown attribute is already an exposed UAPI, the reason is not enforced on a protodown set. Its a no-op if not used. the patch follows the below algorithm: - presence of reason bits set indicates protodown is in use - user can set protodown and protodown reason in a single or multiple setlink operations - setlink operation to clear protodown, will return -EBUSY if there are active protodown reason bits - reason is not included in link dumps if not used example with patched iproute2: $cat /etc/iproute2/protodown_reasons.d/r.conf 0 mlag 1 evpn 2 vrrp 3 psecurity $ip link set dev vxlan0 protodown on protodown_reason vrrp on $ip link set dev vxlan0 protodown_reason mlag on $ip link show 14: vxlan0: mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000 link/ether f6:06:be:17:91:e7 brd ff:ff:ff:ff:ff:ff protodown on $ip link set dev vxlan0 protodown_reason mlag off $ip link set dev vxlan0 protodown off protodown_reason vrrp off Signed-off-by: Roopa Prabhu Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 4 ++ include/uapi/linux/if_link.h | 10 ++++ net/core/dev.c | 25 ++++++++++ net/core/rtnetlink.c | 113 +++++++++++++++++++++++++++++++++++++++++-- 4 files changed, 147 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ac2cd3f49aba..ba0fa6b22787 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2058,6 +2058,8 @@ struct net_device { struct timer_list watchdog_timer; int watchdog_timeo; + u32 proto_down_reason; + struct list_head todo_list; int __percpu *pcpu_refcnt; @@ -3810,6 +3812,8 @@ int dev_get_port_parent_id(struct net_device *dev, bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b); int dev_change_proto_down(struct net_device *dev, bool proto_down); int dev_change_proto_down_generic(struct net_device *dev, bool proto_down); +void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask, + u32 value); struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, int *ret); diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 63af64646358..7fba4de511de 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -170,12 +170,22 @@ enum { IFLA_PROP_LIST, IFLA_ALT_IFNAME, /* Alternative ifname */ IFLA_PERM_ADDRESS, + IFLA_PROTO_DOWN_REASON, __IFLA_MAX }; #define IFLA_MAX (__IFLA_MAX - 1) +enum { + IFLA_PROTO_DOWN_REASON_UNSPEC, + IFLA_PROTO_DOWN_REASON_MASK, /* u32, mask for reason bits */ + IFLA_PROTO_DOWN_REASON_VALUE, /* u32, reason bit value */ + + __IFLA_PROTO_DOWN_REASON_CNT, + IFLA_PROTO_DOWN_REASON_MAX = __IFLA_PROTO_DOWN_REASON_CNT - 1 +}; + /* backwards compatibility for userspace */ #ifndef __KERNEL__ #define IFLA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifinfomsg)))) diff --git a/net/core/dev.c b/net/core/dev.c index 38a6371d9bc5..f7ef0f5c5569 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -8715,6 +8715,31 @@ int dev_change_proto_down_generic(struct net_device *dev, bool proto_down) } EXPORT_SYMBOL(dev_change_proto_down_generic); +/** + * dev_change_proto_down_reason - proto down reason + * + * @dev: device + * @mask: proto down mask + * @value: proto down value + */ +void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask, + u32 value) +{ + int b; + + if (!mask) { + dev->proto_down_reason = value; + } else { + for_each_set_bit(b, &mask, 32) { + if (value & (1 << b)) + dev->proto_down_reason |= BIT(b); + else + dev->proto_down_reason &= ~BIT(b); + } + } +} +EXPORT_SYMBOL(dev_change_proto_down_reason); + u32 __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op, enum bpf_netdev_command cmd) { diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 85a4b0101f76..a54c3e0f2ee1 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1000,6 +1000,16 @@ static size_t rtnl_prop_list_size(const struct net_device *dev) return size; } +static size_t rtnl_proto_down_size(const struct net_device *dev) +{ + size_t size = nla_total_size(1); + + if (dev->proto_down_reason) + size += nla_total_size(0) + nla_total_size(4); + + return size; +} + static noinline size_t if_nlmsg_size(const struct net_device *dev, u32 ext_filter_mask) { @@ -1041,7 +1051,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, + nla_total_size(4) /* IFLA_EVENT */ + 
nla_total_size(4) /* IFLA_NEW_NETNSID */ + nla_total_size(4) /* IFLA_NEW_IFINDEX */ - + nla_total_size(1) /* IFLA_PROTO_DOWN */ + + rtnl_proto_down_size(dev) /* proto down */ + nla_total_size(4) /* IFLA_TARGET_NETNSID */ + nla_total_size(4) /* IFLA_CARRIER_UP_COUNT */ + nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */ @@ -1658,6 +1668,35 @@ nest_cancel: return ret; } +static int rtnl_fill_proto_down(struct sk_buff *skb, + const struct net_device *dev) +{ + struct nlattr *pr; + u32 preason; + + if (nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down)) + goto nla_put_failure; + + preason = dev->proto_down_reason; + if (!preason) + return 0; + + pr = nla_nest_start(skb, IFLA_PROTO_DOWN_REASON); + if (!pr) + return -EMSGSIZE; + + if (nla_put_u32(skb, IFLA_PROTO_DOWN_REASON_VALUE, preason)) { + nla_nest_cancel(skb, pr); + goto nla_put_failure; + } + + nla_nest_end(skb, pr); + return 0; + +nla_put_failure: + return -EMSGSIZE; +} + static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, struct net *src_net, int type, u32 pid, u32 seq, u32 change, @@ -1708,13 +1747,15 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, nla_put_u32(skb, IFLA_CARRIER_CHANGES, atomic_read(&dev->carrier_up_count) + atomic_read(&dev->carrier_down_count)) || - nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down) || nla_put_u32(skb, IFLA_CARRIER_UP_COUNT, atomic_read(&dev->carrier_up_count)) || nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT, atomic_read(&dev->carrier_down_count))) goto nla_put_failure; + if (rtnl_fill_proto_down(skb, dev)) + goto nla_put_failure; + if (event != IFLA_EVENT_NONE) { if (nla_put_u32(skb, IFLA_EVENT, event)) goto nla_put_failure; @@ -1834,6 +1875,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = { [IFLA_ALT_IFNAME] = { .type = NLA_STRING, .len = ALTIFNAMSIZ - 1 }, [IFLA_PERM_ADDRESS] = { .type = NLA_REJECT }, + [IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED }, }; static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { @@ -2483,6 +2525,67 @@ static int do_set_master(struct net_device *dev, int ifindex, return 0; } +static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = { + [IFLA_PROTO_DOWN_REASON_MASK] = { .type = NLA_U32 }, + [IFLA_PROTO_DOWN_REASON_VALUE] = { .type = NLA_U32 }, +}; + +static int do_set_proto_down(struct net_device *dev, + struct nlattr *nl_proto_down, + struct nlattr *nl_proto_down_reason, + struct netlink_ext_ack *extack) +{ + struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1]; + const struct net_device_ops *ops = dev->netdev_ops; + unsigned long mask = 0; + u32 value; + bool proto_down; + int err; + + if (!ops->ndo_change_proto_down) { + NL_SET_ERR_MSG(extack, "Protodown not supported by device"); + return -EOPNOTSUPP; + } + + if (nl_proto_down_reason) { + err = nla_parse_nested_deprecated(pdreason, + IFLA_PROTO_DOWN_REASON_MAX, + nl_proto_down_reason, + ifla_proto_down_reason_policy, + NULL); + if (err < 0) + return err; + + if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) { + NL_SET_ERR_MSG(extack, "Invalid protodown reason value"); + return -EINVAL; + } + + value = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_VALUE]); + + if (pdreason[IFLA_PROTO_DOWN_REASON_MASK]) + mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]); + + dev_change_proto_down_reason(dev, mask, value); + } + + if (nl_proto_down) { + proto_down = nla_get_u8(nl_proto_down); + + /* Dont turn off protodown if there are active reasons */ + if (!proto_down && dev->proto_down_reason) { + NL_SET_ERR_MSG(extack, "Cannot clear 
protodown, active reasons"); + return -EBUSY; + } + err = dev_change_proto_down(dev, + proto_down); + if (err) + return err; + } + + return 0; +} + #define DO_SETLINK_MODIFIED 0x01 /* notify flag means notify + modified. */ #define DO_SETLINK_NOTIFY 0x03 @@ -2771,9 +2874,9 @@ static int do_setlink(const struct sk_buff *skb, } err = 0; - if (tb[IFLA_PROTO_DOWN]) { - err = dev_change_proto_down(dev, - nla_get_u8(tb[IFLA_PROTO_DOWN])); + if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) { + err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN], + tb[IFLA_PROTO_DOWN_REASON], extack); if (err) goto errout; status |= DO_SETLINK_NOTIFY; -- cgit v1.2.3 From f4470cdf108f00533e8079b19434e6cb48c17fa3 Mon Sep 17 00:00:00 2001 From: Valentin Schneider Date: Fri, 31 Jul 2020 20:20:14 +0100 Subject: sched: Document arch_scale_*_capacity() Rather that hide their purpose in some dark, damp corner of Documentation/, add some documentation to the default implementations. Signed-off-by: Valentin Schneider Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20200731192016.7484-2-valentin.schneider@arm.com --- include/linux/sched/topology.h | 10 ++++++++++ kernel/sched/sched.h | 10 ++++++++++ 2 files changed, 20 insertions(+) (limited to 'include') diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 764222d637b7..820511289857 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -217,6 +217,16 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu) #endif /* !CONFIG_SMP */ #ifndef arch_scale_cpu_capacity +/** + * arch_scale_cpu_capacity - get the capacity scale factor of a given CPU. + * @cpu: the CPU in question. + * + * Return: the CPU scale factor normalized against SCHED_CAPACITY_SCALE, i.e. + * + * max_perf(cpu) + * ----------------------------- * SCHED_CAPACITY_SCALE + * max(max_perf(c) : c \in CPUs) + */ static __always_inline unsigned long arch_scale_cpu_capacity(int cpu) { diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 296efd30d8c9..3fd283892761 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2049,6 +2049,16 @@ void arch_scale_freq_tick(void) #endif #ifndef arch_scale_freq_capacity +/** + * arch_scale_freq_capacity - get the frequency scale factor of a given CPU. + * @cpu: the CPU in question. + * + * Return: the frequency scale factor normalized against SCHED_CAPACITY_SCALE, i.e. + * + * f_curr + * ------ * SCHED_CAPACITY_SCALE + * f_max + */ static __always_inline unsigned long arch_scale_freq_capacity(int cpu) { -- cgit v1.2.3 From 7ef5264de773279b9f23b6cc8afb5addb30e970b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Jul 2020 08:10:20 +0200 Subject: modules: mark ref_module static ref_module isn't used anywhere outside of module.c. 
Signed-off-by: Christoph Hellwig Signed-off-by: Jessica Yu --- include/linux/module.h | 1 - kernel/module.c | 6 ++---- 2 files changed, 2 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/module.h b/include/linux/module.h index 2e6670860d27..f1fdbeef2153 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -657,7 +657,6 @@ static inline void __module_get(struct module *module) #define symbol_put_addr(p) do { } while (0) #endif /* CONFIG_MODULE_UNLOAD */ -int ref_module(struct module *a, struct module *b); /* This is a #define so the string doesn't get put in every .o file */ #define module_name(mod) \ diff --git a/kernel/module.c b/kernel/module.c index e8a198588f26..baae0e83d630 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -869,7 +869,7 @@ static int add_module_usage(struct module *a, struct module *b) } /* Module a uses b: caller needs module_mutex() */ -int ref_module(struct module *a, struct module *b) +static int ref_module(struct module *a, struct module *b) { int err; @@ -888,7 +888,6 @@ int ref_module(struct module *a, struct module *b) } return 0; } -EXPORT_SYMBOL_GPL(ref_module); /* Clear the unload stuff of the module. */ static void module_unload_free(struct module *mod) @@ -1169,11 +1168,10 @@ static inline void module_unload_free(struct module *mod) { } -int ref_module(struct module *a, struct module *b) +static int ref_module(struct module *a, struct module *b) { return strong_try_module_get(b); } -EXPORT_SYMBOL_GPL(ref_module); static inline int module_unload_init(struct module *mod) { -- cgit v1.2.3 From 773110470e2fa3839523384ae014f8a723c4d178 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Jul 2020 08:10:21 +0200 Subject: modules: mark find_symbol static find_symbol is only used in module.c. Signed-off-by: Christoph Hellwig Signed-off-by: Jessica Yu --- include/linux/module.h | 11 ----------- kernel/module.c | 3 +-- 2 files changed, 1 insertion(+), 13 deletions(-) (limited to 'include') diff --git a/include/linux/module.h b/include/linux/module.h index f1fdbeef2153..90bdc362be36 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -590,17 +590,6 @@ struct symsearch { bool unused; }; -/* - * Search for an exported symbol by name. - * - * Must be called with module_mutex held or preemption disabled. - */ -const struct kernel_symbol *find_symbol(const char *name, - struct module **owner, - const s32 **crc, - bool gplok, - bool warn); - /* * Walk the exported symbol table * diff --git a/kernel/module.c b/kernel/module.c index baae0e83d630..0f95fb4b3e37 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -585,7 +585,7 @@ static bool find_exported_symbol_in_section(const struct symsearch *syms, /* Find an exported symbol and return it, along with, (optional) crc and * (optional) module which owns it. Needs preempt disabled or module_mutex. 
*/ -const struct kernel_symbol *find_symbol(const char *name, +static const struct kernel_symbol *find_symbol(const char *name, struct module **owner, const s32 **crc, bool gplok, @@ -608,7 +608,6 @@ const struct kernel_symbol *find_symbol(const char *name, pr_debug("Failed to find symbol %s\n", name); return NULL; } -EXPORT_SYMBOL_GPL(find_symbol); /* * Search for module by name: must hold module_mutex (or preempt disabled -- cgit v1.2.3 From a54e04914c211b5678602a46b3ede5d82ec1327d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Jul 2020 08:10:22 +0200 Subject: modules: mark each_symbol_section static each_symbol_section is only used inside of module.c. Signed-off-by: Christoph Hellwig Signed-off-by: Jessica Yu --- include/linux/module.h | 9 --------- kernel/module.c | 3 +-- 2 files changed, 1 insertion(+), 11 deletions(-) (limited to 'include') diff --git a/include/linux/module.h b/include/linux/module.h index 90bdc362be36..b79219eed83c 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -590,15 +590,6 @@ struct symsearch { bool unused; }; -/* - * Walk the exported symbol table - * - * Must be called with module_mutex held or preemption disabled. - */ -bool each_symbol_section(bool (*fn)(const struct symsearch *arr, - struct module *owner, - void *data), void *data); - /* Returns 0 and fills in value, defined and namebuf, or -ERANGE if symnum out of range. */ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, diff --git a/kernel/module.c b/kernel/module.c index 0f95fb4b3e37..c2a099a27b68 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -422,7 +422,7 @@ static bool each_symbol_in_section(const struct symsearch *arr, } /* Returns true as soon as fn returns true, otherwise false. */ -bool each_symbol_section(bool (*fn)(const struct symsearch *arr, +static bool each_symbol_section(bool (*fn)(const struct symsearch *arr, struct module *owner, void *data), void *data) @@ -484,7 +484,6 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr, } return false; } -EXPORT_SYMBOL_GPL(each_symbol_section); struct find_symbol_arg { /* Input */ -- cgit v1.2.3 From cd8732cdcc37d7077c4fa2c966b748c0662b607e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Jul 2020 08:10:25 +0200 Subject: modules: rename the licence field in struct symsearch to license Use the same spelling variant as the rest of the file. 
Signed-off-by: Christoph Hellwig Signed-off-by: Jessica Yu --- include/linux/module.h | 2 +- kernel/module.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/module.h b/include/linux/module.h index b79219eed83c..be04ba2f881d 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -586,7 +586,7 @@ struct symsearch { NOT_GPL_ONLY, GPL_ONLY, WILL_BE_GPL_ONLY, - } licence; + } license; bool unused; }; diff --git a/kernel/module.c b/kernel/module.c index e85d06158fbc..62d817a0dca8 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -504,9 +504,9 @@ static bool check_exported_symbol(const struct symsearch *syms, struct find_symbol_arg *fsa = data; if (!fsa->gplok) { - if (syms->licence == GPL_ONLY) + if (syms->license == GPL_ONLY) return false; - if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) { + if (syms->license == WILL_BE_GPL_ONLY && fsa->warn) { pr_warn("Symbol %s is being used by a non-GPL module, " "which will not be allowed in the future\n", fsa->name); -- cgit v1.2.3 From ef1dac6021cc8ec5de02ce31722bf26ac4ed5523 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Jul 2020 08:10:26 +0200 Subject: modules: return licensing information from find_symbol Report the GPLONLY status through a new argument. Signed-off-by: Christoph Hellwig Signed-off-by: Jessica Yu --- include/linux/module.h | 2 +- kernel/module.c | 16 +++++++++++----- 2 files changed, 12 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/module.h b/include/linux/module.h index be04ba2f881d..30b0f5fcdb3c 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -582,7 +582,7 @@ struct module *find_module(const char *name); struct symsearch { const struct kernel_symbol *start, *stop; const s32 *crcs; - enum { + enum mod_license { NOT_GPL_ONLY, GPL_ONLY, WILL_BE_GPL_ONLY, diff --git a/kernel/module.c b/kernel/module.c index 62d817a0dca8..656f5ff27088 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -495,6 +495,7 @@ struct find_symbol_arg { struct module *owner; const s32 *crc; const struct kernel_symbol *sym; + enum mod_license license; }; static bool check_exported_symbol(const struct symsearch *syms, @@ -528,6 +529,7 @@ static bool check_exported_symbol(const struct symsearch *syms, fsa->owner = owner; fsa->crc = symversion(syms->crcs, symnum); fsa->sym = &syms->start[symnum]; + fsa->license = syms->license; return true; } @@ -587,6 +589,7 @@ static bool find_exported_symbol_in_section(const struct symsearch *syms, static const struct kernel_symbol *find_symbol(const char *name, struct module **owner, const s32 **crc, + enum mod_license *license, bool gplok, bool warn) { @@ -601,6 +604,8 @@ static const struct kernel_symbol *find_symbol(const char *name, *owner = fsa.owner; if (crc) *crc = fsa.crc; + if (license) + *license = fsa.license; return fsa.sym; } @@ -1074,7 +1079,7 @@ void __symbol_put(const char *symbol) struct module *owner; preempt_disable(); - if (!find_symbol(symbol, &owner, NULL, true, false)) + if (!find_symbol(symbol, &owner, NULL, NULL, true, false)) BUG(); module_put(owner); preempt_enable(); @@ -1352,7 +1357,7 @@ static inline int check_modstruct_version(const struct load_info *info, * locking is necessary -- use preempt_disable() to placate lockdep. 
*/ preempt_disable(); - if (!find_symbol("module_layout", NULL, &crc, true, false)) { + if (!find_symbol("module_layout", NULL, &crc, NULL, true, false)) { preempt_enable(); BUG(); } @@ -1436,6 +1441,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod, struct module *owner; const struct kernel_symbol *sym; const s32 *crc; + enum mod_license license; int err; /* @@ -1445,7 +1451,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod, */ sched_annotate_sleep(); mutex_lock(&module_mutex); - sym = find_symbol(name, &owner, &crc, + sym = find_symbol(name, &owner, &crc, &license, !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true); if (!sym) goto unlock; @@ -2213,7 +2219,7 @@ void *__symbol_get(const char *symbol) const struct kernel_symbol *sym; preempt_disable(); - sym = find_symbol(symbol, &owner, NULL, true, true); + sym = find_symbol(symbol, &owner, NULL, NULL, true, true); if (sym && strong_try_module_get(owner)) sym = NULL; preempt_enable(); @@ -2249,7 +2255,7 @@ static int verify_exported_symbols(struct module *mod) for (i = 0; i < ARRAY_SIZE(arr); i++) { for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) { if (find_symbol(kernel_symbol_name(s), &owner, NULL, - true, false)) { + NULL, true, false)) { pr_err("%s: exports duplicate symbol %s" " (owned by %s)\n", mod->name, kernel_symbol_name(s), -- cgit v1.2.3 From 1752f0adea98ef859978c090e0726844348758f9 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 1 Aug 2020 13:36:33 +0300 Subject: fs: optimise kiocb_set_rw_flags() Use a local var to collect flags in kiocb_set_rw_flags(). That spares some memory writes and allows to replace most of the jumps with MOVEcc. Signed-off-by: Pavel Begunkov Reviewed-by: Matthew Wilcox (Oracle) Signed-off-by: Jens Axboe --- include/linux/fs.h | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/fs.h b/include/linux/fs.h index 4090320360f4..e535543d31d9 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3446,22 +3446,28 @@ static inline int iocb_flags(struct file *file) static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags) { + int kiocb_flags = 0; + + if (!flags) + return 0; if (unlikely(flags & ~RWF_SUPPORTED)) return -EOPNOTSUPP; if (flags & RWF_NOWAIT) { if (!(ki->ki_filp->f_mode & FMODE_NOWAIT)) return -EOPNOTSUPP; - ki->ki_flags |= IOCB_NOWAIT; + kiocb_flags |= IOCB_NOWAIT; } if (flags & RWF_HIPRI) - ki->ki_flags |= IOCB_HIPRI; + kiocb_flags |= IOCB_HIPRI; if (flags & RWF_DSYNC) - ki->ki_flags |= IOCB_DSYNC; + kiocb_flags |= IOCB_DSYNC; if (flags & RWF_SYNC) - ki->ki_flags |= (IOCB_DSYNC | IOCB_SYNC); + kiocb_flags |= (IOCB_DSYNC | IOCB_SYNC); if (flags & RWF_APPEND) - ki->ki_flags |= IOCB_APPEND; + kiocb_flags |= IOCB_APPEND; + + ki->ki_flags |= kiocb_flags; return 0; } -- cgit v1.2.3 From 73b11c2ab072d5b0599d1e12cc126f55ee306daf Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 31 Jul 2020 11:28:26 -0700 Subject: bpf: Add support for forced LINK_DETACH command Add LINK_DETACH command to force-detach bpf_link without destroying it. It has the same behavior as auto-detaching of bpf_link due to cgroup dying for bpf_cgroup_link or net_device being destroyed for bpf_xdp_link. In such case, bpf_link is still a valid kernel object, but is defuncts and doesn't hold BPF program attached to corresponding BPF hook. This functionality allows users with enough access rights to manually force-detach attached bpf_link without killing respective owner process. 
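From userspace, the new command is reachable through the bpf(2) syscall using the link_detach attribute added below. The following is a minimal hedged sketch, not part of this series: the helper name and error handling are illustrative, and it assumes UAPI headers that already define BPF_LINK_DETACH.

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Force-detach the BPF program behind an already-opened bpf_link FD. */
static int detach_bpf_link(int link_fd)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.link_detach.link_fd = link_fd;

        /* Returns 0 on success, -1 with errno set on failure. */
        return (int)syscall(__NR_bpf, BPF_LINK_DETACH, &attr, sizeof(attr));
}

The link FD itself stays valid after the call; only the attachment is severed, matching the auto-detach behavior described above.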
This patch implements LINK_DETACH for cgroup, xdp, and netns links, mostly re-using existing link release handling code. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Song Liu Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20200731182830.286260-2-andriin@fb.com --- include/linux/bpf.h | 1 + include/uapi/linux/bpf.h | 5 +++++ kernel/bpf/cgroup.c | 15 ++++++++++++++- kernel/bpf/net_namespace.c | 8 ++++++++ kernel/bpf/syscall.c | 26 ++++++++++++++++++++++++++ net/core/dev.c | 11 ++++++++++- 6 files changed, 64 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 40c5e206ecf2..cef4ef0d2b4e 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -793,6 +793,7 @@ struct bpf_link { struct bpf_link_ops { void (*release)(struct bpf_link *link); void (*dealloc)(struct bpf_link *link); + int (*detach)(struct bpf_link *link); int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog, struct bpf_prog *old_prog); void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index eb5e0c38eb2c..b134e679e9db 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -117,6 +117,7 @@ enum bpf_cmd { BPF_LINK_GET_NEXT_ID, BPF_ENABLE_STATS, BPF_ITER_CREATE, + BPF_LINK_DETACH, }; enum bpf_map_type { @@ -634,6 +635,10 @@ union bpf_attr { __u32 old_prog_fd; } link_update; + struct { + __u32 link_fd; + } link_detach; + struct { /* struct used by BPF_ENABLE_STATS command */ __u32 type; } enable_stats; diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 957cce1d5168..83ff127ef7ae 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -814,6 +814,7 @@ static void bpf_cgroup_link_release(struct bpf_link *link) { struct bpf_cgroup_link *cg_link = container_of(link, struct bpf_cgroup_link, link); + struct cgroup *cg; /* link might have been auto-detached by dying cgroup already, * in that case our work is done here @@ -832,8 +833,12 @@ static void bpf_cgroup_link_release(struct bpf_link *link) WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link, cg_link->type)); + cg = cg_link->cgroup; + cg_link->cgroup = NULL; + mutex_unlock(&cgroup_mutex); - cgroup_put(cg_link->cgroup); + + cgroup_put(cg); } static void bpf_cgroup_link_dealloc(struct bpf_link *link) @@ -844,6 +849,13 @@ static void bpf_cgroup_link_dealloc(struct bpf_link *link) kfree(cg_link); } +static int bpf_cgroup_link_detach(struct bpf_link *link) +{ + bpf_cgroup_link_release(link); + + return 0; +} + static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link, struct seq_file *seq) { @@ -883,6 +895,7 @@ static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link, static const struct bpf_link_ops bpf_cgroup_link_lops = { .release = bpf_cgroup_link_release, .dealloc = bpf_cgroup_link_dealloc, + .detach = bpf_cgroup_link_detach, .update_prog = cgroup_bpf_replace, .show_fdinfo = bpf_cgroup_link_show_fdinfo, .fill_link_info = bpf_cgroup_link_fill_link_info, diff --git a/kernel/bpf/net_namespace.c b/kernel/bpf/net_namespace.c index 71405edd667c..542f275bf252 100644 --- a/kernel/bpf/net_namespace.c +++ b/kernel/bpf/net_namespace.c @@ -142,9 +142,16 @@ static void bpf_netns_link_release(struct bpf_link *link) bpf_prog_array_free(old_array); out_unlock: + net_link->net = NULL; mutex_unlock(&netns_bpf_mutex); } +static int bpf_netns_link_detach(struct bpf_link *link) +{ + bpf_netns_link_release(link); + return 0; +} + static 
void bpf_netns_link_dealloc(struct bpf_link *link) { struct bpf_netns_link *net_link = @@ -228,6 +235,7 @@ static void bpf_netns_link_show_fdinfo(const struct bpf_link *link, static const struct bpf_link_ops bpf_netns_link_ops = { .release = bpf_netns_link_release, .dealloc = bpf_netns_link_dealloc, + .detach = bpf_netns_link_detach, .update_prog = bpf_netns_link_update_prog, .fill_link_info = bpf_netns_link_fill_info, .show_fdinfo = bpf_netns_link_show_fdinfo, diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index cd3d599e9e90..2f343ce15747 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -3991,6 +3991,29 @@ out_put_link: return ret; } +#define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd + +static int link_detach(union bpf_attr *attr) +{ + struct bpf_link *link; + int ret; + + if (CHECK_ATTR(BPF_LINK_DETACH)) + return -EINVAL; + + link = bpf_link_get_from_fd(attr->link_detach.link_fd); + if (IS_ERR(link)) + return PTR_ERR(link); + + if (link->ops->detach) + ret = link->ops->detach(link); + else + ret = -EOPNOTSUPP; + + bpf_link_put(link); + return ret; +} + static int bpf_link_inc_not_zero(struct bpf_link *link) { return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? 0 : -ENOENT; @@ -4240,6 +4263,9 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz case BPF_ITER_CREATE: err = bpf_iter_create(&attr); break; + case BPF_LINK_DETACH: + err = link_detach(&attr); + break; default: err = -EINVAL; break; diff --git a/net/core/dev.c b/net/core/dev.c index a2a57988880a..c8b911b10187 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -8979,12 +8979,20 @@ static void bpf_xdp_link_release(struct bpf_link *link) /* if racing with net_device's tear down, xdp_link->dev might be * already NULL, in which case link was already auto-detached */ - if (xdp_link->dev) + if (xdp_link->dev) { WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link)); + xdp_link->dev = NULL; + } rtnl_unlock(); } +static int bpf_xdp_link_detach(struct bpf_link *link) +{ + bpf_xdp_link_release(link); + return 0; +} + static void bpf_xdp_link_dealloc(struct bpf_link *link) { struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); @@ -9066,6 +9074,7 @@ out_unlock: static const struct bpf_link_ops bpf_xdp_link_lops = { .release = bpf_xdp_link_release, .dealloc = bpf_xdp_link_dealloc, + .detach = bpf_xdp_link_detach, .show_fdinfo = bpf_xdp_link_show_fdinfo, .fill_link_info = bpf_xdp_link_fill_link_info, .update_prog = bpf_xdp_link_update, -- cgit v1.2.3 From c3ee8c65f63799b02e1fb828bac99fd5008fb565 Mon Sep 17 00:00:00 2001 From: Bernard Zhao Date: Sat, 1 Aug 2020 20:02:13 +0800 Subject: drm/panel: remove return value of function drm_panel_add The function "int drm_panel_add(struct drm_panel *panel)" always returns 0, this return value is meaningless. Also, there is no need to check return value which calls "drm_panel_add and", error branch code will never run. 
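For converted drivers, the probe path reduces to an unconditional call. Below is a hedged, hypothetical sketch of the resulting pattern; example_panel and example_panel_funcs are placeholders rather than code from this series, and includes are abbreviated for a kernel context.

#include <linux/device.h>
#include <linux/slab.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>

struct example_panel {
        struct drm_panel panel;
};

/* Callbacks omitted; a real panel would at least provide .get_modes. */
static const struct drm_panel_funcs example_panel_funcs = { };

static int example_panel_probe(struct mipi_dsi_device *dsi)
{
        struct example_panel *ctx;

        ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        drm_panel_init(&ctx->panel, &dsi->dev, &example_panel_funcs,
                       DRM_MODE_CONNECTOR_DSI);

        drm_panel_add(&ctx->panel);     /* returns void now, no error branch */

        return mipi_dsi_attach(dsi);
}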
Signed-off-by: Bernard Zhao Reviewed-by: Linus Walleij Signed-off-by: Sam Ravnborg Link: https://patchwork.freedesktop.org/patch/msgid/20200801120216.8488-1-bernard@vivo.com --- drivers/gpu/drm/drm_panel.c | 6 +----- drivers/gpu/drm/panel/panel-arm-versatile.c | 4 +++- drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c | 6 +----- drivers/gpu/drm/panel/panel-boe-himax8279d.c | 4 +++- drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c | 4 +++- drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c | 4 +--- drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c | 4 +--- drivers/gpu/drm/panel/panel-ilitek-ili9322.c | 4 +++- drivers/gpu/drm/panel/panel-ilitek-ili9881c.c | 4 +--- drivers/gpu/drm/panel/panel-innolux-p079zca.c | 4 +--- drivers/gpu/drm/panel/panel-jdi-lt070me05000.c | 4 ++-- drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c | 4 +++- drivers/gpu/drm/panel/panel-lg-lb035q02.c | 4 +++- drivers/gpu/drm/panel/panel-lg-lg4573.c | 4 +++- drivers/gpu/drm/panel/panel-lvds.c | 4 +--- drivers/gpu/drm/panel/panel-nec-nl8048hl11.c | 4 +++- drivers/gpu/drm/panel/panel-novatek-nt35510.c | 4 +--- drivers/gpu/drm/panel/panel-novatek-nt39016.c | 6 +----- drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c | 4 +++- drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c | 4 +++- drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c | 4 +++- drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c | 6 ++---- drivers/gpu/drm/panel/panel-raydium-rm67191.c | 4 +--- drivers/gpu/drm/panel/panel-ronbo-rb070d30.c | 4 +--- drivers/gpu/drm/panel/panel-samsung-ld9040.c | 4 +++- drivers/gpu/drm/panel/panel-samsung-s6d16d0.c | 4 +--- drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c | 6 +----- drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c | 6 +----- drivers/gpu/drm/panel/panel-samsung-s6e63m0.c | 4 +++- drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c | 6 +----- drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c | 4 +--- drivers/gpu/drm/panel/panel-seiko-43wvf1g.c | 4 +--- drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c | 4 +++- drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c | 4 +++- drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c | 4 +++- drivers/gpu/drm/panel/panel-simple.c | 4 +--- drivers/gpu/drm/panel/panel-sitronix-st7701.c | 4 +--- drivers/gpu/drm/panel/panel-sitronix-st7789v.c | 4 +--- drivers/gpu/drm/panel/panel-sony-acx424akp.c | 4 +--- drivers/gpu/drm/panel/panel-sony-acx565akm.c | 7 +------ drivers/gpu/drm/panel/panel-tpo-td028ttec1.c | 4 +++- drivers/gpu/drm/panel/panel-tpo-td043mtea1.c | 6 +----- drivers/gpu/drm/panel/panel-tpo-tpg110.c | 4 +++- include/drm/drm_panel.h | 2 +- 44 files changed, 82 insertions(+), 111 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c index 8c7bac85a793..b8e9abb537cf 100644 --- a/drivers/gpu/drm/drm_panel.c +++ b/drivers/gpu/drm/drm_panel.c @@ -70,16 +70,12 @@ EXPORT_SYMBOL(drm_panel_init); * * Add a panel to the global registry so that it can be looked up by display * drivers. - * - * Return: 0 on success or a negative error code on failure. 
*/ -int drm_panel_add(struct drm_panel *panel) +void drm_panel_add(struct drm_panel *panel) { mutex_lock(&panel_lock); list_add_tail(&panel->list, &panel_list); mutex_unlock(&panel_lock); - - return 0; } EXPORT_SYMBOL(drm_panel_add); diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c index 47b37fef7ee8..abb0788843c6 100644 --- a/drivers/gpu/drm/panel/panel-arm-versatile.c +++ b/drivers/gpu/drm/panel/panel-arm-versatile.c @@ -349,7 +349,9 @@ static int versatile_panel_probe(struct platform_device *pdev) drm_panel_init(&vpanel->panel, dev, &versatile_panel_drm_funcs, DRM_MODE_CONNECTOR_DPI); - return drm_panel_add(&vpanel->panel); + drm_panel_add(&vpanel->panel); + + return 0; } static const struct of_device_id versatile_panel_match[] = { diff --git a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c index 9a5b7644d756..e95bc9f60b3f 100644 --- a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c +++ b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c @@ -315,11 +315,7 @@ static int tm5p5_nt35596_probe(struct mipi_dsi_device *dsi) return ret; } - ret = drm_panel_add(&ctx->panel); - if (ret < 0) { - dev_err(dev, "Failed to add panel: %d\n", ret); - return ret; - } + drm_panel_add(&ctx->panel); ret = mipi_dsi_attach(dsi); if (ret < 0) { diff --git a/drivers/gpu/drm/panel/panel-boe-himax8279d.c b/drivers/gpu/drm/panel/panel-boe-himax8279d.c index 7c27bd5e3486..d676b4c2a8fa 100644 --- a/drivers/gpu/drm/panel/panel-boe-himax8279d.c +++ b/drivers/gpu/drm/panel/panel-boe-himax8279d.c @@ -895,7 +895,9 @@ static int panel_add(struct panel_info *pinfo) if (ret) return ret; - return drm_panel_add(&pinfo->base); + drm_panel_add(&pinfo->base); + + return 0; } static int panel_probe(struct mipi_dsi_device *dsi) diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c index db5b866357f2..3bd46600a98b 100644 --- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c +++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c @@ -787,7 +787,9 @@ static int boe_panel_add(struct boe_panel *boe) boe->base.funcs = &boe_panel_funcs; boe->base.dev = &boe->dsi->dev; - return drm_panel_add(&boe->base); + drm_panel_add(&boe->base); + + return 0; } static int boe_panel_probe(struct mipi_dsi_device *dsi) diff --git a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c index 54610651ecdb..0b7e82e5ba4e 100644 --- a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c +++ b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c @@ -477,9 +477,7 @@ static int k101_im2ba02_dsi_probe(struct mipi_dsi_device *dsi) if (ret) return ret; - ret = drm_panel_add(&ctx->panel); - if (ret < 0) - return ret; + drm_panel_add(&ctx->panel); dsi->mode_flags = MIPI_DSI_MODE_VIDEO; dsi->format = MIPI_DSI_FMT_RGB888; diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c index 19a6274b10f5..f9edee69fea4 100644 --- a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c +++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c @@ -224,9 +224,7 @@ static int feiyang_dsi_probe(struct mipi_dsi_device *dsi) if (ret) return ret; - ret = drm_panel_add(&ctx->panel); - if (ret < 0) - return ret; + drm_panel_add(&ctx->panel); dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST; dsi->format = MIPI_DSI_FMT_RGB888; diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c 
b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c index 67a64d1999f6..968845894725 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c @@ -892,7 +892,9 @@ static int ili9322_probe(struct spi_device *spi) drm_panel_init(&ili->panel, dev, &ili9322_drm_funcs, DRM_MODE_CONNECTOR_DPI); - return drm_panel_add(&ili->panel); + drm_panel_add(&ili->panel); + + return 0; } static int ili9322_remove(struct spi_device *spi) diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c index 3ed8635a6fbd..066ef6c535df 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c @@ -446,9 +446,7 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi) if (ret) return ret; - ret = drm_panel_add(&ctx->panel); - if (ret < 0) - return ret; + drm_panel_add(&ctx->panel); dsi->mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE; dsi->format = MIPI_DSI_FMT_RGB888; diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c index fdf030f4cf92..1a8e69c64125 100644 --- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c +++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c @@ -475,9 +475,7 @@ static int innolux_panel_add(struct mipi_dsi_device *dsi, if (err) return err; - err = drm_panel_add(&innolux->base); - if (err < 0) - return err; + drm_panel_add(&innolux->base); mipi_dsi_set_drvdata(dsi, innolux); innolux->link = dsi; diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c index 1e3fd6633981..733010b5e4f5 100644 --- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c +++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c @@ -440,9 +440,9 @@ static int jdi_panel_add(struct jdi_panel *jdi) drm_panel_init(&jdi->base, &jdi->dsi->dev, &jdi_panel_funcs, DRM_MODE_CONNECTOR_DSI); - ret = drm_panel_add(&jdi->base); + drm_panel_add(&jdi->base); - return ret; + return 0; } static void jdi_panel_del(struct jdi_panel *jdi) diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c index 0d397af23afe..f42dc2ceeb07 100644 --- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c +++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c @@ -382,7 +382,9 @@ static int kingdisplay_panel_add(struct kingdisplay_panel *kingdisplay) if (err) return err; - return drm_panel_add(&kingdisplay->base); + drm_panel_add(&kingdisplay->base); + + return 0; } static void kingdisplay_panel_del(struct kingdisplay_panel *kingdisplay) diff --git a/drivers/gpu/drm/panel/panel-lg-lb035q02.c b/drivers/gpu/drm/panel/panel-lg-lb035q02.c index 14456b9cd5c0..f3183b68704f 100644 --- a/drivers/gpu/drm/panel/panel-lg-lb035q02.c +++ b/drivers/gpu/drm/panel/panel-lg-lb035q02.c @@ -198,7 +198,9 @@ static int lb035q02_probe(struct spi_device *spi) drm_panel_init(&lcd->panel, &lcd->spi->dev, &lb035q02_funcs, DRM_MODE_CONNECTOR_DPI); - return drm_panel_add(&lcd->panel); + drm_panel_add(&lcd->panel); + + return 0; } static int lb035q02_remove(struct spi_device *spi) diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c index aedc485d0727..8e5160af1de5 100644 --- a/drivers/gpu/drm/panel/panel-lg-lg4573.c +++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c @@ -261,7 +261,9 @@ static int lg4573_probe(struct spi_device *spi) drm_panel_init(&ctx->panel, &spi->dev, &lg4573_drm_funcs, DRM_MODE_CONNECTOR_DPI); - return drm_panel_add(&ctx->panel); + 
drm_panel_add(&ctx->panel); + + return 0; } static int lg4573_remove(struct spi_device *spi) diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c index 5ce3f4a2b7a1..41305c3dcf31 100644 --- a/drivers/gpu/drm/panel/panel-lvds.c +++ b/drivers/gpu/drm/panel/panel-lvds.c @@ -227,9 +227,7 @@ static int panel_lvds_probe(struct platform_device *pdev) if (ret) return ret; - ret = drm_panel_add(&lvds->panel); - if (ret < 0) - return ret; + drm_panel_add(&lvds->panel); dev_set_drvdata(lvds->dev, lvds); return 0; diff --git a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c index f894971c1c7c..6e5ab1debc8b 100644 --- a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c @@ -207,7 +207,9 @@ static int nl8048_probe(struct spi_device *spi) drm_panel_init(&lcd->panel, &lcd->spi->dev, &nl8048_funcs, DRM_MODE_CONNECTOR_DPI); - return drm_panel_add(&lcd->panel); + drm_panel_add(&lcd->panel); + + return 0; } static int nl8048_remove(struct spi_device *spi) diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c index e98d54df00e7..e67d0955e215 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c @@ -956,9 +956,7 @@ static int nt35510_probe(struct mipi_dsi_device *dsi) nt->panel.backlight = bl; } - ret = drm_panel_add(&nt->panel); - if (ret < 0) - return ret; + drm_panel_add(&nt->panel); ret = mipi_dsi_attach(dsi); if (ret < 0) diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c index 91df050ba3f6..3d15d9925204 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt39016.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c @@ -303,11 +303,7 @@ static int nt39016_probe(struct spi_device *spi) drm_panel_init(&panel->drm_panel, dev, &nt39016_funcs, DRM_MODE_CONNECTOR_DPI); - err = drm_panel_add(&panel->drm_panel); - if (err < 0) { - dev_err(dev, "Failed to register panel"); - return err; - } + drm_panel_add(&panel->drm_panel); return 0; } diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c index ecd76b5391d3..cb5cb27462df 100644 --- a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c +++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c @@ -283,7 +283,9 @@ static int lcd_olinuxino_probe(struct i2c_client *client, if (ret) return ret; - return drm_panel_add(&lcd->panel); + drm_panel_add(&lcd->panel); + + return 0; } static int lcd_olinuxino_remove(struct i2c_client *client) diff --git a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c index 83e5aa47f0d6..45b975dee587 100644 --- a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c +++ b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c @@ -164,7 +164,9 @@ static int osd101t2587_panel_add(struct osd101t2587_panel *osd101t2587) if (ret) return ret; - return drm_panel_add(&osd101t2587->base); + drm_panel_add(&osd101t2587->base); + + return 0; } static int osd101t2587_panel_probe(struct mipi_dsi_device *dsi) diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c index 627dfcf8adb4..3c20beeb1781 100644 --- a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c +++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c @@ -206,7 +206,9 @@ static int wuxga_nt_panel_add(struct wuxga_nt_panel *wuxga_nt) if 
(ret) return ret; - return drm_panel_add(&wuxga_nt->base); + drm_panel_add(&wuxga_nt->base); + + return 0; } static void wuxga_nt_panel_del(struct wuxga_nt_panel *wuxga_nt) diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c index e50ee26474cf..5e9ccefb88f6 100644 --- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c +++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c @@ -361,7 +361,7 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c, struct rpi_touchscreen *ts; struct device_node *endpoint, *dsi_host_node; struct mipi_dsi_host *host; - int ret, ver; + int ver; struct mipi_dsi_device_info info = { .type = RPI_DSI_DRIVER_NAME, .channel = 0, @@ -429,9 +429,7 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c, /* This appears last, as it's what will unblock the DSI host * driver's component bind function. */ - ret = drm_panel_add(&ts->base); - if (ret) - return ret; + drm_panel_add(&ts->base); return 0; diff --git a/drivers/gpu/drm/panel/panel-raydium-rm67191.c b/drivers/gpu/drm/panel/panel-raydium-rm67191.c index 57ff2b1f6361..2ef322582133 100644 --- a/drivers/gpu/drm/panel/panel-raydium-rm67191.c +++ b/drivers/gpu/drm/panel/panel-raydium-rm67191.c @@ -609,9 +609,7 @@ static int rad_panel_probe(struct mipi_dsi_device *dsi) DRM_MODE_CONNECTOR_DSI); dev_set_drvdata(dev, panel); - ret = drm_panel_add(&panel->panel); - if (ret) - return ret; + drm_panel_add(&panel->panel); ret = mipi_dsi_attach(dsi); if (ret) diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c index a7b0b3e39e1a..ea74958d7544 100644 --- a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c +++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c @@ -200,9 +200,7 @@ static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi) if (ret) return ret; - ret = drm_panel_add(&ctx->panel); - if (ret < 0) - return ret; + drm_panel_add(&ctx->panel); dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_LPM; dsi->format = MIPI_DSI_FMT_RGB888; diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c index 9bb2e8c7934a..358168ed8355 100644 --- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c +++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c @@ -354,7 +354,9 @@ static int ld9040_probe(struct spi_device *spi) drm_panel_init(&ctx->panel, dev, &ld9040_drm_funcs, DRM_MODE_CONNECTOR_DPI); - return drm_panel_add(&ctx->panel); + drm_panel_add(&ctx->panel); + + return 0; } static int ld9040_remove(struct spi_device *spi) diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c index f02645d396ac..e88af6f8bf60 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c @@ -212,9 +212,7 @@ static int s6d16d0_probe(struct mipi_dsi_device *dsi) drm_panel_init(&s6->panel, dev, &s6d16d0_drm_funcs, DRM_MODE_CONNECTOR_DSI); - ret = drm_panel_add(&s6->panel); - if (ret < 0) - return ret; + drm_panel_add(&s6->panel); ret = mipi_dsi_attach(dsi); if (ret < 0) diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c index 80ef122e7466..2c84036c6a65 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c @@ -733,9 +733,7 @@ static int s6e3ha2_probe(struct mipi_dsi_device *dsi) drm_panel_init(&ctx->panel, dev, &s6e3ha2_drm_funcs, 
DRM_MODE_CONNECTOR_DSI); - ret = drm_panel_add(&ctx->panel); - if (ret < 0) - goto unregister_backlight; + drm_panel_add(&ctx->panel); ret = mipi_dsi_attach(dsi); if (ret < 0) @@ -745,8 +743,6 @@ static int s6e3ha2_probe(struct mipi_dsi_device *dsi) remove_panel: drm_panel_remove(&ctx->panel); - -unregister_backlight: backlight_device_unregister(ctx->bl_dev); return ret; diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c index 1247656d73bf..7a43eb3545cf 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c @@ -479,9 +479,7 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi) ctx->bl_dev->props.brightness = DEFAULT_BRIGHTNESS; ctx->bl_dev->props.power = FB_BLANK_POWERDOWN; - ret = drm_panel_add(&ctx->panel); - if (ret < 0) - goto unregister_backlight; + drm_panel_add(&ctx->panel); ret = mipi_dsi_attach(dsi); if (ret < 0) @@ -491,8 +489,6 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi) remove_panel: drm_panel_remove(&ctx->panel); - -unregister_backlight: backlight_device_unregister(ctx->bl_dev); return ret; diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c index 64421347bfd4..e086efea2950 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c @@ -479,7 +479,9 @@ static int s6e63m0_probe(struct spi_device *spi) if (ret < 0) return ret; - return drm_panel_add(&ctx->panel); + drm_panel_add(&ctx->panel); + + return 0; } static int s6e63m0_remove(struct spi_device *spi) diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c index 485eabecfcc9..ea63799ff2a1 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c @@ -242,11 +242,7 @@ static int s6e88a0_ams452ef01_probe(struct mipi_dsi_device *dsi) drm_panel_init(&ctx->panel, dev, &s6e88a0_ams452ef01_panel_funcs, DRM_MODE_CONNECTOR_DSI); - ret = drm_panel_add(&ctx->panel); - if (ret < 0) { - dev_err(dev, "Failed to add panel: %d\n", ret); - return ret; - } + drm_panel_add(&ctx->panel); ret = mipi_dsi_attach(dsi); if (ret < 0) { diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c index 8a028d2bd0d6..e36cb1a25318 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c @@ -1020,9 +1020,7 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi) drm_panel_init(&ctx->panel, dev, &s6e8aa0_drm_funcs, DRM_MODE_CONNECTOR_DSI); - ret = drm_panel_add(&ctx->panel); - if (ret < 0) - return ret; + drm_panel_add(&ctx->panel); ret = mipi_dsi_attach(dsi); if (ret < 0) diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c index e417dc4921c2..0ee508576231 100644 --- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c +++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c @@ -258,9 +258,7 @@ static int seiko_panel_probe(struct device *dev, if (err) return err; - err = drm_panel_add(&panel->base); - if (err < 0) - return err; + drm_panel_add(&panel->base); dev_set_drvdata(dev, panel); diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c index f07324b705b3..f8cd2a42ed13 100644 --- a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c +++ 
b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c @@ -325,7 +325,9 @@ static int sharp_panel_add(struct sharp_panel *sharp) if (ret) return ret; - return drm_panel_add(&sharp->base); + drm_panel_add(&sharp->base); + + return 0; } static void sharp_panel_del(struct sharp_panel *sharp) diff --git a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c index d7bf13b9e1d6..94992f45113a 100644 --- a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c @@ -187,7 +187,9 @@ static int ls037v7dw01_probe(struct platform_device *pdev) drm_panel_init(&lcd->panel, &pdev->dev, &ls037v7dw01_funcs, DRM_MODE_CONNECTOR_DPI); - return drm_panel_add(&lcd->panel); + drm_panel_add(&lcd->panel); + + return 0; } static int ls037v7dw01_remove(struct platform_device *pdev) diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c index b2e58935529c..16dbf0f353ed 100644 --- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c +++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c @@ -261,7 +261,9 @@ static int sharp_nt_panel_add(struct sharp_nt_panel *sharp_nt) if (ret) return ret; - return drm_panel_add(&sharp_nt->base); + drm_panel_add(&sharp_nt->base); + + return 0; } static void sharp_nt_panel_del(struct sharp_nt_panel *sharp_nt) diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 221859652d82..67ca543e74e8 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -613,9 +613,7 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc) if (err) goto free_ddc; - err = drm_panel_add(&panel->base); - if (err < 0) - goto free_ddc; + drm_panel_add(&panel->base); dev_set_drvdata(dev, panel); diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c index 692041ae4eb6..12462114a52d 100644 --- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c +++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c @@ -380,9 +380,7 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi) if (ret) return ret; - ret = drm_panel_add(&st7701->panel); - if (ret < 0) - return ret; + drm_panel_add(&st7701->panel); mipi_dsi_set_drvdata(dsi, st7701); st7701->dsi = dsi; diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c index 3513ae40efa8..61e565524542 100644 --- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c +++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c @@ -382,9 +382,7 @@ static int st7789v_probe(struct spi_device *spi) if (ret) return ret; - ret = drm_panel_add(&ctx->panel); - if (ret < 0) - return ret; + drm_panel_add(&ctx->panel); return 0; } diff --git a/drivers/gpu/drm/panel/panel-sony-acx424akp.c b/drivers/gpu/drm/panel/panel-sony-acx424akp.c index 97a1b4790d3c..57575c40f2aa 100644 --- a/drivers/gpu/drm/panel/panel-sony-acx424akp.c +++ b/drivers/gpu/drm/panel/panel-sony-acx424akp.c @@ -504,9 +504,7 @@ static int acx424akp_probe(struct mipi_dsi_device *dsi) acx->bl->props.brightness = 512; acx->bl->props.power = FB_BLANK_POWERDOWN; - ret = drm_panel_add(&acx->panel); - if (ret < 0) - return ret; + drm_panel_add(&acx->panel); ret = mipi_dsi_attach(dsi); if (ret < 0) { diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c index fc6a7e451abe..e95fdfb16b6c 100644 --- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c +++ 
b/drivers/gpu/drm/panel/panel-sony-acx565akm.c @@ -650,12 +650,7 @@ static int acx565akm_probe(struct spi_device *spi) drm_panel_init(&lcd->panel, &lcd->spi->dev, &acx565akm_funcs, DRM_MODE_CONNECTOR_DPI); - ret = drm_panel_add(&lcd->panel); - if (ret < 0) { - if (lcd->has_bc) - acx565akm_backlight_cleanup(lcd); - return ret; - } + drm_panel_add(&lcd->panel); return 0; } diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c index 58d683cc5215..037c14fd6bac 100644 --- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c @@ -350,7 +350,9 @@ static int td028ttec1_probe(struct spi_device *spi) if (ret) return ret; - return drm_panel_add(&lcd->panel); + drm_panel_add(&lcd->panel); + + return 0; } static int td028ttec1_remove(struct spi_device *spi) diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c index 9b2a356c4d9a..49e6c9386258 100644 --- a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c @@ -460,11 +460,7 @@ static int td043mtea1_probe(struct spi_device *spi) drm_panel_init(&lcd->panel, &lcd->spi->dev, &td043mtea1_funcs, DRM_MODE_CONNECTOR_DPI); - ret = drm_panel_add(&lcd->panel); - if (ret < 0) { - sysfs_remove_group(&spi->dev.kobj, &td043mtea1_attr_group); - return ret; - } + drm_panel_add(&lcd->panel); return 0; } diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c index c7a2f0ae5ba5..cd00cfa6ba14 100644 --- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c +++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c @@ -448,7 +448,9 @@ static int tpg110_probe(struct spi_device *spi) spi_set_drvdata(spi, tpg); - return drm_panel_add(&tpg->panel); + drm_panel_add(&tpg->panel); + + return 0; } static int tpg110_remove(struct spi_device *spi) diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h index 6193cb555acc..ff066524cb70 100644 --- a/include/drm/drm_panel.h +++ b/include/drm/drm_panel.h @@ -175,7 +175,7 @@ void drm_panel_init(struct drm_panel *panel, struct device *dev, const struct drm_panel_funcs *funcs, int connector_type); -int drm_panel_add(struct drm_panel *panel); +void drm_panel_add(struct drm_panel *panel); void drm_panel_remove(struct drm_panel *panel); int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector); -- cgit v1.2.3 From c83e2a6e2fbbb7d47ea46fd7cb1f01292f2d0b6e Mon Sep 17 00:00:00 2001 From: Ajay Singh Date: Fri, 17 Jul 2020 05:11:38 +0000 Subject: wilc1000: Move wilc1000 SDIO ID's from driver source to common header file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Moved macros used for Vendor/Device ID from wilc1000 driver to common header file and changed macro name for consistency with other macros. 
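With the constants in the shared include/linux/mmc/sdio_ids.h, code outside the wilc1000 driver can match the part without duplicating the magic numbers. A small hedged illustration follows; the helper is hypothetical and not part of this patch.

#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>

/* True if this SDIO function belongs to a Microchip WILC1000 part. */
static bool is_wilc1000_func(struct sdio_func *func)
{
        return func->vendor == SDIO_VENDOR_ID_MICROCHIP_WILC &&
               func->device == SDIO_DEVICE_ID_MICROCHIP_WILC1000;
}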
Signed-off-by: Ajay Singh Acked-by: Ulf Hansson Acked-by: Pali Rohár Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200717051134.19160-1-ajay.kathat@microchip.com --- drivers/net/wireless/microchip/wilc1000/sdio.c | 6 ++---- include/linux/mmc/sdio_ids.h | 3 +++ 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c index 36eb589263bf..3ece7b0b0392 100644 --- a/drivers/net/wireless/microchip/wilc1000/sdio.c +++ b/drivers/net/wireless/microchip/wilc1000/sdio.c @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -15,11 +16,8 @@ #define SDIO_MODALIAS "wilc1000_sdio" -#define SDIO_VENDOR_ID_WILC 0x0296 -#define SDIO_DEVICE_ID_WILC 0x5347 - static const struct sdio_device_id wilc_sdio_ids[] = { - { SDIO_DEVICE(SDIO_VENDOR_ID_WILC, SDIO_DEVICE_ID_WILC) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_MICROCHIP_WILC, SDIO_DEVICE_ID_MICROCHIP_WILC1000) }, { }, }; diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index 15ed8ce9d394..519820d18e62 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -105,6 +105,9 @@ #define SDIO_DEVICE_ID_MEDIATEK_MT7663 0x7663 #define SDIO_DEVICE_ID_MEDIATEK_MT7668 0x7668 +#define SDIO_VENDOR_ID_MICROCHIP_WILC 0x0296 +#define SDIO_DEVICE_ID_MICROCHIP_WILC1000 0x5347 + #define SDIO_VENDOR_ID_SIANO 0x039a #define SDIO_DEVICE_ID_SIANO_NOVA_B0 0x0201 #define SDIO_DEVICE_ID_SIANO_NICE 0x0202 -- cgit v1.2.3 From 614a895fc69497d99cc02c076fa712c75194eab3 Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Thu, 9 Jul 2020 20:07:33 +0200 Subject: mtd: hyperbus: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. 
Klimov Signed-off-by: Richard Weinberger --- drivers/mtd/hyperbus/hbmc-am654.c | 2 +- drivers/mtd/hyperbus/hyperbus-core.c | 2 +- include/linux/mtd/hyperbus.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c index f350a0809f88..e0e33f6bf513 100644 --- a/drivers/mtd/hyperbus/hbmc-am654.c +++ b/drivers/mtd/hyperbus/hbmc-am654.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 // -// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ +// Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/ // Author: Vignesh Raghavendra #include diff --git a/drivers/mtd/hyperbus/hyperbus-core.c b/drivers/mtd/hyperbus/hyperbus-core.c index 32685e8dd278..2f9fc4e17d53 100644 --- a/drivers/mtd/hyperbus/hyperbus-core.c +++ b/drivers/mtd/hyperbus/hyperbus-core.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 // -// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ +// Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/ // Author: Vignesh Raghavendra #include diff --git a/include/linux/mtd/hyperbus.h b/include/linux/mtd/hyperbus.h index 2dfe65964f6e..2129f7d3b6eb 100644 --- a/include/linux/mtd/hyperbus.h +++ b/include/linux/mtd/hyperbus.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 * - * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/ */ #ifndef __LINUX_MTD_HYPERBUS_H__ -- cgit v1.2.3 From f8951902b9daa65ba240ce8a054c727748df2147 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 18 Jul 2020 17:32:08 -0700 Subject: MTD: mtd-abi.h: drop a duplicated word Drop the repeated word "mode" in a comment. Signed-off-by: Randy Dunlap Cc: Miquel Raynal Cc: Richard Weinberger Cc: Vignesh Raghavendra Cc: linux-mtd@lists.infradead.org Signed-off-by: Richard Weinberger --- include/uapi/mtd/mtd-abi.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/mtd/mtd-abi.h b/include/uapi/mtd/mtd-abi.h index 4b48fbf7d343..65b9db936557 100644 --- a/include/uapi/mtd/mtd-abi.h +++ b/include/uapi/mtd/mtd-abi.h @@ -262,7 +262,7 @@ struct mtd_ecc_stats { * @MTD_FILE_MODE_OTP_USER: OTP enabled in user mode * @MTD_FILE_MODE_RAW: OTP disabled, ECC disabled * - * These modes can be set via ioctl(MTDFILEMODE). The mode mode will be retained + * These modes can be set via ioctl(MTDFILEMODE). The mode will be retained * separately for each open file descriptor. * * Note: %MTD_FILE_MODE_RAW provides the same functionality as %MTD_OPS_RAW - -- cgit v1.2.3 From 0c84b7fc973f9220ef8732c430ccc7c92d083184 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 18 Jul 2020 17:29:54 -0700 Subject: MTD: pfow.h: drop a duplicated word Drop the repeated word "can" in a comment. 
Signed-off-by: Randy Dunlap Cc: Miquel Raynal Cc: Richard Weinberger Cc: Vignesh Raghavendra Cc: linux-mtd@lists.infradead.org Signed-off-by: Richard Weinberger --- include/linux/mtd/pfow.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/mtd/pfow.h b/include/linux/mtd/pfow.h index 122f3439e1af..6166e7c60869 100644 --- a/include/linux/mtd/pfow.h +++ b/include/linux/mtd/pfow.h @@ -19,7 +19,7 @@ /* Identification info for LPDDR chip */ #define PFOW_MANUFACTURER_ID 0x0020 #define PFOW_DEVICE_ID 0x0022 -/* Address in PFOW where prog buffer can can be found */ +/* Address in PFOW where prog buffer can be found */ #define PFOW_PROGRAM_BUFFER_OFFSET 0x0040 /* Size of program buffer in words */ #define PFOW_PROGRAM_BUFFER_SIZE 0x0042 -- cgit v1.2.3 From c6fe44d96fc1536af5b11cd859686453d1b7bfd1 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 23 Jul 2020 12:33:41 -0700 Subject: list: add "list_del_init_careful()" to go with "list_empty_careful()" That gives us ordering guarantees around the pair. Signed-off-by: Linus Torvalds --- include/linux/list.h | 20 +++++++++++++++++++- kernel/sched/wait.c | 2 +- mm/filemap.c | 7 +------ 3 files changed, 21 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/list.h b/include/linux/list.h index aff44d34f4e4..0d0d17a10d25 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -282,6 +282,24 @@ static inline int list_empty(const struct list_head *head) return READ_ONCE(head->next) == head; } +/** + * list_del_init_careful - deletes entry from list and reinitialize it. + * @entry: the element to delete from the list. + * + * This is the same as list_del_init(), except designed to be used + * together with list_empty_careful() in a way to guarantee ordering + * of other memory operations. + * + * Any memory operations done before a list_del_init_careful() are + * guaranteed to be visible after a list_empty_careful() test. + */ +static inline void list_del_init_careful(struct list_head *entry) +{ + __list_del_entry(entry); + entry->prev = entry; + smp_store_release(&entry->next, entry); +} + /** * list_empty_careful - tests whether a list is empty and not being modified * @head: the list to test @@ -297,7 +315,7 @@ static inline int list_empty(const struct list_head *head) */ static inline int list_empty_careful(const struct list_head *head) { - struct list_head *next = head->next; + struct list_head *next = smp_load_acquire(&head->next); return (next == head) && (next == head->prev); } diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index ba059fbfc53a..01f5d3020589 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c @@ -389,7 +389,7 @@ int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, i int ret = default_wake_function(wq_entry, mode, sync, key); if (ret) - list_del_init(&wq_entry->entry); + list_del_init_careful(&wq_entry->entry); return ret; } diff --git a/mm/filemap.c b/mm/filemap.c index 8c3d3e233d37..991503bbf922 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1041,13 +1041,8 @@ static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, * since after list_del_init(&wait->entry) the wait entry * might be de-allocated and the process might even have * exited. - * - * We _really_ should have a "list_del_init_careful()" to - * properly pair with the unlocked "list_empty_careful()" - * in finish_wait(). 
*/ - smp_mb(); - list_del_init(&wait->entry); + list_del_init_careful(&wait->entry); return ret; } -- cgit v1.2.3 From e5a52fd2b8cdb700b3c07b030e050a49ef3156b9 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 18 Jul 2020 17:33:17 -0700 Subject: xen/gntdev: gntdev.h: drop a duplicated word Drop the repeated word "of" in a comment. Signed-off-by: Randy Dunlap Reviewed-by: Juergen Gross Cc: Boris Ostrovsky Cc: Juergen Gross Cc: xen-devel@lists.xenproject.org Link: https://lore.kernel.org/r/20200719003317.21454-1-rdunlap@infradead.org Signed-off-by: Juergen Gross --- include/uapi/xen/gntdev.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/xen/gntdev.h b/include/uapi/xen/gntdev.h index fe4423e518c6..9ac5515b9bc2 100644 --- a/include/uapi/xen/gntdev.h +++ b/include/uapi/xen/gntdev.h @@ -66,7 +66,7 @@ struct ioctl_gntdev_map_grant_ref { /* * Removes the grant references from the mapping table of an instance of - * of gntdev. N.B. munmap() must be called on the relevant virtual address(es) + * gntdev. N.B. munmap() must be called on the relevant virtual address(es) * before this ioctl is called, or an error will result. */ #define IOCTL_GNTDEV_UNMAP_GRANT_REF \ -- cgit v1.2.3 From 2d05f56af8f52d52dc614ddf4d51c00ea5afb67f Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 29 Jul 2020 15:41:44 +0200 Subject: fbdev: Remove trailing whitespace Removes trailing whitespaces in several places. Signed-off-by: Thomas Zimmermann Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20200729134148.6855-2-tzimmermann@suse.de --- drivers/video/fbdev/core/fbmem.c | 10 +++++----- include/linux/fb.h | 18 +++++++++--------- 2 files changed, 14 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 30e73ec4ad5c..dd0ccf35f7b7 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -777,7 +777,7 @@ fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) if (info->fbops->fb_read) return info->fbops->fb_read(info, buf, count, ppos); - + total_size = info->screen_size; if (total_size == 0) @@ -842,7 +842,7 @@ fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) if (info->fbops->fb_write) return info->fbops->fb_write(info, buf, count, ppos); - + total_size = info->screen_size; if (total_size == 0) @@ -1061,7 +1061,7 @@ EXPORT_SYMBOL(fb_set_var); int fb_blank(struct fb_info *info, int blank) -{ +{ struct fb_event event; int ret = -EINVAL; @@ -1437,7 +1437,7 @@ out: return res; } -static int +static int fb_release(struct inode *inode, struct file *file) __acquires(&info->lock) __releases(&info->lock) @@ -1627,7 +1627,7 @@ static int do_register_framebuffer(struct fb_info *fb_info) fb_info->pixmap.access_align = 32; fb_info->pixmap.flags = FB_PIXMAP_DEFAULT; } - } + } fb_info->pixmap.offset = 0; if (!fb_info->pixmap.blit_x) diff --git a/include/linux/fb.h b/include/linux/fb.h index 2b530e6d86e4..714187bc13ac 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -124,7 +124,7 @@ struct fb_cursor_user { * Register/unregister for framebuffer events */ -/* The resolution of the passed in fb_info about to change */ +/* The resolution of the passed in fb_info about to change */ #define FB_EVENT_MODE_CHANGE 0x01 #ifdef CONFIG_GUMSTIX_AM200EPD @@ -459,12 +459,12 @@ struct fb_info { #if IS_ENABLED(CONFIG_FB_BACKLIGHT) /* assigned backlight device */ - /* set before framebuffer 
registration, + /* set before framebuffer registration, remove after unregister */ struct backlight_device *bl_dev; /* Backlight level curve */ - struct mutex bl_curve_mutex; + struct mutex bl_curve_mutex; u8 bl_curve[FB_BACKLIGHT_LEVELS]; #endif #ifdef CONFIG_FB_DEFERRED_IO @@ -483,8 +483,8 @@ struct fb_info { char __iomem *screen_base; /* Virtual address */ char *screen_buffer; }; - unsigned long screen_size; /* Amount of ioremapped VRAM or 0 */ - void *pseudo_palette; /* Fake palette of 16 colors */ + unsigned long screen_size; /* Amount of ioremapped VRAM or 0 */ + void *pseudo_palette; /* Fake palette of 16 colors */ #define FBINFO_STATE_RUNNING 0 #define FBINFO_STATE_SUSPENDED 1 u32 state; /* Hardware state i.e suspend */ @@ -587,11 +587,11 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) { * `Generic' versions of the frame buffer device operations */ -extern int fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var); -extern int fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var); +extern int fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var); +extern int fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var); extern int fb_blank(struct fb_info *info, int blank); -extern void cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect); -extern void cfb_copyarea(struct fb_info *info, const struct fb_copyarea *area); +extern void cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect); +extern void cfb_copyarea(struct fb_info *info, const struct fb_copyarea *area); extern void cfb_imageblit(struct fb_info *info, const struct fb_image *image); /* * Drawing operations where framebuffer is in system RAM -- cgit v1.2.3 From 4e56cde15f7d68cf86ff8efff8504497de152475 Mon Sep 17 00:00:00 2001 From: Jouni Malinen Date: Fri, 31 Jul 2020 21:38:30 +0300 Subject: mac80211: Handle special status codes in SAE commit SAE authentication has been extended with H2E (IEEE 802.11 REVmd) and PK (WFA) options. Those extensions use special status code values in the SAE commit messages (Authentication frame with transaction sequence number 1) to identify which extension is in use. mac80211 was interpreting those new values as the AP denying authentication and that resulted in failure to complete SAE authentication in some cases. Fix this by adding exceptions for the new status code values 126 and 127. 
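The rule the fix encodes can be stated as a small predicate. The sketch below is only a hedged restatement of the condition added in the diff; the helper name is illustrative, and mac80211 itself open-codes the check.

#include <linux/ieee80211.h>

/*
 * For SAE, anti-clogging is never a denial, and status codes 126/127
 * (H2E, PK) signal a success variant only in the SAE commit message,
 * i.e. auth transaction sequence number 1.
 */
static bool sae_status_is_success_variant(u16 auth_transaction,
                                          u16 status_code)
{
        if (status_code == WLAN_STATUS_ANTI_CLOG_REQUIRED)
                return true;
        if (auth_transaction != 1)
                return false;
        return status_code == WLAN_STATUS_SAE_HASH_TO_ELEMENT ||
               status_code == WLAN_STATUS_SAE_PK;
}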
Signed-off-by: Jouni Malinen Link: https://lore.kernel.org/r/20200731183830.18735-1-jouni@codeaurora.org Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 2 ++ net/mac80211/mlme.c | 5 ++++- 2 files changed, 6 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 9f732499ea88..c47f43e65a2f 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -2561,6 +2561,8 @@ enum ieee80211_statuscode { /* 802.11ai */ WLAN_STATUS_FILS_AUTHENTICATION_FAILURE = 108, WLAN_STATUS_UNKNOWN_AUTHENTICATION_SERVER = 109, + WLAN_STATUS_SAE_HASH_TO_ELEMENT = 126, + WLAN_STATUS_SAE_PK = 127, }; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 839d0367446c..8b7ca8ddfe20 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2988,7 +2988,10 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); if (auth_alg == WLAN_AUTH_SAE && - status_code == WLAN_STATUS_ANTI_CLOG_REQUIRED) + (status_code == WLAN_STATUS_ANTI_CLOG_REQUIRED || + (auth_transaction == 1 && + (status_code == WLAN_STATUS_SAE_HASH_TO_ELEMENT || + status_code == WLAN_STATUS_SAE_PK)))) return; sdata_info(sdata, "%pM denied authentication (status %d)\n", -- cgit v1.2.3 From 042f649810f61c4a834f3d6d866c567f7f6b3f8c Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 1 Jul 2020 11:54:43 -0400 Subject: libceph: just have osd_req_op_init() return a pointer The caller can just ignore the return. No need for this wrapper that just casts the other function to void. [ idryomov: argument alignment ] Signed-off-by: Jeff Layton Reviewed-by: Ilya Dryomov Signed-off-by: Ilya Dryomov --- include/linux/ceph/osd_client.h | 2 +- net/ceph/osd_client.c | 39 ++++++++++++++++----------------------- 2 files changed, 17 insertions(+), 24 deletions(-) (limited to 'include') diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index c60b59e9291b..83fa08a06507 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -404,7 +404,7 @@ void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc); &__oreq->r_ops[__whch].typ.fld; \ }) -extern void osd_req_op_init(struct ceph_osd_request *osd_req, +struct ceph_osd_req_op *osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, u32 flags); extern void osd_req_op_raw_data_in_pages(struct ceph_osd_request *, diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index db6abb5a5511..e4fbcad6e7d8 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -525,7 +525,7 @@ EXPORT_SYMBOL(ceph_osdc_put_request); static void request_init(struct ceph_osd_request *req) { - /* req only, each op is zeroed in _osd_req_op_init() */ + /* req only, each op is zeroed in osd_req_op_init() */ memset(req, 0, sizeof(*req)); kref_init(&req->r_kref); @@ -746,8 +746,8 @@ EXPORT_SYMBOL(ceph_osdc_alloc_messages); * other information associated with them. It also serves as a * common init routine for all the other init functions, below. 
*/ -static struct ceph_osd_req_op * -_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, +struct ceph_osd_req_op * +osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, u32 flags) { struct ceph_osd_req_op *op; @@ -762,12 +762,6 @@ _osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, return op; } - -void osd_req_op_init(struct ceph_osd_request *osd_req, - unsigned int which, u16 opcode, u32 flags) -{ - (void)_osd_req_op_init(osd_req, which, opcode, flags); -} EXPORT_SYMBOL(osd_req_op_init); void osd_req_op_extent_init(struct ceph_osd_request *osd_req, @@ -775,8 +769,8 @@ void osd_req_op_extent_init(struct ceph_osd_request *osd_req, u64 offset, u64 length, u64 truncate_size, u32 truncate_seq) { - struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, - opcode, 0); + struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which, + opcode, 0); size_t payload_len = 0; BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE && @@ -822,7 +816,7 @@ void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req, BUG_ON(which + 1 >= osd_req->r_num_ops); prev_op = &osd_req->r_ops[which]; - op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags); + op = osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags); /* dup previous one */ op->indata_len = prev_op->indata_len; op->outdata_len = prev_op->outdata_len; @@ -845,7 +839,7 @@ int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which, size_t size; int ret; - op = _osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0); + op = osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0); pagelist = ceph_pagelist_alloc(GFP_NOFS); if (!pagelist) @@ -883,8 +877,8 @@ int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, const char *name, const void *value, size_t size, u8 cmp_op, u8 cmp_mode) { - struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, - opcode, 0); + struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which, + opcode, 0); struct ceph_pagelist *pagelist; size_t payload_len; int ret; @@ -928,7 +922,7 @@ static void osd_req_op_watch_init(struct ceph_osd_request *req, int which, { struct ceph_osd_req_op *op; - op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0); + op = osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0); op->watch.cookie = cookie; op->watch.op = watch_opcode; op->watch.gen = 0; @@ -943,10 +937,9 @@ void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req, u64 expected_write_size, u32 flags) { - struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, - CEPH_OSD_OP_SETALLOCHINT, - 0); + struct ceph_osd_req_op *op; + op = osd_req_op_init(osd_req, which, CEPH_OSD_OP_SETALLOCHINT, 0); op->alloc_hint.expected_object_size = expected_object_size; op->alloc_hint.expected_write_size = expected_write_size; op->alloc_hint.flags = flags; @@ -4799,7 +4792,7 @@ static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which, struct ceph_pagelist *pl; int ret; - op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0); + op = osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0); pl = ceph_pagelist_alloc(GFP_NOIO); if (!pl) @@ -4868,7 +4861,7 @@ static int osd_req_op_notify_init(struct ceph_osd_request *req, int which, struct ceph_pagelist *pl; int ret; - op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0); + op = osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0); op->notify.cookie = cookie; pl = ceph_pagelist_alloc(GFP_NOIO); @@ -5332,8 
+5325,8 @@ static int osd_req_op_copy_from_init(struct ceph_osd_request *req, if (IS_ERR(pages)) return PTR_ERR(pages); - op = _osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM2, - dst_fadvise_flags); + op = osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM2, + dst_fadvise_flags); op->copy_from.snapid = src_snapid; op->copy_from.src_version = src_version; op->copy_from.flags = copy_from_flags; -- cgit v1.2.3 From 94f17c00d6687993101372f996cf6690ec9adf83 Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Wed, 8 Jul 2020 08:53:28 +0200 Subject: libceph: replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. [ idryomov: Do the same for the CRUSH paper and replace ceph.newdream.net with ceph.io. ] Signed-off-by: Alexander A. Klimov Reviewed-by: Ilya Dryomov Signed-off-by: Ilya Dryomov --- fs/ceph/Kconfig | 2 +- include/linux/crush/crush.h | 2 +- net/ceph/Kconfig | 2 +- net/ceph/ceph_hash.c | 2 +- net/ceph/crush/hash.c | 2 +- net/ceph/crush/mapper.c | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig index cf235f6eacf9..471e40156065 100644 --- a/fs/ceph/Kconfig +++ b/fs/ceph/Kconfig @@ -13,7 +13,7 @@ config CEPH_FS scalable file system designed to provide high performance, reliable access to petabytes of storage. - More information at http://ceph.newdream.net/. + More information at https://ceph.io/. If unsure, say N. diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h index 33c16f2de7f6..2f811baf78d2 100644 --- a/include/linux/crush/crush.h +++ b/include/linux/crush/crush.h @@ -17,7 +17,7 @@ * The algorithm was originally described in detail in this paper * (although the algorithm has evolved somewhat since then): * - * http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf + * https://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf * * LGPL2 */ diff --git a/net/ceph/Kconfig b/net/ceph/Kconfig index d7bec7adc267..f36f9a3a4e20 100644 --- a/net/ceph/Kconfig +++ b/net/ceph/Kconfig @@ -13,7 +13,7 @@ config CEPH_LIB common functionality to both the Ceph filesystem and to the rados block device (rbd). - More information at http://ceph.newdream.net/. + More information at https://ceph.io/. If unsure, say N. diff --git a/net/ceph/ceph_hash.c b/net/ceph/ceph_hash.c index 9a5850f264ed..81e1e006c540 100644 --- a/net/ceph/ceph_hash.c +++ b/net/ceph/ceph_hash.c @@ -4,7 +4,7 @@ /* * Robert Jenkin's hash function. - * http://burtleburtle.net/bob/hash/evahash.html + * https://burtleburtle.net/bob/hash/evahash.html * This is in the public domain. 
*/ #define mix(a, b, c) \ diff --git a/net/ceph/crush/hash.c b/net/ceph/crush/hash.c index e5cc603cdb17..fe79f6d2d0db 100644 --- a/net/ceph/crush/hash.c +++ b/net/ceph/crush/hash.c @@ -7,7 +7,7 @@ /* * Robert Jenkins' function for mixing 32-bit values - * http://burtleburtle.net/bob/hash/evahash.html + * https://burtleburtle.net/bob/hash/evahash.html * a, b = random bits, c = input and output */ #define crush_hashmix(a, b, c) do { \ diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c index 3f323ed9df52..07e5614eb3f1 100644 --- a/net/ceph/crush/mapper.c +++ b/net/ceph/crush/mapper.c @@ -298,7 +298,7 @@ static __u64 crush_ln(unsigned int xin) * * for reference, see: * - * http://en.wikipedia.org/wiki/Exponential_distribution#Distribution_of_the_minimum_of_exponential_random_variables + * https://en.wikipedia.org/wiki/Exponential_distribution#Distribution_of_the_minimum_of_exponential_random_variables * */ -- cgit v1.2.3 From 18f473b384a64cef69f166a3e2b73d3d2eca82c6 Mon Sep 17 00:00:00 2001 From: Xiubo Li Date: Thu, 16 Jul 2020 10:05:57 -0400 Subject: ceph: periodically send perf metrics to MDSes This will send the caps/read/write/metadata metrics to any available MDS once per second, which will be the same as the userland client. It will skip the MDS sessions which don't support the metric collection, as the MDSs will close socket connections when they get an unknown type message. We can disable the metric sending via the disable_send_metrics module parameter. [ jlayton: fix up endianness bug in ceph_mdsc_send_metrics() ] URL: https://tracker.ceph.com/issues/43215 Signed-off-by: Xiubo Li Signed-off-by: Jeff Layton Signed-off-by: Ilya Dryomov --- fs/ceph/mds_client.c | 3 + fs/ceph/mds_client.h | 4 +- fs/ceph/metric.c | 148 +++++++++++++++++++++++++++++++++++++++++++ fs/ceph/metric.h | 77 ++++++++++++++++++++++ fs/ceph/super.c | 42 ++++++++++++ fs/ceph/super.h | 2 + include/linux/ceph/ceph_fs.h | 1 + 7 files changed, 276 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index ef8a1179171b..d6cd2e4f0bc8 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -3334,6 +3334,8 @@ static void handle_session(struct ceph_mds_session *session, session->s_state = CEPH_MDS_SESSION_OPEN; session->s_features = features; renewed_caps(mdsc, session, 0); + if (test_bit(CEPHFS_FEATURE_METRIC_COLLECT, &session->s_features)) + metric_schedule_delayed(&mdsc->metric); wake = 1; if (mdsc->stopping) __close_session(mdsc, session); @@ -4725,6 +4727,7 @@ void ceph_mdsc_destroy(struct ceph_fs_client *fsc) ceph_metric_destroy(&mdsc->metric); + flush_delayed_work(&mdsc->metric.delayed_work); fsc->mdsc = NULL; kfree(mdsc); dout("mdsc_destroy %p done\n", mdsc); diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index 6147ff0a1cdf..bc9e95937d7c 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -28,8 +28,9 @@ enum ceph_feature_type { CEPHFS_FEATURE_LAZY_CAP_WANTED, CEPHFS_FEATURE_MULTI_RECONNECT, CEPHFS_FEATURE_DELEG_INO, + CEPHFS_FEATURE_METRIC_COLLECT, - CEPHFS_FEATURE_MAX = CEPHFS_FEATURE_DELEG_INO, + CEPHFS_FEATURE_MAX = CEPHFS_FEATURE_METRIC_COLLECT, }; /* @@ -43,6 +44,7 @@ enum ceph_feature_type { CEPHFS_FEATURE_LAZY_CAP_WANTED, \ CEPHFS_FEATURE_MULTI_RECONNECT, \ CEPHFS_FEATURE_DELEG_INO, \ + CEPHFS_FEATURE_METRIC_COLLECT, \ \ CEPHFS_FEATURE_MAX, \ } diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c index 269eacbd2a15..2466b261fba2 100644 --- a/fs/ceph/metric.c +++ b/fs/ceph/metric.c @@ -1,10 +1,150 @@ /* 
SPDX-License-Identifier: GPL-2.0 */ +#include #include #include #include #include "metric.h" +#include "mds_client.h" + +static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc, + struct ceph_mds_session *s) +{ + struct ceph_metric_head *head; + struct ceph_metric_cap *cap; + struct ceph_metric_read_latency *read; + struct ceph_metric_write_latency *write; + struct ceph_metric_metadata_latency *meta; + struct ceph_client_metric *m = &mdsc->metric; + u64 nr_caps = atomic64_read(&m->total_caps); + struct ceph_msg *msg; + struct timespec64 ts; + s64 sum; + s32 items = 0; + s32 len; + + len = sizeof(*head) + sizeof(*cap) + sizeof(*read) + sizeof(*write) + + sizeof(*meta); + + msg = ceph_msg_new(CEPH_MSG_CLIENT_METRICS, len, GFP_NOFS, true); + if (!msg) { + pr_err("send metrics to mds%d, failed to allocate message\n", + s->s_mds); + return false; + } + + head = msg->front.iov_base; + + /* encode the cap metric */ + cap = (struct ceph_metric_cap *)(head + 1); + cap->type = cpu_to_le32(CLIENT_METRIC_TYPE_CAP_INFO); + cap->ver = 1; + cap->compat = 1; + cap->data_len = cpu_to_le32(sizeof(*cap) - 10); + cap->hit = cpu_to_le64(percpu_counter_sum(&mdsc->metric.i_caps_hit)); + cap->mis = cpu_to_le64(percpu_counter_sum(&mdsc->metric.i_caps_mis)); + cap->total = cpu_to_le64(nr_caps); + items++; + + /* encode the read latency metric */ + read = (struct ceph_metric_read_latency *)(cap + 1); + read->type = cpu_to_le32(CLIENT_METRIC_TYPE_READ_LATENCY); + read->ver = 1; + read->compat = 1; + read->data_len = cpu_to_le32(sizeof(*read) - 10); + sum = m->read_latency_sum; + jiffies_to_timespec64(sum, &ts); + read->sec = cpu_to_le32(ts.tv_sec); + read->nsec = cpu_to_le32(ts.tv_nsec); + items++; + + /* encode the write latency metric */ + write = (struct ceph_metric_write_latency *)(read + 1); + write->type = cpu_to_le32(CLIENT_METRIC_TYPE_WRITE_LATENCY); + write->ver = 1; + write->compat = 1; + write->data_len = cpu_to_le32(sizeof(*write) - 10); + sum = m->write_latency_sum; + jiffies_to_timespec64(sum, &ts); + write->sec = cpu_to_le32(ts.tv_sec); + write->nsec = cpu_to_le32(ts.tv_nsec); + items++; + + /* encode the metadata latency metric */ + meta = (struct ceph_metric_metadata_latency *)(write + 1); + meta->type = cpu_to_le32(CLIENT_METRIC_TYPE_METADATA_LATENCY); + meta->ver = 1; + meta->compat = 1; + meta->data_len = cpu_to_le32(sizeof(*meta) - 10); + sum = m->metadata_latency_sum; + jiffies_to_timespec64(sum, &ts); + meta->sec = cpu_to_le32(ts.tv_sec); + meta->nsec = cpu_to_le32(ts.tv_nsec); + items++; + + put_unaligned_le32(items, &head->num); + msg->front.iov_len = len; + msg->hdr.version = cpu_to_le16(1); + msg->hdr.compat_version = cpu_to_le16(1); + msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); + dout("client%llu send metrics to mds%d\n", + ceph_client_gid(mdsc->fsc->client), s->s_mds); + ceph_con_send(&s->s_con, msg); + + return true; +} + + +static void metric_get_session(struct ceph_mds_client *mdsc) +{ + struct ceph_mds_session *s; + int i; + + mutex_lock(&mdsc->mutex); + for (i = 0; i < mdsc->max_sessions; i++) { + s = __ceph_lookup_mds_session(mdsc, i); + if (!s) + continue; + + /* + * Skip it if MDS doesn't support the metric collection, + * or the MDS will close the session's socket connection + * directly when it get this message. 
+ */ + if (check_session_state(s) && + test_bit(CEPHFS_FEATURE_METRIC_COLLECT, &s->s_features)) { + mdsc->metric.session = s; + break; + } + + ceph_put_mds_session(s); + } + mutex_unlock(&mdsc->mutex); +} + +static void metric_delayed_work(struct work_struct *work) +{ + struct ceph_client_metric *m = + container_of(work, struct ceph_client_metric, delayed_work.work); + struct ceph_mds_client *mdsc = + container_of(m, struct ceph_mds_client, metric); + + if (mdsc->stopping) + return; + + if (!m->session || !check_session_state(m->session)) { + if (m->session) { + ceph_put_mds_session(m->session); + m->session = NULL; + } + metric_get_session(mdsc); + } + if (m->session) { + ceph_mdsc_send_metrics(mdsc, m->session); + metric_schedule_delayed(m); + } +} int ceph_metric_init(struct ceph_client_metric *m) { @@ -52,6 +192,9 @@ int ceph_metric_init(struct ceph_client_metric *m) m->total_metadatas = 0; m->metadata_latency_sum = 0; + m->session = NULL; + INIT_DELAYED_WORK(&m->delayed_work, metric_delayed_work); + return 0; err_i_caps_mis: @@ -73,6 +216,11 @@ void ceph_metric_destroy(struct ceph_client_metric *m) percpu_counter_destroy(&m->i_caps_hit); percpu_counter_destroy(&m->d_lease_mis); percpu_counter_destroy(&m->d_lease_hit); + + cancel_delayed_work_sync(&m->delayed_work); + + if (m->session) + ceph_put_mds_session(m->session); } static inline void __update_latency(ktime_t *totalp, ktime_t *lsump, diff --git a/fs/ceph/metric.h b/fs/ceph/metric.h index 23a3373d5a3d..fe5d07d2e63a 100644 --- a/fs/ceph/metric.h +++ b/fs/ceph/metric.h @@ -6,6 +6,71 @@ #include #include +extern bool disable_send_metrics; + +enum ceph_metric_type { + CLIENT_METRIC_TYPE_CAP_INFO, + CLIENT_METRIC_TYPE_READ_LATENCY, + CLIENT_METRIC_TYPE_WRITE_LATENCY, + CLIENT_METRIC_TYPE_METADATA_LATENCY, + CLIENT_METRIC_TYPE_DENTRY_LEASE, + + CLIENT_METRIC_TYPE_MAX = CLIENT_METRIC_TYPE_DENTRY_LEASE, +}; + +/* metric caps header */ +struct ceph_metric_cap { + __le32 type; /* ceph metric type */ + + __u8 ver; + __u8 compat; + + __le32 data_len; /* length of sizeof(hit + mis + total) */ + __le64 hit; + __le64 mis; + __le64 total; +} __packed; + +/* metric read latency header */ +struct ceph_metric_read_latency { + __le32 type; /* ceph metric type */ + + __u8 ver; + __u8 compat; + + __le32 data_len; /* length of sizeof(sec + nsec) */ + __le32 sec; + __le32 nsec; +} __packed; + +/* metric write latency header */ +struct ceph_metric_write_latency { + __le32 type; /* ceph metric type */ + + __u8 ver; + __u8 compat; + + __le32 data_len; /* length of sizeof(sec + nsec) */ + __le32 sec; + __le32 nsec; +} __packed; + +/* metric metadata latency header */ +struct ceph_metric_metadata_latency { + __le32 type; /* ceph metric type */ + + __u8 ver; + __u8 compat; + + __le32 data_len; /* length of sizeof(sec + nsec) */ + __le32 sec; + __le32 nsec; +} __packed; + +struct ceph_metric_head { + __le32 num; /* the number of metrics that will be sent */ +} __packed; + /* This is the global metrics */ struct ceph_client_metric { atomic64_t total_dentries; @@ -36,8 +101,20 @@ struct ceph_client_metric { ktime_t metadata_latency_sq_sum; ktime_t metadata_latency_min; ktime_t metadata_latency_max; + + struct ceph_mds_session *session; + struct delayed_work delayed_work; /* delayed work */ }; +static inline void metric_schedule_delayed(struct ceph_client_metric *m) +{ + if (disable_send_metrics) + return; + + /* per second */ + schedule_delayed_work(&m->delayed_work, round_jiffies_relative(HZ)); +} + extern int ceph_metric_init(struct ceph_client_metric *m); 
extern void ceph_metric_destroy(struct ceph_client_metric *m); diff --git a/fs/ceph/super.c b/fs/ceph/super.c index c9784eb1159a..933f5df5da7d 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -27,6 +27,9 @@ #include #include +static DEFINE_SPINLOCK(ceph_fsc_lock); +static LIST_HEAD(ceph_fsc_list); + /* * Ceph superblock operations * @@ -691,6 +694,10 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, if (!fsc->wb_pagevec_pool) goto fail_cap_wq; + spin_lock(&ceph_fsc_lock); + list_add_tail(&fsc->metric_wakeup, &ceph_fsc_list); + spin_unlock(&ceph_fsc_lock); + return fsc; fail_cap_wq: @@ -717,6 +724,10 @@ static void destroy_fs_client(struct ceph_fs_client *fsc) { dout("destroy_fs_client %p\n", fsc); + spin_lock(&ceph_fsc_lock); + list_del(&fsc->metric_wakeup); + spin_unlock(&ceph_fsc_lock); + ceph_mdsc_destroy(fsc); destroy_workqueue(fsc->inode_wq); destroy_workqueue(fsc->cap_wq); @@ -1282,6 +1293,37 @@ static void __exit exit_ceph(void) destroy_caches(); } +static int param_set_metrics(const char *val, const struct kernel_param *kp) +{ + struct ceph_fs_client *fsc; + int ret; + + ret = param_set_bool(val, kp); + if (ret) { + pr_err("Failed to parse sending metrics switch value '%s'\n", + val); + return ret; + } else if (!disable_send_metrics) { + // wake up all the mds clients + spin_lock(&ceph_fsc_lock); + list_for_each_entry(fsc, &ceph_fsc_list, metric_wakeup) { + metric_schedule_delayed(&fsc->mdsc->metric); + } + spin_unlock(&ceph_fsc_lock); + } + + return 0; +} + +static const struct kernel_param_ops param_ops_metrics = { + .set = param_set_metrics, + .get = param_get_bool, +}; + +bool disable_send_metrics = false; +module_param_cb(disable_send_metrics, ¶m_ops_metrics, &disable_send_metrics, 0644); +MODULE_PARM_DESC(disable_send_metrics, "Enable sending perf metrics to ceph cluster (default: on)"); + module_init(init_ceph); module_exit(exit_ceph); diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 5a6cdd39bc10..2dcb6a90c636 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -101,6 +101,8 @@ struct ceph_mount_options { struct ceph_fs_client { struct super_block *sb; + struct list_head metric_wakeup; + struct ceph_mount_options *mount_options; struct ceph_client *client; diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index ebf5ba62b772..455e9b9e2adf 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -130,6 +130,7 @@ struct ceph_dir_layout { #define CEPH_MSG_CLIENT_REQUEST 24 #define CEPH_MSG_CLIENT_REQUEST_FORWARD 25 #define CEPH_MSG_CLIENT_REPLY 26 +#define CEPH_MSG_CLIENT_METRICS 29 #define CEPH_MSG_CLIENT_CAPS 0x310 #define CEPH_MSG_CLIENT_LEASE 0x311 #define CEPH_MSG_CLIENT_SNAP 0x312 -- cgit v1.2.3 From f1f565a26976612121f97464f9245307422d0ce8 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 17 Jul 2020 16:36:04 -0700 Subject: ceph: delete repeated words in fs/ceph/ Drop duplicated words "down" and "the" in fs/ceph/. [ idryomov: merge into a single patch ] Signed-off-by: Randy Dunlap Reviewed-by: Jeff Layton Signed-off-by: Ilya Dryomov --- fs/ceph/super.c | 2 +- fs/ceph/super.h | 2 +- include/linux/ceph/ceph_features.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 933f5df5da7d..585aecea5cad 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -839,7 +839,7 @@ static void destroy_caches(void) } /* - * ceph_umount_begin - initiate forced umount. 
Tear down down the + * ceph_umount_begin - initiate forced umount. Tear down the * mount, skipping steps that may hang while waiting for server(s). */ static void ceph_umount_begin(struct super_block *sb) diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 2dcb6a90c636..9001a896ae8c 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -355,7 +355,7 @@ struct ceph_inode_info { unsigned i_dirty_caps, i_flushing_caps; /* mask of dirtied fields */ /* - * Link to the the auth cap's session's s_cap_dirty list. s_cap_dirty + * Link to the auth cap's session's s_cap_dirty list. s_cap_dirty * is protected by the mdsc->cap_dirty_lock, but each individual item * is also protected by the inode's i_ceph_lock. Walking s_cap_dirty * requires the mdsc->cap_dirty_lock. List presence for an item can diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h index 39e6f4c57580..fcd84e8d88f4 100644 --- a/include/linux/ceph/ceph_features.h +++ b/include/linux/ceph/ceph_features.h @@ -58,7 +58,7 @@ * because 10.2.z (jewel) did not care if its peers advertised this * feature bit. * - * - In the second phase we stop advertising the the bit and call it + * - In the second phase we stop advertising the bit and call it * RETIRED. This can normally be done in the *next* major release * following the one in which we marked the feature DEPRECATED. In * the above example, for 12.0.z (luminous) we can say: -- cgit v1.2.3 From 73f9407b3eb893bc8a82293cc8d4dfa3db079c0b Mon Sep 17 00:00:00 2001 From: Roi Dayan Date: Mon, 3 Aug 2020 10:33:04 +0300 Subject: netfilter: conntrack: Move nf_ct_offload_timeout to header file To be used by callers from other modules. [ Rename DAY to NF_CT_DAY to avoid possible symbol name pollution issue --Pablo ] Signed-off-by: Roi Dayan Reviewed-by: Oz Shlomo Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack.h | 12 ++++++++++++ net/netfilter/nf_conntrack_core.c | 12 ------------ 2 files changed, 12 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 90690e37a56f..c7bfddfc65b0 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -279,6 +279,18 @@ static inline bool nf_ct_should_gc(const struct nf_conn *ct) !nf_ct_is_dying(ct); } +#define NF_CT_DAY (86400 * HZ) + +/* Set an arbitrary timeout large enough not to ever expire, this save + * us a check for the IPS_OFFLOAD_BIT from the packet path via + * nf_ct_is_expired(). + */ +static inline void nf_ct_offload_timeout(struct nf_conn *ct) +{ + if (nf_ct_expires(ct) < NF_CT_DAY / 2) + ct->timeout = nfct_time_stamp + NF_CT_DAY; +} + struct kernel_param; int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index f33d72c5b06e..c3cea50d1bcb 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1344,18 +1344,6 @@ static bool gc_worker_can_early_drop(const struct nf_conn *ct) return false; } -#define DAY (86400 * HZ) - -/* Set an arbitrary timeout large enough not to ever expire, this save - * us a check for the IPS_OFFLOAD_BIT from the packet path via - * nf_ct_is_expired(). 
- */ -static void nf_ct_offload_timeout(struct nf_conn *ct) -{ - if (nf_ct_expires(ct) < DAY / 2) - ct->timeout = nfct_time_stamp + DAY; -} - static void gc_worker(struct work_struct *work) { unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u); -- cgit v1.2.3 From 54212f5a1ba3123281877e54c1e5f672bf7563d8 Mon Sep 17 00:00:00 2001 From: Pavel Machek Date: Mon, 3 Aug 2020 13:20:06 +0200 Subject: leds: add RGB color option, as that is different from multicolor. Multicolor is a bit too abstract. Yes, we can have Green-Magenta-Ultraviolet LED, but so far all the LEDs we support are RGB, and not even RGB-White or RGB-Yellow variants emerged. Multicolor is not a good fit for RGB LED. It does not really know about LED color. In particular, there's no way to make LED "white". Userspace is interested in knowing "this LED can produce arbitrary color", which not all multicolor LEDs can. Signed-off-by: Pavel Machek --- drivers/leds/led-core.c | 1 + drivers/leds/leds-lp55xx-common.c | 2 +- include/dt-bindings/leds/common.h | 6 ++++-- 3 files changed, 6 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c index 846248a0693d..a6dce01dbd5e 100644 --- a/drivers/leds/led-core.c +++ b/drivers/leds/led-core.c @@ -35,6 +35,7 @@ const char * const led_colors[LED_COLOR_ID_MAX] = { [LED_COLOR_ID_YELLOW] = "yellow", [LED_COLOR_ID_IR] = "ir", [LED_COLOR_ID_MULTI] = "multicolor", + [LED_COLOR_ID_RGB] = "rgb", }; EXPORT_SYMBOL_GPL(led_colors); diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c index af14e2b2d577..56210f4ad919 100644 --- a/drivers/leds/leds-lp55xx-common.c +++ b/drivers/leds/leds-lp55xx-common.c @@ -638,7 +638,7 @@ static int lp55xx_parse_logical_led(struct device_node *np, if (ret) return ret; - if (led_color == LED_COLOR_ID_MULTI) + if (led_color == LED_COLOR_ID_RGB) return lp55xx_parse_multi_led(np, cfg, child_number); ret = lp55xx_parse_common_child(np, cfg, child_number, &chan_nr); diff --git a/include/dt-bindings/leds/common.h b/include/dt-bindings/leds/common.h index a463ce6a8794..52b619d44ba2 100644 --- a/include/dt-bindings/leds/common.h +++ b/include/dt-bindings/leds/common.h @@ -30,8 +30,10 @@ #define LED_COLOR_ID_VIOLET 5 #define LED_COLOR_ID_YELLOW 6 #define LED_COLOR_ID_IR 7 -#define LED_COLOR_ID_MULTI 8 -#define LED_COLOR_ID_MAX 9 +#define LED_COLOR_ID_MULTI 8 /* For multicolor LEDs */ +#define LED_COLOR_ID_RGB 9 /* For multicolor LEDs that can do arbitrary color, + so this would include RGBW and similar */ +#define LED_COLOR_ID_MAX 10 /* Standard LED functions */ /* Keyboard LEDs, usually it would be input4::capslock etc. */ -- cgit v1.2.3 From c9dff08485a6c11449307abde1d7bb404fcee481 Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Thu, 9 Jul 2020 14:49:30 +0200 Subject: fs: Fix typo in comment The comment for function filemap_check_wb_err accidentally refers to it as filemap_check_wb_error. Signed-off-by: Andreas Gruenbacher --- include/linux/fs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/fs.h b/include/linux/fs.h index f5abba86107d..f9ae45795c1a 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2798,7 +2798,7 @@ static inline void filemap_set_wb_err(struct address_space *mapping, int err) } /** - * filemap_check_wb_error - has an error occurred since the mark was sampled? + * filemap_check_wb_err - has an error occurred since the mark was sampled? 
* @mapping: mapping to check for writeback errors * @since: previously-sampled errseq_t * -- cgit v1.2.3 From 966e50597666d530b69de2abb9c83ff0a9bd3ee6 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 28 Jul 2020 14:47:58 -0700 Subject: udp_tunnel: add the ability to hard-code IANA VXLAN mlx5 has the IANA VXLAN port (4789) hard coded by the device, instead of being added dynamically when tunnels are created. To support this add a workaround flag to struct udp_tunnel_nic_info. Skipping updates for the port is fairly trivial, dumping the hard coded port via ethtool requires some code duplication. The port is not a part of any real table, we dump it in a special table which has no tunnel types supported and only one entry. This is the last known workaround / hack needed to convert all drivers to the new infra. Signed-off-by: Jakub Kicinski Signed-off-by: Saeed Mahameed --- Documentation/networking/ethtool-netlink.rst | 3 ++ include/net/udp_tunnel.h | 5 ++ net/ethtool/tunnels.c | 69 ++++++++++++++++++++++++---- net/ipv4/udp_tunnel_nic.c | 7 +++ 4 files changed, 76 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst index 7d75f1e32152..d53bcb31645a 100644 --- a/Documentation/networking/ethtool-netlink.rst +++ b/Documentation/networking/ethtool-netlink.rst @@ -1263,6 +1263,9 @@ Kernel response contents: | | | | ``ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE`` | u32 | tunnel type | +-+-+-+---------------------------------------+--------+---------------------+ +For UDP tunnel table empty ``ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES`` indicates that +the table contains static entries, hard-coded by the NIC. + Request translation =================== diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index dd20ce99740c..94bb7a882250 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -193,6 +193,11 @@ enum udp_tunnel_nic_info_flags { UDP_TUNNEL_NIC_INFO_OPEN_ONLY = BIT(1), /* Device supports only IPv4 tunnels */ UDP_TUNNEL_NIC_INFO_IPV4_ONLY = BIT(2), + /* Device has hard-coded the IANA VXLAN port (4789) as VXLAN. + * This port must not be counted towards n_entries of any table. + * Driver will not receive any callback associated with port 4789. 
+ */ + UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = BIT(3), }; /** diff --git a/net/ethtool/tunnels.c b/net/ethtool/tunnels.c index 6b89255f1231..84f23289475b 100644 --- a/net/ethtool/tunnels.c +++ b/net/ethtool/tunnels.c @@ -2,6 +2,7 @@ #include #include +#include #include "bitset.h" #include "common.h" @@ -18,6 +19,20 @@ static_assert(ETHTOOL_UDP_TUNNEL_TYPE_GENEVE == ilog2(UDP_TUNNEL_TYPE_GENEVE)); static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE == ilog2(UDP_TUNNEL_TYPE_VXLAN_GPE)); +static ssize_t ethnl_udp_table_reply_size(unsigned int types, bool compact) +{ + ssize_t size; + + size = ethnl_bitset32_size(&types, NULL, __ETHTOOL_UDP_TUNNEL_TYPE_CNT, + udp_tunnel_type_names, compact); + if (size < 0) + return size; + + return size + + nla_total_size(0) + /* _UDP_TABLE */ + nla_total_size(sizeof(u32)); /* _UDP_TABLE_SIZE */ +} + static ssize_t ethnl_tunnel_info_reply_size(const struct ethnl_req_info *req_base, struct netlink_ext_ack *extack) @@ -25,8 +40,8 @@ ethnl_tunnel_info_reply_size(const struct ethnl_req_info *req_base, bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; const struct udp_tunnel_nic_info *info; unsigned int i; + ssize_t ret; size_t size; - int ret; info = req_base->dev->udp_tunnel_nic_info; if (!info) { @@ -39,13 +54,10 @@ ethnl_tunnel_info_reply_size(const struct ethnl_req_info *req_base, for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) { if (!info->tables[i].n_entries) - return size; + break; - size += nla_total_size(0); /* _UDP_TABLE */ - size += nla_total_size(sizeof(u32)); /* _UDP_TABLE_SIZE */ - ret = ethnl_bitset32_size(&info->tables[i].tunnel_types, NULL, - __ETHTOOL_UDP_TUNNEL_TYPE_CNT, - udp_tunnel_type_names, compact); + ret = ethnl_udp_table_reply_size(info->tables[i].tunnel_types, + compact); if (ret < 0) return ret; size += ret; @@ -53,6 +65,17 @@ ethnl_tunnel_info_reply_size(const struct ethnl_req_info *req_base, size += udp_tunnel_nic_dump_size(req_base->dev, i); } + if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN) { + ret = ethnl_udp_table_reply_size(0, compact); + if (ret < 0) + return ret; + size += ret; + + size += nla_total_size(0) + /* _TABLE_ENTRY */ + nla_total_size(sizeof(__be16)) + /* _ENTRY_PORT */ + nla_total_size(sizeof(u32)); /* _ENTRY_TYPE */ + } + return size; } @@ -62,7 +85,7 @@ ethnl_tunnel_info_fill_reply(const struct ethnl_req_info *req_base, { bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; const struct udp_tunnel_nic_info *info; - struct nlattr *ports, *table; + struct nlattr *ports, *table, *entry; unsigned int i; info = req_base->dev->udp_tunnel_nic_info; @@ -97,10 +120,40 @@ ethnl_tunnel_info_fill_reply(const struct ethnl_req_info *req_base, nla_nest_end(skb, table); } + if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN) { + u32 zero = 0; + + table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE); + if (!table) + goto err_cancel_ports; + + if (nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE, 1)) + goto err_cancel_table; + + if (ethnl_put_bitset32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES, + &zero, NULL, + __ETHTOOL_UDP_TUNNEL_TYPE_CNT, + udp_tunnel_type_names, compact)) + goto err_cancel_table; + + entry = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY); + + if (nla_put_be16(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT, + htons(IANA_VXLAN_UDP_PORT)) || + nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE, + ilog2(UDP_TUNNEL_TYPE_VXLAN))) + goto err_cancel_entry; + + nla_nest_end(skb, entry); + nla_nest_end(skb, table); + } + nla_nest_end(skb, ports); return 0; +err_cancel_entry: + 
nla_nest_cancel(skb, entry); err_cancel_table: nla_nest_cancel(skb, table); err_cancel_ports: diff --git a/net/ipv4/udp_tunnel_nic.c b/net/ipv4/udp_tunnel_nic.c index f0dbd9905a53..69962165c0e8 100644 --- a/net/ipv4/udp_tunnel_nic.c +++ b/net/ipv4/udp_tunnel_nic.c @@ -7,6 +7,7 @@ #include #include #include +#include enum udp_tunnel_nic_table_entry_flags { UDP_TUNNEL_NIC_ENTRY_ADD = BIT(0), @@ -504,6 +505,12 @@ __udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti) return; if (!netif_running(dev) && info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY) return; + if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN && + ti->port == htons(IANA_VXLAN_UDP_PORT)) { + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + netdev_warn(dev, "device assumes port 4789 will be used by vxlan tunnels\n"); + return; + } if (!udp_tunnel_nic_is_capable(dev, utn, ti)) return; -- cgit v1.2.3 From f3751ad0116fb6881f2c3c957d66a9327f69cefb Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Thu, 30 Jul 2020 15:45:54 -0700 Subject: tracepoint: Mark __tracepoint_string's __used __tracepoint_string's have their string data stored in .rodata, and an address to that data stored in the "__tracepoint_str" section. Functions that refer to those strings refer to the symbol of the address. Compiler optimization can replace those address references with references directly to the string data. If the address doesn't appear to have other uses, then it appears dead to the compiler and is removed. This can break the /tracing/printk_formats sysfs node which iterates the addresses stored in the "__tracepoint_str" section. Like other strings stored in custom sections in this header, mark these __used to inform the compiler that there are other non-obvious users of the address, so they should still be emitted. Link: https://lkml.kernel.org/r/20200730224555.2142154-2-ndesaulniers@google.com Cc: Ingo Molnar Cc: Miguel Ojeda Cc: stable@vger.kernel.org Fixes: 102c9323c35a8 ("tracing: Add __tracepoint_string() to export string pointers") Reported-by: Tim Murray Reported-by: Simon MacMullen Suggested-by: Greg Hackmann Signed-off-by: Nick Desaulniers Signed-off-by: Steven Rostedt (VMware) --- include/linux/tracepoint.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index a1fecf311621..3a5b717d92e8 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -361,7 +361,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) static const char *___tp_str __tracepoint_string = str; \ ___tp_str; \ }) -#define __tracepoint_string __attribute__((section("__tracepoint_str"))) +#define __tracepoint_string __attribute__((section("__tracepoint_str"), used)) #else /* * tracepoint_string() is used to save the string address for userspace -- cgit v1.2.3 From 1c39d761ff5c90227fc582c45018d5922efaa253 Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Thu, 30 Jul 2020 15:45:55 -0700 Subject: tracepoint: Use __used attribute definitions from compiler_attributes.h Just a small cleanup while I was touching this header. compiler_attributes.h does feature detection of these __attributes__(()) and provides more concise ways to invoke them. 
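As a rough illustration of what the shorthands expand to, the following sketch is written from memory of the 2020-era definitions in compiler_attributes.h/compiler.h, so the exact expansions are an assumption; at that time __section() still took an unquoted name and stringified it.

/* Illustrative expansions only; the real definitions live in
 * include/linux/compiler_attributes.h and include/linux/compiler.h. */
#define __used        __attribute__((__used__))
#define __section(S)  __attribute__((__section__(#S)))

/* New, shorthand form used by the patch: */
static const char *example_tp_str __used __section(__tracepoint_str) = "example";

/* ...which is equivalent to the older open-coded form it replaces: */
static const char *example_tp_str_old
	__attribute__((section("__tracepoint_str"), used)) = "example";
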
Link: https://lkml.kernel.org/r/20200730224555.2142154-3-ndesaulniers@google.com Acked-by: Miguel Ojeda Signed-off-by: Nick Desaulniers Signed-off-by: Steven Rostedt (VMware) --- include/linux/tracepoint.h | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 3a5b717d92e8..598fec9f9dbf 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -116,8 +116,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) #define __TRACEPOINT_ENTRY(name) \ static tracepoint_ptr_t __tracepoint_ptr_##name __used \ - __attribute__((section("__tracepoints_ptrs"))) = \ - &__tracepoint_##name + __section(__tracepoints_ptrs) = &__tracepoint_##name #endif #endif /* _LINUX_TRACEPOINT_H */ @@ -280,9 +279,9 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) */ #define DEFINE_TRACE_FN(name, reg, unreg) \ static const char __tpstrtab_##name[] \ - __attribute__((section("__tracepoints_strings"))) = #name; \ - struct tracepoint __tracepoint_##name \ - __attribute__((section("__tracepoints"), used)) = \ + __section(__tracepoints_strings) = #name; \ + struct tracepoint __tracepoint_##name __used \ + __section(__tracepoints) = \ { __tpstrtab_##name, STATIC_KEY_INIT_FALSE, reg, unreg, NULL };\ __TRACEPOINT_ENTRY(name); @@ -361,7 +360,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) static const char *___tp_str __tracepoint_string = str; \ ___tp_str; \ }) -#define __tracepoint_string __attribute__((section("__tracepoint_str"), used)) +#define __tracepoint_string __used __section(__tracepoint_str) #else /* * tracepoint_string() is used to save the string address for userspace -- cgit v1.2.3 From 321bd212619a7269308696e4ddc446930ea73fad Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Wed, 24 Jun 2020 18:24:33 -0400 Subject: virtio: VIRTIO_F_IOMMU_PLATFORM -> VIRTIO_F_ACCESS_PLATFORM Rename the bit to match latest virtio spec. Add a compat macro to avoid breaking existing userspace. Signed-off-by: Michael S. 
Tsirkin Reviewed-by: David Hildenbrand --- arch/um/drivers/virtio_uml.c | 2 +- drivers/vdpa/ifcvf/ifcvf_base.h | 2 +- drivers/vdpa/vdpa_sim/vdpa_sim.c | 4 ++-- drivers/vhost/net.c | 4 ++-- drivers/vhost/vdpa.c | 2 +- drivers/virtio/virtio_balloon.c | 2 +- drivers/virtio/virtio_ring.c | 2 +- include/linux/virtio_config.h | 2 +- include/uapi/linux/virtio_config.h | 10 +++++++--- tools/virtio/linux/virtio_config.h | 2 +- 10 files changed, 18 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c index 351aee52aca6..a6c4bb6c2c01 100644 --- a/arch/um/drivers/virtio_uml.c +++ b/arch/um/drivers/virtio_uml.c @@ -385,7 +385,7 @@ static irqreturn_t vu_req_interrupt(int irq, void *data) } break; case VHOST_USER_SLAVE_IOTLB_MSG: - /* not supported - VIRTIO_F_IOMMU_PLATFORM */ + /* not supported - VIRTIO_F_ACCESS_PLATFORM */ case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG: /* not supported - VHOST_USER_PROTOCOL_F_HOST_NOTIFIER */ default: diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h index f4554412e607..24af422b5a3e 100644 --- a/drivers/vdpa/ifcvf/ifcvf_base.h +++ b/drivers/vdpa/ifcvf/ifcvf_base.h @@ -29,7 +29,7 @@ (1ULL << VIRTIO_F_VERSION_1) | \ (1ULL << VIRTIO_NET_F_STATUS) | \ (1ULL << VIRTIO_F_ORDER_PLATFORM) | \ - (1ULL << VIRTIO_F_IOMMU_PLATFORM) | \ + (1ULL << VIRTIO_F_ACCESS_PLATFORM) | \ (1ULL << VIRTIO_NET_F_MRG_RXBUF)) /* Only one queue pair for now. */ diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c index c7334cc65bb2..a9bc5e0fb353 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c @@ -55,7 +55,7 @@ struct vdpasim_virtqueue { static u64 vdpasim_features = (1ULL << VIRTIO_F_ANY_LAYOUT) | (1ULL << VIRTIO_F_VERSION_1) | - (1ULL << VIRTIO_F_IOMMU_PLATFORM); + (1ULL << VIRTIO_F_ACCESS_PLATFORM); /* State of each vdpasim device */ struct vdpasim { @@ -450,7 +450,7 @@ static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features) struct vdpasim *vdpasim = vdpa_to_sim(vdpa); /* DMA mapping must be done by driver */ - if (!(features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))) + if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) return -EINVAL; vdpasim->features = features & vdpasim_features; diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index e992decfec53..8e0921d3805d 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -73,7 +73,7 @@ enum { VHOST_NET_FEATURES = VHOST_FEATURES | (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) | (1ULL << VIRTIO_NET_F_MRG_RXBUF) | - (1ULL << VIRTIO_F_IOMMU_PLATFORM) + (1ULL << VIRTIO_F_ACCESS_PLATFORM) }; enum { @@ -1653,7 +1653,7 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features) !vhost_log_access_ok(&n->dev)) goto out_unlock; - if ((features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))) { + if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) { if (vhost_init_device_iotlb(&n->dev, true)) goto out_unlock; } diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index a54b60d6623f..18869a35d408 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -31,7 +31,7 @@ enum { (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | (1ULL << VIRTIO_F_ANY_LAYOUT) | (1ULL << VIRTIO_F_VERSION_1) | - (1ULL << VIRTIO_F_IOMMU_PLATFORM) | + (1ULL << VIRTIO_F_ACCESS_PLATFORM) | (1ULL << VIRTIO_F_RING_PACKED) | (1ULL << VIRTIO_F_ORDER_PLATFORM) | (1ULL << VIRTIO_RING_F_INDIRECT_DESC) | diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 8be02f333b7a..54fd989f9353 
100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -1129,7 +1129,7 @@ static int virtballoon_validate(struct virtio_device *vdev) else if (!virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) __virtio_clear_bit(vdev, VIRTIO_BALLOON_F_REPORTING); - __virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM); + __virtio_clear_bit(vdev, VIRTIO_F_ACCESS_PLATFORM); return 0; } diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 58b96baa8d48..a1a5c2a91426 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -2225,7 +2225,7 @@ void vring_transport_features(struct virtio_device *vdev) break; case VIRTIO_F_VERSION_1: break; - case VIRTIO_F_IOMMU_PLATFORM: + case VIRTIO_F_ACCESS_PLATFORM: break; case VIRTIO_F_RING_PACKED: break; diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index bb4cc4910750..f2cc2a0df174 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -171,7 +171,7 @@ static inline bool virtio_has_iommu_quirk(const struct virtio_device *vdev) * Note the reverse polarity of the quirk feature (compared to most * other features), this is for compatibility with legacy systems. */ - return !virtio_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM); + return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM); } static inline diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h index ff8e7dc9d4dd..b5eda06f0d57 100644 --- a/include/uapi/linux/virtio_config.h +++ b/include/uapi/linux/virtio_config.h @@ -67,13 +67,17 @@ #define VIRTIO_F_VERSION_1 32 /* - * If clear - device has the IOMMU bypass quirk feature. - * If set - use platform tools to detect the IOMMU. + * If clear - device has the platform DMA (e.g. IOMMU) bypass quirk feature. + * If set - use platform DMA tools to access the memory. * * Note the reverse polarity (compared to most other features), * this is for compatibility with legacy systems. */ -#define VIRTIO_F_IOMMU_PLATFORM 33 +#define VIRTIO_F_ACCESS_PLATFORM 33 +#ifndef __KERNEL__ +/* Legacy name for VIRTIO_F_ACCESS_PLATFORM (for compatibility with old userspace) */ +#define VIRTIO_F_IOMMU_PLATFORM VIRTIO_F_ACCESS_PLATFORM +#endif /* __KERNEL__ */ /* This feature indicates support for the packed virtqueue layout. */ #define VIRTIO_F_RING_PACKED 34 diff --git a/tools/virtio/linux/virtio_config.h b/tools/virtio/linux/virtio_config.h index dbf14c1e2188..f99ae42668e0 100644 --- a/tools/virtio/linux/virtio_config.h +++ b/tools/virtio/linux/virtio_config.h @@ -51,7 +51,7 @@ static inline bool virtio_has_iommu_quirk(const struct virtio_device *vdev) * Note the reverse polarity of the quirk feature (compared to most * other features), this is for compatibility with legacy systems. */ - return !virtio_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM); + return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM); } static inline bool virtio_is_little_endian(struct virtio_device *vdev) -- cgit v1.2.3 From 24b6842ade6925199e182988259761504aacfbc0 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Wed, 24 Jun 2020 19:17:04 -0400 Subject: virtio: virtio_has_iommu_quirk -> virtio_has_dma_quirk Now that the corresponding feature bit has been renamed, rename the quirk too - it's about special ways to do DMA, not necessarily about the IOMMU. Signed-off-by: Michael S. 
Tsirkin --- drivers/gpu/drm/virtio/virtgpu_object.c | 2 +- drivers/gpu/drm/virtio/virtgpu_vq.c | 4 ++-- drivers/virtio/virtio_ring.c | 2 +- include/linux/virtio_config.h | 4 ++-- tools/virtio/linux/virtio_config.h | 4 ++-- 5 files changed, 8 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index 6ccbd01cd888..e8799ab0c753 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -141,7 +141,7 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev, struct virtio_gpu_mem_entry **ents, unsigned int *nents) { - bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev); + bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev); struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo); struct scatterlist *sg; int si, ret; diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index 9e663a5d9952..53af60d484a4 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -599,7 +599,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); struct virtio_gpu_transfer_to_host_2d *cmd_p; struct virtio_gpu_vbuffer *vbuf; - bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev); + bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev); struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo); if (use_dma_api) @@ -1015,7 +1015,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); struct virtio_gpu_transfer_host_3d *cmd_p; struct virtio_gpu_vbuffer *vbuf; - bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev); + bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev); struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo); if (use_dma_api) diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index a1a5c2a91426..34253cb69cb8 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -240,7 +240,7 @@ static inline bool virtqueue_use_indirect(struct virtqueue *_vq, static bool vring_use_dma_api(struct virtio_device *vdev) { - if (!virtio_has_iommu_quirk(vdev)) + if (!virtio_has_dma_quirk(vdev)) return true; /* Otherwise, we are left to guess. 
*/ diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index f2cc2a0df174..3b4eae5ac5e3 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -162,10 +162,10 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev, } /** - * virtio_has_iommu_quirk - determine whether this device has the iommu quirk + * virtio_has_dma_quirk - determine whether this device has the DMA quirk * @vdev: the device */ -static inline bool virtio_has_iommu_quirk(const struct virtio_device *vdev) +static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev) { /* * Note the reverse polarity of the quirk feature (compared to most diff --git a/tools/virtio/linux/virtio_config.h b/tools/virtio/linux/virtio_config.h index f99ae42668e0..f2640e505c4e 100644 --- a/tools/virtio/linux/virtio_config.h +++ b/tools/virtio/linux/virtio_config.h @@ -42,10 +42,10 @@ static inline void __virtio_clear_bit(struct virtio_device *vdev, (__virtio_test_bit((dev), feature)) /** - * virtio_has_iommu_quirk - determine whether this device has the iommu quirk + * virtio_has_dma_quirk - determine whether this device has the DMA quirk * @vdev: the device */ -static inline bool virtio_has_iommu_quirk(const struct virtio_device *vdev) +static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev) { /* * Note the reverse polarity of the quirk feature (compared to most -- cgit v1.2.3 From 80a6e707dd9742390776a9306b400b1fbe405b4a Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 17 Jul 2020 17:42:55 +0900 Subject: kprobes: Remove show_registers() function prototype Remove show_registers() function prototype because this function has been renamed by commit 57da8b960b9a ("x86: Avoid double stack traces with show_regs()"), and commit 80006dbee674 ("kprobes/x86: Remove jprobe implementation") has removed the caller in kprobes. So this doesn't exist anymore. Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- include/linux/kprobes.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 6adf90f248d7..81cb7e00ccdc 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -227,7 +227,6 @@ extern int arch_prepare_kprobe(struct kprobe *p); extern void arch_arm_kprobe(struct kprobe *p); extern void arch_disarm_kprobe(struct kprobe *p); extern int arch_init_kprobes(void); -extern void show_registers(struct pt_regs *regs); extern void kprobes_inc_nmissed_count(struct kprobe *p); extern bool arch_within_kprobe_blacklist(unsigned long addr); extern int arch_populate_kprobe_blacklist(void); -- cgit v1.2.3 From 6cfde88418fe95240e32d2955b51988360ee0942 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 1 Aug 2020 10:53:42 +0200 Subject: clk: drop unused function __clk_get_flags The function __clk_get_flags has not been used since the April 2019 commit a348f05361c9 ("ARM: omap2+: hwmod: drop CLK_IS_BASIC flag usage"). Other uses were removed in June 2015, eg by commit 98d8a60eccee ("clk: Convert __clk_get_flags() to clk_hw_get_flags()"), which shows how clk_hw_get_flags can easily be used instead. 
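A hedged sketch of such a conversion follows; the surrounding driver function is invented for illustration, and only clk_hw_get_flags(), __clk_get_hw() and CLK_SET_RATE_PARENT are assumed to be real kernel symbols.

#include <linux/clk-provider.h>

/* Invented example: a provider-side helper that used to peek at the flags
 * through the removed __clk_get_flags() wrapper. */
static bool example_can_reparent_for_rate(struct clk *clk)
{
	struct clk_hw *hw = __clk_get_hw(clk);

	/* old: return __clk_get_flags(clk) & CLK_SET_RATE_PARENT; */
	return clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT;
}

For this particular check, the clk_hw_can_set_rate_parent() macro visible in the hunk below already wraps clk_hw_get_flags(), so callers may not even need to read the flags directly.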
Signed-off-by: Julia Lawall Link: https://lore.kernel.org/r/1596272022-14173-1-git-send-email-Julia.Lawall@inria.fr Signed-off-by: Stephen Boyd --- drivers/clk/clk.c | 6 ------ include/linux/clk-provider.h | 1 - 2 files changed, 7 deletions(-) (limited to 'include') diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 3f588ed06ce3..d71456aad11d 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -500,12 +500,6 @@ static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core) return core->accuracy; } -unsigned long __clk_get_flags(struct clk *clk) -{ - return !clk ? 0 : clk->core->flags; -} -EXPORT_SYMBOL_GPL(__clk_get_flags); - unsigned long clk_hw_get_flags(const struct clk_hw *hw) { return hw->core->flags; diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index bd1ee9039558..93a78a5256d1 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -1096,7 +1096,6 @@ int clk_hw_get_parent_index(struct clk_hw *hw); int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *new_parent); unsigned int __clk_get_enable_count(struct clk *clk); unsigned long clk_hw_get_rate(const struct clk_hw *hw); -unsigned long __clk_get_flags(struct clk *clk); unsigned long clk_hw_get_flags(const struct clk_hw *hw); #define clk_hw_can_set_rate_parent(hw) \ (clk_hw_get_flags((hw)) & CLK_SET_RATE_PARENT) -- cgit v1.2.3 From bb3831294cd50750806f2ce8d73317dc8feeda09 Mon Sep 17 00:00:00 2001 From: Bruno Thomsen Date: Thu, 30 Jul 2020 21:57:48 +0200 Subject: net: mdiobus: add reset-post-delay-us handling Load new "reset-post-delay-us" value from MDIO properties, and if configured to a greater then zero delay do a flexible sleeping delay after MDIO bus reset deassert. This allows devices to exit reset state before start bus communication. Signed-off-by: Bruno Thomsen Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/mdio_bus.c | 2 ++ drivers/of/of_mdio.c | 2 ++ include/linux/phy.h | 2 ++ 3 files changed, 6 insertions(+) (limited to 'include') diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 5df3782b05b4..0af20faad69d 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -556,6 +556,8 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) gpiod_set_value_cansleep(gpiod, 1); fsleep(bus->reset_delay_us); gpiod_set_value_cansleep(gpiod, 0); + if (bus->reset_post_delay_us > 0) + fsleep(bus->reset_post_delay_us); } if (bus->reset) { diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index eb84507de28a..cb32d7ef4938 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -268,6 +268,8 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) /* Get bus level PHY reset GPIO details */ mdio->reset_delay_us = DEFAULT_GPIO_RESET_DELAY; of_property_read_u32(np, "reset-delay-us", &mdio->reset_delay_us); + mdio->reset_post_delay_us = 0; + of_property_read_u32(np, "reset-post-delay-us", &mdio->reset_post_delay_us); /* Register the MDIO bus */ rc = mdiobus_register(mdio); diff --git a/include/linux/phy.h b/include/linux/phy.h index 0403eb799913..3a09d2bf69ea 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -293,6 +293,8 @@ struct mii_bus { /* GPIO reset pulse width in microseconds */ int reset_delay_us; + /* GPIO reset deassert delay in microseconds */ + int reset_post_delay_us; /* RESET GPIO descriptor pointer */ struct gpio_desc *reset_gpiod; -- cgit v1.2.3 From 038ebb1a713d114d54dbf14868a73181c0c92758 Mon Sep 17 00:00:00 2001 From: wenxu Date: Fri, 31 Jul 2020 10:45:01 +0800 Subject: net/sched: act_ct: fix miss set mru for ovs after defrag in act_ct When openvswitch conntrack offload with act_ct action. Fragment packets defrag in the ingress tc act_ct action and miss the next chain. Then the packet pass to the openvswitch datapath without the mru. The over mtu packet will be dropped in output action in openvswitch for over mtu. "kernel: net2: dropped over-mtu packet: 1528 > 1500" This patch add mru in the tc_skb_ext for adefrag and miss next chain situation. And also add mru in the qdisc_skb_cb. The act_ct set the mru to the qdisc_skb_cb when the packet defrag. And When the chain miss, The mru is set to tc_skb_ext which can be got by ovs datapath. Fixes: b57dc7c13ea9 ("net/sched: Introduce action ct") Signed-off-by: wenxu Reviewed-by: Cong Wang Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 1 + include/net/sch_generic.h | 3 ++- net/openvswitch/flow.c | 1 + net/sched/act_ct.c | 8 ++++++-- net/sched/cls_api.c | 1 + 5 files changed, 11 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index fa817a105517..3ad65d4ce085 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -283,6 +283,7 @@ struct nf_bridge_info { */ struct tc_skb_ext { __u32 chain; + __u16 mru; }; #endif diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index c510b03b9751..d60e7c39d60c 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -384,6 +384,7 @@ struct qdisc_skb_cb { }; #define QDISC_CB_PRIV_LEN 20 unsigned char data[QDISC_CB_PRIV_LEN]; + u16 mru; }; typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv); @@ -463,7 +464,7 @@ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) { struct qdisc_skb_cb *qcb; - BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz); + BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb)); BUILD_BUG_ON(sizeof(qcb->data) < sz); } diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 9d375e74b607..03942c30d83e 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -890,6 +890,7 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info, if (static_branch_unlikely(&tc_recirc_sharing_support)) { tc_ext = skb_ext_find(skb, TC_SKB_EXT); key->recirc_id = tc_ext ? tc_ext->chain : 0; + OVS_CB(skb)->mru = tc_ext ? tc_ext->mru : 0; } else { key->recirc_id = 0; } diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index 97e27946897f..e6ad42b11835 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -706,8 +706,10 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb, if (err && err != -EINPROGRESS) goto out_free; - if (!err) + if (!err) { *defrag = true; + cb.mru = IPCB(skb)->frag_max_size; + } } else { /* NFPROTO_IPV6 */ #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone; @@ -717,8 +719,10 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb, if (err && err != -EINPROGRESS) goto out_free; - if (!err) + if (!err) { *defrag = true; + cb.mru = IP6CB(skb)->frag_max_size; + } #else err = -EOPNOTSUPP; goto out_free; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 0b8623b3b24f..41a55c6cbeb8 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1629,6 +1629,7 @@ int tcf_classify_ingress(struct sk_buff *skb, if (WARN_ON_ONCE(!ext)) return TC_ACT_SHOT; ext->chain = last_executed_chain; + ext->mru = qdisc_skb_cb(skb)->mru; } return ret; -- cgit v1.2.3 From 9d2f627b7ec9d5d3246b6cec17f290ee6778c83b Mon Sep 17 00:00:00 2001 From: Eelco Chaudron Date: Fri, 31 Jul 2020 14:20:56 +0200 Subject: net: openvswitch: add masks cache hit counter Add a counter that counts the number of masks cache hits, and export it through the megaflow netlink statistics. Reviewed-by: Paolo Abeni Reviewed-by: Tonghao Zhang Signed-off-by: Eelco Chaudron Signed-off-by: David S. 
Miller --- include/uapi/linux/openvswitch.h | 2 +- net/openvswitch/datapath.c | 5 ++++- net/openvswitch/datapath.h | 3 +++ net/openvswitch/flow_table.c | 19 ++++++++++++++----- net/openvswitch/flow_table.h | 3 ++- 5 files changed, 24 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 9b14519e74d9..7cb76e5ca7cf 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -102,8 +102,8 @@ struct ovs_dp_megaflow_stats { __u64 n_mask_hit; /* Number of masks used for flow lookups. */ __u32 n_masks; /* Number of masks for the datapath. */ __u32 pad0; /* Pad for future expension. */ + __u64 n_cache_hit; /* Number of cache matches for flow lookups. */ __u64 pad1; /* Pad for future expension. */ - __u64 pad2; /* Pad for future expension. */ }; struct ovs_vport_stats { diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 6b6822f82f70..f45fee760504 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -225,13 +225,14 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key) struct dp_stats_percpu *stats; u64 *stats_counter; u32 n_mask_hit; + u32 n_cache_hit; int error; stats = this_cpu_ptr(dp->stats_percpu); /* Look up flow. */ flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb), - &n_mask_hit); + &n_mask_hit, &n_cache_hit); if (unlikely(!flow)) { struct dp_upcall_info upcall; @@ -262,6 +263,7 @@ out: u64_stats_update_begin(&stats->syncp); (*stats_counter)++; stats->n_mask_hit += n_mask_hit; + stats->n_cache_hit += n_cache_hit; u64_stats_update_end(&stats->syncp); } @@ -699,6 +701,7 @@ static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats, stats->n_missed += local_stats.n_missed; stats->n_lost += local_stats.n_lost; mega_stats->n_mask_hit += local_stats.n_mask_hit; + mega_stats->n_cache_hit += local_stats.n_cache_hit; } } diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h index 24fcec22fde2..38f7d3e66ca6 100644 --- a/net/openvswitch/datapath.h +++ b/net/openvswitch/datapath.h @@ -38,12 +38,15 @@ * @n_mask_hit: Number of masks looked up for flow match. * @n_mask_hit / (@n_hit + @n_missed) will be the average masks looked * up per packet. + * @n_cache_hit: The number of received packets that had their mask found using + * the mask cache. 
*/ struct dp_stats_percpu { u64 n_hit; u64 n_missed; u64 n_lost; u64 n_mask_hit; + u64 n_cache_hit; struct u64_stats_sync syncp; }; diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c index af22c9ee28dd..a5912ea05352 100644 --- a/net/openvswitch/flow_table.c +++ b/net/openvswitch/flow_table.c @@ -667,6 +667,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl, struct mask_array *ma, const struct sw_flow_key *key, u32 *n_mask_hit, + u32 *n_cache_hit, u32 *index) { u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr); @@ -682,6 +683,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl, u64_stats_update_begin(&ma->syncp); usage_counters[*index]++; u64_stats_update_end(&ma->syncp); + (*n_cache_hit)++; return flow; } } @@ -719,7 +721,8 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl, struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl, const struct sw_flow_key *key, u32 skb_hash, - u32 *n_mask_hit) + u32 *n_mask_hit, + u32 *n_cache_hit) { struct mask_array *ma = rcu_dereference(tbl->mask_array); struct table_instance *ti = rcu_dereference(tbl->ti); @@ -729,10 +732,13 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl, int seg; *n_mask_hit = 0; + *n_cache_hit = 0; if (unlikely(!skb_hash)) { u32 mask_index = 0; + u32 cache = 0; - return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index); + return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache, + &mask_index); } /* Pre and post recirulation flows usually have the same skb_hash @@ -753,7 +759,7 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl, e = &entries[index]; if (e->skb_hash == skb_hash) { flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, - &e->mask_index); + n_cache_hit, &e->mask_index); if (!flow) e->skb_hash = 0; return flow; @@ -766,10 +772,12 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl, } /* Cache miss, do full lookup. */ - flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &ce->mask_index); + flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit, + &ce->mask_index); if (flow) ce->skb_hash = skb_hash; + *n_cache_hit = 0; return flow; } @@ -779,9 +787,10 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl, struct table_instance *ti = rcu_dereference_ovsl(tbl->ti); struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array); u32 __always_unused n_mask_hit; + u32 __always_unused n_cache_hit; u32 index = 0; - return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index); + return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index); } struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl, diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h index 1f664b050e3b..325e939371d8 100644 --- a/net/openvswitch/flow_table.h +++ b/net/openvswitch/flow_table.h @@ -82,7 +82,8 @@ struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table, struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *, const struct sw_flow_key *, u32 skb_hash, - u32 *n_mask_hit); + u32 *n_mask_hit, + u32 *n_cache_hit); struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *, const struct sw_flow_key *); struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl, -- cgit v1.2.3 From 9bf24f594c6acf676fb8c229f152c21bfb915ddb Mon Sep 17 00:00:00 2001 From: Eelco Chaudron Date: Fri, 31 Jul 2020 14:21:34 +0200 Subject: net: openvswitch: make masks cache size configurable This patch makes the masks cache size configurable, or with a size of 0, disable it. 
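Illustration (a minimal user-space sketch, not the kernel code): the rule this change enforces is that a size of 0 disables the cache, any other value must be a power of two, and the resulting per-CPU array must fit in one per-CPU allocation unit. The constants below stand in for the real kernel values of PCPU_MIN_UNIT_SIZE and sizeof(struct mask_cache_entry).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the real values come from the kernel. */
#define PCPU_MIN_UNIT_SIZE	(32 * 1024)
#define MASK_CACHE_ENTRY_SIZE	8

static bool is_power_of_2(uint32_t n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/* 0 disables the cache; any other size must be a power of two and the
 * per-CPU cache array must fit inside one per-CPU allocation unit. */
static bool masks_cache_size_is_valid(uint32_t size)
{
	if (size != 0 && !is_power_of_2(size))
		return false;
	return (uint64_t)size * MASK_CACHE_ENTRY_SIZE <= PCPU_MIN_UNIT_SIZE;
}

int main(void)
{
	uint32_t sizes[] = { 0, 256, 300, 1024, 1u << 20 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %7u -> %s\n", sizes[i],
		       masks_cache_size_is_valid(sizes[i]) ? "ok" : "rejected");
	return 0;
}

In the patch itself, user space requests the new size through the OVS_DP_ATTR_MASKS_CACHE_SIZE datapath attribute, and ovs_flow_tbl_masks_cache_resize() rejects sizes that break the rule above with -EINVAL.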
Reviewed-by: Paolo Abeni Reviewed-by: Tonghao Zhang Signed-off-by: Eelco Chaudron Signed-off-by: David S. Miller --- include/uapi/linux/openvswitch.h | 1 + net/openvswitch/datapath.c | 17 +++++++ net/openvswitch/flow_table.c | 101 ++++++++++++++++++++++++++++++++++----- net/openvswitch/flow_table.h | 10 +++- 4 files changed, 115 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 7cb76e5ca7cf..8300cc29dec8 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -86,6 +86,7 @@ enum ovs_datapath_attr { OVS_DP_ATTR_MEGAFLOW_STATS, /* struct ovs_dp_megaflow_stats */ OVS_DP_ATTR_USER_FEATURES, /* OVS_DP_F_* */ OVS_DP_ATTR_PAD, + OVS_DP_ATTR_MASKS_CACHE_SIZE, __OVS_DP_ATTR_MAX }; diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index f45fee760504..42f8cc70bb2c 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -1498,6 +1498,7 @@ static size_t ovs_dp_cmd_msg_size(void) msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_stats)); msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_megaflow_stats)); msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */ + msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_MASKS_CACHE_SIZE */ return msgsize; } @@ -1535,6 +1536,10 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb, if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features)) goto nla_put_failure; + if (nla_put_u32(skb, OVS_DP_ATTR_MASKS_CACHE_SIZE, + ovs_flow_tbl_masks_cache_size(&dp->table))) + goto nla_put_failure; + genlmsg_end(skb, ovs_header); return 0; @@ -1599,6 +1604,16 @@ static int ovs_dp_change(struct datapath *dp, struct nlattr *a[]) #endif } + if (a[OVS_DP_ATTR_MASKS_CACHE_SIZE]) { + int err; + u32 cache_size; + + cache_size = nla_get_u32(a[OVS_DP_ATTR_MASKS_CACHE_SIZE]); + err = ovs_flow_tbl_masks_cache_resize(&dp->table, cache_size); + if (err) + return err; + } + dp->user_features = user_features; if (dp->user_features & OVS_DP_F_TC_RECIRC_SHARING) @@ -1887,6 +1902,8 @@ static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = { [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 }, [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 }, + [OVS_DP_ATTR_MASKS_CACHE_SIZE] = NLA_POLICY_RANGE(NLA_U32, 0, + PCPU_MIN_UNIT_SIZE / sizeof(struct mask_cache_entry)), }; static const struct genl_ops dp_datapath_genl_ops[] = { diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c index a5912ea05352..6527d84c3ea6 100644 --- a/net/openvswitch/flow_table.c +++ b/net/openvswitch/flow_table.c @@ -38,8 +38,8 @@ #define MASK_ARRAY_SIZE_MIN 16 #define REHASH_INTERVAL (10 * 60 * HZ) +#define MC_DEFAULT_HASH_ENTRIES 256 #define MC_HASH_SHIFT 8 -#define MC_HASH_ENTRIES (1u << MC_HASH_SHIFT) #define MC_HASH_SEGS ((sizeof(uint32_t) * 8) / MC_HASH_SHIFT) static struct kmem_cache *flow_cache; @@ -341,15 +341,79 @@ static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask) } } +static void __mask_cache_destroy(struct mask_cache *mc) +{ + free_percpu(mc->mask_cache); + kfree(mc); +} + +static void mask_cache_rcu_cb(struct rcu_head *rcu) +{ + struct mask_cache *mc = container_of(rcu, struct mask_cache, rcu); + + __mask_cache_destroy(mc); +} + +static struct mask_cache *tbl_mask_cache_alloc(u32 size) +{ + struct mask_cache_entry __percpu *cache = NULL; + struct mask_cache *new; + + /* Only 
allow size to be 0, or a power of 2, and does not exceed + * percpu allocation size. + */ + if ((!is_power_of_2(size) && size != 0) || + (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE) + return NULL; + + new = kzalloc(sizeof(*new), GFP_KERNEL); + if (!new) + return NULL; + + new->cache_size = size; + if (new->cache_size > 0) { + cache = __alloc_percpu(array_size(sizeof(struct mask_cache_entry), + new->cache_size), + __alignof__(struct mask_cache_entry)); + if (!cache) { + kfree(new); + return NULL; + } + } + + new->mask_cache = cache; + return new; +} +int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size) +{ + struct mask_cache *mc = rcu_dereference(table->mask_cache); + struct mask_cache *new; + + if (size == mc->cache_size) + return 0; + + if ((!is_power_of_2(size) && size != 0) || + (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE) + return -EINVAL; + + new = tbl_mask_cache_alloc(size); + if (!new) + return -ENOMEM; + + rcu_assign_pointer(table->mask_cache, new); + call_rcu(&mc->rcu, mask_cache_rcu_cb); + + return 0; +} + int ovs_flow_tbl_init(struct flow_table *table) { struct table_instance *ti, *ufid_ti; + struct mask_cache *mc; struct mask_array *ma; - table->mask_cache = __alloc_percpu(sizeof(struct mask_cache_entry) * - MC_HASH_ENTRIES, - __alignof__(struct mask_cache_entry)); - if (!table->mask_cache) + mc = tbl_mask_cache_alloc(MC_DEFAULT_HASH_ENTRIES); + if (!mc) return -ENOMEM; ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN); @@ -367,6 +431,7 @@ int ovs_flow_tbl_init(struct flow_table *table) rcu_assign_pointer(table->ti, ti); rcu_assign_pointer(table->ufid_ti, ufid_ti); rcu_assign_pointer(table->mask_array, ma); + rcu_assign_pointer(table->mask_cache, mc); table->last_rehash = jiffies; table->count = 0; table->ufid_count = 0; @@ -377,7 +442,7 @@ free_ti: free_mask_array: __mask_array_destroy(ma); free_mask_cache: - free_percpu(table->mask_cache); + __mask_cache_destroy(mc); return -ENOMEM; } @@ -453,9 +518,11 @@ void ovs_flow_tbl_destroy(struct flow_table *table) { struct table_instance *ti = rcu_dereference_raw(table->ti); struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti); + struct mask_cache *mc = rcu_dereference(table->mask_cache); + struct mask_array *ma = rcu_dereference_ovsl(table->mask_array); - free_percpu(table->mask_cache); - call_rcu(&table->mask_array->rcu, mask_array_rcu_cb); + call_rcu(&mc->rcu, mask_cache_rcu_cb); + call_rcu(&ma->rcu, mask_array_rcu_cb); table_instance_destroy(table, ti, ufid_ti, false); } @@ -724,6 +791,7 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl, u32 *n_mask_hit, u32 *n_cache_hit) { + struct mask_cache *mc = rcu_dereference(tbl->mask_cache); struct mask_array *ma = rcu_dereference(tbl->mask_array); struct table_instance *ti = rcu_dereference(tbl->ti); struct mask_cache_entry *entries, *ce; @@ -733,7 +801,7 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl, *n_mask_hit = 0; *n_cache_hit = 0; - if (unlikely(!skb_hash)) { + if (unlikely(!skb_hash || mc->cache_size == 0)) { u32 mask_index = 0; u32 cache = 0; @@ -749,11 +817,11 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl, ce = NULL; hash = skb_hash; - entries = this_cpu_ptr(tbl->mask_cache); + entries = this_cpu_ptr(mc->mask_cache); /* Find the cache entry 'ce' to operate on. 
*/ for (seg = 0; seg < MC_HASH_SEGS; seg++) { - int index = hash & (MC_HASH_ENTRIES - 1); + int index = hash & (mc->cache_size - 1); struct mask_cache_entry *e; e = &entries[index]; @@ -867,6 +935,13 @@ int ovs_flow_tbl_num_masks(const struct flow_table *table) return READ_ONCE(ma->count); } +u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table) +{ + struct mask_cache *mc = rcu_dereference(table->mask_cache); + + return READ_ONCE(mc->cache_size); +} + static struct table_instance *table_instance_expand(struct table_instance *ti, bool ufid) { @@ -1095,8 +1170,8 @@ void ovs_flow_masks_rebalance(struct flow_table *table) for (i = 0; i < masks_entries; i++) { int index = masks_and_count[i].index; - new->masks[new->count++] = - rcu_dereference_ovsl(ma->masks[index]); + if (ovsl_dereference(ma->masks[index])) + new->masks[new->count++] = ma->masks[index]; } rcu_assign_pointer(table->mask_array, new); diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h index 325e939371d8..74ce48fecba9 100644 --- a/net/openvswitch/flow_table.h +++ b/net/openvswitch/flow_table.h @@ -27,6 +27,12 @@ struct mask_cache_entry { u32 mask_index; }; +struct mask_cache { + struct rcu_head rcu; + u32 cache_size; /* Must be ^2 value. */ + struct mask_cache_entry __percpu *mask_cache; +}; + struct mask_count { int index; u64 counter; @@ -53,7 +59,7 @@ struct table_instance { struct flow_table { struct table_instance __rcu *ti; struct table_instance __rcu *ufid_ti; - struct mask_cache_entry __percpu *mask_cache; + struct mask_cache __rcu *mask_cache; struct mask_array __rcu *mask_array; unsigned long last_rehash; unsigned int count; @@ -77,6 +83,8 @@ int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, const struct sw_flow_mask *mask); void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow); int ovs_flow_tbl_num_masks(const struct flow_table *table); +u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table); +int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size); struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table, u32 *bucket, u32 *idx); struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *, -- cgit v1.2.3 From 5f402bb17533113c21d61c2d4bc4ef4a6fa1c9a5 Mon Sep 17 00:00:00 2001 From: Ahmad Fatoum Date: Fri, 31 Jul 2020 14:38:36 +0200 Subject: gpio: don't use same lockdep class for all devm_gpiochip_add_data users Commit 959bc7b22bd2 ("gpio: Automatically add lockdep keys") documents in its commits message its intention to "create a unique class key for each driver". It does so by having gpiochip_add_data add in-place the definition of two static lockdep classes for LOCKDEP use. That way, every caller of the macro adds their gpiochip with unique lockdep classes. There are many indirect callers of gpiochip_add_data, however, via use of devm_gpiochip_add_data. devm_gpiochip_add_data has external linkage and all its users will share the same lockdep classes, which probably is not intended. Fix this by replicating the gpio_chip_add_data statics-in-macro for the devm_ version as well. 
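The statics-in-macro pattern can be shown with a small user-space sketch (not gpiolib code): a static object declared inside a macro body is instantiated once per expansion site, so every call site gets its own object, whereas a static inside an ordinary exported function is shared by all callers. The sketch uses a GNU statement expression, as the kernel macro does; the names are invented for the example.

#include <stdio.h>

/* Statics-in-macro: each expansion site gets its own 'key' object,
 * mirroring how gpiochip_add_data() gives each driver unique lockdep
 * classes. */
#define register_chip(name) ({						\
	static int key;							\
	printf("%-10s macro key  at %p\n", name, (void *)&key);	\
})

/* A plain function with an internal static: every caller shares it,
 * which is the situation devm_gpiochip_add_data() users were in. */
static void register_chip_shared(const char *name)
{
	static int key;

	printf("%-10s shared key at %p\n", name, (void *)&key);
}

int main(void)
{
	register_chip("driver-a");		/* unique address */
	register_chip("driver-b");		/* different address */

	register_chip_shared("driver-a");	/* same address ... */
	register_chip_shared("driver-b");	/* ... as this one */
	return 0;
}

Printing the addresses shows two distinct keys for the macro form and one shared key for the function form, which is exactly the distinction between gpiochip_add_data() and the pre-patch devm_gpiochip_add_data().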
Fixes: 959bc7b22bd2 ("gpio: Automatically add lockdep keys") Signed-off-by: Ahmad Fatoum Reviewed-by: Andy Shevchenko Reviewed-by: Bartosz Golaszewski Link: https://lore.kernel.org/r/20200731123835.8003-1-a.fatoum@pengutronix.de Signed-off-by: Linus Walleij --- drivers/gpio/gpiolib-devres.c | 13 ++++++++----- include/linux/gpio/driver.h | 13 +++++++++++-- 2 files changed, 19 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c index 5c91c4365da1..7dbce4c4ebdf 100644 --- a/drivers/gpio/gpiolib-devres.c +++ b/drivers/gpio/gpiolib-devres.c @@ -487,10 +487,12 @@ static void devm_gpio_chip_release(struct device *dev, void *res) } /** - * devm_gpiochip_add_data() - Resource managed gpiochip_add_data() + * devm_gpiochip_add_data_with_key() - Resource managed gpiochip_add_data_with_key() * @dev: pointer to the device that gpio_chip belongs to. * @gc: the GPIO chip to register * @data: driver-private data associated with this chip + * @lock_key: lockdep class for IRQ lock + * @request_key: lockdep class for IRQ request * * Context: potentially before irqs will work * @@ -501,8 +503,9 @@ static void devm_gpio_chip_release(struct device *dev, void *res) * gc->base is invalid or already associated with a different chip. * Otherwise it returns zero as a success code. */ -int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *gc, - void *data) +int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc, void *data, + struct lock_class_key *lock_key, + struct lock_class_key *request_key) { struct gpio_chip **ptr; int ret; @@ -512,7 +515,7 @@ int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *gc, if (!ptr) return -ENOMEM; - ret = gpiochip_add_data(gc, data); + ret = gpiochip_add_data_with_key(gc, data, lock_key, request_key); if (ret < 0) { devres_free(ptr); return ret; @@ -523,4 +526,4 @@ int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *gc, return 0; } -EXPORT_SYMBOL_GPL(devm_gpiochip_add_data); +EXPORT_SYMBOL_GPL(devm_gpiochip_add_data_with_key); diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 6e9f1826ecd7..d1cef5c2715c 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -525,8 +525,16 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, gpiochip_add_data_with_key(gc, data, &lock_key, \ &request_key); \ }) +#define devm_gpiochip_add_data(dev, gc, data) ({ \ + static struct lock_class_key lock_key; \ + static struct lock_class_key request_key; \ + devm_gpiochip_add_data_with_key(dev, gc, data, &lock_key, \ + &request_key); \ + }) #else #define gpiochip_add_data(gc, data) gpiochip_add_data_with_key(gc, data, NULL, NULL) +#define devm_gpiochip_add_data(dev, gc, data) \ + devm_gpiochip_add_data_with_key(dev, gc, data, NULL, NULL) #endif /* CONFIG_LOCKDEP */ static inline int gpiochip_add(struct gpio_chip *gc) @@ -534,8 +542,9 @@ static inline int gpiochip_add(struct gpio_chip *gc) return gpiochip_add_data(gc, NULL); } extern void gpiochip_remove(struct gpio_chip *gc); -extern int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *gc, - void *data); +extern int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc, void *data, + struct lock_class_key *lock_key, + struct lock_class_key *request_key); extern struct gpio_chip *gpiochip_find(void *data, int (*match)(struct gpio_chip *gc, void *data)); -- cgit v1.2.3 From 88fab21c691bb1ff164e540735237a385e3afeaf Mon Sep 17 
00:00:00 2001 From: Ioana-Ruxandra Stăncioi Date: Mon, 3 Aug 2020 07:33:33 +0000 Subject: seg6_iptunnel: Refactor seg6_lwt_headroom out of uapi header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Refactor the function seg6_lwt_headroom out of the seg6_iptunnel.h uapi header, because it is only used in seg6_iptunnel.c. Moreover, it is only used in the kernel code, as indicated by the "#ifdef __KERNEL__". Suggested-by: David Miller Signed-off-by: Ioana-Ruxandra Stăncioi Signed-off-by: David S. Miller --- include/uapi/linux/seg6_iptunnel.h | 21 --------------------- net/ipv6/seg6_iptunnel.c | 17 +++++++++++++++++ 2 files changed, 17 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/seg6_iptunnel.h b/include/uapi/linux/seg6_iptunnel.h index 09fb608a35ec..eb815e0d0ac3 100644 --- a/include/uapi/linux/seg6_iptunnel.h +++ b/include/uapi/linux/seg6_iptunnel.h @@ -37,25 +37,4 @@ enum { SEG6_IPTUN_MODE_L2ENCAP, }; -#ifdef __KERNEL__ - -static inline size_t seg6_lwt_headroom(struct seg6_iptunnel_encap *tuninfo) -{ - int head = 0; - - switch (tuninfo->mode) { - case SEG6_IPTUN_MODE_INLINE: - break; - case SEG6_IPTUN_MODE_ENCAP: - head = sizeof(struct ipv6hdr); - break; - case SEG6_IPTUN_MODE_L2ENCAP: - return 0; - } - - return ((tuninfo->srh->hdrlen + 1) << 3) + head; -} - -#endif - #endif diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index e0e9f48ab14f..897fa59c47de 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c @@ -27,6 +27,23 @@ #include #endif +static size_t seg6_lwt_headroom(struct seg6_iptunnel_encap *tuninfo) +{ + int head = 0; + + switch (tuninfo->mode) { + case SEG6_IPTUN_MODE_INLINE: + break; + case SEG6_IPTUN_MODE_ENCAP: + head = sizeof(struct ipv6hdr); + break; + case SEG6_IPTUN_MODE_L2ENCAP: + return 0; + } + + return ((tuninfo->srh->hdrlen + 1) << 3) + head; +} + struct seg6_lwt { struct dst_cache cache; struct seg6_iptunnel_encap tuninfo[]; -- cgit v1.2.3 From 08e335f6ad35a019f4cb1a74badc2f4bceb63bcf Mon Sep 17 00:00:00 2001 From: Amit Cohen Date: Mon, 3 Aug 2020 19:11:33 +0300 Subject: devlink: Add early_drop trap Add the packet trap that can report packets that were ECN marked due to RED AQM. Signed-off-by: Amit Cohen Signed-off-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- Documentation/networking/devlink/devlink-trap.rst | 4 ++++ include/net/devlink.h | 3 +++ net/core/devlink.c | 1 + 3 files changed, 8 insertions(+) (limited to 'include') diff --git a/Documentation/networking/devlink/devlink-trap.rst b/Documentation/networking/devlink/devlink-trap.rst index 2014307fbe63..7a798352b45d 100644 --- a/Documentation/networking/devlink/devlink-trap.rst +++ b/Documentation/networking/devlink/devlink-trap.rst @@ -405,6 +405,10 @@ be added to the following table: - ``control`` - Traps packets logged during processing of flow action trap (e.g., via tc's trap action) + * - ``early_drop`` + - ``drop`` + - Traps packets dropped due to the RED (Random Early Detection) algorithm + (i.e., early drops) Driver-specific Packet Traps ============================ diff --git a/include/net/devlink.h b/include/net/devlink.h index 0606967cb501..fd3ae0760492 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -703,6 +703,7 @@ enum devlink_trap_generic_id { DEVLINK_TRAP_GENERIC_ID_PTP_GENERAL, DEVLINK_TRAP_GENERIC_ID_FLOW_ACTION_SAMPLE, DEVLINK_TRAP_GENERIC_ID_FLOW_ACTION_TRAP, + DEVLINK_TRAP_GENERIC_ID_EARLY_DROP, /* Add new generic trap IDs above */ __DEVLINK_TRAP_GENERIC_ID_MAX, @@ -891,6 +892,8 @@ enum devlink_trap_group_generic_id { "flow_action_sample" #define DEVLINK_TRAP_GENERIC_NAME_FLOW_ACTION_TRAP \ "flow_action_trap" +#define DEVLINK_TRAP_GENERIC_NAME_EARLY_DROP \ + "early_drop" #define DEVLINK_TRAP_GROUP_GENERIC_NAME_L2_DROPS \ "l2_drops" diff --git a/net/core/devlink.c b/net/core/devlink.c index 5fdebb7289e9..bde4c29a30bc 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -8801,6 +8801,7 @@ static const struct devlink_trap devlink_trap_generic[] = { DEVLINK_TRAP(PTP_GENERAL, CONTROL), DEVLINK_TRAP(FLOW_ACTION_SAMPLE, CONTROL), DEVLINK_TRAP(FLOW_ACTION_TRAP, CONTROL), + DEVLINK_TRAP(EARLY_DROP, DROP), }; #define DEVLINK_TRAP_GROUP(_id) \ -- cgit v1.2.3 From c88e11e04716ab4ed51d5972ea04c7b70b6e9d8a Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Mon, 3 Aug 2020 19:11:34 +0300 Subject: devlink: Pass extack when setting trap's action and group's parameters A later patch will refuse to set the action of certain traps in mlxsw and also to change the policer binding of certain groups. Pass extack so that failure could be communicated clearly to user space. Reviewed-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 10 ++++++---- drivers/net/ethernet/mellanox/mlxsw/core.h | 6 ++++-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 6 ++++-- drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c | 13 ++++++++----- drivers/net/netdevsim/dev.c | 6 ++++-- include/net/devlink.h | 6 ++++-- net/core/devlink.c | 8 +++++--- 7 files changed, 35 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 866381e72960..08d101138fbe 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -1177,14 +1177,15 @@ static void mlxsw_devlink_trap_fini(struct devlink *devlink, static int mlxsw_devlink_trap_action_set(struct devlink *devlink, const struct devlink_trap *trap, - enum devlink_trap_action action) + enum devlink_trap_action action, + struct netlink_ext_ack *extack) { struct mlxsw_core *mlxsw_core = devlink_priv(devlink); struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; if (!mlxsw_driver->trap_action_set) return -EOPNOTSUPP; - return mlxsw_driver->trap_action_set(mlxsw_core, trap, action); + return mlxsw_driver->trap_action_set(mlxsw_core, trap, action, extack); } static int @@ -1202,14 +1203,15 @@ mlxsw_devlink_trap_group_init(struct devlink *devlink, static int mlxsw_devlink_trap_group_set(struct devlink *devlink, const struct devlink_trap_group *group, - const struct devlink_trap_policer *policer) + const struct devlink_trap_policer *policer, + struct netlink_ext_ack *extack) { struct mlxsw_core *mlxsw_core = devlink_priv(devlink); struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; if (!mlxsw_driver->trap_group_set) return -EOPNOTSUPP; - return mlxsw_driver->trap_group_set(mlxsw_core, group, policer); + return mlxsw_driver->trap_group_set(mlxsw_core, group, policer, extack); } static int diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index c1c1e039323a..219ce89e629a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -326,12 +326,14 @@ struct mlxsw_driver { const struct devlink_trap *trap, void *trap_ctx); int (*trap_action_set)(struct mlxsw_core *mlxsw_core, const struct devlink_trap *trap, - enum devlink_trap_action action); + enum devlink_trap_action action, + struct netlink_ext_ack *extack); int (*trap_group_init)(struct mlxsw_core *mlxsw_core, const struct devlink_trap_group *group); int (*trap_group_set)(struct mlxsw_core *mlxsw_core, const struct devlink_trap_group *group, - const struct devlink_trap_policer *policer); + const struct devlink_trap_policer *policer, + struct netlink_ext_ack *extack); int (*trap_policer_init)(struct mlxsw_core *mlxsw_core, const struct devlink_trap_policer *policer); void (*trap_policer_fini)(struct mlxsw_core *mlxsw_core, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 6ab1b6d725af..866a1193f12b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -1177,12 +1177,14 @@ void mlxsw_sp_trap_fini(struct mlxsw_core *mlxsw_core, const struct devlink_trap *trap, void *trap_ctx); int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core, const struct devlink_trap *trap, - enum devlink_trap_action action); + enum devlink_trap_action action, + struct netlink_ext_ack *extack); int mlxsw_sp_trap_group_init(struct mlxsw_core 
*mlxsw_core, const struct devlink_trap_group *group); int mlxsw_sp_trap_group_set(struct mlxsw_core *mlxsw_core, const struct devlink_trap_group *group, - const struct devlink_trap_policer *policer); + const struct devlink_trap_policer *policer, + struct netlink_ext_ack *extack); int mlxsw_sp_trap_policer_init(struct mlxsw_core *mlxsw_core, const struct devlink_trap_policer *policer); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c index 1e38dfe7cf64..00b6cb9d2306 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c @@ -1352,7 +1352,8 @@ void mlxsw_sp_trap_fini(struct mlxsw_core *mlxsw_core, int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core, const struct devlink_trap *trap, - enum devlink_trap_action action) + enum devlink_trap_action action, + struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); const struct mlxsw_sp_trap_item *trap_item; @@ -1392,7 +1393,7 @@ int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core, static int __mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core, const struct devlink_trap_group *group, - u32 policer_id) + u32 policer_id, struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); u16 hw_policer_id = MLXSW_REG_HTGT_INVALID_POLICER; @@ -1422,16 +1423,18 @@ int mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core, const struct devlink_trap_group *group) { return __mlxsw_sp_trap_group_init(mlxsw_core, group, - group->init_policer_id); + group->init_policer_id, NULL); } int mlxsw_sp_trap_group_set(struct mlxsw_core *mlxsw_core, const struct devlink_trap_group *group, - const struct devlink_trap_policer *policer) + const struct devlink_trap_policer *policer, + struct netlink_ext_ack *extack) { u32 policer_id = policer ? policer->id : 0; - return __mlxsw_sp_trap_group_init(mlxsw_core, group, policer_id); + return __mlxsw_sp_trap_group_init(mlxsw_core, group, policer_id, + extack); } static int diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index ce719c830a77..32f339fedb21 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -810,7 +810,8 @@ static int nsim_dev_devlink_trap_init(struct devlink *devlink, static int nsim_dev_devlink_trap_action_set(struct devlink *devlink, const struct devlink_trap *trap, - enum devlink_trap_action action) + enum devlink_trap_action action, + struct netlink_ext_ack *extack) { struct nsim_dev *nsim_dev = devlink_priv(devlink); struct nsim_trap_item *nsim_trap_item; @@ -829,7 +830,8 @@ nsim_dev_devlink_trap_action_set(struct devlink *devlink, static int nsim_dev_devlink_trap_group_set(struct devlink *devlink, const struct devlink_trap_group *group, - const struct devlink_trap_policer *policer) + const struct devlink_trap_policer *policer, + struct netlink_ext_ack *extack) { struct nsim_dev *nsim_dev = devlink_priv(devlink); diff --git a/include/net/devlink.h b/include/net/devlink.h index fd3ae0760492..8f3c8a443238 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1077,7 +1077,8 @@ struct devlink_ops { */ int (*trap_action_set)(struct devlink *devlink, const struct devlink_trap *trap, - enum devlink_trap_action action); + enum devlink_trap_action action, + struct netlink_ext_ack *extack); /** * @trap_group_init: Trap group initialization function. 
* @@ -1094,7 +1095,8 @@ struct devlink_ops { */ int (*trap_group_set)(struct devlink *devlink, const struct devlink_trap_group *group, - const struct devlink_trap_policer *policer); + const struct devlink_trap_policer *policer, + struct netlink_ext_ack *extack); /** * @trap_policer_init: Trap policer initialization function. * diff --git a/net/core/devlink.c b/net/core/devlink.c index bde4c29a30bc..e674f0f46dc2 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -6423,7 +6423,7 @@ static int __devlink_trap_action_set(struct devlink *devlink, } err = devlink->ops->trap_action_set(devlink, trap_item->trap, - trap_action); + trap_action, extack); if (err) return err; @@ -6713,7 +6713,8 @@ static int devlink_trap_group_set(struct devlink *devlink, } policer = policer_item ? policer_item->policer : NULL; - err = devlink->ops->trap_group_set(devlink, group_item->group, policer); + err = devlink->ops->trap_group_set(devlink, group_item->group, policer, + extack); if (err) return err; @@ -9051,7 +9052,8 @@ static void devlink_trap_disable(struct devlink *devlink, if (WARN_ON_ONCE(!trap_item)) return; - devlink->ops->trap_action_set(devlink, trap, DEVLINK_TRAP_ACTION_DROP); + devlink->ops->trap_action_set(devlink, trap, DEVLINK_TRAP_ACTION_DROP, + NULL); trap_item->action = DEVLINK_TRAP_ACTION_DROP; } -- cgit v1.2.3 From 6c84a589972f6458da3168a68e7d65f6ca8687f8 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 3 Aug 2020 13:03:52 -0700 Subject: net: dsa: loop: Move data structures to header In preparation for adding support for a mockup data path, move the driver data structures to include/linux/dsa/loop.h such that we can share them between net/dsa/ and drivers/net/dsa/ later on. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/dsa_loop.c | 32 +------------------------------- include/linux/dsa/loop.h | 40 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 31 deletions(-) create mode 100644 include/linux/dsa/loop.h (limited to 'include') diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index 6e97b44c6f3f..ed0b580c9944 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -14,28 +14,11 @@ #include #include #include +#include #include #include "dsa_loop.h" -struct dsa_loop_vlan { - u16 members; - u16 untagged; -}; - -struct dsa_loop_mib_entry { - char name[ETH_GSTRING_LEN]; - unsigned long val; -}; - -enum dsa_loop_mib_counters { - DSA_LOOP_PHY_READ_OK, - DSA_LOOP_PHY_READ_ERR, - DSA_LOOP_PHY_WRITE_OK, - DSA_LOOP_PHY_WRITE_ERR, - __DSA_LOOP_CNT_MAX, -}; - static struct dsa_loop_mib_entry dsa_loop_mibs[] = { [DSA_LOOP_PHY_READ_OK] = { "phy_read_ok", }, [DSA_LOOP_PHY_READ_ERR] = { "phy_read_err", }, @@ -43,19 +26,6 @@ static struct dsa_loop_mib_entry dsa_loop_mibs[] = { [DSA_LOOP_PHY_WRITE_ERR] = { "phy_write_err", }, }; -struct dsa_loop_port { - struct dsa_loop_mib_entry mib[__DSA_LOOP_CNT_MAX]; - u16 pvid; -}; - -struct dsa_loop_priv { - struct mii_bus *bus; - unsigned int port_base; - struct dsa_loop_vlan vlans[VLAN_N_VID]; - struct net_device *netdev; - struct dsa_loop_port ports[DSA_MAX_PORTS]; -}; - static struct phy_device *phydevs[PHY_MAX_ADDR]; static enum dsa_tag_protocol dsa_loop_get_protocol(struct dsa_switch *ds, diff --git a/include/linux/dsa/loop.h b/include/linux/dsa/loop.h new file mode 100644 index 000000000000..bb39401a8056 --- /dev/null +++ b/include/linux/dsa/loop.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef DSA_LOOP_H +#define DSA_LOOP_H + 
+#include +#include +#include + +struct dsa_loop_vlan { + u16 members; + u16 untagged; +}; + +struct dsa_loop_mib_entry { + char name[ETH_GSTRING_LEN]; + unsigned long val; +}; + +enum dsa_loop_mib_counters { + DSA_LOOP_PHY_READ_OK, + DSA_LOOP_PHY_READ_ERR, + DSA_LOOP_PHY_WRITE_OK, + DSA_LOOP_PHY_WRITE_ERR, + __DSA_LOOP_CNT_MAX, +}; + +struct dsa_loop_port { + struct dsa_loop_mib_entry mib[__DSA_LOOP_CNT_MAX]; + u16 pvid; +}; + +struct dsa_loop_priv { + struct mii_bus *bus; + unsigned int port_base; + struct dsa_loop_vlan vlans[VLAN_N_VID]; + struct net_device *netdev; + struct dsa_loop_port ports[DSA_MAX_PORTS]; +}; + +#endif /* DSA_LOOP_H */ -- cgit v1.2.3 From c99194eded25deb58f606e59b3101ba05e786021 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 3 Aug 2020 13:03:53 -0700 Subject: net: dsa: loop: Wire-up MTU callbacks For now we simply store the port MTU into a per-port member. Signed-off-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/dsa_loop.c | 17 +++++++++++++++++ include/linux/dsa/loop.h | 1 + 2 files changed, 18 insertions(+) (limited to 'include') diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index ed0b580c9944..6a7d661b5a59 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -225,6 +225,21 @@ static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port, return 0; } +static int dsa_loop_port_change_mtu(struct dsa_switch *ds, int port, + int new_mtu) +{ + struct dsa_loop_priv *priv = ds->priv; + + priv->ports[port].mtu = new_mtu; + + return 0; +} + +static int dsa_loop_port_max_mtu(struct dsa_switch *ds, int port) +{ + return ETH_MAX_MTU; +} + static const struct dsa_switch_ops dsa_loop_driver = { .get_tag_protocol = dsa_loop_get_protocol, .setup = dsa_loop_setup, @@ -241,6 +256,8 @@ static const struct dsa_switch_ops dsa_loop_driver = { .port_vlan_prepare = dsa_loop_port_vlan_prepare, .port_vlan_add = dsa_loop_port_vlan_add, .port_vlan_del = dsa_loop_port_vlan_del, + .port_change_mtu = dsa_loop_port_change_mtu, + .port_max_mtu = dsa_loop_port_max_mtu, }; static int dsa_loop_drv_probe(struct mdio_device *mdiodev) diff --git a/include/linux/dsa/loop.h b/include/linux/dsa/loop.h index bb39401a8056..5a3470bcc8a7 100644 --- a/include/linux/dsa/loop.h +++ b/include/linux/dsa/loop.h @@ -27,6 +27,7 @@ enum dsa_loop_mib_counters { struct dsa_loop_port { struct dsa_loop_mib_entry mib[__DSA_LOOP_CNT_MAX]; u16 pvid; + int mtu; }; struct dsa_loop_priv { -- cgit v1.2.3 From c1d55d50139bea6bfe964458272a93dd899efb83 Mon Sep 17 00:00:00 2001 From: Stafford Horne Date: Wed, 29 Jul 2020 21:03:14 +0900 Subject: asm-generic/io.h: Fix sparse warnings on big-endian architectures On big-endian architectures like OpenRISC, sparse outputs below warnings on asm-generic/io.h. This is due to io statements like: __raw_writel(cpu_to_le32(value), PCI_IOBASE + addr); The __raw_writel() function expects native endianness, however cpu_to_le32() returns __le32. On little-endian machines these match up and there is no issue. However, on big-endian we get warnings, for IO that is defined as little-endian the mismatch is expected. The fix I propose is to __force to native endian. 
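As a hedged illustration of the mismatch (plain user-space C with stand-in definitions for the sparse annotations; under a regular compiler they expand to nothing, while sparse treats the __bitwise typedef as a restricted type):

#include <stdint.h>

/* Stand-ins for the kernel's annotations; real builds define these only
 * when __CHECKER__ (sparse) is set. */
#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef uint32_t __bitwise __le32;

/* cpu_to_le32() returns the restricted __le32 type ... */
static inline __le32 cpu_to_le32(uint32_t val)
{
	return (__force __le32)val;	/* byte swap omitted in this sketch */
}

/* ... but the raw accessor expects a plain, native-endian value. */
static inline void __raw_writel(uint32_t value, volatile void *addr)
{
	*(volatile uint32_t *)addr = value;
}

static uint32_t fake_reg;

int main(void)
{
	/* Sparse warns here: restricted __le32 passed where u32 is expected. */
	/* __raw_writel(cpu_to_le32(0x1234), &fake_reg); */

	/* The fix forces the value back to a native type for the accessor: */
	__raw_writel((uint32_t __force)cpu_to_le32(0x1234), &fake_reg);
	return 0;
}

With the __force cast in place sparse accepts the call; without it, it emits warnings like the ones below.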
Warnings: ./include/asm-generic/io.h:166:15: warning: cast to restricted __le16 ./include/asm-generic/io.h:166:15: warning: cast to restricted __le16 ./include/asm-generic/io.h:166:15: warning: cast to restricted __le16 ./include/asm-generic/io.h:166:15: warning: cast to restricted __le16 ./include/asm-generic/io.h:179:15: warning: cast to restricted __le32 ./include/asm-generic/io.h:179:15: warning: cast to restricted __le32 ./include/asm-generic/io.h:179:15: warning: cast to restricted __le32 ./include/asm-generic/io.h:179:15: warning: cast to restricted __le32 ./include/asm-generic/io.h:179:15: warning: cast to restricted __le32 ./include/asm-generic/io.h:179:15: warning: cast to restricted __le32 ./include/asm-generic/io.h:215:22: warning: incorrect type in argument 1 (different base types) ./include/asm-generic/io.h:215:22: expected unsigned short [usertype] value ./include/asm-generic/io.h:215:22: got restricted __le16 [usertype] ./include/asm-generic/io.h:225:22: warning: incorrect type in argument 1 (different base types) ./include/asm-generic/io.h:225:22: expected unsigned int [usertype] value ./include/asm-generic/io.h:225:22: got restricted __le32 [usertype] Signed-off-by: Stafford Horne Acked-by: Arnd Bergmann --- include/asm-generic/io.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index 30a3aab312e6..dabf8cb7203b 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -163,7 +163,7 @@ static inline u16 readw(const volatile void __iomem *addr) u16 val; __io_br(); - val = __le16_to_cpu(__raw_readw(addr)); + val = __le16_to_cpu((__le16 __force)__raw_readw(addr)); __io_ar(val); return val; } @@ -176,7 +176,7 @@ static inline u32 readl(const volatile void __iomem *addr) u32 val; __io_br(); - val = __le32_to_cpu(__raw_readl(addr)); + val = __le32_to_cpu((__le32 __force)__raw_readl(addr)); __io_ar(val); return val; } @@ -212,7 +212,7 @@ static inline void writeb(u8 value, volatile void __iomem *addr) static inline void writew(u16 value, volatile void __iomem *addr) { __io_bw(); - __raw_writew(cpu_to_le16(value), addr); + __raw_writew((u16 __force)cpu_to_le16(value), addr); __io_aw(); } #endif @@ -222,7 +222,7 @@ static inline void writew(u16 value, volatile void __iomem *addr) static inline void writel(u32 value, volatile void __iomem *addr) { __io_bw(); - __raw_writel(__cpu_to_le32(value), addr); + __raw_writel((u32 __force)__cpu_to_le32(value), addr); __io_aw(); } #endif @@ -474,7 +474,7 @@ static inline u16 _inw(unsigned long addr) u16 val; __io_pbr(); - val = __le16_to_cpu(__raw_readw(PCI_IOBASE + addr)); + val = __le16_to_cpu((__le16 __force)__raw_readw(PCI_IOBASE + addr)); __io_par(val); return val; } @@ -487,7 +487,7 @@ static inline u32 _inl(unsigned long addr) u32 val; __io_pbr(); - val = __le32_to_cpu(__raw_readl(PCI_IOBASE + addr)); + val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr)); __io_par(val); return val; } @@ -508,7 +508,7 @@ static inline void _outb(u8 value, unsigned long addr) static inline void _outw(u16 value, unsigned long addr) { __io_pbw(); - __raw_writew(cpu_to_le16(value), PCI_IOBASE + addr); + __raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr); __io_paw(); } #endif @@ -518,7 +518,7 @@ static inline void _outw(u16 value, unsigned long addr) static inline void _outl(u32 value, unsigned long addr) { __io_pbw(); - __raw_writel(cpu_to_le32(value), PCI_IOBASE + addr); + __raw_writel((u32 
__force)cpu_to_le32(value), PCI_IOBASE + addr); __io_paw(); } #endif -- cgit v1.2.3 From 6cb4f3b055fe42bb0f20c6ad6401d325b39d1e2a Mon Sep 17 00:00:00 2001 From: Dennis YC Hsieh Date: Sun, 5 Jul 2020 14:37:13 +0800 Subject: dt-binding: gce: add gce header file for mt6779 Add documentation for the mt6779 gce. Add gce header file defined the gce hardware event, subsys number and constant for mt6779. Signed-off-by: Dennis YC Hsieh Reviewed-by: Rob Herring Reviewed-by: CK Hu Reviewed-by: Bibby Hsieh Signed-off-by: Jassi Brar --- .../devicetree/bindings/mailbox/mtk-gce.txt | 8 +- include/dt-bindings/gce/mt6779-gce.h | 222 +++++++++++++++++++++ 2 files changed, 227 insertions(+), 3 deletions(-) create mode 100644 include/dt-bindings/gce/mt6779-gce.h (limited to 'include') diff --git a/Documentation/devicetree/bindings/mailbox/mtk-gce.txt b/Documentation/devicetree/bindings/mailbox/mtk-gce.txt index 0b5b2a6bcc48..cf48cd806e00 100644 --- a/Documentation/devicetree/bindings/mailbox/mtk-gce.txt +++ b/Documentation/devicetree/bindings/mailbox/mtk-gce.txt @@ -9,7 +9,8 @@ CMDQ driver uses mailbox framework for communication. Please refer to mailbox.txt for generic information about mailbox device-tree bindings. Required properties: -- compatible: can be "mediatek,mt8173-gce" or "mediatek,mt8183-gce" +- compatible: can be "mediatek,mt8173-gce", "mediatek,mt8183-gce" or + "mediatek,mt6779-gce". - reg: Address range of the GCE unit - interrupts: The interrupt signal from the GCE block - clock: Clocks according to the common clock binding @@ -34,8 +35,9 @@ Optional properties for a client device: start_offset: the start offset of register address that GCE can access. size: the total size of register address that GCE can access. -Some vaules of properties are defined in 'dt-bindings/gce/mt8173-gce.h' -or 'dt-binding/gce/mt8183-gce.h'. Such as sub-system ids, thread priority, event ids. +Some vaules of properties are defined in 'dt-bindings/gce/mt8173-gce.h', +'dt-binding/gce/mt8183-gce.h' or 'dt-bindings/gce/mt6779-gce.h'. Such as +sub-system ids, thread priority, event ids. Example: diff --git a/include/dt-bindings/gce/mt6779-gce.h b/include/dt-bindings/gce/mt6779-gce.h new file mode 100644 index 000000000000..06101316ace4 --- /dev/null +++ b/include/dt-bindings/gce/mt6779-gce.h @@ -0,0 +1,222 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019 MediaTek Inc. 
+ * Author: Dennis-YC Hsieh + */ + +#ifndef _DT_BINDINGS_GCE_MT6779_H +#define _DT_BINDINGS_GCE_MT6779_H + +#define CMDQ_NO_TIMEOUT 0xffffffff + +/* GCE HW thread priority */ +#define CMDQ_THR_PRIO_LOWEST 0 +#define CMDQ_THR_PRIO_1 1 +#define CMDQ_THR_PRIO_2 2 +#define CMDQ_THR_PRIO_3 3 +#define CMDQ_THR_PRIO_4 4 +#define CMDQ_THR_PRIO_5 5 +#define CMDQ_THR_PRIO_6 6 +#define CMDQ_THR_PRIO_HIGHEST 7 + +/* GCE subsys table */ +#define SUBSYS_1300XXXX 0 +#define SUBSYS_1400XXXX 1 +#define SUBSYS_1401XXXX 2 +#define SUBSYS_1402XXXX 3 +#define SUBSYS_1502XXXX 4 +#define SUBSYS_1880XXXX 5 +#define SUBSYS_1881XXXX 6 +#define SUBSYS_1882XXXX 7 +#define SUBSYS_1883XXXX 8 +#define SUBSYS_1884XXXX 9 +#define SUBSYS_1000XXXX 10 +#define SUBSYS_1001XXXX 11 +#define SUBSYS_1002XXXX 12 +#define SUBSYS_1003XXXX 13 +#define SUBSYS_1004XXXX 14 +#define SUBSYS_1005XXXX 15 +#define SUBSYS_1020XXXX 16 +#define SUBSYS_1028XXXX 17 +#define SUBSYS_1700XXXX 18 +#define SUBSYS_1701XXXX 19 +#define SUBSYS_1702XXXX 20 +#define SUBSYS_1703XXXX 21 +#define SUBSYS_1800XXXX 22 +#define SUBSYS_1801XXXX 23 +#define SUBSYS_1802XXXX 24 +#define SUBSYS_1804XXXX 25 +#define SUBSYS_1805XXXX 26 +#define SUBSYS_1808XXXX 27 +#define SUBSYS_180aXXXX 28 +#define SUBSYS_180bXXXX 29 +#define CMDQ_SUBSYS_OFF 32 + +/* GCE hardware events */ +#define CMDQ_EVENT_DISP_RDMA0_SOF 0 +#define CMDQ_EVENT_DISP_RDMA1_SOF 1 +#define CMDQ_EVENT_MDP_RDMA0_SOF 2 +#define CMDQ_EVENT_MDP_RDMA1_SOF 3 +#define CMDQ_EVENT_MDP_RSZ0_SOF 4 +#define CMDQ_EVENT_MDP_RSZ1_SOF 5 +#define CMDQ_EVENT_MDP_TDSHP_SOF 6 +#define CMDQ_EVENT_MDP_WROT0_SOF 7 +#define CMDQ_EVENT_MDP_WROT1_SOF 8 +#define CMDQ_EVENT_DISP_OVL0_SOF 9 +#define CMDQ_EVENT_DISP_2L_OVL0_SOF 10 +#define CMDQ_EVENT_DISP_2L_OVL1_SOF 11 +#define CMDQ_EVENT_DISP_WDMA0_SOF 12 +#define CMDQ_EVENT_DISP_COLOR0_SOF 13 +#define CMDQ_EVENT_DISP_CCORR0_SOF 14 +#define CMDQ_EVENT_DISP_AAL0_SOF 15 +#define CMDQ_EVENT_DISP_GAMMA0_SOF 16 +#define CMDQ_EVENT_DISP_DITHER0_SOF 17 +#define CMDQ_EVENT_DISP_PWM0_SOF 18 +#define CMDQ_EVENT_DISP_DSI0_SOF 19 +#define CMDQ_EVENT_DISP_DPI0_SOF 20 +#define CMDQ_EVENT_DISP_POSTMASK0_SOF 21 +#define CMDQ_EVENT_DISP_RSZ0_SOF 22 +#define CMDQ_EVENT_MDP_AAL_SOF 23 +#define CMDQ_EVENT_MDP_CCORR_SOF 24 +#define CMDQ_EVENT_DISP_DBI0_SOF 25 +#define CMDQ_EVENT_ISP_RELAY_SOF 26 +#define CMDQ_EVENT_IPU_RELAY_SOF 27 +#define CMDQ_EVENT_DISP_RDMA0_EOF 28 +#define CMDQ_EVENT_DISP_RDMA1_EOF 29 +#define CMDQ_EVENT_MDP_RDMA0_EOF 30 +#define CMDQ_EVENT_MDP_RDMA1_EOF 31 +#define CMDQ_EVENT_MDP_RSZ0_EOF 32 +#define CMDQ_EVENT_MDP_RSZ1_EOF 33 +#define CMDQ_EVENT_MDP_TDSHP_EOF 34 +#define CMDQ_EVENT_MDP_WROT0_W_EOF 35 +#define CMDQ_EVENT_MDP_WROT1_W_EOF 36 +#define CMDQ_EVENT_DISP_OVL0_EOF 37 +#define CMDQ_EVENT_DISP_2L_OVL0_EOF 38 +#define CMDQ_EVENT_DISP_2L_OVL1_EOF 39 +#define CMDQ_EVENT_DISP_WDMA0_EOF 40 +#define CMDQ_EVENT_DISP_COLOR0_EOF 41 +#define CMDQ_EVENT_DISP_CCORR0_EOF 42 +#define CMDQ_EVENT_DISP_AAL0_EOF 43 +#define CMDQ_EVENT_DISP_GAMMA0_EOF 44 +#define CMDQ_EVENT_DISP_DITHER0_EOF 45 +#define CMDQ_EVENT_DISP_DSI0_EOF 46 +#define CMDQ_EVENT_DISP_DPI0_EOF 47 +#define CMDQ_EVENT_DISP_RSZ0_EOF 49 +#define CMDQ_EVENT_MDP_AAL_FRAME_DONE 50 +#define CMDQ_EVENT_MDP_CCORR_FRAME_DONE 51 +#define CMDQ_EVENT_DISP_POSTMASK0_FRAME_DONE 52 +#define CMDQ_EVENT_MUTEX0_STREAM_EOF 130 +#define CMDQ_EVENT_MUTEX1_STREAM_EOF 131 +#define CMDQ_EVENT_MUTEX2_STREAM_EOF 132 +#define CMDQ_EVENT_MUTEX3_STREAM_EOF 133 +#define CMDQ_EVENT_MUTEX4_STREAM_EOF 134 +#define CMDQ_EVENT_MUTEX5_STREAM_EOF 135 +#define 
CMDQ_EVENT_MUTEX6_STREAM_EOF 136 +#define CMDQ_EVENT_MUTEX7_STREAM_EOF 137 +#define CMDQ_EVENT_MUTEX8_STREAM_EOF 138 +#define CMDQ_EVENT_MUTEX9_STREAM_EOF 139 +#define CMDQ_EVENT_MUTEX10_STREAM_EOF 140 +#define CMDQ_EVENT_MUTEX11_STREAM_EOF 141 +#define CMDQ_EVENT_DISP_RDMA0_UNDERRUN 142 +#define CMDQ_EVENT_DISP_RDMA1_UNDERRUN 143 +#define CMDQ_EVENT_DISP_RDMA2_UNDERRUN 144 +#define CMDQ_EVENT_DISP_RDMA3_UNDERRUN 145 +#define CMDQ_EVENT_DSI0_TE 146 +#define CMDQ_EVENT_DSI0_IRQ_EVENT 147 +#define CMDQ_EVENT_DSI0_DONE_EVENT 148 +#define CMDQ_EVENT_DISP_POSTMASK0_RST_DONE 150 +#define CMDQ_EVENT_DISP_WDMA0_RST_DONE 151 +#define CMDQ_EVENT_MDP_WROT0_RST_DONE 153 +#define CMDQ_EVENT_MDP_RDMA0_RST_DONE 154 +#define CMDQ_EVENT_DISP_OVL0_RST_DONE 155 +#define CMDQ_EVENT_DISP_OVL0_2L_RST_DONE 156 +#define CMDQ_EVENT_DISP_OVL1_2L_RST_DONE 157 +#define CMDQ_EVENT_DIP_CQ_THREAD0_EOF 257 +#define CMDQ_EVENT_DIP_CQ_THREAD1_EOF 258 +#define CMDQ_EVENT_DIP_CQ_THREAD2_EOF 259 +#define CMDQ_EVENT_DIP_CQ_THREAD3_EOF 260 +#define CMDQ_EVENT_DIP_CQ_THREAD4_EOF 261 +#define CMDQ_EVENT_DIP_CQ_THREAD5_EOF 262 +#define CMDQ_EVENT_DIP_CQ_THREAD6_EOF 263 +#define CMDQ_EVENT_DIP_CQ_THREAD7_EOF 264 +#define CMDQ_EVENT_DIP_CQ_THREAD8_EOF 265 +#define CMDQ_EVENT_DIP_CQ_THREAD9_EOF 266 +#define CMDQ_EVENT_DIP_CQ_THREAD10_EOF 267 +#define CMDQ_EVENT_DIP_CQ_THREAD11_EOF 268 +#define CMDQ_EVENT_DIP_CQ_THREAD12_EOF 269 +#define CMDQ_EVENT_DIP_CQ_THREAD13_EOF 270 +#define CMDQ_EVENT_DIP_CQ_THREAD14_EOF 271 +#define CMDQ_EVENT_DIP_CQ_THREAD15_EOF 272 +#define CMDQ_EVENT_DIP_CQ_THREAD16_EOF 273 +#define CMDQ_EVENT_DIP_CQ_THREAD17_EOF 274 +#define CMDQ_EVENT_DIP_CQ_THREAD18_EOF 275 +#define CMDQ_EVENT_DIP_DMA_ERR_EVENT 276 +#define CMDQ_EVENT_AMD_FRAME_DONE 277 +#define CMDQ_EVENT_MFB_DONE 278 +#define CMDQ_EVENT_WPE_A_EOF 279 +#define CMDQ_EVENT_VENC_EOF 289 +#define CMDQ_EVENT_VENC_CMDQ_PAUSE_DONE 290 +#define CMDQ_EVENT_JPEG_ENC_EOF 291 +#define CMDQ_EVENT_VENC_MB_DONE 292 +#define CMDQ_EVENT_VENC_128BYTE_CNT_DONE 293 +#define CMDQ_EVENT_ISP_FRAME_DONE_A 321 +#define CMDQ_EVENT_ISP_FRAME_DONE_B 322 +#define CMDQ_EVENT_ISP_FRAME_DONE_C 323 +#define CMDQ_EVENT_ISP_CAMSV_0_PASS1_DONE 324 +#define CMDQ_EVENT_ISP_CAMSV_0_2_PASS1_DONE 325 +#define CMDQ_EVENT_ISP_CAMSV_1_PASS1_DONE 326 +#define CMDQ_EVENT_ISP_CAMSV_2_PASS1_DONE 327 +#define CMDQ_EVENT_ISP_CAMSV_3_PASS1_DONE 328 +#define CMDQ_EVENT_ISP_TSF_DONE 329 +#define CMDQ_EVENT_SENINF_0_FIFO_FULL 330 +#define CMDQ_EVENT_SENINF_1_FIFO_FULL 331 +#define CMDQ_EVENT_SENINF_2_FIFO_FULL 332 +#define CMDQ_EVENT_SENINF_3_FIFO_FULL 333 +#define CMDQ_EVENT_SENINF_4_FIFO_FULL 334 +#define CMDQ_EVENT_SENINF_5_FIFO_FULL 335 +#define CMDQ_EVENT_SENINF_6_FIFO_FULL 336 +#define CMDQ_EVENT_SENINF_7_FIFO_FULL 337 +#define CMDQ_EVENT_TG_OVRUN_A_INT_DLY 338 +#define CMDQ_EVENT_TG_OVRUN_B_INT_DLY 339 +#define CMDQ_EVENT_TG_OVRUN_C_INT 340 +#define CMDQ_EVENT_TG_GRABERR_A_INT_DLY 341 +#define CMDQ_EVENT_TG_GRABERR_B_INT_DLY 342 +#define CMDQ_EVENT_TG_GRABERR_C_INT 343 +#define CMDQ_EVENT_CQ_VR_SNAP_A_INT_DLY 344 +#define CMDQ_EVENT_CQ_VR_SNAP_B_INT_DLY 345 +#define CMDQ_EVENT_CQ_VR_SNAP_C_INT 346 +#define CMDQ_EVENT_DMA_R1_ERROR_A_INT_DLY 347 +#define CMDQ_EVENT_DMA_R1_ERROR_B_INT_DLY 348 +#define CMDQ_EVENT_DMA_R1_ERROR_C_INT 349 +#define CMDQ_EVENT_APU_GCE_CORE0_EVENT_0 353 +#define CMDQ_EVENT_APU_GCE_CORE0_EVENT_1 354 +#define CMDQ_EVENT_APU_GCE_CORE0_EVENT_2 355 +#define CMDQ_EVENT_APU_GCE_CORE0_EVENT_3 356 +#define CMDQ_EVENT_APU_GCE_CORE1_EVENT_0 385 +#define CMDQ_EVENT_APU_GCE_CORE1_EVENT_1 
386 +#define CMDQ_EVENT_APU_GCE_CORE1_EVENT_2 387 +#define CMDQ_EVENT_APU_GCE_CORE1_EVENT_3 388 +#define CMDQ_EVENT_VDEC_EVENT_0 416 +#define CMDQ_EVENT_VDEC_EVENT_1 417 +#define CMDQ_EVENT_VDEC_EVENT_2 418 +#define CMDQ_EVENT_VDEC_EVENT_3 419 +#define CMDQ_EVENT_VDEC_EVENT_4 420 +#define CMDQ_EVENT_VDEC_EVENT_5 421 +#define CMDQ_EVENT_VDEC_EVENT_6 422 +#define CMDQ_EVENT_VDEC_EVENT_7 423 +#define CMDQ_EVENT_VDEC_EVENT_8 424 +#define CMDQ_EVENT_VDEC_EVENT_9 425 +#define CMDQ_EVENT_VDEC_EVENT_10 426 +#define CMDQ_EVENT_VDEC_EVENT_11 427 +#define CMDQ_EVENT_VDEC_EVENT_12 428 +#define CMDQ_EVENT_VDEC_EVENT_13 429 +#define CMDQ_EVENT_VDEC_EVENT_14 430 +#define CMDQ_EVENT_VDEC_EVENT_15 431 +#define CMDQ_EVENT_FDVT_DONE 449 +#define CMDQ_EVENT_FE_DONE 450 +#define CMDQ_EVENT_RSC_EOF 451 +#define CMDQ_EVENT_DVS_DONE_ASYNC_SHOT 452 +#define CMDQ_EVENT_DVP_DONE_ASYNC_SHOT 453 +#define CMDQ_EVENT_DSI0_TE_INFRA 898 + +#endif -- cgit v1.2.3 From 0858fde496f84fff2fdae53d9e33c7b308195f74 Mon Sep 17 00:00:00 2001 From: Dennis YC Hsieh Date: Sun, 5 Jul 2020 14:37:14 +0800 Subject: mailbox: cmdq: variablize address shift in platform Some gce hardware shift pc and end address in register to support large dram addressing. Implement gce address shift when write or read pc and end register. And add shift bit in platform definition. Signed-off-by: Dennis YC Hsieh Signed-off-by: Jassi Brar --- drivers/mailbox/mtk-cmdq-mailbox.c | 57 ++++++++++++++++++++++++-------- include/linux/mailbox/mtk-cmdq-mailbox.h | 2 ++ 2 files changed, 46 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c index b24822ad8409..49d9264145aa 100644 --- a/drivers/mailbox/mtk-cmdq-mailbox.c +++ b/drivers/mailbox/mtk-cmdq-mailbox.c @@ -75,8 +75,22 @@ struct cmdq { struct cmdq_thread *thread; struct clk *clock; bool suspended; + u8 shift_pa; }; +struct gce_plat { + u32 thread_nr; + u8 shift; +}; + +u8 cmdq_get_shift_pa(struct mbox_chan *chan) +{ + struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox); + + return cmdq->shift_pa; +} +EXPORT_SYMBOL(cmdq_get_shift_pa); + static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread) { u32 status; @@ -183,13 +197,15 @@ static void cmdq_task_handle_error(struct cmdq_task *task) { struct cmdq_thread *thread = task->thread; struct cmdq_task *next_task; + struct cmdq *cmdq = task->cmdq; - dev_err(task->cmdq->mbox.dev, "task 0x%p error\n", task); - WARN_ON(cmdq_thread_suspend(task->cmdq, thread) < 0); + dev_err(cmdq->mbox.dev, "task 0x%p error\n", task); + WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0); next_task = list_first_entry_or_null(&thread->task_busy_list, struct cmdq_task, list_entry); if (next_task) - writel(next_task->pa_base, thread->base + CMDQ_THR_CURR_ADDR); + writel(next_task->pa_base >> cmdq->shift_pa, + thread->base + CMDQ_THR_CURR_ADDR); cmdq_thread_resume(thread); } @@ -219,7 +235,7 @@ static void cmdq_thread_irq_handler(struct cmdq *cmdq, else return; - curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR); + curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << cmdq->shift_pa; list_for_each_entry_safe(task, tmp, &thread->task_busy_list, list_entry) { @@ -335,27 +351,31 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data) WARN_ON(clk_enable(cmdq->clock) < 0); WARN_ON(cmdq_thread_reset(cmdq, thread) < 0); - writel(task->pa_base, thread->base + CMDQ_THR_CURR_ADDR); - writel(task->pa_base + pkt->cmd_buf_size, + writel(task->pa_base >> cmdq->shift_pa, + 
thread->base + CMDQ_THR_CURR_ADDR); + writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->shift_pa, thread->base + CMDQ_THR_END_ADDR); + writel(thread->priority, thread->base + CMDQ_THR_PRIORITY); writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE); writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK); } else { WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0); - curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR); - end_pa = readl(thread->base + CMDQ_THR_END_ADDR); + curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << + cmdq->shift_pa; + end_pa = readl(thread->base + CMDQ_THR_END_ADDR) << + cmdq->shift_pa; /* check boundary */ if (curr_pa == end_pa - CMDQ_INST_SIZE || curr_pa == end_pa) { /* set to this task directly */ - writel(task->pa_base, + writel(task->pa_base >> cmdq->shift_pa, thread->base + CMDQ_THR_CURR_ADDR); } else { cmdq_task_insert_into_thread(task); smp_mb(); /* modify jump before enable thread */ } - writel(task->pa_base + pkt->cmd_buf_size, + writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->shift_pa, thread->base + CMDQ_THR_END_ADDR); cmdq_thread_resume(thread); } @@ -453,6 +473,7 @@ static int cmdq_probe(struct platform_device *pdev) struct resource *res; struct cmdq *cmdq; int err, i; + struct gce_plat *plat_data; cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL); if (!cmdq) @@ -471,7 +492,14 @@ static int cmdq_probe(struct platform_device *pdev) return -EINVAL; } - cmdq->thread_nr = (u32)(unsigned long)of_device_get_match_data(dev); + plat_data = (struct gce_plat *)of_device_get_match_data(dev); + if (!plat_data) { + dev_err(dev, "failed to get match data\n"); + return -EINVAL; + } + + cmdq->thread_nr = plat_data->thread_nr; + cmdq->shift_pa = plat_data->shift; cmdq->irq_mask = GENMASK(cmdq->thread_nr - 1, 0); err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED, "mtk_cmdq", cmdq); @@ -534,9 +562,12 @@ static const struct dev_pm_ops cmdq_pm_ops = { .resume = cmdq_resume, }; +static const struct gce_plat gce_plat_v2 = {.thread_nr = 16}; +static const struct gce_plat gce_plat_v3 = {.thread_nr = 24}; + static const struct of_device_id cmdq_of_ids[] = { - {.compatible = "mediatek,mt8173-gce", .data = (void *)16}, - {.compatible = "mediatek,mt8183-gce", .data = (void *)24}, + {.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_v2}, + {.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_v3}, {} }; diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h index a4dc45fbec0a..c342b8799be8 100644 --- a/include/linux/mailbox/mtk-cmdq-mailbox.h +++ b/include/linux/mailbox/mtk-cmdq-mailbox.h @@ -88,4 +88,6 @@ struct cmdq_pkt { void *cl; }; +u8 cmdq_get_shift_pa(struct mbox_chan *chan); + #endif /* __MTK_CMDQ_MAILBOX_H__ */ -- cgit v1.2.3 From 297f7f82636115d6db4211b8d81764223baf2908 Mon Sep 17 00:00:00 2001 From: Stefano Stabellini Date: Fri, 10 Jul 2020 15:34:21 -0700 Subject: swiotlb-xen: add struct device * parameter to xen_dma_sync_for_cpu No functional changes. The parameter is unused in this patch but will be used by next patches. 
Signed-off-by: Stefano Stabellini Reviewed-by: Boris Ostrovsky Tested-by: Corey Minyard Tested-by: Roman Shaposhnik Link: https://lore.kernel.org/r/20200710223427.6897-5-sstabellini@kernel.org Signed-off-by: Juergen Gross --- arch/arm/xen/mm.c | 5 +++-- drivers/xen/swiotlb-xen.c | 4 ++-- include/xen/swiotlb-xen.h | 5 +++-- 3 files changed, 8 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c index d40e9e5fc52b..1a00e8003c64 100644 --- a/arch/arm/xen/mm.c +++ b/arch/arm/xen/mm.c @@ -71,8 +71,9 @@ static void dma_cache_maint(dma_addr_t handle, size_t size, u32 op) * pfn_valid returns true the pages is local and we can use the native * dma-direct functions, otherwise we call the Xen specific version. */ -void xen_dma_sync_for_cpu(dma_addr_t handle, phys_addr_t paddr, size_t size, - enum dma_data_direction dir) +void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle, + phys_addr_t paddr, size_t size, + enum dma_data_direction dir) { if (pfn_valid(PFN_DOWN(handle))) arch_sync_dma_for_cpu(paddr, size, dir); diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index a8e447137faf..d04b7a15124f 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -428,7 +428,7 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, BUG_ON(dir == DMA_NONE); if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - xen_dma_sync_for_cpu(dev_addr, paddr, size, dir); + xen_dma_sync_for_cpu(hwdev, dev_addr, paddr, size, dir); /* NOTE: We use dev_addr here, not paddr! */ if (is_xen_swiotlb_buffer(dev_addr)) @@ -442,7 +442,7 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, phys_addr_t paddr = xen_bus_to_phys(dev, dma_addr); if (!dev_is_dma_coherent(dev)) - xen_dma_sync_for_cpu(dma_addr, paddr, size, dir); + xen_dma_sync_for_cpu(dev, dma_addr, paddr, size, dir); if (is_xen_swiotlb_buffer(dma_addr)) swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU); diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h index ffc0d3902b71..f62d1854780b 100644 --- a/include/xen/swiotlb-xen.h +++ b/include/xen/swiotlb-xen.h @@ -4,8 +4,9 @@ #include -void xen_dma_sync_for_cpu(dma_addr_t handle, phys_addr_t paddr, size_t size, - enum dma_data_direction dir); +void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle, + phys_addr_t paddr, size_t size, + enum dma_data_direction dir); void xen_dma_sync_for_device(dma_addr_t handle, phys_addr_t paddr, size_t size, enum dma_data_direction dir); -- cgit v1.2.3 From 995d3556694edd3c6dda7671b46ad4a6b3043f2f Mon Sep 17 00:00:00 2001 From: Stefano Stabellini Date: Fri, 10 Jul 2020 15:34:22 -0700 Subject: swiotlb-xen: add struct device * parameter to xen_dma_sync_for_device No functional changes. The parameter is unused in this patch but will be used by next patches. 
Signed-off-by: Stefano Stabellini Reviewed-by: Boris Ostrovsky Tested-by: Corey Minyard Tested-by: Roman Shaposhnik Link: https://lore.kernel.org/r/20200710223427.6897-6-sstabellini@kernel.org Signed-off-by: Juergen Gross --- arch/arm/xen/mm.c | 5 +++-- drivers/xen/swiotlb-xen.c | 4 ++-- include/xen/swiotlb-xen.h | 5 +++-- 3 files changed, 8 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c index 1a00e8003c64..f2414ea40a79 100644 --- a/arch/arm/xen/mm.c +++ b/arch/arm/xen/mm.c @@ -81,8 +81,9 @@ void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle, dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL); } -void xen_dma_sync_for_device(dma_addr_t handle, phys_addr_t paddr, size_t size, - enum dma_data_direction dir) +void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle, + phys_addr_t paddr, size_t size, + enum dma_data_direction dir) { if (pfn_valid(PFN_DOWN(handle))) arch_sync_dma_for_device(paddr, size, dir); diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index d04b7a15124f..8a3a7bcc5ec0 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -408,7 +408,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, done: if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - xen_dma_sync_for_device(dev_addr, phys, size, dir); + xen_dma_sync_for_device(dev, dev_addr, phys, size, dir); return dev_addr; } @@ -458,7 +458,7 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE); if (!dev_is_dma_coherent(dev)) - xen_dma_sync_for_device(dma_addr, paddr, size, dir); + xen_dma_sync_for_device(dev, dma_addr, paddr, size, dir); } /* diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h index f62d1854780b..6d235fe2b92d 100644 --- a/include/xen/swiotlb-xen.h +++ b/include/xen/swiotlb-xen.h @@ -7,8 +7,9 @@ void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle, phys_addr_t paddr, size_t size, enum dma_data_direction dir); -void xen_dma_sync_for_device(dma_addr_t handle, phys_addr_t paddr, size_t size, - enum dma_data_direction dir); +void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle, + phys_addr_t paddr, size_t size, + enum dma_data_direction dir); extern int xen_swiotlb_init(int verbose, bool early); extern const struct dma_map_ops xen_swiotlb_dma_ops; -- cgit v1.2.3 From e9aab7e4ffbbf6eba8eaa66e351557c0c7562cff Mon Sep 17 00:00:00 2001 From: Stefano Stabellini Date: Fri, 10 Jul 2020 15:34:24 -0700 Subject: swiotlb-xen: remove XEN_PFN_PHYS XEN_PFN_PHYS is only used in one place in swiotlb-xen making things more complex than need to be. Remove the definition of XEN_PFN_PHYS and open code the cast in the one place where it is needed. Signed-off-by: Stefano Stabellini Reviewed-by: Juergen Gross Link: https://lore.kernel.org/r/20200710223427.6897-8-sstabellini@kernel.org Signed-off-by: Juergen Gross --- drivers/xen/swiotlb-xen.c | 7 +------ include/xen/page.h | 1 - 2 files changed, 1 insertion(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index e2c35f45f91e..03d118b6c141 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -52,11 +52,6 @@ static unsigned long xen_io_tlb_nslabs; * Quick lookup value of the bus address of the IOTLB. 
*/ -/* - * Both of these functions should avoid XEN_PFN_PHYS because phys_addr_t - * can be 32bit when dma_addr_t is 64bit leading to a loss in - * information if the shift is done before casting to 64bit. - */ static inline dma_addr_t xen_phys_to_bus(struct device *dev, phys_addr_t paddr) { unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr)); @@ -101,7 +96,7 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr) { unsigned long bfn = XEN_PFN_DOWN(dma_addr); unsigned long xen_pfn = bfn_to_local_pfn(bfn); - phys_addr_t paddr = XEN_PFN_PHYS(xen_pfn); + phys_addr_t paddr = (phys_addr_t)xen_pfn << XEN_PAGE_SHIFT; /* If the address is outside our domain, it CAN * have the same virtual address as another address diff --git a/include/xen/page.h b/include/xen/page.h index df6d6b6ec66e..285677b42943 100644 --- a/include/xen/page.h +++ b/include/xen/page.h @@ -24,7 +24,6 @@ #define XEN_PFN_DOWN(x) ((x) >> XEN_PAGE_SHIFT) #define XEN_PFN_UP(x) (((x) + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT) -#define XEN_PFN_PHYS(x) ((phys_addr_t)(x) << XEN_PAGE_SHIFT) #include -- cgit v1.2.3 From 63f0620cc552c4cd5bb2747f77efce407487cb12 Mon Sep 17 00:00:00 2001 From: Stefano Stabellini Date: Fri, 10 Jul 2020 15:34:26 -0700 Subject: xen/arm: introduce phys/dma translations in xen_dma_sync_for_* xen_dma_sync_for_cpu, xen_dma_sync_for_device, xen_arch_need_swiotlb are getting called passing dma addresses. On some platforms dma addresses could be different from physical addresses. Before doing any operations on these addresses we need to convert them back to physical addresses using dma_to_phys. Move the arch_sync_dma_for_cpu and arch_sync_dma_for_device calls from xen_dma_sync_for_cpu/device to swiotlb-xen.c, and add a call dma_to_phys to do address translations there. dma_cache_maint is fixed by the next patch. Signed-off-by: Stefano Stabellini Tested-by: Corey Minyard Tested-by: Roman Shaposhnik Acked-by: Juergen Gross Link: https://lore.kernel.org/r/20200710223427.6897-10-sstabellini@kernel.org Signed-off-by: Juergen Gross --- arch/arm/xen/mm.c | 17 ++++++----------- drivers/xen/swiotlb-xen.c | 32 ++++++++++++++++++++++++-------- include/xen/swiotlb-xen.h | 6 ++---- 3 files changed, 32 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c index f2414ea40a79..a8251a70f442 100644 --- a/arch/arm/xen/mm.c +++ b/arch/arm/xen/mm.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only #include +#include #include #include #include @@ -72,22 +73,16 @@ static void dma_cache_maint(dma_addr_t handle, size_t size, u32 op) * dma-direct functions, otherwise we call the Xen specific version. 
*/ void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle, - phys_addr_t paddr, size_t size, - enum dma_data_direction dir) + size_t size, enum dma_data_direction dir) { - if (pfn_valid(PFN_DOWN(handle))) - arch_sync_dma_for_cpu(paddr, size, dir); - else if (dir != DMA_TO_DEVICE) + if (dir != DMA_TO_DEVICE) dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL); } void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle, - phys_addr_t paddr, size_t size, - enum dma_data_direction dir) + size_t size, enum dma_data_direction dir) { - if (pfn_valid(PFN_DOWN(handle))) - arch_sync_dma_for_device(paddr, size, dir); - else if (dir == DMA_FROM_DEVICE) + if (dir == DMA_FROM_DEVICE) dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL); else dma_cache_maint(handle, size, GNTTAB_CACHE_CLEAN); @@ -98,7 +93,7 @@ bool xen_arch_need_swiotlb(struct device *dev, dma_addr_t dev_addr) { unsigned int xen_pfn = XEN_PFN_DOWN(phys); - unsigned int bfn = XEN_PFN_DOWN(dev_addr); + unsigned int bfn = XEN_PFN_DOWN(dma_to_phys(dev, dev_addr)); /* * The swiotlb buffer should be used if diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index a6a95358a8cb..39a0f2e0847c 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -413,8 +413,12 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, } done: - if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - xen_dma_sync_for_device(dev, dev_addr, phys, size, dir); + if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) { + if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr)))) + arch_sync_dma_for_device(phys, size, dir); + else + xen_dma_sync_for_device(dev, dev_addr, size, dir); + } return dev_addr; } @@ -433,8 +437,12 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, BUG_ON(dir == DMA_NONE); - if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - xen_dma_sync_for_cpu(hwdev, dev_addr, paddr, size, dir); + if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) { + if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr)))) + arch_sync_dma_for_cpu(paddr, size, dir); + else + xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir); + } /* NOTE: We use dev_addr here, not paddr! 
*/ if (is_xen_swiotlb_buffer(hwdev, dev_addr)) @@ -447,8 +455,12 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, { phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr); - if (!dev_is_dma_coherent(dev)) - xen_dma_sync_for_cpu(dev, dma_addr, paddr, size, dir); + if (!dev_is_dma_coherent(dev)) { + if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr)))) + arch_sync_dma_for_cpu(paddr, size, dir); + else + xen_dma_sync_for_cpu(dev, dma_addr, size, dir); + } if (is_xen_swiotlb_buffer(dev, dma_addr)) swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU); @@ -463,8 +475,12 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, if (is_xen_swiotlb_buffer(dev, dma_addr)) swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE); - if (!dev_is_dma_coherent(dev)) - xen_dma_sync_for_device(dev, dma_addr, paddr, size, dir); + if (!dev_is_dma_coherent(dev)) { + if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr)))) + arch_sync_dma_for_device(paddr, size, dir); + else + xen_dma_sync_for_device(dev, dma_addr, size, dir); + } } /* diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h index 6d235fe2b92d..d5eaf9d682b8 100644 --- a/include/xen/swiotlb-xen.h +++ b/include/xen/swiotlb-xen.h @@ -5,11 +5,9 @@ #include void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle, - phys_addr_t paddr, size_t size, - enum dma_data_direction dir); + size_t size, enum dma_data_direction dir); void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle, - phys_addr_t paddr, size_t size, - enum dma_data_direction dir); + size_t size, enum dma_data_direction dir); extern int xen_swiotlb_init(int verbose, bool early); extern const struct dma_map_ops xen_swiotlb_dma_ops; -- cgit v1.2.3 From c0842fbc1b18c7a044e6ff3e8fa78bfa822c7d1a Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 31 Jul 2020 07:51:14 +0200 Subject: random32: move the pseudo-random 32-bit definitions to prandom.h The addition of percpu.h to the list of includes in random.h revealed some circular dependencies on arm64 and possibly other platforms. This include was added solely for the pseudo-random definitions, which have nothing to do with the rest of the definitions in this file but are still there for legacy reasons. This patch moves the pseudo-random parts to linux/prandom.h and the percpu.h include with it, which is now guarded by _LINUX_PRANDOM_H and protected against recursive inclusion. A further cleanup step would be to remove this from entirely, and make people who use the prandom infrastructure include just the new header file. That's a bit of a churn patch, but grepping for "prandom_" and "next_pseudo_random32" and "struct rnd_state" should catch most users. But it turns out that that nice cleanup step is fairly painful, because a _lot_ of code currently seems to depend on the implicit include of , which can currently come in a lot of ways, including such fairly core headers as . So the "nice cleanup" part may or may never happen.
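As a rough illustration of what that eventual cleanup would look like (a hypothetical out-of-tree user, not part of this patch), a converted prandom caller would pull in just the new header and keep using the moved helpers unchanged:

#include <linux/prandom.h>

/* Hypothetical example: pick a random slot using only the new header. */
static u32 pick_random_slot(u32 nr_slots)
{
	/* prandom_u32_max() returns a value in the interval [0, nr_slots) */
	return prandom_u32_max(nr_slots);
}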
Fixes: 1c9df907da83 ("random: fix circular include dependency on arm64 after addition of percpu.h") Tested-by: Guenter Roeck Acked-by: Willy Tarreau Signed-off-by: Linus Torvalds --- include/linux/prandom.h | 78 +++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/random.h | 66 +++-------------------------------------- 2 files changed, 82 insertions(+), 62 deletions(-) create mode 100644 include/linux/prandom.h (limited to 'include') diff --git a/include/linux/prandom.h b/include/linux/prandom.h new file mode 100644 index 000000000000..aa16e6468f91 --- /dev/null +++ b/include/linux/prandom.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/prandom.h + * + * Include file for the fast pseudo-random 32-bit + * generation. + */ +#ifndef _LINUX_PRANDOM_H +#define _LINUX_PRANDOM_H + +#include +#include + +u32 prandom_u32(void); +void prandom_bytes(void *buf, size_t nbytes); +void prandom_seed(u32 seed); +void prandom_reseed_late(void); + +struct rnd_state { + __u32 s1, s2, s3, s4; +}; + +DECLARE_PER_CPU(struct rnd_state, net_rand_state); + +u32 prandom_u32_state(struct rnd_state *state); +void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); +void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state); + +#define prandom_init_once(pcpu_state) \ + DO_ONCE(prandom_seed_full_state, (pcpu_state)) + +/** + * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) + * @ep_ro: right open interval endpoint + * + * Returns a pseudo-random number that is in interval [0, ep_ro). Note + * that the result depends on PRNG being well distributed in [0, ~0U] + * u32 space. Here we use maximally equidistributed combined Tausworthe + * generator, that is, prandom_u32(). This is useful when requesting a + * random index of an array containing ep_ro elements, for example. + * + * Returns: pseudo-random number in interval [0, ep_ro) + */ +static inline u32 prandom_u32_max(u32 ep_ro) +{ + return (u32)(((u64) prandom_u32() * ep_ro) >> 32); +} + +/* + * Handle minimum values for seeds + */ +static inline u32 __seed(u32 x, u32 m) +{ + return (x < m) ? x + m : x; +} + +/** + * prandom_seed_state - set seed for prandom_u32_state(). + * @state: pointer to state structure to receive the seed. + * @seed: arbitrary 64-bit value to use as a seed. + */ +static inline void prandom_seed_state(struct rnd_state *state, u64 seed) +{ + u32 i = (seed >> 32) ^ (seed << 10) ^ seed; + + state->s1 = __seed(i, 2U); + state->s2 = __seed(i, 8U); + state->s3 = __seed(i, 16U); + state->s4 = __seed(i, 128U); +} + +/* Pseudo random number generator from numerical recipes. 
*/ +static inline u32 next_pseudo_random32(u32 seed) +{ + return seed * 1664525 + 1013904223; +} + +#endif diff --git a/include/linux/random.h b/include/linux/random.h index 9ab7443bd91b..f45b8be3e3c4 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -11,7 +11,6 @@ #include #include #include -#include #include @@ -111,63 +110,12 @@ declare_get_random_var_wait(long) unsigned long randomize_page(unsigned long start, unsigned long range); -u32 prandom_u32(void); -void prandom_bytes(void *buf, size_t nbytes); -void prandom_seed(u32 seed); -void prandom_reseed_late(void); - -struct rnd_state { - __u32 s1, s2, s3, s4; -}; - -DECLARE_PER_CPU(struct rnd_state, net_rand_state); - -u32 prandom_u32_state(struct rnd_state *state); -void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); -void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state); - -#define prandom_init_once(pcpu_state) \ - DO_ONCE(prandom_seed_full_state, (pcpu_state)) - -/** - * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) - * @ep_ro: right open interval endpoint - * - * Returns a pseudo-random number that is in interval [0, ep_ro). Note - * that the result depends on PRNG being well distributed in [0, ~0U] - * u32 space. Here we use maximally equidistributed combined Tausworthe - * generator, that is, prandom_u32(). This is useful when requesting a - * random index of an array containing ep_ro elements, for example. - * - * Returns: pseudo-random number in interval [0, ep_ro) - */ -static inline u32 prandom_u32_max(u32 ep_ro) -{ - return (u32)(((u64) prandom_u32() * ep_ro) >> 32); -} - /* - * Handle minimum values for seeds - */ -static inline u32 __seed(u32 x, u32 m) -{ - return (x < m) ? x + m : x; -} - -/** - * prandom_seed_state - set seed for prandom_u32_state(). - * @state: pointer to state structure to receive the seed. - * @seed: arbitrary 64-bit value to use as a seed. + * This is designed to be standalone for just prandom + * users, but for now we include it from + * for legacy reasons. */ -static inline void prandom_seed_state(struct rnd_state *state, u64 seed) -{ - u32 i = (seed >> 32) ^ (seed << 10) ^ seed; - - state->s1 = __seed(i, 2U); - state->s2 = __seed(i, 8U); - state->s3 = __seed(i, 16U); - state->s4 = __seed(i, 128U); -} +#include #ifdef CONFIG_ARCH_RANDOM # include @@ -210,10 +158,4 @@ static inline bool __init arch_get_random_long_early(unsigned long *v) } #endif -/* Pseudo random number generator from numerical recipes. */ -static inline u32 next_pseudo_random32(u32 seed) -{ - return seed * 1664525 + 1013904223; -} - #endif /* _LINUX_RANDOM_H */ -- cgit v1.2.3 From 403d2d116ec011942a8505068b943c5c6cd91176 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 31 Jul 2020 19:03:26 +0200 Subject: PM: runtime: Add kerneldoc comments to multiple helpers Add kerneldoc comments to multiple PM-runtime helper functions defined as static inline wrappers around lower-level routines to provide quick reference documentation of their behavior. Some of them are similar to each other with subtle differences only and the behavior of some of them may appear as counter-intuitive, so clarify all that to avoid confusion. Signed-off-by: Rafael J.
Wysocki Acked-by: Alan Stern Reviewed-by: Sakari Ailus --- include/linux/pm_runtime.h | 246 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 246 insertions(+) (limited to 'include') diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 3dbc207bff53..6245caa18034 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -60,58 +60,151 @@ extern void pm_runtime_put_suppliers(struct device *dev); extern void pm_runtime_new_link(struct device *dev); extern void pm_runtime_drop_link(struct device *dev); +/** + * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter. + * @dev: Target device. + * + * Increment the runtime PM usage counter of @dev if its runtime PM status is + * %RPM_ACTIVE and its runtime PM usage counter is greater than 0. + */ static inline int pm_runtime_get_if_in_use(struct device *dev) { return pm_runtime_get_if_active(dev, false); } +/** + * pm_suspend_ignore_children - Set runtime PM behavior regarding children. + * @dev: Target device. + * @enable: Whether or not to ignore possible dependencies on children. + * + * The dependencies of @dev on its children will not be taken into account by + * the runtime PM framework going forward if @enable is %true, or they will + * be taken into account otherwise. + */ static inline void pm_suspend_ignore_children(struct device *dev, bool enable) { dev->power.ignore_children = enable; } +/** + * pm_runtime_get_noresume - Bump up runtime PM usage counter of a device. + * @dev: Target device. + */ static inline void pm_runtime_get_noresume(struct device *dev) { atomic_inc(&dev->power.usage_count); } +/** + * pm_runtime_put_noidle - Drop runtime PM usage counter of a device. + * @dev: Target device. + * + * Decrement the runtime PM usage counter of @dev unless it is 0 already. + */ static inline void pm_runtime_put_noidle(struct device *dev) { atomic_add_unless(&dev->power.usage_count, -1, 0); } +/** + * pm_runtime_suspended - Check whether or not a device is runtime-suspended. + * @dev: Target device. + * + * Return %true if runtime PM is enabled for @dev and its runtime PM status is + * %RPM_SUSPENDED, or %false otherwise. + * + * Note that the return value of this function can only be trusted if it is + * called under the runtime PM lock of @dev or under conditions in which + * runtime PM cannot be either disabled or enabled for @dev and its runtime PM + * status cannot change. + */ static inline bool pm_runtime_suspended(struct device *dev) { return dev->power.runtime_status == RPM_SUSPENDED && !dev->power.disable_depth; } +/** + * pm_runtime_active - Check whether or not a device is runtime-active. + * @dev: Target device. + * + * Return %true if runtime PM is enabled for @dev and its runtime PM status is + * %RPM_ACTIVE, or %false otherwise. + * + * Note that the return value of this function can only be trusted if it is + * called under the runtime PM lock of @dev or under conditions in which + * runtime PM cannot be either disabled or enabled for @dev and its runtime PM + * status cannot change. + */ static inline bool pm_runtime_active(struct device *dev) { return dev->power.runtime_status == RPM_ACTIVE || dev->power.disable_depth; } +/** + * pm_runtime_status_suspended - Check if runtime PM status is "suspended". + * @dev: Target device. + * + * Return %true if the runtime PM status of @dev is %RPM_SUSPENDED, or %false + * otherwise, regardless of whether or not runtime PM has been enabled for @dev. 
+ * + * Note that the return value of this function can only be trusted if it is + * called under the runtime PM lock of @dev or under conditions in which the + * runtime PM status of @dev cannot change. + */ static inline bool pm_runtime_status_suspended(struct device *dev) { return dev->power.runtime_status == RPM_SUSPENDED; } +/** + * pm_runtime_enabled - Check if runtime PM is enabled. + * @dev: Target device. + * + * Return %true if runtime PM is enabled for @dev or %false otherwise. + * + * Note that the return value of this function can only be trusted if it is + * called under the runtime PM lock of @dev or under conditions in which + * runtime PM cannot be either disabled or enabled for @dev. + */ static inline bool pm_runtime_enabled(struct device *dev) { return !dev->power.disable_depth; } +/** + * pm_runtime_has_no_callbacks - Check if runtime PM callbacks may be present. + * @dev: Target device. + * + * Return %true if @dev is a special device without runtime PM callbacks or + * %false otherwise. + */ static inline bool pm_runtime_has_no_callbacks(struct device *dev) { return dev->power.no_callbacks; } +/** + * pm_runtime_mark_last_busy - Update the last access time of a device. + * @dev: Target device. + * + * Update the last access time of @dev used by the runtime PM autosuspend + * mechanism to the current time as returned by ktime_get_mono_fast_ns(). + */ static inline void pm_runtime_mark_last_busy(struct device *dev) { WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns()); } +/** + * pm_runtime_is_irq_safe - Check if runtime PM can work in interrupt context. + * @dev: Target device. + * + * Return %true if @dev has been marked as an "IRQ-safe" device (with respect + * to runtime PM), in which case its runtime PM callabcks can be expected to + * work correctly when invoked from interrupt handlers. + */ static inline bool pm_runtime_is_irq_safe(struct device *dev) { return dev->power.irq_safe; @@ -191,97 +284,250 @@ static inline void pm_runtime_drop_link(struct device *dev) {} #endif /* !CONFIG_PM */ +/** + * pm_runtime_idle - Conditionally set up autosuspend of a device or suspend it. + * @dev: Target device. + * + * Invoke the "idle check" callback of @dev and, depending on its return value, + * set up autosuspend of @dev or suspend it (depending on whether or not + * autosuspend has been enabled for it). + */ static inline int pm_runtime_idle(struct device *dev) { return __pm_runtime_idle(dev, 0); } +/** + * pm_runtime_suspend - Suspend a device synchronously. + * @dev: Target device. + */ static inline int pm_runtime_suspend(struct device *dev) { return __pm_runtime_suspend(dev, 0); } +/** + * pm_runtime_autosuspend - Set up autosuspend of a device or suspend it. + * @dev: Target device. + * + * Set up autosuspend of @dev or suspend it (depending on whether or not + * autosuspend is enabled for it) without engaging its "idle check" callback. + */ static inline int pm_runtime_autosuspend(struct device *dev) { return __pm_runtime_suspend(dev, RPM_AUTO); } +/** + * pm_runtime_resume - Resume a device synchronously. + * @dev: Target device. + */ static inline int pm_runtime_resume(struct device *dev) { return __pm_runtime_resume(dev, 0); } +/** + * pm_request_idle - Queue up "idle check" execution for a device. + * @dev: Target device. + * + * Queue up a work item to run an equivalent of pm_runtime_idle() for @dev + * asynchronously. 
+ */ static inline int pm_request_idle(struct device *dev) { return __pm_runtime_idle(dev, RPM_ASYNC); } +/** + * pm_request_resume - Queue up runtime-resume of a device. + * @dev: Target device. + */ static inline int pm_request_resume(struct device *dev) { return __pm_runtime_resume(dev, RPM_ASYNC); } +/** + * pm_request_autosuspend - Queue up autosuspend of a device. + * @dev: Target device. + * + * Queue up a work item to run an equivalent pm_runtime_autosuspend() for @dev + * asynchronously. + */ static inline int pm_request_autosuspend(struct device *dev) { return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO); } +/** + * pm_runtime_get - Bump up usage counter and queue up resume of a device. + * @dev: Target device. + * + * Bump up the runtime PM usage counter of @dev and queue up a work item to + * carry out runtime-resume of it. + */ static inline int pm_runtime_get(struct device *dev) { return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC); } +/** + * pm_runtime_get_sync - Bump up usage counter of a device and resume it. + * @dev: Target device. + * + * Bump up the runtime PM usage counter of @dev and carry out runtime-resume of + * it synchronously. + * + * The possible return values of this function are the same as for + * pm_runtime_resume() and the runtime PM usage counter of @dev remains + * incremented in all cases, even if it returns an error code. + */ static inline int pm_runtime_get_sync(struct device *dev) { return __pm_runtime_resume(dev, RPM_GET_PUT); } +/** + * pm_runtime_put - Drop device usage counter and queue up "idle check" if 0. + * @dev: Target device. + * + * Decrement the runtime PM usage counter of @dev and if it turns out to be + * equal to 0, queue up a work item for @dev like in pm_request_idle(). + */ static inline int pm_runtime_put(struct device *dev) { return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC); } +/** + * pm_runtime_put_autosuspend - Drop device usage counter and queue autosuspend if 0. + * @dev: Target device. + * + * Decrement the runtime PM usage counter of @dev and if it turns out to be + * equal to 0, queue up a work item for @dev like in pm_request_autosuspend(). + */ static inline int pm_runtime_put_autosuspend(struct device *dev) { return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO); } +/** + * pm_runtime_put_sync - Drop device usage counter and run "idle check" if 0. + * @dev: Target device. + * + * Decrement the runtime PM usage counter of @dev and if it turns out to be + * equal to 0, invoke the "idle check" callback of @dev and, depending on its + * return value, set up autosuspend of @dev or suspend it (depending on whether + * or not autosuspend has been enabled for it). + * + * The possible return values of this function are the same as for + * pm_runtime_idle() and the runtime PM usage counter of @dev remains + * decremented in all cases, even if it returns an error code. + */ static inline int pm_runtime_put_sync(struct device *dev) { return __pm_runtime_idle(dev, RPM_GET_PUT); } +/** + * pm_runtime_put_sync_suspend - Drop device usage counter and suspend if 0. + * @dev: Target device. + * + * Decrement the runtime PM usage counter of @dev and if it turns out to be + * equal to 0, carry out runtime-suspend of @dev synchronously. + * + * The possible return values of this function are the same as for + * pm_runtime_suspend() and the runtime PM usage counter of @dev remains + * decremented in all cases, even if it returns an error code. 
+ */ static inline int pm_runtime_put_sync_suspend(struct device *dev) { return __pm_runtime_suspend(dev, RPM_GET_PUT); } +/** + * pm_runtime_put_sync_autosuspend - Drop device usage counter and autosuspend if 0. + * @dev: Target device. + * + * Decrement the runtime PM usage counter of @dev and if it turns out to be + * equal to 0, set up autosuspend of @dev or suspend it synchronously (depending + * on whether or not autosuspend has been enabled for it). + * + * The possible return values of this function are the same as for + * pm_runtime_autosuspend() and the runtime PM usage counter of @dev remains + * decremented in all cases, even if it returns an error code. + */ static inline int pm_runtime_put_sync_autosuspend(struct device *dev) { return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO); } +/** + * pm_runtime_set_active - Set runtime PM status to "active". + * @dev: Target device. + * + * Set the runtime PM status of @dev to %RPM_ACTIVE and ensure that dependencies + * of it will be taken into account. + * + * It is not valid to call this function for devices with runtime PM enabled. + */ static inline int pm_runtime_set_active(struct device *dev) { return __pm_runtime_set_status(dev, RPM_ACTIVE); } +/** + * pm_runtime_set_suspended - Set runtime PM status to "active". + * @dev: Target device. + * + * Set the runtime PM status of @dev to %RPM_SUSPENDED and ensure that + * dependencies of it will be taken into account. + * + * It is not valid to call this function for devices with runtime PM enabled. + */ static inline int pm_runtime_set_suspended(struct device *dev) { return __pm_runtime_set_status(dev, RPM_SUSPENDED); } +/** + * pm_runtime_disable - Disable runtime PM for a device. + * @dev: Target device. + * + * Prevent the runtime PM framework from working with @dev (by incrementing its + * "blocking" counter). + * + * For each invocation of this function for @dev there must be a matching + * pm_runtime_enable() call in order for runtime PM to be enabled for it. + */ static inline void pm_runtime_disable(struct device *dev) { __pm_runtime_disable(dev, true); } +/** + * pm_runtime_use_autosuspend - Allow autosuspend to be used for a device. + * @dev: Target device. + * + * Allow the runtime PM autosuspend mechanism to be used for @dev whenever + * requested (or "autosuspend" will be handled as direct runtime-suspend for + * it). + */ static inline void pm_runtime_use_autosuspend(struct device *dev) { __pm_runtime_use_autosuspend(dev, true); } +/** + * pm_runtime_dont_use_autosuspend - Prevent autosuspend from being used. + * @dev: Target device. + * + * Prevent the runtime PM autosuspend mechanism from being used for @dev which + * means that "autosuspend" will be handled as direct runtime-suspend for it + * going forward. + */ static inline void pm_runtime_dont_use_autosuspend(struct device *dev) { __pm_runtime_use_autosuspend(dev, false); -- cgit v1.2.3 From 669cbc708122fc7a02282058a09f096200cee090 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Tue, 21 Jul 2020 20:25:13 -0600 Subject: PCI: Move DT resource setup into devm_pci_alloc_host_bridge() Now that pci_parse_request_of_pci_ranges() callers just setup pci_host_bridge.windows and dma_ranges directly and don't need the bus range returned, we can just initialize them when allocating the pci_host_bridge struct. With this, pci_parse_request_of_pci_ranges() becomes a static function. 
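As a sketch of the end result (a hypothetical minimal DT-based host controller probe, not one of the drivers converted in this series), a driver now only allocates the bridge and finds bridge->windows and bridge->dma_ranges already populated from the device tree:

#include <linux/pci.h>
#include <linux/platform_device.h>

struct foo_pcie { void __iomem *base; };	/* hypothetical driver state */
static struct pci_ops foo_pcie_ops;		/* hypothetical config accessors */

static int foo_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pci_host_bridge *bridge;
	struct resource_entry *entry;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct foo_pcie));
	if (!bridge)
		return -ENOMEM;	/* DT range parsing failures also end up here now */

	/* windows and dma_ranges were parsed and requested during allocation */
	resource_list_for_each_entry(entry, &bridge->windows)
		dev_dbg(dev, "host bridge window %pR\n", entry->res);

	bridge->sysdata = pci_host_bridge_priv(bridge);
	bridge->ops = &foo_pcie_ops;

	return pci_host_probe(bridge);
}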
Link: https://lore.kernel.org/r/20200722022514.1283916-19-robh@kernel.org Signed-off-by: Rob Herring Signed-off-by: Lorenzo Pieralisi Acked-by: Bjorn Helgaas Cc: Lorenzo Pieralisi Cc: Bjorn Helgaas --- drivers/pci/controller/cadence/pcie-cadence-host.c | 6 ---- drivers/pci/controller/dwc/pcie-designware-host.c | 5 --- .../pci/controller/mobiveil/pcie-mobiveil-host.c | 8 ----- drivers/pci/controller/pci-aardvark.c | 7 ---- drivers/pci/controller/pci-ftpci100.c | 5 --- drivers/pci/controller/pci-host-common.c | 13 ++++---- drivers/pci/controller/pci-loongson.c | 7 ---- drivers/pci/controller/pci-rcar-gen2.c | 6 ---- drivers/pci/controller/pci-tegra.c | 6 ---- drivers/pci/controller/pci-v3-semi.c | 5 --- drivers/pci/controller/pci-versatile.c | 7 +--- drivers/pci/controller/pci-xgene.c | 5 --- drivers/pci/controller/pcie-altera.c | 7 ---- drivers/pci/controller/pcie-brcmstb.c | 5 --- drivers/pci/controller/pcie-iproc-platform.c | 7 ---- drivers/pci/controller/pcie-mediatek.c | 7 ---- drivers/pci/controller/pcie-rcar-host.c | 5 --- drivers/pci/controller/pcie-rockchip-host.c | 5 --- drivers/pci/controller/pcie-xilinx-nwl.c | 7 ---- drivers/pci/controller/pcie-xilinx.c | 7 ---- drivers/pci/of.c | 37 ++++++++++------------ drivers/pci/pci.h | 8 +++++ drivers/pci/probe.c | 4 +++ include/linux/pci.h | 12 ------- 24 files changed, 36 insertions(+), 155 deletions(-) (limited to 'include') diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c index 39a8a7a3051e..436eb9f3d68e 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-host.c +++ b/drivers/pci/controller/cadence/pcie-cadence-host.c @@ -171,14 +171,8 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc) static int cdns_pcie_host_init(struct device *dev, struct cdns_pcie_rc *rc) { - struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rc); int err; - /* Parse our PCI ranges and request their resources */ - err = pci_parse_request_of_pci_ranges(dev, &bridge->windows, NULL, NULL); - if (err) - return err; - err = cdns_pcie_host_init_root_port(rc); if (err) return err; diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 9775558acdc8..f9fa7b1f429d 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -346,11 +346,6 @@ int dw_pcie_host_init(struct pcie_port *pp) if (!bridge) return -ENOMEM; - ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (ret) - return ret; - /* Get the I/O and memory ranges from DT */ resource_list_for_each_entry(win, &bridge->windows) { switch (resource_type(win->res)) { diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c index 7250b84a7efe..2954d6ad8333 100644 --- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c @@ -577,14 +577,6 @@ int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie) if (!mobiveil_pcie_is_bridge(pcie)) return -ENODEV; - /* parse the host bridge base addresses from the device tree file */ - ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (ret) { - dev_err(dev, "Getting bridge resources failed\n"); - return ret; - } - /* * configure all inbound and outbound windows and prepare the RC for * config access diff --git 
a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index 36dc2412c66f..7ee14cfb7f5a 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -1130,13 +1130,6 @@ static int advk_pcie_probe(struct platform_device *pdev) return ret; } - ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (ret) { - dev_err(dev, "Failed to parse resources\n"); - return ret; - } - pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node, "reset-gpios", 0, GPIOD_OUT_LOW, diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c index 5441a5f92739..94f6ab5fff70 100644 --- a/drivers/pci/controller/pci-ftpci100.c +++ b/drivers/pci/controller/pci-ftpci100.c @@ -465,11 +465,6 @@ static int faraday_pci_probe(struct platform_device *pdev) if (IS_ERR(p->base)) return PTR_ERR(p->base); - ret = pci_parse_request_of_pci_ranges(dev, &host->windows, - &host->dma_ranges, NULL); - if (ret) - return ret; - win = resource_list_first_type(&host->windows, IORESOURCE_IO); if (win) { io = win->res; diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c index e662910fe032..509624175260 100644 --- a/drivers/pci/controller/pci-host-common.c +++ b/drivers/pci/controller/pci-host-common.c @@ -25,21 +25,20 @@ static struct pci_config_window *gen_pci_init(struct device *dev, { int err; struct resource cfgres; - struct resource *bus_range = NULL; + struct resource_entry *bus; struct pci_config_window *cfg; - /* Parse our PCI ranges and request their resources */ - err = pci_parse_request_of_pci_ranges(dev, &bridge->windows, NULL, &bus_range); - if (err) - return ERR_PTR(err); - err = of_address_to_resource(dev->of_node, 0, &cfgres); if (err) { dev_err(dev, "missing \"reg\" property\n"); return ERR_PTR(err); } - cfg = pci_ecam_create(dev, &cfgres, bus_range, ops); + bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS); + if (!bus) + return ERR_PTR(-ENODEV); + + cfg = pci_ecam_create(dev, &cfgres, bus->res, ops); if (IS_ERR(cfg)) return cfg; diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c index 0198c15ed97c..a7a7fbe2b7a5 100644 --- a/drivers/pci/controller/pci-loongson.c +++ b/drivers/pci/controller/pci-loongson.c @@ -218,13 +218,6 @@ static int loongson_pci_probe(struct platform_device *pdev) } } - err = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (err) { - dev_err(dev, "failed to get bridge resources\n"); - return err; - } - bridge->sysdata = priv; bridge->ops = &loongson_pci_ops; bridge->map_irq = loongson_map_irq; diff --git a/drivers/pci/controller/pci-rcar-gen2.c b/drivers/pci/controller/pci-rcar-gen2.c index 7bc6c1750ebe..c9530038ca9a 100644 --- a/drivers/pci/controller/pci-rcar-gen2.c +++ b/drivers/pci/controller/pci-rcar-gen2.c @@ -282,7 +282,6 @@ static int rcar_pci_probe(struct platform_device *pdev) struct rcar_pci_priv *priv; struct pci_host_bridge *bridge; void __iomem *reg; - int ret; bridge = devm_pci_alloc_host_bridge(dev, sizeof(*priv)); if (!bridge) @@ -315,11 +314,6 @@ static int rcar_pci_probe(struct platform_device *pdev) return priv->irq; } - ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (ret) - return ret; - bridge->ops = &rcar_pci_ops; pci_add_flags(PCI_REASSIGN_ALL_BUS); diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index 
8f37cbe7657e..c75ee4bb914e 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -2682,12 +2682,6 @@ static int tegra_pcie_probe(struct platform_device *pdev) INIT_LIST_HEAD(&pcie->ports); pcie->dev = dev; - err = pci_parse_request_of_pci_ranges(dev, &host->windows, NULL, NULL); - if (err) { - dev_err(dev, "Getting bridge resources failed\n"); - return err; - } - err = tegra_pcie_parse_dt(pcie); if (err < 0) return err; diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c index d2619f583bfb..1a2cbc56b34b 100644 --- a/drivers/pci/controller/pci-v3-semi.c +++ b/drivers/pci/controller/pci-v3-semi.c @@ -764,11 +764,6 @@ static int v3_pci_probe(struct platform_device *pdev) if (IS_ERR(v3->config_base)) return PTR_ERR(v3->config_base); - ret = pci_parse_request_of_pci_ranges(dev, &host->windows, - &host->dma_ranges, NULL); - if (ret) - return ret; - /* Get and request error IRQ resource */ irq = platform_get_irq(pdev, 0); if (irq < 0) { diff --git a/drivers/pci/controller/pci-versatile.c b/drivers/pci/controller/pci-versatile.c index 66f5c61b8eb8..fb45f8d158fe 100644 --- a/drivers/pci/controller/pci-versatile.c +++ b/drivers/pci/controller/pci-versatile.c @@ -67,7 +67,7 @@ static int versatile_pci_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct resource *res; struct resource_entry *entry; - int ret, i, myslot = -1, mem = 1; + int i, myslot = -1, mem = 1; u32 val; void __iomem *local_pci_cfg_base; struct pci_host_bridge *bridge; @@ -89,11 +89,6 @@ static int versatile_pci_probe(struct platform_device *pdev) if (IS_ERR(versatile_cfg_base[1])) return PTR_ERR(versatile_cfg_base[1]); - ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - NULL, NULL); - if (ret) - return ret; - resource_list_for_each_entry(entry, &bridge->windows) { if (resource_type(entry->res) == IORESOURCE_MEM) { writel(entry->res->start >> 28, PCI_IMAP(mem)); diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c index d3aa28e1a482..190234b7a3a9 100644 --- a/drivers/pci/controller/pci-xgene.c +++ b/drivers/pci/controller/pci-xgene.c @@ -614,11 +614,6 @@ static int xgene_pcie_probe(struct platform_device *pdev) if (ret) return ret; - ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (ret) - return ret; - ret = xgene_pcie_setup(port); if (ret) return ret; diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c index 4424430dacb0..d93489e8978e 100644 --- a/drivers/pci/controller/pcie-altera.c +++ b/drivers/pci/controller/pcie-altera.c @@ -794,13 +794,6 @@ static int altera_pcie_probe(struct platform_device *pdev) return ret; } - ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (ret) { - dev_err(dev, "Failed add resources\n"); - return ret; - } - ret = altera_pcie_init_irq_domain(pcie); if (ret) { dev_err(dev, "Failed creating IRQ Domain\n"); diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index 20e1aa23edc5..f0f283ec17a0 100644 --- a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -970,11 +970,6 @@ static int brcm_pcie_probe(struct platform_device *pdev) pcie->ssc = of_property_read_bool(np, "brcm,enable-ssc"); - ret = pci_parse_request_of_pci_ranges(pcie->dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (ret) - return ret; - ret = clk_prepare_enable(pcie->clk); if (ret) { 
dev_err(&pdev->dev, "could not enable clock\n"); diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c index ff0a81a632a1..7c10c1cb6f65 100644 --- a/drivers/pci/controller/pcie-iproc-platform.c +++ b/drivers/pci/controller/pcie-iproc-platform.c @@ -95,13 +95,6 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev) if (IS_ERR(pcie->phy)) return PTR_ERR(pcie->phy); - ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (ret) { - dev_err(dev, "unable to get PCI host bridge resources\n"); - return ret; - } - /* PAXC doesn't support legacy IRQs, skip mapping */ switch (pcie->type) { case IPROC_PCIE_PAXC: diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c index ac6dfa087247..a745f35a8396 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c @@ -1027,15 +1027,8 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie) struct device *dev = pcie->dev; struct device_node *node = dev->of_node, *child; struct mtk_pcie_port *port, *tmp; - struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); - struct list_head *windows = &host->windows; int err; - err = pci_parse_request_of_pci_ranges(dev, windows, - &host->dma_ranges, NULL); - if (err) - return err; - for_each_available_child_of_node(node, child) { int slot; diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c index fa7b89378904..67f2a9d3bc29 100644 --- a/drivers/pci/controller/pcie-rcar-host.c +++ b/drivers/pci/controller/pcie-rcar-host.c @@ -913,11 +913,6 @@ static int rcar_pcie_probe(struct platform_device *pdev) pcie->dev = dev; platform_set_drvdata(pdev, host); - err = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (err) - return err; - pm_runtime_enable(pcie->dev); err = pm_runtime_get_sync(pcie->dev); if (err < 0) { diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c index 9a30d08976d8..fed4f6cd1b7b 100644 --- a/drivers/pci/controller/pcie-rockchip-host.c +++ b/drivers/pci/controller/pcie-rockchip-host.c @@ -989,11 +989,6 @@ static int rockchip_pcie_probe(struct platform_device *pdev) if (err < 0) goto err_deinit_port; - err = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (err) - goto err_remove_irq_domain; - err = rockchip_pcie_cfg_atu(rockchip); if (err) goto err_remove_irq_domain; diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c index 566165c18fad..7e7c23c555c7 100644 --- a/drivers/pci/controller/pcie-xilinx-nwl.c +++ b/drivers/pci/controller/pcie-xilinx-nwl.c @@ -838,13 +838,6 @@ static int nwl_pcie_probe(struct platform_device *pdev) return err; } - err = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (err) { - dev_err(dev, "Getting bridge resources failed\n"); - return err; - } - err = nwl_pcie_init_irq_domain(pcie); if (err) { dev_err(dev, "Failed creating IRQ Domain\n"); diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c index 7bf80f68efa9..f8b8ccea5cbc 100644 --- a/drivers/pci/controller/pcie-xilinx.c +++ b/drivers/pci/controller/pcie-xilinx.c @@ -641,13 +641,6 @@ static int xilinx_pcie_probe(struct platform_device *pdev) return err; } - err = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (err) { - 
dev_err(dev, "Getting bridge resources failed\n"); - return err; - } - bridge->sysdata = port; bridge->ops = &xilinx_pcie_ops; bridge->map_irq = of_irq_parse_and_map_pci; diff --git a/drivers/pci/of.c b/drivers/pci/of.c index cfb940c8b399..5e06aae1b4cd 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c @@ -521,28 +521,26 @@ int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin) EXPORT_SYMBOL_GPL(of_irq_parse_and_map_pci); #endif /* CONFIG_OF_IRQ */ -int pci_parse_request_of_pci_ranges(struct device *dev, - struct list_head *resources, - struct list_head *ib_resources, - struct resource **bus_range) +static int pci_parse_request_of_pci_ranges(struct device *dev, + struct pci_host_bridge *bridge) { int err, res_valid = 0; resource_size_t iobase; struct resource_entry *win, *tmp; - INIT_LIST_HEAD(resources); - if (ib_resources) - INIT_LIST_HEAD(ib_resources); - err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, resources, - ib_resources, &iobase); + INIT_LIST_HEAD(&bridge->windows); + INIT_LIST_HEAD(&bridge->dma_ranges); + + err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &bridge->windows, + &bridge->dma_ranges, &iobase); if (err) return err; - err = devm_request_pci_bus_resources(dev, resources); + err = devm_request_pci_bus_resources(dev, &bridge->windows); if (err) - goto out_release_res; + return err; - resource_list_for_each_entry_safe(win, tmp, resources) { + resource_list_for_each_entry_safe(win, tmp, &bridge->windows) { struct resource *res = win->res; switch (resource_type(res)) { @@ -557,10 +555,6 @@ int pci_parse_request_of_pci_ranges(struct device *dev, case IORESOURCE_MEM: res_valid |= !(res->flags & IORESOURCE_PREFETCH); break; - case IORESOURCE_BUS: - if (bus_range) - *bus_range = res; - break; } } @@ -568,12 +562,15 @@ int pci_parse_request_of_pci_ranges(struct device *dev, dev_warn(dev, "non-prefetchable memory resource required\n"); return 0; +} - out_release_res: - pci_free_resource_list(resources); - return err; +int devm_of_pci_bridge_init(struct device *dev, struct pci_host_bridge *bridge) +{ + if (!dev->of_node) + return 0; + + return pci_parse_request_of_pci_ranges(dev, bridge); } -EXPORT_SYMBOL_GPL(pci_parse_request_of_pci_ranges); #endif /* CONFIG_PCI */ diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 6d3f75867106..56d67071e116 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -627,6 +627,8 @@ void pci_release_of_node(struct pci_dev *dev); void pci_set_bus_of_node(struct pci_bus *bus); void pci_release_bus_of_node(struct pci_bus *bus); +int devm_of_pci_bridge_init(struct device *dev, struct pci_host_bridge *bridge); + #else static inline int of_pci_parse_bus_range(struct device_node *node, struct resource *res) @@ -650,6 +652,12 @@ static inline void pci_set_of_node(struct pci_dev *dev) { } static inline void pci_release_of_node(struct pci_dev *dev) { } static inline void pci_set_bus_of_node(struct pci_bus *bus) { } static inline void pci_release_bus_of_node(struct pci_bus *bus) { } + +static inline int devm_of_pci_bridge_init(struct device *dev, struct pci_host_bridge *bridge) +{ + return 0; +} + #endif /* CONFIG_OF */ #ifdef CONFIG_PCIEAER diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index f850782efc35..998f615cdb6d 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -635,6 +635,10 @@ struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev, if (ret) return NULL; + ret = devm_of_pci_bridge_init(dev, bridge); + if (ret) + return NULL; + return bridge; } 
EXPORT_SYMBOL(devm_pci_alloc_host_bridge); diff --git a/include/linux/pci.h b/include/linux/pci.h index c79d83304e52..2830799208fd 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -2303,10 +2303,6 @@ int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off, struct device_node; struct irq_domain; struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus); -int pci_parse_request_of_pci_ranges(struct device *dev, - struct list_head *resources, - struct list_head *ib_resources, - struct resource **bus_range); /* Arch may override this (weak) */ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus); @@ -2314,14 +2310,6 @@ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus); #else /* CONFIG_OF */ static inline struct irq_domain * pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; } -static inline int -pci_parse_request_of_pci_ranges(struct device *dev, - struct list_head *resources, - struct list_head *ib_resources, - struct resource **bus_range) -{ - return -EINVAL; -} #endif /* CONFIG_OF */ static inline struct device_node * -- cgit v1.2.3 From a0102bda5bc0991c5c8c7c07770b236894a810fd Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Thu, 30 Jul 2020 11:03:55 -0400 Subject: ceph: move sb->wb_pagevec_pool to be a global mempool When doing some testing recently, I hit some page allocation failures on mount, when creating the wb_pagevec_pool for the mount. That requires 128k (32 contiguous pages), and after thrashing the memory during an xfstests run, sometimes that would fail. 128k for each mount seems like a lot to hold in reserve for a rainy day, so let's change this to a global mempool that gets allocated when the module is plugged in. Signed-off-by: Jeff Layton Reviewed-by: Ilya Dryomov Signed-off-by: Ilya Dryomov --- fs/ceph/addr.c | 23 +++++++++++------------ fs/ceph/super.c | 22 ++++++++-------------- fs/ceph/super.h | 2 -- include/linux/ceph/libceph.h | 1 + 4 files changed, 20 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 01ad09733ac7..6ea761c84494 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -862,8 +862,7 @@ static void writepages_finish(struct ceph_osd_request *req) osd_data = osd_req_op_extent_osd_data(req, 0); if (osd_data->pages_from_pool) - mempool_free(osd_data->pages, - ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool); + mempool_free(osd_data->pages, ceph_wb_pagevec_pool); else kfree(osd_data->pages); ceph_osdc_put_request(req); @@ -955,10 +954,10 @@ retry: int num_ops = 0, op_idx; unsigned i, pvec_pages, max_pages, locked_pages = 0; struct page **pages = NULL, **data_pages; - mempool_t *pool = NULL; /* Becomes non-null if mempool used */ struct page *page; pgoff_t strip_unit_end = 0; u64 offset = 0, len = 0; + bool from_pool = false; max_pages = wsize >> PAGE_SHIFT; @@ -1057,16 +1056,16 @@ get_more_pages: sizeof(*pages), GFP_NOFS); if (!pages) { - pool = fsc->wb_pagevec_pool; - pages = mempool_alloc(pool, GFP_NOFS); + from_pool = true; + pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS); BUG_ON(!pages); } len = 0; } else if (page->index != (offset + len) >> PAGE_SHIFT) { - if (num_ops >= (pool ? CEPH_OSD_SLAB_OPS : - CEPH_OSD_MAX_OPS)) { + if (num_ops >= (from_pool ? 
CEPH_OSD_SLAB_OPS : + CEPH_OSD_MAX_OPS)) { redirty_page_for_writepage(wbc, page); unlock_page(page); break; @@ -1161,7 +1160,7 @@ new_request: offset, len); osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len, 0, - !!pool, false); + from_pool, false); osd_req_op_extent_update(req, op_idx, len); len = 0; @@ -1188,12 +1187,12 @@ new_request: dout("writepages got pages at %llu~%llu\n", offset, len); osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len, - 0, !!pool, false); + 0, from_pool, false); osd_req_op_extent_update(req, op_idx, len); BUG_ON(op_idx + 1 != req->r_num_ops); - pool = NULL; + from_pool = false; if (i < locked_pages) { BUG_ON(num_ops <= req->r_num_ops); num_ops -= req->r_num_ops; @@ -1204,8 +1203,8 @@ new_request: pages = kmalloc_array(locked_pages, sizeof(*pages), GFP_NOFS); if (!pages) { - pool = fsc->wb_pagevec_pool; - pages = mempool_alloc(pool, GFP_NOFS); + from_pool = true; + pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS); BUG_ON(!pages); } memcpy(pages, data_pages + i, diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 585aecea5cad..7ec0e6d03d10 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -637,8 +637,6 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, struct ceph_options *opt) { struct ceph_fs_client *fsc; - int page_count; - size_t size; int err; fsc = kzalloc(sizeof(*fsc), GFP_KERNEL); @@ -686,22 +684,12 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, if (!fsc->cap_wq) goto fail_inode_wq; - /* set up mempools */ - err = -ENOMEM; - page_count = fsc->mount_options->wsize >> PAGE_SHIFT; - size = sizeof (struct page *) * (page_count ? page_count : 1); - fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size); - if (!fsc->wb_pagevec_pool) - goto fail_cap_wq; - spin_lock(&ceph_fsc_lock); list_add_tail(&fsc->metric_wakeup, &ceph_fsc_list); spin_unlock(&ceph_fsc_lock); return fsc; -fail_cap_wq: - destroy_workqueue(fsc->cap_wq); fail_inode_wq: destroy_workqueue(fsc->inode_wq); fail_client: @@ -732,8 +720,6 @@ static void destroy_fs_client(struct ceph_fs_client *fsc) destroy_workqueue(fsc->inode_wq); destroy_workqueue(fsc->cap_wq); - mempool_destroy(fsc->wb_pagevec_pool); - destroy_mount_options(fsc->mount_options); ceph_destroy_client(fsc->client); @@ -752,6 +738,7 @@ struct kmem_cache *ceph_dentry_cachep; struct kmem_cache *ceph_file_cachep; struct kmem_cache *ceph_dir_file_cachep; struct kmem_cache *ceph_mds_request_cachep; +mempool_t *ceph_wb_pagevec_pool; static void ceph_inode_init_once(void *foo) { @@ -796,6 +783,10 @@ static int __init init_caches(void) if (!ceph_mds_request_cachep) goto bad_mds_req; + ceph_wb_pagevec_pool = mempool_create_kmalloc_pool(10, CEPH_MAX_WRITE_SIZE >> PAGE_SHIFT); + if (!ceph_wb_pagevec_pool) + goto bad_pagevec_pool; + error = ceph_fscache_register(); if (error) goto bad_fscache; @@ -804,6 +795,8 @@ static int __init init_caches(void) bad_fscache: kmem_cache_destroy(ceph_mds_request_cachep); +bad_pagevec_pool: + mempool_destroy(ceph_wb_pagevec_pool); bad_mds_req: kmem_cache_destroy(ceph_dir_file_cachep); bad_dir_file: @@ -834,6 +827,7 @@ static void destroy_caches(void) kmem_cache_destroy(ceph_file_cachep); kmem_cache_destroy(ceph_dir_file_cachep); kmem_cache_destroy(ceph_mds_request_cachep); + mempool_destroy(ceph_wb_pagevec_pool); ceph_fscache_unregister(); } diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 9001a896ae8c..4c3c964b1c54 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -118,8 +118,6 @@ struct ceph_fs_client 
{ struct ceph_mds_client *mdsc; - /* writeback */ - mempool_t *wb_pagevec_pool; atomic_long_t writeback_count; struct workqueue_struct *inode_wq; diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index e5ed1c541e7f..c8645f0b797d 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -282,6 +282,7 @@ extern struct kmem_cache *ceph_dentry_cachep; extern struct kmem_cache *ceph_file_cachep; extern struct kmem_cache *ceph_dir_file_cachep; extern struct kmem_cache *ceph_mds_request_cachep; +extern mempool_t *ceph_wb_pagevec_pool; /* ceph_common.c */ extern bool libceph_compatible(void *data); -- cgit v1.2.3 From df23bb18b44b9a1f2b54358201730e710a9df57f Mon Sep 17 00:00:00 2001 From: Stefano Brivio Date: Tue, 4 Aug 2020 07:53:42 +0200 Subject: ipv4: route: Ignore output interface in FIB lookup for PMTU route Currently, processes sending traffic to a local bridge with an encapsulation device as a port don't get ICMP errors if they exceed the PMTU of the encapsulated link. David Ahern suggested this as a hack, but it actually looks like the correct solution: when we update the PMTU for a given destination by means of updating or creating a route exception, the encapsulation might trigger this because of PMTU discovery happening either on the encapsulation device itself, or its lower layer. This happens on bridged encapsulations only. The output interface shouldn't matter, because we already have a valid destination. Drop the output interface restriction from the associated route lookup. For UDP tunnels, we will now have a route exception created for the encapsulation itself, with a MTU value reflecting its headroom, which allows a bridge forwarding IP packets originated locally to deliver errors back to the sending socket. The behaviour is now consistent with IPv6 and verified with selftests pmtu_ipv{4,6}_br_{geneve,vxlan}{4,6}_exception introduced later in this series. v2: - reset output interface only for bridge ports (David Ahern) - add and use netif_is_any_bridge_port() helper (David Ahern) Suggested-by: David Ahern Signed-off-by: Stefano Brivio Reviewed-by: David Ahern Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 5 +++++ net/ipv4/route.c | 5 +++++ 2 files changed, 10 insertions(+) (limited to 'include') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 88d40b9abaa1..90444622b703 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4840,6 +4840,11 @@ static inline bool netif_is_ovs_port(const struct net_device *dev) return dev->priv_flags & IFF_OVS_DATAPATH; } +static inline bool netif_is_any_bridge_port(const struct net_device *dev) +{ + return netif_is_bridge_port(dev) || netif_is_ovs_port(dev); +} + static inline bool netif_is_team_master(const struct net_device *dev) { return dev->priv_flags & IFF_TEAM; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index a01efa062f6b..8ca6bcab7b03 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1050,6 +1050,11 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, struct flowi4 fl4; ip_rt_build_flow_key(&fl4, sk, skb); + + /* Don't make lookup fail for bridged encapsulations */ + if (skb && netif_is_any_bridge_port(skb->dev)) + fl4.flowi4_oif = 0; + __ip_rt_update_pmtu(rt, &fl4, mtu); } -- cgit v1.2.3 From 4cb47a8644cc9eb8ec81190a50e79e6530d0297f Mon Sep 17 00:00:00 2001 From: Stefano Brivio Date: Tue, 4 Aug 2020 07:53:43 +0200 Subject: tunnels: PMTU discovery support for directly bridged IP packets It's currently possible to bridge Ethernet tunnels carrying IP packets directly to external interfaces without assigning them addresses and routes on the bridged network itself: this is the case for UDP tunnels bridged with a standard bridge or by Open vSwitch. PMTU discovery is currently broken with those configurations, because the encapsulation effectively decreases the MTU of the link, and while we are able to account for this using PMTU discovery on the lower layer, we don't have a way to relay ICMP or ICMPv6 messages needed by the sender, because we don't have valid routes to it. On the other hand, as a tunnel endpoint, we can't fragment packets as a general approach: this is for instance clearly forbidden for VXLAN by RFC 7348, section 4.3: VTEPs MUST NOT fragment VXLAN packets. Intermediate routers may fragment encapsulated VXLAN packets due to the larger frame size. The destination VTEP MAY silently discard such VXLAN fragments. The same paragraph recommends that the MTU over the physical network accomodates for encapsulations, but this isn't a practical option for complex topologies, especially for typical Open vSwitch use cases. Further, it states that: Other techniques like Path MTU discovery (see [RFC1191] and [RFC1981]) MAY be used to address this requirement as well. Now, PMTU discovery already works for routed interfaces, we get route exceptions created by the encapsulation device as they receive ICMP Fragmentation Needed and ICMPv6 Packet Too Big messages, and we already rebuild those messages with the appropriate MTU and route them back to the sender. Add the missing bits for bridged cases: - checks in skb_tunnel_check_pmtu() to understand if it's appropriate to trigger a reply according to RFC 1122 section 3.2.2 for ICMP and RFC 4443 section 2.4 for ICMPv6. This function is already called by UDP tunnels - a new function generating those ICMP or ICMPv6 replies. We can't reuse icmp_send() and icmp6_send() as we don't see the sender as a valid destination. 
This doesn't need to be generic, as we don't cover any other type of ICMP errors given that we only provide an encapsulation function to the sender While at it, make the MTU check in skb_tunnel_check_pmtu() accurate: we might receive GSO buffers here, and the passed headroom already includes the inner MAC length, so we don't have to account for it a second time (that would imply three MAC headers on the wire, but there are just two). This issue became visible while bridging IPv6 packets with 4500 bytes of payload over GENEVE using IPv4 with a PMTU of 4000. Given the 50 bytes of encapsulation headroom, we would advertise MTU as 3950, and we would reject fragmented IPv6 datagrams of 3958 bytes size on the wire. We're exclusively dealing with network MTU here, though, so we could get Ethernet frames up to 3964 octets in that case. v2: - moved skb_tunnel_check_pmtu() to ip_tunnel_core.c (David Ahern) - split IPv4/IPv6 functions (David Ahern) Signed-off-by: Stefano Brivio Reviewed-by: David Ahern Signed-off-by: David S. Miller --- drivers/net/bareudp.c | 5 +- drivers/net/geneve.c | 5 +- drivers/net/vxlan.c | 4 +- include/net/dst.h | 10 -- include/net/ip_tunnels.h | 2 + net/ipv4/ip_tunnel_core.c | 244 ++++++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 254 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index 3b6664c7e73c..841910f1db65 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -308,7 +308,7 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev, return PTR_ERR(rt); skb_tunnel_check_pmtu(skb, &rt->dst, - BAREUDP_IPV4_HLEN + info->options_len); + BAREUDP_IPV4_HLEN + info->options_len, false); sport = udp_flow_src_port(bareudp->net, skb, bareudp->sport_min, USHRT_MAX, @@ -369,7 +369,8 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (IS_ERR(dst)) return PTR_ERR(dst); - skb_tunnel_check_pmtu(skb, dst, BAREUDP_IPV6_HLEN + info->options_len); + skb_tunnel_check_pmtu(skb, dst, BAREUDP_IPV6_HLEN + info->options_len, + false); sport = udp_flow_src_port(bareudp->net, skb, bareudp->sport_min, USHRT_MAX, diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 017c13acc911..de86b6d82132 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -894,7 +894,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, return PTR_ERR(rt); skb_tunnel_check_pmtu(skb, &rt->dst, - GENEVE_IPV4_HLEN + info->options_len); + GENEVE_IPV4_HLEN + info->options_len, false); sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); if (geneve->cfg.collect_md) { @@ -955,7 +955,8 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (IS_ERR(dst)) return PTR_ERR(dst); - skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len); + skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len, + false); sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); if (geneve->cfg.collect_md) { diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 77658425db8a..1432544da507 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2720,7 +2720,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } ndst = &rt->dst; - skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM); + skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM, false); tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb); ttl = ttl ? 
: ip4_dst_hoplimit(&rt->dst); @@ -2760,7 +2760,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, goto out_unlock; } - skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM); + skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM, false); tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb); ttl = ttl ? : ip6_dst_hoplimit(ndst); diff --git a/include/net/dst.h b/include/net/dst.h index 852d8fb36ab7..6ae2e625050d 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -535,14 +535,4 @@ static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu) dst->ops->update_pmtu(dst, NULL, skb, mtu, false); } -static inline void skb_tunnel_check_pmtu(struct sk_buff *skb, - struct dst_entry *encap_dst, - int headroom) -{ - u32 encap_mtu = dst_mtu(encap_dst); - - if (skb->len > encap_mtu - headroom) - skb_dst_update_pmtu_no_confirm(skb, encap_mtu - headroom); -} - #endif /* _NET_DST_H */ diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 36025dea7612..02ccd32542d0 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -420,6 +420,8 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, u8 tos, u8 ttl, __be16 df, bool xnet); struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md, gfp_t flags); +int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst, + int headroom, bool reply); int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask); diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index f8b419e2475c..9ddee2a0c66d 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -184,6 +184,250 @@ int iptunnel_handle_offloads(struct sk_buff *skb, } EXPORT_SYMBOL_GPL(iptunnel_handle_offloads); +/** + * iptunnel_pmtud_build_icmp() - Build ICMP error message for PMTUD + * @skb: Original packet with L2 header + * @mtu: MTU value for ICMP error + * + * Return: length on success, negative error code if message couldn't be built. 
+ */ +static int iptunnel_pmtud_build_icmp(struct sk_buff *skb, int mtu) +{ + const struct iphdr *iph = ip_hdr(skb); + struct icmphdr *icmph; + struct iphdr *niph; + struct ethhdr eh; + int len, err; + + if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr))) + return -EINVAL; + + skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN); + pskb_pull(skb, ETH_HLEN); + skb_reset_network_header(skb); + + err = pskb_trim(skb, 576 - sizeof(*niph) - sizeof(*icmph)); + if (err) + return err; + + len = skb->len + sizeof(*icmph); + err = skb_cow(skb, sizeof(*niph) + sizeof(*icmph) + ETH_HLEN); + if (err) + return err; + + icmph = skb_push(skb, sizeof(*icmph)); + *icmph = (struct icmphdr) { + .type = ICMP_DEST_UNREACH, + .code = ICMP_FRAG_NEEDED, + .checksum = 0, + .un.frag.__unused = 0, + .un.frag.mtu = ntohs(mtu), + }; + icmph->checksum = ip_compute_csum(icmph, len); + skb_reset_transport_header(skb); + + niph = skb_push(skb, sizeof(*niph)); + *niph = (struct iphdr) { + .ihl = sizeof(*niph) / 4u, + .version = 4, + .tos = 0, + .tot_len = htons(len + sizeof(*niph)), + .id = 0, + .frag_off = htons(IP_DF), + .ttl = iph->ttl, + .protocol = IPPROTO_ICMP, + .saddr = iph->daddr, + .daddr = iph->saddr, + }; + ip_send_check(niph); + skb_reset_network_header(skb); + + skb->ip_summed = CHECKSUM_NONE; + + eth_header(skb, skb->dev, htons(eh.h_proto), eh.h_source, eh.h_dest, 0); + skb_reset_mac_header(skb); + + return skb->len; +} + +/** + * iptunnel_pmtud_check_icmp() - Trigger ICMP reply if needed and allowed + * @skb: Buffer being sent by encapsulation, L2 headers expected + * @mtu: Network MTU for path + * + * Return: 0 for no ICMP reply, length if built, negative value on error. + */ +static int iptunnel_pmtud_check_icmp(struct sk_buff *skb, int mtu) +{ + const struct icmphdr *icmph = icmp_hdr(skb); + const struct iphdr *iph = ip_hdr(skb); + + if (mtu <= 576 || iph->frag_off != htons(IP_DF)) + return 0; + + if (ipv4_is_lbcast(iph->daddr) || ipv4_is_multicast(iph->daddr) || + ipv4_is_zeronet(iph->saddr) || ipv4_is_loopback(iph->saddr) || + ipv4_is_lbcast(iph->saddr) || ipv4_is_multicast(iph->saddr)) + return 0; + + if (iph->protocol == IPPROTO_ICMP && icmp_is_err(icmph->type)) + return 0; + + return iptunnel_pmtud_build_icmp(skb, mtu); +} + +#if IS_ENABLED(CONFIG_IPV6) +/** + * iptunnel_pmtud_build_icmpv6() - Build ICMPv6 error message for PMTUD + * @skb: Original packet with L2 header + * @mtu: MTU value for ICMPv6 error + * + * Return: length on success, negative error code if message couldn't be built. 
+ */ +static int iptunnel_pmtud_build_icmpv6(struct sk_buff *skb, int mtu) +{ + const struct ipv6hdr *ip6h = ipv6_hdr(skb); + struct icmp6hdr *icmp6h; + struct ipv6hdr *nip6h; + struct ethhdr eh; + int len, err; + __wsum csum; + + if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr))) + return -EINVAL; + + skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN); + pskb_pull(skb, ETH_HLEN); + skb_reset_network_header(skb); + + err = pskb_trim(skb, IPV6_MIN_MTU - sizeof(*nip6h) - sizeof(*icmp6h)); + if (err) + return err; + + len = skb->len + sizeof(*icmp6h); + err = skb_cow(skb, sizeof(*nip6h) + sizeof(*icmp6h) + ETH_HLEN); + if (err) + return err; + + icmp6h = skb_push(skb, sizeof(*icmp6h)); + *icmp6h = (struct icmp6hdr) { + .icmp6_type = ICMPV6_PKT_TOOBIG, + .icmp6_code = 0, + .icmp6_cksum = 0, + .icmp6_mtu = htonl(mtu), + }; + skb_reset_transport_header(skb); + + nip6h = skb_push(skb, sizeof(*nip6h)); + *nip6h = (struct ipv6hdr) { + .priority = 0, + .version = 6, + .flow_lbl = { 0 }, + .payload_len = htons(len), + .nexthdr = IPPROTO_ICMPV6, + .hop_limit = ip6h->hop_limit, + .saddr = ip6h->daddr, + .daddr = ip6h->saddr, + }; + skb_reset_network_header(skb); + + csum = csum_partial(icmp6h, len, 0); + icmp6h->icmp6_cksum = csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr, len, + IPPROTO_ICMPV6, csum); + + skb->ip_summed = CHECKSUM_NONE; + + eth_header(skb, skb->dev, htons(eh.h_proto), eh.h_source, eh.h_dest, 0); + skb_reset_mac_header(skb); + + return skb->len; +} + +/** + * iptunnel_pmtud_check_icmpv6() - Trigger ICMPv6 reply if needed and allowed + * @skb: Buffer being sent by encapsulation, L2 headers expected + * @mtu: Network MTU for path + * + * Return: 0 for no ICMPv6 reply, length if built, negative value on error. + */ +static int iptunnel_pmtud_check_icmpv6(struct sk_buff *skb, int mtu) +{ + const struct ipv6hdr *ip6h = ipv6_hdr(skb); + int stype = ipv6_addr_type(&ip6h->saddr); + u8 proto = ip6h->nexthdr; + __be16 frag_off; + int offset; + + if (mtu <= IPV6_MIN_MTU) + return 0; + + if (stype == IPV6_ADDR_ANY || stype == IPV6_ADDR_MULTICAST || + stype == IPV6_ADDR_LOOPBACK) + return 0; + + offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &proto, + &frag_off); + if (offset < 0 || (frag_off & htons(~0x7))) + return 0; + + if (proto == IPPROTO_ICMPV6) { + struct icmp6hdr *icmp6h; + + if (!pskb_may_pull(skb, skb_network_header(skb) + + offset + 1 - skb->data)) + return 0; + + icmp6h = (struct icmp6hdr *)(skb_network_header(skb) + offset); + if (icmpv6_is_err(icmp6h->icmp6_type) || + icmp6h->icmp6_type == NDISC_REDIRECT) + return 0; + } + + return iptunnel_pmtud_build_icmpv6(skb, mtu); +} +#endif /* IS_ENABLED(CONFIG_IPV6) */ + +/** + * skb_tunnel_check_pmtu() - Check, update PMTU and trigger ICMP reply as needed + * @skb: Buffer being sent by encapsulation, L2 headers expected + * @encap_dst: Destination for tunnel encapsulation (outer IP) + * @headroom: Encapsulation header size, bytes + * @reply: Build matching ICMP or ICMPv6 message as a result + * + * L2 tunnel implementations that can carry IP and can be directly bridged + * (currently UDP tunnels) can't always rely on IP forwarding paths to handle + * PMTU discovery. In the bridged case, ICMP or ICMPv6 messages need to be built + * based on payload and sent back by the encapsulation itself. + * + * For routable interfaces, we just need to update the PMTU for the destination. 
+ * + * Return: 0 if ICMP error not needed, length if built, negative value on error + */ +int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst, + int headroom, bool reply) +{ + u32 mtu = dst_mtu(encap_dst) - headroom; + + if ((skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu)) || + (!skb_is_gso(skb) && (skb->len - skb_mac_header_len(skb)) <= mtu)) + return 0; + + skb_dst_update_pmtu_no_confirm(skb, mtu); + + if (!reply || skb->pkt_type == PACKET_HOST) + return 0; + + if (skb->protocol == htons(ETH_P_IP)) + return iptunnel_pmtud_check_icmp(skb, mtu); + +#if IS_ENABLED(CONFIG_IPV6) + if (skb->protocol == htons(ETH_P_IPV6)) + return iptunnel_pmtud_check_icmpv6(skb, mtu); +#endif + return 0; +} +EXPORT_SYMBOL(skb_tunnel_check_pmtu); + /* Often modified stats are per cpu, other are shared (netdev->stats) */ void ip_tunnel_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *tot) -- cgit v1.2.3 From 522ec6e0eed0ab0678e7d5b5bf00487dfe83f7ce Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Thu, 30 Jul 2020 18:04:33 -0400 Subject: drm/amdkfd: Replace bitmask with event idx in SMI event msg Event bitmask is a 64-bit mask with only 1 bit set. Sending this event bitmask in KFD SMI event message is both wasteful of memory and potentially limiting to only 64 events. Instead send event index in SMI event message. Please note this change does not break the ABI for the two event types defined so far. The new index is identical to the mask used before. Signed-off-by: Mukul Joshi Suggested-by: Felix Kuehling Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c | 24 +++++++++++++----------- include/uapi/linux/kfd_ioctl.h | 10 +++++++--- 2 files changed, 20 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c index 86c2c3e97944..4d4b6e3ab697 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c @@ -149,7 +149,7 @@ static int kfd_smi_ev_release(struct inode *inode, struct file *filep) return 0; } -static void add_event_to_kfifo(struct kfd_dev *dev, unsigned long long smi_event, +static void add_event_to_kfifo(struct kfd_dev *dev, unsigned int smi_event, char *event_msg, int len) { struct kfd_smi_client *client; @@ -157,14 +157,15 @@ static void add_event_to_kfifo(struct kfd_dev *dev, unsigned long long smi_event rcu_read_lock(); list_for_each_entry_rcu(client, &dev->smi_clients, list) { - if (!(READ_ONCE(client->events) & smi_event)) + if (!(READ_ONCE(client->events) & + KFD_SMI_EVENT_MASK_FROM_INDEX(smi_event))) continue; spin_lock(&client->lock); if (kfifo_avail(&client->fifo) >= len) { kfifo_in(&client->fifo, event_msg, len); wake_up_all(&client->wait_queue); } else { - pr_debug("smi_event(EventID: %llu): no space left\n", + pr_debug("smi_event(EventID: %u): no space left\n", smi_event); } spin_unlock(&client->lock); @@ -180,21 +181,21 @@ void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev, /* * ThermalThrottle msg = throttle_bitmask(8): * thermal_interrupt_count(16): - * 16 bytes event + 1 byte space + 8 byte throttle_bitmask + + * 1 byte event + 1 byte space + 8 byte throttle_bitmask + * 1 byte : + 16 byte thermal_interupt_counter + 1 byte \n + - * 1 byte \0 = 44 + * 1 byte \0 = 29 */ - char fifo_in[44]; + char fifo_in[29]; int len; if (list_empty(&dev->smi_clients)) return; - len = snprintf(fifo_in, 44, "%x %x:%llx\n", + len = snprintf(fifo_in, 29, 
"%x %x:%llx\n", KFD_SMI_EVENT_THERMAL_THROTTLE, throttle_bitmask, atomic64_read(&adev->smu.throttle_int_counter)); - add_event_to_kfifo(dev, KFD_SMI_EVENT_THERMAL_THROTTLE, fifo_in, len); + add_event_to_kfifo(dev, KFD_SMI_EVENT_THERMAL_THROTTLE, fifo_in, len); } void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid) @@ -202,9 +203,10 @@ void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid) struct amdgpu_device *adev = (struct amdgpu_device *)dev->kgd; struct amdgpu_task_info task_info; /* VmFault msg = (hex)uint32_pid(8) + :(1) + task name(16) = 25 */ - /* 16 bytes event + 1 byte space + 25 bytes msg + 1 byte \n = 43 + /* 1 byte event + 1 byte space + 25 bytes msg + 1 byte \n + + * 1 byte \0 = 29 */ - char fifo_in[43]; + char fifo_in[29]; int len; if (list_empty(&dev->smi_clients)) @@ -216,7 +218,7 @@ void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid) if (!task_info.pid) return; - len = snprintf(fifo_in, 43, "%x %x:%s\n", KFD_SMI_EVENT_VMFAULT, + len = snprintf(fifo_in, 29, "%x %x:%s\n", KFD_SMI_EVENT_VMFAULT, task_info.pid, task_info.task_name); add_event_to_kfifo(dev, KFD_SMI_EVENT_VMFAULT, fifo_in, len); diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index df6c7a43aadc..cb1f963a84e0 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -449,9 +449,13 @@ struct kfd_ioctl_import_dmabuf_args { /* * KFD SMI(System Management Interface) events */ -/* Event type (defined by bitmask) */ -#define KFD_SMI_EVENT_VMFAULT 0x0000000000000001 -#define KFD_SMI_EVENT_THERMAL_THROTTLE 0x0000000000000002 +enum kfd_smi_event { + KFD_SMI_EVENT_NONE = 0, /* not used */ + KFD_SMI_EVENT_VMFAULT = 1, /* event start counting at 1 */ + KFD_SMI_EVENT_THERMAL_THROTTLE = 2, +}; + +#define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1)) struct kfd_ioctl_smi_events_args { __u32 gpuid; /* to KFD */ -- cgit v1.2.3 From a8ac78357d9b71a5608c609094ad3b114a46ccd4 Mon Sep 17 00:00:00 2001 From: Max Gurtovoy Date: Mon, 3 Aug 2020 18:00:08 +0300 Subject: scsi: target: Make iscsit_register_transport() return void This function always returns 0. We can make it return void to simplify the code. Also, no caller ever checks the return value of this function. Link: https://lore.kernel.org/r/20200803150008.83920-1-maxg@mellanox.com Signed-off-by: Max Gurtovoy Signed-off-by: Martin K. 
Petersen --- drivers/target/iscsi/iscsi_target_transport.c | 4 +--- include/target/iscsi/iscsi_transport.h | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/target/iscsi/iscsi_target_transport.c b/drivers/target/iscsi/iscsi_target_transport.c index 036940518bfe..27c85f260459 100644 --- a/drivers/target/iscsi/iscsi_target_transport.c +++ b/drivers/target/iscsi/iscsi_target_transport.c @@ -31,7 +31,7 @@ void iscsit_put_transport(struct iscsit_transport *t) module_put(t->owner); } -int iscsit_register_transport(struct iscsit_transport *t) +void iscsit_register_transport(struct iscsit_transport *t) { INIT_LIST_HEAD(&t->t_node); @@ -40,8 +40,6 @@ int iscsit_register_transport(struct iscsit_transport *t) mutex_unlock(&transport_mutex); pr_debug("Registered iSCSI transport: %s\n", t->name); - - return 0; } EXPORT_SYMBOL(iscsit_register_transport); diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h index 75bee29fd7dd..b8feba7ffebc 100644 --- a/include/target/iscsi/iscsi_transport.h +++ b/include/target/iscsi/iscsi_transport.h @@ -43,7 +43,7 @@ static inline void *iscsit_priv_cmd(struct iscsi_cmd *cmd) * From iscsi_target_transport.c */ -extern int iscsit_register_transport(struct iscsit_transport *); +extern void iscsit_register_transport(struct iscsit_transport *); extern void iscsit_unregister_transport(struct iscsit_transport *); extern struct iscsit_transport *iscsit_get_transport(int); extern void iscsit_put_transport(struct iscsit_transport *); -- cgit v1.2.3 From f073531070d24bbb82cb2658952d949f4851024b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 28 Jul 2020 17:49:47 +0200 Subject: init: add an init_dup helper Add a simple helper to grab a reference to a file and install it at the next available fd, and switch the early init code over to it. 
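For illustration, a minimal sketch of how an early-init caller would use the new helper; this simply mirrors what the init/main.c hunk below does for the initial console, the function name here is made up and error handling is kept to the essentials:

#include <linux/err.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init_syscalls.h>

static int __init example_wire_up_console(void)
{
	struct file *file;
	int i, ret = 0;

	/* Open the initial console once... */
	file = filp_open("/dev/console", O_RDWR, 0);
	if (IS_ERR(file))
		return PTR_ERR(file);

	/* ...and install it at the next three free fds (stdin/stdout/stderr).
	 * init_dup() takes its own reference for every fd it installs. */
	for (i = 0; i < 3; i++) {
		ret = init_dup(file);
		if (ret)
			break;
	}

	/* Drop the reference taken by filp_open(); the fds keep their own. */
	fput(file);
	return ret;
}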
Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/init.c | 12 ++++++++++++ include/linux/init_syscalls.h | 1 + init/main.c | 8 ++++---- 3 files changed, 17 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/fs/init.c b/fs/init.c index db5c48a85644..e9c320a48cf1 100644 --- a/fs/init.c +++ b/fs/init.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include "internal.h" @@ -251,3 +252,14 @@ int __init init_utimes(char *filename, struct timespec64 *ts) path_put(&path); return error; } + +int __init init_dup(struct file *file) +{ + int fd; + + fd = get_unused_fd_flags(0); + if (fd < 0) + return fd; + fd_install(fd, get_file(file)); + return 0; +} diff --git a/include/linux/init_syscalls.h b/include/linux/init_syscalls.h index 3654b525ac0b..92045d18cbfc 100644 --- a/include/linux/init_syscalls.h +++ b/include/linux/init_syscalls.h @@ -16,3 +16,4 @@ int __init init_unlink(const char *pathname); int __init init_mkdir(const char *pathname, umode_t mode); int __init init_rmdir(const char *pathname); int __init init_utimes(char *filename, struct timespec64 *ts); +int __init init_dup(struct file *file); diff --git a/init/main.c b/init/main.c index 1c710d3e1d46..9dae9c4f806b 100644 --- a/init/main.c +++ b/init/main.c @@ -1467,10 +1467,10 @@ void __init console_on_rootfs(void) pr_err("Warning: unable to open an initial console.\n"); return; } - get_file_rcu_many(file, 2); - fd_install(get_unused_fd_flags(0), file); - fd_install(get_unused_fd_flags(0), file); - fd_install(get_unused_fd_flags(0), file); + init_dup(file); + init_dup(file); + init_dup(file); + fput(file); } static noinline void __init kernel_init_freeable(void) -- cgit v1.2.3 From 4476770881d7ac647e3bcae0943f37e00b9c3f3c Mon Sep 17 00:00:00 2001 From: Siddharth Gupta Date: Wed, 29 Jul 2020 10:40:00 -0700 Subject: remoteproc: Add remoteproc character device interface Add the character device interface into remoteproc framework. This interface can be used in order to boot/shutdown remote subsystems and provides a basic ioctl based interface to implement supplementary functionality. An ioctl call is implemented to enable the shutdown on release feature which will allow remote processors to be shutdown when the controlling userspace application crashes or hangs. 
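As a rough userspace sketch of the resulting interface (not part of this patch; the /dev/remoteproc0 path is an assumption based on the rproc device name and index 0), booting a remote processor and opting in to shutdown-on-release could look like this:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/remoteproc_cdev.h>

int main(void)
{
	__s32 enable = 1;
	int fd = open("/dev/remoteproc0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Ask the kernel to shut the remote processor down automatically if
	 * this process exits, crashes or otherwise closes the fd. */
	if (ioctl(fd, RPROC_SET_SHUTDOWN_ON_RELEASE, &enable) < 0)
		perror("RPROC_SET_SHUTDOWN_ON_RELEASE");

	/* The write handler accepts the "start" and "stop" commands. */
	if (write(fd, "start", strlen("start")) < 0)
		perror("start");

	/* ... talk to the remote processor ... */

	close(fd);	/* release triggers rproc_shutdown() per the ioctl above */
	return 0;
}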
Reviewed-by: Bjorn Andersson Reviewed-by: Mathieu Poirier Signed-off-by: Rishabh Bhatnagar Signed-off-by: Siddharth Gupta Link: https://lore.kernel.org/r/1596044401-22083-2-git-send-email-sidgup@codeaurora.org [bjorn: s/int32_t/s32/ per checkpatch] Signed-off-by: Bjorn Andersson --- Documentation/userspace-api/ioctl/ioctl-number.rst | 1 + drivers/remoteproc/Kconfig | 9 ++ drivers/remoteproc/Makefile | 1 + drivers/remoteproc/remoteproc_cdev.c | 124 +++++++++++++++++++++ drivers/remoteproc/remoteproc_internal.h | 28 +++++ include/linux/remoteproc.h | 5 + include/uapi/linux/remoteproc_cdev.h | 37 ++++++ 7 files changed, 205 insertions(+) create mode 100644 drivers/remoteproc/remoteproc_cdev.c create mode 100644 include/uapi/linux/remoteproc_cdev.h (limited to 'include') diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst index 59472cd6a11d..2a198838fca9 100644 --- a/Documentation/userspace-api/ioctl/ioctl-number.rst +++ b/Documentation/userspace-api/ioctl/ioctl-number.rst @@ -339,6 +339,7 @@ Code Seq# Include File Comments 0xB4 00-0F linux/gpio.h 0xB5 00-0F uapi/linux/rpmsg.h 0xB6 all linux/fpga-dfl.h +0xB7 all uapi/linux/remoteproc_cdev.h 0xC0 00-0F linux/usb/iowarrior.h 0xCA 00-0F uapi/misc/cxl.h 0xCA 10-2F uapi/misc/ocxl.h diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index 48315dc4a30c..c6659dfea7c7 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig @@ -14,6 +14,15 @@ config REMOTEPROC if REMOTEPROC +config REMOTEPROC_CDEV + bool "Remoteproc character device interface" + help + Say y here to have a character device interface for the remoteproc + framework. Userspace can boot/shutdown remote processors through + this interface. + + It's safe to say N if you don't want to use this interface. + config IMX_REMOTEPROC tristate "IMX6/7 remoteproc support" depends on ARCH_MXC diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile index 4d4307dc8fa9..3dfa28e6c701 100644 --- a/drivers/remoteproc/Makefile +++ b/drivers/remoteproc/Makefile @@ -10,6 +10,7 @@ remoteproc-y += remoteproc_debugfs.o remoteproc-y += remoteproc_sysfs.o remoteproc-y += remoteproc_virtio.o remoteproc-y += remoteproc_elf_loader.o +obj-$(CONFIG_REMOTEPROC_CDEV) += remoteproc_cdev.o obj-$(CONFIG_IMX_REMOTEPROC) += imx_rproc.o obj-$(CONFIG_INGENIC_VPU_RPROC) += ingenic_rproc.o obj-$(CONFIG_MTK_SCP) += mtk_scp.o mtk_scp_ipi.o diff --git a/drivers/remoteproc/remoteproc_cdev.c b/drivers/remoteproc/remoteproc_cdev.c new file mode 100644 index 000000000000..b19ea3057bde --- /dev/null +++ b/drivers/remoteproc/remoteproc_cdev.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Character device interface driver for Remoteproc framework. + * + * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "remoteproc_internal.h" + +#define NUM_RPROC_DEVICES 64 +static dev_t rproc_major; + +static ssize_t rproc_cdev_write(struct file *filp, const char __user *buf, size_t len, loff_t *pos) +{ + struct rproc *rproc = container_of(filp->f_inode->i_cdev, struct rproc, cdev); + int ret = 0; + char cmd[10]; + + if (!len || len > sizeof(cmd)) + return -EINVAL; + + ret = copy_from_user(cmd, buf, len); + if (ret) + return -EFAULT; + + if (!strncmp(cmd, "start", len)) { + if (rproc->state == RPROC_RUNNING) + return -EBUSY; + + ret = rproc_boot(rproc); + } else if (!strncmp(cmd, "stop", len)) { + if (rproc->state != RPROC_RUNNING) + return -EINVAL; + + rproc_shutdown(rproc); + } else { + dev_err(&rproc->dev, "Unrecognized option\n"); + ret = -EINVAL; + } + + return ret ? ret : len; +} + +static long rproc_device_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) +{ + struct rproc *rproc = container_of(filp->f_inode->i_cdev, struct rproc, cdev); + void __user *argp = (void __user *)arg; + s32 param; + + switch (ioctl) { + case RPROC_SET_SHUTDOWN_ON_RELEASE: + if (copy_from_user(¶m, argp, sizeof(s32))) + return -EFAULT; + + rproc->cdev_put_on_release = !!param; + break; + case RPROC_GET_SHUTDOWN_ON_RELEASE: + param = (s32)rproc->cdev_put_on_release; + if (copy_to_user(argp, ¶m, sizeof(s32))) + return -EFAULT; + + break; + default: + dev_err(&rproc->dev, "Unsupported ioctl\n"); + return -EINVAL; + } + + return 0; +} + +static int rproc_cdev_release(struct inode *inode, struct file *filp) +{ + struct rproc *rproc = container_of(inode->i_cdev, struct rproc, cdev); + + if (rproc->cdev_put_on_release && rproc->state == RPROC_RUNNING) + rproc_shutdown(rproc); + + return 0; +} + +static const struct file_operations rproc_fops = { + .write = rproc_cdev_write, + .unlocked_ioctl = rproc_device_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .release = rproc_cdev_release, +}; + +int rproc_char_device_add(struct rproc *rproc) +{ + int ret; + + cdev_init(&rproc->cdev, &rproc_fops); + rproc->cdev.owner = THIS_MODULE; + + rproc->dev.devt = MKDEV(MAJOR(rproc_major), rproc->index); + cdev_set_parent(&rproc->cdev, &rproc->dev.kobj); + ret = cdev_add(&rproc->cdev, rproc->dev.devt, 1); + if (ret < 0) + dev_err(&rproc->dev, "Failed to add char dev for %s\n", rproc->name); + + return ret; +} + +void rproc_char_device_remove(struct rproc *rproc) +{ + __unregister_chrdev(MAJOR(rproc->dev.devt), rproc->index, 1, "remoteproc"); +} + +void __init rproc_init_cdev(void) +{ + int ret; + + ret = alloc_chrdev_region(&rproc_major, 0, NUM_RPROC_DEVICES, "remoteproc"); + if (ret < 0) + pr_err("Failed to alloc rproc_cdev region, err %d\n", ret); +} diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h index cd4176b033a3..c34002888d2c 100644 --- a/drivers/remoteproc/remoteproc_internal.h +++ b/drivers/remoteproc/remoteproc_internal.h @@ -53,6 +53,34 @@ void rproc_exit_sysfs(void); void rproc_coredump_cleanup(struct rproc *rproc); void rproc_coredump(struct rproc *rproc); +#ifdef CONFIG_REMOTEPROC_CDEV +void rproc_init_cdev(void); +void rproc_exit_cdev(void); +int rproc_char_device_add(struct rproc *rproc); +void rproc_char_device_remove(struct rproc *rproc); +#else +static inline void rproc_init_cdev(void) +{ +} + +static inline void rproc_exit_cdev(void) +{ +} + +/* + * The character device interface is an optional feature, if it is not enabled + * the function should not return an error. 
+ */ +static inline int rproc_char_device_add(struct rproc *rproc) +{ + return 0; +} + +static inline void rproc_char_device_remove(struct rproc *rproc) +{ +} +#endif + void rproc_free_vring(struct rproc_vring *rvring); int rproc_alloc_vring(struct rproc_vdev *rvdev, int i); diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 0e8d2ff575b4..2fa68bf5aa4f 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -509,6 +510,8 @@ struct rproc_dump_segment { * @autonomous: true if an external entity has booted the remote processor * @dump_segments: list of segments in the firmware * @nb_vdev: number of vdev currently handled by rproc + * @char_dev: character device of the rproc + * @cdev_put_on_release: flag to indicate if remoteproc should be shutdown on @char_dev release */ struct rproc { struct list_head node; @@ -546,6 +549,8 @@ struct rproc { int nb_vdev; u8 elf_class; u16 elf_machine; + struct cdev cdev; + bool cdev_put_on_release; }; /** diff --git a/include/uapi/linux/remoteproc_cdev.h b/include/uapi/linux/remoteproc_cdev.h new file mode 100644 index 000000000000..c43768e4b0dc --- /dev/null +++ b/include/uapi/linux/remoteproc_cdev.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * IOCTLs for Remoteproc's character device interface. + * + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _UAPI_REMOTEPROC_CDEV_H_ +#define _UAPI_REMOTEPROC_CDEV_H_ + +#include +#include + +#define RPROC_MAGIC 0xB7 + +/* + * The RPROC_SET_SHUTDOWN_ON_RELEASE ioctl allows to enable/disable the shutdown of a remote + * processor automatically when the controlling userpsace closes the char device interface. + * + * input parameter: integer + * 0 : disable automatic shutdown + * other : enable automatic shutdown + */ +#define RPROC_SET_SHUTDOWN_ON_RELEASE _IOW(RPROC_MAGIC, 1, __s32) + +/* + * The RPROC_GET_SHUTDOWN_ON_RELEASE ioctl gets information about whether the automatic shutdown of + * a remote processor is enabled or disabled when the controlling userspace closes the char device + * interface. + * + * output parameter: integer + * 0 : automatic shutdown disable + * other : automatic shutdown enable + */ +#define RPROC_GET_SHUTDOWN_ON_RELEASE _IOR(RPROC_MAGIC, 2, __s32) + +#endif -- cgit v1.2.3 From 7de62bc09fe6d100ebd6c931c3f9a6fa7e6ed10f Mon Sep 17 00:00:00 2001 From: Olga Kornievskaia Date: Wed, 15 Jul 2020 13:17:52 -0400 Subject: SUNRPC dont update timeout value on connection reset Current behaviour: every time a v3 operation is re-sent to the server we update (double) the timeout. There is no distinction between whether or not the previous timer had expired before the re-sent happened. Here's the scenario: 1. Client sends a v3 operation 2. Server RST-s the connection (prior to the timeout) (eg., connection is immediately reset) 3. Client re-sends a v3 operation but the timeout is now 120sec. As a result, an application sees 2mins pause before a retry in case server again does not reply. Instead, this patch proposes to keep track off when the minor timeout should happen and if it didn't, then don't update the new timeout. Value is updated based on the previous value to make timeouts predictable. 
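To illustrate with made-up numbers, assuming an initial timeout of 60 seconds: a request sent at t=0 gets rq_minortimeo set to t=60s. If the server resets the connection at t=1s and the request is retransmitted, xprt_adjust_timeout() now returns early because the minor timeout has not yet expired, so rq_timeout stays at 60 seconds instead of doubling to 120. Only once 60 seconds pass without a reply does the timeout double as before.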
Signed-off-by: Olga Kornievskaia Signed-off-by: Trond Myklebust --- include/linux/sunrpc/xprt.h | 1 + net/sunrpc/xprt.c | 9 +++++++++ 2 files changed, 10 insertions(+) (limited to 'include') diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index e64bd8222f55..a603d48d2b2c 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -101,6 +101,7 @@ struct rpc_rqst { * used in the softirq. */ unsigned long rq_majortimeo; /* major timeout alarm */ + unsigned long rq_minortimeo; /* minor timeout alarm */ unsigned long rq_timeout; /* Current timeout value */ ktime_t rq_rtt; /* round-trip time */ unsigned int rq_retries; /* # of retries */ diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index d5cc5db9dbf3..6ba9d5842629 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -607,6 +607,11 @@ static void xprt_reset_majortimeo(struct rpc_rqst *req) req->rq_majortimeo += xprt_calc_majortimeo(req); } +static void xprt_reset_minortimeo(struct rpc_rqst *req) +{ + req->rq_minortimeo += req->rq_timeout; +} + static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req) { unsigned long time_init; @@ -618,6 +623,7 @@ static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req) time_init = xprt_abs_ktime_to_jiffies(task->tk_start); req->rq_timeout = task->tk_client->cl_timeout->to_initval; req->rq_majortimeo = time_init + xprt_calc_majortimeo(req); + req->rq_minortimeo = time_init + req->rq_timeout; } /** @@ -631,6 +637,8 @@ int xprt_adjust_timeout(struct rpc_rqst *req) const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout; int status = 0; + if (time_before(jiffies, req->rq_minortimeo)) + return status; if (time_before(jiffies, req->rq_majortimeo)) { if (to->to_exponential) req->rq_timeout <<= 1; @@ -649,6 +657,7 @@ int xprt_adjust_timeout(struct rpc_rqst *req) spin_unlock(&xprt->transport_lock); status = -ETIMEDOUT; } + xprt_reset_minortimeo(req); if (req->rq_timeout == 0) { printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n"); -- cgit v1.2.3 From 262e6ae7081df304fc625cf368d5c2cbba2bb991 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 28 Jul 2020 23:33:33 +0200 Subject: modules: inherit TAINT_PROPRIETARY_MODULE If a TAINT_PROPRIETARY_MODULE module exports a symbol, inherit the taint flag for all modules importing these symbols, and don't allow loading symbols from TAINT_PROPRIETARY_MODULE modules if the module previously imported gplonly symbols. Add an anti-circumvention device so people don't accidentally get themselves into trouble this way. Comment from Greg: "Ah, the proven-to-be-illegal "GPL Condom" defense :)" [jeyu: pr_info -> pr_err and pr_warn as per discussion] Link: http://lore.kernel.org/r/20200730162957.GA22469@lst.de Acked-by: Daniel Vetter Reviewed-by: Greg Kroah-Hartman Signed-off-by: Christoph Hellwig Signed-off-by: Jessica Yu --- include/linux/module.h | 1 + kernel/module.c | 26 ++++++++++++++++++++++++++ 2 files changed, 27 insertions(+) (limited to 'include') diff --git a/include/linux/module.h b/include/linux/module.h index 30b0f5fcdb3c..e30ed5fa33a7 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -389,6 +389,7 @@ struct module { unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const s32 *gpl_crcs; + bool using_gplonly_symbols; #ifdef CONFIG_UNUSED_SYMBOLS /* unused exported symbols.
*/ diff --git a/kernel/module.c b/kernel/module.c index 656f5ff27088..09bf5a652a47 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -1431,6 +1431,24 @@ static int verify_namespace_is_imported(const struct load_info *info, return 0; } +static bool inherit_taint(struct module *mod, struct module *owner) +{ + if (!owner || !test_bit(TAINT_PROPRIETARY_MODULE, &owner->taints)) + return true; + + if (mod->using_gplonly_symbols) { + pr_err("%s: module using GPL-only symbols uses symbols from proprietary module %s.\n", + mod->name, owner->name); + return false; + } + + if (!test_bit(TAINT_PROPRIETARY_MODULE, &mod->taints)) { + pr_warn("%s: module uses symbols from proprietary module %s, inheriting taint.\n", + mod->name, owner->name); + set_bit(TAINT_PROPRIETARY_MODULE, &mod->taints); + } + return true; +} /* Resolve a symbol for this module. I.e. if we find one, record usage. */ static const struct kernel_symbol *resolve_symbol(struct module *mod, @@ -1456,6 +1474,14 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod, if (!sym) goto unlock; + if (license == GPL_ONLY) + mod->using_gplonly_symbols = true; + + if (!inherit_taint(mod, owner)) { + sym = NULL; + goto getname; + } + if (!check_version(info, name, mod, crc)) { sym = ERR_PTR(-EINVAL); goto getname; -- cgit v1.2.3 From 75820314de26b00aaea0d0e79269c0d17914c5c4 Mon Sep 17 00:00:00 2001 From: Codrin Ciubotariu Date: Tue, 4 Aug 2020 12:59:24 +0300 Subject: i2c: core: add generic I2C GPIO recovery Multiple I2C bus drivers use similar bindings to obtain information needed for I2C recovery. For example, for platforms using device-tree, the properties look something like this: &i2c { ... pinctrl-names = "default", "gpio"; pinctrl-0 = <&pinctrl_i2c_default>; pinctrl-1 = <&pinctrl_i2c_gpio>; sda-gpios = <&pio 0 GPIO_ACTIVE_HIGH>; scl-gpios = <&pio 1 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; ... } For this reason, we can add this common initialization in the core. This way, other I2C bus drivers will be able to support GPIO recovery just by providing a pointer to platform's pinctrl and calling i2c_recover_bus() when SDA is stuck low. 
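As an illustrative sketch only (the foo_* names are hypothetical and most of the usual adapter setup is omitted), a bus driver opting in to the generic recovery would do roughly the following:

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>

struct foo_i2c {
	struct i2c_adapter adap;
	/* ... controller specific state ... */
};

static struct i2c_bus_recovery_info foo_recovery_info; /* recover_bus left NULL */

static int foo_i2c_probe(struct platform_device *pdev)
{
	struct foo_i2c *priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;

	/* ... clocks, registers, adapter name/ops etc. omitted ... */

	/* Hand the core a pinctrl handle; it looks up the "default" and
	 * "gpio"/"recovery" states plus the scl/sda GPIOs by itself and plugs
	 * in i2c_generic_scl_recovery() when they are usable. */
	foo_recovery_info.pinctrl = devm_pinctrl_get(&pdev->dev);
	if (IS_ERR(foo_recovery_info.pinctrl))
		foo_recovery_info.pinctrl = NULL;
	priv->adap.bus_recovery_info = &foo_recovery_info;

	return i2c_add_adapter(&priv->adap);
}

/* Later, in the transfer path, when the controller sees SDA stuck low: */
static void foo_i2c_handle_stuck_bus(struct foo_i2c *priv)
{
	i2c_recover_bus(&priv->adap);
}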
Signed-off-by: Codrin Ciubotariu [wsa: inverted one logic for better readability, minor update to kdoc] Signed-off-by: Wolfram Sang --- drivers/i2c/i2c-core-base.c | 126 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/i2c.h | 11 ++++ 2 files changed, 137 insertions(+) (limited to 'include') diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 69217d2193da..ece25560eae4 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -181,6 +182,8 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap) if (bri->prepare_recovery) bri->prepare_recovery(adap); + if (bri->pinctrl) + pinctrl_select_state(bri->pinctrl, bri->pins_gpio); /* * If we can set SDA, we will always create a STOP to ensure additional @@ -236,6 +239,8 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap) if (bri->unprepare_recovery) bri->unprepare_recovery(adap); + if (bri->pinctrl) + pinctrl_select_state(bri->pinctrl, bri->pins_default); return ret; } @@ -251,6 +256,125 @@ int i2c_recover_bus(struct i2c_adapter *adap) } EXPORT_SYMBOL_GPL(i2c_recover_bus); +static void i2c_gpio_init_pinctrl_recovery(struct i2c_adapter *adap) +{ + struct i2c_bus_recovery_info *bri = adap->bus_recovery_info; + struct device *dev = &adap->dev; + struct pinctrl *p = bri->pinctrl; + + /* + * we can't change states without pinctrl, so remove the states if + * populated + */ + if (!p) { + bri->pins_default = NULL; + bri->pins_gpio = NULL; + return; + } + + if (!bri->pins_default) { + bri->pins_default = pinctrl_lookup_state(p, + PINCTRL_STATE_DEFAULT); + if (IS_ERR(bri->pins_default)) { + dev_dbg(dev, PINCTRL_STATE_DEFAULT " state not found for GPIO recovery\n"); + bri->pins_default = NULL; + } + } + if (!bri->pins_gpio) { + bri->pins_gpio = pinctrl_lookup_state(p, "gpio"); + if (IS_ERR(bri->pins_gpio)) + bri->pins_gpio = pinctrl_lookup_state(p, "recovery"); + + if (IS_ERR(bri->pins_gpio)) { + dev_dbg(dev, "no gpio or recovery state found for GPIO recovery\n"); + bri->pins_gpio = NULL; + } + } + + /* for pinctrl state changes, we need all the information */ + if (bri->pins_default && bri->pins_gpio) { + dev_info(dev, "using pinctrl states for GPIO recovery"); + } else { + bri->pinctrl = NULL; + bri->pins_default = NULL; + bri->pins_gpio = NULL; + } +} + +static int i2c_gpio_init_generic_recovery(struct i2c_adapter *adap) +{ + struct i2c_bus_recovery_info *bri = adap->bus_recovery_info; + struct device *dev = &adap->dev; + struct gpio_desc *gpiod; + int ret = 0; + + /* + * don't touch the recovery information if the driver is not using + * generic SCL recovery + */ + if (bri->recover_bus && bri->recover_bus != i2c_generic_scl_recovery) + return 0; + + /* + * pins might be taken as GPIO, so we should inform pinctrl about + * this and move the state to GPIO + */ + if (bri->pinctrl) + pinctrl_select_state(bri->pinctrl, bri->pins_gpio); + + /* + * if there is incomplete or no recovery information, see if generic + * GPIO recovery is available + */ + if (!bri->scl_gpiod) { + gpiod = devm_gpiod_get(dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN); + if (PTR_ERR(gpiod) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto cleanup_pinctrl_state; + } + if (!IS_ERR(gpiod)) { + bri->scl_gpiod = gpiod; + bri->recover_bus = i2c_generic_scl_recovery; + dev_info(dev, "using generic GPIOs for recovery\n"); + } + } + + /* SDA GPIOD line is optional, so we care about DEFER only */ + if (!bri->sda_gpiod) { + /* + * We have SCL. 
Pull SCL low and wait a bit so that SDA glitches + * have no effect. + */ + gpiod_direction_output(bri->scl_gpiod, 0); + udelay(10); + gpiod = devm_gpiod_get(dev, "sda", GPIOD_IN); + + /* Wait a bit in case of a SDA glitch, and then release SCL. */ + udelay(10); + gpiod_direction_output(bri->scl_gpiod, 1); + + if (PTR_ERR(gpiod) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto cleanup_pinctrl_state; + } + if (!IS_ERR(gpiod)) + bri->sda_gpiod = gpiod; + } + +cleanup_pinctrl_state: + /* change the state of the pins back to their default state */ + if (bri->pinctrl) + pinctrl_select_state(bri->pinctrl, bri->pins_default); + + return ret; +} + +static int i2c_gpio_init_recovery(struct i2c_adapter *adap) +{ + i2c_gpio_init_pinctrl_recovery(adap); + return i2c_gpio_init_generic_recovery(adap); +} + static void i2c_init_recovery(struct i2c_adapter *adap) { struct i2c_bus_recovery_info *bri = adap->bus_recovery_info; @@ -259,6 +383,8 @@ static void i2c_init_recovery(struct i2c_adapter *adap) if (!bri) return; + i2c_gpio_init_recovery(adap); + if (!bri->recover_bus) { err_str = "no recover_bus() found"; goto err; diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 8ea9c3f86dba..d387d3786429 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -606,6 +606,14 @@ struct i2c_timings { * may configure padmux here for SDA/SCL line or something else they want. * @scl_gpiod: gpiod of the SCL line. Only required for GPIO recovery. * @sda_gpiod: gpiod of the SDA line. Only required for GPIO recovery. + * @pinctrl: pinctrl used by GPIO recovery to change the state of the I2C pins. + * Optional. + * @pins_default: default pinctrl state of SCL/SDA lines, when they are assigned + * to the I2C bus. Optional. Populated internally for GPIO recovery, if + * state with the name PINCTRL_STATE_DEFAULT is found and pinctrl is valid. + * @pins_gpio: recovery pinctrl state of SCL/SDA lines, when they are used as + * GPIOs. Optional. Populated internally for GPIO recovery, if this state + * is called "gpio" or "recovery" and pinctrl is valid. */ struct i2c_bus_recovery_info { int (*recover_bus)(struct i2c_adapter *adap); @@ -622,6 +630,9 @@ struct i2c_bus_recovery_info { /* gpio recovery */ struct gpio_desc *scl_gpiod; struct gpio_desc *sda_gpiod; + struct pinctrl *pinctrl; + struct pinctrl_state *pins_default; + struct pinctrl_state *pins_gpio; }; int i2c_recover_bus(struct i2c_adapter *adap); -- cgit v1.2.3 From 4106820b90ab0f963571d14bdbf9a2587ca80532 Mon Sep 17 00:00:00 2001 From: Mohan Kumar Date: Wed, 5 Aug 2020 15:22:20 +0530 Subject: ALSA: hda: Add dma stop delay variable A new member, dma_stop_delay, is added to the hdac_bus structure. If the DMA RUN bit is not disabled within the given timeout in snd_hdac_stream_sync() and a stream reset follows, a memory decode error can occur between the reset set and clear operations; the new delay is used to avoid that error.
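As an illustration only (the 100 microsecond figure is a placeholder picked for the example, not a value taken from this patch), a controller driver that needs the extra delay would simply set the new field once the bus has been initialized:

#include <sound/hdaudio.h>

static void foo_hda_apply_dma_stop_quirk(struct hdac_bus *bus)
{
	/* snd_hdac_stream_reset() waits this many extra microseconds between
	 * asserting and deasserting the stream reset, but only when the DMA
	 * RUN bit was still set when the reset started (placeholder value). */
	bus->dma_stop_delay = 100;
}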
Signed-off-by: Mohan Kumar Link: https://lore.kernel.org/r/20200805095221.5476-3-mkumard@nvidia.com Signed-off-by: Takashi Iwai --- include/sound/hdaudio.h | 3 +++ sound/hda/hdac_stream.c | 7 +++++++ 2 files changed, 10 insertions(+) (limited to 'include') diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index c1f78d9a6e47..6eed61e6cf8a 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -347,6 +347,9 @@ struct hdac_bus { int bdl_pos_adj; /* BDL position adjustment */ + /* delay time in us for dma stop */ + unsigned int dma_stop_delay; + /* locks */ spinlock_t reg_lock; struct mutex cmd_mutex; diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c index a38a2af1654f..abe7a1b16fe1 100644 --- a/sound/hda/hdac_stream.c +++ b/sound/hda/hdac_stream.c @@ -150,9 +150,12 @@ void snd_hdac_stream_reset(struct hdac_stream *azx_dev) { unsigned char val; int timeout; + int dma_run_state; snd_hdac_stream_clear(azx_dev); + dma_run_state = snd_hdac_stream_readb(azx_dev, SD_CTL) & SD_CTL_DMA_START; + snd_hdac_stream_updateb(azx_dev, SD_CTL, 0, SD_CTL_STREAM_RESET); udelay(3); timeout = 300; @@ -162,6 +165,10 @@ void snd_hdac_stream_reset(struct hdac_stream *azx_dev) if (val) break; } while (--timeout); + + if (azx_dev->bus->dma_stop_delay && dma_run_state) + udelay(azx_dev->bus->dma_stop_delay); + val &= ~SD_CTL_STREAM_RESET; snd_hdac_stream_writeb(azx_dev, SD_CTL, val); udelay(3); -- cgit v1.2.3 From 5487196878bc926a1ee15069c13aa48b9a894fab Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 10 Jul 2020 06:46:04 -0400 Subject: virtio_ring: sparse warning fixup virtio_store_mb was built with split ring in mind so it accepts __virtio16 arguments. Packed ring uses __le16 values, so sparse complains. It's just a store with some barriers so let's convert it to a macro, we don't loose too much type safety by doing that. Signed-off-by: Michael S. Tsirkin Acked-by: Cornelia Huck --- include/linux/virtio_ring.h | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h index 3dc70adfe5f5..b485b13fa50b 100644 --- a/include/linux/virtio_ring.h +++ b/include/linux/virtio_ring.h @@ -46,16 +46,15 @@ static inline void virtio_wmb(bool weak_barriers) dma_wmb(); } -static inline void virtio_store_mb(bool weak_barriers, - __virtio16 *p, __virtio16 v) -{ - if (weak_barriers) { - virt_store_mb(*p, v); - } else { - WRITE_ONCE(*p, v); - mb(); - } -} +#define virtio_store_mb(weak_barriers, p, v) \ +do { \ + if (weak_barriers) { \ + virt_store_mb(*p, v); \ + } else { \ + WRITE_ONCE(*p, v); \ + mb(); \ + } \ +} while (0) \ struct virtio_device; struct virtqueue; -- cgit v1.2.3 From a4235ec06acf05c58081700cda02dcd480d9e9cb Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 10 Jul 2020 03:20:21 -0400 Subject: virtio: allow __virtioXX, __leXX in config space Currently all config space fields are of the type __uXX. This confuses people and some drivers (notably vdpa) access them using CPU endian-ness - which only works well for legacy or LE platforms. Update virtio_cread/virtio_cwrite macros to allow __virtioXX and __leXX field types. Follow-up patches will convert config space to use these types. Signed-off-by: Michael S. 
Tsirkin Acked-by: Cornelia Huck --- include/linux/virtio_config.h | 50 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 48 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 3b4eae5ac5e3..64da491936f7 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -6,6 +6,7 @@ #include #include #include +#include #include struct irq_affinity; @@ -287,12 +288,57 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) return __cpu_to_virtio64(virtio_is_little_endian(vdev), val); } +/* + * Only the checker differentiates between __virtioXX and __uXX types. But we + * try to share as much code as we can with the regular GCC build. + */ +#if !defined(CONFIG_CC_IS_GCC) && !defined(__CHECKER__) + +/* Not a checker - we can keep things simple */ +#define __virtio_native_typeof(x) typeof(x) + +#else + +/* + * We build this out of a couple of helper macros in a vain attempt to + * help you keep your lunch down while reading it. + */ +#define __virtio_pick_value(x, type, then, otherwise) \ + __builtin_choose_expr(__same_type(x, type), then, otherwise) + +#define __virtio_pick_type(x, type, then, otherwise) \ + __virtio_pick_value(x, type, (then)0, otherwise) + +#define __virtio_pick_endian(x, x16, x32, x64, otherwise) \ + __virtio_pick_type(x, x16, __u16, \ + __virtio_pick_type(x, x32, __u32, \ + __virtio_pick_type(x, x64, __u64, \ + otherwise))) + +#define __virtio_native_typeof(x) typeof( \ + __virtio_pick_type(x, __u8, __u8, \ + __virtio_pick_endian(x, __virtio16, __virtio32, __virtio64, \ + __virtio_pick_endian(x, __le16, __le32, __le64, \ + __virtio_pick_endian(x, __u16, __u32, __u64, \ + /* No other type allowed */ \ + (void)0))))) + +#endif + +#define __virtio_native_type(structname, member) \ + __virtio_native_typeof(((structname*)0)->member) + +#define __virtio_typecheck(structname, member, val) \ + /* Must match the member's type, and be integer */ \ + typecheck(__virtio_native_type(structname, member), (val)) + + /* Config space accessors. */ #define virtio_cread(vdev, structname, member, ptr) \ do { \ might_sleep(); \ /* Must match the member's type, and be integer */ \ - if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \ + if (!__virtio_typecheck(structname, member, *(ptr))) \ (*ptr) = 1; \ \ switch (sizeof(*ptr)) { \ @@ -322,7 +368,7 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) do { \ might_sleep(); \ /* Must match the member's type, and be integer */ \ - if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \ + if (!__virtio_typecheck(structname, member, *(ptr))) \ BUG_ON((*ptr) == 1); \ \ switch (sizeof(*ptr)) { \ -- cgit v1.2.3 From cae19a6386c86dec8ec2810a96d7497a2eec8d38 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 10 Jul 2020 07:17:13 -0400 Subject: virtio_9p: correct tags for config space fields Tag config space fields as having virtio endian-ness. Signed-off-by: Michael S. 
Tsirkin Reviewed-by: Cornelia Huck --- include/uapi/linux/virtio_9p.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/virtio_9p.h b/include/uapi/linux/virtio_9p.h index 277c4ad44e84..441047432258 100644 --- a/include/uapi/linux/virtio_9p.h +++ b/include/uapi/linux/virtio_9p.h @@ -25,7 +25,7 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ -#include +#include #include #include @@ -36,7 +36,7 @@ struct virtio_9p_config { /* length of the tag name */ - __u16 tag_len; + __virtio16 tag_len; /* non-NULL terminated tag name */ __u8 tag[0]; } __attribute__((packed)); -- cgit v1.2.3 From c73cb10cc4421baa669c1edd8211086280273216 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 10 Jul 2020 07:17:13 -0400 Subject: virtio_balloon: correct tags for config space fields Tag config space fields as having little endian-ness. Note that balloon is special: LE even when using the legacy interface. Signed-off-by: Michael S. Tsirkin Acked-by: David Hildenbrand Reviewed-by: Cornelia Huck --- include/uapi/linux/virtio_balloon.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h index dc3e656470dd..ddaa45e723c4 100644 --- a/include/uapi/linux/virtio_balloon.h +++ b/include/uapi/linux/virtio_balloon.h @@ -45,20 +45,20 @@ #define VIRTIO_BALLOON_CMD_ID_DONE 1 struct virtio_balloon_config { /* Number of pages host wants Guest to give up. */ - __u32 num_pages; + __le32 num_pages; /* Number of pages we've actually got in balloon. */ - __u32 actual; + __le32 actual; /* * Free page hint command id, readonly by guest. * Was previously named free_page_report_cmd_id so we * need to carry that name for legacy support. */ union { - __u32 free_page_hint_cmd_id; - __u32 free_page_report_cmd_id; /* deprecated */ + __le32 free_page_hint_cmd_id; + __le32 free_page_report_cmd_id; /* deprecated */ }; /* Stores PAGE_POISON if page poisoning is in use */ - __u32 poison_val; + __le32 poison_val; }; #define VIRTIO_BALLOON_S_SWAP_IN 0 /* Amount of memory swapped in */ -- cgit v1.2.3 From 40e04c488bd6ab1859778d40bf9bb3ca294ab97b Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 10 Jul 2020 07:17:13 -0400 Subject: virtio_blk: correct tags for config space fields Tag config space fields as having virtio endian-ness. Signed-off-by: Michael S. Tsirkin Reviewed-by: Cornelia Huck Reviewed-by: Stefano Garzarella Reviewed-by: Stefano Garzarella --- include/uapi/linux/virtio_blk.h | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/virtio_blk.h b/include/uapi/linux/virtio_blk.h index 0f99d7b49ede..d888f013d9ff 100644 --- a/include/uapi/linux/virtio_blk.h +++ b/include/uapi/linux/virtio_blk.h @@ -57,20 +57,20 @@ struct virtio_blk_config { /* The capacity (in 512-byte sectors). 
*/ - __u64 capacity; + __virtio64 capacity; /* The maximum segment size (if VIRTIO_BLK_F_SIZE_MAX) */ - __u32 size_max; + __virtio32 size_max; /* The maximum number of segments (if VIRTIO_BLK_F_SEG_MAX) */ - __u32 seg_max; + __virtio32 seg_max; /* geometry of the device (if VIRTIO_BLK_F_GEOMETRY) */ struct virtio_blk_geometry { - __u16 cylinders; + __virtio16 cylinders; __u8 heads; __u8 sectors; } geometry; /* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */ - __u32 blk_size; + __virtio32 blk_size; /* the next 4 entries are guarded by VIRTIO_BLK_F_TOPOLOGY */ /* exponent for physical block per logical block. */ @@ -78,42 +78,42 @@ struct virtio_blk_config { /* alignment offset in logical blocks. */ __u8 alignment_offset; /* minimum I/O size without performance penalty in logical blocks. */ - __u16 min_io_size; + __virtio16 min_io_size; /* optimal sustained I/O size in logical blocks. */ - __u32 opt_io_size; + __virtio32 opt_io_size; /* writeback mode (if VIRTIO_BLK_F_CONFIG_WCE) */ __u8 wce; __u8 unused; /* number of vqs, only available when VIRTIO_BLK_F_MQ is set */ - __u16 num_queues; + __virtio16 num_queues; /* the next 3 entries are guarded by VIRTIO_BLK_F_DISCARD */ /* * The maximum discard sectors (in 512-byte sectors) for * one segment. */ - __u32 max_discard_sectors; + __virtio32 max_discard_sectors; /* * The maximum number of discard segments in a * discard command. */ - __u32 max_discard_seg; + __virtio32 max_discard_seg; /* Discard commands must be aligned to this number of sectors. */ - __u32 discard_sector_alignment; + __virtio32 discard_sector_alignment; /* the next 3 entries are guarded by VIRTIO_BLK_F_WRITE_ZEROES */ /* * The maximum number of write zeroes sectors (in 512-byte sectors) in * one segment. */ - __u32 max_write_zeroes_sectors; + __virtio32 max_write_zeroes_sectors; /* * The maximum number of segments in a write zeroes * command. */ - __u32 max_write_zeroes_seg; + __virtio32 max_write_zeroes_seg; /* * Set if a VIRTIO_BLK_T_WRITE_ZEROES request may result in the * deallocation of one or more of the sectors. -- cgit v1.2.3 From dbe2dc8c5838ef415620a4fe691570b062ab046f Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 10 Jul 2020 07:17:13 -0400 Subject: virtio_console: correct tags for config space fields Tag config space fields as having virtio endian-ness. Signed-off-by: Michael S. Tsirkin Reviewed-by: Cornelia Huck --- include/uapi/linux/virtio_console.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/virtio_console.h b/include/uapi/linux/virtio_console.h index b7fb108c9310..7e6ec2ff0560 100644 --- a/include/uapi/linux/virtio_console.h +++ b/include/uapi/linux/virtio_console.h @@ -45,13 +45,13 @@ struct virtio_console_config { /* colums of the screens */ - __u16 cols; + __virtio16 cols; /* rows of the screens */ - __u16 rows; + __virtio16 rows; /* max. number of ports this device can hold */ - __u32 max_nr_ports; + __virtio32 max_nr_ports; /* emergency write register */ - __u32 emerg_wr; + __virtio32 emerg_wr; } __attribute__((packed)); /* -- cgit v1.2.3 From 24bcf35b695ef5228f3035eb979cc2de571d560b Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 10 Jul 2020 07:17:13 -0400 Subject: virtio_crypto: correct tags for config space fields Since crypto is a modern-only device, tag config space fields as having little endian-ness. Signed-off-by: Michael S. 
Tsirkin Reviewed-by: Cornelia Huck --- include/uapi/linux/virtio_crypto.h | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/virtio_crypto.h b/include/uapi/linux/virtio_crypto.h index 50cdc8aebfcf..a03932f10565 100644 --- a/include/uapi/linux/virtio_crypto.h +++ b/include/uapi/linux/virtio_crypto.h @@ -414,33 +414,33 @@ struct virtio_crypto_op_data_req { struct virtio_crypto_config { /* See VIRTIO_CRYPTO_OP_* above */ - __u32 status; + __le32 status; /* * Maximum number of data queue */ - __u32 max_dataqueues; + __le32 max_dataqueues; /* * Specifies the services mask which the device support, * see VIRTIO_CRYPTO_SERVICE_* above */ - __u32 crypto_services; + __le32 crypto_services; /* Detailed algorithms mask */ - __u32 cipher_algo_l; - __u32 cipher_algo_h; - __u32 hash_algo; - __u32 mac_algo_l; - __u32 mac_algo_h; - __u32 aead_algo; + __le32 cipher_algo_l; + __le32 cipher_algo_h; + __le32 hash_algo; + __le32 mac_algo_l; + __le32 mac_algo_h; + __le32 aead_algo; /* Maximum length of cipher key */ - __u32 max_cipher_key_len; + __le32 max_cipher_key_len; /* Maximum length of authenticated key */ - __u32 max_auth_key_len; - __u32 reserve; + __le32 max_auth_key_len; + __le32 reserve; /* Maximum size of each crypto request's content */ - __u64 max_size; + __le64 max_size; }; struct virtio_crypto_inhdr { -- cgit v1.2.3 From fc4a1accbb4ef372bb55b7ab161cf88e3b631935 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 10 Jul 2020 07:17:13 -0400 Subject: virtio_fs: correct tags for config space fields Since fs is a modern-only device, tag config space fields as having little endian-ness. Signed-off-by: Michael S. Tsirkin Acked-by: Vivek Goyal Acked-by: Vivek Goyal Reviewed-by: Cornelia Huck --- include/uapi/linux/virtio_fs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/linux/virtio_fs.h b/include/uapi/linux/virtio_fs.h index b02eb2ac3d99..3056b6e9f8ce 100644 --- a/include/uapi/linux/virtio_fs.h +++ b/include/uapi/linux/virtio_fs.h @@ -13,7 +13,7 @@ struct virtio_fs_config { __u8 tag[36]; /* Number of request queues */ - __u32 num_request_queues; + __le32 num_request_queues; } __attribute__((packed)); #endif /* _UAPI_LINUX_VIRTIO_FS_H */ -- cgit v1.2.3 From f378444b7c97e39358de5d50d01fb0e92f259073 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 10 Jul 2020 07:17:13 -0400 Subject: virtio_gpu: correct tags for config space fields Since gpu is a modern-only device, tag config space fields as having little endian-ness. Signed-off-by: Michael S. Tsirkin Reviewed-by: Cornelia Huck --- include/uapi/linux/virtio_gpu.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h index 0c85914d9369..ccbd174ef321 100644 --- a/include/uapi/linux/virtio_gpu.h +++ b/include/uapi/linux/virtio_gpu.h @@ -320,10 +320,10 @@ struct virtio_gpu_resp_edid { #define VIRTIO_GPU_EVENT_DISPLAY (1 << 0) struct virtio_gpu_config { - __u32 events_read; - __u32 events_clear; - __u32 num_scanouts; - __u32 num_capsets; + __le32 events_read; + __le32 events_clear; + __le32 num_scanouts; + __le32 num_capsets; }; /* simple formats for fbcon/X use */ -- cgit v1.2.3 From 924b59a6dfa85dc9eac4c7f2fe1857bba2cb2510 Mon Sep 17 00:00:00 2001 From: "Michael S. 
Tsirkin" Date: Fri, 10 Jul 2020 07:17:13 -0400 Subject: virtio_input: correct tags for config space fields Since this is a modern-only device, tag config space fields as having little endian-ness. Signed-off-by: Michael S. Tsirkin Reviewed-by: Gerd Hoffmann Reviewed-by: Gerd Hoffmann Reviewed-by: Cornelia Huck --- include/uapi/linux/virtio_input.h | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/virtio_input.h b/include/uapi/linux/virtio_input.h index a7fe5c8fb135..52084b1fb965 100644 --- a/include/uapi/linux/virtio_input.h +++ b/include/uapi/linux/virtio_input.h @@ -40,18 +40,18 @@ enum virtio_input_config_select { }; struct virtio_input_absinfo { - __u32 min; - __u32 max; - __u32 fuzz; - __u32 flat; - __u32 res; + __le32 min; + __le32 max; + __le32 fuzz; + __le32 flat; + __le32 res; }; struct virtio_input_devids { - __u16 bustype; - __u16 vendor; - __u16 product; - __u16 version; + __le16 bustype; + __le16 vendor; + __le16 product; + __le16 version; }; struct virtio_input_config { -- cgit v1.2.3 From 0ebcffcc2731682777bab19b51a512d8f31e1bdd Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 10 Jul 2020 07:17:13 -0400 Subject: virtio_iommu: correct tags for config space fields Since this is a modern-only device, tag config space fields as having little endian-ness. Signed-off-by: Michael S. Tsirkin Reviewed-by: Jean-Philippe Brucker Reviewed-by: Jean-Philippe Brucker Reviewed-by: Cornelia Huck --- include/uapi/linux/virtio_iommu.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h index 48e3c29223b5..237e36a280cb 100644 --- a/include/uapi/linux/virtio_iommu.h +++ b/include/uapi/linux/virtio_iommu.h @@ -18,24 +18,24 @@ #define VIRTIO_IOMMU_F_MMIO 5 struct virtio_iommu_range_64 { - __u64 start; - __u64 end; + __le64 start; + __le64 end; }; struct virtio_iommu_range_32 { - __u32 start; - __u32 end; + __le32 start; + __le32 end; }; struct virtio_iommu_config { /* Supported page sizes */ - __u64 page_size_mask; + __le64 page_size_mask; /* Supported IOVA range */ struct virtio_iommu_range_64 input_range; /* Max domain ID size */ struct virtio_iommu_range_32 domain_range; /* Probe buffer size */ - __u32 probe_size; + __le32 probe_size; }; /* Request types */ -- cgit v1.2.3 From 79268954424771185fb4ca304786dd561a272246 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 10 Jul 2020 07:17:13 -0400 Subject: virtio_mem: correct tags for config space fields Since this is a modern-only device, tag config space fields as having little endian-ness. TODO: check other uses of __virtioXX types in this header, should probably be __leXX. Signed-off-by: Michael S. Tsirkin Acked-by: David Hildenbrand Reviewed-by: Cornelia Huck --- include/uapi/linux/virtio_mem.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/virtio_mem.h b/include/uapi/linux/virtio_mem.h index a9ffe041843c..70e01c687d5e 100644 --- a/include/uapi/linux/virtio_mem.h +++ b/include/uapi/linux/virtio_mem.h @@ -185,27 +185,27 @@ struct virtio_mem_resp { struct virtio_mem_config { /* Block size and alignment. Cannot change. */ - __u64 block_size; + __le64 block_size; /* Valid with VIRTIO_MEM_F_ACPI_PXM. Cannot change. */ - __u16 node_id; + __le16 node_id; __u8 padding[6]; /* Start address of the memory region. Cannot change. 
*/ - __u64 addr; + __le64 addr; /* Region size (maximum). Cannot change. */ - __u64 region_size; + __le64 region_size; /* * Currently usable region size. Can grow up to region_size. Can * shrink due to VIRTIO_MEM_REQ_UNPLUG_ALL (in which case no config * update will be sent). */ - __u64 usable_region_size; + __le64 usable_region_size; /* * Currently used size. Changes due to plug/unplug requests, but no * config updates will be sent. */ - __u64 plugged_size; + __le64 plugged_size; /* Requested size. New plug requests cannot exceed it. Can change. */ - __u64 requested_size; + __le64 requested_size; }; #endif /* _LINUX_VIRTIO_MEM_H */ -- cgit v1.2.3 From 577e677a785357542311a645eeb1756cd83988be Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 10 Jul 2020 07:17:13 -0400 Subject: virtio_net: correct tags for config space fields Tag config space fields as having virtio endian-ness. Signed-off-by: Michael S. Tsirkin --- include/uapi/linux/virtio_net.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h index 19d23e5baa4e..27d996f29dd1 100644 --- a/include/uapi/linux/virtio_net.h +++ b/include/uapi/linux/virtio_net.h @@ -87,19 +87,19 @@ struct virtio_net_config { /* The config defining mac address (if VIRTIO_NET_F_MAC) */ __u8 mac[ETH_ALEN]; /* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */ - __u16 status; + __virtio16 status; /* Maximum number of each of transmit and receive queues; * see VIRTIO_NET_F_MQ and VIRTIO_NET_CTRL_MQ. * Legal values are between 1 and 0x8000 */ - __u16 max_virtqueue_pairs; + __virtio16 max_virtqueue_pairs; /* Default maximum transmit unit advice */ - __u16 mtu; + __virtio16 mtu; /* * speed, in units of 1Mb. All values 0 to INT_MAX are legal. * Any other value stands for unknown. */ - __u32 speed; + __virtio32 speed; /* * 0x00 - half duplex * 0x01 - full duplex -- cgit v1.2.3 From a28feb855cc0ad452acd3dfe2b8f2841927da5f1 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 10 Jul 2020 07:17:13 -0400 Subject: virtio_pmem: correct tags for config space fields Since this is a modern-only device, tag config space fields as having little endian-ness. Signed-off-by: Michael S. Tsirkin Reviewed-by: Cornelia Huck --- include/uapi/linux/virtio_pmem.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/virtio_pmem.h b/include/uapi/linux/virtio_pmem.h index b022787ffb94..d676b3620383 100644 --- a/include/uapi/linux/virtio_pmem.h +++ b/include/uapi/linux/virtio_pmem.h @@ -15,8 +15,8 @@ #include struct virtio_pmem_config { - __u64 start; - __u64 size; + __le64 start; + __le64 size; }; #define VIRTIO_PMEM_REQ_TYPE_FLUSH 0 -- cgit v1.2.3 From 965b5350514b597dc6347b733127e180844aeb43 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 10 Jul 2020 07:17:13 -0400 Subject: virtio_scsi: correct tags for config space fields Tag config space fields as having virtio endian-ness. Signed-off-by: Michael S. 
Tsirkin Reviewed-by: Cornelia Huck --- drivers/scsi/virtio_scsi.c | 4 ++-- include/uapi/linux/virtio_scsi.h | 20 ++++++++++---------- 2 files changed, 12 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 0e0910c5b942..c36aeb9a1330 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -746,14 +746,14 @@ static struct scsi_host_template virtscsi_host_template = { #define virtscsi_config_get(vdev, fld) \ ({ \ - typeof(((struct virtio_scsi_config *)0)->fld) __val; \ + __virtio_native_type(struct virtio_scsi_config, fld) __val; \ virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \ __val; \ }) #define virtscsi_config_set(vdev, fld, val) \ do { \ - typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \ + __virtio_native_type(struct virtio_scsi_config, fld) __val = (val); \ virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \ } while(0) diff --git a/include/uapi/linux/virtio_scsi.h b/include/uapi/linux/virtio_scsi.h index cc18ef8825c0..0abaae4027c0 100644 --- a/include/uapi/linux/virtio_scsi.h +++ b/include/uapi/linux/virtio_scsi.h @@ -103,16 +103,16 @@ struct virtio_scsi_event { } __attribute__((packed)); struct virtio_scsi_config { - __u32 num_queues; - __u32 seg_max; - __u32 max_sectors; - __u32 cmd_per_lun; - __u32 event_info_size; - __u32 sense_size; - __u32 cdb_size; - __u16 max_channel; - __u16 max_target; - __u32 max_lun; + __virtio32 num_queues; + __virtio32 seg_max; + __virtio32 max_sectors; + __virtio32 cmd_per_lun; + __virtio32 event_info_size; + __virtio32 sense_size; + __virtio32 cdb_size; + __virtio16 max_channel; + __virtio16 max_target; + __virtio32 max_lun; } __attribute__((packed)); /* Feature Bits */ -- cgit v1.2.3 From 4a04cfb0eb5e00432f9ff978f2b81bd1736e85db Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 10 Jul 2020 07:55:52 -0400 Subject: virtio_config: disallow native type fields Transitional devices should all use __virtioXX types (and __leXX for fields not present in legacy devices). Modern ones should use __leXX. _uXX type would be a bug. Let's prevent that. Signed-off-by: Michael S. Tsirkin --- include/linux/virtio_config.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 64da491936f7..c68f58f3bf34 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -319,9 +319,8 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) __virtio_pick_type(x, __u8, __u8, \ __virtio_pick_endian(x, __virtio16, __virtio32, __virtio64, \ __virtio_pick_endian(x, __le16, __le32, __le64, \ - __virtio_pick_endian(x, __u16, __u32, __u64, \ - /* No other type allowed */ \ - (void)0))))) + /* No other type allowed */ \ + (void)0)))) #endif -- cgit v1.2.3 From 452639a64ad880792652b6d20cc5c8dd4ecf27d9 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 27 Jul 2020 10:51:55 -0400 Subject: vdpa: make sure set_features is invoked for legacy Some legacy guests just assume features are 0 after reset. We detect that config space is accessed before features are set and set features to 0 automatically. Note: some legacy guests might not even access config space, if this is reported in the field we might need to catch a kick to handle these. Signed-off-by: Michael S. 
Tsirkin --- drivers/vdpa/vdpa.c | 1 + include/linux/vdpa.h | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+) (limited to 'include') diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c index de211ef3738c..7105265e4793 100644 --- a/drivers/vdpa/vdpa.c +++ b/drivers/vdpa/vdpa.c @@ -96,6 +96,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent, vdev->dev.release = vdpa_release_dev; vdev->index = err; vdev->config = config; + vdev->features_valid = false; err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index); if (err) diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 239db794357c..29b8296f1414 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -33,12 +33,14 @@ struct vdpa_notification_area { * @dma_dev: the actual device that is performing DMA * @config: the configuration ops for this device. * @index: device index + * @features_valid: were features initialized? for legacy guests */ struct vdpa_device { struct device dev; struct device *dma_dev; const struct vdpa_config_ops *config; unsigned int index; + bool features_valid; }; /** @@ -266,4 +268,36 @@ static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev) { return vdev->dma_dev; } + +static inline void vdpa_reset(struct vdpa_device *vdev) +{ + const struct vdpa_config_ops *ops = vdev->config; + + vdev->features_valid = false; + ops->set_status(vdev, 0); +} + +static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features) +{ + const struct vdpa_config_ops *ops = vdev->config; + + vdev->features_valid = true; + return ops->set_features(vdev, features); +} + + +static inline void vdpa_get_config(struct vdpa_device *vdev, unsigned offset, + void *buf, unsigned int len) +{ + const struct vdpa_config_ops *ops = vdev->config; + + /* + * Config accesses aren't supposed to trigger before features are set. + * If it does happen we assume a legacy guest. + */ + if (!vdev->features_valid) + vdpa_set_features(vdev, 0); + ops->get_config(vdev, offset, buf, len); +} + #endif /* _LINUX_VDPA_H */ -- cgit v1.2.3 From cacaf775c699e9e8473491197587535f1c10ac8f Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Thu, 30 Jul 2020 16:12:40 -0400 Subject: virtio_config: cread/write cleanup Use vars of the correct type instead of casting. Signed-off-by: Michael S. 
Tsirkin --- include/linux/virtio_config.h | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index c68f58f3bf34..5c3b02245ecd 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -444,53 +444,60 @@ static inline void virtio_cwrite8(struct virtio_device *vdev, static inline u16 virtio_cread16(struct virtio_device *vdev, unsigned int offset) { - u16 ret; + __virtio16 ret; might_sleep(); vdev->config->get(vdev, offset, &ret, sizeof(ret)); - return virtio16_to_cpu(vdev, (__force __virtio16)ret); + return virtio16_to_cpu(vdev, ret); } static inline void virtio_cwrite16(struct virtio_device *vdev, unsigned int offset, u16 val) { + __virtio16 v; + might_sleep(); - val = (__force u16)cpu_to_virtio16(vdev, val); - vdev->config->set(vdev, offset, &val, sizeof(val)); + v = cpu_to_virtio16(vdev, val); + vdev->config->set(vdev, offset, &v, sizeof(v)); } static inline u32 virtio_cread32(struct virtio_device *vdev, unsigned int offset) { - u32 ret; + __virtio32 ret; might_sleep(); vdev->config->get(vdev, offset, &ret, sizeof(ret)); - return virtio32_to_cpu(vdev, (__force __virtio32)ret); + return virtio32_to_cpu(vdev, ret); } static inline void virtio_cwrite32(struct virtio_device *vdev, unsigned int offset, u32 val) { + __virtio32 v; + might_sleep(); - val = (__force u32)cpu_to_virtio32(vdev, val); - vdev->config->set(vdev, offset, &val, sizeof(val)); + v = cpu_to_virtio32(vdev, val); + vdev->config->set(vdev, offset, &v, sizeof(v)); } static inline u64 virtio_cread64(struct virtio_device *vdev, unsigned int offset) { - u64 ret; + __virtio64 ret; + __virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret)); - return virtio64_to_cpu(vdev, (__force __virtio64)ret); + return virtio64_to_cpu(vdev, ret); } static inline void virtio_cwrite64(struct virtio_device *vdev, unsigned int offset, u64 val) { + __virtio64 v; + might_sleep(); - val = (__force u64)cpu_to_virtio64(vdev, val); - vdev->config->set(vdev, offset, &val, sizeof(val)); + v = cpu_to_virtio64(vdev, val); + vdev->config->set(vdev, offset, &v, sizeof(v)); } /* Conditional config space accessors. */ -- cgit v1.2.3 From a5b90f2db8e0ef6504695cbd36a65fd8296338ee Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 3 Aug 2020 16:08:11 -0400 Subject: virtio_config: rewrite using _Generic Min compiler version has been raised, so that's ok now. Signed-off-by: Michael S. Tsirkin --- include/linux/virtio_config.h | 161 ++++++++++++++++++++---------------------- 1 file changed, 76 insertions(+), 85 deletions(-) (limited to 'include') diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 5c3b02245ecd..7fa000f02721 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -288,112 +288,103 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) return __cpu_to_virtio64(virtio_is_little_endian(vdev), val); } -/* - * Only the checker differentiates between __virtioXX and __uXX types. But we - * try to share as much code as we can with the regular GCC build. - */ -#if !defined(CONFIG_CC_IS_GCC) && !defined(__CHECKER__) - -/* Not a checker - we can keep things simple */ -#define __virtio_native_typeof(x) typeof(x) - -#else - -/* - * We build this out of a couple of helper macros in a vain attempt to - * help you keep your lunch down while reading it. 
- */ -#define __virtio_pick_value(x, type, then, otherwise) \ - __builtin_choose_expr(__same_type(x, type), then, otherwise) - -#define __virtio_pick_type(x, type, then, otherwise) \ - __virtio_pick_value(x, type, (then)0, otherwise) - -#define __virtio_pick_endian(x, x16, x32, x64, otherwise) \ - __virtio_pick_type(x, x16, __u16, \ - __virtio_pick_type(x, x32, __u32, \ - __virtio_pick_type(x, x64, __u64, \ - otherwise))) - -#define __virtio_native_typeof(x) typeof( \ - __virtio_pick_type(x, __u8, __u8, \ - __virtio_pick_endian(x, __virtio16, __virtio32, __virtio64, \ - __virtio_pick_endian(x, __le16, __le32, __le64, \ - /* No other type allowed */ \ - (void)0)))) - -#endif +#define virtio_to_cpu(vdev, x) \ + _Generic((x), \ + __u8: (x), \ + __virtio16: virtio16_to_cpu((vdev), (x)), \ + __virtio32: virtio32_to_cpu((vdev), (x)), \ + __virtio64: virtio64_to_cpu((vdev), (x)), \ + /* + * Why define a default? checker can distinguish between + * e.g. __u16, __le16 and __virtio16, but GCC can't so + * attempts to define variants for both look like a duplicate + * variant to it. + */ \ + default: _Generic((x), \ + __u8: (x), \ + __le16: virtio16_to_cpu((vdev), (__force __virtio16)(x)), \ + __le32: virtio32_to_cpu((vdev), (__force __virtio32)(x)), \ + __le64: virtio64_to_cpu((vdev), (__force __virtio64)(x)), \ + default: _Generic((x), \ + __u8: (x), \ + __u16: virtio16_to_cpu((vdev), (__force __virtio16)(x)), \ + __u32: virtio32_to_cpu((vdev), (__force __virtio32)(x)), \ + __u64: virtio64_to_cpu((vdev), (__force __virtio64)(x)) \ + ) \ + ) \ + ) + +#define cpu_to_virtio(vdev, x, m) \ + _Generic((m), \ + __u8: (x), \ + __virtio16: cpu_to_virtio16((vdev), (x)), \ + __virtio32: cpu_to_virtio32((vdev), (x)), \ + __virtio64: cpu_to_virtio64((vdev), (x)), \ + /* + * Why define a default? checker can distinguish between + * e.g. __u16, __le16 and __virtio16, but GCC can't so + * attempts to define variants for both look like a duplicate + * variant to it. + */ \ + default: _Generic((m), \ + __u8: (x), \ + __le16: (__force __le16)cpu_to_virtio16((vdev), (x)), \ + __le32: (__force __le32)cpu_to_virtio32((vdev), (x)), \ + __le64: (__force __le64)cpu_to_virtio64((vdev), (x)), \ + default: _Generic((m), \ + __u8: (x), \ + __u16: (__force __u16)cpu_to_virtio16((vdev), (x)), \ + __u32: (__force __u32)cpu_to_virtio32((vdev), (x)), \ + __u64: (__force __u64)cpu_to_virtio64((vdev), (x)) \ + ) \ + ) \ + ) #define __virtio_native_type(structname, member) \ - __virtio_native_typeof(((structname*)0)->member) - -#define __virtio_typecheck(structname, member, val) \ - /* Must match the member's type, and be integer */ \ - typecheck(__virtio_native_type(structname, member), (val)) - + typeof(virtio_to_cpu(NULL, ((structname*)0)->member)) /* Config space accessors. 
*/ #define virtio_cread(vdev, structname, member, ptr) \ do { \ + typeof(((structname*)0)->member) virtio_cread_v; \ + \ might_sleep(); \ - /* Must match the member's type, and be integer */ \ - if (!__virtio_typecheck(structname, member, *(ptr))) \ - (*ptr) = 1; \ + /* Sanity check: must match the member's type */ \ + typecheck(typeof(virtio_to_cpu((vdev), virtio_cread_v)), *(ptr)); \ \ - switch (sizeof(*ptr)) { \ + switch (sizeof(virtio_cread_v)) { \ case 1: \ - *(ptr) = virtio_cread8(vdev, \ - offsetof(structname, member)); \ - break; \ case 2: \ - *(ptr) = virtio_cread16(vdev, \ - offsetof(structname, member)); \ - break; \ case 4: \ - *(ptr) = virtio_cread32(vdev, \ - offsetof(structname, member)); \ - break; \ - case 8: \ - *(ptr) = virtio_cread64(vdev, \ - offsetof(structname, member)); \ + vdev->config->get((vdev), \ + offsetof(structname, member), \ + &virtio_cread_v, \ + sizeof(virtio_cread_v)); \ break; \ default: \ - BUG(); \ + __virtio_cread_many((vdev), \ + offsetof(structname, member), \ + &virtio_cread_v, \ + 1, \ + sizeof(virtio_cread_v)); \ + break; \ } \ + *(ptr) = virtio_to_cpu(vdev, virtio_cread_v); \ } while(0) /* Config space accessors. */ #define virtio_cwrite(vdev, structname, member, ptr) \ do { \ + typeof(((structname*)0)->member) virtio_cwrite_v = \ + cpu_to_virtio(vdev, *(ptr), ((structname*)0)->member); \ + \ might_sleep(); \ - /* Must match the member's type, and be integer */ \ - if (!__virtio_typecheck(structname, member, *(ptr))) \ - BUG_ON((*ptr) == 1); \ + /* Sanity check: must match the member's type */ \ + typecheck(typeof(virtio_to_cpu((vdev), virtio_cwrite_v)), *(ptr)); \ \ - switch (sizeof(*ptr)) { \ - case 1: \ - virtio_cwrite8(vdev, \ - offsetof(structname, member), \ - *(ptr)); \ - break; \ - case 2: \ - virtio_cwrite16(vdev, \ - offsetof(structname, member), \ - *(ptr)); \ - break; \ - case 4: \ - virtio_cwrite32(vdev, \ - offsetof(structname, member), \ - *(ptr)); \ - break; \ - case 8: \ - virtio_cwrite64(vdev, \ - offsetof(structname, member), \ - *(ptr)); \ - break; \ - default: \ - BUG(); \ - } \ + vdev->config->set((vdev), offsetof(structname, member), \ + &virtio_cwrite_v, \ + sizeof(virtio_cwrite_v)); \ } while(0) /* Read @count fields, @bytes each. */ -- cgit v1.2.3 From 14191c15ab9d87e60d2ebbfbf6df83d546152af1 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 10 Jul 2020 07:55:52 -0400 Subject: virtio_config: disallow native type fields (again) _Generic version allowed __uXX types but that is no longer necessary: Transitional devices should all use __virtioXX types (and __leXX for fields not present in the legacy devices). Modern ones should use __leXX. _uXX type would be a bug. Let's prevent that. Signed-off-by: Michael S. 
Tsirkin --- include/linux/virtio_config.h | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 7fa000f02721..441fd6dd42ab 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -304,13 +304,7 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) __u8: (x), \ __le16: virtio16_to_cpu((vdev), (__force __virtio16)(x)), \ __le32: virtio32_to_cpu((vdev), (__force __virtio32)(x)), \ - __le64: virtio64_to_cpu((vdev), (__force __virtio64)(x)), \ - default: _Generic((x), \ - __u8: (x), \ - __u16: virtio16_to_cpu((vdev), (__force __virtio16)(x)), \ - __u32: virtio32_to_cpu((vdev), (__force __virtio32)(x)), \ - __u64: virtio64_to_cpu((vdev), (__force __virtio64)(x)) \ - ) \ + __le64: virtio64_to_cpu((vdev), (__force __virtio64)(x)) \ ) \ ) @@ -330,13 +324,7 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) __u8: (x), \ __le16: (__force __le16)cpu_to_virtio16((vdev), (x)), \ __le32: (__force __le32)cpu_to_virtio32((vdev), (x)), \ - __le64: (__force __le64)cpu_to_virtio64((vdev), (x)), \ - default: _Generic((m), \ - __u8: (x), \ - __u16: (__force __u16)cpu_to_virtio16((vdev), (x)), \ - __u32: (__force __u32)cpu_to_virtio32((vdev), (x)), \ - __u64: (__force __u64)cpu_to_virtio64((vdev), (x)) \ - ) \ + __le64: (__force __le64)cpu_to_virtio64((vdev), (x)) \ ) \ ) -- cgit v1.2.3 From e598960ff5e511b76a0eb8dff25207d35c2442c8 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 4 Aug 2020 17:33:08 -0400 Subject: virtio_config: LE config space accessors To be used by modern code, as well as to handle LE only fields such as balloon. Signed-off-by: Michael S. Tsirkin --- include/linux/virtio_config.h | 65 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) (limited to 'include') diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 441fd6dd42ab..5b5196fec899 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -375,6 +375,71 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) sizeof(virtio_cwrite_v)); \ } while(0) +/* + * Nothing virtio-specific about these, but let's worry about generalizing + * these later. + */ +#define virtio_le_to_cpu(x) \ + _Generic((x), \ + __u8: (x), \ + __le16: le16_to_cpu(x), \ + __le32: le32_to_cpu(x), \ + __le64: le64_to_cpu(x) \ + ) + +#define virtio_cpu_to_le(x, m) \ + _Generic((m), \ + __u8: (x), \ + __le16: cpu_to_le16(x), \ + __le32: cpu_to_le32(x), \ + __le64: cpu_to_le64(x) \ + ) + +/* LE (e.g. modern) Config space accessors. */ +#define virtio_cread_le(vdev, structname, member, ptr) \ + do { \ + typeof(((structname*)0)->member) virtio_cread_v; \ + \ + might_sleep(); \ + /* Sanity check: must match the member's type */ \ + typecheck(typeof(virtio_le_to_cpu(virtio_cread_v)), *(ptr)); \ + \ + switch (sizeof(virtio_cread_v)) { \ + case 1: \ + case 2: \ + case 4: \ + vdev->config->get((vdev), \ + offsetof(structname, member), \ + &virtio_cread_v, \ + sizeof(virtio_cread_v)); \ + break; \ + default: \ + __virtio_cread_many((vdev), \ + offsetof(structname, member), \ + &virtio_cread_v, \ + 1, \ + sizeof(virtio_cread_v)); \ + break; \ + } \ + *(ptr) = virtio_le_to_cpu(virtio_cread_v); \ + } while(0) + +/* Config space accessors. 
*/ +#define virtio_cwrite_le(vdev, structname, member, ptr) \ + do { \ + typeof(((structname*)0)->member) virtio_cwrite_v = \ + virtio_cpu_to_le(*(ptr), ((structname*)0)->member); \ + \ + might_sleep(); \ + /* Sanity check: must match the member's type */ \ + typecheck(typeof(virtio_le_to_cpu(virtio_cwrite_v)), *(ptr)); \ + \ + vdev->config->set((vdev), offsetof(structname, member), \ + &virtio_cwrite_v, \ + sizeof(virtio_cwrite_v)); \ + } while(0) + + /* Read @count fields, @bytes each. */ static inline void __virtio_cread_many(struct virtio_device *vdev, unsigned int offset, -- cgit v1.2.3 From e3e7994d53082e48d2a6a248376683d3be3dff9d Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 4 Aug 2020 18:02:39 -0400 Subject: virtio_caif: correct tags for config space fields Tag config space fields as having virtio endian-ness. Signed-off-by: Michael S. Tsirkin --- include/linux/virtio_caif.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/virtio_caif.h b/include/linux/virtio_caif.h index 5d2d3124ca3d..ea722479510c 100644 --- a/include/linux/virtio_caif.h +++ b/include/linux/virtio_caif.h @@ -11,9 +11,9 @@ #include struct virtio_caif_transf_config { - u16 headroom; - u16 tailroom; - u32 mtu; + __virtio16 headroom; + __virtio16 tailroom; + __virtio32 mtu; u8 reserved[4]; }; -- cgit v1.2.3 From 035ce4210be1257dd417785ff7818b5c0f2205fb Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Wed, 5 Aug 2020 09:17:38 -0400 Subject: virtio_config: add virtio_cread_le_feature Mirrors virtio_cread_feature but for LE fields. Signed-off-by: Michael S. Tsirkin --- include/linux/virtio_config.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include') diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 5b5196fec899..cc7a2b1fd7b2 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -555,4 +555,14 @@ static inline void virtio_cwrite64(struct virtio_device *vdev, _r; \ }) +/* Conditional config space accessors. */ +#define virtio_cread_le_feature(vdev, fbit, structname, member, ptr) \ + ({ \ + int _r = 0; \ + if (!virtio_has_feature(vdev, fbit)) \ + _r = -ENOENT; \ + else \ + virtio_cread_le((vdev), structname, member, ptr); \ + _r; \ + }) #endif /* _LINUX_VIRTIO_CONFIG_H */ -- cgit v1.2.3 From 83eb9db95eb453f1db651909ad4598c3d44ef1e1 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Wed, 5 Aug 2020 07:29:12 -0400 Subject: virtio_config: drop LE option from config space All drivers now use virtio_cread/write_le for LE config space fields. Drop LE option from virtio_cread/write, only leaving the option to access transitional fields. Signed-off-by: Michael S. Tsirkin --- include/linux/virtio_config.h | 28 ++-------------------------- 1 file changed, 2 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index cc7a2b1fd7b2..ecb166c824bb 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -293,19 +293,7 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) __u8: (x), \ __virtio16: virtio16_to_cpu((vdev), (x)), \ __virtio32: virtio32_to_cpu((vdev), (x)), \ - __virtio64: virtio64_to_cpu((vdev), (x)), \ - /* - * Why define a default? checker can distinguish between - * e.g. __u16, __le16 and __virtio16, but GCC can't so - * attempts to define variants for both look like a duplicate - * variant to it. 
- */ \ - default: _Generic((x), \ - __u8: (x), \ - __le16: virtio16_to_cpu((vdev), (__force __virtio16)(x)), \ - __le32: virtio32_to_cpu((vdev), (__force __virtio32)(x)), \ - __le64: virtio64_to_cpu((vdev), (__force __virtio64)(x)) \ - ) \ + __virtio64: virtio64_to_cpu((vdev), (x)) \ ) #define cpu_to_virtio(vdev, x, m) \ @@ -313,19 +301,7 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) __u8: (x), \ __virtio16: cpu_to_virtio16((vdev), (x)), \ __virtio32: cpu_to_virtio32((vdev), (x)), \ - __virtio64: cpu_to_virtio64((vdev), (x)), \ - /* - * Why define a default? checker can distinguish between - * e.g. __u16, __le16 and __virtio16, but GCC can't so - * attempts to define variants for both look like a duplicate - * variant to it. - */ \ - default: _Generic((m), \ - __u8: (x), \ - __le16: (__force __le16)cpu_to_virtio16((vdev), (x)), \ - __le32: (__force __le32)cpu_to_virtio32((vdev), (x)), \ - __le64: (__force __le64)cpu_to_virtio64((vdev), (x)) \ - ) \ + __virtio64: cpu_to_virtio64((vdev), (x)) \ ) #define __virtio_native_type(structname, member) \ -- cgit v1.2.3 From 64ffa39dc860fb9772225c694353f73eca5801c6 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Wed, 5 Aug 2020 05:39:36 -0400 Subject: virtio_net: use LE accessors for speed/duplex Speed and duplex config fields depend on VIRTIO_NET_F_SPEED_DUPLEX which being 63>31 depends on VIRTIO_F_VERSION_1. Accordingly, use LE accessors for these fields. Reported-by: Cornelia Huck Signed-off-by: Michael S. Tsirkin --- drivers/net/virtio_net.c | 9 +++++---- include/uapi/linux/virtio_net.h | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index ba38765dc490..0934b1ec5320 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -2264,12 +2264,13 @@ static void virtnet_update_settings(struct virtnet_info *vi) if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) return; - speed = virtio_cread32(vi->vdev, offsetof(struct virtio_net_config, - speed)); + virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed); + if (ethtool_validate_speed(speed)) vi->speed = speed; - duplex = virtio_cread8(vi->vdev, offsetof(struct virtio_net_config, - duplex)); + + virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex); + if (ethtool_validate_duplex(duplex)) vi->duplex = duplex; } diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h index 27d996f29dd1..3f55a4215f11 100644 --- a/include/uapi/linux/virtio_net.h +++ b/include/uapi/linux/virtio_net.h @@ -99,7 +99,7 @@ struct virtio_net_config { * speed, in units of 1Mb. All values 0 to INT_MAX are legal. * Any other value stands for unknown. */ - __virtio32 speed; + __le32 speed; /* * 0x00 - half duplex * 0x01 - full duplex -- cgit v1.2.3 From 7164675ab5caf46867d6a5448f4ff3af92d80a30 Mon Sep 17 00:00:00 2001 From: Zhu Lingshan Date: Fri, 31 Jul 2020 14:55:30 +0800 Subject: vDPA: add get_vq_irq() in vdpa_config_ops This commit adds a new function get_vq_irq() in struct vdpa_config_ops, which will return the irq number of a virtqueue. Signed-off-by: Zhu Lingshan Suggested-by: Jason Wang Link: https://lore.kernel.org/r/20200731065533.4144-4-lingshan.zhu@intel.com Signed-off-by: Michael S. 
Tsirkin --- include/linux/vdpa.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include') diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 29b8296f1414..5c530a64aa06 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -89,6 +89,12 @@ struct vdpa_device { * @vdev: vdpa device * @idx: virtqueue index * Returns the notifcation area + * @get_vq_irq: Get the irq number of a virtqueue (optional, + * but must implemented if require vq irq offloading) + * @vdev: vdpa device + * @idx: virtqueue index + * Returns int: irq number of a virtqueue, + * negative number if no irq assigned. * @get_vq_align: Get the virtqueue align requirement * for the device * @vdev: vdpa device @@ -180,6 +186,7 @@ struct vdpa_config_ops { u64 (*get_vq_state)(struct vdpa_device *vdev, u16 idx); struct vdpa_notification_area (*get_vq_notification)(struct vdpa_device *vdev, u16 idx); + int (*get_vq_irq)(struct vdpa_device *vdv, u16 idx); /* Device ops */ u32 (*get_vq_align)(struct vdpa_device *vdev); -- cgit v1.2.3 From 4c05433bc6fb4ae172270f0279be8ba89a3da64f Mon Sep 17 00:00:00 2001 From: Zhu Lingshan Date: Tue, 4 Aug 2020 18:21:23 +0800 Subject: vDPA: dont change vq irq after DRIVER_OK IRQ of a vq is not expected to be changed in a DRIVER_OK ~ !DRIVER_OK period for irq offloading purposes. Place this comment at the side of bus ops get_vq_irq than in set_status in vhost_vdpa. Signed-off-by: Zhu Lingshan Link: https://lore.kernel.org/r/20200804102123.69978-1-lingshan.zhu@intel.com Signed-off-by: Michael S. Tsirkin --- drivers/vhost/vdpa.c | 1 - include/linux/vdpa.h | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 7441b9803eae..f8f8c9cf05b0 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -172,7 +172,6 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp) ops->set_status(vdpa, status); - /* vq irq is not expected to be changed once DRIVER_OK is set */ if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) for (i = 0; i < nvqs; i++) vhost_vdpa_setup_vq_irq(v, i); diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 5c530a64aa06..565298cb45d2 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -186,6 +186,7 @@ struct vdpa_config_ops { u64 (*get_vq_state)(struct vdpa_device *vdev, u16 idx); struct vdpa_notification_area (*get_vq_notification)(struct vdpa_device *vdev, u16 idx); + /* vq irq is not expected to be changed once DRIVER_OK is set */ int (*get_vq_irq)(struct vdpa_device *vdv, u16 idx); /* Device ops */ -- cgit v1.2.3 From 923a3a863ae0c26876d704fb3453069e11ebdcb6 Mon Sep 17 00:00:00 2001 From: Michael Shych Date: Mon, 4 May 2020 17:14:24 +0300 Subject: platform_data/mlxreg: support new watchdog type with longer timeout period Add new watchdog type 3 with longer timeout period. Extend size of health_cntr field that that can be used to init watchdog timeout period. 
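A minimal sketch of how platform data might describe such a watchdog. The register offset and the driver-side type selection below are illustrative assumptions, not taken from this patch; only the widened health_cntr field and the MLX_WDT_TYPE3 enumerator come from the hunk itself.

#include <linux/platform_data/mlxreg.h>

/*
 * Sketch: a CPLD watchdog entry whose timeout (in seconds) exceeds what
 * the old u8 health_cntr could hold. The register offset is made up.
 */
static struct mlxreg_core_data example_wd_regs[] = {
	{
		.reg = 0x30,
		.health_cntr = 600,	/* 10 min; needs the widened u32 field */
	},
};

/*
 * Type selection, assuming the platform driver has already read the WD
 * capability bit into wd_cap_bit (driver wiring is hypothetical).
 */
static enum mlxreg_wdt_type example_wd_type(bool wd_cap_bit)
{
	return wd_cap_bit ? MLX_WDT_TYPE3 : MLX_WDT_TYPE2;
}
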
Signed-off-by: Michael Shych Reviewed-by: Vadim Pasternak Acked-by: Andy Shevchenko Link: https://lore.kernel.org/r/20200504141427.17685-2-michaelsh@mellanox.com Signed-off-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck --- include/linux/platform_data/mlxreg.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h index b8da8aef2446..2c5e58d1d77b 100644 --- a/include/linux/platform_data/mlxreg.h +++ b/include/linux/platform_data/mlxreg.h @@ -43,10 +43,13 @@ * * TYPE1 HW watchdog implementation exist in old systems. * All new systems have TYPE2 HW watchdog. + * TYPE3 HW watchdog can exist on all systems with new CPLD. + * TYPE3 is selected by WD capability bit. */ enum mlxreg_wdt_type { MLX_WDT_TYPE1, MLX_WDT_TYPE2, + MLX_WDT_TYPE3, }; /** @@ -90,7 +93,7 @@ struct mlxreg_core_data { umode_t mode; struct device_node *np; struct mlxreg_hotplug_device hpdev; - u8 health_cntr; + u32 health_cntr; bool attached; }; -- cgit v1.2.3 From cef9572e9af373cefd1adc9e771e89670da5da3c Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Fri, 17 Jul 2020 16:29:56 +0300 Subject: watchdog: add support for adjusting last known HW keepalive time Certain watchdogs require the watchdog only to be pinged within a specific time window, pinging too early or too late cause the watchdog to fire. In cases where this sort of watchdog has been started before kernel comes up, we must adjust the watchdog keepalive window to match the actually running timer, so add a new driver API for this purpose. Signed-off-by: Tero Kristo Reviewed-by: Guenter Roeck Link: https://lore.kernel.org/r/20200717132958.14304-3-t-kristo@ti.com Signed-off-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck --- Documentation/watchdog/watchdog-kernel-api.rst | 12 +++++++++++ drivers/watchdog/watchdog_dev.c | 30 ++++++++++++++++++++++++++ include/linux/watchdog.h | 2 ++ 3 files changed, 44 insertions(+) (limited to 'include') diff --git a/Documentation/watchdog/watchdog-kernel-api.rst b/Documentation/watchdog/watchdog-kernel-api.rst index 068a55ee0d4a..baf44e986b07 100644 --- a/Documentation/watchdog/watchdog-kernel-api.rst +++ b/Documentation/watchdog/watchdog-kernel-api.rst @@ -336,3 +336,15 @@ an action is taken by a preconfigured pretimeout governor preassigned to the watchdog device. If watchdog pretimeout governor framework is not enabled, watchdog_notify_pretimeout() prints a notification message to the kernel log buffer. + +To set the last known HW keepalive time for a watchdog, the following function +should be used:: + + int watchdog_set_last_hw_keepalive(struct watchdog_device *wdd, + unsigned int last_ping_ms) + +This function must be called immediately after watchdog registration. It +sets the last known hardware heartbeat to have happened last_ping_ms before +current time. Calling this is only needed if the watchdog is already running +when probe is called, and the watchdog can only be pinged after the +min_hw_heartbeat_ms time has passed from the last ping. 
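A minimal sketch of how a driver could use the new helper, assuming a hypothetical example_wdt device whose hardware may already have been started by the bootloader. The example_* names and the hardware-query stubs are assumptions; only watchdog_set_last_hw_keepalive(), devm_watchdog_register_device() and min_hw_heartbeat_ms are taken from this patch and the existing watchdog core.

#include <linux/platform_device.h>
#include <linux/watchdog.h>

static struct watchdog_device example_wdd;	/* .info/.ops set up as usual (omitted) */

/* Stand-in: a real driver would read a HW status register here. */
static bool example_wdt_hw_running(struct platform_device *pdev)
{
	return true;
}

/* Stand-in: a real driver would derive this from a HW counter. */
static unsigned int example_wdt_ms_since_ping(struct platform_device *pdev)
{
	return 100;
}

static int example_wdt_probe(struct platform_device *pdev)
{
	int ret;

	example_wdd.min_hw_heartbeat_ms = 500;	/* HW rejects earlier pings */

	ret = devm_watchdog_register_device(&pdev->dev, &example_wdd);
	if (ret)
		return ret;

	/*
	 * If the watchdog was already running before probe, tell the core
	 * when the hardware was last serviced so that the first software
	 * ping does not land inside the forbidden window.
	 */
	if (example_wdt_hw_running(pdev))
		ret = watchdog_set_last_hw_keepalive(&example_wdd,
					example_wdt_ms_since_ping(pdev));

	return ret;
}
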
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c index 0ad1c393c00e..531e74994b61 100644 --- a/drivers/watchdog/watchdog_dev.c +++ b/drivers/watchdog/watchdog_dev.c @@ -1138,6 +1138,36 @@ void watchdog_dev_unregister(struct watchdog_device *wdd) watchdog_cdev_unregister(wdd); } +/* + * watchdog_set_last_hw_keepalive: set last HW keepalive time for watchdog + * @wdd: watchdog device + * @last_ping_ms: time since last HW heartbeat + * + * Adjusts the last known HW keepalive time for a watchdog timer. + * This is needed if the watchdog is already running when the probe + * function is called, and it can't be pinged immediately. This + * function must be called immediately after watchdog registration, + * and min_hw_heartbeat_ms must be set for this to be useful. + */ +int watchdog_set_last_hw_keepalive(struct watchdog_device *wdd, + unsigned int last_ping_ms) +{ + struct watchdog_core_data *wd_data; + ktime_t now; + + if (!wdd) + return -EINVAL; + + wd_data = wdd->wd_data; + + now = ktime_get(); + + wd_data->last_hw_keepalive = ktime_sub(now, ms_to_ktime(last_ping_ms)); + + return __watchdog_ping(wdd); +} +EXPORT_SYMBOL_GPL(watchdog_set_last_hw_keepalive); + /* * watchdog_dev_init: init dev part of watchdog core * diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h index 1464ce6ffa31..9b19e6bb68b5 100644 --- a/include/linux/watchdog.h +++ b/include/linux/watchdog.h @@ -210,6 +210,8 @@ extern int watchdog_init_timeout(struct watchdog_device *wdd, extern int watchdog_register_device(struct watchdog_device *); extern void watchdog_unregister_device(struct watchdog_device *); +int watchdog_set_last_hw_keepalive(struct watchdog_device *, unsigned int); + /* devres register variant */ int devm_watchdog_register_device(struct device *dev, struct watchdog_device *); -- cgit v1.2.3 From 81f6cb31222d286dab65579d61f96664eeb99e0b Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 3 Aug 2020 23:34:46 +0800 Subject: ipv6: add ipv6_dev_find() This is to add an ip_dev_find like function for ipv6, used to find the dev by saddr. It will be used by TIPC protocol. So also export it. Signed-off-by: Xin Long Acked-by: Ying Xue Signed-off-by: David S. Miller --- include/net/addrconf.h | 2 ++ net/ipv6/addrconf.c | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+) (limited to 'include') diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 8418b7d38468..ba3f6c15ad2b 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -97,6 +97,8 @@ bool ipv6_chk_custom_prefix(const struct in6_addr *addr, int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev); +struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr); + struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr, struct net_device *dev, int strict); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 0acf6a9796ca..8e761b8c47c6 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1983,6 +1983,45 @@ int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev) } EXPORT_SYMBOL(ipv6_chk_prefix); +/** + * ipv6_dev_find - find the first device with a given source address. + * @net: the net namespace + * @addr: the source address + * + * The caller should be protected by RCU, or RTNL. 
+ */ +struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr) +{ + unsigned int hash = inet6_addr_hash(net, addr); + struct inet6_ifaddr *ifp, *result = NULL; + struct net_device *dev = NULL; + + rcu_read_lock(); + hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) { + if (net_eq(dev_net(ifp->idev->dev), net) && + ipv6_addr_equal(&ifp->addr, addr)) { + result = ifp; + break; + } + } + + if (!result) { + struct rt6_info *rt; + + rt = rt6_lookup(net, addr, NULL, 0, NULL, 0); + if (rt) { + dev = rt->dst.dev; + ip6_rt_put(rt); + } + } else { + dev = result->idev->dev; + } + rcu_read_unlock(); + + return dev; +} +EXPORT_SYMBOL(ipv6_dev_find); + struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr, struct net_device *dev, int strict) { -- cgit v1.2.3 From 25abc060d282132ea5c945392f900dca0a7e9bbb Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Tue, 4 Aug 2020 19:20:40 +0300 Subject: vhost-vdpa: support IOTLB batching hints This patches extend the vhost IOTLB API to accept batch updating hints form userspace. When userspace wants update the device IOTLB in a batch, it may do: 1) Write vhost_iotlb_msg with VHOST_IOTLB_BATCH_BEGIN flag 2) Perform a batch of IOTLB updating via VHOST_IOTLB_UPDATE/INVALIDATE 3) Write vhost_iotlb_msg with VHOST_IOTLB_BATCH_END flag Vhost-vdpa may decide to batch the IOMMU/IOTLB updating in step 3 when vDPA device support set_map() ops. This is useful for the vDPA device that want to know all the mappings to tweak their own DMA translation logic. For vDPA device that doesn't require set_map(), no behavior changes. This capability is advertised via VHOST_BACKEND_F_IOTLB_BATCH capability. Signed-off-by: Jason Wang Link: https://lore.kernel.org/r/20200804162048.22587-5-eli@mellanox.com Signed-off-by: Michael S. Tsirkin --- drivers/vhost/vdpa.c | 36 +++++++++++++++++++++++++++--------- include/uapi/linux/vhost.h | 2 ++ include/uapi/linux/vhost_types.h | 11 +++++++++++ 3 files changed, 40 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 61c17d34cb39..e80db051845d 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -27,7 +27,9 @@ #include "vhost.h" enum { - VHOST_VDPA_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) + VHOST_VDPA_BACKEND_FEATURES = + (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) | + (1ULL << VHOST_BACKEND_F_IOTLB_BATCH), }; /* Currently, only network backend w/o multiqueue is supported. 
*/ @@ -48,6 +50,7 @@ struct vhost_vdpa { int virtio_id; int minor; struct eventfd_ctx *config_ctx; + int in_batch; }; static DEFINE_IDA(vhost_vdpa_ida); @@ -124,6 +127,7 @@ static void vhost_vdpa_reset(struct vhost_vdpa *v) struct vdpa_device *vdpa = v->vdpa; vdpa_reset(vdpa); + v->in_batch = 0; } static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp) @@ -546,13 +550,15 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, if (r) return r; - if (ops->dma_map) + if (ops->dma_map) { r = ops->dma_map(vdpa, iova, size, pa, perm); - else if (ops->set_map) - r = ops->set_map(vdpa, dev->iotlb); - else + } else if (ops->set_map) { + if (!v->in_batch) + r = ops->set_map(vdpa, dev->iotlb); + } else { r = iommu_map(v->domain, iova, pa, size, perm_to_iommu_flags(perm)); + } return r; } @@ -565,12 +571,14 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size) vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1); - if (ops->dma_map) + if (ops->dma_map) { ops->dma_unmap(vdpa, iova, size); - else if (ops->set_map) - ops->set_map(vdpa, dev->iotlb); - else + } else if (ops->set_map) { + if (!v->in_batch) + ops->set_map(vdpa, dev->iotlb); + } else { iommu_unmap(v->domain, iova, size); + } } static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v, @@ -663,6 +671,8 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, struct vhost_iotlb_msg *msg) { struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev); + struct vdpa_device *vdpa = v->vdpa; + const struct vdpa_config_ops *ops = vdpa->config; int r = 0; r = vhost_dev_check_owner(dev); @@ -676,6 +686,14 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, case VHOST_IOTLB_INVALIDATE: vhost_vdpa_unmap(v, msg->iova, msg->size); break; + case VHOST_IOTLB_BATCH_BEGIN: + v->in_batch = true; + break; + case VHOST_IOTLB_BATCH_END: + if (v->in_batch && ops->set_map) + ops->set_map(vdpa, dev->iotlb); + v->in_batch = false; + break; default: r = -EINVAL; break; diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h index 0c2349612e77..75232185324a 100644 --- a/include/uapi/linux/vhost.h +++ b/include/uapi/linux/vhost.h @@ -91,6 +91,8 @@ /* Use message type V2 */ #define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1 +/* IOTLB can accept batching hints */ +#define VHOST_BACKEND_F_IOTLB_BATCH 0x2 #define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64) #define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64) diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h index 669457ce5c48..9a269a88a6ff 100644 --- a/include/uapi/linux/vhost_types.h +++ b/include/uapi/linux/vhost_types.h @@ -60,6 +60,17 @@ struct vhost_iotlb_msg { #define VHOST_IOTLB_UPDATE 2 #define VHOST_IOTLB_INVALIDATE 3 #define VHOST_IOTLB_ACCESS_FAIL 4 +/* + * VHOST_IOTLB_BATCH_BEGIN and VHOST_IOTLB_BATCH_END allow modifying + * multiple mappings in one go: beginning with + * VHOST_IOTLB_BATCH_BEGIN, followed by any number of + * VHOST_IOTLB_UPDATE messages, and ending with VHOST_IOTLB_BATCH_END. + * When one of these two values is used as the message type, the rest + * of the fields in the message are ignored. There's no guarantee that + * these changes take place automatically in the device. 
+ */ +#define VHOST_IOTLB_BATCH_BEGIN 5 +#define VHOST_IOTLB_BATCH_END 6 __u8 type; }; -- cgit v1.2.3 From a9974489b61c09c702c85c6cba3d1a3fd1be7a15 Mon Sep 17 00:00:00 2001 From: Max Gurtovoy Date: Tue, 4 Aug 2020 19:20:42 +0300 Subject: vdpa: remove hard coded virtq num This will enable vdpa providers to add support for multi queue feature and publish it to upper layers (vhost and virtio). Signed-off-by: Max Gurtovoy Reviewed-by: Jason Wang Link: https://lore.kernel.org/r/20200804162048.22587-7-eli@mellanox.com Signed-off-by: Michael S. Tsirkin --- drivers/vdpa/ifcvf/ifcvf_main.c | 3 ++- drivers/vdpa/vdpa.c | 3 +++ drivers/vdpa/vdpa_sim/vdpa_sim.c | 4 ++-- drivers/vhost/vdpa.c | 9 +++------ include/linux/vdpa.h | 6 ++++-- 5 files changed, 14 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c index a902b29b0d29..7c93225367db 100644 --- a/drivers/vdpa/ifcvf/ifcvf_main.c +++ b/drivers/vdpa/ifcvf/ifcvf_main.c @@ -431,7 +431,8 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id) } adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa, - dev, &ifc_vdpa_ops); + dev, &ifc_vdpa_ops, + IFCVF_MAX_QUEUE_PAIRS * 2); if (adapter == NULL) { IFCVF_ERR(pdev, "Failed to allocate vDPA structure"); return -ENOMEM; diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c index 7105265e4793..a69ffc991e13 100644 --- a/drivers/vdpa/vdpa.c +++ b/drivers/vdpa/vdpa.c @@ -61,6 +61,7 @@ static void vdpa_release_dev(struct device *d) * initialized but before registered. * @parent: the parent device * @config: the bus operations that is supported by this device + * @nvqs: number of virtqueues supported by this device * @size: size of the parent structure that contains private data * * Driver should use vdpa_alloc_device() wrapper macro instead of @@ -71,6 +72,7 @@ static void vdpa_release_dev(struct device *d) */ struct vdpa_device *__vdpa_alloc_device(struct device *parent, const struct vdpa_config_ops *config, + int nvqs, size_t size) { struct vdpa_device *vdev; @@ -97,6 +99,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent, vdev->index = err; vdev->config = config; vdev->features_valid = false; + vdev->nvqs = nvqs; err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index); if (err) diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c index 7b34d663778f..58baff89cc29 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c @@ -65,7 +65,7 @@ static u64 vdpasim_features = (1ULL << VIRTIO_F_ANY_LAYOUT) | /* State of each vdpasim device */ struct vdpasim { struct vdpa_device vdpa; - struct vdpasim_virtqueue vqs[2]; + struct vdpasim_virtqueue vqs[VDPASIM_VQ_NUM]; struct work_struct work; /* spinlock to synchronize virtqueue state */ spinlock_t lock; @@ -352,7 +352,7 @@ static struct vdpasim *vdpasim_create(void) else ops = &vdpasim_net_config_ops; - vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops); + vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops, VDPASIM_VQ_NUM); if (!vdpasim) goto err_alloc; diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index e80db051845d..2d8c950ad3a8 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -32,9 +32,6 @@ enum { (1ULL << VHOST_BACKEND_F_IOTLB_BATCH), }; -/* Currently, only network backend w/o multiqueue is supported. 
*/ -#define VHOST_VDPA_VQ_MAX 2 - #define VHOST_VDPA_DEV_MAX (1U << MINORBITS) struct vhost_vdpa { @@ -930,7 +927,7 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa) { const struct vdpa_config_ops *ops = vdpa->config; struct vhost_vdpa *v; - int minor, nvqs = VHOST_VDPA_VQ_MAX; + int minor; int r; /* Currently, we only accept the network devices. */ @@ -951,14 +948,14 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa) atomic_set(&v->opened, 0); v->minor = minor; v->vdpa = vdpa; - v->nvqs = nvqs; + v->nvqs = vdpa->nvqs; v->virtio_id = ops->get_device_id(vdpa); device_initialize(&v->dev); v->dev.release = vhost_vdpa_release_dev; v->dev.parent = &vdpa->dev; v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor); - v->vqs = kmalloc_array(nvqs, sizeof(struct vhost_virtqueue), + v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue), GFP_KERNEL); if (!v->vqs) { r = -ENOMEM; diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 565298cb45d2..b5901cde73e0 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -41,6 +41,7 @@ struct vdpa_device { const struct vdpa_config_ops *config; unsigned int index; bool features_valid; + int nvqs; }; /** @@ -218,11 +219,12 @@ struct vdpa_config_ops { struct vdpa_device *__vdpa_alloc_device(struct device *parent, const struct vdpa_config_ops *config, + int nvqs, size_t size); -#define vdpa_alloc_device(dev_struct, member, parent, config) \ +#define vdpa_alloc_device(dev_struct, member, parent, config, nvqs) \ container_of(__vdpa_alloc_device( \ - parent, config, \ + parent, config, nvqs, \ sizeof(dev_struct) + \ BUILD_BUG_ON_ZERO(offsetof( \ dev_struct, member))), \ -- cgit v1.2.3 From aac50c0bd434794b9950181349099e709ca4edad Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Tue, 4 Aug 2020 19:20:43 +0300 Subject: net/vdpa: Use struct for set/get vq state For now VQ state involves 16 bit available index value encoded in u64 variable. In the future it will be extended to contain more fields. Use struct to contain the state, now containing only a single u16 for the available index. In the future we can add fields to this struct. Reviewed-by: Parav Pandit Acked-by: Jason Wang Signed-off-by: Eli Cohen Link: https://lore.kernel.org/r/20200804162048.22587-8-eli@mellanox.com Signed-off-by: Michael S. 
Tsirkin --- drivers/vdpa/ifcvf/ifcvf_base.c | 4 ++-- drivers/vdpa/ifcvf/ifcvf_base.h | 4 ++-- drivers/vdpa/ifcvf/ifcvf_main.c | 9 +++++---- drivers/vdpa/vdpa_sim/vdpa_sim.c | 10 ++++++---- drivers/vhost/vdpa.c | 7 +++++-- include/linux/vdpa.h | 18 ++++++++++++++---- 6 files changed, 34 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c index 94bf0328b68d..f2a128e56de5 100644 --- a/drivers/vdpa/ifcvf/ifcvf_base.c +++ b/drivers/vdpa/ifcvf/ifcvf_base.c @@ -272,7 +272,7 @@ static int ifcvf_config_features(struct ifcvf_hw *hw) return 0; } -u64 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid) +u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid) { struct ifcvf_lm_cfg __iomem *ifcvf_lm; void __iomem *avail_idx_addr; @@ -287,7 +287,7 @@ u64 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid) return last_avail_idx; } -int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u64 num) +int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num) { struct ifcvf_lm_cfg __iomem *ifcvf_lm; void __iomem *avail_idx_addr; diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h index 24af422b5a3e..08f267a2aafe 100644 --- a/drivers/vdpa/ifcvf/ifcvf_base.h +++ b/drivers/vdpa/ifcvf/ifcvf_base.h @@ -116,7 +116,7 @@ void ifcvf_set_status(struct ifcvf_hw *hw, u8 status); void io_write64_twopart(u64 val, u32 *lo, u32 *hi); void ifcvf_reset(struct ifcvf_hw *hw); u64 ifcvf_get_features(struct ifcvf_hw *hw); -u64 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid); -int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u64 num); +u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid); +int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num); struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw); #endif /* _IFCVF_H_ */ diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c index 7c93225367db..dc311e972b9e 100644 --- a/drivers/vdpa/ifcvf/ifcvf_main.c +++ b/drivers/vdpa/ifcvf/ifcvf_main.c @@ -237,19 +237,20 @@ static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev) return IFCVF_QUEUE_MAX; } -static u64 ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid) +static void ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid, + struct vdpa_vq_state *state) { struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); - return ifcvf_get_vq_state(vf, qid); + state->avail_index = ifcvf_get_vq_state(vf, qid); } static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid, - u64 num) + const struct vdpa_vq_state *state) { struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); - return ifcvf_set_vq_state(vf, qid, num); + return ifcvf_set_vq_state(vf, qid, state->avail_index); } static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid, diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c index 58baff89cc29..c93126ad09d1 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c @@ -450,26 +450,28 @@ static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx) return vq->ready; } -static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx, u64 state) +static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx, + const struct vdpa_vq_state *state) { struct vdpasim *vdpasim = vdpa_to_sim(vdpa); struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; struct vringh *vrh = &vq->vring; spin_lock(&vdpasim->lock); - vrh->last_avail_idx = state; + vrh->last_avail_idx = state->avail_index; spin_unlock(&vdpasim->lock); 
return 0; } -static u64 vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx) +static void vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx, + struct vdpa_vq_state *state) { struct vdpasim *vdpasim = vdpa_to_sim(vdpa); struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; struct vringh *vrh = &vq->vring; - return vrh->last_avail_idx; + state->avail_index = vrh->last_avail_idx; } static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa) diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 2d8c950ad3a8..066b165c17b1 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -349,6 +349,7 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, { struct vdpa_device *vdpa = v->vdpa; const struct vdpa_config_ops *ops = vdpa->config; + struct vdpa_vq_state vq_state; struct vdpa_callback cb; struct vhost_virtqueue *vq; struct vhost_vring_state s; @@ -374,7 +375,8 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, ops->set_vq_ready(vdpa, idx, s.num); return 0; case VHOST_GET_VRING_BASE: - vq->last_avail_idx = ops->get_vq_state(v->vdpa, idx); + ops->get_vq_state(v->vdpa, idx, &vq_state); + vq->last_avail_idx = vq_state.avail_index; break; case VHOST_GET_BACKEND_FEATURES: features = VHOST_VDPA_BACKEND_FEATURES; @@ -404,7 +406,8 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, break; case VHOST_SET_VRING_BASE: - if (ops->set_vq_state(vdpa, idx, vq->last_avail_idx)) + vq_state.avail_index = vq->last_avail_idx; + if (ops->set_vq_state(vdpa, idx, &vq_state)) r = -EINVAL; break; diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index b5901cde73e0..d7399c983734 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -27,6 +27,14 @@ struct vdpa_notification_area { resource_size_t size; }; +/** + * vDPA vq_state definition + * @avail_index: available index + */ +struct vdpa_vq_state { + u16 avail_index; +}; + /** * vDPA device - representation of a vDPA device * @dev: underlying device @@ -80,12 +88,12 @@ struct vdpa_device { * @set_vq_state: Set the state for a virtqueue * @vdev: vdpa device * @idx: virtqueue index - * @state: virtqueue state (last_avail_idx) + * @state: pointer to set virtqueue state (last_avail_idx) * Returns integer: success (0) or error (< 0) * @get_vq_state: Get the state for a virtqueue * @vdev: vdpa device * @idx: virtqueue index - * Returns virtqueue state (last_avail_idx) + * @state: pointer to returned state (last_avail_idx) * @get_vq_notification: Get the notification area for a virtqueue * @vdev: vdpa device * @idx: virtqueue index @@ -183,8 +191,10 @@ struct vdpa_config_ops { struct vdpa_callback *cb); void (*set_vq_ready)(struct vdpa_device *vdev, u16 idx, bool ready); bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx); - int (*set_vq_state)(struct vdpa_device *vdev, u16 idx, u64 state); - u64 (*get_vq_state)(struct vdpa_device *vdev, u16 idx); + int (*set_vq_state)(struct vdpa_device *vdev, u16 idx, + const struct vdpa_vq_state *state); + void (*get_vq_state)(struct vdpa_device *vdev, u16 idx, + struct vdpa_vq_state *state); struct vdpa_notification_area (*get_vq_notification)(struct vdpa_device *vdev, u16 idx); /* vq irq is not expected to be changed once DRIVER_OK is set */ -- cgit v1.2.3 From 23750e39d57433d0e3d89658f0bc448f9c42ff49 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Tue, 4 Aug 2020 19:20:44 +0300 Subject: vdpa: Modify get_vq_state() to return error code Modify get_vq_state() so it returns an error code. 
In case of hardware acceleration, the available index may be retrieved from the device, an operation that can possibly fail. Reviewed-by: Parav Pandit Signed-off-by: Eli Cohen Link: https://lore.kernel.org/r/20200804162048.22587-9-eli@mellanox.com Signed-off-by: Michael S. Tsirkin Acked-by: Jason Wang --- drivers/vdpa/ifcvf/ifcvf_main.c | 5 +++-- drivers/vdpa/vdpa_sim/vdpa_sim.c | 5 +++-- drivers/vhost/vdpa.c | 5 ++++- include/linux/vdpa.h | 4 ++-- 4 files changed, 12 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c index dc311e972b9e..076d7ac5e723 100644 --- a/drivers/vdpa/ifcvf/ifcvf_main.c +++ b/drivers/vdpa/ifcvf/ifcvf_main.c @@ -237,12 +237,13 @@ static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev) return IFCVF_QUEUE_MAX; } -static void ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid, - struct vdpa_vq_state *state) +static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid, + struct vdpa_vq_state *state) { struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); state->avail_index = ifcvf_get_vq_state(vf, qid); + return 0; } static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid, diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c index c93126ad09d1..df3224b138ee 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c @@ -464,14 +464,15 @@ static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx, return 0; } -static void vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx, - struct vdpa_vq_state *state) +static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx, + struct vdpa_vq_state *state) { struct vdpasim *vdpasim = vdpa_to_sim(vdpa); struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; struct vringh *vrh = &vq->vring; state->avail_index = vrh->last_avail_idx; + return 0; } static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa) diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 066b165c17b1..3fab94f88894 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -375,7 +375,10 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, ops->set_vq_ready(vdpa, idx, s.num); return 0; case VHOST_GET_VRING_BASE: - ops->get_vq_state(v->vdpa, idx, &vq_state); + r = ops->get_vq_state(v->vdpa, idx, &vq_state); + if (r) + return r; + vq->last_avail_idx = vq_state.avail_index; break; case VHOST_GET_BACKEND_FEATURES: diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index d7399c983734..eae0bfd87d91 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -193,8 +193,8 @@ struct vdpa_config_ops { bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx); int (*set_vq_state)(struct vdpa_device *vdev, u16 idx, const struct vdpa_vq_state *state); - void (*get_vq_state)(struct vdpa_device *vdev, u16 idx, - struct vdpa_vq_state *state); + int (*get_vq_state)(struct vdpa_device *vdev, u16 idx, + struct vdpa_vq_state *state); struct vdpa_notification_area (*get_vq_notification)(struct vdpa_device *vdev, u16 idx); /* vq irq is not expected to be changed once DRIVER_OK is set */ -- cgit v1.2.3 From c84f91e2622235bb742f9f20b8675cf095157026 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Wed, 5 Aug 2020 19:55:50 -0400 Subject: virtio_config: fix up warnings on parisc Apparently, on parisc le16_to_cpu returns an int. virtio_cread_le is very strict about type sizes so it causes a warning. Fix it up by casting to the correct type. 
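
The warning arises because le16_to_cpu() itself returns an int on parisc, so the _Generic arm selected by virtio_le_to_cpu() no longer yields a 16-bit result and the strict size checking in virtio_cread_le() complains. The standalone sketch below (illustrative only, not kernel code; all names are made up) reproduces the effect with a stand-in helper that returns int, and shows how casting each arm pins the result back to a fixed width, which is what the hunk below does:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for an architecture where the 16-bit byte-swap helper returns int. */
static inline int demo_le16_to_cpu(uint16_t v) { return v; }

/* Uncast arm: the result type is whatever the helper returns (int here). */
#define demo_read_uncast(x) _Generic((x), uint16_t: demo_le16_to_cpu(x))
/* Cast arm, mirroring the fix: the result is always exactly 16 bits wide. */
#define demo_read_cast(x)   _Generic((x), uint16_t: (uint16_t)demo_le16_to_cpu(x))

int main(void)
{
	uint16_t raw = 0x1234;

	printf("uncast: %zu bytes\n", sizeof(demo_read_uncast(raw))); /* 4 on common ABIs */
	printf("cast:   %zu bytes\n", sizeof(demo_read_cast(raw)));   /* 2 */
	return 0;
}
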
Reported-by: kernel test robot Signed-off-by: Michael S. Tsirkin Link: https://lore.kernel.org/r/20200805235550.1451637-1-mst@redhat.com --- include/linux/virtio_config.h | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index ecb166c824bb..8fe857e27ef3 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -357,10 +357,10 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) */ #define virtio_le_to_cpu(x) \ _Generic((x), \ - __u8: (x), \ - __le16: le16_to_cpu(x), \ - __le32: le32_to_cpu(x), \ - __le64: le64_to_cpu(x) \ + __u8: (u8)(x), \ + __le16: (u16)le16_to_cpu(x), \ + __le32: (u32)le32_to_cpu(x), \ + __le64: (u64)le64_to_cpu(x) \ ) #define virtio_cpu_to_le(x, m) \ @@ -400,7 +400,6 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) *(ptr) = virtio_le_to_cpu(virtio_cread_v); \ } while(0) -/* Config space accessors. */ #define virtio_cwrite_le(vdev, structname, member, ptr) \ do { \ typeof(((structname*)0)->member) virtio_cwrite_v = \ -- cgit v1.2.3 From 46bca88bbdd3046db31b8b7e053a909ae79e285b Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 4 Aug 2020 12:55:38 +1000 Subject: drm/ttm/amdgpu: consolidate ttm reserve paths MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Drop the WARN_ON and consolidate the two paths into one. Use the consolidate slowpath in the execbuf utils code. Reviewed-by: Christian König Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-6-airlied@gmail.com --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 2 +- drivers/gpu/drm/ttm/ttm_execbuf_util.c | 12 +--- include/drm/ttm/ttm_bo_driver.h | 91 ++++++------------------------ 3 files changed, 20 insertions(+), 85 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index afa5189dba7d..e01e8903741e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -160,7 +160,7 @@ static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr) struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); int r; - r = __ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL); + r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL); if (unlikely(r != 0)) { if (r != -ERESTARTSYS) dev_err(adev->dev, "%p reserve failed\n", bo); diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index 1797f04c0534..8a8f1a6a83a6 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -93,7 +93,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, list_for_each_entry(entry, list, head) { struct ttm_buffer_object *bo = entry->bo; - ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket); + ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket); if (ret == -EALREADY && dups) { struct ttm_validate_buffer *safe = entry; entry = list_prev_entry(entry, head); @@ -119,13 +119,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, ttm_eu_backoff_reservation_reverse(list, entry); if (ret == -EDEADLK) { - if (intr) { - ret = dma_resv_lock_slow_interruptible(bo->base.resv, - ticket); - } else { - dma_resv_lock_slow(bo->base.resv, ticket); - ret = 0; - } + ret = ttm_bo_reserve_slowpath(bo, intr, ticket); } if (!ret && entry->num_shared) @@ 
-133,8 +127,6 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, entry->num_shared); if (unlikely(ret != 0)) { - if (ret == -EINTR) - ret = -ERESTARTSYS; if (ticket) { ww_acquire_done(ticket); ww_acquire_fini(ticket); diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index f76f1332fdc5..44b4d24e73a2 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -599,29 +599,30 @@ int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible); void ttm_mem_io_unlock(struct ttm_mem_type_manager *man); /** - * __ttm_bo_reserve: + * ttm_bo_reserve: * * @bo: A pointer to a struct ttm_buffer_object. * @interruptible: Sleep interruptible if waiting. * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY. * @ticket: ticket used to acquire the ww_mutex. * - * Will not remove reserved buffers from the lru lists. - * Otherwise identical to ttm_bo_reserve. + * Locks a buffer object for validation. (Or prevents other processes from + * locking it for validation), while taking a number of measures to prevent + * deadlocks. * * Returns: * -EDEADLK: The reservation may cause a deadlock. * Release all buffer reservations, wait for @bo to become unreserved and - * try again. (only if use_sequence == 1). + * try again. * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by * a signal. Release all buffer reservations and return to user-space. * -EBUSY: The function needed to sleep, but @no_wait was true * -EALREADY: Bo already reserved using @ticket. This error code will only * be returned if @use_ticket is set to true. */ -static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo, - bool interruptible, bool no_wait, - struct ww_acquire_ctx *ticket) +static inline int ttm_bo_reserve(struct ttm_buffer_object *bo, + bool interruptible, bool no_wait, + struct ww_acquire_ctx *ticket) { int ret = 0; @@ -643,59 +644,6 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo, return ret; } -/** - * ttm_bo_reserve: - * - * @bo: A pointer to a struct ttm_buffer_object. - * @interruptible: Sleep interruptible if waiting. - * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY. - * @ticket: ticket used to acquire the ww_mutex. - * - * Locks a buffer object for validation. (Or prevents other processes from - * locking it for validation) and removes it from lru lists, while taking - * a number of measures to prevent deadlocks. - * - * Deadlocks may occur when two processes try to reserve multiple buffers in - * different order, either by will or as a result of a buffer being evicted - * to make room for a buffer already reserved. (Buffers are reserved before - * they are evicted). The following algorithm prevents such deadlocks from - * occurring: - * Processes attempting to reserve multiple buffers other than for eviction, - * (typically execbuf), should first obtain a unique 32-bit - * validation sequence number, - * and call this function with @use_ticket == 1 and @ticket->stamp == the unique - * sequence number. If upon call of this function, the buffer object is already - * reserved, the validation sequence is checked against the validation - * sequence of the process currently reserving the buffer, - * and if the current validation sequence is greater than that of the process - * holding the reservation, the function returns -EDEADLK. Otherwise it sleeps - * waiting for the buffer to become unreserved, after which it retries - * reserving. 
- * The caller should, when receiving an -EDEADLK error - * release all its buffer reservations, wait for @bo to become unreserved, and - * then rerun the validation with the same validation sequence. This procedure - * will always guarantee that the process with the lowest validation sequence - * will eventually succeed, preventing both deadlocks and starvation. - * - * Returns: - * -EDEADLK: The reservation may cause a deadlock. - * Release all buffer reservations, wait for @bo to become unreserved and - * try again. (only if use_sequence == 1). - * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by - * a signal. Release all buffer reservations and return to user-space. - * -EBUSY: The function needed to sleep, but @no_wait was true - * -EALREADY: Bo already reserved using @ticket. This error code will only - * be returned if @use_ticket is set to true. - */ -static inline int ttm_bo_reserve(struct ttm_buffer_object *bo, - bool interruptible, bool no_wait, - struct ww_acquire_ctx *ticket) -{ - WARN_ON(!kref_read(&bo->kref)); - - return __ttm_bo_reserve(bo, interruptible, no_wait, ticket); -} - /** * ttm_bo_reserve_slowpath: * @bo: A pointer to a struct ttm_buffer_object. @@ -710,20 +658,15 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, bool interruptible, struct ww_acquire_ctx *ticket) { - int ret = 0; - - WARN_ON(!kref_read(&bo->kref)); - - if (interruptible) - ret = dma_resv_lock_slow_interruptible(bo->base.resv, - ticket); - else - dma_resv_lock_slow(bo->base.resv, ticket); - - if (ret == -EINTR) - ret = -ERESTARTSYS; - - return ret; + if (interruptible) { + int ret = dma_resv_lock_slow_interruptible(bo->base.resv, + ticket); + if (ret == -EINTR) + ret = -ERESTARTSYS; + return ret; + } + dma_resv_lock_slow(bo->base.resv, ticket); + return 0; } /** -- cgit v1.2.3 From 20784cdf4b8c81359289aff73a89f557c7bc9e76 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 4 Aug 2020 12:55:39 +1000 Subject: drm/ttm: use a helper for unlocked moves to the lru tail MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The pattern was repeated a few times, just make an inline for it. 
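
Restating the hunks below in a more readable form: the open-coded lock/move/unlock sequence becomes one inline helper, and call sites such as ttm_bo_unreserve() shrink to a single call. This is a condensed view of the diff itself, not additional code:

/* New helper from include/drm/ttm/ttm_bo_driver.h below. */
static inline void ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
{
	spin_lock(&ttm_bo_glob.lru_lock);
	ttm_bo_move_to_lru_tail(bo, NULL);
	spin_unlock(&ttm_bo_glob.lru_lock);
}

/* A converted caller, also taken from the diff below. */
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	ttm_bo_move_to_lru_tail_unlocked(bo);
	dma_resv_unlock(bo->base.resv);
}
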
Reviewed-by: Christian König Reviewed-by: Ben Skeggs Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-7-airlied@gmail.com --- drivers/gpu/drm/ttm/ttm_bo.c | 8 ++------ drivers/gpu/drm/ttm/ttm_bo_vm.c | 4 +--- include/drm/ttm/ttm_bo_driver.h | 11 ++++++++--- 3 files changed, 11 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 6c02a336a587..c5771a43d8ec 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1101,9 +1101,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, error: if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) { - spin_lock(&ttm_bo_glob.lru_lock); - ttm_bo_move_to_lru_tail(bo, NULL); - spin_unlock(&ttm_bo_glob.lru_lock); + ttm_bo_move_to_lru_tail_unlocked(bo); } return ret; @@ -1318,9 +1316,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, return ret; } - spin_lock(&ttm_bo_glob.lru_lock); - ttm_bo_move_to_lru_tail(bo, NULL); - spin_unlock(&ttm_bo_glob.lru_lock); + ttm_bo_move_to_lru_tail_unlocked(bo); return ret; } diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 82b893d4249f..1e2820b06c6a 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -306,9 +306,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, } if (bo->moving != moving) { - spin_lock(&ttm_bo_glob.lru_lock); - ttm_bo_move_to_lru_tail(bo, NULL); - spin_unlock(&ttm_bo_glob.lru_lock); + ttm_bo_move_to_lru_tail_unlocked(bo); } dma_fence_put(moving); } diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 44b4d24e73a2..049ebf85712f 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -669,6 +669,13 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, return 0; } +static inline void ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo) +{ + spin_lock(&ttm_bo_glob.lru_lock); + ttm_bo_move_to_lru_tail(bo, NULL); + spin_unlock(&ttm_bo_glob.lru_lock); +} + /** * ttm_bo_unreserve * @@ -678,9 +685,7 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, */ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo) { - spin_lock(&ttm_bo_glob.lru_lock); - ttm_bo_move_to_lru_tail(bo, NULL); - spin_unlock(&ttm_bo_glob.lru_lock); + ttm_bo_move_to_lru_tail_unlocked(bo); dma_resv_unlock(bo->base.resv); } -- cgit v1.2.3 From a2ff1e81d04eb938a670bce206de2963d98950d8 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 4 Aug 2020 12:55:41 +1000 Subject: drm/ttm: export memory type debug entrypoint. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As suggested on review, just export the memory type debug for drivers to use, while also making the debug callback optional (don't need to test for system as it won't init it). rename it to be more consistent with object name for now. (we may rename all the objects later.) 
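
With the entry point exported and the debug callback made optional, a driver can dump any manager it has registered. A minimal usage sketch (the "mydrv" prefix and function name are hypothetical; managers are still reached through bdev->man[] at this point in the series) might look like:

static void mydrv_dump_vram_manager(struct ttm_bo_device *bdev)
{
	struct drm_printer p = drm_debug_printer("mydrv");

	ttm_mem_type_manager_debug(&bdev->man[TTM_PL_VRAM], &p);
}

The helper only needs a drm_printer destination and the manager pointer, as the updated ttm_bo_mem_space_debug() in the hunk below also demonstrates.
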
Reviewed-by: Christian König Reviewed-by: Ben Skeggs Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-9-airlied@gmail.com --- drivers/gpu/drm/ttm/ttm_bo.c | 13 +++++++------ include/drm/ttm/ttm_bo_driver.h | 8 ++++++++ 2 files changed, 15 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index c5771a43d8ec..8cd012f6bba2 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -77,26 +77,26 @@ static inline int ttm_mem_type_from_place(const struct ttm_place *place, return 0; } -static void ttm_mem_type_debug(struct ttm_bo_device *bdev, struct drm_printer *p, - int mem_type) +void ttm_mem_type_manager_debug(struct ttm_mem_type_manager *man, + struct drm_printer *p) { - struct ttm_mem_type_manager *man = &bdev->man[mem_type]; - drm_printf(p, " has_type: %d\n", man->has_type); drm_printf(p, " use_type: %d\n", man->use_type); drm_printf(p, " use_tt: %d\n", man->use_tt); drm_printf(p, " size: %llu\n", man->size); drm_printf(p, " available_caching: 0x%08X\n", man->available_caching); drm_printf(p, " default_caching: 0x%08X\n", man->default_caching); - if (mem_type != TTM_PL_SYSTEM) + if (man->func && man->func->debug) (*man->func->debug)(man, p); } +EXPORT_SYMBOL(ttm_mem_type_manager_debug); static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, struct ttm_placement *placement) { struct drm_printer p = drm_debug_printer(TTM_PFX); int i, ret, mem_type; + struct ttm_mem_type_manager *man; drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n", bo, bo->mem.num_pages, bo->mem.size >> 10, @@ -108,7 +108,8 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, return; drm_printf(&p, " placement[%d]=0x%08X (%d)\n", i, placement->placement[i].flags, mem_type); - ttm_mem_type_debug(bo->bdev, &p, mem_type); + man = &bo->bdev->man[mem_type]; + ttm_mem_type_manager_debug(man, &p); } } diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 049ebf85712f..ee11ae621c3b 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -806,4 +806,12 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp); extern const struct ttm_mem_type_manager_func ttm_bo_manager_func; +/** + * ttm_mem_type_manager_debug + * + * @man: manager type to dump. + * @p: printer to use for debug. + */ +void ttm_mem_type_manager_debug(struct ttm_mem_type_manager *man, + struct drm_printer *p); #endif -- cgit v1.2.3 From 747074bb04b5a6be8e562d06b5a312d6ddb253d0 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 4 Aug 2020 12:55:46 +1000 Subject: drm/ttm: split the mm manager init code (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This will allow the driver to control the ordering here better. Eventually the old path will be removed. v2: add docs for new APIs. 
rename new path to ttm_mem_type_manager_init/set_used(for now) Reviewed-by: Christian König Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-14-airlied@gmail.com --- drivers/gpu/drm/ttm/ttm_bo.c | 34 ++++++++++++++++++++-------------- include/drm/ttm/ttm_bo_api.h | 15 +++++++++++++++ include/drm/ttm/ttm_bo_driver.h | 15 +++++++++++++++ 3 files changed, 50 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 8cd012f6bba2..ebc850ce1273 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1507,35 +1507,41 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) } EXPORT_SYMBOL(ttm_bo_evict_mm); -int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, - unsigned long p_size) +void ttm_mem_type_manager_init(struct ttm_bo_device *bdev, + struct ttm_mem_type_manager *man, + unsigned long p_size) { - int ret; - struct ttm_mem_type_manager *man; unsigned i; - BUG_ON(type >= TTM_NUM_MEM_TYPES); - man = &bdev->man[type]; BUG_ON(man->has_type); man->use_io_reserve_lru = false; mutex_init(&man->io_reserve_mutex); spin_lock_init(&man->move_lock); INIT_LIST_HEAD(&man->io_reserve_lru); man->bdev = bdev; - - if (type != TTM_PL_SYSTEM) { - ret = (*man->func->init)(man, p_size); - if (ret) - return ret; - } - man->has_type = true; - man->use_type = true; man->size = p_size; for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) INIT_LIST_HEAD(&man->lru[i]); man->move = NULL; +} +EXPORT_SYMBOL(ttm_mem_type_manager_init); +int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, + unsigned long p_size) +{ + int ret; + struct ttm_mem_type_manager *man; + + BUG_ON(type >= TTM_NUM_MEM_TYPES); + ttm_mem_type_manager_init(bdev, &bdev->man[type], p_size); + + if (type != TTM_PL_SYSTEM) { + ret = (*man->func->init)(man, p_size); + if (ret) + return ret; + } + ttm_mem_type_manager_set_used(man, true); return 0; } EXPORT_SYMBOL(ttm_bo_init_mm); diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index b1c705a93517..cc876cd3b82c 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -54,6 +54,8 @@ struct ttm_place; struct ttm_lru_bulk_move; +struct ttm_mem_type_manager; + /** * struct ttm_bus_placement * @@ -531,6 +533,19 @@ int ttm_bo_create(struct ttm_bo_device *bdev, unsigned long size, uint32_t page_alignment, bool interruptible, struct ttm_buffer_object **p_bo); +/** + * ttm_mem_type_manager_init + * + * @bdev: Pointer to a ttm_bo_device struct. + * @man: memory manager object to init + * @p_size: size managed area in pages. + * + * Initialise core parts of a a manager object. + */ +void ttm_mem_type_manager_init(struct ttm_bo_device *bdev, + struct ttm_mem_type_manager *man, + unsigned long p_size); + /** * ttm_bo_init_mm * diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index ee11ae621c3b..02aa1b996b3a 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -689,6 +689,21 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo) dma_resv_unlock(bo->base.resv); } +/** + * ttm_mem_type_manager_set_used + * + * @man: A memory manager object. + * @used: usage state to set. + * + * Set the manager in use flag. If disabled the manager is no longer + * used for object placement. 
+ */ +static inline void ttm_mem_type_manager_set_used(struct ttm_mem_type_manager *man, bool used) +{ + man->has_type = true; + man->use_type = used; +} + /* * ttm_bo_util.c */ -- cgit v1.2.3 From 3c90424bd7df15eb062ae9e6518886a706ed0d84 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 4 Aug 2020 12:55:47 +1000 Subject: drm/ttm: provide a driver-led init path for range mm manager. (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This lets the generic range mm manager be initialised by the driver. v2: add docs. rename api to range_man_init for now. Reviewed-by: Christian König Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-15-airlied@gmail.com --- drivers/gpu/drm/ttm/ttm_bo_manager.c | 23 ++++++++++++++++++++--- include/drm/ttm/ttm_bo_driver.h | 14 ++++++++++++++ 2 files changed, 34 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c index facd3049c3aa..eb86c8694f47 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_manager.c +++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c @@ -104,8 +104,8 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, } } -static int ttm_bo_man_init(struct ttm_mem_type_manager *man, - unsigned long p_size) +static int ttm_bo_man_init_private(struct ttm_mem_type_manager *man, + unsigned long p_size) { struct ttm_range_manager *rman; @@ -119,6 +119,23 @@ static int ttm_bo_man_init(struct ttm_mem_type_manager *man, return 0; } +int ttm_range_man_init(struct ttm_bo_device *bdev, + struct ttm_mem_type_manager *man, + unsigned long p_size) +{ + int ret; + + man->func = &ttm_bo_manager_func; + + ttm_mem_type_manager_init(bdev, man, p_size); + ret = ttm_bo_man_init_private(man, p_size); + if (ret) + return ret; + ttm_mem_type_manager_set_used(man, true); + return 0; +} +EXPORT_SYMBOL(ttm_range_man_init); + static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man) { struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; @@ -147,7 +164,7 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man, } const struct ttm_mem_type_manager_func ttm_bo_manager_func = { - .init = ttm_bo_man_init, + .init = ttm_bo_man_init_private, .takedown = ttm_bo_man_takedown, .get_node = ttm_bo_man_get_node, .put_node = ttm_bo_man_put_node, diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 02aa1b996b3a..23352053df36 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -819,6 +819,20 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo); */ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp); +/** + * ttm_range_man_init + * + * @bdev: ttm device + * @man: the manager to initialise with the range manager. + * @p_size: size of area to be managed in pages. + * + * Initialise a generic range manager for the selected memory type. + * The range manager is installed for this device in the type slot. + */ +int ttm_range_man_init(struct ttm_bo_device *bdev, + struct ttm_mem_type_manager *man, + unsigned long p_size); + extern const struct ttm_mem_type_manager_func ttm_bo_manager_func; /** -- cgit v1.2.3 From 98399abd52b234b82457ef6c40c41543d806d3b7 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 4 Aug 2020 12:55:56 +1000 Subject: drm/ttm: purge old manager init path. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Christian König Reviewed-by: Ben Skeggs Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-24-airlied@gmail.com --- drivers/gpu/drm/ttm/ttm_bo.c | 19 ------------------- drivers/gpu/drm/ttm/ttm_bo_manager.c | 29 ++++++++++------------------- include/drm/ttm/ttm_bo_api.h | 18 ------------------ include/drm/ttm/ttm_bo_driver.h | 15 --------------- 4 files changed, 10 insertions(+), 71 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index a7e66a2d89a2..869ca5b3e4bf 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1527,25 +1527,6 @@ void ttm_mem_type_manager_init(struct ttm_bo_device *bdev, } EXPORT_SYMBOL(ttm_mem_type_manager_init); -int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, - unsigned long p_size) -{ - int ret; - struct ttm_mem_type_manager *man; - - BUG_ON(type >= TTM_NUM_MEM_TYPES); - ttm_mem_type_manager_init(bdev, &bdev->man[type], p_size); - - if (type != TTM_PL_SYSTEM) { - ret = (*man->func->init)(man, p_size); - if (ret) - return ret; - } - ttm_mem_type_manager_set_used(man, true); - return 0; -} -EXPORT_SYMBOL(ttm_bo_init_mm); - static void ttm_bo_global_kobj_release(struct kobject *kobj) { struct ttm_bo_global *glob = diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c index eb86c8694f47..b56c6961b278 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_manager.c +++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c @@ -104,11 +104,18 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, } } -static int ttm_bo_man_init_private(struct ttm_mem_type_manager *man, - unsigned long p_size) +static const struct ttm_mem_type_manager_func ttm_bo_manager_func; + +int ttm_range_man_init(struct ttm_bo_device *bdev, + struct ttm_mem_type_manager *man, + unsigned long p_size) { struct ttm_range_manager *rman; + man->func = &ttm_bo_manager_func; + + ttm_mem_type_manager_init(bdev, man, p_size); + rman = kzalloc(sizeof(*rman), GFP_KERNEL); if (!rman) return -ENOMEM; @@ -116,21 +123,7 @@ static int ttm_bo_man_init_private(struct ttm_mem_type_manager *man, drm_mm_init(&rman->mm, 0, p_size); spin_lock_init(&rman->lock); man->priv = rman; - return 0; -} -int ttm_range_man_init(struct ttm_bo_device *bdev, - struct ttm_mem_type_manager *man, - unsigned long p_size) -{ - int ret; - - man->func = &ttm_bo_manager_func; - - ttm_mem_type_manager_init(bdev, man, p_size); - ret = ttm_bo_man_init_private(man, p_size); - if (ret) - return ret; ttm_mem_type_manager_set_used(man, true); return 0; } @@ -163,11 +156,9 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man, spin_unlock(&rman->lock); } -const struct ttm_mem_type_manager_func ttm_bo_manager_func = { - .init = ttm_bo_man_init_private, +static const struct ttm_mem_type_manager_func ttm_bo_manager_func = { .takedown = ttm_bo_man_takedown, .get_node = ttm_bo_man_get_node, .put_node = ttm_bo_man_put_node, .debug = ttm_bo_man_debug }; -EXPORT_SYMBOL(ttm_bo_manager_func); diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index cc876cd3b82c..56d207b983e9 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -546,24 +546,6 @@ void ttm_mem_type_manager_init(struct ttm_bo_device *bdev, struct ttm_mem_type_manager *man, unsigned long p_size); -/** - * ttm_bo_init_mm - * - * @bdev: Pointer to a ttm_bo_device struct. 
- * @mem_type: The memory type. - * @p_size: size managed area in pages. - * - * Initialize a manager for a given memory type. - * Note: if part of driver firstopen, it must be protected from a - * potentially racing lastclose. - * Returns: - * -EINVAL: invalid size or memory type. - * -ENOMEM: Not enough memory. - * May also return driver-specified errors. - */ -int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, - unsigned long p_size); - /** * ttm_bo_clean_mm * diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 23352053df36..303014250767 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -48,19 +48,6 @@ struct ttm_mem_type_manager; struct ttm_mem_type_manager_func { - /** - * struct ttm_mem_type_manager member init - * - * @man: Pointer to a memory type manager. - * @p_size: Implementation dependent, but typically the size of the - * range to be managed in pages. - * - * Called to initialize a private range manager. The function is - * expected to initialize the man::priv member. - * Returns 0 on success, negative error code on failure. - */ - int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size); - /** * struct ttm_mem_type_manager member takedown * @@ -833,8 +820,6 @@ int ttm_range_man_init(struct ttm_bo_device *bdev, struct ttm_mem_type_manager *man, unsigned long p_size); -extern const struct ttm_mem_type_manager_func ttm_bo_manager_func; - /** * ttm_mem_type_manager_debug * -- cgit v1.2.3 From 4265accbfc724a68894f91737e765e2cce43fe4e Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 4 Aug 2020 12:55:58 +1000 Subject: drm/ttm: make some inline helper functions for cleanup paths. (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The disable path is just temporary for now, it will be dropped once has_type is gone in a later patch. v2: add docs. rename to ttm_mem_type_manager namespace Reviewed-by: Christian König Reviewed-by: Ben Skeggs Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-26-airlied@gmail.com --- drivers/gpu/drm/ttm/ttm_bo.c | 6 ++---- include/drm/ttm/ttm_bo_driver.h | 26 ++++++++++++++++++++++++++ 2 files changed, 28 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 53017cf06527..be0c0a01b929 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1466,8 +1466,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) return ret; } - man->use_type = false; - man->has_type = false; + ttm_mem_type_manager_disable(man); ret = 0; if (mem_type > 0) { @@ -1480,8 +1479,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) ret = (*man->func->takedown)(man); } - dma_fence_put(man->move); - man->move = NULL; + ttm_mem_type_manager_cleanup(man); return ret; } diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 303014250767..30dfb9d5f6c9 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -691,6 +691,32 @@ static inline void ttm_mem_type_manager_set_used(struct ttm_mem_type_manager *ma man->use_type = used; } +/** + * ttm_mem_type_manager_disable. + * + * @man: A memory manager object. + * + * Indicate the manager is not to be used and deregistered. (temporary during rework). 
+ */ +static inline void ttm_mem_type_manager_disable(struct ttm_mem_type_manager *man) +{ + man->has_type = false; + man->use_type = false; +} + +/** + * ttm_mem_type_manager_cleanup + * + * @man: A memory manager object. + * + * Cleanup the move fences from the memory manager object. + */ +static inline void ttm_mem_type_manager_cleanup(struct ttm_mem_type_manager *man) +{ + dma_fence_put(man->move); + man->move = NULL; +} + /* * ttm_bo_util.c */ -- cgit v1.2.3 From 56ee8b1c71ffb556b8758f2d9e3098f4f80b4d01 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 4 Aug 2020 12:55:59 +1000 Subject: drm/ttm: start allowing drivers to use new takedown path (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allow the takedown path callback to be optional as well. v2: use fini for range manager Reviewed-by: Christian König Reviewed-by: Ben Skeggs Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-27-airlied@gmail.com --- drivers/gpu/drm/ttm/ttm_bo.c | 12 +++++++----- drivers/gpu/drm/ttm/ttm_bo_manager.c | 21 +++++++++++++++++++-- include/drm/ttm/ttm_bo_driver.h | 24 ++++++++++++++++++++++++ 3 files changed, 50 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index be0c0a01b929..480d23fa8099 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1405,8 +1405,8 @@ int ttm_bo_create(struct ttm_bo_device *bdev, } EXPORT_SYMBOL(ttm_bo_create); -static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, - struct ttm_mem_type_manager *man) +int ttm_mem_type_manager_force_list_clean(struct ttm_bo_device *bdev, + struct ttm_mem_type_manager *man) { struct ttm_operation_ctx ctx = { .interruptible = false, @@ -1448,6 +1448,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, return 0; } +EXPORT_SYMBOL(ttm_mem_type_manager_force_list_clean); int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) { @@ -1470,13 +1471,14 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) ret = 0; if (mem_type > 0) { - ret = ttm_bo_force_list_clean(bdev, man); + ret = ttm_mem_type_manager_force_list_clean(bdev, man); if (ret) { pr_err("Cleanup eviction failed\n"); return ret; } - ret = (*man->func->takedown)(man); + if (man->func->takedown) + ret = (*man->func->takedown)(man); } ttm_mem_type_manager_cleanup(man); @@ -1499,7 +1501,7 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) return 0; } - return ttm_bo_force_list_clean(bdev, man); + return ttm_mem_type_manager_force_list_clean(bdev, man); } EXPORT_SYMBOL(ttm_bo_evict_mm); diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c index b56c6961b278..96da22be672b 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_manager.c +++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c @@ -129,7 +129,7 @@ int ttm_range_man_init(struct ttm_bo_device *bdev, } EXPORT_SYMBOL(ttm_range_man_init); -static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man) +static int ttm_bo_man_takedown_private(struct ttm_mem_type_manager *man) { struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; struct drm_mm *mm = &rman->mm; @@ -146,6 +146,23 @@ static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man) return -EBUSY; } +int ttm_range_man_fini(struct ttm_bo_device *bdev, + struct ttm_mem_type_manager *man) +{ + int ret; + + ttm_mem_type_manager_disable(man); + + ret = 
ttm_mem_type_manager_force_list_clean(bdev, man); + if (ret) + return ret; + + ttm_bo_man_takedown_private(man); + ttm_mem_type_manager_cleanup(man); + return 0; +} +EXPORT_SYMBOL(ttm_range_man_fini); + static void ttm_bo_man_debug(struct ttm_mem_type_manager *man, struct drm_printer *printer) { @@ -157,7 +174,7 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man, } static const struct ttm_mem_type_manager_func ttm_bo_manager_func = { - .takedown = ttm_bo_man_takedown, + .takedown = ttm_bo_man_takedown_private, .get_node = ttm_bo_man_get_node, .put_node = ttm_bo_man_put_node, .debug = ttm_bo_man_debug diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 30dfb9d5f6c9..811ace1416b3 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -717,6 +717,18 @@ static inline void ttm_mem_type_manager_cleanup(struct ttm_mem_type_manager *man man->move = NULL; } +/* + * ttm_mem_type_manager_force_list_clean + * + * @bdev - device to use + * @man - manager to use + * + * Force all the objects out of a memory manager until clean. + * Part of memory manager cleanup sequence. + */ +int ttm_mem_type_manager_force_list_clean(struct ttm_bo_device *bdev, + struct ttm_mem_type_manager *man); + /* * ttm_bo_util.c */ @@ -846,6 +858,17 @@ int ttm_range_man_init(struct ttm_bo_device *bdev, struct ttm_mem_type_manager *man, unsigned long p_size); +/** + * ttm_range_man_fini + * + * @bdev: ttm device + * @type: memory manager type + * + * Remove the generic range manager from a slot and tear it down. + */ +int ttm_range_man_fini(struct ttm_bo_device *bdev, + struct ttm_mem_type_manager *man); + /** * ttm_mem_type_manager_debug * @@ -854,4 +877,5 @@ int ttm_range_man_init(struct ttm_bo_device *bdev, */ void ttm_mem_type_manager_debug(struct ttm_mem_type_manager *man, struct drm_printer *p); + #endif -- cgit v1.2.3 From 0cf0a7984268c64e906b63a96df3e331ca61f989 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 4 Aug 2020 12:56:08 +1000 Subject: drm/ttm: make TTM responsible for cleaning system only. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Drivers should all be cleaning up their memory managers themselves now, so let the core just clean the system one up. Remove the legacy cleaning interface. 
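
With the legacy path gone, each driver pairs the managers it initialised with its own takedown calls before releasing the device, and TTM only disables the built-in system manager. A rough unload-order sketch, assuming the driver installed range managers for VRAM and TT during load (the driver structure and function names are hypothetical, and error handling is omitted):

struct mydrv_device {			/* hypothetical driver structure */
	struct ttm_bo_device bdev;
	/* ... driver-private state ... */
};

static void mydrv_ttm_fini(struct mydrv_device *mdev)
{
	struct ttm_bo_device *bdev = &mdev->bdev;

	ttm_range_man_fini(bdev, &bdev->man[TTM_PL_VRAM]);
	ttm_range_man_fini(bdev, &bdev->man[TTM_PL_TT]);
	ttm_bo_device_release(bdev);
}
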
Reviewed-by: Christian König Reviewed-by: Ben Skeggs Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-36-airlied@gmail.com --- drivers/gpu/drm/ttm/ttm_bo.c | 54 +++-------------------------------------- include/drm/ttm/ttm_bo_api.h | 28 --------------------- include/drm/ttm/ttm_bo_driver.h | 10 -------- 3 files changed, 4 insertions(+), 88 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 480d23fa8099..400f025ce04a 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1450,42 +1450,6 @@ int ttm_mem_type_manager_force_list_clean(struct ttm_bo_device *bdev, } EXPORT_SYMBOL(ttm_mem_type_manager_force_list_clean); -int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) -{ - struct ttm_mem_type_manager *man; - int ret = -EINVAL; - - if (mem_type >= TTM_NUM_MEM_TYPES) { - pr_err("Illegal memory type %d\n", mem_type); - return ret; - } - man = &bdev->man[mem_type]; - - if (!man->has_type) { - pr_err("Trying to take down uninitialized memory manager type %u\n", - mem_type); - return ret; - } - - ttm_mem_type_manager_disable(man); - - ret = 0; - if (mem_type > 0) { - ret = ttm_mem_type_manager_force_list_clean(bdev, man); - if (ret) { - pr_err("Cleanup eviction failed\n"); - return ret; - } - - if (man->func->takedown) - ret = (*man->func->takedown)(man); - } - - ttm_mem_type_manager_cleanup(man); - - return ret; -} -EXPORT_SYMBOL(ttm_bo_clean_mm); int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) { @@ -1589,21 +1553,11 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev) { struct ttm_bo_global *glob = &ttm_bo_glob; int ret = 0; - unsigned i = TTM_NUM_MEM_TYPES; + unsigned i; struct ttm_mem_type_manager *man; - while (i--) { - man = &bdev->man[i]; - if (man->has_type) { - man->use_type = false; - if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) { - ret = -EBUSY; - pr_err("DRM memory manager type %d is not clean\n", - i); - } - man->has_type = false; - } - } + man = &bdev->man[TTM_PL_SYSTEM]; + ttm_mem_type_manager_disable(man); mutex_lock(&ttm_global_mutex); list_del(&bdev->device_list); @@ -1616,7 +1570,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev) spin_lock(&glob->lru_lock); for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) - if (list_empty(&bdev->man[0].lru[0])) + if (list_empty(&man->lru[0])) pr_debug("Swap list %d was clean\n", i); spin_unlock(&glob->lru_lock); diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 56d207b983e9..045f283d79e8 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -546,34 +546,6 @@ void ttm_mem_type_manager_init(struct ttm_bo_device *bdev, struct ttm_mem_type_manager *man, unsigned long p_size); -/** - * ttm_bo_clean_mm - * - * @bdev: Pointer to a ttm_bo_device struct. - * @mem_type: The memory type. - * - * Take down a manager for a given memory type after first walking - * the LRU list to evict any buffers left alive. - * - * Normally, this function is part of lastclose() or unload(), and at that - * point there shouldn't be any buffers left created by user-space, since - * there should've been removed by the file descriptor release() method. - * However, before this function is run, make sure to signal all sync objects, - * and verify that the delayed delete queue is empty. The driver must also - * make sure that there are no NO_EVICT buffers present in this memory type - * when the call is made. 
- * - * If this function is part of a VT switch, the caller must make sure that - * there are no appications currently validating buffers before this - * function is called. The caller can do that by first taking the - * struct ttm_bo_device::ttm_lock in write mode. - * - * Returns: - * -EINVAL: invalid or uninitialized memory type. - * -EBUSY: There are still buffers left in this memory type. - */ -int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type); - /** * ttm_bo_evict_mm * diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 811ace1416b3..c76301a808ae 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -48,16 +48,6 @@ struct ttm_mem_type_manager; struct ttm_mem_type_manager_func { - /** - * struct ttm_mem_type_manager member takedown - * - * @man: Pointer to a memory type manager. - * - * Called to undo the setup done in init. All allocated resources - * should be freed. - */ - int (*takedown)(struct ttm_mem_type_manager *man); - /** * struct ttm_mem_type_manager member get_node * -- cgit v1.2.3 From 9eca33f4a13919bb17b8a02809a32f8299f5c9bf Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 4 Aug 2020 12:56:09 +1000 Subject: drm/ttm: add wrapper to get manager from bdev. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This will allow different abstractions later. Acked-by: Christian König Reviewed-by: Ben Skeggs Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-37-airlied@gmail.com --- drivers/gpu/drm/ttm/ttm_bo.c | 34 +++++++++++++++++----------------- drivers/gpu/drm/ttm/ttm_bo_util.c | 20 ++++++++++---------- drivers/gpu/drm/ttm/ttm_bo_vm.c | 2 +- include/drm/ttm/ttm_bo_driver.h | 6 ++++++ 4 files changed, 34 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 400f025ce04a..7474679a2364 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -108,7 +108,7 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, return; drm_printf(&p, " placement[%d]=0x%08X (%d)\n", i, placement->placement[i].flags, mem_type); - man = &bo->bdev->man[mem_type]; + man = ttm_manager_type(bo->bdev, mem_type); ttm_mem_type_manager_debug(man, &p); } } @@ -157,7 +157,7 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo, if (mem->placement & TTM_PL_FLAG_NO_EVICT) return; - man = &bdev->man[mem->mem_type]; + man = ttm_manager_type(bdev, mem->mem_type); list_add_tail(&bo->lru, &man->lru[bo->priority]); if (man->use_tt && bo->ttm && @@ -232,7 +232,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) dma_resv_assert_held(pos->first->base.resv); dma_resv_assert_held(pos->last->base.resv); - man = &pos->first->bdev->man[TTM_PL_TT]; + man = ttm_manager_type(pos->first->bdev, TTM_PL_TT); list_bulk_move_tail(&man->lru[i], &pos->first->lru, &pos->last->lru); } @@ -247,7 +247,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) dma_resv_assert_held(pos->first->base.resv); dma_resv_assert_held(pos->last->base.resv); - man = &pos->first->bdev->man[TTM_PL_VRAM]; + man = ttm_manager_type(pos->first->bdev, TTM_PL_VRAM); list_bulk_move_tail(&man->lru[i], &pos->first->lru, &pos->last->lru); } @@ -273,8 +273,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *old_man = 
&bdev->man[bo->mem.mem_type]; - struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type]; + struct ttm_mem_type_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type); + struct ttm_mem_type_manager *new_man = ttm_manager_type(bdev, mem->mem_type); int ret; ret = ttm_mem_io_lock(old_man, true); @@ -340,7 +340,7 @@ moved: return 0; out_err: - new_man = &bdev->man[bo->mem.mem_type]; + new_man = ttm_manager_type(bdev, bo->mem.mem_type); if (!new_man->use_tt) { ttm_tt_destroy(bo->ttm); bo->ttm = NULL; @@ -552,7 +552,7 @@ static void ttm_bo_release(struct kref *kref) struct ttm_buffer_object *bo = container_of(kref, struct ttm_buffer_object, kref); struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; + struct ttm_mem_type_manager *man = ttm_manager_type(bdev, bo->mem.mem_type); size_t acc_size = bo->acc_size; int ret; @@ -844,7 +844,7 @@ static int ttm_bo_mem_get(struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *mem) { - struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type]; + struct ttm_mem_type_manager *man = ttm_manager_type(bo->bdev, mem->mem_type); mem->mm_node = NULL; if (!man->func || !man->func->get_node) @@ -855,7 +855,7 @@ static int ttm_bo_mem_get(struct ttm_buffer_object *bo, void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) { - struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type]; + struct ttm_mem_type_manager *man = ttm_manager_type(bo->bdev, mem->mem_type); if (!man->func || !man->func->put_node) return; @@ -910,7 +910,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; + struct ttm_mem_type_manager *man = ttm_manager_type(bdev, mem->mem_type); struct ww_acquire_ctx *ticket; int ret; @@ -1000,7 +1000,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, if (ret) return ret; - man = &bdev->man[mem_type]; + man = ttm_manager_type(bdev, mem_type); if (!man->has_type || !man->use_type) return -EBUSY; @@ -1063,7 +1063,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, if (unlikely(ret)) goto error; - man = &bdev->man[mem->mem_type]; + man = ttm_manager_type(bdev, mem->mem_type); ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu); if (unlikely(ret)) { ttm_bo_mem_put(bo, mem); @@ -1453,7 +1453,7 @@ EXPORT_SYMBOL(ttm_mem_type_manager_force_list_clean); int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) { - struct ttm_mem_type_manager *man = &bdev->man[mem_type]; + struct ttm_mem_type_manager *man = ttm_manager_type(bdev, mem_type); if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) { pr_err("Illegal memory manager memory type %u\n", mem_type); @@ -1556,7 +1556,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev) unsigned i; struct ttm_mem_type_manager *man; - man = &bdev->man[TTM_PL_SYSTEM]; + man = ttm_manager_type(bdev, TTM_PL_SYSTEM); ttm_mem_type_manager_disable(man); mutex_lock(&ttm_global_mutex); @@ -1583,7 +1583,7 @@ EXPORT_SYMBOL(ttm_bo_device_release); static void ttm_bo_init_sysman(struct ttm_bo_device *bdev) { - struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_SYSTEM]; + struct ttm_mem_type_manager *man = ttm_manager_type(bdev, TTM_PL_SYSTEM); /* * Initialize the system memory buffer type. 
@@ -1647,7 +1647,7 @@ void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; + struct ttm_mem_type_manager *man = ttm_manager_type(bdev, bo->mem.mem_type); ttm_mem_io_lock(man, false); ttm_bo_unmap_virtual_locked(bo); diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 1f502be0b646..879c8ded0cd8 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -129,7 +129,7 @@ static int ttm_mem_io_evict(struct ttm_mem_type_manager *man) int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) { - struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; + struct ttm_mem_type_manager *man = ttm_manager_type(bdev, mem->mem_type); int ret; if (mem->bus.io_reserved_count++) @@ -162,7 +162,7 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev, int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo) { - struct ttm_mem_type_manager *man = &bo->bdev->man[bo->mem.mem_type]; + struct ttm_mem_type_manager *man = ttm_manager_type(bo->bdev, bo->mem.mem_type); struct ttm_mem_reg *mem = &bo->mem; int ret; @@ -195,7 +195,7 @@ static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, void **virtual) { - struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; + struct ttm_mem_type_manager *man = ttm_manager_type(bdev, mem->mem_type); int ret; void *addr; @@ -232,7 +232,7 @@ static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, { struct ttm_mem_type_manager *man; - man = &bdev->man[mem->mem_type]; + man = ttm_manager_type(bdev, mem->mem_type); if (virtual && mem->bus.addr == NULL) iounmap(virtual); @@ -303,7 +303,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; + struct ttm_mem_type_manager *man = ttm_manager_type(bdev, new_mem->mem_type); struct ttm_tt *ttm = bo->ttm; struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg old_copy = *old_mem; @@ -571,7 +571,7 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, struct ttm_bo_kmap_obj *map) { struct ttm_mem_type_manager *man = - &bo->bdev->man[bo->mem.mem_type]; + ttm_manager_type(bo->bdev, bo->mem.mem_type); unsigned long offset, size; int ret; @@ -601,7 +601,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) { struct ttm_buffer_object *bo = map->bo; struct ttm_mem_type_manager *man = - &bo->bdev->man[bo->mem.mem_type]; + ttm_manager_type(bo->bdev, bo->mem.mem_type); if (!map->virtual) return; @@ -634,7 +634,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; + struct ttm_mem_type_manager *man = ttm_manager_type(bdev, new_mem->mem_type); struct ttm_mem_reg *old_mem = &bo->mem; int ret; struct ttm_buffer_object *ghost_obj; @@ -697,8 +697,8 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_reg *old_mem = &bo->mem; - struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type]; - struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type]; + struct ttm_mem_type_manager *from = ttm_manager_type(bdev, old_mem->mem_type); + struct ttm_mem_type_manager *to = ttm_manager_type(bdev, new_mem->mem_type); int ret; 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 1e2820b06c6a..db4e21d11967 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -282,7 +282,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, vm_fault_t ret = VM_FAULT_NOPAGE; unsigned long address = vmf->address; struct ttm_mem_type_manager *man = - &bdev->man[bo->mem.mem_type]; + ttm_manager_type(bdev, bo->mem.mem_type); /* * Refuse to fault imported pages. This should be handled diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index c76301a808ae..c8ea5eab719d 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -444,6 +444,12 @@ struct ttm_bo_device { bool no_retry; }; +static inline struct ttm_mem_type_manager *ttm_manager_type(struct ttm_bo_device *bdev, + int mem_type) +{ + return &bdev->man[mem_type]; +} + /** * struct ttm_lru_bulk_move_pos * -- cgit v1.2.3 From 01057278bc68610389e32ffe3a8335aff38a84ce Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 4 Aug 2020 12:56:16 +1000 Subject: drm/ttm: rename manager variable to make sure wrapper is used. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Other users of this should notice this change and switch to wrapper. Reviewed-by: Christian König Reviewed-by: Ben Skeggs Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-44-airlied@gmail.com --- drivers/gpu/drm/ttm/ttm_bo.c | 2 +- include/drm/ttm/ttm_bo_driver.h | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 7474679a2364..4d87ee98467f 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1615,7 +1615,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, bdev->driver = driver; - memset(bdev->man, 0, sizeof(bdev->man)); + memset(bdev->man_priv, 0, sizeof(bdev->man_priv)); ttm_bo_init_sysman(bdev); diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index c8ea5eab719d..f97c047b6a3a 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -415,7 +415,10 @@ struct ttm_bo_device { */ struct list_head device_list; struct ttm_bo_driver *driver; - struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES]; + /* + * access via ttm_manager_type. + */ + struct ttm_mem_type_manager man_priv[TTM_NUM_MEM_TYPES]; /* * Protected by internal locks. @@ -447,7 +450,7 @@ struct ttm_bo_device { static inline struct ttm_mem_type_manager *ttm_manager_type(struct ttm_bo_device *bdev, int mem_type) { - return &bdev->man[mem_type]; + return &bdev->man_priv[mem_type]; } /** -- cgit v1.2.3 From d398811ebfa80329269932803a6e78dc01c79bf1 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 4 Aug 2020 12:56:17 +1000 Subject: drm/ttm: allow drivers to provide their own manager subclasses MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This will get removed eventually and all drivers will use this. 
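As a rough illustration of the intent (not part of this patch), a driver could embed the core manager in its own structure and register it with the new helper; the my_gtt_manager type below is hypothetical, but it mirrors the container_of pattern the amdgpu and vmwgfx managers use later in this series:

/* Hypothetical driver-side subclass; my_gtt_manager and its fields are
 * illustrative only. The embedded core manager is what TTM sees. */
struct my_gtt_manager {
	struct ttm_mem_type_manager manager;
	struct drm_mm mm;	/* driver-private allocator state */
};

static inline struct my_gtt_manager *
to_my_gtt_manager(struct ttm_mem_type_manager *man)
{
	return container_of(man, struct my_gtt_manager, manager);
}

/* Register the embedded manager so ttm_manager_type(bdev, TTM_PL_TT)
 * returns &mgr->manager instead of the device's built-in slot. */
static void my_gtt_manager_register(struct ttm_bo_device *bdev,
				    struct my_gtt_manager *mgr)
{
	ttm_set_driver_manager(bdev, TTM_PL_TT, &mgr->manager);
}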
Reviewed-by: Christian König Reviewed-by: Ben Skeggs Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-45-airlied@gmail.com --- include/drm/ttm/ttm_bo_driver.h | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index f97c047b6a3a..ce15eb075241 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -419,7 +419,7 @@ struct ttm_bo_device { * access via ttm_manager_type. */ struct ttm_mem_type_manager man_priv[TTM_NUM_MEM_TYPES]; - + struct ttm_mem_type_manager *man_drv[TTM_NUM_MEM_TYPES]; /* * Protected by internal locks. */ @@ -450,9 +450,18 @@ struct ttm_bo_device { static inline struct ttm_mem_type_manager *ttm_manager_type(struct ttm_bo_device *bdev, int mem_type) { + if (bdev->man_drv[mem_type]) + return bdev->man_drv[mem_type]; return &bdev->man_priv[mem_type]; } +static inline void ttm_set_driver_manager(struct ttm_bo_device *bdev, + int type, + struct ttm_mem_type_manager *manager) +{ + bdev->man_drv[type] = manager; +} + /** * struct ttm_lru_bulk_move_pos * -- cgit v1.2.3 From 37205891d84f9269de61d6e85c24607209478a85 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 4 Aug 2020 12:56:19 +1000 Subject: drm/ttm: make ttm_range_man_init/takedown take type + args MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This makes it easier to move these to a driver allocated system Reviewed-by: Christian König Reviewed-by: Ben Skeggs Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-47-airlied@gmail.com --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 15 ++++++--------- drivers/gpu/drm/drm_gem_vram_helper.c | 10 +++++----- drivers/gpu/drm/nouveau/nouveau_ttm.c | 22 +++++++++++++--------- drivers/gpu/drm/qxl/qxl_ttm.c | 12 ++++-------- drivers/gpu/drm/radeon/radeon_ttm.c | 31 +++++++++++++++---------------- drivers/gpu/drm/ttm/ttm_bo_manager.c | 19 ++++++++++++++----- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 13 ++++--------- include/drm/ttm/ttm_bo_driver.h | 12 +++++++++--- 8 files changed, 70 insertions(+), 64 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 207ba70a7a39..555695854076 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -66,12 +66,9 @@ static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev, unsigned int type, uint64_t size) { - struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, type); - - man->available_caching = TTM_PL_FLAG_UNCACHED; - man->default_caching = TTM_PL_FLAG_UNCACHED; - - return ttm_range_man_init(&adev->mman.bdev, man, size >> PAGE_SHIFT); + return ttm_range_man_init(&adev->mman.bdev, type, + TTM_PL_FLAG_UNCACHED, TTM_PL_FLAG_UNCACHED, + false, size >> PAGE_SHIFT); } /** @@ -1996,9 +1993,9 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) amdgpu_vram_mgr_fini(adev); amdgpu_gtt_mgr_fini(adev); - ttm_range_man_fini(&adev->mman.bdev, ttm_manager_type(&adev->mman.bdev, AMDGPU_PL_GDS)); - ttm_range_man_fini(&adev->mman.bdev, ttm_manager_type(&adev->mman.bdev, AMDGPU_PL_GWS)); - ttm_range_man_fini(&adev->mman.bdev, ttm_manager_type(&adev->mman.bdev, AMDGPU_PL_OA)); + ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS); + ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS); + ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA); 
ttm_bo_device_release(&adev->mman.bdev); adev->mman.initialized = false; DRM_INFO("amdgpu: ttm finalized\n"); diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index a01768adb96d..2187787f397e 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -1103,7 +1103,6 @@ EXPORT_SYMBOL(drm_vram_mm_debugfs_init); static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev, uint64_t vram_base, size_t vram_size) { - struct ttm_mem_type_manager *man = ttm_manager_type(&vmm->bdev, TTM_PL_VRAM); int ret; vmm->vram_base = vram_base; @@ -1116,9 +1115,10 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev, if (ret) return ret; - man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; - man->default_caching = TTM_PL_FLAG_WC; - ret = ttm_range_man_init(&vmm->bdev, man, vram_size >> PAGE_SHIFT); + ret = ttm_range_man_init(&vmm->bdev, TTM_PL_VRAM, + TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC, + TTM_PL_FLAG_WC, false, + vram_size >> PAGE_SHIFT); if (ret) return ret; @@ -1127,7 +1127,7 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev, static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm) { - ttm_range_man_fini(&vmm->bdev, ttm_manager_type(&vmm->bdev, TTM_PL_VRAM)); + ttm_range_man_fini(&vmm->bdev, TTM_PL_VRAM); ttm_bo_device_release(&vmm->bdev); } diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index cc6cf04553dd..1c636723823c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -156,16 +156,17 @@ nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind) static int nouveau_ttm_init_vram(struct nouveau_drm *drm) { - struct ttm_mem_type_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM); struct nvif_mmu *mmu = &drm->client.mmu; - man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; - man->default_caching = TTM_PL_FLAG_WC; - if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { + struct ttm_mem_type_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM); + /* Some BARs do not support being ioremapped WC */ const u8 type = mmu->type[drm->ttm.type_vram].type; + man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; + man->default_caching = TTM_PL_FLAG_WC; + if (type & NVIF_MEM_UNCACHED) { man->available_caching = TTM_PL_FLAG_UNCACHED; man->default_caching = TTM_PL_FLAG_UNCACHED; @@ -178,7 +179,9 @@ nouveau_ttm_init_vram(struct nouveau_drm *drm) ttm_mem_type_manager_set_used(man, true); return 0; } else { - return ttm_range_man_init(&drm->ttm.bdev, man, + return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_VRAM, + TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC, + TTM_PL_FLAG_WC, false, drm->gem.vram_available >> PAGE_SHIFT); } } @@ -193,7 +196,7 @@ nouveau_ttm_fini_vram(struct nouveau_drm *drm) ttm_mem_type_manager_force_list_clean(&drm->ttm.bdev, man); ttm_mem_type_manager_cleanup(man); } else - ttm_range_man_fini(&drm->ttm.bdev, man); + ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_VRAM); } static int @@ -216,9 +219,10 @@ nouveau_ttm_init_gtt(struct nouveau_drm *drm) else if (!drm->agp.bridge) man->func = &nv04_gart_manager; else - return ttm_range_man_init(&drm->ttm.bdev, man, + return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_TT, + TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC, + TTM_PL_FLAG_WC, true, size_pages); - ttm_mem_type_manager_init(&drm->ttm.bdev, man, size_pages); ttm_mem_type_manager_set_used(man, true); @@ -232,7 +236,7 @@ 
nouveau_ttm_fini_gtt(struct nouveau_drm *drm) if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA && drm->agp.bridge) - ttm_range_man_fini(&drm->ttm.bdev, man); + ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_TT); else { ttm_mem_type_manager_disable(man); ttm_mem_type_manager_force_list_clean(&drm->ttm.bdev, man); diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index a0c91bce1d2e..7b9f7a94332a 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -220,12 +220,8 @@ static int qxl_ttm_init_mem_type(struct qxl_device *qdev, unsigned int type, uint64_t size) { - struct ttm_mem_type_manager *man = ttm_manager_type(&qdev->mman.bdev, type); - - man->available_caching = TTM_PL_MASK_CACHING; - man->default_caching = TTM_PL_FLAG_CACHED; - - return ttm_range_man_init(&qdev->mman.bdev, man, size); + return ttm_range_man_init(&qdev->mman.bdev, type, TTM_PL_MASK_CACHING, + TTM_PL_FLAG_CACHED, false, size); } int qxl_ttm_init(struct qxl_device *qdev) @@ -267,8 +263,8 @@ int qxl_ttm_init(struct qxl_device *qdev) void qxl_ttm_fini(struct qxl_device *qdev) { - ttm_range_man_fini(&qdev->mman.bdev, ttm_manager_type(&qdev->mman.bdev, TTM_PL_VRAM)); - ttm_range_man_fini(&qdev->mman.bdev, ttm_manager_type(&qdev->mman.bdev, TTM_PL_PRIV)); + ttm_range_man_fini(&qdev->mman.bdev, TTM_PL_VRAM); + ttm_range_man_fini(&qdev->mman.bdev, TTM_PL_PRIV); ttm_bo_device_release(&qdev->mman.bdev); DRM_INFO("qxl: ttm finalized\n"); } diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 671ca63d420c..5f536de3986d 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -68,35 +68,34 @@ struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev) static int radeon_ttm_init_vram(struct radeon_device *rdev) { - struct ttm_mem_type_manager *man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM); - - man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; - man->default_caching = TTM_PL_FLAG_WC; - - return ttm_range_man_init(&rdev->mman.bdev, man, + return ttm_range_man_init(&rdev->mman.bdev, TTM_PL_VRAM, + TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC, + TTM_PL_FLAG_WC, false, rdev->mc.real_vram_size >> PAGE_SHIFT); } static int radeon_ttm_init_gtt(struct radeon_device *rdev) { - struct ttm_mem_type_manager *man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_TT); + uint32_t available_caching, default_caching; + + available_caching = TTM_PL_MASK_CACHING; + default_caching = TTM_PL_FLAG_CACHED; - man->available_caching = TTM_PL_MASK_CACHING; - man->default_caching = TTM_PL_FLAG_CACHED; - man->use_tt = true; #if IS_ENABLED(CONFIG_AGP) if (rdev->flags & RADEON_IS_AGP) { if (!rdev->ddev->agp) { DRM_ERROR("AGP is not enabled\n"); return -EINVAL; } - man->available_caching = TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_WC; - man->default_caching = TTM_PL_FLAG_WC; + available_caching = TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_WC; + default_caching = TTM_PL_FLAG_WC; } #endif - return ttm_range_man_init(&rdev->mman.bdev, man, + return ttm_range_man_init(&rdev->mman.bdev, TTM_PL_TT, + available_caching, + default_caching, true, rdev->mc.gtt_size >> PAGE_SHIFT); } @@ -827,8 +826,8 @@ void radeon_ttm_fini(struct radeon_device *rdev) } radeon_bo_unref(&rdev->stolen_vga_memory); } - ttm_range_man_fini(&rdev->mman.bdev, ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM)); - ttm_range_man_fini(&rdev->mman.bdev, ttm_manager_type(&rdev->mman.bdev, TTM_PL_TT)); + ttm_range_man_fini(&rdev->mman.bdev, TTM_PL_VRAM); + 
ttm_range_man_fini(&rdev->mman.bdev, TTM_PL_TT); ttm_bo_device_release(&rdev->mman.bdev); radeon_gart_fini(rdev); rdev->mman.initialized = false; diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c index 86bf5e71e959..d83cb967a107 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_manager.c +++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c @@ -107,19 +107,27 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, static const struct ttm_mem_type_manager_func ttm_bo_manager_func; int ttm_range_man_init(struct ttm_bo_device *bdev, - struct ttm_mem_type_manager *man, + unsigned type, + uint32_t available_caching, + uint32_t default_caching, + bool use_tt, unsigned long p_size) { + struct ttm_mem_type_manager *man = ttm_manager_type(bdev, type); struct ttm_range_manager *rman; - man->func = &ttm_bo_manager_func; - - ttm_mem_type_manager_init(bdev, man, p_size); + man->available_caching = available_caching; + man->default_caching = default_caching; + man->use_tt = use_tt; rman = kzalloc(sizeof(*rman), GFP_KERNEL); if (!rman) return -ENOMEM; + man->func = &ttm_bo_manager_func; + + ttm_mem_type_manager_init(bdev, man, p_size); + drm_mm_init(&rman->mm, 0, p_size); spin_lock_init(&rman->lock); man->priv = rman; @@ -130,8 +138,9 @@ int ttm_range_man_init(struct ttm_bo_device *bdev, EXPORT_SYMBOL(ttm_range_man_init); int ttm_range_man_fini(struct ttm_bo_device *bdev, - struct ttm_mem_type_manager *man) + unsigned type) { + struct ttm_mem_type_manager *man = ttm_manager_type(bdev, type); struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; struct drm_mm *mm = &rman->mm; int ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index c30021d0215c..f4b8de57a761 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -626,13 +626,9 @@ static int vmw_vram_manager_init(struct vmw_private *dev_priv) #ifdef CONFIG_TRANSPARENT_HUGEPAGE ret = vmw_thp_init(dev_priv); #else - struct ttm_mem_type_manager *man = &dev_priv->bdev.man[TTM_PL_VRAM]; - - man->available_caching = TTM_PL_FLAG_CACHED; - man->default_caching = TTM_PL_FLAG_CACHED; - - ret = ttm_range_man_init(&dev_priv->bdev, man, - dev_priv->vram_size >> PAGE_SHIFT); + ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, + TTM_PL_FLAG_CACHED, TTM_PL_FLAG_CACHED, + false, dev_priv->vram_size >> PAGE_SHIFT); #endif ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM)->use_type = false; return ret; @@ -643,8 +639,7 @@ static void vmw_vram_manager_fini(struct vmw_private *dev_priv) #ifdef CONFIG_TRANSPARENT_HUGEPAGE vmw_thp_fini(dev_priv); #else - ttm_bo_man_fini(&dev_priv->bdev, - ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM)); + ttm_bo_man_fini(&dev_priv->bdev, TTM_PL_VRAM); #endif } diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index ce15eb075241..7490de8f53af 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -856,14 +856,20 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp); * ttm_range_man_init * * @bdev: ttm device - * @man: the manager to initialise with the range manager. + * @type: memory manager type + * @available_caching: TTM_PL_FLAG_* for allowed caching modes + * @default_caching: default caching mode + * @use_tt: if the memory manager uses tt * @p_size: size of area to be managed in pages. * * Initialise a generic range manager for the selected memory type. * The range manager is installed for this device in the type slot. 
*/ int ttm_range_man_init(struct ttm_bo_device *bdev, - struct ttm_mem_type_manager *man, + unsigned type, + uint32_t available_caching, + uint32_t default_caching, + bool use_tt, unsigned long p_size); /** @@ -875,7 +881,7 @@ int ttm_range_man_init(struct ttm_bo_device *bdev, * Remove the generic range manager from a slot and tear it down. */ int ttm_range_man_fini(struct ttm_bo_device *bdev, - struct ttm_mem_type_manager *man); + unsigned type); /** * ttm_mem_type_manager_debug -- cgit v1.2.3 From 7ee6c95e05e9b06741d347107cff13559e9f81d9 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 4 Aug 2020 12:56:24 +1000 Subject: drm/ttm: drop priv pointer in memory manager MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This isn't needed anymore by any drivers. Reviewed-by: Christian König Reviewed-by: Ben Skeggs Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-52-airlied@gmail.com --- include/drm/ttm/ttm_bo_driver.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include') diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 7490de8f53af..8c39901d8717 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -123,7 +123,6 @@ struct ttm_mem_type_manager_func { * @default_caching: The default caching policy used for a buffer object * placed in this memory type if the user doesn't provide one. * @func: structure pointer implementing the range manager. See above - * @priv: Driver private closure for @func. * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions * reserved by the TTM vm system. @@ -152,7 +151,6 @@ struct ttm_mem_type_manager { uint32_t available_caching; uint32_t default_caching; const struct ttm_mem_type_manager_func *func; - void *priv; struct mutex io_reserve_mutex; bool use_io_reserve_lru; spinlock_t move_lock; -- cgit v1.2.3 From 7541ce1a6f2be9ab056a5b5105e08aef8d3287b1 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 4 Aug 2020 12:56:26 +1000 Subject: drm/ttm: drop man->bdev link. This link isn't needed anymore, drop it from the init interface. 
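As a rough sketch of the resulting driver-side call sequence (not taken from this patch; my_vram_mgr_init is a made-up name), initialisation now passes only the manager and its size in pages, matching the amdgpu and vmwgfx hunks below:

/* Hypothetical driver init showing the new signature: the manager no longer
 * carries a bdev back-pointer, so ttm_mem_type_manager_init() only takes the
 * manager and its size. */
static int my_vram_mgr_init(struct ttm_bo_device *bdev,
			    struct ttm_mem_type_manager *man,
			    unsigned long p_size)
{
	man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
	man->default_caching = TTM_PL_FLAG_WC;

	ttm_mem_type_manager_init(man, p_size);	/* was: (bdev, man, p_size) */

	ttm_set_driver_manager(bdev, TTM_PL_VRAM, man);
	ttm_mem_type_manager_set_used(man, true);
	return 0;
}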
Reviewed-by: Ben Skeggs Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-54-airlied@gmail.com --- drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 2 +- drivers/gpu/drm/nouveau/nouveau_ttm.c | 6 ++---- drivers/gpu/drm/ttm/ttm_bo.c | 6 ++---- drivers/gpu/drm/ttm/ttm_bo_manager.c | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_thp.c | 2 +- include/drm/ttm/ttm_bo_api.h | 6 ++---- include/drm/ttm/ttm_bo_driver.h | 2 -- 9 files changed, 11 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index a74c6987ac14..a6a99e66b871 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -108,7 +108,7 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size) man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; - ttm_mem_type_manager_init(&adev->mman.bdev, man, gtt_size >> PAGE_SHIFT); + ttm_mem_type_manager_init(man, gtt_size >> PAGE_SHIFT); start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS; size = (adev->gmc.gart_size >> PAGE_SHIFT) - start; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 87c6a1e1aa82..785c073d71eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -190,7 +190,7 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev) man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; man->default_caching = TTM_PL_FLAG_WC; - ttm_mem_type_manager_init(&adev->mman.bdev, man, adev->gmc.real_vram_size >> PAGE_SHIFT); + ttm_mem_type_manager_init(man, adev->gmc.real_vram_size >> PAGE_SHIFT); man->func = &amdgpu_vram_mgr_func; diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 2680bdc97c1c..a79691374f60 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -175,7 +175,7 @@ nouveau_ttm_init_vram(struct nouveau_drm *drm) man->func = &nouveau_vram_manager; man->use_io_reserve_lru = true; - ttm_mem_type_manager_init(&drm->ttm.bdev, man, + ttm_mem_type_manager_init(man, drm->gem.vram_available >> PAGE_SHIFT); ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, man); ttm_mem_type_manager_set_used(man, true); @@ -237,9 +237,7 @@ nouveau_ttm_init_gtt(struct nouveau_drm *drm) man->available_caching = available_caching; man->default_caching = default_caching; man->use_tt = true; - ttm_mem_type_manager_init(&drm->ttm.bdev, man, - size_pages); - + ttm_mem_type_manager_init(man, size_pages); ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, man); ttm_mem_type_manager_set_used(man, true); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 4d87ee98467f..aec7e40f9f54 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1469,8 +1469,7 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) } EXPORT_SYMBOL(ttm_bo_evict_mm); -void ttm_mem_type_manager_init(struct ttm_bo_device *bdev, - struct ttm_mem_type_manager *man, +void ttm_mem_type_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size) { unsigned i; @@ -1480,7 +1479,6 @@ void ttm_mem_type_manager_init(struct ttm_bo_device *bdev, mutex_init(&man->io_reserve_mutex); spin_lock_init(&man->move_lock); 
INIT_LIST_HEAD(&man->io_reserve_lru); - man->bdev = bdev; man->size = p_size; for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) @@ -1593,7 +1591,7 @@ static void ttm_bo_init_sysman(struct ttm_bo_device *bdev) man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; - ttm_mem_type_manager_init(bdev, man, 0); + ttm_mem_type_manager_init(man, 0); ttm_mem_type_manager_set_used(man, true); } diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c index 01d41c6f2f7b..1b7245ce3356 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_manager.c +++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c @@ -133,7 +133,7 @@ int ttm_range_man_init(struct ttm_bo_device *bdev, man->func = &ttm_bo_manager_func; - ttm_mem_type_manager_init(bdev, man, p_size); + ttm_mem_type_manager_init(man, p_size); drm_mm_init(&rman->mm, 0, p_size); spin_lock_init(&rman->lock); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index bc51b7773084..c3fa25161fd0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -116,7 +116,7 @@ int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type) man->default_caching = TTM_PL_FLAG_CACHED; /* TODO: This is most likely not correct */ man->use_tt = true; - ttm_mem_type_manager_init(&dev_priv->bdev, man, 0); + ttm_mem_type_manager_init(man, 0); spin_lock_init(&gman->lock); gman->used_gmr_pages = 0; ida_init(&gman->gmr_ida); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c index 1cefd9c1e8ea..0b9c29249393 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c @@ -134,7 +134,7 @@ int vmw_thp_init(struct vmw_private *dev_priv) man->available_caching = TTM_PL_FLAG_CACHED; man->default_caching = TTM_PL_FLAG_CACHED; - ttm_mem_type_manager_init(&dev_priv->bdev, man, + ttm_mem_type_manager_init(man, dev_priv->vram_size >> PAGE_SHIFT); drm_mm_init(&rman->mm, 0, man->size); diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 045f283d79e8..95d6c648d5c6 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -536,14 +536,12 @@ int ttm_bo_create(struct ttm_bo_device *bdev, unsigned long size, /** * ttm_mem_type_manager_init * - * @bdev: Pointer to a ttm_bo_device struct. * @man: memory manager object to init * @p_size: size managed area in pages. * - * Initialise core parts of a a manager object. + * Initialise core parts of a manager object. */ -void ttm_mem_type_manager_init(struct ttm_bo_device *bdev, - struct ttm_mem_type_manager *man, +void ttm_mem_type_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size); /** diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 8c39901d8717..e17975466b7f 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -138,8 +138,6 @@ struct ttm_mem_type_manager_func { struct ttm_mem_type_manager { - struct ttm_bo_device *bdev; - /* * No protection. Constant from start. */ -- cgit v1.2.3 From a751612d4cb77779669da0a6d19fbc4f7e72ba6f Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 4 Aug 2020 12:56:27 +1000 Subject: drm/ttm: drop list of memory managers from device. (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The driver now controls these, the core just controls the system memory one. 
v2: init sysman explicitly and assign it as a driver manager to simplify the lookup sequence. Reviewed-by: Christian König Reviewed-by: Ben Skeggs Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-55-airlied@gmail.com --- drivers/gpu/drm/ttm/ttm_bo.c | 6 +++--- include/drm/ttm/ttm_bo_driver.h | 6 ++---- 2 files changed, 5 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index aec7e40f9f54..6f02c7fa180a 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1556,6 +1556,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev) man = ttm_manager_type(bdev, TTM_PL_SYSTEM); ttm_mem_type_manager_disable(man); + ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL); mutex_lock(&ttm_global_mutex); list_del(&bdev->device_list); @@ -1581,7 +1582,7 @@ EXPORT_SYMBOL(ttm_bo_device_release); static void ttm_bo_init_sysman(struct ttm_bo_device *bdev) { - struct ttm_mem_type_manager *man = ttm_manager_type(bdev, TTM_PL_SYSTEM); + struct ttm_mem_type_manager *man = &bdev->sysman; /* * Initialize the system memory buffer type. @@ -1592,6 +1593,7 @@ static void ttm_bo_init_sysman(struct ttm_bo_device *bdev) man->default_caching = TTM_PL_FLAG_CACHED; ttm_mem_type_manager_init(man, 0); + ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man); ttm_mem_type_manager_set_used(man, true); } @@ -1613,8 +1615,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, bdev->driver = driver; - memset(bdev->man_priv, 0, sizeof(bdev->man_priv)); - ttm_bo_init_sysman(bdev); bdev->vma_manager = vma_manager; diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index e17975466b7f..2cb8721398ee 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -414,7 +414,7 @@ struct ttm_bo_device { /* * access via ttm_manager_type. */ - struct ttm_mem_type_manager man_priv[TTM_NUM_MEM_TYPES]; + struct ttm_mem_type_manager sysman; struct ttm_mem_type_manager *man_drv[TTM_NUM_MEM_TYPES]; /* * Protected by internal locks. @@ -446,9 +446,7 @@ struct ttm_bo_device { static inline struct ttm_mem_type_manager *ttm_manager_type(struct ttm_bo_device *bdev, int mem_type) { - if (bdev->man_drv[mem_type]) - return bdev->man_drv[mem_type]; - return &bdev->man_priv[mem_type]; + return bdev->man_drv[mem_type]; } static inline void ttm_set_driver_manager(struct ttm_bo_device *bdev, -- cgit v1.2.3 From 90a0489a718b87bc0674792f9eafac007e0ea3d6 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 4 Aug 2020 12:56:28 +1000 Subject: drm/ttm: drop type manager has_type MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit under driver control, this flag isn't needed anymore, remove the API that used to access it, and consoldiate with the used api. 
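A minimal sketch of the check that replaces has_type (illustrative only; my_check_mem_type is a made-up helper): a memory type is now considered present when ttm_manager_type() returns a manager at all, and enabled when use_type is set, as in the ttm_bo_mem_placement() hunk below:

/* Sketch of the post-patch placement check (compare ttm_bo_mem_placement()). */
static int my_check_mem_type(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = ttm_manager_type(bdev, mem_type);

	/* Previously: if (!man->has_type || !man->use_type) ... */
	if (!man || !man->use_type)
		return -EBUSY;

	return 0;
}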
Reviewed-by: Christian König Reviewed-by: Ben Skeggs Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-56-airlied@gmail.com --- drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 2 +- drivers/gpu/drm/nouveau/nouveau_ttm.c | 4 ++-- drivers/gpu/drm/ttm/ttm_bo.c | 8 +++----- drivers/gpu/drm/ttm/ttm_bo_manager.c | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_thp.c | 2 +- include/drm/ttm/ttm_bo_driver.h | 17 ----------------- 8 files changed, 10 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index a6a99e66b871..e9de6f9538c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -146,7 +146,7 @@ void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev) struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); int ret; - ttm_mem_type_manager_disable(man); + ttm_mem_type_manager_set_used(man, false); ret = ttm_mem_type_manager_force_list_clean(&adev->mman.bdev, man); if (ret) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 785c073d71eb..03a6248f0c4e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -223,7 +223,7 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev) struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); int ret; - ttm_mem_type_manager_disable(man); + ttm_mem_type_manager_set_used(man, false); ret = ttm_mem_type_manager_force_list_clean(&adev->mman.bdev, man); if (ret) diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index a79691374f60..38d9ea73ac8b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -194,7 +194,7 @@ nouveau_ttm_fini_vram(struct nouveau_drm *drm) struct ttm_mem_type_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM); if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { - ttm_mem_type_manager_disable(man); + ttm_mem_type_manager_set_used(man, false); ttm_mem_type_manager_force_list_clean(&drm->ttm.bdev, man); ttm_mem_type_manager_cleanup(man); ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, NULL); @@ -253,7 +253,7 @@ nouveau_ttm_fini_gtt(struct nouveau_drm *drm) drm->agp.bridge) ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_TT); else { - ttm_mem_type_manager_disable(man); + ttm_mem_type_manager_set_used(man, false); ttm_mem_type_manager_force_list_clean(&drm->ttm.bdev, man); ttm_mem_type_manager_cleanup(man); ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, NULL); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 6f02c7fa180a..c1644a0e0586 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -80,7 +80,6 @@ static inline int ttm_mem_type_from_place(const struct ttm_place *place, void ttm_mem_type_manager_debug(struct ttm_mem_type_manager *man, struct drm_printer *p) { - drm_printf(p, " has_type: %d\n", man->has_type); drm_printf(p, " use_type: %d\n", man->use_type); drm_printf(p, " use_tt: %d\n", man->use_tt); drm_printf(p, " size: %llu\n", man->size); @@ -1001,7 +1000,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, return ret; man = ttm_manager_type(bdev, mem_type); - if (!man->has_type || !man->use_type) + if (!man || !man->use_type) return -EBUSY; if (!ttm_bo_mt_compatible(man, 
mem_type, place, &cur_flags)) @@ -1460,7 +1459,7 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) return -EINVAL; } - if (!man->has_type) { + if (!man) { pr_err("Memory type %u has not been initialized\n", mem_type); return 0; } @@ -1474,7 +1473,6 @@ void ttm_mem_type_manager_init(struct ttm_mem_type_manager *man, { unsigned i; - BUG_ON(man->has_type); man->use_io_reserve_lru = false; mutex_init(&man->io_reserve_mutex); spin_lock_init(&man->move_lock); @@ -1555,7 +1553,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev) struct ttm_mem_type_manager *man; man = ttm_manager_type(bdev, TTM_PL_SYSTEM); - ttm_mem_type_manager_disable(man); + ttm_mem_type_manager_set_used(man, false); ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL); mutex_lock(&ttm_global_mutex); diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c index 1b7245ce3356..6679dc11934f 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_manager.c +++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c @@ -152,7 +152,7 @@ int ttm_range_man_fini(struct ttm_bo_device *bdev, struct drm_mm *mm = &rman->mm; int ret; - ttm_mem_type_manager_disable(man); + ttm_mem_type_manager_set_used(man, false); ret = ttm_mem_type_manager_force_list_clean(bdev, man); if (ret) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index c3fa25161fd0..ca5037184814 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -143,7 +143,7 @@ void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type) struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, type); struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man); - ttm_mem_type_manager_disable(man); + ttm_mem_type_manager_set_used(man, false); ttm_mem_type_manager_force_list_clean(&dev_priv->bdev, man); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c index 0b9c29249393..4110e8309188 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c @@ -152,7 +152,7 @@ void vmw_thp_fini(struct vmw_private *dev_priv) struct drm_mm *mm = &rman->mm; int ret; - ttm_mem_type_manager_disable(man); + ttm_mem_type_manager_set_used(man, false); ret = ttm_mem_type_manager_force_list_clean(&dev_priv->bdev, man); if (ret) diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 2cb8721398ee..a6076ab89a51 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -111,7 +111,6 @@ struct ttm_mem_type_manager_func { /** * struct ttm_mem_type_manager * - * @has_type: The memory type has been initialized. * @use_type: The memory type is enabled. * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory * managed by this memory type. @@ -141,8 +140,6 @@ struct ttm_mem_type_manager { /* * No protection. Constant from start. */ - - bool has_type; bool use_type; bool use_tt; uint64_t size; @@ -689,23 +686,9 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo) */ static inline void ttm_mem_type_manager_set_used(struct ttm_mem_type_manager *man, bool used) { - man->has_type = true; man->use_type = used; } -/** - * ttm_mem_type_manager_disable. - * - * @man: A memory manager object. - * - * Indicate the manager is not to be used and deregistered. (temporary during rework). 
- */ -static inline void ttm_mem_type_manager_disable(struct ttm_mem_type_manager *man) -{ - man->has_type = false; - man->use_type = false; -} - /** * ttm_mem_type_manager_cleanup * -- cgit v1.2.3 From 3f48f938ad21a1ab1cec5631af3e468baabe41c8 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 4 Aug 2020 12:56:29 +1000 Subject: drm/ttm: add a wrapper for checking if manager is in use MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This converts vmwgfx over to using an interface to set the in use and check the in use flag. Reviewed-by: Christian König Reviewed-by: Ben Skeggs Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-57-airlied@gmail.com --- drivers/gpu/drm/nouveau/nouveau_ttm.c | 1 - drivers/gpu/drm/ttm/ttm_bo.c | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 14 +++++++------- include/drm/ttm/ttm_bo_driver.h | 14 ++++++++++++++ 4 files changed, 22 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 38d9ea73ac8b..84387c810540 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -240,7 +240,6 @@ nouveau_ttm_init_gtt(struct nouveau_drm *drm) ttm_mem_type_manager_init(man, size_pages); ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, man); ttm_mem_type_manager_set_used(man, true); - return 0; } diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index c1644a0e0586..e0188250b6ec 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1000,7 +1000,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, return ret; man = ttm_manager_type(bdev, mem_type); - if (!man || !man->use_type) + if (!man || !ttm_mem_type_manager_used(man)) return -EBUSY; if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags)) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index f4b8de57a761..a7b3c8ee7f21 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -630,7 +630,7 @@ static int vmw_vram_manager_init(struct vmw_private *dev_priv) TTM_PL_FLAG_CACHED, TTM_PL_FLAG_CACHED, false, dev_priv->vram_size >> PAGE_SHIFT); #endif - ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM)->use_type = false; + ttm_mem_type_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false); return ret; } @@ -1192,9 +1192,9 @@ static void __vmw_svga_enable(struct vmw_private *dev_priv) struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); spin_lock(&dev_priv->svga_lock); - if (!man->use_type) { + if (!ttm_mem_type_manager_used(man)) { vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE); - man->use_type = true; + ttm_mem_type_manager_set_used(man, true); } spin_unlock(&dev_priv->svga_lock); } @@ -1223,8 +1223,8 @@ static void __vmw_svga_disable(struct vmw_private *dev_priv) struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); spin_lock(&dev_priv->svga_lock); - if (man->use_type) { - man->use_type = false; + if (ttm_mem_type_manager_used(man)) { + ttm_mem_type_manager_set_used(man, false); vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_HIDE | SVGA_REG_ENABLE_ENABLE); @@ -1257,8 +1257,8 @@ void vmw_svga_disable(struct vmw_private *dev_priv) vmw_kms_lost_device(dev_priv->dev); ttm_write_lock(&dev_priv->reservation_sem, false); spin_lock(&dev_priv->svga_lock); - if (man->use_type) { - 
man->use_type = false; + if (ttm_mem_type_manager_used(man)) { + ttm_mem_type_manager_set_used(man, false); spin_unlock(&dev_priv->svga_lock); if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM)) DRM_ERROR("Failed evicting VRAM buffers.\n"); diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index a6076ab89a51..31264a09ec63 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -689,6 +689,20 @@ static inline void ttm_mem_type_manager_set_used(struct ttm_mem_type_manager *ma man->use_type = used; } +/** + * ttm_mem_type_manager_used + * + * @man: Manager to get used state for + * + * Get the in use flag for a manager. + * Returns: + * true is used, false if not. + */ +static inline bool ttm_mem_type_manager_used(struct ttm_mem_type_manager *man) +{ + return man->use_type; +} + /** * ttm_mem_type_manager_cleanup * -- cgit v1.2.3 From 9de59bc201496f28bb8835c2bcbae3ddb186b548 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 4 Aug 2020 12:56:31 +1000 Subject: drm/ttm: rename ttm_mem_type_manager -> ttm_resource_manager. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This name makes a lot more sense, since these are about managing driver resources rather than just memory ranges. Acked-by: Christian König Reviewed-by: Ben Skeggs Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-59-airlied@gmail.com --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 36 +++++++-------- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 8 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 36 +++++++-------- drivers/gpu/drm/drm_gem_vram_helper.c | 4 +- drivers/gpu/drm/nouveau/nouveau_ttm.c | 44 +++++++++--------- drivers/gpu/drm/nouveau/nouveau_ttm.h | 6 +-- drivers/gpu/drm/qxl/qxl_ttm.c | 4 +- drivers/gpu/drm/radeon/radeon_gem.c | 2 +- drivers/gpu/drm/radeon/radeon_ttm.c | 4 +- drivers/gpu/drm/ttm/ttm_bo.c | 66 +++++++++++++-------------- drivers/gpu/drm/ttm/ttm_bo_util.c | 26 +++++------ drivers/gpu/drm/ttm/ttm_bo_vm.c | 2 +- drivers/gpu/drm/ttm/ttm_range_manager.c | 28 ++++++------ drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 20 ++++---- drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 26 +++++------ drivers/gpu/drm/vmwgfx/vmwgfx_thp.c | 26 +++++------ include/drm/ttm/ttm_bo_api.h | 6 +-- include/drm/ttm/ttm_bo_driver.h | 60 ++++++++++++------------ 23 files changed, 209 insertions(+), 209 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index ba4d11e8a960..e2b4d3fc601d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -517,7 +517,7 @@ out_put: uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - struct ttm_mem_type_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); + struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); return amdgpu_vram_mgr_usage(vram_man); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 5ef7b3b7c9af..65b67c82a4b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -299,7 +299,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev, { s64 time_us, increment_us; u64 free_vram, total_vram, used_vram; - struct ttm_mem_type_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); + struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); /* Allow a maximum of 200 accumulated ms. This is basically per-IB * throttling. * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index e9de6f9538c0..b9050b7221d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -25,13 +25,13 @@ #include "amdgpu.h" struct amdgpu_gtt_mgr { - struct ttm_mem_type_manager manager; + struct ttm_resource_manager manager; struct drm_mm mm; spinlock_t lock; atomic64_t available; }; -static inline struct amdgpu_gtt_mgr *to_gtt_mgr(struct ttm_mem_type_manager *man) +static inline struct amdgpu_gtt_mgr *to_gtt_mgr(struct ttm_resource_manager *man) { return container_of(man, struct amdgpu_gtt_mgr, manager); } @@ -54,7 +54,7 @@ static ssize_t amdgpu_mem_info_gtt_total_show(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; - struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); return snprintf(buf, PAGE_SIZE, "%llu\n", man->size * PAGE_SIZE); } @@ -72,7 +72,7 @@ static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; - struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); return snprintf(buf, PAGE_SIZE, "%llu\n", amdgpu_gtt_mgr_usage(man)); } @@ -82,7 +82,7 @@ static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO, static DEVICE_ATTR(mem_info_gtt_used, S_IRUGO, amdgpu_mem_info_gtt_used_show, NULL); -static const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func; +static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func; /** * amdgpu_gtt_mgr_init - init GTT manager and DRM MM * @@ -93,7 +93,7 @@ static const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func; */ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size) { - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; struct amdgpu_gtt_mgr *mgr; uint64_t start, size; int ret; @@ -108,7 +108,7 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size) man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; - ttm_mem_type_manager_init(man, gtt_size >> PAGE_SHIFT); + ttm_resource_manager_init(man, gtt_size >> PAGE_SHIFT); start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS; size = (adev->gmc.gart_size >> PAGE_SHIFT) - start; @@ -128,7 +128,7 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size) } ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, &mgr->manager); - ttm_mem_type_manager_set_used(man, true); + ttm_resource_manager_set_used(man, true); return 0; } @@ -142,13 +142,13 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size) */ void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev) { - struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); + struct 
ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); int ret; - ttm_mem_type_manager_set_used(man, false); + ttm_resource_manager_set_used(man, false); - ret = ttm_mem_type_manager_force_list_clean(&adev->mman.bdev, man); + ret = ttm_resource_manager_force_list_clean(&adev->mman.bdev, man); if (ret) return; @@ -159,7 +159,7 @@ void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev) device_remove_file(adev->dev, &dev_attr_mem_info_gtt_total); device_remove_file(adev->dev, &dev_attr_mem_info_gtt_used); - ttm_mem_type_manager_cleanup(man); + ttm_resource_manager_cleanup(man); ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, NULL); kfree(mgr); } @@ -186,7 +186,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem) * * Dummy, allocate the node but no space for it yet. */ -static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man, +static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man, struct ttm_buffer_object *tbo, const struct ttm_place *place, struct ttm_mem_reg *mem) @@ -251,7 +251,7 @@ err_out: * * Free the allocated GTT again. */ -static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man, +static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man, struct ttm_mem_reg *mem) { struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); @@ -274,7 +274,7 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man, * * Return how many bytes are used in the GTT domain */ -uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man) +uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man) { struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); s64 result = man->size - atomic64_read(&mgr->available); @@ -282,7 +282,7 @@ uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man) return (result > 0 ? result : 0) * PAGE_SIZE; } -int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man) +int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man) { struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); struct amdgpu_gtt_node *node; @@ -309,7 +309,7 @@ int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man) * * Dump the table content using printk. 
*/ -static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man, +static void amdgpu_gtt_mgr_debug(struct ttm_resource_manager *man, struct drm_printer *printer) { struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); @@ -323,7 +323,7 @@ static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man, amdgpu_gtt_mgr_usage(man) >> 20); } -static const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = { +static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func = { .get_node = amdgpu_gtt_mgr_new, .put_node = amdgpu_gtt_mgr_del, .debug = amdgpu_gtt_mgr_debug diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 134cca1af744..fff9c013f337 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -616,9 +616,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file } case AMDGPU_INFO_MEMORY: { struct drm_amdgpu_memory_info mem; - struct ttm_mem_type_manager *vram_man = + struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); - struct ttm_mem_type_manager *gtt_man = + struct ttm_resource_manager *gtt_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); memset(&mem, 0, sizeof(mem)); mem.vram.total_heap_size = adev->gmc.real_vram_size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index ced418cba2f7..ce98df5b0c21 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -442,7 +442,7 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr, static bool amdgpu_bo_validate_size(struct amdgpu_device *adev, unsigned long size, u32 domain) { - struct ttm_mem_type_manager *man = NULL; + struct ttm_resource_manager *man = NULL; /* * If GTT is part of requested domains the check must succeed to diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 555695854076..2fc0214d9a95 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -2012,7 +2012,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) */ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) { - struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); uint64_t size; int r; @@ -2234,7 +2234,7 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data) unsigned ttm_pl = (uintptr_t)node->info_ent->data; struct drm_device *dev = node->minor->dev; struct amdgpu_device *adev = dev->dev_private; - struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, ttm_pl); + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, ttm_pl); struct drm_printer p = drm_seq_file_printer(m); man->func->debug(man, &p); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index c01fdb3f0458..3db29ae1f802 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -73,8 +73,8 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev); void amdgpu_vram_mgr_fini(struct amdgpu_device *adev); bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem); -uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man); -int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man); +uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man); +int 
amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man); u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo); int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, @@ -86,8 +86,8 @@ void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev, struct device *dev, enum dma_data_direction dir, struct sg_table *sgt); -uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man); -uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man); +uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man); +uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man); int amdgpu_ttm_init(struct amdgpu_device *adev); void amdgpu_ttm_late_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 03a6248f0c4e..6f888a63f22d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -29,7 +29,7 @@ #include "atom.h" struct amdgpu_vram_mgr { - struct ttm_mem_type_manager manager; + struct ttm_resource_manager manager; struct drm_mm mm; spinlock_t lock; atomic64_t usage; @@ -37,7 +37,7 @@ struct amdgpu_vram_mgr { struct amdgpu_device *adev; }; -static inline struct amdgpu_vram_mgr *to_vram_mgr(struct ttm_mem_type_manager *man) +static inline struct amdgpu_vram_mgr *to_vram_mgr(struct ttm_resource_manager *man) { return container_of(man, struct amdgpu_vram_mgr, manager); } @@ -89,7 +89,7 @@ static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; - struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); return snprintf(buf, PAGE_SIZE, "%llu\n", amdgpu_vram_mgr_usage(man)); } @@ -107,7 +107,7 @@ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; - struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); return snprintf(buf, PAGE_SIZE, "%llu\n", amdgpu_vram_mgr_vis_usage(man)); } @@ -165,7 +165,7 @@ static const struct attribute *amdgpu_vram_mgr_attributes[] = { NULL }; -static const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func; +static const struct ttm_resource_manager_func amdgpu_vram_mgr_func; /** * amdgpu_vram_mgr_init - init VRAM manager and DRM MM @@ -177,7 +177,7 @@ static const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func; */ int amdgpu_vram_mgr_init(struct amdgpu_device *adev) { - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; struct amdgpu_vram_mgr *mgr; int ret; @@ -190,7 +190,7 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev) man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; man->default_caching = TTM_PL_FLAG_WC; - ttm_mem_type_manager_init(man, adev->gmc.real_vram_size >> PAGE_SHIFT); + ttm_resource_manager_init(man, adev->gmc.real_vram_size >> PAGE_SHIFT); man->func = &amdgpu_vram_mgr_func; @@ -205,7 +205,7 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev) DRM_ERROR("Failed to register sysfs\n"); ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager); - ttm_mem_type_manager_set_used(man, true); + ttm_resource_manager_set_used(man, true); return 0; } @@ -219,13 +219,13 @@ int amdgpu_vram_mgr_init(struct 
amdgpu_device *adev) */ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev) { - struct ttm_mem_type_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); int ret; - ttm_mem_type_manager_set_used(man, false); + ttm_resource_manager_set_used(man, false); - ret = ttm_mem_type_manager_force_list_clean(&adev->mman.bdev, man); + ret = ttm_resource_manager_force_list_clean(&adev->mman.bdev, man); if (ret) return; @@ -235,7 +235,7 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev) sysfs_remove_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes); - ttm_mem_type_manager_cleanup(man); + ttm_resource_manager_cleanup(man); ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL); kfree(mgr); } @@ -321,7 +321,7 @@ static void amdgpu_vram_mgr_virt_start(struct ttm_mem_reg *mem, * * Allocate VRAM for the given BO. */ -static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, +static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, struct ttm_buffer_object *tbo, const struct ttm_place *place, struct ttm_mem_reg *mem) @@ -441,7 +441,7 @@ error: * * Free the allocated VRAM again. */ -static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man, +static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man, struct ttm_mem_reg *mem) { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); @@ -575,7 +575,7 @@ void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev, * * Returns how many bytes are used in this domain. */ -uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man) +uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man) { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); @@ -589,7 +589,7 @@ uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man) * * Returns how many bytes are used in the visible part of VRAM */ -uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man) +uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man) { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); @@ -604,7 +604,7 @@ uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man) * * Dump the table content using printk. 
*/ -static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man, +static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man, struct drm_printer *printer) { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); @@ -618,7 +618,7 @@ static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man, amdgpu_vram_mgr_vis_usage(man) >> 20); } -static const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = { +static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = { .get_node = amdgpu_vram_mgr_new, .put_node = amdgpu_vram_mgr_del, .debug = amdgpu_vram_mgr_debug diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index 2187787f397e..e3660d00987d 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -1075,10 +1075,10 @@ static int drm_vram_mm_debugfs(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_vram_mm *vmm = node->minor->dev->vram_mm; - struct ttm_mem_type_manager *man = ttm_manager_type(&vmm->bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&vmm->bdev, TTM_PL_VRAM); struct drm_printer p = drm_seq_file_printer(m); - ttm_mem_type_manager_debug(man, &p); + ttm_resource_manager_debug(man, &p); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 84387c810540..78b5a87b9855 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -32,13 +32,13 @@ #include static void -nouveau_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg) +nouveau_manager_del(struct ttm_resource_manager *man, struct ttm_mem_reg *reg) { nouveau_mem_del(reg); } static int -nouveau_vram_manager_new(struct ttm_mem_type_manager *man, +nouveau_vram_manager_new(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *reg) @@ -63,13 +63,13 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man, return 0; } -const struct ttm_mem_type_manager_func nouveau_vram_manager = { +const struct ttm_resource_manager_func nouveau_vram_manager = { .get_node = nouveau_vram_manager_new, .put_node = nouveau_manager_del, }; static int -nouveau_gart_manager_new(struct ttm_mem_type_manager *man, +nouveau_gart_manager_new(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *reg) @@ -86,13 +86,13 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man, return 0; } -const struct ttm_mem_type_manager_func nouveau_gart_manager = { +const struct ttm_resource_manager_func nouveau_gart_manager = { .get_node = nouveau_gart_manager_new, .put_node = nouveau_manager_del, }; static int -nv04_gart_manager_new(struct ttm_mem_type_manager *man, +nv04_gart_manager_new(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *reg) @@ -118,7 +118,7 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man, return 0; } -const struct ttm_mem_type_manager_func nv04_gart_manager = { +const struct ttm_resource_manager_func nv04_gart_manager = { .get_node = nv04_gart_manager_new, .put_node = nouveau_manager_del, }; @@ -160,7 +160,7 @@ nouveau_ttm_init_vram(struct nouveau_drm *drm) if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { /* Some BARs do not support being ioremapped WC */ const u8 type = mmu->type[drm->ttm.type_vram].type; - struct ttm_mem_type_manager *man = 
kzalloc(sizeof(*man), GFP_KERNEL); + struct ttm_resource_manager *man = kzalloc(sizeof(*man), GFP_KERNEL); if (!man) return -ENOMEM; @@ -175,10 +175,10 @@ nouveau_ttm_init_vram(struct nouveau_drm *drm) man->func = &nouveau_vram_manager; man->use_io_reserve_lru = true; - ttm_mem_type_manager_init(man, + ttm_resource_manager_init(man, drm->gem.vram_available >> PAGE_SHIFT); ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, man); - ttm_mem_type_manager_set_used(man, true); + ttm_resource_manager_set_used(man, true); return 0; } else { return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_VRAM, @@ -191,12 +191,12 @@ nouveau_ttm_init_vram(struct nouveau_drm *drm) static void nouveau_ttm_fini_vram(struct nouveau_drm *drm) { - struct ttm_mem_type_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM); if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { - ttm_mem_type_manager_set_used(man, false); - ttm_mem_type_manager_force_list_clean(&drm->ttm.bdev, man); - ttm_mem_type_manager_cleanup(man); + ttm_resource_manager_set_used(man, false); + ttm_resource_manager_force_list_clean(&drm->ttm.bdev, man); + ttm_resource_manager_cleanup(man); ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, NULL); kfree(man); } else @@ -206,10 +206,10 @@ nouveau_ttm_fini_vram(struct nouveau_drm *drm) static int nouveau_ttm_init_gtt(struct nouveau_drm *drm) { - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; unsigned long size_pages = drm->gem.gart_available >> PAGE_SHIFT; unsigned available_caching, default_caching; - const struct ttm_mem_type_manager_func *func = NULL; + const struct ttm_resource_manager_func *func = NULL; if (drm->agp.bridge) { available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; @@ -237,24 +237,24 @@ nouveau_ttm_init_gtt(struct nouveau_drm *drm) man->available_caching = available_caching; man->default_caching = default_caching; man->use_tt = true; - ttm_mem_type_manager_init(man, size_pages); + ttm_resource_manager_init(man, size_pages); ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, man); - ttm_mem_type_manager_set_used(man, true); + ttm_resource_manager_set_used(man, true); return 0; } static void nouveau_ttm_fini_gtt(struct nouveau_drm *drm) { - struct ttm_mem_type_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_TT); + struct ttm_resource_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_TT); if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA && drm->agp.bridge) ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_TT); else { - ttm_mem_type_manager_set_used(man, false); - ttm_mem_type_manager_force_list_clean(&drm->ttm.bdev, man); - ttm_mem_type_manager_cleanup(man); + ttm_resource_manager_set_used(man, false); + ttm_resource_manager_force_list_clean(&drm->ttm.bdev, man); + ttm_resource_manager_cleanup(man); ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, NULL); kfree(man); } diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.h b/drivers/gpu/drm/nouveau/nouveau_ttm.h index 085280754b3e..eaf25461cd91 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.h +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.h @@ -8,9 +8,9 @@ nouveau_bdev(struct ttm_bo_device *bd) return container_of(bd, struct nouveau_drm, ttm.bdev); } -extern const struct ttm_mem_type_manager_func nouveau_vram_manager; -extern const struct ttm_mem_type_manager_func nouveau_gart_manager; -extern const struct ttm_mem_type_manager_func nv04_gart_manager; +extern const struct ttm_resource_manager_func 
nouveau_vram_manager; +extern const struct ttm_resource_manager_func nouveau_gart_manager; +extern const struct ttm_resource_manager_func nv04_gart_manager; struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, u32 page_flags); diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 7b9f7a94332a..727049046014 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -275,10 +275,10 @@ void qxl_ttm_fini(struct qxl_device *qdev) static int qxl_mm_dump_table(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *)m->private; - struct ttm_mem_type_manager *man = (struct ttm_mem_type_manager *)node->info_ent->data; + struct ttm_resource_manager *man = (struct ttm_resource_manager *)node->info_ent->data; struct drm_printer p = drm_seq_file_printer(m); - ttm_mem_type_manager_debug(man, &p); + ttm_resource_manager_debug(man, &p); return 0; } #endif diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 3ec028dba739..7f5dfe04789e 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -224,7 +224,7 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, { struct radeon_device *rdev = dev->dev_private; struct drm_radeon_gem_info *args = data; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 5f536de3986d..21a01737b1be 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -838,7 +838,7 @@ void radeon_ttm_fini(struct radeon_device *rdev) * isn't running */ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size) { - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; if (!rdev->mman.initialized) return; @@ -897,7 +897,7 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data) unsigned ttm_pl = *(int*)node->info_ent->data; struct drm_device *dev = node->minor->dev; struct radeon_device *rdev = dev->dev_private; - struct ttm_mem_type_manager *man = ttm_manager_type(&rdev->mman.bdev, ttm_pl); + struct ttm_resource_manager *man = ttm_manager_type(&rdev->mman.bdev, ttm_pl); struct drm_printer p = drm_seq_file_printer(m); man->func->debug(man, &p); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index e0188250b6ec..ff68f25ddbd4 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -77,7 +77,7 @@ static inline int ttm_mem_type_from_place(const struct ttm_place *place, return 0; } -void ttm_mem_type_manager_debug(struct ttm_mem_type_manager *man, +void ttm_resource_manager_debug(struct ttm_resource_manager *man, struct drm_printer *p) { drm_printf(p, " use_type: %d\n", man->use_type); @@ -88,14 +88,14 @@ void ttm_mem_type_manager_debug(struct ttm_mem_type_manager *man, if (man->func && man->func->debug) (*man->func->debug)(man, p); } -EXPORT_SYMBOL(ttm_mem_type_manager_debug); +EXPORT_SYMBOL(ttm_resource_manager_debug); static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, struct ttm_placement *placement) { struct drm_printer p = drm_debug_printer(TTM_PFX); int i, ret, mem_type; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n", bo, bo->mem.num_pages, bo->mem.size >> 10, @@ -108,7 +108,7 @@ static void ttm_bo_mem_space_debug(struct 
ttm_buffer_object *bo, drm_printf(&p, " placement[%d]=0x%08X (%d)\n", i, placement->placement[i].flags, mem_type); man = ttm_manager_type(bo->bdev, mem_type); - ttm_mem_type_manager_debug(man, &p); + ttm_resource_manager_debug(man, &p); } } @@ -148,7 +148,7 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; if (!list_empty(&bo->lru)) return; @@ -223,7 +223,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i]; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; if (!pos->first) continue; @@ -238,7 +238,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i]; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; if (!pos->first) continue; @@ -272,8 +272,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type); - struct ttm_mem_type_manager *new_man = ttm_manager_type(bdev, mem->mem_type); + struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type); + struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type); int ret; ret = ttm_mem_io_lock(old_man, true); @@ -551,7 +551,7 @@ static void ttm_bo_release(struct kref *kref) struct ttm_buffer_object *bo = container_of(kref, struct ttm_buffer_object, kref); struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man = ttm_manager_type(bdev, bo->mem.mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, bo->mem.mem_type); size_t acc_size = bo->acc_size; int ret; @@ -768,7 +768,7 @@ static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo, } static int ttm_mem_evict_first(struct ttm_bo_device *bdev, - struct ttm_mem_type_manager *man, + struct ttm_resource_manager *man, const struct ttm_place *place, struct ttm_operation_ctx *ctx, struct ww_acquire_ctx *ticket) @@ -843,7 +843,7 @@ static int ttm_bo_mem_get(struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *mem) { - struct ttm_mem_type_manager *man = ttm_manager_type(bo->bdev, mem->mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, mem->mem_type); mem->mm_node = NULL; if (!man->func || !man->func->get_node) @@ -854,7 +854,7 @@ static int ttm_bo_mem_get(struct ttm_buffer_object *bo, void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) { - struct ttm_mem_type_manager *man = ttm_manager_type(bo->bdev, mem->mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, mem->mem_type); if (!man->func || !man->func->put_node) return; @@ -869,7 +869,7 @@ EXPORT_SYMBOL(ttm_bo_mem_put); * Add the last move fence to the BO and reserve a new shared slot. 
*/ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, - struct ttm_mem_type_manager *man, + struct ttm_resource_manager *man, struct ttm_mem_reg *mem, bool no_wait_gpu) { @@ -909,7 +909,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man = ttm_manager_type(bdev, mem->mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type); struct ww_acquire_ctx *ticket; int ret; @@ -929,7 +929,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu); } -static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, +static uint32_t ttm_bo_select_caching(struct ttm_resource_manager *man, uint32_t cur_placement, uint32_t proposed_placement) { @@ -954,7 +954,7 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, return result; } -static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, +static bool ttm_bo_mt_compatible(struct ttm_resource_manager *man, uint32_t mem_type, const struct ttm_place *place, uint32_t *masked_placement) @@ -991,7 +991,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, { struct ttm_bo_device *bdev = bo->bdev; uint32_t mem_type = TTM_PL_SYSTEM; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; uint32_t cur_flags = 0; int ret; @@ -1000,7 +1000,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, return ret; man = ttm_manager_type(bdev, mem_type); - if (!man || !ttm_mem_type_manager_used(man)) + if (!man || !ttm_resource_manager_used(man)) return -EBUSY; if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags)) @@ -1047,7 +1047,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, for (i = 0; i < placement->num_placement; ++i) { const struct ttm_place *place = &placement->placement[i]; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; ret = ttm_bo_mem_placement(bo, place, mem, ctx); if (ret == -EBUSY) @@ -1404,8 +1404,8 @@ int ttm_bo_create(struct ttm_bo_device *bdev, } EXPORT_SYMBOL(ttm_bo_create); -int ttm_mem_type_manager_force_list_clean(struct ttm_bo_device *bdev, - struct ttm_mem_type_manager *man) +int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev, + struct ttm_resource_manager *man) { struct ttm_operation_ctx ctx = { .interruptible = false, @@ -1447,12 +1447,12 @@ int ttm_mem_type_manager_force_list_clean(struct ttm_bo_device *bdev, return 0; } -EXPORT_SYMBOL(ttm_mem_type_manager_force_list_clean); +EXPORT_SYMBOL(ttm_resource_manager_force_list_clean); int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) { - struct ttm_mem_type_manager *man = ttm_manager_type(bdev, mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, mem_type); if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) { pr_err("Illegal memory manager memory type %u\n", mem_type); @@ -1464,11 +1464,11 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) return 0; } - return ttm_mem_type_manager_force_list_clean(bdev, man); + return ttm_resource_manager_force_list_clean(bdev, man); } EXPORT_SYMBOL(ttm_bo_evict_mm); -void ttm_mem_type_manager_init(struct ttm_mem_type_manager *man, +void ttm_resource_manager_init(struct ttm_resource_manager *man, unsigned long p_size) { unsigned i; @@ -1483,7 +1483,7 @@ void ttm_mem_type_manager_init(struct ttm_mem_type_manager *man, INIT_LIST_HEAD(&man->lru[i]); 
man->move = NULL; } -EXPORT_SYMBOL(ttm_mem_type_manager_init); +EXPORT_SYMBOL(ttm_resource_manager_init); static void ttm_bo_global_kobj_release(struct kobject *kobj) { @@ -1550,10 +1550,10 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev) struct ttm_bo_global *glob = &ttm_bo_glob; int ret = 0; unsigned i; - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; man = ttm_manager_type(bdev, TTM_PL_SYSTEM); - ttm_mem_type_manager_set_used(man, false); + ttm_resource_manager_set_used(man, false); ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL); mutex_lock(&ttm_global_mutex); @@ -1580,7 +1580,7 @@ EXPORT_SYMBOL(ttm_bo_device_release); static void ttm_bo_init_sysman(struct ttm_bo_device *bdev) { - struct ttm_mem_type_manager *man = &bdev->sysman; + struct ttm_resource_manager *man = &bdev->sysman; /* * Initialize the system memory buffer type. @@ -1590,9 +1590,9 @@ static void ttm_bo_init_sysman(struct ttm_bo_device *bdev) man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; - ttm_mem_type_manager_init(man, 0); + ttm_resource_manager_init(man, 0); ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man); - ttm_mem_type_manager_set_used(man, true); + ttm_resource_manager_set_used(man, true); } int ttm_bo_device_init(struct ttm_bo_device *bdev, @@ -1643,7 +1643,7 @@ void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man = ttm_manager_type(bdev, bo->mem.mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, bo->mem.mem_type); ttm_mem_io_lock(man, false); ttm_bo_unmap_virtual_locked(bo); diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 879c8ded0cd8..8ef0de8e36c5 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -91,7 +91,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_move_ttm); -int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible) +int ttm_mem_io_lock(struct ttm_resource_manager *man, bool interruptible) { if (likely(!man->use_io_reserve_lru)) return 0; @@ -103,7 +103,7 @@ int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible) return 0; } -void ttm_mem_io_unlock(struct ttm_mem_type_manager *man) +void ttm_mem_io_unlock(struct ttm_resource_manager *man) { if (likely(!man->use_io_reserve_lru)) return; @@ -111,7 +111,7 @@ void ttm_mem_io_unlock(struct ttm_mem_type_manager *man) mutex_unlock(&man->io_reserve_mutex); } -static int ttm_mem_io_evict(struct ttm_mem_type_manager *man) +static int ttm_mem_io_evict(struct ttm_resource_manager *man) { struct ttm_buffer_object *bo; @@ -129,7 +129,7 @@ static int ttm_mem_io_evict(struct ttm_mem_type_manager *man) int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) { - struct ttm_mem_type_manager *man = ttm_manager_type(bdev, mem->mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type); int ret; if (mem->bus.io_reserved_count++) @@ -162,7 +162,7 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev, int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo) { - struct ttm_mem_type_manager *man = ttm_manager_type(bo->bdev, bo->mem.mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, bo->mem.mem_type); struct ttm_mem_reg *mem = &bo->mem; int ret; @@ -195,7 +195,7 @@ static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg 
*mem, void **virtual) { - struct ttm_mem_type_manager *man = ttm_manager_type(bdev, mem->mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type); int ret; void *addr; @@ -230,7 +230,7 @@ static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, void *virtual) { - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; man = ttm_manager_type(bdev, mem->mem_type); @@ -303,7 +303,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man = ttm_manager_type(bdev, new_mem->mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); struct ttm_tt *ttm = bo->ttm; struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg old_copy = *old_mem; @@ -570,7 +570,7 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, unsigned long num_pages, struct ttm_bo_kmap_obj *map) { - struct ttm_mem_type_manager *man = + struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, bo->mem.mem_type); unsigned long offset, size; int ret; @@ -600,7 +600,7 @@ EXPORT_SYMBOL(ttm_bo_kmap); void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) { struct ttm_buffer_object *bo = map->bo; - struct ttm_mem_type_manager *man = + struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, bo->mem.mem_type); if (!map->virtual) @@ -634,7 +634,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_type_manager *man = ttm_manager_type(bdev, new_mem->mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); struct ttm_mem_reg *old_mem = &bo->mem; int ret; struct ttm_buffer_object *ghost_obj; @@ -697,8 +697,8 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_reg *old_mem = &bo->mem; - struct ttm_mem_type_manager *from = ttm_manager_type(bdev, old_mem->mem_type); - struct ttm_mem_type_manager *to = ttm_manager_type(bdev, new_mem->mem_type); + struct ttm_resource_manager *from = ttm_manager_type(bdev, old_mem->mem_type); + struct ttm_resource_manager *to = ttm_manager_type(bdev, new_mem->mem_type); int ret; diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index db4e21d11967..ba2e8bd198ad 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -281,7 +281,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, pgoff_t i; vm_fault_t ret = VM_FAULT_NOPAGE; unsigned long address = vmf->address; - struct ttm_mem_type_manager *man = + struct ttm_resource_manager *man = ttm_manager_type(bdev, bo->mem.mem_type); /* diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c index 7fddc74b3827..df62177cd913 100644 --- a/drivers/gpu/drm/ttm/ttm_range_manager.c +++ b/drivers/gpu/drm/ttm/ttm_range_manager.c @@ -44,17 +44,17 @@ */ struct ttm_range_manager { - struct ttm_mem_type_manager manager; + struct ttm_resource_manager manager; struct drm_mm mm; spinlock_t lock; }; -static inline struct ttm_range_manager *to_range_manager(struct ttm_mem_type_manager *man) +static inline struct ttm_range_manager *to_range_manager(struct ttm_resource_manager *man) { return container_of(man, struct ttm_range_manager, manager); } -static int ttm_range_man_get_node(struct ttm_mem_type_manager *man, +static int ttm_range_man_get_node(struct 
ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *mem) @@ -95,7 +95,7 @@ static int ttm_range_man_get_node(struct ttm_mem_type_manager *man, return ret; } -static void ttm_range_man_put_node(struct ttm_mem_type_manager *man, +static void ttm_range_man_put_node(struct ttm_resource_manager *man, struct ttm_mem_reg *mem) { struct ttm_range_manager *rman = to_range_manager(man); @@ -110,7 +110,7 @@ static void ttm_range_man_put_node(struct ttm_mem_type_manager *man, } } -static const struct ttm_mem_type_manager_func ttm_range_manager_func; +static const struct ttm_resource_manager_func ttm_range_manager_func; int ttm_range_man_init(struct ttm_bo_device *bdev, unsigned type, @@ -119,7 +119,7 @@ int ttm_range_man_init(struct ttm_bo_device *bdev, bool use_tt, unsigned long p_size) { - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; struct ttm_range_manager *rman; rman = kzalloc(sizeof(*rman), GFP_KERNEL); @@ -133,13 +133,13 @@ int ttm_range_man_init(struct ttm_bo_device *bdev, man->func = &ttm_range_manager_func; - ttm_mem_type_manager_init(man, p_size); + ttm_resource_manager_init(man, p_size); drm_mm_init(&rman->mm, 0, p_size); spin_lock_init(&rman->lock); ttm_set_driver_manager(bdev, type, &rman->manager); - ttm_mem_type_manager_set_used(man, true); + ttm_resource_manager_set_used(man, true); return 0; } EXPORT_SYMBOL(ttm_range_man_init); @@ -147,14 +147,14 @@ EXPORT_SYMBOL(ttm_range_man_init); int ttm_range_man_fini(struct ttm_bo_device *bdev, unsigned type) { - struct ttm_mem_type_manager *man = ttm_manager_type(bdev, type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, type); struct ttm_range_manager *rman = to_range_manager(man); struct drm_mm *mm = &rman->mm; int ret; - ttm_mem_type_manager_set_used(man, false); + ttm_resource_manager_set_used(man, false); - ret = ttm_mem_type_manager_force_list_clean(bdev, man); + ret = ttm_resource_manager_force_list_clean(bdev, man); if (ret) return ret; @@ -163,14 +163,14 @@ int ttm_range_man_fini(struct ttm_bo_device *bdev, drm_mm_takedown(mm); spin_unlock(&rman->lock); - ttm_mem_type_manager_cleanup(man); + ttm_resource_manager_cleanup(man); ttm_set_driver_manager(bdev, type, NULL); kfree(rman); return 0; } EXPORT_SYMBOL(ttm_range_man_fini); -static void ttm_range_man_debug(struct ttm_mem_type_manager *man, +static void ttm_range_man_debug(struct ttm_resource_manager *man, struct drm_printer *printer) { struct ttm_range_manager *rman = to_range_manager(man); @@ -180,7 +180,7 @@ static void ttm_range_man_debug(struct ttm_mem_type_manager *man, spin_unlock(&rman->lock); } -static const struct ttm_mem_type_manager_func ttm_range_manager_func = { +static const struct ttm_resource_manager_func ttm_range_manager_func = { .get_node = ttm_range_man_get_node, .put_node = ttm_range_man_put_node, .debug = ttm_range_man_debug diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index a7b3c8ee7f21..a68ae0204bf5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -630,7 +630,7 @@ static int vmw_vram_manager_init(struct vmw_private *dev_priv) TTM_PL_FLAG_CACHED, TTM_PL_FLAG_CACHED, false, dev_priv->vram_size >> PAGE_SHIFT); #endif - ttm_mem_type_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false); + ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false); return ret; } @@ -1189,12 +1189,12 @@ static void vmw_master_drop(struct drm_device *dev, */ 
static void __vmw_svga_enable(struct vmw_private *dev_priv) { - struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); spin_lock(&dev_priv->svga_lock); - if (!ttm_mem_type_manager_used(man)) { + if (!ttm_resource_manager_used(man)) { vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE); - ttm_mem_type_manager_set_used(man, true); + ttm_resource_manager_set_used(man, true); } spin_unlock(&dev_priv->svga_lock); } @@ -1220,11 +1220,11 @@ void vmw_svga_enable(struct vmw_private *dev_priv) */ static void __vmw_svga_disable(struct vmw_private *dev_priv) { - struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); spin_lock(&dev_priv->svga_lock); - if (ttm_mem_type_manager_used(man)) { - ttm_mem_type_manager_set_used(man, false); + if (ttm_resource_manager_used(man)) { + ttm_resource_manager_set_used(man, false); vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_HIDE | SVGA_REG_ENABLE_ENABLE); @@ -1241,7 +1241,7 @@ static void __vmw_svga_disable(struct vmw_private *dev_priv) */ void vmw_svga_disable(struct vmw_private *dev_priv) { - struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); /* * Disabling SVGA will turn off device modesetting capabilities, so * notify KMS about that so that it doesn't cache atomic state that @@ -1257,8 +1257,8 @@ void vmw_svga_disable(struct vmw_private *dev_priv) vmw_kms_lost_device(dev_priv->dev); ttm_write_lock(&dev_priv->reservation_sem, false); spin_lock(&dev_priv->svga_lock); - if (ttm_mem_type_manager_used(man)) { - ttm_mem_type_manager_set_used(man, false); + if (ttm_resource_manager_used(man)) { + ttm_resource_manager_set_used(man, false); spin_unlock(&dev_priv->svga_lock); if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM)) DRM_ERROR("Failed evicting VRAM buffers.\n"); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index ca5037184814..c8fe6e9cf092 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -37,7 +37,7 @@ #include struct vmwgfx_gmrid_man { - struct ttm_mem_type_manager manager; + struct ttm_resource_manager manager; spinlock_t lock; struct ida gmr_ida; uint32_t max_gmr_ids; @@ -45,12 +45,12 @@ struct vmwgfx_gmrid_man { uint32_t used_gmr_pages; }; -static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_mem_type_manager *man) +static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_resource_manager *man) { return container_of(man, struct vmwgfx_gmrid_man, manager); } -static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, +static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *mem) @@ -84,7 +84,7 @@ nospace: return -ENOSPC; } -static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man, +static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man, struct ttm_mem_reg *mem) { struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man); @@ -98,11 +98,11 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man, } } -static const struct ttm_mem_type_manager_func vmw_gmrid_manager_func; +static const struct ttm_resource_manager_func vmw_gmrid_manager_func; int 
vmw_gmrid_man_init(struct vmw_private *dev_priv, int type) { - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; struct vmwgfx_gmrid_man *gman = kzalloc(sizeof(*gman), GFP_KERNEL); @@ -116,7 +116,7 @@ int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type) man->default_caching = TTM_PL_FLAG_CACHED; /* TODO: This is most likely not correct */ man->use_tt = true; - ttm_mem_type_manager_init(man, 0); + ttm_resource_manager_init(man, 0); spin_lock_init(&gman->lock); gman->used_gmr_pages = 0; ida_init(&gman->gmr_ida); @@ -134,20 +134,20 @@ int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type) BUG(); } ttm_set_driver_manager(&dev_priv->bdev, type, &gman->manager); - ttm_mem_type_manager_set_used(man, true); + ttm_resource_manager_set_used(man, true); return 0; } void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type) { - struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, type); + struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, type); struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man); - ttm_mem_type_manager_set_used(man, false); + ttm_resource_manager_set_used(man, false); - ttm_mem_type_manager_force_list_clean(&dev_priv->bdev, man); + ttm_resource_manager_force_list_clean(&dev_priv->bdev, man); - ttm_mem_type_manager_cleanup(man); + ttm_resource_manager_cleanup(man); ttm_set_driver_manager(&dev_priv->bdev, type, NULL); ida_destroy(&gman->gmr_ida); @@ -155,7 +155,7 @@ void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type) } -static const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = { +static const struct ttm_resource_manager_func vmw_gmrid_manager_func = { .get_node = vmw_gmrid_man_get_node, .put_node = vmw_gmrid_man_put_node, }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c index 4110e8309188..6cac7b091205 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c @@ -16,12 +16,12 @@ * @lock: Manager lock. 
*/ struct vmw_thp_manager { - struct ttm_mem_type_manager manager; + struct ttm_resource_manager manager; struct drm_mm mm; spinlock_t lock; }; -static struct vmw_thp_manager *to_thp_manager(struct ttm_mem_type_manager *man) +static struct vmw_thp_manager *to_thp_manager(struct ttm_resource_manager *man) { return container_of(man, struct vmw_thp_manager, manager); } @@ -44,7 +44,7 @@ static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node, return -ENOSPC; } -static int vmw_thp_get_node(struct ttm_mem_type_manager *man, +static int vmw_thp_get_node(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *mem) @@ -106,7 +106,7 @@ found_unlock: -static void vmw_thp_put_node(struct ttm_mem_type_manager *man, +static void vmw_thp_put_node(struct ttm_resource_manager *man, struct ttm_mem_reg *mem) { struct vmw_thp_manager *rman = to_thp_manager(man); @@ -123,7 +123,7 @@ static void vmw_thp_put_node(struct ttm_mem_type_manager *man, int vmw_thp_init(struct vmw_private *dev_priv) { - struct ttm_mem_type_manager *man; + struct ttm_resource_manager *man; struct vmw_thp_manager *rman; rman = kzalloc(sizeof(*rman), GFP_KERNEL); @@ -134,39 +134,39 @@ int vmw_thp_init(struct vmw_private *dev_priv) man->available_caching = TTM_PL_FLAG_CACHED; man->default_caching = TTM_PL_FLAG_CACHED; - ttm_mem_type_manager_init(man, + ttm_resource_manager_init(man, dev_priv->vram_size >> PAGE_SHIFT); drm_mm_init(&rman->mm, 0, man->size); spin_lock_init(&rman->lock); ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, &rman->manager); - ttm_mem_type_manager_set_used(man, true); + ttm_resource_manager_set_used(man, true); return 0; } void vmw_thp_fini(struct vmw_private *dev_priv) { - struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); struct vmw_thp_manager *rman = to_thp_manager(man); struct drm_mm *mm = &rman->mm; int ret; - ttm_mem_type_manager_set_used(man, false); + ttm_resource_manager_set_used(man, false); - ret = ttm_mem_type_manager_force_list_clean(&dev_priv->bdev, man); + ret = ttm_resource_manager_force_list_clean(&dev_priv->bdev, man); if (ret) return; spin_lock(&rman->lock); drm_mm_clean(mm); drm_mm_takedown(mm); spin_unlock(&rman->lock); - ttm_mem_type_manager_cleanup(man); + ttm_resource_manager_cleanup(man); ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, NULL); kfree(rman); } -static void vmw_thp_debug(struct ttm_mem_type_manager *man, +static void vmw_thp_debug(struct ttm_resource_manager *man, struct drm_printer *printer) { struct vmw_thp_manager *rman = to_thp_manager(man); @@ -176,7 +176,7 @@ static void vmw_thp_debug(struct ttm_mem_type_manager *man, spin_unlock(&rman->lock); } -const struct ttm_mem_type_manager_func vmw_thp_func = { +const struct ttm_resource_manager_func vmw_thp_func = { .get_node = vmw_thp_get_node, .put_node = vmw_thp_put_node, .debug = vmw_thp_debug diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 95d6c648d5c6..7b0655bc13da 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -54,7 +54,7 @@ struct ttm_place; struct ttm_lru_bulk_move; -struct ttm_mem_type_manager; +struct ttm_resource_manager; /** * struct ttm_bus_placement @@ -534,14 +534,14 @@ int ttm_bo_create(struct ttm_bo_device *bdev, unsigned long size, struct ttm_buffer_object **p_bo); /** - * ttm_mem_type_manager_init + * ttm_resource_manager_init * * @man: memory 
manager object to init * @p_size: size managed area in pages. * * Initialise core parts of a manager object. */ -void ttm_mem_type_manager_init(struct ttm_mem_type_manager *man, +void ttm_resource_manager_init(struct ttm_resource_manager *man, unsigned long p_size); /** diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 31264a09ec63..d17e25ba80d4 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -45,11 +45,11 @@ #define TTM_MAX_BO_PRIORITY 4U -struct ttm_mem_type_manager; +struct ttm_resource_manager; -struct ttm_mem_type_manager_func { +struct ttm_resource_manager_func { /** - * struct ttm_mem_type_manager member get_node + * struct ttm_resource_manager member get_node * * @man: Pointer to a memory type manager. * @bo: Pointer to the buffer object we're allocating space for. @@ -69,20 +69,20 @@ struct ttm_mem_type_manager_func { * the function should return a negative error code. * * Note that @mem::mm_node will only be dereferenced by - * struct ttm_mem_type_manager functions and optionally by the driver, + * struct ttm_resource_manager functions and optionally by the driver, * which has knowledge of the underlying type. * * This function may not be called from within atomic context, so * an implementation can and must use either a mutex or a spinlock to * protect any data structures managing the space. */ - int (*get_node)(struct ttm_mem_type_manager *man, + int (*get_node)(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *mem); /** - * struct ttm_mem_type_manager member put_node + * struct ttm_resource_manager member put_node * * @man: Pointer to a memory type manager. * @mem: Pointer to a struct ttm_mem_reg to be filled in. @@ -91,11 +91,11 @@ struct ttm_mem_type_manager_func { * and that are identified by @mem::mm_node and @mem::start. May not * be called from within atomic context. */ - void (*put_node)(struct ttm_mem_type_manager *man, + void (*put_node)(struct ttm_resource_manager *man, struct ttm_mem_reg *mem); /** - * struct ttm_mem_type_manager member debug + * struct ttm_resource_manager member debug * * @man: Pointer to a memory type manager. * @printer: Prefix to be used in printout to identify the caller. @@ -104,12 +104,12 @@ struct ttm_mem_type_manager_func { * type manager to aid debugging of out-of-memory conditions. * It may not be called from within atomic context. */ - void (*debug)(struct ttm_mem_type_manager *man, + void (*debug)(struct ttm_resource_manager *man, struct drm_printer *printer); }; /** - * struct ttm_mem_type_manager + * struct ttm_resource_manager * * @use_type: The memory type is enabled. * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory @@ -136,7 +136,7 @@ struct ttm_mem_type_manager_func { -struct ttm_mem_type_manager { +struct ttm_resource_manager { /* * No protection. Constant from start. */ @@ -145,7 +145,7 @@ struct ttm_mem_type_manager { uint64_t size; uint32_t available_caching; uint32_t default_caching; - const struct ttm_mem_type_manager_func *func; + const struct ttm_resource_manager_func *func; struct mutex io_reserve_mutex; bool use_io_reserve_lru; spinlock_t move_lock; @@ -390,7 +390,7 @@ extern struct ttm_bo_global { * struct ttm_bo_device - Buffer object driver device-specific data. * * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver. - * @man: An array of mem_type_managers. + * @man: An array of resource_managers. 
* @vma_manager: Address space manager (pointer) * lru_lock: Spinlock that protects the buffer+device lru lists and * ddestroy lists. @@ -411,8 +411,8 @@ struct ttm_bo_device { /* * access via ttm_manager_type. */ - struct ttm_mem_type_manager sysman; - struct ttm_mem_type_manager *man_drv[TTM_NUM_MEM_TYPES]; + struct ttm_resource_manager sysman; + struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES]; /* * Protected by internal locks. */ @@ -440,7 +440,7 @@ struct ttm_bo_device { bool no_retry; }; -static inline struct ttm_mem_type_manager *ttm_manager_type(struct ttm_bo_device *bdev, +static inline struct ttm_resource_manager *ttm_manager_type(struct ttm_bo_device *bdev, int mem_type) { return bdev->man_drv[mem_type]; @@ -448,7 +448,7 @@ static inline struct ttm_mem_type_manager *ttm_manager_type(struct ttm_bo_device static inline void ttm_set_driver_manager(struct ttm_bo_device *bdev, int type, - struct ttm_mem_type_manager *manager) + struct ttm_resource_manager *manager) { bdev->man_drv[type] = manager; } @@ -581,8 +581,8 @@ void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo); int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo); void ttm_mem_io_free_vm(struct ttm_buffer_object *bo); -int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible); -void ttm_mem_io_unlock(struct ttm_mem_type_manager *man); +int ttm_mem_io_lock(struct ttm_resource_manager *man, bool interruptible); +void ttm_mem_io_unlock(struct ttm_resource_manager *man); /** * ttm_bo_reserve: @@ -676,7 +676,7 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo) } /** - * ttm_mem_type_manager_set_used + * ttm_resource_manager_set_used * * @man: A memory manager object. * @used: usage state to set. @@ -684,13 +684,13 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo) * Set the manager in use flag. If disabled the manager is no longer * used for object placement. */ -static inline void ttm_mem_type_manager_set_used(struct ttm_mem_type_manager *man, bool used) +static inline void ttm_resource_manager_set_used(struct ttm_resource_manager *man, bool used) { man->use_type = used; } /** - * ttm_mem_type_manager_used + * ttm_resource_manager_used * * @man: Manager to get used state for * @@ -698,26 +698,26 @@ static inline void ttm_mem_type_manager_set_used(struct ttm_mem_type_manager *ma * Returns: * true is used, false if not. */ -static inline bool ttm_mem_type_manager_used(struct ttm_mem_type_manager *man) +static inline bool ttm_resource_manager_used(struct ttm_resource_manager *man) { return man->use_type; } /** - * ttm_mem_type_manager_cleanup + * ttm_resource_manager_cleanup * * @man: A memory manager object. * * Cleanup the move fences from the memory manager object. */ -static inline void ttm_mem_type_manager_cleanup(struct ttm_mem_type_manager *man) +static inline void ttm_resource_manager_cleanup(struct ttm_resource_manager *man) { dma_fence_put(man->move); man->move = NULL; } /* - * ttm_mem_type_manager_force_list_clean + * ttm_resource_manager_force_list_clean * * @bdev - device to use * @man - manager to use @@ -725,8 +725,8 @@ static inline void ttm_mem_type_manager_cleanup(struct ttm_mem_type_manager *man * Force all the objects out of a memory manager until clean. * Part of memory manager cleanup sequence. 
*/ -int ttm_mem_type_manager_force_list_clean(struct ttm_bo_device *bdev, - struct ttm_mem_type_manager *man); +int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev, + struct ttm_resource_manager *man); /* * ttm_bo_util.c @@ -875,12 +875,12 @@ int ttm_range_man_fini(struct ttm_bo_device *bdev, unsigned type); /** - * ttm_mem_type_manager_debug + * ttm_resource_manager_debug * * @man: manager type to dump. * @p: printer to use for debug. */ -void ttm_mem_type_manager_debug(struct ttm_mem_type_manager *man, +void ttm_resource_manager_debug(struct ttm_resource_manager *man, struct drm_printer *p); #endif -- cgit v1.2.3 From 2966141ad2dda23d1b37997de6a4389b7864c169 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 4 Aug 2020 12:56:32 +1000 Subject: drm/ttm: rename ttm_mem_reg to ttm_resource. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This name better reflects what the object does. I didn't rename all the pointers; it seemed too messy. Signed-off-by: Dave Airlie Acked-by: Christian König Reviewed-by: Ben Skeggs Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-60-airlied@gmail.com --- drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 6 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 4 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 46 ++++++++++++------------- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 10 +++--- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 12 +++---- drivers/gpu/drm/drm_gem_vram_helper.c | 6 ++-- drivers/gpu/drm/nouveau/nouveau_bo.c | 46 ++++++++++++------------- drivers/gpu/drm/nouveau/nouveau_drv.h | 2 +- drivers/gpu/drm/nouveau/nouveau_mem.c | 8 ++--- drivers/gpu/drm/nouveau/nouveau_mem.h | 10 +++--- drivers/gpu/drm/nouveau/nouveau_sgdma.c | 4 +-- drivers/gpu/drm/nouveau/nouveau_ttm.c | 8 ++--- drivers/gpu/drm/nouveau/nv17_fence.c | 2 +- drivers/gpu/drm/nouveau/nv50_fence.c | 2 +- drivers/gpu/drm/qxl/qxl_drv.h | 2 +- drivers/gpu/drm/qxl/qxl_ttm.c | 14 ++++---- drivers/gpu/drm/radeon/radeon.h | 2 +- drivers/gpu/drm/radeon/radeon_object.c | 2 +- drivers/gpu/drm/radeon/radeon_object.h | 2 +- drivers/gpu/drm/radeon/radeon_ttm.c | 28 ++++++++-------- drivers/gpu/drm/radeon/radeon_vm.c | 2 +- drivers/gpu/drm/ttm/ttm_agp_backend.c | 2 +- drivers/gpu/drm/ttm/ttm_bo.c | 26 +++++++-------- drivers/gpu/drm/ttm/ttm_bo_util.c | 46 ++++++++++++------------- drivers/gpu/drm/ttm/ttm_range_manager.c | 4 +-- drivers/gpu/drm/ttm/ttm_tt.c | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 4 +-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 4 +-- drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 4 +-- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_thp.c | 6 ++-- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 8 ++--- include/drm/ttm/ttm_bo_api.h | 10 +++--- include/drm/ttm/ttm_bo_driver.h | 48 +++++++++++++-------------- include/drm/ttm/ttm_tt.h | 10 +++--- 37 files changed, 199 insertions(+), 199 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index b9050b7221d5..c847a5fe94c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -171,7 +171,7 @@ void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev) * * Check if a mem object has already address space allocated.
*/ -bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem) +bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem) { return mem->mm_node != NULL; } @@ -189,7 +189,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem) static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man, struct ttm_buffer_object *tbo, const struct ttm_place *place, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); struct amdgpu_gtt_node *node; @@ -252,7 +252,7 @@ err_out: * Free the allocated GTT again. */ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); struct amdgpu_gtt_node *node = mem->mm_node; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index ce98df5b0c21..43f4966331dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1268,11 +1268,11 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, */ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct amdgpu_bo *abo; - struct ttm_mem_reg *old_mem = &bo->mem; + struct ttm_resource *old_mem = &bo->mem; if (!amdgpu_bo_is_amdgpu_bo(bo)) return; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index e01e8903741e..5ddb6cf96030 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -283,7 +283,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, uint64_t *flags); void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict, - struct ttm_mem_reg *new_mem); + struct ttm_resource *new_mem); void amdgpu_bo_release_notify(struct ttm_buffer_object *bo); int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo); void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 2fc0214d9a95..28557839f132 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -182,9 +182,9 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp) * Assign the memory from new_mem to the memory of the buffer object bo. */ static void amdgpu_move_null(struct ttm_buffer_object *bo, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { - struct ttm_mem_reg *old_mem = &bo->mem; + struct ttm_resource *old_mem = &bo->mem; BUG_ON(old_mem->mm_node != NULL); *old_mem = *new_mem; @@ -201,7 +201,7 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo, */ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo, struct drm_mm_node *mm_node, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { uint64_t addr = 0; @@ -221,7 +221,7 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo, * @offset: The offset that drm_mm_node is used for finding. * */ -static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem, +static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_resource *mem, uint64_t *offset) { struct drm_mm_node *mm_node = mem->mm_node; @@ -249,7 +249,7 @@ static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem, * the physical address for local memory. 
*/ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem, + struct ttm_resource *mem, struct drm_mm_node *mm_node, unsigned num_pages, uint64_t offset, unsigned window, struct amdgpu_ring *ring, @@ -473,8 +473,8 @@ error: */ static int amdgpu_move_blit(struct ttm_buffer_object *bo, bool evict, bool no_wait_gpu, - struct ttm_mem_reg *new_mem, - struct ttm_mem_reg *old_mem) + struct ttm_resource *new_mem, + struct ttm_resource *old_mem) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); @@ -533,10 +533,10 @@ error: */ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { - struct ttm_mem_reg *old_mem = &bo->mem; - struct ttm_mem_reg tmp_mem; + struct ttm_resource *old_mem = &bo->mem; + struct ttm_resource tmp_mem; struct ttm_place placements; struct ttm_placement placement; int r; @@ -589,10 +589,10 @@ out_cleanup: */ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { - struct ttm_mem_reg *old_mem = &bo->mem; - struct ttm_mem_reg tmp_mem; + struct ttm_resource *old_mem = &bo->mem; + struct ttm_resource tmp_mem; struct ttm_placement placement; struct ttm_place placements; int r; @@ -635,7 +635,7 @@ out_cleanup: * Called by amdgpu_bo_move() */ static bool amdgpu_mem_visible(struct amdgpu_device *adev, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct drm_mm_node *nodes = mem->mm_node; @@ -645,7 +645,7 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev, if (mem->mem_type != TTM_PL_VRAM) return false; - /* ttm_mem_reg_ioremap only supports contiguous memory */ + /* ttm_resource_ioremap only supports contiguous memory */ if (nodes->size != mem->num_pages) return false; @@ -660,11 +660,11 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev, */ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct amdgpu_device *adev; struct amdgpu_bo *abo; - struct ttm_mem_reg *old_mem = &bo->mem; + struct ttm_resource *old_mem = &bo->mem; int r; /* Can't move a pinned BO */ @@ -746,7 +746,7 @@ memcpy: * * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault() */ -static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem) { struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); struct drm_mm_node *mm_node = mem->mm_node; @@ -770,7 +770,7 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_ return -EINVAL; /* Only physically contiguous buffers apply. In a contiguous * buffer, size of the first mm_node would match the number of - * pages in ttm_mem_reg. + * pages in ttm_resource. */ if (adev->mman.aper_base_kaddr && (mm_node->size == mem->num_pages)) @@ -1115,7 +1115,7 @@ gart_bind_fail: * This handles binding GTT memory to the device address space. 
*/ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, - struct ttm_mem_reg *bo_mem) + struct ttm_resource *bo_mem) { struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); struct amdgpu_ttm_tt *gtt = (void*)ttm; @@ -1166,7 +1166,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct ttm_operation_ctx ctx = { false, false }; struct amdgpu_ttm_tt *gtt = (void*)bo->ttm; - struct ttm_mem_reg tmp; + struct ttm_resource tmp; struct ttm_placement placement; struct ttm_place placements; uint64_t addr, flags; @@ -1507,7 +1507,7 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) * * Figure out the flags to use for a VM PDE (Page Directory Entry). */ -uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem) +uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem) { uint64_t flags = 0; @@ -1533,7 +1533,7 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem) * Figure out the flags to use for a VM PTE (Page Table Entry). */ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 3db29ae1f802..36b024fd077e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -63,7 +63,7 @@ struct amdgpu_mman { struct amdgpu_copy_mem { struct ttm_buffer_object *bo; - struct ttm_mem_reg *mem; + struct ttm_resource *mem; unsigned long offset; }; @@ -72,13 +72,13 @@ void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev); int amdgpu_vram_mgr_init(struct amdgpu_device *adev); void amdgpu_vram_mgr_fini(struct amdgpu_device *adev); -bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem); +bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem); uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man); int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man); u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo); int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, - struct ttm_mem_reg *mem, + struct ttm_resource *mem, struct device *dev, enum dma_data_direction dir, struct sg_table **sgt); @@ -142,9 +142,9 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm, int *last_invalidated); bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm); bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); -uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem); +uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem); uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, - struct ttm_mem_reg *mem); + struct ttm_resource *mem); int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 7417754e9141..920a0553e172 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1765,7 +1765,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, struct amdgpu_vm *vm = bo_va->base.vm; struct amdgpu_bo_va_mapping *mapping; dma_addr_t *pages_addr = NULL; - struct ttm_mem_reg *mem; + struct ttm_resource *mem; struct drm_mm_node *nodes; struct dma_fence **last_update; struct dma_resv *resv; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 6f888a63f22d..895634cbf999 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -272,7 +272,7 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev, u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - struct ttm_mem_reg *mem = &bo->tbo.mem; + struct ttm_resource *mem = &bo->tbo.mem; struct drm_mm_node *nodes = mem->mm_node; unsigned pages = mem->num_pages; u64 usage; @@ -292,13 +292,13 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo) /** * amdgpu_vram_mgr_virt_start - update virtual start address * - * @mem: ttm_mem_reg to update + * @mem: ttm_resource to update * @node: just allocated node * * Calculate a virtual BO start address to easily check if everything is CPU * accessible. */ -static void amdgpu_vram_mgr_virt_start(struct ttm_mem_reg *mem, +static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem, struct drm_mm_node *node) { unsigned long start; @@ -324,7 +324,7 @@ static void amdgpu_vram_mgr_virt_start(struct ttm_mem_reg *mem, static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, struct ttm_buffer_object *tbo, const struct ttm_place *place, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct amdgpu_device *adev = mgr->adev; @@ -442,7 +442,7 @@ error: * Free the allocated VRAM again. */ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct amdgpu_device *adev = mgr->adev; @@ -482,7 +482,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man, * Allocate and fill a sg table from a VRAM allocation. 
*/ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, - struct ttm_mem_reg *mem, + struct ttm_resource *mem, struct device *dev, enum dma_data_direction dir, struct sg_table **sgt) diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index e3660d00987d..b410930d94a0 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -653,7 +653,7 @@ static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo, static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo, bool evict, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct ttm_bo_kmap_obj *kmap = &gbo->kmap; @@ -1020,7 +1020,7 @@ static void bo_driver_evict_flags(struct ttm_buffer_object *bo, static void bo_driver_move_notify(struct ttm_buffer_object *bo, bool evict, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct drm_gem_vram_object *gbo; @@ -1034,7 +1034,7 @@ static void bo_driver_move_notify(struct ttm_buffer_object *bo, } static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev); diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 51416086e2f4..604a74323696 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -679,7 +679,7 @@ nve0_bo_move_init(struct nouveau_channel *chan, u32 handle) static int nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) + struct ttm_resource *old_reg, struct ttm_resource *new_reg) { struct nouveau_mem *mem = nouveau_mem(old_reg); int ret = RING_SPACE(chan, 10); @@ -711,7 +711,7 @@ nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle) static int nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) + struct ttm_resource *old_reg, struct ttm_resource *new_reg) { struct nouveau_mem *mem = nouveau_mem(old_reg); u64 src_offset = mem->vma[0].addr; @@ -749,7 +749,7 @@ nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, static int nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) + struct ttm_resource *old_reg, struct ttm_resource *new_reg) { struct nouveau_mem *mem = nouveau_mem(old_reg); u64 src_offset = mem->vma[0].addr; @@ -788,7 +788,7 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, static int nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) + struct ttm_resource *old_reg, struct ttm_resource *new_reg) { struct nouveau_mem *mem = nouveau_mem(old_reg); u64 src_offset = mem->vma[0].addr; @@ -826,7 +826,7 @@ nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, static int nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo, - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) + struct ttm_resource *old_reg, struct ttm_resource *new_reg) { struct nouveau_mem *mem = nouveau_mem(old_reg); int ret = RING_SPACE(chan, 7); @@ -844,7 +844,7 @@ nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo, static int nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object 
*bo, - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) + struct ttm_resource *old_reg, struct ttm_resource *new_reg) { struct nouveau_mem *mem = nouveau_mem(old_reg); int ret = RING_SPACE(chan, 7); @@ -878,7 +878,7 @@ nv50_bo_move_init(struct nouveau_channel *chan, u32 handle) static int nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) + struct ttm_resource *old_reg, struct ttm_resource *new_reg) { struct nouveau_mem *mem = nouveau_mem(old_reg); u64 length = (new_reg->num_pages << PAGE_SHIFT); @@ -965,7 +965,7 @@ nv04_bo_move_init(struct nouveau_channel *chan, u32 handle) static inline uint32_t nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo, - struct nouveau_channel *chan, struct ttm_mem_reg *reg) + struct nouveau_channel *chan, struct ttm_resource *reg) { if (reg->mem_type == TTM_PL_TT) return NvDmaTT; @@ -974,7 +974,7 @@ nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo, static int nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) + struct ttm_resource *old_reg, struct ttm_resource *new_reg) { u32 src_offset = old_reg->start << PAGE_SHIFT; u32 dst_offset = new_reg->start << PAGE_SHIFT; @@ -1020,7 +1020,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, static int nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo, - struct ttm_mem_reg *reg) + struct ttm_resource *reg) { struct nouveau_mem *old_mem = nouveau_mem(&bo->mem); struct nouveau_mem *new_mem = nouveau_mem(reg); @@ -1052,7 +1052,7 @@ done: static int nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, - bool no_wait_gpu, struct ttm_mem_reg *new_reg) + bool no_wait_gpu, struct ttm_resource *new_reg) { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct nouveau_channel *chan = drm->ttm.chan; @@ -1062,7 +1062,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, /* create temporary vmas for the transfer and attach them to the * old nvkm_mem node, these will get cleaned up after ttm has - * destroyed the ttm_mem_reg + * destroyed the ttm_resource */ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { ret = nouveau_bo_move_prep(drm, bo, new_reg); @@ -1098,7 +1098,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm) s32 oclass; int (*exec)(struct nouveau_channel *, struct ttm_buffer_object *, - struct ttm_mem_reg *, struct ttm_mem_reg *); + struct ttm_resource *, struct ttm_resource *); int (*init)(struct nouveau_channel *, u32 handle); } _methods[] = { { "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init }, @@ -1160,7 +1160,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm) static int nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, - bool no_wait_gpu, struct ttm_mem_reg *new_reg) + bool no_wait_gpu, struct ttm_resource *new_reg) { struct ttm_operation_ctx ctx = { intr, no_wait_gpu }; struct ttm_place placement_memtype = { @@ -1169,7 +1169,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING }; struct ttm_placement placement; - struct ttm_mem_reg tmp_reg; + struct ttm_resource tmp_reg; int ret; placement.num_placement = placement.num_busy_placement = 1; @@ -1197,7 +1197,7 @@ out: static int nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, - bool no_wait_gpu, struct ttm_mem_reg *new_reg) + bool 
no_wait_gpu, struct ttm_resource *new_reg) { struct ttm_operation_ctx ctx = { intr, no_wait_gpu }; struct ttm_place placement_memtype = { @@ -1206,7 +1206,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING }; struct ttm_placement placement; - struct ttm_mem_reg tmp_reg; + struct ttm_resource tmp_reg; int ret; placement.num_placement = placement.num_busy_placement = 1; @@ -1233,7 +1233,7 @@ out: static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict, - struct ttm_mem_reg *new_reg) + struct ttm_resource *new_reg) { struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL; struct nouveau_bo *nvbo = nouveau_bo(bo); @@ -1265,7 +1265,7 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict, } static int -nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg, +nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg, struct nouveau_drm_tile **new_tile) { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); @@ -1301,11 +1301,11 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, static int nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_reg) + struct ttm_resource *new_reg) { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct nouveau_bo *nvbo = nouveau_bo(bo); - struct ttm_mem_reg *old_reg = &bo->mem; + struct ttm_resource *old_reg = &bo->mem; struct nouveau_drm_tile *new_tile = NULL; int ret = 0; @@ -1374,7 +1374,7 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) } static int -nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg) +nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg) { struct nouveau_drm *drm = nouveau_bdev(bdev); struct nvkm_device *device = nvxx_device(&drm->client.device); @@ -1454,7 +1454,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg) } static void -nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg) +nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg) { struct nouveau_drm *drm = nouveau_bdev(bdev); struct nouveau_mem *mem = nouveau_mem(reg); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 2a6519737800..b4314c01e313 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -155,7 +155,7 @@ struct nouveau_drm { atomic_t validate_sequence; int (*move)(struct nouveau_channel *, struct ttm_buffer_object *, - struct ttm_mem_reg *, struct ttm_mem_reg *); + struct ttm_resource *, struct ttm_resource *); struct nouveau_channel *chan; struct nvif_object copy; int mtrr; diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index c002f8968507..9559f925bb53 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -92,7 +92,7 @@ nouveau_mem_fini(struct nouveau_mem *mem) } int -nouveau_mem_host(struct ttm_mem_reg *reg, struct ttm_dma_tt *tt) +nouveau_mem_host(struct ttm_resource *reg, struct ttm_dma_tt *tt) { struct nouveau_mem *mem = nouveau_mem(reg); struct nouveau_cli *cli = mem->cli; @@ -130,7 +130,7 @@ nouveau_mem_host(struct ttm_mem_reg *reg, struct ttm_dma_tt *tt) } int -nouveau_mem_vram(struct ttm_mem_reg *reg, bool contig, u8 page) +nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page) { struct nouveau_mem *mem = 
nouveau_mem(reg); struct nouveau_cli *cli = mem->cli; @@ -173,7 +173,7 @@ nouveau_mem_vram(struct ttm_mem_reg *reg, bool contig, u8 page) } void -nouveau_mem_del(struct ttm_mem_reg *reg) +nouveau_mem_del(struct ttm_resource *reg) { struct nouveau_mem *mem = nouveau_mem(reg); nouveau_mem_fini(mem); @@ -183,7 +183,7 @@ nouveau_mem_del(struct ttm_mem_reg *reg) int nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp, - struct ttm_mem_reg *reg) + struct ttm_resource *reg) { struct nouveau_mem *mem; diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h index f6d039e73812..3fe1cfed57a1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.h +++ b/drivers/gpu/drm/nouveau/nouveau_mem.h @@ -7,7 +7,7 @@ struct ttm_dma_tt; #include static inline struct nouveau_mem * -nouveau_mem(struct ttm_mem_reg *reg) +nouveau_mem(struct ttm_resource *reg) { return reg->mm_node; } @@ -21,10 +21,10 @@ struct nouveau_mem { }; int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp, - struct ttm_mem_reg *); -void nouveau_mem_del(struct ttm_mem_reg *); -int nouveau_mem_vram(struct ttm_mem_reg *, bool contig, u8 page); -int nouveau_mem_host(struct ttm_mem_reg *, struct ttm_dma_tt *); + struct ttm_resource *); +void nouveau_mem_del(struct ttm_resource *); +int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page); +int nouveau_mem_host(struct ttm_resource *, struct ttm_dma_tt *); void nouveau_mem_fini(struct nouveau_mem *); int nouveau_mem_map(struct nouveau_mem *, struct nvif_vmm *, struct nvif_vma *); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index feaac908efed..1ec97f5c3cf5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -26,7 +26,7 @@ nouveau_sgdma_destroy(struct ttm_tt *ttm) } static int -nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg) +nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_resource *reg) { struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; struct nouveau_mem *mem = nouveau_mem(reg); @@ -61,7 +61,7 @@ static struct ttm_backend_func nv04_sgdma_backend = { }; static int -nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg) +nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_resource *reg) { struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; struct nouveau_mem *mem = nouveau_mem(reg); diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 78b5a87b9855..e6a30865a00b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -32,7 +32,7 @@ #include static void -nouveau_manager_del(struct ttm_resource_manager *man, struct ttm_mem_reg *reg) +nouveau_manager_del(struct ttm_resource_manager *man, struct ttm_resource *reg) { nouveau_mem_del(reg); } @@ -41,7 +41,7 @@ static int nouveau_vram_manager_new(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_mem_reg *reg) + struct ttm_resource *reg) { struct nouveau_bo *nvbo = nouveau_bo(bo); struct nouveau_drm *drm = nouveau_bdev(bo->bdev); @@ -72,7 +72,7 @@ static int nouveau_gart_manager_new(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_mem_reg *reg) + struct ttm_resource *reg) { struct nouveau_bo *nvbo = nouveau_bo(bo); struct nouveau_drm *drm = nouveau_bdev(bo->bdev); @@ -95,7 +95,7 @@ static int nv04_gart_manager_new(struct ttm_resource_manager *man, struct 
ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_mem_reg *reg) + struct ttm_resource *reg) { struct nouveau_bo *nvbo = nouveau_bo(bo); struct nouveau_drm *drm = nouveau_bdev(bo->bdev); diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c index 5d613d43b84d..5121124267ff 100644 --- a/drivers/gpu/drm/nouveau/nv17_fence.c +++ b/drivers/gpu/drm/nouveau/nv17_fence.c @@ -76,7 +76,7 @@ nv17_fence_context_new(struct nouveau_channel *chan) { struct nv10_fence_priv *priv = chan->drm->fence; struct nv10_fence_chan *fctx; - struct ttm_mem_reg *reg = &priv->bo->bo.mem; + struct ttm_resource *reg = &priv->bo->bo.mem; u32 start = reg->start * PAGE_SIZE; u32 limit = start + reg->size - 1; int ret = 0; diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c index a00ecc3de053..d7288691a874 100644 --- a/drivers/gpu/drm/nouveau/nv50_fence.c +++ b/drivers/gpu/drm/nouveau/nv50_fence.c @@ -37,7 +37,7 @@ nv50_fence_context_new(struct nouveau_channel *chan) { struct nv10_fence_priv *priv = chan->drm->fence; struct nv10_fence_chan *fctx; - struct ttm_mem_reg *reg = &priv->bo->bo.mem; + struct ttm_resource *reg = &priv->bo->bo.mem; u32 start = reg->start * PAGE_SIZE; u32 limit = start + reg->size - 1; int ret; diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 9691449aefdb..aae90a9ee1db 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -350,7 +350,7 @@ int qxl_mode_dumb_mmap(struct drm_file *filp, int qxl_ttm_init(struct qxl_device *qdev); void qxl_ttm_fini(struct qxl_device *qdev); int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem); + struct ttm_resource *mem); /* qxl image */ diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 727049046014..b1ea984f143a 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -71,7 +71,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo, } int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct qxl_device *qdev = qxl_get_qdev(bdev); @@ -111,7 +111,7 @@ struct qxl_ttm_tt { }; static int qxl_ttm_backend_bind(struct ttm_tt *ttm, - struct ttm_mem_reg *bo_mem) + struct ttm_resource *bo_mem) { struct qxl_ttm_tt *gtt = (void *)ttm; @@ -164,9 +164,9 @@ static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo, } static void qxl_move_null(struct ttm_buffer_object *bo, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { - struct ttm_mem_reg *old_mem = &bo->mem; + struct ttm_resource *old_mem = &bo->mem; BUG_ON(old_mem->mm_node != NULL); *old_mem = *new_mem; @@ -175,9 +175,9 @@ static void qxl_move_null(struct ttm_buffer_object *bo, static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { - struct ttm_mem_reg *old_mem = &bo->mem; + struct ttm_resource *old_mem = &bo->mem; int ret; ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu); @@ -193,7 +193,7 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict, static void qxl_bo_move_notify(struct ttm_buffer_object *bo, bool evict, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct qxl_bo *qbo; struct qxl_device *qdev; diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index b7c3fb2bfb54..cc4f58d16589 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ 
b/drivers/gpu/drm/radeon/radeon.h @@ -2857,7 +2857,7 @@ int radeon_vm_clear_invalids(struct radeon_device *rdev, struct radeon_vm *vm); int radeon_vm_bo_update(struct radeon_device *rdev, struct radeon_bo_va *bo_va, - struct ttm_mem_reg *mem); + struct ttm_resource *mem); void radeon_vm_bo_invalidate(struct radeon_device *rdev, struct radeon_bo *bo); struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm, diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index f3dee01250da..bb7582afd803 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -775,7 +775,7 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, void radeon_bo_move_notify(struct ttm_buffer_object *bo, bool evict, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct radeon_bo *rbo; diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 60275b822f79..44b47241ee42 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -165,7 +165,7 @@ extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, bool force_drop); extern void radeon_bo_move_notify(struct ttm_buffer_object *bo, bool evict, - struct ttm_mem_reg *new_mem); + struct ttm_resource *new_mem); extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); extern int radeon_bo_get_surface_reg(struct radeon_bo *bo); extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence, diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 21a01737b1be..3355b69b13d1 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -166,9 +166,9 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp) } static void radeon_move_null(struct ttm_buffer_object *bo, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { - struct ttm_mem_reg *old_mem = &bo->mem; + struct ttm_resource *old_mem = &bo->mem; BUG_ON(old_mem->mm_node != NULL); *old_mem = *new_mem; @@ -177,8 +177,8 @@ static void radeon_move_null(struct ttm_buffer_object *bo, static int radeon_move_blit(struct ttm_buffer_object *bo, bool evict, bool no_wait_gpu, - struct ttm_mem_reg *new_mem, - struct ttm_mem_reg *old_mem) + struct ttm_resource *new_mem, + struct ttm_resource *old_mem) { struct radeon_device *rdev; uint64_t old_start, new_start; @@ -233,11 +233,11 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, static int radeon_move_vram_ram(struct ttm_buffer_object *bo, bool evict, bool interruptible, bool no_wait_gpu, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu }; - struct ttm_mem_reg *old_mem = &bo->mem; - struct ttm_mem_reg tmp_mem; + struct ttm_resource *old_mem = &bo->mem; + struct ttm_resource tmp_mem; struct ttm_place placements; struct ttm_placement placement; int r; @@ -278,11 +278,11 @@ out_cleanup: static int radeon_move_ram_vram(struct ttm_buffer_object *bo, bool evict, bool interruptible, bool no_wait_gpu, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu }; - struct ttm_mem_reg *old_mem = &bo->mem; - struct ttm_mem_reg tmp_mem; + struct ttm_resource *old_mem = &bo->mem; + struct ttm_resource tmp_mem; struct ttm_placement placement; struct ttm_place placements; int r; @@ -315,11 +315,11 @@ out_cleanup: 
static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct radeon_device *rdev; struct radeon_bo *rbo; - struct ttm_mem_reg *old_mem = &bo->mem; + struct ttm_resource *old_mem = &bo->mem; int r; r = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu); @@ -376,7 +376,7 @@ memcpy: return 0; } -static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem) { struct radeon_device *rdev = radeon_get_rdev(bdev); @@ -544,7 +544,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm) } static int radeon_ttm_backend_bind(struct ttm_tt *ttm, - struct ttm_mem_reg *bo_mem) + struct ttm_resource *bo_mem) { struct radeon_ttm_tt *gtt = (void*)ttm; uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ | diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index f60fae0aed11..71e2c3785ab9 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -911,7 +911,7 @@ static void radeon_vm_fence_pts(struct radeon_vm *vm, */ int radeon_vm_bo_update(struct radeon_device *rdev, struct radeon_bo_va *bo_va, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct radeon_vm *vm = bo_va->vm; struct radeon_ib ib; diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c index 6050dc846894..8f24663c3df3 100644 --- a/drivers/gpu/drm/ttm/ttm_agp_backend.c +++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c @@ -48,7 +48,7 @@ struct ttm_agp_backend { struct agp_bridge_data *bridge; }; -static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) +static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem) { struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm); struct page *dummy_read_page = ttm_bo_glob.dummy_read_page; diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index ff68f25ddbd4..ad09329b62d3 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -145,7 +145,7 @@ static inline uint32_t ttm_bo_type_flags(unsigned type) } static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_resource_manager *man; @@ -268,7 +268,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail); static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem, bool evict, + struct ttm_resource *mem, bool evict, struct ttm_operation_ctx *ctx) { struct ttm_bo_device *bdev = bo->bdev; @@ -642,7 +642,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_reg evict_mem; + struct ttm_resource evict_mem; struct ttm_placement placement; int ret = 0; @@ -841,7 +841,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev, static int ttm_bo_mem_get(struct ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, mem->mem_type); @@ -852,7 +852,7 @@ static int ttm_bo_mem_get(struct ttm_buffer_object *bo, return man->func->get_node(man, bo, place, mem); } -void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct 
ttm_mem_reg *mem) +void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_resource *mem) { struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, mem->mem_type); @@ -870,7 +870,7 @@ EXPORT_SYMBOL(ttm_bo_mem_put); */ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, struct ttm_resource_manager *man, - struct ttm_mem_reg *mem, + struct ttm_resource *mem, bool no_wait_gpu) { struct dma_fence *fence; @@ -905,7 +905,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, */ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_mem_reg *mem, + struct ttm_resource *mem, struct ttm_operation_ctx *ctx) { struct ttm_bo_device *bdev = bo->bdev; @@ -986,7 +986,7 @@ static bool ttm_bo_mt_compatible(struct ttm_resource_manager *man, */ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_mem_reg *mem, + struct ttm_resource *mem, struct ttm_operation_ctx *ctx) { struct ttm_bo_device *bdev = bo->bdev; @@ -1034,7 +1034,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, */ int ttm_bo_mem_space(struct ttm_buffer_object *bo, struct ttm_placement *placement, - struct ttm_mem_reg *mem, + struct ttm_resource *mem, struct ttm_operation_ctx *ctx) { struct ttm_bo_device *bdev = bo->bdev; @@ -1112,7 +1112,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx) { int ret = 0; - struct ttm_mem_reg mem; + struct ttm_resource mem; dma_resv_assert_held(bo->base.resv); @@ -1138,7 +1138,7 @@ out_unlock: static bool ttm_bo_places_compat(const struct ttm_place *places, unsigned num_placement, - struct ttm_mem_reg *mem, + struct ttm_resource *mem, uint32_t *new_flags) { unsigned i; @@ -1161,7 +1161,7 @@ static bool ttm_bo_places_compat(const struct ttm_place *places, } bool ttm_bo_mem_compat(struct ttm_placement *placement, - struct ttm_mem_reg *mem, + struct ttm_resource *mem, uint32_t *new_flags) { if (ttm_bo_places_compat(placement->placement, placement->num_placement, @@ -1730,7 +1730,7 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx) if (bo->mem.mem_type != TTM_PL_SYSTEM || bo->ttm->caching_state != tt_cached) { struct ttm_operation_ctx ctx = { false, false }; - struct ttm_mem_reg evict_mem; + struct ttm_resource evict_mem; evict_mem = bo->mem; evict_mem.mm_node = NULL; diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 8ef0de8e36c5..496158acd5b9 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -52,10 +52,10 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo) int ttm_bo_move_ttm(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct ttm_tt *ttm = bo->ttm; - struct ttm_mem_reg *old_mem = &bo->mem; + struct ttm_resource *old_mem = &bo->mem; int ret; if (old_mem->mem_type != TTM_PL_SYSTEM) { @@ -127,7 +127,7 @@ static int ttm_mem_io_evict(struct ttm_resource_manager *man) } int ttm_mem_io_reserve(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type); int ret; @@ -149,7 +149,7 @@ retry: } void ttm_mem_io_free(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { if (--mem->bus.io_reserved_count) return; @@ -163,7 +163,7 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev, int ttm_mem_io_reserve_vm(struct 
ttm_buffer_object *bo) { struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, bo->mem.mem_type); - struct ttm_mem_reg *mem = &bo->mem; + struct ttm_resource *mem = &bo->mem; int ret; if (mem->bus.io_reserved_vm) @@ -181,7 +181,7 @@ int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo) void ttm_mem_io_free_vm(struct ttm_buffer_object *bo) { - struct ttm_mem_reg *mem = &bo->mem; + struct ttm_resource *mem = &bo->mem; if (!mem->bus.io_reserved_vm) return; @@ -191,8 +191,8 @@ void ttm_mem_io_free_vm(struct ttm_buffer_object *bo) ttm_mem_io_free(bo->bdev, mem); } -static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem, +static int ttm_resource_ioremap(struct ttm_bo_device *bdev, + struct ttm_resource *mem, void **virtual) { struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type); @@ -226,8 +226,8 @@ static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, return 0; } -static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem, +static void ttm_resource_iounmap(struct ttm_bo_device *bdev, + struct ttm_resource *mem, void *virtual) { struct ttm_resource_manager *man; @@ -300,13 +300,13 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); struct ttm_tt *ttm = bo->ttm; - struct ttm_mem_reg *old_mem = &bo->mem; - struct ttm_mem_reg old_copy = *old_mem; + struct ttm_resource *old_mem = &bo->mem; + struct ttm_resource old_copy = *old_mem; void *old_iomap; void *new_iomap; int ret; @@ -319,10 +319,10 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, if (ret) return ret; - ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap); + ret = ttm_resource_ioremap(bdev, old_mem, &old_iomap); if (ret) return ret; - ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap); + ret = ttm_resource_ioremap(bdev, new_mem, &new_iomap); if (ret) goto out; @@ -390,9 +390,9 @@ out2: } out1: - ttm_mem_reg_iounmap(bdev, old_mem, new_iomap); + ttm_resource_iounmap(bdev, old_mem, new_iomap); out: - ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap); + ttm_resource_iounmap(bdev, &old_copy, old_iomap); /* * On error, keep the mm node! 
@@ -502,7 +502,7 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo, unsigned long size, struct ttm_bo_kmap_obj *map) { - struct ttm_mem_reg *mem = &bo->mem; + struct ttm_resource *mem = &bo->mem; if (bo->mem.bus.addr) { map->bo_kmap_type = ttm_bo_map_premapped; @@ -526,7 +526,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo, unsigned long num_pages, struct ttm_bo_kmap_obj *map) { - struct ttm_mem_reg *mem = &bo->mem; + struct ttm_resource *mem = &bo->mem; struct ttm_operation_ctx ctx = { .interruptible = false, .no_wait_gpu = false @@ -631,11 +631,11 @@ EXPORT_SYMBOL(ttm_bo_kunmap); int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, struct dma_fence *fence, bool evict, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); - struct ttm_mem_reg *old_mem = &bo->mem; + struct ttm_resource *old_mem = &bo->mem; int ret; struct ttm_buffer_object *ghost_obj; @@ -692,10 +692,10 @@ EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, struct dma_fence *fence, bool evict, - struct ttm_mem_reg *new_mem) + struct ttm_resource *new_mem) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_mem_reg *old_mem = &bo->mem; + struct ttm_resource *old_mem = &bo->mem; struct ttm_resource_manager *from = ttm_manager_type(bdev, old_mem->mem_type); struct ttm_resource_manager *to = ttm_manager_type(bdev, new_mem->mem_type); diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c index df62177cd913..274a05ca13d3 100644 --- a/drivers/gpu/drm/ttm/ttm_range_manager.c +++ b/drivers/gpu/drm/ttm/ttm_range_manager.c @@ -57,7 +57,7 @@ static inline struct ttm_range_manager *to_range_manager(struct ttm_resource_man static int ttm_range_man_get_node(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct ttm_range_manager *rman = to_range_manager(man); struct drm_mm *mm = &rman->mm; @@ -96,7 +96,7 @@ static int ttm_range_man_get_node(struct ttm_resource_manager *man, } static void ttm_range_man_put_node(struct ttm_resource_manager *man, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct ttm_range_manager *rman = to_range_manager(man); diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index e25d4097aa16..bdd6169cef13 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -322,7 +322,7 @@ void ttm_tt_unbind(struct ttm_tt *ttm) } } -int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem, +int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem, struct ttm_operation_ctx *ctx) { int ret = 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index 1e59c019affa..3229451d0706 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -1135,14 +1135,14 @@ void vmw_bo_swap_notify(struct ttm_buffer_object *bo) * vmw_bo_move_notify - TTM move_notify_callback * * @bo: The TTM buffer object about to move. - * @mem: The struct ttm_mem_reg indicating to what memory + * @mem: The struct ttm_resource indicating to what memory * region the move is taking place. * * Detaches cached maps and device bindings that require that the * buffer doesn't move. 
*/ void vmw_bo_move_notify(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct vmw_buffer_object *vbo; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index aa763c6b1146..871ad738dadb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -793,7 +793,7 @@ extern void vmw_resource_unreserve(struct vmw_resource *res, struct vmw_buffer_object *new_backup, unsigned long new_backup_offset); extern void vmw_query_move_notify(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem); + struct ttm_resource *mem); extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob); extern void vmw_resource_evict_all(struct vmw_private *dev_priv); extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo); @@ -878,7 +878,7 @@ extern void vmw_bo_fence_single(struct ttm_buffer_object *bo, extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo); extern void vmw_bo_unmap(struct vmw_buffer_object *vbo); extern void vmw_bo_move_notify(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem); + struct ttm_resource *mem); extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo); extern struct vmw_buffer_object * vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index c8fe6e9cf092..3fea7a6c7cfa 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -53,7 +53,7 @@ static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_resource_manager *ma static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man); int id; @@ -85,7 +85,7 @@ nospace: } static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index c8441030637a..c0f156078dda 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -855,7 +855,7 @@ int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob) * states from the device. 
*/ void vmw_query_move_notify(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct vmw_buffer_object *dx_query_mob; struct ttm_bo_device *bdev = bo->bdev; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c index 6cac7b091205..f594e2e6ab7e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c @@ -29,7 +29,7 @@ static struct vmw_thp_manager *to_thp_manager(struct ttm_resource_manager *man) static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node, unsigned long align_pages, const struct ttm_place *place, - struct ttm_mem_reg *mem, + struct ttm_resource *mem, unsigned long lpfn, enum drm_mm_insert_mode mode) { @@ -47,7 +47,7 @@ static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node, static int vmw_thp_get_node(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct vmw_thp_manager *rman = to_thp_manager(man); struct drm_mm *mm = &rman->mm; @@ -107,7 +107,7 @@ found_unlock: static void vmw_thp_put_node(struct ttm_resource_manager *man, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { struct vmw_thp_manager *rman = to_thp_manager(man); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index 01c81e89ed7a..69e7e7fe2a4c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -539,7 +539,7 @@ const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo) } -static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) +static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem) { struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); @@ -713,7 +713,7 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp) return vmw_user_bo_verify_access(bo, tfile); } -static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem) { struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev); @@ -743,7 +743,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg * vmw_move_notify - TTM move_notify_callback * * @bo: The TTM buffer object about to move. - * @mem: The struct ttm_mem_reg indicating to what memory + * @mem: The struct ttm_resource indicating to what memory * region the move is taking place. * * Calls move_notify for all subsystems needing it. @@ -751,7 +751,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg */ static void vmw_move_notify(struct ttm_buffer_object *bo, bool evict, - struct ttm_mem_reg *mem) + struct ttm_resource *mem) { vmw_bo_move_notify(bo, mem); vmw_query_move_notify(bo, mem); diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 7b0655bc13da..770ad2195875 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -81,7 +81,7 @@ struct ttm_bus_placement { /** - * struct ttm_mem_reg + * struct ttm_resource * * @mm_node: Memory manager node. * @size: Requested size of memory region. @@ -94,7 +94,7 @@ struct ttm_bus_placement { * buffer object. 
*/ -struct ttm_mem_reg { +struct ttm_resource { void *mm_node; unsigned long start; unsigned long size; @@ -187,7 +187,7 @@ struct ttm_buffer_object { * Members protected by the bo::resv::reserved lock. */ - struct ttm_mem_reg mem; + struct ttm_resource mem; struct file *persistent_swap_storage; struct ttm_tt *ttm; bool evicted; @@ -316,12 +316,12 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, bool interruptible, bool no_wait); * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo * * @placement: Return immediately if buffer is busy. - * @mem: The struct ttm_mem_reg indicating the region where the bo resides + * @mem: The struct ttm_resource indicating the region where the bo resides * @new_flags: Describes compatible placement found * * Returns true if the placement is compatible */ -bool ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_mem_reg *mem, +bool ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_resource *mem, uint32_t *new_flags); /** diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index d17e25ba80d4..eb1c3312e175 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -55,7 +55,7 @@ struct ttm_resource_manager_func { * @bo: Pointer to the buffer object we're allocating space for. * @placement: Placement details. * @flags: Additional placement flags. - * @mem: Pointer to a struct ttm_mem_reg to be filled in. + * @mem: Pointer to a struct ttm_resource to be filled in. * * This function should allocate space in the memory type managed * by @man. Placement details if @@ -79,20 +79,20 @@ struct ttm_resource_manager_func { int (*get_node)(struct ttm_resource_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_mem_reg *mem); + struct ttm_resource *mem); /** * struct ttm_resource_manager member put_node * * @man: Pointer to a memory type manager. - * @mem: Pointer to a struct ttm_mem_reg to be filled in. + * @mem: Pointer to a struct ttm_resource to be filled in. * * This function frees memory type resources previously allocated * and that are identified by @mem::mm_node and @mem::start. May not * be called from within atomic context. */ void (*put_node)(struct ttm_resource_manager *man, - struct ttm_mem_reg *mem); + struct ttm_resource *mem); /** * struct ttm_resource_manager member debug @@ -251,7 +251,7 @@ struct ttm_bo_driver { */ int (*move)(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_mem); + struct ttm_resource *new_mem); /** * struct ttm_bo_driver_member verify_access @@ -277,7 +277,7 @@ struct ttm_bo_driver { */ void (*move_notify)(struct ttm_buffer_object *bo, bool evict, - struct ttm_mem_reg *new_mem); + struct ttm_resource *new_mem); /* notify the driver we are taking a fault on this BO * and have reserved it */ int (*fault_reserve_notify)(struct ttm_buffer_object *bo); @@ -294,9 +294,9 @@ struct ttm_bo_driver { * are balanced. */ int (*io_mem_reserve)(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem); + struct ttm_resource *mem); void (*io_mem_free)(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem); + struct ttm_resource *mem); /** * Return the pfn for a given page_offset inside the BO. @@ -503,15 +503,15 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask) */ /** - * ttm_mem_reg_is_pci + * ttm_resource_is_pci * * @bdev: Pointer to a struct ttm_bo_device. - * @mem: A valid struct ttm_mem_reg. + * @mem: A valid struct ttm_resource. 
* * Returns true if the memory described by @mem is PCI memory, * false otherwise. */ -bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem); +bool ttm_resource_is_pci(struct ttm_bo_device *bdev, struct ttm_resource *mem); /** * ttm_bo_mem_space @@ -519,7 +519,7 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem); * @bo: Pointer to a struct ttm_buffer_object. the data of which * we want to allocate space for. * @proposed_placement: Proposed new placement for the buffer object. - * @mem: A struct ttm_mem_reg. + * @mem: A struct ttm_resource. * @interruptible: Sleep interruptible when sliping. * @no_wait_gpu: Return immediately if the GPU is busy. * @@ -534,10 +534,10 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem); */ int ttm_bo_mem_space(struct ttm_buffer_object *bo, struct ttm_placement *placement, - struct ttm_mem_reg *mem, + struct ttm_resource *mem, struct ttm_operation_ctx *ctx); -void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem); +void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_resource *mem); int ttm_bo_device_release(struct ttm_bo_device *bdev); @@ -733,16 +733,16 @@ int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev, */ int ttm_mem_io_reserve(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem); + struct ttm_resource *mem); void ttm_mem_io_free(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem); + struct ttm_resource *mem); /** * ttm_bo_move_ttm * * @bo: A pointer to a struct ttm_buffer_object. * @interruptible: Sleep interruptible if waiting. * @no_wait_gpu: Return immediately if the GPU is busy. - * @new_mem: struct ttm_mem_reg indicating where to move. + * @new_mem: struct ttm_resource indicating where to move. * * Optimized move function for a buffer object with both old and * new placement backed by a TTM. The function will, if successful, @@ -756,7 +756,7 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev, int ttm_bo_move_ttm(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_mem); + struct ttm_resource *new_mem); /** * ttm_bo_move_memcpy @@ -764,7 +764,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo, * @bo: A pointer to a struct ttm_buffer_object. * @interruptible: Sleep interruptible if waiting. * @no_wait_gpu: Return immediately if the GPU is busy. - * @new_mem: struct ttm_mem_reg indicating where to move. + * @new_mem: struct ttm_resource indicating where to move. * * Fallback move function for a mappable buffer object in mappable memory. * The function will, if successful, @@ -778,7 +778,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo, int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, - struct ttm_mem_reg *new_mem); + struct ttm_resource *new_mem); /** * ttm_bo_free_old_node @@ -795,7 +795,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo); * @bo: A pointer to a struct ttm_buffer_object. * @fence: A fence object that signals when moving is complete. * @evict: This is an evict move. Don't return until the buffer is idle. - * @new_mem: struct ttm_mem_reg indicating where to move. + * @new_mem: struct ttm_resource indicating where to move. * * Accelerated move function to be called when an accelerated move * has been scheduled. 
The function will create a new temporary buffer object @@ -806,7 +806,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo); */ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, struct dma_fence *fence, bool evict, - struct ttm_mem_reg *new_mem); + struct ttm_resource *new_mem); /** * ttm_bo_pipeline_move. @@ -814,14 +814,14 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, * @bo: A pointer to a struct ttm_buffer_object. * @fence: A fence object that signals when moving is complete. * @evict: This is an evict move. Don't return until the buffer is idle. - * @new_mem: struct ttm_mem_reg indicating where to move. + * @new_mem: struct ttm_resource indicating where to move. * * Function for pipelining accelerated moves. Either free the memory * immediately or hang it on a temporary buffer object. */ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, struct dma_fence *fence, bool evict, - struct ttm_mem_reg *new_mem); + struct ttm_resource *new_mem); /** * ttm_bo_pipeline_gutting. diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h index c0e928abf592..2ac34219ecb5 100644 --- a/include/drm/ttm/ttm_tt.h +++ b/include/drm/ttm/ttm_tt.h @@ -30,7 +30,7 @@ #include struct ttm_tt; -struct ttm_mem_reg; +struct ttm_resource; struct ttm_buffer_object; struct ttm_operation_ctx; @@ -53,14 +53,14 @@ struct ttm_backend_func { * struct ttm_backend_func member bind * * @ttm: Pointer to a struct ttm_tt. - * @bo_mem: Pointer to a struct ttm_mem_reg describing the + * @bo_mem: Pointer to a struct ttm_resource describing the * memory type and location for binding. * * Bind the backend pages into the aperture in the location * indicated by @bo_mem. This function should be able to handle * differences between aperture and system page sizes. */ - int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem); + int (*bind) (struct ttm_tt *ttm, struct ttm_resource *bo_mem); /** * struct ttm_backend_func member unbind @@ -179,11 +179,11 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma); * ttm_ttm_bind: * * @ttm: The struct ttm_tt containing backing pages. - * @bo_mem: The struct ttm_mem_reg identifying the binding location. + * @bo_mem: The struct ttm_resource identifying the binding location. * * Bind the pages of @ttm to an aperture location identified by @bo_mem */ -int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem, +int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem, struct ttm_operation_ctx *ctx); /** -- cgit v1.2.3 From 19d0070a2792181f79df01277fe00b83b9f7eda7 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 4 Aug 2020 17:01:23 +0200 Subject: timekeeping/vsyscall: Provide vdso_update_begin/end() Architectures can have the requirement to add additional architecture specific data to the VDSO data page which needs to be updated independent of the timekeeper updates. To protect these updates vs. concurrent readers and a conflicting update through timekeeping, provide helper functions to make such updates safe. vdso_update_begin() takes the timekeeper_lock to protect against a potential update from timekeeper code and increments the VDSO sequence count to signal data inconsistency to concurrent readers. vdso_update_end() makes the sequence count even again to signal data consistency and drops the timekeeper lock. 
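As an illustration only (not part of this patch), architecture code updating its own VDSO data under this protection could look roughly like the sketch below. The helper and field names here are assumed for the example, and the arch_data member is only introduced by the follow-up lib/vdso patch further down; only vdso_update_begin()/vdso_update_end() and __arch_get_k_vdso_data() come from the patches themselves:

        #include <vdso/vsyscall.h>
        #include <vdso/datapage.h>

        /* Hypothetical arch-side update; "steering" is an assumed member of the
         * architecture's struct arch_vdso_data, not something defined here.
         */
        void arch_vdso_update_steering(u64 steering)
        {
                struct vdso_data *vdata = __arch_get_k_vdso_data();
                unsigned long flags;

                flags = vdso_update_begin();    /* IRQs off, timekeeper_lock held, seq count odd */
                vdata->arch_data.steering = steering;
                vdso_update_end(flags);         /* seq count even again, data synced, lock dropped */
        }
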
[ Sven: Add interrupt disable handling to the functions ] Signed-off-by: Thomas Gleixner Signed-off-by: Sven Schnelle Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/20200804150124.41692-3-svens@linux.ibm.com --- include/vdso/vsyscall.h | 3 +++ kernel/time/timekeeping.c | 2 +- kernel/time/timekeeping_internal.h | 11 +++++++--- kernel/time/vsyscall.c | 41 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 53 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/vdso/vsyscall.h b/include/vdso/vsyscall.h index 2c6134e0c23d..b0fdc9c6bf43 100644 --- a/include/vdso/vsyscall.h +++ b/include/vdso/vsyscall.h @@ -6,6 +6,9 @@ #include +unsigned long vdso_update_begin(void); +void vdso_update_end(unsigned long flags); + #endif /* !__ASSEMBLY__ */ #endif /* __VDSO_VSYSCALL_H */ diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 63a632f9896c..4c7212f3c603 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -50,7 +50,7 @@ static struct { .seq = SEQCNT_ZERO(tk_core.seq), }; -static DEFINE_RAW_SPINLOCK(timekeeper_lock); +DEFINE_RAW_SPINLOCK(timekeeper_lock); static struct timekeeper shadow_timekeeper; /** diff --git a/kernel/time/timekeeping_internal.h b/kernel/time/timekeeping_internal.h index bcbb52db2256..4ca2787d1642 100644 --- a/kernel/time/timekeeping_internal.h +++ b/kernel/time/timekeeping_internal.h @@ -1,12 +1,14 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _TIMEKEEPING_INTERNAL_H #define _TIMEKEEPING_INTERNAL_H -/* - * timekeeping debug functions - */ + #include +#include #include +/* + * timekeeping debug functions + */ #ifdef CONFIG_DEBUG_FS extern void tk_debug_account_sleep_time(const struct timespec64 *t); #else @@ -31,4 +33,7 @@ static inline u64 clocksource_delta(u64 now, u64 last, u64 mask) } #endif +/* Semi public for serialization of non timekeeper VDSO updates. */ +extern raw_spinlock_t timekeeper_lock; + #endif /* _TIMEKEEPING_INTERNAL_H */ diff --git a/kernel/time/vsyscall.c b/kernel/time/vsyscall.c index 54ce6eb2ca36..88e6b8ed6ca5 100644 --- a/kernel/time/vsyscall.c +++ b/kernel/time/vsyscall.c @@ -13,6 +13,8 @@ #include #include +#include "timekeeping_internal.h" + static inline void update_vdso_data(struct vdso_data *vdata, struct timekeeper *tk) { @@ -127,3 +129,42 @@ void update_vsyscall_tz(void) __arch_sync_vdso_data(vdata); } + +/** + * vdso_update_begin - Start of a VDSO update section + * + * Allows architecture code to safely update the architecture specific VDSO + * data. Disables interrupts, acquires timekeeper lock to serialize against + * concurrent updates from timekeeping and invalidates the VDSO data + * sequence counter to prevent concurrent readers from accessing + * inconsistent data. + * + * Returns: Saved interrupt flags which need to be handed in to + * vdso_update_end(). + */ +unsigned long vdso_update_begin(void) +{ + struct vdso_data *vdata = __arch_get_k_vdso_data(); + unsigned long flags; + + raw_spin_lock_irqsave(&timekeeper_lock, flags); + vdso_write_begin(vdata); + return flags; +} + +/** + * vdso_update_end - End of a VDSO update section + * @flags: Interrupt flags as returned from vdso_update_begin() + * + * Pairs with vdso_update_begin(). Marks vdso data consistent, invokes data + * synchronization if the architecture requires it, drops timekeeper lock + * and restores interrupt flags. 
+ */ +void vdso_update_end(unsigned long flags) +{ + struct vdso_data *vdata = __arch_get_k_vdso_data(); + + vdso_write_end(vdata); + __arch_sync_vdso_data(vdata); + raw_spin_unlock_irqrestore(&timekeeper_lock, flags); +} -- cgit v1.2.3 From d60d7de3e16d7cea998bad17d87366a359625894 Mon Sep 17 00:00:00 2001 From: Sven Schnelle Date: Tue, 4 Aug 2020 17:01:22 +0200 Subject: lib/vdso: Allow to add architecture-specific vdso data The initial assumption that all VDSO related data can be completely generic does not hold. S390 needs architecture specific storage to access the clock steering information. Add struct arch_vdso_data to the vdso data struct. For architectures which do not need extra data this defaults to an empty struct. Architectures which require it, enable CONFIG_ARCH_HAS_VDSO_DATA and provide their specific struct in asm/vdso/data.h. Signed-off-by: Sven Schnelle Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/20200804150124.41692-2-svens@linux.ibm.com --- arch/Kconfig | 3 +++ include/vdso/datapage.h | 10 ++++++++++ 2 files changed, 13 insertions(+) (limited to 'include') diff --git a/arch/Kconfig b/arch/Kconfig index a1124481d910..b44dd6b9e2bd 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -975,6 +975,9 @@ config HAVE_SPARSE_SYSCALL_NR entries at 4000, 5000 and 6000 locations. This option turns on syscall related optimizations for a given architecture. +config ARCH_HAS_VDSO_DATA + bool + source "kernel/gcov/Kconfig" source "scripts/gcc-plugins/Kconfig" diff --git a/include/vdso/datapage.h b/include/vdso/datapage.h index ee810cae4e1e..73eb622e7663 100644 --- a/include/vdso/datapage.h +++ b/include/vdso/datapage.h @@ -19,6 +19,12 @@ #include #include +#ifdef CONFIG_ARCH_HAS_VDSO_DATA +#include +#else +struct arch_vdso_data {}; +#endif + #define VDSO_BASES (CLOCK_TAI + 1) #define VDSO_HRES (BIT(CLOCK_REALTIME) | \ BIT(CLOCK_MONOTONIC) | \ @@ -64,6 +70,8 @@ struct vdso_timestamp { * @tz_dsttime: type of DST correction * @hrtimer_res: hrtimer resolution * @__unused: unused + * @arch_data: architecture specific data (optional, defaults + * to an empty struct) * * vdso_data will be accessed by 64 bit and compat code at the same time * so we should be careful before modifying this structure. @@ -97,6 +105,8 @@ struct vdso_data { s32 tz_dsttime; u32 hrtimer_res; u32 __unused; + + struct arch_vdso_data arch_data; }; /* -- cgit v1.2.3 From 6d10fc2b9bfeb34756c9f2bda135aa56105118d9 Mon Sep 17 00:00:00 2001 From: Colton Lewis Date: Sun, 19 Jul 2020 23:16:16 +0200 Subject: media: v4l2: Correct kernel-doc inconsistency Silence documentation build warnings by correcting kernel-doc comment v4l2_create_fwnode_links_to_pad and v4l2_create_fwnode_links functions. 
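For background (not part of the patch), kernel-doc only recognizes parameter descriptions written as "@name: description"; the dash form "@name - description" is treated as plain text, which is what produces the warnings quoted below. A minimal correct block, using a made-up function name purely for illustration, looks like:

        /**
         * example_link_pads - link two pads (hypothetical function, for illustration only)
         * @src_sd: pointer to a source subdev
         * @sink_sd: pointer to a sink subdev
         */
        int example_link_pads(struct v4l2_subdev *src_sd, struct v4l2_subdev *sink_sd);
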
./include/media/v4l2-mc.h:110: warning: Function parameter or member 'src_sd' not described in 'v4l2_create_fwnode_links_to_pad' ./include/media/v4l2-mc.h:110: warning: Function parameter or member 'sink' not described in 'v4l2_create_fwnode_links_to_pad' ./include/media/v4l2-mc.h:134: warning: Function parameter or member 'src_sd' not described in 'v4l2_create_fwnode_links' ./include/media/v4l2-mc.h:134: warning: Function parameter or member 'sink_sd' not described in 'v4l2_create_fwnode_links' Signed-off-by: Colton Lewis Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/media/v4l2-mc.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/media/v4l2-mc.h b/include/media/v4l2-mc.h index 246eed398648..bdaa5f2f8ca2 100644 --- a/include/media/v4l2-mc.h +++ b/include/media/v4l2-mc.h @@ -89,8 +89,8 @@ int v4l_vb2q_enable_media_source(struct vb2_queue *q); * v4l2_create_fwnode_links_to_pad - Create fwnode-based links from a * source subdev to a sink subdev pad. * - * @src_sd - pointer to a source subdev - * @sink - pointer to a subdev sink pad + * @src_sd: pointer to a source subdev + * @sink: pointer to a subdev sink pad * * This function searches for fwnode endpoint connections from a source * subdevice to a single sink pad, and if suitable connections are found, @@ -113,8 +113,8 @@ int v4l2_create_fwnode_links_to_pad(struct v4l2_subdev *src_sd, * v4l2_create_fwnode_links - Create fwnode-based links from a source * subdev to a sink subdev. * - * @src_sd - pointer to a source subdevice - * @sink_sd - pointer to a sink subdevice + * @src_sd: pointer to a source subdevice + * @sink_sd: pointer to a sink subdevice * * This function searches for any and all fwnode endpoint connections * between source and sink subdevices, and translates them into media -- cgit v1.2.3 From 2f501169842c1626bf298b9bf497bf084635c6df Mon Sep 17 00:00:00 2001 From: Colton Lewis Date: Sun, 19 Jul 2020 23:16:24 +0200 Subject: media: v4l2: Correct kernel-doc inconsistency Silence documentation build warnings by correcting kernel-doc comment v4l2_subdev_get_fwnode_pad_1_to_1 function ./include/media/v4l2-subdev.h:1045: warning: Function parameter or member 'entity' not described in 'v4l2_subdev_get_fwnode_pad_1_to_1' ./include/media/v4l2-subdev.h:1045: warning: Function parameter or member 'endpoint' not described in 'v4l2_subdev_get_fwnode_pad_1_to_1' Signed-off-by: Colton Lewis Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/media/v4l2-subdev.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index b855721879b8..d4e3b44cf14c 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -1032,8 +1032,8 @@ static inline void *v4l2_get_subdev_hostdata(const struct v4l2_subdev *sd) * v4l2_subdev_get_fwnode_pad_1_to_1 - Get pad number from a subdev fwnode * endpoint, assuming 1:1 port:pad * - * @entity - Pointer to the subdev entity - * @endpoint - Pointer to a parsed fwnode endpoint + * @entity: Pointer to the subdev entity + * @endpoint: Pointer to a parsed fwnode endpoint * * This function can be used as the .get_fwnode_pad operation for * subdevices that map port numbers and pad indexes 1:1. 
If the endpoint -- cgit v1.2.3 From 0cd39f4600ed4de859383018eb10f0f724900e1b Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 6 Aug 2020 14:35:11 +0200 Subject: locking/seqlock, headers: Untangle the spaghetti monster By using lockdep_assert_*() from seqlock.h, the spaghetti monster attacked. Attack back by reducing seqlock.h dependencies from two key high level headers: - : -Remove - : -Remove - : +Add The price was to add it to sched.h ... Core header fallout, we add direct header dependencies instead of gaining them parasitically from higher level headers: - : +Add - : +Add - : +Add - : +Add - : +Add - : +Add Arch headers fallout: - PARISC: : +Add - SH: : +Add - SPARC: : +Add - SPARC: : +Add , -Remove - X86: : +Add -Remove There's also a bunch of parasitic header dependency fallout in .c files, not listed separately. [ mingo: Extended the changelog, split up & fixed the original patch. ] Co-developed-by: Ingo Molnar Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20200804133438.GK2674@hirez.programming.kicks-ass.net --- arch/sh/include/asm/io.h | 1 + arch/sh/kernel/machvec.c | 1 + arch/sparc/include/asm/timer_64.h | 1 + arch/sparc/include/asm/vvar.h | 3 ++- arch/sparc/kernel/vdso.c | 1 - arch/x86/include/asm/fixmap.h | 2 +- arch/x86/kernel/apic/apic_noop.c | 1 + arch/x86/kernel/apic/hw_nmi.c | 1 + arch/x86/kernel/apic/probe_64.c | 1 + arch/x86/kernel/cpu/amd.c | 1 + arch/x86/kernel/cpu/common.c | 1 + arch/x86/kernel/cpu/hygon.c | 1 + arch/x86/kernel/cpu/intel.c | 1 + arch/x86/kernel/jailhouse.c | 1 + arch/x86/kernel/tsc_msr.c | 1 + arch/x86/mm/init_32.c | 1 + arch/x86/xen/apic.c | 1 + arch/x86/xen/smp_hvm.c | 1 + arch/x86/xen/suspend_pv.c | 4 ++-- include/linux/dynamic_queue_limits.h | 2 ++ include/linux/hrtimer.h | 1 + include/linux/ktime.h | 1 + include/linux/lockdep.h | 1 + include/linux/mutex.h | 11 +++++++++++ include/linux/sched.h | 1 + include/linux/seqlock.h | 1 - include/linux/time.h | 1 - include/linux/videodev2.h | 1 + include/linux/ww_mutex.h | 8 -------- 29 files changed, 38 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 26f0f9b4658b..ec587b583822 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -17,6 +17,7 @@ #include #include #include +#include #include #include diff --git a/arch/sh/kernel/machvec.c b/arch/sh/kernel/machvec.c index beadbbdb4486..76bd8955d4fe 100644 --- a/arch/sh/kernel/machvec.c +++ b/arch/sh/kernel/machvec.c @@ -15,6 +15,7 @@ #include #include #include +#include #define MV_NAME_SIZE 32 diff --git a/arch/sparc/include/asm/timer_64.h b/arch/sparc/include/asm/timer_64.h index c7e4fb601a57..dcfad4613e18 100644 --- a/arch/sparc/include/asm/timer_64.h +++ b/arch/sparc/include/asm/timer_64.h @@ -7,6 +7,7 @@ #ifndef _SPARC64_TIMER_H #define _SPARC64_TIMER_H +#include #include #include diff --git a/arch/sparc/include/asm/vvar.h b/arch/sparc/include/asm/vvar.h index 0289503d1cb0..6eaf5cfcaae1 100644 --- a/arch/sparc/include/asm/vvar.h +++ b/arch/sparc/include/asm/vvar.h @@ -6,7 +6,8 @@ #define _ASM_SPARC_VVAR_DATA_H #include -#include +#include +#include #include #include diff --git a/arch/sparc/kernel/vdso.c b/arch/sparc/kernel/vdso.c index 58880662b271..0e27437eb97b 100644 --- a/arch/sparc/kernel/vdso.c +++ b/arch/sparc/kernel/vdso.c @@ -7,7 +7,6 @@ * a different vsyscall implementation for Linux/IA32 and for the name. 
*/ -#include #include #include diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index b9527a54db99..0f0dd645b594 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -26,9 +26,9 @@ #ifndef __ASSEMBLY__ #include -#include #include #include +#include #ifdef CONFIG_X86_32 #include #include diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c index 98c9bb75d185..780c702969b7 100644 --- a/arch/x86/kernel/apic/apic_noop.c +++ b/arch/x86/kernel/apic/apic_noop.c @@ -10,6 +10,7 @@ * like self-ipi, etc... */ #include +#include #include diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c index d1fc62a67320..34a992e275ef 100644 --- a/arch/x86/kernel/apic/hw_nmi.c +++ b/arch/x86/kernel/apic/hw_nmi.c @@ -9,6 +9,7 @@ * Bits copied from original nmi.c file * */ +#include #include #include diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c index 29f0e0984557..bd3835d6b535 100644 --- a/arch/x86/kernel/apic/probe_64.c +++ b/arch/x86/kernel/apic/probe_64.c @@ -8,6 +8,7 @@ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and * James Cleverdon. */ +#include #include #include "local.h" diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index d4806eac9325..dcc3d943c68f 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 95c090a45b4b..52b565016eb1 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -45,6 +45,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index 4e28c1fc8749..ac6c30e5801d 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 0ab48f1cdf84..6eb42d7a3dfd 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -23,6 +23,7 @@ #include #include #include +#include #ifdef CONFIG_X86_64 #include diff --git a/arch/x86/kernel/jailhouse.c b/arch/x86/kernel/jailhouse.c index 2caf5b990bf6..4eb8f2d19a87 100644 --- a/arch/x86/kernel/jailhouse.c +++ b/arch/x86/kernel/jailhouse.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c index 4fec6f3a1858..46c72f2ec32f 100644 --- a/arch/x86/kernel/tsc_msr.c +++ b/arch/x86/kernel/tsc_msr.c @@ -7,6 +7,7 @@ */ #include +#include #include #include diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 8b4afad84f4a..d46a5cf6ccb0 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -52,6 +52,7 @@ #include #include #include +#include #include "mm_internal.h" diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c index 2df7d089ad54..1aff4ae65655 100644 --- a/arch/x86/xen/apic.c +++ b/arch/x86/xen/apic.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include +#include #include #include diff --git a/arch/x86/xen/smp_hvm.c b/arch/x86/xen/smp_hvm.c index f8d39440b292..f5e7db4f82ab 100644 --- a/arch/x86/xen/smp_hvm.c +++ b/arch/x86/xen/smp_hvm.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#include #include #include diff --git a/arch/x86/xen/suspend_pv.c b/arch/x86/xen/suspend_pv.c index 8303b58c79a9..cae9660f4c67 
100644 --- a/arch/x86/xen/suspend_pv.c +++ b/arch/x86/xen/suspend_pv.c @@ -1,11 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 #include -#include - #include #include +#include + #include "xen-ops.h" void xen_pv_pre_suspend(void) diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h index 99fc06f0afc1..407c2f281b64 100644 --- a/include/linux/dynamic_queue_limits.h +++ b/include/linux/dynamic_queue_limits.h @@ -38,6 +38,8 @@ #ifdef __KERNEL__ +#include + struct dql { /* Fields accessed in enqueue path (dql_queued) */ unsigned int num_queued; /* Total ever queued */ diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 25993b86ac5c..107cedd7019a 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -17,6 +17,7 @@ #include #include #include +#include #include #include diff --git a/include/linux/ktime.h b/include/linux/ktime.h index 42d2e6ac35f2..a12b5523cc18 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h @@ -23,6 +23,7 @@ #include #include +#include /* Nanosecond scalar representation for kernel time values */ typedef s64 ktime_t; diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 39a35699d0d6..62a382d1845b 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -11,6 +11,7 @@ #define __LINUX_LOCKDEP_H #include +#include #include struct task_struct; diff --git a/include/linux/mutex.h b/include/linux/mutex.h index ae197cc00cc8..dcd185cbfe79 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -65,6 +65,17 @@ struct mutex { #endif }; +struct ww_class; +struct ww_acquire_ctx; + +struct ww_mutex { + struct mutex base; + struct ww_acquire_ctx *ctx; +#ifdef CONFIG_DEBUG_MUTEXES + struct ww_class *ww_class; +#endif +}; + /* * This is the control structure for tasks blocked on mutex, * which resides on the blocked task's kernel stack: diff --git a/include/linux/sched.h b/include/linux/sched.h index 9a9d8263962d..7c7a9499d7bc 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -31,6 +31,7 @@ #include #include #include +#include #include /* task_struct member predeclarations (sorted alphabetically): */ diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index a076f783aa36..962d9768945f 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -19,7 +19,6 @@ #include #include #include -#include #include diff --git a/include/linux/time.h b/include/linux/time.h index 4c325bf44ce0..b142cb5f5a53 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -3,7 +3,6 @@ #define _LINUX_TIME_H # include -# include # include # include diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index 16c0ed6c50a7..219037f4c08d 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -57,6 +57,7 @@ #define __LINUX_VIDEODEV2_H #include /* need struct timeval */ +#include #include #endif /* __LINUX_VIDEODEV2_H */ diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h index d7554252404c..850424e5d030 100644 --- a/include/linux/ww_mutex.h +++ b/include/linux/ww_mutex.h @@ -48,14 +48,6 @@ struct ww_acquire_ctx { #endif }; -struct ww_mutex { - struct mutex base; - struct ww_acquire_ctx *ctx; -#ifdef CONFIG_DEBUG_MUTEXES - struct ww_class *ww_class; -#endif -}; - #ifdef CONFIG_DEBUG_LOCK_ALLOC # define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) \ , .ww_class = class -- cgit v1.2.3 From 1fb497dd003009be95ce67689ac800c446b7acc5 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 30 Jul 2020 12:14:06 +0200 Subject: 
posix-cpu-timers: Provide mechanisms to defer timer handling to task_work Running posix CPU timers in hard interrupt context has a few downsides: - For PREEMPT_RT it cannot work as the expiry code needs to take sighand lock, which is a 'sleeping spinlock' in RT. The original RT approach of offloading the posix CPU timer handling into a high priority thread was clumsy and provided no real benefit in general. - For fine grained accounting it's just wrong to run this in context of the timer interrupt because that way a process specific CPU time is accounted to the timer interrupt. - Long running timer interrupts caused by a large amount of expiring timers which can be created and armed by unpriviledged user space. There is no hard requirement to expire them in interrupt context. If the signal is targeted at the task itself then it won't be delivered before the task returns to user space anyway. If the signal is targeted at a supervisor process then it might be slightly delayed, but posix CPU timers are inaccurate anyway due to the fact that they are tied to the tick. Provide infrastructure to schedule task work which allows splitting the posix CPU timer code into a quick check in interrupt context and a thread context expiry and signal delivery function. This has to be enabled by architectures as it requires that the architecture specific KVM implementation handles pending task work before exiting to guest mode. Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar Reviewed-by: Oleg Nesterov Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20200730102337.783470146@linutronix.de --- include/linux/posix-timers.h | 17 ++++ include/linux/sched.h | 4 + kernel/time/Kconfig | 9 ++ kernel/time/posix-cpu-timers.c | 185 ++++++++++++++++++++++++++++++++++++++--- kernel/time/timer.c | 1 + 5 files changed, 204 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index e3f0f8585da4..896c16d2c5fb 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -6,6 +6,7 @@ #include #include #include +#include struct kernel_siginfo; struct task_struct; @@ -125,6 +126,16 @@ struct posix_cputimers { unsigned int expiry_active; }; +/** + * posix_cputimers_work - Container for task work based posix CPU timer expiry + * @work: The task work to be scheduled + * @scheduled: @work has been scheduled already, no further processing + */ +struct posix_cputimers_work { + struct callback_head work; + unsigned int scheduled; +}; + static inline void posix_cputimers_init(struct posix_cputimers *pct) { memset(pct, 0, sizeof(*pct)); @@ -165,6 +176,12 @@ static inline void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit) { } #endif +#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK +void posix_cputimers_init_work(void); +#else +static inline void posix_cputimers_init_work(void) { } +#endif + #define REQUEUE_PENDING 1 /** diff --git a/include/linux/sched.h b/include/linux/sched.h index 06ec60462af0..e9942ce07ef1 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -889,6 +889,10 @@ struct task_struct { /* Empty if CONFIG_POSIX_CPUTIMERS=n */ struct posix_cputimers posix_cputimers; +#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK + struct posix_cputimers_work posix_cputimers_work; +#endif + /* Process credentials: */ /* Tracer's credentials at attach: */ diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig index fcc42353f125..a09b1d61df6a 100644 --- a/kernel/time/Kconfig +++ b/kernel/time/Kconfig @@ 
-52,6 +52,15 @@ config GENERIC_CLOCKEVENTS_MIN_ADJUST config GENERIC_CMOS_UPDATE bool +# Select to handle posix CPU timers from task_work +# and not from the timer interrupt context +config HAVE_POSIX_CPU_TIMERS_TASK_WORK + bool + +config POSIX_CPU_TIMERS_TASK_WORK + bool + default y if POSIX_TIMERS && HAVE_POSIX_CPU_TIMERS_TASK_WORK + if GENERIC_CLOCKEVENTS menu "Timers subsystem" diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index e5ad87320468..a71758e34e45 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -377,6 +377,7 @@ static int posix_cpu_clock_get(const clockid_t clock, struct timespec64 *tp) */ static int posix_cpu_timer_create(struct k_itimer *new_timer) { + static struct lock_class_key posix_cpu_timers_key; struct pid *pid; rcu_read_lock(); @@ -386,6 +387,17 @@ static int posix_cpu_timer_create(struct k_itimer *new_timer) return -EINVAL; } + /* + * If posix timer expiry is handled in task work context then + * timer::it_lock can be taken without disabling interrupts as all + * other locking happens in task context. This requires a seperate + * lock class key otherwise regular posix timer expiry would record + * the lock class being taken in interrupt context and generate a + * false positive warning. + */ + if (IS_ENABLED(CONFIG_POSIX_CPU_TIMERS_TASK_WORK)) + lockdep_set_class(&new_timer->it_lock, &posix_cpu_timers_key); + new_timer->kclock = &clock_posix_cpu; timerqueue_init(&new_timer->it.cpu.node); new_timer->it.cpu.pid = get_pid(pid); @@ -1080,26 +1092,163 @@ static inline bool fastpath_timer_check(struct task_struct *tsk) return false; } -static void __run_posix_cpu_timers(struct task_struct *tsk) +static void handle_posix_cpu_timers(struct task_struct *tsk); + +#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK +static void posix_cpu_timers_work(struct callback_head *work) +{ + handle_posix_cpu_timers(current); +} + +/* + * Initialize posix CPU timers task work in init task. Out of line to + * keep the callback static and to avoid header recursion hell. + */ +void __init posix_cputimers_init_work(void) +{ + init_task_work(¤t->posix_cputimers_work.work, + posix_cpu_timers_work); +} + +/* + * Note: All operations on tsk->posix_cputimer_work.scheduled happen either + * in hard interrupt context or in task context with interrupts + * disabled. Aside of that the writer/reader interaction is always in the + * context of the current task, which means they are strict per CPU. + */ +static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk) +{ + return tsk->posix_cputimers_work.scheduled; +} + +static inline void __run_posix_cpu_timers(struct task_struct *tsk) +{ + if (WARN_ON_ONCE(tsk->posix_cputimers_work.scheduled)) + return; + + /* Schedule task work to actually expire the timers */ + tsk->posix_cputimers_work.scheduled = true; + task_work_add(tsk, &tsk->posix_cputimers_work.work, TWA_RESUME); +} + +static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk, + unsigned long start) +{ + bool ret = true; + + /* + * On !RT kernels interrupts are disabled while collecting expired + * timers, so no tick can happen and the fast path check can be + * reenabled without further checks. + */ + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) { + tsk->posix_cputimers_work.scheduled = false; + return true; + } + + /* + * On RT enabled kernels ticks can happen while the expired timers + * are collected under sighand lock. 
But any tick which observes + * the CPUTIMERS_WORK_SCHEDULED bit set, does not run the fastpath + * checks. So reenabling the tick work has do be done carefully: + * + * Disable interrupts and run the fast path check if jiffies have + * advanced since the collecting of expired timers started. If + * jiffies have not advanced or the fast path check did not find + * newly expired timers, reenable the fast path check in the timer + * interrupt. If there are newly expired timers, return false and + * let the collection loop repeat. + */ + local_irq_disable(); + if (start != jiffies && fastpath_timer_check(tsk)) + ret = false; + else + tsk->posix_cputimers_work.scheduled = false; + local_irq_enable(); + + return ret; +} +#else /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */ +static inline void __run_posix_cpu_timers(struct task_struct *tsk) +{ + lockdep_posixtimer_enter(); + handle_posix_cpu_timers(tsk); + lockdep_posixtimer_exit(); +} + +static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk) +{ + return false; +} + +static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk, + unsigned long start) +{ + return true; +} +#endif /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */ + +static void handle_posix_cpu_timers(struct task_struct *tsk) { struct k_itimer *timer, *next; - unsigned long flags; + unsigned long flags, start; LIST_HEAD(firing); if (!lock_task_sighand(tsk, &flags)) return; - /* - * Here we take off tsk->signal->cpu_timers[N] and - * tsk->cpu_timers[N] all the timers that are firing, and - * put them on the firing list. - */ - check_thread_timers(tsk, &firing); + do { + /* + * On RT locking sighand lock does not disable interrupts, + * so this needs to be careful vs. ticks. Store the current + * jiffies value. + */ + start = READ_ONCE(jiffies); + barrier(); - check_process_timers(tsk, &firing); + /* + * Here we take off tsk->signal->cpu_timers[N] and + * tsk->cpu_timers[N] all the timers that are firing, and + * put them on the firing list. + */ + check_thread_timers(tsk, &firing); + + check_process_timers(tsk, &firing); + + /* + * The above timer checks have updated the exipry cache and + * because nothing can have queued or modified timers after + * sighand lock was taken above it is guaranteed to be + * consistent. So the next timer interrupt fastpath check + * will find valid data. + * + * If timer expiry runs in the timer interrupt context then + * the loop is not relevant as timers will be directly + * expired in interrupt context. The stub function below + * returns always true which allows the compiler to + * optimize the loop out. + * + * If timer expiry is deferred to task work context then + * the following rules apply: + * + * - On !RT kernels no tick can have happened on this CPU + * after sighand lock was acquired because interrupts are + * disabled. So reenabling task work before dropping + * sighand lock and reenabling interrupts is race free. + * + * - On RT kernels ticks might have happened but the tick + * work ignored posix CPU timer handling because the + * CPUTIMERS_WORK_SCHEDULED bit is set. Reenabling work + * must be done very carefully including a check whether + * ticks have happened since the start of the timer + * expiry checks. posix_cpu_timers_enable_work() takes + * care of that and eventually lets the expiry checks + * run again. + */ + } while (!posix_cpu_timers_enable_work(tsk, start)); /* - * We must release these locks before taking any timer's lock. + * We must release sighand lock before taking any timer's lock. 
* There is a potential race with timer deletion here, as the * siglock now protects our private firing list. We have set * the firing flag in each timer, so that a deletion attempt @@ -1117,6 +1266,13 @@ static void __run_posix_cpu_timers(struct task_struct *tsk) list_for_each_entry_safe(timer, next, &firing, it.cpu.elist) { int cpu_firing; + /* + * spin_lock() is sufficient here even independent of the + * expiry context. If expiry happens in hard interrupt + * context it's obvious. For task work context it's safe + * because all other operations on timer::it_lock happen in + * task context (syscall or exit). + */ spin_lock(&timer->it_lock); list_del_init(&timer->it.cpu.elist); cpu_firing = timer->it.cpu.firing; @@ -1143,6 +1299,13 @@ void run_posix_cpu_timers(void) lockdep_assert_irqs_disabled(); + /* + * If the actual expiry is deferred to task work context and the + * work is already scheduled there is no point to do anything here. + */ + if (posix_cpu_timers_work_scheduled(tsk)) + return; + /* * The fast path checks that there are no expired thread or thread * group timers. If that's so, just return. @@ -1150,9 +1313,7 @@ void run_posix_cpu_timers(void) if (!fastpath_timer_check(tsk)) return; - lockdep_posixtimer_enter(); __run_posix_cpu_timers(tsk); - lockdep_posixtimer_exit(); } /* diff --git a/kernel/time/timer.c b/kernel/time/timer.c index ae5029f984a8..a16764b0116e 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -2017,6 +2017,7 @@ static void __init init_timer_cpus(void) void __init init_timers(void) { init_timer_cpus(); + posix_cputimers_init_work(); open_softirq(TIMER_SOFTIRQ, run_timer_softirq); } -- cgit v1.2.3 From 84d1c617402e7e67fc95ab2384da8dae7d1b0efe Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Fri, 26 Jun 2020 13:26:48 -0400 Subject: net: sock: add sock_set_mark This patch adds a new socket helper function to set the mark value for a kernel socket. Signed-off-by: Alexander Aring Signed-off-by: David Teigland --- include/net/sock.h | 1 + net/core/sock.c | 8 ++++++++ 2 files changed, 9 insertions(+) (limited to 'include') diff --git a/include/net/sock.h b/include/net/sock.h index c53cc42b5ab9..591dd3f12dbb 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -2696,6 +2696,7 @@ void sock_no_linger(struct sock *sk); void sock_set_keepalive(struct sock *sk); void sock_set_priority(struct sock *sk, u32 priority); void sock_set_rcvbuf(struct sock *sk, int val); +void sock_set_mark(struct sock *sk, u32 val); void sock_set_reuseaddr(struct sock *sk); void sock_set_reuseport(struct sock *sk); void sock_set_sndtimeo(struct sock *sk, s64 secs); diff --git a/net/core/sock.c b/net/core/sock.c index 6c4acf1f0220..ea6e8348b3dc 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -828,6 +828,14 @@ void sock_set_rcvbuf(struct sock *sk, int val) } EXPORT_SYMBOL(sock_set_rcvbuf); +void sock_set_mark(struct sock *sk, u32 val) +{ + lock_sock(sk); + sk->sk_mark = val; + release_sock(sk); +} +EXPORT_SYMBOL(sock_set_mark); + /* * This is meant for all protocols to use and covers goings on * at the socket level. Everything here is generic. 
-- cgit v1.2.3 From 09fc67b500c7f0bb1b5ed774197ac7f2c5285655 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 17 Jul 2020 17:42:55 +0900 Subject: kprobes: Remove show_registers() function prototype Remove show_registers() function prototype because this function has been renamed by commit: 57da8b960b9a ("x86: Avoid double stack traces with show_regs()") and this commit has removed the caller in kprobes altogether: 80006dbee674 ("kprobes/x86: Remove jprobe implementation") So this doesn't exist anymore - remove the orphan prototype. Signed-off-by: Masami Hiramatsu Signed-off-by: Ingo Molnar --- include/linux/kprobes.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 45b8cdc9fad7..9be1bff4f586 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -227,7 +227,6 @@ extern int arch_prepare_kprobe(struct kprobe *p); extern void arch_arm_kprobe(struct kprobe *p); extern void arch_disarm_kprobe(struct kprobe *p); extern int arch_init_kprobes(void); -extern void show_registers(struct pt_regs *regs); extern void kprobes_inc_nmissed_count(struct kprobe *p); extern bool arch_within_kprobe_blacklist(unsigned long addr); extern int arch_populate_kprobe_blacklist(void); -- cgit v1.2.3 From b55b3fdce3e554a6bbe8f8ca6a01a892d720e64e Mon Sep 17 00:00:00 2001 From: Bhupesh Sharma Date: Fri, 17 Jul 2020 13:01:00 +0530 Subject: hw_breakpoint: Remove unused __register_perf_hw_breakpoint() declaration Commit: b326e9560a28 ("hw-breakpoints: Use overflow handler instead of the event callback") removed __register_perf_hw_breakpoint() function usage and replaced it with register_perf_hw_breakpoint() function. Remove the left-over unused __register_perf_hw_breakpoint() declaration from as well. Signed-off-by: Bhupesh Sharma Signed-off-by: Ingo Molnar Acked-by: Mark Rutland Link: https://lore.kernel.org/r/1594971060-14180-1-git-send-email-bhsharma@redhat.com --- include/linux/hw_breakpoint.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'include') diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h index d7d4250cd1e4..78dd7035d1e5 100644 --- a/include/linux/hw_breakpoint.h +++ b/include/linux/hw_breakpoint.h @@ -72,7 +72,6 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr, void *context); extern int register_perf_hw_breakpoint(struct perf_event *bp); -extern int __register_perf_hw_breakpoint(struct perf_event *bp); extern void unregister_hw_breakpoint(struct perf_event *bp); extern void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events); @@ -119,8 +118,6 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr, void *context) { return NULL; } static inline int register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } -static inline int -__register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } static inline void unregister_hw_breakpoint(struct perf_event *bp) { } static inline void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events) { } -- cgit v1.2.3 From bb22d80b47d5dd641d09d31946c4be0f610f3f45 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 17 Jul 2020 16:36:40 -0700 Subject: LSM: drop duplicated words in header file comments Drop the doubled words "the" and "and" in comments. 
Signed-off-by: Randy Dunlap Acked-by: Serge Hallyn Cc: linux-security-module@vger.kernel.org Signed-off-by: James Morris --- include/linux/lsm_hook_defs.h | 2 +- include/linux/lsm_hooks.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 5616b2567aa7..dec0e5186834 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -15,7 +15,7 @@ */ /* - * The macro LSM_HOOK is used to define the data structures required by the + * The macro LSM_HOOK is used to define the data structures required by * the LSM framework using the pattern: * * LSM_HOOK(, , , args...) diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 988ca0df7824..afd2c5becec5 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -798,7 +798,7 @@ * structure. Note that the security field was not added directly to the * socket structure, but rather, the socket security information is stored * in the associated inode. Typically, the inode alloc_security hook will - * allocate and and attach security information to + * allocate and attach security information to * SOCK_INODE(sock)->i_security. This hook may be used to update the * SOCK_INODE(sock)->i_security field with additional information that * wasn't available when the inode was allocated. -- cgit v1.2.3 From 16c642ec3fe9a144fbe1e97dc56f13a6308f1381 Mon Sep 17 00:00:00 2001 From: Pierre-Eric Pelloux-Prayer Date: Thu, 30 Jul 2020 15:54:59 +0200 Subject: drm/amdgpu: new ids flag for tmz (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allows UMD to know if TMZ is supported and enabled. This commit also bumps KMS_DRIVER_MINOR because if we don't UMD can't tell if "ids_flags & AMDGPU_IDS_FLAGS_TMZ == 0" means "tmz is not enabled" or "tmz may be enabled but the kernel doesn't report it". v2: use amdgpu_is_tmz() and reworded commit message. 
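As a rough userspace sketch (not part of this patch), a UMD could combine the bumped DRM minor with the new flag before trusting it; struct drm_amdgpu_info_device, the AMDGPU_INFO_DEV_INFO query and drmGetVersion() are the existing amdgpu uapi/libdrm names and are only assumed here, not introduced by this change:

#include <stdbool.h>
#include <amdgpu_drm.h>	/* include path depends on the libdrm install */

/* Sketch only: dev_info is assumed to have been filled by an
 * AMDGPU_INFO_DEV_INFO query and drm_minor by drmGetVersion();
 * neither call is shown here. */
static bool umd_tmz_enabled(const struct drm_amdgpu_info_device *dev_info,
			    int drm_minor)
{
	/* Kernels before 3.40 never set the bit, so a clear bit alone
	 * would be ambiguous there. */
	if (drm_minor < 40)
		return false;
	return dev_info->ids_flags & AMDGPU_IDS_FLAGS_TMZ;
}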
Signed-off-by: Pierre-Eric Pelloux-Prayer Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 2 ++ include/uapi/drm/amdgpu_drm.h | 1 + 3 files changed, 5 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 5156c67ec67b..92d0368217a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -88,9 +88,10 @@ * - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness * - 3.38.0 - Add AMDGPU_IB_FLAG_EMIT_MEM_SYNC * - 3.39.0 - DMABUF implicit sync does a full pipeline sync + * - 3.40.0 - Add AMDGPU_IDS_FLAGS_TMZ */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 39 +#define KMS_DRIVER_MINOR 40 #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index e99ad031efd4..58580a48b648 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -737,6 +737,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION; if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION; + if (amdgpu_is_tmz(adev)) + dev_info.ids_flags |= AMDGPU_IDS_FLAGS_TMZ; vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; vm_size -= AMDGPU_VA_RESERVED_SIZE; diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 3218576e109d..c5ff2b275fcd 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -673,6 +673,7 @@ struct drm_amdgpu_cs_chunk_data { */ #define AMDGPU_IDS_FLAGS_FUSION 0x1 #define AMDGPU_IDS_FLAGS_PREEMPTION 0x2 +#define AMDGPU_IDS_FLAGS_TMZ 0x4 /* indicate if acceleration can be working */ #define AMDGPU_INFO_ACCEL_WORKING 0x00 -- cgit v1.2.3 From 5e7b30205cef80f6bb922e61834437ca7bff5837 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 4 Aug 2020 22:50:56 -0700 Subject: bpf: Change uapi for bpf iterator map elements Commit a5cbe05a6673 ("bpf: Implement bpf iterator for map elements") added bpf iterator support for map elements. The map element bpf iterator requires info to identify a particular map. In the above commit, the attr->link_create.target_fd is used to carry map_fd and an enum bpf_iter_link_info is added to uapi to specify the target_fd actually representing a map_fd: enum bpf_iter_link_info { BPF_ITER_LINK_UNSPEC = 0, BPF_ITER_LINK_MAP_FD = 1, MAX_BPF_ITER_LINK_INFO, }; This is an extensible approach as we can grow enumerator for pid, cgroup_id, etc. and we can unionize target_fd for pid, cgroup_id, etc. But in the future, there are chances that more complex customization may happen, e.g., for tasks, it could be filtered based on both cgroup_id and user_id. This patch changed the uapi to have fields __aligned_u64 iter_info; __u32 iter_info_len; for additional iter_info for link_create. The iter_info is defined as union bpf_iter_link_info { struct { __u32 map_fd; } map; }; So future extension for additional customization will be easier. The bpf_iter_link_info will be passed to target callback to validate and generic bpf_iter framework does not need to deal it any more. Note that map_fd = 0 will be considered invalid and -EBADF will be returned to user space. 
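To make the new layout concrete, a minimal userspace sketch (raw syscall, no error handling) of attaching a map-element iterator with the reworked attributes could look like this; prog_fd and map_fd are assumed to be valid file descriptors obtained elsewhere:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* prog_fd: a loaded iterator program (attach type BPF_TRACE_ITER),
 * map_fd: the map whose elements should be walked. */
static int create_map_elem_iter_link(int prog_fd, int map_fd)
{
	union bpf_iter_link_info linfo;
	union bpf_attr attr;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;	/* a zero map_fd is rejected (EBADF) */

	memset(&attr, 0, sizeof(attr));
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.attach_type = BPF_TRACE_ITER;
	attr.link_create.iter_info = (__u64)(unsigned long)&linfo;
	attr.link_create.iter_info_len = sizeof(linfo);

	return syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
}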
Fixes: a5cbe05a6673 ("bpf: Implement bpf iterator for map elements") Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20200805055056.1457463-1-yhs@fb.com --- include/linux/bpf.h | 10 ++++---- include/uapi/linux/bpf.h | 15 ++++++------ kernel/bpf/bpf_iter.c | 58 +++++++++++++++++++++++------------------------ kernel/bpf/map_iter.c | 37 +++++++++++++++++++++++------- kernel/bpf/syscall.c | 2 +- net/core/bpf_sk_storage.c | 37 +++++++++++++++++++++++------- 6 files changed, 102 insertions(+), 57 deletions(-) (limited to 'include') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index cef4ef0d2b4e..55f694b63164 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1214,15 +1214,17 @@ struct bpf_iter_aux_info { struct bpf_map *map; }; -typedef int (*bpf_iter_check_target_t)(struct bpf_prog *prog, - struct bpf_iter_aux_info *aux); +typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog, + union bpf_iter_link_info *linfo, + struct bpf_iter_aux_info *aux); +typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux); #define BPF_ITER_CTX_ARG_MAX 2 struct bpf_iter_reg { const char *target; - bpf_iter_check_target_t check_target; + bpf_iter_attach_target_t attach_target; + bpf_iter_detach_target_t detach_target; u32 ctx_arg_info_size; - enum bpf_iter_link_info req_linfo; struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX]; const struct bpf_iter_seq_info *seq_info; }; diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index b134e679e9db..0480f893facd 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -81,6 +81,12 @@ struct bpf_cgroup_storage_key { __u32 attach_type; /* program attach type */ }; +union bpf_iter_link_info { + struct { + __u32 map_fd; + } map; +}; + /* BPF syscall commands, see bpf(2) man-page for details. */ enum bpf_cmd { BPF_MAP_CREATE, @@ -249,13 +255,6 @@ enum bpf_link_type { MAX_BPF_LINK_TYPE, }; -enum bpf_iter_link_info { - BPF_ITER_LINK_UNSPEC = 0, - BPF_ITER_LINK_MAP_FD = 1, - - MAX_BPF_ITER_LINK_INFO, -}; - /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command * * NONE(default): No further bpf programs allowed in the subtree. 
@@ -623,6 +622,8 @@ union bpf_attr { }; __u32 attach_type; /* attach type */ __u32 flags; /* extra flags */ + __aligned_u64 iter_info; /* extra bpf_iter_link_info */ + __u32 iter_info_len; /* iter_info length */ } link_create; struct { /* struct used by BPF_LINK_UPDATE command */ diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c index 363b9cafc2d8..b6715964b685 100644 --- a/kernel/bpf/bpf_iter.c +++ b/kernel/bpf/bpf_iter.c @@ -338,8 +338,8 @@ static void bpf_iter_link_release(struct bpf_link *link) struct bpf_iter_link *iter_link = container_of(link, struct bpf_iter_link, link); - if (iter_link->aux.map) - bpf_map_put_with_uref(iter_link->aux.map); + if (iter_link->tinfo->reg_info->detach_target) + iter_link->tinfo->reg_info->detach_target(&iter_link->aux); } static void bpf_iter_link_dealloc(struct bpf_link *link) @@ -390,15 +390,35 @@ bool bpf_link_is_iter(struct bpf_link *link) int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) { + union bpf_iter_link_info __user *ulinfo; struct bpf_link_primer link_primer; struct bpf_iter_target_info *tinfo; - struct bpf_iter_aux_info aux = {}; + union bpf_iter_link_info linfo; struct bpf_iter_link *link; - u32 prog_btf_id, target_fd; + u32 prog_btf_id, linfo_len; bool existed = false; - struct bpf_map *map; int err; + if (attr->link_create.target_fd || attr->link_create.flags) + return -EINVAL; + + memset(&linfo, 0, sizeof(union bpf_iter_link_info)); + + ulinfo = u64_to_user_ptr(attr->link_create.iter_info); + linfo_len = attr->link_create.iter_info_len; + if (!ulinfo ^ !linfo_len) + return -EINVAL; + + if (ulinfo) { + err = bpf_check_uarg_tail_zero(ulinfo, sizeof(linfo), + linfo_len); + if (err) + return err; + linfo_len = min_t(u32, linfo_len, sizeof(linfo)); + if (copy_from_user(&linfo, ulinfo, linfo_len)) + return -EFAULT; + } + prog_btf_id = prog->aux->attach_btf_id; mutex_lock(&targets_mutex); list_for_each_entry(tinfo, &targets, list) { @@ -411,13 +431,6 @@ int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) if (!existed) return -ENOENT; - /* Make sure user supplied flags are target expected. 
*/ - target_fd = attr->link_create.target_fd; - if (attr->link_create.flags != tinfo->reg_info->req_linfo) - return -EINVAL; - if (!attr->link_create.flags && target_fd) - return -EINVAL; - link = kzalloc(sizeof(*link), GFP_USER | __GFP_NOWARN); if (!link) return -ENOMEM; @@ -431,28 +444,15 @@ int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) return err; } - if (tinfo->reg_info->req_linfo == BPF_ITER_LINK_MAP_FD) { - map = bpf_map_get_with_uref(target_fd); - if (IS_ERR(map)) { - err = PTR_ERR(map); - goto cleanup_link; - } - - aux.map = map; - err = tinfo->reg_info->check_target(prog, &aux); + if (tinfo->reg_info->attach_target) { + err = tinfo->reg_info->attach_target(prog, &linfo, &link->aux); if (err) { - bpf_map_put_with_uref(map); - goto cleanup_link; + bpf_link_cleanup(&link_primer); + return err; } - - link->aux.map = map; } return bpf_link_settle(&link_primer); - -cleanup_link: - bpf_link_cleanup(&link_primer); - return err; } static void init_seq_meta(struct bpf_iter_priv_data *priv_data, diff --git a/kernel/bpf/map_iter.c b/kernel/bpf/map_iter.c index fbe1f557cb88..af86048e5afd 100644 --- a/kernel/bpf/map_iter.c +++ b/kernel/bpf/map_iter.c @@ -98,12 +98,21 @@ static struct bpf_iter_reg bpf_map_reg_info = { .seq_info = &bpf_map_seq_info, }; -static int bpf_iter_check_map(struct bpf_prog *prog, - struct bpf_iter_aux_info *aux) +static int bpf_iter_attach_map(struct bpf_prog *prog, + union bpf_iter_link_info *linfo, + struct bpf_iter_aux_info *aux) { u32 key_acc_size, value_acc_size, key_size, value_size; - struct bpf_map *map = aux->map; + struct bpf_map *map; bool is_percpu = false; + int err = -EINVAL; + + if (!linfo->map.map_fd) + return -EBADF; + + map = bpf_map_get_with_uref(linfo->map.map_fd); + if (IS_ERR(map)) + return PTR_ERR(map); if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || @@ -112,7 +121,7 @@ static int bpf_iter_check_map(struct bpf_prog *prog, else if (map->map_type != BPF_MAP_TYPE_HASH && map->map_type != BPF_MAP_TYPE_LRU_HASH && map->map_type != BPF_MAP_TYPE_ARRAY) - return -EINVAL; + goto put_map; key_acc_size = prog->aux->max_rdonly_access; value_acc_size = prog->aux->max_rdwr_access; @@ -122,10 +131,22 @@ static int bpf_iter_check_map(struct bpf_prog *prog, else value_size = round_up(map->value_size, 8) * num_possible_cpus(); - if (key_acc_size > key_size || value_acc_size > value_size) - return -EACCES; + if (key_acc_size > key_size || value_acc_size > value_size) { + err = -EACCES; + goto put_map; + } + aux->map = map; return 0; + +put_map: + bpf_map_put_with_uref(map); + return err; +} + +static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux) +{ + bpf_map_put_with_uref(aux->map); } DEFINE_BPF_ITER_FUNC(bpf_map_elem, struct bpf_iter_meta *meta, @@ -133,8 +154,8 @@ DEFINE_BPF_ITER_FUNC(bpf_map_elem, struct bpf_iter_meta *meta, static const struct bpf_iter_reg bpf_map_elem_reg_info = { .target = "bpf_map_elem", - .check_target = bpf_iter_check_map, - .req_linfo = BPF_ITER_LINK_MAP_FD, + .attach_target = bpf_iter_attach_map, + .detach_target = bpf_iter_detach_map, .ctx_arg_info_size = 2, .ctx_arg_info = { { offsetof(struct bpf_iter__bpf_map_elem, key), diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 2f343ce15747..86299a292214 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -3883,7 +3883,7 @@ static int tracing_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog * return -EINVAL; } -#define BPF_LINK_CREATE_LAST_FIELD link_create.flags 
+#define BPF_LINK_CREATE_LAST_FIELD link_create.iter_info_len static int link_create(union bpf_attr *attr) { enum bpf_prog_type ptype; diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c index d3377c90a291..b988f48153a4 100644 --- a/net/core/bpf_sk_storage.c +++ b/net/core/bpf_sk_storage.c @@ -1384,18 +1384,39 @@ static int bpf_iter_init_sk_storage_map(void *priv_data, return 0; } -static int bpf_iter_check_map(struct bpf_prog *prog, - struct bpf_iter_aux_info *aux) +static int bpf_iter_attach_map(struct bpf_prog *prog, + union bpf_iter_link_info *linfo, + struct bpf_iter_aux_info *aux) { - struct bpf_map *map = aux->map; + struct bpf_map *map; + int err = -EINVAL; + + if (!linfo->map.map_fd) + return -EBADF; + + map = bpf_map_get_with_uref(linfo->map.map_fd); + if (IS_ERR(map)) + return PTR_ERR(map); if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) - return -EINVAL; + goto put_map; - if (prog->aux->max_rdonly_access > map->value_size) - return -EACCES; + if (prog->aux->max_rdonly_access > map->value_size) { + err = -EACCES; + goto put_map; + } + aux->map = map; return 0; + +put_map: + bpf_map_put_with_uref(map); + return err; +} + +static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux) +{ + bpf_map_put_with_uref(aux->map); } static const struct seq_operations bpf_sk_storage_map_seq_ops = { @@ -1414,8 +1435,8 @@ static const struct bpf_iter_seq_info iter_seq_info = { static struct bpf_iter_reg bpf_sk_storage_map_reg_info = { .target = "bpf_sk_storage_map", - .check_target = bpf_iter_check_map, - .req_linfo = BPF_ITER_LINK_MAP_FD, + .attach_target = bpf_iter_attach_map, + .detach_target = bpf_iter_detach_map, .ctx_arg_info_size = 2, .ctx_arg_info = { { offsetof(struct bpf_iter__bpf_sk_storage_map, sk), -- cgit v1.2.3 From d58669b093997e4e5f98c38a54f99761657c19d2 Mon Sep 17 00:00:00 2001 From: Akshu Agrawal Date: Fri, 31 Jul 2020 19:06:01 +0530 Subject: ACPI: APD: Change name from ST to FCH AMD SoC general pupose clk is present in new platforms with same MMIO mappings. We can reuse the same clk handler support for other platforms. Hence, changing name from ST(SoC) to FCH(IP) Signed-off-by: Akshu Agrawal Acked-by: Stephen Boyd Signed-off-by: Rafael J. 
Wysocki --- drivers/acpi/acpi_apd.c | 14 +++++++------- drivers/clk/x86/clk-st.c | 4 ++-- include/linux/platform_data/clk-fch.h | 17 +++++++++++++++++ include/linux/platform_data/clk-st.h | 17 ----------------- 4 files changed, 26 insertions(+), 26 deletions(-) create mode 100644 include/linux/platform_data/clk-fch.h delete mode 100644 include/linux/platform_data/clk-st.h (limited to 'include') diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index f24f6d3f1fa5..36319deb24d9 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -8,7 +8,7 @@ */ #include -#include +#include #include #include #include @@ -79,11 +79,11 @@ static int misc_check_res(struct acpi_resource *ares, void *data) return !acpi_dev_resource_memory(ares, &res); } -static int st_misc_setup(struct apd_private_data *pdata) +static int fch_misc_setup(struct apd_private_data *pdata) { struct acpi_device *adev = pdata->adev; struct platform_device *clkdev; - struct st_clk_data *clk_data; + struct fch_clk_data *clk_data; struct resource_entry *rentry; struct list_head resource_list; int ret; @@ -106,7 +106,7 @@ static int st_misc_setup(struct apd_private_data *pdata) acpi_dev_free_resource_list(&resource_list); - clkdev = platform_device_register_data(&adev->dev, "clk-st", + clkdev = platform_device_register_data(&adev->dev, "clk-fch", PLATFORM_DEVID_NONE, clk_data, sizeof(*clk_data)); return PTR_ERR_OR_ZERO(clkdev); @@ -135,8 +135,8 @@ static const struct apd_device_desc cz_uart_desc = { .properties = uart_properties, }; -static const struct apd_device_desc st_misc_desc = { - .setup = st_misc_setup, +static const struct apd_device_desc fch_misc_desc = { + .setup = fch_misc_setup, }; #endif @@ -239,7 +239,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = { { "AMD0020", APD_ADDR(cz_uart_desc) }, { "AMDI0020", APD_ADDR(cz_uart_desc) }, { "AMD0030", }, - { "AMD0040", APD_ADDR(st_misc_desc)}, + { "AMD0040", APD_ADDR(fch_misc_desc)}, { "HYGO0010", APD_ADDR(wt_i2c_desc) }, #endif #ifdef CONFIG_ARM64 diff --git a/drivers/clk/x86/clk-st.c b/drivers/clk/x86/clk-st.c index 25d4b97aff9b..c2438874d9f2 100644 --- a/drivers/clk/x86/clk-st.c +++ b/drivers/clk/x86/clk-st.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include /* Clock Driving Strength 2 register */ @@ -31,7 +31,7 @@ static struct clk_hw *hws[ST_MAX_CLKS]; static int st_clk_probe(struct platform_device *pdev) { - struct st_clk_data *st_data; + struct fch_clk_data *st_data; st_data = dev_get_platdata(&pdev->dev); if (!st_data || !st_data->base) diff --git a/include/linux/platform_data/clk-fch.h b/include/linux/platform_data/clk-fch.h new file mode 100644 index 000000000000..850ca776156d --- /dev/null +++ b/include/linux/platform_data/clk-fch.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: MIT */ +/* + * clock framework for AMD misc clocks + * + * Copyright 2018 Advanced Micro Devices, Inc. + */ + +#ifndef __CLK_FCH_H +#define __CLK_FCH_H + +#include + +struct fch_clk_data { + void __iomem *base; +}; + +#endif /* __CLK_FCH_H */ diff --git a/include/linux/platform_data/clk-st.h b/include/linux/platform_data/clk-st.h deleted file mode 100644 index 7cdb6a402b35..000000000000 --- a/include/linux/platform_data/clk-st.h +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * clock framework for AMD Stoney based clock - * - * Copyright 2018 Advanced Micro Devices, Inc. 
- */ - -#ifndef __CLK_ST_H -#define __CLK_ST_H - -#include - -struct st_clk_data { - void __iomem *base; -}; - -#endif /* __CLK_ST_H */ -- cgit v1.2.3 From 7f8802f2d2ed67ffbac9264f946a52507f749e19 Mon Sep 17 00:00:00 2001 From: Akshu Agrawal Date: Fri, 31 Jul 2020 19:06:03 +0530 Subject: ACPI: APD: Add a fmw property is_raven Since there is slight difference in AMD RV based soc in misc clk architecture. The fmw property will help in differentiating the SoCs. Signed-off-by: Akshu Agrawal Signed-off-by: Rafael J. Wysocki --- drivers/acpi/acpi_apd.c | 4 ++++ include/linux/platform_data/clk-fch.h | 1 + 2 files changed, 5 insertions(+) (limited to 'include') diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index 36319deb24d9..4c348377a39d 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -82,6 +82,7 @@ static int misc_check_res(struct acpi_resource *ares, void *data) static int fch_misc_setup(struct apd_private_data *pdata) { struct acpi_device *adev = pdata->adev; + const union acpi_object *obj; struct platform_device *clkdev; struct fch_clk_data *clk_data; struct resource_entry *rentry; @@ -98,6 +99,9 @@ static int fch_misc_setup(struct apd_private_data *pdata) if (ret < 0) return -ENOENT; + acpi_dev_get_property(adev, "is-rv", ACPI_TYPE_INTEGER, &obj); + clk_data->is_rv = obj->integer.value; + list_for_each_entry(rentry, &resource_list, node) { clk_data->base = devm_ioremap(&adev->dev, rentry->res->start, resource_size(rentry->res)); diff --git a/include/linux/platform_data/clk-fch.h b/include/linux/platform_data/clk-fch.h index 850ca776156d..b9f682459f08 100644 --- a/include/linux/platform_data/clk-fch.h +++ b/include/linux/platform_data/clk-fch.h @@ -12,6 +12,7 @@ struct fch_clk_data { void __iomem *base; + u32 is_rv; }; #endif /* __CLK_FCH_H */ -- cgit v1.2.3 From 529a781ee07aaa58be8164d75ba5998eb7dd216c Mon Sep 17 00:00:00 2001 From: "zhangyi (F)" Date: Sat, 20 Jun 2020 10:54:27 +0800 Subject: jbd2: remove unused parameter in jbd2_journal_try_to_free_buffers() Parameter gfp_mask in jbd2_journal_try_to_free_buffers() is no longer used after commit <536fc240e7147> ("jbd2: clean up jbd2_journal_try_to_free_buffers()"), so just remove it. 
Signed-off-by: zhangyi (F) Link: https://lore.kernel.org/r/20200620025427.1756360-6-yi.zhang@huawei.com Signed-off-by: Theodore Ts'o --- fs/ext4/inode.c | 2 +- fs/ext4/super.c | 4 ++-- fs/jbd2/transaction.c | 7 +------ include/linux/jbd2.h | 2 +- 4 files changed, 5 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 6187c8880c02..551a1056870d 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3288,7 +3288,7 @@ static int ext4_releasepage(struct page *page, gfp_t wait) if (PageChecked(page)) return 0; if (journal) - return jbd2_journal_try_to_free_buffers(journal, page, wait); + return jbd2_journal_try_to_free_buffers(journal, page); else return try_to_free_buffers(page); } diff --git a/fs/ext4/super.c b/fs/ext4/super.c index c77b10257b36..8c00f0a09f4d 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1288,8 +1288,8 @@ static int bdev_try_to_free_page(struct super_block *sb, struct page *page, if (!page_has_buffers(page)) return 0; if (journal) - return jbd2_journal_try_to_free_buffers(journal, page, - wait & ~__GFP_DIRECT_RECLAIM); + return jbd2_journal_try_to_free_buffers(journal, page); + return try_to_free_buffers(page); } diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 6250c9faa4cb..43985738aa86 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -2081,10 +2081,6 @@ out: * int jbd2_journal_try_to_free_buffers() - try to free page buffers. * @journal: journal for operation * @page: to try and free - * @gfp_mask: we use the mask to detect how hard should we try to release - * buffers. If __GFP_DIRECT_RECLAIM and __GFP_FS is set, we wait for commit - * code to release the buffers. - * * * For all the buffers on this page, * if they are fully written out ordered data, move them onto BUF_CLEAN @@ -2115,8 +2111,7 @@ out: * * Return 0 on failure, 1 on success */ -int jbd2_journal_try_to_free_buffers(journal_t *journal, - struct page *page, gfp_t gfp_mask) +int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page) { struct buffer_head *head; struct buffer_head *bh; diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index d56128df2aff..a756a4cdf939 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1380,7 +1380,7 @@ extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *); extern int jbd2_journal_forget (handle_t *, struct buffer_head *); extern int jbd2_journal_invalidatepage(journal_t *, struct page *, unsigned int, unsigned int); -extern int jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t); +extern int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page); extern int jbd2_journal_stop(handle_t *); extern int jbd2_journal_flush (journal_t *); extern void jbd2_journal_lock_updates (journal_t *); -- cgit v1.2.3 From ab74c7b23f3770935016e3eb3ecdf1e42b73efaa Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Wed, 15 Jul 2020 11:48:55 -0400 Subject: ext4: indicate via a block bitmap read is prefetched via a tracepoint Modify the ext4_read_block_bitmap_load tracepoint so that it tells us whether a block bitmap is being prefetched. 
Signed-off-by: Theodore Ts'o Reviewed-by: Artem Blagodarenko --- fs/ext4/balloc.c | 2 +- include/trace/events/ext4.h | 24 ++++++++++++++++++++---- 2 files changed, 21 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 1e2b1b4093aa..48c3df47748d 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -494,7 +494,7 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group, * submit the buffer_head for reading */ set_buffer_new(bh); - trace_ext4_read_block_bitmap_load(sb, block_group); + trace_ext4_read_block_bitmap_load(sb, block_group, ignore_locked); bh->b_end_io = ext4_end_bitmap_read; get_bh(bh); submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO | diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index cc41d692ae8e..cbcd2e1a608d 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -1312,18 +1312,34 @@ DEFINE_EVENT(ext4__bitmap_load, ext4_mb_buddy_bitmap_load, TP_ARGS(sb, group) ); -DEFINE_EVENT(ext4__bitmap_load, ext4_read_block_bitmap_load, +DEFINE_EVENT(ext4__bitmap_load, ext4_load_inode_bitmap, TP_PROTO(struct super_block *sb, unsigned long group), TP_ARGS(sb, group) ); -DEFINE_EVENT(ext4__bitmap_load, ext4_load_inode_bitmap, +TRACE_EVENT(ext4_read_block_bitmap_load, + TP_PROTO(struct super_block *sb, unsigned long group, bool prefetch), - TP_PROTO(struct super_block *sb, unsigned long group), + TP_ARGS(sb, group, prefetch), - TP_ARGS(sb, group) + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( __u32, group ) + __field( bool, prefetch ) + + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->group = group; + __entry->prefetch = prefetch; + ), + + TP_printk("dev %d,%d group %u prefetch %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->group, __entry->prefetch) ); TRACE_EVENT(ext4_direct_IO_enter, -- cgit v1.2.3 From 3d392b2676bf3199863a1e5efb2c087ad9d442a4 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Fri, 17 Jul 2020 00:14:40 -0400 Subject: ext4: add prefetch_block_bitmaps mount option For file systems where we can afford to keep the buddy bitmaps cached, we can speed up initial writes to large file systems by starting to load the block allocation bitmaps as soon as the file system is mounted. This won't work well for _super_ large file systems, or memory constrained systems, so we only enable this when it is requested via a mount option. 
Addresses-Google-Bug: 159488342 Signed-off-by: Theodore Ts'o Reviewed-by: Andreas Dilger --- fs/ext4/ext4.h | 15 +++++++++++- fs/ext4/mballoc.c | 10 ++++---- fs/ext4/super.c | 57 ++++++++++++++++++++++++++++++++------------- include/trace/events/ext4.h | 44 ++++++++++++++++++++++++++++++++++ 4 files changed, 103 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 26ae31a994a2..e5f0c66a6156 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1188,6 +1188,7 @@ struct ext4_inode_info { #define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */ #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */ #define EXT4_MOUNT_WARN_ON_ERROR 0x2000000 /* Trigger WARN_ON on error */ +#define EXT4_MOUNT_PREFETCH_BLOCK_BITMAPS 0x4000000 #define EXT4_MOUNT_DELALLOC 0x8000000 /* Delalloc support */ #define EXT4_MOUNT_DATA_ERR_ABORT 0x10000000 /* Abort on file data write */ #define EXT4_MOUNT_BLOCK_VALIDITY 0x20000000 /* Block validity checking */ @@ -2334,9 +2335,15 @@ struct ext4_lazy_init { struct mutex li_list_mtx; }; +enum ext4_li_mode { + EXT4_LI_MODE_PREFETCH_BBITMAP, + EXT4_LI_MODE_ITABLE, +}; + struct ext4_li_request { struct super_block *lr_super; - struct ext4_sb_info *lr_sbi; + enum ext4_li_mode lr_mode; + ext4_group_t lr_first_not_zeroed; ext4_group_t lr_next_group; struct list_head lr_request; unsigned long lr_next_sched; @@ -2676,6 +2683,12 @@ extern int ext4_mb_reserve_blocks(struct super_block *, int); extern void ext4_discard_preallocations(struct inode *); extern int __init ext4_init_mballoc(void); extern void ext4_exit_mballoc(void); +extern ext4_group_t ext4_mb_prefetch(struct super_block *sb, + ext4_group_t group, + unsigned int nr, int *cnt); +extern void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group, + unsigned int nr); + extern void ext4_free_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh, ext4_fsblk_t block, unsigned long count, int flags); diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 9a07da53ab7b..8ecd49dd3906 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -2232,9 +2232,8 @@ out: * Start prefetching @nr block bitmaps starting at @group. * Return the next group which needs to be prefetched. */ -static ext4_group_t -ext4_mb_prefetch(struct super_block *sb, ext4_group_t group, - unsigned int nr, int *cnt) +ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group, + unsigned int nr, int *cnt) { ext4_group_t ngroups = ext4_get_groups_count(sb); struct buffer_head *bh; @@ -2284,9 +2283,8 @@ ext4_mb_prefetch(struct super_block *sb, ext4_group_t group, * waiting for the block allocation bitmap read to finish when * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator(). 
*/ -static void -ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group, - unsigned int nr) +void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group, + unsigned int nr) { while (nr-- > 0) { struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 8c00f0a09f4d..1cbe3f248faf 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1521,6 +1521,7 @@ enum { Opt_dioread_nolock, Opt_dioread_lock, Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable, Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache, + Opt_prefetch_block_bitmaps, }; static const match_table_t tokens = { @@ -1612,6 +1613,7 @@ static const match_table_t tokens = { {Opt_test_dummy_encryption, "test_dummy_encryption"}, {Opt_nombcache, "nombcache"}, {Opt_nombcache, "no_mbcache"}, /* for backward compatibility */ + {Opt_prefetch_block_bitmaps, "prefetch_block_bitmaps"}, {Opt_removed, "check=none"}, /* mount option from ext2/3 */ {Opt_removed, "nocheck"}, /* mount option from ext2/3 */ {Opt_removed, "reservation"}, /* mount option from ext2/3 */ @@ -1829,6 +1831,8 @@ static const struct mount_opts { {Opt_max_dir_size_kb, 0, MOPT_GTE0}, {Opt_test_dummy_encryption, 0, MOPT_STRING}, {Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET}, + {Opt_prefetch_block_bitmaps, EXT4_MOUNT_PREFETCH_BLOCK_BITMAPS, + MOPT_SET}, {Opt_err, 0, 0} }; @@ -3201,15 +3205,34 @@ static void print_daily_error_info(struct timer_list *t) static int ext4_run_li_request(struct ext4_li_request *elr) { struct ext4_group_desc *gdp = NULL; - ext4_group_t group, ngroups; - struct super_block *sb; + struct super_block *sb = elr->lr_super; + ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count; + ext4_group_t group = elr->lr_next_group; unsigned long timeout = 0; + unsigned int prefetch_ios = 0; int ret = 0; - sb = elr->lr_super; - ngroups = EXT4_SB(sb)->s_groups_count; + if (elr->lr_mode == EXT4_LI_MODE_PREFETCH_BBITMAP) { + elr->lr_next_group = ext4_mb_prefetch(sb, group, + EXT4_SB(sb)->s_mb_prefetch, &prefetch_ios); + if (prefetch_ios) + ext4_mb_prefetch_fini(sb, elr->lr_next_group, + prefetch_ios); + trace_ext4_prefetch_bitmaps(sb, group, elr->lr_next_group, + prefetch_ios); + if (group >= elr->lr_next_group) { + ret = 1; + if (elr->lr_first_not_zeroed != ngroups && + !sb_rdonly(sb) && test_opt(sb, INIT_INODE_TABLE)) { + elr->lr_next_group = elr->lr_first_not_zeroed; + elr->lr_mode = EXT4_LI_MODE_ITABLE; + ret = 0; + } + } + return ret; + } - for (group = elr->lr_next_group; group < ngroups; group++) { + for (; group < ngroups; group++) { gdp = ext4_get_group_desc(sb, group, NULL); if (!gdp) { ret = 1; @@ -3227,9 +3250,10 @@ static int ext4_run_li_request(struct ext4_li_request *elr) timeout = jiffies; ret = ext4_init_inode_table(sb, group, elr->lr_timeout ? 
0 : 1); + trace_ext4_lazy_itable_init(sb, group); if (elr->lr_timeout == 0) { timeout = (jiffies - timeout) * - elr->lr_sbi->s_li_wait_mult; + EXT4_SB(elr->lr_super)->s_li_wait_mult; elr->lr_timeout = timeout; } elr->lr_next_sched = jiffies + elr->lr_timeout; @@ -3244,15 +3268,11 @@ static int ext4_run_li_request(struct ext4_li_request *elr) */ static void ext4_remove_li_request(struct ext4_li_request *elr) { - struct ext4_sb_info *sbi; - if (!elr) return; - sbi = elr->lr_sbi; - list_del(&elr->lr_request); - sbi->s_li_request = NULL; + EXT4_SB(elr->lr_super)->s_li_request = NULL; kfree(elr); } @@ -3461,7 +3481,6 @@ static int ext4_li_info_new(void) static struct ext4_li_request *ext4_li_request_new(struct super_block *sb, ext4_group_t start) { - struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_li_request *elr; elr = kzalloc(sizeof(*elr), GFP_KERNEL); @@ -3469,8 +3488,13 @@ static struct ext4_li_request *ext4_li_request_new(struct super_block *sb, return NULL; elr->lr_super = sb; - elr->lr_sbi = sbi; - elr->lr_next_group = start; + elr->lr_first_not_zeroed = start; + if (test_opt(sb, PREFETCH_BLOCK_BITMAPS)) + elr->lr_mode = EXT4_LI_MODE_PREFETCH_BBITMAP; + else { + elr->lr_mode = EXT4_LI_MODE_ITABLE; + elr->lr_next_group = start; + } /* * Randomize first schedule time of the request to @@ -3500,8 +3524,9 @@ int ext4_register_li_request(struct super_block *sb, goto out; } - if (first_not_zeroed == ngroups || sb_rdonly(sb) || - !test_opt(sb, INIT_INODE_TABLE)) + if (!test_opt(sb, PREFETCH_BLOCK_BITMAPS) && + (first_not_zeroed == ngroups || sb_rdonly(sb) || + !test_opt(sb, INIT_INODE_TABLE))) goto out; elr = ext4_li_request_new(sb, first_not_zeroed); diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index cbcd2e1a608d..8008d2e116b9 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -2742,6 +2742,50 @@ TRACE_EVENT(ext4_error, __entry->function, __entry->line) ); +TRACE_EVENT(ext4_prefetch_bitmaps, + TP_PROTO(struct super_block *sb, ext4_group_t group, + ext4_group_t next, unsigned int prefetch_ios), + + TP_ARGS(sb, group, next, prefetch_ios), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( __u32, group ) + __field( __u32, next ) + __field( __u32, ios ) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->group = group; + __entry->next = next; + __entry->ios = prefetch_ios; + ), + + TP_printk("dev %d,%d group %u next %u ios %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->group, __entry->next, __entry->ios) +); + +TRACE_EVENT(ext4_lazy_itable_init, + TP_PROTO(struct super_block *sb, ext4_group_t group), + + TP_ARGS(sb, group), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( __u32, group ) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->group = group; + ), + + TP_printk("dev %d,%d group %u", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->group) +); + #endif /* _TRACE_EXT4_H */ /* This part must be outside protection */ -- cgit v1.2.3 From c1a06df6ebf6ca98fb7a672fe447c7469d6c1968 Mon Sep 17 00:00:00 2001 From: Ralph Campbell Date: Thu, 6 Aug 2020 23:17:09 -0700 Subject: mm/migrate: fix migrate_pgmap_owner w/o CONFIG_MMU_NOTIFIER On x86_64, when CONFIG_MMU_NOTIFIER is not set/enabled, there is a compiler error: mm/migrate.c: In function 'migrate_vma_collect': mm/migrate.c:2481:7: error: 'struct mmu_notifier_range' has no member named 'migrate_pgmap_owner' range.migrate_pgmap_owner = migrate->pgmap_owner; ^ Fixes: 998427b3ad2c ("mm/notifier: add migration invalidation type") Reported-by: 
Randy Dunlap Signed-off-by: Ralph Campbell Signed-off-by: Andrew Morton Tested-by: Randy Dunlap Acked-by: Randy Dunlap Cc: Jerome Glisse Cc: John Hubbard Cc: Christoph Hellwig Cc: "Jason Gunthorpe" Link: http://lkml.kernel.org/r/20200806193353.7124-1-rcampbell@nvidia.com Signed-off-by: Linus Torvalds --- include/linux/mmu_notifier.h | 13 +++++++++++++ mm/migrate.c | 6 +++--- 2 files changed, 16 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index c6f0708195cd..b8200782dede 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -521,6 +521,16 @@ static inline void mmu_notifier_range_init(struct mmu_notifier_range *range, range->flags = flags; } +static inline void mmu_notifier_range_init_migrate( + struct mmu_notifier_range *range, unsigned int flags, + struct vm_area_struct *vma, struct mm_struct *mm, + unsigned long start, unsigned long end, void *pgmap) +{ + mmu_notifier_range_init(range, MMU_NOTIFY_MIGRATE, flags, vma, mm, + start, end); + range->migrate_pgmap_owner = pgmap; +} + #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \ ({ \ int __young; \ @@ -645,6 +655,9 @@ static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range, #define mmu_notifier_range_init(range,event,flags,vma,mm,start,end) \ _mmu_notifier_range_init(range, start, end) +#define mmu_notifier_range_init_migrate(range, flags, vma, mm, start, end, \ + pgmap) \ + _mmu_notifier_range_init(range, start, end) static inline bool mmu_notifier_range_blockable(const struct mmu_notifier_range *range) diff --git a/mm/migrate.c b/mm/migrate.c index 4fcc465736ff..d179657f8685 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -2386,9 +2386,9 @@ static void migrate_vma_collect(struct migrate_vma *migrate) * that the registered device driver can skip invalidating device * private page mappings that won't be migrated. */ - mmu_notifier_range_init(&range, MMU_NOTIFY_MIGRATE, 0, migrate->vma, - migrate->vma->vm_mm, migrate->start, migrate->end); - range.migrate_pgmap_owner = migrate->pgmap_owner; + mmu_notifier_range_init_migrate(&range, 0, migrate->vma, + migrate->vma->vm_mm, migrate->start, migrate->end, + migrate->pgmap_owner); mmu_notifier_invalidate_range_start(&range); walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end, -- cgit v1.2.3 From 453431a54934d917153c65211b2dabf45562ca88 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Thu, 6 Aug 2020 23:18:13 -0700 Subject: mm, treewide: rename kzfree() to kfree_sensitive() As said by Linus: A symmetric naming is only helpful if it implies symmetries in use. Otherwise it's actively misleading. In "kzalloc()", the z is meaningful and an important part of what the caller wants. In "kzfree()", the z is actively detrimental, because maybe in the future we really _might_ want to use that "memfill(0xdeadbeef)" or something. The "zero" part of the interface isn't even _relevant_. The main reason that kzfree() exists is to clear sensitive information that should not be leaked to other future users of the same memory objects. Rename kzfree() to kfree_sensitive() to follow the example of the recently added kvfree_sensitive() and make the intention of the API more explicit. In addition, memzero_explicit() is used to clear the memory to make sure that it won't get optimized away by the compiler. 
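For reference, the behaviour boils down to roughly the following sketch (a simplified illustration of the semantics described above, not the verbatim mm/slab_common.c body):

  #include <linux/slab.h>    /* kfree(), ksize(), ZERO_OR_NULL_PTR() */
  #include <linux/string.h>  /* memzero_explicit() */

  void kfree_sensitive(const void *p)
  {
  	void *mem = (void *)p;

  	if (ZERO_OR_NULL_PTR(mem))
  		return;
  	/* memzero_explicit() cannot be elided by the compiler the way a
  	 * plain memset() can, so the whole allocation is reliably cleared
  	 * before being handed back to the allocator.
  	 */
  	memzero_explicit(mem, ksize(mem));
  	kfree(mem);
  }

Existing callers keep compiling during the transition through the kzfree() backward compatibility macro added to include/linux/slab.h.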
The renaming is done by using the command sequence: git grep -w --name-only kzfree |\ xargs sed -i 's/kzfree/kfree_sensitive/' followed by some editing of the kfree_sensitive() kerneldoc and adding a kzfree backward compatibility macro in slab.h. [akpm@linux-foundation.org: fs/crypto/inline_crypt.c needs linux/slab.h] [akpm@linux-foundation.org: fix fs/crypto/inline_crypt.c some more] Suggested-by: Joe Perches Signed-off-by: Waiman Long Signed-off-by: Andrew Morton Acked-by: David Howells Acked-by: Michal Hocko Acked-by: Johannes Weiner Cc: Jarkko Sakkinen Cc: James Morris Cc: "Serge E. Hallyn" Cc: Joe Perches Cc: Matthew Wilcox Cc: David Rientjes Cc: Dan Carpenter Cc: "Jason A . Donenfeld" Link: http://lkml.kernel.org/r/20200616154311.12314-3-longman@redhat.com Signed-off-by: Linus Torvalds --- arch/s390/crypto/prng.c | 4 +-- arch/x86/power/hibernate.c | 2 +- crypto/adiantum.c | 2 +- crypto/ahash.c | 4 +-- crypto/api.c | 2 +- crypto/asymmetric_keys/verify_pefile.c | 4 +-- crypto/deflate.c | 2 +- crypto/drbg.c | 10 +++---- crypto/ecc.c | 8 ++--- crypto/ecdh.c | 2 +- crypto/gcm.c | 2 +- crypto/gf128mul.c | 4 +-- crypto/jitterentropy-kcapi.c | 2 +- crypto/rng.c | 2 +- crypto/rsa-pkcs1pad.c | 6 ++-- crypto/seqiv.c | 2 +- crypto/shash.c | 2 +- crypto/skcipher.c | 2 +- crypto/testmgr.c | 6 ++-- crypto/zstd.c | 2 +- .../crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c | 2 +- .../crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c | 2 +- drivers/crypto/amlogic/amlogic-gxl-cipher.c | 4 +-- drivers/crypto/atmel-ecc.c | 2 +- drivers/crypto/caam/caampkc.c | 28 +++++++++--------- drivers/crypto/cavium/cpt/cptvf_main.c | 6 ++-- drivers/crypto/cavium/cpt/cptvf_reqmanager.c | 12 ++++---- drivers/crypto/cavium/nitrox/nitrox_lib.c | 4 +-- drivers/crypto/cavium/zip/zip_crypto.c | 6 ++-- drivers/crypto/ccp/ccp-crypto-rsa.c | 6 ++-- drivers/crypto/ccree/cc_aead.c | 4 +-- drivers/crypto/ccree/cc_buffer_mgr.c | 4 +-- drivers/crypto/ccree/cc_cipher.c | 6 ++-- drivers/crypto/ccree/cc_hash.c | 8 ++--- drivers/crypto/ccree/cc_request_mgr.c | 2 +- drivers/crypto/marvell/cesa/hash.c | 2 +- drivers/crypto/marvell/octeontx/otx_cptvf_main.c | 6 ++-- drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h | 2 +- drivers/crypto/nx/nx.c | 4 +-- drivers/crypto/virtio/virtio_crypto_algs.c | 12 ++++---- drivers/crypto/virtio/virtio_crypto_core.c | 2 +- drivers/md/dm-crypt.c | 32 ++++++++++---------- drivers/md/dm-integrity.c | 6 ++-- drivers/misc/ibmvmc.c | 6 ++-- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c | 6 ++-- drivers/net/ppp/ppp_mppe.c | 6 ++-- drivers/net/wireguard/noise.c | 4 +-- drivers/net/wireguard/peer.c | 2 +- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 2 +- drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 6 ++-- drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 6 ++-- drivers/net/wireless/intersil/orinoco/wext.c | 4 +-- drivers/s390/crypto/ap_bus.h | 4 +-- drivers/staging/ks7010/ks_hostif.c | 2 +- drivers/staging/rtl8723bs/core/rtw_security.c | 2 +- drivers/staging/wlan-ng/p80211netdev.c | 2 +- drivers/target/iscsi/iscsi_target_auth.c | 2 +- fs/cifs/cifsencrypt.c | 2 +- fs/cifs/connect.c | 10 +++---- fs/cifs/dfs_cache.c | 2 +- fs/cifs/misc.c | 8 ++--- fs/crypto/inline_crypt.c | 5 ++-- fs/crypto/keyring.c | 6 ++-- fs/crypto/keysetup_v1.c | 4 +-- fs/ecryptfs/keystore.c | 4 +-- fs/ecryptfs/messaging.c | 2 +- include/crypto/aead.h | 2 +- include/crypto/akcipher.h | 2 +- include/crypto/gf128mul.h | 2 +- include/crypto/hash.h | 2 +- include/crypto/internal/acompress.h | 2 
+- include/crypto/kpp.h | 2 +- include/crypto/skcipher.h | 2 +- include/linux/slab.h | 4 ++- lib/mpi/mpiutil.c | 6 ++-- lib/test_kasan.c | 6 ++-- mm/slab_common.c | 8 ++--- net/atm/mpoa_caches.c | 4 +-- net/bluetooth/ecdh_helper.c | 6 ++-- net/bluetooth/smp.c | 24 +++++++-------- net/core/sock.c | 2 +- net/ipv4/tcp_fastopen.c | 2 +- net/mac80211/aead_api.c | 4 +-- net/mac80211/aes_gmac.c | 2 +- net/mac80211/key.c | 2 +- net/mac802154/llsec.c | 20 ++++++------- net/sctp/auth.c | 2 +- net/sunrpc/auth_gss/gss_krb5_crypto.c | 4 +-- net/sunrpc/auth_gss/gss_krb5_keys.c | 6 ++-- net/sunrpc/auth_gss/gss_krb5_mech.c | 2 +- net/tipc/crypto.c | 10 +++---- net/wireless/core.c | 2 +- net/wireless/ibss.c | 4 +-- net/wireless/lib80211_crypt_tkip.c | 2 +- net/wireless/lib80211_crypt_wep.c | 2 +- net/wireless/nl80211.c | 24 +++++++-------- net/wireless/sme.c | 6 ++-- net/wireless/util.c | 2 +- net/wireless/wext-sme.c | 2 +- scripts/coccinelle/free/devm_free.cocci | 4 +-- scripts/coccinelle/free/ifnullfree.cocci | 4 +-- scripts/coccinelle/free/kfree.cocci | 6 ++-- scripts/coccinelle/free/kfreeaddr.cocci | 2 +- security/apparmor/domain.c | 4 +-- security/apparmor/include/file.h | 2 +- security/apparmor/policy.c | 24 +++++++-------- security/apparmor/policy_ns.c | 6 ++-- security/apparmor/policy_unpack.c | 14 ++++----- security/keys/big_key.c | 6 ++-- security/keys/dh.c | 14 ++++----- security/keys/encrypted-keys/encrypted.c | 14 ++++----- security/keys/trusted-keys/trusted_tpm1.c | 34 +++++++++++----------- security/keys/user_defined.c | 6 ++-- 114 files changed, 323 insertions(+), 320 deletions(-) (limited to 'include') diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c index e1ae23911ccd..5057773f82e9 100644 --- a/arch/s390/crypto/prng.c +++ b/arch/s390/crypto/prng.c @@ -249,7 +249,7 @@ static void prng_tdes_deinstantiate(void) { pr_debug("The prng module stopped " "after running in triple DES mode\n"); - kzfree(prng_data); + kfree_sensitive(prng_data); } @@ -442,7 +442,7 @@ outfree: static void prng_sha512_deinstantiate(void) { pr_debug("The prng module stopped after running in SHA-512 mode\n"); - kzfree(prng_data); + kfree_sensitive(prng_data); } diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c index d147f1b2c925..cd3914fc9f3d 100644 --- a/arch/x86/power/hibernate.c +++ b/arch/x86/power/hibernate.c @@ -98,7 +98,7 @@ static int get_e820_md5(struct e820_table *table, void *buf) if (crypto_shash_digest(desc, (u8 *)table, size, buf)) ret = -EINVAL; - kzfree(desc); + kfree_sensitive(desc); free_tfm: crypto_free_shash(tfm); diff --git a/crypto/adiantum.c b/crypto/adiantum.c index 7fbdc3270984..ce4d5725342c 100644 --- a/crypto/adiantum.c +++ b/crypto/adiantum.c @@ -177,7 +177,7 @@ static int adiantum_setkey(struct crypto_skcipher *tfm, const u8 *key, keyp += NHPOLY1305_KEY_SIZE; WARN_ON(keyp != &data->derived_keys[ARRAY_SIZE(data->derived_keys)]); out: - kzfree(data); + kfree_sensitive(data); return err; } diff --git a/crypto/ahash.c b/crypto/ahash.c index 68a0f0cb75c4..d9d65d1cc669 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -183,7 +183,7 @@ static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key, alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); memcpy(alignbuffer, key, keylen); ret = tfm->setkey(tfm, alignbuffer, keylen); - kzfree(buffer); + kfree_sensitive(buffer); return ret; } @@ -302,7 +302,7 @@ static void ahash_restore_req(struct ahash_request *req, int err) req->priv = NULL; /* Free the req->priv.priv from the ADJUSTED request. 
*/ - kzfree(priv); + kfree_sensitive(priv); } static void ahash_notify_einprogress(struct ahash_request *req) diff --git a/crypto/api.c b/crypto/api.c index 5d8fe60b36c1..ed08cbd5b9d3 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -571,7 +571,7 @@ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm) alg->cra_exit(tfm); crypto_exit_ops(tfm); crypto_mod_put(alg); - kzfree(mem); + kfree_sensitive(mem); } EXPORT_SYMBOL_GPL(crypto_destroy_tfm); diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c index cc9dbcecaaca..7553ab18db89 100644 --- a/crypto/asymmetric_keys/verify_pefile.c +++ b/crypto/asymmetric_keys/verify_pefile.c @@ -376,7 +376,7 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen, } error: - kzfree(desc); + kfree_sensitive(desc); error_no_desc: crypto_free_shash(tfm); kleave(" = %d", ret); @@ -447,6 +447,6 @@ int verify_pefile_signature(const void *pebuf, unsigned pelen, ret = pefile_digest_pe(pebuf, pelen, &ctx); error: - kzfree(ctx.digest); + kfree_sensitive(ctx.digest); return ret; } diff --git a/crypto/deflate.c b/crypto/deflate.c index 4c0e6c9d942a..b2a46f6dc961 100644 --- a/crypto/deflate.c +++ b/crypto/deflate.c @@ -163,7 +163,7 @@ static void __deflate_exit(void *ctx) static void deflate_free_ctx(struct crypto_scomp *tfm, void *ctx) { __deflate_exit(ctx); - kzfree(ctx); + kfree_sensitive(ctx); } static void deflate_exit(struct crypto_tfm *tfm) diff --git a/crypto/drbg.c b/crypto/drbg.c index 8d80d93cab97..e99fe34cfa00 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1218,19 +1218,19 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg) { if (!drbg) return; - kzfree(drbg->Vbuf); + kfree_sensitive(drbg->Vbuf); drbg->Vbuf = NULL; drbg->V = NULL; - kzfree(drbg->Cbuf); + kfree_sensitive(drbg->Cbuf); drbg->Cbuf = NULL; drbg->C = NULL; - kzfree(drbg->scratchpadbuf); + kfree_sensitive(drbg->scratchpadbuf); drbg->scratchpadbuf = NULL; drbg->reseed_ctr = 0; drbg->d_ops = NULL; drbg->core = NULL; if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) { - kzfree(drbg->prev); + kfree_sensitive(drbg->prev); drbg->prev = NULL; drbg->fips_primed = false; } @@ -1701,7 +1701,7 @@ static int drbg_fini_hash_kernel(struct drbg_state *drbg) struct sdesc *sdesc = (struct sdesc *)drbg->priv_data; if (sdesc) { crypto_free_shash(sdesc->shash.tfm); - kzfree(sdesc); + kfree_sensitive(sdesc); } drbg->priv_data = NULL; return 0; diff --git a/crypto/ecc.c b/crypto/ecc.c index 8acf8433ca29..c80aa25994a0 100644 --- a/crypto/ecc.c +++ b/crypto/ecc.c @@ -67,7 +67,7 @@ static u64 *ecc_alloc_digits_space(unsigned int ndigits) static void ecc_free_digits_space(u64 *space) { - kzfree(space); + kfree_sensitive(space); } static struct ecc_point *ecc_alloc_point(unsigned int ndigits) @@ -101,9 +101,9 @@ static void ecc_free_point(struct ecc_point *p) if (!p) return; - kzfree(p->x); - kzfree(p->y); - kzfree(p); + kfree_sensitive(p->x); + kfree_sensitive(p->y); + kfree_sensitive(p); } static void vli_clear(u64 *vli, unsigned int ndigits) diff --git a/crypto/ecdh.c b/crypto/ecdh.c index bd599053a8c4..b0232d6ab4ce 100644 --- a/crypto/ecdh.c +++ b/crypto/ecdh.c @@ -124,7 +124,7 @@ static int ecdh_compute_value(struct kpp_request *req) /* fall through */ free_all: - kzfree(shared_secret); + kfree_sensitive(shared_secret); free_pubkey: kfree(public_key); return ret; diff --git a/crypto/gcm.c b/crypto/gcm.c index 3a36a9533c96..338ee0769747 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -139,7 +139,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 
*key, CRYPTO_TFM_REQ_MASK); err = crypto_ahash_setkey(ghash, (u8 *)&data->hash, sizeof(be128)); out: - kzfree(data); + kfree_sensitive(data); return err; } diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c index a4b1c026aaee..a69ae3e6c16c 100644 --- a/crypto/gf128mul.c +++ b/crypto/gf128mul.c @@ -304,8 +304,8 @@ void gf128mul_free_64k(struct gf128mul_64k *t) int i; for (i = 0; i < 16; i++) - kzfree(t->t[i]); - kzfree(t); + kfree_sensitive(t->t[i]); + kfree_sensitive(t); } EXPORT_SYMBOL(gf128mul_free_64k); diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c index b43684c0dade..eb7d1dd506bf 100644 --- a/crypto/jitterentropy-kcapi.c +++ b/crypto/jitterentropy-kcapi.c @@ -57,7 +57,7 @@ void *jent_zalloc(unsigned int len) void jent_zfree(void *ptr) { - kzfree(ptr); + kfree_sensitive(ptr); } int jent_fips_enabled(void) diff --git a/crypto/rng.c b/crypto/rng.c index 1490d210f1a1..a888d84b524a 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -53,7 +53,7 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) err = crypto_rng_alg(tfm)->seed(tfm, seed, slen); crypto_stats_rng_seed(alg, err); out: - kzfree(buf); + kfree_sensitive(buf); return err; } EXPORT_SYMBOL_GPL(crypto_rng_reset); diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index 4983b2b4a223..ddd3d10ffc15 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -199,7 +199,7 @@ static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err) sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, ctx->key_size), out_buf, ctx->key_size); - kzfree(out_buf); + kfree_sensitive(out_buf); out: req->dst_len = ctx->key_size; @@ -322,7 +322,7 @@ static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err) out_buf + pos, req->dst_len); done: - kzfree(req_ctx->out_buf); + kfree_sensitive(req_ctx->out_buf); return err; } @@ -500,7 +500,7 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err) req->dst_len) != 0) err = -EKEYREJECTED; done: - kzfree(req_ctx->out_buf); + kfree_sensitive(req_ctx->out_buf); return err; } diff --git a/crypto/seqiv.c b/crypto/seqiv.c index 23e22d8b63e6..0899d527c284 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c @@ -33,7 +33,7 @@ static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err) memcpy(req->iv, subreq->iv, crypto_aead_ivsize(geniv)); out: - kzfree(subreq->iv); + kfree_sensitive(subreq->iv); } static void seqiv_aead_encrypt_complete(struct crypto_async_request *base, diff --git a/crypto/shash.c b/crypto/shash.c index e6a4b5f39b8c..2e3433ad9762 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -44,7 +44,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); memcpy(alignbuffer, key, keylen); err = shash->setkey(tfm, alignbuffer, keylen); - kzfree(buffer); + kfree_sensitive(buffer); return err; } diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 467af525848a..b4dae640de9f 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -592,7 +592,7 @@ static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm, alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); memcpy(alignbuffer, key, keylen); ret = cipher->setkey(tfm, alignbuffer, keylen); - kzfree(buffer); + kfree_sensitive(buffer); return ret; } diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 6863f911fcee..23c27fc96394 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -1744,7 +1744,7 @@ out: 
kfree(vec.plaintext); kfree(vec.digest); crypto_free_shash(generic_tfm); - kzfree(generic_desc); + kfree_sensitive(generic_desc); return err; } #else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ @@ -3665,7 +3665,7 @@ static int drbg_cavs_test(const struct drbg_testvec *test, int pr, if (IS_ERR(drng)) { printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for " "%s\n", driver); - kzfree(buf); + kfree_sensitive(buf); return -ENOMEM; } @@ -3712,7 +3712,7 @@ static int drbg_cavs_test(const struct drbg_testvec *test, int pr, outbuf: crypto_free_rng(drng); - kzfree(buf); + kfree_sensitive(buf); return ret; } diff --git a/crypto/zstd.c b/crypto/zstd.c index 5a3ff258d8f7..1a3309f066f7 100644 --- a/crypto/zstd.c +++ b/crypto/zstd.c @@ -137,7 +137,7 @@ static void __zstd_exit(void *ctx) static void zstd_free_ctx(struct crypto_scomp *tfm, void *ctx) { __zstd_exit(ctx); - kzfree(ctx); + kfree_sensitive(ctx); } static void zstd_exit(struct crypto_tfm *tfm) diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c index 1e4f9a58bb24..b4d5fea27d20 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c @@ -254,7 +254,7 @@ theend_iv: offset = areq->cryptlen - ivsize; if (rctx->op_dir & CE_DECRYPTION) { memcpy(areq->iv, backup_iv, ivsize); - kzfree(backup_iv); + kfree_sensitive(backup_iv); } else { scatterwalk_map_and_copy(areq->iv, areq->dst, offset, ivsize, 0); diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index 7a131675a41c..7b39b4495571 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -249,7 +249,7 @@ theend_iv: if (rctx->op_dir & SS_DECRYPTION) { memcpy(areq->iv, backup_iv, ivsize); memzero_explicit(backup_iv, ivsize); - kzfree(backup_iv); + kfree_sensitive(backup_iv); } else { scatterwalk_map_and_copy(areq->iv, areq->dst, offset, ivsize, 0); diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c index 5880b94dcb32..d93210726697 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c +++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c @@ -252,8 +252,8 @@ static int meson_cipher(struct skcipher_request *areq) } } theend: - kzfree(bkeyiv); - kzfree(backup_iv); + kfree_sensitive(bkeyiv); + kfree_sensitive(backup_iv); return err; } diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c index ff02cc05affb..9bd8e5167be3 100644 --- a/drivers/crypto/atmel-ecc.c +++ b/drivers/crypto/atmel-ecc.c @@ -69,7 +69,7 @@ static void atmel_ecdh_done(struct atmel_i2c_work_data *work_data, void *areq, /* fall through */ free_work_data: - kzfree(work_data); + kfree_sensitive(work_data); kpp_request_complete(req, status); } diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 2e44d685618f..dd5f101e43f8 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -854,14 +854,14 @@ static int caam_rsa_dec(struct akcipher_request *req) static void caam_rsa_free_key(struct caam_rsa_key *key) { - kzfree(key->d); - kzfree(key->p); - kzfree(key->q); - kzfree(key->dp); - kzfree(key->dq); - kzfree(key->qinv); - kzfree(key->tmp1); - kzfree(key->tmp2); + kfree_sensitive(key->d); + kfree_sensitive(key->p); + kfree_sensitive(key->q); + kfree_sensitive(key->dp); + kfree_sensitive(key->dq); + kfree_sensitive(key->qinv); + kfree_sensitive(key->tmp1); + 
kfree_sensitive(key->tmp2); kfree(key->e); kfree(key->n); memset(key, 0, sizeof(*key)); @@ -1018,17 +1018,17 @@ static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx, return; free_dq: - kzfree(rsa_key->dq); + kfree_sensitive(rsa_key->dq); free_dp: - kzfree(rsa_key->dp); + kfree_sensitive(rsa_key->dp); free_tmp2: - kzfree(rsa_key->tmp2); + kfree_sensitive(rsa_key->tmp2); free_tmp1: - kzfree(rsa_key->tmp1); + kfree_sensitive(rsa_key->tmp1); free_q: - kzfree(rsa_key->q); + kfree_sensitive(rsa_key->q); free_p: - kzfree(rsa_key->p); + kfree_sensitive(rsa_key->p); } static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key, diff --git a/drivers/crypto/cavium/cpt/cptvf_main.c b/drivers/crypto/cavium/cpt/cptvf_main.c index 0f72e9abdefe..a15245992cf9 100644 --- a/drivers/crypto/cavium/cpt/cptvf_main.c +++ b/drivers/crypto/cavium/cpt/cptvf_main.c @@ -74,7 +74,7 @@ static void cleanup_worker_threads(struct cpt_vf *cptvf) for (i = 0; i < cptvf->nr_queues; i++) tasklet_kill(&cwqe_info->vq_wqe[i].twork); - kzfree(cwqe_info); + kfree_sensitive(cwqe_info); cptvf->wqe_info = NULL; } @@ -88,7 +88,7 @@ static void free_pending_queues(struct pending_qinfo *pqinfo) continue; /* free single queue */ - kzfree((queue->head)); + kfree_sensitive((queue->head)); queue->front = 0; queue->rear = 0; @@ -189,7 +189,7 @@ static void free_command_queues(struct cpt_vf *cptvf, chunk->head = NULL; chunk->dma_addr = 0; hlist_del(&chunk->nextchunk); - kzfree(chunk); + kfree_sensitive(chunk); } queue->nchunks = 0; diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c index 3878b01e19e1..dc5fda522719 100644 --- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c +++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c @@ -305,12 +305,12 @@ static void do_request_cleanup(struct cpt_vf *cptvf, } } - kzfree(info->scatter_components); - kzfree(info->gather_components); - kzfree(info->out_buffer); - kzfree(info->in_buffer); - kzfree((void *)info->completion_addr); - kzfree(info); + kfree_sensitive(info->scatter_components); + kfree_sensitive(info->gather_components); + kfree_sensitive(info->out_buffer); + kfree_sensitive(info->in_buffer); + kfree_sensitive((void *)info->completion_addr); + kfree_sensitive(info); } static void do_post_process(struct cpt_vf *cptvf, struct cpt_info_buffer *info) diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c index 5cbc64b851b9..a5cdc2b48bd6 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_lib.c +++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c @@ -90,7 +90,7 @@ static void nitrox_free_aqm_queues(struct nitrox_device *ndev) for (i = 0; i < ndev->nr_queues; i++) { nitrox_cmdq_cleanup(ndev->aqmq[i]); - kzfree(ndev->aqmq[i]); + kfree_sensitive(ndev->aqmq[i]); ndev->aqmq[i] = NULL; } } @@ -122,7 +122,7 @@ static int nitrox_alloc_aqm_queues(struct nitrox_device *ndev) err = nitrox_cmdq_init(cmdq, AQM_Q_ALIGN_BYTES); if (err) { - kzfree(cmdq); + kfree_sensitive(cmdq); goto aqmq_fail; } ndev->aqmq[i] = cmdq; diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c index 4985bc812b0e..7df71fcebe8f 100644 --- a/drivers/crypto/cavium/zip/zip_crypto.c +++ b/drivers/crypto/cavium/zip/zip_crypto.c @@ -260,7 +260,7 @@ void *zip_alloc_scomp_ctx_deflate(struct crypto_scomp *tfm) ret = zip_ctx_init(zip_ctx, 0); if (ret) { - kzfree(zip_ctx); + kfree_sensitive(zip_ctx); return ERR_PTR(ret); } @@ -279,7 +279,7 @@ void *zip_alloc_scomp_ctx_lzs(struct crypto_scomp *tfm) 
ret = zip_ctx_init(zip_ctx, 1); if (ret) { - kzfree(zip_ctx); + kfree_sensitive(zip_ctx); return ERR_PTR(ret); } @@ -291,7 +291,7 @@ void zip_free_scomp_ctx(struct crypto_scomp *tfm, void *ctx) struct zip_kernel_ctx *zip_ctx = ctx; zip_ctx_exit(zip_ctx); - kzfree(zip_ctx); + kfree_sensitive(zip_ctx); } int zip_scomp_compress(struct crypto_scomp *tfm, diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c index 649c91d60401..1223ac70aea2 100644 --- a/drivers/crypto/ccp/ccp-crypto-rsa.c +++ b/drivers/crypto/ccp/ccp-crypto-rsa.c @@ -112,13 +112,13 @@ static int ccp_check_key_length(unsigned int len) static void ccp_rsa_free_key_bufs(struct ccp_ctx *ctx) { /* Clean up old key data */ - kzfree(ctx->u.rsa.e_buf); + kfree_sensitive(ctx->u.rsa.e_buf); ctx->u.rsa.e_buf = NULL; ctx->u.rsa.e_len = 0; - kzfree(ctx->u.rsa.n_buf); + kfree_sensitive(ctx->u.rsa.n_buf); ctx->u.rsa.n_buf = NULL; ctx->u.rsa.n_len = 0; - kzfree(ctx->u.rsa.d_buf); + kfree_sensitive(ctx->u.rsa.d_buf); ctx->u.rsa.d_buf = NULL; ctx->u.rsa.d_len = 0; } diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c index 1cf51edbc4b9..35794c7271fb 100644 --- a/drivers/crypto/ccree/cc_aead.c +++ b/drivers/crypto/ccree/cc_aead.c @@ -448,7 +448,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey, if (dma_mapping_error(dev, key_dma_addr)) { dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n", key, keylen); - kzfree(key); + kfree_sensitive(key); return -ENOMEM; } if (keylen > blocksize) { @@ -533,7 +533,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey, if (key_dma_addr) dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE); - kzfree(key); + kfree_sensitive(key); return rc; } diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c index b2bd093e7013..a5e041d9d2cf 100644 --- a/drivers/crypto/ccree/cc_buffer_mgr.c +++ b/drivers/crypto/ccree/cc_buffer_mgr.c @@ -488,7 +488,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req) if (areq_ctx->gen_ctx.iv_dma_addr) { dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr, hw_iv_size, DMA_BIDIRECTIONAL); - kzfree(areq_ctx->gen_ctx.iv); + kfree_sensitive(areq_ctx->gen_ctx.iv); } /* Release pool */ @@ -559,7 +559,7 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata, if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) { dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n", hw_iv_size, req->iv); - kzfree(areq_ctx->gen_ctx.iv); + kfree_sensitive(areq_ctx->gen_ctx.iv); areq_ctx->gen_ctx.iv = NULL; rc = -ENOMEM; goto chain_iv_exit; diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index 076669dc1035..d77ae981b64b 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -257,7 +257,7 @@ static void cc_cipher_exit(struct crypto_tfm *tfm) &ctx_p->user.key_dma_addr); /* Free key buffer in context */ - kzfree(ctx_p->user.key); + kfree_sensitive(ctx_p->user.key); dev_dbg(dev, "Free key buffer in context. 
key=@%p\n", ctx_p->user.key); } @@ -881,7 +881,7 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err) /* Not a BACKLOG notification */ cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst); memcpy(req->iv, req_ctx->iv, ivsize); - kzfree(req_ctx->iv); + kfree_sensitive(req_ctx->iv); } skcipher_request_complete(req, err); @@ -994,7 +994,7 @@ static int cc_cipher_process(struct skcipher_request *req, exit_process: if (rc != -EINPROGRESS && rc != -EBUSY) { - kzfree(req_ctx->iv); + kfree_sensitive(req_ctx->iv); } return rc; diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c index d5310783af15..683c9a430e11 100644 --- a/drivers/crypto/ccree/cc_hash.c +++ b/drivers/crypto/ccree/cc_hash.c @@ -764,7 +764,7 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key, if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) { dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n", ctx->key_params.key, keylen); - kzfree(ctx->key_params.key); + kfree_sensitive(ctx->key_params.key); return -ENOMEM; } dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n", @@ -913,7 +913,7 @@ out: &ctx->key_params.key_dma_addr, ctx->key_params.keylen); } - kzfree(ctx->key_params.key); + kfree_sensitive(ctx->key_params.key); return rc; } @@ -950,7 +950,7 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash, if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) { dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n", key, keylen); - kzfree(ctx->key_params.key); + kfree_sensitive(ctx->key_params.key); return -ENOMEM; } dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n", @@ -999,7 +999,7 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash, dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n", &ctx->key_params.key_dma_addr, ctx->key_params.keylen); - kzfree(ctx->key_params.key); + kfree_sensitive(ctx->key_params.key); return rc; } diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c index 1d7649ecf44e..33fb27745d52 100644 --- a/drivers/crypto/ccree/cc_request_mgr.c +++ b/drivers/crypto/ccree/cc_request_mgr.c @@ -107,7 +107,7 @@ void cc_req_mgr_fini(struct cc_drvdata *drvdata) /* Kill tasklet */ tasklet_kill(&req_mgr_h->comptask); #endif - kzfree(req_mgr_h); + kfree_sensitive(req_mgr_h); drvdata->request_mgr_handle = NULL; } diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c index bd0bd9ffd6e9..f2a2fc111164 100644 --- a/drivers/crypto/marvell/cesa/hash.c +++ b/drivers/crypto/marvell/cesa/hash.c @@ -1157,7 +1157,7 @@ static int mv_cesa_ahmac_pad_init(struct ahash_request *req, } /* Set the memory region to 0 to avoid any leak. 
*/ - kzfree(keydup); + kfree_sensitive(keydup); if (ret) return ret; diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c index ce3168327a39..228fe8e47e0e 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c @@ -68,7 +68,7 @@ static void cleanup_worker_threads(struct otx_cptvf *cptvf) for (i = 0; i < cptvf->num_queues; i++) tasklet_kill(&cwqe_info->vq_wqe[i].twork); - kzfree(cwqe_info); + kfree_sensitive(cwqe_info); cptvf->wqe_info = NULL; } @@ -82,7 +82,7 @@ static void free_pending_queues(struct otx_cpt_pending_qinfo *pqinfo) continue; /* free single queue */ - kzfree((queue->head)); + kfree_sensitive((queue->head)); queue->front = 0; queue->rear = 0; queue->qlen = 0; @@ -176,7 +176,7 @@ static void free_command_queues(struct otx_cptvf *cptvf, chunk->head = NULL; chunk->dma_addr = 0; list_del(&chunk->nextchunk); - kzfree(chunk); + kfree_sensitive(chunk); } queue->num_chunks = 0; queue->idx = 0; diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h index d912fe0c532d..a02d059fb652 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h @@ -215,7 +215,7 @@ static inline void do_request_cleanup(struct pci_dev *pdev, DMA_BIDIRECTIONAL); } } - kzfree(info); + kfree_sensitive(info); } struct otx_cptvf_wqe; diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index f03c238f5a31..40882d6d52c1 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c @@ -746,7 +746,7 @@ void nx_crypto_ctx_exit(struct crypto_tfm *tfm) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm); - kzfree(nx_ctx->kmem); + kfree_sensitive(nx_ctx->kmem); nx_ctx->csbcpb = NULL; nx_ctx->csbcpb_aead = NULL; nx_ctx->in_sg = NULL; @@ -762,7 +762,7 @@ void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm) { struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm); - kzfree(nx_ctx->kmem); + kfree_sensitive(nx_ctx->kmem); } static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id) diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c index b2601958282e..583c0b535d13 100644 --- a/drivers/crypto/virtio/virtio_crypto_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_algs.c @@ -167,7 +167,7 @@ static int virtio_crypto_alg_skcipher_init_session( num_in, vcrypto, GFP_ATOMIC); if (err < 0) { spin_unlock(&vcrypto->ctrl_lock); - kzfree(cipher_key); + kfree_sensitive(cipher_key); return err; } virtqueue_kick(vcrypto->ctrl_vq); @@ -184,7 +184,7 @@ static int virtio_crypto_alg_skcipher_init_session( spin_unlock(&vcrypto->ctrl_lock); pr_err("virtio_crypto: Create session failed status: %u\n", le32_to_cpu(vcrypto->input.status)); - kzfree(cipher_key); + kfree_sensitive(cipher_key); return -EINVAL; } @@ -197,7 +197,7 @@ static int virtio_crypto_alg_skcipher_init_session( spin_unlock(&vcrypto->ctrl_lock); - kzfree(cipher_key); + kfree_sensitive(cipher_key); return 0; } @@ -472,9 +472,9 @@ __virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, return 0; free_iv: - kzfree(iv); + kfree_sensitive(iv); free: - kzfree(req_data); + kfree_sensitive(req_data); kfree(sgs); return err; } @@ -583,7 +583,7 @@ static void virtio_crypto_skcipher_finalize_req( scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - AES_BLOCK_SIZE, AES_BLOCK_SIZE, 0); - kzfree(vc_sym_req->iv); + kfree_sensitive(vc_sym_req->iv); 
virtcrypto_clear_request(&vc_sym_req->base); crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine, diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c index 77e744eaedd0..0c66d6193ca2 100644 --- a/drivers/crypto/virtio/virtio_crypto_core.c +++ b/drivers/crypto/virtio/virtio_crypto_core.c @@ -17,7 +17,7 @@ void virtcrypto_clear_request(struct virtio_crypto_request *vc_req) { if (vc_req) { - kzfree(vc_req->req_data); + kfree_sensitive(vc_req->req_data); kfree(vc_req->sgs); } } diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index b437a14c4942..37dcc52cf21d 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -407,7 +407,7 @@ static void crypt_iv_lmk_dtr(struct crypt_config *cc) crypto_free_shash(lmk->hash_tfm); lmk->hash_tfm = NULL; - kzfree(lmk->seed); + kfree_sensitive(lmk->seed); lmk->seed = NULL; } @@ -558,9 +558,9 @@ static void crypt_iv_tcw_dtr(struct crypt_config *cc) { struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; - kzfree(tcw->iv_seed); + kfree_sensitive(tcw->iv_seed); tcw->iv_seed = NULL; - kzfree(tcw->whitening); + kfree_sensitive(tcw->whitening); tcw->whitening = NULL; if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm)) @@ -994,8 +994,8 @@ static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *d kunmap_atomic(data); out: - kzfree(ks); - kzfree(es); + kfree_sensitive(ks); + kfree_sensitive(es); skcipher_request_free(req); return r; } @@ -2294,7 +2294,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string key = request_key(type, key_desc + 1, NULL); if (IS_ERR(key)) { - kzfree(new_key_string); + kfree_sensitive(new_key_string); return PTR_ERR(key); } @@ -2304,7 +2304,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string if (ret < 0) { up_read(&key->sem); key_put(key); - kzfree(new_key_string); + kfree_sensitive(new_key_string); return ret; } @@ -2318,10 +2318,10 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string if (!ret) { set_bit(DM_CRYPT_KEY_VALID, &cc->flags); - kzfree(cc->key_string); + kfree_sensitive(cc->key_string); cc->key_string = new_key_string; } else - kzfree(new_key_string); + kfree_sensitive(new_key_string); return ret; } @@ -2382,7 +2382,7 @@ static int crypt_set_key(struct crypt_config *cc, char *key) clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); /* wipe references to any kernel keyring key */ - kzfree(cc->key_string); + kfree_sensitive(cc->key_string); cc->key_string = NULL; /* Decode key from its hex representation. 
*/ @@ -2414,7 +2414,7 @@ static int crypt_wipe_key(struct crypt_config *cc) return r; } - kzfree(cc->key_string); + kfree_sensitive(cc->key_string); cc->key_string = NULL; r = crypt_setkey(cc); memset(&cc->key, 0, cc->key_size * sizeof(u8)); @@ -2493,15 +2493,15 @@ static void crypt_dtr(struct dm_target *ti) if (cc->dev) dm_put_device(ti, cc->dev); - kzfree(cc->cipher_string); - kzfree(cc->key_string); - kzfree(cc->cipher_auth); - kzfree(cc->authenc_key); + kfree_sensitive(cc->cipher_string); + kfree_sensitive(cc->key_string); + kfree_sensitive(cc->cipher_auth); + kfree_sensitive(cc->authenc_key); mutex_destroy(&cc->bio_alloc_lock); /* Must zero key material before freeing */ - kzfree(cc); + kfree_sensitive(cc); spin_lock(&dm_crypt_clients_lock); WARN_ON(!dm_crypt_clients_n); diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 5da3eb661e50..8c8d940e532e 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -3405,8 +3405,8 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int static void free_alg(struct alg_spec *a) { - kzfree(a->alg_string); - kzfree(a->key); + kfree_sensitive(a->alg_string); + kfree_sensitive(a->key); memset(a, 0, sizeof *a); } @@ -4337,7 +4337,7 @@ static void dm_integrity_dtr(struct dm_target *ti) for (i = 0; i < ic->journal_sections; i++) { struct skcipher_request *req = ic->sk_requests[i]; if (req) { - kzfree(req->iv); + kfree_sensitive(req->iv); skcipher_request_free(req); } } diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c index c0d139c26505..2d778d0f011e 100644 --- a/drivers/misc/ibmvmc.c +++ b/drivers/misc/ibmvmc.c @@ -286,7 +286,7 @@ static void *alloc_dma_buffer(struct vio_dev *vdev, size_t size, if (dma_mapping_error(&vdev->dev, *dma_handle)) { *dma_handle = 0; - kzfree(buffer); + kfree_sensitive(buffer); return NULL; } @@ -310,7 +310,7 @@ static void free_dma_buffer(struct vio_dev *vdev, size_t size, void *vaddr, dma_unmap_single(&vdev->dev, dma_handle, size, DMA_BIDIRECTIONAL); /* deallocate memory */ - kzfree(vaddr); + kfree_sensitive(vaddr); } /** @@ -883,7 +883,7 @@ static int ibmvmc_close(struct inode *inode, struct file *file) spin_unlock_irqrestore(&hmc->lock, flags); } - kzfree(session); + kfree_sensitive(session); return rc; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 0874ae47cb03..3ab6db2588d3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -137,7 +137,7 @@ static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head) while (chain) { chain_tmp = chain->next; - kzfree(chain); + kfree_sensitive(chain); chain = chain_tmp; } } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index 6516980965a2..eca73526ac86 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -960,9 +960,9 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) return 0; err_aead: - kzfree(xs->aead); + kfree_sensitive(xs->aead); err_xs: - kzfree(xs); + kfree_sensitive(xs); err_out: msgbuf[1] = err; return err; @@ -1047,7 +1047,7 @@ int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) ixgbe_ipsec_del_sa(xs); /* remove the xs that was made-up in the add request */ - kzfree(xs); + kfree_sensitive(xs); return 0; } diff --git a/drivers/net/ppp/ppp_mppe.c 
b/drivers/net/ppp/ppp_mppe.c index de3b57d09d0c..208f6e24f37c 100644 --- a/drivers/net/ppp/ppp_mppe.c +++ b/drivers/net/ppp/ppp_mppe.c @@ -222,7 +222,7 @@ out_free: kfree(state->sha1_digest); if (state->sha1) { crypto_free_shash(state->sha1->tfm); - kzfree(state->sha1); + kfree_sensitive(state->sha1); } kfree(state); out: @@ -238,8 +238,8 @@ static void mppe_free(void *arg) if (state) { kfree(state->sha1_digest); crypto_free_shash(state->sha1->tfm); - kzfree(state->sha1); - kzfree(state); + kfree_sensitive(state->sha1); + kfree_sensitive(state); } } diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c index 201a22681945..3dd3b76790d0 100644 --- a/drivers/net/wireguard/noise.c +++ b/drivers/net/wireguard/noise.c @@ -114,7 +114,7 @@ static struct noise_keypair *keypair_create(struct wg_peer *peer) static void keypair_free_rcu(struct rcu_head *rcu) { - kzfree(container_of(rcu, struct noise_keypair, rcu)); + kfree_sensitive(container_of(rcu, struct noise_keypair, rcu)); } static void keypair_free_kref(struct kref *kref) @@ -821,7 +821,7 @@ bool wg_noise_handshake_begin_session(struct noise_handshake *handshake, handshake->entry.peer->device->index_hashtable, &handshake->entry, &new_keypair->entry); } else { - kzfree(new_keypair); + kfree_sensitive(new_keypair); } rcu_read_unlock_bh(); diff --git a/drivers/net/wireguard/peer.c b/drivers/net/wireguard/peer.c index 1d634bd3038f..b3b6370e6b95 100644 --- a/drivers/net/wireguard/peer.c +++ b/drivers/net/wireguard/peer.c @@ -203,7 +203,7 @@ static void rcu_release(struct rcu_head *rcu) /* The final zeroing takes care of clearing any remaining handshake key * material and other potentially sensitive information. */ - kzfree(peer); + kfree_sensitive(peer); } static void kref_release(struct kref *refcount) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 24cb1b1f21f0..9463c108aa96 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1369,7 +1369,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, &rxcb, rxq->id); if (reclaim) { - kzfree(txq->entries[cmd_index].free_buf); + kfree_sensitive(txq->entries[cmd_index].free_buf); txq->entries[cmd_index].free_buf = NULL; } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 7fc7542535d8..606bef2ecc7b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -1026,7 +1026,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE); out_meta->flags = cmd->flags; if (WARN_ON_ONCE(txq->entries[idx].free_buf)) - kzfree(txq->entries[idx].free_buf); + kfree_sensitive(txq->entries[idx].free_buf); txq->entries[idx].free_buf = dup_buf; trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide); @@ -1257,8 +1257,8 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id) /* De-alloc array of command/tx buffers */ if (txq_id == trans->txqs.cmd.q_id) for (i = 0; i < txq->n_window; i++) { - kzfree(txq->entries[i].cmd); - kzfree(txq->entries[i].free_buf); + kfree_sensitive(txq->entries[i].cmd); + kfree_sensitive(txq->entries[i].free_buf); } del_timer_sync(&txq->stuck_timer); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 5c6c3fa0d29f..eb396c06b7fb 100644 --- 
a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -721,8 +721,8 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) /* De-alloc array of command/tx buffers */ if (txq_id == trans->txqs.cmd.q_id) for (i = 0; i < txq->n_window; i++) { - kzfree(txq->entries[i].cmd); - kzfree(txq->entries[i].free_buf); + kfree_sensitive(txq->entries[i].cmd); + kfree_sensitive(txq->entries[i].free_buf); } /* De-alloc circular buffer of TFDs */ @@ -1765,7 +1765,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE); out_meta->flags = cmd->flags; if (WARN_ON_ONCE(txq->entries[idx].free_buf)) - kzfree(txq->entries[idx].free_buf); + kfree_sensitive(txq->entries[idx].free_buf); txq->entries[idx].free_buf = dup_buf; trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide); diff --git a/drivers/net/wireless/intersil/orinoco/wext.c b/drivers/net/wireless/intersil/orinoco/wext.c index 1d4dae422106..7b6c4ae8ddb3 100644 --- a/drivers/net/wireless/intersil/orinoco/wext.c +++ b/drivers/net/wireless/intersil/orinoco/wext.c @@ -31,8 +31,8 @@ static int orinoco_set_key(struct orinoco_private *priv, int index, enum orinoco_alg alg, const u8 *key, int key_len, const u8 *seq, int seq_len) { - kzfree(priv->keys[index].key); - kzfree(priv->keys[index].seq); + kfree_sensitive(priv->keys[index].key); + kfree_sensitive(priv->keys[index].seq); if (key_len) { priv->keys[index].key = kzalloc(key_len, GFP_ATOMIC); diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index 1a1d5e3c8d45..1ea046324e8f 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -219,8 +219,8 @@ static inline void ap_init_message(struct ap_message *ap_msg) */ static inline void ap_release_message(struct ap_message *ap_msg) { - kzfree(ap_msg->msg); - kzfree(ap_msg->private); + kfree_sensitive(ap_msg->msg); + kfree_sensitive(ap_msg->private); } /* diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c index b10a92ae2067..eaaf6a5440a9 100644 --- a/drivers/staging/ks7010/ks_hostif.c +++ b/drivers/staging/ks7010/ks_hostif.c @@ -245,7 +245,7 @@ michael_mic(u8 *key, u8 *data, unsigned int len, u8 priority, u8 *result) ret = crypto_shash_finup(desc, data + 12, len - 12, result); err_free_desc: - kzfree(desc); + kfree_sensitive(desc); err_free_tfm: crypto_free_shash(tfm); diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c index 0f15c96183a0..7f74e1d05b3a 100644 --- a/drivers/staging/rtl8723bs/core/rtw_security.c +++ b/drivers/staging/rtl8723bs/core/rtw_security.c @@ -2251,7 +2251,7 @@ static void gf_mulx(u8 *pad) static void aes_encrypt_deinit(void *ctx) { - kzfree(ctx); + kfree_sensitive(ctx); } diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c index b809c0015c0c..7b091c5a2984 100644 --- a/drivers/staging/wlan-ng/p80211netdev.c +++ b/drivers/staging/wlan-ng/p80211netdev.c @@ -429,7 +429,7 @@ static netdev_tx_t p80211knetdev_hard_start_xmit(struct sk_buff *skb, failed: /* Free up the WEP buffer if it's not the same as the skb */ if ((p80211_wep.data) && (p80211_wep.data != skb->data)) - kzfree(p80211_wep.data); + kfree_sensitive(p80211_wep.data); /* we always free the skb here, never in a lower level. 
*/ if (!result) diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index 0e54627d9aa8..62d912b79c61 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -484,7 +484,7 @@ static int chap_server_compute_hash( pr_debug("[server] Sending CHAP_R=0x%s\n", response); auth_ret = 0; out: - kzfree(desc); + kfree_sensitive(desc); if (tfm) crypto_free_shash(tfm); kfree(initiatorchg); diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index 874a551f339c..9daa256f69d4 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c @@ -797,7 +797,7 @@ calc_seckey(struct cifs_ses *ses) ses->auth_key.len = CIFS_SESS_KEY_SIZE; memzero_explicit(sec_key, CIFS_SESS_KEY_SIZE); - kzfree(ctx_arc4); + kfree_sensitive(ctx_arc4); return 0; } diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index a61abde09ffe..889fee586d94 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2182,7 +2182,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, tmp_end++; if (!(tmp_end < end && tmp_end[1] == delim)) { /* No it is not. Set the password to NULL */ - kzfree(vol->password); + kfree_sensitive(vol->password); vol->password = NULL; break; } @@ -2220,7 +2220,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, options = end; } - kzfree(vol->password); + kfree_sensitive(vol->password); /* Now build new password string */ temp_len = strlen(value); vol->password = kzalloc(temp_len+1, GFP_KERNEL); @@ -3198,7 +3198,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses) rc = -ENOMEM; kfree(vol->username); vol->username = NULL; - kzfree(vol->password); + kfree_sensitive(vol->password); vol->password = NULL; goto out_key_put; } @@ -4219,7 +4219,7 @@ void cifs_cleanup_volume_info_contents(struct smb_vol *volume_info) { kfree(volume_info->username); - kzfree(volume_info->password); + kfree_sensitive(volume_info->password); kfree(volume_info->UNC); kfree(volume_info->domainname); kfree(volume_info->iocharset); @@ -5345,7 +5345,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) out: kfree(vol_info->username); - kzfree(vol_info->password); + kfree_sensitive(vol_info->password); kfree(vol_info); return tcon; diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c index df81c718d2fa..7a8d41493aad 100644 --- a/fs/cifs/dfs_cache.c +++ b/fs/cifs/dfs_cache.c @@ -1131,7 +1131,7 @@ err_free_domainname: err_free_unc: kfree(new->UNC); err_free_password: - kzfree(new->password); + kfree_sensitive(new->password); err_free_username: kfree(new->username); kfree(new); diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index e44d049142d0..3e3772446c71 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -103,12 +103,12 @@ sesInfoFree(struct cifs_ses *buf_to_free) kfree(buf_to_free->serverOS); kfree(buf_to_free->serverDomain); kfree(buf_to_free->serverNOS); - kzfree(buf_to_free->password); + kfree_sensitive(buf_to_free->password); kfree(buf_to_free->user_name); kfree(buf_to_free->domainName); - kzfree(buf_to_free->auth_key.response); + kfree_sensitive(buf_to_free->auth_key.response); kfree(buf_to_free->iface_list); - kzfree(buf_to_free); + kfree_sensitive(buf_to_free); } struct cifs_tcon * @@ -148,7 +148,7 @@ tconInfoFree(struct cifs_tcon *buf_to_free) } atomic_dec(&tconInfoAllocCount); kfree(buf_to_free->nativeFileSystem); - kzfree(buf_to_free->password); + kfree_sensitive(buf_to_free->password); kfree(buf_to_free->crfid.fid); #ifdef CONFIG_CIFS_DFS_UPCALL kfree(buf_to_free->dfs_path); 
diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c index b6b8574caa13..faa25541ccb6 100644 --- a/fs/crypto/inline_crypt.c +++ b/fs/crypto/inline_crypt.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "fscrypt_private.h" @@ -187,7 +188,7 @@ int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, fail: for (i = 0; i < queue_refs; i++) blk_put_queue(blk_key->devs[i]); - kzfree(blk_key); + kfree_sensitive(blk_key); return err; } @@ -201,7 +202,7 @@ void fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key) blk_crypto_evict_key(blk_key->devs[i], &blk_key->base); blk_put_queue(blk_key->devs[i]); } - kzfree(blk_key); + kfree_sensitive(blk_key); } } diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c index 71d56f8e2870..e74f239c4428 100644 --- a/fs/crypto/keyring.c +++ b/fs/crypto/keyring.c @@ -51,7 +51,7 @@ static void free_master_key(struct fscrypt_master_key *mk) } key_put(mk->mk_users); - kzfree(mk); + kfree_sensitive(mk); } static inline bool valid_key_spec(const struct fscrypt_key_specifier *spec) @@ -531,7 +531,7 @@ static int fscrypt_provisioning_key_preparse(struct key_preparsed_payload *prep) static void fscrypt_provisioning_key_free_preparse( struct key_preparsed_payload *prep) { - kzfree(prep->payload.data[0]); + kfree_sensitive(prep->payload.data[0]); } static void fscrypt_provisioning_key_describe(const struct key *key, @@ -548,7 +548,7 @@ static void fscrypt_provisioning_key_describe(const struct key *key, static void fscrypt_provisioning_key_destroy(struct key *key) { - kzfree(key->payload.data[0]); + kfree_sensitive(key->payload.data[0]); } static struct key_type key_type_fscrypt_provisioning = { diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c index e4e707fb1100..a3cb52572b05 100644 --- a/fs/crypto/keysetup_v1.c +++ b/fs/crypto/keysetup_v1.c @@ -155,7 +155,7 @@ static void free_direct_key(struct fscrypt_direct_key *dk) { if (dk) { fscrypt_destroy_prepared_key(&dk->dk_key); - kzfree(dk); + kfree_sensitive(dk); } } @@ -283,7 +283,7 @@ static int setup_v1_file_key_derived(struct fscrypt_info *ci, err = fscrypt_set_per_file_enc_key(ci, derived_key); out: - kzfree(derived_key); + kfree_sensitive(derived_key); return err; } diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index af3eb02bbca1..f6a17d259db7 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c @@ -838,7 +838,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes, out_release_free_unlock: crypto_free_shash(s->hash_tfm); out_free_unlock: - kzfree(s->block_aligned_filename); + kfree_sensitive(s->block_aligned_filename); out_unlock: mutex_unlock(s->tfm_mutex); out: @@ -847,7 +847,7 @@ out: key_put(auth_tok_key); } skcipher_request_free(s->skcipher_req); - kzfree(s->hash_desc); + kfree_sensitive(s->hash_desc); kfree(s); return rc; } diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c index 8646ba76def3..c0dfd9647627 100644 --- a/fs/ecryptfs/messaging.c +++ b/fs/ecryptfs/messaging.c @@ -175,7 +175,7 @@ int ecryptfs_exorcise_daemon(struct ecryptfs_daemon *daemon) } hlist_del(&daemon->euid_chain); mutex_unlock(&daemon->mux); - kzfree(daemon); + kfree_sensitive(daemon); out: return rc; } diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 62c68550aab6..c32a6f5664e9 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h @@ -425,7 +425,7 @@ static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, */ static inline void aead_request_free(struct aead_request 
*req) { - kzfree(req); + kfree_sensitive(req); } /** diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h index 6924b091adec..1d3aa252caba 100644 --- a/include/crypto/akcipher.h +++ b/include/crypto/akcipher.h @@ -207,7 +207,7 @@ static inline struct akcipher_request *akcipher_request_alloc( */ static inline void akcipher_request_free(struct akcipher_request *req) { - kzfree(req); + kfree_sensitive(req); } /** diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h index fa0a63d298dc..81330c6446f6 100644 --- a/include/crypto/gf128mul.h +++ b/include/crypto/gf128mul.h @@ -230,7 +230,7 @@ void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t); void gf128mul_x8_ble(le128 *r, const le128 *x); static inline void gf128mul_free_4k(struct gf128mul_4k *t) { - kzfree(t); + kfree_sensitive(t); } diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 19ce91f2359f..0d1b403888c9 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -606,7 +606,7 @@ static inline struct ahash_request *ahash_request_alloc( */ static inline void ahash_request_free(struct ahash_request *req) { - kzfree(req); + kfree_sensitive(req); } static inline void ahash_request_zero(struct ahash_request *req) diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h index cf478681b53e..cfc47e18820f 100644 --- a/include/crypto/internal/acompress.h +++ b/include/crypto/internal/acompress.h @@ -46,7 +46,7 @@ static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm) static inline void __acomp_request_free(struct acomp_req *req) { - kzfree(req); + kfree_sensitive(req); } /** diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h index cd9a9b500624..88b591215d5c 100644 --- a/include/crypto/kpp.h +++ b/include/crypto/kpp.h @@ -187,7 +187,7 @@ static inline struct kpp_request *kpp_request_alloc(struct crypto_kpp *tfm, */ static inline void kpp_request_free(struct kpp_request *req) { - kzfree(req); + kfree_sensitive(req); } /** diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 5663f71198b3..6a733b171a5d 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -508,7 +508,7 @@ static inline struct skcipher_request *skcipher_request_alloc( */ static inline void skcipher_request_free(struct skcipher_request *req) { - kzfree(req); + kfree_sensitive(req); } static inline void skcipher_request_zero(struct skcipher_request *req) diff --git a/include/linux/slab.h b/include/linux/slab.h index 6d454886bcaf..0884d82c55ee 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -186,10 +186,12 @@ void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *); */ void * __must_check krealloc(const void *, size_t, gfp_t); void kfree(const void *); -void kzfree(const void *); +void kfree_sensitive(const void *); size_t __ksize(const void *); size_t ksize(const void *); +#define kzfree(x) kfree_sensitive(x) /* For backward compatibility */ + #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR void __check_heap_object(const void *ptr, unsigned long n, struct page *page, bool to_user); diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c index 20ed0f766787..4cd2b335cb7f 100644 --- a/lib/mpi/mpiutil.c +++ b/lib/mpi/mpiutil.c @@ -69,7 +69,7 @@ void mpi_free_limb_space(mpi_ptr_t a) if (!a) return; - kzfree(a); + kfree_sensitive(a); } void mpi_assign_limb_space(MPI a, mpi_ptr_t ap, unsigned nlimbs) @@ -95,7 +95,7 @@ int mpi_resize(MPI a, unsigned nlimbs) if (!p) return -ENOMEM; memcpy(p, a->d, a->alloced * 
sizeof(mpi_limb_t)); - kzfree(a->d); + kfree_sensitive(a->d); a->d = p; } else { a->d = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL); @@ -112,7 +112,7 @@ void mpi_free(MPI a) return; if (a->flags & 4) - kzfree(a->d); + kfree_sensitive(a->d); else mpi_free_limb_space(a->d); diff --git a/lib/test_kasan.c b/lib/test_kasan.c index dc2c6a51d11a..e4d9a86b174b 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -766,15 +766,15 @@ static noinline void __init kmalloc_double_kzfree(void) char *ptr; size_t size = 16; - pr_info("double-free (kzfree)\n"); + pr_info("double-free (kfree_sensitive)\n"); ptr = kmalloc(size, GFP_KERNEL); if (!ptr) { pr_err("Allocation failed\n"); return; } - kzfree(ptr); - kzfree(ptr); + kfree_sensitive(ptr); + kfree_sensitive(ptr); } #ifdef CONFIG_KASAN_VMALLOC diff --git a/mm/slab_common.c b/mm/slab_common.c index fe8b68482670..f47a097bb4b8 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -1729,17 +1729,17 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags) EXPORT_SYMBOL(krealloc); /** - * kzfree - like kfree but zero memory + * kfree_sensitive - Clear sensitive information in memory before freeing * @p: object to free memory of * * The memory of the object @p points to is zeroed before freed. - * If @p is %NULL, kzfree() does nothing. + * If @p is %NULL, kfree_sensitive() does nothing. * * Note: this function zeroes the whole allocated buffer which can be a good * deal bigger than the requested buffer size passed to kmalloc(). So be * careful when using this function in performance sensitive code. */ -void kzfree(const void *p) +void kfree_sensitive(const void *p) { size_t ks; void *mem = (void *)p; @@ -1750,7 +1750,7 @@ void kzfree(const void *p) memzero_explicit(mem, ks); kfree(mem); } -EXPORT_SYMBOL(kzfree); +EXPORT_SYMBOL(kfree_sensitive); /** * ksize - get the actual amount of memory allocated for a given object diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c index 3286f9d527d3..f7a2f0e41105 100644 --- a/net/atm/mpoa_caches.c +++ b/net/atm/mpoa_caches.c @@ -180,7 +180,7 @@ static int cache_hit(in_cache_entry *entry, struct mpoa_client *mpc) static void in_cache_put(in_cache_entry *entry) { if (refcount_dec_and_test(&entry->use)) { - kzfree(entry); + kfree_sensitive(entry); } } @@ -415,7 +415,7 @@ static eg_cache_entry *eg_cache_get_by_src_ip(__be32 ipaddr, static void eg_cache_put(eg_cache_entry *entry) { if (refcount_dec_and_test(&entry->use)) { - kzfree(entry); + kfree_sensitive(entry); } } diff --git a/net/bluetooth/ecdh_helper.c b/net/bluetooth/ecdh_helper.c index 2155ce802877..3226fe02e875 100644 --- a/net/bluetooth/ecdh_helper.c +++ b/net/bluetooth/ecdh_helper.c @@ -104,7 +104,7 @@ int compute_ecdh_secret(struct crypto_kpp *tfm, const u8 public_key[64], free_all: kpp_request_free(req); free_tmp: - kzfree(tmp); + kfree_sensitive(tmp); return err; } @@ -151,9 +151,9 @@ int set_ecdh_privkey(struct crypto_kpp *tfm, const u8 private_key[32]) err = crypto_kpp_set_secret(tfm, buf, buf_len); /* fall through */ free_all: - kzfree(buf); + kfree_sensitive(buf); free_tmp: - kzfree(tmp); + kfree_sensitive(tmp); return err; } diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 433227f96c73..bf4bef13d935 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -753,9 +753,9 @@ static void smp_chan_destroy(struct l2cap_conn *conn) complete = test_bit(SMP_FLAG_COMPLETE, &smp->flags); mgmt_smp_complete(hcon, complete); - kzfree(smp->csrk); - kzfree(smp->slave_csrk); - kzfree(smp->link_key); + kfree_sensitive(smp->csrk); + 
kfree_sensitive(smp->slave_csrk); + kfree_sensitive(smp->link_key); crypto_free_shash(smp->tfm_cmac); crypto_free_kpp(smp->tfm_ecdh); @@ -789,7 +789,7 @@ static void smp_chan_destroy(struct l2cap_conn *conn) } chan->data = NULL; - kzfree(smp); + kfree_sensitive(smp); hci_conn_drop(hcon); } @@ -1156,7 +1156,7 @@ static void sc_generate_link_key(struct smp_chan *smp) const u8 salt[16] = { 0x31, 0x70, 0x6d, 0x74 }; if (smp_h7(smp->tfm_cmac, smp->tk, salt, smp->link_key)) { - kzfree(smp->link_key); + kfree_sensitive(smp->link_key); smp->link_key = NULL; return; } @@ -1165,14 +1165,14 @@ static void sc_generate_link_key(struct smp_chan *smp) const u8 tmp1[4] = { 0x31, 0x70, 0x6d, 0x74 }; if (smp_h6(smp->tfm_cmac, smp->tk, tmp1, smp->link_key)) { - kzfree(smp->link_key); + kfree_sensitive(smp->link_key); smp->link_key = NULL; return; } } if (smp_h6(smp->tfm_cmac, smp->link_key, lebr, smp->link_key)) { - kzfree(smp->link_key); + kfree_sensitive(smp->link_key); smp->link_key = NULL; return; } @@ -1407,7 +1407,7 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn) free_shash: crypto_free_shash(smp->tfm_cmac); zfree_smp: - kzfree(smp); + kfree_sensitive(smp); return NULL; } @@ -3278,7 +3278,7 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid) tfm_cmac = crypto_alloc_shash("cmac(aes)", 0, 0); if (IS_ERR(tfm_cmac)) { BT_ERR("Unable to create CMAC crypto context"); - kzfree(smp); + kfree_sensitive(smp); return ERR_CAST(tfm_cmac); } @@ -3286,7 +3286,7 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid) if (IS_ERR(tfm_ecdh)) { BT_ERR("Unable to create ECDH crypto context"); crypto_free_shash(tfm_cmac); - kzfree(smp); + kfree_sensitive(smp); return ERR_CAST(tfm_ecdh); } @@ -3300,7 +3300,7 @@ create_chan: if (smp) { crypto_free_shash(smp->tfm_cmac); crypto_free_kpp(smp->tfm_ecdh); - kzfree(smp); + kfree_sensitive(smp); } return ERR_PTR(-ENOMEM); } @@ -3347,7 +3347,7 @@ static void smp_del_chan(struct l2cap_chan *chan) chan->data = NULL; crypto_free_shash(smp->tfm_cmac); crypto_free_kpp(smp->tfm_ecdh); - kzfree(smp); + kfree_sensitive(smp); } l2cap_chan_put(chan); diff --git a/net/core/sock.c b/net/core/sock.c index 49cd5ffe673e..9d8b15bfd192 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2257,7 +2257,7 @@ static inline void __sock_kfree_s(struct sock *sk, void *mem, int size, if (WARN_ON_ONCE(!mem)) return; if (nullify) - kzfree(mem); + kfree_sensitive(mem); else kfree(mem); atomic_sub(size, &sk->sk_omem_alloc); diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index 19ad9586c720..c1a54f3d58f5 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -38,7 +38,7 @@ static void tcp_fastopen_ctx_free(struct rcu_head *head) struct tcp_fastopen_context *ctx = container_of(head, struct tcp_fastopen_context, rcu); - kzfree(ctx); + kfree_sensitive(ctx); } void tcp_fastopen_destroy_cipher(struct sock *sk) diff --git a/net/mac80211/aead_api.c b/net/mac80211/aead_api.c index c5fe95e49c68..d7b3d905d535 100644 --- a/net/mac80211/aead_api.c +++ b/net/mac80211/aead_api.c @@ -41,7 +41,7 @@ int aead_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, size_t aad_len, aead_request_set_ad(aead_req, sg[0].length); crypto_aead_encrypt(aead_req); - kzfree(aead_req); + kfree_sensitive(aead_req); return 0; } @@ -76,7 +76,7 @@ int aead_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, size_t aad_len, aead_request_set_ad(aead_req, sg[0].length); err = crypto_aead_decrypt(aead_req); - kzfree(aead_req); + kfree_sensitive(aead_req); return err; } 
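The conversion in these hunks is mechanical: buffers that may hold key material or other secrets move to the zeroing variant, while non-sensitive allocations stay on plain kfree(). A minimal usage sketch in the same style (struct, field, and function names here are hypothetical, for illustration only):

#include <linux/slab.h>
#include <linux/types.h>

struct my_ctx {
	u8 *session_key;	/* secret material: must not linger after free */
	u8 *scratch;		/* non-sensitive working buffer */
};

static void my_ctx_free(struct my_ctx *ctx)
{
	if (!ctx)
		return;
	kfree_sensitive(ctx->session_key);	/* zeroed with memzero_explicit() before kfree() */
	kfree(ctx->scratch);			/* nothing secret here, plain kfree() suffices */
	kfree_sensitive(ctx);			/* the context struct itself may embed secrets */
}

Like kfree(), kfree_sensitive() accepts NULL, so callers do not need extra checks around individual members.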
diff --git a/net/mac80211/aes_gmac.c b/net/mac80211/aes_gmac.c index 16ba09cb5def..6f3b3a0cc10a 100644 --- a/net/mac80211/aes_gmac.c +++ b/net/mac80211/aes_gmac.c @@ -60,7 +60,7 @@ int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce, aead_request_set_ad(aead_req, GMAC_AAD_LEN + data_len); crypto_aead_encrypt(aead_req); - kzfree(aead_req); + kfree_sensitive(aead_req); return 0; } diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 9c2888004878..2df636c32432 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -732,7 +732,7 @@ static void ieee80211_key_free_common(struct ieee80211_key *key) ieee80211_aes_gcm_key_free(key->u.gcmp.tfm); break; } - kzfree(key); + kfree_sensitive(key); } static void __ieee80211_key_destroy(struct ieee80211_key *key, diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c index c079ee69d3d0..585d33144c33 100644 --- a/net/mac802154/llsec.c +++ b/net/mac802154/llsec.c @@ -49,7 +49,7 @@ void mac802154_llsec_destroy(struct mac802154_llsec *sec) msl = container_of(sl, struct mac802154_llsec_seclevel, level); list_del(&sl->list); - kzfree(msl); + kfree_sensitive(msl); } list_for_each_entry_safe(dev, dn, &sec->table.devices, list) { @@ -66,7 +66,7 @@ void mac802154_llsec_destroy(struct mac802154_llsec *sec) mkey = container_of(key->key, struct mac802154_llsec_key, key); list_del(&key->list); llsec_key_put(mkey); - kzfree(key); + kfree_sensitive(key); } } @@ -155,7 +155,7 @@ err_tfm: if (key->tfm[i]) crypto_free_aead(key->tfm[i]); - kzfree(key); + kfree_sensitive(key); return NULL; } @@ -170,7 +170,7 @@ static void llsec_key_release(struct kref *ref) crypto_free_aead(key->tfm[i]); crypto_free_sync_skcipher(key->tfm0); - kzfree(key); + kfree_sensitive(key); } static struct mac802154_llsec_key* @@ -261,7 +261,7 @@ int mac802154_llsec_key_add(struct mac802154_llsec *sec, return 0; fail: - kzfree(new); + kfree_sensitive(new); return -ENOMEM; } @@ -341,10 +341,10 @@ static void llsec_dev_free(struct mac802154_llsec_device *dev) devkey); list_del(&pos->list); - kzfree(devkey); + kfree_sensitive(devkey); } - kzfree(dev); + kfree_sensitive(dev); } int mac802154_llsec_dev_add(struct mac802154_llsec *sec, @@ -682,7 +682,7 @@ llsec_do_encrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec, rc = crypto_aead_encrypt(req); - kzfree(req); + kfree_sensitive(req); return rc; } @@ -886,7 +886,7 @@ llsec_do_decrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec, rc = crypto_aead_decrypt(req); - kzfree(req); + kfree_sensitive(req); skb_trim(skb, skb->len - authlen); return rc; @@ -926,7 +926,7 @@ llsec_update_devkey_record(struct mac802154_llsec_device *dev, if (!devkey) list_add_rcu(&next->devkey.list, &dev->dev.keys); else - kzfree(next); + kfree_sensitive(next); spin_unlock_bh(&dev->lock); } diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 83e97e8892e0..9e289c770574 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -49,7 +49,7 @@ void sctp_auth_key_put(struct sctp_auth_bytes *key) return; if (refcount_dec_and_test(&key->refcnt)) { - kzfree(key); + kfree_sensitive(key); SCTP_DBG_OBJCNT_DEC(keys); } } diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index e7180da1fc6a..794fb3001880 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c @@ -1003,7 +1003,7 @@ krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, err = 0; out_err: - kzfree(desc); + kfree_sensitive(desc); crypto_free_shash(hmac); dprintk("%s: returning %d\n", __func__, err); return 
err; @@ -1079,7 +1079,7 @@ krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, err = 0; out_err: - kzfree(desc); + kfree_sensitive(desc); crypto_free_shash(hmac); dprintk("%s: returning %d\n", __func__, err); return err; diff --git a/net/sunrpc/auth_gss/gss_krb5_keys.c b/net/sunrpc/auth_gss/gss_krb5_keys.c index 3b7f721c023b..726c076950c0 100644 --- a/net/sunrpc/auth_gss/gss_krb5_keys.c +++ b/net/sunrpc/auth_gss/gss_krb5_keys.c @@ -228,11 +228,11 @@ u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e, ret = 0; err_free_raw: - kzfree(rawkey); + kfree_sensitive(rawkey); err_free_out: - kzfree(outblockdata); + kfree_sensitive(outblockdata); err_free_in: - kzfree(inblockdata); + kfree_sensitive(inblockdata); err_free_cipher: crypto_free_sync_skcipher(cipher); err_return: diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 75b3c2e9e8f8..a84a5b289484 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -443,7 +443,7 @@ context_derive_keys_rc4(struct krb5_ctx *ctx) desc->tfm = hmac; err = crypto_shash_digest(desc, sigkeyconstant, slen, ctx->cksum); - kzfree(desc); + kfree_sensitive(desc); if (err) goto out_err_free_hmac; /* diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c index c8c47fc72653..001bcb0f2480 100644 --- a/net/tipc/crypto.c +++ b/net/tipc/crypto.c @@ -441,7 +441,7 @@ static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey, /* Allocate per-cpu TFM entry pointer */ tmp->tfm_entry = alloc_percpu(struct tipc_tfm *); if (!tmp->tfm_entry) { - kzfree(tmp); + kfree_sensitive(tmp); return -ENOMEM; } @@ -491,7 +491,7 @@ static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey, /* Not any TFM is allocated? */ if (!tfm_cnt) { free_percpu(tmp->tfm_entry); - kzfree(tmp); + kfree_sensitive(tmp); return err; } @@ -545,7 +545,7 @@ static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src) aead->tfm_entry = alloc_percpu_gfp(struct tipc_tfm *, GFP_ATOMIC); if (unlikely(!aead->tfm_entry)) { - kzfree(aead); + kfree_sensitive(aead); return -ENOMEM; } @@ -1352,7 +1352,7 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net, /* Allocate statistic structure */ c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC); if (!c->stats) { - kzfree(c); + kfree_sensitive(c); return -ENOMEM; } @@ -1408,7 +1408,7 @@ void tipc_crypto_stop(struct tipc_crypto **crypto) free_percpu(c->stats); *crypto = NULL; - kzfree(c); + kfree_sensitive(c); } void tipc_crypto_timeout(struct tipc_crypto *rx) diff --git a/net/wireless/core.c b/net/wireless/core.c index 1971d7e6eb55..354b0ccbdc24 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -1125,7 +1125,7 @@ static void __cfg80211_unregister_wdev(struct wireless_dev *wdev, bool sync) } #ifdef CONFIG_CFG80211_WEXT - kzfree(wdev->wext.keys); + kfree_sensitive(wdev->wext.keys); wdev->wext.keys = NULL; #endif /* only initialized if we have a netdev */ diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c index ae8fe66a9bb8..a0621bb76d8e 100644 --- a/net/wireless/ibss.c +++ b/net/wireless/ibss.c @@ -127,7 +127,7 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, return -EINVAL; if (WARN_ON(wdev->connect_keys)) - kzfree(wdev->connect_keys); + kfree_sensitive(wdev->connect_keys); wdev->connect_keys = connkeys; wdev->ibss_fixed = params->channel_fixed; @@ -161,7 +161,7 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext) ASSERT_WDEV_LOCK(wdev); - kzfree(wdev->connect_keys); + 
kfree_sensitive(wdev->connect_keys); wdev->connect_keys = NULL; rdev_set_qos_map(rdev, dev, NULL); diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c index f5e842ba7673..1b4d6c87a5c5 100644 --- a/net/wireless/lib80211_crypt_tkip.c +++ b/net/wireless/lib80211_crypt_tkip.c @@ -131,7 +131,7 @@ static void lib80211_tkip_deinit(void *priv) crypto_free_shash(_priv->tx_tfm_michael); crypto_free_shash(_priv->rx_tfm_michael); } - kzfree(priv); + kfree_sensitive(priv); } static inline u16 RotR1(u16 val) diff --git a/net/wireless/lib80211_crypt_wep.c b/net/wireless/lib80211_crypt_wep.c index dafc6f3571db..6ab9957b8f96 100644 --- a/net/wireless/lib80211_crypt_wep.c +++ b/net/wireless/lib80211_crypt_wep.c @@ -56,7 +56,7 @@ static void *lib80211_wep_init(int keyidx) static void lib80211_wep_deinit(void *priv) { - kzfree(priv); + kfree_sensitive(priv); } /* Add WEP IV/key info to a frame that has at least 4 bytes of headroom */ diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 814e23d3ce7c..c04fc6cf6583 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -9836,7 +9836,7 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) if ((ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) && no_ht) { - kzfree(connkeys); + kfree_sensitive(connkeys); return -EINVAL; } } @@ -9848,7 +9848,7 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) int r = validate_pae_over_nl80211(rdev, info); if (r < 0) { - kzfree(connkeys); + kfree_sensitive(connkeys); return r; } @@ -9861,7 +9861,7 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) wdev_lock(dev->ieee80211_ptr); err = __cfg80211_join_ibss(rdev, dev, &ibss, connkeys); if (err) - kzfree(connkeys); + kfree_sensitive(connkeys); else if (info->attrs[NL80211_ATTR_SOCKET_OWNER]) dev->ieee80211_ptr->conn_owner_nlportid = info->snd_portid; wdev_unlock(dev->ieee80211_ptr); @@ -10289,7 +10289,7 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) { if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) { - kzfree(connkeys); + kfree_sensitive(connkeys); return -EINVAL; } memcpy(&connect.ht_capa, @@ -10307,7 +10307,7 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NL80211_ATTR_VHT_CAPABILITY]) { if (!info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]) { - kzfree(connkeys); + kfree_sensitive(connkeys); return -EINVAL; } memcpy(&connect.vht_capa, @@ -10321,7 +10321,7 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) (rdev->wiphy.features & NL80211_FEATURE_QUIET)) && !wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_RRM)) { - kzfree(connkeys); + kfree_sensitive(connkeys); return -EINVAL; } connect.flags |= ASSOC_REQ_USE_RRM; @@ -10329,21 +10329,21 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) connect.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]); if (connect.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ]) { - kzfree(connkeys); + kfree_sensitive(connkeys); return -EOPNOTSUPP; } if (info->attrs[NL80211_ATTR_BSS_SELECT]) { /* bss selection makes no sense if bssid is set */ if (connect.bssid) { - kzfree(connkeys); + kfree_sensitive(connkeys); return -EINVAL; } err = parse_bss_select(info->attrs[NL80211_ATTR_BSS_SELECT], wiphy, &connect.bss_select); if (err) { - kzfree(connkeys); + kfree_sensitive(connkeys); return err; } } @@ -10373,13 +10373,13 @@ static int nl80211_connect(struct 
sk_buff *skb, struct genl_info *info) info->attrs[NL80211_ATTR_FILS_ERP_REALM] || info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] || info->attrs[NL80211_ATTR_FILS_ERP_RRK]) { - kzfree(connkeys); + kfree_sensitive(connkeys); return -EINVAL; } if (nla_get_flag(info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT])) { if (!info->attrs[NL80211_ATTR_SOCKET_OWNER]) { - kzfree(connkeys); + kfree_sensitive(connkeys); GENL_SET_ERR_MSG(info, "external auth requires connection ownership"); return -EINVAL; @@ -10392,7 +10392,7 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) err = cfg80211_connect(rdev, dev, &connect, connkeys, connect.prev_bssid); if (err) - kzfree(connkeys); + kfree_sensitive(connkeys); if (!err && info->attrs[NL80211_ATTR_SOCKET_OWNER]) { dev->ieee80211_ptr->conn_owner_nlportid = info->snd_portid; diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 15595cf401de..985f3c23f054 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -742,7 +742,7 @@ void __cfg80211_connect_result(struct net_device *dev, } if (cr->status != WLAN_STATUS_SUCCESS) { - kzfree(wdev->connect_keys); + kfree_sensitive(wdev->connect_keys); wdev->connect_keys = NULL; wdev->ssid_len = 0; wdev->conn_owner_nlportid = 0; @@ -1098,7 +1098,7 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, wdev->current_bss = NULL; wdev->ssid_len = 0; wdev->conn_owner_nlportid = 0; - kzfree(wdev->connect_keys); + kfree_sensitive(wdev->connect_keys); wdev->connect_keys = NULL; nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap); @@ -1281,7 +1281,7 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev, ASSERT_WDEV_LOCK(wdev); - kzfree(wdev->connect_keys); + kfree_sensitive(wdev->connect_keys); wdev->connect_keys = NULL; wdev->conn_owner_nlportid = 0; diff --git a/net/wireless/util.c b/net/wireless/util.c index 26a977343c3b..dfad1c0f57ad 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -871,7 +871,7 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev) } } - kzfree(wdev->connect_keys); + kfree_sensitive(wdev->connect_keys); wdev->connect_keys = NULL; } diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c index 73fd0eae08ca..73df23570d43 100644 --- a/net/wireless/wext-sme.c +++ b/net/wireless/wext-sme.c @@ -57,7 +57,7 @@ int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev, err = cfg80211_connect(rdev, wdev->netdev, &wdev->wext.connect, ck, prev_bssid); if (err) - kzfree(ck); + kfree_sensitive(ck); return err; } diff --git a/scripts/coccinelle/free/devm_free.cocci b/scripts/coccinelle/free/devm_free.cocci index 3357bf4dbd7c..da80050b91ff 100644 --- a/scripts/coccinelle/free/devm_free.cocci +++ b/scripts/coccinelle/free/devm_free.cocci @@ -89,7 +89,7 @@ position p; ( kfree@p(x) | - kzfree@p(x) + kfree_sensitive@p(x) | krealloc@p(x, ...) | @@ -112,7 +112,7 @@ position p != safe.p; ( * kfree@p(x) | -* kzfree@p(x) +* kfree_sensitive@p(x) | * krealloc@p(x, ...) 
| diff --git a/scripts/coccinelle/free/ifnullfree.cocci b/scripts/coccinelle/free/ifnullfree.cocci index b3290c4ee239..2045391e36a0 100644 --- a/scripts/coccinelle/free/ifnullfree.cocci +++ b/scripts/coccinelle/free/ifnullfree.cocci @@ -21,7 +21,7 @@ expression E; ( kfree(E); | - kzfree(E); + kfree_sensitive(E); | debugfs_remove(E); | @@ -42,7 +42,7 @@ position p; @@ * if (E != NULL) -* \(kfree@p\|kzfree@p\|debugfs_remove@p\|debugfs_remove_recursive@p\| +* \(kfree@p\|kfree_sensitive@p\|debugfs_remove@p\|debugfs_remove_recursive@p\| * usb_free_urb@p\|kmem_cache_destroy@p\|mempool_destroy@p\| * dma_pool_destroy@p\)(E); diff --git a/scripts/coccinelle/free/kfree.cocci b/scripts/coccinelle/free/kfree.cocci index e9d50e718e46..168568386034 100644 --- a/scripts/coccinelle/free/kfree.cocci +++ b/scripts/coccinelle/free/kfree.cocci @@ -24,7 +24,7 @@ position p1; ( * kfree@p1(E) | -* kzfree@p1(E) +* kfree_sensitive@p1(E) ) @print expression@ @@ -68,7 +68,7 @@ while (1) { ... ( * kfree@ok(E) | -* kzfree@ok(E) +* kfree_sensitive@ok(E) ) ... when != break; when != goto l; @@ -86,7 +86,7 @@ position free.p1!=loop.ok,p2!={print.p,sz.p}; ( * kfree@p1(E,...) | -* kzfree@p1(E,...) +* kfree_sensitive@p1(E,...) ) ... ( diff --git a/scripts/coccinelle/free/kfreeaddr.cocci b/scripts/coccinelle/free/kfreeaddr.cocci index cfaf308328d8..142af6337a04 100644 --- a/scripts/coccinelle/free/kfreeaddr.cocci +++ b/scripts/coccinelle/free/kfreeaddr.cocci @@ -20,7 +20,7 @@ position p; ( * kfree@p(&e->f) | -* kzfree@p(&e->f) +* kfree_sensitive@p(&e->f) ) @script:python depends on org@ diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c index 1c898055a476..7b0e13ce7dc7 100644 --- a/security/apparmor/domain.c +++ b/security/apparmor/domain.c @@ -40,8 +40,8 @@ void aa_free_domain_entries(struct aa_domain *domain) return; for (i = 0; i < domain->size; i++) - kzfree(domain->table[i]); - kzfree(domain->table); + kfree_sensitive(domain->table[i]); + kfree_sensitive(domain->table); domain->table = NULL; } } diff --git a/security/apparmor/include/file.h b/security/apparmor/include/file.h index aff26fc71407..d4f8948517d9 100644 --- a/security/apparmor/include/file.h +++ b/security/apparmor/include/file.h @@ -72,7 +72,7 @@ static inline void aa_free_file_ctx(struct aa_file_ctx *ctx) { if (ctx) { aa_put_label(rcu_access_pointer(ctx->label)); - kzfree(ctx); + kfree_sensitive(ctx); } } diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c index af4f50fda9e3..4c010c9a6af1 100644 --- a/security/apparmor/policy.c +++ b/security/apparmor/policy.c @@ -187,9 +187,9 @@ static void aa_free_data(void *ptr, void *arg) { struct aa_data *data = ptr; - kzfree(data->data); - kzfree(data->key); - kzfree(data); + kfree_sensitive(data->data); + kfree_sensitive(data->key); + kfree_sensitive(data); } /** @@ -217,19 +217,19 @@ void aa_free_profile(struct aa_profile *profile) aa_put_profile(rcu_access_pointer(profile->parent)); aa_put_ns(profile->ns); - kzfree(profile->rename); + kfree_sensitive(profile->rename); aa_free_file_rules(&profile->file); aa_free_cap_rules(&profile->caps); aa_free_rlimit_rules(&profile->rlimits); for (i = 0; i < profile->xattr_count; i++) - kzfree(profile->xattrs[i]); - kzfree(profile->xattrs); + kfree_sensitive(profile->xattrs[i]); + kfree_sensitive(profile->xattrs); for (i = 0; i < profile->secmark_count; i++) - kzfree(profile->secmark[i].label); - kzfree(profile->secmark); - kzfree(profile->dirname); + kfree_sensitive(profile->secmark[i].label); + kfree_sensitive(profile->secmark); + 
kfree_sensitive(profile->dirname); aa_put_dfa(profile->xmatch); aa_put_dfa(profile->policy.dfa); @@ -237,14 +237,14 @@ void aa_free_profile(struct aa_profile *profile) rht = profile->data; profile->data = NULL; rhashtable_free_and_destroy(rht, aa_free_data, NULL); - kzfree(rht); + kfree_sensitive(rht); } - kzfree(profile->hash); + kfree_sensitive(profile->hash); aa_put_loaddata(profile->rawdata); aa_label_destroy(&profile->label); - kzfree(profile); + kfree_sensitive(profile); } /** diff --git a/security/apparmor/policy_ns.c b/security/apparmor/policy_ns.c index d7ef540027a5..70921d95fb40 100644 --- a/security/apparmor/policy_ns.c +++ b/security/apparmor/policy_ns.c @@ -121,9 +121,9 @@ static struct aa_ns *alloc_ns(const char *prefix, const char *name) return ns; fail_unconfined: - kzfree(ns->base.hname); + kfree_sensitive(ns->base.hname); fail_ns: - kzfree(ns); + kfree_sensitive(ns); return NULL; } @@ -145,7 +145,7 @@ void aa_free_ns(struct aa_ns *ns) ns->unconfined->ns = NULL; aa_free_profile(ns->unconfined); - kzfree(ns); + kfree_sensitive(ns); } /** diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c index b67322abcc33..dc345ac93205 100644 --- a/security/apparmor/policy_unpack.c +++ b/security/apparmor/policy_unpack.c @@ -163,10 +163,10 @@ static void do_loaddata_free(struct work_struct *work) aa_put_ns(ns); } - kzfree(d->hash); - kzfree(d->name); + kfree_sensitive(d->hash); + kfree_sensitive(d->name); kvfree(d->data); - kzfree(d); + kfree_sensitive(d); } void aa_loaddata_kref(struct kref *kref) @@ -894,7 +894,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name) while (unpack_strdup(e, &key, NULL)) { data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) { - kzfree(key); + kfree_sensitive(key); goto fail; } @@ -902,8 +902,8 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name) data->size = unpack_blob(e, &data->data, NULL); data->data = kvmemdup(data->data, data->size); if (data->size && !data->data) { - kzfree(data->key); - kzfree(data); + kfree_sensitive(data->key); + kfree_sensitive(data); goto fail; } @@ -1037,7 +1037,7 @@ void aa_load_ent_free(struct aa_load_ent *ent) aa_put_profile(ent->old); aa_put_profile(ent->new); kfree(ent->ns_name); - kzfree(ent); + kfree_sensitive(ent); } } diff --git a/security/keys/big_key.c b/security/keys/big_key.c index dd708e8f13c0..691347dea3c1 100644 --- a/security/keys/big_key.c +++ b/security/keys/big_key.c @@ -138,7 +138,7 @@ int big_key_preparse(struct key_preparsed_payload *prep) err_fput: fput(file); err_enckey: - kzfree(enckey); + kfree_sensitive(enckey); error: memzero_explicit(buf, enclen); kvfree(buf); @@ -155,7 +155,7 @@ void big_key_free_preparse(struct key_preparsed_payload *prep) path_put(path); } - kzfree(prep->payload.data[big_key_data]); + kfree_sensitive(prep->payload.data[big_key_data]); } /* @@ -187,7 +187,7 @@ void big_key_destroy(struct key *key) path->mnt = NULL; path->dentry = NULL; } - kzfree(key->payload.data[big_key_data]); + kfree_sensitive(key->payload.data[big_key_data]); key->payload.data[big_key_data] = NULL; } diff --git a/security/keys/dh.c b/security/keys/dh.c index c4c629bb1c03..1abfa70ed6e1 100644 --- a/security/keys/dh.c +++ b/security/keys/dh.c @@ -58,9 +58,9 @@ error: static void dh_free_data(struct dh *dh) { - kzfree(dh->key); - kzfree(dh->p); - kzfree(dh->g); + kfree_sensitive(dh->key); + kfree_sensitive(dh->p); + kfree_sensitive(dh->g); } struct dh_completion { @@ -126,7 +126,7 @@ static void kdf_dealloc(struct kdf_sdesc 
*sdesc) if (sdesc->shash.tfm) crypto_free_shash(sdesc->shash.tfm); - kzfree(sdesc); + kfree_sensitive(sdesc); } /* @@ -220,7 +220,7 @@ static int keyctl_dh_compute_kdf(struct kdf_sdesc *sdesc, ret = -EFAULT; err: - kzfree(outbuf); + kfree_sensitive(outbuf); return ret; } @@ -395,11 +395,11 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params, out6: kpp_request_free(req); out5: - kzfree(outbuf); + kfree_sensitive(outbuf); out4: crypto_free_kpp(tfm); out3: - kzfree(secret); + kfree_sensitive(secret); out2: dh_free_data(&dh_inputs); out1: diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c index 14cf81d1a30b..deebbf14eeca 100644 --- a/security/keys/encrypted-keys/encrypted.c +++ b/security/keys/encrypted-keys/encrypted.c @@ -370,7 +370,7 @@ static int get_derived_key(u8 *derived_key, enum derived_key_type key_type, master_keylen); ret = crypto_shash_tfm_digest(hash_tfm, derived_buf, derived_buf_len, derived_key); - kzfree(derived_buf); + kfree_sensitive(derived_buf); return ret; } @@ -812,13 +812,13 @@ static int encrypted_instantiate(struct key *key, ret = encrypted_init(epayload, key->description, format, master_desc, decrypted_datalen, hex_encoded_iv); if (ret < 0) { - kzfree(epayload); + kfree_sensitive(epayload); goto out; } rcu_assign_keypointer(key, epayload); out: - kzfree(datablob); + kfree_sensitive(datablob); return ret; } @@ -827,7 +827,7 @@ static void encrypted_rcu_free(struct rcu_head *rcu) struct encrypted_key_payload *epayload; epayload = container_of(rcu, struct encrypted_key_payload, rcu); - kzfree(epayload); + kfree_sensitive(epayload); } /* @@ -885,7 +885,7 @@ static int encrypted_update(struct key *key, struct key_preparsed_payload *prep) rcu_assign_keypointer(key, new_epayload); call_rcu(&epayload->rcu, encrypted_rcu_free); out: - kzfree(buf); + kfree_sensitive(buf); return ret; } @@ -946,7 +946,7 @@ static long encrypted_read(const struct key *key, char *buffer, memzero_explicit(derived_key, sizeof(derived_key)); memcpy(buffer, ascii_buf, asciiblob_len); - kzfree(ascii_buf); + kfree_sensitive(ascii_buf); return asciiblob_len; out: @@ -961,7 +961,7 @@ out: */ static void encrypted_destroy(struct key *key) { - kzfree(key->payload.data[0]); + kfree_sensitive(key->payload.data[0]); } struct key_type key_type_encrypted = { diff --git a/security/keys/trusted-keys/trusted_tpm1.c b/security/keys/trusted-keys/trusted_tpm1.c index 8001ab07e63b..b9fe02e5f84f 100644 --- a/security/keys/trusted-keys/trusted_tpm1.c +++ b/security/keys/trusted-keys/trusted_tpm1.c @@ -68,7 +68,7 @@ static int TSS_sha1(const unsigned char *data, unsigned int datalen, } ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest); - kzfree(sdesc); + kfree_sensitive(sdesc); return ret; } @@ -112,7 +112,7 @@ static int TSS_rawhmac(unsigned char *digest, const unsigned char *key, if (!ret) ret = crypto_shash_final(&sdesc->shash, digest); out: - kzfree(sdesc); + kfree_sensitive(sdesc); return ret; } @@ -166,7 +166,7 @@ int TSS_authhmac(unsigned char *digest, const unsigned char *key, paramdigest, TPM_NONCE_SIZE, h1, TPM_NONCE_SIZE, h2, 1, &c, 0, 0); out: - kzfree(sdesc); + kfree_sensitive(sdesc); return ret; } EXPORT_SYMBOL_GPL(TSS_authhmac); @@ -251,7 +251,7 @@ int TSS_checkhmac1(unsigned char *buffer, if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE)) ret = -EINVAL; out: - kzfree(sdesc); + kfree_sensitive(sdesc); return ret; } EXPORT_SYMBOL_GPL(TSS_checkhmac1); @@ -353,7 +353,7 @@ static int TSS_checkhmac2(unsigned char *buffer, if 
(memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE)) ret = -EINVAL; out: - kzfree(sdesc); + kfree_sensitive(sdesc); return ret; } @@ -563,7 +563,7 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype, *bloblen = storedsize; } out: - kzfree(td); + kfree_sensitive(td); return ret; } @@ -1031,12 +1031,12 @@ static int trusted_instantiate(struct key *key, if (!ret && options->pcrlock) ret = pcrlock(options->pcrlock); out: - kzfree(datablob); - kzfree(options); + kfree_sensitive(datablob); + kfree_sensitive(options); if (!ret) rcu_assign_keypointer(key, payload); else - kzfree(payload); + kfree_sensitive(payload); return ret; } @@ -1045,7 +1045,7 @@ static void trusted_rcu_free(struct rcu_head *rcu) struct trusted_key_payload *p; p = container_of(rcu, struct trusted_key_payload, rcu); - kzfree(p); + kfree_sensitive(p); } /* @@ -1087,13 +1087,13 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep) ret = datablob_parse(datablob, new_p, new_o); if (ret != Opt_update) { ret = -EINVAL; - kzfree(new_p); + kfree_sensitive(new_p); goto out; } if (!new_o->keyhandle) { ret = -EINVAL; - kzfree(new_p); + kfree_sensitive(new_p); goto out; } @@ -1107,22 +1107,22 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep) ret = key_seal(new_p, new_o); if (ret < 0) { pr_info("trusted_key: key_seal failed (%d)\n", ret); - kzfree(new_p); + kfree_sensitive(new_p); goto out; } if (new_o->pcrlock) { ret = pcrlock(new_o->pcrlock); if (ret < 0) { pr_info("trusted_key: pcrlock failed (%d)\n", ret); - kzfree(new_p); + kfree_sensitive(new_p); goto out; } } rcu_assign_keypointer(key, new_p); call_rcu(&p->rcu, trusted_rcu_free); out: - kzfree(datablob); - kzfree(new_o); + kfree_sensitive(datablob); + kfree_sensitive(new_o); return ret; } @@ -1154,7 +1154,7 @@ static long trusted_read(const struct key *key, char *buffer, */ static void trusted_destroy(struct key *key) { - kzfree(key->payload.data[0]); + kfree_sensitive(key->payload.data[0]); } struct key_type key_type_trusted = { diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c index 07d4287e9084..749e2a4dcb13 100644 --- a/security/keys/user_defined.c +++ b/security/keys/user_defined.c @@ -82,7 +82,7 @@ EXPORT_SYMBOL_GPL(user_preparse); */ void user_free_preparse(struct key_preparsed_payload *prep) { - kzfree(prep->payload.data[0]); + kfree_sensitive(prep->payload.data[0]); } EXPORT_SYMBOL_GPL(user_free_preparse); @@ -91,7 +91,7 @@ static void user_free_payload_rcu(struct rcu_head *head) struct user_key_payload *payload; payload = container_of(head, struct user_key_payload, rcu); - kzfree(payload); + kfree_sensitive(payload); } /* @@ -147,7 +147,7 @@ void user_destroy(struct key *key) { struct user_key_payload *upayload = key->payload.data[0]; - kzfree(upayload); + kfree_sensitive(upayload); } EXPORT_SYMBOL_GPL(user_destroy); -- cgit v1.2.3 From 6dc5ea16c86f753951f53085aa04df49cf17cb50 Mon Sep 17 00:00:00 2001 From: John Hubbard Date: Thu, 6 Aug 2020 23:19:51 -0700 Subject: mm, dump_page: do not crash with bad compound_mapcount() If a compound page is being split while dump_page() is being run on that page, we can end up calling compound_mapcount() on a page that is no longer compound. This leads to a crash (already seen at least once in the field), due to the VM_BUG_ON_PAGE() assertion inside compound_mapcount(). (The above is from Matthew Wilcox's analysis of Qian Cai's bug report.) A similar problem is possible, via compound_pincount() instead of compound_mapcount(). 
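Condensed from the pre-patch include/linux/mm.h definition visible in the diff below, this is where the assertion fires (sketch, for readability of the flattened hunk):

static inline int compound_mapcount(struct page *page)
{
	/* Trips if the page was split after the caller's PageCompound() check */
	VM_BUG_ON_PAGE(!PageCompound(page), page);
	page = compound_head(page);
	return atomic_read(compound_mapcount_ptr(page)) + 1;
}

dump_page() reads these fields purely for diagnostics, so an assertion here turns a stale snapshot into a crash.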
In order to avoid this kind of crash, make dump_page() slightly more robust, by providing a pair of simpler routines that don't contain assertions: head_mapcount() and head_pincount(). For debug tools, we don't want to go *too* far in this direction, but this is a simple small fix, and the crash has already been seen, so it's a good trade-off. Reported-by: Qian Cai Suggested-by: Matthew Wilcox Signed-off-by: John Hubbard Signed-off-by: Andrew Morton Acked-by: Vlastimil Babka Cc: Kirill A. Shutemov Cc: Mike Rapoport Cc: William Kucharski Link: http://lkml.kernel.org/r/20200804214807.169256-1-jhubbard@nvidia.com Signed-off-by: Linus Torvalds --- include/linux/mm.h | 14 ++++++++++++-- mm/debug.c | 6 +++--- 2 files changed, 15 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/mm.h b/include/linux/mm.h index dc7b87310c10..303a47a9769d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -779,6 +779,11 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags) extern void kvfree(const void *addr); extern void kvfree_sensitive(const void *addr, size_t len); +static inline int head_mapcount(struct page *head) +{ + return atomic_read(compound_mapcount_ptr(head)) + 1; +} + /* * Mapcount of compound page as a whole, does not include mapped sub-pages. * @@ -788,7 +793,7 @@ static inline int compound_mapcount(struct page *page) { VM_BUG_ON_PAGE(!PageCompound(page), page); page = compound_head(page); - return atomic_read(compound_mapcount_ptr(page)) + 1; + return head_mapcount(page); } /* @@ -901,11 +906,16 @@ static inline bool hpage_pincount_available(struct page *page) return PageCompound(page) && compound_order(page) > 1; } +static inline int head_pincount(struct page *head) +{ + return atomic_read(compound_pincount_ptr(head)); +} + static inline int compound_pincount(struct page *page) { VM_BUG_ON_PAGE(!hpage_pincount_available(page), page); page = compound_head(page); - return atomic_read(compound_pincount_ptr(page)); + return head_pincount(page); } static inline void set_compound_order(struct page *page, unsigned int order) diff --git a/mm/debug.c b/mm/debug.c index 8f569db9a514..ca8d1cacdecc 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -102,12 +102,12 @@ void __dump_page(struct page *page, const char *reason) if (hpage_pincount_available(page)) { pr_warn("head:%p order:%u compound_mapcount:%d compound_pincount:%d\n", head, compound_order(head), - compound_mapcount(head), - compound_pincount(head)); + head_mapcount(head), + head_pincount(head)); } else { pr_warn("head:%p order:%u compound_mapcount:%d\n", head, compound_order(head), - compound_mapcount(head)); + head_mapcount(head)); } } if (PageKsm(page)) -- cgit v1.2.3 From e809d5f0b5c912fe981dce738f3283b2010665f0 Mon Sep 17 00:00:00 2001 From: Chris Down Date: Thu, 6 Aug 2020 23:20:20 -0700 Subject: tmpfs: per-superblock i_ino support Patch series "tmpfs: inode: Reduce risk of inum overflow", v7. In Facebook production we are seeing heavy i_ino wraparounds on tmpfs. On affected tiers, in excess of 10% of hosts show multiple files with different content and the same inode number, with some servers even having as many as 150 duplicated inode numbers with differing file content. This causes actual, tangible problems in production. 
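The affected applications typically derive object identity from stat(2), along the lines of this check (hypothetical userspace illustration, not taken from the services involved):

#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>

/* Common identity assumption: same device plus same inode number means
 * the same file. Once i_ino wraps on a busy tmpfs, two distinct live
 * files can pass this check.
 */
static bool same_object(const struct stat *a, const struct stat *b)
{
	return a->st_dev == b->st_dev && a->st_ino == b->st_ino;
}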
For example, we have complaints from those working on remote caches that their application is reporting cache corruptions because it uses (device, inodenum) to establish the identity of a particular cache object, but because it's not unique any more, the application refuses to continue and reports cache corruption. Even worse, sometimes applications may not even detect the corruption but may continue anyway, causing phantom and hard to debug behaviour. In general, userspace applications expect that (device, inodenum) should be enough to be uniquely point to one inode, which seems fair enough. One might also need to check the generation, but in this case: 1. That's not currently exposed to userspace (ioctl(...FS_IOC_GETVERSION...) returns ENOTTY on tmpfs); 2. Even with generation, there shouldn't be two live inodes with the same inode number on one device. In order to mitigate this, we take a two-pronged approach: 1. Moving inum generation from being global to per-sb for tmpfs. This itself allows some reduction in i_ino churn. This works on both 64- and 32- bit machines. 2. Adding inode{64,32} for tmpfs. This fix is supported on machines with 64-bit ino_t only: we allow users to mount tmpfs with a new inode64 option that uses the full width of ino_t, or CONFIG_TMPFS_INODE64. You can see how this compares to previous related patches which didn't implement this per-superblock: - https://patchwork.kernel.org/patch/11254001/ - https://patchwork.kernel.org/patch/11023915/ This patch (of 2): get_next_ino has a number of problems: - It uses and returns a uint, which is susceptible to become overflowed if a lot of volatile inodes that use get_next_ino are created. - It's global, with no specificity per-sb or even per-filesystem. This means it's not that difficult to cause inode number wraparounds on a single device, which can result in having multiple distinct inodes with the same inode number. This patch adds a per-superblock counter that mitigates the second case. This design also allows us to later have a specific i_ino size per-device, for example, allowing users to choose whether to use 32- or 64-bit inodes for each tmpfs mount. This is implemented in the next commit. For internal shmem mounts which may be less tolerant to spinlock delays, we implement a percpu batching scheme which only takes the stat_lock at each batch boundary. Signed-off-by: Chris Down Signed-off-by: Andrew Morton Acked-by: Hugh Dickins Cc: Amir Goldstein Cc: Al Viro Cc: Matthew Wilcox Cc: Jeff Layton Cc: Johannes Weiner Cc: Tejun Heo Link: http://lkml.kernel.org/r/cover.1594661218.git.chris@chrisdown.name Link: http://lkml.kernel.org/r/1986b9d63b986f08ec07a4aa4b2275e718e47d8a.1594661218.git.chris@chrisdown.name Signed-off-by: Linus Torvalds --- include/linux/fs.h | 15 +++++++++++ include/linux/shmem_fs.h | 2 ++ mm/shmem.c | 66 ++++++++++++++++++++++++++++++++++++++++++++---- 3 files changed, 78 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/fs.h b/include/linux/fs.h index 488c3ef93601..b1c3a14f12e8 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2946,6 +2946,21 @@ extern void discard_new_inode(struct inode *); extern unsigned int get_next_ino(void); extern void evict_inodes(struct super_block *sb); +/* + * Userspace may rely on the the inode number being non-zero. For example, glibc + * simply ignores files with zero i_ino in unlink() and other places. 
+ * + * As an additional complication, if userspace was compiled with + * _FILE_OFFSET_BITS=32 on a 64-bit kernel we'll only end up reading out the + * lower 32 bits, so we need to check that those aren't zero explicitly. With + * _FILE_OFFSET_BITS=64, this may cause some harmless false-negatives, but + * better safe than sorry. + */ +static inline bool is_zero_ino(ino_t ino) +{ + return (u32)ino == 0; +} + extern void __iget(struct inode * inode); extern void iget_failed(struct inode *); extern void clear_inode(struct inode *); diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 7a35a6901221..eb628696ec66 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -36,6 +36,8 @@ struct shmem_sb_info { unsigned char huge; /* Whether to try for hugepages */ kuid_t uid; /* Mount uid for root directory */ kgid_t gid; /* Mount gid for root directory */ + ino_t next_ino; /* The next per-sb inode number to use */ + ino_t __percpu *ino_batch; /* The next per-cpu inode number to use */ struct mempolicy *mpol; /* default memory policy for mappings */ spinlock_t shrinklist_lock; /* Protects shrinklist */ struct list_head shrinklist; /* List of shinkable inodes */ diff --git a/mm/shmem.c b/mm/shmem.c index b2abca3f7f33..585a82d87a92 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -260,18 +260,67 @@ bool vma_is_shmem(struct vm_area_struct *vma) static LIST_HEAD(shmem_swaplist); static DEFINE_MUTEX(shmem_swaplist_mutex); -static int shmem_reserve_inode(struct super_block *sb) +/* + * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and + * produces a novel ino for the newly allocated inode. + * + * It may also be called when making a hard link to permit the space needed by + * each dentry. However, in that case, no new inode number is needed since that + * internally draws from another pool of inode numbers (currently global + * get_next_ino()). This case is indicated by passing NULL as inop. + */ +#define SHMEM_INO_BATCH 1024 +static int shmem_reserve_inode(struct super_block *sb, ino_t *inop) { struct shmem_sb_info *sbinfo = SHMEM_SB(sb); - if (sbinfo->max_inodes) { + ino_t ino; + + if (!(sb->s_flags & SB_KERNMOUNT)) { spin_lock(&sbinfo->stat_lock); if (!sbinfo->free_inodes) { spin_unlock(&sbinfo->stat_lock); return -ENOSPC; } sbinfo->free_inodes--; + if (inop) { + ino = sbinfo->next_ino++; + if (unlikely(is_zero_ino(ino))) + ino = sbinfo->next_ino++; + if (unlikely(ino > UINT_MAX)) { + /* + * Emulate get_next_ino uint wraparound for + * compatibility + */ + ino = 1; + } + *inop = ino; + } spin_unlock(&sbinfo->stat_lock); + } else if (inop) { + /* + * __shmem_file_setup, one of our callers, is lock-free: it + * doesn't hold stat_lock in shmem_reserve_inode since + * max_inodes is always 0, and is called from potentially + * unknown contexts. As such, use a per-cpu batched allocator + * which doesn't require the per-sb stat_lock unless we are at + * the batch boundary. 
+ */ + ino_t *next_ino; + next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu()); + ino = *next_ino; + if (unlikely(ino % SHMEM_INO_BATCH == 0)) { + spin_lock(&sbinfo->stat_lock); + ino = sbinfo->next_ino; + sbinfo->next_ino += SHMEM_INO_BATCH; + spin_unlock(&sbinfo->stat_lock); + if (unlikely(is_zero_ino(ino))) + ino++; + } + *inop = ino; + *next_ino = ++ino; + put_cpu(); } + return 0; } @@ -2222,13 +2271,14 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode struct inode *inode; struct shmem_inode_info *info; struct shmem_sb_info *sbinfo = SHMEM_SB(sb); + ino_t ino; - if (shmem_reserve_inode(sb)) + if (shmem_reserve_inode(sb, &ino)) return NULL; inode = new_inode(sb); if (inode) { - inode->i_ino = get_next_ino(); + inode->i_ino = ino; inode_init_owner(inode, dir, mode); inode->i_blocks = 0; inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); @@ -2932,7 +2982,7 @@ static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentr * first link must skip that, to get the accounting right. */ if (inode->i_nlink) { - ret = shmem_reserve_inode(inode->i_sb); + ret = shmem_reserve_inode(inode->i_sb, NULL); if (ret) goto out; } @@ -3584,6 +3634,7 @@ static void shmem_put_super(struct super_block *sb) { struct shmem_sb_info *sbinfo = SHMEM_SB(sb); + free_percpu(sbinfo->ino_batch); percpu_counter_destroy(&sbinfo->used_blocks); mpol_put(sbinfo->mpol); kfree(sbinfo); @@ -3626,6 +3677,11 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc) #endif sbinfo->max_blocks = ctx->blocks; sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes; + if (sb->s_flags & SB_KERNMOUNT) { + sbinfo->ino_batch = alloc_percpu(ino_t); + if (!sbinfo->ino_batch) + goto failed; + } sbinfo->uid = ctx->uid; sbinfo->gid = ctx->gid; sbinfo->mode = ctx->mode; -- cgit v1.2.3 From ea3271f7196c65ae5d3e1c7b3f733892c017dbd6 Mon Sep 17 00:00:00 2001 From: Chris Down Date: Thu, 6 Aug 2020 23:20:25 -0700 Subject: tmpfs: support 64-bit inums per-sb The default is still set to inode32 for backwards compatibility, but system administrators can opt in to the new 64-bit inode numbers by either: 1. Passing inode64 on the command line when mounting, or 2. Configuring the kernel with CONFIG_TMPFS_INODE64=y The inode64 and inode32 names are used based on existing precedent from XFS. [hughd@google.com: Kconfig fixes] Link: http://lkml.kernel.org/r/alpine.LSU.2.11.2008011928010.13320@eggly.anvils Signed-off-by: Chris Down Signed-off-by: Hugh Dickins Signed-off-by: Andrew Morton Reviewed-by: Amir Goldstein Acked-by: Hugh Dickins Cc: Al Viro Cc: Matthew Wilcox Cc: Jeff Layton Cc: Johannes Weiner Cc: Tejun Heo Link: http://lkml.kernel.org/r/8b23758d0c66b5e2263e08baf9c4b6a7565cbd8f.1594661218.git.chris@chrisdown.name Signed-off-by: Linus Torvalds --- Documentation/filesystems/tmpfs.rst | 18 ++++++++++ fs/Kconfig | 21 ++++++++++++ include/linux/shmem_fs.h | 1 + mm/shmem.c | 65 +++++++++++++++++++++++++++++++++++-- 4 files changed, 103 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/Documentation/filesystems/tmpfs.rst b/Documentation/filesystems/tmpfs.rst index 4e95929301a5..c44f8b1d3cab 100644 --- a/Documentation/filesystems/tmpfs.rst +++ b/Documentation/filesystems/tmpfs.rst @@ -150,6 +150,22 @@ These options do not have any effect on remount. You can change these parameters with chmod(1), chown(1) and chgrp(1) on a mounted filesystem. 
+tmpfs has a mount option to select whether it will wrap at 32- or 64-bit inode +numbers: + +======= ======================== +inode64 Use 64-bit inode numbers +inode32 Use 32-bit inode numbers +======= ======================== + +On a 32-bit kernel, inode32 is implicit, and inode64 is refused at mount time. +On a 64-bit kernel, CONFIG_TMPFS_INODE64 sets the default. inode64 avoids the +possibility of multiple files with the same inode number on a single device; +but risks glibc failing with EOVERFLOW once 33-bit inode numbers are reached - +if a long-lived tmpfs is accessed by 32-bit applications so ancient that +opening a file larger than 2GiB fails with EINVAL. + + So 'mount -t tmpfs -o size=10G,nr_inodes=10k,mode=700 tmpfs /mytmpfs' will give you tmpfs instance on /mytmpfs which can allocate 10GB RAM/SWAP in 10240 inodes and it is only accessible by root. @@ -161,3 +177,5 @@ RAM/SWAP in 10240 inodes and it is only accessible by root. Hugh Dickins, 4 June 2007 :Updated: KOSAKI Motohiro, 16 Mar 2010 +:Updated: + Chris Down, 13 July 2020 diff --git a/fs/Kconfig b/fs/Kconfig index a88aa3af73c1..aa4c12282301 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -201,6 +201,27 @@ config TMPFS_XATTR If unsure, say N. +config TMPFS_INODE64 + bool "Use 64-bit ino_t by default in tmpfs" + depends on TMPFS && 64BIT + default n + help + tmpfs has historically used only inode numbers as wide as an unsigned + int. In some cases this can cause wraparound, potentially resulting + in multiple files with the same inode number on a single device. This + option makes tmpfs use the full width of ino_t by default, without + needing to specify the inode64 option when mounting. + + But if a long-lived tmpfs is to be accessed by 32-bit applications so + ancient that opening a file larger than 2GiB fails with EINVAL, then + the INODE64 config option and inode64 mount option risk operations + failing with EOVERFLOW once 33-bit inode numbers are reached. + + To override this configured default, use the inode32 or inode64 + option when mounting. + + If unsure, say N. 
+ config HUGETLBFS bool "HugeTLB file system support" depends on X86 || IA64 || SPARC64 || (S390 && 64BIT) || \ diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index eb628696ec66..a5a5d1d4d7b1 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -36,6 +36,7 @@ struct shmem_sb_info { unsigned char huge; /* Whether to try for hugepages */ kuid_t uid; /* Mount uid for root directory */ kgid_t gid; /* Mount gid for root directory */ + bool full_inums; /* If i_ino should be uint or ino_t */ ino_t next_ino; /* The next per-sb inode number to use */ ino_t __percpu *ino_batch; /* The next per-cpu inode number to use */ struct mempolicy *mpol; /* default memory policy for mappings */ diff --git a/mm/shmem.c b/mm/shmem.c index 585a82d87a92..c5c281893bb8 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -114,11 +114,13 @@ struct shmem_options { kuid_t uid; kgid_t gid; umode_t mode; + bool full_inums; int huge; int seen; #define SHMEM_SEEN_BLOCKS 1 #define SHMEM_SEEN_INODES 2 #define SHMEM_SEEN_HUGE 4 +#define SHMEM_SEEN_INUMS 8 }; #ifdef CONFIG_TMPFS @@ -286,12 +288,17 @@ static int shmem_reserve_inode(struct super_block *sb, ino_t *inop) ino = sbinfo->next_ino++; if (unlikely(is_zero_ino(ino))) ino = sbinfo->next_ino++; - if (unlikely(ino > UINT_MAX)) { + if (unlikely(!sbinfo->full_inums && + ino > UINT_MAX)) { /* * Emulate get_next_ino uint wraparound for * compatibility */ - ino = 1; + if (IS_ENABLED(CONFIG_64BIT)) + pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n", + __func__, MINOR(sb->s_dev)); + sbinfo->next_ino = 1; + ino = sbinfo->next_ino++; } *inop = ino; } @@ -304,6 +311,10 @@ static int shmem_reserve_inode(struct super_block *sb, ino_t *inop) * unknown contexts. As such, use a per-cpu batched allocator * which doesn't require the per-sb stat_lock unless we are at * the batch boundary. + * + * We don't need to worry about inode{32,64} since SB_KERNMOUNT + * shmem mounts are not exposed to userspace, so we don't need + * to worry about things like glibc compatibility. 
*/ ino_t *next_ino; next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu()); @@ -3397,6 +3408,8 @@ enum shmem_param { Opt_nr_inodes, Opt_size, Opt_uid, + Opt_inode32, + Opt_inode64, }; static const struct constant_table shmem_param_enums_huge[] = { @@ -3416,6 +3429,8 @@ const struct fs_parameter_spec shmem_fs_parameters[] = { fsparam_string("nr_inodes", Opt_nr_inodes), fsparam_string("size", Opt_size), fsparam_u32 ("uid", Opt_uid), + fsparam_flag ("inode32", Opt_inode32), + fsparam_flag ("inode64", Opt_inode64), {} }; @@ -3487,6 +3502,18 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param) break; } goto unsupported_parameter; + case Opt_inode32: + ctx->full_inums = false; + ctx->seen |= SHMEM_SEEN_INUMS; + break; + case Opt_inode64: + if (sizeof(ino_t) < 8) { + return invalfc(fc, + "Cannot use inode64 with <64bit inums in kernel\n"); + } + ctx->full_inums = true; + ctx->seen |= SHMEM_SEEN_INUMS; + break; } return 0; @@ -3578,8 +3605,16 @@ static int shmem_reconfigure(struct fs_context *fc) } } + if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums && + sbinfo->next_ino > UINT_MAX) { + err = "Current inum too high to switch to 32-bit inums"; + goto out; + } + if (ctx->seen & SHMEM_SEEN_HUGE) sbinfo->huge = ctx->huge; + if (ctx->seen & SHMEM_SEEN_INUMS) + sbinfo->full_inums = ctx->full_inums; if (ctx->seen & SHMEM_SEEN_BLOCKS) sbinfo->max_blocks = ctx->blocks; if (ctx->seen & SHMEM_SEEN_INODES) { @@ -3619,6 +3654,29 @@ static int shmem_show_options(struct seq_file *seq, struct dentry *root) if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, sbinfo->gid)); + + /* + * Showing inode{64,32} might be useful even if it's the system default, + * since then people don't have to resort to checking both here and + * /proc/config.gz to confirm 64-bit inums were successfully applied + * (which may not even exist if IKCONFIG_PROC isn't enabled). + * + * We hide it when inode64 isn't the default and we are using 32-bit + * inodes, since that probably just means the feature isn't even under + * consideration. + * + * As such: + * + * +-----------------+-----------------+ + * | TMPFS_INODE64=y | TMPFS_INODE64=n | + * +------------------+-----------------+-----------------+ + * | full_inums=true | show | show | + * | full_inums=false | show | hide | + * +------------------+-----------------+-----------------+ + * + */ + if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums) + seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 
64 : 32)); #ifdef CONFIG_TRANSPARENT_HUGEPAGE /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */ if (sbinfo->huge) @@ -3667,6 +3725,8 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc) ctx->blocks = shmem_default_max_blocks(); if (!(ctx->seen & SHMEM_SEEN_INODES)) ctx->inodes = shmem_default_max_inodes(); + if (!(ctx->seen & SHMEM_SEEN_INUMS)) + ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64); } else { sb->s_flags |= SB_NOUSER; } @@ -3684,6 +3744,7 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc) } sbinfo->uid = ctx->uid; sbinfo->gid = ctx->gid; + sbinfo->full_inums = ctx->full_inums; sbinfo->mode = ctx->mode; sbinfo->huge = ctx->huge; sbinfo->mpol = ctx->mpol; -- cgit v1.2.3 From eedc4e5a142cc33fbb54f8d72b929a0e123c48c4 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 6 Aug 2020 23:20:32 -0700 Subject: mm: memcg: factor out memcg- and lruvec-level changes out of __mod_lruvec_state() Patch series "The new cgroup slab memory controller", v7. The patchset moves the accounting from the page level to the object level. It allows to share slab pages between memory cgroups. This leads to a significant win in the slab utilization (up to 45%) and the corresponding drop in the total kernel memory footprint. The reduced number of unmovable slab pages should also have a positive effect on the memory fragmentation. The patchset makes the slab accounting code simpler: there is no more need in the complicated dynamic creation and destruction of per-cgroup slab caches, all memory cgroups use a global set of shared slab caches. The lifetime of slab caches is not more connected to the lifetime of memory cgroups. The more precise accounting does require more CPU, however in practice the difference seems to be negligible. We've been using the new slab controller in Facebook production for several months with different workloads and haven't seen any noticeable regressions. What we've seen were memory savings in order of 1 GB per host (it varied heavily depending on the actual workload, size of RAM, number of CPUs, memory pressure, etc). The third version of the patchset added yet another step towards the simplification of the code: sharing of slab caches between accounted and non-accounted allocations. It comes with significant upsides (most noticeable, a complete elimination of dynamic slab caches creation) but not without some regression risks, so this change sits on top of the patchset and is not completely merged in. So in the unlikely event of a noticeable performance regression it can be reverted separately. The slab memory accounting works in exactly the same way for SLAB and SLUB. With both allocators the new controller shows significant memory savings, with SLUB the difference is bigger. On my 16-core desktop machine running Fedora 32 the size of the slab memory measured after the start of the system was lower by 58% and 38% with SLUB and SLAB correspondingly. As an estimation of a potential CPU overhead, below are results of slab_bulk_test01 test, kindly provided by Jesper D. Brouer. He also helped with the evaluation of results. The test can be found here: https://github.com/netoptimizer/prototype-kernel/ The smallest number in each row should be picked for a comparison. 
SLUB-patched - bulk-API - SLUB-patched : bulk_quick_reuse objects=1 : 187 - 90 - 224 cycles(tsc) - SLUB-patched : bulk_quick_reuse objects=2 : 110 - 53 - 133 cycles(tsc) - SLUB-patched : bulk_quick_reuse objects=3 : 88 - 95 - 42 cycles(tsc) - SLUB-patched : bulk_quick_reuse objects=4 : 91 - 85 - 36 cycles(tsc) - SLUB-patched : bulk_quick_reuse objects=8 : 32 - 66 - 32 cycles(tsc) SLUB-original - bulk-API - SLUB-original: bulk_quick_reuse objects=1 : 87 - 87 - 142 cycles(tsc) - SLUB-original: bulk_quick_reuse objects=2 : 52 - 53 - 53 cycles(tsc) - SLUB-original: bulk_quick_reuse objects=3 : 42 - 42 - 91 cycles(tsc) - SLUB-original: bulk_quick_reuse objects=4 : 91 - 37 - 37 cycles(tsc) - SLUB-original: bulk_quick_reuse objects=8 : 31 - 79 - 76 cycles(tsc) SLAB-patched - bulk-API - SLAB-patched : bulk_quick_reuse objects=1 : 67 - 67 - 140 cycles(tsc) - SLAB-patched : bulk_quick_reuse objects=2 : 55 - 46 - 46 cycles(tsc) - SLAB-patched : bulk_quick_reuse objects=3 : 93 - 94 - 39 cycles(tsc) - SLAB-patched : bulk_quick_reuse objects=4 : 35 - 88 - 85 cycles(tsc) - SLAB-patched : bulk_quick_reuse objects=8 : 30 - 30 - 30 cycles(tsc) SLAB-original- bulk-API - SLAB-original: bulk_quick_reuse objects=1 : 143 - 136 - 67 cycles(tsc) - SLAB-original: bulk_quick_reuse objects=2 : 45 - 46 - 46 cycles(tsc) - SLAB-original: bulk_quick_reuse objects=3 : 38 - 39 - 39 cycles(tsc) - SLAB-original: bulk_quick_reuse objects=4 : 35 - 87 - 87 cycles(tsc) - SLAB-original: bulk_quick_reuse objects=8 : 29 - 66 - 30 cycles(tsc) This patch (of 19): To convert memcg and lruvec slab counters to bytes there must be a way to change these counters without touching node counters. Factor out __mod_memcg_lruvec_state() out of __mod_lruvec_state(). Signed-off-by: Roman Gushchin Signed-off-by: Andrew Morton Reviewed-by: Vlastimil Babka Reviewed-by: Shakeel Butt Acked-by: Johannes Weiner Cc: Christoph Lameter Cc: Michal Hocko Cc: Tejun Heo Link: http://lkml.kernel.org/r/20200623174037.3951353-1-guro@fb.com Link: http://lkml.kernel.org/r/20200623174037.3951353-2-guro@fb.com Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 17 +++++++++++++++++ mm/memcontrol.c | 43 ++++++++++++++++++++++++------------------- 2 files changed, 41 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index e77197a62809..b250f8197710 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -679,11 +679,23 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, return x; } +void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, + int val); void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val); void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val); void mod_memcg_obj_state(void *p, int idx, int val); +static inline void mod_memcg_lruvec_state(struct lruvec *lruvec, + enum node_stat_item idx, int val) +{ + unsigned long flags; + + local_irq_save(flags); + __mod_memcg_lruvec_state(lruvec, idx, val); + local_irq_restore(flags); +} + static inline void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val) { @@ -1057,6 +1069,11 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, return node_page_state(lruvec_pgdat(lruvec), idx); } +static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec, + enum node_stat_item idx, int val) +{ +} + static inline void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, 
int val) { diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 24892a14cc75..5863ceb310fb 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -713,30 +713,13 @@ parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid) return mem_cgroup_nodeinfo(parent, nid); } -/** - * __mod_lruvec_state - update lruvec memory statistics - * @lruvec: the lruvec - * @idx: the stat item - * @val: delta to add to the counter, can be negative - * - * The lruvec is the intersection of the NUMA node and a cgroup. This - * function updates the all three counters that are affected by a - * change of state at this level: per-node, per-cgroup, per-lruvec. - */ -void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, - int val) +void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, + int val) { - pg_data_t *pgdat = lruvec_pgdat(lruvec); struct mem_cgroup_per_node *pn; struct mem_cgroup *memcg; long x; - /* Update node */ - __mod_node_page_state(pgdat, idx, val); - - if (mem_cgroup_disabled()) - return; - pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); memcg = pn->memcg; @@ -748,6 +731,7 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]); if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) { + pg_data_t *pgdat = lruvec_pgdat(lruvec); struct mem_cgroup_per_node *pi; for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id)) @@ -757,6 +741,27 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x); } +/** + * __mod_lruvec_state - update lruvec memory statistics + * @lruvec: the lruvec + * @idx: the stat item + * @val: delta to add to the counter, can be negative + * + * The lruvec is the intersection of the NUMA node and a cgroup. This + * function updates the all three counters that are affected by a + * change of state at this level: per-node, per-cgroup, per-lruvec. + */ +void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, + int val) +{ + /* Update node */ + __mod_node_page_state(lruvec_pgdat(lruvec), idx, val); + + /* Update memcg and lruvec */ + if (!mem_cgroup_disabled()) + __mod_memcg_lruvec_state(lruvec, idx, val); +} + void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val) { pg_data_t *pgdat = page_pgdat(virt_to_page(p)); -- cgit v1.2.3 From ea426c2a7de8e575108b7cecd3374e0c15a9f25e Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 6 Aug 2020 23:20:35 -0700 Subject: mm: memcg: prepare for byte-sized vmstat items To implement per-object slab memory accounting, we need to convert slab vmstat counters to bytes. Actually, out of 4 levels of counters: global, per-node, per-memcg and per-lruvec only two last levels will require byte-sized counters. It's because global and per-node counters will be counting the number of slab pages, and per-memcg and per-lruvec will be counting the amount of memory taken by charged slab objects. Converting all vmstat counters to bytes or even all slab counters to bytes would introduce an additional overhead. So instead let's store global and per-node counters in pages, and memcg and lruvec counters in bytes. To make the API clean all access helpers (both on the read and write sides) are dealing with bytes. To avoid back-and-forth conversions a new flavor of read-side helpers is introduced, which always returns values in pages: node_page_state_pages() and global_node_page_state_pages(). Actually new helpers are just reading raw values. 
Old helpers are simple wrappers, which will complain on an attempt to read byte value, because at the moment no one actually needs bytes. Thanks to Johannes Weiner for the idea of having the byte-sized API on top of the page-sized internal storage. Signed-off-by: Roman Gushchin Signed-off-by: Andrew Morton Reviewed-by: Vlastimil Babka Reviewed-by: Shakeel Butt Acked-by: Johannes Weiner Cc: Christoph Lameter Cc: Michal Hocko Cc: Tejun Heo Link: http://lkml.kernel.org/r/20200623174037.3951353-3-guro@fb.com Signed-off-by: Linus Torvalds --- drivers/base/node.c | 2 +- include/linux/mmzone.h | 10 ++++++++++ include/linux/vmstat.h | 14 +++++++++++++- mm/memcontrol.c | 14 ++++++++++---- mm/vmstat.c | 30 ++++++++++++++++++++++++++---- 5 files changed, 60 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/base/node.c b/drivers/base/node.c index 5b02f69769e8..e21e31359297 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -513,7 +513,7 @@ static ssize_t node_read_vmstat(struct device *dev, for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) n += sprintf(buf+n, "%s %lu\n", node_stat_name(i), - node_page_state(pgdat, i)); + node_page_state_pages(pgdat, i)); return n; } diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index f6f884970511..f16306e15b98 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -206,6 +206,16 @@ enum node_stat_item { NR_VM_NODE_STAT_ITEMS }; +/* + * Returns true if the value is measured in bytes (most vmstat values are + * measured in pages). This defines the API part, the internal representation + * might be different. + */ +static __always_inline bool vmstat_item_in_bytes(int idx) +{ + return false; +} + /* * We do arithmetic on the LRU lists in various places in the code, * so it is important to keep the active lists LRU_ACTIVE higher in diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index aa961088c551..91220ace31da 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -8,6 +8,7 @@ #include #include #include +#include extern int sysctl_stat_interval; @@ -192,7 +193,8 @@ static inline unsigned long global_zone_page_state(enum zone_stat_item item) return x; } -static inline unsigned long global_node_page_state(enum node_stat_item item) +static inline +unsigned long global_node_page_state_pages(enum node_stat_item item) { long x = atomic_long_read(&vm_node_stat[item]); #ifdef CONFIG_SMP @@ -202,6 +204,13 @@ static inline unsigned long global_node_page_state(enum node_stat_item item) return x; } +static inline unsigned long global_node_page_state(enum node_stat_item item) +{ + VM_WARN_ON_ONCE(vmstat_item_in_bytes(item)); + + return global_node_page_state_pages(item); +} + static inline unsigned long zone_page_state(struct zone *zone, enum zone_stat_item item) { @@ -242,9 +251,12 @@ extern unsigned long sum_zone_node_page_state(int node, extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item); extern unsigned long node_page_state(struct pglist_data *pgdat, enum node_stat_item item); +extern unsigned long node_page_state_pages(struct pglist_data *pgdat, + enum node_stat_item item); #else #define sum_zone_node_page_state(node, item) global_zone_page_state(item) #define node_page_state(node, item) global_node_page_state(item) +#define node_page_state_pages(node, item) global_node_page_state_pages(item) #endif /* CONFIG_NUMA */ #ifdef CONFIG_SMP diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 5863ceb310fb..61ae6658d59f 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ 
-681,13 +681,16 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz) */ void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val) { - long x; + long x, threshold = MEMCG_CHARGE_BATCH; if (mem_cgroup_disabled()) return; + if (vmstat_item_in_bytes(idx)) + threshold <<= PAGE_SHIFT; + x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]); - if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) { + if (unlikely(abs(x) > threshold)) { struct mem_cgroup *mi; /* @@ -718,7 +721,7 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, { struct mem_cgroup_per_node *pn; struct mem_cgroup *memcg; - long x; + long x, threshold = MEMCG_CHARGE_BATCH; pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); memcg = pn->memcg; @@ -729,8 +732,11 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, /* Update lruvec */ __this_cpu_add(pn->lruvec_stat_local->count[idx], val); + if (vmstat_item_in_bytes(idx)) + threshold <<= PAGE_SHIFT; + x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]); - if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) { + if (unlikely(abs(x) > threshold)) { pg_data_t *pgdat = lruvec_pgdat(lruvec); struct mem_cgroup_per_node *pi; diff --git a/mm/vmstat.c b/mm/vmstat.c index 3fb23a21f6dd..b171a76bfe83 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -341,6 +341,11 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item, long x; long t; + if (vmstat_item_in_bytes(item)) { + VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1)); + delta >>= PAGE_SHIFT; + } + x = delta + __this_cpu_read(*p); t = __this_cpu_read(pcp->stat_threshold); @@ -398,6 +403,8 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) s8 __percpu *p = pcp->vm_node_stat_diff + item; s8 v, t; + VM_WARN_ON_ONCE(vmstat_item_in_bytes(item)); + v = __this_cpu_inc_return(*p); t = __this_cpu_read(pcp->stat_threshold); if (unlikely(v > t)) { @@ -442,6 +449,8 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item) s8 __percpu *p = pcp->vm_node_stat_diff + item; s8 v, t; + VM_WARN_ON_ONCE(vmstat_item_in_bytes(item)); + v = __this_cpu_dec_return(*p); t = __this_cpu_read(pcp->stat_threshold); if (unlikely(v < - t)) { @@ -541,6 +550,11 @@ static inline void mod_node_state(struct pglist_data *pgdat, s8 __percpu *p = pcp->vm_node_stat_diff + item; long o, n, t, z; + if (vmstat_item_in_bytes(item)) { + VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1)); + delta >>= PAGE_SHIFT; + } + do { z = 0; /* overflow to node counters */ @@ -989,8 +1003,8 @@ unsigned long sum_zone_numa_state(int node, /* * Determine the per node value of a stat item. 
*/ -unsigned long node_page_state(struct pglist_data *pgdat, - enum node_stat_item item) +unsigned long node_page_state_pages(struct pglist_data *pgdat, + enum node_stat_item item) { long x = atomic_long_read(&pgdat->vm_stat[item]); #ifdef CONFIG_SMP @@ -999,6 +1013,14 @@ unsigned long node_page_state(struct pglist_data *pgdat, #endif return x; } + +unsigned long node_page_state(struct pglist_data *pgdat, + enum node_stat_item item) +{ + VM_WARN_ON_ONCE(vmstat_item_in_bytes(item)); + + return node_page_state_pages(pgdat, item); +} #endif #ifdef CONFIG_COMPACTION @@ -1577,7 +1599,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, seq_printf(m, "\n per-node stats"); for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) { seq_printf(m, "\n %-12s %lu", node_stat_name(i), - node_page_state(pgdat, i)); + node_page_state_pages(pgdat, i)); } } seq_printf(m, @@ -1698,7 +1720,7 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos) #endif for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) - v[i] = global_node_page_state(i); + v[i] = global_node_page_state_pages(i); v += NR_VM_NODE_STAT_ITEMS; global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD, -- cgit v1.2.3 From d42f3245c7e299e017213fa028c319316bcdb7f4 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 6 Aug 2020 23:20:39 -0700 Subject: mm: memcg: convert vmstat slab counters to bytes In order to prepare for per-object slab memory accounting, convert NR_SLAB_RECLAIMABLE and NR_SLAB_UNRECLAIMABLE vmstat items to bytes. To make it obvious, rename them to NR_SLAB_RECLAIMABLE_B and NR_SLAB_UNRECLAIMABLE_B (similar to NR_KERNEL_STACK_KB). Internally global and per-node counters are stored in pages, however memcg and lruvec counters are stored in bytes. This scheme may look weird, but only for now. As soon as slab pages will be shared between multiple cgroups, global and node counters will reflect the total number of slab pages. However memcg and lruvec counters will be used for per-memcg slab memory tracking, which will take separate kernel objects in the account. Keeping global and node counters in pages helps to avoid additional overhead. The size of slab memory shouldn't exceed 4Gb on 32-bit machines, so it will fit into atomic_long_t we use for vmstats. 
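As a hedged illustration of the byte-denominated API described above (not part of the patch; the example_* names are hypothetical, while the counters and helpers are those introduced here and in the previous patch), a caller accounting one order-N slab page now passes a byte delta on the write side, and read-side code uses the _pages flavor for byte-sized items:

	/* Hedged sketch: charging one order-N slab page under the new API */
	static void example_account_slab_page(struct pglist_data *pgdat, int order,
					      bool reclaimable)
	{
		enum node_stat_item idx = reclaimable ?
			NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;

		/* write side takes bytes; the node counter keeps storing pages */
		mod_node_page_state(pgdat, idx, PAGE_SIZE << order);
	}

	/* Hedged sketch: reading the node-level slab footprint in pages */
	static unsigned long example_node_slab_pages(struct pglist_data *pgdat)
	{
		return node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) +
		       node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B);
	}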
Signed-off-by: Roman Gushchin Signed-off-by: Andrew Morton Reviewed-by: Shakeel Butt Acked-by: Johannes Weiner Acked-by: Vlastimil Babka Cc: Christoph Lameter Cc: Michal Hocko Cc: Tejun Heo Link: http://lkml.kernel.org/r/20200623174037.3951353-4-guro@fb.com Signed-off-by: Linus Torvalds --- drivers/base/node.c | 4 ++-- fs/proc/meminfo.c | 4 ++-- include/linux/mmzone.h | 16 +++++++++++++--- kernel/power/snapshot.c | 2 +- mm/memcontrol.c | 11 ++++------- mm/oom_kill.c | 2 +- mm/page_alloc.c | 8 ++++---- mm/slab.h | 15 ++++++++------- mm/slab_common.c | 4 ++-- mm/slob.c | 12 ++++++------ mm/slub.c | 8 ++++---- mm/vmscan.c | 3 ++- mm/workingset.c | 6 ++++-- 13 files changed, 53 insertions(+), 42 deletions(-) (limited to 'include') diff --git a/drivers/base/node.c b/drivers/base/node.c index e21e31359297..0cf13e31603c 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -368,8 +368,8 @@ static ssize_t node_read_meminfo(struct device *dev, unsigned long sreclaimable, sunreclaimable; si_meminfo_node(&i, nid); - sreclaimable = node_page_state(pgdat, NR_SLAB_RECLAIMABLE); - sunreclaimable = node_page_state(pgdat, NR_SLAB_UNRECLAIMABLE); + sreclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B); + sunreclaimable = node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B); n = sprintf(buf, "Node %d MemTotal: %8lu kB\n" "Node %d MemFree: %8lu kB\n" diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index e9a6841fc25b..38ea95fd919a 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -52,8 +52,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v) pages[lru] = global_node_page_state(NR_LRU_BASE + lru); available = si_mem_available(); - sreclaimable = global_node_page_state(NR_SLAB_RECLAIMABLE); - sunreclaim = global_node_page_state(NR_SLAB_UNRECLAIMABLE); + sreclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B); + sunreclaim = global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B); show_val_kb(m, "MemTotal: ", i.totalram); show_val_kb(m, "MemFree: ", i.freeram); diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index f16306e15b98..b79100edd228 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -174,8 +174,8 @@ enum node_stat_item { NR_INACTIVE_FILE, /* " " " " " */ NR_ACTIVE_FILE, /* " " " " " */ NR_UNEVICTABLE, /* " " " " " */ - NR_SLAB_RECLAIMABLE, - NR_SLAB_UNRECLAIMABLE, + NR_SLAB_RECLAIMABLE_B, + NR_SLAB_UNRECLAIMABLE_B, NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ WORKINGSET_NODES, @@ -213,7 +213,17 @@ enum node_stat_item { */ static __always_inline bool vmstat_item_in_bytes(int idx) { - return false; + /* + * Global and per-node slab counters track slab pages. + * It's expected that changes are multiples of PAGE_SIZE. + * Internally values are stored in pages. + * + * Per-memcg and per-lruvec counters track memory, consumed + * by individual slab objects. These counters are actually + * byte-precise. 
+ */ + return (idx == NR_SLAB_RECLAIMABLE_B || + idx == NR_SLAB_UNRECLAIMABLE_B); } /* diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index cef154261fe2..d25749bce7cf 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -1663,7 +1663,7 @@ static unsigned long minimum_image_size(unsigned long saveable) { unsigned long size; - size = global_node_page_state(NR_SLAB_RECLAIMABLE) + size = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) + global_node_page_state(NR_ACTIVE_ANON) + global_node_page_state(NR_INACTIVE_ANON) + global_node_page_state(NR_ACTIVE_FILE) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 61ae6658d59f..328b7e7bf9ab 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1391,9 +1391,8 @@ static char *memory_stat_format(struct mem_cgroup *memcg) (u64)memcg_page_state(memcg, MEMCG_KERNEL_STACK_KB) * 1024); seq_buf_printf(&s, "slab %llu\n", - (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) + - memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE)) * - PAGE_SIZE); + (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B) + + memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B))); seq_buf_printf(&s, "sock %llu\n", (u64)memcg_page_state(memcg, MEMCG_SOCK) * PAGE_SIZE); @@ -1423,11 +1422,9 @@ static char *memory_stat_format(struct mem_cgroup *memcg) PAGE_SIZE); seq_buf_printf(&s, "slab_reclaimable %llu\n", - (u64)memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) * - PAGE_SIZE); + (u64)memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B)); seq_buf_printf(&s, "slab_unreclaimable %llu\n", - (u64)memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE) * - PAGE_SIZE); + (u64)memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B)); /* Accumulated memory events */ diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 6e94962893ee..d30ce75f23fb 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -184,7 +184,7 @@ static bool is_dump_unreclaim_slabs(void) global_node_page_state(NR_ISOLATED_FILE) + global_node_page_state(NR_UNEVICTABLE); - return (global_node_page_state(NR_SLAB_UNRECLAIMABLE) > nr_lru); + return (global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B) > nr_lru); } /** diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 901a21f61d68..f9ad093814d2 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5220,8 +5220,8 @@ long si_mem_available(void) * items that are in use, and cannot be freed. Cap this estimate at the * low watermark. */ - reclaimable = global_node_page_state(NR_SLAB_RECLAIMABLE) + - global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE); + reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) + + global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE); available += reclaimable - min(reclaimable / 2, wmark_low); if (available < 0) @@ -5364,8 +5364,8 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask) global_node_page_state(NR_UNEVICTABLE), global_node_page_state(NR_FILE_DIRTY), global_node_page_state(NR_WRITEBACK), - global_node_page_state(NR_SLAB_RECLAIMABLE), - global_node_page_state(NR_SLAB_UNRECLAIMABLE), + global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B), + global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B), global_node_page_state(NR_FILE_MAPPED), global_node_page_state(NR_SHMEM), global_zone_page_state(NR_PAGETABLE), diff --git a/mm/slab.h b/mm/slab.h index fceb4341ba91..09be3ca6fe87 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -273,7 +273,7 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); static inline int cache_vmstat_idx(struct kmem_cache *s) { return (s->flags & SLAB_RECLAIM_ACCOUNT) ? 
- NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE; + NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B; } #ifdef CONFIG_SLUB_DEBUG @@ -390,7 +390,7 @@ static __always_inline int memcg_charge_slab(struct page *page, if (unlikely(!memcg || mem_cgroup_is_root(memcg))) { mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s), - nr_pages); + nr_pages << PAGE_SHIFT); percpu_ref_get_many(&s->memcg_params.refcnt, nr_pages); return 0; } @@ -400,7 +400,7 @@ static __always_inline int memcg_charge_slab(struct page *page, goto out; lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page)); - mod_lruvec_state(lruvec, cache_vmstat_idx(s), nr_pages); + mod_lruvec_state(lruvec, cache_vmstat_idx(s), nr_pages << PAGE_SHIFT); /* transer try_charge() page references to kmem_cache */ percpu_ref_get_many(&s->memcg_params.refcnt, nr_pages); @@ -425,11 +425,12 @@ static __always_inline void memcg_uncharge_slab(struct page *page, int order, memcg = READ_ONCE(s->memcg_params.memcg); if (likely(!mem_cgroup_is_root(memcg))) { lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page)); - mod_lruvec_state(lruvec, cache_vmstat_idx(s), -nr_pages); + mod_lruvec_state(lruvec, cache_vmstat_idx(s), + -(nr_pages << PAGE_SHIFT)); memcg_kmem_uncharge(memcg, nr_pages); } else { mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s), - -nr_pages); + -(nr_pages << PAGE_SHIFT)); } rcu_read_unlock(); @@ -513,7 +514,7 @@ static __always_inline int charge_slab_page(struct page *page, { if (is_root_cache(s)) { mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s), - 1 << order); + PAGE_SIZE << order); return 0; } @@ -525,7 +526,7 @@ static __always_inline void uncharge_slab_page(struct page *page, int order, { if (is_root_cache(s)) { mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s), - -(1 << order)); + -(PAGE_SIZE << order)); return; } diff --git a/mm/slab_common.c b/mm/slab_common.c index 616ec8a0d91a..a73f168b1035 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -1363,8 +1363,8 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) page = alloc_pages(flags, order); if (likely(page)) { ret = page_address(page); - mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE, - 1 << order); + mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B, + PAGE_SIZE << order); } ret = kasan_kmalloc_large(ret, size, flags); /* As ret might get tagged, call kmemleak hook after KASAN. 
*/ diff --git a/mm/slob.c b/mm/slob.c index ac2aecfbc7a8..7cc9805c8091 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -202,8 +202,8 @@ static void *slob_new_pages(gfp_t gfp, int order, int node) if (!page) return NULL; - mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE, - 1 << order); + mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B, + PAGE_SIZE << order); return page_address(page); } @@ -214,8 +214,8 @@ static void slob_free_pages(void *b, int order) if (current->reclaim_state) current->reclaim_state->reclaimed_slab += 1 << order; - mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE, - -(1 << order)); + mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE_B, + -(PAGE_SIZE << order)); __free_pages(sp, order); } @@ -552,8 +552,8 @@ void kfree(const void *block) slob_free(m, *m + align); } else { unsigned int order = compound_order(sp); - mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE, - -(1 << order)); + mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE_B, + -(PAGE_SIZE << order)); __free_pages(sp, order); } diff --git a/mm/slub.c b/mm/slub.c index ae39eb392396..2d73d677f7ac 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3991,8 +3991,8 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node) page = alloc_pages_node(node, flags, order); if (page) { ptr = page_address(page); - mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE, - 1 << order); + mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B, + PAGE_SIZE << order); } return kmalloc_large_node_hook(ptr, size, flags); @@ -4123,8 +4123,8 @@ void kfree(const void *x) BUG_ON(!PageCompound(page)); kfree_hook(object); - mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE, - -(1 << order)); + mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B, + -(PAGE_SIZE << order)); __free_pages(page, order); return; } diff --git a/mm/vmscan.c b/mm/vmscan.c index 749d239c62b2..2ac43664aba4 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -4222,7 +4222,8 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) * unmapped file backed pages. */ if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages && - node_page_state(pgdat, NR_SLAB_RECLAIMABLE) <= pgdat->min_slab_pages) + node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <= + pgdat->min_slab_pages) return NODE_RECLAIM_FULL; /* diff --git a/mm/workingset.c b/mm/workingset.c index 50b7937bab32..b199726924dd 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -486,8 +486,10 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker, for (pages = 0, i = 0; i < NR_LRU_LISTS; i++) pages += lruvec_page_state_local(lruvec, NR_LRU_BASE + i); - pages += lruvec_page_state_local(lruvec, NR_SLAB_RECLAIMABLE); - pages += lruvec_page_state_local(lruvec, NR_SLAB_UNRECLAIMABLE); + pages += lruvec_page_state_local( + lruvec, NR_SLAB_RECLAIMABLE_B) >> PAGE_SHIFT; + pages += lruvec_page_state_local( + lruvec, NR_SLAB_UNRECLAIMABLE_B) >> PAGE_SHIFT; } else #endif pages = node_present_pages(sc->nid); -- cgit v1.2.3 From 4138fdfc8b5db5a7a4b9b50c69d475fb2ac351b7 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 6 Aug 2020 23:20:42 -0700 Subject: mm: slub: implement SLUB version of obj_to_index() This commit implements SLUB version of the obj_to_index() function, which will be required to calculate the offset of obj_cgroup in the obj_cgroups vector to store/obtain the objcg ownership data. 
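Conceptually, the new helper maps an object address to its slot index within a slab page; a hedged sketch of the naive form (the example_* name is hypothetical and this is not the patch's implementation) is simply:

	/* Hedged sketch: the naive index computation the patch optimizes */
	static unsigned int example_obj_to_index(void *slab_base, void *obj,
						 unsigned int object_size)
	{
		return (obj - slab_base) / object_size;
	}

The actual implementation below avoids this per-object division, as explained next.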
To make it faster, let's repeat the SLAB's trick introduced by commit 6a2d7a955d8d ("SLAB: use a multiply instead of a divide in obj_to_index()") and avoid an expensive division. Vlastimil Babka noticed, that SLUB does have already a similar function called slab_index(), which is defined only if SLUB_DEBUG is enabled. The function does a similar math, but with a division, and it also takes a page address instead of a page pointer. Let's remove slab_index() and replace it with the new helper __obj_to_index(), which takes a page address. obj_to_index() will be a simple wrapper taking a page pointer and passing page_address(page) into __obj_to_index(). Signed-off-by: Roman Gushchin Signed-off-by: Andrew Morton Reviewed-by: Vlastimil Babka Reviewed-by: Shakeel Butt Acked-by: Johannes Weiner Cc: Christoph Lameter Cc: Michal Hocko Cc: Tejun Heo Link: http://lkml.kernel.org/r/20200623174037.3951353-5-guro@fb.com Signed-off-by: Linus Torvalds --- include/linux/slub_def.h | 16 ++++++++++++++++ mm/slub.c | 15 +++++---------- 2 files changed, 21 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index d2153789bd9f..30e91c83d401 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -8,6 +8,7 @@ * (C) 2007 SGI, Christoph Lameter */ #include +#include enum stat_item { ALLOC_FASTPATH, /* Allocation from cpu slab */ @@ -86,6 +87,7 @@ struct kmem_cache { unsigned long min_partial; unsigned int size; /* The size of an object including metadata */ unsigned int object_size;/* The size of an object without metadata */ + struct reciprocal_value reciprocal_size; unsigned int offset; /* Free pointer offset */ #ifdef CONFIG_SLUB_CPU_PARTIAL /* Number of per cpu partial objects to keep around */ @@ -182,4 +184,18 @@ static inline void *nearest_obj(struct kmem_cache *cache, struct page *page, return result; } +/* Determine object index from a given position */ +static inline unsigned int __obj_to_index(const struct kmem_cache *cache, + void *addr, void *obj) +{ + return reciprocal_divide(kasan_reset_tag(obj) - addr, + cache->reciprocal_size); +} + +static inline unsigned int obj_to_index(const struct kmem_cache *cache, + const struct page *page, void *obj) +{ + return __obj_to_index(cache, page_address(page), obj); +} + #endif /* _LINUX_SLUB_DEF_H */ diff --git a/mm/slub.c b/mm/slub.c index 2d73d677f7ac..2a3075538f26 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -317,12 +317,6 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) __p < (__addr) + (__objects) * (__s)->size; \ __p += (__s)->size) -/* Determine object index from a given position */ -static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr) -{ - return (kasan_reset_tag(p) - addr) / s->size; -} - static inline unsigned int order_objects(unsigned int order, unsigned int size) { return ((unsigned int)PAGE_SIZE << order) / size; @@ -465,7 +459,7 @@ static unsigned long *get_map(struct kmem_cache *s, struct page *page) bitmap_zero(object_map, page->objects); for (p = page->freelist; p; p = get_freepointer(s, p)) - set_bit(slab_index(p, s, addr), object_map); + set_bit(__obj_to_index(s, addr, p), object_map); return object_map; } @@ -3754,6 +3748,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order) */ size = ALIGN(size, s->align); s->size = size; + s->reciprocal_size = reciprocal_value(size); if (forced_order >= 0) order = forced_order; else @@ -3858,7 +3853,7 @@ static void list_slab_objects(struct 
kmem_cache *s, struct page *page, map = get_map(s, page); for_each_object(p, s, addr, page->objects) { - if (!test_bit(slab_index(p, s, addr), map)) { + if (!test_bit(__obj_to_index(s, addr, p), map)) { pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr); print_tracking(s, p); } @@ -4574,7 +4569,7 @@ static void validate_slab(struct kmem_cache *s, struct page *page) /* Now we know that a valid freelist exists */ map = get_map(s, page); for_each_object(p, s, addr, page->objects) { - u8 val = test_bit(slab_index(p, s, addr), map) ? + u8 val = test_bit(__obj_to_index(s, addr, p), map) ? SLUB_RED_INACTIVE : SLUB_RED_ACTIVE; if (!check_object(s, page, p, val)) @@ -4765,7 +4760,7 @@ static void process_slab(struct loc_track *t, struct kmem_cache *s, map = get_map(s, page); for_each_object(p, s, addr, page->objects) - if (!test_bit(slab_index(p, s, addr), map)) + if (!test_bit(__obj_to_index(s, addr, p), map)) add_location(t, s, get_track(s, p, alloc)); put_map(map); } -- cgit v1.2.3 From bf4f059954dcb221384b2f784677e19a13cd4bdb Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 6 Aug 2020 23:20:49 -0700 Subject: mm: memcg/slab: obj_cgroup API Obj_cgroup API provides an ability to account sub-page sized kernel objects, which potentially outlive the original memory cgroup. The top-level API consists of the following functions: bool obj_cgroup_tryget(struct obj_cgroup *objcg); void obj_cgroup_get(struct obj_cgroup *objcg); void obj_cgroup_put(struct obj_cgroup *objcg); int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size); void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size); struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg); struct obj_cgroup *get_obj_cgroup_from_current(void); Object cgroup is basically a pointer to a memory cgroup with a per-cpu reference counter. It substitutes a memory cgroup in places where it's necessary to charge a custom amount of bytes instead of pages. All charged memory rounded down to pages is charged to the corresponding memory cgroup using __memcg_kmem_charge(). It implements reparenting: on memcg offlining it's getting reattached to the parent memory cgroup. Each online memory cgroup has an associated active object cgroup to handle new allocations and the list of all attached object cgroups. On offlining of a cgroup this list is reparented and for each object cgroup in the list the memcg pointer is swapped to the parent memory cgroup. It prevents long-living objects from pinning the original memory cgroup in the memory. The implementation is based on byte-sized per-cpu stocks. A sub-page sized leftover is stored in an atomic field, which is a part of obj_cgroup object. So on cgroup offlining the leftover is automatically reparented. memcg->objcg is rcu protected. objcg->memcg is a raw pointer, which is always pointing at a memory cgroup, but can be atomically swapped to the parent memory cgroup. So a user must ensure the lifetime of the cgroup, e.g. grab rcu_read_lock or css_set_lock. 
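A hedged usage sketch of the API (the example_obj structure and example_* helpers are hypothetical; only the obj_cgroup_*() and get_obj_cgroup_from_current() calls come from this patch), showing how a byte-sized allocation can hold its obj_cgroup reference until it is uncharged:

	struct example_obj {
		struct obj_cgroup *objcg;	/* hypothetical owner field */
		/* ... payload ... */
	};

	static int example_charge(struct example_obj *obj, size_t size, gfp_t gfp)
	{
		struct obj_cgroup *objcg = get_obj_cgroup_from_current();

		if (!objcg)
			return 0;		/* nothing to charge against */

		if (obj_cgroup_charge(objcg, gfp, size)) {
			obj_cgroup_put(objcg);
			return -ENOMEM;
		}

		obj->objcg = objcg;		/* keep the reference until free */
		return 0;
	}

	static void example_uncharge(struct example_obj *obj, size_t size)
	{
		if (!obj->objcg)
			return;

		obj_cgroup_uncharge(obj->objcg, size);
		obj_cgroup_put(obj->objcg);
		obj->objcg = NULL;
	}

Because an obj_cgroup can be reparented, the uncharge is applied to whichever memory cgroup the objcg points at by then, which is exactly the behaviour the reparenting described above is meant to provide.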
Suggested-by: Johannes Weiner Signed-off-by: Roman Gushchin Signed-off-by: Andrew Morton Reviewed-by: Shakeel Butt Cc: Christoph Lameter Cc: Michal Hocko Cc: Tejun Heo Cc: Vlastimil Babka Link: http://lkml.kernel.org/r/20200623174037.3951353-7-guro@fb.com Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 51 ++++++++ mm/memcontrol.c | 288 ++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 338 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index b250f8197710..f2f9d5d6b7d1 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -23,6 +23,7 @@ #include struct mem_cgroup; +struct obj_cgroup; struct page; struct mm_struct; struct kmem_cache; @@ -192,6 +193,22 @@ struct memcg_cgwb_frn { struct wb_completion done; /* tracks in-flight foreign writebacks */ }; +/* + * Bucket for arbitrarily byte-sized objects charged to a memory + * cgroup. The bucket can be reparented in one piece when the cgroup + * is destroyed, without having to round up the individual references + * of all live memory objects in the wild. + */ +struct obj_cgroup { + struct percpu_ref refcnt; + struct mem_cgroup *memcg; + atomic_t nr_charged_bytes; + union { + struct list_head list; + struct rcu_head rcu; + }; +}; + /* * The memory controller data structure. The memory controller controls both * page cache and RSS per cgroup. We would eventually like to provide @@ -301,6 +318,8 @@ struct mem_cgroup { int kmemcg_id; enum memcg_kmem_state kmem_state; struct list_head kmem_caches; + struct obj_cgroup __rcu *objcg; + struct list_head objcg_list; /* list of inherited objcgs */ #endif #ifdef CONFIG_CGROUP_WRITEBACK @@ -416,6 +435,33 @@ struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){ return css ? container_of(css, struct mem_cgroup, css) : NULL; } +static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg) +{ + return percpu_ref_tryget(&objcg->refcnt); +} + +static inline void obj_cgroup_get(struct obj_cgroup *objcg) +{ + percpu_ref_get(&objcg->refcnt); +} + +static inline void obj_cgroup_put(struct obj_cgroup *objcg) +{ + percpu_ref_put(&objcg->refcnt); +} + +/* + * After the initialization objcg->memcg is always pointing at + * a valid memcg, but can be atomically swapped to the parent memcg. + * + * The caller must ensure that the returned memcg won't be released: + * e.g. acquire the rcu_read_lock or css_set_lock. 
+ */ +static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg) +{ + return READ_ONCE(objcg->memcg); +} + static inline void mem_cgroup_put(struct mem_cgroup *memcg) { if (memcg) @@ -1368,6 +1414,11 @@ void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages); int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order); void __memcg_kmem_uncharge_page(struct page *page, int order); +struct obj_cgroup *get_obj_cgroup_from_current(void); + +int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size); +void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size); + extern struct static_key_false memcg_kmem_enabled_key; extern struct workqueue_struct *memcg_kmem_cache_wq; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 4f9a3f55db71..1cc784556e05 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -257,6 +257,98 @@ struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr) } #ifdef CONFIG_MEMCG_KMEM +extern spinlock_t css_set_lock; + +static void obj_cgroup_release(struct percpu_ref *ref) +{ + struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt); + struct mem_cgroup *memcg; + unsigned int nr_bytes; + unsigned int nr_pages; + unsigned long flags; + + /* + * At this point all allocated objects are freed, and + * objcg->nr_charged_bytes can't have an arbitrary byte value. + * However, it can be PAGE_SIZE or (x * PAGE_SIZE). + * + * The following sequence can lead to it: + * 1) CPU0: objcg == stock->cached_objcg + * 2) CPU1: we do a small allocation (e.g. 92 bytes), + * PAGE_SIZE bytes are charged + * 3) CPU1: a process from another memcg is allocating something, + * the stock if flushed, + * objcg->nr_charged_bytes = PAGE_SIZE - 92 + * 5) CPU0: we do release this object, + * 92 bytes are added to stock->nr_bytes + * 6) CPU0: stock is flushed, + * 92 bytes are added to objcg->nr_charged_bytes + * + * In the result, nr_charged_bytes == PAGE_SIZE. + * This page will be uncharged in obj_cgroup_release(). 
+ */ + nr_bytes = atomic_read(&objcg->nr_charged_bytes); + WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1)); + nr_pages = nr_bytes >> PAGE_SHIFT; + + spin_lock_irqsave(&css_set_lock, flags); + memcg = obj_cgroup_memcg(objcg); + if (nr_pages) + __memcg_kmem_uncharge(memcg, nr_pages); + list_del(&objcg->list); + mem_cgroup_put(memcg); + spin_unlock_irqrestore(&css_set_lock, flags); + + percpu_ref_exit(ref); + kfree_rcu(objcg, rcu); +} + +static struct obj_cgroup *obj_cgroup_alloc(void) +{ + struct obj_cgroup *objcg; + int ret; + + objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL); + if (!objcg) + return NULL; + + ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0, + GFP_KERNEL); + if (ret) { + kfree(objcg); + return NULL; + } + INIT_LIST_HEAD(&objcg->list); + return objcg; +} + +static void memcg_reparent_objcgs(struct mem_cgroup *memcg, + struct mem_cgroup *parent) +{ + struct obj_cgroup *objcg, *iter; + + objcg = rcu_replace_pointer(memcg->objcg, NULL, true); + + spin_lock_irq(&css_set_lock); + + /* Move active objcg to the parent's list */ + xchg(&objcg->memcg, parent); + css_get(&parent->css); + list_add(&objcg->list, &parent->objcg_list); + + /* Move already reparented objcgs to the parent's list */ + list_for_each_entry(iter, &memcg->objcg_list, list) { + css_get(&parent->css); + xchg(&iter->memcg, parent); + css_put(&memcg->css); + } + list_splice(&memcg->objcg_list, &parent->objcg_list); + + spin_unlock_irq(&css_set_lock); + + percpu_ref_kill(&objcg->refcnt); +} + /* * This will be the memcg's index in each cache's ->memcg_params.memcg_caches. * The main reason for not using cgroup id for this: @@ -2047,6 +2139,12 @@ EXPORT_SYMBOL(unlock_page_memcg); struct memcg_stock_pcp { struct mem_cgroup *cached; /* this never be root cgroup */ unsigned int nr_pages; + +#ifdef CONFIG_MEMCG_KMEM + struct obj_cgroup *cached_objcg; + unsigned int nr_bytes; +#endif + struct work_struct work; unsigned long flags; #define FLUSHING_CACHED_CHARGE 0 @@ -2054,6 +2152,22 @@ struct memcg_stock_pcp { static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); static DEFINE_MUTEX(percpu_charge_mutex); +#ifdef CONFIG_MEMCG_KMEM +static void drain_obj_stock(struct memcg_stock_pcp *stock); +static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, + struct mem_cgroup *root_memcg); + +#else +static inline void drain_obj_stock(struct memcg_stock_pcp *stock) +{ +} +static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, + struct mem_cgroup *root_memcg) +{ + return false; +} +#endif + /** * consume_stock: Try to consume stocked charge on this cpu. * @memcg: memcg to consume from. 
@@ -2120,6 +2234,7 @@ static void drain_local_stock(struct work_struct *dummy) local_irq_save(flags); stock = this_cpu_ptr(&memcg_stock); + drain_obj_stock(stock); drain_stock(stock); clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); @@ -2179,6 +2294,8 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) if (memcg && stock->nr_pages && mem_cgroup_is_descendant(memcg, root_memcg)) flush = true; + if (obj_stock_flush_required(stock, root_memcg)) + flush = true; rcu_read_unlock(); if (flush && @@ -2705,6 +2822,30 @@ struct mem_cgroup *mem_cgroup_from_obj(void *p) return page->mem_cgroup; } +__always_inline struct obj_cgroup *get_obj_cgroup_from_current(void) +{ + struct obj_cgroup *objcg = NULL; + struct mem_cgroup *memcg; + + if (unlikely(!current->mm && !current->active_memcg)) + return NULL; + + rcu_read_lock(); + if (unlikely(current->active_memcg)) + memcg = rcu_dereference(current->active_memcg); + else + memcg = mem_cgroup_from_task(current); + + for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) { + objcg = rcu_dereference(memcg->objcg); + if (objcg && obj_cgroup_tryget(objcg)) + break; + } + rcu_read_unlock(); + + return objcg; +} + static int memcg_alloc_cache_id(void) { int id, size; @@ -2996,6 +3137,140 @@ void __memcg_kmem_uncharge_page(struct page *page, int order) if (PageKmemcg(page)) __ClearPageKmemcg(page); } + +static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) +{ + struct memcg_stock_pcp *stock; + unsigned long flags; + bool ret = false; + + local_irq_save(flags); + + stock = this_cpu_ptr(&memcg_stock); + if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) { + stock->nr_bytes -= nr_bytes; + ret = true; + } + + local_irq_restore(flags); + + return ret; +} + +static void drain_obj_stock(struct memcg_stock_pcp *stock) +{ + struct obj_cgroup *old = stock->cached_objcg; + + if (!old) + return; + + if (stock->nr_bytes) { + unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT; + unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1); + + if (nr_pages) { + rcu_read_lock(); + __memcg_kmem_uncharge(obj_cgroup_memcg(old), nr_pages); + rcu_read_unlock(); + } + + /* + * The leftover is flushed to the centralized per-memcg value. + * On the next attempt to refill obj stock it will be moved + * to a per-cpu stock (probably, on an other CPU), see + * refill_obj_stock(). + * + * How often it's flushed is a trade-off between the memory + * limit enforcement accuracy and potential CPU contention, + * so it might be changed in the future. 
+ */ + atomic_add(nr_bytes, &old->nr_charged_bytes); + stock->nr_bytes = 0; + } + + obj_cgroup_put(old); + stock->cached_objcg = NULL; +} + +static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, + struct mem_cgroup *root_memcg) +{ + struct mem_cgroup *memcg; + + if (stock->cached_objcg) { + memcg = obj_cgroup_memcg(stock->cached_objcg); + if (memcg && mem_cgroup_is_descendant(memcg, root_memcg)) + return true; + } + + return false; +} + +static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) +{ + struct memcg_stock_pcp *stock; + unsigned long flags; + + local_irq_save(flags); + + stock = this_cpu_ptr(&memcg_stock); + if (stock->cached_objcg != objcg) { /* reset if necessary */ + drain_obj_stock(stock); + obj_cgroup_get(objcg); + stock->cached_objcg = objcg; + stock->nr_bytes = atomic_xchg(&objcg->nr_charged_bytes, 0); + } + stock->nr_bytes += nr_bytes; + + if (stock->nr_bytes > PAGE_SIZE) + drain_obj_stock(stock); + + local_irq_restore(flags); +} + +int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size) +{ + struct mem_cgroup *memcg; + unsigned int nr_pages, nr_bytes; + int ret; + + if (consume_obj_stock(objcg, size)) + return 0; + + /* + * In theory, memcg->nr_charged_bytes can have enough + * pre-charged bytes to satisfy the allocation. However, + * flushing memcg->nr_charged_bytes requires two atomic + * operations, and memcg->nr_charged_bytes can't be big, + * so it's better to ignore it and try grab some new pages. + * memcg->nr_charged_bytes will be flushed in + * refill_obj_stock(), called from this function or + * independently later. + */ + rcu_read_lock(); + memcg = obj_cgroup_memcg(objcg); + css_get(&memcg->css); + rcu_read_unlock(); + + nr_pages = size >> PAGE_SHIFT; + nr_bytes = size & (PAGE_SIZE - 1); + + if (nr_bytes) + nr_pages += 1; + + ret = __memcg_kmem_charge(memcg, gfp, nr_pages); + if (!ret && nr_bytes) + refill_obj_stock(objcg, PAGE_SIZE - nr_bytes); + + css_put(&memcg->css); + return ret; +} + +void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size) +{ + refill_obj_stock(objcg, size); +} + #endif /* CONFIG_MEMCG_KMEM */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -3416,6 +3691,7 @@ static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg) #ifdef CONFIG_MEMCG_KMEM static int memcg_online_kmem(struct mem_cgroup *memcg) { + struct obj_cgroup *objcg; int memcg_id; if (cgroup_memory_nokmem) @@ -3428,6 +3704,14 @@ static int memcg_online_kmem(struct mem_cgroup *memcg) if (memcg_id < 0) return memcg_id; + objcg = obj_cgroup_alloc(); + if (!objcg) { + memcg_free_cache_id(memcg_id); + return -ENOMEM; + } + objcg->memcg = memcg; + rcu_assign_pointer(memcg->objcg, objcg); + static_branch_enable(&memcg_kmem_enabled_key); /* @@ -3464,9 +3748,10 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg) parent = root_mem_cgroup; /* - * Deactivate and reparent kmem_caches. + * Deactivate and reparent kmem_caches and objcgs. 
*/ memcg_deactivate_kmem_caches(memcg, parent); + memcg_reparent_objcgs(memcg, parent); kmemcg_id = memcg->kmemcg_id; BUG_ON(kmemcg_id < 0); @@ -5030,6 +5315,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void) memcg->socket_pressure = jiffies; #ifdef CONFIG_MEMCG_KMEM memcg->kmemcg_id = -1; + INIT_LIST_HEAD(&memcg->objcg_list); #endif #ifdef CONFIG_CGROUP_WRITEBACK INIT_LIST_HEAD(&memcg->cgwb_list); -- cgit v1.2.3 From 286e04b8ed7a04279ae277f0f024430246ea5eec Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 6 Aug 2020 23:20:52 -0700 Subject: mm: memcg/slab: allocate obj_cgroups for non-root slab pages Allocate and release memory to store obj_cgroup pointers for each non-root slab page. Reuse page->mem_cgroup pointer to store a pointer to the allocated space. This commit temporarily increases the memory footprint of the kernel memory accounting. To store obj_cgroup pointers we'll need a place for an objcg_pointer for each allocated object. However, the following patches in the series will enable sharing of slab pages between memory cgroups, which will dramatically increase the total slab utilization. And the final memory footprint will be significantly smaller than before. To distinguish between obj_cgroups and memcg pointers in case when it's not obvious which one is used (as in page_cgroup_ino()), let's always set the lowest bit in the obj_cgroup case. The original obj_cgroups pointer is marked to be ignored by kmemleak, which otherwise would report a memory leak for each allocated vector. Signed-off-by: Roman Gushchin Signed-off-by: Andrew Morton Reviewed-by: Vlastimil Babka Reviewed-by: Shakeel Butt Cc: Christoph Lameter Cc: Johannes Weiner Cc: Michal Hocko Cc: Tejun Heo Link: http://lkml.kernel.org/r/20200623174037.3951353-8-guro@fb.com Signed-off-by: Linus Torvalds --- include/linux/mm_types.h | 5 ++++- include/linux/slab_def.h | 6 ++++++ include/linux/slub_def.h | 5 +++++ mm/memcontrol.c | 17 +++++++++++++--- mm/slab.h | 52 ++++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 81 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 64ede5f150dc..0277fbab7c93 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -198,7 +198,10 @@ struct page { atomic_t _refcount; #ifdef CONFIG_MEMCG - struct mem_cgroup *mem_cgroup; + union { + struct mem_cgroup *mem_cgroup; + struct obj_cgroup **obj_cgroups; + }; #endif /* diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index abc7de77b988..ccda7b9669a5 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -114,4 +114,10 @@ static inline unsigned int obj_to_index(const struct kmem_cache *cache, return reciprocal_divide(offset, cache->reciprocal_buffer_size); } +static inline int objs_per_slab_page(const struct kmem_cache *cache, + const struct page *page) +{ + return cache->num; +} + #endif /* _LINUX_SLAB_DEF_H */ diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 30e91c83d401..f87302dcfe8c 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -198,4 +198,9 @@ static inline unsigned int obj_to_index(const struct kmem_cache *cache, return __obj_to_index(cache, page_address(page), obj); } +static inline int objs_per_slab_page(const struct kmem_cache *cache, + const struct page *page) +{ + return page->objects; +} #endif /* _LINUX_SLUB_DEF_H */ diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 1cc784556e05..e6cd4c0d44d1 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c 
@@ -569,10 +569,21 @@ ino_t page_cgroup_ino(struct page *page) unsigned long ino = 0; rcu_read_lock(); - if (PageSlab(page) && !PageTail(page)) + if (PageSlab(page) && !PageTail(page)) { memcg = memcg_from_slab_page(page); - else - memcg = READ_ONCE(page->mem_cgroup); + } else { + memcg = page->mem_cgroup; + + /* + * The lowest bit set means that memcg isn't a valid + * memcg pointer, but a obj_cgroups pointer. + * In this case the page is shared and doesn't belong + * to any specific memory cgroup. + */ + if ((unsigned long) memcg & 0x1UL) + memcg = NULL; + } + while (memcg && !(memcg->css.flags & CSS_ONLINE)) memcg = parent_mem_cgroup(memcg); if (memcg) diff --git a/mm/slab.h b/mm/slab.h index 161ca34acb01..9f78e3167f19 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -109,6 +109,7 @@ struct memcg_cache_params { #include #include #include +#include /* * State of the slab allocator. @@ -348,6 +349,18 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s) return s->memcg_params.root_cache; } +static inline struct obj_cgroup **page_obj_cgroups(struct page *page) +{ + /* + * page->mem_cgroup and page->obj_cgroups are sharing the same + * space. To distinguish between them in case we don't know for sure + * that the page is a slab page (e.g. page_cgroup_ino()), let's + * always set the lowest bit of obj_cgroups. + */ + return (struct obj_cgroup **) + ((unsigned long)page->obj_cgroups & ~0x1UL); +} + /* * Expects a pointer to a slab page. Please note, that PageSlab() check * isn't sufficient, as it returns true also for tail compound slab pages, @@ -435,6 +448,28 @@ static __always_inline void memcg_uncharge_slab(struct page *page, int order, percpu_ref_put_many(&s->memcg_params.refcnt, nr_pages); } +static inline int memcg_alloc_page_obj_cgroups(struct page *page, + struct kmem_cache *s, gfp_t gfp) +{ + unsigned int objects = objs_per_slab_page(s, page); + void *vec; + + vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp, + page_to_nid(page)); + if (!vec) + return -ENOMEM; + + kmemleak_not_leak(vec); + page->obj_cgroups = (struct obj_cgroup **) ((unsigned long)vec | 0x1UL); + return 0; +} + +static inline void memcg_free_page_obj_cgroups(struct page *page) +{ + kfree(page_obj_cgroups(page)); + page->obj_cgroups = NULL; +} + extern void slab_init_memcg_params(struct kmem_cache *); extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg); @@ -484,6 +519,16 @@ static inline void memcg_uncharge_slab(struct page *page, int order, { } +static inline int memcg_alloc_page_obj_cgroups(struct page *page, + struct kmem_cache *s, gfp_t gfp) +{ + return 0; +} + +static inline void memcg_free_page_obj_cgroups(struct page *page) +{ +} + static inline void slab_init_memcg_params(struct kmem_cache *s) { } @@ -510,12 +555,18 @@ static __always_inline int charge_slab_page(struct page *page, gfp_t gfp, int order, struct kmem_cache *s) { + int ret; + if (is_root_cache(s)) { mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s), PAGE_SIZE << order); return 0; } + ret = memcg_alloc_page_obj_cgroups(page, s, gfp); + if (ret) + return ret; + return memcg_charge_slab(page, gfp, order, s); } @@ -528,6 +579,7 @@ static __always_inline void uncharge_slab_page(struct page *page, int order, return; } + memcg_free_page_obj_cgroups(page); memcg_uncharge_slab(page, order, s); } -- cgit v1.2.3 From 964d4bd370d559d9bd8e4abc139e85d2753956fb Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 6 Aug 2020 23:20:56 -0700 Subject: mm: memcg/slab: save obj_cgroup for non-root slab 
objects Store the obj_cgroup pointer in the corresponding place of page->obj_cgroups for each allocated non-root slab object. Make sure that each allocated object holds a reference to obj_cgroup. The objcg pointer is obtained by dereferencing memcg->objcg in memcg_kmem_get_cache() and is passed from the pre_alloc_hook to the post_alloc_hook. In the case of successful allocation(s) it is then stored in the page->obj_cgroups vector. The objcg-obtaining part looks a bit bulky now, but it will be simplified by the next commits in the series. Signed-off-by: Roman Gushchin Signed-off-by: Andrew Morton Reviewed-by: Vlastimil Babka Reviewed-by: Shakeel Butt Cc: Christoph Lameter Cc: Johannes Weiner Cc: Michal Hocko Cc: Tejun Heo Link: http://lkml.kernel.org/r/20200623174037.3951353-9-guro@fb.com Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 3 ++- mm/memcontrol.c | 14 +++++++++-- mm/slab.c | 18 ++++++++------ mm/slab.h | 60 +++++++++++++++++++++++++++++++++++++++++----- mm/slub.c | 14 +++++++---- 5 files changed, 88 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index f2f9d5d6b7d1..b845e908e76e 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1404,7 +1404,8 @@ static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg, } #endif -struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep); +struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep, + struct obj_cgroup **objcgp); void memcg_kmem_put_cache(struct kmem_cache *cachep); #ifdef CONFIG_MEMCG_KMEM diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e6cd4c0d44d1..ab96a120e630 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2973,7 +2973,8 @@ static inline bool memcg_kmem_bypass(void) * done with it, memcg_kmem_put_cache() must be called to release the * reference.
*/ -struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep) +struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep, + struct obj_cgroup **objcgp) { struct mem_cgroup *memcg; struct kmem_cache *memcg_cachep; @@ -3029,8 +3030,17 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep) */ if (unlikely(!memcg_cachep)) memcg_schedule_kmem_cache_create(memcg, cachep); - else if (percpu_ref_tryget(&memcg_cachep->memcg_params.refcnt)) + else if (percpu_ref_tryget(&memcg_cachep->memcg_params.refcnt)) { + struct obj_cgroup *objcg = rcu_dereference(memcg->objcg); + + if (!objcg || !obj_cgroup_tryget(objcg)) { + percpu_ref_put(&memcg_cachep->memcg_params.refcnt); + goto out_unlock; + } + + *objcgp = objcg; cachep = memcg_cachep; + } out_unlock: rcu_read_unlock(); return cachep; diff --git a/mm/slab.c b/mm/slab.c index fa31cbb76124..1e90b67735aa 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3228,9 +3228,10 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, unsigned long save_flags; void *ptr; int slab_node = numa_mem_id(); + struct obj_cgroup *objcg = NULL; flags &= gfp_allowed_mask; - cachep = slab_pre_alloc_hook(cachep, flags); + cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags); if (unlikely(!cachep)) return NULL; @@ -3266,7 +3267,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr) memset(ptr, 0, cachep->object_size); - slab_post_alloc_hook(cachep, flags, 1, &ptr); + slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr); return ptr; } @@ -3307,9 +3308,10 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller) { unsigned long save_flags; void *objp; + struct obj_cgroup *objcg = NULL; flags &= gfp_allowed_mask; - cachep = slab_pre_alloc_hook(cachep, flags); + cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags); if (unlikely(!cachep)) return NULL; @@ -3323,7 +3325,7 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller) if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp) memset(objp, 0, cachep->object_size); - slab_post_alloc_hook(cachep, flags, 1, &objp); + slab_post_alloc_hook(cachep, objcg, flags, 1, &objp); return objp; } @@ -3450,6 +3452,7 @@ void ___cache_free(struct kmem_cache *cachep, void *objp, memset(objp, 0, cachep->object_size); kmemleak_free_recursive(objp, cachep->flags); objp = cache_free_debugcheck(cachep, objp, caller); + memcg_slab_free_hook(cachep, virt_to_head_page(objp), objp); /* * Skip calling cache_free_alien() when the platform is not numa. @@ -3515,8 +3518,9 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p) { size_t i; + struct obj_cgroup *objcg = NULL; - s = slab_pre_alloc_hook(s, flags); + s = slab_pre_alloc_hook(s, &objcg, size, flags); if (!s) return 0; @@ -3539,13 +3543,13 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, for (i = 0; i < size; i++) memset(p[i], 0, s->object_size); - slab_post_alloc_hook(s, flags, size, p); + slab_post_alloc_hook(s, objcg, flags, size, p); /* FIXME: Trace call missing. 
Christoph would like a bulk variant */ return size; error: local_irq_enable(); cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_); - slab_post_alloc_hook(s, flags, i, p); + slab_post_alloc_hook(s, objcg, flags, i, p); __kmem_cache_free_bulk(s, i, p); return 0; } diff --git a/mm/slab.h b/mm/slab.h index 9f78e3167f19..d4f9fc93546f 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -470,6 +470,41 @@ static inline void memcg_free_page_obj_cgroups(struct page *page) page->obj_cgroups = NULL; } +static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s, + struct obj_cgroup *objcg, + size_t size, void **p) +{ + struct page *page; + unsigned long off; + size_t i; + + for (i = 0; i < size; i++) { + if (likely(p[i])) { + page = virt_to_head_page(p[i]); + off = obj_to_index(s, page, p[i]); + obj_cgroup_get(objcg); + page_obj_cgroups(page)[off] = objcg; + } + } + obj_cgroup_put(objcg); + memcg_kmem_put_cache(s); +} + +static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page, + void *p) +{ + struct obj_cgroup *objcg; + unsigned int off; + + if (!memcg_kmem_enabled() || is_root_cache(s)) + return; + + off = obj_to_index(s, page, p); + objcg = page_obj_cgroups(page)[off]; + page_obj_cgroups(page)[off] = NULL; + obj_cgroup_put(objcg); +} + extern void slab_init_memcg_params(struct kmem_cache *); extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg); @@ -529,6 +564,17 @@ static inline void memcg_free_page_obj_cgroups(struct page *page) { } +static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s, + struct obj_cgroup *objcg, + size_t size, void **p) +{ +} + +static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page, + void *p) +{ +} + static inline void slab_init_memcg_params(struct kmem_cache *s) { } @@ -631,7 +677,8 @@ static inline size_t slab_ksize(const struct kmem_cache *s) } static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, - gfp_t flags) + struct obj_cgroup **objcgp, + size_t size, gfp_t flags) { flags &= gfp_allowed_mask; @@ -645,13 +692,14 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, if (memcg_kmem_enabled() && ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT))) - return memcg_kmem_get_cache(s); + return memcg_kmem_get_cache(s, objcgp); return s; } -static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, - size_t size, void **p) +static inline void slab_post_alloc_hook(struct kmem_cache *s, + struct obj_cgroup *objcg, + gfp_t flags, size_t size, void **p) { size_t i; @@ -663,8 +711,8 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, s->flags, flags); } - if (memcg_kmem_enabled()) - memcg_kmem_put_cache(s); + if (memcg_kmem_enabled() && !is_root_cache(s)) + memcg_slab_post_alloc_hook(s, objcg, size, p); } #ifndef CONFIG_SLOB diff --git a/mm/slub.c b/mm/slub.c index 2a3075538f26..47e63b1100d4 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2817,8 +2817,9 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s, struct kmem_cache_cpu *c; struct page *page; unsigned long tid; + struct obj_cgroup *objcg = NULL; - s = slab_pre_alloc_hook(s, gfpflags); + s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags); if (!s) return NULL; redo: @@ -2894,7 +2895,7 @@ redo: if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object) memset(object, 0, s->object_size); - slab_post_alloc_hook(s, gfpflags, 1, &object); + slab_post_alloc_hook(s, objcg, gfpflags, 1, &object); return object; } @@ -3099,6 +3100,8 @@ static 
__always_inline void do_slab_free(struct kmem_cache *s, void *tail_obj = tail ? : head; struct kmem_cache_cpu *c; unsigned long tid; + + memcg_slab_free_hook(s, page, head); redo: /* * Determine the currently cpus per cpu slab. @@ -3278,9 +3281,10 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, { struct kmem_cache_cpu *c; int i; + struct obj_cgroup *objcg = NULL; /* memcg and kmem_cache debug support */ - s = slab_pre_alloc_hook(s, flags); + s = slab_pre_alloc_hook(s, &objcg, size, flags); if (unlikely(!s)) return false; /* @@ -3334,11 +3338,11 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, } /* memcg and kmem_cache debug support */ - slab_post_alloc_hook(s, flags, size, p); + slab_post_alloc_hook(s, objcg, flags, size, p); return i; error: local_irq_enable(); - slab_post_alloc_hook(s, flags, i, p); + slab_post_alloc_hook(s, objcg, flags, i, p); __kmem_cache_free_bulk(s, i, p); return 0; } -- cgit v1.2.3 From 0f876e4dc55db5fafef774917fd66e1373c0f390 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 6 Aug 2020 23:21:06 -0700 Subject: mm: memcg/slab: move memcg_kmem_bypass() to memcontrol.h To make the memcg_kmem_bypass() function available outside of the memcontrol.c, let's move it to memcontrol.h. The function is small and nicely fits into static inline sort of functions. It will be used from the slab code. Signed-off-by: Roman Gushchin Signed-off-by: Andrew Morton Reviewed-by: Vlastimil Babka Reviewed-by: Shakeel Butt Cc: Christoph Lameter Cc: Johannes Weiner Cc: Michal Hocko Cc: Tejun Heo Link: http://lkml.kernel.org/r/20200623174037.3951353-12-guro@fb.com Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 12 ++++++++++++ mm/memcontrol.c | 12 ------------ 2 files changed, 12 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index b845e908e76e..83e2858aecf2 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1440,6 +1440,18 @@ static inline bool memcg_kmem_enabled(void) return static_branch_unlikely(&memcg_kmem_enabled_key); } +static inline bool memcg_kmem_bypass(void) +{ + if (in_interrupt()) + return true; + + /* Allow remote memcg charging in kthread contexts. */ + if ((!current->mm || (current->flags & PF_KTHREAD)) && + !current->active_memcg) + return true; + return false; +} + static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) { diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 0356e05bc6e6..5cb2a588cc10 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2945,18 +2945,6 @@ static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, queue_work(memcg_kmem_cache_wq, &cw->work); } -static inline bool memcg_kmem_bypass(void) -{ - if (in_interrupt()) - return true; - - /* Allow remote memcg charging in kthread contexts. */ - if ((!current->mm || (current->flags & PF_KTHREAD)) && - !current->active_memcg) - return true; - return false; -} - /** * memcg_kmem_get_cache: select the correct per-memcg cache for allocation * @cachep: the original global kmem cache -- cgit v1.2.3 From 9855609bde03e2472b99a95e869d29ee1e78a751 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 6 Aug 2020 23:21:10 -0700 Subject: mm: memcg/slab: use a single set of kmem_caches for all accounted allocations This is fairly big but mostly red patch, which makes all accounted slab allocations use a single set of kmem_caches instead of creating a separate set for each memory cgroup. 
Because the number of non-root kmem_caches is now capped by the number of root kmem_caches, there is no need to shrink or destroy them prematurely. They can simply be destroyed together with their root counterparts. This allows us to dramatically simplify the management of non-root kmem_caches and delete a ton of code. This patch performs the following changes: 1) introduces memcg_params.memcg_cache pointer to represent the kmem_cache which will be used for all non-root allocations 2) reuses the existing memcg kmem_cache creation mechanism to create the memcg kmem_cache on the first allocation attempt 3) memcg kmem_caches are named <root cache name>-memcg, e.g. dentry-memcg 4) simplifies memcg_kmem_get_cache() to just return the memcg kmem_cache or schedule its creation and return the root cache 5) removes almost all non-root kmem_cache management code (separate refcounter, reparenting, shrinking, etc) 6) makes slab debugfs display the root_mem_cgroup css id and never show the :dead and :deact flags in the memcg_slabinfo attribute. Following patches in the series will simplify the kmem_cache creation. Signed-off-by: Roman Gushchin Signed-off-by: Andrew Morton Reviewed-by: Vlastimil Babka Reviewed-by: Shakeel Butt Cc: Christoph Lameter Cc: Johannes Weiner Cc: Michal Hocko Cc: Tejun Heo Link: http://lkml.kernel.org/r/20200623174037.3951353-13-guro@fb.com Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 5 +- include/linux/slab.h | 5 +- mm/memcontrol.c | 163 ++------------ mm/slab.c | 16 +- mm/slab.h | 146 +++++--------- mm/slab_common.c | 459 +++++----------------------------------- mm/slub.c | 38 +--- 7 files changed, 134 insertions(+), 698 deletions(-) (limited to 'include') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 83e2858aecf2..11fd18b3d6c6 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -317,7 +317,6 @@ struct mem_cgroup { /* Index in the kmem_cache->memcg_params.memcg_caches array */ int kmemcg_id; enum memcg_kmem_state kmem_state; - struct list_head kmem_caches; struct obj_cgroup __rcu *objcg; struct list_head objcg_list; /* list of inherited objcgs */ #endif @@ -1404,9 +1403,7 @@ static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg, } #endif -struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep, - struct obj_cgroup **objcgp); -void memcg_kmem_put_cache(struct kmem_cache *cachep); +struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep); #ifdef CONFIG_MEMCG_KMEM int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp, diff --git a/include/linux/slab.h b/include/linux/slab.h index 0884d82c55ee..8b1f91e320f9 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -155,8 +155,7 @@ struct kmem_cache *kmem_cache_create_usercopy(const char *name, void kmem_cache_destroy(struct kmem_cache *); int kmem_cache_shrink(struct kmem_cache *); -void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *); -void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *); +void memcg_create_kmem_cache(struct kmem_cache *cachep); /* * Please use this macro to create slab caches. Simply specify the @@ -580,8 +579,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) return __kmalloc_node(size, flags, node); } -int memcg_update_all_caches(int num_memcgs); - /** * kmalloc_array - allocate memory for an array. * @n: number of elements.
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 5cb2a588cc10..874704c4a48a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -350,7 +350,7 @@ static void memcg_reparent_objcgs(struct mem_cgroup *memcg, } /* - * This will be the memcg's index in each cache's ->memcg_params.memcg_caches. + * This will be used as a shrinker list's index. * The main reason for not using cgroup id for this: * this works better in sparse environments, where we have a lot of memcgs, * but only a few kmem-limited. Or also, if we have, for instance, 200 @@ -569,20 +569,16 @@ ino_t page_cgroup_ino(struct page *page) unsigned long ino = 0; rcu_read_lock(); - if (PageSlab(page) && !PageTail(page)) { - memcg = memcg_from_slab_page(page); - } else { - memcg = page->mem_cgroup; + memcg = page->mem_cgroup; - /* - * The lowest bit set means that memcg isn't a valid - * memcg pointer, but a obj_cgroups pointer. - * In this case the page is shared and doesn't belong - * to any specific memory cgroup. - */ - if ((unsigned long) memcg & 0x1UL) - memcg = NULL; - } + /* + * The lowest bit set means that memcg isn't a valid + * memcg pointer, but a obj_cgroups pointer. + * In this case the page is shared and doesn't belong + * to any specific memory cgroup. + */ + if ((unsigned long) memcg & 0x1UL) + memcg = NULL; while (memcg && !(memcg->css.flags & CSS_ONLINE)) memcg = parent_mem_cgroup(memcg); @@ -2822,12 +2818,18 @@ struct mem_cgroup *mem_cgroup_from_obj(void *p) page = virt_to_head_page(p); /* - * Slab pages don't have page->mem_cgroup set because corresponding - * kmem caches can be reparented during the lifetime. That's why - * memcg_from_slab_page() should be used instead. + * Slab objects are accounted individually, not per-page. + * Memcg membership data for each individual object is saved in + * the page->obj_cgroups. */ - if (PageSlab(page)) - return memcg_from_slab_page(page); + if (page_has_obj_cgroups(page)) { + struct obj_cgroup *objcg; + unsigned int off; + + off = obj_to_index(page->slab_cache, page, p); + objcg = page_obj_cgroups(page)[off]; + return obj_cgroup_memcg(objcg); + } /* All other pages use page->mem_cgroup */ return page->mem_cgroup; @@ -2882,9 +2884,7 @@ static int memcg_alloc_cache_id(void) else if (size > MEMCG_CACHES_MAX_SIZE) size = MEMCG_CACHES_MAX_SIZE; - err = memcg_update_all_caches(size); - if (!err) - err = memcg_update_all_list_lrus(size); + err = memcg_update_all_list_lrus(size); if (!err) memcg_nr_cache_ids = size; @@ -2903,7 +2903,6 @@ static void memcg_free_cache_id(int id) } struct memcg_kmem_cache_create_work { - struct mem_cgroup *memcg; struct kmem_cache *cachep; struct work_struct work; }; @@ -2912,33 +2911,24 @@ static void memcg_kmem_cache_create_func(struct work_struct *w) { struct memcg_kmem_cache_create_work *cw = container_of(w, struct memcg_kmem_cache_create_work, work); - struct mem_cgroup *memcg = cw->memcg; struct kmem_cache *cachep = cw->cachep; - memcg_create_kmem_cache(memcg, cachep); + memcg_create_kmem_cache(cachep); - css_put(&memcg->css); kfree(cw); } /* * Enqueue the creation of a per-memcg kmem_cache. 
*/ -static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, - struct kmem_cache *cachep) +static void memcg_schedule_kmem_cache_create(struct kmem_cache *cachep) { struct memcg_kmem_cache_create_work *cw; - if (!css_tryget_online(&memcg->css)) - return; - cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN); - if (!cw) { - css_put(&memcg->css); + if (!cw) return; - } - cw->memcg = memcg; cw->cachep = cachep; INIT_WORK(&cw->work, memcg_kmem_cache_create_func); @@ -2946,102 +2936,26 @@ static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, } /** - * memcg_kmem_get_cache: select the correct per-memcg cache for allocation + * memcg_kmem_get_cache: select memcg or root cache for allocation * @cachep: the original global kmem cache * * Return the kmem_cache we're supposed to use for a slab allocation. - * We try to use the current memcg's version of the cache. * * If the cache does not exist yet, if we are the first user of it, we * create it asynchronously in a workqueue and let the current allocation * go through with the original cache. - * - * This function takes a reference to the cache it returns to assure it - * won't get destroyed while we are working with it. Once the caller is - * done with it, memcg_kmem_put_cache() must be called to release the - * reference. */ -struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep, - struct obj_cgroup **objcgp) +struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep) { - struct mem_cgroup *memcg; struct kmem_cache *memcg_cachep; - struct memcg_cache_array *arr; - int kmemcg_id; - VM_BUG_ON(!is_root_cache(cachep)); - - if (memcg_kmem_bypass()) + memcg_cachep = READ_ONCE(cachep->memcg_params.memcg_cache); + if (unlikely(!memcg_cachep)) { + memcg_schedule_kmem_cache_create(cachep); return cachep; - - rcu_read_lock(); - - if (unlikely(current->active_memcg)) - memcg = current->active_memcg; - else - memcg = mem_cgroup_from_task(current); - - if (!memcg || memcg == root_mem_cgroup) - goto out_unlock; - - kmemcg_id = READ_ONCE(memcg->kmemcg_id); - if (kmemcg_id < 0) - goto out_unlock; - - arr = rcu_dereference(cachep->memcg_params.memcg_caches); - - /* - * Make sure we will access the up-to-date value. The code updating - * memcg_caches issues a write barrier to match the data dependency - * barrier inside READ_ONCE() (see memcg_create_kmem_cache()). - */ - memcg_cachep = READ_ONCE(arr->entries[kmemcg_id]); - - /* - * If we are in a safe context (can wait, and not in interrupt - * context), we could be be predictable and return right away. - * This would guarantee that the allocation being performed - * already belongs in the new cache. - * - * However, there are some clashes that can arrive from locking. - * For instance, because we acquire the slab_mutex while doing - * memcg_create_kmem_cache, this means no further allocation - * could happen with the slab_mutex held. So it's better to - * defer everything. - * - * If the memcg is dying or memcg_cache is about to be released, - * don't bother creating new kmem_caches. Because memcg_cachep - * is ZEROed as the fist step of kmem offlining, we don't need - * percpu_ref_tryget_live() here. css_tryget_online() check in - * memcg_schedule_kmem_cache_create() will prevent us from - * creation of a new kmem_cache. 
- */ - if (unlikely(!memcg_cachep)) - memcg_schedule_kmem_cache_create(memcg, cachep); - else if (percpu_ref_tryget(&memcg_cachep->memcg_params.refcnt)) { - struct obj_cgroup *objcg = rcu_dereference(memcg->objcg); - - if (!objcg || !obj_cgroup_tryget(objcg)) { - percpu_ref_put(&memcg_cachep->memcg_params.refcnt); - goto out_unlock; - } - - *objcgp = objcg; - cachep = memcg_cachep; } -out_unlock: - rcu_read_unlock(); - return cachep; -} -/** - * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache - * @cachep: the cache returned by memcg_kmem_get_cache - */ -void memcg_kmem_put_cache(struct kmem_cache *cachep) -{ - if (!is_root_cache(cachep)) - percpu_ref_put(&cachep->memcg_params.refcnt); + return memcg_cachep; } /** @@ -3731,7 +3645,6 @@ static int memcg_online_kmem(struct mem_cgroup *memcg) */ memcg->kmemcg_id = memcg_id; memcg->kmem_state = KMEM_ONLINE; - INIT_LIST_HEAD(&memcg->kmem_caches); return 0; } @@ -3744,22 +3657,13 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg) if (memcg->kmem_state != KMEM_ONLINE) return; - /* - * Clear the online state before clearing memcg_caches array - * entries. The slab_mutex in memcg_deactivate_kmem_caches() - * guarantees that no cache will be created for this cgroup - * after we are done (see memcg_create_kmem_cache()). - */ + memcg->kmem_state = KMEM_ALLOCATED; parent = parent_mem_cgroup(memcg); if (!parent) parent = root_mem_cgroup; - /* - * Deactivate and reparent kmem_caches and objcgs. - */ - memcg_deactivate_kmem_caches(memcg, parent); memcg_reparent_objcgs(memcg, parent); kmemcg_id = memcg->kmemcg_id; @@ -5384,9 +5288,6 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) /* The following stuff does not apply to the root */ if (!parent) { -#ifdef CONFIG_MEMCG_KMEM - INIT_LIST_HEAD(&memcg->kmem_caches); -#endif root_mem_cgroup = memcg; return &memcg->css; } diff --git a/mm/slab.c b/mm/slab.c index 1e90b67735aa..0dd6956585dc 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1249,7 +1249,7 @@ void __init kmem_cache_init(void) nr_node_ids * sizeof(struct kmem_cache_node *), SLAB_HWCACHE_ALIGN, 0, 0); list_add(&kmem_cache->list, &slab_caches); - memcg_link_cache(kmem_cache, NULL); + memcg_link_cache(kmem_cache); slab_state = PARTIAL; /* @@ -2253,17 +2253,6 @@ int __kmem_cache_shrink(struct kmem_cache *cachep) return (ret ? 1 : 0); } -#ifdef CONFIG_MEMCG -void __kmemcg_cache_deactivate(struct kmem_cache *cachep) -{ - __kmem_cache_shrink(cachep); -} - -void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s) -{ -} -#endif - int __kmem_cache_shutdown(struct kmem_cache *cachep) { return __kmem_cache_shrink(cachep); @@ -3872,7 +3861,8 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit, return ret; lockdep_assert_held(&slab_mutex); - for_each_memcg_cache(c, cachep) { + c = memcg_cache(cachep); + if (c) { /* return value determined by the root cache only */ __do_tune_cpucache(c, limit, batchcount, shared, gfp); } diff --git a/mm/slab.h b/mm/slab.h index 1f067e8bc377..e716b80befc2 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -32,66 +32,25 @@ struct kmem_cache { #else /* !CONFIG_SLOB */ -struct memcg_cache_array { - struct rcu_head rcu; - struct kmem_cache *entries[0]; -}; - /* * This is the main placeholder for memcg-related information in kmem caches. - * Both the root cache and the child caches will have it. For the root cache, - * this will hold a dynamically allocated array large enough to hold - * information about the currently limited memcgs in the system. 
To allow the - * array to be accessed without taking any locks, on relocation we free the old - * version only after a grace period. - * - * Root and child caches hold different metadata. + * Both the root cache and the child cache will have it. Some fields are used + * in both cases, other are specific to root caches. * * @root_cache: Common to root and child caches. NULL for root, pointer to * the root cache for children. * * The following fields are specific to root caches. * - * @memcg_caches: kmemcg ID indexed table of child caches. This table is - * used to index child cachces during allocation and cleared - * early during shutdown. - * - * @root_caches_node: List node for slab_root_caches list. - * - * @children: List of all child caches. While the child caches are also - * reachable through @memcg_caches, a child cache remains on - * this list until it is actually destroyed. - * - * The following fields are specific to child caches. - * - * @memcg: Pointer to the memcg this cache belongs to. - * - * @children_node: List node for @root_cache->children list. - * - * @kmem_caches_node: List node for @memcg->kmem_caches list. + * @memcg_cache: pointer to memcg kmem cache, used by all non-root memory + * cgroups. + * @root_caches_node: list node for slab_root_caches list. */ struct memcg_cache_params { struct kmem_cache *root_cache; - union { - struct { - struct memcg_cache_array __rcu *memcg_caches; - struct list_head __root_caches_node; - struct list_head children; - bool dying; - }; - struct { - struct mem_cgroup *memcg; - struct list_head children_node; - struct list_head kmem_caches_node; - struct percpu_ref refcnt; - - void (*work_fn)(struct kmem_cache *); - union { - struct rcu_head rcu_head; - struct work_struct work; - }; - }; - }; + + struct kmem_cache *memcg_cache; + struct list_head __root_caches_node; }; #endif /* CONFIG_SLOB */ @@ -236,8 +195,6 @@ bool __kmem_cache_empty(struct kmem_cache *); int __kmem_cache_shutdown(struct kmem_cache *); void __kmem_cache_release(struct kmem_cache *); int __kmem_cache_shrink(struct kmem_cache *); -void __kmemcg_cache_deactivate(struct kmem_cache *s); -void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s); void slab_kmem_cache_release(struct kmem_cache *); void kmem_cache_shrink_all(struct kmem_cache *s); @@ -311,14 +268,6 @@ static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t fla extern struct list_head slab_root_caches; #define root_caches_node memcg_params.__root_caches_node -/* - * Iterate over all memcg caches of the given root cache. The caller must hold - * slab_mutex. - */ -#define for_each_memcg_cache(iter, root) \ - list_for_each_entry(iter, &(root)->memcg_params.children, \ - memcg_params.children_node) - static inline bool is_root_cache(struct kmem_cache *s) { return !s->memcg_params.root_cache; @@ -349,6 +298,13 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s) return s->memcg_params.root_cache; } +static inline struct kmem_cache *memcg_cache(struct kmem_cache *s) +{ + if (is_root_cache(s)) + return s->memcg_params.memcg_cache; + return NULL; +} + static inline struct obj_cgroup **page_obj_cgroups(struct page *page) { /* @@ -361,25 +317,9 @@ static inline struct obj_cgroup **page_obj_cgroups(struct page *page) ((unsigned long)page->obj_cgroups & ~0x1UL); } -/* - * Expects a pointer to a slab page. Please note, that PageSlab() check - * isn't sufficient, as it returns true also for tail compound slab pages, - * which do not have slab_cache pointer set. 
- * So this function assumes that the page can pass PageSlab() && !PageTail() - * check. - * - * The kmem_cache can be reparented asynchronously. The caller must ensure - * the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex. - */ -static inline struct mem_cgroup *memcg_from_slab_page(struct page *page) +static inline bool page_has_obj_cgroups(struct page *page) { - struct kmem_cache *s; - - s = READ_ONCE(page->slab_cache); - if (s && !is_root_cache(s)) - return READ_ONCE(s->memcg_params.memcg); - - return NULL; + return ((unsigned long)page->obj_cgroups & 0x1UL); } static inline int memcg_alloc_page_obj_cgroups(struct page *page, @@ -418,17 +358,25 @@ static inline struct kmem_cache *memcg_slab_pre_alloc_hook(struct kmem_cache *s, size_t objects, gfp_t flags) { struct kmem_cache *cachep; + struct obj_cgroup *objcg; + + if (memcg_kmem_bypass()) + return s; - cachep = memcg_kmem_get_cache(s, objcgp); + cachep = memcg_kmem_get_cache(s); if (is_root_cache(cachep)) return s; - if (obj_cgroup_charge(*objcgp, flags, objects * obj_full_size(s))) { - obj_cgroup_put(*objcgp); - memcg_kmem_put_cache(cachep); + objcg = get_obj_cgroup_from_current(); + if (!objcg) + return s; + + if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) { + obj_cgroup_put(objcg); cachep = NULL; } + *objcgp = objcg; return cachep; } @@ -467,7 +415,6 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s, } } obj_cgroup_put(objcg); - memcg_kmem_put_cache(s); } static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page, @@ -491,7 +438,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page, } extern void slab_init_memcg_params(struct kmem_cache *); -extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg); +extern void memcg_link_cache(struct kmem_cache *s); #else /* CONFIG_MEMCG_KMEM */ @@ -499,9 +446,6 @@ extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg); #define slab_root_caches slab_caches #define root_caches_node list -#define for_each_memcg_cache(iter, root) \ - for ((void)(iter), (void)(root); 0; ) - static inline bool is_root_cache(struct kmem_cache *s) { return true; @@ -523,7 +467,17 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s) return s; } -static inline struct mem_cgroup *memcg_from_slab_page(struct page *page) +static inline struct kmem_cache *memcg_cache(struct kmem_cache *s) +{ + return NULL; +} + +static inline bool page_has_obj_cgroups(struct page *page) +{ + return false; +} + +static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr) { return NULL; } @@ -560,8 +514,7 @@ static inline void slab_init_memcg_params(struct kmem_cache *s) { } -static inline void memcg_link_cache(struct kmem_cache *s, - struct mem_cgroup *memcg) +static inline void memcg_link_cache(struct kmem_cache *s) { } @@ -582,17 +535,14 @@ static __always_inline int charge_slab_page(struct page *page, gfp_t gfp, int order, struct kmem_cache *s) { -#ifdef CONFIG_MEMCG_KMEM if (memcg_kmem_enabled() && !is_root_cache(s)) { int ret; ret = memcg_alloc_page_obj_cgroups(page, s, gfp); if (ret) return ret; - - percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order); } -#endif + mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s), PAGE_SIZE << order); return 0; @@ -601,12 +551,9 @@ static __always_inline int charge_slab_page(struct page *page, static __always_inline void uncharge_slab_page(struct page *page, int order, struct kmem_cache *s) { -#ifdef CONFIG_MEMCG_KMEM - if 
(memcg_kmem_enabled() && !is_root_cache(s)) { + if (memcg_kmem_enabled() && !is_root_cache(s)) memcg_free_page_obj_cgroups(page); - percpu_ref_put_many(&s->memcg_params.refcnt, 1 << order); - } -#endif + mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s), -(PAGE_SIZE << order)); } @@ -749,9 +696,6 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node) void *slab_start(struct seq_file *m, loff_t *pos); void *slab_next(struct seq_file *m, void *p, loff_t *pos); void slab_stop(struct seq_file *m, void *p); -void *memcg_slab_start(struct seq_file *m, loff_t *pos); -void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos); -void memcg_slab_stop(struct seq_file *m, void *p); int memcg_slab_show(struct seq_file *m, void *p); #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG) diff --git a/mm/slab_common.c b/mm/slab_common.c index f86431d0de73..e752132eb64d 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -133,141 +133,36 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr, #ifdef CONFIG_MEMCG_KMEM LIST_HEAD(slab_root_caches); -static DEFINE_SPINLOCK(memcg_kmem_wq_lock); - -static void kmemcg_cache_shutdown(struct percpu_ref *percpu_ref); void slab_init_memcg_params(struct kmem_cache *s) { s->memcg_params.root_cache = NULL; - RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL); - INIT_LIST_HEAD(&s->memcg_params.children); - s->memcg_params.dying = false; + s->memcg_params.memcg_cache = NULL; } -static int init_memcg_params(struct kmem_cache *s, - struct kmem_cache *root_cache) +static void init_memcg_params(struct kmem_cache *s, + struct kmem_cache *root_cache) { - struct memcg_cache_array *arr; - - if (root_cache) { - int ret = percpu_ref_init(&s->memcg_params.refcnt, - kmemcg_cache_shutdown, - 0, GFP_KERNEL); - if (ret) - return ret; - + if (root_cache) s->memcg_params.root_cache = root_cache; - INIT_LIST_HEAD(&s->memcg_params.children_node); - INIT_LIST_HEAD(&s->memcg_params.kmem_caches_node); - return 0; - } - - slab_init_memcg_params(s); - - if (!memcg_nr_cache_ids) - return 0; - - arr = kvzalloc(sizeof(struct memcg_cache_array) + - memcg_nr_cache_ids * sizeof(void *), - GFP_KERNEL); - if (!arr) - return -ENOMEM; - - RCU_INIT_POINTER(s->memcg_params.memcg_caches, arr); - return 0; -} - -static void destroy_memcg_params(struct kmem_cache *s) -{ - if (is_root_cache(s)) { - kvfree(rcu_access_pointer(s->memcg_params.memcg_caches)); - } else { - mem_cgroup_put(s->memcg_params.memcg); - WRITE_ONCE(s->memcg_params.memcg, NULL); - percpu_ref_exit(&s->memcg_params.refcnt); - } -} - -static void free_memcg_params(struct rcu_head *rcu) -{ - struct memcg_cache_array *old; - - old = container_of(rcu, struct memcg_cache_array, rcu); - kvfree(old); -} - -static int update_memcg_params(struct kmem_cache *s, int new_array_size) -{ - struct memcg_cache_array *old, *new; - - new = kvzalloc(sizeof(struct memcg_cache_array) + - new_array_size * sizeof(void *), GFP_KERNEL); - if (!new) - return -ENOMEM; - - old = rcu_dereference_protected(s->memcg_params.memcg_caches, - lockdep_is_held(&slab_mutex)); - if (old) - memcpy(new->entries, old->entries, - memcg_nr_cache_ids * sizeof(void *)); - - rcu_assign_pointer(s->memcg_params.memcg_caches, new); - if (old) - call_rcu(&old->rcu, free_memcg_params); - return 0; + else + slab_init_memcg_params(s); } -int memcg_update_all_caches(int num_memcgs) +void memcg_link_cache(struct kmem_cache *s) { - struct kmem_cache *s; - int ret = 0; - - mutex_lock(&slab_mutex); - list_for_each_entry(s, 
&slab_root_caches, root_caches_node) { - ret = update_memcg_params(s, num_memcgs); - /* - * Instead of freeing the memory, we'll just leave the caches - * up to this point in an updated state. - */ - if (ret) - break; - } - mutex_unlock(&slab_mutex); - return ret; -} - -void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg) -{ - if (is_root_cache(s)) { + if (is_root_cache(s)) list_add(&s->root_caches_node, &slab_root_caches); - } else { - css_get(&memcg->css); - s->memcg_params.memcg = memcg; - list_add(&s->memcg_params.children_node, - &s->memcg_params.root_cache->memcg_params.children); - list_add(&s->memcg_params.kmem_caches_node, - &s->memcg_params.memcg->kmem_caches); - } } static void memcg_unlink_cache(struct kmem_cache *s) { - if (is_root_cache(s)) { + if (is_root_cache(s)) list_del(&s->root_caches_node); - } else { - list_del(&s->memcg_params.children_node); - list_del(&s->memcg_params.kmem_caches_node); - } } #else -static inline int init_memcg_params(struct kmem_cache *s, - struct kmem_cache *root_cache) -{ - return 0; -} - -static inline void destroy_memcg_params(struct kmem_cache *s) +static inline void init_memcg_params(struct kmem_cache *s, + struct kmem_cache *root_cache) { } @@ -328,14 +223,6 @@ int slab_unmergeable(struct kmem_cache *s) if (s->refcount < 0) return 1; -#ifdef CONFIG_MEMCG_KMEM - /* - * Skip the dying kmem_cache. - */ - if (s->memcg_params.dying) - return 1; -#endif - return 0; } @@ -390,7 +277,7 @@ static struct kmem_cache *create_cache(const char *name, unsigned int object_size, unsigned int align, slab_flags_t flags, unsigned int useroffset, unsigned int usersize, void (*ctor)(void *), - struct mem_cgroup *memcg, struct kmem_cache *root_cache) + struct kmem_cache *root_cache) { struct kmem_cache *s; int err; @@ -410,24 +297,20 @@ static struct kmem_cache *create_cache(const char *name, s->useroffset = useroffset; s->usersize = usersize; - err = init_memcg_params(s, root_cache); - if (err) - goto out_free_cache; - + init_memcg_params(s, root_cache); err = __kmem_cache_create(s, flags); if (err) goto out_free_cache; s->refcount = 1; list_add(&s->list, &slab_caches); - memcg_link_cache(s, memcg); + memcg_link_cache(s); out: if (err) return ERR_PTR(err); return s; out_free_cache: - destroy_memcg_params(s); kmem_cache_free(kmem_cache, s); goto out; } @@ -514,7 +397,7 @@ kmem_cache_create_usercopy(const char *name, s = create_cache(cache_name, size, calculate_alignment(flags, align, size), - flags, useroffset, usersize, ctor, NULL, NULL); + flags, useroffset, usersize, ctor, NULL); if (IS_ERR(s)) { err = PTR_ERR(s); kfree_const(cache_name); @@ -639,51 +522,27 @@ static int shutdown_cache(struct kmem_cache *s) #ifdef CONFIG_MEMCG_KMEM /* - * memcg_create_kmem_cache - Create a cache for a memory cgroup. - * @memcg: The memory cgroup the new cache is for. + * memcg_create_kmem_cache - Create a cache for non-root memory cgroups. * @root_cache: The parent of the new cache. * * This function attempts to create a kmem cache that will serve allocation - * requests going from @memcg to @root_cache. The new cache inherits properties - * from its parent. + * requests going all non-root memory cgroups to @root_cache. The new cache + * inherits properties from its parent. 
*/ -void memcg_create_kmem_cache(struct mem_cgroup *memcg, - struct kmem_cache *root_cache) +void memcg_create_kmem_cache(struct kmem_cache *root_cache) { - static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */ - struct cgroup_subsys_state *css = &memcg->css; - struct memcg_cache_array *arr; struct kmem_cache *s = NULL; char *cache_name; - int idx; get_online_cpus(); get_online_mems(); mutex_lock(&slab_mutex); - /* - * The memory cgroup could have been offlined while the cache - * creation work was pending. - */ - if (memcg->kmem_state != KMEM_ONLINE) + if (root_cache->memcg_params.memcg_cache) goto out_unlock; - idx = memcg_cache_id(memcg); - arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches, - lockdep_is_held(&slab_mutex)); - - /* - * Since per-memcg caches are created asynchronously on first - * allocation (see memcg_kmem_get_cache()), several threads can try to - * create the same cache, but only one of them may succeed. - */ - if (arr->entries[idx]) - goto out_unlock; - - cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf)); - cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name, - css->serial_nr, memcg_name_buf); + cache_name = kasprintf(GFP_KERNEL, "%s-memcg", root_cache->name); if (!cache_name) goto out_unlock; @@ -691,7 +550,7 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg, root_cache->align, root_cache->flags & CACHE_CREATE_MASK, root_cache->useroffset, root_cache->usersize, - root_cache->ctor, memcg, root_cache); + root_cache->ctor, root_cache); /* * If we could not create a memcg cache, do not complain, because * that's not critical at all as we can always proceed with the root @@ -708,7 +567,7 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg, * initialized. */ smp_wmb(); - arr->entries[idx] = s; + root_cache->memcg_params.memcg_cache = s; out_unlock: mutex_unlock(&slab_mutex); @@ -717,200 +576,18 @@ out_unlock: put_online_cpus(); } -static void kmemcg_workfn(struct work_struct *work) -{ - struct kmem_cache *s = container_of(work, struct kmem_cache, - memcg_params.work); - - get_online_cpus(); - get_online_mems(); - - mutex_lock(&slab_mutex); - s->memcg_params.work_fn(s); - mutex_unlock(&slab_mutex); - - put_online_mems(); - put_online_cpus(); -} - -static void kmemcg_rcufn(struct rcu_head *head) -{ - struct kmem_cache *s = container_of(head, struct kmem_cache, - memcg_params.rcu_head); - - /* - * We need to grab blocking locks. Bounce to ->work. The - * work item shares the space with the RCU head and can't be - * initialized earlier. 
- */ - INIT_WORK(&s->memcg_params.work, kmemcg_workfn); - queue_work(memcg_kmem_cache_wq, &s->memcg_params.work); -} - -static void kmemcg_cache_shutdown_fn(struct kmem_cache *s) -{ - WARN_ON(shutdown_cache(s)); -} - -static void kmemcg_cache_shutdown(struct percpu_ref *percpu_ref) -{ - struct kmem_cache *s = container_of(percpu_ref, struct kmem_cache, - memcg_params.refcnt); - unsigned long flags; - - spin_lock_irqsave(&memcg_kmem_wq_lock, flags); - if (s->memcg_params.root_cache->memcg_params.dying) - goto unlock; - - s->memcg_params.work_fn = kmemcg_cache_shutdown_fn; - INIT_WORK(&s->memcg_params.work, kmemcg_workfn); - queue_work(memcg_kmem_cache_wq, &s->memcg_params.work); - -unlock: - spin_unlock_irqrestore(&memcg_kmem_wq_lock, flags); -} - -static void kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s) -{ - __kmemcg_cache_deactivate_after_rcu(s); - percpu_ref_kill(&s->memcg_params.refcnt); -} - -static void kmemcg_cache_deactivate(struct kmem_cache *s) -{ - if (WARN_ON_ONCE(is_root_cache(s))) - return; - - __kmemcg_cache_deactivate(s); - s->flags |= SLAB_DEACTIVATED; - - /* - * memcg_kmem_wq_lock is used to synchronize memcg_params.dying - * flag and make sure that no new kmem_cache deactivation tasks - * are queued (see flush_memcg_workqueue() ). - */ - spin_lock_irq(&memcg_kmem_wq_lock); - if (s->memcg_params.root_cache->memcg_params.dying) - goto unlock; - - s->memcg_params.work_fn = kmemcg_cache_deactivate_after_rcu; - call_rcu(&s->memcg_params.rcu_head, kmemcg_rcufn); -unlock: - spin_unlock_irq(&memcg_kmem_wq_lock); -} - -void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg, - struct mem_cgroup *parent) -{ - int idx; - struct memcg_cache_array *arr; - struct kmem_cache *s, *c; - unsigned int nr_reparented; - - idx = memcg_cache_id(memcg); - - get_online_cpus(); - get_online_mems(); - - mutex_lock(&slab_mutex); - list_for_each_entry(s, &slab_root_caches, root_caches_node) { - arr = rcu_dereference_protected(s->memcg_params.memcg_caches, - lockdep_is_held(&slab_mutex)); - c = arr->entries[idx]; - if (!c) - continue; - - kmemcg_cache_deactivate(c); - arr->entries[idx] = NULL; - } - nr_reparented = 0; - list_for_each_entry(s, &memcg->kmem_caches, - memcg_params.kmem_caches_node) { - WRITE_ONCE(s->memcg_params.memcg, parent); - css_put(&memcg->css); - nr_reparented++; - } - if (nr_reparented) { - list_splice_init(&memcg->kmem_caches, - &parent->kmem_caches); - css_get_many(&parent->css, nr_reparented); - } - mutex_unlock(&slab_mutex); - - put_online_mems(); - put_online_cpus(); -} - static int shutdown_memcg_caches(struct kmem_cache *s) { - struct memcg_cache_array *arr; - struct kmem_cache *c, *c2; - LIST_HEAD(busy); - int i; - BUG_ON(!is_root_cache(s)); - /* - * First, shutdown active caches, i.e. caches that belong to online - * memory cgroups. - */ - arr = rcu_dereference_protected(s->memcg_params.memcg_caches, - lockdep_is_held(&slab_mutex)); - for_each_memcg_cache_index(i) { - c = arr->entries[i]; - if (!c) - continue; - if (shutdown_cache(c)) - /* - * The cache still has objects. Move it to a temporary - * list so as not to try to destroy it for a second - * time while iterating over inactive caches below. - */ - list_move(&c->memcg_params.children_node, &busy); - else - /* - * The cache is empty and will be destroyed soon. Clear - * the pointer to it in the memcg_caches array so that - * it will never be accessed even if the root cache - * stays alive. 
- */ - arr->entries[i] = NULL; - } - - /* - * Second, shutdown all caches left from memory cgroups that are now - * offline. - */ - list_for_each_entry_safe(c, c2, &s->memcg_params.children, - memcg_params.children_node) - shutdown_cache(c); - - list_splice(&busy, &s->memcg_params.children); + if (s->memcg_params.memcg_cache) + WARN_ON(shutdown_cache(s->memcg_params.memcg_cache)); - /* - * A cache being destroyed must be empty. In particular, this means - * that all per memcg caches attached to it must be empty too. - */ - if (!list_empty(&s->memcg_params.children)) - return -EBUSY; return 0; } -static void memcg_set_kmem_cache_dying(struct kmem_cache *s) -{ - spin_lock_irq(&memcg_kmem_wq_lock); - s->memcg_params.dying = true; - spin_unlock_irq(&memcg_kmem_wq_lock); -} - static void flush_memcg_workqueue(struct kmem_cache *s) { - /* - * SLAB and SLUB deactivate the kmem_caches through call_rcu. Make - * sure all registered rcu callbacks have been invoked. - */ - rcu_barrier(); - /* * SLAB and SLUB create memcg kmem_caches through workqueue and SLUB * deactivates the memcg kmem_caches through workqueue. Make sure all @@ -918,30 +595,21 @@ static void flush_memcg_workqueue(struct kmem_cache *s) */ if (likely(memcg_kmem_cache_wq)) flush_workqueue(memcg_kmem_cache_wq); - - /* - * If we're racing with children kmem_cache deactivation, it might - * take another rcu grace period to complete their destruction. - * At this moment the corresponding percpu_ref_kill() call should be - * done, but it might take another rcu grace period to complete - * switching to the atomic mode. - * Please, note that we check without grabbing the slab_mutex. It's safe - * because at this moment the children list can't grow. - */ - if (!list_empty(&s->memcg_params.children)) - rcu_barrier(); } #else static inline int shutdown_memcg_caches(struct kmem_cache *s) { return 0; } + +static inline void flush_memcg_workqueue(struct kmem_cache *s) +{ +} #endif /* CONFIG_MEMCG_KMEM */ void slab_kmem_cache_release(struct kmem_cache *s) { __kmem_cache_release(s); - destroy_memcg_params(s); kfree_const(s->name); kmem_cache_free(kmem_cache, s); } @@ -953,6 +621,8 @@ void kmem_cache_destroy(struct kmem_cache *s) if (unlikely(!s)) return; + flush_memcg_workqueue(s); + get_online_cpus(); get_online_mems(); @@ -962,22 +632,6 @@ void kmem_cache_destroy(struct kmem_cache *s) if (s->refcount) goto out_unlock; -#ifdef CONFIG_MEMCG_KMEM - memcg_set_kmem_cache_dying(s); - - mutex_unlock(&slab_mutex); - - put_online_mems(); - put_online_cpus(); - - flush_memcg_workqueue(s); - - get_online_cpus(); - get_online_mems(); - - mutex_lock(&slab_mutex); -#endif - err = shutdown_memcg_caches(s); if (!err) err = shutdown_cache(s); @@ -1019,7 +673,7 @@ int kmem_cache_shrink(struct kmem_cache *cachep) EXPORT_SYMBOL(kmem_cache_shrink); /** - * kmem_cache_shrink_all - shrink a cache and all memcg caches for root cache + * kmem_cache_shrink_all - shrink root and memcg caches * @s: The cache pointer */ void kmem_cache_shrink_all(struct kmem_cache *s) @@ -1036,21 +690,11 @@ void kmem_cache_shrink_all(struct kmem_cache *s) kasan_cache_shrink(s); __kmem_cache_shrink(s); - /* - * We have to take the slab_mutex to protect from the memcg list - * modification. - */ - mutex_lock(&slab_mutex); - for_each_memcg_cache(c, s) { - /* - * Don't need to shrink deactivated memcg caches. 
- */ - if (s->flags & SLAB_DEACTIVATED) - continue; + c = memcg_cache(s); + if (c) { kasan_cache_shrink(c); __kmem_cache_shrink(c); } - mutex_unlock(&slab_mutex); put_online_mems(); put_online_cpus(); } @@ -1105,7 +749,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, create_boot_cache(s, name, size, flags, useroffset, usersize); list_add(&s->list, &slab_caches); - memcg_link_cache(s, NULL); + memcg_link_cache(s); s->refcount = 1; return s; } @@ -1483,7 +1127,8 @@ memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info) if (!is_root_cache(s)) return; - for_each_memcg_cache(c, s) { + c = memcg_cache(s); + if (c) { memset(&sinfo, 0, sizeof(sinfo)); get_slabinfo(c, &sinfo); @@ -1614,7 +1259,7 @@ module_init(slab_proc_init); #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_MEMCG_KMEM) /* - * Display information about kmem caches that have child memcg caches. + * Display information about kmem caches that have memcg cache. */ static int memcg_slabinfo_show(struct seq_file *m, void *unused) { @@ -1626,9 +1271,9 @@ static int memcg_slabinfo_show(struct seq_file *m, void *unused) seq_puts(m, " \n"); list_for_each_entry(s, &slab_root_caches, root_caches_node) { /* - * Skip kmem caches that don't have any memcg children. + * Skip kmem caches that don't have the memcg cache. */ - if (list_empty(&s->memcg_params.children)) + if (!s->memcg_params.memcg_cache) continue; memset(&sinfo, 0, sizeof(sinfo)); @@ -1637,23 +1282,13 @@ static int memcg_slabinfo_show(struct seq_file *m, void *unused) cache_name(s), sinfo.active_objs, sinfo.num_objs, sinfo.active_slabs, sinfo.num_slabs); - for_each_memcg_cache(c, s) { - struct cgroup_subsys_state *css; - char *status = ""; - - css = &c->memcg_params.memcg->css; - if (!(css->flags & CSS_ONLINE)) - status = ":dead"; - else if (c->flags & SLAB_DEACTIVATED) - status = ":deact"; - - memset(&sinfo, 0, sizeof(sinfo)); - get_slabinfo(c, &sinfo); - seq_printf(m, "%-17s %4d%-6s %6lu %6lu %6lu %6lu\n", - cache_name(c), css->id, status, - sinfo.active_objs, sinfo.num_objs, - sinfo.active_slabs, sinfo.num_slabs); - } + c = s->memcg_params.memcg_cache; + memset(&sinfo, 0, sizeof(sinfo)); + get_slabinfo(c, &sinfo); + seq_printf(m, "%-17s %4d %6lu %6lu %6lu %6lu\n", + cache_name(c), root_mem_cgroup->css.id, + sinfo.active_objs, sinfo.num_objs, + sinfo.active_slabs, sinfo.num_slabs); } mutex_unlock(&slab_mutex); return 0; diff --git a/mm/slub.c b/mm/slub.c index 47e63b1100d4..44a48a08a691 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -4204,36 +4204,6 @@ int __kmem_cache_shrink(struct kmem_cache *s) return ret; } -#ifdef CONFIG_MEMCG -void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s) -{ - /* - * Called with all the locks held after a sched RCU grace period. - * Even if @s becomes empty after shrinking, we can't know that @s - * doesn't have allocations already in-flight and thus can't - * destroy @s until the associated memcg is released. - * - * However, let's remove the sysfs files for empty caches here. - * Each cache has a lot of interface files which aren't - * particularly useful for empty draining caches; otherwise, we can - * easily end up with millions of unnecessary sysfs files on - * systems which have a lot of memory and transient cgroups. - */ - if (!__kmem_cache_shrink(s)) - sysfs_slab_remove(s); -} - -void __kmemcg_cache_deactivate(struct kmem_cache *s) -{ - /* - * Disable empty slabs caching. Used to avoid pinning offline - * memory cgroups by kmem pages that can be freed. 
- */ - slub_set_cpu_partial(s, 0); - s->min_partial = 0; -} -#endif /* CONFIG_MEMCG */ - static int slab_mem_going_offline_callback(void *arg) { struct kmem_cache *s; @@ -4390,7 +4360,7 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache) } slab_init_memcg_params(s); list_add(&s->list, &slab_caches); - memcg_link_cache(s, NULL); + memcg_link_cache(s); return s; } @@ -4458,7 +4428,8 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, s->object_size = max(s->object_size, size); s->inuse = max(s->inuse, ALIGN(size, sizeof(void *))); - for_each_memcg_cache(c, s) { + c = memcg_cache(s); + if (c) { c->object_size = s->object_size; c->inuse = max(c->inuse, ALIGN(size, sizeof(void *))); } @@ -5591,7 +5562,8 @@ static ssize_t slab_attr_store(struct kobject *kobj, * directly either failed or succeeded, in which case we loop * through the descendants with best-effort propagation. */ - for_each_memcg_cache(c, s) + c = memcg_cache(s); + if (c) attribute->store(c, buf, len); mutex_unlock(&slab_mutex); } -- cgit v1.2.3 From d797b7d05405c519f7b62ea69a75cea1883863b2 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 6 Aug 2020 23:21:14 -0700 Subject: mm: memcg/slab: simplify memcg cache creation Because the number of non-root kmem_caches doesn't depend on the number of memory cgroups anymore and is generally not very big, there is no more need for a dedicated workqueue. Also, as there is no more need to pass any arguments to the memcg_create_kmem_cache() except the root kmem_cache, it's possible to just embed the work structure into the kmem_cache and avoid the dynamic allocation of the work structure. This will also simplify the synchronization: for each root kmem_cache there is only one work. So there will be no more concurrent attempts to create a non-root kmem_cache for a root kmem_cache: the second and all following attempts to queue the work will fail. On the kmem_cache destruction path there is no more need to call the expensive flush_workqueue() and wait for all pending works to be finished. Instead, cancel_work_sync() can be used to cancel/wait for only one work. 
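Condensed from the hunks below, the resulting creation path fits in a few lines; the sketch strips unrelated fields, #ifdefs and comments and is a reading aid rather than the literal diff:

struct memcg_cache_params {
	struct kmem_cache *root_cache;
	struct kmem_cache *memcg_cache;
	struct list_head __root_caches_node;
	struct work_struct work;	/* embedded: no dynamic allocation needed */
};

static void memcg_kmem_cache_create_func(struct work_struct *work)
{
	struct kmem_cache *cachep = container_of(work, struct kmem_cache,
						 memcg_params.work);
	memcg_create_kmem_cache(cachep);
}

void slab_init_memcg_params(struct kmem_cache *s)
{
	s->memcg_params.root_cache = NULL;
	s->memcg_params.memcg_cache = NULL;
	INIT_WORK(&s->memcg_params.work, memcg_kmem_cache_create_func);
}

/* Allocation slow path: fall back to the root cache and kick off creation. */
struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
{
	struct kmem_cache *memcg_cachep;

	memcg_cachep = READ_ONCE(cachep->memcg_params.memcg_cache);
	if (unlikely(!memcg_cachep)) {
		queue_work(system_wq, &cachep->memcg_params.work);
		return cachep;
	}
	return memcg_cachep;
}

/* Destruction path: wait for at most one pending creation work item. */
static void cancel_memcg_cache_creation(struct kmem_cache *s)
{
	cancel_work_sync(&s->memcg_params.work);
}

Because the work item is initialized once in slab_init_memcg_params() and queue_work() refuses to queue a work item that is already pending, concurrent allocation misses cannot queue creation of the same memcg cache more than once, which is what removes the need for the dedicated workqueue and its concurrency limit.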
Signed-off-by: Roman Gushchin Signed-off-by: Andrew Morton Reviewed-by: Vlastimil Babka Reviewed-by: Shakeel Butt Cc: Christoph Lameter Cc: Johannes Weiner Cc: Michal Hocko Cc: Tejun Heo Link: http://lkml.kernel.org/r/20200623174037.3951353-14-guro@fb.com Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 1 - mm/memcontrol.c | 48 +--------------------------------------------- mm/slab.h | 2 ++ mm/slab_common.c | 22 +++++++++++---------- 4 files changed, 15 insertions(+), 58 deletions(-) (limited to 'include') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 11fd18b3d6c6..2ac84dcfc9e5 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1418,7 +1418,6 @@ int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size); void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size); extern struct static_key_false memcg_kmem_enabled_key; -extern struct workqueue_struct *memcg_kmem_cache_wq; extern int memcg_nr_cache_ids; void memcg_get_cache_ids(void); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 874704c4a48a..c713867e496d 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -399,8 +399,6 @@ void memcg_put_cache_ids(void) */ DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key); EXPORT_SYMBOL(memcg_kmem_enabled_key); - -struct workqueue_struct *memcg_kmem_cache_wq; #endif static int memcg_shrinker_map_size; @@ -2902,39 +2900,6 @@ static void memcg_free_cache_id(int id) ida_simple_remove(&memcg_cache_ida, id); } -struct memcg_kmem_cache_create_work { - struct kmem_cache *cachep; - struct work_struct work; -}; - -static void memcg_kmem_cache_create_func(struct work_struct *w) -{ - struct memcg_kmem_cache_create_work *cw = - container_of(w, struct memcg_kmem_cache_create_work, work); - struct kmem_cache *cachep = cw->cachep; - - memcg_create_kmem_cache(cachep); - - kfree(cw); -} - -/* - * Enqueue the creation of a per-memcg kmem_cache. - */ -static void memcg_schedule_kmem_cache_create(struct kmem_cache *cachep) -{ - struct memcg_kmem_cache_create_work *cw; - - cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN); - if (!cw) - return; - - cw->cachep = cachep; - INIT_WORK(&cw->work, memcg_kmem_cache_create_func); - - queue_work(memcg_kmem_cache_wq, &cw->work); -} - /** * memcg_kmem_get_cache: select memcg or root cache for allocation * @cachep: the original global kmem cache @@ -2951,7 +2916,7 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep) memcg_cachep = READ_ONCE(cachep->memcg_params.memcg_cache); if (unlikely(!memcg_cachep)) { - memcg_schedule_kmem_cache_create(cachep); + queue_work(system_wq, &cachep->memcg_params.work); return cachep; } @@ -7022,17 +6987,6 @@ static int __init mem_cgroup_init(void) { int cpu, node; -#ifdef CONFIG_MEMCG_KMEM - /* - * Kmem cache creation is mostly done with the slab_mutex held, - * so use a workqueue with limited concurrency to avoid stalling - * all worker threads in case lots of cgroups are created and - * destroyed simultaneously. - */ - memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1); - BUG_ON(!memcg_kmem_cache_wq); -#endif - cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, memcg_hotplug_cpu_dead); diff --git a/mm/slab.h b/mm/slab.h index e716b80befc2..fd9fcdfb3789 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -45,12 +45,14 @@ struct kmem_cache { * @memcg_cache: pointer to memcg kmem cache, used by all non-root memory * cgroups. * @root_caches_node: list node for slab_root_caches list. 
+ * @work: work struct used to create the non-root cache. */ struct memcg_cache_params { struct kmem_cache *root_cache; struct kmem_cache *memcg_cache; struct list_head __root_caches_node; + struct work_struct work; }; #endif /* CONFIG_SLOB */ diff --git a/mm/slab_common.c b/mm/slab_common.c index e752132eb64d..b898698f6c8a 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -134,10 +134,18 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr, LIST_HEAD(slab_root_caches); +static void memcg_kmem_cache_create_func(struct work_struct *work) +{ + struct kmem_cache *cachep = container_of(work, struct kmem_cache, + memcg_params.work); + memcg_create_kmem_cache(cachep); +} + void slab_init_memcg_params(struct kmem_cache *s) { s->memcg_params.root_cache = NULL; s->memcg_params.memcg_cache = NULL; + INIT_WORK(&s->memcg_params.work, memcg_kmem_cache_create_func); } static void init_memcg_params(struct kmem_cache *s, @@ -586,15 +594,9 @@ static int shutdown_memcg_caches(struct kmem_cache *s) return 0; } -static void flush_memcg_workqueue(struct kmem_cache *s) +static void cancel_memcg_cache_creation(struct kmem_cache *s) { - /* - * SLAB and SLUB create memcg kmem_caches through workqueue and SLUB - * deactivates the memcg kmem_caches through workqueue. Make sure all - * previous workitems on workqueue are processed. - */ - if (likely(memcg_kmem_cache_wq)) - flush_workqueue(memcg_kmem_cache_wq); + cancel_work_sync(&s->memcg_params.work); } #else static inline int shutdown_memcg_caches(struct kmem_cache *s) @@ -602,7 +604,7 @@ static inline int shutdown_memcg_caches(struct kmem_cache *s) return 0; } -static inline void flush_memcg_workqueue(struct kmem_cache *s) +static inline void cancel_memcg_cache_creation(struct kmem_cache *s) { } #endif /* CONFIG_MEMCG_KMEM */ @@ -621,7 +623,7 @@ void kmem_cache_destroy(struct kmem_cache *s) if (unlikely(!s)) return; - flush_memcg_workqueue(s); + cancel_memcg_cache_creation(s); get_online_cpus(); get_online_mems(); -- cgit v1.2.3 From 272911a4ad18c48f8bc449a5db945a54987dd687 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 6 Aug 2020 23:21:17 -0700 Subject: mm: memcg/slab: remove memcg_kmem_get_cache() The memcg_kmem_get_cache() function became really trivial, so let's just inline it into the single call point: memcg_slab_pre_alloc_hook(). It will make the code less bulky and can also help the compiler to generate a better code. 
Signed-off-by: Roman Gushchin Signed-off-by: Andrew Morton Reviewed-by: Vlastimil Babka Reviewed-by: Shakeel Butt Cc: Christoph Lameter Cc: Johannes Weiner Cc: Michal Hocko Cc: Tejun Heo Link: http://lkml.kernel.org/r/20200623174037.3951353-15-guro@fb.com Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 2 -- mm/memcontrol.c | 25 +------------------------ mm/slab.h | 11 +++++++++-- mm/slab_common.c | 2 +- 4 files changed, 11 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 2ac84dcfc9e5..5a8b62d075e6 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1403,8 +1403,6 @@ static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg, } #endif -struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep); - #ifdef CONFIG_MEMCG_KMEM int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp, unsigned int nr_pages); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c713867e496d..a8113b77b23a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -393,7 +393,7 @@ void memcg_put_cache_ids(void) /* * A lot of the calls to the cache allocation functions are expected to be - * inlined by the compiler. Since the calls to memcg_kmem_get_cache are + * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are * conditional to this static branch, we'll have to allow modules that does * kmem_cache_alloc and the such to see this symbol as well */ @@ -2900,29 +2900,6 @@ static void memcg_free_cache_id(int id) ida_simple_remove(&memcg_cache_ida, id); } -/** - * memcg_kmem_get_cache: select memcg or root cache for allocation - * @cachep: the original global kmem cache - * - * Return the kmem_cache we're supposed to use for a slab allocation. - * - * If the cache does not exist yet, if we are the first user of it, we - * create it asynchronously in a workqueue and let the current allocation - * go through with the original cache. - */ -struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep) -{ - struct kmem_cache *memcg_cachep; - - memcg_cachep = READ_ONCE(cachep->memcg_params.memcg_cache); - if (unlikely(!memcg_cachep)) { - queue_work(system_wq, &cachep->memcg_params.work); - return cachep; - } - - return memcg_cachep; -} - /** * __memcg_kmem_charge: charge a number of kernel pages to a memcg * @memcg: memory cgroup to charge diff --git a/mm/slab.h b/mm/slab.h index fd9fcdfb3789..342eac852967 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -365,9 +365,16 @@ static inline struct kmem_cache *memcg_slab_pre_alloc_hook(struct kmem_cache *s, if (memcg_kmem_bypass()) return s; - cachep = memcg_kmem_get_cache(s); - if (is_root_cache(cachep)) + cachep = READ_ONCE(s->memcg_params.memcg_cache); + if (unlikely(!cachep)) { + /* + * If memcg cache does not exist yet, we schedule it's + * asynchronous creation and let the current allocation + * go through with the root cache. + */ + queue_work(system_wq, &s->memcg_params.work); return s; + } objcg = get_obj_cgroup_from_current(); if (!objcg) diff --git a/mm/slab_common.c b/mm/slab_common.c index b898698f6c8a..de0a46cf974a 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -570,7 +570,7 @@ void memcg_create_kmem_cache(struct kmem_cache *root_cache) } /* - * Since readers won't lock (see memcg_kmem_get_cache()), we need a + * Since readers won't lock (see memcg_slab_pre_alloc_hook()), we need a * barrier here to ensure nobody will see the kmem_cache partially * initialized. 
*/ -- cgit v1.2.3 From 10befea91b61c4e2c2d1df06a2e978d182fcf792 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 6 Aug 2020 23:21:27 -0700 Subject: mm: memcg/slab: use a single set of kmem_caches for all allocations Instead of having two sets of kmem_caches: one for system-wide and non-accounted allocations and the second one shared by all accounted allocations, we can use just one. The idea is simple: space for obj_cgroup metadata can be allocated on demand and filled only for accounted allocations. It allows to remove a bunch of code which is required to handle kmem_cache clones for accounted allocations. There is no more need to create them, accumulate statistics, propagate attributes, etc. It's a quite significant simplification. Also, because the total number of slab_caches is reduced almost twice (not all kmem_caches have a memcg clone), some additional memory savings are expected. On my devvm it additionally saves about 3.5% of slab memory. [guro@fb.com: fix build on MIPS] Link: http://lkml.kernel.org/r/20200717214810.3733082-1-guro@fb.com Suggested-by: Johannes Weiner Signed-off-by: Roman Gushchin Signed-off-by: Andrew Morton Reviewed-by: Vlastimil Babka Reviewed-by: Shakeel Butt Cc: Christoph Lameter Cc: Michal Hocko Cc: Tejun Heo Cc: Naresh Kamboju Link: http://lkml.kernel.org/r/20200623174037.3951353-18-guro@fb.com Signed-off-by: Linus Torvalds --- include/linux/slab.h | 2 - include/linux/slab_def.h | 3 - include/linux/slub_def.h | 10 --- mm/memcontrol.c | 25 +++++- mm/slab.c | 41 +-------- mm/slab.h | 194 +++++++++------------------------------ mm/slab_common.c | 230 ++--------------------------------------------- mm/slub.c | 163 +-------------------------------- 8 files changed, 78 insertions(+), 590 deletions(-) (limited to 'include') diff --git a/include/linux/slab.h b/include/linux/slab.h index 8b1f91e320f9..24df2393ec03 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -155,8 +155,6 @@ struct kmem_cache *kmem_cache_create_usercopy(const char *name, void kmem_cache_destroy(struct kmem_cache *); int kmem_cache_shrink(struct kmem_cache *); -void memcg_create_kmem_cache(struct kmem_cache *cachep); - /* * Please use this macro to create slab caches. Simply specify the * name of the structure and maybe some flags that are listed above. 
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index ccda7b9669a5..9eb430c163c2 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -72,9 +72,6 @@ struct kmem_cache { int obj_offset; #endif /* CONFIG_DEBUG_SLAB */ -#ifdef CONFIG_MEMCG - struct memcg_cache_params memcg_params; -#endif #ifdef CONFIG_KASAN struct kasan_cache kasan_info; #endif diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index f87302dcfe8c..1be0ed5befa1 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -108,17 +108,7 @@ struct kmem_cache { struct list_head list; /* List of slab caches */ #ifdef CONFIG_SYSFS struct kobject kobj; /* For sysfs */ - struct work_struct kobj_remove_work; #endif -#ifdef CONFIG_MEMCG - struct memcg_cache_params memcg_params; - /* For propagation, maximum size of a stored attr */ - unsigned int max_attr_size; -#ifdef CONFIG_SYSFS - struct kset *memcg_kset; -#endif -#endif - #ifdef CONFIG_SLAB_FREELIST_HARDENED unsigned long random; #endif diff --git a/mm/memcontrol.c b/mm/memcontrol.c index a8113b77b23a..473f9b91d51f 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2800,6 +2800,26 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg) } #ifdef CONFIG_MEMCG_KMEM +int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s, + gfp_t gfp) +{ + unsigned int objects = objs_per_slab_page(s, page); + void *vec; + + vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp, + page_to_nid(page)); + if (!vec) + return -ENOMEM; + + if (cmpxchg(&page->obj_cgroups, NULL, + (struct obj_cgroup **) ((unsigned long)vec | 0x1UL))) + kfree(vec); + else + kmemleak_not_leak(vec); + + return 0; +} + /* * Returns a pointer to the memory cgroup to which the kernel object is charged. 
* @@ -2826,7 +2846,10 @@ struct mem_cgroup *mem_cgroup_from_obj(void *p) off = obj_to_index(page->slab_cache, page, p); objcg = page_obj_cgroups(page)[off]; - return obj_cgroup_memcg(objcg); + if (objcg) + return obj_cgroup_memcg(objcg); + + return NULL; } /* All other pages use page->mem_cgroup */ diff --git a/mm/slab.c b/mm/slab.c index f40e5c95e11a..684ebe5b0c7a 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1379,11 +1379,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, return NULL; } - if (charge_slab_page(page, flags, cachep->gfporder, cachep)) { - __free_pages(page, cachep->gfporder); - return NULL; - } - + charge_slab_page(page, flags, cachep->gfporder, cachep); __SetPageSlab(page); /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */ if (sk_memalloc_socks() && page_is_pfmemalloc(page)) @@ -3799,8 +3795,8 @@ fail: } /* Always called with the slab_mutex held */ -static int __do_tune_cpucache(struct kmem_cache *cachep, int limit, - int batchcount, int shared, gfp_t gfp) +static int do_tune_cpucache(struct kmem_cache *cachep, int limit, + int batchcount, int shared, gfp_t gfp) { struct array_cache __percpu *cpu_cache, *prev; int cpu; @@ -3845,30 +3841,6 @@ setup_node: return setup_kmem_cache_nodes(cachep, gfp); } -static int do_tune_cpucache(struct kmem_cache *cachep, int limit, - int batchcount, int shared, gfp_t gfp) -{ - int ret; - struct kmem_cache *c; - - ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp); - - if (slab_state < FULL) - return ret; - - if ((ret < 0) || !is_root_cache(cachep)) - return ret; - - lockdep_assert_held(&slab_mutex); - c = memcg_cache(cachep); - if (c) { - /* return value determined by the root cache only */ - __do_tune_cpucache(c, limit, batchcount, shared, gfp); - } - - return ret; -} - /* Called with slab_mutex held always */ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) { @@ -3881,13 +3853,6 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) if (err) goto end; - if (!is_root_cache(cachep)) { - struct kmem_cache *root = memcg_root_cache(cachep); - limit = root->limit; - shared = root->shared; - batchcount = root->batchcount; - } - if (limit && shared && batchcount) goto skip_setup; /* diff --git a/mm/slab.h b/mm/slab.h index 7500a707121b..ec8e22ee6544 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -30,28 +30,6 @@ struct kmem_cache { struct list_head list; /* List of all slab caches on the system */ }; -#else /* !CONFIG_SLOB */ - -/* - * This is the main placeholder for memcg-related information in kmem caches. - * Both the root cache and the child cache will have it. Some fields are used - * in both cases, other are specific to root caches. - * - * @root_cache: Common to root and child caches. NULL for root, pointer to - * the root cache for children. - * - * The following fields are specific to root caches. - * - * @memcg_cache: pointer to memcg kmem cache, used by all non-root memory - * cgroups. - * @work: work struct used to create the non-root cache. 
- */ -struct memcg_cache_params { - struct kmem_cache *root_cache; - - struct kmem_cache *memcg_cache; - struct work_struct work; -}; #endif /* CONFIG_SLOB */ #ifdef CONFIG_SLAB @@ -196,7 +174,6 @@ int __kmem_cache_shutdown(struct kmem_cache *); void __kmem_cache_release(struct kmem_cache *); int __kmem_cache_shrink(struct kmem_cache *); void slab_kmem_cache_release(struct kmem_cache *); -void kmem_cache_shrink_all(struct kmem_cache *s); struct seq_file; struct file; @@ -263,43 +240,6 @@ static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t fla } #ifdef CONFIG_MEMCG_KMEM -static inline bool is_root_cache(struct kmem_cache *s) -{ - return !s->memcg_params.root_cache; -} - -static inline bool slab_equal_or_root(struct kmem_cache *s, - struct kmem_cache *p) -{ - return p == s || p == s->memcg_params.root_cache; -} - -/* - * We use suffixes to the name in memcg because we can't have caches - * created in the system with the same name. But when we print them - * locally, better refer to them with the base name - */ -static inline const char *cache_name(struct kmem_cache *s) -{ - if (!is_root_cache(s)) - s = s->memcg_params.root_cache; - return s->name; -} - -static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s) -{ - if (is_root_cache(s)) - return s; - return s->memcg_params.root_cache; -} - -static inline struct kmem_cache *memcg_cache(struct kmem_cache *s) -{ - if (is_root_cache(s)) - return s->memcg_params.memcg_cache; - return NULL; -} - static inline struct obj_cgroup **page_obj_cgroups(struct page *page) { /* @@ -317,21 +257,8 @@ static inline bool page_has_obj_cgroups(struct page *page) return ((unsigned long)page->obj_cgroups & 0x1UL); } -static inline int memcg_alloc_page_obj_cgroups(struct page *page, - struct kmem_cache *s, gfp_t gfp) -{ - unsigned int objects = objs_per_slab_page(s, page); - void *vec; - - vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp, - page_to_nid(page)); - if (!vec) - return -ENOMEM; - - kmemleak_not_leak(vec); - page->obj_cgroups = (struct obj_cgroup **) ((unsigned long)vec | 0x1UL); - return 0; -} +int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s, + gfp_t gfp); static inline void memcg_free_page_obj_cgroups(struct page *page) { @@ -348,38 +275,25 @@ static inline size_t obj_full_size(struct kmem_cache *s) return s->size + sizeof(struct obj_cgroup *); } -static inline struct kmem_cache *memcg_slab_pre_alloc_hook(struct kmem_cache *s, - struct obj_cgroup **objcgp, - size_t objects, gfp_t flags) +static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s, + size_t objects, + gfp_t flags) { - struct kmem_cache *cachep; struct obj_cgroup *objcg; if (memcg_kmem_bypass()) - return s; - - cachep = READ_ONCE(s->memcg_params.memcg_cache); - if (unlikely(!cachep)) { - /* - * If memcg cache does not exist yet, we schedule it's - * asynchronous creation and let the current allocation - * go through with the root cache. 
- */ - queue_work(system_wq, &s->memcg_params.work); - return s; - } + return NULL; objcg = get_obj_cgroup_from_current(); if (!objcg) - return s; + return NULL; if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) { obj_cgroup_put(objcg); - cachep = NULL; + return NULL; } - *objcgp = objcg; - return cachep; + return objcg; } static inline void mod_objcg_state(struct obj_cgroup *objcg, @@ -398,15 +312,27 @@ static inline void mod_objcg_state(struct obj_cgroup *objcg, static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg, - size_t size, void **p) + gfp_t flags, size_t size, + void **p) { struct page *page; unsigned long off; size_t i; + if (!objcg) + return; + + flags &= ~__GFP_ACCOUNT; for (i = 0; i < size; i++) { if (likely(p[i])) { page = virt_to_head_page(p[i]); + + if (!page_has_obj_cgroups(page) && + memcg_alloc_page_obj_cgroups(page, s, flags)) { + obj_cgroup_uncharge(objcg, obj_full_size(s)); + continue; + } + off = obj_to_index(s, page, p[i]); obj_cgroup_get(objcg); page_obj_cgroups(page)[off] = objcg; @@ -425,13 +351,19 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page, struct obj_cgroup *objcg; unsigned int off; - if (!memcg_kmem_enabled() || is_root_cache(s)) + if (!memcg_kmem_enabled()) + return; + + if (!page_has_obj_cgroups(page)) return; off = obj_to_index(s, page, p); objcg = page_obj_cgroups(page)[off]; page_obj_cgroups(page)[off] = NULL; + if (!objcg) + return; + obj_cgroup_uncharge(objcg, obj_full_size(s)); mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s), -obj_full_size(s)); @@ -439,35 +371,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page, obj_cgroup_put(objcg); } -extern void slab_init_memcg_params(struct kmem_cache *); - #else /* CONFIG_MEMCG_KMEM */ -static inline bool is_root_cache(struct kmem_cache *s) -{ - return true; -} - -static inline bool slab_equal_or_root(struct kmem_cache *s, - struct kmem_cache *p) -{ - return s == p; -} - -static inline const char *cache_name(struct kmem_cache *s) -{ - return s->name; -} - -static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s) -{ - return s; -} - -static inline struct kmem_cache *memcg_cache(struct kmem_cache *s) -{ - return NULL; -} - static inline bool page_has_obj_cgroups(struct page *page) { return false; @@ -488,16 +392,17 @@ static inline void memcg_free_page_obj_cgroups(struct page *page) { } -static inline struct kmem_cache *memcg_slab_pre_alloc_hook(struct kmem_cache *s, - struct obj_cgroup **objcgp, - size_t objects, gfp_t flags) +static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s, + size_t objects, + gfp_t flags) { return NULL; } static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg, - size_t size, void **p) + gfp_t flags, size_t size, + void **p) { } @@ -505,11 +410,6 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page, void *p) { } - -static inline void slab_init_memcg_params(struct kmem_cache *s) -{ -} - #endif /* CONFIG_MEMCG_KMEM */ static inline struct kmem_cache *virt_to_cache(const void *obj) @@ -523,27 +423,18 @@ static inline struct kmem_cache *virt_to_cache(const void *obj) return page->slab_cache; } -static __always_inline int charge_slab_page(struct page *page, - gfp_t gfp, int order, - struct kmem_cache *s) +static __always_inline void charge_slab_page(struct page *page, + gfp_t gfp, int order, + struct kmem_cache *s) { - if (memcg_kmem_enabled() && 
!is_root_cache(s)) { - int ret; - - ret = memcg_alloc_page_obj_cgroups(page, s, gfp); - if (ret) - return ret; - } - mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s), PAGE_SIZE << order); - return 0; } static __always_inline void uncharge_slab_page(struct page *page, int order, struct kmem_cache *s) { - if (memcg_kmem_enabled() && !is_root_cache(s)) + if (memcg_kmem_enabled()) memcg_free_page_obj_cgroups(page); mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s), @@ -555,12 +446,11 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) struct kmem_cache *cachep; if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) && - !memcg_kmem_enabled() && !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) return s; cachep = virt_to_cache(x); - if (WARN(cachep && !slab_equal_or_root(cachep, s), + if (WARN(cachep && cachep != s, "%s: Wrong slab cache. %s but object is from %s\n", __func__, s->name, cachep->name)) print_tracking(cachep, x); @@ -613,7 +503,7 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, if (memcg_kmem_enabled() && ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT))) - return memcg_slab_pre_alloc_hook(s, objcgp, size, flags); + *objcgp = memcg_slab_pre_alloc_hook(s, size, flags); return s; } @@ -632,8 +522,8 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, s->flags, flags); } - if (memcg_kmem_enabled() && !is_root_cache(s)) - memcg_slab_post_alloc_hook(s, objcg, size, p); + if (memcg_kmem_enabled()) + memcg_slab_post_alloc_hook(s, objcg, flags, size, p); } #ifndef CONFIG_SLOB diff --git a/mm/slab_common.c b/mm/slab_common.c index ad67a03c592a..a513f3237155 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -130,36 +130,6 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr, return i; } -#ifdef CONFIG_MEMCG_KMEM -static void memcg_kmem_cache_create_func(struct work_struct *work) -{ - struct kmem_cache *cachep = container_of(work, struct kmem_cache, - memcg_params.work); - memcg_create_kmem_cache(cachep); -} - -void slab_init_memcg_params(struct kmem_cache *s) -{ - s->memcg_params.root_cache = NULL; - s->memcg_params.memcg_cache = NULL; - INIT_WORK(&s->memcg_params.work, memcg_kmem_cache_create_func); -} - -static void init_memcg_params(struct kmem_cache *s, - struct kmem_cache *root_cache) -{ - if (root_cache) - s->memcg_params.root_cache = root_cache; - else - slab_init_memcg_params(s); -} -#else -static inline void init_memcg_params(struct kmem_cache *s, - struct kmem_cache *root_cache) -{ -} -#endif /* CONFIG_MEMCG_KMEM */ - /* * Figure out what the alignment of the objects will be given a set of * flags, a user specified alignment and the size of the objects. 
@@ -197,9 +167,6 @@ int slab_unmergeable(struct kmem_cache *s) if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE)) return 1; - if (!is_root_cache(s)) - return 1; - if (s->ctor) return 1; @@ -286,7 +253,6 @@ static struct kmem_cache *create_cache(const char *name, s->useroffset = useroffset; s->usersize = usersize; - init_memcg_params(s, root_cache); err = __kmem_cache_create(s, flags); if (err) goto out_free_cache; @@ -344,7 +310,6 @@ kmem_cache_create_usercopy(const char *name, get_online_cpus(); get_online_mems(); - memcg_get_cache_ids(); mutex_lock(&slab_mutex); @@ -394,7 +359,6 @@ kmem_cache_create_usercopy(const char *name, out_unlock: mutex_unlock(&slab_mutex); - memcg_put_cache_ids(); put_online_mems(); put_online_cpus(); @@ -507,87 +471,6 @@ static int shutdown_cache(struct kmem_cache *s) return 0; } -#ifdef CONFIG_MEMCG_KMEM -/* - * memcg_create_kmem_cache - Create a cache for non-root memory cgroups. - * @root_cache: The parent of the new cache. - * - * This function attempts to create a kmem cache that will serve allocation - * requests going all non-root memory cgroups to @root_cache. The new cache - * inherits properties from its parent. - */ -void memcg_create_kmem_cache(struct kmem_cache *root_cache) -{ - struct kmem_cache *s = NULL; - char *cache_name; - - get_online_cpus(); - get_online_mems(); - - mutex_lock(&slab_mutex); - - if (root_cache->memcg_params.memcg_cache) - goto out_unlock; - - cache_name = kasprintf(GFP_KERNEL, "%s-memcg", root_cache->name); - if (!cache_name) - goto out_unlock; - - s = create_cache(cache_name, root_cache->object_size, - root_cache->align, - root_cache->flags & CACHE_CREATE_MASK, - root_cache->useroffset, root_cache->usersize, - root_cache->ctor, root_cache); - /* - * If we could not create a memcg cache, do not complain, because - * that's not critical at all as we can always proceed with the root - * cache. - */ - if (IS_ERR(s)) { - kfree(cache_name); - goto out_unlock; - } - - /* - * Since readers won't lock (see memcg_slab_pre_alloc_hook()), we need a - * barrier here to ensure nobody will see the kmem_cache partially - * initialized. 
- */ - smp_wmb(); - root_cache->memcg_params.memcg_cache = s; - -out_unlock: - mutex_unlock(&slab_mutex); - - put_online_mems(); - put_online_cpus(); -} - -static int shutdown_memcg_caches(struct kmem_cache *s) -{ - BUG_ON(!is_root_cache(s)); - - if (s->memcg_params.memcg_cache) - WARN_ON(shutdown_cache(s->memcg_params.memcg_cache)); - - return 0; -} - -static void cancel_memcg_cache_creation(struct kmem_cache *s) -{ - cancel_work_sync(&s->memcg_params.work); -} -#else -static inline int shutdown_memcg_caches(struct kmem_cache *s) -{ - return 0; -} - -static inline void cancel_memcg_cache_creation(struct kmem_cache *s) -{ -} -#endif /* CONFIG_MEMCG_KMEM */ - void slab_kmem_cache_release(struct kmem_cache *s) { __kmem_cache_release(s); @@ -602,8 +485,6 @@ void kmem_cache_destroy(struct kmem_cache *s) if (unlikely(!s)) return; - cancel_memcg_cache_creation(s); - get_online_cpus(); get_online_mems(); @@ -613,10 +494,7 @@ void kmem_cache_destroy(struct kmem_cache *s) if (s->refcount) goto out_unlock; - err = shutdown_memcg_caches(s); - if (!err) - err = shutdown_cache(s); - + err = shutdown_cache(s); if (err) { pr_err("kmem_cache_destroy %s: Slab cache still has objects\n", s->name); @@ -653,33 +531,6 @@ int kmem_cache_shrink(struct kmem_cache *cachep) } EXPORT_SYMBOL(kmem_cache_shrink); -/** - * kmem_cache_shrink_all - shrink root and memcg caches - * @s: The cache pointer - */ -void kmem_cache_shrink_all(struct kmem_cache *s) -{ - struct kmem_cache *c; - - if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || !is_root_cache(s)) { - kmem_cache_shrink(s); - return; - } - - get_online_cpus(); - get_online_mems(); - kasan_cache_shrink(s); - __kmem_cache_shrink(s); - - c = memcg_cache(s); - if (c) { - kasan_cache_shrink(c); - __kmem_cache_shrink(c); - } - put_online_mems(); - put_online_cpus(); -} - bool slab_is_available(void) { return slab_state >= UP; @@ -708,8 +559,6 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, s->useroffset = useroffset; s->usersize = usersize; - slab_init_memcg_params(s); - err = __kmem_cache_create(s, flags); if (err) @@ -1098,25 +947,6 @@ void slab_stop(struct seq_file *m, void *p) mutex_unlock(&slab_mutex); } -static void -memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info) -{ - struct kmem_cache *c; - struct slabinfo sinfo; - - c = memcg_cache(s); - if (c) { - memset(&sinfo, 0, sizeof(sinfo)); - get_slabinfo(c, &sinfo); - - info->active_slabs += sinfo.active_slabs; - info->num_slabs += sinfo.num_slabs; - info->shared_avail += sinfo.shared_avail; - info->active_objs += sinfo.active_objs; - info->num_objs += sinfo.num_objs; - } -} - static void cache_show(struct kmem_cache *s, struct seq_file *m) { struct slabinfo sinfo; @@ -1124,10 +954,8 @@ static void cache_show(struct kmem_cache *s, struct seq_file *m) memset(&sinfo, 0, sizeof(sinfo)); get_slabinfo(s, &sinfo); - memcg_accumulate_slabinfo(s, &sinfo); - seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", - cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size, + s->name, sinfo.active_objs, sinfo.num_objs, s->size, sinfo.objects_per_slab, (1 << sinfo.cache_order)); seq_printf(m, " : tunables %4u %4u %4u", @@ -1144,8 +972,7 @@ static int slab_show(struct seq_file *m, void *p) if (p == slab_caches.next) print_slabinfo_header(m); - if (is_root_cache(s)) - cache_show(s, m); + cache_show(s, m); return 0; } @@ -1170,13 +997,13 @@ void dump_unreclaimable_slab(void) pr_info("Name Used Total\n"); list_for_each_entry_safe(s, s2, &slab_caches, list) { - if (!is_root_cache(s) || (s->flags & 
SLAB_RECLAIM_ACCOUNT)) + if (s->flags & SLAB_RECLAIM_ACCOUNT) continue; get_slabinfo(s, &sinfo); if (sinfo.num_objs > 0) - pr_info("%-17s %10luKB %10luKB\n", cache_name(s), + pr_info("%-17s %10luKB %10luKB\n", s->name, (sinfo.active_objs * s->size) / 1024, (sinfo.num_objs * s->size) / 1024); } @@ -1235,53 +1062,6 @@ static int __init slab_proc_init(void) } module_init(slab_proc_init); -#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_MEMCG_KMEM) -/* - * Display information about kmem caches that have memcg cache. - */ -static int memcg_slabinfo_show(struct seq_file *m, void *unused) -{ - struct kmem_cache *s, *c; - struct slabinfo sinfo; - - mutex_lock(&slab_mutex); - seq_puts(m, "# "); - seq_puts(m, " \n"); - list_for_each_entry(s, &slab_caches, list) { - /* - * Skip kmem caches that don't have the memcg cache. - */ - if (!s->memcg_params.memcg_cache) - continue; - - memset(&sinfo, 0, sizeof(sinfo)); - get_slabinfo(s, &sinfo); - seq_printf(m, "%-17s root %6lu %6lu %6lu %6lu\n", - cache_name(s), sinfo.active_objs, sinfo.num_objs, - sinfo.active_slabs, sinfo.num_slabs); - - c = s->memcg_params.memcg_cache; - memset(&sinfo, 0, sizeof(sinfo)); - get_slabinfo(c, &sinfo); - seq_printf(m, "%-17s %4d %6lu %6lu %6lu %6lu\n", - cache_name(c), root_mem_cgroup->css.id, - sinfo.active_objs, sinfo.num_objs, - sinfo.active_slabs, sinfo.num_slabs); - } - mutex_unlock(&slab_mutex); - return 0; -} -DEFINE_SHOW_ATTRIBUTE(memcg_slabinfo); - -static int __init memcg_slabinfo_init(void) -{ - debugfs_create_file("memcg_slabinfo", S_IFREG | S_IRUGO, - NULL, NULL, &memcg_slabinfo_fops); - return 0; -} - -late_initcall(memcg_slabinfo_init); -#endif /* CONFIG_DEBUG_FS && CONFIG_MEMCG_KMEM */ #endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */ static __always_inline void *__do_krealloc(const void *p, size_t new_size, diff --git a/mm/slub.c b/mm/slub.c index 9cd724fe37d8..eba8f57d5734 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -218,14 +218,10 @@ enum track_item { TRACK_ALLOC, TRACK_FREE }; #ifdef CONFIG_SYSFS static int sysfs_slab_add(struct kmem_cache *); static int sysfs_slab_alias(struct kmem_cache *, const char *); -static void memcg_propagate_slab_attrs(struct kmem_cache *s); -static void sysfs_slab_remove(struct kmem_cache *s); #else static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; } -static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { } -static inline void sysfs_slab_remove(struct kmem_cache *s) { } #endif static inline void stat(const struct kmem_cache *s, enum stat_item si) @@ -1624,10 +1620,8 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s, else page = __alloc_pages_node(node, flags, order); - if (page && charge_slab_page(page, flags, order, s)) { - __free_pages(page, order); - page = NULL; - } + if (page) + charge_slab_page(page, flags, order, s); return page; } @@ -3920,7 +3914,6 @@ int __kmem_cache_shutdown(struct kmem_cache *s) if (n->nr_partial || slabs_node(s, node)) return 1; } - sysfs_slab_remove(s); return 0; } @@ -4358,7 +4351,6 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache) p->slab_cache = s; #endif } - slab_init_memcg_params(s); list_add(&s->list, &slab_caches); return s; } @@ -4414,7 +4406,7 @@ struct kmem_cache * __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, slab_flags_t flags, void (*ctor)(void *)) { - struct kmem_cache *s, *c; + struct kmem_cache *s; s = find_mergeable(size, align, flags, name, 
ctor); if (s) { @@ -4427,12 +4419,6 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, s->object_size = max(s->object_size, size); s->inuse = max(s->inuse, ALIGN(size, sizeof(void *))); - c = memcg_cache(s); - if (c) { - c->object_size = s->object_size; - c->inuse = max(c->inuse, ALIGN(size, sizeof(void *))); - } - if (sysfs_slab_alias(s, name)) { s->refcount--; s = NULL; @@ -4454,7 +4440,6 @@ int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags) if (slab_state <= UP) return 0; - memcg_propagate_slab_attrs(s); err = sysfs_slab_add(s); if (err) __kmem_cache_release(s); @@ -5312,7 +5297,7 @@ static ssize_t shrink_store(struct kmem_cache *s, const char *buf, size_t length) { if (buf[0] == '1') - kmem_cache_shrink_all(s); + kmem_cache_shrink(s); else return -EINVAL; return length; @@ -5536,99 +5521,9 @@ static ssize_t slab_attr_store(struct kobject *kobj, return -EIO; err = attribute->store(s, buf, len); -#ifdef CONFIG_MEMCG - if (slab_state >= FULL && err >= 0 && is_root_cache(s)) { - struct kmem_cache *c; - - mutex_lock(&slab_mutex); - if (s->max_attr_size < len) - s->max_attr_size = len; - - /* - * This is a best effort propagation, so this function's return - * value will be determined by the parent cache only. This is - * basically because not all attributes will have a well - * defined semantics for rollbacks - most of the actions will - * have permanent effects. - * - * Returning the error value of any of the children that fail - * is not 100 % defined, in the sense that users seeing the - * error code won't be able to know anything about the state of - * the cache. - * - * Only returning the error code for the parent cache at least - * has well defined semantics. The cache being written to - * directly either failed or succeeded, in which case we loop - * through the descendants with best-effort propagation. - */ - c = memcg_cache(s); - if (c) - attribute->store(c, buf, len); - mutex_unlock(&slab_mutex); - } -#endif return err; } -static void memcg_propagate_slab_attrs(struct kmem_cache *s) -{ -#ifdef CONFIG_MEMCG - int i; - char *buffer = NULL; - struct kmem_cache *root_cache; - - if (is_root_cache(s)) - return; - - root_cache = s->memcg_params.root_cache; - - /* - * This mean this cache had no attribute written. Therefore, no point - * in copying default values around - */ - if (!root_cache->max_attr_size) - return; - - for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) { - char mbuf[64]; - char *buf; - struct slab_attribute *attr = to_slab_attr(slab_attrs[i]); - ssize_t len; - - if (!attr || !attr->store || !attr->show) - continue; - - /* - * It is really bad that we have to allocate here, so we will - * do it only as a fallback. If we actually allocate, though, - * we can just use the allocated buffer until the end. - * - * Most of the slub attributes will tend to be very small in - * size, but sysfs allows buffers up to a page, so they can - * theoretically happen. 
- */ - if (buffer) - buf = buffer; - else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf) && - !IS_ENABLED(CONFIG_SLUB_STATS)) - buf = mbuf; - else { - buffer = (char *) get_zeroed_page(GFP_KERNEL); - if (WARN_ON(!buffer)) - continue; - buf = buffer; - } - - len = attr->show(root_cache, buf); - if (len > 0) - attr->store(s, buf, len); - } - - if (buffer) - free_page((unsigned long)buffer); -#endif /* CONFIG_MEMCG */ -} - static void kmem_cache_release(struct kobject *k) { slab_kmem_cache_release(to_slab(k)); @@ -5648,10 +5543,6 @@ static struct kset *slab_kset; static inline struct kset *cache_kset(struct kmem_cache *s) { -#ifdef CONFIG_MEMCG - if (!is_root_cache(s)) - return s->memcg_params.root_cache->memcg_kset; -#endif return slab_kset; } @@ -5694,27 +5585,6 @@ static char *create_unique_id(struct kmem_cache *s) return name; } -static void sysfs_slab_remove_workfn(struct work_struct *work) -{ - struct kmem_cache *s = - container_of(work, struct kmem_cache, kobj_remove_work); - - if (!s->kobj.state_in_sysfs) - /* - * For a memcg cache, this may be called during - * deactivation and again on shutdown. Remove only once. - * A cache is never shut down before deactivation is - * complete, so no need to worry about synchronization. - */ - goto out; - -#ifdef CONFIG_MEMCG - kset_unregister(s->memcg_kset); -#endif -out: - kobject_put(&s->kobj); -} - static int sysfs_slab_add(struct kmem_cache *s) { int err; @@ -5722,8 +5592,6 @@ static int sysfs_slab_add(struct kmem_cache *s) struct kset *kset = cache_kset(s); int unmergeable = slab_unmergeable(s); - INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn); - if (!kset) { kobject_init(&s->kobj, &slab_ktype); return 0; @@ -5760,16 +5628,6 @@ static int sysfs_slab_add(struct kmem_cache *s) if (err) goto out_del_kobj; -#ifdef CONFIG_MEMCG - if (is_root_cache(s) && memcg_sysfs_enabled) { - s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj); - if (!s->memcg_kset) { - err = -ENOMEM; - goto out_del_kobj; - } - } -#endif - if (!unmergeable) { /* Setup first alias */ sysfs_slab_alias(s, s->name); @@ -5783,19 +5641,6 @@ out_del_kobj: goto out; } -static void sysfs_slab_remove(struct kmem_cache *s) -{ - if (slab_state < FULL) - /* - * Sysfs has not been setup yet so no need to remove the - * cache from sysfs. - */ - return; - - kobject_get(&s->kobj); - schedule_work(&s->kobj_remove_work); -} - void sysfs_slab_unlink(struct kmem_cache *s) { if (slab_state >= FULL) -- cgit v1.2.3 From 991e7673859ed41e7ba83c8c4e57afe8cfebe314 Mon Sep 17 00:00:00 2001 From: Shakeel Butt Date: Thu, 6 Aug 2020 23:21:37 -0700 Subject: mm: memcontrol: account kernel stack per node Currently the kernel stack is being accounted per-zone. There is no need to do that. In addition due to being per-zone, memcg has to keep a separate MEMCG_KERNEL_STACK_KB. Make the stat per-node and deprecate MEMCG_KERNEL_STACK_KB as memcg_stat_item is an extension of node_stat_item. In addition localize the kernel stack stats updates to account_kernel_stack(). 
Signed-off-by: Shakeel Butt Signed-off-by: Andrew Morton Reviewed-by: Roman Gushchin Cc: Johannes Weiner Cc: Michal Hocko Link: http://lkml.kernel.org/r/20200630161539.1759185-1-shakeelb@google.com Signed-off-by: Linus Torvalds --- drivers/base/node.c | 4 ++-- fs/proc/meminfo.c | 4 ++-- include/linux/memcontrol.h | 21 +++++++++++++++++-- include/linux/mmzone.h | 8 ++++---- kernel/fork.c | 51 +++++++++++++--------------------------------- kernel/scs.c | 2 +- mm/memcontrol.c | 2 +- mm/page_alloc.c | 16 +++++++-------- mm/vmstat.c | 8 ++++---- 9 files changed, 55 insertions(+), 61 deletions(-) (limited to 'include') diff --git a/drivers/base/node.c b/drivers/base/node.c index 0cf13e31603c..508b80f6329b 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -440,9 +440,9 @@ static ssize_t node_read_meminfo(struct device *dev, nid, K(node_page_state(pgdat, NR_FILE_MAPPED)), nid, K(node_page_state(pgdat, NR_ANON_MAPPED)), nid, K(i.sharedram), - nid, sum_zone_node_page_state(nid, NR_KERNEL_STACK_KB), + nid, node_page_state(pgdat, NR_KERNEL_STACK_KB), #ifdef CONFIG_SHADOW_CALL_STACK - nid, sum_zone_node_page_state(nid, NR_KERNEL_SCS_KB), + nid, node_page_state(pgdat, NR_KERNEL_SCS_KB), #endif nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)), nid, 0UL, diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index 38ea95fd919a..2a4c58f70fb9 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -101,10 +101,10 @@ static int meminfo_proc_show(struct seq_file *m, void *v) show_val_kb(m, "SReclaimable: ", sreclaimable); show_val_kb(m, "SUnreclaim: ", sunreclaim); seq_printf(m, "KernelStack: %8lu kB\n", - global_zone_page_state(NR_KERNEL_STACK_KB)); + global_node_page_state(NR_KERNEL_STACK_KB)); #ifdef CONFIG_SHADOW_CALL_STACK seq_printf(m, "ShadowCallStack:%8lu kB\n", - global_zone_page_state(NR_KERNEL_SCS_KB)); + global_node_page_state(NR_KERNEL_SCS_KB)); #endif show_val_kb(m, "PageTables: ", global_zone_page_state(NR_PAGETABLE)); diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 5a8b62d075e6..624400c27eba 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -32,8 +32,6 @@ struct kmem_cache; enum memcg_stat_item { MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS, MEMCG_SOCK, - /* XXX: why are these zone and not node counters? 
*/ - MEMCG_KERNEL_STACK_KB, MEMCG_NR_STAT, }; @@ -729,8 +727,19 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val); void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val); + void mod_memcg_obj_state(void *p, int idx, int val); +static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx, + int val) +{ + unsigned long flags; + + local_irq_save(flags); + __mod_lruvec_slab_state(p, idx, val); + local_irq_restore(flags); +} + static inline void mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val) { @@ -1151,6 +1160,14 @@ static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, __mod_node_page_state(page_pgdat(page), idx, val); } +static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx, + int val) +{ + struct page *page = virt_to_head_page(p); + + mod_node_page_state(page_pgdat(page), idx, val); +} + static inline void mod_memcg_obj_state(void *p, int idx, int val) { } diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index b79100edd228..a3bd54139a30 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -155,10 +155,6 @@ enum zone_stat_item { NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */ NR_MLOCK, /* mlock()ed pages found and moved off LRU */ NR_PAGETABLE, /* used for pagetables */ - NR_KERNEL_STACK_KB, /* measured in KiB */ -#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK) - NR_KERNEL_SCS_KB, /* measured in KiB */ -#endif /* Second 128 byte cacheline */ NR_BOUNCE, #if IS_ENABLED(CONFIG_ZSMALLOC) @@ -203,6 +199,10 @@ enum node_stat_item { NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */ NR_FOLL_PIN_ACQUIRED, /* via: pin_user_page(), gup flag: FOLL_PIN */ NR_FOLL_PIN_RELEASED, /* pages returned via unpin_user_page() */ + NR_KERNEL_STACK_KB, /* measured in KiB */ +#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK) + NR_KERNEL_SCS_KB, /* measured in KiB */ +#endif NR_VM_NODE_STAT_ITEMS }; diff --git a/kernel/fork.c b/kernel/fork.c index 76d3f3387554..c7b4ce9d2647 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -276,13 +276,8 @@ static inline void free_thread_stack(struct task_struct *tsk) if (vm) { int i; - for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) { - mod_memcg_page_state(vm->pages[i], - MEMCG_KERNEL_STACK_KB, - -(int)(PAGE_SIZE / 1024)); - + for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) memcg_kmem_uncharge_page(vm->pages[i], 0); - } for (i = 0; i < NR_CACHED_STACKS; i++) { if (this_cpu_cmpxchg(cached_stacks[i], @@ -382,31 +377,14 @@ static void account_kernel_stack(struct task_struct *tsk, int account) void *stack = task_stack_page(tsk); struct vm_struct *vm = task_stack_vm_area(tsk); - BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0); - - if (vm) { - int i; - - BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE); - for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) { - mod_zone_page_state(page_zone(vm->pages[i]), - NR_KERNEL_STACK_KB, - PAGE_SIZE / 1024 * account); - } - } else { - /* - * All stack pages are in the same zone and belong to the - * same memcg. - */ - struct page *first_page = virt_to_page(stack); - - mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB, - THREAD_SIZE / 1024 * account); - - mod_memcg_obj_state(stack, MEMCG_KERNEL_STACK_KB, - account * (THREAD_SIZE / 1024)); - } + /* All stack pages are in the same node. 
*/ + if (vm) + mod_lruvec_page_state(vm->pages[0], NR_KERNEL_STACK_KB, + account * (THREAD_SIZE / 1024)); + else + mod_lruvec_slab_state(stack, NR_KERNEL_STACK_KB, + account * (THREAD_SIZE / 1024)); } static int memcg_charge_kernel_stack(struct task_struct *tsk) @@ -415,24 +393,23 @@ static int memcg_charge_kernel_stack(struct task_struct *tsk) struct vm_struct *vm = task_stack_vm_area(tsk); int ret; + BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0); + if (vm) { int i; + BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE); + for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) { /* * If memcg_kmem_charge_page() fails, page->mem_cgroup - * pointer is NULL, and both memcg_kmem_uncharge_page() - * and mod_memcg_page_state() in free_thread_stack() - * will ignore this page. So it's safe. + * pointer is NULL, and memcg_kmem_uncharge_page() in + * free_thread_stack() will ignore this page. */ ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL, 0); if (ret) return ret; - - mod_memcg_page_state(vm->pages[i], - MEMCG_KERNEL_STACK_KB, - PAGE_SIZE / 1024); } } #endif diff --git a/kernel/scs.c b/kernel/scs.c index 5d4d9bbdec36..4ff4a7ba0094 100644 --- a/kernel/scs.c +++ b/kernel/scs.c @@ -17,7 +17,7 @@ static void __scs_account(void *s, int account) { struct page *scs_page = virt_to_page(s); - mod_zone_page_state(page_zone(scs_page), NR_KERNEL_SCS_KB, + mod_node_page_state(page_pgdat(scs_page), NR_KERNEL_SCS_KB, account * (SCS_SIZE / SZ_1K)); } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 473f9b91d51f..a3e963366769 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1485,7 +1485,7 @@ static char *memory_stat_format(struct mem_cgroup *memcg) (u64)memcg_page_state(memcg, NR_FILE_PAGES) * PAGE_SIZE); seq_buf_printf(&s, "kernel_stack %llu\n", - (u64)memcg_page_state(memcg, MEMCG_KERNEL_STACK_KB) * + (u64)memcg_page_state(memcg, NR_KERNEL_STACK_KB) * 1024); seq_buf_printf(&s, "slab %llu\n", (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B) + diff --git a/mm/page_alloc.c b/mm/page_alloc.c index f9ad093814d2..8d5d8526c2f3 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5396,6 +5396,10 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask) " anon_thp: %lukB" #endif " writeback_tmp:%lukB" + " kernel_stack:%lukB" +#ifdef CONFIG_SHADOW_CALL_STACK + " shadow_call_stack:%lukB" +#endif " all_unreclaimable? %s" "\n", pgdat->node_id, @@ -5417,6 +5421,10 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask) K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR), #endif K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), + node_page_state(pgdat, NR_KERNEL_STACK_KB), +#ifdef CONFIG_SHADOW_CALL_STACK + node_page_state(pgdat, NR_KERNEL_SCS_KB), +#endif pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ? 
"yes" : "no"); } @@ -5448,10 +5456,6 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask) " present:%lukB" " managed:%lukB" " mlocked:%lukB" - " kernel_stack:%lukB" -#ifdef CONFIG_SHADOW_CALL_STACK - " shadow_call_stack:%lukB" -#endif " pagetables:%lukB" " bounce:%lukB" " free_pcp:%lukB" @@ -5473,10 +5477,6 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask) K(zone->present_pages), K(zone_managed_pages(zone)), K(zone_page_state(zone, NR_MLOCK)), - zone_page_state(zone, NR_KERNEL_STACK_KB), -#ifdef CONFIG_SHADOW_CALL_STACK - zone_page_state(zone, NR_KERNEL_SCS_KB), -#endif K(zone_page_state(zone, NR_PAGETABLE)), K(zone_page_state(zone, NR_BOUNCE)), K(free_pcp), diff --git a/mm/vmstat.c b/mm/vmstat.c index b171a76bfe83..2b866cbab11d 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1140,10 +1140,6 @@ const char * const vmstat_text[] = { "nr_zone_write_pending", "nr_mlock", "nr_page_table_pages", - "nr_kernel_stack", -#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK) - "nr_shadow_call_stack", -#endif "nr_bounce", #if IS_ENABLED(CONFIG_ZSMALLOC) "nr_zspages", @@ -1194,6 +1190,10 @@ const char * const vmstat_text[] = { "nr_kernel_misc_reclaimable", "nr_foll_pin_acquired", "nr_foll_pin_released", + "nr_kernel_stack", +#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK) + "nr_shadow_call_stack", +#endif /* enum writeback_stat_item counters */ "nr_dirty_threshold", -- cgit v1.2.3 From eda330e57b26df8fabce184736ae3d11e7a104bd Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 6 Aug 2020 23:21:47 -0700 Subject: mm: kmem: switch to static_branch_likely() in memcg_kmem_enabled() Currently memcg_kmem_enabled() is optimized for the kernel memory accounting being off. It was so for a long time, and arguably the reason behind was that the kernel memory accounting was initially an opt-in feature. However, now it's on by default on both cgroup v1 and cgroup v2, and it's on for all cgroups. So let's switch over to static_branch_likely() to reflect this fact. Unlikely there is a significant performance difference, as the cost of a memory allocation and its accounting significantly exceeds the cost of a jump. However, the conversion makes the code look more logically. Signed-off-by: Roman Gushchin Signed-off-by: Andrew Morton Reviewed-by: Shakeel Butt Acked-by: Vlastimil Babka Cc: Johannes Weiner Cc: Michal Hocko Cc: Christoph Lameter Cc: David Rientjes Cc: Joonsoo Kim Cc: Pekka Enberg Link: http://lkml.kernel.org/r/20200707173612.124425-3-guro@fb.com Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 624400c27eba..3f8ff6519c9d 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1448,7 +1448,7 @@ void memcg_put_cache_ids(void); static inline bool memcg_kmem_enabled(void) { - return static_branch_unlikely(&memcg_kmem_enabled_key); + return static_branch_likely(&memcg_kmem_enabled_key); } static inline bool memcg_kmem_bypass(void) -- cgit v1.2.3 From 22f7496f0b901249f23c5251eb8a10aae126b909 Mon Sep 17 00:00:00 2001 From: Yafang Shao Date: Thu, 6 Aug 2020 23:22:01 -0700 Subject: mm, memcg: avoid stale protection values when cgroup is above protection Patch series "mm, memcg: memory.{low,min} reclaim fix & cleanup", v4. This series contains a fix for a edge case in my earlier protection calculation patches, and a patch to make the area overall a little more robust to hopefully help avoid this in future. 
This patch (of 2): A cgroup can have both memory protection and a memory limit to isolate it from its siblings in both directions - for example, to prevent it from being shrunk below 2G under high pressure from outside, but also from growing beyond 4G under low pressure. Commit 9783aa9917f8 ("mm, memcg: proportional memory.{low,min} reclaim") implemented proportional scan pressure so that multiple siblings in excess of their protection settings don't get reclaimed equally but instead in accordance to their unprotected portion. During limit reclaim, this proportionality shouldn't apply of course: there is no competition, all pressure is from within the cgroup and should be applied as such. Reclaim should operate at full efficiency. However, mem_cgroup_protected() never expected anybody to look at the effective protection values when it indicated that the cgroup is above its protection. As a result, a query during limit reclaim may return stale protection values that were calculated by a previous reclaim cycle in which the cgroup did have siblings. When this happens, reclaim is unnecessarily hesitant and potentially slow to meet the desired limit. In theory this could lead to premature OOM kills, although it's not obvious this has occurred in practice. Workaround the problem by special casing reclaim roots in mem_cgroup_protection. These memcgs are never participating in the reclaim protection because the reclaim is internal. We have to ignore effective protection values for reclaim roots because mem_cgroup_protected might be called from racing reclaim contexts with different roots. Calculation is relying on root -> leaf tree traversal therefore top-down reclaim protection invariants should hold. The only exception is the reclaim root which should have effective protection set to 0 but that would be problematic for the following setup: Let's have global and A's reclaim in parallel: | A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G) |\ | C (low = 1G, usage = 2.5G) B (low = 1G, usage = 0.5G) for A reclaim we have B.elow = B.low C.elow = C.low For the global reclaim A.elow = A.low B.elow = min(B.usage, B.low) because children_low_usage <= A.elow C.elow = min(C.usage, C.low) With the effective values resetting we have A reclaim A.elow = 0 B.elow = B.low C.elow = C.low and global reclaim could see the above and then B.elow = C.elow = 0 because children_low_usage > A.elow Which means that protected memcgs would get reclaimed. In future we would like to make mem_cgroup_protected more robust against racing reclaim contexts but that is likely more complex solution than this simple workaround. 
[hannes@cmpxchg.org - large part of the changelog] [mhocko@suse.com - workaround explanation] [chris@chrisdown.name - retitle] Fixes: 9783aa9917f8 ("mm, memcg: proportional memory.{low,min} reclaim") Signed-off-by: Yafang Shao Signed-off-by: Chris Down Signed-off-by: Andrew Morton Acked-by: Michal Hocko Acked-by: Johannes Weiner Acked-by: Chris Down Acked-by: Roman Gushchin Link: http://lkml.kernel.org/r/cover.1594638158.git.chris@chrisdown.name Link: http://lkml.kernel.org/r/044fb8ecffd001c7905d27c0c2ad998069fdc396.1594638158.git.chris@chrisdown.name Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 42 ++++++++++++++++++++++++++++++++++++++++-- mm/memcontrol.c | 8 ++++++++ mm/vmscan.c | 3 ++- 3 files changed, 50 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 3f8ff6519c9d..0301b231fd02 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -355,12 +355,49 @@ static inline bool mem_cgroup_disabled(void) return !cgroup_subsys_enabled(memory_cgrp_subsys); } -static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg, +static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg, bool in_low_reclaim) { if (mem_cgroup_disabled()) return 0; + /* + * There is no reclaim protection applied to a targeted reclaim. + * We are special casing this specific case here because + * mem_cgroup_protected calculation is not robust enough to keep + * the protection invariant for calculated effective values for + * parallel reclaimers with different reclaim target. This is + * especially a problem for tail memcgs (as they have pages on LRU) + * which would want to have effective values 0 for targeted reclaim + * but a different value for external reclaim. + * + * Example + * Let's have global and A's reclaim in parallel: + * | + * A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G) + * |\ + * | C (low = 1G, usage = 2.5G) + * B (low = 1G, usage = 0.5G) + * + * For the global reclaim + * A.elow = A.low + * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow + * C.elow = min(C.usage, C.low) + * + * With the effective values resetting we have A reclaim + * A.elow = 0 + * B.elow = B.low + * C.elow = C.low + * + * If the global reclaim races with A's reclaim then + * B.elow = C.elow = 0 because children_low_usage > A.elow) + * is possible and reclaiming B would be violating the protection. + * + */ + if (root == memcg) + return 0; + if (in_low_reclaim) return READ_ONCE(memcg->memory.emin); @@ -891,7 +928,8 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm, { } -static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg, +static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg, bool in_low_reclaim) { return 0; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index b4cb1bb61d46..c610617bb19a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -6605,6 +6605,14 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root, if (!root) root = root_mem_cgroup; + + /* + * Effective values of the reclaim targets are ignored so they + * can be stale. Have a look at mem_cgroup_protection for more + * details. + * TODO: calculation should be more robust so that we do not need + * that special casing. 
+ */ if (memcg == root) return MEMCG_PROT_NONE; diff --git a/mm/vmscan.c b/mm/vmscan.c index 2ac43664aba4..9f0811d24255 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2331,7 +2331,8 @@ out: unsigned long protection; lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx); - protection = mem_cgroup_protection(memcg, + protection = mem_cgroup_protection(sc->target_mem_cgroup, + memcg, sc->memcg_low_reclaim); if (protection) { -- cgit v1.2.3 From 45c7f7e1ef17f09fe70bad4b705ce43772153fd7 Mon Sep 17 00:00:00 2001 From: Chris Down Date: Thu, 6 Aug 2020 23:22:05 -0700 Subject: mm, memcg: decouple e{low,min} state mutations from protection checks mem_cgroup_protected currently is both used to set effective low and min and return a mem_cgroup_protection based on the result. As a user, this can be a little unexpected: it appears to be a simple predicate function, if not for the big warning in the comment above about the order in which it must be executed. This change makes it so that we separate the state mutations from the actual protection checks, which makes it more obvious where we need to be careful mutating internal state, and where we are simply checking and don't need to worry about that. [mhocko@suse.com - don't check protection on root memcgs] Suggested-by: Johannes Weiner Signed-off-by: Chris Down Signed-off-by: Andrew Morton Acked-by: Johannes Weiner Acked-by: Michal Hocko Cc: Roman Gushchin Cc: Yafang Shao Link: http://lkml.kernel.org/r/ff3f915097fcee9f6d7041c084ef92d16aaeb56a.1594638158.git.chris@chrisdown.name Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 53 ++++++++++++++++++++++++++++++++++++---------- mm/memcontrol.c | 28 ++++++------------------ mm/vmscan.c | 17 ++++----------- 3 files changed, 53 insertions(+), 45 deletions(-) (limited to 'include') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 0301b231fd02..1bb49b600310 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -47,12 +47,6 @@ enum memcg_memory_event { MEMCG_NR_MEMORY_EVENTS, }; -enum mem_cgroup_protection { - MEMCG_PROT_NONE, - MEMCG_PROT_LOW, - MEMCG_PROT_MIN, -}; - struct mem_cgroup_reclaim_cookie { pg_data_t *pgdat; unsigned int generation; @@ -405,8 +399,36 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, READ_ONCE(memcg->memory.elow)); } -enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root, - struct mem_cgroup *memcg); +void mem_cgroup_calculate_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg); + +static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg) +{ + /* + * The root memcg doesn't account charges, and doesn't support + * protection. 
+ */ + return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg); + +} + +static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg) +{ + if (!mem_cgroup_supports_protection(memcg)) + return false; + + return READ_ONCE(memcg->memory.elow) >= + page_counter_read(&memcg->memory); +} + +static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg) +{ + if (!mem_cgroup_supports_protection(memcg)) + return false; + + return READ_ONCE(memcg->memory.emin) >= + page_counter_read(&memcg->memory); +} int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask); @@ -935,10 +957,19 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, return 0; } -static inline enum mem_cgroup_protection mem_cgroup_protected( - struct mem_cgroup *root, struct mem_cgroup *memcg) +static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg) { - return MEMCG_PROT_NONE; +} + +static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg) +{ + return false; +} + +static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg) +{ + return false; } static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm, diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c610617bb19a..b30a52db6b2d 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -6587,21 +6587,15 @@ static unsigned long effective_protection(unsigned long usage, * * WARNING: This function is not stateless! It can only be used as part * of a top-down tree iteration, not for isolated queries. - * - * Returns one of the following: - * MEMCG_PROT_NONE: cgroup memory is not protected - * MEMCG_PROT_LOW: cgroup memory is protected as long there is - * an unprotected supply of reclaimable memory from other cgroups. - * MEMCG_PROT_MIN: cgroup memory is protected */ -enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root, - struct mem_cgroup *memcg) +void mem_cgroup_calculate_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg) { unsigned long usage, parent_usage; struct mem_cgroup *parent; if (mem_cgroup_disabled()) - return MEMCG_PROT_NONE; + return; if (!root) root = root_mem_cgroup; @@ -6614,21 +6608,21 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root, * that special casing. 
*/ if (memcg == root) - return MEMCG_PROT_NONE; + return; usage = page_counter_read(&memcg->memory); if (!usage) - return MEMCG_PROT_NONE; + return; parent = parent_mem_cgroup(memcg); /* No parent means a non-hierarchical mode on v1 memcg */ if (!parent) - return MEMCG_PROT_NONE; + return; if (parent == root) { memcg->memory.emin = READ_ONCE(memcg->memory.min); memcg->memory.elow = READ_ONCE(memcg->memory.low); - goto out; + return; } parent_usage = page_counter_read(&parent->memory); @@ -6642,14 +6636,6 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root, READ_ONCE(memcg->memory.low), READ_ONCE(parent->memory.elow), atomic_long_read(&parent->memory.children_low_usage))); - -out: - if (usage <= memcg->memory.emin) - return MEMCG_PROT_MIN; - else if (usage <= memcg->memory.elow) - return MEMCG_PROT_LOW; - else - return MEMCG_PROT_NONE; } /** diff --git a/mm/vmscan.c b/mm/vmscan.c index 9f0811d24255..5747867f0082 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2620,14 +2620,15 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc) unsigned long reclaimed; unsigned long scanned; - switch (mem_cgroup_protected(target_memcg, memcg)) { - case MEMCG_PROT_MIN: + mem_cgroup_calculate_protection(target_memcg, memcg); + + if (mem_cgroup_below_min(memcg)) { /* * Hard protection. * If there is no reclaimable memory, OOM. */ continue; - case MEMCG_PROT_LOW: + } else if (mem_cgroup_below_low(memcg)) { /* * Soft protection. * Respect the protection only as long as @@ -2639,16 +2640,6 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc) continue; } memcg_memory_event(memcg, MEMCG_LOW); - break; - case MEMCG_PROT_NONE: - /* - * All protection thresholds breached. We may - * still choose to vary the scan pressure - * applied based on by how much the cgroup in - * question has exceeded its protection - * thresholds (see get_scan_count). - */ - break; } reclaimed = sc->nr_reclaimed; -- cgit v1.2.3
From ca15ca406f660ad5fab55b851d2b269ce915c88d Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Thu, 6 Aug 2020 23:22:28 -0700 Subject: mm: remove unneeded includes of <asm/pgalloc.h>

Patch series "mm: cleanup usage of <asm/pgalloc.h>"

Most architectures have very similar versions of pXd_alloc_one() and pXd_free_one() for intermediate levels of page table. These patches add generic versions of these functions in <asm-generic/pgalloc.h> and enable use of the generic functions where appropriate.

In addition, functions declared and defined in <asm/pgalloc.h> headers are used mostly by core mm and early mm initialization in arch, and there is no actual reason to have <asm/pgalloc.h> included all over the place. The first patch in this series removes unneeded includes of <asm/pgalloc.h>.

In the end it didn't work out as neatly as I hoped and moving pXd_alloc_track() definitions to <asm-generic/pgalloc.h> would require unnecessary changes to arches that have custom page table allocations, so I've decided to move lib/ioremap.c to mm/ and make pgalloc-track.h local to mm/.

This patch (of 8): In most cases the <asm/pgalloc.h> header is required only for allocations of page table memory. Most of the .c files that include that header do not use symbols declared in <asm/pgalloc.h> and do not require that header.

As for the other header files that used to include <asm/pgalloc.h>, it is possible to move that include into the .c file that actually uses symbols from <asm/pgalloc.h> and drop the include from the header file.

The process was somewhat automated using

 sed -i -E '/[<"]asm\/pgalloc\.h/d' \
 $(grep -L -w -f /tmp/xx \
 $(git grep -E -l '[<"]asm/pgalloc\.h'))

where /tmp/xx contains all the symbols defined in arch/*/include/asm/pgalloc.h.
[rppt@linux.ibm.com: fix powerpc warning] Signed-off-by: Mike Rapoport Signed-off-by: Andrew Morton Reviewed-by: Pekka Enberg Acked-by: Geert Uytterhoeven [m68k] Cc: Abdul Haleem Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Christophe Leroy Cc: Joerg Roedel Cc: Max Filippov Cc: Peter Zijlstra Cc: Satheesh Rajendran Cc: Stafford Horne Cc: Stephen Rothwell Cc: Steven Rostedt Cc: Joerg Roedel Cc: Matthew Wilcox Link: http://lkml.kernel.org/r/20200627143453.31835-1-rppt@kernel.org Link: http://lkml.kernel.org/r/20200627143453.31835-2-rppt@kernel.org Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/tlbflush.h | 1 - arch/alpha/kernel/core_irongate.c | 1 - arch/alpha/kernel/core_marvel.c | 1 - arch/alpha/kernel/core_titan.c | 1 - arch/alpha/kernel/machvec_impl.h | 2 -- arch/alpha/kernel/smp.c | 1 - arch/alpha/mm/numa.c | 1 - arch/arc/mm/fault.c | 1 - arch/arc/mm/init.c | 1 - arch/arm/include/asm/tlb.h | 1 - arch/arm/kernel/machine_kexec.c | 1 - arch/arm/kernel/smp.c | 1 - arch/arm/kernel/suspend.c | 1 - arch/arm/mach-omap2/omap-mpuss-lowpower.c | 1 - arch/arm/mm/hugetlbpage.c | 1 - arch/arm/mm/mmu.c | 1 + arch/arm64/kernel/smp.c | 1 - arch/arm64/mm/hugetlbpage.c | 1 - arch/arm64/mm/ioremap.c | 1 - arch/arm64/mm/mmu.c | 1 + arch/csky/kernel/smp.c | 1 - arch/ia64/include/asm/tlb.h | 1 - arch/ia64/kernel/process.c | 1 - arch/ia64/kernel/smp.c | 1 - arch/ia64/kernel/smpboot.c | 1 - arch/ia64/mm/contig.c | 1 - arch/ia64/mm/discontig.c | 1 - arch/ia64/mm/hugetlbpage.c | 1 - arch/ia64/mm/tlb.c | 1 - arch/m68k/include/asm/mmu_context.h | 2 +- arch/m68k/kernel/dma.c | 2 +- arch/m68k/kernel/traps.c | 3 +-- arch/m68k/mm/cache.c | 2 +- arch/m68k/mm/fault.c | 1 - arch/m68k/mm/kmap.c | 2 +- arch/m68k/mm/mcfmmu.c | 1 + arch/m68k/mm/memory.c | 1 - arch/m68k/sun3x/dvma.c | 2 +- arch/microblaze/include/asm/tlbflush.h | 1 - arch/microblaze/kernel/process.c | 1 - arch/microblaze/kernel/signal.c | 1 - arch/mips/sgi-ip32/ip32-memory.c | 1 - arch/openrisc/include/asm/tlbflush.h | 1 - arch/openrisc/kernel/or32_ksyms.c | 1 - arch/parisc/include/asm/mmu_context.h | 1 - arch/parisc/kernel/cache.c | 1 - arch/parisc/kernel/pci-dma.c | 1 - arch/parisc/kernel/process.c | 1 - arch/parisc/kernel/signal.c | 1 - arch/parisc/kernel/smp.c | 1 - arch/parisc/mm/hugetlbpage.c | 1 - arch/parisc/mm/ioremap.c | 2 +- arch/powerpc/include/asm/tlb.h | 1 - arch/powerpc/mm/book3s64/hash_hugetlbpage.c | 1 - arch/powerpc/mm/book3s64/hash_pgtable.c | 1 - arch/powerpc/mm/book3s64/hash_tlb.c | 1 - arch/powerpc/mm/book3s64/radix_hugetlbpage.c | 1 - arch/powerpc/mm/init_32.c | 1 - arch/powerpc/mm/kasan/8xx.c | 1 - arch/powerpc/mm/kasan/book3s_32.c | 1 - arch/powerpc/mm/mem.c | 1 - arch/powerpc/mm/nohash/40x.c | 1 - arch/powerpc/mm/nohash/8xx.c | 1 - arch/powerpc/mm/nohash/fsl_booke.c | 1 - arch/powerpc/mm/nohash/kaslr_booke.c | 1 - arch/powerpc/mm/nohash/tlb.c | 1 + arch/powerpc/mm/pgtable.c | 1 - arch/powerpc/mm/pgtable_64.c | 1 - arch/powerpc/mm/ptdump/hashpagetable.c | 2 +- arch/powerpc/mm/ptdump/ptdump.c | 1 - arch/powerpc/platforms/pseries/cmm.c | 1 - arch/riscv/mm/fault.c | 1 - arch/s390/include/asm/tlb.h | 1 - arch/s390/include/asm/tlbflush.h | 1 - arch/s390/kernel/machine_kexec.c | 1 - arch/s390/kernel/ptrace.c | 1 - arch/s390/kvm/diag.c | 1 - arch/s390/kvm/priv.c | 1 - arch/s390/kvm/pv.c | 1 - arch/s390/mm/cmm.c | 1 - arch/s390/mm/mmap.c | 1 - arch/s390/mm/pgtable.c | 1 - arch/sh/kernel/idle.c | 1 - arch/sh/kernel/machine_kexec.c | 1 - arch/sh/mm/cache-sh3.c | 1 - arch/sh/mm/cache-sh7705.c | 1 - arch/sh/mm/hugetlbpage.c | 1 - 
arch/sh/mm/init.c | 1 + arch/sh/mm/ioremap_fixed.c | 1 - arch/sh/mm/tlb-sh3.c | 1 - arch/sparc/include/asm/ide.h | 1 - arch/sparc/include/asm/tlb_64.h | 1 - arch/sparc/kernel/leon_smp.c | 1 - arch/sparc/kernel/process_32.c | 1 - arch/sparc/kernel/signal_32.c | 1 - arch/sparc/kernel/smp_32.c | 1 - arch/sparc/kernel/smp_64.c | 1 + arch/sparc/kernel/sun4m_irq.c | 1 - arch/sparc/mm/highmem.c | 1 - arch/sparc/mm/io-unit.c | 1 - arch/sparc/mm/iommu.c | 1 - arch/sparc/mm/tlb.c | 1 - arch/x86/ia32/ia32_aout.c | 1 - arch/x86/include/asm/mmu_context.h | 1 - arch/x86/kernel/alternative.c | 1 + arch/x86/kernel/apic/apic.c | 1 - arch/x86/kernel/mpparse.c | 1 - arch/x86/kernel/traps.c | 1 - arch/x86/mm/fault.c | 1 - arch/x86/mm/hugetlbpage.c | 1 - arch/x86/mm/kaslr.c | 1 - arch/x86/mm/pgtable_32.c | 1 - arch/x86/mm/pti.c | 1 - arch/x86/platform/uv/bios_uv.c | 1 + arch/xtensa/kernel/xtensa_ksyms.c | 1 - arch/xtensa/mm/cache.c | 1 - arch/xtensa/mm/fault.c | 1 - drivers/block/xen-blkback/common.h | 1 - drivers/iommu/ipmmu-vmsa.c | 1 - drivers/xen/balloon.c | 1 - drivers/xen/privcmd.c | 1 - fs/binfmt_elf_fdpic.c | 1 - include/asm-generic/tlb.h | 1 - mm/hugetlb.c | 1 + mm/sparse.c | 1 - 125 files changed, 17 insertions(+), 118 deletions(-) (limited to 'include') diff --git a/arch/alpha/include/asm/tlbflush.h b/arch/alpha/include/asm/tlbflush.h index f8b492408f51..94dc37cf873a 100644 --- a/arch/alpha/include/asm/tlbflush.h +++ b/arch/alpha/include/asm/tlbflush.h @@ -5,7 +5,6 @@ #include #include #include -#include #ifndef __EXTERN_INLINE #define __EXTERN_INLINE extern inline diff --git a/arch/alpha/kernel/core_irongate.c b/arch/alpha/kernel/core_irongate.c index a9fd133a7fb2..72af1e72d833 100644 --- a/arch/alpha/kernel/core_irongate.c +++ b/arch/alpha/kernel/core_irongate.c @@ -302,7 +302,6 @@ irongate_init_arch(void) #include #include #include -#include #define GET_PAGE_DIR_OFF(addr) (addr >> 22) #define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr)) diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c index 1db9d0eb2922..4c80d992a659 100644 --- a/arch/alpha/kernel/core_marvel.c +++ b/arch/alpha/kernel/core_marvel.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include diff --git a/arch/alpha/kernel/core_titan.c b/arch/alpha/kernel/core_titan.c index 2a2820fb1be6..77f5d68ed04b 100644 --- a/arch/alpha/kernel/core_titan.c +++ b/arch/alpha/kernel/core_titan.c @@ -20,7 +20,6 @@ #include #include -#include #include #include diff --git a/arch/alpha/kernel/machvec_impl.h b/arch/alpha/kernel/machvec_impl.h index 38f045ec5cd2..393d5d6ca5d2 100644 --- a/arch/alpha/kernel/machvec_impl.h +++ b/arch/alpha/kernel/machvec_impl.h @@ -7,8 +7,6 @@ * This file has goodies to help simplify instantiation of machine vectors. */ -#include - /* Whee. 
These systems don't have an HAE: IRONGATE, MARVEL, POLARIS, TSUNAMI, TITAN, WILDFIRE Fix things up for the GENERIC kernel by defining the HAE address diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c index 631cc17410d1..f4dd9f3f3001 100644 --- a/arch/alpha/kernel/smp.c +++ b/arch/alpha/kernel/smp.c @@ -36,7 +36,6 @@ #include #include -#include #include #include diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c index 5ad6087de1d6..0636e254a22f 100644 --- a/arch/alpha/mm/numa.c +++ b/arch/alpha/mm/numa.c @@ -17,7 +17,6 @@ #include #include -#include #include pg_data_t node_data[MAX_NUMNODES]; diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index 72f5405a7ec5..7287c793d1c9 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c @@ -13,7 +13,6 @@ #include #include #include -#include #include /* diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index e7bdc2ac1c87..f886ac69d8ad 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index 4d4e7b6aabff..9415222b49ad 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -27,7 +27,6 @@ #else /* !CONFIG_MMU */ #include -#include #include static inline void __tlb_remove_table(void *_table) diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index 974b6c64d3e6..5d84ad333f05 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 9a6432557871..5d9da61eff62 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -37,7 +37,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c index d2c9338d74e8..24bd20564be7 100644 --- a/arch/arm/kernel/suspend.c +++ b/arch/arm/kernel/suspend.c @@ -7,7 +7,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c index 67fa28532a3a..9fba98c2313a 100644 --- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c +++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c @@ -42,7 +42,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c index a1e5aace897a..dd7a0277c5c0 100644 --- a/arch/arm/mm/hugetlbpage.c +++ b/arch/arm/mm/hugetlbpage.c @@ -17,7 +17,6 @@ #include #include #include -#include /* * On ARM, huge pages are backed by pmd's rather than pte's, so we do a lot diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index cc3c9a6a1113..c36f977b2ccb 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index e43a8ff19f0f..8059d50bc8cb 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -43,7 +43,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index aa421bf4956e..55ecf6de9ff7 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -17,7 +17,6 @@ #include #include #include -#include /* * HugeTLB Support Matrix diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c index 9be71bee902c..b5e83c46b23e 100644 
--- a/arch/arm64/mm/ioremap.c +++ b/arch/arm64/mm/ioremap.c @@ -16,7 +16,6 @@ #include #include -#include static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size, pgprot_t prot, void *caller) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 1df25f26571d..cafefb147a5e 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -35,6 +35,7 @@ #include #include #include +#include #define NO_BLOCK_MAPPINGS BIT(0) #define NO_CONT_MAPPINGS BIT(1) diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c index e7425e6b0419..041d0de6a1b6 100644 --- a/arch/csky/kernel/smp.c +++ b/arch/csky/kernel/smp.c @@ -23,7 +23,6 @@ #include #include #include -#include #ifdef CONFIG_CPU_HAS_FPU #include #endif diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h index f1f257d632b3..8d9da6f08a62 100644 --- a/arch/ia64/include/asm/tlb.h +++ b/arch/ia64/include/asm/tlb.h @@ -42,7 +42,6 @@ #include #include -#include #include #include diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 7a4de9d994c5..ec0b40f6e9c6 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -40,7 +40,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c index bbfd421e6deb..0e2742003121 100644 --- a/arch/ia64/kernel/smp.c +++ b/arch/ia64/kernel/smp.c @@ -39,7 +39,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 016683b743c2..c29c600d7967 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -49,7 +49,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c index d7d31c718d2d..e30e360beef8 100644 --- a/arch/ia64/mm/contig.c +++ b/arch/ia64/mm/contig.c @@ -21,7 +21,6 @@ #include #include -#include #include #include diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index da810ca234da..37f8b6875ac9 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c index 32352a73df0c..b331f94d20ac 100644 --- a/arch/ia64/mm/hugetlbpage.c +++ b/arch/ia64/mm/hugetlbpage.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c index 71c19918e387..135b5135cace 100644 --- a/arch/ia64/mm/tlb.c +++ b/arch/ia64/mm/tlb.c @@ -27,7 +27,6 @@ #include #include -#include #include #include #include diff --git a/arch/m68k/include/asm/mmu_context.h b/arch/m68k/include/asm/mmu_context.h index cac9f289d1f6..993fd7e37069 100644 --- a/arch/m68k/include/asm/mmu_context.h +++ b/arch/m68k/include/asm/mmu_context.h @@ -222,7 +222,7 @@ static inline void activate_mm(struct mm_struct *prev_mm, #include #include -#include +#include static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm) diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c index 871a0e11da34..b1ca3522eccc 100644 --- a/arch/m68k/kernel/dma.c +++ b/arch/m68k/kernel/dma.c @@ -15,7 +15,7 @@ #include #include -#include +#include #if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE) void arch_dma_prep_coherent(struct page *page, size_t size) diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c index df6fc782754f..546e81935fe8 100644 --- a/arch/m68k/kernel/traps.c +++ 
b/arch/m68k/kernel/traps.c @@ -35,10 +35,9 @@ #include #include #include -#include #include #include - +#include static const char *vec_names[] = { [VEC_RESETSP] = "RESET SP", diff --git a/arch/m68k/mm/cache.c b/arch/m68k/mm/cache.c index 5ecb3310e874..b486c0889eec 100644 --- a/arch/m68k/mm/cache.c +++ b/arch/m68k/mm/cache.c @@ -8,7 +8,7 @@ */ #include -#include +#include #include diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index a94a814ad6ad..508abb63da67 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -15,7 +15,6 @@ #include #include -#include extern void die_if_kernel(char *, struct pt_regs *, long); diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c index 14d31d216cef..1269d513b221 100644 --- a/arch/m68k/mm/kmap.c +++ b/arch/m68k/mm/kmap.c @@ -19,8 +19,8 @@ #include #include #include -#include #include +#include #undef DEBUG diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c index 7d04210d34f0..7068126cedc1 100644 --- a/arch/m68k/mm/mcfmmu.c +++ b/arch/m68k/mm/mcfmmu.c @@ -20,6 +20,7 @@ #include #include #include +#include #define KMAPAREA(x) ((x >= VMALLOC_START) && (x < KMAP_END)) diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c index 65e0c4071912..fe75aecfb238 100644 --- a/arch/m68k/mm/memory.c +++ b/arch/m68k/mm/memory.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include diff --git a/arch/m68k/sun3x/dvma.c b/arch/m68k/sun3x/dvma.c index fef52d222d46..08bb92113026 100644 --- a/arch/m68k/sun3x/dvma.c +++ b/arch/m68k/sun3x/dvma.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include /* IOMMU support */ diff --git a/arch/microblaze/include/asm/tlbflush.h b/arch/microblaze/include/asm/tlbflush.h index 6f8f5c77a050..1200e2bf14bb 100644 --- a/arch/microblaze/include/asm/tlbflush.h +++ b/arch/microblaze/include/asm/tlbflush.h @@ -15,7 +15,6 @@ #include /* For TASK_SIZE */ #include #include -#include extern void _tlbie(unsigned long address); extern void _tlbia(void); diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c index 6cabeab9e2ba..a9e46e525cd0 100644 --- a/arch/microblaze/kernel/process.c +++ b/arch/microblaze/kernel/process.c @@ -18,7 +18,6 @@ #include #include #include -#include #include /* for USER_DS macros */ #include diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c index bdd6d0c86e16..65bf5fd8d473 100644 --- a/arch/microblaze/kernel/signal.c +++ b/arch/microblaze/kernel/signal.c @@ -35,7 +35,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/mips/sgi-ip32/ip32-memory.c b/arch/mips/sgi-ip32/ip32-memory.c index be1b2cfc4c3e..62b956cc2d1d 100644 --- a/arch/mips/sgi-ip32/ip32-memory.c +++ b/arch/mips/sgi-ip32/ip32-memory.c @@ -14,7 +14,6 @@ #include #include #include -#include extern void crime_init(void); diff --git a/arch/openrisc/include/asm/tlbflush.h b/arch/openrisc/include/asm/tlbflush.h index 4a4639c65cbb..185dcd3731ed 100644 --- a/arch/openrisc/include/asm/tlbflush.h +++ b/arch/openrisc/include/asm/tlbflush.h @@ -17,7 +17,6 @@ #include #include -#include #include #include diff --git a/arch/openrisc/kernel/or32_ksyms.c b/arch/openrisc/kernel/or32_ksyms.c index 277ac7a55752..212e5f85004c 100644 --- a/arch/openrisc/kernel/or32_ksyms.c +++ b/arch/openrisc/kernel/or32_ksyms.c @@ -26,7 +26,6 @@ #include #include #include -#include #define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name) diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h index 
07b89c74abeb..cb5f2f730421 100644 --- a/arch/parisc/include/asm/mmu_context.h +++ b/arch/parisc/include/asm/mmu_context.h @@ -5,7 +5,6 @@ #include #include #include -#include #include static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 1eedfecc5137..b5e1d9f1b440 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index 4f1596bb1936..38c68e131bbe 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c @@ -32,7 +32,6 @@ #include /* for DMA_CHUNK_SIZE */ #include #include /* get_order */ -#include #include #include /* for purge_tlb_*() macros */ diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index de6299ff1530..86ec30cc0e77 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -47,7 +47,6 @@ #include #include #include -#include #include #include diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index 02895a8f2c55..5df5d4cd5d4c 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index f8a842ddd82d..6271139d2213 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c @@ -39,7 +39,6 @@ #include /* for CPU_IRQ_REGION and friends */ #include #include -#include #include #include #include diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c index 0e1e212f1c96..d7ba014a7fbb 100644 --- a/arch/parisc/mm/hugetlbpage.c +++ b/arch/parisc/mm/hugetlbpage.c @@ -15,7 +15,6 @@ #include #include -#include #include #include #include diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c index 6e7c005aa09b..345ff0b66499 100644 --- a/arch/parisc/mm/ioremap.c +++ b/arch/parisc/mm/ioremap.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include /* * Generic mapping function (not visible outside): diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h index 862985cf5180..fbc6f3002f23 100644 --- a/arch/powerpc/include/asm/tlb.h +++ b/arch/powerpc/include/asm/tlb.h @@ -12,7 +12,6 @@ #ifndef __powerpc64__ #include #endif -#include #ifndef __powerpc64__ #include #include diff --git a/arch/powerpc/mm/book3s64/hash_hugetlbpage.c b/arch/powerpc/mm/book3s64/hash_hugetlbpage.c index 25acb9c5ee1b..964467b3a776 100644 --- a/arch/powerpc/mm/book3s64/hash_hugetlbpage.c +++ b/arch/powerpc/mm/book3s64/hash_hugetlbpage.c @@ -10,7 +10,6 @@ #include #include -#include #include #include diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c index 2a99167afbaf..fd9c7f91b092 100644 --- a/arch/powerpc/mm/book3s64/hash_pgtable.c +++ b/arch/powerpc/mm/book3s64/hash_pgtable.c @@ -9,7 +9,6 @@ #include #include -#include #include #include #include diff --git a/arch/powerpc/mm/book3s64/hash_tlb.c b/arch/powerpc/mm/book3s64/hash_tlb.c index 0fbf3dc9f2c2..eb0bccaf221e 100644 --- a/arch/powerpc/mm/book3s64/hash_tlb.c +++ b/arch/powerpc/mm/book3s64/hash_tlb.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/powerpc/mm/book3s64/radix_hugetlbpage.c b/arch/powerpc/mm/book3s64/radix_hugetlbpage.c index c812b401b66c..cb91071eef52 100644 --- 
a/arch/powerpc/mm/book3s64/radix_hugetlbpage.c +++ b/arch/powerpc/mm/book3s64/radix_hugetlbpage.c @@ -2,7 +2,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 5a5469eb3174..7ea19dc4883b 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -29,7 +29,6 @@ #include #include -#include #include #include #include diff --git a/arch/powerpc/mm/kasan/8xx.c b/arch/powerpc/mm/kasan/8xx.c index 569d98a41881..2784224054f8 100644 --- a/arch/powerpc/mm/kasan/8xx.c +++ b/arch/powerpc/mm/kasan/8xx.c @@ -5,7 +5,6 @@ #include #include #include -#include static int __init kasan_init_shadow_8M(unsigned long k_start, unsigned long k_end, void *block) diff --git a/arch/powerpc/mm/kasan/book3s_32.c b/arch/powerpc/mm/kasan/book3s_32.c index a32b4640b9de..202bd260a009 100644 --- a/arch/powerpc/mm/kasan/book3s_32.c +++ b/arch/powerpc/mm/kasan/book3s_32.c @@ -4,7 +4,6 @@ #include #include -#include #include int __init kasan_init_region(void *start, size_t size) diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index c2c11eb8dcfc..ab12916ec1a7 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -34,7 +34,6 @@ #include #include -#include #include #include #include diff --git a/arch/powerpc/mm/nohash/40x.c b/arch/powerpc/mm/nohash/40x.c index 13e74bc39ba5..95751c322f6c 100644 --- a/arch/powerpc/mm/nohash/40x.c +++ b/arch/powerpc/mm/nohash/40x.c @@ -32,7 +32,6 @@ #include #include -#include #include #include #include diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c index 92e8929cbe3e..d2b37146ae6c 100644 --- a/arch/powerpc/mm/nohash/8xx.c +++ b/arch/powerpc/mm/nohash/8xx.c @@ -13,7 +13,6 @@ #include #include #include -#include #include diff --git a/arch/powerpc/mm/nohash/fsl_booke.c b/arch/powerpc/mm/nohash/fsl_booke.c index c06dfbb771f4..0c294827d6e5 100644 --- a/arch/powerpc/mm/nohash/fsl_booke.c +++ b/arch/powerpc/mm/nohash/fsl_booke.c @@ -37,7 +37,6 @@ #include #include -#include #include #include #include diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c index bce0e5349978..4c74e8a5482b 100644 --- a/arch/powerpc/mm/nohash/kaslr_booke.c +++ b/arch/powerpc/mm/nohash/kaslr_booke.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c index 696f568253a0..14514585db98 100644 --- a/arch/powerpc/mm/nohash/tlb.c +++ b/arch/powerpc/mm/nohash/tlb.c @@ -34,6 +34,7 @@ #include #include +#include #include #include #include diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index 1136257c3a99..9c0547d77af3 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index bb43a8c04bee..cc6e2f94517f 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -31,7 +31,6 @@ #include #include -#include #include #include #include diff --git a/arch/powerpc/mm/ptdump/hashpagetable.c b/arch/powerpc/mm/ptdump/hashpagetable.c index a2c33efc7ce8..ff4b05a9e7f0 100644 --- a/arch/powerpc/mm/ptdump/hashpagetable.c +++ b/arch/powerpc/mm/ptdump/hashpagetable.c @@ -17,10 +17,10 @@ #include #include #include -#include #include #include #include +#include struct pg_state { struct seq_file *seq; diff --git a/arch/powerpc/mm/ptdump/ptdump.c 
b/arch/powerpc/mm/ptdump/ptdump.c index de6e05ef871c..f7ba13c41d13 100644 --- a/arch/powerpc/mm/ptdump/ptdump.c +++ b/arch/powerpc/mm/ptdump/ptdump.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c index 9dba7e880885..45a3a3022a85 100644 --- a/arch/powerpc/platforms/pseries/cmm.c +++ b/arch/powerpc/platforms/pseries/cmm.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index ae7b7fe24658..5873835a3e6b 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -14,7 +14,6 @@ #include #include -#include #include #include diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index aa406c05a350..954fa8ca6cbd 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -36,7 +36,6 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, #define p4d_free_tlb p4d_free_tlb #define pud_free_tlb pud_free_tlb -#include #include #include diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h index 2204704840ea..acce6a08a1fa 100644 --- a/arch/s390/include/asm/tlbflush.h +++ b/arch/s390/include/asm/tlbflush.h @@ -5,7 +5,6 @@ #include #include #include -#include /* * Flush all TLB entries on the local CPU. diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index 93c6b8932fbd..d91989c7bd6a 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 3cc15c066298..3c72a3b77253 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index 563429dece03..5b8ec1c447e1 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c @@ -10,7 +10,6 @@ #include #include -#include #include #include #include "kvm-s390.h" diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 96ae368aa0a2..2f721a923b54 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c index 63e330109b63..eb99e2f95ebe 100644 --- a/arch/s390/kvm/pv.c +++ b/arch/s390/kvm/pv.c @@ -9,7 +9,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c index 5c15ae3daf71..1141c8d5c0d0 100644 --- a/arch/s390/mm/cmm.c +++ b/arch/s390/mm/cmm.c @@ -21,7 +21,6 @@ #include #include -#include #include #ifdef CONFIG_CMM_IUCV diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index 1b78f630a9ca..e54f928503c5 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -17,7 +17,6 @@ #include #include #include -#include #include static unsigned long stack_maxrandom_size(void) diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 2e0cc19f4cd7..0d25f743b270 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -19,7 +19,6 @@ #include #include -#include #include #include #include diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c index c20fc5487e05..0dc0f52f9bb8 100644 --- a/arch/sh/kernel/idle.c +++ b/arch/sh/kernel/idle.c @@ -14,7 +14,6 @@ #include #include #include -#include #include 
#include diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c index 4a98980b8a07..223c14f44af7 100644 --- a/arch/sh/kernel/machine_kexec.c +++ b/arch/sh/kernel/machine_kexec.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/sh/mm/cache-sh3.c b/arch/sh/mm/cache-sh3.c index 26f3bd43e850..bc595982d396 100644 --- a/arch/sh/mm/cache-sh3.c +++ b/arch/sh/mm/cache-sh3.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c index 48978293226c..4c67b3d88775 100644 --- a/arch/sh/mm/cache-sh7705.c +++ b/arch/sh/mm/cache-sh7705.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c index acd5652a0de3..220d7bc43d2b 100644 --- a/arch/sh/mm/hugetlbpage.c +++ b/arch/sh/mm/hugetlbpage.c @@ -17,7 +17,6 @@ #include #include -#include #include #include #include diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index a70ba0fdd0b3..a86ce13f392c 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -27,6 +27,7 @@ #include #include #include +#include #include pgd_t swapper_pg_dir[PTRS_PER_PGD]; diff --git a/arch/sh/mm/ioremap_fixed.c b/arch/sh/mm/ioremap_fixed.c index 07e744d75fa0..aab3f82856bb 100644 --- a/arch/sh/mm/ioremap_fixed.c +++ b/arch/sh/mm/ioremap_fixed.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c index 869243518bb3..fb400afc2a49 100644 --- a/arch/sh/mm/tlb-sh3.c +++ b/arch/sh/mm/tlb-sh3.c @@ -21,7 +21,6 @@ #include #include -#include #include #include diff --git a/arch/sparc/include/asm/ide.h b/arch/sparc/include/asm/ide.h index 499aa2e6e276..904cc6cbc155 100644 --- a/arch/sparc/include/asm/ide.h +++ b/arch/sparc/include/asm/ide.h @@ -13,7 +13,6 @@ #include #ifdef CONFIG_SPARC64 -#include #include #include #include diff --git a/arch/sparc/include/asm/tlb_64.h b/arch/sparc/include/asm/tlb_64.h index 6820d357581c..e841cae544c2 100644 --- a/arch/sparc/include/asm/tlb_64.h +++ b/arch/sparc/include/asm/tlb_64.h @@ -4,7 +4,6 @@ #include #include -#include #include #include diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c index 41829c024f92..1eed26d423fb 100644 --- a/arch/sparc/kernel/leon_smp.c +++ b/arch/sparc/kernel/leon_smp.c @@ -38,7 +38,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index bd123f1de2e7..3f519e1047b6 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c @@ -34,7 +34,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 3b005b6c3e0f..f1f8c8ebe641 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -23,7 +23,6 @@ #include #include -#include #include /* flush_sig_insns */ #include diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c index 76ce290c67cf..50c127ab46d5 100644 --- a/arch/sparc/kernel/smp_32.c +++ b/arch/sparc/kernel/smp_32.c @@ -29,7 +29,6 @@ #include #include -#include #include #include #include diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 0085e28bf019..e286e2badc8a 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include #include 
diff --git a/arch/sparc/kernel/sun4m_irq.c b/arch/sparc/kernel/sun4m_irq.c index 91b61f012d19..1079638986b5 100644 --- a/arch/sparc/kernel/sun4m_irq.c +++ b/arch/sparc/kernel/sun4m_irq.c @@ -16,7 +16,6 @@ #include #include -#include #include #include #include diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c index d1fc9a7b7d78..8f2a2afb048a 100644 --- a/arch/sparc/mm/highmem.c +++ b/arch/sparc/mm/highmem.c @@ -29,7 +29,6 @@ #include #include -#include #include static pte_t *kmap_pte; diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c index bfcc04bfce54..430a47a1b6ae 100644 --- a/arch/sparc/mm/io-unit.c +++ b/arch/sparc/mm/io-unit.c @@ -15,7 +15,6 @@ #include #include -#include #include #include #include diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c index 35b002eb312e..3a388b1c5d4b 100644 --- a/arch/sparc/mm/iommu.c +++ b/arch/sparc/mm/iommu.c @@ -16,7 +16,6 @@ #include #include -#include #include #include #include diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index a32a16c18617..20ee14739333 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -10,7 +10,6 @@ #include #include -#include #include #include #include diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index 385d3d172ee1..ca8a657edf59 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c @@ -30,7 +30,6 @@ #include #include -#include #include #include #include diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 47562147e70b..d98016b83755 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -9,7 +9,6 @@ #include -#include #include #include #include diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index c826cddae157..d1175533d125 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index e0e2f020ec02..ccf726cc87b7 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -40,7 +40,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index afac7ccce72f..c27b82b62c8b 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 438fc554d48d..1f66d2d1e998 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -62,7 +62,6 @@ #ifdef CONFIG_X86_64 #include -#include #include #else #include diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 5e5edd2ec893..0c7643d9f7cb 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -21,7 +21,6 @@ #include /* boot_cpu_has, ... */ #include /* dotraplinkage, ... */ -#include /* pgd_*(), ... 
*/ #include /* VSYSCALL_ADDR */ #include /* emulate_vsyscall */ #include /* struct vm86 */ diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index cf5781142716..a0d023cb4292 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #if 0 /* This is just for testing */ diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c index fb620fd9dae9..6e6b39710e5f 100644 --- a/arch/x86/mm/kaslr.c +++ b/arch/x86/mm/kaslr.c @@ -26,7 +26,6 @@ #include #include -#include #include #include diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c index 1953685c2ddf..c234634e26ba 100644 --- a/arch/x86/mm/pgtable_32.c +++ b/arch/x86/mm/pgtable_32.c @@ -11,7 +11,6 @@ #include #include -#include #include #include #include diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index a8a924b3c335..1aab92930569 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c @@ -34,7 +34,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c index a6e5f2c1805d..a2f447dffea6 100644 --- a/arch/x86/platform/uv/bios_uv.c +++ b/arch/x86/platform/uv/bios_uv.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c index 24cf6972eace..415fe7faa37f 100644 --- a/arch/xtensa/kernel/xtensa_ksyms.c +++ b/arch/xtensa/kernel/xtensa_ksyms.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #ifdef CONFIG_BLK_DEV_FD #include diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c index 2369433b734a..5835406b3cec 100644 --- a/arch/xtensa/mm/cache.c +++ b/arch/xtensa/mm/cache.c @@ -31,7 +31,6 @@ #include #include #include -#include /* * Note: diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c index c4decc73fd86..c128dcc7c85b 100644 --- a/arch/xtensa/mm/fault.c +++ b/arch/xtensa/mm/fault.c @@ -20,7 +20,6 @@ #include #include #include -#include DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST; void bad_page_fault(struct pt_regs*, unsigned long, int); diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index a3eeccf3ac5f..c6ea5d38c509 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -36,7 +36,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 4c2972f3153b..6de86e73dfc3 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -28,7 +28,6 @@ #if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA) #include -#include #else #define arm_iommu_create_mapping(...) NULL #define arm_iommu_attach_device(...) 
-ENODEV diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 77c57568e5d7..f5c838a92b01 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -58,7 +58,6 @@ #include #include -#include #include #include diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index a250d118144a..5dfc59fd9f16 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -25,7 +25,6 @@ #include #include -#include #include #include diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 0f45521b237c..cf306e0798fd 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -38,7 +38,6 @@ #include #include -#include typedef char *elf_caddr_t; diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index ef75ec86f865..6661ee1cff47 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -14,7 +14,6 @@ #include #include #include -#include #include #include diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 590111ea6975..27556d4d49fe 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -31,6 +31,7 @@ #include #include +#include #include #include diff --git a/mm/sparse.c b/mm/sparse.c index b2b9a3e34696..97179d27801a 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -16,7 +16,6 @@ #include "internal.h" #include -#include /* * Permanent SPARSEMEM data: -- cgit v1.2.3 From 1355c31eeb7ea61a7f2f2937d17cd4e343a6b5af Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Thu, 6 Aug 2020 23:22:39 -0700 Subject: asm-generic: pgalloc: provide generic pmd_alloc_one() and pmd_free_one() For most architectures that support >2 levels of page tables, pmd_alloc_one() is a wrapper for __get_free_pages(), sometimes with __GFP_ZERO and sometimes followed by memset(0) instead. More elaborate versions on arm64 and x86 account memory for the user page tables and call to pgtable_pmd_page_ctor() as the part of PMD page initialization. Move the arm64 version to include/asm-generic/pgalloc.h and use the generic version on several architectures. The pgtable_pmd_page_ctor() is a NOP when ARCH_ENABLE_SPLIT_PMD_PTLOCK is not enabled, so there is no functional change for most architectures except of the addition of __GFP_ACCOUNT for allocation of user page tables. The pmd_free() is a wrapper for free_page() in all the cases, so no functional change here. 
Signed-off-by: Mike Rapoport Signed-off-by: Andrew Morton Reviewed-by: Pekka Enberg Cc: Matthew Wilcox Cc: Abdul Haleem Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Christophe Leroy Cc: Joerg Roedel Cc: Joerg Roedel Cc: Max Filippov Cc: Peter Zijlstra (Intel) Cc: Satheesh Rajendran Cc: Stafford Horne Cc: Stephen Rothwell Cc: Steven Rostedt Cc: Geert Uytterhoeven Link: http://lkml.kernel.org/r/20200627143453.31835-5-rppt@kernel.org Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/pgalloc.h | 15 +------------ arch/arm/include/asm/pgalloc.h | 11 --------- arch/arm64/include/asm/pgalloc.h | 27 +--------------------- arch/ia64/include/asm/pgalloc.h | 10 --------- arch/mips/include/asm/pgalloc.h | 8 ++----- arch/parisc/include/asm/pgalloc.h | 11 ++------- arch/riscv/include/asm/pgalloc.h | 13 +---------- arch/sh/include/asm/pgalloc.h | 3 +++ arch/um/include/asm/pgalloc.h | 8 +------ arch/um/include/asm/pgtable-3level.h | 3 --- arch/um/kernel/mem.c | 12 ---------- arch/x86/include/asm/pgalloc.h | 26 +--------------------- include/asm-generic/pgalloc.h | 43 ++++++++++++++++++++++++++++++++++++ 13 files changed, 55 insertions(+), 135 deletions(-) (limited to 'include') diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h index a1a29f60934c..4834cd52e9d0 100644 --- a/arch/alpha/include/asm/pgalloc.h +++ b/arch/alpha/include/asm/pgalloc.h @@ -5,7 +5,7 @@ #include #include -#include /* for pte_{alloc,free}_one */ +#include /* * Allocate and free page tables. The xxx_kernel() versions are @@ -40,17 +40,4 @@ pgd_free(struct mm_struct *mm, pgd_t *pgd) free_page((unsigned long)pgd); } -static inline pmd_t * -pmd_alloc_one(struct mm_struct *mm, unsigned long address) -{ - pmd_t *ret = (pmd_t *)__get_free_page(GFP_PGTABLE_USER); - return ret; -} - -static inline void -pmd_free(struct mm_struct *mm, pmd_t *pmd) -{ - free_page((unsigned long)pmd); -} - #endif /* _ALPHA_PGALLOC_H */ diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index 069da393110c..c5bdfd404ea5 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h @@ -22,17 +22,6 @@ #ifdef CONFIG_ARM_LPAE -static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) -{ - return (pmd_t *)get_zeroed_page(GFP_KERNEL); -} - -static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) -{ - BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); - free_page((unsigned long)pmd); -} - static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) { set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE)); diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h index 58e93583ddb6..7246d0a662e1 100644 --- a/arch/arm64/include/asm/pgalloc.h +++ b/arch/arm64/include/asm/pgalloc.h @@ -13,37 +13,12 @@ #include #include -#include /* for pte_{alloc,free}_one */ +#include #define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t)) #if CONFIG_PGTABLE_LEVELS > 2 -static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) -{ - gfp_t gfp = GFP_PGTABLE_USER; - struct page *page; - - if (mm == &init_mm) - gfp = GFP_PGTABLE_KERNEL; - - page = alloc_page(gfp); - if (!page) - return NULL; - if (!pgtable_pmd_page_ctor(page)) { - __free_page(page); - return NULL; - } - return page_address(page); -} - -static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp) -{ - BUG_ON((unsigned long)pmdp & (PAGE_SIZE-1)); - pgtable_pmd_page_dtor(virt_to_page(pmdp)); - free_page((unsigned long)pmdp); -} - static inline void __pud_populate(pud_t *pudp, phys_addr_t 
pmdp, pudval_t prot) { set_pud(pudp, __pud(__phys_to_pud_val(pmdp) | prot)); diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h index 2a3050345099..5da1fc76477b 100644 --- a/arch/ia64/include/asm/pgalloc.h +++ b/arch/ia64/include/asm/pgalloc.h @@ -59,16 +59,6 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd) pud_val(*pud_entry) = __pa(pmd); } -static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) -{ - return (pmd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); -} - -static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) -{ - free_page((unsigned long)pmd); -} - #define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd) static inline void diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h index fa77cb71f303..eed1b3e8c642 100644 --- a/arch/mips/include/asm/pgalloc.h +++ b/arch/mips/include/asm/pgalloc.h @@ -13,7 +13,8 @@ #include #include -#include /* for pte_{alloc,free}_one */ +#define __HAVE_ARCH_PMD_ALLOC_ONE +#include static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) @@ -70,11 +71,6 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) return pmd; } -static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) -{ - free_pages((unsigned long)pmd, PMD_ORDER); -} - #define __pmd_free_tlb(tlb, x, addr) pmd_free((tlb)->mm, x) #endif diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h index 9ac74da256b8..689766b914ed 100644 --- a/arch/parisc/include/asm/pgalloc.h +++ b/arch/parisc/include/asm/pgalloc.h @@ -10,7 +10,8 @@ #include -#include /* for pte_{alloc,free}_one */ +#define __HAVE_ARCH_PMD_FREE +#include /* Allocate the top level pgd (page directory) * @@ -65,14 +66,6 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT))); } -static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) -{ - pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER); - if (pmd) - memset(pmd, 0, PAGE_SIZE< #ifdef CONFIG_MMU -#include /* for pte_{alloc,free}_one */ +#include static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) @@ -62,17 +62,6 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) #ifndef __PAGETABLE_PMD_FOLDED -static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) -{ - return (pmd_t *)__get_free_page( - GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO); -} - -static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) -{ - free_page((unsigned long)pmd); -} - #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd) #endif /* __PAGETABLE_PMD_FOLDED */ diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h index d770da3f8b6f..811afb567101 100644 --- a/arch/sh/include/asm/pgalloc.h +++ b/arch/sh/include/asm/pgalloc.h @@ -3,6 +3,9 @@ #define __ASM_SH_PGALLOC_H #include + +#define __HAVE_ARCH_PMD_ALLOC_ONE +#define __HAVE_ARCH_PMD_FREE #include extern pgd_t *pgd_alloc(struct mm_struct *); diff --git a/arch/um/include/asm/pgalloc.h b/arch/um/include/asm/pgalloc.h index 881e76da1938..bdde433dbdec 100644 --- a/arch/um/include/asm/pgalloc.h +++ b/arch/um/include/asm/pgalloc.h @@ -10,7 +10,7 @@ #include -#include /* for pte_{alloc,free}_one */ +#include #define pmd_populate_kernel(mm, pmd, pte) \ set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte))) @@ -34,12 +34,6 @@ do { \ } while (0) #ifdef 
CONFIG_3_LEVEL_PGTABLES - -static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) -{ - free_page((unsigned long)pmd); -} - #define __pmd_free_tlb(tlb,x, address) tlb_remove_page((tlb),virt_to_page(x)) #endif diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h index 36f452957cef..7e6a4180db9d 100644 --- a/arch/um/include/asm/pgtable-3level.h +++ b/arch/um/include/asm/pgtable-3level.h @@ -78,9 +78,6 @@ static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; } #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval)) #endif -struct mm_struct; -extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address); - static inline void pud_clear (pud_t *pud) { set_pud(pud, __pud(_PAGE_NEWPAGE)); diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index c2ff76c8981e..a4accb14cbd5 100644 --- a/arch/um/kernel/mem.c +++ b/arch/um/kernel/mem.c @@ -201,18 +201,6 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd) free_page((unsigned long) pgd); } -#ifdef CONFIG_3_LEVEL_PGTABLES -pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) -{ - pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL); - - if (pmd) - memset(pmd, 0, PAGE_SIZE); - - return pmd; -} -#endif - void *uml_kmalloc(int size, int flags) { return kmalloc(size, flags); diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h index 29aa7859bdee..25feaa117c40 100644 --- a/arch/x86/include/asm/pgalloc.h +++ b/arch/x86/include/asm/pgalloc.h @@ -7,7 +7,7 @@ #include #define __HAVE_ARCH_PTE_ALLOC_ONE -#include /* for pte_{alloc,free}_one */ +#include static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; } @@ -86,30 +86,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, #define pmd_pgtable(pmd) pmd_page(pmd) #if CONFIG_PGTABLE_LEVELS > 2 -static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) -{ - struct page *page; - gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO; - - if (mm == &init_mm) - gfp &= ~__GFP_ACCOUNT; - page = alloc_pages(gfp, 0); - if (!page) - return NULL; - if (!pgtable_pmd_page_ctor(page)) { - __free_pages(page, 0); - return NULL; - } - return (pmd_t *)page_address(page); -} - -static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) -{ - BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); - pgtable_pmd_page_dtor(virt_to_page(pmd)); - free_page((unsigned long)pmd); -} - extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd); static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h index 73f7421413cb..1bc027891a00 100644 --- a/include/asm-generic/pgalloc.h +++ b/include/asm-generic/pgalloc.h @@ -102,6 +102,49 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte_page) __free_page(pte_page); } + +#if CONFIG_PGTABLE_LEVELS > 2 + +#ifndef __HAVE_ARCH_PMD_ALLOC_ONE +/** + * pmd_alloc_one - allocate a page for PMD-level page table + * @mm: the mm_struct of the current context + * + * Allocates a page and runs the pgtable_pmd_page_ctor(). + * Allocations use %GFP_PGTABLE_USER in user context and + * %GFP_PGTABLE_KERNEL in kernel context. 
+ * + * Return: pointer to the allocated memory or %NULL on error + */ +static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) +{ + struct page *page; + gfp_t gfp = GFP_PGTABLE_USER; + + if (mm == &init_mm) + gfp = GFP_PGTABLE_KERNEL; + page = alloc_pages(gfp, 0); + if (!page) + return NULL; + if (!pgtable_pmd_page_ctor(page)) { + __free_pages(page, 0); + return NULL; + } + return (pmd_t *)page_address(page); +} +#endif + +#ifndef __HAVE_ARCH_PMD_FREE +static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) +{ + BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); + pgtable_pmd_page_dtor(virt_to_page(pmd)); + free_page((unsigned long)pmd); +} +#endif + +#endif /* CONFIG_PGTABLE_LEVELS > 2 */ + #endif /* CONFIG_MMU */ #endif /* __ASM_GENERIC_PGALLOC_H */ -- cgit v1.2.3 From d9e8b929670b4f79e07cdbcb0fb4f162a561d5c6 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Thu, 6 Aug 2020 23:22:44 -0700 Subject: asm-generic: pgalloc: provide generic pud_alloc_one() and pud_free_one() Several architectures define pud_alloc_one() as a wrapper for __get_free_page() and pud_free() as a wrapper for free_page(). Provide a generic implementation in asm-generic/pgalloc.h and use it where appropriate. Signed-off-by: Mike Rapoport Signed-off-by: Andrew Morton Reviewed-by: Pekka Enberg Cc: Abdul Haleem Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Christophe Leroy Cc: Joerg Roedel Cc: Joerg Roedel Cc: Max Filippov Cc: Peter Zijlstra (Intel) Cc: Satheesh Rajendran Cc: Stafford Horne Cc: Stephen Rothwell Cc: Steven Rostedt Cc: Geert Uytterhoeven Cc: Matthew Wilcox Link: http://lkml.kernel.org/r/20200627143453.31835-6-rppt@kernel.org Signed-off-by: Linus Torvalds --- arch/arm64/include/asm/pgalloc.h | 11 ----------- arch/ia64/include/asm/pgalloc.h | 9 --------- arch/mips/include/asm/pgalloc.h | 6 +----- arch/x86/include/asm/pgalloc.h | 15 --------------- include/asm-generic/pgalloc.h | 30 ++++++++++++++++++++++++++++++ 5 files changed, 31 insertions(+), 40 deletions(-) (limited to 'include') diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h index 7246d0a662e1..0965945b595d 100644 --- a/arch/arm64/include/asm/pgalloc.h +++ b/arch/arm64/include/asm/pgalloc.h @@ -37,17 +37,6 @@ static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot) #if CONFIG_PGTABLE_LEVELS > 3 -static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) -{ - return (pud_t *)__get_free_page(GFP_PGTABLE_USER); -} - -static inline void pud_free(struct mm_struct *mm, pud_t *pudp) -{ - BUG_ON((unsigned long)pudp & (PAGE_SIZE-1)); - free_page((unsigned long)pudp); -} - static inline void __p4d_populate(p4d_t *p4dp, phys_addr_t pudp, p4dval_t prot) { set_p4d(p4dp, __p4d(__phys_to_p4d_val(pudp) | prot)); diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h index 5da1fc76477b..06f80358e20f 100644 --- a/arch/ia64/include/asm/pgalloc.h +++ b/arch/ia64/include/asm/pgalloc.h @@ -41,15 +41,6 @@ p4d_populate(struct mm_struct *mm, p4d_t * p4d_entry, pud_t * pud) p4d_val(*p4d_entry) = __pa(pud); } -static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) -{ - return (pud_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); -} - -static inline void pud_free(struct mm_struct *mm, pud_t *pud) -{ - free_page((unsigned long)pud); -} #define __pud_free_tlb(tlb, pud, address) pud_free((tlb)->mm, pud) #endif /* CONFIG_PGTABLE_LEVELS == 4 */ diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h index 
eed1b3e8c642..e5a840910ce0 100644 --- a/arch/mips/include/asm/pgalloc.h +++ b/arch/mips/include/asm/pgalloc.h @@ -14,6 +14,7 @@ #include #define __HAVE_ARCH_PMD_ALLOC_ONE +#define __HAVE_ARCH_PUD_ALLOC_ONE #include static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, @@ -87,11 +88,6 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) return pud; } -static inline void pud_free(struct mm_struct *mm, pud_t *pud) -{ - free_pages((unsigned long)pud, PUD_ORDER); -} - static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud) { set_p4d(p4d, __p4d((unsigned long)pud)); diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h index 25feaa117c40..3d1085a14347 100644 --- a/arch/x86/include/asm/pgalloc.h +++ b/arch/x86/include/asm/pgalloc.h @@ -123,21 +123,6 @@ static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d, pud_t *pu set_p4d_safe(p4d, __p4d(_PAGE_TABLE | __pa(pud))); } -static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) -{ - gfp_t gfp = GFP_KERNEL_ACCOUNT; - - if (mm == &init_mm) - gfp &= ~__GFP_ACCOUNT; - return (pud_t *)get_zeroed_page(gfp); -} - -static inline void pud_free(struct mm_struct *mm, pud_t *pud) -{ - BUG_ON((unsigned long)pud & (PAGE_SIZE-1)); - free_page((unsigned long)pud); -} - extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud); static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h index 1bc027891a00..d361574aaadf 100644 --- a/include/asm-generic/pgalloc.h +++ b/include/asm-generic/pgalloc.h @@ -145,6 +145,36 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) #endif /* CONFIG_PGTABLE_LEVELS > 2 */ +#if CONFIG_PGTABLE_LEVELS > 3 + +#ifndef __HAVE_ARCH_PUD_FREE +/** + * pud_alloc_one - allocate a page for PUD-level page table + * @mm: the mm_struct of the current context + * + * Allocates a page using %GFP_PGTABLE_USER for user context and + * %GFP_PGTABLE_KERNEL for kernel context. + * + * Return: pointer to the allocated memory or %NULL on error + */ +static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) +{ + gfp_t gfp = GFP_PGTABLE_USER; + + if (mm == &init_mm) + gfp = GFP_PGTABLE_KERNEL; + return (pud_t *)get_zeroed_page(gfp); +} +#endif + +static inline void pud_free(struct mm_struct *mm, pud_t *pud) +{ + BUG_ON((unsigned long)pud & (PAGE_SIZE-1)); + free_page((unsigned long)pud); +} + +#endif /* CONFIG_PGTABLE_LEVELS > 3 */ + #endif /* CONFIG_MMU */ #endif /* __ASM_GENERIC_PGALLOC_H */ -- cgit v1.2.3 From f9cb654cb550b7b87e8608b14fc3eca432429ffe Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Thu, 6 Aug 2020 23:22:47 -0700 Subject: asm-generic: pgalloc: provide generic pgd_free() Most architectures define pgd_free() as a wrapper for free_page(). Provide a generic version in asm-generic/pgalloc.h and enable its use for most architectures. 
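(Illustrative sketch only, not part of the patch: the opt-out convention used throughout this series. An architecture that cannot use the generic one-page helper defines the matching __HAVE_ARCH_* macro before including asm-generic/pgalloc.h and keeps its own definition; the PGD_ORDER below stands in for whatever a hypothetical architecture with a multi-page pgd actually needs.)

#define __HAVE_ARCH_PGD_FREE		/* keep the arch-specific version */
#include <asm-generic/pgalloc.h>

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	/* multi-page pgd, so the generic single free_page() helper does not fit */
	free_pages((unsigned long)pgd, PGD_ORDER);
}

Architectures whose pgd really is a single page simply drop their local copy and pick up the generic free_page() based pgd_free() from asm-generic/pgalloc.h, as the hunks below do.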
Signed-off-by: Mike Rapoport Signed-off-by: Andrew Morton Reviewed-by: Pekka Enberg Acked-by: Geert Uytterhoeven [m68k] Cc: Abdul Haleem Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Christophe Leroy Cc: Joerg Roedel Cc: Joerg Roedel Cc: Max Filippov Cc: Peter Zijlstra (Intel) Cc: Satheesh Rajendran Cc: Stafford Horne Cc: Stephen Rothwell Cc: Steven Rostedt Cc: Matthew Wilcox Link: http://lkml.kernel.org/r/20200627143453.31835-7-rppt@kernel.org Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/pgalloc.h | 6 ------ arch/arm/include/asm/pgalloc.h | 1 + arch/arm64/include/asm/pgalloc.h | 1 + arch/csky/include/asm/pgalloc.h | 7 +------ arch/hexagon/include/asm/pgalloc.h | 7 +------ arch/ia64/include/asm/pgalloc.h | 5 ----- arch/m68k/include/asm/sun3_pgalloc.h | 7 +------ arch/microblaze/include/asm/pgalloc.h | 6 ------ arch/mips/include/asm/pgalloc.h | 5 ----- arch/nds32/mm/mm-nds32.c | 2 ++ arch/nios2/include/asm/pgalloc.h | 7 +------ arch/parisc/include/asm/pgalloc.h | 1 + arch/riscv/include/asm/pgalloc.h | 5 ----- arch/sh/include/asm/pgalloc.h | 1 + arch/um/include/asm/pgalloc.h | 1 - arch/um/kernel/mem.c | 5 ----- arch/x86/include/asm/pgalloc.h | 1 + arch/xtensa/include/asm/pgalloc.h | 5 ----- include/asm-generic/pgalloc.h | 7 +++++++ 19 files changed, 18 insertions(+), 62 deletions(-) (limited to 'include') diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h index 4834cd52e9d0..9c6a24fe493d 100644 --- a/arch/alpha/include/asm/pgalloc.h +++ b/arch/alpha/include/asm/pgalloc.h @@ -34,10 +34,4 @@ pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) extern pgd_t *pgd_alloc(struct mm_struct *mm); -static inline void -pgd_free(struct mm_struct *mm, pgd_t *pgd) -{ - free_page((unsigned long)pgd); -} - #endif /* _ALPHA_PGALLOC_H */ diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index c5bdfd404ea5..15f4674715f8 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h @@ -65,6 +65,7 @@ static inline void clean_pte_table(pte_t *pte) #define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL #define __HAVE_ARCH_PTE_ALLOC_ONE +#define __HAVE_ARCH_PGD_FREE #include static inline pte_t * diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h index 0965945b595d..3c6a7f5988b1 100644 --- a/arch/arm64/include/asm/pgalloc.h +++ b/arch/arm64/include/asm/pgalloc.h @@ -13,6 +13,7 @@ #include #include +#define __HAVE_ARCH_PGD_FREE #include #define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t)) diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h index c7c1ed27e348..d58d8146b729 100644 --- a/arch/csky/include/asm/pgalloc.h +++ b/arch/csky/include/asm/pgalloc.h @@ -9,7 +9,7 @@ #include #define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL -#include /* for pte_{alloc,free}_one */ +#include static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) @@ -42,11 +42,6 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) return pte; } -static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) -{ - free_pages((unsigned long)pgd, PGD_ORDER); -} - static inline pgd_t *pgd_alloc(struct mm_struct *mm) { pgd_t *ret; diff --git a/arch/hexagon/include/asm/pgalloc.h b/arch/hexagon/include/asm/pgalloc.h index cc9be514a676..f0c47e6a7427 100644 --- a/arch/hexagon/include/asm/pgalloc.h +++ b/arch/hexagon/include/asm/pgalloc.h @@ -11,7 +11,7 @@ #include #include -#include /* for pte_{alloc,free}_one */ +#include extern unsigned long long kmap_generation; @@ -41,11 +41,6 @@ static inline 
pgd_t *pgd_alloc(struct mm_struct *mm) return pgd; } -static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) -{ - free_page((unsigned long) pgd); -} - static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte) { diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h index 06f80358e20f..9601cfe83c94 100644 --- a/arch/ia64/include/asm/pgalloc.h +++ b/arch/ia64/include/asm/pgalloc.h @@ -29,11 +29,6 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) return (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); } -static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) -{ - free_page((unsigned long)pgd); -} - #if CONFIG_PGTABLE_LEVELS == 4 static inline void p4d_populate(struct mm_struct *mm, p4d_t * p4d_entry, pud_t * pud) diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h index 11b95dadf7c0..000f64869b91 100644 --- a/arch/m68k/include/asm/sun3_pgalloc.h +++ b/arch/m68k/include/asm/sun3_pgalloc.h @@ -13,7 +13,7 @@ #include -#include /* for pte_{alloc,free}_one */ +#include extern const char bad_pmd_string[]; @@ -40,11 +40,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page */ #define pmd_free(mm, x) do { } while (0) -static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) -{ - free_page((unsigned long) pgd); -} - static inline pgd_t * pgd_alloc(struct mm_struct *mm) { pgd_t *new_pgd; diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h index ebb6b7939bb8..8839ce00ea05 100644 --- a/arch/microblaze/include/asm/pgalloc.h +++ b/arch/microblaze/include/asm/pgalloc.h @@ -28,12 +28,6 @@ static inline pgd_t *get_pgd(void) return (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, 0); } -static inline void free_pgd(pgd_t *pgd) -{ - free_page((unsigned long)pgd); -} - -#define pgd_free(mm, pgd) free_pgd(pgd) #define pgd_alloc(mm) get_pgd() #define pmd_pgtable(pmd) pmd_page(pmd) diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h index e5a840910ce0..8b18424b3120 100644 --- a/arch/mips/include/asm/pgalloc.h +++ b/arch/mips/include/asm/pgalloc.h @@ -49,11 +49,6 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) extern void pgd_init(unsigned long page); extern pgd_t *pgd_alloc(struct mm_struct *mm); -static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) -{ - free_pages((unsigned long)pgd, PGD_ORDER); -} - #define __pte_free_tlb(tlb,pte,address) \ do { \ pgtable_pte_page_dtor(pte); \ diff --git a/arch/nds32/mm/mm-nds32.c b/arch/nds32/mm/mm-nds32.c index 8503bee882d1..55bec50ccc03 100644 --- a/arch/nds32/mm/mm-nds32.c +++ b/arch/nds32/mm/mm-nds32.c @@ -2,6 +2,8 @@ // Copyright (C) 2005-2017 Andes Technology Corporation #include + +#define __HAVE_ARCH_PGD_FREE #include #define FIRST_KERNEL_PGD_NR (USER_PTRS_PER_PGD) diff --git a/arch/nios2/include/asm/pgalloc.h b/arch/nios2/include/asm/pgalloc.h index 0b146d773c85..e6600d2a5ae0 100644 --- a/arch/nios2/include/asm/pgalloc.h +++ b/arch/nios2/include/asm/pgalloc.h @@ -12,7 +12,7 @@ #include -#include /* for pte_{alloc,free}_one */ +#include static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) @@ -34,11 +34,6 @@ extern void pmd_init(unsigned long page, unsigned long pagetable); extern pgd_t *pgd_alloc(struct mm_struct *mm); -static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) -{ - free_pages((unsigned long)pgd, PGD_ORDER); -} - #define __pte_free_tlb(tlb, pte, addr) \ do { \ 
pgtable_pte_page_dtor(pte); \ diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h index 689766b914ed..cc7ecc2ef55d 100644 --- a/arch/parisc/include/asm/pgalloc.h +++ b/arch/parisc/include/asm/pgalloc.h @@ -11,6 +11,7 @@ #include #define __HAVE_ARCH_PMD_FREE +#define __HAVE_ARCH_PGD_FREE #include /* Allocate the top level pgd (page directory) diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h index 8d3135f05b8e..23b1544e0ca5 100644 --- a/arch/riscv/include/asm/pgalloc.h +++ b/arch/riscv/include/asm/pgalloc.h @@ -55,11 +55,6 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) return pgd; } -static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) -{ - free_page((unsigned long)pgd); -} - #ifndef __PAGETABLE_PMD_FOLDED #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd) diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h index 811afb567101..0e6b0be25e33 100644 --- a/arch/sh/include/asm/pgalloc.h +++ b/arch/sh/include/asm/pgalloc.h @@ -6,6 +6,7 @@ #define __HAVE_ARCH_PMD_ALLOC_ONE #define __HAVE_ARCH_PMD_FREE +#define __HAVE_ARCH_PGD_FREE #include extern pgd_t *pgd_alloc(struct mm_struct *); diff --git a/arch/um/include/asm/pgalloc.h b/arch/um/include/asm/pgalloc.h index bdde433dbdec..5393e13e07e0 100644 --- a/arch/um/include/asm/pgalloc.h +++ b/arch/um/include/asm/pgalloc.h @@ -25,7 +25,6 @@ * Allocate and free page tables. */ extern pgd_t *pgd_alloc(struct mm_struct *); -extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); #define __pte_free_tlb(tlb,pte, address) \ do { \ diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index a4accb14cbd5..9242dc91d751 100644 --- a/arch/um/kernel/mem.c +++ b/arch/um/kernel/mem.c @@ -196,11 +196,6 @@ pgd_t *pgd_alloc(struct mm_struct *mm) return pgd; } -void pgd_free(struct mm_struct *mm, pgd_t *pgd) -{ - free_page((unsigned long) pgd); -} - void *uml_kmalloc(int size, int flags) { return kmalloc(size, flags); diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h index 3d1085a14347..62ad61d6fefc 100644 --- a/arch/x86/include/asm/pgalloc.h +++ b/arch/x86/include/asm/pgalloc.h @@ -7,6 +7,7 @@ #include #define __HAVE_ARCH_PTE_ALLOC_ONE +#define __HAVE_ARCH_PGD_FREE #include static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; } diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h index 5553abed6933..d3a22da4d2c9 100644 --- a/arch/xtensa/include/asm/pgalloc.h +++ b/arch/xtensa/include/asm/pgalloc.h @@ -33,11 +33,6 @@ pgd_alloc(struct mm_struct *mm) return (pgd_t*) __get_free_pages(GFP_KERNEL | __GFP_ZERO, PGD_ORDER); } -static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) -{ - free_page((unsigned long)pgd); -} - static inline void ptes_clear(pte_t *ptep) { int i; diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h index d361574aaadf..6f44810921aa 100644 --- a/include/asm-generic/pgalloc.h +++ b/include/asm-generic/pgalloc.h @@ -175,6 +175,13 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud) #endif /* CONFIG_PGTABLE_LEVELS > 3 */ +#ifndef __HAVE_ARCH_PGD_FREE +static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) +{ + free_page((unsigned long)pgd); +} +#endif + #endif /* CONFIG_MMU */ #endif /* __ASM_GENERIC_PGALLOC_H */ -- cgit v1.2.3 From 2a681cfa5bb41e78e7bfafbb748b581374ce9b1d Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Thu, 6 Aug 2020 23:22:55 -0700 Subject: mm: move p?d_alloc_track to separate header file The 
functions are only used in two source files, so there is no need for them to be in the global header. Move them to the new header and include it only where needed. Signed-off-by: Joerg Roedel Signed-off-by: Andrew Morton Reviewed-by: Pekka Enberg Cc: Peter Zijlstra (Intel) Cc: Andy Lutomirski Cc: Abdul Haleem Cc: Satheesh Rajendran Cc: Stephen Rothwell Cc: Steven Rostedt (VMware) Cc: Mike Rapoport Cc: Christophe Leroy Cc: Arnd Bergmann Cc: Max Filippov Cc: Stafford Horne Cc: Geert Uytterhoeven Cc: Matthew Wilcox Link: http://lkml.kernel.org/r/20200609120533.25867-1-joro@8bytes.org Signed-off-by: Linus Torvalds --- include/linux/mm.h | 45 --------------------------------------------- mm/ioremap.c | 2 ++ mm/pgalloc-track.h | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++ mm/vmalloc.c | 1 + 4 files changed, 54 insertions(+), 45 deletions(-) create mode 100644 mm/pgalloc-track.h (limited to 'include') diff --git a/include/linux/mm.h b/include/linux/mm.h index 303a47a9769d..2830f1c0fdc3 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2103,51 +2103,11 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d, NULL : pud_offset(p4d, address); } -static inline p4d_t *p4d_alloc_track(struct mm_struct *mm, pgd_t *pgd, - unsigned long address, - pgtbl_mod_mask *mod_mask) - -{ - if (unlikely(pgd_none(*pgd))) { - if (__p4d_alloc(mm, pgd, address)) - return NULL; - *mod_mask |= PGTBL_PGD_MODIFIED; - } - - return p4d_offset(pgd, address); -} - -static inline pud_t *pud_alloc_track(struct mm_struct *mm, p4d_t *p4d, - unsigned long address, - pgtbl_mod_mask *mod_mask) -{ - if (unlikely(p4d_none(*p4d))) { - if (__pud_alloc(mm, p4d, address)) - return NULL; - *mod_mask |= PGTBL_P4D_MODIFIED; - } - - return pud_offset(p4d, address); -} - static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) { return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))? NULL: pmd_offset(pud, address); } - -static inline pmd_t *pmd_alloc_track(struct mm_struct *mm, pud_t *pud, - unsigned long address, - pgtbl_mod_mask *mod_mask) -{ - if (unlikely(pud_none(*pud))) { - if (__pmd_alloc(mm, pud, address)) - return NULL; - *mod_mask |= PGTBL_PUD_MODIFIED; - } - - return pmd_offset(pud, address); -} #endif /* CONFIG_MMU */ #if USE_SPLIT_PTE_PTLOCKS @@ -2263,11 +2223,6 @@ static inline void pgtable_pte_page_dtor(struct page *page) ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? 
\ NULL: pte_offset_kernel(pmd, address)) -#define pte_alloc_kernel_track(pmd, address, mask) \ - ((unlikely(pmd_none(*(pmd))) && \ - (__pte_alloc_kernel(pmd) || ({*(mask)|=PGTBL_PMD_MODIFIED;0;})))?\ - NULL: pte_offset_kernel(pmd, address)) - #if USE_SPLIT_PMD_PTLOCKS static struct page *pmd_to_page(pmd_t *pmd) diff --git a/mm/ioremap.c b/mm/ioremap.c index 5ee3526f71b8..5fa1ab41d152 100644 --- a/mm/ioremap.c +++ b/mm/ioremap.c @@ -13,6 +13,8 @@ #include #include +#include "pgalloc-track.h" + #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP static int __read_mostly ioremap_p4d_capable; static int __read_mostly ioremap_pud_capable; diff --git a/mm/pgalloc-track.h b/mm/pgalloc-track.h new file mode 100644 index 000000000000..1dcc865029a2 --- /dev/null +++ b/mm/pgalloc-track.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PGALLLC_TRACK_H +#define _LINUX_PGALLLC_TRACK_H + +#if defined(CONFIG_MMU) +static inline p4d_t *p4d_alloc_track(struct mm_struct *mm, pgd_t *pgd, + unsigned long address, + pgtbl_mod_mask *mod_mask) +{ + if (unlikely(pgd_none(*pgd))) { + if (__p4d_alloc(mm, pgd, address)) + return NULL; + *mod_mask |= PGTBL_PGD_MODIFIED; + } + + return p4d_offset(pgd, address); +} + +static inline pud_t *pud_alloc_track(struct mm_struct *mm, p4d_t *p4d, + unsigned long address, + pgtbl_mod_mask *mod_mask) +{ + if (unlikely(p4d_none(*p4d))) { + if (__pud_alloc(mm, p4d, address)) + return NULL; + *mod_mask |= PGTBL_P4D_MODIFIED; + } + + return pud_offset(p4d, address); +} + +static inline pmd_t *pmd_alloc_track(struct mm_struct *mm, pud_t *pud, + unsigned long address, + pgtbl_mod_mask *mod_mask) +{ + if (unlikely(pud_none(*pud))) { + if (__pmd_alloc(mm, pud, address)) + return NULL; + *mod_mask |= PGTBL_PUD_MODIFIED; + } + + return pmd_offset(pud, address); +} +#endif /* CONFIG_MMU */ + +#define pte_alloc_kernel_track(pmd, address, mask) \ + ((unlikely(pmd_none(*(pmd))) && \ + (__pte_alloc_kernel(pmd) || ({*(mask)|=PGTBL_PMD_MODIFIED;0;})))?\ + NULL: pte_offset_kernel(pmd, address)) + +#endif /* _LINUX_PGALLLC_TRACK_H */ diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 5a2b55c8dd9a..5be3cf3b59de 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -41,6 +41,7 @@ #include #include "internal.h" +#include "pgalloc-track.h" bool is_vmalloc_addr(const void *x) { -- cgit v1.2.3 From 0a4954a850b0c4d0a5d18b1a55d6e5a653e362b5 Mon Sep 17 00:00:00 2001 From: Feng Tang Date: Thu, 6 Aug 2020 23:23:11 -0700 Subject: percpu_counter: add percpu_counter_sync() percpu_counter's accuracy is related to its batch size. For a percpu_counter with a big batch, its deviation could be big, so when the counter's batch is runtime changed to a smaller value for better accuracy, there could also be requirment to reduce the big deviation. So add a percpu-counter sync function to be run on each CPU. Reported-by: kernel test robot Signed-off-by: Feng Tang Signed-off-by: Andrew Morton Cc: Dennis Zhou Cc: Tejun Heo Cc: Christoph Lameter Cc: Michal Hocko Cc: Qian Cai Cc: Andi Kleen Cc: Huang Ying Cc: Dave Hansen Cc: Haiyang Zhang Cc: Johannes Weiner Cc: Kees Cook Cc: "K. Y. 
Srinivasan" Cc: Matthew Wilcox (Oracle) Cc: Mel Gorman Cc: Tim Chen Link: http://lkml.kernel.org/r/1594389708-60781-4-git-send-email-feng.tang@intel.com Signed-off-by: Linus Torvalds --- include/linux/percpu_counter.h | 4 ++++ lib/percpu_counter.c | 19 +++++++++++++++++++ 2 files changed, 23 insertions(+) (limited to 'include') diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index 0a4f54dd4737..01861eebed79 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h @@ -44,6 +44,7 @@ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch); s64 __percpu_counter_sum(struct percpu_counter *fbc); int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch); +void percpu_counter_sync(struct percpu_counter *fbc); static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) { @@ -172,6 +173,9 @@ static inline bool percpu_counter_initialized(struct percpu_counter *fbc) return true; } +static inline void percpu_counter_sync(struct percpu_counter *fbc) +{ +} #endif /* CONFIG_SMP */ static inline void percpu_counter_inc(struct percpu_counter *fbc) diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index a66595ba5543..a2345de90e93 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c @@ -98,6 +98,25 @@ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch) } EXPORT_SYMBOL(percpu_counter_add_batch); +/* + * For percpu_counter with a big batch, the devication of its count could + * be big, and there is requirement to reduce the deviation, like when the + * counter's batch could be runtime decreased to get a better accuracy, + * which can be achieved by running this sync function on each CPU. + */ +void percpu_counter_sync(struct percpu_counter *fbc) +{ + unsigned long flags; + s64 count; + + raw_spin_lock_irqsave(&fbc->lock, flags); + count = __this_cpu_read(*fbc->counters); + fbc->count += count; + __this_cpu_sub(*fbc->counters, count); + raw_spin_unlock_irqrestore(&fbc->lock, flags); +} +EXPORT_SYMBOL(percpu_counter_sync); + /* * Add up all the per-cpu counts, return the result. This is a more accurate * but much slower version of percpu_counter_read_positive() -- cgit v1.2.3 From 56f3547bfa4d361148aa748ccb86073bc57f5e6c Mon Sep 17 00:00:00 2001 From: Feng Tang Date: Thu, 6 Aug 2020 23:23:15 -0700 Subject: mm: adjust vm_committed_as_batch according to vm overcommit policy When checking a performance change for will-it-scale scalability mmap test [1], we found very high lock contention for spinlock of percpu counter 'vm_committed_as': 94.14% 0.35% [kernel.kallsyms] [k] _raw_spin_lock_irqsave 48.21% _raw_spin_lock_irqsave;percpu_counter_add_batch;__vm_enough_memory;mmap_region;do_mmap; 45.91% _raw_spin_lock_irqsave;percpu_counter_add_batch;__do_munmap; Actually this heavy lock contention is not always necessary. The 'vm_committed_as' needs to be very precise when the strict OVERCOMMIT_NEVER policy is set, which requires a rather small batch number for the percpu counter. So keep 'batch' number unchanged for strict OVERCOMMIT_NEVER policy, and lift it to 64X for OVERCOMMIT_ALWAYS and OVERCOMMIT_GUESS policies. Also add a sysctl handler to adjust it when the policy is reconfigured. Benchmark with the same testcase in [1] shows 53% improvement on a 8C/16T desktop, and 2097%(20X) on a 4S/72C/144T server. We tested with test platforms in 0day (server, desktop and laptop), and 80%+ platforms shows improvements with that test. 
And whether it shows improvements depends on if the test mmap size is bigger than the batch number computed. And if the lift is 16X, 1/3 of the platforms will show improvements, though it should help the mmap/unmap usage generally, as Michal Hocko mentioned: : I believe that there are non-synthetic worklaods which would benefit from : a larger batch. E.g. large in memory databases which do large mmaps : during startups from multiple threads. [1] https://lore.kernel.org/lkml/20200305062138.GI5972@shao2-debian/ Signed-off-by: Feng Tang Signed-off-by: Andrew Morton Acked-by: Michal Hocko Cc: Matthew Wilcox (Oracle) Cc: Johannes Weiner Cc: Mel Gorman Cc: Qian Cai Cc: Kees Cook Cc: Andi Kleen Cc: Tim Chen Cc: Dave Hansen Cc: Huang Ying Cc: Christoph Lameter Cc: Dennis Zhou Cc: Haiyang Zhang Cc: kernel test robot Cc: "K. Y. Srinivasan" Cc: Tejun Heo Link: http://lkml.kernel.org/r/1589611660-89854-4-git-send-email-feng.tang@intel.com Link: http://lkml.kernel.org/r/1592725000-73486-4-git-send-email-feng.tang@intel.com Link: http://lkml.kernel.org/r/1594389708-60781-5-git-send-email-feng.tang@intel.com Signed-off-by: Linus Torvalds --- include/linux/mm.h | 2 ++ include/linux/mman.h | 4 ++++ kernel/sysctl.c | 2 +- mm/mm_init.c | 22 ++++++++++++++++------ mm/util.c | 41 +++++++++++++++++++++++++++++++++++++++++ 5 files changed, 64 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/linux/mm.h b/include/linux/mm.h index 2830f1c0fdc3..1c3470550395 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -206,6 +206,8 @@ int overcommit_ratio_handler(struct ctl_table *, int, void *, size_t *, loff_t *); int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *, loff_t *); +int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *, + loff_t *); #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) diff --git a/include/linux/mman.h b/include/linux/mman.h index 4b08e9c9c538..6f34c33075f9 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -57,8 +57,12 @@ extern struct percpu_counter vm_committed_as; #ifdef CONFIG_SMP extern s32 vm_committed_as_batch; +extern void mm_compute_batch(int overcommit_policy); #else #define vm_committed_as_batch 0 +static inline void mm_compute_batch(int overcommit_policy) +{ +} #endif unsigned long vm_memory_committed(void); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 1b4d2dc270a5..f785de3caac0 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2671,7 +2671,7 @@ static struct ctl_table vm_table[] = { .data = &sysctl_overcommit_memory, .maxlen = sizeof(sysctl_overcommit_memory), .mode = 0644, - .proc_handler = proc_dointvec_minmax, + .proc_handler = overcommit_policy_handler, .extra1 = SYSCTL_ZERO, .extra2 = &two, }, diff --git a/mm/mm_init.c b/mm/mm_init.c index 435e5f794b3b..b06a30fbedff 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "internal.h" #ifdef CONFIG_DEBUG_MEMORY_INIT @@ -144,14 +145,23 @@ EXPORT_SYMBOL_GPL(mm_kobj); #ifdef CONFIG_SMP s32 vm_committed_as_batch = 32; -static void __meminit mm_compute_batch(void) +void mm_compute_batch(int overcommit_policy) { u64 memsized_batch; s32 nr = num_present_cpus(); s32 batch = max_t(s32, nr*2, 32); - - /* batch size set to 0.4% of (total memory/#cpus), or max int32 */ - memsized_batch = min_t(u64, (totalram_pages()/nr)/256, 0x7fffffff); + unsigned long ram_pages = totalram_pages(); + + /* + * For policy OVERCOMMIT_NEVER, set batch size to 0.4% of + * (total memory/#cpus), and lift 
it to 25% for other policies + * to easy the possible lock contention for percpu_counter + * vm_committed_as, while the max limit is INT_MAX + */ + if (overcommit_policy == OVERCOMMIT_NEVER) + memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX); + else + memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX); vm_committed_as_batch = max_t(s32, memsized_batch, batch); } @@ -162,7 +172,7 @@ static int __meminit mm_compute_batch_notifier(struct notifier_block *self, switch (action) { case MEM_ONLINE: case MEM_OFFLINE: - mm_compute_batch(); + mm_compute_batch(sysctl_overcommit_memory); default: break; } @@ -176,7 +186,7 @@ static struct notifier_block compute_batch_nb __meminitdata = { static int __init mm_compute_batch_init(void) { - mm_compute_batch(); + mm_compute_batch(sysctl_overcommit_memory); register_hotmemory_notifier(&compute_batch_nb); return 0; diff --git a/mm/util.c b/mm/util.c index 1c9d097d7e48..8d6280c05238 100644 --- a/mm/util.c +++ b/mm/util.c @@ -746,6 +746,47 @@ int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer, return ret; } +static void sync_overcommit_as(struct work_struct *dummy) +{ + percpu_counter_sync(&vm_committed_as); +} + +int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos) +{ + struct ctl_table t; + int new_policy; + int ret; + + /* + * The deviation of sync_overcommit_as could be big with loose policy + * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to + * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply + * with the strict "NEVER", and to avoid possible race condtion (even + * though user usually won't too frequently do the switching to policy + * OVERCOMMIT_NEVER), the switch is done in the following order: + * 1. changing the batch + * 2. sync percpu count on each CPU + * 3. switch the policy + */ + if (write) { + t = *table; + t.data = &new_policy; + ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); + if (ret) + return ret; + + mm_compute_batch(new_policy); + if (new_policy == OVERCOMMIT_NEVER) + schedule_on_each_cpu(sync_overcommit_as); + sysctl_overcommit_memory = new_policy; + } else { + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); + } + + return ret; +} + int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { -- cgit v1.2.3 From 1d9cfee7535c213038a615f112c900c2d0ba8f54 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 6 Aug 2020 23:23:19 -0700 Subject: mm/sparsemem: enable vmem_altmap support in vmemmap_populate_basepages() Patch series "arm64: Enable vmemmap mapping from device memory", v4. This series enables vmemmap backing memory allocation from device memory ranges on arm64. But before that, it enables vmemmap_populate_basepages() and vmemmap_alloc_block_buf() to accommodate struct vmem_altmap based alocation requests. This patch (of 3): vmemmap_populate_basepages() is used across platforms to allocate backing memory for vmemmap mapping. This is used as a standard default choice or as a fallback when intended huge pages allocation fails. This just creates entire vmemmap mapping with base pages (PAGE_SIZE). On arm64 platforms, vmemmap_populate_basepages() is called instead of the platform specific vmemmap_populate() when ARM64_SWAPPER_USES_SECTION_MAPS is not enabled as in case for ARM64_16K_PAGES and ARM64_64K_PAGES configs. 
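(For orientation, a minimal sketch of the interface after this change — it mirrors the arm64, ia64 and riscv hunks further below rather than adding anything new:)

int __meminit vmemmap_populate(unsigned long start, unsigned long end,
			       int node, struct vmem_altmap *altmap)
{
	/*
	 * Passing NULL keeps today's RAM-backed behaviour; forwarding the
	 * caller's altmap (as a later arm64 patch does) allocates the base
	 * pages from the device-provided map instead.
	 */
	return vmemmap_populate_basepages(start, end, node, NULL);
}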
At present vmemmap_populate_basepages() does not support allocating from driver defined struct vmem_altmap while trying to create vmemmap mapping for a device memory range. It prevents ARM64_16K_PAGES and ARM64_64K_PAGES configs on arm64 from supporting device memory with vmemap_altmap request. This enables vmem_altmap support in vmemmap_populate_basepages() unlocking device memory allocation for vmemap mapping on arm64 platforms with 16K or 64K base page configs. Each architecture should evaluate and decide on subscribing device memory based base page allocation through vmemmap_populate_basepages(). Hence lets keep it disabled on all archs in order to preserve the existing semantics. A subsequent patch enables it on arm64. Signed-off-by: Anshuman Khandual Signed-off-by: Andrew Morton Tested-by: Jia He Reviewed-by: David Hildenbrand Acked-by: Will Deacon Acked-by: Catalin Marinas Cc: Mark Rutland Cc: Paul Walmsley Cc: Palmer Dabbelt Cc: Tony Luck Cc: Fenghua Yu Cc: Dave Hansen Cc: Andy Lutomirski Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Mike Rapoport Cc: Michal Hocko Cc: "Matthew Wilcox (Oracle)" Cc: "Kirill A. Shutemov" Cc: Dan Williams Cc: Pavel Tatashin Cc: Benjamin Herrenschmidt Cc: Borislav Petkov Cc: "H. Peter Anvin" Cc: Hsin-Yi Wang Cc: Jonathan Corbet Cc: Michael Ellerman Cc: Paul Mackerras Cc: Robin Murphy Cc: Steve Capper Cc: Yu Zhao Link: http://lkml.kernel.org/r/1594004178-8861-1-git-send-email-anshuman.khandual@arm.com Link: http://lkml.kernel.org/r/1594004178-8861-2-git-send-email-anshuman.khandual@arm.com Signed-off-by: Linus Torvalds --- arch/arm64/mm/mmu.c | 2 +- arch/ia64/mm/discontig.c | 2 +- arch/riscv/mm/init.c | 2 +- arch/x86/mm/init_64.c | 6 +++--- include/linux/mm.h | 5 +++-- mm/sparse-vmemmap.c | 16 +++++++++++----- 6 files changed, 20 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index cafefb147a5e..a32ddd021fe9 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -1070,7 +1070,7 @@ static void free_empty_tables(unsigned long addr, unsigned long end, int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap) { - return vmemmap_populate_basepages(start, end, node); + return vmemmap_populate_basepages(start, end, node, NULL); } #else /* !ARM64_SWAPPER_USES_SECTION_MAPS */ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index 37f8b6875ac9..2ba2127335a7 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -655,7 +655,7 @@ void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat) int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap) { - return vmemmap_populate_basepages(start, end, node); + return vmemmap_populate_basepages(start, end, node, NULL); } void vmemmap_free(unsigned long start, unsigned long end, diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 79e9d55bdf1a..416e520d07d3 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -554,6 +554,6 @@ void __init paging_init(void) int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap) { - return vmemmap_populate_basepages(start, end, node); + return vmemmap_populate_basepages(start, end, node, NULL); } #endif diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 3f4e29a78f2b..1acc5627b21c 100644 --- 
a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1545,7 +1545,7 @@ static int __meminit vmemmap_populate_hugepages(unsigned long start, vmemmap_verify((pte_t *)pmd, node, addr, next); continue; } - if (vmemmap_populate_basepages(addr, next, node)) + if (vmemmap_populate_basepages(addr, next, node, NULL)) return -ENOMEM; } return 0; @@ -1557,7 +1557,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, int err; if (end - start < PAGES_PER_SECTION * sizeof(struct page)) - err = vmemmap_populate_basepages(start, end, node); + err = vmemmap_populate_basepages(start, end, node, NULL); else if (boot_cpu_has(X86_FEATURE_PSE)) err = vmemmap_populate_hugepages(start, end, node, altmap); else if (altmap) { @@ -1565,7 +1565,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, __func__); err = -ENOMEM; } else - err = vmemmap_populate_basepages(start, end, node); + err = vmemmap_populate_basepages(start, end, node, NULL); if (!err) sync_global_pgds(start, end - 1); return err; diff --git a/include/linux/mm.h b/include/linux/mm.h index 1c3470550395..a7ff98738126 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2978,14 +2978,15 @@ pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node); pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node); pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node); -pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node); +pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node, + struct vmem_altmap *altmap); void *vmemmap_alloc_block(unsigned long size, int node); struct vmem_altmap; void *vmemmap_alloc_block_buf(unsigned long size, int node); void *altmap_alloc_block_buf(unsigned long size, struct vmem_altmap *altmap); void vmemmap_verify(pte_t *, int, unsigned long, unsigned long); int vmemmap_populate_basepages(unsigned long start, unsigned long end, - int node); + int node, struct vmem_altmap *altmap); int vmemmap_populate(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap); void vmemmap_populate_print_last(void); diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index 0db7738d76e9..ceed10dec31e 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c @@ -139,12 +139,18 @@ void __meminit vmemmap_verify(pte_t *pte, int node, start, end - 1); } -pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node) +pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node, + struct vmem_altmap *altmap) { pte_t *pte = pte_offset_kernel(pmd, addr); if (pte_none(*pte)) { pte_t entry; - void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node); + void *p; + + if (altmap) + p = altmap_alloc_block_buf(PAGE_SIZE, altmap); + else + p = vmemmap_alloc_block_buf(PAGE_SIZE, node); if (!p) return NULL; entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL); @@ -212,8 +218,8 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node) return pgd; } -int __meminit vmemmap_populate_basepages(unsigned long start, - unsigned long end, int node) +int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end, + int node, struct vmem_altmap *altmap) { unsigned long addr = start; pgd_t *pgd; @@ -235,7 +241,7 @@ int __meminit vmemmap_populate_basepages(unsigned long start, pmd = vmemmap_pmd_populate(pud, addr, node); if (!pmd) return -ENOMEM; - pte = vmemmap_pte_populate(pmd, addr, node); + pte = 
vmemmap_pte_populate(pmd, addr, node, altmap); if (!pte) return -ENOMEM; vmemmap_verify(pte, node, addr, addr + PAGE_SIZE); -- cgit v1.2.3 From 56993b4e147e9f2ba91ac15ef9ae5ee0626a6850 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 6 Aug 2020 23:23:24 -0700 Subject: mm/sparsemem: enable vmem_altmap support in vmemmap_alloc_block_buf() There are many instances where vmemap allocation is often switched between regular memory and device memory just based on whether altmap is available or not. vmemmap_alloc_block_buf() is used in various platforms to allocate vmemmap mappings. Lets also enable it to handle altmap based device memory allocation along with existing regular memory allocations. This will help in avoiding the altmap based allocation switch in many places. To summarize there are two different methods to call vmemmap_alloc_block_buf(). vmemmap_alloc_block_buf(size, node, NULL) /* Allocate from system RAM */ vmemmap_alloc_block_buf(size, node, altmap) /* Allocate from altmap */ This converts altmap_alloc_block_buf() into a static function, drops it's entry from the header and updates Documentation/vm/memory-model.rst. Suggested-by: Robin Murphy Signed-off-by: Anshuman Khandual Signed-off-by: Andrew Morton Tested-by: Jia He Reviewed-by: Catalin Marinas Cc: Jonathan Corbet Cc: Will Deacon Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Michael Ellerman Cc: Dave Hansen Cc: Andy Lutomirski Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: "H. Peter Anvin" Cc: Dan Williams Cc: David Hildenbrand Cc: Fenghua Yu Cc: Hsin-Yi Wang Cc: "Kirill A. Shutemov" Cc: Mark Rutland Cc: "Matthew Wilcox (Oracle)" Cc: Michal Hocko Cc: Mike Rapoport Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Pavel Tatashin Cc: Steve Capper Cc: Tony Luck Cc: Yu Zhao Link: http://lkml.kernel.org/r/1594004178-8861-3-git-send-email-anshuman.khandual@arm.com Signed-off-by: Linus Torvalds --- Documentation/vm/memory-model.rst | 2 +- arch/arm64/mm/mmu.c | 2 +- arch/powerpc/mm/init_64.c | 4 ++-- arch/x86/mm/init_64.c | 5 +---- include/linux/mm.h | 4 ++-- mm/sparse-vmemmap.c | 28 +++++++++++++--------------- 6 files changed, 20 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/Documentation/vm/memory-model.rst b/Documentation/vm/memory-model.rst index cc65bc85d260..2b898a27b346 100644 --- a/Documentation/vm/memory-model.rst +++ b/Documentation/vm/memory-model.rst @@ -178,7 +178,7 @@ for persistent memory devices in pre-allocated storage on those devices. This storage is represented with :c:type:`struct vmem_altmap` that is eventually passed to vmemmap_populate() through a long chain of function calls. The vmemmap_populate() implementation may use the -`vmem_altmap` along with :c:func:`altmap_alloc_block_buf` helper to +`vmem_altmap` along with :c:func:`vmemmap_alloc_block_buf` helper to allocate memory map on the persistent memory device. 
ZONE_DEVICE diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index a32ddd021fe9..f3d9ff323c8f 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -1102,7 +1102,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, if (pmd_none(READ_ONCE(*pmdp))) { void *p = NULL; - p = vmemmap_alloc_block_buf(PMD_SIZE, node); + p = vmemmap_alloc_block_buf(PMD_SIZE, node, NULL); if (!p) return -ENOMEM; diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index bc73abf0bc25..3fd504d72c5e 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -225,12 +225,12 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, * fall back to system memory if the altmap allocation fail. */ if (altmap && !altmap_cross_boundary(altmap, start, page_size)) { - p = altmap_alloc_block_buf(page_size, altmap); + p = vmemmap_alloc_block_buf(page_size, node, altmap); if (!p) pr_debug("altmap block allocation failed, falling back to system memory"); } if (!p) - p = vmemmap_alloc_block_buf(page_size, node); + p = vmemmap_alloc_block_buf(page_size, node, NULL); if (!p) return -ENOMEM; diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 1acc5627b21c..53e1d4f4ed9d 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1515,10 +1515,7 @@ static int __meminit vmemmap_populate_hugepages(unsigned long start, if (pmd_none(*pmd)) { void *p; - if (altmap) - p = altmap_alloc_block_buf(PMD_SIZE, altmap); - else - p = vmemmap_alloc_block_buf(PMD_SIZE, node); + p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap); if (p) { pte_t entry; diff --git a/include/linux/mm.h b/include/linux/mm.h index a7ff98738126..98e35df8d88e 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2982,8 +2982,8 @@ pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node, struct vmem_altmap *altmap); void *vmemmap_alloc_block(unsigned long size, int node); struct vmem_altmap; -void *vmemmap_alloc_block_buf(unsigned long size, int node); -void *altmap_alloc_block_buf(unsigned long size, struct vmem_altmap *altmap); +void *vmemmap_alloc_block_buf(unsigned long size, int node, + struct vmem_altmap *altmap); void vmemmap_verify(pte_t *, int, unsigned long, unsigned long); int vmemmap_populate_basepages(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap); diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index ceed10dec31e..41eeac67723b 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c @@ -69,11 +69,19 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node) __pa(MAX_DMA_ADDRESS)); } +static void * __meminit altmap_alloc_block_buf(unsigned long size, + struct vmem_altmap *altmap); + /* need to make sure size is all the same during early stage */ -void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node) +void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node, + struct vmem_altmap *altmap) { - void *ptr = sparse_buffer_alloc(size); + void *ptr; + + if (altmap) + return altmap_alloc_block_buf(size, altmap); + ptr = sparse_buffer_alloc(size); if (!ptr) ptr = vmemmap_alloc_block(size, node); return ptr; @@ -94,15 +102,8 @@ static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap) return 0; } -/** - * altmap_alloc_block_buf - allocate pages from the device page map - * @altmap: device page map - * @size: size (in bytes) of the allocation - * - * Allocations are aligned to the size of the request. 
- */ -void * __meminit altmap_alloc_block_buf(unsigned long size, - struct vmem_altmap *altmap) +static void * __meminit altmap_alloc_block_buf(unsigned long size, + struct vmem_altmap *altmap) { unsigned long pfn, nr_pfns, nr_align; @@ -147,10 +148,7 @@ pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node, pte_t entry; void *p; - if (altmap) - p = altmap_alloc_block_buf(PAGE_SIZE, altmap); - else - p = vmemmap_alloc_block_buf(PAGE_SIZE, node); + p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap); if (!p) return NULL; entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL); -- cgit v1.2.3 From 45e55300f11495ed58c53427da7f0d958800a30f Mon Sep 17 00:00:00 2001 From: Peter Collingbourne Date: Thu, 6 Aug 2020 23:23:37 -0700 Subject: mm: remove unnecessary wrapper function do_mmap_pgoff() The current split between do_mmap() and do_mmap_pgoff() was introduced in commit 1fcfd8db7f82 ("mm, mpx: add "vm_flags_t vm_flags" arg to do_mmap_pgoff()") to support MPX. The wrapper function do_mmap_pgoff() always passed 0 as the value of the vm_flags argument to do_mmap(). However, MPX support has subsequently been removed from the kernel and there were no more direct callers of do_mmap(); all calls were going via do_mmap_pgoff(). Simplify the code by removing do_mmap_pgoff() and changing all callers to directly call do_mmap(), which now no longer takes a vm_flags argument. Signed-off-by: Peter Collingbourne Signed-off-by: Andrew Morton Reviewed-by: Andrew Morton Reviewed-by: David Hildenbrand Link: http://lkml.kernel.org/r/20200727194109.1371462-1-pcc@google.com Signed-off-by: Linus Torvalds --- fs/aio.c | 6 +++--- fs/hugetlbfs/inode.c | 2 +- include/linux/fs.h | 2 +- include/linux/mm.h | 12 +----------- ipc/shm.c | 2 +- mm/mmap.c | 16 ++++++++-------- mm/nommu.c | 6 +++--- mm/shmem.c | 2 +- mm/util.c | 4 ++-- 9 files changed, 21 insertions(+), 31 deletions(-) (limited to 'include') diff --git a/fs/aio.c b/fs/aio.c index 91e7cc4a9f17..5736bff48e9e 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -525,9 +525,9 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events) return -EINTR; } - ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size, - PROT_READ | PROT_WRITE, - MAP_SHARED, 0, &unused, NULL); + ctx->mmap_base = do_mmap(ctx->aio_ring_file, 0, ctx->mmap_size, + PROT_READ | PROT_WRITE, + MAP_SHARED, 0, &unused, NULL); mmap_write_unlock(mm); if (IS_ERR((void *)ctx->mmap_base)) { ctx->mmap_size = 0; diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index ef5313f9c78f..523954d00dff 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -140,7 +140,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) * already been checked by prepare_hugepage_range. If you add * any error returns here, do so after setting VM_HUGETLB, so * is_vm_hugetlb_page tests below unmap_region go the right - * way when do_mmap_pgoff unwinds (may be important on powerpc + * way when do_mmap unwinds (may be important on powerpc * and ia64). */ vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND; diff --git a/include/linux/fs.h b/include/linux/fs.h index b1c3a14f12e8..f5da33bcaaae 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -528,7 +528,7 @@ static inline int mapping_mapped(struct address_space *mapping) /* * Might pages of this file have been modified in userspace? 
- * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff + * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap * marks vma as VM_SHARED if it is shared, and the file was opened for * writing i.e. vma may be mprotected writable even if now readonly. * diff --git a/include/linux/mm.h b/include/linux/mm.h index 98e35df8d88e..cf7e4605ff3f 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2546,23 +2546,13 @@ extern unsigned long mmap_region(struct file *file, unsigned long addr, struct list_head *uf); extern unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, - vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate, - struct list_head *uf); + unsigned long pgoff, unsigned long *populate, struct list_head *uf); extern int __do_munmap(struct mm_struct *, unsigned long, size_t, struct list_head *uf, bool downgrade); extern int do_munmap(struct mm_struct *, unsigned long, size_t, struct list_head *uf); extern int do_madvise(unsigned long start, size_t len_in, int behavior); -static inline unsigned long -do_mmap_pgoff(struct file *file, unsigned long addr, - unsigned long len, unsigned long prot, unsigned long flags, - unsigned long pgoff, unsigned long *populate, - struct list_head *uf) -{ - return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf); -} - #ifdef CONFIG_MMU extern int __mm_populate(unsigned long addr, unsigned long len, int ignore_errors); diff --git a/ipc/shm.c b/ipc/shm.c index 0a6dd94afa21..bf38d7e2fbe9 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -1558,7 +1558,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, goto invalid; } - addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate, NULL); + addr = do_mmap(file, addr, size, prot, flags, 0, &populate, NULL); *raddr = addr; err = 0; if (IS_ERR_VALUE(addr)) diff --git a/mm/mmap.c b/mm/mmap.c index ba39409cdb17..40248d84ad5f 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1030,7 +1030,7 @@ static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1, * anon_vmas, nor if same anon_vma is assigned but offsets incompatible. * * We don't check here for the merged mmap wrapping around the end of pagecache - * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which + * indices (16TB on ia32) because do_mmap() does not permit mmap's which * wrap, nor mmaps which cover the final page at index -1UL. */ static int @@ -1365,11 +1365,11 @@ static inline bool file_mmap_ok(struct file *file, struct inode *inode, */ unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, - unsigned long flags, vm_flags_t vm_flags, - unsigned long pgoff, unsigned long *populate, - struct list_head *uf) + unsigned long flags, unsigned long pgoff, + unsigned long *populate, struct list_head *uf) { struct mm_struct *mm = current->mm; + vm_flags_t vm_flags; int pkey = 0; *populate = 0; @@ -1431,7 +1431,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr, * to. we assume access permissions have been handled by the open * of the memory object, so we don't do any here. 
*/ - vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) | + vm_flags = calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; if (flags & MAP_LOCKED) @@ -2230,7 +2230,7 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, /* * mmap_region() will call shmem_zero_setup() to create a file, * so use shmem's get_unmapped_area in case it can be huge. - * do_mmap_pgoff() will clear pgoff, so match alignment. + * do_mmap() will clear pgoff, so match alignment. */ pgoff = 0; get_area = shmem_get_unmapped_area; @@ -3003,7 +3003,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, } file = get_file(vma->vm_file); - ret = do_mmap_pgoff(vma->vm_file, start, size, + ret = do_mmap(vma->vm_file, start, size, prot, flags, pgoff, &populate, NULL); fput(file); out: @@ -3223,7 +3223,7 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) * By setting it to reflect the virtual start address of the * vma, merges and splits can happen in a seamless way, just * using the existing file pgoff checks and manipulations. - * Similarly in do_mmap_pgoff and in do_brk. + * Similarly in do_mmap and in do_brk. */ if (vma_is_anonymous(vma)) { BUG_ON(vma->anon_vma); diff --git a/mm/nommu.c b/mm/nommu.c index 314174817b04..340ae7774c13 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -1078,7 +1078,6 @@ unsigned long do_mmap(struct file *file, unsigned long len, unsigned long prot, unsigned long flags, - vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate, struct list_head *uf) @@ -1086,6 +1085,7 @@ unsigned long do_mmap(struct file *file, struct vm_area_struct *vma; struct vm_region *region; struct rb_node *rb; + vm_flags_t vm_flags; unsigned long capabilities, result; int ret; @@ -1104,7 +1104,7 @@ unsigned long do_mmap(struct file *file, /* we've determined that we can make the mapping, now translate what we * now know into VMA flags */ - vm_flags |= determine_vm_flags(file, prot, flags, capabilities); + vm_flags = determine_vm_flags(file, prot, flags, capabilities); /* we're going to need to record the mapping */ region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL); @@ -1763,7 +1763,7 @@ EXPORT_SYMBOL_GPL(access_process_vm); * * Check the shared mappings on an inode on behalf of a shrinking truncate to * make sure that that any outstanding VMAs aren't broken and then shrink the - * vm_regions that extend that beyond so that do_mmap_pgoff() doesn't + * vm_regions that extend that beyond so that do_mmap() doesn't * automatically grant mappings that are too large. 
*/ int nommu_shrink_inode_mappings(struct inode *inode, size_t size, diff --git a/mm/shmem.c b/mm/shmem.c index c5c281893bb8..eb6b36d89722 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -4245,7 +4245,7 @@ EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt); /** * shmem_zero_setup - setup a shared anonymous mapping - * @vma: the vma to be mmapped is prepared by do_mmap_pgoff + * @vma: the vma to be mmapped is prepared by do_mmap */ int shmem_zero_setup(struct vm_area_struct *vma) { diff --git a/mm/util.c b/mm/util.c index 8d6280c05238..5ef378a2a038 100644 --- a/mm/util.c +++ b/mm/util.c @@ -503,8 +503,8 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr, if (!ret) { if (mmap_write_lock_killable(mm)) return -EINTR; - ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff, - &populate, &uf); + ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate, + &uf); mmap_write_unlock(mm); userfaultfd_unmap_complete(mm, &uf); if (populate) -- cgit v1.2.3 From b8aa9d9d95b3b4b60d42ac95f65d33a92527aef3 Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Thu, 6 Aug 2020 23:23:40 -0700 Subject: mm/mremap: it is sure to have enough space when extent meets requirement Patch series "mm/mremap: cleanup move_page_tables() a little", v5. move_page_tables() tries to move page table by PMD or PTE. The root reason is if it tries to move PMD, both old and new range should be PMD aligned. But current code calculate old range and new range separately. This leads to some redundant check and calculation. This cleanup tries to consolidate the range check in one place to reduce some extra range handling. This patch (of 3): old_end is passed to these two functions to check whether there is enough space to do the move, while this check is done before invoking these functions. These two functions only would be invoked when extent meets the requirement and there is one check before invoking these functions: if (extent > old_end - old_addr) extent = old_end - old_addr; This implies (old_end - old_addr) won't fail the check in these two functions. Signed-off-by: Wei Yang Signed-off-by: Andrew Morton Tested-by: Dmitry Osipenko Acked-by: Kirill A. 
Shutemov Cc: Vlastimil Babka Cc: Yang Shi Cc: Thomas Hellstrom (VMware) Cc: Anshuman Khandual Cc: Sean Christopherson Cc: Wei Yang Cc: Peter Xu Cc: Aneesh Kumar K.V Cc: Matthew Wilcox Cc: Thomas Hellstrom Link: http://lkml.kernel.org/r/20200710092835.56368-1-richard.weiyang@linux.alibaba.com Link: http://lkml.kernel.org/r/20200710092835.56368-2-richard.weiyang@linux.alibaba.com Link: http://lkml.kernel.org/r/20200708095028.41706-1-richard.weiyang@linux.alibaba.com Link: http://lkml.kernel.org/r/20200708095028.41706-2-richard.weiyang@linux.alibaba.com Signed-off-by: Linus Torvalds --- include/linux/huge_mm.h | 2 +- mm/huge_memory.c | 7 ++----- mm/mremap.c | 10 ++++------ 3 files changed, 7 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 71f20776b06c..17c4c4975145 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -42,7 +42,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned char *vec); extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, - unsigned long new_addr, unsigned long old_end, + unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd); extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot, diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 78c84bee7e29..1e580fdad4d0 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1722,17 +1722,14 @@ static pmd_t move_soft_dirty_pmd(pmd_t pmd) } bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, - unsigned long new_addr, unsigned long old_end, - pmd_t *old_pmd, pmd_t *new_pmd) + unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd) { spinlock_t *old_ptl, *new_ptl; pmd_t pmd; struct mm_struct *mm = vma->vm_mm; bool force_flush = false; - if ((old_addr & ~HPAGE_PMD_MASK) || - (new_addr & ~HPAGE_PMD_MASK) || - old_end - old_addr < HPAGE_PMD_SIZE) + if ((old_addr & ~HPAGE_PMD_MASK) || (new_addr & ~HPAGE_PMD_MASK)) return false; /* diff --git a/mm/mremap.c b/mm/mremap.c index 6b153dc05fe4..4f434062d154 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -193,15 +193,13 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, #ifdef CONFIG_HAVE_MOVE_PMD static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr, - unsigned long new_addr, unsigned long old_end, - pmd_t *old_pmd, pmd_t *new_pmd) + unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd) { spinlock_t *old_ptl, *new_ptl; struct mm_struct *mm = vma->vm_mm; pmd_t pmd; - if ((old_addr & ~PMD_MASK) || (new_addr & ~PMD_MASK) - || old_end - old_addr < PMD_SIZE) + if ((old_addr & ~PMD_MASK) || (new_addr & ~PMD_MASK)) return false; /* @@ -292,7 +290,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma, if (need_rmap_locks) take_rmap_locks(vma); moved = move_huge_pmd(vma, old_addr, new_addr, - old_end, old_pmd, new_pmd); + old_pmd, new_pmd); if (need_rmap_locks) drop_rmap_locks(vma); if (moved) @@ -312,7 +310,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma, if (need_rmap_locks) take_rmap_locks(vma); moved = move_normal_pmd(vma, old_addr, new_addr, - old_end, old_pmd, new_pmd); + old_pmd, new_pmd); if (need_rmap_locks) drop_rmap_locks(vma); if (moved) -- cgit v1.2.3 From c89ab04febf97d2db8ca4ef8e2866fadc474351b Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Thu, 6 Aug 2020 23:24:02 -0700 Subject: mm/sparse: cleanup the code surrounding memory_present() After removal of 
CONFIG_HAVE_MEMBLOCK_NODE_MAP we have two equivalent functions that call memory_present() for each region in memblock.memory: sparse_memory_present_with_active_regions() and membocks_present(). Moreover, all architectures have a call to either of these functions preceding the call to sparse_init() and in the most cases they are called one after the other. Mark the regions from memblock.memory as present during sparce_init() by making sparse_init() call memblocks_present(), make memblocks_present() and memory_present() functions static and remove redundant sparse_memory_present_with_active_regions() function. Also remove no longer required HAVE_MEMORY_PRESENT configuration option. Signed-off-by: Mike Rapoport Signed-off-by: Andrew Morton Link: http://lkml.kernel.org/r/20200712083130.22919-1-rppt@kernel.org Signed-off-by: Linus Torvalds --- Documentation/vm/memory-model.rst | 7 ++----- arch/arm/mm/init.c | 9 ++------- arch/arm64/mm/init.c | 6 ++---- arch/ia64/mm/discontig.c | 1 - arch/microblaze/mm/init.c | 3 --- arch/mips/kernel/setup.c | 8 -------- arch/mips/loongson64/numa.c | 1 - arch/mips/sgi-ip27/ip27-memory.c | 2 -- arch/parisc/mm/init.c | 5 ----- arch/powerpc/mm/mem.c | 2 -- arch/powerpc/mm/numa.c | 1 - arch/riscv/mm/init.c | 1 - arch/s390/mm/init.c | 1 - arch/sh/mm/init.c | 6 ------ arch/sh/mm/numa.c | 3 --- arch/sparc/mm/init_64.c | 1 - arch/x86/mm/init_32.c | 2 -- arch/x86/mm/init_64.c | 1 - include/linux/mm.h | 4 ---- include/linux/mmzone.h | 14 -------------- mm/Kconfig | 6 +----- mm/page_alloc.c | 16 ---------------- mm/sparse.c | 20 ++++++++++++-------- 23 files changed, 19 insertions(+), 101 deletions(-) (limited to 'include') diff --git a/Documentation/vm/memory-model.rst b/Documentation/vm/memory-model.rst index 2b898a27b346..769449734573 100644 --- a/Documentation/vm/memory-model.rst +++ b/Documentation/vm/memory-model.rst @@ -141,11 +141,8 @@ sections: `mem_section` objects and the number of rows is calculated to fit all the memory sections. -The architecture setup code should call :c:func:`memory_present` for -each active memory range or use :c:func:`memblocks_present` or -:c:func:`sparse_memory_present_with_active_regions` wrappers to -initialize the memory sections. Next, the actual memory maps should be -set up using :c:func:`sparse_init`. +The architecture setup code should call sparse_init() to +initialize the memory sections and the memory maps. With SPARSEMEM there are two possible ways to convert a PFN to the corresponding `struct page` - a "classic sparse" and "sparse diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 01e18e43b174..000c1b48e973 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -243,13 +243,8 @@ void __init bootmem_init(void) (phys_addr_t)max_low_pfn << PAGE_SHIFT); /* - * Sparsemem tries to allocate bootmem in memory_present(), - * so must be done after the fixed reservations - */ - memblocks_present(); - - /* - * sparse_init() needs the bootmem allocator up and running. + * sparse_init() tries to allocate memory from memblock, so must be + * done after the fixed reservations */ sparse_init(); diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index f8c19c6c8e71..481d22c32a2e 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -430,11 +430,9 @@ void __init bootmem_init(void) #endif /* - * Sparsemem tries to allocate bootmem in memory_present(), so must be - * done after the fixed reservations. 
+ * sparse_init() tries to allocate memory from memblock, so must be + * done after the fixed reservations */ - memblocks_present(); - sparse_init(); zone_sizes_init(min, max); diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index 2ba2127335a7..dbe829fc5298 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -600,7 +600,6 @@ void __init paging_init(void) max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT; - sparse_memory_present_with_active_regions(MAX_NUMNODES); sparse_init(); #ifdef CONFIG_VIRTUAL_MEM_MAP diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index 521b59ba716c..0880a003573d 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -172,9 +172,6 @@ void __init setup_memory(void) &memblock.memory, 0); } - /* XXX need to clip this if using highmem? */ - sparse_memory_present_with_active_regions(0); - paging_init(); } diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 588b21245e00..bf5f5acab0a8 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -371,14 +371,6 @@ static void __init bootmem_init(void) #endif } - - /* - * In any case the added to the memblock memory regions - * (highmem/lowmem, available/reserved, etc) are considered - * as present, so inform sparsemem about them. - */ - memblocks_present(); - /* * Reserve initrd memory if needed. */ diff --git a/arch/mips/loongson64/numa.c b/arch/mips/loongson64/numa.c index 901f5be5ee76..ea8bb1bc667e 100644 --- a/arch/mips/loongson64/numa.c +++ b/arch/mips/loongson64/numa.c @@ -220,7 +220,6 @@ static __init void prom_meminit(void) cpumask_clear(&__node_cpumask[node]); } } - memblocks_present(); max_low_pfn = PHYS_PFN(memblock_end_of_DRAM()); for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) { diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index 1213215ea965..d411e0a90a5b 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c @@ -402,8 +402,6 @@ void __init prom_meminit(void) } __node_data[node] = &null_node; } - - memblocks_present(); } void __init prom_free_prom_memory(void) diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 39ea464c8bd9..4381b65ae1e0 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -689,11 +689,6 @@ void __init paging_init(void) flush_cache_all_local(); /* start with known state */ flush_tlb_all_local(NULL); - /* - * Mark all memblocks as present for sparsemem using - * memory_present() and then initialize sparsemem. - */ - memblocks_present(); sparse_init(); parisc_bootmem_free(); } diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index ab12916ec1a7..ec68c9eeac0e 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -183,8 +183,6 @@ void __init mem_topology_setup(void) void __init initmem_init(void) { - /* XXX need to clip this if using highmem? 
*/ - sparse_memory_present_with_active_regions(0); sparse_init(); } diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 9fcf2d195830..03a81d65095b 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -949,7 +949,6 @@ void __init initmem_init(void) get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); setup_node_data(nid, start_pfn, end_pfn); - sparse_memory_present_with_active_regions(nid); } sparse_init(); diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 416e520d07d3..f6e6286b3d15 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -544,7 +544,6 @@ void mark_rodata_ro(void) void __init paging_init(void) { setup_vm_final(); - memblocks_present(); sparse_init(); setup_zero_page(); zone_sizes_init(); diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 6dc7c3b60ef6..0d282081dc1f 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -115,7 +115,6 @@ void __init paging_init(void) __load_psw_mask(psw.mask); kasan_free_early_identity(); - sparse_memory_present_with_active_regions(MAX_NUMNODES); sparse_init(); zone_dma_bits = 31; memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index a86ce13f392c..613de8096335 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -241,12 +241,6 @@ static void __init do_init_bootmem(void) plat_mem_setup(); - for_each_memblock(memory, reg) { - int nid = memblock_get_region_node(reg); - - memory_present(nid, memblock_region_memory_base_pfn(reg), - memblock_region_memory_end_pfn(reg)); - } sparse_init(); } diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c index f7e4439deb17..50f0dc1744d0 100644 --- a/arch/sh/mm/numa.c +++ b/arch/sh/mm/numa.c @@ -53,7 +53,4 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end) /* It's up */ node_set_online(nid); - - /* Kick sparsemem */ - sparse_memory_present_with_active_regions(nid); } diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 02e6e5e0f106..fad6d3129904 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -1610,7 +1610,6 @@ static unsigned long __init bootmem_init(unsigned long phys_base) /* XXX cpu notifier XXX */ - sparse_memory_present_with_active_regions(MAX_NUMNODES); sparse_init(); return end_pfn; diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 8b4afad84f4a..4cb958419fb0 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -678,7 +678,6 @@ void __init initmem_init(void) #endif memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0); - sparse_memory_present_with_active_regions(0); #ifdef CONFIG_FLATMEM max_mapnr = IS_ENABLED(CONFIG_HIGHMEM) ? highend_pfn : max_low_pfn; @@ -718,7 +717,6 @@ void __init paging_init(void) * NOTE: at this point the bootmem allocator is fully available. 
*/ olpc_dt_build_devicetree(); - sparse_memory_present_with_active_regions(MAX_NUMNODES); sparse_init(); zone_sizes_init(); } diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 53e1d4f4ed9d..1c1209da55fe 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -817,7 +817,6 @@ void __init initmem_init(void) void __init paging_init(void) { - sparse_memory_present_with_active_regions(MAX_NUMNODES); sparse_init(); /* diff --git a/include/linux/mm.h b/include/linux/mm.h index cf7e4605ff3f..392016ce5878 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2382,9 +2382,6 @@ static inline unsigned long get_num_physpages(void) * for_each_valid_physical_page_range() * memblock_add_node(base, size, nid) * free_area_init(max_zone_pfns); - * - * sparse_memory_present_with_active_regions() calls memory_present() for - * each range when SPARSEMEM is enabled. */ void free_area_init(unsigned long *max_zone_pfn); unsigned long node_map_pfn_alignment(void); @@ -2395,7 +2392,6 @@ extern unsigned long absent_pages_in_range(unsigned long start_pfn, extern void get_pfn_range_for_nid(unsigned int nid, unsigned long *start_pfn, unsigned long *end_pfn); extern unsigned long find_min_pfn_with_active_regions(void); -extern void sparse_memory_present_with_active_regions(int nid); #ifndef CONFIG_NEED_MULTIPLE_NODES static inline int early_pfn_to_nid(unsigned long pfn) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index a3bd54139a30..2eef8afd3a0f 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -839,18 +839,6 @@ static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec) extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx); -#ifdef CONFIG_HAVE_MEMORY_PRESENT -void memory_present(int nid, unsigned long start, unsigned long end); -#else -static inline void memory_present(int nid, unsigned long start, unsigned long end) {} -#endif - -#if defined(CONFIG_SPARSEMEM) -void memblocks_present(void); -#else -static inline void memblocks_present(void) {} -#endif - #ifdef CONFIG_HAVE_MEMORYLESS_NODES int local_memory_node(int node_id); #else @@ -1407,8 +1395,6 @@ struct mminit_pfnnid_cache { #define early_pfn_valid(pfn) (1) #endif -void memory_present(int nid, unsigned long start, unsigned long end); - /* * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we * need to check pfn validity within that MAX_ORDER_NR_PAGES block. diff --git a/mm/Kconfig b/mm/Kconfig index d41f3fa7e923..6c974888f86f 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -88,13 +88,9 @@ config NEED_MULTIPLE_NODES def_bool y depends on DISCONTIGMEM || NUMA -config HAVE_MEMORY_PRESENT - def_bool y - depends on ARCH_HAVE_MEMORY_PRESENT || SPARSEMEM - # # SPARSEMEM_EXTREME (which is the default) does some bootmem -# allocations when memory_present() is called. If this cannot +# allocations when sparse_init() is called. If this cannot # be done on your architecture, select this option. However, # statically allocating the mem_section[] array can potentially # consume vast quantities of .bss, so be careful. diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 8d5d8526c2f3..f49de9e97bf2 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6324,22 +6324,6 @@ void __meminit init_currently_empty_zone(struct zone *zone, zone->initialized = 1; } -/** - * sparse_memory_present_with_active_regions - Call memory_present for each active range - * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used. 
- * - * If an architecture guarantees that all ranges registered contain no holes and may - * be freed, this function may be used instead of calling memory_present() manually. - */ -void __init sparse_memory_present_with_active_regions(int nid) -{ - unsigned long start_pfn, end_pfn; - int i, this_nid; - - for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) - memory_present(this_nid, start_pfn, end_pfn); -} - /** * get_pfn_range_for_nid - Return the start and end page frames for a node * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. diff --git a/mm/sparse.c b/mm/sparse.c index 1b5e0385f419..fcc3d176f1ea 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -249,7 +249,7 @@ void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages) #endif /* Record a memory area against a node. */ -void __init memory_present(int nid, unsigned long start, unsigned long end) +static void __init memory_present(int nid, unsigned long start, unsigned long end) { unsigned long pfn; @@ -285,11 +285,11 @@ void __init memory_present(int nid, unsigned long start, unsigned long end) } /* - * Mark all memblocks as present using memory_present(). This is a - * convenience function that is useful for a number of arches - * to mark all of the systems memory as present during initialization. + * Mark all memblocks as present using memory_present(). + * This is a convenience function that is useful to mark all of the systems + * memory as present during initialization. */ -void __init memblocks_present(void) +static void __init memblocks_present(void) { struct memblock_region *reg; @@ -574,9 +574,13 @@ failed: */ void __init sparse_init(void) { - unsigned long pnum_begin = first_present_section_nr(); - int nid_begin = sparse_early_nid(__nr_to_section(pnum_begin)); - unsigned long pnum_end, map_count = 1; + unsigned long pnum_end, pnum_begin, map_count = 1; + int nid_begin; + + memblocks_present(); + + pnum_begin = first_present_section_nr(); + nid_begin = sparse_early_nid(__nr_to_section(pnum_begin)); /* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */ set_pageblock_order(); -- cgit v1.2.3 From 26e760c9a7c8ec31fa1a6bfbbce3f63f189ccef0 Mon Sep 17 00:00:00 2001 From: Walter Wu Date: Thu, 6 Aug 2020 23:24:35 -0700 Subject: rcu: kasan: record and print call_rcu() call stack Patch series "kasan: memorize and print call_rcu stack", v8. This patchset improves KASAN reports by making them to have call_rcu() call stack information. It is useful for programmers to solve use-after-free or double-free memory issue. The KASAN report was as follows(cleaned up slightly): BUG: KASAN: use-after-free in kasan_rcu_reclaim+0x58/0x60 Freed by task 0: kasan_save_stack+0x24/0x50 kasan_set_track+0x24/0x38 kasan_set_free_info+0x18/0x20 __kasan_slab_free+0x10c/0x170 kasan_slab_free+0x10/0x18 kfree+0x98/0x270 kasan_rcu_reclaim+0x1c/0x60 Last call_rcu(): kasan_save_stack+0x24/0x50 kasan_record_aux_stack+0xbc/0xd0 call_rcu+0x8c/0x580 kasan_rcu_uaf+0xf4/0xf8 Generic KASAN will record the last two call_rcu() call stacks and print up to 2 call_rcu() call stacks in KASAN report. it is only suitable for generic KASAN. This feature considers the size of struct kasan_alloc_meta and kasan_free_meta, we try to optimize the structure layout and size, lets it get better memory consumption. 
[1]https://bugzilla.kernel.org/show_bug.cgi?id=198437 [2]https://groups.google.com/forum/#!searchin/kasan-dev/better$20stack$20traces$20for$20rcu%7Csort:date/kasan-dev/KQsjT_88hDE/7rNUZprRBgAJ This patch (of 4): This feature will record the last two call_rcu() call stacks and prints up to 2 call_rcu() call stacks in KASAN report. When call_rcu() is called, we store the call_rcu() call stack into slub alloc meta-data, so that the KASAN report can print rcu stack. [1]https://bugzilla.kernel.org/show_bug.cgi?id=198437 [2]https://groups.google.com/forum/#!searchin/kasan-dev/better$20stack$20traces$20for$20rcu%7Csort:date/kasan-dev/KQsjT_88hDE/7rNUZprRBgAJ [walter-zh.wu@mediatek.com: build fix] Link: http://lkml.kernel.org/r/20200710162401.23816-1-walter-zh.wu@mediatek.com Suggested-by: Dmitry Vyukov Signed-off-by: Walter Wu Signed-off-by: Andrew Morton Tested-by: Dmitry Vyukov Reviewed-by: Dmitry Vyukov Reviewed-by: Andrey Konovalov Acked-by: Paul E. McKenney Cc: Andrey Ryabinin Cc: Alexander Potapenko Cc: Josh Triplett Cc: Mathieu Desnoyers Cc: Lai Jiangshan Cc: Joel Fernandes Cc: Jonathan Corbet Cc: Matthias Brugger Link: http://lkml.kernel.org/r/20200710162123.23713-1-walter-zh.wu@mediatek.com Link: http://lkml.kernel.org/r/20200601050847.1096-1-walter-zh.wu@mediatek.com Link: http://lkml.kernel.org/r/20200601050927.1153-1-walter-zh.wu@mediatek.com Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 2 ++ kernel/rcu/tree.c | 2 ++ mm/kasan/common.c | 4 ++-- mm/kasan/generic.c | 21 +++++++++++++++++++++ mm/kasan/kasan.h | 9 +++++++++ mm/kasan/report.c | 28 +++++++++++++++++++++++----- 6 files changed, 59 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 82522e996c76..18452e35e7b2 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -174,11 +174,13 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; } void kasan_cache_shrink(struct kmem_cache *cache); void kasan_cache_shutdown(struct kmem_cache *cache); +void kasan_record_aux_stack(void *ptr); #else /* CONFIG_KASAN_GENERIC */ static inline void kasan_cache_shrink(struct kmem_cache *cache) {} static inline void kasan_cache_shutdown(struct kmem_cache *cache) {} +static inline void kasan_record_aux_stack(void *ptr) {} #endif /* CONFIG_KASAN_GENERIC */ diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index ac7198ed3197..8ce77d9ac716 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -59,6 +59,7 @@ #include #include #include +#include #include "../time/tick-internal.h" #include "tree.h" @@ -2890,6 +2891,7 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func) head->func = func; head->next = NULL; local_irq_save(flags); + kasan_record_aux_stack(head); rdp = this_cpu_ptr(&rcu_data); /* Add the callback to our list. 
*/ diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 757d4074fe28..ad24666f50e4 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -40,7 +40,7 @@ #include "kasan.h" #include "../slab.h" -static inline depot_stack_handle_t save_stack(gfp_t flags) +depot_stack_handle_t kasan_save_stack(gfp_t flags) { unsigned long entries[KASAN_STACK_DEPTH]; unsigned int nr_entries; @@ -53,7 +53,7 @@ static inline depot_stack_handle_t save_stack(gfp_t flags) static inline void set_track(struct kasan_track *track, gfp_t flags) { track->pid = current->pid; - track->stack = save_stack(flags); + track->stack = kasan_save_stack(flags); } void kasan_enable_current(void) diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c index 098a7dbaced6..d70586393b04 100644 --- a/mm/kasan/generic.c +++ b/mm/kasan/generic.c @@ -324,3 +324,24 @@ DEFINE_ASAN_SET_SHADOW(f2); DEFINE_ASAN_SET_SHADOW(f3); DEFINE_ASAN_SET_SHADOW(f5); DEFINE_ASAN_SET_SHADOW(f8); + +void kasan_record_aux_stack(void *addr) +{ + struct page *page = kasan_addr_to_page(addr); + struct kmem_cache *cache; + struct kasan_alloc_meta *alloc_info; + void *object; + + if (!(page && PageSlab(page))) + return; + + cache = page->slab_cache; + object = nearest_obj(cache, page, addr); + alloc_info = get_alloc_info(cache, object); + + /* + * record the last two call_rcu() call stacks. + */ + alloc_info->aux_stack[1] = alloc_info->aux_stack[0]; + alloc_info->aux_stack[0] = kasan_save_stack(GFP_NOWAIT); +} diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index cfade6413528..f89a195e336a 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -104,6 +104,13 @@ struct kasan_track { struct kasan_alloc_meta { struct kasan_track alloc_track; +#ifdef CONFIG_KASAN_GENERIC + /* + * call_rcu() call stack is stored into struct kasan_alloc_meta. + * The free stack is stored into struct kasan_free_meta. 
+ */ + depot_stack_handle_t aux_stack[2]; +#endif struct kasan_track free_track[KASAN_NR_FREE_STACKS]; #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY u8 free_pointer_tag[KASAN_NR_FREE_STACKS]; @@ -159,6 +166,8 @@ void kasan_report_invalid_free(void *object, unsigned long ip); struct page *kasan_addr_to_page(const void *addr); +depot_stack_handle_t kasan_save_stack(gfp_t flags); + #if defined(CONFIG_KASAN_GENERIC) && \ (defined(CONFIG_SLAB) || defined(CONFIG_SLUB)) void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache); diff --git a/mm/kasan/report.c b/mm/kasan/report.c index 51ec45407a0b..445a9d56eb13 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -106,15 +106,20 @@ static void end_report(unsigned long *flags) kasan_enable_current(); } +static void print_stack(depot_stack_handle_t stack) +{ + unsigned long *entries; + unsigned int nr_entries; + + nr_entries = stack_depot_fetch(stack, &entries); + stack_trace_print(entries, nr_entries, 0); +} + static void print_track(struct kasan_track *track, const char *prefix) { pr_err("%s by task %u:\n", prefix, track->pid); if (track->stack) { - unsigned long *entries; - unsigned int nr_entries; - - nr_entries = stack_depot_fetch(track->stack, &entries); - stack_trace_print(entries, nr_entries, 0); + print_stack(track->stack); } else { pr_err("(stack is not available)\n"); } @@ -193,6 +198,19 @@ static void describe_object(struct kmem_cache *cache, void *object, free_track = kasan_get_free_track(cache, object, tag); print_track(free_track, "Freed"); pr_err("\n"); + +#ifdef CONFIG_KASAN_GENERIC + if (alloc_info->aux_stack[0]) { + pr_err("Last call_rcu():\n"); + print_stack(alloc_info->aux_stack[0]); + pr_err("\n"); + } + if (alloc_info->aux_stack[1]) { + pr_err("Second to last call_rcu():\n"); + print_stack(alloc_info->aux_stack[1]); + pr_err("\n"); + } +#endif } describe_object_addr(cache, object, addr); -- cgit v1.2.3 From c0e16ab3b5887e86cd45b95e28cf66498b161ee1 Mon Sep 17 00:00:00 2001 From: Vincenzo Frascino Date: Thu, 6 Aug 2020 23:24:50 -0700 Subject: kasan: remove kasan_unpoison_stack_above_sp_to() kasan_unpoison_stack_above_sp_to() is defined in kasan code but never used. The function was introduced as part of the commit: commit 9f7d416c36124667 ("kprobes: Unpoison stack in jprobe_return() for KASAN") ... where it was necessary because x86's jprobe_return() would leave stale shadow on the stack, and was an oddity in that regard. Since then, jprobes were removed entirely, and as of commit: commit 80006dbee674f9fa ("kprobes/x86: Remove jprobe implementation") ... there have been no callers of this function. Remove the declaration and the implementation. 
Signed-off-by: Vincenzo Frascino Signed-off-by: Andrew Morton Reviewed-by: Mark Rutland Reviewed-by: Andrey Konovalov Cc: Andrey Ryabinin Cc: Alexander Potapenko Cc: Dmitry Vyukov Link: http://lkml.kernel.org/r/20200706143505.23299-1-vincenzo.frascino@arm.com Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 2 -- mm/kasan/common.c | 15 --------------- 2 files changed, 17 deletions(-) (limited to 'include') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 18452e35e7b2..087fba34b209 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -38,7 +38,6 @@ extern void kasan_disable_current(void); void kasan_unpoison_shadow(const void *address, size_t size); void kasan_unpoison_task_stack(struct task_struct *task); -void kasan_unpoison_stack_above_sp_to(const void *watermark); void kasan_alloc_pages(struct page *page, unsigned int order); void kasan_free_pages(struct page *page, unsigned int order); @@ -101,7 +100,6 @@ void kasan_restore_multi_shot(bool enabled); static inline void kasan_unpoison_shadow(const void *address, size_t size) {} static inline void kasan_unpoison_task_stack(struct task_struct *task) {} -static inline void kasan_unpoison_stack_above_sp_to(const void *watermark) {} static inline void kasan_enable_current(void) {} static inline void kasan_disable_current(void) {} diff --git a/mm/kasan/common.c b/mm/kasan/common.c index f18067736f7c..950fd372a07e 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -180,21 +180,6 @@ asmlinkage void kasan_unpoison_task_stack_below(const void *watermark) kasan_unpoison_shadow(base, watermark - base); } -/* - * Clear all poison for the region between the current SP and a provided - * watermark value, as is sometimes required prior to hand-crafted asm function - * returns in the middle of functions. - */ -void kasan_unpoison_stack_above_sp_to(const void *watermark) -{ - const void *sp = __builtin_frame_address(0); - size_t size = watermark - sp; - - if (WARN_ON(sp > watermark)) - return; - kasan_unpoison_shadow(sp, size); -} - void kasan_alloc_pages(struct page *page, unsigned int order) { u8 tag; -- cgit v1.2.3 From 2c547f9da0539ad1f7ef7f08c8c82036d61b011a Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 6 Aug 2020 23:25:01 -0700 Subject: efi: provide empty efi_enter_virtual_mode implementation When CONFIG_EFI is not enabled, we might get an undefined reference to efi_enter_virtual_mode() error, if this efi_enabled() call isn't inlined into start_kernel(). This happens, in particular, if start_kernel() is annotated with __no_sanitize_address.
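The shape of the fix is the usual config-gated stub pattern: keep the extern declaration when the feature is built in, and supply an empty static inline otherwise, so a caller guarded only by a runtime check still links even when the compiler does not fold the branch away. A rough sketch of both sides (the caller function name is hypothetical; the actual header hunk follows below):

#ifdef CONFIG_EFI
extern void efi_enter_virtual_mode(void);	/* real implementation */
#else
static inline void efi_enter_virtual_mode(void) {}	/* link-safe stub */
#endif

/* Caller guarded only by a runtime check, roughly the shape used in
 * start_kernel(); without the stub this reference can fail to link
 * when CONFIG_EFI=n and the branch is not optimized away.
 */
static void __init efi_runtime_setup_sketch(void)
{
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		efi_enter_virtual_mode();
}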
Reported-by: kernel test robot Signed-off-by: Andrey Konovalov Signed-off-by: Andrew Morton Acked-by: Ard Biesheuvel Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Catalin Marinas Cc: Dmitry Vyukov Cc: Elena Petrova Cc: Marco Elver Cc: Vincenzo Frascino Cc: Walter Wu Link: http://lkml.kernel.org/r/6514652d3a32d3ed33d6eb5c91d0af63bf0d1a0c.1596544734.git.andreyknvl@google.com Signed-off-by: Linus Torvalds --- include/linux/efi.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/linux/efi.h b/include/linux/efi.h index 05c47f857383..73db1ae04cef 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -606,7 +606,11 @@ extern void *efi_get_pal_addr (void); extern void efi_map_pal_code (void); extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg); extern void efi_gettimeofday (struct timespec64 *ts); +#ifdef CONFIG_EFI extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */ +#else +static inline void efi_enter_virtual_mode (void) {} +#endif #ifdef CONFIG_X86 extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size, -- cgit v1.2.3 From 0a18e60788d6a39436e8b5e91001b790043fc29c Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 6 Aug 2020 23:25:27 -0700 Subject: mm: remove vm_total_pages The global variable "vm_total_pages" is a relic from older days. There is only a single user that reads the variable - build_all_zonelists() - and the first thing it does is update it. Use a local variable in build_all_zonelists() instead and remove the global variable. Signed-off-by: David Hildenbrand Signed-off-by: Andrew Morton Reviewed-by: Wei Yang Reviewed-by: Pankaj Gupta Reviewed-by: Mike Rapoport Acked-by: Michal Hocko Cc: Johannes Weiner Cc: Huang Ying Cc: Minchan Kim Link: http://lkml.kernel.org/r/20200619132410.23859-2-david@redhat.com Signed-off-by: Linus Torvalds --- include/linux/swap.h | 1 - mm/memory_hotplug.c | 3 --- mm/page-writeback.c | 6 ++---- mm/page_alloc.c | 2 ++ mm/vmscan.c | 5 ----- 5 files changed, 4 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/include/linux/swap.h b/include/linux/swap.h index 5b3216ba39a9..4ab236692e05 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -372,7 +372,6 @@ extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem, extern unsigned long shrink_all_memory(unsigned long nr_pages); extern int vm_swappiness; extern int remove_mapping(struct address_space *mapping, struct page *page); -extern unsigned long vm_total_pages; extern unsigned long reclaim_pages(struct list_head *page_list); #ifdef CONFIG_NUMA diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index da374cd3d45b..be3c62e3fb95 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -844,8 +844,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, kswapd_run(nid); kcompactd_run(nid); - vm_total_pages = nr_free_pagecache_pages(); - writeback_set_ratelimit(); memory_notify(MEM_ONLINE, &arg); @@ -1595,7 +1593,6 @@ static int __ref __offline_pages(unsigned long start_pfn, kcompactd_stop(node); } - vm_total_pages = nr_free_pagecache_pages(); writeback_set_ratelimit(); memory_notify(MEM_OFFLINE, &arg); diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 28b3e7a67565..4e4ddd67b71e 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2076,13 +2076,11 @@ static int page_writeback_cpu_online(unsigned int cpu) * Called early on to tune the page writeback dirty limits. 
* * We used to scale dirty pages according to how total memory - * related to pages that could be allocated for buffers (by - * comparing nr_free_buffer_pages() to vm_total_pages. + * related to pages that could be allocated for buffers. * * However, that was when we used "dirty_ratio" to scale with * all memory, and we don't do that any more. "dirty_ratio" - * is now applied to total non-HIGHPAGE memory (by subtracting - * totalhigh_pages from vm_total_pages), and as such we can't + * is now applied to total non-HIGHPAGE memory, and as such we can't * get into the old insane situation any more where we had * large amounts of dirty pages compared to a small amount of * non-HIGHMEM memory. diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 0fb5c97ac94c..20184e2a8cfe 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5912,6 +5912,8 @@ build_all_zonelists_init(void) */ void __ref build_all_zonelists(pg_data_t *pgdat) { + unsigned long vm_total_pages; + if (system_state == SYSTEM_BOOTING) { build_all_zonelists_init(); } else { diff --git a/mm/vmscan.c b/mm/vmscan.c index 23156c252e0a..d4d7cd1d24c1 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -170,11 +170,6 @@ struct scan_control { * From 0 .. 200. Higher means more swappy. */ int vm_swappiness = 60; -/* - * The total number of pages which are beyond the high watermark within all - * zones. - */ -unsigned long vm_total_pages; static void set_task_reclaim_state(struct task_struct *task, struct reclaim_state *rs) -- cgit v1.2.3 From 56b9413bcb369f1329a438c7b61d235b4123e794 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 6 Aug 2020 23:25:30 -0700 Subject: mm/page_alloc: remove nr_free_pagecache_pages() nr_free_pagecache_pages() isn't used outside page_alloc.c anymore - and the name does not really help to understand what's going on. Let's open-code it instead and add a comment. Signed-off-by: David Hildenbrand Signed-off-by: Andrew Morton Reviewed-by: Wei Yang Reviewed-by: Pankaj Gupta Reviewed-by: Mike Rapoport Acked-by: Michal Hocko Cc: Johannes Weiner Cc: Minchan Kim Cc: Huang Ying Link: http://lkml.kernel.org/r/20200619132410.23859-3-david@redhat.com Signed-off-by: Linus Torvalds --- include/linux/swap.h | 1 - mm/page_alloc.c | 16 ++-------------- 2 files changed, 2 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/include/linux/swap.h b/include/linux/swap.h index 4ab236692e05..7eb59bc552a5 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -328,7 +328,6 @@ void workingset_update_node(struct xa_node *node); /* linux/mm/page_alloc.c */ extern unsigned long totalreserve_pages; extern unsigned long nr_free_buffer_pages(void); -extern unsigned long nr_free_pagecache_pages(void); /* Definition of global_zone_page_state not available yet */ #define nr_free_pages() global_zone_page_state(NR_FREE_PAGES) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 20184e2a8cfe..ba09a3aeb76b 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5186,19 +5186,6 @@ unsigned long nr_free_buffer_pages(void) } EXPORT_SYMBOL_GPL(nr_free_buffer_pages); -/** - * nr_free_pagecache_pages - count number of pages beyond high watermark - * - * nr_free_pagecache_pages() counts the number of pages which are beyond the - * high watermark within all zones. - * - * Return: number of pages beyond high watermark within all zones. 
- */ -unsigned long nr_free_pagecache_pages(void) -{ - return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); -} - static inline void show_node(struct zone *zone) { if (IS_ENABLED(CONFIG_NUMA)) @@ -5920,7 +5907,8 @@ void __ref build_all_zonelists(pg_data_t *pgdat) __build_all_zonelists(pgdat); /* cpuset refresh routine should be here */ } - vm_total_pages = nr_free_pagecache_pages(); + /* Get the number of free pages beyond high watermark in all zones. */ + vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); /* * Disable grouping by mobility if the number of pages in the * system is too low to allow the mechanism to work. It would be -- cgit v1.2.3 From d38ac97f8a7c4519ba141bbd7c2f7a8da8c9ff8d Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Thu, 6 Aug 2020 23:25:41 -0700 Subject: mm/page_alloc.c: replace the definition of NR_MIGRATETYPE_BITS with PB_migratetype_bits We already have the definition of PB_migratetype_bits and current NR_MIGRATETYPE_BITS looks like a cyclic definition. Just use PB_migratetype_bits is enough. Signed-off-by: Wei Yang Signed-off-by: Andrew Morton Cc: Mel Gorman Link: http://lkml.kernel.org/r/20200623124201.8199-1-richard.weiyang@linux.alibaba.com Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 2eef8afd3a0f..f509ede317b5 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -88,8 +88,7 @@ static inline bool is_migrate_movable(int mt) extern int page_group_by_mobility_disabled; -#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1) -#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1) +#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1) #define get_pageblock_migratetype(page) \ get_pfnblock_flags_mask(page, page_to_pfn(page), \ -- cgit v1.2.3 From d93d5ab9ca01e24efc6add60371b0d5684b5c146 Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Thu, 6 Aug 2020 23:25:48 -0700 Subject: mm/page_alloc.c: simplify pageblock bitmap access Due to commit e58469bafd05 ("mm: page_alloc: use word-based accesses for get/set pageblock bitmaps"), pageblock bitmap is accessed with word-based access. This operation could be simplified a little. Intuitively, if we want to get a bit range [start_idx, end_idx] in a word, we can do like this: mask = (1 << (end_bitidx - start_bitidx + 1)) - 1; ret = (word >> start_idx) & mask; And also if we want to set a bit range [start_idx, end_idx] with flags, we can do the same by just shift start_bitidx. By doing so we reduce some instructions for these two helper functions: Before Patched set_pfnblock_flags_mask 209 198(-5%) get_pfnblock_flags_mask 101 87(-13%) Since the syntax is changed a little, we need to check the whole 4-bit migrate_type instead of part of it. Signed-off-by: Wei Yang Signed-off-by: Andrew Morton Cc: Mel Gorman Link: http://lkml.kernel.org/r/20200623124201.8199-3-richard.weiyang@linux.alibaba.com Signed-off-by: Linus Torvalds --- include/linux/pageblock-flags.h | 22 +++++++--------------- mm/page_alloc.c | 13 ++++++------- 2 files changed, 13 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h index c066fec5b74b..6556e4474409 100644 --- a/include/linux/pageblock-flags.h +++ b/include/linux/pageblock-flags.h @@ -66,25 +66,17 @@ void set_pfnblock_flags_mask(struct page *page, unsigned long mask); /* Declarations for getting and setting flags. 
See mm/page_alloc.c */ -#define get_pageblock_flags_group(page, start_bitidx, end_bitidx) \ - get_pfnblock_flags_mask(page, page_to_pfn(page), \ - end_bitidx, \ - (1 << (end_bitidx - start_bitidx + 1)) - 1) -#define set_pageblock_flags_group(page, flags, start_bitidx, end_bitidx) \ - set_pfnblock_flags_mask(page, flags, page_to_pfn(page), \ - end_bitidx, \ - (1 << (end_bitidx - start_bitidx + 1)) - 1) - #ifdef CONFIG_COMPACTION #define get_pageblock_skip(page) \ - get_pageblock_flags_group(page, PB_migrate_skip, \ - PB_migrate_skip) + get_pfnblock_flags_mask(page, page_to_pfn(page), \ + PB_migrate_skip, (1 << (PB_migrate_skip))) #define clear_pageblock_skip(page) \ - set_pageblock_flags_group(page, 0, PB_migrate_skip, \ - PB_migrate_skip) + set_pfnblock_flags_mask(page, 0, page_to_pfn(page), \ + PB_migrate_skip, (1 << PB_migrate_skip)) #define set_pageblock_skip(page) \ - set_pageblock_flags_group(page, 1, PB_migrate_skip, \ - PB_migrate_skip) + set_pfnblock_flags_mask(page, (1 << PB_migrate_skip), \ + page_to_pfn(page), \ + PB_migrate_skip, (1 << PB_migrate_skip)) #else static inline bool get_pageblock_skip(struct page *page) { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 6c524fb76b31..e1fdca9ef4e9 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -489,8 +489,7 @@ static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page bitidx &= (BITS_PER_LONG-1); word = bitmap[word_bitidx]; - bitidx += end_bitidx; - return (word >> (BITS_PER_LONG - bitidx - 1)) & mask; + return (word >> bitidx) & mask; } unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn, @@ -532,9 +531,8 @@ void set_pfnblock_flags_mask(struct page *page, unsigned long flags, VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page); - bitidx += end_bitidx; - mask <<= (BITS_PER_LONG - bitidx - 1); - flags <<= (BITS_PER_LONG - bitidx - 1); + mask <<= bitidx; + flags <<= bitidx; word = READ_ONCE(bitmap[word_bitidx]); for (;;) { @@ -551,8 +549,9 @@ void set_pageblock_migratetype(struct page *page, int migratetype) migratetype < MIGRATE_PCPTYPES)) migratetype = MIGRATE_UNMOVABLE; - set_pageblock_flags_group(page, (unsigned long)migratetype, - PB_migrate, PB_migrate_end); + set_pfnblock_flags_mask(page, (unsigned long)migratetype, + page_to_pfn(page), PB_migrate_end, + MIGRATETYPE_MASK); } #ifdef CONFIG_DEBUG_VM -- cgit v1.2.3 From 535b81e209219e03f815379746bfd1eeb82d68e5 Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Thu, 6 Aug 2020 23:25:51 -0700 Subject: mm/page_alloc.c: remove unnecessary end_bitidx for [set|get]_pfnblock_flags_mask() After previous cleanup, the end_bitidx is not necessary any more. 
Signed-off-by: Wei Yang Signed-off-by: Andrew Morton Cc: Mel Gorman Link: http://lkml.kernel.org/r/20200623124201.8199-4-richard.weiyang@linux.alibaba.com Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 3 +-- include/linux/pageblock-flags.h | 8 +++----- mm/page_alloc.c | 15 +++++---------- 3 files changed, 9 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index f509ede317b5..635a96cd9b1f 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -91,8 +91,7 @@ extern int page_group_by_mobility_disabled; #define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1) #define get_pageblock_migratetype(page) \ - get_pfnblock_flags_mask(page, page_to_pfn(page), \ - PB_migrate_end, MIGRATETYPE_MASK) + get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK) struct free_area { struct list_head free_list[MIGRATE_TYPES]; diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h index 6556e4474409..fff52ad370c1 100644 --- a/include/linux/pageblock-flags.h +++ b/include/linux/pageblock-flags.h @@ -56,27 +56,25 @@ struct page; unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn, - unsigned long end_bitidx, unsigned long mask); void set_pfnblock_flags_mask(struct page *page, unsigned long flags, unsigned long pfn, - unsigned long end_bitidx, unsigned long mask); /* Declarations for getting and setting flags. See mm/page_alloc.c */ #ifdef CONFIG_COMPACTION #define get_pageblock_skip(page) \ get_pfnblock_flags_mask(page, page_to_pfn(page), \ - PB_migrate_skip, (1 << (PB_migrate_skip))) + (1 << (PB_migrate_skip))) #define clear_pageblock_skip(page) \ set_pfnblock_flags_mask(page, 0, page_to_pfn(page), \ - PB_migrate_skip, (1 << PB_migrate_skip)) + (1 << PB_migrate_skip)) #define set_pageblock_skip(page) \ set_pfnblock_flags_mask(page, (1 << PB_migrate_skip), \ page_to_pfn(page), \ - PB_migrate_skip, (1 << PB_migrate_skip)) + (1 << PB_migrate_skip)) #else static inline bool get_pageblock_skip(struct page *page) { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e1fdca9ef4e9..53c89de89e46 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -469,14 +469,13 @@ static inline int pfn_to_bitidx(struct page *page, unsigned long pfn) * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages * @page: The page within the block of interest * @pfn: The target page frame number - * @end_bitidx: The last bit of interest to retrieve * @mask: mask of bits that the caller is interested in * * Return: pageblock_bits flags */ -static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page, +static __always_inline +unsigned long __get_pfnblock_flags_mask(struct page *page, unsigned long pfn, - unsigned long end_bitidx, unsigned long mask) { unsigned long *bitmap; @@ -493,15 +492,14 @@ static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page } unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn, - unsigned long end_bitidx, unsigned long mask) { - return __get_pfnblock_flags_mask(page, pfn, end_bitidx, mask); + return __get_pfnblock_flags_mask(page, pfn, mask); } static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn) { - return __get_pfnblock_flags_mask(page, pfn, PB_migrate_end, MIGRATETYPE_MASK); + return __get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK); } /** @@ -509,12 +507,10 @@ static __always_inline int 
get_pfnblock_migratetype(struct page *page, unsigned * @page: The page within the block of interest * @flags: The flags to set * @pfn: The target page frame number - * @end_bitidx: The last bit of interest * @mask: mask of bits that the caller is interested in */ void set_pfnblock_flags_mask(struct page *page, unsigned long flags, unsigned long pfn, - unsigned long end_bitidx, unsigned long mask) { unsigned long *bitmap; @@ -550,8 +546,7 @@ void set_pageblock_migratetype(struct page *page, int migratetype) migratetype = MIGRATE_UNMOVABLE; set_pfnblock_flags_mask(page, (unsigned long)migratetype, - page_to_pfn(page), PB_migrate_end, - MIGRATETYPE_MASK); + page_to_pfn(page), MIGRATETYPE_MASK); } #ifdef CONFIG_DEBUG_VM -- cgit v1.2.3 From 8510e69c8efef82f2b37ea3e8ea19a27122c533e Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Thu, 6 Aug 2020 23:26:04 -0700 Subject: mm/page_alloc: fix memalloc_nocma_{save/restore} APIs Currently, memalloc_nocma_{save/restore} API that prevents CMA area in page allocation is implemented by using current_gfp_context(). However, there are two problems of this implementation. First, this doesn't work for allocation fastpath. In the fastpath, original gfp_mask is used since current_gfp_context() is introduced in order to control reclaim and it is on slowpath. So, CMA area can be allocated through the allocation fastpath even if memalloc_nocma_{save/restore} APIs are used. Currently, there is just one user for these APIs and it has a fallback method to prevent actual problem. Second, clearing __GFP_MOVABLE in current_gfp_context() has a side effect to exclude the memory on the ZONE_MOVABLE for allocation target. To fix these problems, this patch changes the implementation to exclude CMA area in page allocation. Main point of this change is using the alloc_flags. alloc_flags is mainly used to control allocation so it fits for excluding CMA area in allocation. Fixes: d7fefcc8de91 (mm/cma: add PF flag to force non cma alloc) Signed-off-by: Joonsoo Kim Signed-off-by: Andrew Morton Reviewed-by: Vlastimil Babka Cc: Christoph Hellwig Cc: Roman Gushchin Cc: Mike Kravetz Cc: Naoya Horiguchi Cc: Michal Hocko Cc: "Aneesh Kumar K . V" Link: http://lkml.kernel.org/r/1595468942-29687-1-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Linus Torvalds --- include/linux/sched/mm.h | 8 +------- mm/page_alloc.c | 31 +++++++++++++++++++++---------- 2 files changed, 22 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 6be66f52a2ad..85023ddc2dc2 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -175,12 +175,10 @@ static inline bool in_vfork(struct task_struct *tsk) * Applies per-task gfp context to the given allocation flags. * PF_MEMALLOC_NOIO implies GFP_NOIO * PF_MEMALLOC_NOFS implies GFP_NOFS - * PF_MEMALLOC_NOCMA implies no allocation from CMA region. 
*/ static inline gfp_t current_gfp_context(gfp_t flags) { - if (unlikely(current->flags & - (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_NOCMA))) { + if (unlikely(current->flags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) { /* * NOIO implies both NOIO and NOFS and it is a weaker context * so always make sure it makes precedence @@ -189,10 +187,6 @@ static inline gfp_t current_gfp_context(gfp_t flags) flags &= ~(__GFP_IO | __GFP_FS); else if (current->flags & PF_MEMALLOC_NOFS) flags &= ~__GFP_FS; -#ifdef CONFIG_CMA - if (current->flags & PF_MEMALLOC_NOCMA) - flags &= ~__GFP_MOVABLE; -#endif } return flags; } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 9f9e15a502ae..167732f4d124 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2785,7 +2785,7 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype, * allocating from CMA when over half of the zone's free memory * is in the CMA area. */ - if (migratetype == MIGRATE_MOVABLE && + if (alloc_flags & ALLOC_CMA && zone_page_state(zone, NR_FREE_CMA_PAGES) > zone_page_state(zone, NR_FREE_PAGES) / 2) { page = __rmqueue_cma_fallback(zone, order); @@ -2796,7 +2796,7 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype, retry: page = __rmqueue_smallest(zone, order, migratetype); if (unlikely(!page)) { - if (migratetype == MIGRATE_MOVABLE) + if (alloc_flags & ALLOC_CMA) page = __rmqueue_cma_fallback(zone, order); if (!page && __rmqueue_fallback(zone, order, migratetype, @@ -3687,6 +3687,20 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) return alloc_flags; } +static inline unsigned int current_alloc_flags(gfp_t gfp_mask, + unsigned int alloc_flags) +{ +#ifdef CONFIG_CMA + unsigned int pflags = current->flags; + + if (!(pflags & PF_MEMALLOC_NOCMA) && + gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) + alloc_flags |= ALLOC_CMA; + +#endif + return alloc_flags; +} + /* * get_page_from_freelist goes through the zonelist trying to allocate * a page. @@ -4333,10 +4347,8 @@ gfp_to_alloc_flags(gfp_t gfp_mask) } else if (unlikely(rt_task(current)) && !in_interrupt()) alloc_flags |= ALLOC_HARDER; -#ifdef CONFIG_CMA - if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) - alloc_flags |= ALLOC_CMA; -#endif + alloc_flags = current_alloc_flags(gfp_mask, alloc_flags); + return alloc_flags; } @@ -4637,7 +4649,7 @@ retry: reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); if (reserve_flags) - alloc_flags = reserve_flags; + alloc_flags = current_alloc_flags(gfp_mask, reserve_flags); /* * Reset the nodemask and zonelist iterators if memory policies can be @@ -4714,7 +4726,7 @@ retry: /* Avoid allocations with no watermarks from looping endlessly */ if (tsk_is_oom_victim(current) && - (alloc_flags == ALLOC_OOM || + (alloc_flags & ALLOC_OOM || (gfp_mask & __GFP_NOMEMALLOC))) goto nopage; @@ -4806,8 +4818,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, if (should_fail_alloc_page(gfp_mask, order)) return false; - if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE) - *alloc_flags |= ALLOC_CMA; + *alloc_flags = current_alloc_flags(gfp_mask, *alloc_flags); return true; } -- cgit v1.2.3 From 38ce2a9e33db61a3041840310077072d6210ead4 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Thu, 6 Aug 2020 12:46:49 -0400 Subject: tracing: Add trace_array_init_printk() to initialize instance trace_printk() buffers As trace_array_printk() used with not global instances will not add noise to the main buffer, they are OK to have in the kernel (unlike trace_printk()). 
This require the subsystem to create their own tracing instance, and the trace_array_printk() only writes into those instances. Add trace_array_init_printk() to initialize the trace_printk() buffers without printing out the WARNING message. Reported-by: Sean Paul Reviewed-by: Sean Paul Signed-off-by: Steven Rostedt (VMware) --- include/linux/trace.h | 1 + kernel/trace/trace.c | 44 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+) (limited to 'include') diff --git a/include/linux/trace.h b/include/linux/trace.h index 7fd86d3c691f..36d255d66f88 100644 --- a/include/linux/trace.h +++ b/include/linux/trace.h @@ -29,6 +29,7 @@ struct trace_array; void trace_printk_init_buffers(void); int trace_array_printk(struct trace_array *tr, unsigned long ip, const char *fmt, ...); +int trace_array_init_printk(struct trace_array *tr); void trace_array_put(struct trace_array *tr); struct trace_array *trace_array_get_by_name(const char *name); int trace_array_destroy(struct trace_array *tr); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 06c0feae5ff9..c5f822736261 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3129,6 +3129,9 @@ static int alloc_percpu_trace_buffer(void) { struct trace_buffer_struct *buffers; + if (trace_percpu_buffer) + return 0; + buffers = alloc_percpu(struct trace_buffer_struct); if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer")) return -ENOMEM; @@ -3331,6 +3334,26 @@ int trace_array_vprintk(struct trace_array *tr, return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args); } +/** + * trace_array_printk - Print a message to a specific instance + * @tr: The instance trace_array descriptor + * @ip: The instruction pointer that this is called from. + * @fmt: The format to print (printf format) + * + * If a subsystem sets up its own instance, they have the right to + * printk strings into their tracing instance buffer using this + * function. Note, this function will not write into the top level + * buffer (use trace_printk() for that), as writing into the top level + * buffer should only have events that can be individually disabled. + * trace_printk() is only used for debugging a kernel, and should not + * be ever encorporated in normal use. + * + * trace_array_printk() can be used, as it will not add noise to the + * top level tracing buffer. + * + * Note, trace_array_init_printk() must be called on @tr before this + * can be used. + */ __printf(3, 0) int trace_array_printk(struct trace_array *tr, unsigned long ip, const char *fmt, ...) @@ -3355,6 +3378,27 @@ int trace_array_printk(struct trace_array *tr, } EXPORT_SYMBOL_GPL(trace_array_printk); +/** + * trace_array_init_printk - Initialize buffers for trace_array_printk() + * @tr: The trace array to initialize the buffers for + * + * As trace_array_printk() only writes into instances, they are OK to + * have in the kernel (unlike trace_printk()). This needs to be called + * before trace_array_printk() can be used on a trace_array. + */ +int trace_array_init_printk(struct trace_array *tr) +{ + if (!tr) + return -ENOENT; + + /* This is only allowed for created instances */ + if (tr == &global_trace) + return -EINVAL; + + return alloc_percpu_trace_buffer(); +} +EXPORT_SYMBOL_GPL(trace_array_init_printk); + __printf(3, 4) int trace_array_printk_buf(struct trace_buffer *buffer, unsigned long ip, const char *fmt, ...) 
-- cgit v1.2.3 From 87154ff86bf69ecf76600e56ecab0b79fc3f71ea Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 2 Aug 2020 09:43:59 -0700 Subject: drm: Remove unnecessary drm_panel_attach and drm_panel_detach These functions are now empty and no longer useful so remove the functions and their uses. Signed-off-by: Joe Perches Cc: Bernard Zhao Cc: Maarten Lankhorst Cc: Maxime Ripard , Cc: Thomas Zimmermann Cc: Thierry Reding Cc: David Airlie Cc: Daniel Vetter Cc: Linus Walleij Cc: Icenowy Zheng , Cc: Jagan Teki Cc: Laurent Pinchart Cc: Robert Chiras Cc: dri-devel@lists.freedesktop.org, Cc: linux-kernel@vger.kernel.org Cc: opensource.kernel@vivo.com Signed-off-by: Sam Ravnborg # Fixed build and a few warnings Link: https://patchwork.freedesktop.org/patch/msgid/9e13761020750b1ce2f1fabee23ef6e2a2942882.camel@perches.com --- drivers/gpu/drm/bridge/analogix/analogix-anx6345.c | 12 -------- drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 9 ------ drivers/gpu/drm/bridge/panel.c | 7 ----- drivers/gpu/drm/bridge/ti-sn65dsi86.c | 3 -- drivers/gpu/drm/drm_panel.c | 36 ---------------------- drivers/gpu/drm/exynos/exynos_drm_dpi.c | 8 ----- drivers/gpu/drm/exynos/exynos_drm_dsi.c | 7 ++--- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c | 9 ------ drivers/gpu/drm/imx/imx-ldb.c | 10 ------ drivers/gpu/drm/imx/parallel-display.c | 6 ---- drivers/gpu/drm/mediatek/mtk_dsi.c | 14 --------- .../gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c | 7 +---- drivers/gpu/drm/msm/dsi/dsi_manager.c | 1 - drivers/gpu/drm/omapdrm/omap_drv.c | 17 ---------- drivers/gpu/drm/rcar-du/rcar_lvds.c | 6 +--- drivers/gpu/drm/rockchip/rockchip_lvds.c | 9 ------ drivers/gpu/drm/sti/sti_dvo.c | 2 -- drivers/gpu/drm/sun4i/sun4i_lvds.c | 9 ------ drivers/gpu/drm/sun4i/sun4i_rgb.c | 9 ------ drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c | 3 -- drivers/gpu/drm/tegra/dsi.c | 4 +-- drivers/gpu/drm/tegra/output.c | 10 ------ include/drm/drm_panel.h | 3 -- 23 files changed, 5 insertions(+), 196 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c index f082b4ed4878..d9164fab044d 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c +++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c @@ -507,10 +507,6 @@ static const struct drm_connector_helper_funcs anx6345_connector_helper_funcs = static void anx6345_connector_destroy(struct drm_connector *connector) { - struct anx6345 *anx6345 = connector_to_anx6345(connector); - - if (anx6345->panel) - drm_panel_detach(anx6345->panel); drm_connector_cleanup(connector); } @@ -575,14 +571,6 @@ static int anx6345_bridge_attach(struct drm_bridge *bridge, return err; } - if (anx6345->panel) { - err = drm_panel_attach(anx6345->panel, &anx6345->connector); - if (err) { - DRM_ERROR("Failed to attach panel: %d\n", err); - return err; - } - } - return 0; } diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index 76736fb8ed94..aa1bb86293fd 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -1265,14 +1265,6 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge, } } - if (dp->plat_data->panel) { - ret = drm_panel_attach(dp->plat_data->panel, &dp->connector); - if (ret) { - DRM_ERROR("Failed to attach panel\n"); - return ret; - } - } - return 0; } @@ -1803,7 +1795,6 @@ void analogix_dp_unbind(struct analogix_dp_device *dp) if (dp->plat_data->panel) { if 
(drm_panel_unprepare(dp->plat_data->panel)) DRM_ERROR("failed to turnoff the panel\n"); - drm_panel_detach(dp->plat_data->panel); } drm_dp_aux_unregister(&dp->aux); diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c index 1e63ed6b18aa..0ddc37551194 100644 --- a/drivers/gpu/drm/bridge/panel.c +++ b/drivers/gpu/drm/bridge/panel.c @@ -82,18 +82,11 @@ static int panel_bridge_attach(struct drm_bridge *bridge, drm_connector_attach_encoder(&panel_bridge->connector, bridge->encoder); - ret = drm_panel_attach(panel_bridge->panel, &panel_bridge->connector); - if (ret < 0) - return ret; - return 0; } static void panel_bridge_detach(struct drm_bridge *bridge) { - struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge); - - drm_panel_detach(panel_bridge->panel); } static void panel_bridge_pre_enable(struct drm_bridge *bridge) diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 86b9f0f87a14..454544e0da7d 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -394,9 +394,6 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge, } pdata->dsi = dsi; - /* attach panel to bridge */ - drm_panel_attach(pdata->panel, &pdata->connector); - return 0; err_dsi_attach: diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c index b8e9abb537cf..ba11c3641bf3 100644 --- a/drivers/gpu/drm/drm_panel.c +++ b/drivers/gpu/drm/drm_panel.c @@ -93,42 +93,6 @@ void drm_panel_remove(struct drm_panel *panel) } EXPORT_SYMBOL(drm_panel_remove); -/** - * drm_panel_attach - attach a panel to a connector - * @panel: DRM panel - * @connector: DRM connector - * - * After obtaining a pointer to a DRM panel a display driver calls this - * function to attach a panel to a connector. - * - * An error is returned if the panel is already attached to another connector. - * - * When unloading, the driver should detach from the panel by calling - * drm_panel_detach(). - * - * Return: 0 on success or a negative error code on failure. - */ -int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector) -{ - return 0; -} -EXPORT_SYMBOL(drm_panel_attach); - -/** - * drm_panel_detach - detach a panel from a connector - * @panel: DRM panel - * - * Detaches a panel from the connector it is attached to. If a panel is not - * attached to any connector this is effectively a no-op. - * - * This function should not be called by the panel device itself. It - * is only for the drm device that called drm_panel_attach(). 
- */ -void drm_panel_detach(struct drm_panel *panel) -{ -} -EXPORT_SYMBOL(drm_panel_detach); - /** * drm_panel_prepare - power on a panel * @panel: DRM panel diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c index 7ba5354e7d94..741323a2e6c3 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c @@ -42,11 +42,6 @@ static inline struct exynos_dpi *encoder_to_dpi(struct drm_encoder *e) static enum drm_connector_status exynos_dpi_detect(struct drm_connector *connector, bool force) { - struct exynos_dpi *ctx = connector_to_dpi(connector); - - if (ctx->panel) - drm_panel_attach(ctx->panel, &ctx->connector); - return connector_status_connected; } @@ -249,8 +244,5 @@ int exynos_dpi_remove(struct drm_encoder *encoder) exynos_dpi_disable(&ctx->encoder); - if (ctx->panel) - drm_panel_detach(ctx->panel); - return 0; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index ee96a95fb6be..db0eab53dcfe 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1551,12 +1551,10 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host, } dsi->panel = of_drm_find_panel(device->dev.of_node); - if (IS_ERR(dsi->panel)) { + if (IS_ERR(dsi->panel)) dsi->panel = NULL; - } else { - drm_panel_attach(dsi->panel, &dsi->connector); + else dsi->connector.status = connector_status_connected; - } } /* @@ -1596,7 +1594,6 @@ static int exynos_dsi_host_detach(struct mipi_dsi_host *host, if (dsi->panel) { mutex_lock(&drm->mode_config.mutex); exynos_dsi_disable(&dsi->encoder); - drm_panel_detach(dsi->panel); dsi->panel = NULL; dsi->connector.status = connector_status_disconnected; mutex_unlock(&drm->mode_config.mutex); diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c index 9b0c4736c21a..4d4a715b429d 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c @@ -40,10 +40,7 @@ int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev, static void fsl_dcu_drm_connector_destroy(struct drm_connector *connector) { - struct fsl_dcu_drm_connector *fsl_con = to_fsl_dcu_connector(connector); - drm_connector_unregister(connector); - drm_panel_detach(fsl_con->panel); drm_connector_cleanup(connector); } @@ -101,12 +98,6 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev, if (ret < 0) goto err_sysfs; - ret = drm_panel_attach(panel, connector); - if (ret) { - dev_err(fsl_dev->dev, "failed to attach panel\n"); - goto err_sysfs; - } - return 0; err_sysfs: diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 66ea68e8da87..35a6bad7dcf8 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -463,13 +463,6 @@ static int imx_ldb_register(struct drm_device *drm, drm_connector_attach_encoder(&imx_ldb_ch->connector, encoder); } - if (imx_ldb_ch->panel) { - ret = drm_panel_attach(imx_ldb_ch->panel, - &imx_ldb_ch->connector); - if (ret) - return ret; - } - return 0; } @@ -718,9 +711,6 @@ static void imx_ldb_unbind(struct device *dev, struct device *master, for (i = 0; i < 2; i++) { struct imx_ldb_channel *channel = &imx_ldb->channel[i]; - if (channel->panel) - drm_panel_detach(channel->panel); - kfree(channel->edid); i2c_put_adapter(channel->ddc); } diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index ac916c84a631..cba2abbb420d 100644 --- 
a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -298,9 +298,6 @@ static int imx_pd_register(struct drm_device *drm, DRM_MODE_CONNECTOR_DPI); } - if (imxpd->panel) - drm_panel_attach(imxpd->panel, &imxpd->connector); - if (imxpd->next_bridge) { ret = drm_bridge_attach(encoder, imxpd->next_bridge, &imxpd->bridge, 0); @@ -369,9 +366,6 @@ static void imx_pd_unbind(struct device *dev, struct device *master, { struct imx_parallel_display *imxpd = dev_get_drvdata(dev); - if (imxpd->panel) - drm_panel_detach(imxpd->panel); - kfree(imxpd->edid); } diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 270bf22c98fe..e4e56e53aeea 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -861,19 +861,7 @@ static int mtk_dsi_create_connector(struct drm_device *drm, struct mtk_dsi *dsi) dsi->conn.dpms = DRM_MODE_DPMS_OFF; drm_connector_attach_encoder(&dsi->conn, &dsi->encoder); - if (dsi->panel) { - ret = drm_panel_attach(dsi->panel, &dsi->conn); - if (ret) { - DRM_ERROR("Failed to attach panel to drm\n"); - goto err_connector_cleanup; - } - } - return 0; - -err_connector_cleanup: - drm_connector_cleanup(&dsi->conn); - return ret; } static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi) @@ -921,8 +909,6 @@ static void mtk_dsi_destroy_conn_enc(struct mtk_dsi *dsi) /* Skip connector cleanup if creation was delegated to the bridge */ if (dsi->conn.dev) drm_connector_cleanup(&dsi->conn); - if (dsi->panel) - drm_panel_detach(dsi->panel); } static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp) diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c index c7df71e2fafc..7288041dd86a 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c @@ -50,14 +50,9 @@ static int mdp4_lvds_connector_get_modes(struct drm_connector *connector) struct drm_panel *panel = mdp4_lvds_connector->panel; int ret = 0; - if (panel) { - drm_panel_attach(panel, connector); - + if (panel) ret = drm_panel_get_modes(panel, connector); - drm_panel_detach(panel); - } - return ret; } diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 4b363bd7ddff..1d28dfba2c9b 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -328,7 +328,6 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector) * In dual DSI mode, we have one connector that can be * attached to the drm_panel. 
*/ - drm_panel_attach(panel, connector); num = drm_panel_get_modes(panel, connector); if (!num) return 0; diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 4526967978b7..53d5e184ee77 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -349,13 +349,6 @@ static int omap_modeset_init(struct drm_device *dev) drm_connector_attach_encoder(pipe->connector, encoder); - if (pipe->output->panel) { - ret = drm_panel_attach(pipe->output->panel, - pipe->connector); - if (ret < 0) - return ret; - } - crtc = omap_crtc_init(dev, pipe, priv->planes[i]); if (IS_ERR(crtc)) return PTR_ERR(crtc); @@ -394,18 +387,8 @@ static int omap_modeset_init(struct drm_device *dev) static void omap_modeset_fini(struct drm_device *ddev) { - struct omap_drm_private *priv = ddev->dev_private; - unsigned int i; - omap_drm_irq_uninstall(ddev); - for (i = 0; i < priv->num_pipes; i++) { - struct omap_drm_pipeline *pipe = &priv->pipes[i]; - - if (pipe->output->panel) - drm_panel_detach(pipe->output->panel); - } - drm_mode_config_cleanup(ddev); } diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c index ab0d49618cf9..bced729a96fe 100644 --- a/drivers/gpu/drm/rcar-du/rcar_lvds.c +++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c @@ -677,15 +677,11 @@ static int rcar_lvds_attach(struct drm_bridge *bridge, if (ret < 0) return ret; - return drm_panel_attach(lvds->panel, connector); + return 0; } static void rcar_lvds_detach(struct drm_bridge *bridge) { - struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge); - - if (lvds->panel) - drm_panel_detach(lvds->panel); } static const struct drm_bridge_funcs rcar_lvds_bridge_ops = { diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c index 63f967902c2d..f292c6a6e20f 100644 --- a/drivers/gpu/drm/rockchip/rockchip_lvds.c +++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c @@ -634,13 +634,6 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master, "failed to attach encoder: %d\n", ret); goto err_free_connector; } - - ret = drm_panel_attach(lvds->panel, connector); - if (ret < 0) { - DRM_DEV_ERROR(drm_dev->dev, - "failed to attach panel: %d\n", ret); - goto err_free_connector; - } } else { ret = drm_bridge_attach(encoder, lvds->bridge, NULL, 0); if (ret) { @@ -676,8 +669,6 @@ static void rockchip_lvds_unbind(struct device *dev, struct device *master, encoder_funcs = lvds->soc_data->helper_funcs; encoder_funcs->disable(&lvds->encoder); - if (lvds->panel) - drm_panel_detach(lvds->panel); pm_runtime_disable(dev); drm_connector_cleanup(&lvds->connector); drm_encoder_cleanup(&lvds->encoder); diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c index de4af7735c46..ddb4184f0726 100644 --- a/drivers/gpu/drm/sti/sti_dvo.c +++ b/drivers/gpu/drm/sti/sti_dvo.c @@ -389,8 +389,6 @@ sti_dvo_connector_detect(struct drm_connector *connector, bool force) dvo->panel = of_drm_find_panel(dvo->panel_node); if (IS_ERR(dvo->panel)) dvo->panel = NULL; - else - drm_panel_attach(dvo->panel, connector); } if (dvo->panel) diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c index ffda3184aa12..d06dd313d3c0 100644 --- a/drivers/gpu/drm/sun4i/sun4i_lvds.c +++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c @@ -54,9 +54,6 @@ static struct drm_connector_helper_funcs sun4i_lvds_con_helper_funcs = { static void sun4i_lvds_connector_destroy(struct drm_connector *connector) { - struct sun4i_lvds *lvds = 
drm_connector_to_sun4i_lvds(connector); - - drm_panel_detach(lvds->panel); drm_connector_cleanup(connector); } @@ -141,12 +138,6 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon) drm_connector_attach_encoder(&lvds->connector, &lvds->encoder); - - ret = drm_panel_attach(lvds->panel, &lvds->connector); - if (ret) { - dev_err(drm->dev, "Couldn't attach our panel\n"); - goto err_cleanup_connector; - } } if (bridge) { diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c index 5a7d43939ae6..23df1ec03416 100644 --- a/drivers/gpu/drm/sun4i/sun4i_rgb.c +++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c @@ -145,9 +145,6 @@ static struct drm_connector_helper_funcs sun4i_rgb_con_helper_funcs = { static void sun4i_rgb_connector_destroy(struct drm_connector *connector) { - struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector); - - drm_panel_detach(rgb->panel); drm_connector_cleanup(connector); } @@ -233,12 +230,6 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon) drm_connector_attach_encoder(&rgb->connector, &rgb->encoder); - - ret = drm_panel_attach(rgb->panel, &rgb->connector); - if (ret) { - dev_err(drm->dev, "Couldn't attach our panel\n"); - goto err_cleanup_connector; - } } if (rgb->bridge) { diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c index aa67cb037e9d..a78aebfe773d 100644 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c @@ -973,7 +973,6 @@ static int sun6i_dsi_attach(struct mipi_dsi_host *host, dsi->panel = panel; dsi->device = device; - drm_panel_attach(dsi->panel, &dsi->connector); drm_kms_helper_hotplug_event(dsi->drm); dev_info(host->dev, "Attached device %s\n", device->name); @@ -985,12 +984,10 @@ static int sun6i_dsi_detach(struct mipi_dsi_host *host, struct mipi_dsi_device *device) { struct sun6i_dsi *dsi = host_to_sun6i_dsi(host); - struct drm_panel *panel = dsi->panel; dsi->panel = NULL; dsi->device = NULL; - drm_panel_detach(panel); drm_kms_helper_hotplug_event(dsi->drm); return 0; diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index 38beab9ab4f8..37b89c396d3b 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c @@ -1493,10 +1493,8 @@ static int tegra_dsi_host_attach(struct mipi_dsi_host *host, if (IS_ERR(output->panel)) output->panel = NULL; - if (output->panel && output->connector.dev) { - drm_panel_attach(output->panel, &output->connector); + if (output->panel && output->connector.dev) drm_helper_hpd_irq_event(output->connector.dev); - } } return 0; diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c index e36e5e7c2f69..a3adb9e4debf 100644 --- a/drivers/gpu/drm/tegra/output.c +++ b/drivers/gpu/drm/tegra/output.c @@ -179,13 +179,6 @@ void tegra_output_remove(struct tegra_output *output) int tegra_output_init(struct drm_device *drm, struct tegra_output *output) { int connector_type; - int err; - - if (output->panel) { - err = drm_panel_attach(output->panel, &output->connector); - if (err < 0) - return err; - } /* * The connector is now registered and ready to receive hotplug events @@ -220,9 +213,6 @@ void tegra_output_exit(struct tegra_output *output) */ if (output->hpd_gpio) disable_irq(output->hpd_irq); - - if (output->panel) - drm_panel_detach(output->panel); } void tegra_output_find_possible_crtcs(struct tegra_output *output, diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h index ff066524cb70..45a1b5a2275d 100644 --- 
a/include/drm/drm_panel.h +++ b/include/drm/drm_panel.h @@ -178,9 +178,6 @@ void drm_panel_init(struct drm_panel *panel, struct device *dev, void drm_panel_add(struct drm_panel *panel); void drm_panel_remove(struct drm_panel *panel); -int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector); -void drm_panel_detach(struct drm_panel *panel); - int drm_panel_prepare(struct drm_panel *panel); int drm_panel_unprepare(struct drm_panel *panel); -- cgit v1.2.3 From 519a8a6cf91dda095be2d36216fc4ebc525270a1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 10 Aug 2020 18:42:14 +0200 Subject: net: Revert "net: optimize the sockptr_t for unified kernel/user address spaces" This reverts commits 6d04fe15f78acdf8e32329e208552e226f7a8ae6 and a31edb2059ed4e498f9aa8230c734b59d0ad797a. It turns out the idea to share a single pointer for both kernel and user space address causes various kinds of problems. So use the slightly less optimal version that uses an extra bit, but which is guaranteed to be safe everywhere. Fixes: 6d04fe15f78a ("net: optimize the sockptr_t for unified kernel/user address spaces") Reported-by: Eric Dumazet Reported-by: John Stultz Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- include/linux/sockptr.h | 26 ++------------------------ net/ipv4/bpfilter/sockopt.c | 14 ++++++-------- net/socket.c | 6 +----- 3 files changed, 9 insertions(+), 37 deletions(-) (limited to 'include') diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h index 96840def9d69..ea193414298b 100644 --- a/include/linux/sockptr.h +++ b/include/linux/sockptr.h @@ -8,26 +8,9 @@ #ifndef _LINUX_SOCKPTR_H #define _LINUX_SOCKPTR_H -#include #include #include -#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE -typedef union { - void *kernel; - void __user *user; -} sockptr_t; - -static inline bool sockptr_is_kernel(sockptr_t sockptr) -{ - return (unsigned long)sockptr.kernel >= TASK_SIZE; -} - -static inline sockptr_t KERNEL_SOCKPTR(void *p) -{ - return (sockptr_t) { .kernel = p }; -} -#else /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */ typedef struct { union { void *kernel; @@ -45,15 +28,10 @@ static inline sockptr_t KERNEL_SOCKPTR(void *p) { return (sockptr_t) { .kernel = p, .is_kernel = true }; } -#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */ -static inline int __must_check init_user_sockptr(sockptr_t *sp, void __user *p, - size_t size) +static inline sockptr_t USER_SOCKPTR(void __user *p) { - if (!access_ok(p, size)) - return -EFAULT; - *sp = (sockptr_t) { .user = p }; - return 0; + return (sockptr_t) { .user = p }; } static inline bool sockptr_is_null(sockptr_t sockptr) diff --git a/net/ipv4/bpfilter/sockopt.c b/net/ipv4/bpfilter/sockopt.c index 545b2640f019..1b34cb9a7708 100644 --- a/net/ipv4/bpfilter/sockopt.c +++ b/net/ipv4/bpfilter/sockopt.c @@ -57,18 +57,16 @@ int bpfilter_ip_set_sockopt(struct sock *sk, int optname, sockptr_t optval, return bpfilter_mbox_request(sk, optname, optval, optlen, true); } -int bpfilter_ip_get_sockopt(struct sock *sk, int optname, - char __user *user_optval, int __user *optlen) +int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, + int __user *optlen) { - sockptr_t optval; - int err, len; + int len; if (get_user(len, optlen)) return -EFAULT; - err = init_user_sockptr(&optval, user_optval, len); - if (err) - return err; - return bpfilter_mbox_request(sk, optname, optval, len, false); + + return bpfilter_mbox_request(sk, optname, USER_SOCKPTR(optval), len, + false); } static int 
__init bpfilter_sockopt_init(void) diff --git a/net/socket.c b/net/socket.c index f4d5998bdcba..dbbe8ea7d395 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2095,7 +2095,7 @@ static bool sock_use_custom_sol_socket(const struct socket *sock) int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval, int optlen) { - sockptr_t optval; + sockptr_t optval = USER_SOCKPTR(user_optval); char *kernel_optval = NULL; int err, fput_needed; struct socket *sock; @@ -2103,10 +2103,6 @@ int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval, if (optlen < 0) return -EINVAL; - err = init_user_sockptr(&optval, user_optval, optlen); - if (err) - return err; - sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) return err; -- cgit v1.2.3 From f19008e676366c44e9241af57f331b6c6edf9552 Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Mon, 10 Aug 2020 13:38:39 -0400 Subject: tcp: correct read of TFO keys on big endian systems When TFO keys are read back on big endian systems either via the global sysctl interface or via getsockopt() using TCP_FASTOPEN_KEY, the values don't match what was written. For example, on s390x: # echo "1-2-3-4" > /proc/sys/net/ipv4/tcp_fastopen_key # cat /proc/sys/net/ipv4/tcp_fastopen_key 02000000-01000000-04000000-03000000 Instead of: # cat /proc/sys/net/ipv4/tcp_fastopen_key 00000001-00000002-00000003-00000004 Fix this by converting to the correct endianness on read. This was reported by Colin Ian King when running the 'tcp_fastopen_backup_key' net selftest on s390x, which depends on the read value matching what was written. I've confirmed that the test now passes on big and little endian systems. Signed-off-by: Jason Baron Fixes: 438ac88009bc ("net: fastopen: robustness and endianness fixes for SipHash") Cc: Ard Biesheuvel Cc: Eric Dumazet Reported-and-tested-by: Colin Ian King Signed-off-by: David S. 
Miller --- include/net/tcp.h | 2 ++ net/ipv4/sysctl_net_ipv4.c | 16 ++++------------ net/ipv4/tcp.c | 16 ++++------------ net/ipv4/tcp_fastopen.c | 23 +++++++++++++++++++++++ 4 files changed, 33 insertions(+), 24 deletions(-) (limited to 'include') diff --git a/include/net/tcp.h b/include/net/tcp.h index dbf5c791a6eb..eab6c7510b5b 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1672,6 +1672,8 @@ void tcp_fastopen_destroy_cipher(struct sock *sk); void tcp_fastopen_ctx_destroy(struct net *net); int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk, void *primary_key, void *backup_key); +int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk, + u64 *key); void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb); struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb, struct request_sock *req, diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 5653e3b011bf..54023a46db04 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -301,24 +301,16 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write, struct ctl_table tbl = { .maxlen = ((TCP_FASTOPEN_KEY_LENGTH * 2 * TCP_FASTOPEN_KEY_MAX) + (TCP_FASTOPEN_KEY_MAX * 5)) }; - struct tcp_fastopen_context *ctx; - u32 user_key[TCP_FASTOPEN_KEY_MAX * 4]; - __le32 key[TCP_FASTOPEN_KEY_MAX * 4]; + u32 user_key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u32)]; + __le32 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(__le32)]; char *backup_data; - int ret, i = 0, off = 0, n_keys = 0; + int ret, i = 0, off = 0, n_keys; tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL); if (!tbl.data) return -ENOMEM; - rcu_read_lock(); - ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx); - if (ctx) { - n_keys = tcp_fastopen_context_len(ctx); - memcpy(&key[0], &ctx->key[0], TCP_FASTOPEN_KEY_LENGTH * n_keys); - } - rcu_read_unlock(); - + n_keys = tcp_fastopen_get_cipher(net, NULL, (u64 *)key); if (!n_keys) { memset(&key[0], 0, TCP_FASTOPEN_KEY_LENGTH); n_keys = 1; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index c06d2bfd2ec4..31f3b858db81 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3685,22 +3685,14 @@ static int do_tcp_getsockopt(struct sock *sk, int level, return 0; case TCP_FASTOPEN_KEY: { - __u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH]; - struct tcp_fastopen_context *ctx; - unsigned int key_len = 0; + u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)]; + unsigned int key_len; if (get_user(len, optlen)) return -EFAULT; - rcu_read_lock(); - ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx); - if (ctx) { - key_len = tcp_fastopen_context_len(ctx) * - TCP_FASTOPEN_KEY_LENGTH; - memcpy(&key[0], &ctx->key[0], key_len); - } - rcu_read_unlock(); - + key_len = tcp_fastopen_get_cipher(net, icsk, key) * + TCP_FASTOPEN_KEY_LENGTH; len = min_t(unsigned int, len, key_len); if (put_user(len, optlen)) return -EFAULT; diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index 19ad9586c720..1bb85821f1e6 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -108,6 +108,29 @@ out: return err; } +int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk, + u64 *key) +{ + struct tcp_fastopen_context *ctx; + int n_keys = 0, i; + + rcu_read_lock(); + if (icsk) + ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx); + else + ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx); + if (ctx) { + n_keys = tcp_fastopen_context_len(ctx); + for (i = 0; i < n_keys; i++) { + put_unaligned_le64(ctx->key[i].key[0], key + (i * 2)); + 
put_unaligned_le64(ctx->key[i].key[1], key + (i * 2) + 1); + } + } + rcu_read_unlock(); + + return n_keys; +} + static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req, struct sk_buff *syn, const siphash_key_t *key, -- cgit v1.2.3 From 444da3f52407d74c9aa12187ac6b01f76ee47d62 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 10 Aug 2020 11:21:11 -0700 Subject: bitfield.h: don't compile-time validate _val in FIELD_FIT When ur_load_imm_any() is inlined into jeq_imm(), it's possible for the compiler to deduce a case where _val can only have the value of -1 at compile time. Specifically, /* struct bpf_insn: _s32 imm */ u64 imm = insn->imm; /* sign extend */ if (imm >> 32) { /* non-zero only if insn->imm is negative */ /* inlined from ur_load_imm_any */ u32 __imm = imm >> 32; /* therefore, always 0xffffffff */ if (__builtin_constant_p(__imm) && __imm > 255) compiletime_assert_XXX() This can result in tripping a BUILD_BUG_ON() in __BF_FIELD_CHECK() that checks that a given value is representable in one byte (interpreted as unsigned). FIELD_FIT() should return true or false at runtime for whether a value can fit for not. Don't break the build over a value that's too large for the mask. We'd prefer to keep the inlining and compiler optimizations though we know this case will always return false. Cc: stable@vger.kernel.org Fixes: 1697599ee301a ("bitfield.h: add FIELD_FIT() helper") Link: https://lore.kernel.org/kernel-hardening/CAK7LNASvb0UDJ0U5wkYYRzTAdnEs64HjXpEUL7d=V0CXiAXcNw@mail.gmail.com/ Reported-by: Masahiro Yamada Debugged-by: Sami Tolvanen Signed-off-by: Jakub Kicinski Signed-off-by: Nick Desaulniers Signed-off-by: David S. Miller --- include/linux/bitfield.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h index 48ea093ff04c..4e035aca6f7e 100644 --- a/include/linux/bitfield.h +++ b/include/linux/bitfield.h @@ -77,7 +77,7 @@ */ #define FIELD_FIT(_mask, _val) \ ({ \ - __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: "); \ + __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: "); \ !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \ }) -- cgit v1.2.3 From 60e5da629a7c29e0987f6f02ec20b14c4ee0645e Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Sun, 9 Aug 2020 07:30:06 +0200 Subject: sections.h: dereference_function_descriptor() returns void pointer The function dereference_function_descriptor() takes on hppa64, ppc64 and ia64 a pointer to a function descriptor and returns a (void) pointer to the dereferenced function. To make cross-arch coding easier, on all other architectures the dereference_function_descriptor() macro should return a void pointer too. Signed-off-by: Helge Deller --- include/asm-generic/sections.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index 66397ed10acb..d16302d3eb59 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h @@ -60,8 +60,8 @@ extern __visible const void __nosave_begin, __nosave_end; /* Function descriptor handling (if any). Override in asm/sections.h */ #ifndef dereference_function_descriptor -#define dereference_function_descriptor(p) (p) -#define dereference_kernel_function_descriptor(p) (p) +#define dereference_function_descriptor(p) ((void *)(p)) +#define dereference_kernel_function_descriptor(p) ((void *)(p)) #endif /* random extra sections (if any). 
Override -- cgit v1.2.3 From b9d8cf2eb3ceecdee3434b87763492aee9e28845 Mon Sep 17 00:00:00 2001 From: Michael Kelley Date: Sun, 9 Aug 2020 18:29:51 -0700 Subject: x86/hyperv: Make hv_setup_sched_clock inline Make hv_setup_sched_clock inline so the reference to pv_ops works correctly with objtool updates to detect noinstr violations. See https://lore.kernel.org/patchwork/patch/1283635/ Signed-off-by: Michael Kelley Acked-by: Peter Zijlstra (Intel) Acked-by: Thomas Gleixner Link: https://lore.kernel.org/r/1597022991-24088-1-git-send-email-mikelley@microsoft.com Signed-off-by: Wei Liu --- arch/x86/include/asm/mshyperv.h | 12 ++++++++++++ arch/x86/kernel/cpu/mshyperv.c | 7 ------- include/asm-generic/mshyperv.h | 1 - 3 files changed, 12 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 60b944dd2df1..4f77b8f22e54 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -8,6 +8,7 @@ #include #include #include +#include typedef int (*hyperv_fill_flush_list_func)( struct hv_guest_mapping_flush_list *flush, @@ -54,6 +55,17 @@ typedef int (*hyperv_fill_flush_list_func)( vclocks_set_used(VDSO_CLOCKMODE_HVCLOCK); #define hv_get_raw_timer() rdtsc_ordered() +/* + * Reference to pv_ops must be inline so objtool + * detection of noinstr violations can work correctly. + */ +static __always_inline void hv_setup_sched_clock(void *sched_clock) +{ +#ifdef CONFIG_PARAVIRT + pv_ops.time.sched_clock = sched_clock; +#endif +} + void hyperv_vector_handler(struct pt_regs *regs); static inline void hv_enable_stimer0_percpu_irq(int irq) {} diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index af94f05a5c66..31125448b174 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -361,13 +361,6 @@ static void __init ms_hyperv_init_platform(void) #endif } -void hv_setup_sched_clock(void *sched_clock) -{ -#ifdef CONFIG_PARAVIRT - pv_ops.time.sched_clock = sched_clock; -#endif -} - const __initconst struct hypervisor_x86 x86_hyper_ms_hyperv = { .name = "Microsoft Hyper-V", .detect = ms_hyperv_platform, diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h index 1c4fd950f091..c5edc5e08b94 100644 --- a/include/asm-generic/mshyperv.h +++ b/include/asm-generic/mshyperv.h @@ -168,7 +168,6 @@ void hyperv_report_panic_msg(phys_addr_t pa, size_t size); bool hv_is_hyperv_initialized(void); bool hv_is_hibernation_supported(void); void hyperv_cleanup(void); -void hv_setup_sched_clock(void *sched_clock); #else /* CONFIG_HYPERV */ static inline bool hv_is_hyperv_initialized(void) { return false; } static inline bool hv_is_hibernation_supported(void) { return false; } -- cgit v1.2.3 From e5b92773287c3eb3108a44785986a6c997866df8 Mon Sep 17 00:00:00 2001 From: Oleg Vasilev Date: Fri, 24 Apr 2020 18:20:51 +0530 Subject: drm: report dp downstream port type as a subconnector property MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, downstream port type is only reported in debugfs. This information should be considered important since it reflects the actual physical connector type. Some userspace (e.g. window compositors) may want to show this info to a user. The 'subconnector' property is already utilized for DVI-I and TV-out for reporting connector subtype. The initial motivation for this feature came from i2c test [1]. 
It is supposed to be skipped on VGA connectors, but it cannot detect VGA over DP and fails instead. v2: - Ville: utilized drm_dp_is_branch() - Ville: implement DP 1.0 downstream type info - Replaced create_dp_properties with add_dp_subconnector_property - Added dp_set_subconnector_property helper v4: - Ville: add DP1.0 best assumption about subconnector - Ville: assume DVI is DVI-D - Ville: reuse Writeback enum value for Virtual subconnector - Renamed #defines: HDMI -> HDMIA, DP -> DisplayPort v5: rebase v6: - Jani Nikula: renamed a function name - Jani Nikula: addressed the issues with documentation [1]: https://bugs.freedesktop.org/show_bug.cgi?id=104097 Cc: Ville Syrjälä Cc: intel-gfx@lists.freedesktop.org Signed-off-by: Jeevan B Signed-off-by: Oleg Vasilev Reviewed-by: Emil Velikov Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/1587732655-17544-1-git-send-email-jeevan.b@intel.com --- drivers/gpu/drm/drm_connector.c | 49 ++++++++++++++++++++++++++-- drivers/gpu/drm/drm_dp_helper.c | 71 +++++++++++++++++++++++++++++++++++++++++ include/drm/drm_connector.h | 3 ++ include/drm/drm_dp_helper.h | 8 +++++ include/drm/drm_mode_config.h | 6 ++++ include/uapi/drm/drm_mode.h | 21 +++++++----- 6 files changed, 148 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 00e40a26a800..3d48ad1c3682 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -850,7 +850,7 @@ static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] = { DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list) static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = { - { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */ + { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I, TV-out and DP */ { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */ { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */ }; @@ -867,7 +867,7 @@ static const struct drm_prop_enum_list drm_tv_select_enum_list[] = { DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list) static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = { - { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */ + { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I, TV-out and DP */ { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */ { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */ { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */ @@ -876,6 +876,19 @@ static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = { DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name, drm_tv_subconnector_enum_list) +static const struct drm_prop_enum_list drm_dp_subconnector_enum_list[] = { + { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I, TV-out and DP */ + { DRM_MODE_SUBCONNECTOR_VGA, "VGA" }, /* DP */ + { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DP */ + { DRM_MODE_SUBCONNECTOR_HDMIA, "HDMI" }, /* DP */ + { DRM_MODE_SUBCONNECTOR_DisplayPort, "DP" }, /* DP */ + { DRM_MODE_SUBCONNECTOR_Wireless, "Wireless" }, /* DP */ + { DRM_MODE_SUBCONNECTOR_Native, "Native" }, /* DP */ +}; + +DRM_ENUM_NAME_FN(drm_get_dp_subconnector_name, + drm_dp_subconnector_enum_list) + static const struct drm_prop_enum_list hdmi_colorspaces[] = { /* For Default case, driver will set the colorspace */ { DRM_MODE_COLORIMETRY_DEFAULT, "Default" }, @@ -1217,6 +1230,14 @@ static const struct drm_prop_enum_list dp_colorspaces[] = { * 
can also expose this property to external outputs, in which case they * must support "None", which should be the default (since external screens * have a built-in scaler). + * + * subconnector: + * This property is used by DVI-I, TVout and DisplayPort to indicate different + * connector subtypes. Enum values more or less match with those from main + * connector types. + * For DVI-I and TVout there is also a matching property "select subconnector" + * allowing to switch between signal types. + * DP subconnector corresponds to a downstream port. */ int drm_connector_create_standard_properties(struct drm_device *dev) @@ -1305,6 +1326,30 @@ int drm_mode_create_dvi_i_properties(struct drm_device *dev) } EXPORT_SYMBOL(drm_mode_create_dvi_i_properties); +/** + * drm_connector_attach_dp_subconnector_property - create subconnector property for DP + * @connector: drm_connector to attach property + * + * Called by a driver when DP connector is created. + */ +void drm_connector_attach_dp_subconnector_property(struct drm_connector *connector) +{ + struct drm_mode_config *mode_config = &connector->dev->mode_config; + + if (!mode_config->dp_subconnector_property) + mode_config->dp_subconnector_property = + drm_property_create_enum(connector->dev, + DRM_MODE_PROP_IMMUTABLE, + "subconnector", + drm_dp_subconnector_enum_list, + ARRAY_SIZE(drm_dp_subconnector_enum_list)); + + drm_object_attach_property(&connector->base, + mode_config->dp_subconnector_property, + DRM_MODE_SUBCONNECTOR_Unknown); +} +EXPORT_SYMBOL(drm_connector_attach_dp_subconnector_property); + /** * DOC: HDMI connector properties * diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index a3c82e726057..4c21cf69dad5 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -597,6 +597,77 @@ void drm_dp_downstream_debug(struct seq_file *m, } EXPORT_SYMBOL(drm_dp_downstream_debug); +/** + * drm_dp_subconnector_type() - get DP branch device type + * + */ +enum drm_mode_subconnector +drm_dp_subconnector_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]) +{ + int type; + if (!drm_dp_is_branch(dpcd)) + return DRM_MODE_SUBCONNECTOR_Native; + /* DP 1.0 approach */ + if (dpcd[DP_DPCD_REV] == DP_DPCD_REV_10) { + type = dpcd[DP_DOWNSTREAMPORT_PRESENT] & + DP_DWN_STRM_PORT_TYPE_MASK; + + switch (type) { + case DP_DWN_STRM_PORT_TYPE_TMDS: + /* Can be HDMI or DVI-D, DVI-D is a safer option */ + return DRM_MODE_SUBCONNECTOR_DVID; + case DP_DWN_STRM_PORT_TYPE_ANALOG: + /* Can be VGA or DVI-A, VGA is more popular */ + return DRM_MODE_SUBCONNECTOR_VGA; + case DP_DWN_STRM_PORT_TYPE_DP: + return DRM_MODE_SUBCONNECTOR_DisplayPort; + case DP_DWN_STRM_PORT_TYPE_OTHER: + default: + return DRM_MODE_SUBCONNECTOR_Unknown; + } + } + type = port_cap[0] & DP_DS_PORT_TYPE_MASK; + + switch (type) { + case DP_DS_PORT_TYPE_DP: + case DP_DS_PORT_TYPE_DP_DUALMODE: + return DRM_MODE_SUBCONNECTOR_DisplayPort; + case DP_DS_PORT_TYPE_VGA: + return DRM_MODE_SUBCONNECTOR_VGA; + case DP_DS_PORT_TYPE_DVI: + return DRM_MODE_SUBCONNECTOR_DVID; + case DP_DS_PORT_TYPE_HDMI: + return DRM_MODE_SUBCONNECTOR_HDMIA; + case DP_DS_PORT_TYPE_WIRELESS: + return DRM_MODE_SUBCONNECTOR_Wireless; + case DP_DS_PORT_TYPE_NON_EDID: + default: + return DRM_MODE_SUBCONNECTOR_Unknown; + } +} +EXPORT_SYMBOL(drm_dp_subconnector_type); + +/** + * drm_mode_set_dp_subconnector_property - set subconnector for DP connector + * + * Called by a driver on every detect event. 
+ */ +void drm_dp_set_subconnector_property(struct drm_connector *connector, + enum drm_connector_status status, + const u8 *dpcd, + const u8 port_cap[4]) +{ + enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown; + + if (status == connector_status_connected) + subconnector = drm_dp_subconnector_type(dpcd, port_cap); + drm_object_property_set_value(&connector->base, + connector->dev->mode_config.dp_subconnector_property, + subconnector); +} +EXPORT_SYMBOL(drm_dp_set_subconnector_property); + /* * I2C-over-AUX implementation */ diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index af145608b5ed..928136556174 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -1604,10 +1604,13 @@ const char *drm_get_dvi_i_subconnector_name(int val); const char *drm_get_dvi_i_select_name(int val); const char *drm_get_tv_subconnector_name(int val); const char *drm_get_tv_select_name(int val); +const char *drm_get_dp_subconnector_name(int val); const char *drm_get_content_protection_name(int val); const char *drm_get_hdcp_content_type_name(int val); int drm_mode_create_dvi_i_properties(struct drm_device *dev); +void drm_connector_attach_dp_subconnector_property(struct drm_connector *connector); + int drm_mode_create_tv_margin_properties(struct drm_device *dev); int drm_mode_create_tv_properties(struct drm_device *dev, unsigned int num_modes, diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index e47dc22ebf50..5c2819924862 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -26,6 +26,7 @@ #include #include #include +#include /* * Unless otherwise noted, all values are from the DP 1.1a spec. Note that @@ -1619,6 +1620,13 @@ int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE], int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]); void drm_dp_downstream_debug(struct seq_file *m, const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], struct drm_dp_aux *aux); +enum drm_mode_subconnector +drm_dp_subconnector_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]); +void drm_dp_set_subconnector_property(struct drm_connector *connector, + enum drm_connector_status status, + const u8 *dpcd, + const u8 port_cap[4]); void drm_dp_remote_aux_init(struct drm_dp_aux *aux); void drm_dp_aux_init(struct drm_dp_aux *aux); diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index ffb9852a0638..f768c7cf7de3 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -680,6 +680,12 @@ struct drm_mode_config { */ struct drm_property *dvi_i_select_subconnector_property; + /** + * @dp_subconnector_property: Optional DP property to differentiate + * between different DP downstream port types. + */ + struct drm_property *dp_subconnector_property; + /** * @tv_subconnector_property: Optional TV property to differentiate * between different TV connector types. diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index deea447e5f22..863eda048265 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -332,14 +332,19 @@ struct drm_mode_get_encoder { /* This is for connectors with multiple signal types. */ /* Try to match DRM_MODE_CONNECTOR_X as closely as possible. 
*/ enum drm_mode_subconnector { - DRM_MODE_SUBCONNECTOR_Automatic = 0, - DRM_MODE_SUBCONNECTOR_Unknown = 0, - DRM_MODE_SUBCONNECTOR_DVID = 3, - DRM_MODE_SUBCONNECTOR_DVIA = 4, - DRM_MODE_SUBCONNECTOR_Composite = 5, - DRM_MODE_SUBCONNECTOR_SVIDEO = 6, - DRM_MODE_SUBCONNECTOR_Component = 8, - DRM_MODE_SUBCONNECTOR_SCART = 9, + DRM_MODE_SUBCONNECTOR_Automatic = 0, /* DVI-I, TV */ + DRM_MODE_SUBCONNECTOR_Unknown = 0, /* DVI-I, TV, DP */ + DRM_MODE_SUBCONNECTOR_VGA = 1, /* DP */ + DRM_MODE_SUBCONNECTOR_DVID = 3, /* DVI-I DP */ + DRM_MODE_SUBCONNECTOR_DVIA = 4, /* DVI-I */ + DRM_MODE_SUBCONNECTOR_Composite = 5, /* TV */ + DRM_MODE_SUBCONNECTOR_SVIDEO = 6, /* TV */ + DRM_MODE_SUBCONNECTOR_Component = 8, /* TV */ + DRM_MODE_SUBCONNECTOR_SCART = 9, /* TV */ + DRM_MODE_SUBCONNECTOR_DisplayPort = 10, /* DP */ + DRM_MODE_SUBCONNECTOR_HDMIA = 11, /* DP */ + DRM_MODE_SUBCONNECTOR_Native = 15, /* DP */ + DRM_MODE_SUBCONNECTOR_Wireless = 18, /* DP */ }; #define DRM_MODE_CONNECTOR_Unknown 0 -- cgit v1.2.3 From efa8480a831673bb52400df9dbe5da0aacda97bf Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 10 Aug 2020 18:44:24 -0600 Subject: fs: RWF_NOWAIT should imply IOCB_NOIO With the change allowing read-ahead for IOCB_NOWAIT, we changed the RWF_NOWAIT semantics of only doing cached reads. Since we know have IOCB_NOIO to manage that specific side of it, just make RWF_NOWAIT imply IOCB_NOIO as well to restore the previous behavior. Fixes: 2e85abf053b9 ("mm: allow read-ahead with IOCB_NOWAIT set") Reported-by: Dave Chinner Reviewed-by: Dave Chinner Signed-off-by: Jens Axboe --- include/linux/fs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/fs.h b/include/linux/fs.h index bd7ec3eaeed0..f1cca4bfdd7b 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3293,7 +3293,7 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags) if (flags & RWF_NOWAIT) { if (!(ki->ki_filp->f_mode & FMODE_NOWAIT)) return -EOPNOTSUPP; - kiocb_flags |= IOCB_NOWAIT; + kiocb_flags |= IOCB_NOWAIT | IOCB_NOIO; } if (flags & RWF_HIPRI) kiocb_flags |= IOCB_HIPRI; -- cgit v1.2.3 From f6ebbcf08f37b01827c51309a188e85165e498e7 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 6 Aug 2020 14:03:55 +0200 Subject: cpufreq: intel_pstate: Implement passive mode with HWP enabled Allow intel_pstate to work in the passive mode with HWP enabled and make it set the HWP minimum performance limit (HWP floor) to the P-state value given by the target frequency supplied by the cpufreq governor, so as to prevent the HWP algorithm and the CPU scheduler from working against each other, at least when the schedutil governor is in use, and update the intel_pstate documentation accordingly. Among other things, this allows utilization clamps to be taken into account, at least to a certain extent, when intel_pstate is in use and makes it more likely that sufficient capacity for deadline tasks will be provided. 
After this change, the resulting behavior of an HWP system with intel_pstate in the passive mode should be close to the behavior of the analogous non-HWP system with intel_pstate in the passive mode, except that the HWP algorithm is generally allowed to make the CPU run at a frequency above the floor P-state set by intel_pstate in the entire available range of P-states, while without HWP a CPU can run in a P-state above the requested one if the latter falls into the range of turbo P-states (referred to as the turbo range) or if the P-states of all CPUs in one package are coordinated with each other at the hardware level. [Note that in principle the HWP floor may not be taken into account by the processor if it falls into the turbo range, in which case the processor has a license to choose any P-state, either below or above the HWP floor, just like a non-HWP processor in the case when the target P-state falls into the turbo range.] With this change applied, intel_pstate in the passive mode assumes complete control over the HWP request MSR and concurrent changes of that MSR (eg. via the direct MSR access interface) are overridden by it. Signed-off-by: Rafael J. Wysocki Acked-by: Srinivas Pandruvada Reviewed-by: Francisco Jerez --- Documentation/admin-guide/pm/intel_pstate.rst | 89 +++++----- drivers/cpufreq/cpufreq.c | 6 +- drivers/cpufreq/intel_pstate.c | 245 +++++++++++++++++++------- include/linux/cpufreq.h | 2 + 4 files changed, 229 insertions(+), 113 deletions(-) (limited to 'include') diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst index 40d481cca368..f85767e09911 100644 --- a/Documentation/admin-guide/pm/intel_pstate.rst +++ b/Documentation/admin-guide/pm/intel_pstate.rst @@ -54,10 +54,13 @@ registered (see `below `_). Operation Modes =============== -``intel_pstate`` can operate in three different modes: in the active mode with -or without hardware-managed P-states support and in the passive mode. Which of -them will be in effect depends on what kernel command line options are used and -on the capabilities of the processor. +``intel_pstate`` can operate in two different modes, active or passive. In the +active mode, it uses its own internal performance scaling governor algorithm or +allows the hardware to do preformance scaling by itself, while in the passive +mode it responds to requests made by a generic ``CPUFreq`` governor implementing +a certain performance scaling algorithm. Which of them will be in effect +depends on what kernel command line options are used and on the capabilities of +the processor. Active Mode ----------- @@ -194,10 +197,11 @@ This is the default operation mode of ``intel_pstate`` for processors without hardware-managed P-states (HWP) support. It is always used if the ``intel_pstate=passive`` argument is passed to the kernel in the command line regardless of whether or not the given processor supports HWP. [Note that the -``intel_pstate=no_hwp`` setting implies ``intel_pstate=passive`` if it is used -without ``intel_pstate=active``.] Like in the active mode without HWP support, -in this mode ``intel_pstate`` may refuse to work with processors that are not -recognized by it. +``intel_pstate=no_hwp`` setting causes the driver to start in the passive mode +if it is not combined with ``intel_pstate=active``.] 
Like in the active mode +without HWP support, in this mode ``intel_pstate`` may refuse to work with +processors that are not recognized by it if HWP is prevented from being enabled +through the kernel command line. If the driver works in this mode, the ``scaling_driver`` policy attribute in ``sysfs`` for all ``CPUFreq`` policies contains the string "intel_cpufreq". @@ -318,10 +322,9 @@ manuals need to be consulted to get to it too. For this reason, there is a list of supported processors in ``intel_pstate`` and the driver initialization will fail if the detected processor is not in that -list, unless it supports the `HWP feature `_. [The interface to -obtain all of the information listed above is the same for all of the processors -supporting the HWP feature, which is why they all are supported by -``intel_pstate``.] +list, unless it supports the HWP feature. [The interface to obtain all of the +information listed above is the same for all of the processors supporting the +HWP feature, which is why ``intel_pstate`` works with all of them.] User Space Interface in ``sysfs`` @@ -425,22 +428,16 @@ argument is passed to the kernel in the command line. as well as the per-policy ones) are then reset to their default values, possibly depending on the target operation mode.] - That only is supported in some configurations, though (for example, if - the `HWP feature is enabled in the processor `_, - the operation mode of the driver cannot be changed), and if it is not - supported in the current configuration, writes to this attribute will - fail with an appropriate error. - ``energy_efficiency`` - This attribute is only present on platforms, which have CPUs matching - Kaby Lake or Coffee Lake desktop CPU model. By default - energy efficiency optimizations are disabled on these CPU models in HWP - mode by this driver. Enabling energy efficiency may limit maximum - operating frequency in both HWP and non HWP mode. In non HWP mode, - optimizations are done only in the turbo frequency range. In HWP mode, - optimizations are done in the entire frequency range. Setting this - attribute to "1" enables energy efficiency optimizations and setting - to "0" disables energy efficiency optimizations. + This attribute is only present on platforms with CPUs matching the Kaby + Lake or Coffee Lake desktop CPU model. By default, energy-efficiency + optimizations are disabled on these CPU models if HWP is enabled. + Enabling energy-efficiency optimizations may limit maximum operating + frequency with or without the HWP feature. With HWP enabled, the + optimizations are done only in the turbo frequency range. Without it, + they are done in the entire available frequency range. Setting this + attribute to "1" enables the energy-efficiency optimizations and setting + to "0" disables them. Interpretation of Policy Attributes ----------------------------------- @@ -484,8 +481,8 @@ Next, the following policy attributes have special meaning if policy for the time interval between the last two invocations of the driver's utilization update callback by the CPU scheduler for that CPU. -One more policy attribute is present if the `HWP feature is enabled in the -processor `_: +One more policy attribute is present if the HWP feature is enabled in the +processor: ``base_frequency`` Shows the base frequency of the CPU. Any frequency above this will be @@ -526,11 +523,11 @@ on the following rules, regardless of the current operation mode of the driver: 3. The global and per-policy limits can be set independently. 
-If the `HWP feature is enabled in the processor `_, the -resulting effective values are written into its registers whenever the limits -change in order to request its internal P-state selection logic to always set -P-states within these limits. Otherwise, the limits are taken into account by -scaling governors (in the `passive mode `_) and by the driver +In the `active mode with the HWP feature enabled `_, the +resulting effective values are written into hardware registers whenever the +limits change in order to request its internal P-state selection logic to always +set P-states within these limits. Otherwise, the limits are taken into account +by scaling governors (in the `passive mode `_) and by the driver every time before setting a new P-state for a CPU. Additionally, if the ``intel_pstate=per_cpu_perf_limits`` command line argument @@ -541,12 +538,11 @@ at all and the only way to set the limits is by using the policy attributes. Energy vs Performance Hints --------------------------- -If ``intel_pstate`` works in the `active mode with the HWP feature enabled -`_ in the processor, additional attributes are present -in every ``CPUFreq`` policy directory in ``sysfs``. They are intended to allow -user space to help ``intel_pstate`` to adjust the processor's internal P-state -selection logic by focusing it on performance or on energy-efficiency, or -somewhere between the two extremes: +If the hardware-managed P-states (HWP) is enabled in the processor, additional +attributes, intended to allow user space to help ``intel_pstate`` to adjust the +processor's internal P-state selection logic by focusing it on performance or on +energy-efficiency, or somewhere between the two extremes, are present in every +``CPUFreq`` policy directory in ``sysfs``. They are : ``energy_performance_preference`` Current value of the energy vs performance hint for the given policy @@ -650,12 +646,14 @@ of them have to be prepended with the ``intel_pstate=`` prefix. Do not register ``intel_pstate`` as the scaling driver even if the processor is supported by it. +``active`` + Register ``intel_pstate`` in the `active mode `_ to start + with. + ``passive`` Register ``intel_pstate`` in the `passive mode `_ to start with. - This option implies the ``no_hwp`` one described below. - ``force`` Register ``intel_pstate`` as the scaling driver instead of ``acpi-cpufreq`` even if the latter is preferred on the given system. @@ -670,13 +668,12 @@ of them have to be prepended with the ``intel_pstate=`` prefix. driver is used instead of ``acpi-cpufreq``. ``no_hwp`` - Do not enable the `hardware-managed P-states (HWP) feature - `_ even if it is supported by the processor. + Do not enable the hardware-managed P-states (HWP) feature even if it is + supported by the processor. ``hwp_only`` Register ``intel_pstate`` as the scaling driver only if the - `hardware-managed P-states (HWP) feature `_ is - supported by the processor. + hardware-managed P-states (HWP) feature is supported by the processor. ``support_acpi_ppc`` Take ACPI ``_PPC`` performance limits into account. 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index afad06b91c77..02ab56b2a0d8 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -73,8 +73,6 @@ static inline bool has_target(void) static unsigned int __cpufreq_get(struct cpufreq_policy *policy); static int cpufreq_init_governor(struct cpufreq_policy *policy); static void cpufreq_exit_governor(struct cpufreq_policy *policy); -static int cpufreq_start_governor(struct cpufreq_policy *policy); -static void cpufreq_stop_governor(struct cpufreq_policy *policy); static void cpufreq_governor_limits(struct cpufreq_policy *policy); static int cpufreq_set_policy(struct cpufreq_policy *policy, struct cpufreq_governor *new_gov, @@ -2266,7 +2264,7 @@ static void cpufreq_exit_governor(struct cpufreq_policy *policy) module_put(policy->governor->owner); } -static int cpufreq_start_governor(struct cpufreq_policy *policy) +int cpufreq_start_governor(struct cpufreq_policy *policy) { int ret; @@ -2293,7 +2291,7 @@ static int cpufreq_start_governor(struct cpufreq_policy *policy) return 0; } -static void cpufreq_stop_governor(struct cpufreq_policy *policy) +void cpufreq_stop_governor(struct cpufreq_policy *policy) { if (cpufreq_suspended || !policy->governor) return; diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index fc459c9c00ff..e0220a6fbc69 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -36,6 +36,7 @@ #define INTEL_PSTATE_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC) #define INTEL_CPUFREQ_TRANSITION_LATENCY 20000 +#define INTEL_CPUFREQ_TRANSITION_DELAY_HWP 5000 #define INTEL_CPUFREQ_TRANSITION_DELAY 500 #ifdef CONFIG_ACPI @@ -220,6 +221,7 @@ struct global_params { * preference/bias * @epp_saved: Saved EPP/EPB during system suspend or CPU offline * operation + * @epp_cached Cached HWP energy-performance preference value * @hwp_req_cached: Cached value of the last HWP Request MSR * @hwp_cap_cached: Cached value of the last HWP Capabilities MSR * @last_io_update: Last time when IO wake flag was set @@ -257,6 +259,7 @@ struct cpudata { s16 epp_policy; s16 epp_default; s16 epp_saved; + s16 epp_cached; u64 hwp_req_cached; u64 hwp_cap_cached; u64 last_io_update; @@ -639,6 +642,26 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw return index; } +static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp) +{ + /* + * Use the cached HWP Request MSR value, because in the active mode the + * register itself may be updated by intel_pstate_hwp_boost_up() or + * intel_pstate_hwp_boost_down() at any time. + */ + u64 value = READ_ONCE(cpu->hwp_req_cached); + + value &= ~GENMASK_ULL(31, 24); + value |= (u64)epp << 24; + /* + * The only other updater of hwp_req_cached in the active mode, + * intel_pstate_hwp_set(), is called under the same lock as this + * function, so it cannot run in parallel with the update below. + */ + WRITE_ONCE(cpu->hwp_req_cached, value); + return wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); +} + static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data, int pref_index, bool use_raw, u32 raw_epp) @@ -650,28 +673,12 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data, epp = cpu_data->epp_default; if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { - /* - * Use the cached HWP Request MSR value, because the register - * itself may be updated by intel_pstate_hwp_boost_up() or - * intel_pstate_hwp_boost_down() at any time. 
- */ - u64 value = READ_ONCE(cpu_data->hwp_req_cached); - - value &= ~GENMASK_ULL(31, 24); - if (use_raw) epp = raw_epp; else if (epp == -EINVAL) epp = epp_values[pref_index - 1]; - value |= (u64)epp << 24; - /* - * The only other updater of hwp_req_cached in the active mode, - * intel_pstate_hwp_set(), is called under the same lock as this - * function, so it cannot run in parallel with the update below. - */ - WRITE_ONCE(cpu_data->hwp_req_cached, value); - ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value); + ret = intel_pstate_set_epp(cpu_data, epp); } else { if (epp == -EINVAL) epp = (pref_index - 1) << 2; @@ -697,10 +704,12 @@ static ssize_t show_energy_performance_available_preferences( cpufreq_freq_attr_ro(energy_performance_available_preferences); +static struct cpufreq_driver intel_pstate; + static ssize_t store_energy_performance_preference( struct cpufreq_policy *policy, const char *buf, size_t count) { - struct cpudata *cpu_data = all_cpu_data[policy->cpu]; + struct cpudata *cpu = all_cpu_data[policy->cpu]; char str_preference[21]; bool raw = false; ssize_t ret; @@ -725,15 +734,44 @@ static ssize_t store_energy_performance_preference( raw = true; } + /* + * This function runs with the policy R/W semaphore held, which + * guarantees that the driver pointer will not change while it is + * running. + */ + if (!intel_pstate_driver) + return -EAGAIN; + mutex_lock(&intel_pstate_limits_lock); - ret = intel_pstate_set_energy_pref_index(cpu_data, ret, raw, epp); - if (!ret) - ret = count; + if (intel_pstate_driver == &intel_pstate) { + ret = intel_pstate_set_energy_pref_index(cpu, ret, raw, epp); + } else { + /* + * In the passive mode the governor needs to be stopped on the + * target CPU before the EPP update and restarted after it, + * which is super-heavy-weight, so make sure it is worth doing + * upfront. + */ + if (!raw) + epp = ret ? 
epp_values[ret - 1] : cpu->epp_default; + + if (cpu->epp_cached != epp) { + int err; + + cpufreq_stop_governor(policy); + ret = intel_pstate_set_epp(cpu, epp); + err = cpufreq_start_governor(policy); + if (!ret) { + cpu->epp_cached = epp; + ret = err; + } + } + } mutex_unlock(&intel_pstate_limits_lock); - return ret; + return ret ?: count; } static ssize_t show_energy_performance_preference( @@ -1145,8 +1183,6 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, return count; } -static struct cpufreq_driver intel_pstate; - static void update_qos_request(enum freq_qos_req_type type) { int max_state, turbo_max, freq, i, perf_pct; @@ -1330,9 +1366,10 @@ static const struct attribute_group intel_pstate_attr_group = { static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[]; +static struct kobject *intel_pstate_kobject; + static void __init intel_pstate_sysfs_expose_params(void) { - struct kobject *intel_pstate_kobject; int rc; intel_pstate_kobject = kobject_create_and_add("intel_pstate", @@ -1357,17 +1394,31 @@ static void __init intel_pstate_sysfs_expose_params(void) rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr); WARN_ON(rc); - if (hwp_active) { - rc = sysfs_create_file(intel_pstate_kobject, - &hwp_dynamic_boost.attr); - WARN_ON(rc); - } - if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) { rc = sysfs_create_file(intel_pstate_kobject, &energy_efficiency.attr); WARN_ON(rc); } } + +static void intel_pstate_sysfs_expose_hwp_dynamic_boost(void) +{ + int rc; + + if (!hwp_active) + return; + + rc = sysfs_create_file(intel_pstate_kobject, &hwp_dynamic_boost.attr); + WARN_ON_ONCE(rc); +} + +static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void) +{ + if (!hwp_active) + return; + + sysfs_remove_file(intel_pstate_kobject, &hwp_dynamic_boost.attr); +} + /************************** sysfs end ************************/ static void intel_pstate_hwp_enable(struct cpudata *cpudata) @@ -2247,7 +2298,10 @@ static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy) static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy) { - intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]); + if (hwp_active) + intel_pstate_hwp_force_min_perf(policy->cpu); + else + intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]); } static void intel_pstate_stop_cpu(struct cpufreq_policy *policy) @@ -2255,12 +2309,10 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy) pr_debug("CPU %d exiting\n", policy->cpu); intel_pstate_clear_update_util_hook(policy->cpu); - if (hwp_active) { + if (hwp_active) intel_pstate_hwp_save_state(policy); - intel_pstate_hwp_force_min_perf(policy->cpu); - } else { - intel_cpufreq_stop_cpu(policy); - } + + intel_cpufreq_stop_cpu(policy); } static int intel_pstate_cpu_exit(struct cpufreq_policy *policy) @@ -2390,13 +2442,71 @@ static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, in fp_toint(cpu->iowait_boost * 100)); } +static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate, + bool fast_switch) +{ + u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev; + + value &= ~HWP_MIN_PERF(~0L); + value |= HWP_MIN_PERF(target_pstate); + + /* + * The entire MSR needs to be updated in order to update the HWP min + * field in it, so opportunistically update the max too if needed. 
+ */ + value &= ~HWP_MAX_PERF(~0L); + value |= HWP_MAX_PERF(cpu->max_perf_ratio); + + if (value == prev) + return; + + WRITE_ONCE(cpu->hwp_req_cached, value); + if (fast_switch) + wrmsrl(MSR_HWP_REQUEST, value); + else + wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); +} + +static void intel_cpufreq_adjust_perf_ctl(struct cpudata *cpu, + u32 target_pstate, bool fast_switch) +{ + if (fast_switch) + wrmsrl(MSR_IA32_PERF_CTL, + pstate_funcs.get_val(cpu, target_pstate)); + else + wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, + pstate_funcs.get_val(cpu, target_pstate)); +} + +static int intel_cpufreq_update_pstate(struct cpudata *cpu, int target_pstate, + bool fast_switch) +{ + int old_pstate = cpu->pstate.current_pstate; + + target_pstate = intel_pstate_prepare_request(cpu, target_pstate); + if (target_pstate != old_pstate) { + cpu->pstate.current_pstate = target_pstate; + if (hwp_active) + intel_cpufreq_adjust_hwp(cpu, target_pstate, + fast_switch); + else + intel_cpufreq_adjust_perf_ctl(cpu, target_pstate, + fast_switch); + } + + intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH : + INTEL_PSTATE_TRACE_TARGET, old_pstate); + + return target_pstate; +} + static int intel_cpufreq_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { struct cpudata *cpu = all_cpu_data[policy->cpu]; struct cpufreq_freqs freqs; - int target_pstate, old_pstate; + int target_pstate; update_turbo_state(); @@ -2404,6 +2514,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy, freqs.new = target_freq; cpufreq_freq_transition_begin(policy, &freqs); + switch (relation) { case CPUFREQ_RELATION_L: target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling); @@ -2415,15 +2526,11 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy, target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling); break; } - target_pstate = intel_pstate_prepare_request(cpu, target_pstate); - old_pstate = cpu->pstate.current_pstate; - if (target_pstate != cpu->pstate.current_pstate) { - cpu->pstate.current_pstate = target_pstate; - wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, - pstate_funcs.get_val(cpu, target_pstate)); - } + + target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, false); + freqs.new = target_pstate * cpu->pstate.scaling; - intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_TARGET, old_pstate); + cpufreq_freq_transition_end(policy, &freqs, false); return 0; @@ -2433,15 +2540,14 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy, unsigned int target_freq) { struct cpudata *cpu = all_cpu_data[policy->cpu]; - int target_pstate, old_pstate; + int target_pstate; update_turbo_state(); target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling); - target_pstate = intel_pstate_prepare_request(cpu, target_pstate); - old_pstate = cpu->pstate.current_pstate; - intel_pstate_update_pstate(cpu, target_pstate); - intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate); + + target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, true); + return target_pstate * cpu->pstate.scaling; } @@ -2461,7 +2567,6 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) return ret; policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY; - policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY; /* This reflects the intel_pstate_get_cpu_pstates() setting. 
*/ policy->cur = policy->cpuinfo.min_freq; @@ -2473,10 +2578,18 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) cpu = all_cpu_data[policy->cpu]; - if (hwp_active) + if (hwp_active) { + u64 value; + intel_pstate_get_hwp_max(policy->cpu, &turbo_max, &max_state); - else + policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP; + rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value); + WRITE_ONCE(cpu->hwp_req_cached, value); + cpu->epp_cached = (value & GENMASK_ULL(31, 24)) >> 24; + } else { turbo_max = cpu->pstate.turbo_pstate; + policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY; + } min_freq = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100); min_freq *= cpu->pstate.scaling; @@ -2553,6 +2666,10 @@ static void intel_pstate_driver_cleanup(void) } } put_online_cpus(); + + if (intel_pstate_driver == &intel_pstate) + intel_pstate_sysfs_hide_hwp_dynamic_boost(); + intel_pstate_driver = NULL; } @@ -2560,6 +2677,9 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver) { int ret; + if (driver == &intel_pstate) + intel_pstate_sysfs_expose_hwp_dynamic_boost(); + memset(&global, 0, sizeof(global)); global.max_perf_pct = 100; @@ -2577,9 +2697,6 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver) static int intel_pstate_unregister_driver(void) { - if (hwp_active) - return -EBUSY; - cpufreq_unregister_driver(intel_pstate_driver); intel_pstate_driver_cleanup(); @@ -2835,7 +2952,10 @@ static int __init intel_pstate_init(void) hwp_active++; hwp_mode_bdw = id->driver_data; intel_pstate.attr = hwp_cpufreq_attrs; - default_driver = &intel_pstate; + intel_cpufreq.attr = hwp_cpufreq_attrs; + if (!default_driver) + default_driver = &intel_pstate; + goto hwp_cpu_matched; } } else { @@ -2906,14 +3026,13 @@ static int __init intel_pstate_setup(char *str) if (!str) return -EINVAL; - if (!strcmp(str, "disable")) { + if (!strcmp(str, "disable")) no_load = 1; - } else if (!strcmp(str, "active")) { + else if (!strcmp(str, "active")) default_driver = &intel_pstate; - } else if (!strcmp(str, "passive")) { + else if (!strcmp(str, "passive")) default_driver = &intel_cpufreq; - no_hwp = 1; - } + if (!strcmp(str, "no_hwp")) { pr_info("HWP disabled\n"); no_hwp = 1; diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 58687a5bf9c8..8f141d4c859c 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -576,6 +576,8 @@ unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy, unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy); int cpufreq_register_governor(struct cpufreq_governor *governor); void cpufreq_unregister_governor(struct cpufreq_governor *governor); +int cpufreq_start_governor(struct cpufreq_policy *policy); +void cpufreq_stop_governor(struct cpufreq_policy *policy); #define cpufreq_governor_init(__governor) \ static int __init __governor##_init(void) \ -- cgit v1.2.3 From 62ffc589abb176821662efc4525ee4ac0b9c3894 Mon Sep 17 00:00:00 2001 From: Tim Froidcoeur Date: Tue, 11 Aug 2020 20:33:23 +0200 Subject: net: refactor bind_bucket fastreuse into helper Refactor the fastreuse update code in inet_csk_get_port into a small helper function that can be called from other places. Acked-by: Matthieu Baerts Signed-off-by: Tim Froidcoeur Signed-off-by: David S. 
Miller --- include/net/inet_connection_sock.h | 4 ++ net/ipv4/inet_connection_sock.c | 97 +++++++++++++++++++++----------------- 2 files changed, 57 insertions(+), 44 deletions(-) (limited to 'include') diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 1e209ce7d1bd..aa8893c68c50 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -304,6 +304,10 @@ void inet_csk_listen_stop(struct sock *sk); void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr); +/* update the fast reuse flag when adding a socket */ +void inet_csk_update_fastreuse(struct inet_bind_bucket *tb, + struct sock *sk); + struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu); #define TCP_PINGPONG_THRESH 3 diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index d1a3913eebe0..b457dd2d6c75 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -296,6 +296,57 @@ static inline int sk_reuseport_match(struct inet_bind_bucket *tb, ipv6_only_sock(sk), true, false); } +void inet_csk_update_fastreuse(struct inet_bind_bucket *tb, + struct sock *sk) +{ + kuid_t uid = sock_i_uid(sk); + bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN; + + if (hlist_empty(&tb->owners)) { + tb->fastreuse = reuse; + if (sk->sk_reuseport) { + tb->fastreuseport = FASTREUSEPORT_ANY; + tb->fastuid = uid; + tb->fast_rcv_saddr = sk->sk_rcv_saddr; + tb->fast_ipv6_only = ipv6_only_sock(sk); + tb->fast_sk_family = sk->sk_family; +#if IS_ENABLED(CONFIG_IPV6) + tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr; +#endif + } else { + tb->fastreuseport = 0; + } + } else { + if (!reuse) + tb->fastreuse = 0; + if (sk->sk_reuseport) { + /* We didn't match or we don't have fastreuseport set on + * the tb, but we have sk_reuseport set on this socket + * and we know that there are no bind conflicts with + * this socket in this tb, so reset our tb's reuseport + * settings so that any subsequent sockets that match + * our current socket will be put on the fast path. + * + * If we reset we need to set FASTREUSEPORT_STRICT so we + * do extra checking for all subsequent sk_reuseport + * socks. + */ + if (!sk_reuseport_match(tb, sk)) { + tb->fastreuseport = FASTREUSEPORT_STRICT; + tb->fastuid = uid; + tb->fast_rcv_saddr = sk->sk_rcv_saddr; + tb->fast_ipv6_only = ipv6_only_sock(sk); + tb->fast_sk_family = sk->sk_family; +#if IS_ENABLED(CONFIG_IPV6) + tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr; +#endif + } + } else { + tb->fastreuseport = 0; + } + } +} + /* Obtain a reference to a local port for the given sock, * if snum is zero it means select any available local port. 
* We try to allocate an odd port (and leave even ports for connect()) @@ -308,7 +359,6 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum) struct inet_bind_hashbucket *head; struct net *net = sock_net(sk); struct inet_bind_bucket *tb = NULL; - kuid_t uid = sock_i_uid(sk); int l3mdev; l3mdev = inet_sk_bound_l3mdev(sk); @@ -345,49 +395,8 @@ tb_found: goto fail_unlock; } success: - if (hlist_empty(&tb->owners)) { - tb->fastreuse = reuse; - if (sk->sk_reuseport) { - tb->fastreuseport = FASTREUSEPORT_ANY; - tb->fastuid = uid; - tb->fast_rcv_saddr = sk->sk_rcv_saddr; - tb->fast_ipv6_only = ipv6_only_sock(sk); - tb->fast_sk_family = sk->sk_family; -#if IS_ENABLED(CONFIG_IPV6) - tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr; -#endif - } else { - tb->fastreuseport = 0; - } - } else { - if (!reuse) - tb->fastreuse = 0; - if (sk->sk_reuseport) { - /* We didn't match or we don't have fastreuseport set on - * the tb, but we have sk_reuseport set on this socket - * and we know that there are no bind conflicts with - * this socket in this tb, so reset our tb's reuseport - * settings so that any subsequent sockets that match - * our current socket will be put on the fast path. - * - * If we reset we need to set FASTREUSEPORT_STRICT so we - * do extra checking for all subsequent sk_reuseport - * socks. - */ - if (!sk_reuseport_match(tb, sk)) { - tb->fastreuseport = FASTREUSEPORT_STRICT; - tb->fastuid = uid; - tb->fast_rcv_saddr = sk->sk_rcv_saddr; - tb->fast_ipv6_only = ipv6_only_sock(sk); - tb->fast_sk_family = sk->sk_family; -#if IS_ENABLED(CONFIG_IPV6) - tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr; -#endif - } - } else { - tb->fastreuseport = 0; - } - } + inet_csk_update_fastreuse(tb, sk); + if (!inet_csk(sk)->icsk_bind_hash) inet_bind_hash(sk, tb, port); WARN_ON(inet_csk(sk)->icsk_bind_hash != tb); -- cgit v1.2.3 From e92ae67d6ed881e9c6d6d432eb6b5817f150a115 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 3 Aug 2020 15:06:38 +0200 Subject: drm/ttm: rename ttm_resource_manager_func callbacks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The names get/put are associated with reference counting in the Linux kernel, use alloc/free instead. 
Signed-off-by: Christian König Reviewed-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/384340/?series=80346&rev=1 --- drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 6 +++--- drivers/gpu/drm/nouveau/nouveau_ttm.c | 12 ++++++------ drivers/gpu/drm/ttm/ttm_bo.c | 8 ++++---- drivers/gpu/drm/ttm/ttm_range_manager.c | 16 ++++++++-------- drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 4 ++-- drivers/gpu/drm/vmwgfx/vmwgfx_thp.c | 4 ++-- include/drm/ttm/ttm_bo_driver.h | 18 +++++++++--------- 8 files changed, 36 insertions(+), 36 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index 010518148ef8..bab8feed46da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -311,7 +311,7 @@ static void amdgpu_gtt_mgr_debug(struct ttm_resource_manager *man, } static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func = { - .get_node = amdgpu_gtt_mgr_new, - .put_node = amdgpu_gtt_mgr_del, + .alloc = amdgpu_gtt_mgr_new, + .free = amdgpu_gtt_mgr_del, .debug = amdgpu_gtt_mgr_debug }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 7882efd275d1..ff70ae00e602 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -606,7 +606,7 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man, } static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = { - .get_node = amdgpu_vram_mgr_new, - .put_node = amdgpu_vram_mgr_del, - .debug = amdgpu_vram_mgr_debug + .alloc = amdgpu_vram_mgr_new, + .free = amdgpu_vram_mgr_del, + .debug = amdgpu_vram_mgr_debug }; diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index e6a30865a00b..53c6f8827322 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -64,8 +64,8 @@ nouveau_vram_manager_new(struct ttm_resource_manager *man, } const struct ttm_resource_manager_func nouveau_vram_manager = { - .get_node = nouveau_vram_manager_new, - .put_node = nouveau_manager_del, + .alloc = nouveau_vram_manager_new, + .free = nouveau_manager_del, }; static int @@ -87,8 +87,8 @@ nouveau_gart_manager_new(struct ttm_resource_manager *man, } const struct ttm_resource_manager_func nouveau_gart_manager = { - .get_node = nouveau_gart_manager_new, - .put_node = nouveau_manager_del, + .alloc = nouveau_gart_manager_new, + .free = nouveau_manager_del, }; static int @@ -119,8 +119,8 @@ nv04_gart_manager_new(struct ttm_resource_manager *man, } const struct ttm_resource_manager_func nv04_gart_manager = { - .get_node = nv04_gart_manager_new, - .put_node = nouveau_manager_del, + .alloc = nv04_gart_manager_new, + .free = nouveau_manager_del, }; int diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index ad09329b62d3..ae71c3ab6cc4 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -846,20 +846,20 @@ static int ttm_bo_mem_get(struct ttm_buffer_object *bo, struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, mem->mem_type); mem->mm_node = NULL; - if (!man->func || !man->func->get_node) + if (!man->func || !man->func->alloc) return 0; - return man->func->get_node(man, bo, place, mem); + return man->func->alloc(man, bo, place, mem); } void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_resource *mem) { struct 
ttm_resource_manager *man = ttm_manager_type(bo->bdev, mem->mem_type); - if (!man->func || !man->func->put_node) + if (!man->func || !man->func->free) return; - man->func->put_node(man, mem); + man->func->free(man, mem); mem->mm_node = NULL; mem->mem_type = TTM_PL_SYSTEM; } diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c index 274a05ca13d3..770c8988c139 100644 --- a/drivers/gpu/drm/ttm/ttm_range_manager.c +++ b/drivers/gpu/drm/ttm/ttm_range_manager.c @@ -54,10 +54,10 @@ static inline struct ttm_range_manager *to_range_manager(struct ttm_resource_man return container_of(man, struct ttm_range_manager, manager); } -static int ttm_range_man_get_node(struct ttm_resource_manager *man, - struct ttm_buffer_object *bo, - const struct ttm_place *place, - struct ttm_resource *mem) +static int ttm_range_man_alloc(struct ttm_resource_manager *man, + struct ttm_buffer_object *bo, + const struct ttm_place *place, + struct ttm_resource *mem) { struct ttm_range_manager *rman = to_range_manager(man); struct drm_mm *mm = &rman->mm; @@ -95,8 +95,8 @@ static int ttm_range_man_get_node(struct ttm_resource_manager *man, return ret; } -static void ttm_range_man_put_node(struct ttm_resource_manager *man, - struct ttm_resource *mem) +static void ttm_range_man_free(struct ttm_resource_manager *man, + struct ttm_resource *mem) { struct ttm_range_manager *rman = to_range_manager(man); @@ -181,7 +181,7 @@ static void ttm_range_man_debug(struct ttm_resource_manager *man, } static const struct ttm_resource_manager_func ttm_range_manager_func = { - .get_node = ttm_range_man_get_node, - .put_node = ttm_range_man_put_node, + .alloc = ttm_range_man_alloc, + .free = ttm_range_man_free, .debug = ttm_range_man_debug }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index 3fea7a6c7cfa..bb76acb5b0fc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -156,6 +156,6 @@ void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type) } static const struct ttm_resource_manager_func vmw_gmrid_manager_func = { - .get_node = vmw_gmrid_man_get_node, - .put_node = vmw_gmrid_man_put_node, + .alloc = vmw_gmrid_man_get_node, + .free = vmw_gmrid_man_put_node, }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c index f594e2e6ab7e..3c00a9e7cfcc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c @@ -177,7 +177,7 @@ static void vmw_thp_debug(struct ttm_resource_manager *man, } const struct ttm_resource_manager_func vmw_thp_func = { - .get_node = vmw_thp_get_node, - .put_node = vmw_thp_put_node, + .alloc = vmw_thp_get_node, + .free = vmw_thp_put_node, .debug = vmw_thp_debug }; diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index eb1c3312e175..bfdda61edadb 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -49,7 +49,7 @@ struct ttm_resource_manager; struct ttm_resource_manager_func { /** - * struct ttm_resource_manager member get_node + * struct ttm_resource_manager_func member alloc * * @man: Pointer to a memory type manager. * @bo: Pointer to the buffer object we're allocating space for. @@ -76,13 +76,13 @@ struct ttm_resource_manager_func { * an implementation can and must use either a mutex or a spinlock to * protect any data structures managing the space. 
*/ - int (*get_node)(struct ttm_resource_manager *man, - struct ttm_buffer_object *bo, - const struct ttm_place *place, - struct ttm_resource *mem); + int (*alloc)(struct ttm_resource_manager *man, + struct ttm_buffer_object *bo, + const struct ttm_place *place, + struct ttm_resource *mem); /** - * struct ttm_resource_manager member put_node + * struct ttm_resource_manager_func member free * * @man: Pointer to a memory type manager. * @mem: Pointer to a struct ttm_resource to be filled in. @@ -91,11 +91,11 @@ struct ttm_resource_manager_func { * and that are identified by @mem::mm_node and @mem::start. May not * be called from within atomic context. */ - void (*put_node)(struct ttm_resource_manager *man, - struct ttm_resource *mem); + void (*free)(struct ttm_resource_manager *man, + struct ttm_resource *mem); /** - * struct ttm_resource_manager member debug + * struct ttm_resource_manager_func member debug * * @man: Pointer to a memory type manager. * @printer: Prefix to be used in printout to identify the caller. -- cgit v1.2.3 From b2458726b38cb69f3da3677dbdf53e47af0e8792 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 3 Aug 2020 16:25:15 +0200 Subject: drm/ttm: give resource functions their own [ch] files MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a separate object we work within TTM. Signed-off-by: Christian König Reviewed-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/384338/?series=80346&rev=1 --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 8 +- drivers/gpu/drm/nouveau/nouveau_bo.c | 4 +- drivers/gpu/drm/radeon/radeon_ttm.c | 4 +- drivers/gpu/drm/ttm/Makefile | 3 +- drivers/gpu/drm/ttm/ttm_bo.c | 124 ++------------ drivers/gpu/drm/ttm/ttm_bo_util.c | 4 +- drivers/gpu/drm/ttm/ttm_resource.c | 151 +++++++++++++++++ include/drm/ttm/ttm_bo_api.h | 70 +------- include/drm/ttm/ttm_bo_driver.h | 189 --------------------- include/drm/ttm/ttm_resource.h | 263 +++++++++++++++++++++++++++++ 11 files changed, 446 insertions(+), 376 deletions(-) create mode 100644 drivers/gpu/drm/ttm/ttm_resource.c create mode 100644 include/drm/ttm/ttm_resource.h (limited to 'include') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 43f4966331dd..b36d94f57d42 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -381,7 +381,7 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev, if (cpu_addr) amdgpu_bo_kunmap(*bo_ptr); - ttm_bo_mem_put(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem); + ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem); for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) { (*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 67d2eef2f9eb..462402fcd1a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -578,7 +578,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict, /* move BO (in tmp_mem) to new_mem */ r = ttm_bo_move_ttm(bo, ctx, new_mem); out_cleanup: - ttm_bo_mem_put(bo, &tmp_mem); + ttm_resource_free(bo, &tmp_mem); return r; } @@ -625,7 +625,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict, goto out_cleanup; } out_cleanup: - ttm_bo_mem_put(bo, &tmp_mem); + ttm_resource_free(bo, &tmp_mem); return r; } @@ -1203,11 +1203,11 @@ int 
amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) gtt->offset = (u64)tmp.start << PAGE_SHIFT; r = amdgpu_ttm_gart_bind(adev, bo, flags); if (unlikely(r)) { - ttm_bo_mem_put(bo, &tmp); + ttm_resource_free(bo, &tmp); return r; } - ttm_bo_mem_put(bo, &bo->mem); + ttm_resource_free(bo, &bo->mem); bo->mem = tmp; } diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 604a74323696..29d7d7e100f7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1191,7 +1191,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, ret = ttm_bo_move_ttm(bo, &ctx, new_reg); out: - ttm_bo_mem_put(bo, &tmp_reg); + ttm_resource_free(bo, &tmp_reg); return ret; } @@ -1227,7 +1227,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, goto out; out: - ttm_bo_mem_put(bo, &tmp_reg); + ttm_resource_free(bo, &tmp_reg); return ret; } diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 3355b69b13d1..31f4cf211b6a 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -271,7 +271,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, } r = ttm_bo_move_ttm(bo, &ctx, new_mem); out_cleanup: - ttm_bo_mem_put(bo, &tmp_mem); + ttm_resource_free(bo, &tmp_mem); return r; } @@ -309,7 +309,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, goto out_cleanup; } out_cleanup: - ttm_bo_mem_put(bo, &tmp_mem); + ttm_resource_free(bo, &tmp_mem); return r; } diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile index e54326e6cea4..90c0da88cc98 100644 --- a/drivers/gpu/drm/ttm/Makefile +++ b/drivers/gpu/drm/ttm/Makefile @@ -4,7 +4,8 @@ ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \ ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ - ttm_execbuf_util.o ttm_page_alloc.o ttm_range_manager.o + ttm_execbuf_util.o ttm_page_alloc.o ttm_range_manager.o \ + ttm_resource.o ttm-$(CONFIG_AGP) += ttm_agp_backend.o ttm-$(CONFIG_DRM_TTM_DMA_PAGE_POOL) += ttm_page_alloc_dma.o diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index ae71c3ab6cc4..55890314316b 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -77,19 +77,6 @@ static inline int ttm_mem_type_from_place(const struct ttm_place *place, return 0; } -void ttm_resource_manager_debug(struct ttm_resource_manager *man, - struct drm_printer *p) -{ - drm_printf(p, " use_type: %d\n", man->use_type); - drm_printf(p, " use_tt: %d\n", man->use_tt); - drm_printf(p, " size: %llu\n", man->size); - drm_printf(p, " available_caching: 0x%08X\n", man->available_caching); - drm_printf(p, " default_caching: 0x%08X\n", man->default_caching); - if (man->func && man->func->debug) - (*man->func->debug)(man, p); -} -EXPORT_SYMBOL(ttm_resource_manager_debug); - static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, struct ttm_placement *placement) { @@ -363,7 +350,7 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) ttm_tt_destroy(bo->ttm); bo->ttm = NULL; - ttm_bo_mem_put(bo, &bo->mem); + ttm_resource_free(bo, &bo->mem); } static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo) @@ -678,7 +665,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, if (unlikely(ret)) { if (ret != -ERESTARTSYS) pr_err("Buffer eviction failed\n"); - ttm_bo_mem_put(bo, &evict_mem); + ttm_resource_free(bo, &evict_mem); goto out; } bo->evicted = true; @@ -767,11 +754,11 @@ static int 
ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo, return r == -EDEADLK ? -EBUSY : r; } -static int ttm_mem_evict_first(struct ttm_bo_device *bdev, - struct ttm_resource_manager *man, - const struct ttm_place *place, - struct ttm_operation_ctx *ctx, - struct ww_acquire_ctx *ticket) +int ttm_mem_evict_first(struct ttm_bo_device *bdev, + struct ttm_resource_manager *man, + const struct ttm_place *place, + struct ttm_operation_ctx *ctx, + struct ww_acquire_ctx *ticket) { struct ttm_buffer_object *bo = NULL, *busy_bo = NULL; bool locked = false; @@ -839,32 +826,6 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev, return ret; } -static int ttm_bo_mem_get(struct ttm_buffer_object *bo, - const struct ttm_place *place, - struct ttm_resource *mem) -{ - struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, mem->mem_type); - - mem->mm_node = NULL; - if (!man->func || !man->func->alloc) - return 0; - - return man->func->alloc(man, bo, place, mem); -} - -void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_resource *mem) -{ - struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, mem->mem_type); - - if (!man->func || !man->func->free) - return; - - man->func->free(man, mem); - mem->mm_node = NULL; - mem->mem_type = TTM_PL_SYSTEM; -} -EXPORT_SYMBOL(ttm_bo_mem_put); - /** * Add the last move fence to the BO and reserve a new shared slot. */ @@ -915,7 +876,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, ticket = dma_resv_locking_ctx(bo->base.resv); do { - ret = ttm_bo_mem_get(bo, place, mem); + ret = ttm_resource_alloc(bo, place, mem); if (likely(!ret)) break; if (unlikely(ret != -ENOSPC)) @@ -1056,7 +1017,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, goto error; type_found = true; - ret = ttm_bo_mem_get(bo, place, mem); + ret = ttm_resource_alloc(bo, place, mem); if (ret == -ENOSPC) continue; if (unlikely(ret)) @@ -1065,7 +1026,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, man = ttm_manager_type(bdev, mem->mem_type); ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu); if (unlikely(ret)) { - ttm_bo_mem_put(bo, mem); + ttm_resource_free(bo, mem); if (ret == -EBUSY) continue; @@ -1132,7 +1093,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx); out_unlock: if (ret) - ttm_bo_mem_put(bo, &mem); + ttm_resource_free(bo, &mem); return ret; } @@ -1404,52 +1365,6 @@ int ttm_bo_create(struct ttm_bo_device *bdev, } EXPORT_SYMBOL(ttm_bo_create); -int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev, - struct ttm_resource_manager *man) -{ - struct ttm_operation_ctx ctx = { - .interruptible = false, - .no_wait_gpu = false, - .flags = TTM_OPT_FLAG_FORCE_ALLOC - }; - struct ttm_bo_global *glob = &ttm_bo_glob; - struct dma_fence *fence; - int ret; - unsigned i; - - /* - * Can't use standard list traversal since we're unlocking. 
- */ - - spin_lock(&glob->lru_lock); - for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { - while (!list_empty(&man->lru[i])) { - spin_unlock(&glob->lru_lock); - ret = ttm_mem_evict_first(bdev, man, NULL, &ctx, - NULL); - if (ret) - return ret; - spin_lock(&glob->lru_lock); - } - } - spin_unlock(&glob->lru_lock); - - spin_lock(&man->move_lock); - fence = dma_fence_get(man->move); - spin_unlock(&man->move_lock); - - if (fence) { - ret = dma_fence_wait(fence, false); - dma_fence_put(fence); - if (ret) - return ret; - } - - return 0; -} -EXPORT_SYMBOL(ttm_resource_manager_force_list_clean); - - int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) { struct ttm_resource_manager *man = ttm_manager_type(bdev, mem_type); @@ -1468,23 +1383,6 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) } EXPORT_SYMBOL(ttm_bo_evict_mm); -void ttm_resource_manager_init(struct ttm_resource_manager *man, - unsigned long p_size) -{ - unsigned i; - - man->use_io_reserve_lru = false; - mutex_init(&man->io_reserve_mutex); - spin_lock_init(&man->move_lock); - INIT_LIST_HEAD(&man->io_reserve_lru); - man->size = p_size; - - for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) - INIT_LIST_HEAD(&man->lru[i]); - man->move = NULL; -} -EXPORT_SYMBOL(ttm_resource_manager_init); - static void ttm_bo_global_kobj_release(struct kobject *kobj) { struct ttm_bo_global *glob = diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 496158acd5b9..12be2d3fcc81 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -47,7 +47,7 @@ struct ttm_transfer_obj { void ttm_bo_free_old_node(struct ttm_buffer_object *bo) { - ttm_bo_mem_put(bo, &bo->mem); + ttm_resource_free(bo, &bo->mem); } int ttm_bo_move_ttm(struct ttm_buffer_object *bo, @@ -398,7 +398,7 @@ out: * On error, keep the mm node! */ if (!ret) - ttm_bo_mem_put(bo, &old_copy); + ttm_resource_free(bo, &old_copy); return ret; } EXPORT_SYMBOL(ttm_bo_move_memcpy); diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c new file mode 100644 index 000000000000..33b642532e5c --- /dev/null +++ b/drivers/gpu/drm/ttm/ttm_resource.c @@ -0,0 +1,151 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Christian König + */ + +#include +#include + +int ttm_resource_alloc(struct ttm_buffer_object *bo, + const struct ttm_place *place, + struct ttm_resource *res) +{ + struct ttm_resource_manager *man = + ttm_manager_type(bo->bdev, res->mem_type); + + res->mm_node = NULL; + if (!man->func || !man->func->alloc) + return 0; + + return man->func->alloc(man, bo, place, res); +} + +void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res) +{ + struct ttm_resource_manager *man = + ttm_manager_type(bo->bdev, res->mem_type); + + if (man->func && man->func->free) + man->func->free(man, res); + + res->mm_node = NULL; + res->mem_type = TTM_PL_SYSTEM; +} +EXPORT_SYMBOL(ttm_resource_free); + +/** + * ttm_resource_manager_init + * + * @man: memory manager object to init + * @p_size: size managed area in pages. + * + * Initialise core parts of a manager object. + */ +void ttm_resource_manager_init(struct ttm_resource_manager *man, + unsigned long p_size) +{ + unsigned i; + + man->use_io_reserve_lru = false; + mutex_init(&man->io_reserve_mutex); + spin_lock_init(&man->move_lock); + INIT_LIST_HEAD(&man->io_reserve_lru); + man->size = p_size; + + for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) + INIT_LIST_HEAD(&man->lru[i]); + man->move = NULL; +} +EXPORT_SYMBOL(ttm_resource_manager_init); + +/* + * ttm_resource_manager_force_list_clean + * + * @bdev - device to use + * @man - manager to use + * + * Force all the objects out of a memory manager until clean. + * Part of memory manager cleanup sequence. + */ +int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev, + struct ttm_resource_manager *man) +{ + struct ttm_operation_ctx ctx = { + .interruptible = false, + .no_wait_gpu = false, + .flags = TTM_OPT_FLAG_FORCE_ALLOC + }; + struct ttm_bo_global *glob = &ttm_bo_glob; + struct dma_fence *fence; + int ret; + unsigned i; + + /* + * Can't use standard list traversal since we're unlocking. + */ + + spin_lock(&glob->lru_lock); + for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { + while (!list_empty(&man->lru[i])) { + spin_unlock(&glob->lru_lock); + ret = ttm_mem_evict_first(bdev, man, NULL, &ctx, + NULL); + if (ret) + return ret; + spin_lock(&glob->lru_lock); + } + } + spin_unlock(&glob->lru_lock); + + spin_lock(&man->move_lock); + fence = dma_fence_get(man->move); + spin_unlock(&man->move_lock); + + if (fence) { + ret = dma_fence_wait(fence, false); + dma_fence_put(fence); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL(ttm_resource_manager_force_list_clean); + +/** + * ttm_resource_manager_debug + * + * @man: manager type to dump. + * @p: printer to use for debug. 
+ */ +void ttm_resource_manager_debug(struct ttm_resource_manager *man, + struct drm_printer *p) +{ + drm_printf(p, " use_type: %d\n", man->use_type); + drm_printf(p, " use_tt: %d\n", man->use_tt); + drm_printf(p, " size: %llu\n", man->size); + drm_printf(p, " available_caching: 0x%08X\n", man->available_caching); + drm_printf(p, " default_caching: 0x%08X\n", man->default_caching); + if (man->func && man->func->debug) + (*man->func->debug)(man, p); +} +EXPORT_SYMBOL(ttm_resource_manager_debug); diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 770ad2195875..dbb276abcf6a 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -42,6 +42,8 @@ #include #include +#include "ttm_resource.h" + struct ttm_bo_global; struct ttm_bo_device; @@ -54,57 +56,6 @@ struct ttm_place; struct ttm_lru_bulk_move; -struct ttm_resource_manager; - -/** - * struct ttm_bus_placement - * - * @addr: mapped virtual address - * @base: bus base address - * @is_iomem: is this io memory ? - * @size: size in byte - * @offset: offset from the base address - * @io_reserved_vm: The VM system has a refcount in @io_reserved_count - * @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve - * - * Structure indicating the bus placement of an object. - */ -struct ttm_bus_placement { - void *addr; - phys_addr_t base; - unsigned long size; - unsigned long offset; - bool is_iomem; - bool io_reserved_vm; - uint64_t io_reserved_count; -}; - - -/** - * struct ttm_resource - * - * @mm_node: Memory manager node. - * @size: Requested size of memory region. - * @num_pages: Actual size of memory region in pages. - * @page_alignment: Page alignment. - * @placement: Placement flags. - * @bus: Placement on io bus accessible to the CPU - * - * Structure indicating the placement and space resources used by a - * buffer object. - */ - -struct ttm_resource { - void *mm_node; - unsigned long start; - unsigned long size; - unsigned long num_pages; - uint32_t page_alignment; - uint32_t mem_type; - uint32_t placement; - struct ttm_bus_placement bus; -}; - /** * enum ttm_bo_type * @@ -533,17 +484,6 @@ int ttm_bo_create(struct ttm_bo_device *bdev, unsigned long size, uint32_t page_alignment, bool interruptible, struct ttm_buffer_object **p_bo); -/** - * ttm_resource_manager_init - * - * @man: memory manager object to init - * @p_size: size managed area in pages. - * - * Initialise core parts of a manager object. - */ -void ttm_resource_manager_init(struct ttm_resource_manager *man, - unsigned long p_size); - /** * ttm_bo_evict_mm * @@ -680,6 +620,12 @@ static inline bool ttm_bo_uses_embedded_gem_object(struct ttm_buffer_object *bo) return bo->base.dev != NULL; } +int ttm_mem_evict_first(struct ttm_bo_device *bdev, + struct ttm_resource_manager *man, + const struct ttm_place *place, + struct ttm_operation_ctx *ctx, + struct ww_acquire_ctx *ticket); + /* Default number of pre-faulted pages in the TTM fault handler */ #define TTM_BO_VM_NUM_PREFAULT 16 diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index bfdda61edadb..27b4a1e92875 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -43,131 +43,6 @@ #include "ttm_placement.h" #include "ttm_tt.h" -#define TTM_MAX_BO_PRIORITY 4U - -struct ttm_resource_manager; - -struct ttm_resource_manager_func { - /** - * struct ttm_resource_manager_func member alloc - * - * @man: Pointer to a memory type manager. - * @bo: Pointer to the buffer object we're allocating space for. 
- * @placement: Placement details. - * @flags: Additional placement flags. - * @mem: Pointer to a struct ttm_resource to be filled in. - * - * This function should allocate space in the memory type managed - * by @man. Placement details if - * applicable are given by @placement. If successful, - * @mem::mm_node should be set to a non-null value, and - * @mem::start should be set to a value identifying the beginning - * of the range allocated, and the function should return zero. - * If the memory region accommodate the buffer object, @mem::mm_node - * should be set to NULL, and the function should return 0. - * If a system error occurred, preventing the request to be fulfilled, - * the function should return a negative error code. - * - * Note that @mem::mm_node will only be dereferenced by - * struct ttm_resource_manager functions and optionally by the driver, - * which has knowledge of the underlying type. - * - * This function may not be called from within atomic context, so - * an implementation can and must use either a mutex or a spinlock to - * protect any data structures managing the space. - */ - int (*alloc)(struct ttm_resource_manager *man, - struct ttm_buffer_object *bo, - const struct ttm_place *place, - struct ttm_resource *mem); - - /** - * struct ttm_resource_manager_func member free - * - * @man: Pointer to a memory type manager. - * @mem: Pointer to a struct ttm_resource to be filled in. - * - * This function frees memory type resources previously allocated - * and that are identified by @mem::mm_node and @mem::start. May not - * be called from within atomic context. - */ - void (*free)(struct ttm_resource_manager *man, - struct ttm_resource *mem); - - /** - * struct ttm_resource_manager_func member debug - * - * @man: Pointer to a memory type manager. - * @printer: Prefix to be used in printout to identify the caller. - * - * This function is called to print out the state of the memory - * type manager to aid debugging of out-of-memory conditions. - * It may not be called from within atomic context. - */ - void (*debug)(struct ttm_resource_manager *man, - struct drm_printer *printer); -}; - -/** - * struct ttm_resource_manager - * - * @use_type: The memory type is enabled. - * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory - * managed by this memory type. - * @gpu_offset: If used, the GPU offset of the first managed page of - * fixed memory or the first managed location in an aperture. - * @size: Size of the managed region. - * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX, - * as defined in ttm_placement_common.h - * @default_caching: The default caching policy used for a buffer object - * placed in this memory type if the user doesn't provide one. - * @func: structure pointer implementing the range manager. See above - * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures - * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions - * reserved by the TTM vm system. - * @io_reserve_lru: Optional lru list for unreserving io mem regions. - * @move_lock: lock for move fence - * static information. bdev::driver::io_mem_free is never used. - * @lru: The lru list for this memory type. - * @move: The fence of the last pipelined move operation. - * - * This structure is used to identify and manage memory types for a device. - */ - - - -struct ttm_resource_manager { - /* - * No protection. Constant from start. 
- */ - bool use_type; - bool use_tt; - uint64_t size; - uint32_t available_caching; - uint32_t default_caching; - const struct ttm_resource_manager_func *func; - struct mutex io_reserve_mutex; - bool use_io_reserve_lru; - spinlock_t move_lock; - - /* - * Protected by @io_reserve_mutex: - */ - - struct list_head io_reserve_lru; - - /* - * Protected by the global->lru_lock. - */ - - struct list_head lru[TTM_MAX_BO_PRIORITY]; - - /* - * Protected by @move_lock. - */ - struct dma_fence *move; -}; - /** * struct ttm_bo_driver * @@ -537,8 +412,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, struct ttm_resource *mem, struct ttm_operation_ctx *ctx); -void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_resource *mem); - int ttm_bo_device_release(struct ttm_bo_device *bdev); /** @@ -675,59 +548,6 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo) dma_resv_unlock(bo->base.resv); } -/** - * ttm_resource_manager_set_used - * - * @man: A memory manager object. - * @used: usage state to set. - * - * Set the manager in use flag. If disabled the manager is no longer - * used for object placement. - */ -static inline void ttm_resource_manager_set_used(struct ttm_resource_manager *man, bool used) -{ - man->use_type = used; -} - -/** - * ttm_resource_manager_used - * - * @man: Manager to get used state for - * - * Get the in use flag for a manager. - * Returns: - * true is used, false if not. - */ -static inline bool ttm_resource_manager_used(struct ttm_resource_manager *man) -{ - return man->use_type; -} - -/** - * ttm_resource_manager_cleanup - * - * @man: A memory manager object. - * - * Cleanup the move fences from the memory manager object. - */ -static inline void ttm_resource_manager_cleanup(struct ttm_resource_manager *man) -{ - dma_fence_put(man->move); - man->move = NULL; -} - -/* - * ttm_resource_manager_force_list_clean - * - * @bdev - device to use - * @man - manager to use - * - * Force all the objects out of a memory manager until clean. - * Part of memory manager cleanup sequence. - */ -int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev, - struct ttm_resource_manager *man); - /* * ttm_bo_util.c */ @@ -874,13 +694,4 @@ int ttm_range_man_init(struct ttm_bo_device *bdev, int ttm_range_man_fini(struct ttm_bo_device *bdev, unsigned type); -/** - * ttm_resource_manager_debug - * - * @man: manager type to dump. - * @p: printer to use for debug. - */ -void ttm_resource_manager_debug(struct ttm_resource_manager *man, - struct drm_printer *p); - #endif diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h new file mode 100644 index 000000000000..bac22a56f6cd --- /dev/null +++ b/include/drm/ttm/ttm_resource.h @@ -0,0 +1,263 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Christian König + */ + +#ifndef _TTM_RESOURCE_H_ +#define _TTM_RESOURCE_H_ + +#include +#include +#include +#include + +#define TTM_MAX_BO_PRIORITY 4U + +struct ttm_bo_device; +struct ttm_resource_manager; +struct ttm_resource; +struct ttm_place; +struct ttm_buffer_object; + +struct ttm_resource_manager_func { + /** + * struct ttm_resource_manager_func member alloc + * + * @man: Pointer to a memory type manager. + * @bo: Pointer to the buffer object we're allocating space for. + * @placement: Placement details. + * @flags: Additional placement flags. + * @mem: Pointer to a struct ttm_resource to be filled in. + * + * This function should allocate space in the memory type managed + * by @man. Placement details if + * applicable are given by @placement. If successful, + * @mem::mm_node should be set to a non-null value, and + * @mem::start should be set to a value identifying the beginning + * of the range allocated, and the function should return zero. + * If the memory region accommodate the buffer object, @mem::mm_node + * should be set to NULL, and the function should return 0. + * If a system error occurred, preventing the request to be fulfilled, + * the function should return a negative error code. + * + * Note that @mem::mm_node will only be dereferenced by + * struct ttm_resource_manager functions and optionally by the driver, + * which has knowledge of the underlying type. + * + * This function may not be called from within atomic context, so + * an implementation can and must use either a mutex or a spinlock to + * protect any data structures managing the space. + */ + int (*alloc)(struct ttm_resource_manager *man, + struct ttm_buffer_object *bo, + const struct ttm_place *place, + struct ttm_resource *mem); + + /** + * struct ttm_resource_manager_func member free + * + * @man: Pointer to a memory type manager. + * @mem: Pointer to a struct ttm_resource to be filled in. + * + * This function frees memory type resources previously allocated + * and that are identified by @mem::mm_node and @mem::start. May not + * be called from within atomic context. + */ + void (*free)(struct ttm_resource_manager *man, + struct ttm_resource *mem); + + /** + * struct ttm_resource_manager_func member debug + * + * @man: Pointer to a memory type manager. + * @printer: Prefix to be used in printout to identify the caller. + * + * This function is called to print out the state of the memory + * type manager to aid debugging of out-of-memory conditions. + * It may not be called from within atomic context. + */ + void (*debug)(struct ttm_resource_manager *man, + struct drm_printer *printer); +}; + +/** + * struct ttm_resource_manager + * + * @use_type: The memory type is enabled. + * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory + * managed by this memory type. + * @gpu_offset: If used, the GPU offset of the first managed page of + * fixed memory or the first managed location in an aperture. + * @size: Size of the managed region. 
+ * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX, + * as defined in ttm_placement_common.h + * @default_caching: The default caching policy used for a buffer object + * placed in this memory type if the user doesn't provide one. + * @func: structure pointer implementing the range manager. See above + * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures + * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions + * reserved by the TTM vm system. + * @io_reserve_lru: Optional lru list for unreserving io mem regions. + * @move_lock: lock for move fence + * static information. bdev::driver::io_mem_free is never used. + * @lru: The lru list for this memory type. + * @move: The fence of the last pipelined move operation. + * + * This structure is used to identify and manage memory types for a device. + */ +struct ttm_resource_manager { + /* + * No protection. Constant from start. + */ + bool use_type; + bool use_tt; + uint64_t size; + uint32_t available_caching; + uint32_t default_caching; + const struct ttm_resource_manager_func *func; + struct mutex io_reserve_mutex; + bool use_io_reserve_lru; + spinlock_t move_lock; + + /* + * Protected by @io_reserve_mutex: + */ + + struct list_head io_reserve_lru; + + /* + * Protected by the global->lru_lock. + */ + + struct list_head lru[TTM_MAX_BO_PRIORITY]; + + /* + * Protected by @move_lock. + */ + struct dma_fence *move; +}; + +/** + * struct ttm_bus_placement + * + * @addr: mapped virtual address + * @base: bus base address + * @is_iomem: is this io memory ? + * @size: size in byte + * @offset: offset from the base address + * @io_reserved_vm: The VM system has a refcount in @io_reserved_count + * @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve + * + * Structure indicating the bus placement of an object. + */ +struct ttm_bus_placement { + void *addr; + phys_addr_t base; + unsigned long size; + unsigned long offset; + bool is_iomem; + bool io_reserved_vm; + uint64_t io_reserved_count; +}; + +/** + * struct ttm_resource + * + * @mm_node: Memory manager node. + * @size: Requested size of memory region. + * @num_pages: Actual size of memory region in pages. + * @page_alignment: Page alignment. + * @placement: Placement flags. + * @bus: Placement on io bus accessible to the CPU + * + * Structure indicating the placement and space resources used by a + * buffer object. + */ +struct ttm_resource { + void *mm_node; + unsigned long start; + unsigned long size; + unsigned long num_pages; + uint32_t page_alignment; + uint32_t mem_type; + uint32_t placement; + struct ttm_bus_placement bus; +}; + +/** + * ttm_resource_manager_set_used + * + * @man: A memory manager object. + * @used: usage state to set. + * + * Set the manager in use flag. If disabled the manager is no longer + * used for object placement. + */ +static inline void +ttm_resource_manager_set_used(struct ttm_resource_manager *man, bool used) +{ + man->use_type = used; +} + +/** + * ttm_resource_manager_used + * + * @man: Manager to get used state for + * + * Get the in use flag for a manager. + * Returns: + * true is used, false if not. + */ +static inline bool ttm_resource_manager_used(struct ttm_resource_manager *man) +{ + return man->use_type; +} + +/** + * ttm_resource_manager_cleanup + * + * @man: A memory manager object. + * + * Cleanup the move fences from the memory manager object. 
+ */ +static inline void +ttm_resource_manager_cleanup(struct ttm_resource_manager *man) +{ + dma_fence_put(man->move); + man->move = NULL; +} + +int ttm_resource_alloc(struct ttm_buffer_object *bo, + const struct ttm_place *place, + struct ttm_resource *res); +void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res); + +void ttm_resource_manager_init(struct ttm_resource_manager *man, + unsigned long p_size); + +int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev, + struct ttm_resource_manager *man); + +void ttm_resource_manager_debug(struct ttm_resource_manager *man, + struct drm_printer *p); + +#endif -- cgit v1.2.3 From 772616b031f06e05846488b01dab46a7c832da13 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Tue, 11 Aug 2020 18:30:21 -0700 Subject: mm: memcg/percpu: per-memcg percpu memory statistics MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Percpu memory can represent a noticeable chunk of the total memory consumption, especially on big machines with many CPUs. Let's track percpu memory usage for each memcg and display it in memory.stat. A percpu allocation is usually scattered over multiple pages (and nodes), and can be significantly smaller than a page. So let's add a byte-sized counter on the memcg level: MEMCG_PERCPU_B. Byte-sized vmstat infra created for slabs can be perfectly reused for percpu case. [guro@fb.com: v3] Link: http://lkml.kernel.org/r/20200623184515.4132564-4-guro@fb.com Signed-off-by: Roman Gushchin Signed-off-by: Andrew Morton Reviewed-by: Shakeel Butt Acked-by: Dennis Zhou Acked-by: Johannes Weiner Cc: Christoph Lameter Cc: David Rientjes Cc: Joonsoo Kim Cc: Mel Gorman Cc: Michal Hocko Cc: Pekka Enberg Cc: Tejun Heo Cc: Tobin C. Harding Cc: Vlastimil Babka Cc: Waiman Long Cc: Bixuan Cui Cc: Michal Koutný Cc: Stephen Rothwell Link: http://lkml.kernel.org/r/20200608230819.832349-4-guro@fb.com Signed-off-by: Linus Torvalds --- Documentation/admin-guide/cgroup-v2.rst | 4 ++++ include/linux/memcontrol.h | 8 ++++++++ mm/memcontrol.c | 4 +++- mm/percpu.c | 10 ++++++++++ 4 files changed, 25 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index fa4018afa5a4..6be43781ec7f 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1274,6 +1274,10 @@ PAGE_SIZE multiple when read back. Amount of memory used for storing in-kernel data structures. + percpu + Amount of memory used for storing per-cpu kernel + data structures. 
+ sock Amount of memory used in network transmission buffers diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 1bb49b600310..2c2d301eac33 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -32,6 +32,7 @@ struct kmem_cache; enum memcg_stat_item { MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS, MEMCG_SOCK, + MEMCG_PERCPU_B, MEMCG_NR_STAT, }; @@ -339,6 +340,13 @@ struct mem_cgroup { extern struct mem_cgroup *root_mem_cgroup; +static __always_inline bool memcg_stat_item_in_bytes(int idx) +{ + if (idx == MEMCG_PERCPU_B) + return true; + return vmstat_item_in_bytes(idx); +} + static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) { return (memcg == root_mem_cgroup); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 8d9ceea7fe4d..36d5300f9b69 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -781,7 +781,7 @@ void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val) if (mem_cgroup_disabled()) return; - if (vmstat_item_in_bytes(idx)) + if (memcg_stat_item_in_bytes(idx)) threshold <<= PAGE_SHIFT; x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]); @@ -1488,6 +1488,8 @@ static char *memory_stat_format(struct mem_cgroup *memcg) seq_buf_printf(&s, "slab %llu\n", (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B) + memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B))); + seq_buf_printf(&s, "percpu %llu\n", + (u64)memcg_page_state(memcg, MEMCG_PERCPU_B)); seq_buf_printf(&s, "sock %llu\n", (u64)memcg_page_state(memcg, MEMCG_SOCK) * PAGE_SIZE); diff --git a/mm/percpu.c b/mm/percpu.c index dc1a213293aa..f4709629e6de 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -1610,6 +1610,11 @@ static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg, if (chunk) { chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg; + + rcu_read_lock(); + mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B, + size * num_possible_cpus()); + rcu_read_unlock(); } else { obj_cgroup_uncharge(objcg, size * num_possible_cpus()); obj_cgroup_put(objcg); @@ -1628,6 +1633,11 @@ static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size) obj_cgroup_uncharge(objcg, size * num_possible_cpus()); + rcu_read_lock(); + mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B, + -(size * num_possible_cpus())); + rcu_read_unlock(); + obj_cgroup_put(objcg); } -- cgit v1.2.3 From 8ca39e6874f812a393bb66d9fdbb7598d5f0451c Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Tue, 11 Aug 2020 18:30:32 -0700 Subject: mm/hugetlb: add mempolicy check in the reservation routine In the reservation routine, we only check whether the cpuset meets the memory allocation requirements. But we ignore the mempolicy of MPOL_BIND case. If someone mmap hugetlb succeeds, but the subsequent memory allocation may fail due to mempolicy restrictions and receives the SIGBUS signal. This can be reproduced by the follow steps. 1) Compile the test case. cd tools/testing/selftests/vm/ gcc map_hugetlb.c -o map_hugetlb 2) Pre-allocate huge pages. Suppose there are 2 numa nodes in the system. Each node will pre-allocate one huge page. echo 2 > /proc/sys/vm/nr_hugepages 3) Run test case(mmap 4MB). We receive the SIGBUS signal. numactl --membind=3D0 ./map_hugetlb 4 With this patch applied, the mmap will fail in the step 3) and throw "mmap: Cannot allocate memory". 
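A condensed userspace sketch of that reproducer (illustrative only: the 4MB size matches step 3 above, the mempolicy is applied externally via numactl as in the steps, and error handling is minimal):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000     /* as in the selftest, for older libcs */
#endif

#define LENGTH (4UL * 1024 * 1024)      /* 4MB, i.e. two 2MB huge pages */

int main(void)
{
        /* reserve a hugetlb mapping, then fault it in */
        char *addr = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

        if (addr == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        memset(addr, 0, LENGTH);
        puts("touched 4MB of hugetlb memory");
        munmap(addr, LENGTH);
        return 0;
}

Run under "numactl --membind=0" with one huge page preallocated per node, an unpatched kernel accepts the mmap() and the memset() is later killed by SIGBUS; with the patch the reservation itself fails with ENOMEM.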
[akpm@linux-foundation.org: include sched.h for `current'] Reported-by: Jianchao Guo Suggested-by: Michal Hocko Signed-off-by: Muchun Song Signed-off-by: Andrew Morton Reviewed-by: Mike Kravetz Cc: David Rientjes Cc: Mel Gorman Cc: Michel Lespinasse Cc: Baoquan He Link: http://lkml.kernel.org/r/20200728034938.14993-1-songmuchun@bytedance.com Signed-off-by: Linus Torvalds --- include/linux/mempolicy.h | 16 +++++++++++++++- mm/hugetlb.c | 22 ++++++++++++++++++---- mm/mempolicy.c | 2 +- 3 files changed, 34 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index ea9c15b60a96..5f1648c23e29 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -6,7 +6,7 @@ #ifndef _LINUX_MEMPOLICY_H #define _LINUX_MEMPOLICY_H 1 - +#include #include #include #include @@ -152,6 +152,15 @@ extern int huge_node(struct vm_area_struct *vma, extern bool init_nodemask_of_mempolicy(nodemask_t *mask); extern bool mempolicy_nodemask_intersects(struct task_struct *tsk, const nodemask_t *mask); +extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy); + +static inline nodemask_t *policy_nodemask_current(gfp_t gfp) +{ + struct mempolicy *mpol = get_task_policy(current); + + return policy_nodemask(gfp, mpol); +} + extern unsigned int mempolicy_slab_node(void); extern enum zone_type policy_zone; @@ -281,5 +290,10 @@ static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma, static inline void mpol_put_task_policy(struct task_struct *task) { } + +static inline nodemask_t *policy_nodemask_current(gfp_t gfp) +{ + return NULL; +} #endif /* CONFIG_NUMA */ #endif diff --git a/mm/hugetlb.c b/mm/hugetlb.c index e52c878940bb..dffafb5bf2ed 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3458,13 +3458,21 @@ static int __init default_hugepagesz_setup(char *s) } __setup("default_hugepagesz=", default_hugepagesz_setup); -static unsigned int cpuset_mems_nr(unsigned int *array) +static unsigned int allowed_mems_nr(struct hstate *h) { int node; unsigned int nr = 0; + nodemask_t *mpol_allowed; + unsigned int *array = h->free_huge_pages_node; + gfp_t gfp_mask = htlb_alloc_mask(h); + + mpol_allowed = policy_nodemask_current(gfp_mask); - for_each_node_mask(node, cpuset_current_mems_allowed) - nr += array[node]; + for_each_node_mask(node, cpuset_current_mems_allowed) { + if (!mpol_allowed || + (mpol_allowed && node_isset(node, *mpol_allowed))) + nr += array[node]; + } return nr; } @@ -3643,12 +3651,18 @@ static int hugetlb_acct_memory(struct hstate *h, long delta) * we fall back to check against current free page availability as * a best attempt and hopefully to minimize the impact of changing * semantics that cpuset has. + * + * Apart from cpuset, we also have memory policy mechanism that + * also determines from which node the kernel will allocate memory + * in a NUMA system. So similar to cpuset, we also should consider + * the memory policy of the current task. Similar to the description + * above. 
*/ if (delta > 0) { if (gather_surplus_pages(h, delta) < 0) goto out; - if (delta > cpuset_mems_nr(h->free_huge_pages_node)) { + if (delta > allowed_mems_nr(h)) { return_unused_surplus_pages(h, delta); goto out; } diff --git a/mm/mempolicy.c b/mm/mempolicy.c index b9e85d467352..7af44d7cdd11 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1890,7 +1890,7 @@ static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone) * Return a nodemask representing a mempolicy for filtering nodes for * page allocation */ -static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) +nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) { /* Lower zones don't get a nodemask applied for MPOL_BIND */ if (unlikely(policy->mode == MPOL_BIND) && -- cgit v1.2.3 From b518154e59aab3ad0780a169c5cc84bd4ee4357e Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 11 Aug 2020 18:30:40 -0700 Subject: mm/vmscan: protect the workingset on anonymous LRU In current implementation, newly created or swap-in anonymous page is started on active list. Growing active list results in rebalancing active/inactive list so old pages on active list are demoted to inactive list. Hence, the page on active list isn't protected at all. Following is an example of this situation. Assume that 50 hot pages on active list. Numbers denote the number of pages on active/inactive list (active | inactive). 1. 50 hot pages on active list 50(h) | 0 2. workload: 50 newly created (used-once) pages 50(uo) | 50(h) 3. workload: another 50 newly created (used-once) pages 50(uo) | 50(uo), swap-out 50(h) This patch tries to fix this issue. Like as file LRU, newly created or swap-in anonymous pages will be inserted to the inactive list. They are promoted to active list if enough reference happens. This simple modification changes the above example as following. 1. 50 hot pages on active list 50(h) | 0 2. workload: 50 newly created (used-once) pages 50(h) | 50(uo) 3. workload: another 50 newly created (used-once) pages 50(h) | 50(uo), swap-out 50(uo) As you can see, hot pages on active list would be protected. Note that, this implementation has a drawback that the page cannot be promoted and will be swapped-out if re-access interval is greater than the size of inactive list but less than the size of total(active+inactive). To solve this potential issue, following patch will apply workingset detection similar to the one that's already applied to file LRU. 
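The swap-out counts in the example above can be reproduced with a toy userspace model (an illustrative sketch only, not kernel code: the fixed 50-entry lists, strict FIFO demotion and the absence of re-references are simplifying assumptions):

#include <stdio.h>
#include <string.h>

#define LIST 50

struct lru {
        char active[LIST];      /* 'H' = hot, 'U' = used-once, 0 = empty */
        char inactive[LIST];
};

/* push @page at the head of @list and return whatever falls off the tail */
static char push(char *list, char page)
{
        char tail = list[LIST - 1];

        memmove(list + 1, list, LIST - 1);
        list[0] = page;
        return tail;
}

/* add one new used-once page; returns the page evicted to swap (0 if none) */
static char add_page(struct lru *lru, int start_on_active)
{
        char spill = 'U';

        if (start_on_active)
                spill = push(lru->active, 'U');  /* demotes the active tail */
        return push(lru->inactive, spill);
}

static int hot_pages_lost(int start_on_active)
{
        struct lru lru;
        int i, lost = 0;

        memset(&lru, 0, sizeof(lru));
        memset(lru.active, 'H', LIST);           /* 50 hot pages on active */
        for (i = 0; i < 100; i++)                /* two bursts of 50 used-once pages */
                if (add_page(&lru, start_on_active) == 'H')
                        lost++;
        return lost;
}

int main(void)
{
        printf("new pages start on active list:   %d hot pages swapped out\n",
               hot_pages_lost(1));
        printf("new pages start on inactive list: %d hot pages swapped out\n",
               hot_pages_lost(0));
        return 0;
}

With used-once pages started on the active list the model swaps out all 50 hot pages; started on the inactive list, none of them are lost.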
Signed-off-by: Joonsoo Kim Signed-off-by: Andrew Morton Acked-by: Johannes Weiner Acked-by: Vlastimil Babka Cc: Hugh Dickins Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Link: http://lkml.kernel.org/r/1595490560-15117-3-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Linus Torvalds --- include/linux/swap.h | 2 +- kernel/events/uprobes.c | 2 +- mm/huge_memory.c | 2 +- mm/khugepaged.c | 2 +- mm/memory.c | 9 ++++----- mm/migrate.c | 2 +- mm/swap.c | 13 +++++++------ mm/swapfile.c | 2 +- mm/userfaultfd.c | 2 +- mm/vmscan.c | 4 +--- 10 files changed, 19 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/include/linux/swap.h b/include/linux/swap.h index 7eb59bc552a5..51ec9cdb92c0 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -352,7 +352,7 @@ extern void deactivate_page(struct page *page); extern void mark_page_lazyfree(struct page *page); extern void swap_setup(void); -extern void lru_cache_add_active_or_unevictable(struct page *page, +extern void lru_cache_add_inactive_or_unevictable(struct page *page, struct vm_area_struct *vma); /* linux/mm/vmscan.c */ diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 25de10c904e6..49047d479c57 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -184,7 +184,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, if (new_page) { get_page(new_page); page_add_new_anon_rmap(new_page, vma, addr, false); - lru_cache_add_active_or_unevictable(new_page, vma); + lru_cache_add_inactive_or_unevictable(new_page, vma); } else /* no new page, just dec_mm_counter for old_page */ dec_mm_counter(mm, MM_ANONPAGES); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 206f52b36ffb..863c495776d7 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -640,7 +640,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf, entry = mk_huge_pmd(page, vma->vm_page_prot); entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); page_add_new_anon_rmap(page, vma, haddr, true); - lru_cache_add_active_or_unevictable(page, vma); + lru_cache_add_inactive_or_unevictable(page, vma); pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); diff --git a/mm/khugepaged.c b/mm/khugepaged.c index b52bd46ad146..15a9af791014 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1173,7 +1173,7 @@ static void collapse_huge_page(struct mm_struct *mm, spin_lock(pmd_ptl); BUG_ON(!pmd_none(*pmd)); page_add_new_anon_rmap(new_page, vma, address, true); - lru_cache_add_active_or_unevictable(new_page, vma); + lru_cache_add_inactive_or_unevictable(new_page, vma); pgtable_trans_huge_deposit(mm, pmd, pgtable); set_pmd_at(mm, address, pmd, _pmd); update_mmu_cache_pmd(vma, address, pmd); diff --git a/mm/memory.c b/mm/memory.c index c39a13b09602..6fe8b5b22c57 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2715,7 +2715,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) */ ptep_clear_flush_notify(vma, vmf->address, vmf->pte); page_add_new_anon_rmap(new_page, vma, vmf->address, false); - lru_cache_add_active_or_unevictable(new_page, vma); + lru_cache_add_inactive_or_unevictable(new_page, vma); /* * We call the notify macro here because, when using secondary * mmu page tables (such as kvm shadow page tables), we want the @@ -3266,10 +3266,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) /* ksm created a completely new copy */ if (unlikely(page != swapcache && swapcache)) { 
page_add_new_anon_rmap(page, vma, vmf->address, false); - lru_cache_add_active_or_unevictable(page, vma); + lru_cache_add_inactive_or_unevictable(page, vma); } else { do_page_add_anon_rmap(page, vma, vmf->address, exclusive); - activate_page(page); } swap_free(entry); @@ -3414,7 +3413,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); page_add_new_anon_rmap(page, vma, vmf->address, false); - lru_cache_add_active_or_unevictable(page, vma); + lru_cache_add_inactive_or_unevictable(page, vma); setpte: set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); @@ -3672,7 +3671,7 @@ vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page) if (write && !(vma->vm_flags & VM_SHARED)) { inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); page_add_new_anon_rmap(page, vma, vmf->address, false); - lru_cache_add_active_or_unevictable(page, vma); + lru_cache_add_inactive_or_unevictable(page, vma); } else { inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page)); page_add_file_rmap(page, false); diff --git a/mm/migrate.c b/mm/migrate.c index d179657f8685..819e55130134 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -2830,7 +2830,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate, inc_mm_counter(mm, MM_ANONPAGES); page_add_new_anon_rmap(page, vma, addr, false); if (!is_zone_device_page(page)) - lru_cache_add_active_or_unevictable(page, vma); + lru_cache_add_inactive_or_unevictable(page, vma); get_page(page); if (flush) { diff --git a/mm/swap.c b/mm/swap.c index de257c0a89b1..9285e60c7d6e 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -476,23 +476,24 @@ void lru_cache_add(struct page *page) EXPORT_SYMBOL(lru_cache_add); /** - * lru_cache_add_active_or_unevictable + * lru_cache_add_inactive_or_unevictable * @page: the page to be added to LRU * @vma: vma in which page is mapped for determining reclaimability * - * Place @page on the active or unevictable LRU list, depending on its + * Place @page on the inactive or unevictable LRU list, depending on its * evictability. Note that if the page is not evictable, it goes * directly back onto it's zone's unevictable list, it does NOT use a * per cpu pagevec. 
*/ -void lru_cache_add_active_or_unevictable(struct page *page, +void lru_cache_add_inactive_or_unevictable(struct page *page, struct vm_area_struct *vma) { + bool unevictable; + VM_BUG_ON_PAGE(PageLRU(page), page); - if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) - SetPageActive(page); - else if (!TestSetPageMlocked(page)) { + unevictable = (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED; + if (unlikely(unevictable) && !TestSetPageMlocked(page)) { /* * We use the irq-unsafe __mod_zone_page_stat because this * counter is not modified from interrupt context, and the pte diff --git a/mm/swapfile.c b/mm/swapfile.c index 6c26916e95fd..82183432fdd0 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1915,7 +1915,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, page_add_anon_rmap(page, vma, addr, false); } else { /* ksm created a completely new copy */ page_add_new_anon_rmap(page, vma, addr, false); - lru_cache_add_active_or_unevictable(page, vma); + lru_cache_add_inactive_or_unevictable(page, vma); } swap_free(entry); /* diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index b80419320c7d..9a3d451402d7 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -123,7 +123,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm, inc_mm_counter(dst_mm, MM_ANONPAGES); page_add_new_anon_rmap(page, dst_vma, dst_addr, false); - lru_cache_add_active_or_unevictable(page, dst_vma); + lru_cache_add_inactive_or_unevictable(page, dst_vma); set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); diff --git a/mm/vmscan.c b/mm/vmscan.c index e34fc04b7045..783cd7fdc61a 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -998,8 +998,6 @@ static enum page_references page_check_references(struct page *page, return PAGEREF_RECLAIM; if (referenced_ptes) { - if (PageSwapBacked(page)) - return PAGEREF_ACTIVATE; /* * All mapped pages start out with page table * references from the instantiating fault, so we need @@ -1022,7 +1020,7 @@ static enum page_references page_check_references(struct page *page, /* * Activate file-backed executable pages after first usage. */ - if (vm_flags & VM_EXEC) + if ((vm_flags & VM_EXEC) && !PageSwapBacked(page)) return PAGEREF_ACTIVATE; return PAGEREF_KEEP; -- cgit v1.2.3 From 170b04b7ae49634df103810dad67b22cf8a99aa6 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 11 Aug 2020 18:30:43 -0700 Subject: mm/workingset: prepare the workingset detection infrastructure for anon LRU To prepare the workingset detection for anon LRU, this patch splits workingset event counters for refault, activate and restore into anon and file variants, as well as the refaults counter in struct lruvec. 
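On a kernel with this change, the split counters show up in /proc/vmstat under the names added to vmstat_text below. A minimal sketch of how a monitoring tool might read them (illustrative only):

#include <stdio.h>
#include <string.h>

int main(void)
{
        static const char *keys[] = {
                "workingset_refault_anon", "workingset_refault_file",
                "workingset_activate_anon", "workingset_activate_file",
                "workingset_restore_anon", "workingset_restore_file",
        };
        char name[64];
        unsigned long long val;
        unsigned int i;
        FILE *f = fopen("/proc/vmstat", "r");

        if (!f) {
                perror("/proc/vmstat");
                return 1;
        }
        while (fscanf(f, "%63s %llu", name, &val) == 2)
                for (i = 0; i < sizeof(keys) / sizeof(keys[0]); i++)
                        if (!strcmp(name, keys[i]))
                                printf("%-26s %llu\n", name, val);
        fclose(f);
        return 0;
}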
Signed-off-by: Joonsoo Kim Signed-off-by: Andrew Morton Acked-by: Johannes Weiner Acked-by: Vlastimil Babka Cc: Hugh Dickins Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Link: http://lkml.kernel.org/r/1595490560-15117-4-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 16 +++++++++++----- mm/memcontrol.c | 16 +++++++++++----- mm/vmscan.c | 15 ++++++++++----- mm/vmstat.c | 9 ++++++--- mm/workingset.c | 8 +++++--- 5 files changed, 43 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 635a96cd9b1f..efbd95dd3bbf 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -173,9 +173,15 @@ enum node_stat_item { NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ WORKINGSET_NODES, - WORKINGSET_REFAULT, - WORKINGSET_ACTIVATE, - WORKINGSET_RESTORE, + WORKINGSET_REFAULT_BASE, + WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE, + WORKINGSET_REFAULT_FILE, + WORKINGSET_ACTIVATE_BASE, + WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE, + WORKINGSET_ACTIVATE_FILE, + WORKINGSET_RESTORE_BASE, + WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE, + WORKINGSET_RESTORE_FILE, WORKINGSET_NODERECLAIM, NR_ANON_MAPPED, /* Mapped anonymous pages */ NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. @@ -277,8 +283,8 @@ struct lruvec { unsigned long file_cost; /* Non-resident age, driven by LRU movement */ atomic_long_t nonresident_age; - /* Refaults at the time of last reclaim cycle */ - unsigned long refaults; + /* Refaults at the time of last reclaim cycle, anon=0, file=1 */ + unsigned long refaults[2]; /* Various lruvec state flags (enum lruvec_flags) */ unsigned long flags; #ifdef CONFIG_MEMCG diff --git a/mm/memcontrol.c b/mm/memcontrol.c index f1fd265b9f9e..c6c579217855 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1530,12 +1530,18 @@ static char *memory_stat_format(struct mem_cgroup *memcg) seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT), memcg_events(memcg, PGMAJFAULT)); - seq_buf_printf(&s, "workingset_refault %lu\n", - memcg_page_state(memcg, WORKINGSET_REFAULT)); - seq_buf_printf(&s, "workingset_activate %lu\n", - memcg_page_state(memcg, WORKINGSET_ACTIVATE)); + seq_buf_printf(&s, "workingset_refault_anon %lu\n", + memcg_page_state(memcg, WORKINGSET_REFAULT_ANON)); + seq_buf_printf(&s, "workingset_refault_file %lu\n", + memcg_page_state(memcg, WORKINGSET_REFAULT_FILE)); + seq_buf_printf(&s, "workingset_activate_anon %lu\n", + memcg_page_state(memcg, WORKINGSET_ACTIVATE_ANON)); + seq_buf_printf(&s, "workingset_activate_file %lu\n", + memcg_page_state(memcg, WORKINGSET_ACTIVATE_FILE)); seq_buf_printf(&s, "workingset_restore %lu\n", - memcg_page_state(memcg, WORKINGSET_RESTORE)); + memcg_page_state(memcg, WORKINGSET_RESTORE_ANON)); + seq_buf_printf(&s, "workingset_restore %lu\n", + memcg_page_state(memcg, WORKINGSET_RESTORE_FILE)); seq_buf_printf(&s, "workingset_nodereclaim %lu\n", memcg_page_state(memcg, WORKINGSET_NODERECLAIM)); diff --git a/mm/vmscan.c b/mm/vmscan.c index 783cd7fdc61a..017f323318a3 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2683,7 +2683,10 @@ again: if (!sc->force_deactivate) { unsigned long refaults; - if (inactive_is_low(target_lruvec, LRU_INACTIVE_ANON)) + refaults = lruvec_page_state(target_lruvec, + WORKINGSET_ACTIVATE_ANON); + if (refaults != target_lruvec->refaults[0] || + inactive_is_low(target_lruvec, LRU_INACTIVE_ANON)) 
sc->may_deactivate |= DEACTIVATE_ANON; else sc->may_deactivate &= ~DEACTIVATE_ANON; @@ -2694,8 +2697,8 @@ again: * rid of any stale active pages quickly. */ refaults = lruvec_page_state(target_lruvec, - WORKINGSET_ACTIVATE); - if (refaults != target_lruvec->refaults || + WORKINGSET_ACTIVATE_FILE); + if (refaults != target_lruvec->refaults[1] || inactive_is_low(target_lruvec, LRU_INACTIVE_FILE)) sc->may_deactivate |= DEACTIVATE_FILE; else @@ -2972,8 +2975,10 @@ static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat) unsigned long refaults; target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat); - refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE); - target_lruvec->refaults = refaults; + refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON); + target_lruvec->refaults[0] = refaults; + refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_FILE); + target_lruvec->refaults[1] = refaults; } /* diff --git a/mm/vmstat.c b/mm/vmstat.c index 2b866cbab11d..fef03463a0cf 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1167,9 +1167,12 @@ const char * const vmstat_text[] = { "nr_isolated_anon", "nr_isolated_file", "workingset_nodes", - "workingset_refault", - "workingset_activate", - "workingset_restore", + "workingset_refault_anon", + "workingset_refault_file", + "workingset_activate_anon", + "workingset_activate_file", + "workingset_restore_anon", + "workingset_restore_file", "workingset_nodereclaim", "nr_anon_pages", "nr_mapped", diff --git a/mm/workingset.c b/mm/workingset.c index b199726924dd..941bbaa6c262 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -6,6 +6,7 @@ */ #include +#include #include #include #include @@ -280,6 +281,7 @@ void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg) */ void workingset_refault(struct page *page, void *shadow) { + bool file = page_is_file_lru(page); struct mem_cgroup *eviction_memcg; struct lruvec *eviction_lruvec; unsigned long refault_distance; @@ -346,7 +348,7 @@ void workingset_refault(struct page *page, void *shadow) memcg = page_memcg(page); lruvec = mem_cgroup_lruvec(memcg, pgdat); - inc_lruvec_state(lruvec, WORKINGSET_REFAULT); + inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file); /* * Compare the distance to the existing workingset size. We @@ -366,7 +368,7 @@ void workingset_refault(struct page *page, void *shadow) SetPageActive(page); workingset_age_nonresident(lruvec, hpage_nr_pages(page)); - inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE); + inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file); /* Page was active prior to eviction */ if (workingset) { @@ -375,7 +377,7 @@ void workingset_refault(struct page *page, void *shadow) spin_lock_irq(&page_pgdat(page)->lru_lock); lru_note_cost_page(page); spin_unlock_irq(&page_pgdat(page)->lru_lock); - inc_lruvec_state(lruvec, WORKINGSET_RESTORE); + inc_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file); } out: rcu_read_unlock(); -- cgit v1.2.3 From 3852f6768ede542ed48b9077bedf482c7ecb6327 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 11 Aug 2020 18:30:47 -0700 Subject: mm/swapcache: support to handle the shadow entries Workingset detection for anonymous page will be implemented in the following patch and it requires to store the shadow entries into the swapcache. This patch implements an infrastructure to store the shadow entry in the swapcache. 
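The shadow entries stored here are XArray value entries: pointer-sized words with the low bit set, so they can be told apart from real page pointers in the same slots. A toy standalone illustration of that convention (the payload here is an arbitrary stand-in, not the real shadow format, which packs eviction age, memcg and node information):

#include <stdio.h>
#include <stdint.h>

/* tag an integer payload so it cannot be mistaken for an aligned pointer */
static void *mk_shadow(unsigned long eviction_age)
{
        return (void *)((eviction_age << 1) | 1UL);
}

static int is_shadow(void *entry)
{
        return (uintptr_t)entry & 1UL;
}

static unsigned long shadow_age(void *entry)
{
        return (uintptr_t)entry >> 1;
}

int main(void)
{
        void *slot = mk_shadow(12345);  /* page evicted, shadow left behind */

        if (is_shadow(slot))
                printf("slot holds a shadow entry, eviction age %lu\n",
                       shadow_age(slot));
        return 0;
}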
Signed-off-by: Joonsoo Kim Signed-off-by: Andrew Morton Acked-by: Johannes Weiner Cc: Hugh Dickins Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Vlastimil Babka Link: http://lkml.kernel.org/r/1595490560-15117-5-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Linus Torvalds --- include/linux/swap.h | 17 ++++++++++++---- mm/shmem.c | 3 ++- mm/swap_state.c | 57 ++++++++++++++++++++++++++++++++++++++++++++++------ mm/swapfile.c | 2 ++ mm/vmscan.c | 2 +- 5 files changed, 69 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/linux/swap.h b/include/linux/swap.h index 51ec9cdb92c0..8a4c592698a9 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -414,9 +414,13 @@ extern struct address_space *swapper_spaces[]; extern unsigned long total_swapcache_pages(void); extern void show_swap_cache_info(void); extern int add_to_swap(struct page *page); -extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t); -extern void __delete_from_swap_cache(struct page *, swp_entry_t entry); +extern int add_to_swap_cache(struct page *page, swp_entry_t entry, + gfp_t gfp, void **shadowp); +extern void __delete_from_swap_cache(struct page *page, + swp_entry_t entry, void *shadow); extern void delete_from_swap_cache(struct page *); +extern void clear_shadow_from_swap_cache(int type, unsigned long begin, + unsigned long end); extern void free_page_and_swap_cache(struct page *); extern void free_pages_and_swap_cache(struct page **, int); extern struct page *lookup_swap_cache(swp_entry_t entry, @@ -570,13 +574,13 @@ static inline int add_to_swap(struct page *page) } static inline int add_to_swap_cache(struct page *page, swp_entry_t entry, - gfp_t gfp_mask) + gfp_t gfp_mask, void **shadowp) { return -1; } static inline void __delete_from_swap_cache(struct page *page, - swp_entry_t entry) + swp_entry_t entry, void *shadow) { } @@ -584,6 +588,11 @@ static inline void delete_from_swap_cache(struct page *page) { } +static inline void clear_shadow_from_swap_cache(int type, unsigned long begin, + unsigned long end) +{ +} + static inline int page_swapcount(struct page *page) { return 0; diff --git a/mm/shmem.c b/mm/shmem.c index eb6b36d89722..49bde088a7ea 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1434,7 +1434,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) list_add(&info->swaplist, &shmem_swaplist); if (add_to_swap_cache(page, swap, - __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN) == 0) { + __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN, + NULL) == 0) { spin_lock_irq(&info->lock); shmem_recalc_inode(inode); info->swapped++; diff --git a/mm/swap_state.c b/mm/swap_state.c index e82f4f8b1f63..a29b33c7c236 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -110,12 +110,14 @@ void show_swap_cache_info(void) * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space, * but sets SwapCache flag and private instead of mapping and index. 
*/ -int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp) +int add_to_swap_cache(struct page *page, swp_entry_t entry, + gfp_t gfp, void **shadowp) { struct address_space *address_space = swap_address_space(entry); pgoff_t idx = swp_offset(entry); XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page)); unsigned long i, nr = hpage_nr_pages(page); + void *old; VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(PageSwapCache(page), page); @@ -125,16 +127,25 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp) SetPageSwapCache(page); do { + unsigned long nr_shadows = 0; + xas_lock_irq(&xas); xas_create_range(&xas); if (xas_error(&xas)) goto unlock; for (i = 0; i < nr; i++) { VM_BUG_ON_PAGE(xas.xa_index != idx + i, page); + old = xas_load(&xas); + if (xa_is_value(old)) { + nr_shadows++; + if (shadowp) + *shadowp = old; + } set_page_private(page + i, entry.val + i); xas_store(&xas, page); xas_next(&xas); } + address_space->nrexceptional -= nr_shadows; address_space->nrpages += nr; __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr); ADD_CACHE_INFO(add_total, nr); @@ -154,7 +165,8 @@ unlock: * This must be called only on pages that have * been verified to be in the swap cache. */ -void __delete_from_swap_cache(struct page *page, swp_entry_t entry) +void __delete_from_swap_cache(struct page *page, + swp_entry_t entry, void *shadow) { struct address_space *address_space = swap_address_space(entry); int i, nr = hpage_nr_pages(page); @@ -166,12 +178,14 @@ void __delete_from_swap_cache(struct page *page, swp_entry_t entry) VM_BUG_ON_PAGE(PageWriteback(page), page); for (i = 0; i < nr; i++) { - void *entry = xas_store(&xas, NULL); + void *entry = xas_store(&xas, shadow); VM_BUG_ON_PAGE(entry != page, entry); set_page_private(page + i, 0); xas_next(&xas); } ClearPageSwapCache(page); + if (shadow) + address_space->nrexceptional += nr; address_space->nrpages -= nr; __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr); ADD_CACHE_INFO(del_total, nr); @@ -208,7 +222,7 @@ int add_to_swap(struct page *page) * Add it to the swap cache. */ err = add_to_swap_cache(page, entry, - __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN); + __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL); if (err) /* * add_to_swap_cache() doesn't return -EEXIST, so we can safely @@ -246,13 +260,44 @@ void delete_from_swap_cache(struct page *page) struct address_space *address_space = swap_address_space(entry); xa_lock_irq(&address_space->i_pages); - __delete_from_swap_cache(page, entry); + __delete_from_swap_cache(page, entry, NULL); xa_unlock_irq(&address_space->i_pages); put_swap_page(page, entry); page_ref_sub(page, hpage_nr_pages(page)); } +void clear_shadow_from_swap_cache(int type, unsigned long begin, + unsigned long end) +{ + unsigned long curr = begin; + void *old; + + for (;;) { + unsigned long nr_shadows = 0; + swp_entry_t entry = swp_entry(type, curr); + struct address_space *address_space = swap_address_space(entry); + XA_STATE(xas, &address_space->i_pages, curr); + + xa_lock_irq(&address_space->i_pages); + xas_for_each(&xas, old, end) { + if (!xa_is_value(old)) + continue; + xas_store(&xas, NULL); + nr_shadows++; + } + address_space->nrexceptional -= nr_shadows; + xa_unlock_irq(&address_space->i_pages); + + /* search the next swapcache until we meet end */ + curr >>= SWAP_ADDRESS_SPACE_SHIFT; + curr++; + curr <<= SWAP_ADDRESS_SPACE_SHIFT; + if (curr > end) + break; + } +} + /* * If we are the only user, then try to free up the swap cache. 
* @@ -429,7 +474,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, __SetPageSwapBacked(page); /* May fail (-ENOMEM) if XArray node allocation failed. */ - if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK)) { + if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, NULL)) { put_swap_page(page, entry); goto fail_unlock; } diff --git a/mm/swapfile.c b/mm/swapfile.c index 82183432fdd0..e653eea1eb88 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -696,6 +696,7 @@ static void add_to_avail_list(struct swap_info_struct *p) static void swap_range_free(struct swap_info_struct *si, unsigned long offset, unsigned int nr_entries) { + unsigned long begin = offset; unsigned long end = offset + nr_entries - 1; void (*swap_slot_free_notify)(struct block_device *, unsigned long); @@ -721,6 +722,7 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset, swap_slot_free_notify(si->bdev, offset); offset++; } + clear_shadow_from_swap_cache(si->type, begin, end); } static void set_cluster_next(struct swap_info_struct *si, unsigned long next) diff --git a/mm/vmscan.c b/mm/vmscan.c index 017f323318a3..e84c4dd08c4e 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -896,7 +896,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page, if (PageSwapCache(page)) { swp_entry_t swap = { .val = page_private(page) }; mem_cgroup_swapout(page, swap); - __delete_from_swap_cache(page, swap); + __delete_from_swap_cache(page, swap, NULL); xa_unlock_irqrestore(&mapping->i_pages, flags); put_swap_page(page, swap); workingset_eviction(page, target_memcg); -- cgit v1.2.3 From aae466b0052e1888edd1d7f473d4310d64936196 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 11 Aug 2020 18:30:50 -0700 Subject: mm/swap: implement workingset detection for anonymous LRU This patch implements workingset detection for anonymous LRU. All the infrastructure is implemented by the previous patches so this patch just activates the workingset detection by installing/retrieving the shadow entry and adding refault calculation. 
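With detection enabled, the decision that matters is the comparison against the competing working set at the end of workingset_refault() (see the mm/workingset.c hunk below). A plain C restatement of that test, for illustration only (the struct and the sample numbers are made up; the kernel reads these sizes from the lruvec counters):

#include <stdio.h>
#include <stdbool.h>

struct lruvec_sizes {
        unsigned long active_file, inactive_file;
        unsigned long active_anon, inactive_anon;
};

/* activate a refaulting page only if its refault distance fits within
 * the memory the competing working set could plausibly give up */
static bool should_activate(const struct lruvec_sizes *s, bool file,
                            bool have_swap, unsigned long refault_distance)
{
        unsigned long workingset_size = s->active_file;

        if (!file)
                workingset_size += s->inactive_file;
        if (have_swap) {
                workingset_size += s->active_anon;
                if (file)
                        workingset_size += s->inactive_anon;
        }
        return refault_distance <= workingset_size;
}

int main(void)
{
        struct lruvec_sizes s = {
                .active_file = 1000, .inactive_file = 500,
                .active_anon = 2000, .inactive_anon = 800,
        };

        printf("anon refault, distance 3000, swap on:  %d\n",
               should_activate(&s, false, true, 3000));
        printf("file refault, distance 3000, swap off: %d\n",
               should_activate(&s, true, false, 3000));
        return 0;
}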
Signed-off-by: Joonsoo Kim Signed-off-by: Andrew Morton Acked-by: Johannes Weiner Acked-by: Vlastimil Babka Cc: Hugh Dickins Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Link: http://lkml.kernel.org/r/1595490560-15117-6-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Linus Torvalds --- include/linux/swap.h | 6 ++++++ mm/memory.c | 11 ++++------- mm/swap_state.c | 23 ++++++++++++++++++----- mm/vmscan.c | 7 ++++--- mm/workingset.c | 15 +++++++++++---- 5 files changed, 43 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/include/linux/swap.h b/include/linux/swap.h index 8a4c592698a9..661046994db4 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -414,6 +414,7 @@ extern struct address_space *swapper_spaces[]; extern unsigned long total_swapcache_pages(void); extern void show_swap_cache_info(void); extern int add_to_swap(struct page *page); +extern void *get_shadow_from_swap_cache(swp_entry_t entry); extern int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp, void **shadowp); extern void __delete_from_swap_cache(struct page *page, @@ -573,6 +574,11 @@ static inline int add_to_swap(struct page *page) return 0; } +static inline void *get_shadow_from_swap_cache(swp_entry_t entry) +{ + return NULL; +} + static inline int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask, void **shadowp) { diff --git a/mm/memory.c b/mm/memory.c index 6fe8b5b22c57..de311fc7639e 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3098,6 +3098,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) int locked; int exclusive = 0; vm_fault_t ret = 0; + void *shadow = NULL; if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) goto out; @@ -3149,13 +3150,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) goto out_page; } - /* - * XXX: Move to lru_cache_add() when it - * supports new vs putback - */ - spin_lock_irq(&page_pgdat(page)->lru_lock); - lru_note_cost_page(page); - spin_unlock_irq(&page_pgdat(page)->lru_lock); + shadow = get_shadow_from_swap_cache(entry); + if (shadow) + workingset_refault(page, shadow); lru_cache_add(page); swap_readpage(page, true); diff --git a/mm/swap_state.c b/mm/swap_state.c index a29b33c7c236..b73aabdfd35a 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -106,6 +106,20 @@ void show_swap_cache_info(void) printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10)); } +void *get_shadow_from_swap_cache(swp_entry_t entry) +{ + struct address_space *address_space = swap_address_space(entry); + pgoff_t idx = swp_offset(entry); + struct page *page; + + page = find_get_entry(address_space, idx); + if (xa_is_value(page)) + return page; + if (page) + put_page(page); + return NULL; +} + /* * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space, * but sets SwapCache flag and private instead of mapping and index. @@ -406,6 +420,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, { struct swap_info_struct *si; struct page *page; + void *shadow = NULL; *new_page_allocated = false; @@ -474,7 +489,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, __SetPageSwapBacked(page); /* May fail (-ENOMEM) if XArray node allocation failed. 
*/ - if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, NULL)) { + if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) { put_swap_page(page, entry); goto fail_unlock; } @@ -484,10 +499,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, goto fail_unlock; } - /* XXX: Move to lru_cache_add() when it supports new vs putback */ - spin_lock_irq(&page_pgdat(page)->lru_lock); - lru_note_cost_page(page); - spin_unlock_irq(&page_pgdat(page)->lru_lock); + if (shadow) + workingset_refault(page, shadow); /* Caller will initiate read into locked page */ SetPageWorkingset(page); diff --git a/mm/vmscan.c b/mm/vmscan.c index e84c4dd08c4e..66d73fea80e4 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -854,6 +854,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page, { unsigned long flags; int refcount; + void *shadow = NULL; BUG_ON(!PageLocked(page)); BUG_ON(mapping != page_mapping(page)); @@ -896,13 +897,13 @@ static int __remove_mapping(struct address_space *mapping, struct page *page, if (PageSwapCache(page)) { swp_entry_t swap = { .val = page_private(page) }; mem_cgroup_swapout(page, swap); - __delete_from_swap_cache(page, swap, NULL); + if (reclaimed && !mapping_exiting(mapping)) + shadow = workingset_eviction(page, target_memcg); + __delete_from_swap_cache(page, swap, shadow); xa_unlock_irqrestore(&mapping->i_pages, flags); put_swap_page(page, swap); - workingset_eviction(page, target_memcg); } else { void (*freepage)(struct page *); - void *shadow = NULL; freepage = mapping->a_ops->freepage; /* diff --git a/mm/workingset.c b/mm/workingset.c index 941bbaa6c262..8cbe4e3cbe5c 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -353,15 +353,22 @@ void workingset_refault(struct page *page, void *shadow) /* * Compare the distance to the existing workingset size. We * don't activate pages that couldn't stay resident even if - * all the memory was available to the page cache. Whether - * cache can compete with anon or not depends on having swap. + * all the memory was available to the workingset. Whether + * workingset competition needs to consider anon or not depends + * on having swap. */ workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE); - if (mem_cgroup_get_nr_swap_pages(memcg) > 0) { + if (!file) { workingset_size += lruvec_page_state(eviction_lruvec, - NR_INACTIVE_ANON); + NR_INACTIVE_FILE); + } + if (mem_cgroup_get_nr_swap_pages(memcg) > 0) { workingset_size += lruvec_page_state(eviction_lruvec, NR_ACTIVE_ANON); + if (file) { + workingset_size += lruvec_page_state(eviction_lruvec, + NR_INACTIVE_ANON); + } } if (refault_distance > workingset_size) goto out; -- cgit v1.2.3 From facdaa917c4d5a376d09d25865f5a863f906234a Mon Sep 17 00:00:00 2001 From: Nitin Gupta Date: Tue, 11 Aug 2020 18:31:00 -0700 Subject: mm: proactive compaction MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For some applications, we need to allocate almost all memory as hugepages. However, on a running system, higher-order allocations can fail if the memory is fragmented. Linux kernel currently does on-demand compaction as we request more hugepages, but this style of compaction incurs very high latency. Experiments with one-time full memory compaction (followed by hugepage allocations) show that kernel is able to restore a highly fragmented memory state to a fairly compacted memory state within <1 sec for a 32G system. 
Such data suggests that a more proactive compaction can help us allocate a large fraction of memory as hugepages keeping allocation latencies low. For a more proactive compaction, the approach taken here is to define a new sysctl called 'vm.compaction_proactiveness' which dictates bounds for external fragmentation which kcompactd tries to maintain. The tunable takes a value in range [0, 100], with a default of 20. Note that a previous version of this patch [1] was found to introduce too many tunables (per-order extfrag{low, high}), but this one reduces them to just one sysctl. Also, the new tunable is an opaque value instead of asking for specific bounds of "external fragmentation", which would have been difficult to estimate. The internal interpretation of this opaque value allows for future fine-tuning. Currently, we use a simple translation from this tunable to [low, high] "fragmentation score" thresholds (low=100-proactiveness, high=low+10%). The score for a node is defined as weighted mean of per-zone external fragmentation. A zone's present_pages determines its weight. To periodically check per-node score, we reuse per-node kcompactd threads, which are woken up every 500 milliseconds to check the same. If a node's score exceeds its high threshold (as derived from user-provided proactiveness value), proactive compaction is started until its score reaches its low threshold value. By default, proactiveness is set to 20, which implies threshold values of low=80 and high=90. This patch is largely based on ideas from Michal Hocko [2]. See also the LWN article [3]. Performance data ================ System: x64_64, 1T RAM, 80 CPU threads. Kernel: 5.6.0-rc3 + this patch echo madvise | sudo tee /sys/kernel/mm/transparent_hugepage/enabled echo madvise | sudo tee /sys/kernel/mm/transparent_hugepage/defrag Before starting the driver, the system was fragmented from a userspace program that allocates all memory and then for each 2M aligned section, frees 3/4 of base pages using munmap. The workload is mainly anonymous userspace pages, which are easy to move around. I intentionally avoided unmovable pages in this test to see how much latency we incur when hugepage allocations hit direct compaction. 1. Kernel hugepage allocation latencies With the system in such a fragmented state, a kernel driver then allocates as many hugepages as possible and measures allocation latency: (all latency values are in microseconds) - With vanilla 5.6.0-rc3 percentile latency –––––––––– ––––––– 5 7894 10 9496 25 12561 30 15295 40 18244 50 21229 60 27556 75 30147 80 31047 90 32859 95 33799 Total 2M hugepages allocated = 383859 (749G worth of hugepages out of 762G total free => 98% of free memory could be allocated as hugepages) - With 5.6.0-rc3 + this patch, with proactiveness=20 sysctl -w vm.compaction_proactiveness=20 percentile latency –––––––––– ––––––– 5 2 10 2 25 3 30 3 40 3 50 4 60 4 75 4 80 4 90 5 95 429 Total 2M hugepages allocated = 384105 (750G worth of hugepages out of 762G total free => 98% of free memory could be allocated as hugepages) 2. JAVA heap allocation In this test, we first fragment memory using the same method as for (1). Then, we start a Java process with a heap size set to 700G and request the heap to be allocated with THP hugepages. We also set THP to madvise to allow hugepage backing of this heap. /usr/bin/time java -Xms700G -Xmx700G -XX:+UseTransparentHugePages -XX:+AlwaysPreTouch The above command allocates 700G of Java heap using hugepages. 
- With vanilla 5.6.0-rc3 17.39user 1666.48system 27:37.89elapsed - With 5.6.0-rc3 + this patch, with proactiveness=20 8.35user 194.58system 3:19.62elapsed Elapsed time remains around 3:15, as proactiveness is further increased. Note that proactive compaction happens throughout the runtime of these workloads. The situation of one-time compaction, sufficient to supply hugepages for following allocation stream, can probably happen for more extreme proactiveness values, like 80 or 90. In the above Java workload, proactiveness is set to 20. The test starts with a node's score of 80 or higher, depending on the delay between the fragmentation step and starting the benchmark, which gives more-or-less time for the initial round of compaction. As t he benchmark consumes hugepages, node's score quickly rises above the high threshold (90) and proactive compaction starts again, which brings down the score to the low threshold level (80). Repeat. bpftrace also confirms proactive compaction running 20+ times during the runtime of this Java benchmark. kcompactd threads consume 100% of one of the CPUs while it tries to bring a node's score within thresholds. Backoff behavior ================ Above workloads produce a memory state which is easy to compact. However, if memory is filled with unmovable pages, proactive compaction should essentially back off. To test this aspect: - Created a kernel driver that allocates almost all memory as hugepages followed by freeing first 3/4 of each hugepage. - Set proactiveness=40 - Note that proactive_compact_node() is deferred maximum number of times with HPAGE_FRAG_CHECK_INTERVAL_MSEC of wait between each check (=> ~30 seconds between retries). [1] https://patchwork.kernel.org/patch/11098289/ [2] https://lore.kernel.org/linux-mm/20161230131412.GI13301@dhcp22.suse.cz/ [3] https://lwn.net/Articles/817905/ Signed-off-by: Nitin Gupta Signed-off-by: Andrew Morton Tested-by: Oleksandr Natalenko Reviewed-by: Vlastimil Babka Reviewed-by: Khalid Aziz Reviewed-by: Oleksandr Natalenko Cc: Vlastimil Babka Cc: Khalid Aziz Cc: Michal Hocko Cc: Mel Gorman Cc: Matthew Wilcox Cc: Mike Kravetz Cc: Joonsoo Kim Cc: David Rientjes Cc: Nitin Gupta Cc: Oleksandr Natalenko Link: http://lkml.kernel.org/r/20200616204527.19185-1-nigupta@nvidia.com Signed-off-by: Linus Torvalds --- Documentation/admin-guide/sysctl/vm.rst | 15 +++ include/linux/compaction.h | 2 + kernel/sysctl.c | 9 ++ mm/compaction.c | 183 +++++++++++++++++++++++++++++++- mm/internal.h | 1 + mm/vmstat.c | 18 ++++ 6 files changed, 223 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst index d997cc3c26d0..4b9d2e8e9142 100644 --- a/Documentation/admin-guide/sysctl/vm.rst +++ b/Documentation/admin-guide/sysctl/vm.rst @@ -119,6 +119,21 @@ all zones are compacted such that free memory is available in contiguous blocks where possible. This can be important for example in the allocation of huge pages although processes will also directly compact memory as required. +compaction_proactiveness +======================== + +This tunable takes a value in the range [0, 100] with a default value of +20. This tunable determines how aggressively compaction is done in the +background. Setting it to 0 disables proactive compaction. + +Note that compaction has a non-trivial system-wide impact as pages +belonging to different processes are moved around, which could also lead +to latency spikes in unsuspecting applications. 
The kernel employs +various heuristics to avoid wasting CPU cycles if it detects that +proactive compaction is not being effective. + +Be careful when setting it to extreme values like 100, as that may +cause excessive background compaction activity. compact_unevictable_allowed =========================== diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 6fa0eea3f530..7a242d46454e 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -85,11 +85,13 @@ static inline unsigned long compact_gap(unsigned int order) #ifdef CONFIG_COMPACTION extern int sysctl_compact_memory; +extern int sysctl_compaction_proactiveness; extern int sysctl_compaction_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos); extern int sysctl_extfrag_threshold; extern int sysctl_compact_unevictable_allowed; +extern int extfrag_for_order(struct zone *zone, unsigned int order); extern int fragmentation_index(struct zone *zone, unsigned int order); extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, diff --git a/kernel/sysctl.c b/kernel/sysctl.c index f785de3caac0..ab72e52f8a7b 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2851,6 +2851,15 @@ static struct ctl_table vm_table[] = { .mode = 0200, .proc_handler = sysctl_compaction_handler, }, + { + .procname = "compaction_proactiveness", + .data = &sysctl_compaction_proactiveness, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = &one_hundred, + }, { .procname = "extfrag_threshold", .data = &sysctl_extfrag_threshold, diff --git a/mm/compaction.c b/mm/compaction.c index 86375605faa9..544a98811c82 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -50,6 +50,24 @@ static inline void count_compact_events(enum vm_event_item item, long delta) #define pageblock_start_pfn(pfn) block_start_pfn(pfn, pageblock_order) #define pageblock_end_pfn(pfn) block_end_pfn(pfn, pageblock_order) +/* + * Fragmentation score check interval for proactive compaction purposes. + */ +static const int HPAGE_FRAG_CHECK_INTERVAL_MSEC = 500; + +/* + * Page order with-respect-to which proactive compaction + * calculates external fragmentation, which is used as + * the "fragmentation score" of a node/zone. + */ +#if defined CONFIG_TRANSPARENT_HUGEPAGE +#define COMPACTION_HPAGE_ORDER HPAGE_PMD_ORDER +#elif defined HUGETLB_PAGE_ORDER +#define COMPACTION_HPAGE_ORDER HUGETLB_PAGE_ORDER +#else +#define COMPACTION_HPAGE_ORDER (PMD_SHIFT - PAGE_SHIFT) +#endif + static unsigned long release_freepages(struct list_head *freelist) { struct page *page, *next; @@ -1857,6 +1875,76 @@ static inline bool is_via_compact_memory(int order) return order == -1; } +static bool kswapd_is_running(pg_data_t *pgdat) +{ + return pgdat->kswapd && (pgdat->kswapd->state == TASK_RUNNING); +} + +/* + * A zone's fragmentation score is the external fragmentation wrt to the + * COMPACTION_HPAGE_ORDER scaled by the zone's size. It returns a value + * in the range [0, 100]. + * + * The scaling factor ensures that proactive compaction focuses on larger + * zones like ZONE_NORMAL, rather than smaller, specialized zones like + * ZONE_DMA32. For smaller zones, the score value remains close to zero, + * and thus never exceeds the high threshold for proactive compaction. 
+ */ +static int fragmentation_score_zone(struct zone *zone) +{ + unsigned long score; + + score = zone->present_pages * + extfrag_for_order(zone, COMPACTION_HPAGE_ORDER); + return div64_ul(score, zone->zone_pgdat->node_present_pages + 1); +} + +/* + * The per-node proactive (background) compaction process is started by its + * corresponding kcompactd thread when the node's fragmentation score + * exceeds the high threshold. The compaction process remains active till + * the node's score falls below the low threshold, or one of the back-off + * conditions is met. + */ +static int fragmentation_score_node(pg_data_t *pgdat) +{ + unsigned long score = 0; + int zoneid; + + for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { + struct zone *zone; + + zone = &pgdat->node_zones[zoneid]; + score += fragmentation_score_zone(zone); + } + + return score; +} + +static int fragmentation_score_wmark(pg_data_t *pgdat, bool low) +{ + int wmark_low; + + /* + * Cap the low watermak to avoid excessive compaction + * activity in case a user sets the proactivess tunable + * close to 100 (maximum). + */ + wmark_low = max(100 - sysctl_compaction_proactiveness, 5); + return low ? wmark_low : min(wmark_low + 10, 100); +} + +static bool should_proactive_compact_node(pg_data_t *pgdat) +{ + int wmark_high; + + if (!sysctl_compaction_proactiveness || kswapd_is_running(pgdat)) + return false; + + wmark_high = fragmentation_score_wmark(pgdat, false); + return fragmentation_score_node(pgdat) > wmark_high; +} + static enum compact_result __compact_finished(struct compact_control *cc) { unsigned int order; @@ -1883,6 +1971,25 @@ static enum compact_result __compact_finished(struct compact_control *cc) return COMPACT_PARTIAL_SKIPPED; } + if (cc->proactive_compaction) { + int score, wmark_low; + pg_data_t *pgdat; + + pgdat = cc->zone->zone_pgdat; + if (kswapd_is_running(pgdat)) + return COMPACT_PARTIAL_SKIPPED; + + score = fragmentation_score_zone(cc->zone); + wmark_low = fragmentation_score_wmark(pgdat, true); + + if (score > wmark_low) + ret = COMPACT_CONTINUE; + else + ret = COMPACT_SUCCESS; + + goto out; + } + if (is_via_compact_memory(cc->order)) return COMPACT_CONTINUE; @@ -1941,6 +2048,7 @@ static enum compact_result __compact_finished(struct compact_control *cc) } } +out: if (cc->contended || fatal_signal_pending(current)) ret = COMPACT_CONTENDED; @@ -2421,6 +2529,41 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, return rc; } +/* + * Compact all zones within a node till each zone's fragmentation score + * reaches within proactive compaction thresholds (as determined by the + * proactiveness tunable). + * + * It is possible that the function returns before reaching score targets + * due to various back-off conditions, such as, contention on per-node or + * per-zone locks. 
+ */ +static void proactive_compact_node(pg_data_t *pgdat) +{ + int zoneid; + struct zone *zone; + struct compact_control cc = { + .order = -1, + .mode = MIGRATE_SYNC_LIGHT, + .ignore_skip_hint = true, + .whole_zone = true, + .gfp_mask = GFP_KERNEL, + .proactive_compaction = true, + }; + + for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { + zone = &pgdat->node_zones[zoneid]; + if (!populated_zone(zone)) + continue; + + cc.zone = zone; + + compact_zone(&cc, NULL); + + VM_BUG_ON(!list_empty(&cc.freepages)); + VM_BUG_ON(!list_empty(&cc.migratepages)); + } +} /* Compact all zones within a node */ static void compact_node(int nid) @@ -2467,6 +2610,13 @@ static void compact_nodes(void) /* The written value is actually unused, all memory is compacted */ int sysctl_compact_memory; +/* + * Tunable for proactive compaction. It determines how + * aggressively the kernel should compact memory in the + * background. It takes values in the range [0, 100]. + */ +int __read_mostly sysctl_compaction_proactiveness = 20; + /* * This is the entry point for compacting all nodes via * /proc/sys/vm/compact_memory @@ -2646,6 +2796,7 @@ static int kcompactd(void *p) { pg_data_t *pgdat = (pg_data_t*)p; struct task_struct *tsk = current; + unsigned int proactive_defer = 0; const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); @@ -2661,12 +2812,34 @@ static int kcompactd(void *p) unsigned long pflags; trace_mm_compaction_kcompactd_sleep(pgdat->node_id); - wait_event_freezable(pgdat->kcompactd_wait, - kcompactd_work_requested(pgdat)); + if (wait_event_freezable_timeout(pgdat->kcompactd_wait, + kcompactd_work_requested(pgdat), + msecs_to_jiffies(HPAGE_FRAG_CHECK_INTERVAL_MSEC))) { + + psi_memstall_enter(&pflags); + kcompactd_do_work(pgdat); + psi_memstall_leave(&pflags); + continue; + } - psi_memstall_enter(&pflags); - kcompactd_do_work(pgdat); - psi_memstall_leave(&pflags); + /* kcompactd wait timeout */ + if (should_proactive_compact_node(pgdat)) { + unsigned int prev_score, score; + + if (proactive_defer) { + proactive_defer--; + continue; + } + prev_score = fragmentation_score_node(pgdat); + proactive_compact_node(pgdat); + score = fragmentation_score_node(pgdat); + /* + * Defer proactive compaction if the fragmentation + * score did not go down i.e. no progress made. + */ + proactive_defer = score < prev_score ? + 0 : 1 << COMPACT_MAX_DEFER_SHIFT; + } } return 0; diff --git a/mm/internal.h b/mm/internal.h index 9886db20d94f..42cf0b610847 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -239,6 +239,7 @@ struct compact_control { bool no_set_skip_hint; /* Don't mark blocks for skipping */ bool ignore_block_suitable; /* Scan blocks considered unsuitable */ bool direct_compaction; /* False from kcompactd or /proc/... */ + bool proactive_compaction; /* kcompactd proactive compaction */ bool whole_zone; /* Whole zone should/has been scanned */ bool contended; /* Signal lock or sched contention */ bool rescan; /* Rescanning the same pageblock */ diff --git a/mm/vmstat.c b/mm/vmstat.c index fef03463a0cf..f183aa37994e 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1096,6 +1096,24 @@ static int __fragmentation_index(unsigned int order, struct contig_page_info *in return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total); } +/* + * Calculates external fragmentation within a zone wrt the given order. + * It is defined as the percentage of pages found in blocks of size + * less than 1 << order. It returns values in range [0, 100]. 
+ */ +int extfrag_for_order(struct zone *zone, unsigned int order) +{ + struct contig_page_info info; + + fill_contig_page_info(zone, order, &info); + if (info.free_pages == 0) + return 0; + + return div_u64((info.free_pages - + (info.free_blocks_suitable << order)) * 100, + info.free_pages); +} + /* Same as __fragmentation index but allocs contig_page_info on stack */ int fragmentation_index(struct zone *zone, unsigned int order) { -- cgit v1.2.3 From d34c0a7599ea8c301bc471dfa1eb2bf2db6752d1 Mon Sep 17 00:00:00 2001 From: Nitin Gupta Date: Tue, 11 Aug 2020 18:31:07 -0700 Subject: mm: use unsigned types for fragmentation score Proactive compaction uses per-node/zone "fragmentation score" which is always in range [0, 100], so use unsigned type of these scores as well as for related constants. Signed-off-by: Nitin Gupta Signed-off-by: Andrew Morton Reviewed-by: Baoquan He Cc: Luis Chamberlain Cc: Kees Cook Cc: Iurii Zaikin Cc: Vlastimil Babka Cc: Joonsoo Kim Link: http://lkml.kernel.org/r/20200618010319.13159-1-nigupta@nvidia.com Signed-off-by: Linus Torvalds --- include/linux/compaction.h | 4 ++-- kernel/sysctl.c | 2 +- mm/compaction.c | 18 +++++++++--------- mm/vmstat.c | 2 +- 4 files changed, 13 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 7a242d46454e..25a521d299c1 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -85,13 +85,13 @@ static inline unsigned long compact_gap(unsigned int order) #ifdef CONFIG_COMPACTION extern int sysctl_compact_memory; -extern int sysctl_compaction_proactiveness; +extern unsigned int sysctl_compaction_proactiveness; extern int sysctl_compaction_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos); extern int sysctl_extfrag_threshold; extern int sysctl_compact_unevictable_allowed; -extern int extfrag_for_order(struct zone *zone, unsigned int order); +extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order); extern int fragmentation_index(struct zone *zone, unsigned int order); extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, diff --git a/kernel/sysctl.c b/kernel/sysctl.c index ab72e52f8a7b..287862f91717 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2854,7 +2854,7 @@ static struct ctl_table vm_table[] = { { .procname = "compaction_proactiveness", .data = &sysctl_compaction_proactiveness, - .maxlen = sizeof(int), + .maxlen = sizeof(sysctl_compaction_proactiveness), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO, diff --git a/mm/compaction.c b/mm/compaction.c index ed8ea1511634..b7d433f1706a 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -53,7 +53,7 @@ static inline void count_compact_events(enum vm_event_item item, long delta) /* * Fragmentation score check interval for proactive compaction purposes. */ -static const int HPAGE_FRAG_CHECK_INTERVAL_MSEC = 500; +static const unsigned int HPAGE_FRAG_CHECK_INTERVAL_MSEC = 500; /* * Page order with-respect-to which proactive compaction @@ -1890,7 +1890,7 @@ static bool kswapd_is_running(pg_data_t *pgdat) * ZONE_DMA32. For smaller zones, the score value remains close to zero, * and thus never exceeds the high threshold for proactive compaction. 
*/ -static int fragmentation_score_zone(struct zone *zone) +static unsigned int fragmentation_score_zone(struct zone *zone) { unsigned long score; @@ -1906,9 +1906,9 @@ static int fragmentation_score_zone(struct zone *zone) * the node's score falls below the low threshold, or one of the back-off * conditions is met. */ -static int fragmentation_score_node(pg_data_t *pgdat) +static unsigned int fragmentation_score_node(pg_data_t *pgdat) { - unsigned long score = 0; + unsigned int score = 0; int zoneid; for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { @@ -1921,17 +1921,17 @@ static int fragmentation_score_node(pg_data_t *pgdat) return score; } -static int fragmentation_score_wmark(pg_data_t *pgdat, bool low) +static unsigned int fragmentation_score_wmark(pg_data_t *pgdat, bool low) { - int wmark_low; + unsigned int wmark_low; /* * Cap the low watermak to avoid excessive compaction * activity in case a user sets the proactivess tunable * close to 100 (maximum). */ - wmark_low = max(100 - sysctl_compaction_proactiveness, 5); - return low ? wmark_low : min(wmark_low + 10, 100); + wmark_low = max(100U - sysctl_compaction_proactiveness, 5U); + return low ? wmark_low : min(wmark_low + 10, 100U); } static bool should_proactive_compact_node(pg_data_t *pgdat) @@ -2615,7 +2615,7 @@ int sysctl_compact_memory; * aggressively the kernel should compact memory in the * background. It takes values in the range [0, 100]. */ -int __read_mostly sysctl_compaction_proactiveness = 20; +unsigned int __read_mostly sysctl_compaction_proactiveness = 20; /* * This is the entry point for compacting all nodes via diff --git a/mm/vmstat.c b/mm/vmstat.c index f183aa37994e..3bb034c99887 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1101,7 +1101,7 @@ static int __fragmentation_index(unsigned int order, struct contig_page_info *in * It is defined as the percentage of pages found in blocks of size * less than 1 << order. It returns values in range [0, 100]. */ -int extfrag_for_order(struct zone *zone, unsigned int order) +unsigned int extfrag_for_order(struct zone *zone, unsigned int order) { struct contig_page_info info; -- cgit v1.2.3 From 860b32729a21e0565585a9e2ecea2e5244d65acd Mon Sep 17 00:00:00 2001 From: Alex Shi Date: Tue, 11 Aug 2020 18:31:10 -0700 Subject: mm/compaction: correct the comments of compact_defer_shift There is no compact_defer_limit. It should be compact_defer_shift in use. and add compact_order_failed explanation. Signed-off-by: Alex Shi Signed-off-by: Andrew Morton Reviewed-by: Alexander Duyck Link: http://lkml.kernel.org/r/3bd60e1b-a74e-050d-ade4-6e8f54e00b92@linux.alibaba.com Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 1 + mm/compaction.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index efbd95dd3bbf..8379432f4f2f 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -536,6 +536,7 @@ struct zone { * On compaction failure, 1< Date: Tue, 11 Aug 2020 18:31:19 -0700 Subject: include/linux/mempolicy.h: fix typo Change "interlave" to "interleave". 
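Looking back at the proactive compaction thresholds quoted earlier in this series: the way the vm.compaction_proactiveness tunable maps to the per-node low/high fragmentation watermarks can be shown with a small stand-alone sketch. It mirrors the fragmentation_score_wmark() arithmetic above; it is illustrative only, not kernel code, and the helper name is invented.

    /* Illustrative only: same arithmetic as fragmentation_score_wmark(). */
    #include <stdio.h>

    static unsigned int wmark(unsigned int proactiveness, int low)
    {
            /* The low watermark is capped at 5 so that a proactiveness
             * close to 100 cannot keep compaction running continuously. */
            unsigned int wmark_low = (100U - proactiveness > 5U) ?
                                     100U - proactiveness : 5U;

            if (low)
                    return wmark_low;
            return (wmark_low + 10 < 100U) ? wmark_low + 10 : 100U;
    }

    int main(void)
    {
            /* The default proactiveness of 20 gives low=80, high=90:
             * kcompactd starts proactive compaction once a node's
             * fragmentation score exceeds 90 and keeps going until the
             * score drops back to the low mark of 80. */
            printf("low=%u high=%u\n", wmark(20, 1), wmark(20, 0));
            return 0;
    }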
Signed-off-by: Yanfei Xu Signed-off-by: Andrew Morton Reviewed-by: Andrew Morton Link: http://lkml.kernel.org/r/20200810063454.9357-1-yanfei.xu@windriver.com Signed-off-by: Linus Torvalds --- include/linux/mempolicy.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 5f1648c23e29..5f1c74df264d 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -28,7 +28,7 @@ struct mm_struct; * the process policy is used. Interrupts ignore the memory policy * of the current process. * - * Locking policy for interlave: + * Locking policy for interleave: * In process context there is no locking because only the process accesses * its own state. All vma manipulation is somewhat protected by a down_read on * mmap_lock. -- cgit v1.2.3 From 9066e5cfb73cdbcdbb49e87999482ab615e9fc76 Mon Sep 17 00:00:00 2001 From: Yafang Shao Date: Tue, 11 Aug 2020 18:31:22 -0700 Subject: mm, oom: make the calculation of oom badness more accurate Recently we found an issue on our production environment that when memcg oom is triggered the oom killer doesn't chose the process with largest resident memory but chose the first scanned process. Note that all processes in this memcg have the same oom_score_adj, so the oom killer should chose the process with largest resident memory. Bellow is part of the oom info, which is enough to analyze this issue. [7516987.983223] memory: usage 16777216kB, limit 16777216kB, failcnt 52843037 [7516987.983224] memory+swap: usage 16777216kB, limit 9007199254740988kB, failcnt 0 [7516987.983225] kmem: usage 301464kB, limit 9007199254740988kB, failcnt 0 [...] [7516987.983293] [ pid ] uid tgid total_vm rss pgtables_bytes swapents oom_score_adj name [7516987.983510] [ 5740] 0 5740 257 1 32768 0 -998 pause [7516987.983574] [58804] 0 58804 4594 771 81920 0 -998 entry_point.bas [7516987.983577] [58908] 0 58908 7089 689 98304 0 -998 cron [7516987.983580] [58910] 0 58910 16235 5576 163840 0 -998 supervisord [7516987.983590] [59620] 0 59620 18074 1395 188416 0 -998 sshd [7516987.983594] [59622] 0 59622 18680 6679 188416 0 -998 python [7516987.983598] [59624] 0 59624 1859266 5161 548864 0 -998 odin-agent [7516987.983600] [59625] 0 59625 707223 9248 983040 0 -998 filebeat [7516987.983604] [59627] 0 59627 416433 64239 774144 0 -998 odin-log-agent [7516987.983607] [59631] 0 59631 180671 15012 385024 0 -998 python3 [7516987.983612] [61396] 0 61396 791287 3189 352256 0 -998 client [7516987.983615] [61641] 0 61641 1844642 29089 946176 0 -998 client [7516987.983765] [ 9236] 0 9236 2642 467 53248 0 -998 php_scanner [7516987.983911] [42898] 0 42898 15543 838 167936 0 -998 su [7516987.983915] [42900] 1000 42900 3673 867 77824 0 -998 exec_script_vr2 [7516987.983918] [42925] 1000 42925 36475 19033 335872 0 -998 python [7516987.983921] [57146] 1000 57146 3673 848 73728 0 -998 exec_script_J2p [7516987.983925] [57195] 1000 57195 186359 22958 491520 0 -998 python2 [7516987.983928] [58376] 1000 58376 275764 14402 290816 0 -998 rosmaster [7516987.983931] [58395] 1000 58395 155166 4449 245760 0 -998 rosout [7516987.983935] [58406] 1000 58406 18285584 3967322 37101568 0 -998 data_sim [7516987.984221] 
oom-kill:constraint=CONSTRAINT_MEMCG,nodemask=(null),cpuset=3aa16c9482ae3a6f6b78bda68a55d32c87c99b985e0f11331cddf05af6c4d753,mems_allowed=0-1,oom_memcg=/kubepods/podf1c273d3-9b36-11ea-b3df-246e9693c184,task_memcg=/kubepods/podf1c273d3-9b36-11ea-b3df-246e9693c184/1f246a3eeea8f70bf91141eeaf1805346a666e225f823906485ea0b6c37dfc3d,task=pause,pid=5740,uid=0 [7516987.984254] Memory cgroup out of memory: Killed process 5740 (pause) total-vm:1028kB, anon-rss:4kB, file-rss:0kB, shmem-rss:0kB [7516988.092344] oom_reaper: reaped process 5740 (pause), now anon-rss:0kB, file-rss:0kB, shmem-rss:0kB We can find that the first scanned process 5740 (pause) was killed, but its rss is only one page. That is because, when we calculate the oom badness in oom_badness(), we always ignore negative points and convert all of them to 1. Now, as the oom_score_adj of all the processes in this targeted memcg has the same value (-998), the points of these processes are all negative. As a result, the first scanned process will be killed. The oom_score_adj (-998) in this memcg is set by kubelet, because it is a Guaranteed pod, which has higher priority and should not be killed by a system oom. To fix this issue, we should make the calculation of oom badness more accurate. We can achieve this by converting chosen_points from 'unsigned long' to 'long'. [cai@lca.pw: reported an issue in the previous version] [mhocko@suse.com: fixed the issue reported by Cai] [mhocko@suse.com: add the comment in proc_oom_score()] [laoar.shao@gmail.com: v3] Link: http://lkml.kernel.org/r/1594396651-9931-1-git-send-email-laoar.shao@gmail.com Signed-off-by: Yafang Shao Signed-off-by: Andrew Morton Tested-by: Naresh Kamboju Acked-by: Michal Hocko Cc: David Rientjes Cc: Qian Cai Link: http://lkml.kernel.org/r/1594309987-9919-1-git-send-email-laoar.shao@gmail.com Signed-off-by: Linus Torvalds --- fs/proc/base.c | 11 ++++++++++- include/linux/oom.h | 4 ++-- mm/oom_kill.c | 22 ++++++++++------------ 3 files changed, 22 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/fs/proc/base.c b/fs/proc/base.c index a333caeca291..617db4e0faa0 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -551,8 +551,17 @@ static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns, { unsigned long totalpages = totalram_pages() + total_swap_pages; unsigned long points = 0; + long badness; + + badness = oom_badness(task, totalpages); + /* + * Special case OOM_SCORE_ADJ_MIN for all others scale the + * badness value into [0, 2000] range which we have been + * exporting for a long time so userspace might depend on it. + */ + if (badness != LONG_MIN) + points = (1000 + badness * 1000 / (long)totalpages) * 2 / 3; - points = oom_badness(task, totalpages) * 1000 / totalpages; seq_printf(m, "%lu\n", points); return 0; diff --git a/include/linux/oom.h b/include/linux/oom.h index c696c265f019..f022f581ac29 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -48,7 +48,7 @@ struct oom_control { /* Used by oom implementation, do not set */ unsigned long totalpages; struct task_struct *chosen; - unsigned long chosen_points; + long chosen_points; /* Used to print the constraint info.
*/ enum oom_constraint constraint; @@ -107,7 +107,7 @@ static inline vm_fault_t check_stable_address_space(struct mm_struct *mm) bool __oom_reap_task_mm(struct mm_struct *mm); -extern unsigned long oom_badness(struct task_struct *p, +long oom_badness(struct task_struct *p, unsigned long totalpages); extern bool out_of_memory(struct oom_control *oc); diff --git a/mm/oom_kill.c b/mm/oom_kill.c index d30ce75f23fb..48e0db54d838 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -196,17 +196,17 @@ static bool is_dump_unreclaim_slabs(void) * predictable as possible. The goal is to return the highest value for the * task consuming the most memory to avoid subsequent oom failures. */ -unsigned long oom_badness(struct task_struct *p, unsigned long totalpages) +long oom_badness(struct task_struct *p, unsigned long totalpages) { long points; long adj; if (oom_unkillable_task(p)) - return 0; + return LONG_MIN; p = find_lock_task_mm(p); if (!p) - return 0; + return LONG_MIN; /* * Do not even consider tasks which are explicitly marked oom @@ -218,7 +218,7 @@ unsigned long oom_badness(struct task_struct *p, unsigned long totalpages) test_bit(MMF_OOM_SKIP, &p->mm->flags) || in_vfork(p)) { task_unlock(p); - return 0; + return LONG_MIN; } /* @@ -233,11 +233,7 @@ unsigned long oom_badness(struct task_struct *p, unsigned long totalpages) adj *= totalpages / 1000; points += adj; - /* - * Never return 0 for an eligible task regardless of the root bonus and - * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here). - */ - return points > 0 ? points : 1; + return points; } static const char * const oom_constraint_text[] = { @@ -310,7 +306,7 @@ static enum oom_constraint constrained_alloc(struct oom_control *oc) static int oom_evaluate_task(struct task_struct *task, void *arg) { struct oom_control *oc = arg; - unsigned long points; + long points; if (oom_unkillable_task(task)) goto next; @@ -336,12 +332,12 @@ static int oom_evaluate_task(struct task_struct *task, void *arg) * killed first if it triggers an oom, then select it. */ if (oom_task_origin(task)) { - points = ULONG_MAX; + points = LONG_MAX; goto select; } points = oom_badness(task, oc->totalpages); - if (!points || points < oc->chosen_points) + if (points == LONG_MIN || points < oc->chosen_points) goto next; select: @@ -365,6 +361,8 @@ abort: */ static void select_bad_process(struct oom_control *oc) { + oc->chosen_points = LONG_MIN; + if (is_memcg_oom(oc)) mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc); else { -- cgit v1.2.3 From 34ae204f18519f0920bd50a644abd6fefc8dbfcf Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Tue, 11 Aug 2020 18:31:38 -0700 Subject: hugetlbfs: remove call to huge_pte_alloc without i_mmap_rwsem Commit c0d0381ade79 ("hugetlbfs: use i_mmap_rwsem for more pmd sharing synchronization") requires callers of huge_pte_alloc to hold i_mmap_rwsem in at least read mode. This is because the explicit locking in huge_pmd_share (called by huge_pte_alloc) was removed. When restructuring the code, the call to huge_pte_alloc in the else block at the beginning of hugetlb_fault was missed. Unfortunately, that else clause is exercised when there is no page table entry. This will likely lead to a call to huge_pmd_share. If huge_pmd_share thinks pmd sharing is possible, it will traverse the mapping tree (i_mmap) without holding i_mmap_rwsem. If someone else is modifying the tree, bad things such as addressing exceptions or worse could happen. Simply remove the else clause. It should have been removed previously. 
The code following the else will call huge_pte_alloc with the appropriate locking. To prevent this type of issue in the future, add routines to assert that i_mmap_rwsem is held, and call these routines in huge pmd sharing routines. Fixes: c0d0381ade79 ("hugetlbfs: use i_mmap_rwsem for more pmd sharing synchronization") Suggested-by: Matthew Wilcox Signed-off-by: Mike Kravetz Signed-off-by: Andrew Morton Cc: Michal Hocko Cc: Hugh Dickins Cc: Naoya Horiguchi Cc: "Aneesh Kumar K.V" Cc: Andrea Arcangeli Cc: "Kirill A.Shutemov" Cc: Davidlohr Bueso Cc: Prakash Sangappa Cc: Link: http://lkml.kernel.org/r/e670f327-5cf9-1959-96e4-6dc7cc30d3d5@oracle.com Signed-off-by: Linus Torvalds --- include/linux/fs.h | 10 ++++++++++ include/linux/hugetlb.h | 8 +++++--- mm/hugetlb.c | 15 +++++++-------- mm/rmap.c | 2 +- 4 files changed, 23 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/linux/fs.h b/include/linux/fs.h index 011af396aa17..7c69dd7c6160 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -518,6 +518,16 @@ static inline void i_mmap_unlock_read(struct address_space *mapping) up_read(&mapping->i_mmap_rwsem); } +static inline void i_mmap_assert_locked(struct address_space *mapping) +{ + lockdep_assert_held(&mapping->i_mmap_rwsem); +} + +static inline void i_mmap_assert_write_locked(struct address_space *mapping) +{ + lockdep_assert_held_write(&mapping->i_mmap_rwsem); +} + /* * Might pages of this file be mapped into userspace? */ diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 50650d0d01b9..a520bf26e5d8 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -164,7 +164,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz); pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz); -int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep); +int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long *addr, pte_t *ptep); void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, unsigned long *start, unsigned long *end); struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, @@ -203,8 +204,9 @@ static inline struct address_space *hugetlb_page_mapping_lock_write( return NULL; } -static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, - pte_t *ptep) +static inline int huge_pmd_unshare(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long *addr, pte_t *ptep) { return 0; } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index dffafb5bf2ed..8a18f1234e80 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3967,7 +3967,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, continue; ptl = huge_pte_lock(h, mm, ptep); - if (huge_pmd_unshare(mm, &address, ptep)) { + if (huge_pmd_unshare(mm, vma, &address, ptep)) { spin_unlock(ptl); /* * We just unmapped a page of PMDs by clearing a PUD. 
@@ -4554,10 +4554,6 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) return VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h)); - } else { - ptep = huge_pte_alloc(mm, haddr, huge_page_size(h)); - if (!ptep) - return VM_FAULT_OOM; } /* @@ -5034,7 +5030,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma, if (!ptep) continue; ptl = huge_pte_lock(h, mm, ptep); - if (huge_pmd_unshare(mm, &address, ptep)) { + if (huge_pmd_unshare(mm, vma, &address, ptep)) { pages++; spin_unlock(ptl); shared_pmd = true; @@ -5415,12 +5411,14 @@ out: * returns: 1 successfully unmapped a shared pte page * 0 the underlying pte page is not shared, or it is the last user */ -int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) +int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long *addr, pte_t *ptep) { pgd_t *pgd = pgd_offset(mm, *addr); p4d_t *p4d = p4d_offset(pgd, *addr); pud_t *pud = pud_offset(p4d, *addr); + i_mmap_assert_write_locked(vma->vm_file->f_mapping); BUG_ON(page_count(virt_to_page(ptep)) == 0); if (page_count(virt_to_page(ptep)) == 1) return 0; @@ -5438,7 +5436,8 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) return NULL; } -int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) +int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long *addr, pte_t *ptep) { return 0; } diff --git a/mm/rmap.c b/mm/rmap.c index 5fe2dedce1fc..6cce9ef06753 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1469,7 +1469,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, * do this outside rmap routines. */ VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); - if (huge_pmd_unshare(mm, &address, pvmw.pte)) { + if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) { /* * huge_pmd_unshare unmapped an entire PMD * page. There is no way of knowing exactly -- cgit v1.2.3 From 4958e4d86ecb011a81f5d80ce2023ef9f10692d8 Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Tue, 11 Aug 2020 18:31:48 -0700 Subject: mm: thp: remove debug_cow switch Since commit 3917c80280c93a7123f ("thp: change CoW semantics for anon-THP"), the CoW page fault of THP has been rewritten, debug_cow is not used anymore. So, just remove it. Signed-off-by: Yang Shi Signed-off-by: Andrew Morton Reviewed-by: Zi Yan Cc: Kirill A. Shutemov Link: http://lkml.kernel.org/r/1592270980-116062-1-git-send-email-yang.shi@linux.alibaba.com Signed-off-by: Linus Torvalds --- include/linux/huge_mm.h | 7 ------- mm/huge_memory.c | 21 --------------------- 2 files changed, 28 deletions(-) (limited to 'include') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 17c4c4975145..467302056e17 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -181,13 +181,6 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, #define transparent_hugepage_use_zero_page() \ (transparent_hugepage_flags & \ (1< Date: Tue, 11 Aug 2020 18:31:51 -0700 Subject: mm/vmstat: add events for THP migration without split Add following new vmstat events which will help in validating THP migration without split. Statistics reported through these new VM events will help in performance debugging. 1. THP_MIGRATION_SUCCESS 2. THP_MIGRATION_FAILURE 3. THP_MIGRATION_SPLIT In addition, these new events also update normal page migration statistics appropriately via PGMIGRATE_SUCCESS and PGMIGRATE_FAILURE. 
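To make the counting rules above concrete (this aside is not part of the patch): migrating a single 2MB THP backed by 4KB base pages without splitting should bump pgmigrate_success by 512 and thp_migration_success by 1, while a failed THP migration bumps pgmigrate_fail by 512 and thp_migration_fail by 1. A minimal user-space sketch for watching the counters, assuming the vmstat names added later in this patch:

    #include <stdio.h>
    #include <string.h>

    /* Dump the page/THP migration counters from /proc/vmstat. */
    int main(void)
    {
            static const char * const keys[] = {
                    "pgmigrate_success", "pgmigrate_fail",
                    "thp_migration_success", "thp_migration_fail",
                    "thp_migration_split",
            };
            char line[128];
            FILE *f = fopen("/proc/vmstat", "r");

            if (!f)
                    return 1;
            while (fgets(line, sizeof(line), f)) {
                    for (size_t i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
                            size_t len = strlen(keys[i]);

                            /* Exact-name match: key followed by a space. */
                            if (!strncmp(line, keys[i], len) && line[len] == ' ')
                                    fputs(line, stdout);
                    }
            }
            fclose(f);
            return 0;
    }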
While here, this updates current trace event 'mm_migrate_pages' to accommodate now available THP statistics. [akpm@linux-foundation.org: s/hpage_nr_pages/thp_nr_pages/] [ziy@nvidia.com: v2] Link: http://lkml.kernel.org/r/C5E3C65C-8253-4638-9D3C-71A61858BB8B@nvidia.com [anshuman.khandual@arm.com: s/thp_nr_pages/hpage_nr_pages/] Link: http://lkml.kernel.org/r/1594287583-16568-1-git-send-email-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Signed-off-by: Zi Yan Signed-off-by: Andrew Morton Reviewed-by: Daniel Jordan Cc: Hugh Dickins Cc: Matthew Wilcox Cc: Zi Yan Cc: John Hubbard Cc: Naoya Horiguchi Link: http://lkml.kernel.org/r/1594080415-27924-1-git-send-email-anshuman.khandual@arm.com Signed-off-by: Linus Torvalds --- Documentation/vm/page_migration.rst | 27 +++++++++++++++++++ include/linux/vm_event_item.h | 3 +++ include/trace/events/migrate.h | 17 +++++++++--- mm/migrate.c | 52 +++++++++++++++++++++++++++++++------ mm/vmstat.c | 3 +++ 5 files changed, 91 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/Documentation/vm/page_migration.rst b/Documentation/vm/page_migration.rst index 1d6cd7db4e43..68883ac485fa 100644 --- a/Documentation/vm/page_migration.rst +++ b/Documentation/vm/page_migration.rst @@ -253,5 +253,32 @@ which are function pointers of struct address_space_operations. PG_isolated is alias with PG_reclaim flag so driver shouldn't use the flag for own purpose. +Monitoring Migration +===================== + +The following events (counters) can be used to monitor page migration. + +1. PGMIGRATE_SUCCESS: Normal page migration success. Each count means that a + page was migrated. If the page was a non-THP page, then this counter is + increased by one. If the page was a THP, then this counter is increased by + the number of THP subpages. For example, migration of a single 2MB THP that + has 4KB-size base pages (subpages) will cause this counter to increase by + 512. + +2. PGMIGRATE_FAIL: Normal page migration failure. Same counting rules as for + _SUCCESS, above: this will be increased by the number of subpages, if it was + a THP. + +3. THP_MIGRATION_SUCCESS: A THP was migrated without being split. + +4. THP_MIGRATION_FAIL: A THP could not be migrated nor it could be split. + +5. THP_MIGRATION_SPLIT: A THP was migrated, but not as such: first, the THP had + to be split. After splitting, a migration retry was used for it's sub-pages. + +THP_MIGRATION_* events also update the appropriate PGMIGRATE_SUCCESS or +PGMIGRATE_FAIL events. For example, a THP migration failure will cause both +THP_MIGRATION_FAIL and PGMIGRATE_FAIL to increase. + Christoph Lameter, May 8, 2006. Minchan Kim, Mar 28, 2016. 
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 24fc7c3ae7d6..2e6ca53b9bbd 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -56,6 +56,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, #endif #ifdef CONFIG_MIGRATION PGMIGRATE_SUCCESS, PGMIGRATE_FAIL, + THP_MIGRATION_SUCCESS, + THP_MIGRATION_FAIL, + THP_MIGRATION_SPLIT, #endif #ifdef CONFIG_COMPACTION COMPACTMIGRATE_SCANNED, COMPACTFREE_SCANNED, diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h index 705b33d1e395..4d434398d64d 100644 --- a/include/trace/events/migrate.h +++ b/include/trace/events/migrate.h @@ -46,13 +46,18 @@ MIGRATE_REASON TRACE_EVENT(mm_migrate_pages, TP_PROTO(unsigned long succeeded, unsigned long failed, - enum migrate_mode mode, int reason), + unsigned long thp_succeeded, unsigned long thp_failed, + unsigned long thp_split, enum migrate_mode mode, int reason), - TP_ARGS(succeeded, failed, mode, reason), + TP_ARGS(succeeded, failed, thp_succeeded, thp_failed, + thp_split, mode, reason), TP_STRUCT__entry( __field( unsigned long, succeeded) __field( unsigned long, failed) + __field( unsigned long, thp_succeeded) + __field( unsigned long, thp_failed) + __field( unsigned long, thp_split) __field( enum migrate_mode, mode) __field( int, reason) ), @@ -60,13 +65,19 @@ TRACE_EVENT(mm_migrate_pages, TP_fast_assign( __entry->succeeded = succeeded; __entry->failed = failed; + __entry->thp_succeeded = thp_succeeded; + __entry->thp_failed = thp_failed; + __entry->thp_split = thp_split; __entry->mode = mode; __entry->reason = reason; ), - TP_printk("nr_succeeded=%lu nr_failed=%lu mode=%s reason=%s", + TP_printk("nr_succeeded=%lu nr_failed=%lu nr_thp_succeeded=%lu nr_thp_failed=%lu nr_thp_split=%lu mode=%s reason=%s", __entry->succeeded, __entry->failed, + __entry->thp_succeeded, + __entry->thp_failed, + __entry->thp_split, __print_symbolic(__entry->mode, MIGRATE_MODE), __print_symbolic(__entry->reason, MIGRATE_REASON)) ); diff --git a/mm/migrate.c b/mm/migrate.c index b52f30113366..5ea275878383 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1418,22 +1418,35 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page, enum migrate_mode mode, int reason) { int retry = 1; + int thp_retry = 1; int nr_failed = 0; int nr_succeeded = 0; + int nr_thp_succeeded = 0; + int nr_thp_failed = 0; + int nr_thp_split = 0; int pass = 0; + bool is_thp = false; struct page *page; struct page *page2; int swapwrite = current->flags & PF_SWAPWRITE; - int rc; + int rc, nr_subpages; if (!swapwrite) current->flags |= PF_SWAPWRITE; - for(pass = 0; pass < 10 && retry; pass++) { + for (pass = 0; pass < 10 && (retry || thp_retry); pass++) { retry = 0; + thp_retry = 0; list_for_each_entry_safe(page, page2, from, lru) { retry: + /* + * THP statistics is based on the source huge page. + * Capture required information that might get lost + * during migration. 
+ */ + is_thp = PageTransHuge(page); + nr_subpages = hpage_nr_pages(page); cond_resched(); if (PageHuge(page)) @@ -1464,15 +1477,30 @@ retry: unlock_page(page); if (!rc) { list_safe_reset_next(page, page2, lru); + nr_thp_split++; goto retry; } } + if (is_thp) { + nr_thp_failed++; + nr_failed += nr_subpages; + goto out; + } nr_failed++; goto out; case -EAGAIN: + if (is_thp) { + thp_retry++; + break; + } retry++; break; case MIGRATEPAGE_SUCCESS: + if (is_thp) { + nr_thp_succeeded++; + nr_succeeded += nr_subpages; + break; + } nr_succeeded++; break; default: @@ -1482,19 +1510,27 @@ retry: * removed from migration page list and not * retried in the next outer loop. */ + if (is_thp) { + nr_thp_failed++; + nr_failed += nr_subpages; + break; + } nr_failed++; break; } } } - nr_failed += retry; + nr_failed += retry + thp_retry; + nr_thp_failed += thp_retry; rc = nr_failed; out: - if (nr_succeeded) - count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded); - if (nr_failed) - count_vm_events(PGMIGRATE_FAIL, nr_failed); - trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason); + count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded); + count_vm_events(PGMIGRATE_FAIL, nr_failed); + count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded); + count_vm_events(THP_MIGRATION_FAIL, nr_thp_failed); + count_vm_events(THP_MIGRATION_SPLIT, nr_thp_split); + trace_mm_migrate_pages(nr_succeeded, nr_failed, nr_thp_succeeded, + nr_thp_failed, nr_thp_split, mode, reason); if (!swapwrite) current->flags &= ~PF_SWAPWRITE; diff --git a/mm/vmstat.c b/mm/vmstat.c index 3bb034c99887..727a26d1ec1d 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1277,6 +1277,9 @@ const char * const vmstat_text[] = { #ifdef CONFIG_MIGRATION "pgmigrate_success", "pgmigrate_fail", + "thp_migration_success", + "thp_migration_fail", + "thp_migration_split", #endif #ifdef CONFIG_COMPACTION "compact_migrate_scanned", -- cgit v1.2.3 From af161bee93332a1ff10ba029f41936d21850ae82 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Tue, 11 Aug 2020 18:32:06 -0700 Subject: include/linux/sched/mm.h: optimize current_gfp_context() The current_gfp_context() converts a number of PF_MEMALLOC_* per-process flags into the corresponding GFP_* flags for memory allocation. In that function, current->flags is accessed 3 times. That may lead to duplicated access of the same memory location. This is not usually a problem with minimal debug config options on as the compiler can optimize away the duplicated memory accesses. With most of the debug config options on, however, that may not be the case. For example, the x86-64 object size of the __need_fs_reclaim() in a debug kernel that calls current_gfp_context() was 309 bytes. With this patch applied, the object size is reduced to 202 bytes. This is a saving of 107 bytes and will probably be slightly faster too. Use READ_ONCE() to access current->flags to prevent the compiler from possibly accessing current->flags multiple times. 
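As an illustration of the change described above (not the kernel implementation itself): take one snapshot of the task flags and test the snapshot, rather than re-reading current->flags for every check. In the kernel the snapshot is taken with READ_ONCE(); in this stand-alone sketch a plain parameter stands in for it, and the flag values are placeholders.

    #include <stdio.h>

    /* Placeholder flag values, for illustration only. */
    #define PF_MEMALLOC_NOIO  0x1u
    #define PF_MEMALLOC_NOFS  0x2u
    #define GFP_IO_BIT        0x4u
    #define GFP_FS_BIT        0x8u

    static unsigned int current_gfp_context(unsigned int pflags, unsigned int flags)
    {
            /* 'pflags' is the single snapshot of the task flags; all
             * three tests below use it instead of re-reading memory. */
            if (pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS)) {
                    if (pflags & PF_MEMALLOC_NOIO)
                            flags &= ~(GFP_IO_BIT | GFP_FS_BIT);
                    else if (pflags & PF_MEMALLOC_NOFS)
                            flags &= ~GFP_FS_BIT;
            }
            return flags;
    }

    int main(void)
    {
            printf("%#x\n", current_gfp_context(PF_MEMALLOC_NOFS,
                                                GFP_IO_BIT | GFP_FS_BIT));
            return 0;
    }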
Signed-off-by: Waiman Long Signed-off-by: Andrew Morton Cc: Peter Zijlstra Cc: Ingo Molnar Cc: Mathieu Desnoyers Cc: Michel Lespinasse Link: http://lkml.kernel.org/r/20200618212936.9776-1-longman@redhat.com Signed-off-by: Linus Torvalds --- include/linux/sched/mm.h | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 85023ddc2dc2..f889e332912f 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -178,14 +178,16 @@ static inline bool in_vfork(struct task_struct *tsk) */ static inline gfp_t current_gfp_context(gfp_t flags) { - if (unlikely(current->flags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) { + unsigned int pflags = READ_ONCE(current->flags); + + if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) { /* * NOIO implies both NOIO and NOFS and it is a weaker context * so always make sure it makes precedence */ - if (current->flags & PF_MEMALLOC_NOIO) + if (pflags & PF_MEMALLOC_NOIO) flags &= ~(__GFP_IO | __GFP_FS); - else if (current->flags & PF_MEMALLOC_NOFS) + else if (pflags & PF_MEMALLOC_NOFS) flags &= ~__GFP_FS; } return flags; -- cgit v1.2.3 From 1067b261cc973d68c29de54ef0587c56786e6bd5 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 11 Aug 2020 18:32:27 -0700 Subject: mm: drop duplicated words in Drop the doubled words "used" and "by". Drop the repeated acronym "TLB" and make several other fixes around it. (capital letters, spellos) Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Reviewed-by: SeongJae Park Link: http://lkml.kernel.org/r/2bb6e13e-44df-4920-52d9-4d3539945f73@infradead.org Signed-off-by: Linus Torvalds --- include/linux/pgtable.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 53e97da1e8e2..a124c21e3204 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -804,7 +804,7 @@ static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, /* * No-op macros that just return the current protection value. Defined here - * because these macros can be used used even if CONFIG_MMU is not defined. + * because these macros can be used even if CONFIG_MMU is not defined. */ #ifndef pgprot_nx @@ -1234,7 +1234,7 @@ static inline int pmd_trans_unstable(pmd_t *pmd) * Technically a PTE can be PROTNONE even when not doing NUMA balancing but * the only case the kernel cares is for NUMA balancing and is only ever set * when the VMA is accessible. For PROT_NONE VMAs, the PTEs are not marked - * _PAGE_PROTNONE so by by default, implement the helper as "always no". It + * _PAGE_PROTNONE so by default, implement the helper as "always no". It * is the responsibility of the caller to distinguish between PROT_NONE * protections and NUMA hinting fault protections. */ @@ -1318,10 +1318,10 @@ static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) /* * ARCHes with special requirements for evicting THP backing TLB entries can * implement this. Otherwise also, it can help optimize normal TLB flush in - * THP regime. stock flush_tlb_range() typically has optimization to nuke the - * entire TLB TLB if flush span is greater than a threshold, which will - * likely be true for a single huge page. Thus a single thp flush will - * invalidate the entire TLB which is not desitable. + * THP regime. 
Stock flush_tlb_range() typically has optimization to nuke the + * entire TLB if flush span is greater than a threshold, which will + * likely be true for a single huge page. Thus a single THP flush will + * invalidate the entire TLB which is not desirable. * e.g. see arch/arc: flush_pmd_tlb_range */ #define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) -- cgit v1.2.3 From 11192337206d2211bce3ba4ec1575439b6045654 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 11 Aug 2020 18:32:30 -0700 Subject: mm: drop duplicated words in Drop the doubled words "to" and "the". Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Reviewed-by: SeongJae Park Link: http://lkml.kernel.org/r/d9fae8d6-0d60-4d52-9385-3199ee98de49@infradead.org Signed-off-by: Linus Torvalds --- include/linux/mm.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/mm.h b/include/linux/mm.h index f6a82f9bccd7..f97b10117d44 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -479,7 +479,7 @@ static inline bool fault_flag_allow_retry_first(unsigned int flags) { FAULT_FLAG_INTERRUPTIBLE, "INTERRUPTIBLE" } /* - * vm_fault is filled by the the pagefault handler and passed to the vma's + * vm_fault is filled by the pagefault handler and passed to the vma's * ->fault function. The vma's ->fault is responsible for returning a bitmask * of VM_FAULT_xxx flags that give details about how the fault was handled. * @@ -2599,7 +2599,7 @@ extern unsigned long stack_guard_gap; /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ extern int expand_stack(struct vm_area_struct *vma, unsigned long address); -/* CONFIG_STACK_GROWSUP still needs to to grow downwards at some places */ +/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */ extern int expand_downwards(struct vm_area_struct *vma, unsigned long address); #if VM_GROWSUP -- cgit v1.2.3 From 3ecabd31341fb4307f156a2bf4467c3f8fa9101b Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 11 Aug 2020 18:32:33 -0700 Subject: include/linux/highmem.h: fix duplicated words in a comment Change the doubled word "is" in a comment to "it is". Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Reviewed-by: Andrew Morton Link: http://lkml.kernel.org/r/ad605959-0083-4794-8d31-6b073300dd6f@infradead.org Signed-off-by: Linus Torvalds --- include/linux/highmem.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/highmem.h b/include/linux/highmem.h index d6e82e3de027..14e6202ce47f 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -73,7 +73,7 @@ static inline void kunmap(struct page *page) * no global lock is needed and because the kmap code must perform a global TLB * invalidation when the kmap pool wraps. * - * However when holding an atomic kmap is is not legal to sleep, so atomic + * However when holding an atomic kmap it is not legal to sleep, so atomic * kmaps are appropriate for short, tight code paths only. * * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap -- cgit v1.2.3 From c82f16b52cb3b50ac1b47e6c590e3dddefc5a97c Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 11 Aug 2020 18:32:36 -0700 Subject: include/linux/frontswap.h: drop duplicated word in a comment Drop the doubled word "in" in a comment. 
Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Reviewed-by: Andrew Morton Cc: Konrad Rzeszutek Wilk Link: http://lkml.kernel.org/r/3af7ed91-ad62-8445-40a4-9e07a64b9523@infradead.org Signed-off-by: Linus Torvalds --- include/linux/frontswap.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h index 6d775984905b..b07d88c92bb2 100644 --- a/include/linux/frontswap.h +++ b/include/linux/frontswap.h @@ -10,7 +10,7 @@ /* * Return code to denote that requested number of * frontswap pages are unused(moved to page cache). - * Used in in shmem_unuse and try_to_unuse. + * Used in shmem_unuse and try_to_unuse. */ #define FRONTSWAP_PAGES_UNUSED 2 -- cgit v1.2.3 From 0845f83122d93ae3bafa7ea10209de2148a3b9bc Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 11 Aug 2020 18:32:40 -0700 Subject: include/linux/memcontrol.h: drop duplicate word and fix spello Drop the doubled word "for" in a comment. Fix spello of "incremented". Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Acked-by: Chris Down Cc: Johannes Weiner Cc: Michal Hocko Cc: Vladimir Davydov Link: http://lkml.kernel.org/r/b04aa2e4-7c95-12f0-599d-43d07fb28134@infradead.org Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 2c2d301eac33..385237e4cb44 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -65,8 +65,8 @@ struct mem_cgroup_id { /* * Per memcg event counter is incremented at every pagein/pageout. With THP, - * it will be incremated by the number of pages. This counter is used for - * for trigger some periodic events. This is straightforward and better + * it will be incremented by the number of pages. This counter is used + * to trigger some periodic events. This is straightforward and better * than using jiffies etc. to handle periodic memcg event. */ enum mem_cgroup_events_target { -- cgit v1.2.3 From bfe00c5bbd9ee37b99c281429556c335271d027b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 11 Aug 2020 18:33:34 -0700 Subject: syscalls: use uaccess_kernel in addr_limit_user_check Patch series "clean up address limit helpers", v2. In preparation for eventually phasing out direct use of set_fs(), this series removes the segment_eq() arch helper that is only used to implement or duplicate the uaccess_kernel() API, and then adds descriptive helpers to force the kernel address limit. This patch (of 6): Use the uaccess_kernel helper instead of duplicating it. 
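The substitution relies on there being exactly two address limits, so "the limit is not USER_DS" and "the limit is KERNEL_DS" select the same state. A toy model (not kernel code; the segment values are made up) that checks the equivalence:

    #include <assert.h>
    #include <stdbool.h>

    typedef struct { unsigned long seg; } mm_segment_t;

    static const mm_segment_t USER_DS   = { 0 };
    static const mm_segment_t KERNEL_DS = { ~0UL };

    static mm_segment_t current_fs;

    static mm_segment_t get_fs(void) { return current_fs; }
    static bool segment_eq(mm_segment_t a, mm_segment_t b) { return a.seg == b.seg; }
    static bool uaccess_kernel(void) { return segment_eq(get_fs(), KERNEL_DS); }

    int main(void)
    {
            const mm_segment_t cases[] = { USER_DS, KERNEL_DS };

            for (int i = 0; i < 2; i++) {
                    current_fs = cases[i];
                    /* old check: !segment_eq(get_fs(), USER_DS)
                     * new check: uaccess_kernel() */
                    assert(uaccess_kernel() == !segment_eq(get_fs(), USER_DS));
            }
            return 0;
    }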
[hch@lst.de: arm: don't call addr_limit_user_check for nommu] Link: http://lkml.kernel.org/r/20200721045834.GA9613@lst.de Signed-off-by: Christoph Hellwig Signed-off-by: Andrew Morton Tested-by: Guenter Roeck Acked-by: Linus Torvalds Cc: Nick Hu Cc: Greentime Hu Cc: Vincent Chen Cc: Paul Walmsley Cc: Palmer Dabbelt Cc: Geert Uytterhoeven Link: http://lkml.kernel.org/r/20200714105505.935079-1-hch@lst.de Link: http://lkml.kernel.org/r/20200710135706.537715-1-hch@lst.de Link: http://lkml.kernel.org/r/20200710135706.537715-2-hch@lst.de Signed-off-by: Linus Torvalds --- arch/arm/kernel/signal.c | 2 ++ include/linux/syscalls.h | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index ab2568996ddb..c9dc912b83f0 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -713,7 +713,9 @@ struct page *get_signal_page(void) /* Defer to generic check */ asmlinkage void addr_limit_check_failed(void) { +#ifdef CONFIG_MMU addr_limit_user_check(); +#endif } #ifdef CONFIG_DEBUG_RSEQ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index a2429d336593..dc2b827c81e5 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -263,7 +263,7 @@ static inline void addr_limit_user_check(void) return; #endif - if (CHECK_DATA_CORRUPTION(!segment_eq(get_fs(), USER_DS), + if (CHECK_DATA_CORRUPTION(uaccess_kernel(), "Invalid address limit on user-mode return")) force_sig(SIGKILL); -- cgit v1.2.3 From 428e2976a5bf7e7f5554286d7a5a33b8147b106a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 11 Aug 2020 18:33:44 -0700 Subject: uaccess: remove segment_eq segment_eq is only used to implement uaccess_kernel. Just open code uaccess_kernel in the arch uaccess headers and remove one layer of indirection. 
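In other words, the per-arch building block and the single generic wrapper collapse into one per-arch definition. A condensed sketch of the before/after shape (the struct-based flavour; the exact spelling differs per architecture, as the hunks below show):

    /*
     * Before:
     *   arch header:        #define segment_eq(a, b)   ((a).seg == (b).seg)
     *   <linux/uaccess.h>:  #define uaccess_kernel()   segment_eq(get_fs(), KERNEL_DS)
     *
     * After (the generic wrapper is gone):
     *   arch header:        #define uaccess_kernel()   (get_fs().seg == KERNEL_DS.seg)
     */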
Signed-off-by: Christoph Hellwig Signed-off-by: Andrew Morton Acked-by: Linus Torvalds Acked-by: Greentime Hu Acked-by: Geert Uytterhoeven Cc: Nick Hu Cc: Vincent Chen Cc: Paul Walmsley Cc: Palmer Dabbelt Link: http://lkml.kernel.org/r/20200710135706.537715-5-hch@lst.de Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/uaccess.h | 2 +- arch/arc/include/asm/segment.h | 3 +-- arch/arm/include/asm/uaccess.h | 4 ++-- arch/arm64/include/asm/uaccess.h | 2 +- arch/csky/include/asm/segment.h | 2 +- arch/h8300/include/asm/segment.h | 2 +- arch/ia64/include/asm/uaccess.h | 2 +- arch/m68k/include/asm/segment.h | 2 +- arch/microblaze/include/asm/uaccess.h | 2 +- arch/mips/include/asm/uaccess.h | 2 +- arch/nds32/include/asm/uaccess.h | 2 +- arch/nios2/include/asm/uaccess.h | 2 +- arch/openrisc/include/asm/uaccess.h | 2 +- arch/parisc/include/asm/uaccess.h | 2 +- arch/powerpc/include/asm/uaccess.h | 3 +-- arch/riscv/include/asm/uaccess.h | 4 +--- arch/s390/include/asm/uaccess.h | 2 +- arch/sh/include/asm/segment.h | 3 +-- arch/sparc/include/asm/uaccess_32.h | 2 +- arch/sparc/include/asm/uaccess_64.h | 2 +- arch/x86/include/asm/uaccess.h | 2 +- arch/xtensa/include/asm/uaccess.h | 2 +- include/asm-generic/uaccess.h | 4 ++-- include/linux/uaccess.h | 2 -- 24 files changed, 25 insertions(+), 32 deletions(-) (limited to 'include') diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index 1fe2b56cb861..1b6f25efa247 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -20,7 +20,7 @@ #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) /* * Is a address valid? This does a straightforward calculation rather diff --git a/arch/arc/include/asm/segment.h b/arch/arc/include/asm/segment.h index 6a2a5be5026d..871f8ab11bfd 100644 --- a/arch/arc/include/asm/segment.h +++ b/arch/arc/include/asm/segment.h @@ -14,8 +14,7 @@ typedef unsigned long mm_segment_t; #define KERNEL_DS MAKE_MM_SEG(0) #define USER_DS MAKE_MM_SEG(TASK_SIZE) - -#define segment_eq(a, b) ((a) == (b)) +#define uaccess_kernel() (get_fs() == KERNEL_DS) #endif /* __ASSEMBLY__ */ #endif /* __ASMARC_SEGMENT_H */ diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index b5fdd30252f8..a13d90206472 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -76,7 +76,7 @@ static inline void set_fs(mm_segment_t fs) modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); } -#define segment_eq(a, b) ((a) == (b)) +#define uaccess_kernel() (get_fs() == KERNEL_DS) /* * We use 33-bit arithmetic here. Success returns zero, failure returns @@ -267,7 +267,7 @@ extern int __put_user_8(void *, unsigned long long); */ #define USER_DS KERNEL_DS -#define segment_eq(a, b) (1) +#define uaccess_kernel() (true) #define __addr_ok(addr) ((void)(addr), 1) #define __range_ok(addr, size) ((void)(addr), 0) #define get_fs() (KERNEL_DS) diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 8d7c466f809b..991dd5f031e4 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -50,7 +50,7 @@ static inline void set_fs(mm_segment_t fs) CONFIG_ARM64_UAO)); } -#define segment_eq(a, b) ((a) == (b)) +#define uaccess_kernel() (get_fs() == KERNEL_DS) /* * Test whether a block of memory is a valid user space address. 
diff --git a/arch/csky/include/asm/segment.h b/arch/csky/include/asm/segment.h index db2640d5f575..79ede9b1a646 100644 --- a/arch/csky/include/asm/segment.h +++ b/arch/csky/include/asm/segment.h @@ -13,6 +13,6 @@ typedef struct { #define USER_DS ((mm_segment_t) { 0x80000000UL }) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #endif /* __ASM_CSKY_SEGMENT_H */ diff --git a/arch/h8300/include/asm/segment.h b/arch/h8300/include/asm/segment.h index a407978f9f9f..37950725d9b9 100644 --- a/arch/h8300/include/asm/segment.h +++ b/arch/h8300/include/asm/segment.h @@ -33,7 +33,7 @@ static inline mm_segment_t get_fs(void) return USER_DS; } -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #endif /* __ASSEMBLY__ */ diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index 8aa473a4b0f4..179243c3dfc7 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -50,7 +50,7 @@ #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) /* * When accessing user memory, we need to make sure the entire area really is in diff --git a/arch/m68k/include/asm/segment.h b/arch/m68k/include/asm/segment.h index c6686559e9b7..2b5e68a71ef7 100644 --- a/arch/m68k/include/asm/segment.h +++ b/arch/m68k/include/asm/segment.h @@ -52,7 +52,7 @@ static inline void set_fs(mm_segment_t val) #define set_fs(x) (current_thread_info()->addr_limit = (x)) #endif -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #endif /* __ASSEMBLY__ */ diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index 6723c56ec378..304b04ffea2f 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h @@ -41,7 +41,7 @@ # define get_fs() (current_thread_info()->addr_limit) # define set_fs(val) (current_thread_info()->addr_limit = (val)) -# define segment_eq(a, b) ((a).seg == (b).seg) +# define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #ifndef CONFIG_MMU diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 62b298c50905..61fc01f177a6 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -72,7 +72,7 @@ extern u64 __ua_limit; #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) /* * eva_kernel_access() - determine whether kernel memory access on an EVA system diff --git a/arch/nds32/include/asm/uaccess.h b/arch/nds32/include/asm/uaccess.h index 3a9219f53ee0..010ba5f1d7dd 100644 --- a/arch/nds32/include/asm/uaccess.h +++ b/arch/nds32/include/asm/uaccess.h @@ -44,7 +44,7 @@ static inline void set_fs(mm_segment_t fs) current_thread_info()->addr_limit = fs; } -#define segment_eq(a, b) ((a) == (b)) +#define uaccess_kernel() (get_fs() == KERNEL_DS) #define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs() -size)) diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h index e83f831a76f9..a741abbed6fb 100644 --- a/arch/nios2/include/asm/uaccess.h 
+++ b/arch/nios2/include/asm/uaccess.h @@ -30,7 +30,7 @@ #define get_fs() (current_thread_info()->addr_limit) #define set_fs(seg) (current_thread_info()->addr_limit = (seg)) -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #define __access_ok(addr, len) \ (((signed long)(((long)get_fs().seg) & \ diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index 17c24f14615f..48b691530d3e 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -43,7 +43,7 @@ #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) -#define segment_eq(a, b) ((a) == (b)) +#define uaccess_kernel() (get_fs() == KERNEL_DS) /* Ensure that the range from addr to addr+size is all within the process' * address space diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index ebbb9ffe038c..ed2cd4fb479b 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -14,7 +14,7 @@ #define KERNEL_DS ((mm_segment_t){0}) #define USER_DS ((mm_segment_t){1}) -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 64c04ab09112..00699903f1ef 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -38,8 +38,7 @@ static inline void set_fs(mm_segment_t fs) set_thread_flag(TIF_FSCHECK); } -#define segment_eq(a, b) ((a).seg == (b).seg) - +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #define user_addr_max() (get_fs().seg) #ifdef __powerpc64__ diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index 22de922d6ecb..f56c66b3f5fe 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h @@ -64,11 +64,9 @@ static inline void set_fs(mm_segment_t fs) current_thread_info()->addr_limit = fs; } -#define segment_eq(a, b) ((a).seg == (b).seg) - +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #define user_addr_max() (get_fs().seg) - /** * access_ok: - Checks if a user space pointer is valid * @addr: User space pointer to start of block to check diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 324438889fe1..f09444d6aeab 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -32,7 +32,7 @@ #define USER_DS_SACF (3) #define get_fs() (current->thread.mm_segment) -#define segment_eq(a,b) (((a) & 2) == ((b) & 2)) +#define uaccess_kernel() ((get_fs() & 2) == KERNEL_DS) void set_fs(mm_segment_t fs); diff --git a/arch/sh/include/asm/segment.h b/arch/sh/include/asm/segment.h index 33d1d28057cb..02e54a3335d6 100644 --- a/arch/sh/include/asm/segment.h +++ b/arch/sh/include/asm/segment.h @@ -24,8 +24,7 @@ typedef struct { #define USER_DS KERNEL_DS #endif -#define segment_eq(a, b) ((a).seg == (b).seg) - +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index d6d8413eca83..0a2d3ebc4bb8 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -28,7 +28,7 @@ #define 
get_fs() (current->thread.current_ds) #define set_fs(val) ((current->thread.current_ds) = (val)) -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) /* We have there a nice not-mapped page at PAGE_OFFSET - PAGE_SIZE, so that this test * can be fairly lightweight. diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index bf9d330073b2..698cf69f74e9 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -32,7 +32,7 @@ #define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)}) -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #define set_fs(val) \ do { \ diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 2f3e8f2a958f..ecefaffd15d4 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -33,7 +33,7 @@ static inline void set_fs(mm_segment_t fs) set_thread_flag(TIF_FSCHECK); } -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #define user_addr_max() (current->thread.addr_limit.seg) /* diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index e57f0d0a88d8..b9758119feca 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -35,7 +35,7 @@ #define get_fs() (current->thread.current_ds) #define set_fs(val) (current->thread.current_ds = (val)) -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #define __kernel_ok (uaccess_kernel()) #define __user_ok(addr, size) \ diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h index e935318804f8..ba68ee4dabfa 100644 --- a/include/asm-generic/uaccess.h +++ b/include/asm-generic/uaccess.h @@ -86,8 +86,8 @@ static inline void set_fs(mm_segment_t fs) } #endif -#ifndef segment_eq -#define segment_eq(a, b) ((a).seg == (b).seg) +#ifndef uaccess_kernel +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #endif #define access_ok(addr, size) __access_ok((unsigned long)(addr),(size)) diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 0a76ddc07d59..5c62d0c6f15b 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -6,8 +6,6 @@ #include #include -#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS) - #include /* -- cgit v1.2.3 From 3d13f313ce4c34c524ccc37986fe77172f601ff3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 11 Aug 2020 18:33:47 -0700 Subject: uaccess: add force_uaccess_{begin,end} helpers Add helpers to wrap the get_fs/set_fs magic for undoing any damange done by set_fs(KERNEL_DS). There is no real functional benefit, but this documents the intent of these calls better, and will allow stubbing the functions out easily for kernels builds that do not allow address space overrides in the future. 
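For readers following the conversions below, the caller-side pattern is mechanical. Here is a compressed sketch with stub definitions so it stands alone; the real helpers are the ones added to <linux/uaccess.h> in this patch, and the stub types here are placeholders.

    typedef unsigned long mm_segment_t;

    #define USER_DS ((mm_segment_t)0)

    static mm_segment_t current_addr_limit;

    static mm_segment_t get_fs(void)    { return current_addr_limit; }
    static void set_fs(mm_segment_t fs) { current_addr_limit = fs; }

    /* Same shape as the helpers added below. */
    static mm_segment_t force_uaccess_begin(void)
    {
            mm_segment_t fs = get_fs();

            set_fs(USER_DS);
            return fs;
    }

    static void force_uaccess_end(mm_segment_t oldfs)
    {
            set_fs(oldfs);
    }

    int main(void)
    {
            /* Old style:             New style:
             *   oldfs = get_fs();      oldfs = force_uaccess_begin();
             *   set_fs(USER_DS);       ... access user memory ...
             *   ...                    force_uaccess_end(oldfs);
             *   set_fs(oldfs);
             */
            mm_segment_t oldfs = force_uaccess_begin();

            force_uaccess_end(oldfs);
            return 0;
    }

Besides being shorter, the named pair documents the intent and gives a single place to stub out once set_fs() itself goes away.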
[hch@lst.de: drop two incorrect hunks, fix a commit log typo] Link: http://lkml.kernel.org/r/20200714105505.935079-6-hch@lst.de Signed-off-by: Christoph Hellwig Signed-off-by: Andrew Morton Acked-by: Linus Torvalds Acked-by: Mark Rutland Acked-by: Greentime Hu Acked-by: Geert Uytterhoeven Cc: Nick Hu Cc: Vincent Chen Cc: Paul Walmsley Cc: Palmer Dabbelt Link: http://lkml.kernel.org/r/20200710135706.537715-6-hch@lst.de Signed-off-by: Linus Torvalds --- arch/arm64/kernel/sdei.c | 2 +- arch/m68k/include/asm/tlbflush.h | 6 +++--- arch/mips/kernel/unaligned.c | 27 +++++++++++++-------------- arch/nds32/mm/alignment.c | 7 +++---- arch/sh/kernel/traps_32.c | 12 +++++------- drivers/firmware/arm_sdei.c | 5 ++--- include/linux/uaccess.h | 18 ++++++++++++++++++ kernel/events/callchain.c | 5 ++--- kernel/events/core.c | 5 ++--- kernel/kthread.c | 5 ++--- kernel/stacktrace.c | 5 ++--- mm/maccess.c | 22 ++++++++++------------ 12 files changed, 63 insertions(+), 56 deletions(-) (limited to 'include') diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c index dab88260b137..7689f2031c0c 100644 --- a/arch/arm64/kernel/sdei.c +++ b/arch/arm64/kernel/sdei.c @@ -180,7 +180,7 @@ static __kprobes unsigned long _sdei_handler(struct pt_regs *regs, /* * We didn't take an exception to get here, set PAN. UAO will be cleared - * by sdei_event_handler()s set_fs(USER_DS) call. + * by sdei_event_handler()s force_uaccess_begin() call. */ __uaccess_enable_hw_pan(); diff --git a/arch/m68k/include/asm/tlbflush.h b/arch/m68k/include/asm/tlbflush.h index 191e75a6bb24..5337bc2c262f 100644 --- a/arch/m68k/include/asm/tlbflush.h +++ b/arch/m68k/include/asm/tlbflush.h @@ -85,10 +85,10 @@ static inline void flush_tlb_mm(struct mm_struct *mm) static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) { if (vma->vm_mm == current->active_mm) { - mm_segment_t old_fs = get_fs(); - set_fs(USER_DS); + mm_segment_t old_fs = force_uaccess_begin(); + __flush_tlb_one(addr); - set_fs(old_fs); + force_uaccess_end(old_fs); } } diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 0adce604fa44..126a5f3f4e4c 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -191,17 +191,16 @@ static void emulate_load_store_insn(struct pt_regs *regs, * memory, so we need to "switch" the address limit to * user space, so that address check can work properly. 
*/ - seg = get_fs(); - set_fs(USER_DS); + seg = force_uaccess_begin(); switch (insn.spec3_format.func) { case lhe_op: if (!access_ok(addr, 2)) { - set_fs(seg); + force_uaccess_end(seg); goto sigbus; } LoadHWE(addr, value, res); if (res) { - set_fs(seg); + force_uaccess_end(seg); goto fault; } compute_return_epc(regs); @@ -209,12 +208,12 @@ static void emulate_load_store_insn(struct pt_regs *regs, break; case lwe_op: if (!access_ok(addr, 4)) { - set_fs(seg); + force_uaccess_end(seg); goto sigbus; } LoadWE(addr, value, res); if (res) { - set_fs(seg); + force_uaccess_end(seg); goto fault; } compute_return_epc(regs); @@ -222,12 +221,12 @@ static void emulate_load_store_insn(struct pt_regs *regs, break; case lhue_op: if (!access_ok(addr, 2)) { - set_fs(seg); + force_uaccess_end(seg); goto sigbus; } LoadHWUE(addr, value, res); if (res) { - set_fs(seg); + force_uaccess_end(seg); goto fault; } compute_return_epc(regs); @@ -235,35 +234,35 @@ static void emulate_load_store_insn(struct pt_regs *regs, break; case she_op: if (!access_ok(addr, 2)) { - set_fs(seg); + force_uaccess_end(seg); goto sigbus; } compute_return_epc(regs); value = regs->regs[insn.spec3_format.rt]; StoreHWE(addr, value, res); if (res) { - set_fs(seg); + force_uaccess_end(seg); goto fault; } break; case swe_op: if (!access_ok(addr, 4)) { - set_fs(seg); + force_uaccess_end(seg); goto sigbus; } compute_return_epc(regs); value = regs->regs[insn.spec3_format.rt]; StoreWE(addr, value, res); if (res) { - set_fs(seg); + force_uaccess_end(seg); goto fault; } break; default: - set_fs(seg); + force_uaccess_end(seg); goto sigill; } - set_fs(seg); + force_uaccess_end(seg); } #endif break; diff --git a/arch/nds32/mm/alignment.c b/arch/nds32/mm/alignment.c index c8b9061a2ee3..1eb7ded6992b 100644 --- a/arch/nds32/mm/alignment.c +++ b/arch/nds32/mm/alignment.c @@ -512,7 +512,7 @@ int do_unaligned_access(unsigned long addr, struct pt_regs *regs) { unsigned long inst; int ret = -EFAULT; - mm_segment_t seg = get_fs(); + mm_segment_t seg; inst = get_inst(regs->ipc); @@ -520,13 +520,12 @@ int do_unaligned_access(unsigned long addr, struct pt_regs *regs) "Faulting addr: 0x%08lx, pc: 0x%08lx [inst: 0x%08lx ]\n", addr, regs->ipc, inst); - set_fs(USER_DS); - + seg = force_uaccess_begin(); if (inst & NDS32_16BIT_INSTRUCTION) ret = do_16((inst >> 16) & 0xffff, regs); else ret = do_32(inst, regs); - set_fs(seg); + force_uaccess_end(seg); return ret; } diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index 058c6181bb30..b62ad0ba2395 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c @@ -482,8 +482,6 @@ asmlinkage void do_address_error(struct pt_regs *regs, error_code = lookup_exception_vector(); #endif - oldfs = get_fs(); - if (user_mode(regs)) { int si_code = BUS_ADRERR; unsigned int user_action; @@ -491,13 +489,13 @@ asmlinkage void do_address_error(struct pt_regs *regs, local_irq_enable(); inc_unaligned_user_access(); - set_fs(USER_DS); + oldfs = force_uaccess_begin(); if (copy_from_user(&instruction, (insn_size_t *)(regs->pc & ~1), sizeof(instruction))) { - set_fs(oldfs); + force_uaccess_end(oldfs); goto uspace_segv; } - set_fs(oldfs); + force_uaccess_end(oldfs); /* shout about userspace fixups */ unaligned_fixups_notify(current, instruction, regs); @@ -520,11 +518,11 @@ fixup: goto uspace_segv; } - set_fs(USER_DS); + oldfs = force_uaccess_begin(); tmp = handle_unaligned_access(instruction, regs, &user_mem_access, 0, address); - set_fs(oldfs); + force_uaccess_end(oldfs); if (tmp == 0) return; /* sorted */ diff --git 
a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index e7e36aab2386..b4b9ce97f415 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -1136,15 +1136,14 @@ int sdei_event_handler(struct pt_regs *regs, * access kernel memory. * Do the same here because this doesn't come via the same entry code. */ - orig_addr_limit = get_fs(); - set_fs(USER_DS); + orig_addr_limit = force_uaccess_begin(); err = arg->callback(event_num, regs, arg->callback_arg); if (err) pr_err_ratelimited("event %u on CPU %u failed with error: %d\n", event_num, smp_processor_id(), err); - set_fs(orig_addr_limit); + force_uaccess_end(orig_addr_limit); return err; } diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 5c62d0c6f15b..94b285411659 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -8,6 +8,24 @@ #include +/* + * Force the uaccess routines to be wired up for actual userspace access, + * overriding any possible set_fs(KERNEL_DS) still lingering around. Undone + * using force_uaccess_end below. + */ +static inline mm_segment_t force_uaccess_begin(void) +{ + mm_segment_t fs = get_fs(); + + set_fs(USER_DS); + return fs; +} + +static inline void force_uaccess_end(mm_segment_t oldfs) +{ + set_fs(oldfs); +} + /* * Architectures should provide two primitives (raw_copy_{to,from}_user()) * and get rid of their private instances of copy_{to,from}_user() and diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c index c6ce894e4ce9..58cbe357fb2b 100644 --- a/kernel/events/callchain.c +++ b/kernel/events/callchain.c @@ -217,10 +217,9 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, if (add_mark) perf_callchain_store_context(&ctx, PERF_CONTEXT_USER); - fs = get_fs(); - set_fs(USER_DS); + fs = force_uaccess_begin(); perf_callchain_user(&ctx, regs); - set_fs(fs); + force_uaccess_end(fs); } } diff --git a/kernel/events/core.c b/kernel/events/core.c index d1f0a7e5b182..6961333ebad5 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -6453,10 +6453,9 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size, /* Data. 
*/ sp = perf_user_stack_pointer(regs); - fs = get_fs(); - set_fs(USER_DS); + fs = force_uaccess_begin(); rem = __output_copy_user(handle, (void *) sp, dump_size); - set_fs(fs); + force_uaccess_end(fs); dyn_size = dump_size - rem; perf_output_skip(handle, rem); diff --git a/kernel/kthread.c b/kernel/kthread.c index b2807e7be772..3edaa380dc7b 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -1258,8 +1258,7 @@ void kthread_use_mm(struct mm_struct *mm) if (active_mm != mm) mmdrop(active_mm); - to_kthread(tsk)->oldfs = get_fs(); - set_fs(USER_DS); + to_kthread(tsk)->oldfs = force_uaccess_begin(); } EXPORT_SYMBOL_GPL(kthread_use_mm); @@ -1274,7 +1273,7 @@ void kthread_unuse_mm(struct mm_struct *mm) WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD)); WARN_ON_ONCE(!tsk->mm); - set_fs(to_kthread(tsk)->oldfs); + force_uaccess_end(to_kthread(tsk)->oldfs); task_lock(tsk); sync_mm_rss(mm); diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c index 2af66e449aa6..946f44a9e86a 100644 --- a/kernel/stacktrace.c +++ b/kernel/stacktrace.c @@ -233,10 +233,9 @@ unsigned int stack_trace_save_user(unsigned long *store, unsigned int size) if (current->flags & PF_KTHREAD) return 0; - fs = get_fs(); - set_fs(USER_DS); + fs = force_uaccess_begin(); arch_stack_walk_user(consume_entry, &c, task_pt_regs(current)); - set_fs(fs); + force_uaccess_end(fs); return c.len; } diff --git a/mm/maccess.c b/mm/maccess.c index f98ff91e32c6..3bd70405f2d8 100644 --- a/mm/maccess.c +++ b/mm/maccess.c @@ -205,15 +205,14 @@ long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count) long copy_from_user_nofault(void *dst, const void __user *src, size_t size) { long ret = -EFAULT; - mm_segment_t old_fs = get_fs(); + mm_segment_t old_fs = force_uaccess_begin(); - set_fs(USER_DS); if (access_ok(src, size)) { pagefault_disable(); ret = __copy_from_user_inatomic(dst, src, size); pagefault_enable(); } - set_fs(old_fs); + force_uaccess_end(old_fs); if (ret) return -EFAULT; @@ -233,15 +232,14 @@ EXPORT_SYMBOL_GPL(copy_from_user_nofault); long copy_to_user_nofault(void __user *dst, const void *src, size_t size) { long ret = -EFAULT; - mm_segment_t old_fs = get_fs(); + mm_segment_t old_fs = force_uaccess_begin(); - set_fs(USER_DS); if (access_ok(dst, size)) { pagefault_disable(); ret = __copy_to_user_inatomic(dst, src, size); pagefault_enable(); } - set_fs(old_fs); + force_uaccess_end(old_fs); if (ret) return -EFAULT; @@ -270,17 +268,17 @@ EXPORT_SYMBOL_GPL(copy_to_user_nofault); long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr, long count) { - mm_segment_t old_fs = get_fs(); + mm_segment_t old_fs; long ret; if (unlikely(count <= 0)) return 0; - set_fs(USER_DS); + old_fs = force_uaccess_begin(); pagefault_disable(); ret = strncpy_from_user(dst, unsafe_addr, count); pagefault_enable(); - set_fs(old_fs); + force_uaccess_end(old_fs); if (ret >= count) { ret = count; @@ -310,14 +308,14 @@ long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr, */ long strnlen_user_nofault(const void __user *unsafe_addr, long count) { - mm_segment_t old_fs = get_fs(); + mm_segment_t old_fs; int ret; - set_fs(USER_DS); + old_fs = force_uaccess_begin(); pagefault_disable(); ret = strnlen_user(unsafe_addr, count); pagefault_enable(); - set_fs(old_fs); + force_uaccess_end(old_fs); return ret; } -- cgit v1.2.3 From c5f748e2f2ad970078de11bd6b12bcd81147c636 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 11 Aug 2020 18:33:57 -0700 Subject: include/linux/compiler-clang.h: drop duplicated word in a comment Drop 
the doubled word "the" in a comment. Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Reviewed-by: Nathan Chancellor Link: http://lkml.kernel.org/r/6a18c301-3505-742f-4dd7-0f38d0e537b9@infradead.org Signed-off-by: Linus Torvalds --- include/linux/compiler-clang.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 8a072d00e688..cee0c728d39a 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -40,7 +40,7 @@ #endif /* - * Not all versions of clang implement the the type-generic versions + * Not all versions of clang implement the type-generic versions * of the builtin overflow checkers. Fortunately, clang implements * __has_builtin allowing us to avoid awkward version * checks. Unfortunately, we don't know which version of gcc clang -- cgit v1.2.3 From cd1a406fa46f81b1a859ef874b8413a6fa6ac7e8 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 11 Aug 2020 18:34:00 -0700 Subject: include/linux/exportfs.h: drop duplicated word in a comment Drop the doubled word "a" in a comment. Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Reviewed-by: Andrew Morton Cc: Alexander Viro Link: http://lkml.kernel.org/r/c61b707a-8fd8-5b1b-aab0-679122881543@infradead.org Signed-off-by: Linus Torvalds --- include/linux/exportfs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index d896b8657085..3ceb72b67a7a 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -178,7 +178,7 @@ struct fid { * get_name: * @get_name should find a name for the given @child in the given @parent * directory. The name should be stored in the @name (with the - * understanding that it is already pointing to a a %NAME_MAX+1 sized + * understanding that it is already pointing to a %NAME_MAX+1 sized * buffer. get_name() should return %0 on success, a negative error code * or error. @get_name will be called without @parent->i_mutex held. * -- cgit v1.2.3 From 121ae8da9cd49f7139bf80f26352337458e63fe1 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 11 Aug 2020 18:34:04 -0700 Subject: include/linux/async_tx.h: drop duplicated word in a comment Drop the doubled word "the" in a comment. Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Reviewed-by: Andrew Morton Cc: Dan Williams Link: http://lkml.kernel.org/r/e85802f7-8f48-8b4c-29b3-ea237a2c7ae9@infradead.org Signed-off-by: Linus Torvalds --- include/linux/async_tx.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index 75e582b8d2d9..4c328fef403c 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -36,7 +36,7 @@ struct dma_chan_ref { /** * async_tx_flags - modifiers for the async_* calls * @ASYNC_TX_XOR_ZERO_DST: this flag must be used for xor operations where the - * the destination address is not a source. The asynchronous case handles this + * destination address is not a source. The asynchronous case handles this * implicitly, the synchronous case needs to zero the destination block. * @ASYNC_TX_XOR_DROP_DST: this flag must be used if the destination address is * also one of the source addresses. 
In the synchronous case the destination -- cgit v1.2.3 From f48ff83e9c1a8fc2e8bfedb4855933eb1ed159b2 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 11 Aug 2020 18:34:07 -0700 Subject: include/linux/xz.h: drop duplicated word Drop the doubled word "than" in a comment. Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Cc: Lasse Collin Link: http://lkml.kernel.org/r/05ebba7a-c1e4-01ae-fc7b-15c081b33f3e@infradead.org Signed-off-by: Linus Torvalds --- include/linux/xz.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/xz.h b/include/linux/xz.h index 64cffa6ddfce..24ad7d875977 100644 --- a/include/linux/xz.h +++ b/include/linux/xz.h @@ -28,7 +28,7 @@ * enum xz_mode - Operation mode * * @XZ_SINGLE: Single-call mode. This uses less RAM than - * than multi-call modes, because the LZMA2 + * multi-call modes, because the LZMA2 * dictionary doesn't need to be allocated as * part of the decoder state. All required data * structures are allocated at initialization, -- cgit v1.2.3 From 8043fc147a97ec2eefc582487f344f2cbe86d12e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 11 Aug 2020 18:34:10 -0700 Subject: kernel: add a kernel_wait helper Add a helper that waits for a pid and stores the status in the passed in kernel pointer. Use it to fix the usage of kernel_wait4 in call_usermodehelper_exec_sync that only happens to work due to the implicit set_fs(KERNEL_DS) for kernel threads. Signed-off-by: Christoph Hellwig Signed-off-by: Andrew Morton Reviewed-by: Andrew Morton Acked-by: "Eric W. Biederman" Cc: Luis Chamberlain Link: http://lkml.kernel.org/r/20200721130449.5008-1-hch@lst.de Signed-off-by: Linus Torvalds --- include/linux/sched/task.h | 1 + kernel/exit.c | 16 ++++++++++++++++ kernel/umh.c | 29 ++++------------------------- 3 files changed, 21 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index ae3060f0b0c9..a98965007eef 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -88,6 +88,7 @@ struct task_struct *fork_idle(int); struct mm_struct *copy_init_mm(void); extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); extern long kernel_wait4(pid_t, int __user *, int, struct rusage *); +int kernel_wait(pid_t pid, int *stat); extern void free_task(struct task_struct *tsk); diff --git a/kernel/exit.c b/kernel/exit.c index c2d2961576f2..733e80f334e7 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -1626,6 +1626,22 @@ long kernel_wait4(pid_t upid, int __user *stat_addr, int options, return ret; } +int kernel_wait(pid_t pid, int *stat) +{ + struct wait_opts wo = { + .wo_type = PIDTYPE_PID, + .wo_pid = find_get_pid(pid), + .wo_flags = WEXITED, + }; + int ret; + + ret = do_wait(&wo); + if (ret > 0 && wo.wo_stat) + *stat = wo.wo_stat; + put_pid(wo.wo_pid); + return ret; +} + SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr, int, options, struct rusage __user *, ru) { diff --git a/kernel/umh.c b/kernel/umh.c index a25433f9cd9a..fcf3ee803630 100644 --- a/kernel/umh.c +++ b/kernel/umh.c @@ -119,37 +119,16 @@ static void call_usermodehelper_exec_sync(struct subprocess_info *sub_info) { pid_t pid; - /* If SIGCLD is ignored kernel_wait4 won't populate the status. */ + /* If SIGCLD is ignored do_wait won't populate the status. 
*/ kernel_sigaction(SIGCHLD, SIG_DFL); pid = kernel_thread(call_usermodehelper_exec_async, sub_info, SIGCHLD); - if (pid < 0) { + if (pid < 0) sub_info->retval = pid; - } else { - int ret = -ECHILD; - /* - * Normally it is bogus to call wait4() from in-kernel because - * wait4() wants to write the exit code to a userspace address. - * But call_usermodehelper_exec_sync() always runs as kernel - * thread (workqueue) and put_user() to a kernel address works - * OK for kernel threads, due to their having an mm_segment_t - * which spans the entire address space. - * - * Thus the __user pointer cast is valid here. - */ - kernel_wait4(pid, (int __user *)&ret, 0, NULL); - - /* - * If ret is 0, either call_usermodehelper_exec_async failed and - * the real error code is already in sub_info->retval or - * sub_info->retval is 0 anyway, so don't mess with it then. - */ - if (ret) - sub_info->retval = ret; - } + else + kernel_wait(pid, &sub_info->retval); /* Restore default kernel sig handler */ kernel_sigaction(SIGCHLD, SIG_IGN); - umh_complete(sub_info); } -- cgit v1.2.3 From 376653435dacf84a8aca87e66aff94079a817cf2 Mon Sep 17 00:00:00 2001 From: Arvind Sankar Date: Tue, 11 Aug 2020 18:34:16 -0700 Subject: kernel.h: remove duplicate include of asm/div64.h This seems to have been added inadvertently in commit 72deb455b5ec ("block: remove CONFIG_LBDAF") Fixes: 72deb455b5ec ("block: remove CONFIG_LBDAF") Signed-off-by: Arvind Sankar Signed-off-by: Andrew Morton Reviewed-by: Christoph Hellwig Link: http://lkml.kernel.org/r/20200727034852.2813453-1-nivedita@alum.mit.edu Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 7339a00c895e..e19c13616666 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -17,7 +17,6 @@ #include #include #include -#include #define STACK_MAGIC 0xdeadbeef -- cgit v1.2.3 From 7f317d34906c1033f0752fc137dda04e43979bb8 Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Tue, 11 Aug 2020 18:34:19 -0700 Subject: include/: replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Signed-off-by: Alexander A. 
Klimov Signed-off-by: Andrew Morton Reviewed-by: Kees Cook Link: http://lkml.kernel.org/r/20200726110117.16346-1-grandmaster@al2klimov.de Signed-off-by: Linus Torvalds --- include/clocksource/timer-ti-dm.h | 2 +- include/linux/btree.h | 2 +- include/linux/delay.h | 2 +- include/linux/dma/k3-psil.h | 2 +- include/linux/dma/k3-udma-glue.h | 2 +- include/linux/dma/ti-cppi5.h | 2 +- include/linux/irqchip/irq-omap-intc.h | 2 +- include/linux/jhash.h | 2 +- include/linux/leds-ti-lmu-common.h | 2 +- include/linux/platform_data/davinci-cpufreq.h | 2 +- include/linux/platform_data/davinci_asp.h | 2 +- include/linux/platform_data/elm.h | 2 +- include/linux/platform_data/gpio-davinci.h | 2 +- include/linux/platform_data/gpmc-omap.h | 2 +- include/linux/platform_data/mtd-davinci-aemif.h | 2 +- include/linux/platform_data/omap-twl4030.h | 2 +- include/linux/platform_data/uio_pruss.h | 2 +- include/linux/platform_data/usb-omap.h | 2 +- include/linux/soc/ti/k3-ringacc.h | 2 +- include/linux/soc/ti/knav_qmss.h | 2 +- include/linux/soc/ti/ti-msgmgr.h | 2 +- include/linux/wkup_m3_ipc.h | 2 +- include/linux/xxhash.h | 2 +- include/linux/xz.h | 2 +- include/linux/zlib.h | 2 +- include/soc/arc/aux.h | 2 +- include/uapi/linux/elf.h | 2 +- include/uapi/linux/map_to_7segment.h | 2 +- include/uapi/linux/types.h | 2 +- include/uapi/linux/usb/ch9.h | 2 +- 30 files changed, 30 insertions(+), 30 deletions(-) (limited to 'include') diff --git a/include/clocksource/timer-ti-dm.h b/include/clocksource/timer-ti-dm.h index 531ca87fcd08..4c61dade8835 100644 --- a/include/clocksource/timer-ti-dm.h +++ b/include/clocksource/timer-ti-dm.h @@ -1,7 +1,7 @@ /* * OMAP Dual-Mode Timers * - * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2010 Texas Instruments Incorporated - https://www.ti.com/ * Tarun Kanti DebBarma * Thara Gopinath * diff --git a/include/linux/btree.h b/include/linux/btree.h index 68f858c831b1..243ee544397a 100644 --- a/include/linux/btree.h +++ b/include/linux/btree.h @@ -10,7 +10,7 @@ * * A B+Tree is a data structure for looking up arbitrary (currently allowing * unsigned long, u32, u64 and 2 * u64) keys into pointers. The data structure - * is described at http://en.wikipedia.org/wiki/B-tree, we currently do not + * is described at https://en.wikipedia.org/wiki/B-tree, we currently do not * use binary search to find the key on lookups. * * Each B+Tree consists of a head, that contains bookkeeping information and diff --git a/include/linux/delay.h b/include/linux/delay.h index 5e016a4029d9..1d0e2ce6b6d9 100644 --- a/include/linux/delay.h +++ b/include/linux/delay.h @@ -16,7 +16,7 @@ * 3. CPU clock rate changes. 
* * Please see this thread: - * http://lists.openwall.net/linux-kernel/2011/01/09/56 + * https://lists.openwall.net/linux-kernel/2011/01/09/56 */ #include diff --git a/include/linux/dma/k3-psil.h b/include/linux/dma/k3-psil.h index 61d5cc0ad601..1962f75fa2d3 100644 --- a/include/linux/dma/k3-psil.h +++ b/include/linux/dma/k3-psil.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com */ #ifndef K3_PSIL_H_ diff --git a/include/linux/dma/k3-udma-glue.h b/include/linux/dma/k3-udma-glue.h index caadbab1632a..5eb34ad973a7 100644 --- a/include/linux/dma/k3-udma-glue.h +++ b/include/linux/dma/k3-udma-glue.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com */ #ifndef K3_UDMA_GLUE_H_ diff --git a/include/linux/dma/ti-cppi5.h b/include/linux/dma/ti-cppi5.h index 579356ae447e..5896441ee604 100644 --- a/include/linux/dma/ti-cppi5.h +++ b/include/linux/dma/ti-cppi5.h @@ -2,7 +2,7 @@ /* * CPPI5 descriptors interface * - * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com */ #ifndef __TI_CPPI5_H__ diff --git a/include/linux/irqchip/irq-omap-intc.h b/include/linux/irqchip/irq-omap-intc.h index 216e5adf80ce..dca379c0d7eb 100644 --- a/include/linux/irqchip/irq-omap-intc.h +++ b/include/linux/irqchip/irq-omap-intc.h @@ -2,7 +2,7 @@ /** * irq-omap-intc.h - INTC Idle Functions * - * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com * * Author: Felipe Balbi */ diff --git a/include/linux/jhash.h b/include/linux/jhash.h index ba2f6a9776b6..19ddd43aee68 100644 --- a/include/linux/jhash.h +++ b/include/linux/jhash.h @@ -5,7 +5,7 @@ * * Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net) * - * http://burtleburtle.net/bob/hash/ + * https://burtleburtle.net/bob/hash/ * * These are the credits from Bob's sources: * diff --git a/include/linux/leds-ti-lmu-common.h b/include/linux/leds-ti-lmu-common.h index 5eb111f38803..420b61e5a213 100644 --- a/include/linux/leds-ti-lmu-common.h +++ b/include/linux/leds-ti-lmu-common.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ // TI LMU Common Core -// Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/ +// Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com/ #ifndef _TI_LMU_COMMON_H_ #define _TI_LMU_COMMON_H_ diff --git a/include/linux/platform_data/davinci-cpufreq.h b/include/linux/platform_data/davinci-cpufreq.h index 3fbf9f2793b5..bc208c64e3d7 100644 --- a/include/linux/platform_data/davinci-cpufreq.h +++ b/include/linux/platform_data/davinci-cpufreq.h @@ -2,7 +2,7 @@ /* * TI DaVinci CPUFreq platform support. * - * Copyright (C) 2009 Texas Instruments, Inc. http://www.ti.com/ + * Copyright (C) 2009 Texas Instruments, Inc. 
https://www.ti.com/ */ #ifndef _MACH_DAVINCI_CPUFREQ_H diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h index 7fe80f1c7e08..5d1fb0d78a22 100644 --- a/include/linux/platform_data/davinci_asp.h +++ b/include/linux/platform_data/davinci_asp.h @@ -1,7 +1,7 @@ /* * TI DaVinci Audio Serial Port support * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/include/linux/platform_data/elm.h b/include/linux/platform_data/elm.h index 0f491d8abfdd..3cc78f0447b1 100644 --- a/include/linux/platform_data/elm.h +++ b/include/linux/platform_data/elm.h @@ -2,7 +2,7 @@ /* * BCH Error Location Module * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/ */ #ifndef __ELM_H diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h index a93841bfb9f7..e182a46e609f 100644 --- a/include/linux/platform_data/gpio-davinci.h +++ b/include/linux/platform_data/gpio-davinci.h @@ -1,7 +1,7 @@ /* * DaVinci GPIO Platform Related Defines * - * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/include/linux/platform_data/gpmc-omap.h b/include/linux/platform_data/gpmc-omap.h index ef663e570552..c9cc4e32435d 100644 --- a/include/linux/platform_data/gpmc-omap.h +++ b/include/linux/platform_data/gpmc-omap.h @@ -2,7 +2,7 @@ /* * OMAP GPMC Platform data * - * Copyright (C) 2014 Texas Instruments, Inc. - http://www.ti.com + * Copyright (C) 2014 Texas Instruments, Inc. - https://www.ti.com * Roger Quadros */ diff --git a/include/linux/platform_data/mtd-davinci-aemif.h b/include/linux/platform_data/mtd-davinci-aemif.h index a403dd51dacc..a49826214a39 100644 --- a/include/linux/platform_data/mtd-davinci-aemif.h +++ b/include/linux/platform_data/mtd-davinci-aemif.h @@ -1,7 +1,7 @@ /* * TI DaVinci AEMIF support * - * Copyright 2010 (C) Texas Instruments, Inc. http://www.ti.com/ + * Copyright 2010 (C) Texas Instruments, Inc. https://www.ti.com/ * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any diff --git a/include/linux/platform_data/omap-twl4030.h b/include/linux/platform_data/omap-twl4030.h index 8419c8caf54e..0dd851ea1c72 100644 --- a/include/linux/platform_data/omap-twl4030.h +++ b/include/linux/platform_data/omap-twl4030.h @@ -3,7 +3,7 @@ * omap-twl4030.h - ASoC machine driver for TI SoC based boards with twl4030 * codec, header. * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com * All rights reserved. 
* * Author: Peter Ujfalusi diff --git a/include/linux/platform_data/uio_pruss.h b/include/linux/platform_data/uio_pruss.h index 3d47d219827f..31f2e22661bc 100644 --- a/include/linux/platform_data/uio_pruss.h +++ b/include/linux/platform_data/uio_pruss.h @@ -3,7 +3,7 @@ * * Platform data for uio_pruss driver * - * Copyright (C) 2010-11 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2010-11 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/include/linux/platform_data/usb-omap.h b/include/linux/platform_data/usb-omap.h index fa579b4c666b..5e70d667031c 100644 --- a/include/linux/platform_data/usb-omap.h +++ b/include/linux/platform_data/usb-omap.h @@ -1,7 +1,7 @@ /* * usb-omap.h - Platform data for the various OMAP USB IPs * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com * * This software is distributed under the terms of the GNU General Public * License ("GPL") version 2, as published by the Free Software Foundation. diff --git a/include/linux/soc/ti/k3-ringacc.h b/include/linux/soc/ti/k3-ringacc.h index 7ac115432fa1..5a472eca5ee4 100644 --- a/include/linux/soc/ti/k3-ringacc.h +++ b/include/linux/soc/ti/k3-ringacc.h @@ -2,7 +2,7 @@ /* * K3 Ring Accelerator (RA) subsystem interface * - * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com */ #ifndef __SOC_TI_K3_RINGACC_API_H_ diff --git a/include/linux/soc/ti/knav_qmss.h b/include/linux/soc/ti/knav_qmss.h index 9745df6ed9d3..c75ef99c99ca 100644 --- a/include/linux/soc/ti/knav_qmss.h +++ b/include/linux/soc/ti/knav_qmss.h @@ -1,7 +1,7 @@ /* * Keystone Navigator Queue Management Sub-System header * - * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com * Author: Sandeep Nair * Cyril Chemparathy * Santosh Shilimkar diff --git a/include/linux/soc/ti/ti-msgmgr.h b/include/linux/soc/ti/ti-msgmgr.h index eac8e0c6fe11..1f6e76d423cf 100644 --- a/include/linux/soc/ti/ti-msgmgr.h +++ b/include/linux/soc/ti/ti-msgmgr.h @@ -1,7 +1,7 @@ /* * Texas Instruments' Message Manager * - * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/ * Nishanth Menon * * This program is free software; you can redistribute it and/or modify diff --git a/include/linux/wkup_m3_ipc.h b/include/linux/wkup_m3_ipc.h index e497e621dbb7..3f496967b538 100644 --- a/include/linux/wkup_m3_ipc.h +++ b/include/linux/wkup_m3_ipc.h @@ -1,7 +1,7 @@ /* * TI Wakeup M3 for AMx3 SoCs Power Management Routines * - * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/ * Dave Gerlach * * This program is free software; you can redistribute it and/or diff --git a/include/linux/xxhash.h b/include/linux/xxhash.h index 52b073fea17f..df42511438d0 100644 --- a/include/linux/xxhash.h +++ b/include/linux/xxhash.h @@ -34,7 +34,7 @@ * ("BSD"). 
* * You can contact the author at: - * - xxHash homepage: http://cyan4973.github.io/xxHash/ + * - xxHash homepage: https://cyan4973.github.io/xxHash/ * - xxHash source repository: https://github.com/Cyan4973/xxHash */ diff --git a/include/linux/xz.h b/include/linux/xz.h index 24ad7d875977..9884c8440188 100644 --- a/include/linux/xz.h +++ b/include/linux/xz.h @@ -2,7 +2,7 @@ * XZ decompressor * * Authors: Lasse Collin - * Igor Pavlov + * Igor Pavlov * * This file has been put into the public domain. * You can do whatever you want with this file. diff --git a/include/linux/zlib.h b/include/linux/zlib.h index c757d848a758..78ede944c082 100644 --- a/include/linux/zlib.h +++ b/include/linux/zlib.h @@ -23,7 +23,7 @@ The data format used by the zlib library is described by RFCs (Request for - Comments) 1950 to 1952 in the files http://www.ietf.org/rfc/rfc1950.txt + Comments) 1950 to 1952 in the files https://www.ietf.org/rfc/rfc1950.txt (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format). */ diff --git a/include/soc/arc/aux.h b/include/soc/arc/aux.h index e223c4ffa153..9c2eff6140b6 100644 --- a/include/soc/arc/aux.h +++ b/include/soc/arc/aux.h @@ -22,7 +22,7 @@ static inline int read_aux_reg(u32 r) /* * function helps elide unused variable warning - * see: http://lists.infradead.org/pipermail/linux-snps-arc/2016-November/001748.html + * see: https://lists.infradead.org/pipermail/linux-snps-arc/2016-November/001748.html */ static inline void write_aux_reg(u32 r, u32 v) { diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index c6dd0215482e..22220945a5fd 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -53,7 +53,7 @@ typedef __s64 Elf64_Sxword; * * - Oracle: Linker and Libraries. * Part No: 817–1984–19, August 2011. - * http://docs.oracle.com/cd/E18752_01/pdf/817-1984.pdf + * https://docs.oracle.com/cd/E18752_01/pdf/817-1984.pdf * * - System V ABI AMD64 Architecture Processor Supplement * Draft Version 0.99.4, diff --git a/include/uapi/linux/map_to_7segment.h b/include/uapi/linux/map_to_7segment.h index f9ed18134b83..13a06e5e966e 100644 --- a/include/uapi/linux/map_to_7segment.h +++ b/include/uapi/linux/map_to_7segment.h @@ -24,7 +24,7 @@ * of (ASCII) characters to a 7-segments notation. * * The 7 segment's wikipedia notation below is used as standard. - * See: http://en.wikipedia.org/wiki/Seven_segment_display + * See: https://en.wikipedia.org/wiki/Seven_segment_display * * Notation: +-a-+ * f b diff --git a/include/uapi/linux/types.h b/include/uapi/linux/types.h index 2fce8b6876e9..f6d2f83cbe29 100644 --- a/include/uapi/linux/types.h +++ b/include/uapi/linux/types.h @@ -7,7 +7,7 @@ #ifndef __ASSEMBLY__ #ifndef __KERNEL__ #ifndef __EXPORTED_HEADERS__ -#warning "Attempt to use kernel headers from user space, see http://kernelnewbies.org/KernelHeaders" +#warning "Attempt to use kernel headers from user space, see https://kernelnewbies.org/KernelHeaders" #endif /* __EXPORTED_HEADERS__ */ #endif diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index 48766fdf6580..0f865ae4ba89 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -1229,7 +1229,7 @@ struct usb_set_sel_req { * As per USB compliance update, a device that is actively drawing * more than 100mA from USB must report itself as bus-powered in * the GetStatus(DEVICE) call. 
- * http://compliance.usb.org/index.asp?UpdateFile=Electrical&Format=Standard#34 + * https://compliance.usb.org/index.asp?UpdateFile=Electrical&Format=Standard#34 */ #define USB_SELF_POWER_VBUS_MAX_DRAW 100 -- cgit v1.2.3 From 9e58c5e2fcd8d8d8820ea277e0f577f563eed1f1 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Tue, 11 Aug 2020 18:34:23 -0700 Subject: include/linux/poison.h: remove obsolete comment When the definition was changed, the comment became stale. Just remove it since there isn't anything useful to say here. Fixes: b8a0255db958 ("include/linux/poison.h: use POISON_POINTER_DELTA for poison pointers") Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Cc: Vasily Kulikov Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20200730174108.GJ23808@casper.infradead.org Signed-off-by: Linus Torvalds --- include/linux/poison.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'include') diff --git a/include/linux/poison.h b/include/linux/poison.h index df34330b4e34..dc8ae5d8db03 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h @@ -24,10 +24,6 @@ #define LIST_POISON2 ((void *) 0x122 + POISON_POINTER_DELTA) /********** include/linux/timer.h **********/ -/* - * Magic number "tsta" to indicate a static timer initializer - * for the object debugging code. - */ #define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA) /********** mm/page_poison.c **********/ -- cgit v1.2.3 From 25fd529c34d063d1bef23742f2e8f8341c639dc3 Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Tue, 11 Aug 2020 18:34:26 -0700 Subject: sparse: group the defines by functionality By popular demand, reorder the defines for sparse annotations and group them by functionality. Signed-off-by: Luc Van Oostenryck Signed-off-by: Andrew Morton Acked-by: Miguel Ojeda Cc: Geert Uytterhoeven Link: lore.kernel.org/r/CAMuHMdWQsirja-h3wBcZezk+H2Q_HShhAks8Hc8ps5fTAp=ObQ@mail.gmail.com Link: http://lkml.kernel.org/r/20200621143652.53798-1-luc.vanoostenryck@gmail.com Signed-off-by: Linus Torvalds --- include/linux/compiler_types.h | 44 ++++++++++++++++++++++++------------------ 1 file changed, 25 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 2e231ba8fe3f..4b33cb385f96 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -5,48 +5,54 @@ #ifndef __ASSEMBLY__ #ifdef __CHECKER__ +/* address spaces */ # define __kernel __attribute__((address_space(0))) # define __user __attribute__((noderef, address_space(__user))) -# define __safe __attribute__((safe)) -# define __force __attribute__((force)) -# define __nocast __attribute__((nocast)) # define __iomem __attribute__((noderef, address_space(__iomem))) +# define __percpu __attribute__((noderef, address_space(__percpu))) +# define __rcu __attribute__((noderef, address_space(__rcu))) +extern void __chk_user_ptr(const volatile void __user *); +extern void __chk_io_ptr(const volatile void __iomem *); +/* context/locking */ # define __must_hold(x) __attribute__((context(x,1,1))) # define __acquires(x) __attribute__((context(x,0,1))) # define __releases(x) __attribute__((context(x,1,0))) # define __acquire(x) __context__(x,1) # define __release(x) __context__(x,-1) # define __cond_lock(x,c) ((c) ? 
({ __acquire(x); 1; }) : 0) -# define __percpu __attribute__((noderef, address_space(__percpu))) -# define __rcu __attribute__((noderef, address_space(__rcu))) +/* other */ +# define __force __attribute__((force)) +# define __nocast __attribute__((nocast)) +# define __safe __attribute__((safe)) # define __private __attribute__((noderef)) -extern void __chk_user_ptr(const volatile void __user *); -extern void __chk_io_ptr(const volatile void __iomem *); # define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member)) #else /* __CHECKER__ */ +/* address spaces */ +# define __kernel # ifdef STRUCTLEAK_PLUGIN -# define __user __attribute__((user)) +# define __user __attribute__((user)) # else # define __user # endif -# define __kernel -# define __safe -# define __force -# define __nocast # define __iomem -# define __chk_user_ptr(x) (void)0 -# define __chk_io_ptr(x) (void)0 -# define __builtin_warning(x, y...) (1) +# define __percpu +# define __rcu +# define __chk_user_ptr(x) (void)0 +# define __chk_io_ptr(x) (void)0 +/* context/locking */ # define __must_hold(x) # define __acquires(x) # define __releases(x) -# define __acquire(x) (void)0 -# define __release(x) (void)0 +# define __acquire(x) (void)0 +# define __release(x) (void)0 # define __cond_lock(x,c) (c) -# define __percpu -# define __rcu +/* other */ +# define __force +# define __nocast +# define __safe # define __private # define ACCESS_PRIVATE(p, member) ((p)->member) +# define __builtin_warning(x, y...) (1) #endif /* __CHECKER__ */ /* Indirect macros required for expanded argument pasting, eg. __LINE__. */ -- cgit v1.2.3 From 0a650e472d2039a5f768acd82f2a7068bf338dfd Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Tue, 11 Aug 2020 18:34:35 -0700 Subject: lib/generic-radix-tree.c: remove unneeded __rcu struct __genradix is defined as having its member 'root' annotated as __rcu. But in the corresponding API RCU is not used. Sparse reports this type mismatch as: lib/generic-radix-tree.c:56:35: warning: incorrect type in initializer (different address spaces) lib/generic-radix-tree.c:56:35: expected struct genradix_root *r lib/generic-radix-tree.c:56:35: got struct genradix_root [noderef] *__val with 6 other ones. So, correct root's type by removing this unneeded __rcu. Signed-off-by: Luc Van Oostenryck Signed-off-by: Andrew Morton Cc: Kent Overstreet Link: http://lkml.kernel.org/r/20200621161745.55396-1-luc.vanoostenryck@gmail.com Signed-off-by: Linus Torvalds --- include/linux/generic-radix-tree.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h index 02393c0c98f9..bfd00320c7f3 100644 --- a/include/linux/generic-radix-tree.h +++ b/include/linux/generic-radix-tree.h @@ -44,7 +44,7 @@ struct genradix_root; struct __genradix { - struct genradix_root __rcu *root; + struct genradix_root *root; }; /* -- cgit v1.2.3 From b642e44e8ab335868b549fe5753b783ca47bf3a3 Mon Sep 17 00:00:00 2001 From: Kars Mulder Date: Tue, 11 Aug 2020 18:34:53 -0700 Subject: kstrto*: correct documentation references to simple_strto*() The documentation of the kstrto*() functions reference the simple_strtoull function by "used as a replacement for [the obsolete] simple_strtoull". All these functions describes themselves as replacements for the function simple_strtoull, even though a function like kstrtol() would be more aptly described as a replacement of simple_strtol(). 
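(As an illustrative aside, not part of the patch: the calling pattern this kerneldoc describes is the checked conversion sketched below; "buf" is an assumed NUL-terminated input string, and propagating the error code is the caller's choice.)

    unsigned long val;
    int ret;

    ret = kstrtoul(buf, 10, &val);  /* 0 on success, -ERANGE on overflow, -EINVAL on parse error */
    if (ret)
        return ret;
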
Fix these references by making the documentation of kstrto*() reference the closest simple_strto*() equivalent available. The functions kstrto[u]int() do not have direct simple_strto[u]int() equivalences, so these are made to refer to simple_strto[u]l() instead. Furthermore, add parentheses after function names, as is standard in kernel documentation. Fixes: 4c925d6031f71 ("kstrto*: add documentation") Signed-off-by: Kars Mulder Signed-off-by: Andrew Morton Reviewed-by: Andy Shevchenko Cc: Eldad Zack Cc: Miguel Ojeda Cc: Geert Uytterhoeven Cc: Mans Rullgard Cc: Petr Mladek Link: http://lkml.kernel.org/r/1ee1-5f234c00-f3-165a6440@234394593 Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 4 ++-- lib/kstrtox.c | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index e19c13616666..47b1dad63ffc 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -346,7 +346,7 @@ int __must_check kstrtoll(const char *s, unsigned int base, long long *res); * @res: Where to write the result of the conversion on success. * * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Used as a replacement for the simple_strtoull. Return code must be checked. + * Used as a replacement for the simple_strtoul(). Return code must be checked. */ static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res) { @@ -374,7 +374,7 @@ static inline int __must_check kstrtoul(const char *s, unsigned int base, unsign * @res: Where to write the result of the conversion on success. * * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Used as a replacement for the simple_strtoull. Return code must be checked. + * Used as a replacement for the simple_strtol(). Return code must be checked. */ static inline int __must_check kstrtol(const char *s, unsigned int base, long *res) { diff --git a/lib/kstrtox.c b/lib/kstrtox.c index 1006bf70bf74..252ac414ba9a 100644 --- a/lib/kstrtox.c +++ b/lib/kstrtox.c @@ -115,7 +115,7 @@ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res) * @res: Where to write the result of the conversion on success. * * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Used as a replacement for the obsolete simple_strtoull. Return code must + * Used as a replacement for the obsolete simple_strtoull(). Return code must * be checked. */ int kstrtoull(const char *s, unsigned int base, unsigned long long *res) @@ -139,7 +139,7 @@ EXPORT_SYMBOL(kstrtoull); * @res: Where to write the result of the conversion on success. * * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Used as a replacement for the obsolete simple_strtoull. Return code must + * Used as a replacement for the obsolete simple_strtoll(). Return code must * be checked. */ int kstrtoll(const char *s, unsigned int base, long long *res) @@ -211,7 +211,7 @@ EXPORT_SYMBOL(_kstrtol); * @res: Where to write the result of the conversion on success. * * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Used as a replacement for the obsolete simple_strtoull. Return code must + * Used as a replacement for the obsolete simple_strtoul(). Return code must * be checked. */ int kstrtouint(const char *s, unsigned int base, unsigned int *res) @@ -242,7 +242,7 @@ EXPORT_SYMBOL(kstrtouint); * @res: Where to write the result of the conversion on success. 
* * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Used as a replacement for the obsolete simple_strtoull. Return code must + * Used as a replacement for the obsolete simple_strtol(). Return code must * be checked. */ int kstrtoint(const char *s, unsigned int base, int *res) -- cgit v1.2.3 From ef0f2685336bbc334e8b6997ce9b155e5f7edd31 Mon Sep 17 00:00:00 2001 From: Kars Mulder Date: Tue, 11 Aug 2020 18:34:56 -0700 Subject: kstrto*: do not describe simple_strto*() as obsolete/replaced The documentation of the kstrto*() functions describes kstrto*() as "replacements" of the "obsolete" simple_strto*() functions. Both of these terms are inaccurate: they're not replacements because they have different behaviour, and the simple_strto*() are not obsolete because there are cases where they have benefits over kstrto*(). Remove usage of the terms "replacement" and "obsolete" in reference to simple_strto*(), and instead use the term "preferred over". Fixes: 4c925d6031f71 ("kstrto*: add documentation") Fixes: 885e68e8b7b13 ("kernel.h: update comment about simple_strto() functions") Signed-off-by: Kars Mulder Signed-off-by: Andrew Morton Reviewed-by: Andy Shevchenko Cc: Eldad Zack Cc: Miguel Ojeda Cc: Geert Uytterhoeven Cc: Mans Rullgard Cc: Petr Mladek Link: http://lkml.kernel.org/r/29b9-5f234c80-13-4e3aa200@244003027 Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 4 ++-- lib/kstrtox.c | 12 ++++-------- 2 files changed, 6 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 47b1dad63ffc..92517ae53e7c 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -346,7 +346,7 @@ int __must_check kstrtoll(const char *s, unsigned int base, long long *res); * @res: Where to write the result of the conversion on success. * * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Used as a replacement for the simple_strtoul(). Return code must be checked. + * Preferred over simple_strtoul(). Return code must be checked. */ static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res) { @@ -374,7 +374,7 @@ static inline int __must_check kstrtoul(const char *s, unsigned int base, unsign * @res: Where to write the result of the conversion on success. * * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Used as a replacement for the simple_strtol(). Return code must be checked. + * Preferred over simple_strtol(). Return code must be checked. */ static inline int __must_check kstrtol(const char *s, unsigned int base, long *res) { diff --git a/lib/kstrtox.c b/lib/kstrtox.c index 252ac414ba9a..a14ccf905055 100644 --- a/lib/kstrtox.c +++ b/lib/kstrtox.c @@ -115,8 +115,7 @@ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res) * @res: Where to write the result of the conversion on success. * * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Used as a replacement for the obsolete simple_strtoull(). Return code must - * be checked. + * Preferred over simple_strtoull(). Return code must be checked. */ int kstrtoull(const char *s, unsigned int base, unsigned long long *res) { @@ -139,8 +138,7 @@ EXPORT_SYMBOL(kstrtoull); * @res: Where to write the result of the conversion on success. * * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Used as a replacement for the obsolete simple_strtoll(). Return code must - * be checked. 
+ * Preferred over simple_strtoll(). Return code must be checked. */ int kstrtoll(const char *s, unsigned int base, long long *res) { @@ -211,8 +209,7 @@ EXPORT_SYMBOL(_kstrtol); * @res: Where to write the result of the conversion on success. * * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Used as a replacement for the obsolete simple_strtoul(). Return code must - * be checked. + * Preferred over simple_strtoul(). Return code must be checked. */ int kstrtouint(const char *s, unsigned int base, unsigned int *res) { @@ -242,8 +239,7 @@ EXPORT_SYMBOL(kstrtouint); * @res: Where to write the result of the conversion on success. * * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Used as a replacement for the obsolete simple_strtol(). Return code must - * be checked. + * Preferred over simple_strtol(). Return code must be checked. */ int kstrtoint(const char *s, unsigned int base, int *res) { -- cgit v1.2.3 From 2fb3244f0a58ceb3d866ac63f644dfa31cae430f Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 11 Aug 2020 18:35:21 -0700 Subject: autofs: fix doubled word Change doubled word "is" to "it is". Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Acked-by: Ian Kent Link: http://lkml.kernel.org/r/5a82befd-40f8-8dc0-3498-cbc0436cad9b@infradead.org Signed-off-by: Linus Torvalds --- include/uapi/linux/auto_dev-ioctl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/linux/auto_dev-ioctl.h b/include/uapi/linux/auto_dev-ioctl.h index 374742651c30..62e625356dc8 100644 --- a/include/uapi/linux/auto_dev-ioctl.h +++ b/include/uapi/linux/auto_dev-ioctl.h @@ -82,7 +82,7 @@ struct args_ismountpoint { /* * All the ioctls use this structure. * When sending a path size must account for the total length - * of the chunk of memory otherwise is is the size of the + * of the chunk of memory otherwise it is the size of the * structure. */ -- cgit v1.2.3 From 0935288c6e008c0682ab6171fb5605dcc049e7bd Mon Sep 17 00:00:00 2001 From: Vijay Balakrishna Date: Tue, 11 Aug 2020 18:36:33 -0700 Subject: kdump: append kernel build-id string to VMCOREINFO Make kernel GNU build-id available in VMCOREINFO. Having build-id in VMCOREINFO facilitates presenting appropriate kernel namelist image with debug information file to kernel crash dump analysis tools. Currently VMCOREINFO lacks uniquely identifiable key for crash analysis automation. Regarding if this patch is necessary or matching of linux_banner and OSRELEASE in VMCOREINFO employed by crash(8) meets the need -- IMO, build-id approach more foolproof, in most instances it is a cryptographic hash generated using internal code/ELF bits unlike kernel version string upon which linux_banner is based that is external to the code. I feel each is intended for a different purpose. Also OSRELEASE is not suitable when two different kernel builds from same version with different features enabled. Currently for most linux (and non-linux) systems build-id can be extracted using standard methods for file types such as user mode crash dumps, shared libraries, loadable kernel modules etc., This is an exception for linux kernel dump. Having build-id in VMCOREINFO brings some uniformity for automation tools. Tyler said: : I think this is a nice improvement over today's linux_banner approach for : correlating vmlinux to a kernel dump. : : The elf notes parsing in this patch lines up with what is described in in : the "Notes (Nhdr)" section of the elf(5) man page. 
: : BUILD_ID_MAX is sufficient to hold a sha1 build-id, which is the default : build-id type today in GNU ld(2). It is also sufficient to hold the : "fast" build-id, which is the default build-id type today in LLVM lld(2). Signed-off-by: Vijay Balakrishna Signed-off-by: Andrew Morton Reviewed-by: Tyler Hicks Acked-by: Baoquan He Cc: Dave Young Cc: Vivek Goyal Link: http://lkml.kernel.org/r/1591849672-34104-1-git-send-email-vijayb@linux.microsoft.com Signed-off-by: Linus Torvalds --- include/linux/crash_core.h | 6 ++++++ kernel/crash_core.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+) (limited to 'include') diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h index 525510a9f965..6594dbc34a37 100644 --- a/include/linux/crash_core.h +++ b/include/linux/crash_core.h @@ -38,6 +38,8 @@ phys_addr_t paddr_vmcoreinfo_note(void); #define VMCOREINFO_OSRELEASE(value) \ vmcoreinfo_append_str("OSRELEASE=%s\n", value) +#define VMCOREINFO_BUILD_ID(value) \ + vmcoreinfo_append_str("BUILD-ID=%s\n", value) #define VMCOREINFO_PAGESIZE(value) \ vmcoreinfo_append_str("PAGESIZE=%ld\n", value) #define VMCOREINFO_SYMBOL(name) \ @@ -64,6 +66,10 @@ extern unsigned char *vmcoreinfo_data; extern size_t vmcoreinfo_size; extern u32 *vmcoreinfo_note; +/* raw contents of kernel .notes section */ +extern const void __start_notes __weak; +extern const void __stop_notes __weak; + Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type, void *data, size_t data_len); void final_note(Elf_Word *buf); diff --git a/kernel/crash_core.c b/kernel/crash_core.c index 18175687133a..106e4500fd53 100644 --- a/kernel/crash_core.c +++ b/kernel/crash_core.c @@ -11,6 +11,8 @@ #include #include +#include + /* vmcoreinfo stuff */ unsigned char *vmcoreinfo_data; size_t vmcoreinfo_size; @@ -376,6 +378,53 @@ phys_addr_t __weak paddr_vmcoreinfo_note(void) } EXPORT_SYMBOL(paddr_vmcoreinfo_note); +#define NOTES_SIZE (&__stop_notes - &__start_notes) +#define BUILD_ID_MAX SHA1_DIGEST_SIZE +#define NT_GNU_BUILD_ID 3 + +struct elf_note_section { + struct elf_note n_hdr; + u8 n_data[]; +}; + +/* + * Add build ID from .notes section as generated by the GNU ld(1) + * or LLVM lld(1) --build-id option. 
+ */ +static void add_build_id_vmcoreinfo(void) +{ + char build_id[BUILD_ID_MAX * 2 + 1]; + int n_remain = NOTES_SIZE; + + while (n_remain >= sizeof(struct elf_note)) { + const struct elf_note_section *note_sec = + &__start_notes + NOTES_SIZE - n_remain; + const u32 n_namesz = note_sec->n_hdr.n_namesz; + + if (note_sec->n_hdr.n_type == NT_GNU_BUILD_ID && + n_namesz != 0 && + !strcmp((char *)¬e_sec->n_data[0], "GNU")) { + if (note_sec->n_hdr.n_descsz <= BUILD_ID_MAX) { + const u32 n_descsz = note_sec->n_hdr.n_descsz; + const u8 *s = ¬e_sec->n_data[n_namesz]; + + s = PTR_ALIGN(s, 4); + bin2hex(build_id, s, n_descsz); + build_id[2 * n_descsz] = '\0'; + VMCOREINFO_BUILD_ID(build_id); + return; + } + pr_warn("Build ID is too large to include in vmcoreinfo: %u > %u\n", + note_sec->n_hdr.n_descsz, + BUILD_ID_MAX); + return; + } + n_remain -= sizeof(struct elf_note) + + ALIGN(note_sec->n_hdr.n_namesz, 4) + + ALIGN(note_sec->n_hdr.n_descsz, 4); + } +} + static int __init crash_save_vmcoreinfo_init(void) { vmcoreinfo_data = (unsigned char *)get_zeroed_page(GFP_KERNEL); @@ -394,6 +443,7 @@ static int __init crash_save_vmcoreinfo_init(void) } VMCOREINFO_OSRELEASE(init_uts_ns.name.release); + add_build_id_vmcoreinfo(); VMCOREINFO_PAGESIZE(PAGE_SIZE); VMCOREINFO_SYMBOL(init_uts_ns); -- cgit v1.2.3 From 79076e1241bb3bf02d0aac7d39120d8161fe07b1 Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Tue, 11 Aug 2020 18:36:46 -0700 Subject: kernel/panic.c: make oops_may_print() return bool The return value of oops_may_print() is true or false, so change its type to reflect that. Signed-off-by: Tiezhu Yang Signed-off-by: Andrew Morton Reviewed-by: Kees Cook Cc: Xuefeng Li Link: http://lkml.kernel.org/r/1591103358-32087-1-git-send-email-yangtiezhu@loongson.cn Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 2 +- kernel/panic.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 92517ae53e7c..211005163682 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -322,7 +322,7 @@ void nmi_panic(struct pt_regs *regs, const char *msg); extern void oops_enter(void); extern void oops_exit(void); void print_oops_end_marker(void); -extern int oops_may_print(void); +extern bool oops_may_print(void); void do_exit(long error_code) __noreturn; void complete_and_exit(struct completion *, long) __noreturn; diff --git a/kernel/panic.c b/kernel/panic.c index e2157ca387c8..93ba061d1c79 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -505,7 +505,7 @@ static void do_oops_enter_exit(void) * Return true if the calling CPU is allowed to print oops-related info. * This is a bit racy.. */ -int oops_may_print(void) +bool oops_may_print(void) { return pause_on_oops_flag == 0; } -- cgit v1.2.3 From 63037f74725ddd8a767ed2ad0369e60a3bf1f2ce Mon Sep 17 00:00:00 2001 From: Yue Hu Date: Tue, 11 Aug 2020 18:36:53 -0700 Subject: panic: make print_oops_end_marker() static Since print_oops_end_marker() is not used externally, also remove it in kernel.h at the same time. 
Signed-off-by: Yue Hu Signed-off-by: Andrew Morton Cc: Kees Cook Link: http://lkml.kernel.org/r/20200724011516.12756-1-zbestahu@gmail.com Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 1 - kernel/panic.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 211005163682..500def620d8f 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -321,7 +321,6 @@ void panic(const char *fmt, ...) __noreturn __cold; void nmi_panic(struct pt_regs *regs, const char *msg); extern void oops_enter(void); extern void oops_exit(void); -void print_oops_end_marker(void); extern bool oops_may_print(void); void do_exit(long error_code) __noreturn; void complete_and_exit(struct completion *, long) __noreturn; diff --git a/kernel/panic.c b/kernel/panic.c index 93ba061d1c79..aef8872ba843 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -551,7 +551,7 @@ static int init_oops_id(void) } late_initcall(init_oops_id); -void print_oops_end_marker(void) +static void print_oops_end_marker(void) { init_oops_id(); pr_warn("---[ end trace %016llx ]---\n", (unsigned long long)oops_id); -- cgit v1.2.3 From b4b382238ed2f94f0d3860f9120b66404fa99463 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 11 Aug 2020 18:37:14 -0700 Subject: mm/migrate: move migration helper from .h to .c It's not performance sensitive function. Move it to .c. This is a preparation step for future change. Signed-off-by: Joonsoo Kim Signed-off-by: Andrew Morton Reviewed-by: Vlastimil Babka Acked-by: Mike Kravetz Acked-by: Michal Hocko Cc: Christoph Hellwig Cc: Naoya Horiguchi Cc: Roman Gushchin Link: http://lkml.kernel.org/r/1594622517-20681-3-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Linus Torvalds --- include/linux/migrate.h | 33 +++++---------------------------- mm/migrate.c | 29 +++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 540998d9810b..abeb4b15b297 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -31,34 +31,6 @@ enum migrate_reason { /* In mm/debug.c; also keep sync with include/trace/events/migrate.h */ extern const char *migrate_reason_names[MR_TYPES]; -static inline struct page *new_page_nodemask(struct page *page, - int preferred_nid, nodemask_t *nodemask) -{ - gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL; - unsigned int order = 0; - struct page *new_page = NULL; - - if (PageHuge(page)) - return alloc_huge_page_nodemask(page_hstate(compound_head(page)), - preferred_nid, nodemask); - - if (PageTransHuge(page)) { - gfp_mask |= GFP_TRANSHUGE; - order = HPAGE_PMD_ORDER; - } - - if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE)) - gfp_mask |= __GFP_HIGHMEM; - - new_page = __alloc_pages_nodemask(gfp_mask, order, - preferred_nid, nodemask); - - if (new_page && PageTransHuge(new_page)) - prep_transhuge_page(new_page); - - return new_page; -} - #ifdef CONFIG_MIGRATION extern void putback_movable_pages(struct list_head *l); @@ -67,6 +39,8 @@ extern int migrate_page(struct address_space *mapping, enum migrate_mode mode); extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, unsigned long private, enum migrate_mode mode, int reason); +extern struct page *new_page_nodemask(struct page *page, + int preferred_nid, nodemask_t *nodemask); extern int isolate_movable_page(struct page *page, isolate_mode_t mode); extern void 
putback_movable_page(struct page *page); @@ -85,6 +59,9 @@ static inline int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, unsigned long private, enum migrate_mode mode, int reason) { return -ENOSYS; } +static inline struct page *new_page_nodemask(struct page *page, + int preferred_nid, nodemask_t *nodemask) + { return NULL; } static inline int isolate_movable_page(struct page *page, isolate_mode_t mode) { return -EBUSY; } diff --git a/mm/migrate.c b/mm/migrate.c index 52896b4921a7..5269bc520aee 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1538,6 +1538,35 @@ out: return rc; } +struct page *new_page_nodemask(struct page *page, + int preferred_nid, nodemask_t *nodemask) +{ + gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL; + unsigned int order = 0; + struct page *new_page = NULL; + + if (PageHuge(page)) + return alloc_huge_page_nodemask( + page_hstate(compound_head(page)), + preferred_nid, nodemask); + + if (PageTransHuge(page)) { + gfp_mask |= GFP_TRANSHUGE; + order = HPAGE_PMD_ORDER; + } + + if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE)) + gfp_mask |= __GFP_HIGHMEM; + + new_page = __alloc_pages_nodemask(gfp_mask, order, + preferred_nid, nodemask); + + if (new_page && PageTransHuge(new_page)) + prep_transhuge_page(new_page); + + return new_page; +} + #ifdef CONFIG_NUMA static int store_status(int __user *status, int start, int value, int nr) -- cgit v1.2.3 From d92bbc2719bd2be237ee336113b63492a6baca3b Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 11 Aug 2020 18:37:17 -0700 Subject: mm/hugetlb: unify migration callbacks There is no difference between two migration callback functions, alloc_huge_page_node() and alloc_huge_page_nodemask(), except __GFP_THISNODE handling. It's redundant to have two almost similar functions in order to handle this flag. So, this patch tries to remove one by introducing a new argument, gfp_mask, to alloc_huge_page_nodemask(). After introducing gfp_mask argument, it's caller's job to provide correct gfp_mask. So, every callsites for alloc_huge_page_nodemask() are changed to provide gfp_mask. Note that it's safe to remove a node id check in alloc_huge_page_node() since there is no caller passing NUMA_NO_NODE as a node id. 
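As a rough sketch of the calling convention this patch moves to, the toy program below models a single nodemask-based allocator whose gfp decisions live in the caller. The flag values, the struct hstate contents and the allocator body are made up for illustration; the real alloc_huge_page_nodemask() takes an hstate, a preferred node and a nodemask and returns a struct page *.

#include <stdbool.h>
#include <stdio.h>

/* illustrative flag values, not the kernel's */
#define GFP_HIGHUSER          0x01u
#define GFP_HIGHUSER_MOVABLE  0x03u
#define __GFP_THISNODE        0x10u

struct hstate { bool movable; };

static unsigned int htlb_alloc_mask(const struct hstate *h)
{
        return h->movable ? GFP_HIGHUSER_MOVABLE : GFP_HIGHUSER;
}

/* single callback: the gfp decision now lives in the caller */
static void alloc_huge_page_nodemask(const struct hstate *h, int nid,
                                     unsigned int gfp_mask)
{
        (void)h;
        printf("allocate on node %d with mask 0x%02x\n", nid, gfp_mask);
}

int main(void)
{
        struct hstate h = { .movable = true };

        /* former alloc_huge_page_nodemask() user: plain mask */
        alloc_huge_page_nodemask(&h, 1, htlb_alloc_mask(&h));
        /* former alloc_huge_page_node() user: caller ORs in __GFP_THISNODE */
        alloc_huge_page_nodemask(&h, 1, htlb_alloc_mask(&h) | __GFP_THISNODE);
        return 0;
}

The point of the refactor is visible in main(): the caller that previously needed a second entry point simply adds __GFP_THISNODE to the mask it passes down.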
Signed-off-by: Joonsoo Kim Signed-off-by: Andrew Morton Reviewed-by: Mike Kravetz Reviewed-by: Vlastimil Babka Acked-by: Michal Hocko Cc: Christoph Hellwig Cc: Naoya Horiguchi Cc: Roman Gushchin Link: http://lkml.kernel.org/r/1594622517-20681-4-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 26 ++++++++++++++++++-------- mm/hugetlb.c | 35 ++--------------------------------- mm/mempolicy.c | 10 ++++++---- mm/migrate.c | 11 +++++++---- 4 files changed, 33 insertions(+), 49 deletions(-) (limited to 'include') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index a520bf26e5d8..3517edde681e 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -10,6 +10,7 @@ #include #include #include +#include struct ctl_table; struct user_struct; @@ -506,9 +507,8 @@ struct huge_bootmem_page { struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve); -struct page *alloc_huge_page_node(struct hstate *h, int nid); struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, - nodemask_t *nmask); + nodemask_t *nmask, gfp_t gfp_mask); struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, unsigned long address); struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask, @@ -694,6 +694,15 @@ static inline bool hugepage_movable_supported(struct hstate *h) return true; } +/* Movability of hugepages depends on migration support. */ +static inline gfp_t htlb_alloc_mask(struct hstate *h) +{ + if (hugepage_movable_supported(h)) + return GFP_HIGHUSER_MOVABLE; + else + return GFP_HIGHUSER; +} + static inline spinlock_t *huge_pte_lockptr(struct hstate *h, struct mm_struct *mm, pte_t *pte) { @@ -761,13 +770,9 @@ static inline struct page *alloc_huge_page(struct vm_area_struct *vma, return NULL; } -static inline struct page *alloc_huge_page_node(struct hstate *h, int nid) -{ - return NULL; -} - static inline struct page * -alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask) +alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, + nodemask_t *nmask, gfp_t gfp_mask) { return NULL; } @@ -880,6 +885,11 @@ static inline bool hugepage_movable_supported(struct hstate *h) return false; } +static inline gfp_t htlb_alloc_mask(struct hstate *h) +{ + return 0; +} + static inline spinlock_t *huge_pte_lockptr(struct hstate *h, struct mm_struct *mm, pte_t *pte) { diff --git a/mm/hugetlb.c b/mm/hugetlb.c index b66bf74e999e..eaab9ef88e9d 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1093,15 +1093,6 @@ retry_cpuset: return NULL; } -/* Movability of hugepages depends on migration support. 
*/ -static inline gfp_t htlb_alloc_mask(struct hstate *h) -{ - if (hugepage_movable_supported(h)) - return GFP_HIGHUSER_MOVABLE; - else - return GFP_HIGHUSER; -} - static struct page *dequeue_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, unsigned long address, int avoid_reserve, @@ -1985,32 +1976,10 @@ struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h, return page; } -/* page migration callback function */ -struct page *alloc_huge_page_node(struct hstate *h, int nid) -{ - gfp_t gfp_mask = htlb_alloc_mask(h); - struct page *page = NULL; - - if (nid != NUMA_NO_NODE) - gfp_mask |= __GFP_THISNODE; - - spin_lock(&hugetlb_lock); - if (h->free_huge_pages - h->resv_huge_pages > 0) - page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL); - spin_unlock(&hugetlb_lock); - - if (!page) - page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL); - - return page; -} - /* page migration callback function */ struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, - nodemask_t *nmask) + nodemask_t *nmask, gfp_t gfp_mask) { - gfp_t gfp_mask = htlb_alloc_mask(h); - spin_lock(&hugetlb_lock); if (h->free_huge_pages - h->resv_huge_pages > 0) { struct page *page; @@ -2038,7 +2007,7 @@ struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, gfp_mask = htlb_alloc_mask(h); node = huge_node(vma, address, gfp_mask, &mpol, &nodemask); - page = alloc_huge_page_nodemask(h, node, nodemask); + page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask); mpol_cond_put(mpol); return page; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 25b7e412c20b..9ae2b704bdf6 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1068,10 +1068,12 @@ static int migrate_page_add(struct page *page, struct list_head *pagelist, /* page allocation callback for NUMA node migration */ struct page *alloc_new_node_page(struct page *page, unsigned long node) { - if (PageHuge(page)) - return alloc_huge_page_node(page_hstate(compound_head(page)), - node); - else if (PageTransHuge(page)) { + if (PageHuge(page)) { + struct hstate *h = page_hstate(compound_head(page)); + gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; + + return alloc_huge_page_nodemask(h, node, NULL, gfp_mask); + } else if (PageTransHuge(page)) { struct page *thp; thp = alloc_pages_node(node, diff --git a/mm/migrate.c b/mm/migrate.c index 5269bc520aee..8d084e9bc13b 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1545,10 +1545,13 @@ struct page *new_page_nodemask(struct page *page, unsigned int order = 0; struct page *new_page = NULL; - if (PageHuge(page)) - return alloc_huge_page_nodemask( - page_hstate(compound_head(page)), - preferred_nid, nodemask); + if (PageHuge(page)) { + struct hstate *h = page_hstate(compound_head(page)); + + gfp_mask = htlb_alloc_mask(h); + return alloc_huge_page_nodemask(h, preferred_nid, + nodemask, gfp_mask); + } if (PageTransHuge(page)) { gfp_mask |= GFP_TRANSHUGE; -- cgit v1.2.3 From 19fc7bed252c16ace29491e4cfa2bafb264eb505 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 11 Aug 2020 18:37:25 -0700 Subject: mm/migrate: introduce a standard migration target allocation function There are some similar functions for migration target allocation. Since there is no fundamental difference, it's better to keep just one rather than keeping all variants. This patch implements base migration target allocation function. In the following patches, variants will be converted to use this function. Changes should be mechanical, but, unfortunately, there are some differences. 
First, some callers' nodemask is assigned to NULL since NULL nodemask will be considered as all available nodes, that is, &node_states[N_MEMORY]. Second, for hugetlb page allocation, gfp_mask is redefined as regular hugetlb allocation gfp_mask plus __GFP_THISNODE if the user-provided gfp_mask has it. This is because a future caller of this function needs to set this node constraint. Lastly, if provided nodeid is NUMA_NO_NODE, nodeid is set up to the node where migration source lives. It helps to remove simple wrappers for setting up the nodeid. Note that the PageHighmem() call in the previous function is changed to open-code "is_highmem_idx()" since it provides more readability. [akpm@linux-foundation.org: tweak patch title, per Vlastimil] [akpm@linux-foundation.org: fix typo in comment] Signed-off-by: Joonsoo Kim Signed-off-by: Andrew Morton Acked-by: Vlastimil Babka Acked-by: Michal Hocko Cc: Christoph Hellwig Cc: Mike Kravetz Cc: Naoya Horiguchi Cc: Roman Gushchin Link: http://lkml.kernel.org/r/1594622517-20681-6-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 15 +++++++++++++++ include/linux/migrate.h | 9 +++++---- mm/internal.h | 7 +++++++ mm/memory-failure.c | 7 +++++-- mm/memory_hotplug.c | 12 ++++++++---- mm/migrate.c | 26 ++++++++++++++++---------- mm/page_isolation.c | 7 +++++-- 7 files changed, 61 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 3517edde681e..30e1f14119c8 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -703,6 +703,16 @@ static inline gfp_t htlb_alloc_mask(struct hstate *h) return GFP_HIGHUSER; } +static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask) +{ + gfp_t modified_mask = htlb_alloc_mask(h); + + /* Some callers might want to enforce node */ + modified_mask |= (gfp_mask & __GFP_THISNODE); + + return modified_mask; +} + static inline spinlock_t *huge_pte_lockptr(struct hstate *h, struct mm_struct *mm, pte_t *pte) { @@ -890,6 +900,11 @@ static inline gfp_t htlb_alloc_mask(struct hstate *h) return 0; } +static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask) +{ + return 0; +} + static inline spinlock_t *huge_pte_lockptr(struct hstate *h, struct mm_struct *mm, pte_t *pte) { diff --git a/include/linux/migrate.h b/include/linux/migrate.h index abeb4b15b297..0f8d1583fa8e 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -10,6 +10,8 @@ typedef struct page *new_page_t(struct page *page, unsigned long private); typedef void free_page_t(struct page *page, unsigned long private); +struct migration_target_control; + /* * Return values from addresss_space_operations.migratepage(): * - negative errno on page migration failure; @@ -39,8 +41,7 @@ extern int migrate_page(struct address_space *mapping, enum migrate_mode mode); extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, unsigned long private, enum migrate_mode mode, int reason); -extern struct page *new_page_nodemask(struct page *page, - int preferred_nid, nodemask_t *nodemask); +extern struct page *alloc_migration_target(struct page *page, unsigned long private); extern int isolate_movable_page(struct page *page, isolate_mode_t mode); extern void putback_movable_page(struct page *page); @@ -59,8 +60,8 @@ static inline int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, unsigned long private, enum migrate_mode mode, int reason) { return -ENOSYS; } -static inline struct page
*new_page_nodemask(struct page *page, - int preferred_nid, nodemask_t *nodemask) +static inline struct page *alloc_migration_target(struct page *page, + unsigned long private) { return NULL; } static inline int isolate_movable_page(struct page *page, isolate_mode_t mode) { return -EBUSY; } diff --git a/mm/internal.h b/mm/internal.h index 42cf0b610847..f725aa8a9698 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -614,4 +614,11 @@ static inline bool is_migrate_highatomic_page(struct page *page) void setup_zone_pageset(struct zone *zone); extern struct page *alloc_new_node_page(struct page *page, unsigned long node); + +struct migration_target_control { + int nid; /* preferred node id */ + nodemask_t *nmask; + gfp_t gfp_mask; +}; + #endif /* __MM_INTERNAL_H */ diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 47b8ccb1fb9b..f1aa6433f404 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1648,9 +1648,12 @@ EXPORT_SYMBOL(unpoison_memory); static struct page *new_page(struct page *p, unsigned long private) { - int nid = page_to_nid(p); + struct migration_target_control mtc = { + .nid = page_to_nid(p), + .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, + }; - return new_page_nodemask(p, nid, &node_states[N_MEMORY]); + return alloc_migration_target(p, (unsigned long)&mtc); } /* diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 0a9e1972fbe7..c32ead89c911 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1276,19 +1276,23 @@ found: static struct page *new_node_page(struct page *page, unsigned long private) { - int nid = page_to_nid(page); nodemask_t nmask = node_states[N_MEMORY]; + struct migration_target_control mtc = { + .nid = page_to_nid(page), + .nmask = &nmask, + .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, + }; /* * try to allocate from a different node but reuse this node if there * are no other online nodes to be used (e.g. 
we are offlining a part * of the only existing node) */ - node_clear(nid, nmask); + node_clear(mtc.nid, nmask); if (nodes_empty(nmask)) - node_set(nid, nmask); + node_set(mtc.nid, nmask); - return new_page_nodemask(page, nid, &nmask); + return alloc_migration_target(page, (unsigned long)&mtc); } static int diff --git a/mm/migrate.c b/mm/migrate.c index 46cca5c2ebff..48b1f149494b 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1538,19 +1538,26 @@ out: return rc; } -struct page *new_page_nodemask(struct page *page, - int preferred_nid, nodemask_t *nodemask) +struct page *alloc_migration_target(struct page *page, unsigned long private) { - gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL; + struct migration_target_control *mtc; + gfp_t gfp_mask; unsigned int order = 0; struct page *new_page = NULL; + int nid; + int zidx; + + mtc = (struct migration_target_control *)private; + gfp_mask = mtc->gfp_mask; + nid = mtc->nid; + if (nid == NUMA_NO_NODE) + nid = page_to_nid(page); if (PageHuge(page)) { struct hstate *h = page_hstate(compound_head(page)); - gfp_mask = htlb_alloc_mask(h); - return alloc_huge_page_nodemask(h, preferred_nid, - nodemask, gfp_mask); + gfp_mask = htlb_modify_alloc_mask(h, gfp_mask); + return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask); } if (PageTransHuge(page)) { @@ -1562,12 +1569,11 @@ struct page *new_page_nodemask(struct page *page, gfp_mask |= GFP_TRANSHUGE; order = HPAGE_PMD_ORDER; } - - if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE)) + zidx = zone_idx(page_zone(page)); + if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE) gfp_mask |= __GFP_HIGHMEM; - new_page = __alloc_pages_nodemask(gfp_mask, order, - preferred_nid, nodemask); + new_page = __alloc_pages_nodemask(gfp_mask, order, nid, mtc->nmask); if (new_page && PageTransHuge(new_page)) prep_transhuge_page(new_page); diff --git a/mm/page_isolation.c b/mm/page_isolation.c index aec26d972b9f..f25c66ea37ac 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -309,7 +309,10 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, struct page *alloc_migrate_target(struct page *page, unsigned long private) { - int nid = page_to_nid(page); + struct migration_target_control mtc = { + .nid = page_to_nid(page), + .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, + }; - return new_page_nodemask(page, nid, &node_states[N_MEMORY]); + return alloc_migration_target(page, (unsigned long)&mtc); } -- cgit v1.2.3 From 41b4dc14ee807cb1bd15e67cad287534046f92dc Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 11 Aug 2020 18:37:34 -0700 Subject: mm/gup: restrict CMA region by using allocation scope API We have well defined scope API to exclude CMA region. Use it rather than manipulating gfp_mask manually. With this change, we can now restore __GFP_MOVABLE for gfp_mask like as usual migration target allocation. It would result in that the ZONE_MOVABLE is also searched by page allocator. For hugetlb, gfp_mask is redefined since it has a regular allocation mask filter for migration target. __GPF_NOWARN is added to hugetlb gfp_mask filter since a new user for gfp_mask filter, gup, want to be silent when allocation fails. Note that this can be considered as a fix for the commit 9a4e9f3b2d73 ("mm: update get_user_pages_longterm to migrate pages allocated from CMA region"). However, "Fixes" tag isn't added here since it is just suboptimal but it doesn't cause any problem. 
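The allocation scope API referred to above can be pictured as a per-task flag that is set around a block of allocations instead of being threaded through every gfp_mask. The sketch below models memalloc_nocma_save()/memalloc_nocma_restore() with a plain thread-local boolean; it shows the pattern only and is not the kernel implementation (the real API manipulates PF_MEMALLOC_NOCMA in current->flags and is consulted by the page allocator).

#include <stdbool.h>
#include <stdio.h>

static _Thread_local bool nocma_scope;

static bool memalloc_nocma_save(void)
{
        bool old = nocma_scope;

        nocma_scope = true;
        return old;
}

static void memalloc_nocma_restore(bool old)
{
        nocma_scope = old;
}

/* the allocator consults the scope; callers keep their natural gfp flags */
static void alloc_page_for_migration(void)
{
        printf("allocating, CMA %s\n", nocma_scope ? "excluded" : "allowed");
}

int main(void)
{
        bool flags = memalloc_nocma_save();

        alloc_page_for_migration();      /* CMA excluded inside the scope */
        memalloc_nocma_restore(flags);
        alloc_page_for_migration();      /* CMA allowed again */
        return 0;
}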
Suggested-by: Michal Hocko Signed-off-by: Joonsoo Kim Signed-off-by: Andrew Morton Acked-by: Vlastimil Babka Cc: Christoph Hellwig Cc: Roman Gushchin Cc: Mike Kravetz Cc: Naoya Horiguchi Cc: "Aneesh Kumar K . V" Link: http://lkml.kernel.org/r/1596180906-8442-1-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 2 ++ mm/gup.c | 17 ++++++++--------- 2 files changed, 10 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 30e1f14119c8..d86c82749836 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -710,6 +710,8 @@ static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask) /* Some callers might want to enforce node */ modified_mask |= (gfp_mask & __GFP_THISNODE); + modified_mask |= (gfp_mask & __GFP_NOWARN); + return modified_mask; } diff --git a/mm/gup.c b/mm/gup.c index d8a33dd1430d..6c20c6e37635 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1620,10 +1620,12 @@ static struct page *new_non_cma_page(struct page *page, unsigned long private) * Trying to allocate a page for migration. Ignore allocation * failure warnings. We don't force __GFP_THISNODE here because * this node here is the node where we have CMA reservation and - * in some case these nodes will have really less non movable + * in some case these nodes will have really less non CMA * allocation memory. + * + * Note that CMA region is prohibited by allocation scope. */ - gfp_t gfp_mask = GFP_USER | __GFP_NOWARN; + gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_NOWARN; if (PageHighMem(page)) gfp_mask |= __GFP_HIGHMEM; @@ -1631,6 +1633,8 @@ static struct page *new_non_cma_page(struct page *page, unsigned long private) #ifdef CONFIG_HUGETLB_PAGE if (PageHuge(page)) { struct hstate *h = page_hstate(page); + + gfp_mask = htlb_modify_alloc_mask(h, gfp_mask); /* * We don't want to dequeue from the pool because pool pages will * mostly be from the CMA region. @@ -1645,11 +1649,6 @@ static struct page *new_non_cma_page(struct page *page, unsigned long private) */ gfp_t thp_gfpmask = GFP_TRANSHUGE | __GFP_NOWARN; - /* - * Remove the movable mask so that we don't allocate from - * CMA area again. - */ - thp_gfpmask &= ~__GFP_MOVABLE; thp = __alloc_pages_node(nid, thp_gfpmask, HPAGE_PMD_ORDER); if (!thp) return NULL; @@ -1795,7 +1794,6 @@ static long __gup_longterm_locked(struct task_struct *tsk, vmas_tmp, NULL, gup_flags); if (gup_flags & FOLL_LONGTERM) { - memalloc_nocma_restore(flags); if (rc < 0) goto out; @@ -1808,9 +1806,10 @@ static long __gup_longterm_locked(struct task_struct *tsk, rc = check_and_migrate_cma_pages(tsk, mm, start, rc, pages, vmas_tmp, gup_flags); +out: + memalloc_nocma_restore(flags); } -out: if (vmas_tmp != vmas) kfree(vmas_tmp); return rc; -- cgit v1.2.3 From bbe88753bd42b1faf1458dde8f58ff1239990436 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 11 Aug 2020 18:37:38 -0700 Subject: mm/hugetlb: make hugetlb migration callback CMA aware new_non_cma_page() in gup.c requires to allocate the new page that is not on the CMA area. new_non_cma_page() implements it by using allocation scope APIs. However, there is a work-around for hugetlb. Normal hugetlb page allocation API for migration is alloc_huge_page_nodemask(). It consists of two steps. First is dequeing from the pool. Second is, if there is no available page on the queue, allocating by using the page allocator. 
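A toy model of that two-step allocation, including the CMA-aware dequeue this patch adds to the first step, might look like the following; the page and pool types are simplified stand-ins rather than kernel structures, and the "nocma" argument stands in for checking PF_MEMALLOC_NOCMA on the current task.

#include <stdbool.h>
#include <stdio.h>

struct page { bool in_cma; bool poisoned; };

static struct page pool[] = {
        { .in_cma = true  },            /* a CMA page sits first in the free list */
        { .in_cma = false },
};

/* step 1: dequeue from the free list, skipping CMA pages when asked */
static struct page *dequeue_free_page(bool nocma)
{
        for (size_t i = 0; i < sizeof(pool) / sizeof(pool[0]); i++) {
                if (nocma && pool[i].in_cma)
                        continue;
                if (!pool[i].poisoned)
                        return &pool[i];
        }
        return NULL;
}

/* step 2: fall back to a fresh allocation from the page allocator */
static struct page *alloc_fresh_page(void)
{
        static struct page fresh;

        return &fresh;
}

static struct page *alloc_huge_page_for_migration(bool nocma)
{
        struct page *page = dequeue_free_page(nocma);

        return page ? page : alloc_fresh_page();
}

int main(void)
{
        struct page *p;

        p = alloc_huge_page_for_migration(false);
        printf("nocma off -> pool index %td\n", p - pool);
        p = alloc_huge_page_for_migration(true);
        printf("nocma on  -> pool index %td\n", p - pool);
        return 0;
}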
new_non_cma_page() can't use this API since first step (deque) isn't aware of scope API to exclude CMA area. So, new_non_cma_page() exports hugetlb internal function for the second step, alloc_migrate_huge_page(), to global scope and uses it directly. This is suboptimal since hugetlb pages on the queue cannot be utilized. This patch tries to fix this situation by making the deque function on hugetlb CMA aware. In the deque function, CMA memory is skipped if PF_MEMALLOC_NOCMA flag is found. Signed-off-by: Joonsoo Kim Signed-off-by: Andrew Morton Acked-by: Mike Kravetz Acked-by: Vlastimil Babka Acked-by: Michal Hocko Cc: "Aneesh Kumar K . V" Cc: Christoph Hellwig Cc: Naoya Horiguchi Cc: Roman Gushchin Link: http://lkml.kernel.org/r/1596180906-8442-2-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 2 -- mm/gup.c | 6 +----- mm/hugetlb.c | 11 +++++++++-- 3 files changed, 10 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index d86c82749836..d5cc5f802dd4 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -511,8 +511,6 @@ struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask, gfp_t gfp_mask); struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, unsigned long address); -struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask, - int nid, nodemask_t *nmask); int huge_add_to_page_cache(struct page *page, struct address_space *mapping, pgoff_t idx); diff --git a/mm/gup.c b/mm/gup.c index 6c20c6e37635..c55427beeb26 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1635,11 +1635,7 @@ static struct page *new_non_cma_page(struct page *page, unsigned long private) struct hstate *h = page_hstate(page); gfp_mask = htlb_modify_alloc_mask(h, gfp_mask); - /* - * We don't want to dequeue from the pool because pool pages will - * mostly be from the CMA region. - */ - return alloc_migrate_huge_page(h, gfp_mask, nid, NULL); + return alloc_huge_page_nodemask(h, nid, NULL, gfp_mask); } #endif if (PageTransHuge(page)) { diff --git a/mm/hugetlb.c b/mm/hugetlb.c index eaab9ef88e9d..a301c2d672bf 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -1040,10 +1041,16 @@ static void enqueue_huge_page(struct hstate *h, struct page *page) static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid) { struct page *page; + bool nocma = !!(current->flags & PF_MEMALLOC_NOCMA); + + list_for_each_entry(page, &h->hugepage_freelists[nid], lru) { + if (nocma && is_migrate_cma_page(page)) + continue; - list_for_each_entry(page, &h->hugepage_freelists[nid], lru) if (!PageHWPoison(page)) break; + } + /* * if 'non-isolated free hugepage' not found on the list, * the allocation fails. @@ -1935,7 +1942,7 @@ out_unlock: return page; } -struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask, +static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask) { struct page *page; -- cgit v1.2.3 From bce617edecada007aee8610fbe2c14d10b8de2f6 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Tue, 11 Aug 2020 18:37:44 -0700 Subject: mm: do page fault accounting in handle_mm_fault Patch series "mm: Page fault accounting cleanups", v5. This is v5 of the pf accounting cleanup series. 
It originates from Gerald Schaefer's report on an issue a week ago regarding incorrect page fault accounting for retried page faults after commit 4064b9827063 ("mm: allow VM_FAULT_RETRY for multiple times"): https://lore.kernel.org/lkml/20200610174811.44b94525@thinkpad/ What this series does: - Correct page fault accounting: we do accounting for a page fault (no matter whether it's from #PF handling, or gup, or anything else) only with the one that completed the fault. For example, page fault retries should not be counted in page fault counters. The same applies to the perf events. - Unify definition of PERF_COUNT_SW_PAGE_FAULTS: currently this perf event is used in an ad hoc way across different archs. Case (1): for many archs it's done at the entry of a page fault handler, so that it will also cover e.g. erroneous faults. Case (2): for some other archs, it is only accounted when the page fault is resolved successfully. Case (3): there are still quite a few archs that have not enabled this perf event. Since this series will touch nearly all the archs, we unify this perf event to always follow case (1), which is the one that makes the most sense. And since we moved the accounting into handle_mm_fault, the other two MAJ/MIN perf events are well taken care of naturally. - Unify definition of "major faults": the definition of "major fault" is slightly changed when used in accounting (not VM_FAULT_MAJOR). More information in patch 1. - Always account the page fault onto the one that triggered the page fault. This does not matter much for #PF handling, but mostly for gup. More information on this in patch 25. Patchset layout: Patch 1: Introduce the accounting in handle_mm_fault(), not enabled. Patch 2-23: Enable the new accounting for arch #PF handlers one by one. Patch 24: Enable the new accounting for the rest of the outliers (gup, iommu, etc.) Patch 25: Clean up the GUP task_struct pointer since it's not needed any more This patch (of 25): This is a preparation patch to move page fault accounting into the general code in handle_mm_fault(). This includes both the per-task flt_maj/flt_min counters, and the major/minor page fault perf events. To do this, the pt_regs pointer is passed into handle_mm_fault(). PERF_COUNT_SW_PAGE_FAULTS should still be kept in per-arch page fault handlers. So far, all the pt_regs pointers passed into handle_mm_fault() are NULL, which means this patch should have no intended functional change. Suggested-by: Linus Torvalds Signed-off-by: Peter Xu Signed-off-by: Andrew Morton Cc: Albert Ou Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Benjamin Herrenschmidt Cc: Borislav Petkov Cc: Brian Cain Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Chris Zankel Cc: Dave Hansen Cc: David S. Miller Cc: Geert Uytterhoeven Cc: Gerald Schaefer Cc: Greentime Hu Cc: Guo Ren Cc: Heiko Carstens Cc: Helge Deller Cc: H. Peter Anvin Cc: Ingo Molnar Cc: Ivan Kokshaysky Cc: James E.J.
Bottomley Cc: John Hubbard Cc: Jonas Bonn Cc: Ley Foon Tan Cc: "Luck, Tony" Cc: Matt Turner Cc: Max Filippov Cc: Michael Ellerman Cc: Michal Simek Cc: Nick Hu Cc: Palmer Dabbelt Cc: Paul Mackerras Cc: Paul Walmsley Cc: Pekka Enberg Cc: Peter Zijlstra Cc: Richard Henderson Cc: Rich Felker Cc: Russell King Cc: Stafford Horne Cc: Stefan Kristiansson Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Vincent Chen Cc: Vineet Gupta Cc: Will Deacon Cc: Yoshinori Sato Link: http://lkml.kernel.org/r/20200707225021.200906-1-peterx@redhat.com Link: http://lkml.kernel.org/r/20200707225021.200906-2-peterx@redhat.com Signed-off-by: Linus Torvalds --- arch/alpha/mm/fault.c | 2 +- arch/arc/mm/fault.c | 2 +- arch/arm/mm/fault.c | 2 +- arch/arm64/mm/fault.c | 2 +- arch/csky/mm/fault.c | 3 +- arch/hexagon/mm/vm_fault.c | 2 +- arch/ia64/mm/fault.c | 2 +- arch/m68k/mm/fault.c | 2 +- arch/microblaze/mm/fault.c | 2 +- arch/mips/mm/fault.c | 2 +- arch/nds32/mm/fault.c | 2 +- arch/nios2/mm/fault.c | 2 +- arch/openrisc/mm/fault.c | 2 +- arch/parisc/mm/fault.c | 2 +- arch/powerpc/mm/copro_fault.c | 2 +- arch/powerpc/mm/fault.c | 2 +- arch/riscv/mm/fault.c | 2 +- arch/s390/mm/fault.c | 2 +- arch/sh/mm/fault.c | 2 +- arch/sparc/mm/fault_32.c | 4 +-- arch/sparc/mm/fault_64.c | 2 +- arch/um/kernel/trap.c | 2 +- arch/x86/mm/fault.c | 2 +- arch/xtensa/mm/fault.c | 2 +- drivers/iommu/amd/iommu_v2.c | 2 +- drivers/iommu/intel/svm.c | 3 +- include/linux/mm.h | 7 +++-- mm/gup.c | 4 +-- mm/hmm.c | 3 +- mm/ksm.c | 3 +- mm/memory.c | 64 ++++++++++++++++++++++++++++++++++++++++++- 31 files changed, 103 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index c2303a8c2b9f..1983e43a5e2f 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -148,7 +148,7 @@ retry: /* If for any reason at all we couldn't handle the fault, make sure we exit gracefully rather than endlessly redo the fault. 
*/ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if (fault_signal_pending(fault, regs)) return; diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index 7287c793d1c9..587dea524e6b 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c @@ -130,7 +130,7 @@ retry: goto bad_area; } - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); /* Quick path to respond to signals */ if (fault_signal_pending(fault, regs)) { diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index c6550eddfce1..01a8e0f8fef7 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -224,7 +224,7 @@ good_area: goto out; } - return handle_mm_fault(vma, addr & PAGE_MASK, flags); + return handle_mm_fault(vma, addr & PAGE_MASK, flags, NULL); check_stack: /* Don't allow expansion below FIRST_USER_ADDRESS */ diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 8afb238ff335..be29f4076fe3 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -428,7 +428,7 @@ static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr, */ if (!(vma->vm_flags & vm_flags)) return VM_FAULT_BADACCESS; - return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags); + return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags, NULL); } static bool is_el0_instruction_abort(unsigned int esr) diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c index b1dce9f2f04d..b252e6e4d32f 100644 --- a/arch/csky/mm/fault.c +++ b/arch/csky/mm/fault.c @@ -150,7 +150,8 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0); + fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0, + NULL); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c index cd3808f96b93..f12f330e7946 100644 --- a/arch/hexagon/mm/vm_fault.c +++ b/arch/hexagon/mm/vm_fault.c @@ -88,7 +88,7 @@ good_area: break; } - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if (fault_signal_pending(fault, regs)) return; diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index 3a4dec334cc5..abf2808f9b4b 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -143,7 +143,7 @@ retry: * sure we exit gracefully rather than endlessly redo the * fault. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if (fault_signal_pending(fault, regs)) return; diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index 508abb63da67..08b35a318ebe 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -134,7 +134,7 @@ good_area: * the fault. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); pr_debug("handle_mm_fault returns %x\n", fault); if (fault_signal_pending(fault, regs)) diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c index a2bfe587b491..1a3d4c4ca28b 100644 --- a/arch/microblaze/mm/fault.c +++ b/arch/microblaze/mm/fault.c @@ -214,7 +214,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. 
*/ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if (fault_signal_pending(fault, regs)) return; diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 01b168a90434..b1db39784db9 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -152,7 +152,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if (fault_signal_pending(fault, regs)) return; diff --git a/arch/nds32/mm/fault.c b/arch/nds32/mm/fault.c index 8fb73f6401a0..d0ecc8fb5b23 100644 --- a/arch/nds32/mm/fault.c +++ b/arch/nds32/mm/fault.c @@ -206,7 +206,7 @@ good_area: * the fault. */ - fault = handle_mm_fault(vma, addr, flags); + fault = handle_mm_fault(vma, addr, flags, NULL); /* * If we need to retry but a fatal signal is pending, handle the diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c index 4112ef0e247e..86beb9a2698e 100644 --- a/arch/nios2/mm/fault.c +++ b/arch/nios2/mm/fault.c @@ -131,7 +131,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if (fault_signal_pending(fault, regs)) return; diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c index d2224ccca294..3daa491d1edb 100644 --- a/arch/openrisc/mm/fault.c +++ b/arch/openrisc/mm/fault.c @@ -159,7 +159,7 @@ good_area: * the fault. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if (fault_signal_pending(fault, regs)) return; diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 66ac0719bd49..e32d06928c24 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -302,7 +302,7 @@ good_area: * fault. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if (fault_signal_pending(fault, regs)) return; diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c index b83abbead4a2..2d0276abe0a6 100644 --- a/arch/powerpc/mm/copro_fault.c +++ b/arch/powerpc/mm/copro_fault.c @@ -64,7 +64,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea, } ret = 0; - *flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0); + *flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0, NULL); if (unlikely(*flt & VM_FAULT_ERROR)) { if (*flt & VM_FAULT_OOM) { ret = -ENOMEM; diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 925a7231abb3..c6a5225a3521 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -511,7 +511,7 @@ retry: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); major |= fault & VM_FAULT_MAJOR; diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index 5873835a3e6b..30c1124d0fb6 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -109,7 +109,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. 
*/ - fault = handle_mm_fault(vma, addr, flags); + fault = handle_mm_fault(vma, addr, flags, NULL); /* * If we need to retry but a fatal signal is pending, handle the diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index aebf9183bedd..ad783aaaf649 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -476,7 +476,7 @@ retry: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if (fault_signal_pending(fault, regs)) { fault = VM_FAULT_SIGNAL; if (flags & FAULT_FLAG_RETRY_NOWAIT) diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c index fbe1f2fe9a8c..3c0a11827f7e 100644 --- a/arch/sh/mm/fault.c +++ b/arch/sh/mm/fault.c @@ -482,7 +482,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR))) if (mm_fault_error(regs, error_code, address, fault)) diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index cfef656eda0f..06af03db4417 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c @@ -234,7 +234,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if (fault_signal_pending(fault, regs)) return; @@ -410,7 +410,7 @@ good_area: if (!(vma->vm_flags & (VM_READ | VM_EXEC))) goto bad_area; } - switch (handle_mm_fault(vma, address, flags)) { + switch (handle_mm_fault(vma, address, flags, NULL)) { case VM_FAULT_SIGBUS: case VM_FAULT_OOM: goto do_sigbus; diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index a3806614e4dc..9ebee14ee893 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -422,7 +422,7 @@ good_area: goto bad_area; } - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if (fault_signal_pending(fault, regs)) goto exit_exception; diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index 2b3afa354a90..8d9870d76da1 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -71,7 +71,7 @@ good_area: do { vm_fault_t fault; - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) goto out_nosemaphore; diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 0c7643d9f7cb..e1bf5555d80a 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1291,7 +1291,7 @@ good_area: * userland). The return to userland is identified whenever * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); major |= fault & VM_FAULT_MAJOR; /* Quick path to respond to signals */ diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c index c128dcc7c85b..e72c8c1359a6 100644 --- a/arch/xtensa/mm/fault.c +++ b/arch/xtensa/mm/fault.c @@ -107,7 +107,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. 
*/ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if (fault_signal_pending(fault, regs)) return; diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c index e4b025c5637c..c259108ab6dd 100644 --- a/drivers/iommu/amd/iommu_v2.c +++ b/drivers/iommu/amd/iommu_v2.c @@ -495,7 +495,7 @@ static void do_fault(struct work_struct *work) if (access_error(vma, fault)) goto out; - ret = handle_mm_fault(vma, address, flags); + ret = handle_mm_fault(vma, address, flags, NULL); out: mmap_read_unlock(mm); diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index 6c87c807a0ab..5ae59a6ad681 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -872,7 +872,8 @@ static irqreturn_t prq_event_thread(int irq, void *d) goto invalid; ret = handle_mm_fault(vma, address, - req->wr_req ? FAULT_FLAG_WRITE : 0); + req->wr_req ? FAULT_FLAG_WRITE : 0, + NULL); if (ret & VM_FAULT_ERROR) goto invalid; diff --git a/include/linux/mm.h b/include/linux/mm.h index f97b10117d44..ec0ffb423769 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -38,6 +38,7 @@ struct file_ra_state; struct user_struct; struct writeback_control; struct bdi_writeback; +struct pt_regs; void init_mm_internals(void); @@ -1658,7 +1659,8 @@ int invalidate_inode_page(struct page *page); #ifdef CONFIG_MMU extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma, - unsigned long address, unsigned int flags); + unsigned long address, unsigned int flags, + struct pt_regs *regs); extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked); @@ -1668,7 +1670,8 @@ void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows); #else static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma, - unsigned long address, unsigned int flags) + unsigned long address, unsigned int flags, + struct pt_regs *regs) { /* should never happen if there's no MMU */ BUG(); diff --git a/mm/gup.c b/mm/gup.c index e9d1d0cc18f0..ae7121d729fa 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -884,7 +884,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, fault_flags |= FAULT_FLAG_TRIED; } - ret = handle_mm_fault(vma, address, fault_flags); + ret = handle_mm_fault(vma, address, fault_flags, NULL); if (ret & VM_FAULT_ERROR) { int err = vm_fault_to_errno(ret, *flags); @@ -1238,7 +1238,7 @@ retry: fatal_signal_pending(current)) return -EINTR; - ret = handle_mm_fault(vma, address, fault_flags); + ret = handle_mm_fault(vma, address, fault_flags, NULL); major |= ret & VM_FAULT_MAJOR; if (ret & VM_FAULT_ERROR) { int err = vm_fault_to_errno(ret, 0); diff --git a/mm/hmm.c b/mm/hmm.c index bb279319bf40..943cb2ba4442 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -75,7 +75,8 @@ static int hmm_vma_fault(unsigned long addr, unsigned long end, } for (; addr < end; addr += PAGE_SIZE) - if (handle_mm_fault(vma, addr, fault_flags) & VM_FAULT_ERROR) + if (handle_mm_fault(vma, addr, fault_flags, NULL) & + VM_FAULT_ERROR) return -EFAULT; return -EBUSY; } diff --git a/mm/ksm.c b/mm/ksm.c index 217842a66912..0aa2247bddd7 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -480,7 +480,8 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr) break; if (PageKsm(page)) ret = handle_mm_fault(vma, addr, - FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE); + FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE, + NULL); else ret = VM_FAULT_WRITE; put_page(page); diff 
--git a/mm/memory.c b/mm/memory.c index 325bb575e7ec..9b7d35734caa 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -71,6 +71,8 @@ #include #include #include +#include +#include #include @@ -4356,6 +4358,64 @@ retry_pud: return handle_pte_fault(&vmf); } +/** + * mm_account_fault - Do page fault accountings + * + * @regs: the pt_regs struct pointer. When set to NULL, will skip accounting + * of perf event counters, but we'll still do the per-task accounting to + * the task who triggered this page fault. + * @address: the faulted address. + * @flags: the fault flags. + * @ret: the fault retcode. + * + * This will take care of most of the page fault accountings. Meanwhile, it + * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter + * updates. However note that the handling of PERF_COUNT_SW_PAGE_FAULTS should + * still be in per-arch page fault handlers at the entry of page fault. + */ +static inline void mm_account_fault(struct pt_regs *regs, + unsigned long address, unsigned int flags, + vm_fault_t ret) +{ + bool major; + + /* + * We don't do accounting for some specific faults: + * + * - Unsuccessful faults (e.g. when the address wasn't valid). That + * includes arch_vma_access_permitted() failing before reaching here. + * So this is not a "this many hardware page faults" counter. We + * should use the hw profiling for that. + * + * - Incomplete faults (VM_FAULT_RETRY). They will only be counted + * once they're completed. + */ + if (ret & (VM_FAULT_ERROR | VM_FAULT_RETRY)) + return; + + /* + * We define the fault as a major fault when the final successful fault + * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't + * handle it immediately previously). + */ + major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED); + + /* + * If the fault is done for GUP, regs will be NULL, and we will skip + * the fault accounting. + */ + if (!regs) + return; + + if (major) { + current->maj_flt++; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); + } else { + current->min_flt++; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); + } +} + /* * By the time we get here, we already hold the mm semaphore * @@ -4363,7 +4423,7 @@ retry_pud: * return value. See filemap_fault() and __lock_page_or_retry(). */ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, - unsigned int flags) + unsigned int flags, struct pt_regs *regs) { vm_fault_t ret; @@ -4404,6 +4464,8 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, mem_cgroup_oom_synchronize(false); } + mm_account_fault(regs, address, flags, ret); + return ret; } EXPORT_SYMBOL_GPL(handle_mm_fault); -- cgit v1.2.3 From 64019a2e467a288a16b65ab55ddcbf58c1b00187 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Tue, 11 Aug 2020 18:39:01 -0700 Subject: mm/gup: remove task_struct pointer for all gup code After the cleanup of page fault accounting, gup does not need to pass task_struct around any more. Remove that parameter in the whole gup stack. 
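The accounting rules introduced by mm_account_fault() in the previous patch can be condensed into the stand-alone sketch below; the flag bit values are illustrative and only the decision logic mirrors the kernel code shown above.

#include <stdbool.h>
#include <stdio.h>

/* illustrative bit values, not the kernel's */
#define VM_FAULT_ERROR   0x01u
#define VM_FAULT_RETRY   0x02u
#define VM_FAULT_MAJOR   0x04u
#define FAULT_FLAG_TRIED 0x08u

enum account { ACCT_NONE, ACCT_MINOR, ACCT_MAJOR };

static enum account classify_fault(unsigned int ret, unsigned int flags,
                                   bool have_regs)
{
        /* unsuccessful or not-yet-complete faults are not counted */
        if (ret & (VM_FAULT_ERROR | VM_FAULT_RETRY))
                return ACCT_NONE;
        /* NULL regs (the gup case) skips the accounting in this version */
        if (!have_regs)
                return ACCT_NONE;
        /* major = the final attempt was major, or an earlier attempt retried */
        if ((ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED))
                return ACCT_MAJOR;
        return ACCT_MINOR;
}

int main(void)
{
        printf("%d\n", classify_fault(VM_FAULT_RETRY, 0, true));   /* 0: skipped */
        printf("%d\n", classify_fault(0, FAULT_FLAG_TRIED, true)); /* 2: major   */
        printf("%d\n", classify_fault(0, 0, true));                /* 1: minor   */
        return 0;
}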
Signed-off-by: Peter Xu Signed-off-by: Andrew Morton Reviewed-by: John Hubbard Link: http://lkml.kernel.org/r/20200707225021.200906-26-peterx@redhat.com Signed-off-by: Linus Torvalds --- arch/arc/kernel/process.c | 2 +- arch/s390/kvm/interrupt.c | 2 +- arch/s390/kvm/kvm-s390.c | 2 +- arch/s390/kvm/priv.c | 8 +-- arch/s390/mm/gmap.c | 4 +- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 2 +- drivers/infiniband/core/umem_odp.c | 2 +- drivers/vfio/vfio_iommu_type1.c | 4 +- fs/exec.c | 2 +- include/linux/mm.h | 9 ++- kernel/events/uprobes.c | 6 +- kernel/futex.c | 2 +- mm/gup.c | 101 ++++++++++++---------------- mm/memory.c | 2 +- mm/process_vm_access.c | 2 +- security/tomoyo/domain.c | 2 +- virt/kvm/async_pf.c | 2 +- virt/kvm/kvm_main.c | 2 +- 18 files changed, 69 insertions(+), 87 deletions(-) (limited to 'include') diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c index e12c80d71b78..efeba1fe7252 100644 --- a/arch/arc/kernel/process.c +++ b/arch/arc/kernel/process.c @@ -91,7 +91,7 @@ fault: goto fail; mmap_read_lock(current->mm); - ret = fixup_user_fault(current, current->mm, (unsigned long) uaddr, + ret = fixup_user_fault(current->mm, (unsigned long) uaddr, FAULT_FLAG_WRITE, NULL); mmap_read_unlock(current->mm); diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 1608fd99bbee..2f177298c663 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -2768,7 +2768,7 @@ static struct page *get_map_page(struct kvm *kvm, u64 uaddr) struct page *page = NULL; mmap_read_lock(kvm->mm); - get_user_pages_remote(NULL, kvm->mm, uaddr, 1, FOLL_WRITE, + get_user_pages_remote(kvm->mm, uaddr, 1, FOLL_WRITE, &page, NULL, NULL); mmap_read_unlock(kvm->mm); return page; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 66da278a67fb..6b74b92c1a58 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1892,7 +1892,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) r = set_guest_storage_key(current->mm, hva, keys[i], 0); if (r) { - r = fixup_user_fault(current, current->mm, hva, + r = fixup_user_fault(current->mm, hva, FAULT_FLAG_WRITE, &unlocked); if (r) break; diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 2f721a923b54..cd74989ce0b0 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -273,7 +273,7 @@ retry: rc = get_guest_storage_key(current->mm, vmaddr, &key); if (rc) { - rc = fixup_user_fault(current, current->mm, vmaddr, + rc = fixup_user_fault(current->mm, vmaddr, FAULT_FLAG_WRITE, &unlocked); if (!rc) { mmap_read_unlock(current->mm); @@ -319,7 +319,7 @@ retry: mmap_read_lock(current->mm); rc = reset_guest_reference_bit(current->mm, vmaddr); if (rc < 0) { - rc = fixup_user_fault(current, current->mm, vmaddr, + rc = fixup_user_fault(current->mm, vmaddr, FAULT_FLAG_WRITE, &unlocked); if (!rc) { mmap_read_unlock(current->mm); @@ -390,7 +390,7 @@ static int handle_sske(struct kvm_vcpu *vcpu) m3 & SSKE_MC); if (rc < 0) { - rc = fixup_user_fault(current, current->mm, vmaddr, + rc = fixup_user_fault(current->mm, vmaddr, FAULT_FLAG_WRITE, &unlocked); rc = !rc ? -EAGAIN : rc; } @@ -1094,7 +1094,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) rc = cond_set_guest_storage_key(current->mm, vmaddr, key, NULL, nq, mr, mc); if (rc < 0) { - rc = fixup_user_fault(current, current->mm, vmaddr, + rc = fixup_user_fault(current->mm, vmaddr, FAULT_FLAG_WRITE, &unlocked); rc = !rc ? 
-EAGAIN : rc; } diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index 190357ff86b3..8747487c50a8 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -649,7 +649,7 @@ retry: rc = vmaddr; goto out_up; } - if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags, + if (fixup_user_fault(gmap->mm, vmaddr, fault_flags, &unlocked)) { rc = -EFAULT; goto out_up; @@ -879,7 +879,7 @@ static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr, BUG_ON(gmap_is_shadow(gmap)); fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0; - if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked)) + if (fixup_user_fault(mm, vmaddr, fault_flags, &unlocked)) return -EFAULT; if (unlocked) /* lost mmap_lock, caller has to retry __gmap_translate */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index e946032b13e4..2c2bf24140c9 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -469,7 +469,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) locked = 1; } ret = pin_user_pages_remote - (work->task, mm, + (mm, obj->userptr.ptr + pinned * PAGE_SIZE, npages - pinned, flags, diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index 5e32f61a2fe4..cc6b4befde7c 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -439,7 +439,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt, * complex (and doesn't gain us much performance in most use * cases). */ - npages = get_user_pages_remote(owning_process, owning_mm, + npages = get_user_pages_remote(owning_mm, user_virt, gup_num_pages, flags, local_page_list, NULL, NULL); mmap_read_unlock(owning_mm); diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 5e556ac9102a..9d41105bfd01 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -425,7 +425,7 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, if (ret) { bool unlocked = false; - ret = fixup_user_fault(NULL, mm, vaddr, + ret = fixup_user_fault(mm, vaddr, FAULT_FLAG_REMOTE | (write_fault ? FAULT_FLAG_WRITE : 0), &unlocked); @@ -453,7 +453,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, flags |= FOLL_WRITE; mmap_read_lock(mm); - ret = pin_user_pages_remote(NULL, mm, vaddr, 1, flags | FOLL_LONGTERM, + ret = pin_user_pages_remote(mm, vaddr, 1, flags | FOLL_LONGTERM, page, NULL, NULL); if (ret == 1) { *pfn = page_to_pfn(page[0]); diff --git a/fs/exec.c b/fs/exec.c index a57d9785832b..a91003e28eaa 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -217,7 +217,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, * We are doing an exec(). 'current' is the process * doing the exec and bprm->mm is the new process's mm. 
*/ - ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags, + ret = get_user_pages_remote(bprm->mm, pos, 1, gup_flags, &page, NULL, NULL); if (ret <= 0) return NULL; diff --git a/include/linux/mm.h b/include/linux/mm.h index ec0ffb423769..e7602a3bcef1 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1661,7 +1661,7 @@ int invalidate_inode_page(struct page *page); extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags, struct pt_regs *regs); -extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, +extern int fixup_user_fault(struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked); void unmap_mapping_pages(struct address_space *mapping, @@ -1677,8 +1677,7 @@ static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma, BUG(); return VM_FAULT_SIGBUS; } -static inline int fixup_user_fault(struct task_struct *tsk, - struct mm_struct *mm, unsigned long address, +static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked) { /* should never happen if there's no MMU */ @@ -1704,11 +1703,11 @@ extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, unsigned long addr, void *buf, int len, unsigned int gup_flags); -long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, +long get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked); -long pin_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, +long pin_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked); diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 49047d479c57..649fd53dc9ad 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -376,7 +376,7 @@ __update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d) if (!vaddr || !d) return -EINVAL; - ret = get_user_pages_remote(NULL, mm, vaddr, 1, + ret = get_user_pages_remote(mm, vaddr, 1, FOLL_WRITE, &page, &vma, NULL); if (unlikely(ret <= 0)) { /* @@ -477,7 +477,7 @@ retry: if (is_register) gup_flags |= FOLL_SPLIT_PMD; /* Read the page with vaddr into memory */ - ret = get_user_pages_remote(NULL, mm, vaddr, 1, gup_flags, + ret = get_user_pages_remote(mm, vaddr, 1, gup_flags, &old_page, &vma, NULL); if (ret <= 0) return ret; @@ -2029,7 +2029,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr) * but we treat this as a 'remote' access since it is * essentially a kernel access to the memory. 
*/ - result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page, + result = get_user_pages_remote(mm, vaddr, 1, FOLL_FORCE, &page, NULL, NULL); if (result < 0) return result; diff --git a/kernel/futex.c b/kernel/futex.c index 83404124b77b..61e8153e6c76 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -678,7 +678,7 @@ static int fault_in_user_writeable(u32 __user *uaddr) int ret; mmap_read_lock(mm); - ret = fixup_user_fault(current, mm, (unsigned long)uaddr, + ret = fixup_user_fault(mm, (unsigned long)uaddr, FAULT_FLAG_WRITE, NULL); mmap_read_unlock(mm); diff --git a/mm/gup.c b/mm/gup.c index d5d44c68fa19..39e58df6925d 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -859,7 +859,7 @@ unmap: * does not include FOLL_NOWAIT, the mmap_lock may be released. If it * is, *@locked will be set to 0 and -EBUSY returned. */ -static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, +static int faultin_page(struct vm_area_struct *vma, unsigned long address, unsigned int *flags, int *locked) { unsigned int fault_flags = 0; @@ -962,7 +962,6 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) /** * __get_user_pages() - pin user pages in memory - * @tsk: task_struct of target task * @mm: mm_struct of target mm * @start: starting user address * @nr_pages: number of pages from start to pin @@ -1021,7 +1020,7 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) * instead of __get_user_pages. __get_user_pages should be used only if * you need some special @gup_flags. */ -static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, +static long __get_user_pages(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked) @@ -1103,8 +1102,7 @@ retry: page = follow_page_mask(vma, start, foll_flags, &ctx); if (!page) { - ret = faultin_page(tsk, vma, start, &foll_flags, - locked); + ret = faultin_page(vma, start, &foll_flags, locked); switch (ret) { case 0: goto retry; @@ -1178,8 +1176,6 @@ static bool vma_permits_fault(struct vm_area_struct *vma, /** * fixup_user_fault() - manually resolve a user page fault - * @tsk: the task_struct to use for page fault accounting, or - * NULL if faults are not to be recorded. * @mm: mm_struct of target mm * @address: user address * @fault_flags:flags to pass down to handle_mm_fault() @@ -1207,7 +1203,7 @@ static bool vma_permits_fault(struct vm_area_struct *vma, * This function will not return with an unlocked mmap_lock. So it has not the * same semantics wrt the @mm->mmap_lock as does filemap_fault(). 
*/ -int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, +int fixup_user_fault(struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked) { @@ -1256,8 +1252,7 @@ EXPORT_SYMBOL_GPL(fixup_user_fault); * Please note that this function, unlike __get_user_pages will not * return 0 for nr_pages > 0 without FOLL_NOWAIT */ -static __always_inline long __get_user_pages_locked(struct task_struct *tsk, - struct mm_struct *mm, +static __always_inline long __get_user_pages_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, @@ -1290,7 +1285,7 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk, pages_done = 0; lock_dropped = false; for (;;) { - ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages, + ret = __get_user_pages(mm, start, nr_pages, flags, pages, vmas, locked); if (!locked) /* VM_FAULT_RETRY couldn't trigger, bypass */ @@ -1350,7 +1345,7 @@ retry: } *locked = 1; - ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED, + ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED, pages, NULL, locked); if (!*locked) { /* Continue to retry until we succeeded */ @@ -1437,7 +1432,7 @@ long populate_vma_page_range(struct vm_area_struct *vma, * We made sure addr is within a VMA, so the following will * not result in a stack expansion that recurses back here. */ - return __get_user_pages(current, mm, start, nr_pages, gup_flags, + return __get_user_pages(mm, start, nr_pages, gup_flags, NULL, NULL, locked); } @@ -1521,7 +1516,7 @@ struct page *get_dump_page(unsigned long addr) struct vm_area_struct *vma; struct page *page; - if (__get_user_pages(current, current->mm, addr, 1, + if (__get_user_pages(current->mm, addr, 1, FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma, NULL) < 1) return NULL; @@ -1530,8 +1525,7 @@ struct page *get_dump_page(unsigned long addr) } #endif /* CONFIG_ELF_CORE */ #else /* CONFIG_MMU */ -static long __get_user_pages_locked(struct task_struct *tsk, - struct mm_struct *mm, unsigned long start, +static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, struct vm_area_struct **vmas, int *locked, unsigned int foll_flags) @@ -1596,8 +1590,7 @@ static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages) } #ifdef CONFIG_CMA -static long check_and_migrate_cma_pages(struct task_struct *tsk, - struct mm_struct *mm, +static long check_and_migrate_cma_pages(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, @@ -1675,7 +1668,7 @@ check_again: * again migrating any new CMA pages which we failed to isolate * earlier. */ - ret = __get_user_pages_locked(tsk, mm, start, nr_pages, + ret = __get_user_pages_locked(mm, start, nr_pages, pages, vmas, NULL, gup_flags); @@ -1689,8 +1682,7 @@ check_again: return ret; } #else -static long check_and_migrate_cma_pages(struct task_struct *tsk, - struct mm_struct *mm, +static long check_and_migrate_cma_pages(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, @@ -1705,8 +1697,7 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk, * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which * allows us to process the FOLL_LONGTERM flag. 
*/ -static long __gup_longterm_locked(struct task_struct *tsk, - struct mm_struct *mm, +static long __gup_longterm_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, @@ -1731,7 +1722,7 @@ static long __gup_longterm_locked(struct task_struct *tsk, flags = memalloc_nocma_save(); } - rc = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, + rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas_tmp, NULL, gup_flags); if (gup_flags & FOLL_LONGTERM) { @@ -1745,7 +1736,7 @@ static long __gup_longterm_locked(struct task_struct *tsk, goto out; } - rc = check_and_migrate_cma_pages(tsk, mm, start, rc, pages, + rc = check_and_migrate_cma_pages(mm, start, rc, pages, vmas_tmp, gup_flags); out: memalloc_nocma_restore(flags); @@ -1756,22 +1747,20 @@ out: return rc; } #else /* !CONFIG_FS_DAX && !CONFIG_CMA */ -static __always_inline long __gup_longterm_locked(struct task_struct *tsk, - struct mm_struct *mm, +static __always_inline long __gup_longterm_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, struct vm_area_struct **vmas, unsigned int flags) { - return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas, + return __get_user_pages_locked(mm, start, nr_pages, pages, vmas, NULL, flags); } #endif /* CONFIG_FS_DAX || CONFIG_CMA */ #ifdef CONFIG_MMU -static long __get_user_pages_remote(struct task_struct *tsk, - struct mm_struct *mm, +static long __get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked) @@ -1790,20 +1779,18 @@ static long __get_user_pages_remote(struct task_struct *tsk, * This will check the vmas (even if our vmas arg is NULL) * and return -ENOTSUPP if DAX isn't allowed in this case: */ - return __gup_longterm_locked(tsk, mm, start, nr_pages, pages, + return __gup_longterm_locked(mm, start, nr_pages, pages, vmas, gup_flags | FOLL_TOUCH | FOLL_REMOTE); } - return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas, + return __get_user_pages_locked(mm, start, nr_pages, pages, vmas, locked, gup_flags | FOLL_TOUCH | FOLL_REMOTE); } /** * get_user_pages_remote() - pin user pages in memory - * @tsk: the task_struct to use for page fault accounting, or - * NULL if faults are not to be recorded. * @mm: mm_struct of target mm * @start: starting user address * @nr_pages: number of pages from start to pin @@ -1862,7 +1849,7 @@ static long __get_user_pages_remote(struct task_struct *tsk, * should use get_user_pages_remote because it cannot pass * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault. 
*/ -long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, +long get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked) @@ -1874,13 +1861,13 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, if (WARN_ON_ONCE(gup_flags & FOLL_PIN)) return -EINVAL; - return __get_user_pages_remote(tsk, mm, start, nr_pages, gup_flags, + return __get_user_pages_remote(mm, start, nr_pages, gup_flags, pages, vmas, locked); } EXPORT_SYMBOL(get_user_pages_remote); #else /* CONFIG_MMU */ -long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, +long get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked) @@ -1888,8 +1875,7 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, return 0; } -static long __get_user_pages_remote(struct task_struct *tsk, - struct mm_struct *mm, +static long __get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked) @@ -1909,11 +1895,10 @@ static long __get_user_pages_remote(struct task_struct *tsk, * @vmas: array of pointers to vmas corresponding to each page. * Or NULL if the caller does not require them. * - * This is the same as get_user_pages_remote(), just with a - * less-flexible calling convention where we assume that the task - * and mm being operated on are the current task's and don't allow - * passing of a locked parameter. We also obviously don't pass - * FOLL_REMOTE in here. + * This is the same as get_user_pages_remote(), just with a less-flexible + * calling convention where we assume that the mm being operated on belongs to + * the current task, and doesn't allow passing of a locked parameter. We also + * obviously don't pass FOLL_REMOTE in here. 
*/ long get_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, @@ -1926,7 +1911,7 @@ long get_user_pages(unsigned long start, unsigned long nr_pages, if (WARN_ON_ONCE(gup_flags & FOLL_PIN)) return -EINVAL; - return __gup_longterm_locked(current, current->mm, start, nr_pages, + return __gup_longterm_locked(current->mm, start, nr_pages, pages, vmas, gup_flags | FOLL_TOUCH); } EXPORT_SYMBOL(get_user_pages); @@ -1936,7 +1921,7 @@ EXPORT_SYMBOL(get_user_pages); * * mmap_read_lock(mm); * do_something() - * get_user_pages(tsk, mm, ..., pages, NULL); + * get_user_pages(mm, ..., pages, NULL); * mmap_read_unlock(mm); * * to: @@ -1944,7 +1929,7 @@ EXPORT_SYMBOL(get_user_pages); * int locked = 1; * mmap_read_lock(mm); * do_something() - * get_user_pages_locked(tsk, mm, ..., pages, &locked); + * get_user_pages_locked(mm, ..., pages, &locked); * if (locked) * mmap_read_unlock(mm); * @@ -1982,7 +1967,7 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages, if (WARN_ON_ONCE(gup_flags & FOLL_PIN)) return -EINVAL; - return __get_user_pages_locked(current, current->mm, start, nr_pages, + return __get_user_pages_locked(current->mm, start, nr_pages, pages, NULL, locked, gup_flags | FOLL_TOUCH); } @@ -1992,12 +1977,12 @@ EXPORT_SYMBOL(get_user_pages_locked); * get_user_pages_unlocked() is suitable to replace the form: * * mmap_read_lock(mm); - * get_user_pages(tsk, mm, ..., pages, NULL); + * get_user_pages(mm, ..., pages, NULL); * mmap_read_unlock(mm); * * with: * - * get_user_pages_unlocked(tsk, mm, ..., pages); + * get_user_pages_unlocked(mm, ..., pages); * * It is functionally equivalent to get_user_pages_fast so * get_user_pages_fast should be used instead if specific gup_flags @@ -2020,7 +2005,7 @@ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, return -EINVAL; mmap_read_lock(mm); - ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL, + ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL, &locked, gup_flags | FOLL_TOUCH); if (locked) mmap_read_unlock(mm); @@ -2665,7 +2650,7 @@ static int __gup_longterm_unlocked(unsigned long start, int nr_pages, */ if (gup_flags & FOLL_LONGTERM) { mmap_read_lock(current->mm); - ret = __gup_longterm_locked(current, current->mm, + ret = __gup_longterm_locked(current->mm, start, nr_pages, pages, NULL, gup_flags); mmap_read_unlock(current->mm); @@ -2908,10 +2893,8 @@ int pin_user_pages_fast_only(unsigned long start, int nr_pages, EXPORT_SYMBOL_GPL(pin_user_pages_fast_only); /** - * pin_user_pages_remote() - pin pages of a remote process (task != current) + * pin_user_pages_remote() - pin pages of a remote process * - * @tsk: the task_struct to use for page fault accounting, or - * NULL if faults are not to be recorded. * @mm: mm_struct of target mm * @start: starting user address * @nr_pages: number of pages from start to pin @@ -2932,7 +2915,7 @@ EXPORT_SYMBOL_GPL(pin_user_pages_fast_only); * FOLL_PIN means that the pages must be released via unpin_user_page(). Please * see Documentation/core-api/pin_user_pages.rst for details. 
*/ -long pin_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, +long pin_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked) @@ -2942,7 +2925,7 @@ long pin_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, return -EINVAL; gup_flags |= FOLL_PIN; - return __get_user_pages_remote(tsk, mm, start, nr_pages, gup_flags, + return __get_user_pages_remote(mm, start, nr_pages, gup_flags, pages, vmas, locked); } EXPORT_SYMBOL(pin_user_pages_remote); @@ -2974,7 +2957,7 @@ long pin_user_pages(unsigned long start, unsigned long nr_pages, return -EINVAL; gup_flags |= FOLL_PIN; - return __gup_longterm_locked(current, current->mm, start, nr_pages, + return __gup_longterm_locked(current->mm, start, nr_pages, pages, vmas, gup_flags); } EXPORT_SYMBOL(pin_user_pages); @@ -3019,7 +3002,7 @@ long pin_user_pages_locked(unsigned long start, unsigned long nr_pages, return -EINVAL; gup_flags |= FOLL_PIN; - return __get_user_pages_locked(current, current->mm, start, nr_pages, + return __get_user_pages_locked(current->mm, start, nr_pages, pages, NULL, locked, gup_flags | FOLL_TOUCH); } diff --git a/mm/memory.c b/mm/memory.c index 2b7f0e00f312..228efaca75d3 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4742,7 +4742,7 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, void *maddr; struct page *page = NULL; - ret = get_user_pages_remote(tsk, mm, addr, 1, + ret = get_user_pages_remote(mm, addr, 1, gup_flags, &page, &vma, NULL); if (ret <= 0) { #ifndef CONFIG_HAVE_IOREMAP_PROT diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c index cc85ce81914a..29c052099aff 100644 --- a/mm/process_vm_access.c +++ b/mm/process_vm_access.c @@ -105,7 +105,7 @@ static int process_vm_rw_single_vec(unsigned long addr, * current/current->mm */ mmap_read_lock(mm); - pinned_pages = pin_user_pages_remote(task, mm, pa, pinned_pages, + pinned_pages = pin_user_pages_remote(mm, pa, pinned_pages, flags, process_pages, NULL, &locked); if (locked) diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c index 53b3e1f5f227..dc4ecc0b2038 100644 --- a/security/tomoyo/domain.c +++ b/security/tomoyo/domain.c @@ -914,7 +914,7 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos, * (represented by bprm). 'current' is the process doing * the execve(). */ - if (get_user_pages_remote(current, bprm->mm, pos, 1, + if (get_user_pages_remote(bprm->mm, pos, 1, FOLL_FORCE, &page, NULL, NULL) <= 0) return false; #else diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c index 390f758d5a27..dd777688d14a 100644 --- a/virt/kvm/async_pf.c +++ b/virt/kvm/async_pf.c @@ -61,7 +61,7 @@ static void async_pf_execute(struct work_struct *work) * access remotely. */ mmap_read_lock(mm); - get_user_pages_remote(NULL, mm, addr, 1, FOLL_WRITE, NULL, NULL, + get_user_pages_remote(mm, addr, 1, FOLL_WRITE, NULL, NULL, &locked); if (locked) mmap_read_unlock(mm); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 2c2c0254c2d8..737666db02de 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1893,7 +1893,7 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma, * not call the fault handler, so do it here. */ bool unlocked = false; - r = fixup_user_fault(current, current->mm, addr, + r = fixup_user_fault(current->mm, addr, (write_fault ? 
FAULT_FLAG_WRITE : 0), &unlocked); if (unlocked) -- cgit v1.2.3 From 2404b73c3f1a5f15726c6ecd226b56f6f992767f Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 10 Aug 2020 13:52:15 +0200 Subject: netfilter: avoid ipv6 -> nf_defrag_ipv6 module dependency nf_ct_frag6_gather is part of nf_defrag_ipv6.ko, not ipv6 core. The current use of the netfilter ipv6 stub indirections causes a module dependency between ipv6 and nf_defrag_ipv6. This prevents nf_defrag_ipv6 module from being removed because ipv6 can't be unloaded. Remove the indirection and always use a direct call. This creates a depency from nf_conntrack_bridge to nf_defrag_ipv6 instead: modinfo nf_conntrack depends: nf_conntrack,nf_defrag_ipv6,bridge .. and nf_conntrack already depends on nf_defrag_ipv6 anyway. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter_ipv6.h | 18 ------------------ net/bridge/netfilter/nf_conntrack_bridge.c | 8 ++++++-- net/ipv6/netfilter.c | 3 --- 3 files changed, 6 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index aac42c28fe62..9b67394471e1 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -58,7 +58,6 @@ struct nf_ipv6_ops { int (*output)(struct net *, struct sock *, struct sk_buff *)); int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry); #if IS_MODULE(CONFIG_IPV6) - int (*br_defrag)(struct net *net, struct sk_buff *skb, u32 user); int (*br_fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, struct nf_bridge_frag_data *data, @@ -117,23 +116,6 @@ static inline int nf_ip6_route(struct net *net, struct dst_entry **dst, #include -static inline int nf_ipv6_br_defrag(struct net *net, struct sk_buff *skb, - u32 user) -{ -#if IS_MODULE(CONFIG_IPV6) - const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); - - if (!v6_ops) - return 1; - - return v6_ops->br_defrag(net, skb, user); -#elif IS_BUILTIN(CONFIG_IPV6) - return nf_ct_frag6_gather(net, skb, user); -#else - return 1; -#endif -} - int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, struct nf_bridge_frag_data *data, int (*output)(struct net *, struct sock *sk, diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c index 809673222382..8d033a75a766 100644 --- a/net/bridge/netfilter/nf_conntrack_bridge.c +++ b/net/bridge/netfilter/nf_conntrack_bridge.c @@ -168,6 +168,7 @@ static unsigned int nf_ct_br_defrag4(struct sk_buff *skb, static unsigned int nf_ct_br_defrag6(struct sk_buff *skb, const struct nf_hook_state *state) { +#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) u16 zone_id = NF_CT_DEFAULT_ZONE_ID; enum ip_conntrack_info ctinfo; struct br_input_skb_cb cb; @@ -180,14 +181,17 @@ static unsigned int nf_ct_br_defrag6(struct sk_buff *skb, br_skb_cb_save(skb, &cb, sizeof(struct inet6_skb_parm)); - err = nf_ipv6_br_defrag(state->net, skb, - IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id); + err = nf_ct_frag6_gather(state->net, skb, + IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id); /* queued */ if (err == -EINPROGRESS) return NF_STOLEN; br_skb_cb_restore(skb, &cb, IP6CB(skb)->frag_max_size); return err == 0 ? 
NF_ACCEPT : NF_DROP; +#else + return NF_ACCEPT; +#endif } static int nf_ct_br_ip_check(const struct sk_buff *skb) diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index 409e79b84a83..6d0e942d082d 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c @@ -245,9 +245,6 @@ static const struct nf_ipv6_ops ipv6ops = { .route_input = ip6_route_input, .fragment = ip6_fragment, .reroute = nf_ip6_reroute, -#if IS_MODULE(CONFIG_IPV6) && IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) - .br_defrag = nf_ct_frag6_gather, -#endif #if IS_MODULE(CONFIG_IPV6) .br_fragment = br_ip6_fragment, #endif -- cgit v1.2.3 From 466a62d7642f02f36d37d9b30c19a725538a01ca Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Thu, 11 Jun 2020 07:35:33 +0100 Subject: mfd: core: Make a best effort attempt to match devices with the correct of_nodes Currently, when a child platform device (sometimes referred to as a sub-device) is registered via the Multi-Functional Device (MFD) API, the framework attempts to match the newly registered platform device with its associated Device Tree (OF) node. Until now, the device has been allocated the first node found with an identical OF compatible string. Unfortunately, if there are, say for example '3' devices which are to be handled by the same driver and therefore have the same compatible string, each of them will be allocated a pointer to the *first* node. An example Device Tree entry might look like this: mfd_of_test { compatible = "mfd,of-test-parent"; #address-cells = <0x02>; #size-cells = <0x02>; child@aaaaaaaaaaaaaaaa { compatible = "mfd,of-test-child"; reg = <0xaaaaaaaa 0xaaaaaaaa 0 0x11>, <0xbbbbbbbb 0xbbbbbbbb 0 0x22>; }; child@cccccccc { compatible = "mfd,of-test-child"; reg = <0x00000000 0xcccccccc 0 0x33>; }; child@dddddddd00000000 { compatible = "mfd,of-test-child"; reg = <0xdddddddd 0x00000000 0 0x44>; }; }; When used with example sub-device registration like this: static const struct mfd_cell mfd_of_test_cell[] = { OF_MFD_CELL("mfd-of-test-child", NULL, NULL, 0, 0, "mfd,of-test-child"), OF_MFD_CELL("mfd-of-test-child", NULL, NULL, 0, 1, "mfd,of-test-child"), OF_MFD_CELL("mfd-of-test-child", NULL, NULL, 0, 2, "mfd,of-test-child") }; ... the current implementation will result in all devices being allocated the first OF node found containing a matching compatible string: [0.712511] mfd-of-test-child mfd-of-test-child.0: Probing platform device: 0 [0.712710] mfd-of-test-child mfd-of-test-child.0: Using OF node: child@aaaaaaaaaaaaaaaa [0.713033] mfd-of-test-child mfd-of-test-child.1: Probing platform device: 1 [0.713381] mfd-of-test-child mfd-of-test-child.1: Using OF node: child@aaaaaaaaaaaaaaaa [0.713691] mfd-of-test-child mfd-of-test-child.2: Probing platform device: 2 [0.713889] mfd-of-test-child mfd-of-test-child.2: Using OF node: child@aaaaaaaaaaaaaaaa After this patch each device will be allocated a unique OF node: [0.712511] mfd-of-test-child mfd-of-test-child.0: Probing platform device: 0 [0.712710] mfd-of-test-child mfd-of-test-child.0: Using OF node: child@aaaaaaaaaaaaaaaa [0.713033] mfd-of-test-child mfd-of-test-child.1: Probing platform device: 1 [0.713381] mfd-of-test-child mfd-of-test-child.1: Using OF node: child@cccccccc [0.713691] mfd-of-test-child mfd-of-test-child.2: Probing platform device: 2 [0.713889] mfd-of-test-child mfd-of-test-child.2: Using OF node: child@dddddddd00000000 Which is fine if all OF nodes are identical. 
However, if we wish to apply an attribute to a particular device, we really need to ensure the correct OF node will be associated with the device containing the correct address. We accomplish this by matching the device's address expressed in DT with one provided during sub-device registration. Like this: static const struct mfd_cell mfd_of_test_cell[] = { OF_MFD_CELL_REG("mfd-of-test-child", NULL, NULL, 0, 1, "mfd,of-test-child", 0xdddddddd00000000), OF_MFD_CELL_REG("mfd-of-test-child", NULL, NULL, 0, 2, "mfd,of-test-child", 0xaaaaaaaaaaaaaaaa), OF_MFD_CELL_REG("mfd-of-test-child", NULL, NULL, 0, 3, "mfd,of-test-child", 0x00000000cccccccc) }; This will ensure a specific device (designated here using the platform_ids 1, 2 and 3) is matched with a particular OF node: [0.712511] mfd-of-test-child mfd-of-test-child.0: Probing platform device: 0 [0.712710] mfd-of-test-child mfd-of-test-child.0: Using OF node: child@dddddddd00000000 [0.713033] mfd-of-test-child mfd-of-test-child.1: Probing platform device: 1 [0.713381] mfd-of-test-child mfd-of-test-child.1: Using OF node: child@aaaaaaaaaaaaaaaa [0.713691] mfd-of-test-child mfd-of-test-child.2: Probing platform device: 2 [0.713889] mfd-of-test-child mfd-of-test-child.2: Using OF node: child@cccccccc This implementation is still not infallible, hence the mention of "best effort" in the commit subject. Since we have not *insisted* on the existence of 'reg' properties (in some scenarios they just do not make sense) and no device currently uses the new 'of_reg' attribute, we have to make an on-the-fly judgement call as to whether to associate the OF node anyway. Which we do in cases where parent drivers haven't specified a particular OF node to match to. So there is a *slight* possibility of the following result (note: the implementation here is convoluted, but it shows you one means by which this process can still break): /* * First entry will match to the first OF node with matching compatible * Second will fail, since the first took its OF node and is no longer available * Third will succeed */ static const struct mfd_cell mfd_of_test_cell[] = { OF_MFD_CELL("mfd-of-test-child", NULL, NULL, 0, 1, "mfd,of-test-child"), OF_MFD_CELL_REG("mfd-of-test-child", NULL, NULL, 0, 2, "mfd,of-test-child", 0xaaaaaaaaaaaaaaaa), OF_MFD_CELL_REG("mfd-of-test-child", NULL, NULL, 0, 3, "mfd,of-test-child", 0x00000000cccccccc) }; The result: [0.753869] mfd-of-test-parent mfd_of_test: Registering 3 devices [0.756597] mfd-of-test-child: Failed to locate of_node [id: 2] [0.759999] mfd-of-test-child mfd-of-test-child.1: Probing platform device: 1 [0.760314] mfd-of-test-child mfd-of-test-child.1: Using OF node: child@aaaaaaaaaaaaaaaa [0.760908] mfd-of-test-child mfd-of-test-child.2: Probing platform device: 2 [0.761183] mfd-of-test-child mfd-of-test-child.2: No OF node associated with this device [0.761621] mfd-of-test-child mfd-of-test-child.3: Probing platform device: 3 [0.761899] mfd-of-test-child mfd-of-test-child.3: Using OF node: child@cccccccc We could code around this with some pre-parsing semantics, but the added complexity required to cover each and every corner-case is not justified. Merely patching the current failings (via this patch) already handles all but some pretty small corner-cases. Other issues should be fixed in the parent drivers, which can be achieved simply by using OF_MFD_CELL_REG().
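For comparison, a minimal sketch (not part of the patch itself; it simply reuses the test cell name, compatible string and addresses from the example above) of how the fragile mix can be avoided by giving every entry an explicit 'of_reg', so that no cell has to fall back to first-free compatible matching:

static const struct mfd_cell mfd_of_test_cell[] = {
	OF_MFD_CELL_REG("mfd-of-test-child", NULL, NULL, 0, 1,
			"mfd,of-test-child", 0xaaaaaaaaaaaaaaaa),
	OF_MFD_CELL_REG("mfd-of-test-child", NULL, NULL, 0, 2,
			"mfd,of-test-child", 0x00000000cccccccc),
	OF_MFD_CELL_REG("mfd-of-test-child", NULL, NULL, 0, 3,
			"mfd,of-test-child", 0xdddddddd00000000)
};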
Signed-off-by: Lee Jones --- drivers/mfd/mfd-core.c | 95 ++++++++++++++++++++++++++++++++++++++++++------ include/linux/mfd/core.h | 10 +++++ 2 files changed, 93 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index 720e5c8b1588..b201842f82ad 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -17,8 +18,17 @@ #include #include #include +#include #include +static LIST_HEAD(mfd_of_node_list); + +struct mfd_of_node_entry { + struct list_head list; + struct device *dev; + struct device_node *np; +}; + static struct device_type mfd_dev_type = { .name = "mfd_device", }; @@ -107,6 +117,55 @@ static inline void mfd_acpi_add_device(const struct mfd_cell *cell, } #endif +static int mfd_match_of_node_to_dev(struct platform_device *pdev, + struct device_node *np, + const struct mfd_cell *cell) +{ +#if IS_ENABLED(CONFIG_OF) + struct mfd_of_node_entry *of_entry; + const __be32 *reg; + u64 of_node_addr; + + /* Skip devices 'disabled' by Device Tree */ + if (!of_device_is_available(np)) + return -ENODEV; + + /* Skip if OF node has previously been allocated to a device */ + list_for_each_entry(of_entry, &mfd_of_node_list, list) + if (of_entry->np == np) + return -EAGAIN; + + if (!cell->use_of_reg) + /* No of_reg defined - allocate first free compatible match */ + goto allocate_of_node; + + /* We only care about each node's first defined address */ + reg = of_get_address(np, 0, NULL, NULL); + if (!reg) + /* OF node does not contatin a 'reg' property to match to */ + return -EAGAIN; + + of_node_addr = of_read_number(reg, of_n_addr_cells(np)); + + if (cell->of_reg != of_node_addr) + /* No match */ + return -EAGAIN; + +allocate_of_node: + of_entry = kzalloc(sizeof(*of_entry), GFP_KERNEL); + if (!of_entry) + return -ENOMEM; + + of_entry->dev = &pdev->dev; + of_entry->np = np; + list_add_tail(&of_entry->list, &mfd_of_node_list); + + pdev->dev.of_node = np; + pdev->dev.fwnode = &np->fwnode; +#endif + return 0; +} + static int mfd_add_device(struct device *parent, int id, const struct mfd_cell *cell, struct resource *mem_base, @@ -115,6 +174,7 @@ static int mfd_add_device(struct device *parent, int id, struct resource *res; struct platform_device *pdev; struct device_node *np = NULL; + struct mfd_of_node_entry *of_entry, *tmp; int ret = -ENOMEM; int platform_id; int r; @@ -149,19 +209,22 @@ static int mfd_add_device(struct device *parent, int id, if (ret < 0) goto fail_res; - if (parent->of_node && cell->of_compatible) { + if (IS_ENABLED(CONFIG_OF) && parent->of_node && cell->of_compatible) { for_each_child_of_node(parent->of_node, np) { if (of_device_is_compatible(np, cell->of_compatible)) { - if (!of_device_is_available(np)) { - /* Ignore disabled devices error free */ - ret = 0; + ret = mfd_match_of_node_to_dev(pdev, np, cell); + if (ret == -EAGAIN) + continue; + if (ret) goto fail_alias; - } - pdev->dev.of_node = np; - pdev->dev.fwnode = &np->fwnode; + break; } } + + if (!pdev->dev.of_node) + pr_warn("%s: Failed to locate of_node [id: %d]\n", + cell->name, platform_id); } mfd_acpi_add_device(cell, pdev); @@ -170,13 +233,13 @@ static int mfd_add_device(struct device *parent, int id, ret = platform_device_add_data(pdev, cell->platform_data, cell->pdata_size); if (ret) - goto fail_alias; + goto fail_of_entry; } if (cell->properties) { ret = platform_device_add_properties(pdev, cell->properties); if (ret) - goto fail_alias; + goto fail_of_entry; } for 
(r = 0; r < cell->num_resources; r++) { @@ -213,18 +276,18 @@ static int mfd_add_device(struct device *parent, int id, if (has_acpi_companion(&pdev->dev)) { ret = acpi_check_resource_conflict(&res[r]); if (ret) - goto fail_alias; + goto fail_of_entry; } } } ret = platform_device_add_resources(pdev, res, cell->num_resources); if (ret) - goto fail_alias; + goto fail_of_entry; ret = platform_device_add(pdev); if (ret) - goto fail_alias; + goto fail_of_entry; if (cell->pm_runtime_no_callbacks) pm_runtime_no_callbacks(&pdev->dev); @@ -233,6 +296,12 @@ static int mfd_add_device(struct device *parent, int id, return 0; +fail_of_entry: + list_for_each_entry_safe(of_entry, tmp, &mfd_of_node_list, list) + if (of_entry->dev == &pdev->dev) { + list_del(&of_entry->list); + kfree(of_entry); + } fail_alias: regulator_bulk_unregister_supply_alias(&pdev->dev, cell->parent_supplies, @@ -297,6 +366,8 @@ static int mfd_remove_devices_fn(struct device *dev, void *data) regulator_bulk_unregister_supply_alias(dev, cell->parent_supplies, cell->num_parent_supplies); + kfree(cell); + platform_device_unregister(pdev); return 0; } diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index ab76cdd06199..c437a73b43a3 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -78,6 +78,16 @@ struct mfd_cell { */ const char *of_compatible; + /* + * Address as defined in Device Tree. Used to compement 'of_compatible' + * (above) when matching OF nodes with devices that have identical + * compatible strings + */ + const u64 of_reg; + + /* Set to 'true' to use 'of_reg' (above) - allows for of_reg=0 */ + bool use_of_reg; + /* Matches ACPI */ const struct mfd_cell_acpi_match *acpi_match; -- cgit v1.2.3 From d097965bb6682afe1f8481923b16c033f708923b Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Thu, 11 Jun 2020 10:18:43 +0100 Subject: mfd: core: Fix formatting of MFD helpers Remove unnecessary '\'s and leading tabs. This will help to clean-up future diffs when subsequent changes are made. Hint: The aforementioned changes follow this patch. 
Signed-off-by: Lee Jones --- include/linux/mfd/core.h | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index c437a73b43a3..da9b066e1594 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -26,20 +26,20 @@ .id = (_id), \ } -#define OF_MFD_CELL(_name, _res, _pdata, _pdsize,_id, _compat) \ - MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, NULL) \ +#define OF_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _compat) \ + MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, NULL) -#define ACPI_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _match) \ - MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, _match) \ +#define ACPI_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _match) \ + MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, _match) -#define MFD_CELL_BASIC(_name, _res, _pdata, _pdsize, _id) \ - MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, NULL) \ +#define MFD_CELL_BASIC(_name, _res, _pdata, _pdsize, _id) \ + MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, NULL) -#define MFD_CELL_RES(_name, _res) \ - MFD_CELL_ALL(_name, _res, NULL, 0, 0, NULL, NULL) \ +#define MFD_CELL_RES(_name, _res) \ + MFD_CELL_ALL(_name, _res, NULL, 0, 0, NULL, NULL) -#define MFD_CELL_NAME(_name) \ - MFD_CELL_ALL(_name, NULL, NULL, 0, 0, NULL, NULL) \ +#define MFD_CELL_NAME(_name) \ + MFD_CELL_ALL(_name, NULL, NULL, 0, 0, NULL, NULL) struct irq_domain; struct property_entry; -- cgit v1.2.3 From 44e6171ed04a0cd0378e2503f03a444ebdd4e8e3 Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Thu, 11 Jun 2020 10:12:05 +0100 Subject: mfd: core: Add OF_MFD_CELL_REG() helper Extend current list of helpers to provide support for parent drivers wishing to match specific child devices to particular OF nodes. 
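A rough usage sketch follows (illustrative only: the "foo" driver, its compatible strings and the 'reg' addresses are hypothetical; the registration call is the existing devm_mfd_add_devices() helper). A parent driver can pin each identically-named child to its own DT node like this:

#include <linux/mfd/core.h>
#include <linux/platform_device.h>

/* Two children share a driver/compatible but live at different addresses */
static const struct mfd_cell foo_cells[] = {
	OF_MFD_CELL_REG("foo-adc", NULL, NULL, 0, 0, "vendor,foo-adc", 0x1000),
	OF_MFD_CELL_REG("foo-adc", NULL, NULL, 0, 1, "vendor,foo-adc", 0x2000),
};

static int foo_probe(struct platform_device *pdev)
{
	/* Each cell is matched to the child node whose first 'reg' address
	 * equals its 'of_reg' value. */
	return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
				    foo_cells, ARRAY_SIZE(foo_cells),
				    NULL, 0, NULL);
}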
Signed-off-by: Lee Jones --- include/linux/mfd/core.h | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index da9b066e1594..6d68f44a26a1 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -14,7 +14,7 @@ #define MFD_RES_SIZE(arr) (sizeof(arr) / sizeof(struct resource)) -#define MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _match)\ +#define MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg, _use_of_reg, _match) \ { \ .name = (_name), \ .resources = (_res), \ @@ -22,24 +22,29 @@ .platform_data = (_pdata), \ .pdata_size = (_pdsize), \ .of_compatible = (_compat), \ + .of_reg = (_of_reg), \ + .use_of_reg = (_use_of_reg), \ .acpi_match = (_match), \ .id = (_id), \ } +#define OF_MFD_CELL_REG(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg) \ + MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg, true, NULL) + #define OF_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _compat) \ - MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, NULL) + MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, 0, false, NULL) #define ACPI_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _match) \ - MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, _match) + MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, 0, false, _match) #define MFD_CELL_BASIC(_name, _res, _pdata, _pdsize, _id) \ - MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, NULL) + MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, 0, false, NULL) #define MFD_CELL_RES(_name, _res) \ - MFD_CELL_ALL(_name, _res, NULL, 0, 0, NULL, NULL) + MFD_CELL_ALL(_name, _res, NULL, 0, 0, NULL, 0, false, NULL) #define MFD_CELL_NAME(_name) \ - MFD_CELL_ALL(_name, NULL, NULL, 0, 0, NULL, NULL) + MFD_CELL_ALL(_name, NULL, NULL, 0, 0, NULL, 0, false, NULL) struct irq_domain; struct property_entry; -- cgit v1.2.3 From 7d2594cd1fa0b03b2746ce811926ee150a3a14fa Mon Sep 17 00:00:00 2001 From: Michael Walle Date: Wed, 1 Jul 2020 23:23:48 +0200 Subject: mfd: smsc-ece1099: Remove driver This MFD driver has no user. The keypad driver of this device never made it into the kernel. Therefore, this driver is useless. Remove it. Signed-off-by: Michael Walle Cc: Sourav Poddar Signed-off-by: Lee Jones --- Documentation/driver-api/index.rst | 1 - Documentation/driver-api/smsc_ece1099.rst | 60 ----------------- drivers/mfd/Kconfig | 12 ---- drivers/mfd/Makefile | 1 - drivers/mfd/smsc-ece1099.c | 87 ------------------------- include/linux/mfd/smsc.h | 104 ------------------------------ 6 files changed, 265 deletions(-) delete mode 100644 Documentation/driver-api/smsc_ece1099.rst delete mode 100644 drivers/mfd/smsc-ece1099.c delete mode 100644 include/linux/mfd/smsc.h (limited to 'include') diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst index 6567187e7687..1397a30188eb 100644 --- a/Documentation/driver-api/index.rst +++ b/Documentation/driver-api/index.rst @@ -98,7 +98,6 @@ available subsections can be seen below. 
rfkill serial/index sm501 - smsc_ece1099 switchtec sync_file vfio-mediated-device diff --git a/Documentation/driver-api/smsc_ece1099.rst b/Documentation/driver-api/smsc_ece1099.rst deleted file mode 100644 index 079277421eaf..000000000000 --- a/Documentation/driver-api/smsc_ece1099.rst +++ /dev/null @@ -1,60 +0,0 @@ -================================================= -Msc Keyboard Scan Expansion/GPIO Expansion device -================================================= - -What is smsc-ece1099? ----------------------- - -The ECE1099 is a 40-Pin 3.3V Keyboard Scan Expansion -or GPIO Expansion device. The device supports a keyboard -scan matrix of 23x8. The device is connected to a Master -via the SMSC BC-Link interface or via the SMBus. -Keypad scan Input(KSI) and Keypad Scan Output(KSO) signals -are multiplexed with GPIOs. - -Interrupt generation --------------------- - -Interrupts can be generated by an edge detection on a GPIO -pin or an edge detection on one of the bus interface pins. -Interrupts can also be detected on the keyboard scan interface. -The bus interrupt pin (BC_INT# or SMBUS_INT#) is asserted if -any bit in one of the Interrupt Status registers is 1 and -the corresponding Interrupt Mask bit is also 1. - -In order for software to determine which device is the source -of an interrupt, it should first read the Group Interrupt Status Register -to determine which Status register group is a source for the interrupt. -Software should read both the Status register and the associated Mask register, -then AND the two values together. Bits that are 1 in the result of the AND -are active interrupts. Software clears an interrupt by writing a 1 to the -corresponding bit in the Status register. - -Communication Protocol ----------------------- - -- SMbus slave Interface - The host processor communicates with the ECE1099 device - through a series of read/write registers via the SMBus - interface. SMBus is a serial communication protocol between - a computer host and its peripheral devices. The SMBus data - rate is 10KHz minimum to 400 KHz maximum - -- Slave Bus Interface - The ECE1099 device SMBus implementation is a subset of the - SMBus interface to the host. The device is a slave-only SMBus device. - The implementation in the device is a subset of SMBus since it - only supports four protocols. - - The Write Byte, Read Byte, Send Byte, and Receive Byte protocols are the - only valid SMBus protocols for the device. - -- BC-LinkTM Interface - The BC-Link is a proprietary bus that allows communication - between a Master device and a Companion device. The Master - device uses this serial bus to read and write registers - located on the Companion device. The bus comprises three signals, - BC_CLK, BC_DAT and BC_INT#. The Master device always provides the - clock, BC_CLK, and the Companion device is the source for an - independent asynchronous interrupt signal, BC_INT#. The ECE1099 - supports BC-Link speeds up to 24MHz. diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index d13bb0abfd6f..33df0837ab41 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -1193,18 +1193,6 @@ config MFD_SKY81452 This driver can also be built as a module. If so, the module will be called sky81452. -config MFD_SMSC - bool "SMSC ECE1099 series chips" - depends on I2C=y - select MFD_CORE - select REGMAP_I2C - help - If you say yes here you get support for the - ece1099 chips from SMSC. - - To compile this driver as a module, choose M here: the - module will be called smsc. 
- config MFD_SC27XX_PMIC tristate "Spreadtrum SC27xx PMICs" depends on ARCH_SPRD || COMPILE_TEST diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 1c8d6be3347d..a60e5f835283 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -127,7 +127,6 @@ obj-$(CONFIG_MFD_CPCAP) += motorola-cpcap.o obj-$(CONFIG_MCP) += mcp-core.o obj-$(CONFIG_MCP_SA11X0) += mcp-sa11x0.o obj-$(CONFIG_MCP_UCB1200) += ucb1x00-core.o -obj-$(CONFIG_MFD_SMSC) += smsc-ece1099.o obj-$(CONFIG_MCP_UCB1200_TS) += ucb1x00-ts.o ifeq ($(CONFIG_SA1100_ASSABET),y) diff --git a/drivers/mfd/smsc-ece1099.c b/drivers/mfd/smsc-ece1099.c deleted file mode 100644 index 57b792eb58fd..000000000000 --- a/drivers/mfd/smsc-ece1099.c +++ /dev/null @@ -1,87 +0,0 @@ -/* - * TI SMSC MFD Driver - * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com - * - * Author: Sourav Poddar - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; GPL v2. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static const struct regmap_config smsc_regmap_config = { - .reg_bits = 8, - .val_bits = 8, - .max_register = SMSC_VEN_ID_H, - .cache_type = REGCACHE_RBTREE, -}; - -static int smsc_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) -{ - struct smsc *smsc; - int devid, rev, venid_l, venid_h; - int ret; - - smsc = devm_kzalloc(&i2c->dev, sizeof(*smsc), GFP_KERNEL); - if (!smsc) - return -ENOMEM; - - smsc->regmap = devm_regmap_init_i2c(i2c, &smsc_regmap_config); - if (IS_ERR(smsc->regmap)) - return PTR_ERR(smsc->regmap); - - i2c_set_clientdata(i2c, smsc); - smsc->dev = &i2c->dev; - -#ifdef CONFIG_OF - of_property_read_u32(i2c->dev.of_node, "clock", &smsc->clk); -#endif - - regmap_read(smsc->regmap, SMSC_DEV_ID, &devid); - regmap_read(smsc->regmap, SMSC_DEV_REV, &rev); - regmap_read(smsc->regmap, SMSC_VEN_ID_L, &venid_l); - regmap_read(smsc->regmap, SMSC_VEN_ID_H, &venid_h); - - dev_info(&i2c->dev, "SMSCxxx devid: %02x rev: %02x venid: %02x\n", - devid, rev, (venid_h << 8) | venid_l); - - ret = regmap_write(smsc->regmap, SMSC_CLK_CTRL, smsc->clk); - if (ret) - return ret; - -#ifdef CONFIG_OF - if (i2c->dev.of_node) - ret = devm_of_platform_populate(&i2c->dev); -#endif - - return ret; -} - -static const struct i2c_device_id smsc_i2c_id[] = { - { "smscece1099", 0}, - {}, -}; - -static struct i2c_driver smsc_i2c_driver = { - .driver = { - .name = "smsc", - }, - .probe = smsc_i2c_probe, - .id_table = smsc_i2c_id, -}; -builtin_i2c_driver(smsc_i2c_driver); diff --git a/include/linux/mfd/smsc.h b/include/linux/mfd/smsc.h deleted file mode 100644 index 83944124e886..000000000000 --- a/include/linux/mfd/smsc.h +++ /dev/null @@ -1,104 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * SMSC ECE1099 - * - * Copyright 2012 Texas Instruments Inc. 
- * - * Author: Sourav Poddar - */ - -#ifndef __LINUX_MFD_SMSC_H -#define __LINUX_MFD_SMSC_H - -#include - -#define SMSC_ID_ECE1099 1 -#define SMSC_NUM_CLIENTS 2 - -#define SMSC_BASE_ADDR 0x38 -#define OMAP_GPIO_SMSC_IRQ 151 - -#define SMSC_MAXGPIO 32 -#define SMSC_BANK(offs) ((offs) >> 3) -#define SMSC_BIT(offs) (1u << ((offs) & 0x7)) - -struct smsc { - struct device *dev; - struct i2c_client *i2c_clients[SMSC_NUM_CLIENTS]; - struct regmap *regmap; - int clk; - /* Stored chip id */ - int id; -}; - -struct smsc_gpio; -struct smsc_keypad; - -static inline int smsc_read(struct device *child, unsigned int reg, - unsigned int *dest) -{ - struct smsc *smsc = dev_get_drvdata(child->parent); - - return regmap_read(smsc->regmap, reg, dest); -} - -static inline int smsc_write(struct device *child, unsigned int reg, - unsigned int value) -{ - struct smsc *smsc = dev_get_drvdata(child->parent); - - return regmap_write(smsc->regmap, reg, value); -} - -/* Registers for SMSC */ -#define SMSC_RESET 0xF5 -#define SMSC_GRP_INT 0xF9 -#define SMSC_CLK_CTRL 0xFA -#define SMSC_WKUP_CTRL 0xFB -#define SMSC_DEV_ID 0xFC -#define SMSC_DEV_REV 0xFD -#define SMSC_VEN_ID_L 0xFE -#define SMSC_VEN_ID_H 0xFF - -/* CLK VALUE */ -#define SMSC_CLK_VALUE 0x13 - -/* Registers for function GPIO INPUT */ -#define SMSC_GPIO_DATA_IN_START 0x00 - -/* Registers for function GPIO OUPUT */ -#define SMSC_GPIO_DATA_OUT_START 0x05 - -/* Definitions for SMSC GPIO CONFIGURATION REGISTER*/ -#define SMSC_GPIO_INPUT_LOW 0x01 -#define SMSC_GPIO_INPUT_RISING 0x09 -#define SMSC_GPIO_INPUT_FALLING 0x11 -#define SMSC_GPIO_INPUT_BOTH_EDGE 0x19 -#define SMSC_GPIO_OUTPUT_PP 0x21 -#define SMSC_GPIO_OUTPUT_OP 0x31 - -#define GRP_INT_STAT 0xf9 -#define SMSC_GPI_INT 0x0f -#define SMSC_CFG_START 0x0A - -/* Registers for SMSC GPIO INTERRUPT STATUS REGISTER*/ -#define SMSC_GPIO_INT_STAT_START 0x32 - -/* Registers for SMSC GPIO INTERRUPT MASK REGISTER*/ -#define SMSC_GPIO_INT_MASK_START 0x37 - -/* Registers for SMSC function KEYPAD*/ -#define SMSC_KP_OUT 0x40 -#define SMSC_KP_IN 0x41 -#define SMSC_KP_INT_STAT 0x42 -#define SMSC_KP_INT_MASK 0x43 - -/* Definitions for keypad */ -#define SMSC_KP_KSO 0x70 -#define SMSC_KP_KSI 0x51 -#define SMSC_KSO_ALL_LOW 0x20 -#define SMSC_KP_SET_LOW_PWR 0x0B -#define SMSC_KP_SET_HIGH 0xFF -#define SMSC_KSO_EVAL 0x00 - -#endif /* __LINUX_MFD_SMSC_H */ -- cgit v1.2.3 From 091c6110862bce4e2380e353cb062dcb6a56bcb6 Mon Sep 17 00:00:00 2001 From: Adam Thomson Date: Mon, 13 Jul 2020 10:38:57 +0100 Subject: mfd: da9063: Fix revision handling to correctly select reg tables The current implementation performs checking in the i2c_probe() function of the variant_code but does this immediately after the containing struct has been initialised as all zero. This means the check for variant code will always default to using the BB tables and will never select AD. The variant code is subsequently set by device_init() and later used by the RTC so really it's a little fortunate this mismatch works. This update adds raw I2C read access functionality to read the chip and variant/revision information (common to all revisions) so that it can subsequently correctly choose the proper regmap tables for real initialisation. 
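For reference, a small sketch (illustrative only, built on the VARIANT_ID mask/shift definitions this patch adds to registers.h) of how the variant code is decoded from the raw VARIANT_ID byte once it has been read over raw I2C:

static inline u8 da9063_variant_code(u8 variant_id)
{
	/* The MRC field of VARIANT_ID selects the AD/BB/CA regmap tables */
	return (variant_id & DA9063_VARIANT_ID_MRC_MASK) >>
		DA9063_VARIANT_ID_MRC_SHIFT;
}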
Signed-off-by: Adam Thomson Signed-off-by: Lee Jones --- drivers/mfd/da9063-core.c | 31 ------ drivers/mfd/da9063-i2c.c | 184 +++++++++++++++++++++++++++++++---- include/linux/mfd/da9063/registers.h | 15 ++- 3 files changed, 177 insertions(+), 53 deletions(-) (limited to 'include') diff --git a/drivers/mfd/da9063-core.c b/drivers/mfd/da9063-core.c index b125f90dd375..a353d52210a9 100644 --- a/drivers/mfd/da9063-core.c +++ b/drivers/mfd/da9063-core.c @@ -160,7 +160,6 @@ static int da9063_clear_fault_log(struct da9063 *da9063) int da9063_device_init(struct da9063 *da9063, unsigned int irq) { - int model, variant_id, variant_code; int ret; ret = da9063_clear_fault_log(da9063); @@ -171,36 +170,6 @@ int da9063_device_init(struct da9063 *da9063, unsigned int irq) da9063->irq_base = -1; da9063->chip_irq = irq; - ret = regmap_read(da9063->regmap, DA9063_REG_CHIP_ID, &model); - if (ret < 0) { - dev_err(da9063->dev, "Cannot read chip model id.\n"); - return -EIO; - } - if (model != PMIC_CHIP_ID_DA9063) { - dev_err(da9063->dev, "Invalid chip model id: 0x%02x\n", model); - return -ENODEV; - } - - ret = regmap_read(da9063->regmap, DA9063_REG_CHIP_VARIANT, &variant_id); - if (ret < 0) { - dev_err(da9063->dev, "Cannot read chip variant id.\n"); - return -EIO; - } - - variant_code = variant_id >> DA9063_CHIP_VARIANT_SHIFT; - - dev_info(da9063->dev, - "Device detected (chip-ID: 0x%02X, var-ID: 0x%02X)\n", - model, variant_id); - - if (variant_code < PMIC_DA9063_BB && variant_code != PMIC_DA9063_AD) { - dev_err(da9063->dev, - "Cannot support variant code: 0x%02X\n", variant_code); - return -ENODEV; - } - - da9063->variant_code = variant_code; - ret = da9063_irq_init(da9063); if (ret) { dev_err(da9063->dev, "Cannot initialize interrupts.\n"); diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c index 455de74c0dd2..481548948bc0 100644 --- a/drivers/mfd/da9063-i2c.c +++ b/drivers/mfd/da9063-i2c.c @@ -22,12 +22,124 @@ #include #include +/* + * Raw I2C access required for just accessing chip and variant info before we + * know which device is present. The info read from the device using this + * approach is then used to select the correct regmap tables. 
+ */ + +#define DA9063_REG_PAGE_SIZE 0x100 +#define DA9063_REG_PAGED_ADDR_MASK 0xFF + +enum da9063_page_sel_buf_fmt { + DA9063_PAGE_SEL_BUF_PAGE_REG = 0, + DA9063_PAGE_SEL_BUF_PAGE_VAL, + DA9063_PAGE_SEL_BUF_SIZE, +}; + +enum da9063_paged_read_msgs { + DA9063_PAGED_READ_MSG_PAGE_SEL = 0, + DA9063_PAGED_READ_MSG_REG_SEL, + DA9063_PAGED_READ_MSG_DATA, + DA9063_PAGED_READ_MSG_CNT, +}; + +static int da9063_i2c_blockreg_read(struct i2c_client *client, u16 addr, + u8 *buf, int count) +{ + struct i2c_msg xfer[DA9063_PAGED_READ_MSG_CNT]; + u8 page_sel_buf[DA9063_PAGE_SEL_BUF_SIZE]; + u8 page_num, paged_addr; + int ret; + + /* Determine page info based on register address */ + page_num = (addr / DA9063_REG_PAGE_SIZE); + if (page_num > 1) { + dev_err(&client->dev, "Invalid register address provided\n"); + return -EINVAL; + } + + paged_addr = (addr % DA9063_REG_PAGE_SIZE) & DA9063_REG_PAGED_ADDR_MASK; + page_sel_buf[DA9063_PAGE_SEL_BUF_PAGE_REG] = DA9063_REG_PAGE_CON; + page_sel_buf[DA9063_PAGE_SEL_BUF_PAGE_VAL] = + (page_num << DA9063_I2C_PAGE_SEL_SHIFT) & DA9063_REG_PAGE_MASK; + + /* Write reg address, page selection */ + xfer[DA9063_PAGED_READ_MSG_PAGE_SEL].addr = client->addr; + xfer[DA9063_PAGED_READ_MSG_PAGE_SEL].flags = 0; + xfer[DA9063_PAGED_READ_MSG_PAGE_SEL].len = DA9063_PAGE_SEL_BUF_SIZE; + xfer[DA9063_PAGED_READ_MSG_PAGE_SEL].buf = page_sel_buf; + + /* Select register address */ + xfer[DA9063_PAGED_READ_MSG_REG_SEL].addr = client->addr; + xfer[DA9063_PAGED_READ_MSG_REG_SEL].flags = 0; + xfer[DA9063_PAGED_READ_MSG_REG_SEL].len = sizeof(paged_addr); + xfer[DA9063_PAGED_READ_MSG_REG_SEL].buf = &paged_addr; + + /* Read data */ + xfer[DA9063_PAGED_READ_MSG_DATA].addr = client->addr; + xfer[DA9063_PAGED_READ_MSG_DATA].flags = I2C_M_RD; + xfer[DA9063_PAGED_READ_MSG_DATA].len = count; + xfer[DA9063_PAGED_READ_MSG_DATA].buf = buf; + + ret = i2c_transfer(client->adapter, xfer, DA9063_PAGED_READ_MSG_CNT); + if (ret < 0) { + dev_err(&client->dev, "Paged block read failed: %d\n", ret); + return ret; + } + + if (ret != DA9063_PAGED_READ_MSG_CNT) { + dev_err(&client->dev, "Paged block read failed to complete\n"); + return -EIO; + } + + return 0; +} + +enum { + DA9063_DEV_ID_REG = 0, + DA9063_VAR_ID_REG, + DA9063_CHIP_ID_REGS, +}; + +static int da9063_get_device_type(struct i2c_client *i2c, struct da9063 *da9063) +{ + u8 buf[DA9063_CHIP_ID_REGS]; + int ret; + + ret = da9063_i2c_blockreg_read(i2c, DA9063_REG_DEVICE_ID, buf, + DA9063_CHIP_ID_REGS); + if (ret) + return ret; + + if (buf[DA9063_DEV_ID_REG] != PMIC_CHIP_ID_DA9063) { + dev_err(da9063->dev, + "Invalid chip device ID: 0x%02x\n", + buf[DA9063_DEV_ID_REG]); + return -ENODEV; + } + + dev_info(da9063->dev, + "Device detected (chip-ID: 0x%02X, var-ID: 0x%02X)\n", + buf[DA9063_DEV_ID_REG], buf[DA9063_VAR_ID_REG]); + + da9063->variant_code = + (buf[DA9063_VAR_ID_REG] & DA9063_VARIANT_ID_MRC_MASK) + >> DA9063_VARIANT_ID_MRC_SHIFT; + + return 0; +} + +/* + * Variant specific regmap configs + */ + static const struct regmap_range da9063_ad_readable_ranges[] = { regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_AD_REG_SECOND_D), regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31), regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW), regmap_reg_range(DA9063_REG_T_OFFSET, DA9063_AD_REG_GP_ID_19), - regmap_reg_range(DA9063_REG_CHIP_ID, DA9063_REG_CHIP_VARIANT), + regmap_reg_range(DA9063_REG_DEVICE_ID, DA9063_REG_VARIANT_ID), }; static const struct regmap_range da9063_ad_writeable_ranges[] = { @@ -72,7 +184,7 @@ static const struct regmap_range 
da9063_bb_readable_ranges[] = { regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31), regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW), regmap_reg_range(DA9063_REG_T_OFFSET, DA9063_BB_REG_GP_ID_19), - regmap_reg_range(DA9063_REG_CHIP_ID, DA9063_REG_CHIP_VARIANT), + regmap_reg_range(DA9063_REG_DEVICE_ID, DA9063_REG_VARIANT_ID), }; static const struct regmap_range da9063_bb_writeable_ranges[] = { @@ -117,7 +229,7 @@ static const struct regmap_range da9063l_bb_readable_ranges[] = { regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31), regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW), regmap_reg_range(DA9063_REG_T_OFFSET, DA9063_BB_REG_GP_ID_19), - regmap_reg_range(DA9063_REG_CHIP_ID, DA9063_REG_CHIP_VARIANT), + regmap_reg_range(DA9063_REG_DEVICE_ID, DA9063_REG_VARIANT_ID), }; static const struct regmap_range da9063l_bb_writeable_ranges[] = { @@ -159,7 +271,7 @@ static const struct regmap_access_table da9063l_bb_volatile_table = { static const struct regmap_range_cfg da9063_range_cfg[] = { { .range_min = DA9063_REG_PAGE_CON, - .range_max = DA9063_REG_CHIP_VARIANT, + .range_max = DA9063_REG_CONFIG_ID, .selector_reg = DA9063_REG_PAGE_CON, .selector_mask = 1 << DA9063_I2C_PAGE_SEL_SHIFT, .selector_shift = DA9063_I2C_PAGE_SEL_SHIFT, @@ -173,7 +285,7 @@ static struct regmap_config da9063_regmap_config = { .val_bits = 8, .ranges = da9063_range_cfg, .num_ranges = ARRAY_SIZE(da9063_range_cfg), - .max_register = DA9063_REG_CHIP_VARIANT, + .max_register = DA9063_REG_CONFIG_ID, .cache_type = REGCACHE_RBTREE, }; @@ -199,18 +311,56 @@ static int da9063_i2c_probe(struct i2c_client *i2c, da9063->chip_irq = i2c->irq; da9063->type = id->driver_data; - if (da9063->variant_code == PMIC_DA9063_AD) { - da9063_regmap_config.rd_table = &da9063_ad_readable_table; - da9063_regmap_config.wr_table = &da9063_ad_writeable_table; - da9063_regmap_config.volatile_table = &da9063_ad_volatile_table; - } else if (da9063->type == PMIC_TYPE_DA9063L) { - da9063_regmap_config.rd_table = &da9063l_bb_readable_table; - da9063_regmap_config.wr_table = &da9063l_bb_writeable_table; - da9063_regmap_config.volatile_table = &da9063l_bb_volatile_table; - } else { - da9063_regmap_config.rd_table = &da9063_bb_readable_table; - da9063_regmap_config.wr_table = &da9063_bb_writeable_table; - da9063_regmap_config.volatile_table = &da9063_bb_volatile_table; + ret = da9063_get_device_type(i2c, da9063); + if (ret) + return ret; + + switch (da9063->type) { + case PMIC_TYPE_DA9063: + switch (da9063->variant_code) { + case PMIC_DA9063_AD: + da9063_regmap_config.rd_table = + &da9063_ad_readable_table; + da9063_regmap_config.wr_table = + &da9063_ad_writeable_table; + da9063_regmap_config.volatile_table = + &da9063_ad_volatile_table; + break; + case PMIC_DA9063_BB: + case PMIC_DA9063_CA: + da9063_regmap_config.rd_table = + &da9063_bb_readable_table; + da9063_regmap_config.wr_table = + &da9063_bb_writeable_table; + da9063_regmap_config.volatile_table = + &da9063_bb_volatile_table; + break; + default: + dev_err(da9063->dev, + "Chip variant not supported for DA9063\n"); + return -ENODEV; + } + break; + case PMIC_TYPE_DA9063L: + switch (da9063->variant_code) { + case PMIC_DA9063_BB: + case PMIC_DA9063_CA: + da9063_regmap_config.rd_table = + &da9063l_bb_readable_table; + da9063_regmap_config.wr_table = + &da9063l_bb_writeable_table; + da9063_regmap_config.volatile_table = + &da9063l_bb_volatile_table; + break; + default: + dev_err(da9063->dev, + "Chip variant not supported for DA9063L\n"); + return -ENODEV; + } + break; + default: + 
dev_err(da9063->dev, "Chip type not supported\n"); + return -ENODEV; } da9063->regmap = devm_regmap_init_i2c(i2c, &da9063_regmap_config); diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h index ba706b0e28c2..1dbabf1b3cb8 100644 --- a/include/linux/mfd/da9063/registers.h +++ b/include/linux/mfd/da9063/registers.h @@ -292,8 +292,10 @@ #define DA9063_BB_REG_GP_ID_19 0x134 /* Chip ID and variant */ -#define DA9063_REG_CHIP_ID 0x181 -#define DA9063_REG_CHIP_VARIANT 0x182 +#define DA9063_REG_DEVICE_ID 0x181 +#define DA9063_REG_VARIANT_ID 0x182 +#define DA9063_REG_CUSTOMER_ID 0x183 +#define DA9063_REG_CONFIG_ID 0x184 /* * PMIC registers bits @@ -929,9 +931,6 @@ #define DA9063_RTC_CLOCK 0x40 #define DA9063_OUT_32K_EN 0x80 -/* DA9063_REG_CHIP_VARIANT */ -#define DA9063_CHIP_VARIANT_SHIFT 4 - /* DA9063_REG_BUCK_ILIM_A (addr=0x9A) */ #define DA9063_BIO_ILIM_MASK 0x0F #define DA9063_BMEM_ILIM_MASK 0xF0 @@ -1065,4 +1064,10 @@ #define DA9063_MON_A10_IDX_LDO9 0x04 #define DA9063_MON_A10_IDX_LDO10 0x05 +/* DA9063_REG_VARIANT_ID (addr=0x182) */ +#define DA9063_VARIANT_ID_VRC_SHIFT 0 +#define DA9063_VARIANT_ID_VRC_MASK 0x0F +#define DA9063_VARIANT_ID_MRC_SHIFT 4 +#define DA9063_VARIANT_ID_MRC_MASK 0xF0 + #endif /* _DA9063_REG_H */ -- cgit v1.2.3 From 9ece3601aed46f7b460b79cd7d60908b47b2b0d4 Mon Sep 17 00:00:00 2001 From: Adam Thomson Date: Mon, 13 Jul 2020 10:38:59 +0100 Subject: mfd: da9063: Add support for latest DA silicon revision This update adds new regmap tables to support the latest DA silicon which will automatically be selected based on the chip and variant information read from the device. Signed-off-by: Adam Thomson Signed-off-by: Lee Jones --- drivers/mfd/da9063-i2c.c | 91 ++++++++++++++++++++++++++++++++++++----- include/linux/mfd/da9063/core.h | 1 + 2 files changed, 82 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c index 481548948bc0..b8217ad303ce 100644 --- a/drivers/mfd/da9063-i2c.c +++ b/drivers/mfd/da9063-i2c.c @@ -197,7 +197,7 @@ static const struct regmap_range da9063_bb_writeable_ranges[] = { regmap_reg_range(DA9063_BB_REG_GP_ID_0, DA9063_BB_REG_GP_ID_19), }; -static const struct regmap_range da9063_bb_volatile_ranges[] = { +static const struct regmap_range da9063_bb_da_volatile_ranges[] = { regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_EVENT_D), regmap_reg_range(DA9063_REG_CONTROL_A, DA9063_REG_CONTROL_B), regmap_reg_range(DA9063_REG_CONTROL_E, DA9063_REG_CONTROL_F), @@ -219,9 +219,9 @@ static const struct regmap_access_table da9063_bb_writeable_table = { .n_yes_ranges = ARRAY_SIZE(da9063_bb_writeable_ranges), }; -static const struct regmap_access_table da9063_bb_volatile_table = { - .yes_ranges = da9063_bb_volatile_ranges, - .n_yes_ranges = ARRAY_SIZE(da9063_bb_volatile_ranges), +static const struct regmap_access_table da9063_bb_da_volatile_table = { + .yes_ranges = da9063_bb_da_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(da9063_bb_da_volatile_ranges), }; static const struct regmap_range da9063l_bb_readable_ranges[] = { @@ -241,7 +241,7 @@ static const struct regmap_range da9063l_bb_writeable_ranges[] = { regmap_reg_range(DA9063_BB_REG_GP_ID_0, DA9063_BB_REG_GP_ID_19), }; -static const struct regmap_range da9063l_bb_volatile_ranges[] = { +static const struct regmap_range da9063l_bb_da_volatile_ranges[] = { regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_EVENT_D), regmap_reg_range(DA9063_REG_CONTROL_A, DA9063_REG_CONTROL_B), regmap_reg_range(DA9063_REG_CONTROL_E, 
DA9063_REG_CONTROL_F), @@ -263,9 +263,64 @@ static const struct regmap_access_table da9063l_bb_writeable_table = { .n_yes_ranges = ARRAY_SIZE(da9063l_bb_writeable_ranges), }; -static const struct regmap_access_table da9063l_bb_volatile_table = { - .yes_ranges = da9063l_bb_volatile_ranges, - .n_yes_ranges = ARRAY_SIZE(da9063l_bb_volatile_ranges), +static const struct regmap_access_table da9063l_bb_da_volatile_table = { + .yes_ranges = da9063l_bb_da_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(da9063l_bb_da_volatile_ranges), +}; + +static const struct regmap_range da9063_da_readable_ranges[] = { + regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_BB_REG_SECOND_D), + regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31), + regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW), + regmap_reg_range(DA9063_REG_T_OFFSET, DA9063_BB_REG_GP_ID_11), + regmap_reg_range(DA9063_REG_DEVICE_ID, DA9063_REG_VARIANT_ID), +}; + +static const struct regmap_range da9063_da_writeable_ranges[] = { + regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_PAGE_CON), + regmap_reg_range(DA9063_REG_FAULT_LOG, DA9063_REG_VSYS_MON), + regmap_reg_range(DA9063_REG_COUNT_S, DA9063_BB_REG_ALARM_Y), + regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31), + regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW), + regmap_reg_range(DA9063_REG_CONFIG_I, DA9063_BB_REG_MON_REG_4), + regmap_reg_range(DA9063_BB_REG_GP_ID_0, DA9063_BB_REG_GP_ID_11), +}; + +static const struct regmap_access_table da9063_da_readable_table = { + .yes_ranges = da9063_da_readable_ranges, + .n_yes_ranges = ARRAY_SIZE(da9063_da_readable_ranges), +}; + +static const struct regmap_access_table da9063_da_writeable_table = { + .yes_ranges = da9063_da_writeable_ranges, + .n_yes_ranges = ARRAY_SIZE(da9063_da_writeable_ranges), +}; + +static const struct regmap_range da9063l_da_readable_ranges[] = { + regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_MON_A10_RES), + regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31), + regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW), + regmap_reg_range(DA9063_REG_T_OFFSET, DA9063_BB_REG_GP_ID_11), + regmap_reg_range(DA9063_REG_DEVICE_ID, DA9063_REG_VARIANT_ID), +}; + +static const struct regmap_range da9063l_da_writeable_ranges[] = { + regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_PAGE_CON), + regmap_reg_range(DA9063_REG_FAULT_LOG, DA9063_REG_VSYS_MON), + regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31), + regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW), + regmap_reg_range(DA9063_REG_CONFIG_I, DA9063_BB_REG_MON_REG_4), + regmap_reg_range(DA9063_BB_REG_GP_ID_0, DA9063_BB_REG_GP_ID_11), +}; + +static const struct regmap_access_table da9063l_da_readable_table = { + .yes_ranges = da9063l_da_readable_ranges, + .n_yes_ranges = ARRAY_SIZE(da9063l_da_readable_ranges), +}; + +static const struct regmap_access_table da9063l_da_writeable_table = { + .yes_ranges = da9063l_da_writeable_ranges, + .n_yes_ranges = ARRAY_SIZE(da9063l_da_writeable_ranges), }; static const struct regmap_range_cfg da9063_range_cfg[] = { @@ -333,7 +388,15 @@ static int da9063_i2c_probe(struct i2c_client *i2c, da9063_regmap_config.wr_table = &da9063_bb_writeable_table; da9063_regmap_config.volatile_table = - &da9063_bb_volatile_table; + &da9063_bb_da_volatile_table; + break; + case PMIC_DA9063_DA: + da9063_regmap_config.rd_table = + &da9063_da_readable_table; + da9063_regmap_config.wr_table = + &da9063_da_writeable_table; + da9063_regmap_config.volatile_table = + &da9063_bb_da_volatile_table; break; default: dev_err(da9063->dev, @@ -350,7 
+413,15 @@ static int da9063_i2c_probe(struct i2c_client *i2c, da9063_regmap_config.wr_table = &da9063l_bb_writeable_table; da9063_regmap_config.volatile_table = - &da9063l_bb_volatile_table; + &da9063l_bb_da_volatile_table; + break; + case PMIC_DA9063_DA: + da9063_regmap_config.rd_table = + &da9063l_da_readable_table; + da9063_regmap_config.wr_table = + &da9063l_da_writeable_table; + da9063_regmap_config.volatile_table = + &da9063l_bb_da_volatile_table; break; default: dev_err(da9063->dev, diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h index 5cd06ab26352..fa7a43f02f27 100644 --- a/include/linux/mfd/da9063/core.h +++ b/include/linux/mfd/da9063/core.h @@ -35,6 +35,7 @@ enum da9063_variant_codes { PMIC_DA9063_AD = 0x3, PMIC_DA9063_BB = 0x5, PMIC_DA9063_CA = 0x6, + PMIC_DA9063_DA = 0x7, }; /* Interrupts */ -- cgit v1.2.3 From 23ef2b642b85f03a109fbf5958134a5e40193dbd Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 18 Jul 2020 17:29:17 -0700 Subject: mfd: da9055: pdata.h: Drop a duplicated word Drop the repeated word "that" in a comment. Signed-off-by: Randy Dunlap Acked-by: Adam Thomson Signed-off-by: Lee Jones --- include/linux/mfd/da9055/pdata.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/mfd/da9055/pdata.h b/include/linux/mfd/da9055/pdata.h index eac48e483190..d3f126990ad0 100644 --- a/include/linux/mfd/da9055/pdata.h +++ b/include/linux/mfd/da9055/pdata.h @@ -35,7 +35,7 @@ struct da9055_pdata { int *gpio_rsel; /* * Regulator mode control bits value (GPI offset) that - * that controls the regulator state, 0 if not available. + * controls the regulator state, 0 if not available. */ enum gpio_select *reg_ren; /* -- cgit v1.2.3 From e7b85500885f2a70129f5d3a72153e23b37d0fe5 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 18 Jul 2020 17:29:31 -0700 Subject: mfd: max77693-private: Drop a duplicated word Drop the repeated word "in" in a comment. Signed-off-by: Randy Dunlap Signed-off-by: Lee Jones --- include/linux/mfd/max77693-private.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h index e798c81aec31..311f7d3d2323 100644 --- a/include/linux/mfd/max77693-private.h +++ b/include/linux/mfd/max77693-private.h @@ -131,7 +131,7 @@ enum max77693_pmic_reg { #define FLASH_INT_FLED1_SHORT BIT(3) #define FLASH_INT_OVER_CURRENT BIT(4) -/* Fast charge timer in in hours */ +/* Fast charge timer in hours */ #define DEFAULT_FAST_CHARGE_TIMER 4 /* microamps */ #define DEFAULT_TOP_OFF_THRESHOLD_CURRENT 150000 -- cgit v1.2.3 From 114294d276279d6cda81f9c685452239ea89cdb8 Mon Sep 17 00:00:00 2001 From: Charles Keepax Date: Thu, 23 Jul 2020 11:54:58 +0100 Subject: mfd: mfd-core: Add mechanism for removal of a subset of children Currently, the only way to remove MFD children is with a call to mfd_remove_devices, which will remove all the children. Under some circumstances it is useful to remove only a subset of the child devices. For example if some additional clean up is required between removal of certain child devices. To accomplish this a level field is added to mfd_cell, the normal mfd_remove_devices is modified to not remove devices that are set to a higher level and a corresponding mfd_remove_devices_late function is added to remove those children. 
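As a rough illustration (not part of the patch itself), a parent driver that needs one child to outlive its siblings could mark that cell with MFD_DEP_LEVEL_HIGH and tear the set down in two stages; the driver, cell and device names below are hypothetical:

#include <linux/kernel.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* Hypothetical parent MFD driver: "foo-codec" must be removed last. */
static const struct mfd_cell foo_cells[] = {
	{ .name = "foo-gpio" },				/* default: MFD_DEP_LEVEL_NORMAL */
	{ .name = "foo-codec", .level = MFD_DEP_LEVEL_HIGH },
};

static int foo_probe(struct platform_device *pdev)
{
	return mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, foo_cells,
			       ARRAY_SIZE(foo_cells), NULL, 0, NULL);
}

static int foo_remove(struct platform_device *pdev)
{
	/* First pass: removes only the MFD_DEP_LEVEL_NORMAL children. */
	mfd_remove_devices(&pdev->dev);

	/* Any parent-specific cleanup between the two stages goes here. */

	/* Second pass: removes the children marked MFD_DEP_LEVEL_HIGH. */
	mfd_remove_devices_late(&pdev->dev);
	return 0;
}

static struct platform_driver foo_driver = {
	.driver	= { .name = "foo-mfd" },
	.probe	= foo_probe,
	.remove	= foo_remove,
};
module_platform_driver(foo_driver);

The split keeps the existing semantics of mfd_remove_devices() for drivers that never set a level, while letting individual children opt into the later pass.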
See further discussion at: https://lore.kernel.org/lkml/20200616075834.GF2608702@dell/ Suggested-by: Lee Jones Signed-off-by: Charles Keepax Signed-off-by: Lee Jones --- drivers/mfd/mfd-core.c | 16 +++++++++++++++- include/linux/mfd/core.h | 5 +++++ 2 files changed, 20 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index b201842f82ad..c3651f06684f 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c @@ -356,6 +356,7 @@ static int mfd_remove_devices_fn(struct device *dev, void *data) { struct platform_device *pdev; const struct mfd_cell *cell; + int *level = data; if (dev->type != &mfd_dev_type) return 0; @@ -363,6 +364,9 @@ static int mfd_remove_devices_fn(struct device *dev, void *data) pdev = to_platform_device(dev); cell = mfd_get_cell(pdev); + if (level && cell->level > *level) + return 0; + regulator_bulk_unregister_supply_alias(dev, cell->parent_supplies, cell->num_parent_supplies); @@ -372,9 +376,19 @@ static int mfd_remove_devices_fn(struct device *dev, void *data) return 0; } +void mfd_remove_devices_late(struct device *parent) +{ + int level = MFD_DEP_LEVEL_HIGH; + + device_for_each_child_reverse(parent, &level, mfd_remove_devices_fn); +} +EXPORT_SYMBOL(mfd_remove_devices_late); + void mfd_remove_devices(struct device *parent) { - device_for_each_child_reverse(parent, NULL, mfd_remove_devices_fn); + int level = MFD_DEP_LEVEL_NORMAL; + + device_for_each_child_reverse(parent, &level, mfd_remove_devices_fn); } EXPORT_SYMBOL(mfd_remove_devices); diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index 6d68f44a26a1..4b35baa14d30 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -46,6 +46,9 @@ #define MFD_CELL_NAME(_name) \ MFD_CELL_ALL(_name, NULL, NULL, 0, 0, NULL, 0, false, NULL) +#define MFD_DEP_LEVEL_NORMAL 0 +#define MFD_DEP_LEVEL_HIGH 1 + struct irq_domain; struct property_entry; @@ -63,6 +66,7 @@ struct mfd_cell_acpi_match { struct mfd_cell { const char *name; int id; + int level; int (*enable)(struct platform_device *dev); int (*disable)(struct platform_device *dev); @@ -150,6 +154,7 @@ static inline int mfd_add_hotplug_devices(struct device *parent, } extern void mfd_remove_devices(struct device *parent); +extern void mfd_remove_devices_late(struct device *parent); extern int devm_mfd_add_devices(struct device *dev, int id, const struct mfd_cell *cells, int n_devs, -- cgit v1.2.3 From 4f4ed4543e2096dc0158ff79bd6d8bc09e27fa93 Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Wed, 22 Jul 2020 21:24:54 +0200 Subject: mfd: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. 
Klimov Acked-by: Rob Herring Signed-off-by: Lee Jones --- Documentation/devicetree/bindings/mfd/twl-family.txt | 2 +- drivers/mfd/hi6421-pmic-core.c | 2 +- drivers/mfd/lp873x.c | 2 +- drivers/mfd/lp87565.c | 2 +- drivers/mfd/omap-usb-host.c | 2 +- drivers/mfd/omap-usb-tll.c | 2 +- drivers/mfd/ti_am335x_tscadc.c | 2 +- drivers/mfd/tps65086.c | 2 +- drivers/mfd/tps65217.c | 2 +- drivers/mfd/tps65218.c | 2 +- drivers/mfd/tps65912-core.c | 2 +- drivers/mfd/tps65912-i2c.c | 2 +- drivers/mfd/tps65912-spi.c | 2 +- include/linux/mfd/hi6421-pmic.h | 2 +- include/linux/mfd/lp873x.h | 2 +- include/linux/mfd/lp87565.h | 2 +- include/linux/mfd/ti_am335x_tscadc.h | 2 +- include/linux/mfd/tps65086.h | 2 +- include/linux/mfd/tps65217.h | 2 +- include/linux/mfd/tps65218.h | 2 +- include/linux/mfd/tps65912.h | 2 +- 21 files changed, 21 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/Documentation/devicetree/bindings/mfd/twl-family.txt b/Documentation/devicetree/bindings/mfd/twl-family.txt index 56f244b5d8a4..c2f9302965de 100644 --- a/Documentation/devicetree/bindings/mfd/twl-family.txt +++ b/Documentation/devicetree/bindings/mfd/twl-family.txt @@ -26,7 +26,7 @@ Optional node: Example: /* * Integrated Power Management Chip - * http://www.ti.com/lit/ds/symlink/twl6030.pdf + * https://www.ti.com/lit/ds/symlink/twl6030.pdf */ twl@48 { compatible = "ti,twl6030"; diff --git a/drivers/mfd/hi6421-pmic-core.c b/drivers/mfd/hi6421-pmic-core.c index edfc172b8607..eba88b80d969 100644 --- a/drivers/mfd/hi6421-pmic-core.c +++ b/drivers/mfd/hi6421-pmic-core.c @@ -5,7 +5,7 @@ * Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd. * http://www.hisilicon.com * Copyright (c) <2013-2017> Linaro Ltd. - * http://www.linaro.org + * https://www.linaro.org * * Author: Guodong Xu */ diff --git a/drivers/mfd/lp873x.c b/drivers/mfd/lp873x.c index 873c608e6a5d..858c9e0a49a4 100644 --- a/drivers/mfd/lp873x.c +++ b/drivers/mfd/lp873x.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com/ * * Author: Keerthy * diff --git a/drivers/mfd/lp87565.c b/drivers/mfd/lp87565.c index 4a5c8ade4ae0..2268be9113f1 100644 --- a/drivers/mfd/lp87565.c +++ b/drivers/mfd/lp87565.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/ * * Author: Keerthy */ diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c index aca5a160c1b2..1e6431cb8536 100644 --- a/drivers/mfd/omap-usb-host.c +++ b/drivers/mfd/omap-usb-host.c @@ -2,7 +2,7 @@ /** * omap-usb-host.c - The USBHS core driver for OMAP EHCI & OHCI * - * Copyright (C) 2011-2013 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2011-2013 Texas Instruments Incorporated - https://www.ti.com * Author: Keshava Munegowda * Author: Roger Quadros */ diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c index 04a444007cf4..16fad79c73f1 100644 --- a/drivers/mfd/omap-usb-tll.c +++ b/drivers/mfd/omap-usb-tll.c @@ -2,7 +2,7 @@ /** * omap-usb-tll.c - The USB TLL driver for OMAP EHCI & OHCI * - * Copyright (C) 2012-2013 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2012-2013 Texas Instruments Incorporated - https://www.ti.com * Author: Keshava Munegowda * Author: Roger Quadros */ diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c 
index 926c289cb040..0e6e25308190 100644 --- a/drivers/mfd/ti_am335x_tscadc.c +++ b/drivers/mfd/ti_am335x_tscadc.c @@ -1,7 +1,7 @@ /* * TI Touch Screen / ADC MFD driver * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/drivers/mfd/tps65086.c b/drivers/mfd/tps65086.c index 43119a6867fe..341466ef20cc 100644 --- a/drivers/mfd/tps65086.c +++ b/drivers/mfd/tps65086.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/ * Andrew F. Davis * * This program is free software; you can redistribute it and/or diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c index 923602599549..2d9c282ec917 100644 --- a/drivers/mfd/tps65217.c +++ b/drivers/mfd/tps65217.c @@ -3,7 +3,7 @@ * * TPS65217 chip family multi-function driver * - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c index d41dd864b472..167e9fc308ef 100644 --- a/drivers/mfd/tps65218.c +++ b/drivers/mfd/tps65218.c @@ -1,7 +1,7 @@ /* * Driver for TPS65218 Integrated power management chipsets * - * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 as diff --git a/drivers/mfd/tps65912-core.c b/drivers/mfd/tps65912-core.c index f33567bc428d..b55b1d5d6955 100644 --- a/drivers/mfd/tps65912-core.c +++ b/drivers/mfd/tps65912-core.c @@ -1,7 +1,7 @@ /* * Core functions for TI TPS65912x PMICs * - * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/ * Andrew F. Davis * * This program is free software; you can redistribute it and/or diff --git a/drivers/mfd/tps65912-i2c.c b/drivers/mfd/tps65912-i2c.c index 785d19f6f7c9..f7c22ea7b36c 100644 --- a/drivers/mfd/tps65912-i2c.c +++ b/drivers/mfd/tps65912-i2c.c @@ -1,7 +1,7 @@ /* * I2C access driver for TI TPS65912x PMICs * - * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/ * Andrew F. Davis * * This program is free software; you can redistribute it and/or diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c index f78be039e463..21a8d6ac5c4a 100644 --- a/drivers/mfd/tps65912-spi.c +++ b/drivers/mfd/tps65912-spi.c @@ -1,7 +1,7 @@ /* * SPI access driver for TI TPS65912x PMICs * - * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/ * Andrew F. 
Davis * * This program is free software; you can redistribute it and/or diff --git a/include/linux/mfd/hi6421-pmic.h b/include/linux/mfd/hi6421-pmic.h index bbc64484c021..2cadf8897c64 100644 --- a/include/linux/mfd/hi6421-pmic.h +++ b/include/linux/mfd/hi6421-pmic.h @@ -5,7 +5,7 @@ * Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd. * http://www.hisilicon.com * Copyright (c) <2013-2014> Linaro Ltd. - * http://www.linaro.org + * https://www.linaro.org * * Author: Guodong Xu */ diff --git a/include/linux/mfd/lp873x.h b/include/linux/mfd/lp873x.h index edbec8350a49..5546688c7da7 100644 --- a/include/linux/mfd/lp873x.h +++ b/include/linux/mfd/lp873x.h @@ -1,7 +1,7 @@ /* * Functions to access LP873X power management chip. * - * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/include/linux/mfd/lp87565.h b/include/linux/mfd/lp87565.h index ce965354bbad..43716aca46fa 100644 --- a/include/linux/mfd/lp87565.h +++ b/include/linux/mfd/lp87565.h @@ -2,7 +2,7 @@ /* * Functions to access LP87565 power management chip. * - * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/ */ #ifndef __LINUX_MFD_LP87565_H diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h index 483168403ae5..ffc091b77633 100644 --- a/include/linux/mfd/ti_am335x_tscadc.h +++ b/include/linux/mfd/ti_am335x_tscadc.h @@ -4,7 +4,7 @@ /* * TI Touch Screen / ADC MFD driver * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/include/linux/mfd/tps65086.h b/include/linux/mfd/tps65086.h index a228ae4c88d9..e0a417e53766 100644 --- a/include/linux/mfd/tps65086.h +++ b/include/linux/mfd/tps65086.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/ * Andrew F. Davis * * This program is free software; you can redistribute it and/or diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h index b5dd108421c8..db7091824ed0 100644 --- a/include/linux/mfd/tps65217.h +++ b/include/linux/mfd/tps65217.h @@ -3,7 +3,7 @@ * * Functions to access TPS65217 power management chip. * - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h index b0470c35162d..f4ca367e3473 100644 --- a/include/linux/mfd/tps65218.h +++ b/include/linux/mfd/tps65218.h @@ -3,7 +3,7 @@ * * Functions to access TPS65219 power management chip. 
* - * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 as diff --git a/include/linux/mfd/tps65912.h b/include/linux/mfd/tps65912.h index b25d0297ba88..7943e413deae 100644 --- a/include/linux/mfd/tps65912.h +++ b/include/linux/mfd/tps65912.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/ * Andrew F. Davis * * This program is free software; you can redistribute it and/or -- cgit v1.2.3 From 6f92337b6bffb3d9e509024d6ef5c3f2b112757d Mon Sep 17 00:00:00 2001 From: Oleksandr Andrushchenko Date: Thu, 13 Aug 2020 09:21:12 +0300 Subject: xen: Sync up with the canonical protocol definition in Xen This is the sync up with the canonical definition of the display protocol in Xen. 1. Add protocol version as an integer Version string, which is in fact an integer, is hard to handle in the code that supports different protocol versions. To simplify that also add the version as an integer. 2. Pass buffer offset with XENDISPL_OP_DBUF_CREATE There are cases when display data buffer is created with non-zero offset to the data start. Handle such cases and provide that offset while creating a display buffer. 3. Add XENDISPL_OP_GET_EDID command Add an optional request for reading Extended Display Identification Data (EDID) structure which allows better configuration of the display connectors over the configuration set in XenStore. With this change connectors may have multiple resolutions defined with respect to detailed timing definitions and additional properties normally provided by displays. If this request is not supported by the backend then visible area is defined by the relevant XenStore's "resolution" property. If backend provides extended display identification data (EDID) with XENDISPL_OP_GET_EDID request then EDID values must take precedence over the resolutions defined in XenStore. 4. Bump protocol version to 2. Signed-off-by: Oleksandr Andrushchenko Reviewed-by: Juergen Gross Link: https://lore.kernel.org/r/20200813062113.11030-5-andr2000@gmail.com Signed-off-by: Juergen Gross --- include/xen/interface/io/displif.h | 91 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 88 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/xen/interface/io/displif.h b/include/xen/interface/io/displif.h index fdc279dc4a88..d43ca0361f86 100644 --- a/include/xen/interface/io/displif.h +++ b/include/xen/interface/io/displif.h @@ -38,7 +38,8 @@ * Protocol version ****************************************************************************** */ -#define XENDISPL_PROTOCOL_VERSION "1" +#define XENDISPL_PROTOCOL_VERSION "2" +#define XENDISPL_PROTOCOL_VERSION_INT 2 /* ****************************************************************************** @@ -202,6 +203,9 @@ * Width and height of the connector in pixels separated by * XENDISPL_RESOLUTION_SEPARATOR. This defines visible area of the * display. + * If backend provides extended display identification data (EDID) with + * XENDISPL_OP_GET_EDID request then EDID values must take precedence + * over the resolutions defined here. 
* *------------------ Connector Request Transport Parameters ------------------- * @@ -349,6 +353,8 @@ #define XENDISPL_OP_FB_DETACH 0x13 #define XENDISPL_OP_SET_CONFIG 0x14 #define XENDISPL_OP_PG_FLIP 0x15 +/* The below command is available in protocol version 2 and above. */ +#define XENDISPL_OP_GET_EDID 0x16 /* ****************************************************************************** @@ -377,6 +383,10 @@ #define XENDISPL_FIELD_BE_ALLOC "be-alloc" #define XENDISPL_FIELD_UNIQUE_ID "unique-id" +#define XENDISPL_EDID_BLOCK_SIZE 128 +#define XENDISPL_EDID_BLOCK_COUNT 256 +#define XENDISPL_EDID_MAX_SIZE (XENDISPL_EDID_BLOCK_SIZE * XENDISPL_EDID_BLOCK_COUNT) + /* ****************************************************************************** * STATUS RETURN CODES @@ -451,7 +461,9 @@ * +----------------+----------------+----------------+----------------+ * | gref_directory | 40 * +----------------+----------------+----------------+----------------+ - * | reserved | 44 + * | data_ofs | 44 + * +----------------+----------------+----------------+----------------+ + * | reserved | 48 * +----------------+----------------+----------------+----------------+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| * +----------------+----------------+----------------+----------------+ @@ -494,6 +506,7 @@ * buffer size (buffer_sz) exceeds what can be addressed by this single page, * then reference to the next page must be supplied (see gref_dir_next_page * below) + * data_ofs - uint32_t, offset of the data in the buffer, octets */ #define XENDISPL_DBUF_FLG_REQ_ALLOC (1 << 0) @@ -506,6 +519,7 @@ struct xendispl_dbuf_create_req { uint32_t buffer_sz; uint32_t flags; grant_ref_t gref_directory; + uint32_t data_ofs; }; /* @@ -731,6 +745,44 @@ struct xendispl_page_flip_req { uint64_t fb_cookie; }; +/* + * Request EDID - request EDID describing current connector: + * 0 1 2 3 octet + * +----------------+----------------+----------------+----------------+ + * | id | _OP_GET_EDID | reserved | 4 + * +----------------+----------------+----------------+----------------+ + * | buffer_sz | 8 + * +----------------+----------------+----------------+----------------+ + * | gref_directory | 12 + * +----------------+----------------+----------------+----------------+ + * | reserved | 16 + * +----------------+----------------+----------------+----------------+ + * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| + * +----------------+----------------+----------------+----------------+ + * | reserved | 64 + * +----------------+----------------+----------------+----------------+ + * + * Notes: + * - This command is not available in protocol version 1 and should be + * ignored. + * - This request is optional and if not supported then visible area + * is defined by the relevant XenStore's "resolution" property. + * - Shared buffer, allocated for EDID storage, must not be less then + * XENDISPL_EDID_MAX_SIZE octets. + * + * buffer_sz - uint32_t, buffer size to be allocated, octets + * gref_directory - grant_ref_t, a reference to the first shared page + * describing EDID buffer references. See XENDISPL_OP_DBUF_CREATE for + * grant page directory structure (struct xendispl_page_directory). + * + * See response format for this request. 
+ */ + +struct xendispl_get_edid_req { + uint32_t buffer_sz; + grant_ref_t gref_directory; +}; + /* *---------------------------------- Responses -------------------------------- * @@ -753,6 +805,35 @@ struct xendispl_page_flip_req { * id - uint16_t, private guest value, echoed from request * status - int32_t, response status, zero on success and -XEN_EXX on failure * + * + * Get EDID response - response for XENDISPL_OP_GET_EDID: + * 0 1 2 3 octet + * +----------------+----------------+----------------+----------------+ + * | id | operation | reserved | 4 + * +----------------+----------------+----------------+----------------+ + * | status | 8 + * +----------------+----------------+----------------+----------------+ + * | edid_sz | 12 + * +----------------+----------------+----------------+----------------+ + * | reserved | 16 + * +----------------+----------------+----------------+----------------+ + * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| + * +----------------+----------------+----------------+----------------+ + * | reserved | 64 + * +----------------+----------------+----------------+----------------+ + * + * Notes: + * - This response is not available in protocol version 1 and should be + * ignored. + * + * edid_sz - uint32_t, size of the EDID, octets + */ + +struct xendispl_get_edid_resp { + uint32_t edid_sz; +}; + +/* *----------------------------------- Events ---------------------------------- * * Events are sent via a shared page allocated by the front and propagated by @@ -804,6 +885,7 @@ struct xendispl_req { struct xendispl_fb_detach_req fb_detach; struct xendispl_set_config_req set_config; struct xendispl_page_flip_req pg_flip; + struct xendispl_get_edid_req get_edid; uint8_t reserved[56]; } op; }; @@ -813,7 +895,10 @@ struct xendispl_resp { uint8_t operation; uint8_t reserved; int32_t status; - uint8_t reserved1[56]; + union { + struct xendispl_get_edid_resp get_edid; + uint8_t reserved1[56]; + } op; }; struct xendispl_evt { -- cgit v1.2.3 From 94c7eb54c4b8e81618ec79f414fe1ca5767f9720 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 13 Aug 2020 10:06:43 -0700 Subject: random32: add a tracepoint for prandom_u32() There has been some heat around prandom_u32() lately, and some people were wondering if there was a simple way to determine how often it was used, before considering making it maybe 10 times more expensive. This tracepoint exports the generated pseudo random value. Tested: perf list | grep prandom_u32 random:prandom_u32 [Tracepoint event] perf record -a [-g] [-C1] -e random:prandom_u32 sleep 1 [ perf record: Woken up 0 times to write data ] [ perf record: Captured and wrote 259.748 MB perf.data (924087 samples) ] perf report --nochildren ... 97.67% ksoftirqd/1 [kernel.vmlinux] [k] prandom_u32 | ---prandom_u32 prandom_u32 | |--48.86%--tcp_v4_syn_recv_sock | tcp_check_req | tcp_v4_rcv | ... --48.81%--tcp_conn_request tcp_v4_conn_request tcp_rcv_state_process ... perf script Signed-off-by: Eric Dumazet Cc: Willy Tarreau Cc: Sedat Dilek Tested-by: Sedat Dilek Signed-off-by: David S. 
Miller --- include/trace/events/random.h | 17 +++++++++++++++++ lib/random32.c | 2 ++ 2 files changed, 19 insertions(+) (limited to 'include') diff --git a/include/trace/events/random.h b/include/trace/events/random.h index 32c10a515e2d..9570a10cb949 100644 --- a/include/trace/events/random.h +++ b/include/trace/events/random.h @@ -307,6 +307,23 @@ TRACE_EVENT(urandom_read, __entry->pool_left, __entry->input_left) ); +TRACE_EVENT(prandom_u32, + + TP_PROTO(unsigned int ret), + + TP_ARGS(ret), + + TP_STRUCT__entry( + __field( unsigned int, ret) + ), + + TP_fast_assign( + __entry->ret = ret; + ), + + TP_printk("ret=%u" , __entry->ret) +); + #endif /* _TRACE_RANDOM_H */ /* This part must be outside protection */ diff --git a/lib/random32.c b/lib/random32.c index 3d749abb9e80..932345323af0 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -39,6 +39,7 @@ #include #include #include +#include #ifdef CONFIG_RANDOM32_SELFTEST static void __init prandom_state_selftest(void); @@ -82,6 +83,7 @@ u32 prandom_u32(void) u32 res; res = prandom_u32_state(state); + trace_prandom_u32(res); put_cpu_var(net_rand_state); return res; -- cgit v1.2.3 From 9420139f516d7fbc248ce17f35275cb005ed98ea Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 14 Aug 2020 12:26:24 +0200 Subject: dma-pool: fix coherent pool allocations for IOMMU mappings When allocating coherent pool memory for an IOMMU mapping we don't care about the DMA mask. Move the guess for the initial GFP mask into the dma_direct_alloc_pages and pass dma_coherent_ok as a function pointer argument so that it doesn't get applied to the IOMMU case. Signed-off-by: Christoph Hellwig Tested-by: Amit Pundir --- drivers/iommu/dma-iommu.c | 4 +- include/linux/dma-direct.h | 3 -- include/linux/dma-mapping.h | 5 +- kernel/dma/direct.c | 13 +++-- kernel/dma/pool.c | 114 +++++++++++++++++++------------------------- 5 files changed, 62 insertions(+), 77 deletions(-) (limited to 'include') diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 4959f5df21bd..5141d49a046b 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -1035,8 +1035,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size, if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !gfpflags_allow_blocking(gfp) && !coherent) - cpu_addr = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page, - gfp); + page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr, + gfp, NULL); else cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs); if (!cpu_addr) diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h index 5a3ce2a24794..6e87225600ae 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h @@ -73,9 +73,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size, } u64 dma_direct_get_required_mask(struct device *dev); -gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask, - u64 *phys_mask); -bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size); void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 016b96b384bd..52635e91143b 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -522,8 +522,9 @@ void *dma_common_pages_remap(struct page **pages, size_t size, pgprot_t prot, const void *caller); void dma_common_free_remap(void *cpu_addr, size_t size); 
-void *dma_alloc_from_pool(struct device *dev, size_t size, - struct page **ret_page, gfp_t flags); +struct page *dma_alloc_from_pool(struct device *dev, size_t size, + void **cpu_addr, gfp_t flags, + bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t)); bool dma_free_from_pool(struct device *dev, void *start, size_t size); int diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index bb0041e99659..db6ef07aec3b 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -43,7 +43,7 @@ u64 dma_direct_get_required_mask(struct device *dev) return (1ULL << (fls64(max_dma) - 1)) * 2 - 1; } -gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask, +static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask, u64 *phys_limit) { u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit); @@ -68,7 +68,7 @@ gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask, return 0; } -bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) +static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) { return phys_to_dma_direct(dev, phys) + size - 1 <= min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit); @@ -161,8 +161,13 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size, size = PAGE_ALIGN(size); if (dma_should_alloc_from_pool(dev, gfp, attrs)) { - ret = dma_alloc_from_pool(dev, size, &page, gfp); - if (!ret) + u64 phys_mask; + + gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask, + &phys_mask); + page = dma_alloc_from_pool(dev, size, &ret, gfp, + dma_coherent_ok); + if (!page) return NULL; goto done; } diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c index 6bc74a2d5127..5d071d4a3cba 100644 --- a/kernel/dma/pool.c +++ b/kernel/dma/pool.c @@ -196,93 +196,75 @@ static int __init dma_atomic_pool_init(void) } postcore_initcall(dma_atomic_pool_init); -static inline struct gen_pool *dma_guess_pool_from_device(struct device *dev) +static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp) { - u64 phys_mask; - gfp_t gfp; - - gfp = dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask, - &phys_mask); - if (IS_ENABLED(CONFIG_ZONE_DMA) && gfp == GFP_DMA) + if (prev == NULL) { + if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32)) + return atomic_pool_dma32; + if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA)) + return atomic_pool_dma; + return atomic_pool_kernel; + } + if (prev == atomic_pool_kernel) + return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma; + if (prev == atomic_pool_dma32) return atomic_pool_dma; - if (IS_ENABLED(CONFIG_ZONE_DMA32) && gfp == GFP_DMA32) - return atomic_pool_dma32; - return atomic_pool_kernel; + return NULL; } -static inline struct gen_pool *dma_get_safer_pool(struct gen_pool *bad_pool) +static struct page *__dma_alloc_from_pool(struct device *dev, size_t size, + struct gen_pool *pool, void **cpu_addr, + bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t)) { - if (bad_pool == atomic_pool_kernel) - return atomic_pool_dma32 ? 
: atomic_pool_dma; + unsigned long addr; + phys_addr_t phys; - if (bad_pool == atomic_pool_dma32) - return atomic_pool_dma; + addr = gen_pool_alloc(pool, size); + if (!addr) + return NULL; - return NULL; -} + phys = gen_pool_virt_to_phys(pool, addr); + if (phys_addr_ok && !phys_addr_ok(dev, phys, size)) { + gen_pool_free(pool, addr, size); + return NULL; + } -static inline struct gen_pool *dma_guess_pool(struct device *dev, - struct gen_pool *bad_pool) -{ - if (bad_pool) - return dma_get_safer_pool(bad_pool); + if (gen_pool_avail(pool) < atomic_pool_size) + schedule_work(&atomic_pool_work); - return dma_guess_pool_from_device(dev); + *cpu_addr = (void *)addr; + memset(*cpu_addr, 0, size); + return pfn_to_page(__phys_to_pfn(phys)); } -void *dma_alloc_from_pool(struct device *dev, size_t size, - struct page **ret_page, gfp_t flags) +struct page *dma_alloc_from_pool(struct device *dev, size_t size, + void **cpu_addr, gfp_t gfp, + bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t)) { struct gen_pool *pool = NULL; - unsigned long val = 0; - void *ptr = NULL; - phys_addr_t phys; - - while (1) { - pool = dma_guess_pool(dev, pool); - if (!pool) { - WARN(1, "Failed to get suitable pool for %s\n", - dev_name(dev)); - break; - } - - val = gen_pool_alloc(pool, size); - if (!val) - continue; - - phys = gen_pool_virt_to_phys(pool, val); - if (dma_coherent_ok(dev, phys, size)) - break; - - gen_pool_free(pool, val, size); - val = 0; - } - - - if (val) { - *ret_page = pfn_to_page(__phys_to_pfn(phys)); - ptr = (void *)val; - memset(ptr, 0, size); + struct page *page; - if (gen_pool_avail(pool) < atomic_pool_size) - schedule_work(&atomic_pool_work); + while ((pool = dma_guess_pool(pool, gfp))) { + page = __dma_alloc_from_pool(dev, size, pool, cpu_addr, + phys_addr_ok); + if (page) + return page; } - return ptr; + WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev)); + return NULL; } bool dma_free_from_pool(struct device *dev, void *start, size_t size) { struct gen_pool *pool = NULL; - while (1) { - pool = dma_guess_pool(dev, pool); - if (!pool) - return false; - - if (gen_pool_has_addr(pool, (unsigned long)start, size)) { - gen_pool_free(pool, (unsigned long)start, size); - return true; - } + while ((pool = dma_guess_pool(pool, 0))) { + if (!gen_pool_has_addr(pool, (unsigned long)start, size)) + continue; + gen_pool_free(pool, (unsigned long)start, size); + return true; } + + return false; } -- cgit v1.2.3 From 5848dc5b1b76d83599dcec1b39f188a7cbdca7e2 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 14 Aug 2020 15:22:43 -0700 Subject: dma-debug: remove debug_dma_assert_idle() function This remoes the code from the COW path to call debug_dma_assert_idle(), which was added many years ago. Google shows that it hasn't caught anything in the 6+ years we've had it apart from a false positive, and Hugh just noticed how it had a very unfortunate spinlock serialization in the COW path. He fixed that issue the previous commit (a85ffd59bd36: "dma-debug: fix debug_dma_assert_idle(), use rcu_read_lock()"), but let's see if anybody even notices when we remove this function entirely. NOTE! We keep the dma tracking infrastructure that was added by the commit that introduced it. Partly to make it easier to resurrect this debug code if we ever deside to, and partly because that tracking by pfn and offset looks quite reasonable. The problem with this debug code was simply that it was expensive and didn't seem worth it, not that it was wrong per se. 
Acked-by: Dan Williams Acked-by: Christoph Hellwig Signed-off-by: Linus Torvalds --- include/linux/dma-debug.h | 6 ------ kernel/dma/Kconfig | 5 ----- kernel/dma/debug.c | 46 +--------------------------------------------- mm/memory.c | 2 -- 4 files changed, 1 insertion(+), 58 deletions(-) (limited to 'include') diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h index 4208f94d93f7..7b3b04ba60f3 100644 --- a/include/linux/dma-debug.h +++ b/include/linux/dma-debug.h @@ -67,8 +67,6 @@ extern void debug_dma_sync_sg_for_device(struct device *dev, extern void debug_dma_dump_mappings(struct device *dev); -extern void debug_dma_assert_idle(struct page *page); - #else /* CONFIG_DMA_API_DEBUG */ static inline void dma_debug_add_bus(struct bus_type *bus) @@ -157,10 +155,6 @@ static inline void debug_dma_dump_mappings(struct device *dev) { } -static inline void debug_dma_assert_idle(struct page *page) -{ -} - #endif /* CONFIG_DMA_API_DEBUG */ #endif /* __DMA_DEBUG_H */ diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig index f4770fcfa62b..5732b2b3ef17 100644 --- a/kernel/dma/Kconfig +++ b/kernel/dma/Kconfig @@ -186,11 +186,6 @@ config DMA_API_DEBUG drivers like double-freeing of DMA mappings or freeing mappings that were never allocated. - This also attempts to catch cases where a page owned by DMA is - accessed by the cpu in a way that could cause data corruption. For - example, this enables cow_user_page() to check that the source page is - not undergoing DMA. - This option causes a performance degradation. Use only if you want to debug device drivers and dma interactions. diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c index 6f4f4b9d2d03..8e9f7b301c6d 100644 --- a/kernel/dma/debug.c +++ b/kernel/dma/debug.c @@ -448,9 +448,6 @@ void debug_dma_dump_mappings(struct device *dev) * dma_active_cacheline entry to track per event. dma_map_sg(), on the * other hand, consumes a single dma_debug_entry, but inserts 'nents' * entries into the tree. - * - * At any time debug_dma_assert_idle() can be called to trigger a - * warning if any cachelines in the given page are in the active set. */ static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT); static DEFINE_SPINLOCK(radix_lock); @@ -497,10 +494,7 @@ static void active_cacheline_inc_overlap(phys_addr_t cln) overlap = active_cacheline_set_overlap(cln, ++overlap); /* If we overflowed the overlap counter then we're potentially - * leaking dma-mappings. Otherwise, if maps and unmaps are - * balanced then this overflow may cause false negatives in - * debug_dma_assert_idle() as the cacheline may be marked idle - * prematurely. + * leaking dma-mappings. */ WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP, pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"), @@ -555,44 +549,6 @@ static void active_cacheline_remove(struct dma_debug_entry *entry) spin_unlock_irqrestore(&radix_lock, flags); } -/** - * debug_dma_assert_idle() - assert that a page is not undergoing dma - * @page: page to lookup in the dma_active_cacheline tree - * - * Place a call to this routine in cases where the cpu touching the page - * before the dma completes (page is dma_unmapped) will lead to data - * corruption. 
- */ -void debug_dma_assert_idle(struct page *page) -{ - struct dma_debug_entry *entry; - unsigned long pfn; - phys_addr_t cln; - - if (dma_debug_disabled()) - return; - - if (!page) - return; - - pfn = page_to_pfn(page); - cln = (phys_addr_t) pfn << CACHELINE_PER_PAGE_SHIFT; - - rcu_read_lock(); - if (!radix_tree_gang_lookup(&dma_active_cacheline, (void **) &entry, - cln, 1) || entry->pfn != pfn) - entry = NULL; - rcu_read_unlock(); - - if (!entry) - return; - - cln = to_cacheline_number(entry); - err_printk(entry->dev, entry, - "cpu touching an active dma mapped cacheline [cln=%pa]\n", - &cln); -} - /* * Wrapper function for adding an entry to the hash. * This function takes care of locking itself. diff --git a/mm/memory.c b/mm/memory.c index 228efaca75d3..d3c3bbd65a7e 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2411,8 +2411,6 @@ static inline bool cow_user_page(struct page *dst, struct page *src, struct mm_struct *mm = vma->vm_mm; unsigned long addr = vmf->address; - debug_dma_assert_idle(src); - if (likely(src)) { copy_user_highpage(dst, src, addr, vma); return true; -- cgit v1.2.3 From 9922c1deff915c2b67ec79ea6b87c289772c6492 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Fri, 14 Aug 2020 17:30:04 -0700 Subject: asm-generic: pgalloc.h: use correct #ifdef to enable pud_alloc_one() The #ifdef statement that guards the generic version of pud_alloc_one() by mistake used __HAVE_ARCH_PUD_FREE instead of __HAVE_ARCH_PUD_ALLOC_ONE. Fix it. Fixes: d9e8b929670b ("asm-generic: pgalloc: provide generic pud_alloc_one() and pud_free_one()") Reported-by: kernel test robot Signed-off-by: Mike Rapoport Signed-off-by: Andrew Morton Link: http://lkml.kernel.org/r/20200812191415.GE163101@linux.ibm.com Signed-off-by: Linus Torvalds --- include/asm-generic/pgalloc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h index 6f44810921aa..02932efad3ab 100644 --- a/include/asm-generic/pgalloc.h +++ b/include/asm-generic/pgalloc.h @@ -147,7 +147,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) #if CONFIG_PGTABLE_LEVELS > 3 -#ifndef __HAVE_ARCH_PUD_FREE +#ifndef __HAVE_ARCH_PUD_ALLOC_ONE /** * pud_alloc_one - allocate a page for PUD-level page table * @mm: the mm_struct of the current context -- cgit v1.2.3 From 1378a5ee451a5e87d0d8dd6356a0b7844db231f6 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 14 Aug 2020 17:30:23 -0700 Subject: mm: store compound_nr as well as compound_order Patch series "THP prep patches". These are some generic cleanups and improvements, which I would like merged into mmotm soon. The first one should be a performance improvement for all users of compound pages, and the others are aimed at getting code to compile away when CONFIG_TRANSPARENT_HUGEPAGE is disabled (ie small systems). Also better documented / less confusing than the current prefix mixture of compound, hpage and thp. This patch (of 7): This removes a few instructions from functions which need to know how many pages are in a compound page. The storage used is either page->mapping on 64-bit or page->index on 32-bit. Both of these are fine to overlay on tail pages. Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Reviewed-by: William Kucharski Reviewed-by: Zi Yan Cc: David Hildenbrand Cc: Mike Kravetz Cc: "Kirill A. 
Shutemov" Link: http://lkml.kernel.org/r/20200629151959.15779-1-willy@infradead.org Link: http://lkml.kernel.org/r/20200629151959.15779-2-willy@infradead.org Signed-off-by: Linus Torvalds --- include/linux/mm.h | 5 ++++- include/linux/mm_types.h | 1 + mm/page_alloc.c | 5 +++-- 3 files changed, 8 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/mm.h b/include/linux/mm.h index e7602a3bcef1..10b1e6e5f885 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -922,12 +922,15 @@ static inline int compound_pincount(struct page *page) static inline void set_compound_order(struct page *page, unsigned int order) { page[1].compound_order = order; + page[1].compound_nr = 1U << order; } /* Returns the number of pages in this potentially compound page. */ static inline unsigned long compound_nr(struct page *page) { - return 1UL << compound_order(page); + if (!PageHead(page)) + return 1; + return page[1].compound_nr; } /* Returns the number of bytes in this potentially compound page. */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 0277fbab7c93..496c3ff97cce 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -134,6 +134,7 @@ struct page { unsigned char compound_dtor; unsigned char compound_order; atomic_t compound_mapcount; + unsigned int compound_nr; /* 1 << compound_order */ }; struct { /* Second tail page of compound page */ unsigned long _compound_pad_1; /* compound_head */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 8b7d0ecf30b1..0e2bab486fea 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -666,8 +666,6 @@ void prep_compound_page(struct page *page, unsigned int order) int i; int nr_pages = 1 << order; - set_compound_page_dtor(page, COMPOUND_PAGE_DTOR); - set_compound_order(page, order); __SetPageHead(page); for (i = 1; i < nr_pages; i++) { struct page *p = page + i; @@ -675,6 +673,9 @@ void prep_compound_page(struct page *page, unsigned int order) p->mapping = TAIL_MAPPING; set_compound_head(p, page); } + + set_compound_page_dtor(page, COMPOUND_PAGE_DTOR); + set_compound_order(page, order); atomic_set(compound_mapcount_ptr(page), -1); if (hpage_pincount_available(page)) atomic_set(compound_pincount_ptr(page), 0); -- cgit v1.2.3 From 419015675fef6c4b28689bd2ace559564c2e106c Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 14 Aug 2020 17:30:26 -0700 Subject: mm: move page-flags include to top of file Give up on the notion that we can remove page-flags.h from mm.h. There are currently 14 inline functions which use a PageFoo function. Also, two of the files directly included by mm.h include page-flags.h themselves, and there are probably more indirect inclusions. So just include it at the top like any other header file. Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Reviewed-by: William Kucharski Reviewed-by: Zi Yan Cc: David Hildenbrand Cc: Mike Kravetz Cc: "Kirill A. 
Shutemov" Link: http://lkml.kernel.org/r/20200629151959.15779-3-willy@infradead.org Signed-off-by: Linus Torvalds --- include/linux/mm.h | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/mm.h b/include/linux/mm.h index 10b1e6e5f885..fd0cd4e93029 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -668,11 +669,6 @@ int vma_is_stack_for_current(struct vm_area_struct *vma); struct mmu_gather; struct inode; -/* - * FIXME: take this include out, include page-flags.h in - * files which need it (119 of them) - */ -#include #include /* -- cgit v1.2.3 From 6ffbb45826f5d9ae09aa60cd88594b7816c96190 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 14 Aug 2020 17:30:30 -0700 Subject: mm: add thp_order This function returns the order of a transparent huge page. It compiles to 0 if CONFIG_TRANSPARENT_HUGEPAGE is disabled. Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Reviewed-by: William Kucharski Reviewed-by: Zi Yan Cc: David Hildenbrand Cc: Mike Kravetz Cc: "Kirill A. Shutemov" Link: http://lkml.kernel.org/r/20200629151959.15779-4-willy@infradead.org Signed-off-by: Linus Torvalds --- include/linux/huge_mm.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'include') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 467302056e17..9521cfdf18ca 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -258,6 +258,19 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, else return NULL; } + +/** + * thp_order - Order of a transparent huge page. + * @page: Head page of a transparent huge page. + */ +static inline unsigned int thp_order(struct page *page) +{ + VM_BUG_ON_PGFLAGS(PageTail(page), page); + if (PageHead(page)) + return HPAGE_PMD_ORDER; + return 0; +} + static inline int hpage_nr_pages(struct page *page) { if (unlikely(PageTransHuge(page))) @@ -317,6 +330,12 @@ static inline struct list_head *page_deferred_list(struct page *page) #define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; }) #define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; }) +static inline unsigned int thp_order(struct page *page) +{ + VM_BUG_ON_PGFLAGS(PageTail(page), page); + return 0; +} + static inline int hpage_nr_pages(struct page *page) { VM_BUG_ON_PAGE(PageTail(page), page); -- cgit v1.2.3 From af3bbc12df80e8c279b94c752b6edca29841f4f5 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 14 Aug 2020 17:30:33 -0700 Subject: mm: add thp_size This function returns the number of bytes in a THP. It is like page_size(), but compiles to just PAGE_SIZE if CONFIG_TRANSPARENT_HUGEPAGE is disabled. Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Reviewed-by: William Kucharski Reviewed-by: Zi Yan Cc: David Hildenbrand Cc: Mike Kravetz Cc: "Kirill A. 
Shutemov" Link: http://lkml.kernel.org/r/20200629151959.15779-5-willy@infradead.org Signed-off-by: Linus Torvalds --- drivers/nvdimm/btt.c | 4 +--- drivers/nvdimm/pmem.c | 6 ++---- include/linux/huge_mm.h | 11 +++++++++++ mm/internal.h | 2 +- mm/page_io.c | 2 +- mm/page_vma_mapped.c | 4 ++-- 6 files changed, 18 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 412d21d8f643..0ff610e728ff 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -1490,10 +1490,8 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector, { struct btt *btt = bdev->bd_disk->private_data; int rc; - unsigned int len; - len = hpage_nr_pages(page) * PAGE_SIZE; - rc = btt_do_bvec(btt, NULL, page, len, 0, op, sector); + rc = btt_do_bvec(btt, NULL, page, thp_size(page), 0, op, sector); if (rc == 0) page_endio(page, op_is_write(op), 0); diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 94790e6e0e4c..fab29b514372 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -238,11 +238,9 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector, blk_status_t rc; if (op_is_write(op)) - rc = pmem_do_write(pmem, page, 0, sector, - hpage_nr_pages(page) * PAGE_SIZE); + rc = pmem_do_write(pmem, page, 0, sector, thp_size(page)); else - rc = pmem_do_read(pmem, page, 0, sector, - hpage_nr_pages(page) * PAGE_SIZE); + rc = pmem_do_read(pmem, page, 0, sector, thp_size(page)); /* * The ->rw_page interface is subtle and tricky. The core * retries on any error, so we can only invoke page_endio() in diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 9521cfdf18ca..9b33ac774fdd 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -462,4 +462,15 @@ static inline bool thp_migration_supported(void) } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +/** + * thp_size - Size of a transparent huge page. + * @page: Head page of a transparent huge page. + * + * Return: Number of bytes in this page. + */ +static inline unsigned long thp_size(struct page *page) +{ + return PAGE_SIZE << thp_order(page); +} + #endif /* _LINUX_HUGE_MM_H */ diff --git a/mm/internal.h b/mm/internal.h index d11a9a8d2135..912bb1a1c10e 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -396,7 +396,7 @@ vma_address(struct page *page, struct vm_area_struct *vma) unsigned long start, end; start = __vma_address(page, vma); - end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1); + end = start + thp_size(page) - PAGE_SIZE; /* page should be within @vma mapping range */ VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma); diff --git a/mm/page_io.c b/mm/page_io.c index 9e362567d454..f5e8bec8a8c7 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -40,7 +40,7 @@ static struct bio *get_swap_bio(gfp_t gfp_flags, bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9; bio->bi_end_io = end_io; - bio_add_page(bio, page, PAGE_SIZE * hpage_nr_pages(page), 0); + bio_add_page(bio, page, thp_size(page), 0); } return bio; } diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index 719c35246cfa..e65629c056e8 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c @@ -227,7 +227,7 @@ next_pte: if (pvmw->address >= pvmw->vma->vm_end || pvmw->address >= __vma_address(pvmw->page, pvmw->vma) + - hpage_nr_pages(pvmw->page) * PAGE_SIZE) + thp_size(pvmw->page)) return not_found(pvmw); /* Did we cross page table boundary? 
*/ if (pvmw->address % PMD_SIZE == 0) { @@ -268,7 +268,7 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma) unsigned long start, end; start = __vma_address(page, vma); - end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1); + end = start + thp_size(page) - PAGE_SIZE; if (unlikely(end < vma->vm_start || start >= vma->vm_end)) return 0; -- cgit v1.2.3 From 6c357848b44b4016ca422178aa368a7472245f6f Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 14 Aug 2020 17:30:37 -0700 Subject: mm: replace hpage_nr_pages with thp_nr_pages The thp prefix is more frequently used than hpage and we should be consistent between the various functions. [akpm@linux-foundation.org: fix mm/migrate.c] Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Reviewed-by: William Kucharski Reviewed-by: Zi Yan Cc: Mike Kravetz Cc: David Hildenbrand Cc: "Kirill A. Shutemov" Link: http://lkml.kernel.org/r/20200629151959.15779-6-willy@infradead.org Signed-off-by: Linus Torvalds --- include/linux/huge_mm.h | 13 +++++++++---- include/linux/mm_inline.h | 6 +++--- include/linux/pagemap.h | 6 +++--- mm/compaction.c | 2 +- mm/filemap.c | 2 +- mm/gup.c | 2 +- mm/internal.h | 2 +- mm/memcontrol.c | 10 +++++----- mm/memory_hotplug.c | 7 +++---- mm/mempolicy.c | 2 +- mm/migrate.c | 18 +++++++++--------- mm/mlock.c | 9 ++++----- mm/page_io.c | 2 +- mm/page_vma_mapped.c | 2 +- mm/rmap.c | 8 ++++---- mm/swap.c | 16 ++++++++-------- mm/swap_state.c | 6 +++--- mm/swapfile.c | 2 +- mm/vmscan.c | 6 +++--- mm/workingset.c | 6 +++--- 20 files changed, 65 insertions(+), 62 deletions(-) (limited to 'include') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 9b33ac774fdd..229f986d535a 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -271,9 +271,14 @@ static inline unsigned int thp_order(struct page *page) return 0; } -static inline int hpage_nr_pages(struct page *page) +/** + * thp_nr_pages - The number of regular pages in this huge page. + * @page: The head page of a huge page. 
+ */ +static inline int thp_nr_pages(struct page *page) { - if (unlikely(PageTransHuge(page))) + VM_BUG_ON_PGFLAGS(PageTail(page), page); + if (PageHead(page)) return HPAGE_PMD_NR; return 1; } @@ -336,9 +341,9 @@ static inline unsigned int thp_order(struct page *page) return 0; } -static inline int hpage_nr_pages(struct page *page) +static inline int thp_nr_pages(struct page *page) { - VM_BUG_ON_PAGE(PageTail(page), page); + VM_BUG_ON_PGFLAGS(PageTail(page), page); return 1; } diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 219bef41d87c..8fc71e9d7bb0 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -48,14 +48,14 @@ static __always_inline void update_lru_size(struct lruvec *lruvec, static __always_inline void add_page_to_lru_list(struct page *page, struct lruvec *lruvec, enum lru_list lru) { - update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page)); + update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page)); list_add(&page->lru, &lruvec->lists[lru]); } static __always_inline void add_page_to_lru_list_tail(struct page *page, struct lruvec *lruvec, enum lru_list lru) { - update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page)); + update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page)); list_add_tail(&page->lru, &lruvec->lists[lru]); } @@ -63,7 +63,7 @@ static __always_inline void del_page_from_lru_list(struct page *page, struct lruvec *lruvec, enum lru_list lru) { list_del(&page->lru); - update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page)); + update_lru_size(lruvec, lru, page_zonenum(page), -thp_nr_pages(page)); } /** diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index d1f4eff605ad..7de11dcd534d 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -381,7 +381,7 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index) if (PageHuge(head)) return head; - return head + (index & (hpage_nr_pages(head) - 1)); + return head + (index & (thp_nr_pages(head) - 1)); } struct page *find_get_entry(struct address_space *mapping, pgoff_t offset); @@ -773,7 +773,7 @@ static inline struct page *readahead_page(struct readahead_control *rac) page = xa_load(&rac->mapping->i_pages, rac->_index); VM_BUG_ON_PAGE(!PageLocked(page), page); - rac->_batch_count = hpage_nr_pages(page); + rac->_batch_count = thp_nr_pages(page); return page; } @@ -796,7 +796,7 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac, VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(PageTail(page), page); array[i++] = page; - rac->_batch_count += hpage_nr_pages(page); + rac->_batch_count += thp_nr_pages(page); /* * The page cache isn't using multi-index entries yet, diff --git a/mm/compaction.c b/mm/compaction.c index b89581bf859c..176dcded298e 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1009,7 +1009,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, del_page_from_lru_list(page, lruvec, page_lru(page)); mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page), - hpage_nr_pages(page)); + thp_nr_pages(page)); isolate_success: list_add(&page->lru, &cc->migratepages); diff --git a/mm/filemap.c b/mm/filemap.c index 8e75bce0346d..653190943aa7 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -198,7 +198,7 @@ static void unaccount_page_cache_page(struct address_space *mapping, if (PageHuge(page)) return; - nr = hpage_nr_pages(page); + nr = thp_nr_pages(page); 
__mod_lruvec_page_state(page, NR_FILE_PAGES, -nr); if (PageSwapBacked(page)) { diff --git a/mm/gup.c b/mm/gup.c index 39e58df6925d..ae096ea7583f 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1637,7 +1637,7 @@ check_again: mod_node_page_state(page_pgdat(head), NR_ISOLATED_ANON + page_is_file_lru(head), - hpage_nr_pages(head)); + thp_nr_pages(head)); } } } diff --git a/mm/internal.h b/mm/internal.h index 912bb1a1c10e..10c677655912 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -369,7 +369,7 @@ extern void clear_page_mlock(struct page *page); static inline void mlock_migrate_page(struct page *newpage, struct page *page) { if (TestClearPageMlocked(page)) { - int nr_pages = hpage_nr_pages(page); + int nr_pages = thp_nr_pages(page); /* Holding pmd lock, no change in irq context: __mod is safe */ __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9d87082e64aa..b807952b4d43 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5589,7 +5589,7 @@ static int mem_cgroup_move_account(struct page *page, { struct lruvec *from_vec, *to_vec; struct pglist_data *pgdat; - unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; + unsigned int nr_pages = compound ? thp_nr_pages(page) : 1; int ret; VM_BUG_ON(from == to); @@ -6682,7 +6682,7 @@ void mem_cgroup_calculate_protection(struct mem_cgroup *root, */ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) { - unsigned int nr_pages = hpage_nr_pages(page); + unsigned int nr_pages = thp_nr_pages(page); struct mem_cgroup *memcg = NULL; int ret = 0; @@ -6912,7 +6912,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) return; /* Force-charge the new page. The old one will be freed soon */ - nr_pages = hpage_nr_pages(newpage); + nr_pages = thp_nr_pages(newpage); page_counter_charge(&memcg->memory, nr_pages); if (do_memsw_account()) @@ -7114,7 +7114,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) * ancestor for the swap instead and transfer the memory+swap charge. 
*/ swap_memcg = mem_cgroup_id_get_online(memcg); - nr_entries = hpage_nr_pages(page); + nr_entries = thp_nr_pages(page); /* Get references for the tail pages, too */ if (nr_entries > 1) mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); @@ -7158,7 +7158,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) */ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry) { - unsigned int nr_pages = hpage_nr_pages(page); + unsigned int nr_pages = thp_nr_pages(page); struct page_counter *counter; struct mem_cgroup *memcg; unsigned short oldid; diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index c32ead89c911..e9d5ab5d3ca0 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1299,7 +1299,7 @@ static int do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) { unsigned long pfn; - struct page *page; + struct page *page, *head; int ret = 0; LIST_HEAD(source); @@ -1307,15 +1307,14 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) if (!pfn_valid(pfn)) continue; page = pfn_to_page(pfn); + head = compound_head(page); if (PageHuge(page)) { - struct page *head = compound_head(page); pfn = page_to_pfn(head) + compound_nr(head) - 1; isolate_huge_page(head, &source); continue; } else if (PageTransHuge(page)) - pfn = page_to_pfn(compound_head(page)) - + hpage_nr_pages(page) - 1; + pfn = page_to_pfn(head) + thp_nr_pages(page) - 1; /* * HWPoison pages have elevated reference counts so the migration would diff --git a/mm/mempolicy.c b/mm/mempolicy.c index afaa09ff9f6c..eddbe4e56c73 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1049,7 +1049,7 @@ static int migrate_page_add(struct page *page, struct list_head *pagelist, list_add_tail(&head->lru, pagelist); mod_node_page_state(page_pgdat(head), NR_ISOLATED_ANON + page_is_file_lru(head), - hpage_nr_pages(head)); + thp_nr_pages(head)); } else if (flags & MPOL_MF_STRICT) { /* * Non-movable page may reach here. And, there may be diff --git a/mm/migrate.c b/mm/migrate.c index 5053439be6ab..34a842a8eb6a 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -193,7 +193,7 @@ void putback_movable_pages(struct list_head *l) put_page(page); } else { mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + - page_is_file_lru(page), -hpage_nr_pages(page)); + page_is_file_lru(page), -thp_nr_pages(page)); putback_lru_page(page); } } @@ -386,7 +386,7 @@ static int expected_page_refs(struct address_space *mapping, struct page *page) */ expected_count += is_device_private_page(page); if (mapping) - expected_count += hpage_nr_pages(page) + page_has_private(page); + expected_count += thp_nr_pages(page) + page_has_private(page); return expected_count; } @@ -441,7 +441,7 @@ int migrate_page_move_mapping(struct address_space *mapping, */ newpage->index = page->index; newpage->mapping = page->mapping; - page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */ + page_ref_add(newpage, thp_nr_pages(page)); /* add cache reference */ if (PageSwapBacked(page)) { __SetPageSwapBacked(newpage); if (PageSwapCache(page)) { @@ -474,7 +474,7 @@ int migrate_page_move_mapping(struct address_space *mapping, * to one less reference. * We know this isn't the last reference. 
*/ - page_ref_unfreeze(page, expected_count - hpage_nr_pages(page)); + page_ref_unfreeze(page, expected_count - thp_nr_pages(page)); xas_unlock(&xas); /* Leave irq disabled to prevent preemption while updating stats */ @@ -591,7 +591,7 @@ static void copy_huge_page(struct page *dst, struct page *src) } else { /* thp page */ BUG_ON(!PageTransHuge(src)); - nr_pages = hpage_nr_pages(src); + nr_pages = thp_nr_pages(src); } for (i = 0; i < nr_pages; i++) { @@ -1213,7 +1213,7 @@ out: */ if (likely(!__PageMovable(page))) mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + - page_is_file_lru(page), -hpage_nr_pages(page)); + page_is_file_lru(page), -thp_nr_pages(page)); } /* @@ -1446,7 +1446,7 @@ retry: * during migration. */ is_thp = PageTransHuge(page); - nr_subpages = hpage_nr_pages(page); + nr_subpages = thp_nr_pages(page); cond_resched(); if (PageHuge(page)) @@ -1670,7 +1670,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr, list_add_tail(&head->lru, pagelist); mod_node_page_state(page_pgdat(head), NR_ISOLATED_ANON + page_is_file_lru(head), - hpage_nr_pages(head)); + thp_nr_pages(head)); } out_putpage: /* @@ -2034,7 +2034,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) page_lru = page_is_file_lru(page); mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru, - hpage_nr_pages(page)); + thp_nr_pages(page)); /* * Isolating the page has taken another reference, so the diff --git a/mm/mlock.c b/mm/mlock.c index f8736136fad7..93ca2bf30b4f 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -61,8 +61,7 @@ void clear_page_mlock(struct page *page) if (!TestClearPageMlocked(page)) return; - mod_zone_page_state(page_zone(page), NR_MLOCK, - -hpage_nr_pages(page)); + mod_zone_page_state(page_zone(page), NR_MLOCK, -thp_nr_pages(page)); count_vm_event(UNEVICTABLE_PGCLEARED); /* * The previous TestClearPageMlocked() corresponds to the smp_mb() @@ -95,7 +94,7 @@ void mlock_vma_page(struct page *page) if (!TestSetPageMlocked(page)) { mod_zone_page_state(page_zone(page), NR_MLOCK, - hpage_nr_pages(page)); + thp_nr_pages(page)); count_vm_event(UNEVICTABLE_PGMLOCKED); if (!isolate_lru_page(page)) putback_lru_page(page); @@ -192,7 +191,7 @@ unsigned int munlock_vma_page(struct page *page) /* * Serialize with any parallel __split_huge_page_refcount() which * might otherwise copy PageMlocked to part of the tail pages before - * we clear it in the head page. It also stabilizes hpage_nr_pages(). + * we clear it in the head page. It also stabilizes thp_nr_pages(). 
*/ spin_lock_irq(&pgdat->lru_lock); @@ -202,7 +201,7 @@ unsigned int munlock_vma_page(struct page *page) goto unlock_out; } - nr_pages = hpage_nr_pages(page); + nr_pages = thp_nr_pages(page); __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); if (__munlock_isolate_lru_page(page, true)) { diff --git a/mm/page_io.c b/mm/page_io.c index f5e8bec8a8c7..454b70d8cda7 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -274,7 +274,7 @@ static inline void count_swpout_vm_event(struct page *page) if (unlikely(PageTransHuge(page))) count_vm_event(THP_SWPOUT); #endif - count_vm_events(PSWPOUT, hpage_nr_pages(page)); + count_vm_events(PSWPOUT, thp_nr_pages(page)); } #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index e65629c056e8..5e77b269c330 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c @@ -61,7 +61,7 @@ static inline bool pfn_is_match(struct page *page, unsigned long pfn) return page_pfn == pfn; /* THP can be referenced by any subpage */ - return pfn >= page_pfn && pfn - page_pfn < hpage_nr_pages(page); + return pfn >= page_pfn && pfn - page_pfn < thp_nr_pages(page); } /** diff --git a/mm/rmap.c b/mm/rmap.c index 6cce9ef06753..4ace1e32f705 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1130,7 +1130,7 @@ void do_page_add_anon_rmap(struct page *page, } if (first) { - int nr = compound ? hpage_nr_pages(page) : 1; + int nr = compound ? thp_nr_pages(page) : 1; /* * We use the irq-unsafe __{inc|mod}_zone_page_stat because * these counters are not modified in interrupt context, and @@ -1169,7 +1169,7 @@ void do_page_add_anon_rmap(struct page *page, void page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, bool compound) { - int nr = compound ? hpage_nr_pages(page) : 1; + int nr = compound ? 
thp_nr_pages(page) : 1; VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); __SetPageSwapBacked(page); @@ -1860,7 +1860,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, return; pgoff_start = page_to_pgoff(page); - pgoff_end = pgoff_start + hpage_nr_pages(page) - 1; + pgoff_end = pgoff_start + thp_nr_pages(page) - 1; anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff_start, pgoff_end) { struct vm_area_struct *vma = avc->vma; @@ -1913,7 +1913,7 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc, return; pgoff_start = page_to_pgoff(page); - pgoff_end = pgoff_start + hpage_nr_pages(page) - 1; + pgoff_end = pgoff_start + thp_nr_pages(page) - 1; if (!locked) i_mmap_lock_read(mapping); vma_interval_tree_foreach(vma, &mapping->i_mmap, diff --git a/mm/swap.c b/mm/swap.c index 9285e60c7d6e..d26c22baf7c5 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -241,7 +241,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec, del_page_from_lru_list(page, lruvec, page_lru(page)); ClearPageActive(page); add_page_to_lru_list_tail(page, lruvec, page_lru(page)); - (*pgmoved) += hpage_nr_pages(page); + (*pgmoved) += thp_nr_pages(page); } } @@ -312,7 +312,7 @@ void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages) void lru_note_cost_page(struct page *page) { lru_note_cost(mem_cgroup_page_lruvec(page, page_pgdat(page)), - page_is_file_lru(page), hpage_nr_pages(page)); + page_is_file_lru(page), thp_nr_pages(page)); } static void __activate_page(struct page *page, struct lruvec *lruvec, @@ -320,7 +320,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec, { if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { int lru = page_lru_base_type(page); - int nr_pages = hpage_nr_pages(page); + int nr_pages = thp_nr_pages(page); del_page_from_lru_list(page, lruvec, lru); SetPageActive(page); @@ -500,7 +500,7 @@ void lru_cache_add_inactive_or_unevictable(struct page *page, * lock is held(spinlock), which implies preemption disabled. 
*/ __mod_zone_page_state(page_zone(page), NR_MLOCK, - hpage_nr_pages(page)); + thp_nr_pages(page)); count_vm_event(UNEVICTABLE_PGMLOCKED); } lru_cache_add(page); @@ -532,7 +532,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec, { int lru; bool active; - int nr_pages = hpage_nr_pages(page); + int nr_pages = thp_nr_pages(page); if (!PageLRU(page)) return; @@ -580,7 +580,7 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec, { if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) { int lru = page_lru_base_type(page); - int nr_pages = hpage_nr_pages(page); + int nr_pages = thp_nr_pages(page); del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE); ClearPageActive(page); @@ -599,7 +599,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec, if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) && !PageSwapCache(page) && !PageUnevictable(page)) { bool active = PageActive(page); - int nr_pages = hpage_nr_pages(page); + int nr_pages = thp_nr_pages(page); del_page_from_lru_list(page, lruvec, LRU_INACTIVE_ANON + active); @@ -972,7 +972,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec, { enum lru_list lru; int was_unevictable = TestClearPageUnevictable(page); - int nr_pages = hpage_nr_pages(page); + int nr_pages = thp_nr_pages(page); VM_BUG_ON_PAGE(PageLRU(page), page); diff --git a/mm/swap_state.c b/mm/swap_state.c index b73aabdfd35a..d9d4a49f3241 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -130,7 +130,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, struct address_space *address_space = swap_address_space(entry); pgoff_t idx = swp_offset(entry); XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page)); - unsigned long i, nr = hpage_nr_pages(page); + unsigned long i, nr = thp_nr_pages(page); void *old; VM_BUG_ON_PAGE(!PageLocked(page), page); @@ -183,7 +183,7 @@ void __delete_from_swap_cache(struct page *page, swp_entry_t entry, void *shadow) { struct address_space *address_space = swap_address_space(entry); - int i, nr = hpage_nr_pages(page); + int i, nr = thp_nr_pages(page); pgoff_t idx = swp_offset(entry); XA_STATE(xas, &address_space->i_pages, idx); @@ -278,7 +278,7 @@ void delete_from_swap_cache(struct page *page) xa_unlock_irq(&address_space->i_pages); put_swap_page(page, entry); - page_ref_sub(page, hpage_nr_pages(page)); + page_ref_sub(page, thp_nr_pages(page)); } void clear_shadow_from_swap_cache(int type, unsigned long begin, diff --git a/mm/swapfile.c b/mm/swapfile.c index e653eea1eb88..eb410d3c8de8 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1370,7 +1370,7 @@ void put_swap_page(struct page *page, swp_entry_t entry) unsigned char *map; unsigned int i, free_entries = 0; unsigned char val; - int size = swap_entry_size(hpage_nr_pages(page)); + int size = swap_entry_size(thp_nr_pages(page)); si = _swap_info_get(entry); if (!si) diff --git a/mm/vmscan.c b/mm/vmscan.c index 738115ed75e2..99e1796eb833 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1354,7 +1354,7 @@ static unsigned int shrink_page_list(struct list_head *page_list, case PAGE_ACTIVATE: goto activate_locked; case PAGE_SUCCESS: - stat->nr_pageout += hpage_nr_pages(page); + stat->nr_pageout += thp_nr_pages(page); if (PageWriteback(page)) goto keep; @@ -1862,7 +1862,7 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec, SetPageLRU(page); lru = page_lru(page); - nr_pages = hpage_nr_pages(page); + nr_pages = thp_nr_pages(page); 
update_lru_size(lruvec, lru, page_zonenum(page), nr_pages); list_move(&page->lru, &lruvec->lists[lru]); @@ -2065,7 +2065,7 @@ static void shrink_active_list(unsigned long nr_to_scan, * so we ignore them here. */ if ((vm_flags & VM_EXEC) && page_is_file_lru(page)) { - nr_rotated += hpage_nr_pages(page); + nr_rotated += thp_nr_pages(page); list_add(&page->lru, &l_active); continue; } diff --git a/mm/workingset.c b/mm/workingset.c index 8cbe4e3cbe5c..92e66113a577 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -263,7 +263,7 @@ void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg) VM_BUG_ON_PAGE(!PageLocked(page), page); lruvec = mem_cgroup_lruvec(target_memcg, pgdat); - workingset_age_nonresident(lruvec, hpage_nr_pages(page)); + workingset_age_nonresident(lruvec, thp_nr_pages(page)); /* XXX: target_memcg can be NULL, go through lruvec */ memcgid = mem_cgroup_id(lruvec_memcg(lruvec)); eviction = atomic_long_read(&lruvec->nonresident_age); @@ -374,7 +374,7 @@ void workingset_refault(struct page *page, void *shadow) goto out; SetPageActive(page); - workingset_age_nonresident(lruvec, hpage_nr_pages(page)); + workingset_age_nonresident(lruvec, thp_nr_pages(page)); inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file); /* Page was active prior to eviction */ @@ -411,7 +411,7 @@ void workingset_activation(struct page *page) if (!mem_cgroup_disabled() && !memcg) goto out; lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page)); - workingset_age_nonresident(lruvec, hpage_nr_pages(page)); + workingset_age_nonresident(lruvec, thp_nr_pages(page)); out: rcu_read_unlock(); } -- cgit v1.2.3 From 2be1d71841b7ecfb01ce4c59f6e1d082c3c18a8a Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 14 Aug 2020 17:30:40 -0700 Subject: mm: add thp_head This is like compound_head() but compiles away when CONFIG_TRANSPARENT_HUGEPAGE is not enabled. Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Reviewed-by: William Kucharski Reviewed-by: Zi Yan Cc: David Hildenbrand Cc: Mike Kravetz Cc: "Kirill A. Shutemov" Link: http://lkml.kernel.org/r/20200629151959.15779-7-willy@infradead.org Signed-off-by: Linus Torvalds --- include/linux/huge_mm.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'include') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 229f986d535a..8a8bc46a2432 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -259,6 +259,15 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, return NULL; } +/** + * thp_head - Head page of a transparent huge page. + * @page: Any page (tail, head or regular) found in the page cache. + */ +static inline struct page *thp_head(struct page *page) +{ + return compound_head(page); +} + /** * thp_order - Order of a transparent huge page. * @page: Head page of a transparent huge page. 
@@ -335,6 +344,12 @@ static inline struct list_head *page_deferred_list(struct page *page) #define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; }) #define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; }) +static inline struct page *thp_head(struct page *page) +{ + VM_BUG_ON_PGFLAGS(PageTail(page), page); + return page; +} + static inline unsigned int thp_order(struct page *page) { VM_BUG_ON_PGFLAGS(PageTail(page), page); -- cgit v1.2.3 From ee6c400f5c05459b8c5f2884a176a1287ce2f68f Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 14 Aug 2020 17:30:43 -0700 Subject: mm: introduce offset_in_thp Mirroring offset_in_page(), this gives you the offset within this particular page, no matter what size page it is. It optimises down to offset_in_page() if CONFIG_TRANSPARENT_HUGEPAGE is not set. Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Reviewed-by: David Hildenbrand Reviewed-by: William Kucharski Reviewed-by: Zi Yan Cc: Mike Kravetz Cc: "Kirill A. Shutemov" Link: http://lkml.kernel.org/r/20200629151959.15779-8-willy@infradead.org Signed-off-by: Linus Torvalds --- include/linux/mm.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/mm.h b/include/linux/mm.h index fd0cd4e93029..2e7ec3ee34cf 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1594,6 +1594,7 @@ static inline void clear_page_pfmemalloc(struct page *page) extern void pagefault_out_of_memory(void); #define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) +#define offset_in_thp(page, p) ((unsigned long)(p) & (thp_size(page) - 1)) /* * Flags passed to show_mem() and show_free_areas() to suppress output in -- cgit v1.2.3 From 88db0aa2421666d2f73486d15b239a4521983d55 Mon Sep 17 00:00:00 2001 From: Xiaoming Ni Date: Fri, 14 Aug 2020 17:31:07 -0700 Subject: all arch: remove system call sys_sysctl MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since commit 61a47c1ad3a4dc ("sysctl: Remove the sysctl system call"), sys_sysctl is actually unavailable: any input can only return an error. We have been warning about people using the sysctl system call for years and believe there are no more users. Even if there are users of this interface if they have not complained or fixed their code by now they probably are not going to, so there is no point in warning them any longer. So completely remove sys_sysctl on all architectures. [nixiaoming@huawei.com: s390: fix build error for sys_call_table_emu] Link: http://lkml.kernel.org/r/20200618141426.16884-1-nixiaoming@huawei.com Signed-off-by: Xiaoming Ni Signed-off-by: Andrew Morton Acked-by: Will Deacon [arm/arm64] Acked-by: "Eric W. Biederman" Cc: Aleksa Sarai Cc: Alexander Shishkin Cc: Al Viro Cc: Andi Kleen Cc: Andrew Morton Cc: Andy Lutomirski Cc: Arnaldo Carvalho de Melo Cc: Arnd Bergmann Cc: Benjamin Herrenschmidt Cc: Bin Meng Cc: Borislav Petkov Cc: Brian Gerst Cc: Catalin Marinas Cc: chenzefeng Cc: Christian Borntraeger Cc: Christian Brauner Cc: Chris Zankel Cc: David Howells Cc: David S. Miller Cc: Diego Elio Pettenò Cc: Dmitry Vyukov Cc: Dominik Brodowski Cc: Fenghua Yu Cc: Geert Uytterhoeven Cc: Heiko Carstens Cc: Helge Deller Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Iurii Zaikin Cc: Ivan Kokshaysky Cc: James Bottomley Cc: Jens Axboe Cc: Jiri Olsa Cc: Kars de Jong Cc: Kees Cook Cc: Krzysztof Kozlowski Cc: Luis Chamberlain Cc: Marco Elver Cc: Mark Rutland Cc: Martin K. 
Petersen Cc: Masahiro Yamada Cc: Matt Turner Cc: Max Filippov Cc: Michael Ellerman Cc: Michal Simek Cc: Miklos Szeredi Cc: Minchan Kim Cc: Namhyung Kim Cc: Naveen N. Rao Cc: Nick Piggin Cc: Oleg Nesterov Cc: Olof Johansson Cc: Paul Burton Cc: "Paul E. McKenney" Cc: Paul Mackerras Cc: Peter Zijlstra (Intel) Cc: Randy Dunlap Cc: Ravi Bangoria Cc: Richard Henderson Cc: Rich Felker Cc: Russell King Cc: Sami Tolvanen Cc: Sargun Dhillon Cc: Stephen Rothwell Cc: Sudeep Holla Cc: Sven Schnelle Cc: Thiago Jung Bauermann Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Tony Luck Cc: Vasily Gorbik Cc: Vlastimil Babka Cc: Yoshinori Sato Cc: Zhou Yanjie Link: http://lkml.kernel.org/r/20200616030734.87257-1-nixiaoming@huawei.com Signed-off-by: Linus Torvalds --- arch/alpha/kernel/syscalls/syscall.tbl | 2 +- arch/arm/configs/am200epdkit_defconfig | 1 - arch/arm/tools/syscall.tbl | 2 +- arch/arm64/include/asm/unistd32.h | 4 +- arch/ia64/kernel/syscalls/syscall.tbl | 2 +- arch/m68k/kernel/syscalls/syscall.tbl | 2 +- arch/microblaze/kernel/syscalls/syscall.tbl | 2 +- arch/mips/configs/cu1000-neo_defconfig | 1 - arch/mips/kernel/syscalls/syscall_n32.tbl | 2 +- arch/mips/kernel/syscalls/syscall_n64.tbl | 2 +- arch/mips/kernel/syscalls/syscall_o32.tbl | 2 +- arch/parisc/kernel/syscalls/syscall.tbl | 2 +- arch/powerpc/kernel/syscalls/syscall.tbl | 2 +- arch/s390/kernel/syscalls/syscall.tbl | 2 +- arch/sh/configs/dreamcast_defconfig | 1 - arch/sh/configs/espt_defconfig | 1 - arch/sh/configs/hp6xx_defconfig | 1 - arch/sh/configs/landisk_defconfig | 1 - arch/sh/configs/lboxre2_defconfig | 1 - arch/sh/configs/microdev_defconfig | 1 - arch/sh/configs/migor_defconfig | 1 - arch/sh/configs/r7780mp_defconfig | 1 - arch/sh/configs/r7785rp_defconfig | 1 - arch/sh/configs/rts7751r2d1_defconfig | 1 - arch/sh/configs/rts7751r2dplus_defconfig | 1 - arch/sh/configs/se7206_defconfig | 1 - arch/sh/configs/se7343_defconfig | 1 - arch/sh/configs/se7619_defconfig | 1 - arch/sh/configs/se7705_defconfig | 1 - arch/sh/configs/se7750_defconfig | 1 - arch/sh/configs/se7751_defconfig | 1 - arch/sh/configs/secureedge5410_defconfig | 1 - arch/sh/configs/sh03_defconfig | 1 - arch/sh/configs/sh7710voipgw_defconfig | 1 - arch/sh/configs/sh7757lcr_defconfig | 1 - arch/sh/configs/sh7763rdp_defconfig | 1 - arch/sh/configs/shmin_defconfig | 1 - arch/sh/configs/titan_defconfig | 1 - arch/sh/kernel/syscalls/syscall.tbl | 2 +- arch/sparc/kernel/syscalls/syscall.tbl | 2 +- arch/x86/entry/syscalls/syscall_32.tbl | 2 +- arch/x86/entry/syscalls/syscall_64.tbl | 2 +- arch/xtensa/kernel/syscalls/syscall.tbl | 2 +- include/linux/compat.h | 1 - include/linux/syscalls.h | 2 - include/linux/sysctl.h | 6 +- kernel/Makefile | 2 +- kernel/sys_ni.c | 1 - kernel/sysctl_binary.c | 171 --------------------- tools/perf/arch/powerpc/entry/syscalls/syscall.tbl | 2 +- tools/perf/arch/s390/entry/syscalls/syscall.tbl | 2 +- tools/perf/arch/x86/entry/syscalls/syscall_64.tbl | 2 +- 52 files changed, 24 insertions(+), 227 deletions(-) delete mode 100644 kernel/sysctl_binary.c (limited to 'include') diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl index a28fb211881d..ec8bed9e7b75 100644 --- a/arch/alpha/kernel/syscalls/syscall.tbl +++ b/arch/alpha/kernel/syscalls/syscall.tbl @@ -249,7 +249,7 @@ 316 common mlockall sys_mlockall 317 common munlockall sys_munlockall 318 common sysinfo sys_sysinfo -319 common _sysctl sys_sysctl +319 common _sysctl sys_ni_syscall # 320 was sys_idle 321 common oldumount sys_oldumount 322 common swapon 
sys_swapon diff --git a/arch/arm/configs/am200epdkit_defconfig b/arch/arm/configs/am200epdkit_defconfig index f56ac394caf1..4e49d6cb2f62 100644 --- a/arch/arm/configs/am200epdkit_defconfig +++ b/arch/arm/configs/am200epdkit_defconfig @@ -3,7 +3,6 @@ CONFIG_LOCALVERSION="gum" CONFIG_SYSVIPC=y CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_EXPERT=y -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_EPOLL is not set # CONFIG_SHMEM is not set # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl index 7e8ee4adf269..171077cbf419 100644 --- a/arch/arm/tools/syscall.tbl +++ b/arch/arm/tools/syscall.tbl @@ -162,7 +162,7 @@ 146 common writev sys_writev 147 common getsid sys_getsid 148 common fdatasync sys_fdatasync -149 common _sysctl sys_sysctl +149 common _sysctl sys_ni_syscall 150 common mlock sys_mlock 151 common munlock sys_munlock 152 common mlockall sys_mlockall diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 17e81bd9a2d3..734860ac7cf9 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -308,8 +308,8 @@ __SYSCALL(__NR_writev, compat_sys_writev) __SYSCALL(__NR_getsid, sys_getsid) #define __NR_fdatasync 148 __SYSCALL(__NR_fdatasync, sys_fdatasync) -#define __NR__sysctl 149 -__SYSCALL(__NR__sysctl, compat_sys_sysctl) + /* 149 was sys_sysctl */ +__SYSCALL(149, sys_ni_syscall) #define __NR_mlock 150 __SYSCALL(__NR_mlock, sys_mlock) #define __NR_munlock 151 diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl index ced9c83e47c9..f52a41f4c340 100644 --- a/arch/ia64/kernel/syscalls/syscall.tbl +++ b/arch/ia64/kernel/syscalls/syscall.tbl @@ -135,7 +135,7 @@ 123 common writev sys_writev 124 common pread64 sys_pread64 125 common pwrite64 sys_pwrite64 -126 common _sysctl sys_sysctl +126 common _sysctl sys_ni_syscall 127 common mmap sys_mmap 128 common munmap sys_munmap 129 common mlock sys_mlock diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl index 1a4822de7292..81fc799d8392 100644 --- a/arch/m68k/kernel/syscalls/syscall.tbl +++ b/arch/m68k/kernel/syscalls/syscall.tbl @@ -156,7 +156,7 @@ 146 common writev sys_writev 147 common getsid sys_getsid 148 common fdatasync sys_fdatasync -149 common _sysctl sys_sysctl +149 common _sysctl sys_ni_syscall 150 common mlock sys_mlock 151 common munlock sys_munlock 152 common mlockall sys_mlockall diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl index a3f4be8e7238..b4e263916f41 100644 --- a/arch/microblaze/kernel/syscalls/syscall.tbl +++ b/arch/microblaze/kernel/syscalls/syscall.tbl @@ -156,7 +156,7 @@ 146 common writev sys_writev 147 common getsid sys_getsid 148 common fdatasync sys_fdatasync -149 common _sysctl sys_sysctl +149 common _sysctl sys_ni_syscall 150 common mlock sys_mlock 151 common munlock sys_munlock 152 common mlockall sys_mlockall diff --git a/arch/mips/configs/cu1000-neo_defconfig b/arch/mips/configs/cu1000-neo_defconfig index 6b471cdb16cf..e924c817f73d 100644 --- a/arch/mips/configs/cu1000-neo_defconfig +++ b/arch/mips/configs/cu1000-neo_defconfig @@ -17,7 +17,6 @@ CONFIG_CGROUP_CPUACCT=y CONFIG_NAMESPACES=y CONFIG_USER_NS=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_SYSCTL_SYSCALL=y CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl index 6b4ee92e3aed..f9df9edb67a4 100644 --- 
a/arch/mips/kernel/syscalls/syscall_n32.tbl +++ b/arch/mips/kernel/syscalls/syscall_n32.tbl @@ -159,7 +159,7 @@ 149 n32 munlockall sys_munlockall 150 n32 vhangup sys_vhangup 151 n32 pivot_root sys_pivot_root -152 n32 _sysctl compat_sys_sysctl +152 n32 _sysctl sys_ni_syscall 153 n32 prctl sys_prctl 154 n32 adjtimex sys_adjtimex_time32 155 n32 setrlimit compat_sys_setrlimit diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl index 391acbf425a0..557f9954a2b9 100644 --- a/arch/mips/kernel/syscalls/syscall_n64.tbl +++ b/arch/mips/kernel/syscalls/syscall_n64.tbl @@ -159,7 +159,7 @@ 149 n64 munlockall sys_munlockall 150 n64 vhangup sys_vhangup 151 n64 pivot_root sys_pivot_root -152 n64 _sysctl sys_sysctl +152 n64 _sysctl sys_ni_syscall 153 n64 prctl sys_prctl 154 n64 adjtimex sys_adjtimex 155 n64 setrlimit sys_setrlimit diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl index 5727c5187508..195b43cf27c8 100644 --- a/arch/mips/kernel/syscalls/syscall_o32.tbl +++ b/arch/mips/kernel/syscalls/syscall_o32.tbl @@ -164,7 +164,7 @@ 150 o32 unused150 sys_ni_syscall 151 o32 getsid sys_getsid 152 o32 fdatasync sys_fdatasync -153 o32 _sysctl sys_sysctl compat_sys_sysctl +153 o32 _sysctl sys_ni_syscall 154 o32 mlock sys_mlock 155 o32 munlock sys_munlock 156 o32 mlockall sys_mlockall diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl index 292baabefade..def64d221cd4 100644 --- a/arch/parisc/kernel/syscalls/syscall.tbl +++ b/arch/parisc/kernel/syscalls/syscall.tbl @@ -163,7 +163,7 @@ 146 common writev sys_writev compat_sys_writev 147 common getsid sys_getsid 148 common fdatasync sys_fdatasync -149 common _sysctl sys_sysctl compat_sys_sysctl +149 common _sysctl sys_ni_syscall 150 common mlock sys_mlock 151 common munlock sys_munlock 152 common mlockall sys_mlockall diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl index be9f74546068..c2d737ff2e7b 100644 --- a/arch/powerpc/kernel/syscalls/syscall.tbl +++ b/arch/powerpc/kernel/syscalls/syscall.tbl @@ -197,7 +197,7 @@ 146 common writev sys_writev compat_sys_writev 147 common getsid sys_getsid 148 common fdatasync sys_fdatasync -149 nospu _sysctl sys_sysctl compat_sys_sysctl +149 nospu _sysctl sys_ni_syscall 150 common mlock sys_mlock 151 common munlock sys_munlock 152 common mlockall sys_mlockall diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl index f1fda4375526..10456bc936fb 100644 --- a/arch/s390/kernel/syscalls/syscall.tbl +++ b/arch/s390/kernel/syscalls/syscall.tbl @@ -138,7 +138,7 @@ 146 common writev sys_writev compat_sys_writev 147 common getsid sys_getsid sys_getsid 148 common fdatasync sys_fdatasync sys_fdatasync -149 common _sysctl sys_sysctl compat_sys_sysctl +149 common _sysctl - - 150 common mlock sys_mlock sys_mlock 151 common munlock sys_munlock sys_munlock 152 common mlockall sys_mlockall sys_mlockall diff --git a/arch/sh/configs/dreamcast_defconfig b/arch/sh/configs/dreamcast_defconfig index ae067e0b15e3..6a82c7b8ff32 100644 --- a/arch/sh/configs/dreamcast_defconfig +++ b/arch/sh/configs/dreamcast_defconfig @@ -1,7 +1,6 @@ CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_MODULES=y diff --git a/arch/sh/configs/espt_defconfig b/arch/sh/configs/espt_defconfig index a5b865a75d22..9a988c347e9d 100644 --- a/arch/sh/configs/espt_defconfig 
+++ b/arch/sh/configs/espt_defconfig @@ -5,7 +5,6 @@ CONFIG_LOG_BUF_SHIFT=14 CONFIG_NAMESPACES=y CONFIG_UTS_NS=y CONFIG_IPC_NS=y -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_OPROFILE=y diff --git a/arch/sh/configs/hp6xx_defconfig b/arch/sh/configs/hp6xx_defconfig index a92db6694ce2..70e6605d7f7e 100644 --- a/arch/sh/configs/hp6xx_defconfig +++ b/arch/sh/configs/hp6xx_defconfig @@ -3,7 +3,6 @@ CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y # CONFIG_BLK_DEV_BSG is not set CONFIG_CPU_SUBTYPE_SH7709=y diff --git a/arch/sh/configs/landisk_defconfig b/arch/sh/configs/landisk_defconfig index 567af752b1bb..ba6ec042606f 100644 --- a/arch/sh/configs/landisk_defconfig +++ b/arch/sh/configs/landisk_defconfig @@ -1,6 +1,5 @@ CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_KALLSYMS_EXTRA_PASS=y CONFIG_SLAB=y CONFIG_MODULES=y diff --git a/arch/sh/configs/lboxre2_defconfig b/arch/sh/configs/lboxre2_defconfig index 10f6d371ce2c..05e4ac6fed5f 100644 --- a/arch/sh/configs/lboxre2_defconfig +++ b/arch/sh/configs/lboxre2_defconfig @@ -1,6 +1,5 @@ CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_KALLSYMS_EXTRA_PASS=y CONFIG_SLAB=y CONFIG_MODULES=y diff --git a/arch/sh/configs/microdev_defconfig b/arch/sh/configs/microdev_defconfig index ed84d1303acf..c65667d00313 100644 --- a/arch/sh/configs/microdev_defconfig +++ b/arch/sh/configs/microdev_defconfig @@ -2,7 +2,6 @@ CONFIG_BSD_PROCESS_ACCT=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y # CONFIG_BLK_DEV_BSG is not set CONFIG_CPU_SUBTYPE_SH4_202=y diff --git a/arch/sh/configs/migor_defconfig b/arch/sh/configs/migor_defconfig index 37e9521a99e5..a24cf8cd2cea 100644 --- a/arch/sh/configs/migor_defconfig +++ b/arch/sh/configs/migor_defconfig @@ -4,7 +4,6 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_OPROFILE=y diff --git a/arch/sh/configs/r7780mp_defconfig b/arch/sh/configs/r7780mp_defconfig index c97ec60cff27..e922659fdadb 100644 --- a/arch/sh/configs/r7780mp_defconfig +++ b/arch/sh/configs/r7780mp_defconfig @@ -3,7 +3,6 @@ CONFIG_BSD_PROCESS_ACCT=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_FUTEX is not set # CONFIG_EPOLL is not set CONFIG_SLAB=y diff --git a/arch/sh/configs/r7785rp_defconfig b/arch/sh/configs/r7785rp_defconfig index 55fce65eb454..5978866358ec 100644 --- a/arch/sh/configs/r7785rp_defconfig +++ b/arch/sh/configs/r7785rp_defconfig @@ -7,7 +7,6 @@ CONFIG_RCU_TRACE=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_OPROFILE=y diff --git a/arch/sh/configs/rts7751r2d1_defconfig b/arch/sh/configs/rts7751r2d1_defconfig index 6a3cfe08295f..fc9c22152b08 100644 --- a/arch/sh/configs/rts7751r2d1_defconfig +++ b/arch/sh/configs/rts7751r2d1_defconfig @@ -1,7 +1,6 @@ CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_OPROFILE=y diff --git a/arch/sh/configs/rts7751r2dplus_defconfig b/arch/sh/configs/rts7751r2dplus_defconfig index 2b3d7d280672..ff3fd6787fd6 
100644 --- a/arch/sh/configs/rts7751r2dplus_defconfig +++ b/arch/sh/configs/rts7751r2dplus_defconfig @@ -1,7 +1,6 @@ CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_OPROFILE=y diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig index 21a43f14ffac..ff5bb4489922 100644 --- a/arch/sh/configs/se7206_defconfig +++ b/arch/sh/configs/se7206_defconfig @@ -18,7 +18,6 @@ CONFIG_USER_NS=y CONFIG_PID_NS=y CONFIG_BLK_DEV_INITRD=y # CONFIG_UID16 is not set -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_KALLSYMS_ALL=y # CONFIG_ELF_CORE is not set # CONFIG_COMPAT_BRK is not set diff --git a/arch/sh/configs/se7343_defconfig b/arch/sh/configs/se7343_defconfig index 4e794e719a28..5d6c19338ebf 100644 --- a/arch/sh/configs/se7343_defconfig +++ b/arch/sh/configs/se7343_defconfig @@ -2,7 +2,6 @@ CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_FUTEX is not set # CONFIG_EPOLL is not set # CONFIG_SHMEM is not set diff --git a/arch/sh/configs/se7619_defconfig b/arch/sh/configs/se7619_defconfig index 3264415a5931..71a672c30716 100644 --- a/arch/sh/configs/se7619_defconfig +++ b/arch/sh/configs/se7619_defconfig @@ -1,7 +1,6 @@ # CONFIG_LOCALVERSION_AUTO is not set CONFIG_LOG_BUF_SHIFT=14 # CONFIG_UID16 is not set -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_KALLSYMS is not set # CONFIG_HOTPLUG is not set # CONFIG_ELF_CORE is not set diff --git a/arch/sh/configs/se7705_defconfig b/arch/sh/configs/se7705_defconfig index 4496b94b7d88..ed00a6eeadf5 100644 --- a/arch/sh/configs/se7705_defconfig +++ b/arch/sh/configs/se7705_defconfig @@ -2,7 +2,6 @@ CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_KALLSYMS is not set # CONFIG_HOTPLUG is not set CONFIG_SLAB=y diff --git a/arch/sh/configs/se7750_defconfig b/arch/sh/configs/se7750_defconfig index b23f67542728..3f1c13799d79 100644 --- a/arch/sh/configs/se7750_defconfig +++ b/arch/sh/configs/se7750_defconfig @@ -5,7 +5,6 @@ CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_HOTPLUG is not set CONFIG_SLAB=y CONFIG_MODULES=y diff --git a/arch/sh/configs/se7751_defconfig b/arch/sh/configs/se7751_defconfig index 162343683937..4a024065bb75 100644 --- a/arch/sh/configs/se7751_defconfig +++ b/arch/sh/configs/se7751_defconfig @@ -3,7 +3,6 @@ CONFIG_BSD_PROCESS_ACCT=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_HOTPLUG is not set CONFIG_SLAB=y CONFIG_MODULES=y diff --git a/arch/sh/configs/secureedge5410_defconfig b/arch/sh/configs/secureedge5410_defconfig index 360592d63a2f..8422599cfb04 100644 --- a/arch/sh/configs/secureedge5410_defconfig +++ b/arch/sh/configs/secureedge5410_defconfig @@ -1,7 +1,6 @@ # CONFIG_SWAP is not set CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_HOTPLUG is not set CONFIG_SLAB=y # CONFIG_BLK_DEV_BSG is not set diff --git a/arch/sh/configs/sh03_defconfig b/arch/sh/configs/sh03_defconfig index 87db9a84b5ec..f0073ed39947 100644 --- a/arch/sh/configs/sh03_defconfig +++ b/arch/sh/configs/sh03_defconfig @@ -3,7 +3,6 @@ CONFIG_POSIX_MQUEUE=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y -# CONFIG_SYSCTL_SYSCALL is not set 
CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_OPROFILE=m diff --git a/arch/sh/configs/sh7710voipgw_defconfig b/arch/sh/configs/sh7710voipgw_defconfig index 08426913c0e3..0d814770b07f 100644 --- a/arch/sh/configs/sh7710voipgw_defconfig +++ b/arch/sh/configs/sh7710voipgw_defconfig @@ -2,7 +2,6 @@ CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_FUTEX is not set # CONFIG_EPOLL is not set # CONFIG_SHMEM is not set diff --git a/arch/sh/configs/sh7757lcr_defconfig b/arch/sh/configs/sh7757lcr_defconfig index d0933a9b9799..a2700ab165af 100644 --- a/arch/sh/configs/sh7757lcr_defconfig +++ b/arch/sh/configs/sh7757lcr_defconfig @@ -8,7 +8,6 @@ CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_KALLSYMS_ALL=y CONFIG_SLAB=y CONFIG_MODULES=y diff --git a/arch/sh/configs/sh7763rdp_defconfig b/arch/sh/configs/sh7763rdp_defconfig index d0a0aa74cecf..26c5fd02c87a 100644 --- a/arch/sh/configs/sh7763rdp_defconfig +++ b/arch/sh/configs/sh7763rdp_defconfig @@ -5,7 +5,6 @@ CONFIG_LOG_BUF_SHIFT=14 CONFIG_NAMESPACES=y CONFIG_UTS_NS=y CONFIG_IPC_NS=y -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_OPROFILE=y diff --git a/arch/sh/configs/shmin_defconfig b/arch/sh/configs/shmin_defconfig index a27b129b93c5..c0b6f40d01cc 100644 --- a/arch/sh/configs/shmin_defconfig +++ b/arch/sh/configs/shmin_defconfig @@ -1,7 +1,6 @@ # CONFIG_SWAP is not set CONFIG_LOG_BUF_SHIFT=14 # CONFIG_UID16 is not set -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_KALLSYMS is not set # CONFIG_HOTPLUG is not set # CONFIG_BUG is not set diff --git a/arch/sh/configs/titan_defconfig b/arch/sh/configs/titan_defconfig index 4ec961ace688..ba887f1351be 100644 --- a/arch/sh/configs/titan_defconfig +++ b/arch/sh/configs/titan_defconfig @@ -6,7 +6,6 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=16 CONFIG_BLK_DEV_INITRD=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl index 96848db9659e..ae0a00beea5f 100644 --- a/arch/sh/kernel/syscalls/syscall.tbl +++ b/arch/sh/kernel/syscalls/syscall.tbl @@ -156,7 +156,7 @@ 146 common writev sys_writev 147 common getsid sys_getsid 148 common fdatasync sys_fdatasync -149 common _sysctl sys_sysctl +149 common _sysctl sys_ni_syscall 150 common mlock sys_mlock 151 common munlock sys_munlock 152 common mlockall sys_mlockall diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl index 46024e80ee86..4af114e84f20 100644 --- a/arch/sparc/kernel/syscalls/syscall.tbl +++ b/arch/sparc/kernel/syscalls/syscall.tbl @@ -300,7 +300,7 @@ 249 64 nanosleep sys_nanosleep 250 32 mremap sys_mremap 250 64 mremap sys_64_mremap -251 common _sysctl sys_sysctl compat_sys_sysctl +251 common _sysctl sys_ni_syscall 252 common getsid sys_getsid 253 common fdatasync sys_fdatasync 254 32 nfsservctl sys_ni_syscall sys_nis_syscall diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index e31a75262c9c..9d1102873666 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl @@ -160,7 +160,7 @@ 146 i386 writev sys_writev compat_sys_writev 147 i386 getsid sys_getsid 148 i386 fdatasync sys_fdatasync -149 i386 _sysctl sys_sysctl compat_sys_sysctl +149 i386 _sysctl sys_ni_syscall 150 i386 mlock sys_mlock 151 i386 
munlock sys_munlock 152 i386 mlockall sys_mlockall diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl index 9d82078c949a..f30d6ae9a688 100644 --- a/arch/x86/entry/syscalls/syscall_64.tbl +++ b/arch/x86/entry/syscalls/syscall_64.tbl @@ -164,7 +164,7 @@ 153 common vhangup sys_vhangup 154 common modify_ldt sys_modify_ldt 155 common pivot_root sys_pivot_root -156 64 _sysctl sys_sysctl +156 64 _sysctl sys_ni_syscall 157 common prctl sys_prctl 158 common arch_prctl sys_arch_prctl 159 common adjtimex sys_adjtimex diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl index d216ccba42f7..6276e3c2d3fc 100644 --- a/arch/xtensa/kernel/syscalls/syscall.tbl +++ b/arch/xtensa/kernel/syscalls/syscall.tbl @@ -222,7 +222,7 @@ 204 common quotactl sys_quotactl # 205 was old nfsservctl 205 common nfsservctl sys_ni_syscall -206 common _sysctl sys_sysctl +206 common _sysctl sys_ni_syscall 207 common bdflush sys_bdflush 208 common uname sys_newuname 209 common sysinfo sys_sysinfo diff --git a/include/linux/compat.h b/include/linux/compat.h index c4255d8a4a8a..d38c4d7e83bd 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -851,7 +851,6 @@ asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32); asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len, unsigned flags); -asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args); /* obsolete: fs/readdir.c */ asmlinkage long compat_sys_old_readdir(unsigned int fd, diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index dc2b827c81e5..75ac7f8ae93c 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -47,7 +47,6 @@ struct stat64; struct statfs; struct statfs64; struct statx; -struct __sysctl_args; struct sysinfo; struct timespec; struct __kernel_old_timeval; @@ -1117,7 +1116,6 @@ asmlinkage long sys_send(int, void __user *, size_t, unsigned); asmlinkage long sys_bdflush(int func, long data); asmlinkage long sys_oldumount(char __user *name); asmlinkage long sys_uselib(const char __user *library); -asmlinkage long sys_sysctl(struct __sysctl_args __user *args); asmlinkage long sys_sysfs(int option, unsigned long arg1, unsigned long arg2); asmlinkage long sys_fork(void); diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 50bb7f383a1b..51298a4f4623 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -74,15 +74,13 @@ int proc_do_static_key(struct ctl_table *table, int write, void *buffer, * sysctl names can be mirrored automatically under /proc/sys. The * procname supplied controls /proc naming. * - * The table's mode will be honoured both for sys_sysctl(2) and - * proc-fs access. + * The table's mode will be honoured for proc-fs access. * * Leaf nodes in the sysctl tree will be represented by a single file * under /proc; non-leaf nodes will be represented by directories. A * null procname disables /proc mirroring at this node. * - * sysctl(2) can automatically manage read and write requests through - * the sysctl table. The data and maxlen fields of the ctl_table + * The data and maxlen fields of the ctl_table * struct enable minimal validation of the values being written to be * performed, and the mode field allows minimal authentication. 
* diff --git a/kernel/Makefile b/kernel/Makefile index b3da548691c9..9a20016d4900 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -5,7 +5,7 @@ obj-y = fork.o exec_domain.o panic.o \ cpu.o exit.o softirq.o resource.o \ - sysctl.o sysctl_binary.o capability.o ptrace.o user.o \ + sysctl.o capability.o ptrace.o user.o \ signal.o sys.o umh.o workqueue.o pid.o task_work.o \ extable.o params.o \ kthread.o sys_ni.o nsproxy.o \ diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 3b69a560a7ac..4d59775ea79c 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -364,7 +364,6 @@ COND_SYSCALL(socketcall); COND_SYSCALL_COMPAT(socketcall); /* compat syscalls for arm64, x86, ... */ -COND_SYSCALL_COMPAT(sysctl); COND_SYSCALL_COMPAT(fanotify_mark); /* x86 */ diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c deleted file mode 100644 index 7d550cc76a3b..000000000000 --- a/kernel/sysctl_binary.c +++ /dev/null @@ -1,171 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include -#include -#include "../fs/xfs/xfs_sysctl.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static ssize_t binary_sysctl(const int *name, int nlen, - void __user *oldval, size_t oldlen, void __user *newval, size_t newlen) -{ - return -ENOSYS; -} - -static void deprecated_sysctl_warning(const int *name, int nlen) -{ - int i; - - /* - * CTL_KERN/KERN_VERSION is used by older glibc and cannot - * ever go away. - */ - if (nlen >= 2 && name[0] == CTL_KERN && name[1] == KERN_VERSION) - return; - - if (printk_ratelimit()) { - printk(KERN_INFO - "warning: process `%s' used the deprecated sysctl " - "system call with ", current->comm); - for (i = 0; i < nlen; i++) - printk(KERN_CONT "%d.", name[i]); - printk(KERN_CONT "\n"); - } - return; -} - -#define WARN_ONCE_HASH_BITS 8 -#define WARN_ONCE_HASH_SIZE (1<nlen. 
*/ - if (nlen < 0 || nlen > CTL_MAXNAME) - return -ENOTDIR; - /* Read in the sysctl name for simplicity */ - for (i = 0; i < nlen; i++) - if (get_user(name[i], args_name + i)) - return -EFAULT; - - warn_on_bintable(name, nlen); - - return binary_sysctl(name, nlen, oldval, oldlen, newval, newlen); -} - -SYSCALL_DEFINE1(sysctl, struct __sysctl_args __user *, args) -{ - struct __sysctl_args tmp; - size_t oldlen = 0; - ssize_t result; - - if (copy_from_user(&tmp, args, sizeof(tmp))) - return -EFAULT; - - if (tmp.oldval && !tmp.oldlenp) - return -EFAULT; - - if (tmp.oldlenp && get_user(oldlen, tmp.oldlenp)) - return -EFAULT; - - result = do_sysctl(tmp.name, tmp.nlen, tmp.oldval, oldlen, - tmp.newval, tmp.newlen); - - if (result >= 0) { - oldlen = result; - result = 0; - } - - if (tmp.oldlenp && put_user(oldlen, tmp.oldlenp)) - return -EFAULT; - - return result; -} - - -#ifdef CONFIG_COMPAT - -struct compat_sysctl_args { - compat_uptr_t name; - int nlen; - compat_uptr_t oldval; - compat_uptr_t oldlenp; - compat_uptr_t newval; - compat_size_t newlen; - compat_ulong_t __unused[4]; -}; - -COMPAT_SYSCALL_DEFINE1(sysctl, struct compat_sysctl_args __user *, args) -{ - struct compat_sysctl_args tmp; - compat_size_t __user *compat_oldlenp; - size_t oldlen = 0; - ssize_t result; - - if (copy_from_user(&tmp, args, sizeof(tmp))) - return -EFAULT; - - if (tmp.oldval && !tmp.oldlenp) - return -EFAULT; - - compat_oldlenp = compat_ptr(tmp.oldlenp); - if (compat_oldlenp && get_user(oldlen, compat_oldlenp)) - return -EFAULT; - - result = do_sysctl(compat_ptr(tmp.name), tmp.nlen, - compat_ptr(tmp.oldval), oldlen, - compat_ptr(tmp.newval), tmp.newlen); - - if (result >= 0) { - oldlen = result; - result = 0; - } - - if (compat_oldlenp && put_user(oldlen, compat_oldlenp)) - return -EFAULT; - - return result; -} - -#endif /* CONFIG_COMPAT */ diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl index b190f2eb2611..3ca6fe057a0b 100644 --- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl +++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl @@ -193,7 +193,7 @@ 146 common writev sys_writev compat_sys_writev 147 common getsid sys_getsid 148 common fdatasync sys_fdatasync -149 nospu _sysctl sys_sysctl compat_sys_sysctl +149 nospu _sysctl sys_ni_syscall 150 common mlock sys_mlock 151 common munlock sys_munlock 152 common mlockall sys_mlockall diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl index 56ae24b6e4be..6a0bbea225db 100644 --- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl +++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl @@ -138,7 +138,7 @@ 146 common writev sys_writev compat_sys_writev 147 common getsid sys_getsid sys_getsid 148 common fdatasync sys_fdatasync sys_fdatasync -149 common _sysctl sys_sysctl compat_sys_sysctl +149 common _sysctl - - 150 common mlock sys_mlock compat_sys_mlock 151 common munlock sys_munlock compat_sys_munlock 152 common mlockall sys_mlockall sys_mlockall diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl index 9d82078c949a..f30d6ae9a688 100644 --- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl +++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl @@ -164,7 +164,7 @@ 153 common vhangup sys_vhangup 154 common modify_ldt sys_modify_ldt 155 common pivot_root sys_pivot_root -156 64 _sysctl sys_sysctl +156 64 _sysctl sys_ni_syscall 157 common prctl sys_prctl 158 common arch_prctl 
sys_arch_prctl 159 common adjtimex sys_adjtimex -- cgit v1.2.3 From e0e3f42fd96cf830c61aa720f0abb4eef41c5ce3 Mon Sep 17 00:00:00 2001 From: Qian Cai Date: Fri, 14 Aug 2020 17:31:37 -0700 Subject: mm/memcontrol: fix a data race in scan count struct mem_cgroup_per_node mz.lru_zone_size[zone_idx][lru] could be accessed concurrently as noticed by KCSAN, BUG: KCSAN: data-race in lruvec_lru_size / mem_cgroup_update_lru_size write to 0xffff9c804ca285f8 of 8 bytes by task 50951 on cpu 12: mem_cgroup_update_lru_size+0x11c/0x1d0 mem_cgroup_update_lru_size at mm/memcontrol.c:1266 isolate_lru_pages+0x6a9/0xf30 shrink_active_list+0x123/0xcc0 shrink_lruvec+0x8fd/0x1380 shrink_node+0x317/0xd80 do_try_to_free_pages+0x1f7/0xa10 try_to_free_pages+0x26c/0x5e0 __alloc_pages_slowpath+0x458/0x1290 __alloc_pages_nodemask+0x3bb/0x450 alloc_pages_vma+0x8a/0x2c0 do_anonymous_page+0x170/0x700 __handle_mm_fault+0xc9f/0xd00 handle_mm_fault+0xfc/0x2f0 do_page_fault+0x263/0x6f9 page_fault+0x34/0x40 read to 0xffff9c804ca285f8 of 8 bytes by task 50964 on cpu 95: lruvec_lru_size+0xbb/0x270 mem_cgroup_get_zone_lru_size at include/linux/memcontrol.h:536 (inlined by) lruvec_lru_size at mm/vmscan.c:326 shrink_lruvec+0x1d0/0x1380 shrink_node+0x317/0xd80 do_try_to_free_pages+0x1f7/0xa10 try_to_free_pages+0x26c/0x5e0 __alloc_pages_slowpath+0x458/0x1290 __alloc_pages_nodemask+0x3bb/0x450 alloc_pages_current+0xa6/0x120 alloc_slab_page+0x3b1/0x540 allocate_slab+0x70/0x660 new_slab+0x46/0x70 ___slab_alloc+0x4ad/0x7d0 __slab_alloc+0x43/0x70 kmem_cache_alloc+0x2c3/0x420 getname_flags+0x4c/0x230 getname+0x22/0x30 do_sys_openat2+0x205/0x3b0 do_sys_open+0x9a/0xf0 __x64_sys_openat+0x62/0x80 do_syscall_64+0x91/0xb47 entry_SYSCALL_64_after_hwframe+0x49/0xbe Reported by Kernel Concurrency Sanitizer on: CPU: 95 PID: 50964 Comm: cc1 Tainted: G W O L 5.5.0-next-20200204+ #6 Hardware name: HPE ProLiant DL385 Gen10/ProLiant DL385 Gen10, BIOS A40 07/10/2019 The write is under lru_lock, but the read is done as lockless. The scan count is used to determine how aggressively the anon and file LRU lists should be scanned. Load tearing could generate an inefficient heuristic, so fix it by adding READ_ONCE() for the read. 
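As an illustration of the pattern being applied here (a minimal, self-contained userspace sketch, not part of the patch): the writer updates a counter under a lock while the reader loads it locklessly through READ_ONCE(), so the compiler cannot tear or repeat the load. The lru_size, update_size() and scan_size() names below are invented for illustration, and READ_ONCE() is reduced to a simple volatile cast standing in for the kernel macro.

#include <pthread.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's READ_ONCE(): force a single,
 * untorn load through a volatile-qualified pointer. */
#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long lru_size;	/* hypothetical per-zone LRU size */

/* Writer: updates the counter under lru_lock, in the same way
 * mem_cgroup_update_lru_size() does in the report above. */
static void update_size(long delta)
{
	pthread_mutex_lock(&lru_lock);
	lru_size += delta;
	pthread_mutex_unlock(&lru_lock);
}

/* Lockless reader: the annotation keeps the compiler from tearing or
 * repeating the load, which is what the fix adds to lruvec_lru_size(). */
static unsigned long scan_size(void)
{
	return READ_ONCE(lru_size);
}

int main(void)
{
	update_size(32);
	printf("scan size: %lu\n", scan_size());
	return 0;
}
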
Signed-off-by: Qian Cai Signed-off-by: Andrew Morton Cc: Johannes Weiner Cc: Michal Hocko Cc: Vladimir Davydov Link: http://lkml.kernel.org/r/20200206034945.2481-1-cai@lca.pw Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 385237e4cb44..d0b036123c6a 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -630,7 +630,7 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec, struct mem_cgroup_per_node *mz; mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); - return mz->lru_zone_size[zone_idx][lru]; + return READ_ONCE(mz->lru_zone_size[zone_idx][lru]); } void mem_cgroup_handle_over_high(void); -- cgit v1.2.3 From c403f6a3a792a6601185497c12b0bdf4be880439 Mon Sep 17 00:00:00 2001 From: Qian Cai Date: Fri, 14 Aug 2020 17:31:53 -0700 Subject: mm: annotate a data race in page_zonenum() BUG: KCSAN: data-race in page_cpupid_xchg_last / put_page write (marked) to 0xfffffc0d48ec1a00 of 8 bytes by task 91442 on cpu 3: page_cpupid_xchg_last+0x51/0x80 page_cpupid_xchg_last at mm/mmzone.c:109 (discriminator 11) wp_page_reuse+0x3e/0xc0 wp_page_reuse at mm/memory.c:2453 do_wp_page+0x472/0x7b0 do_wp_page at mm/memory.c:2798 __handle_mm_fault+0xcb0/0xd00 handle_pte_fault at mm/memory.c:4049 (inlined by) __handle_mm_fault at mm/memory.c:4163 handle_mm_fault+0xfc/0x2f0 handle_mm_fault at mm/memory.c:4200 do_page_fault+0x263/0x6f9 do_user_addr_fault at arch/x86/mm/fault.c:1465 (inlined by) do_page_fault at arch/x86/mm/fault.c:1539 page_fault+0x34/0x40 read to 0xfffffc0d48ec1a00 of 8 bytes by task 94817 on cpu 69: put_page+0x15a/0x1f0 page_zonenum at include/linux/mm.h:923 (inlined by) is_zone_device_page at include/linux/mm.h:929 (inlined by) page_is_devmap_managed at include/linux/mm.h:948 (inlined by) put_page at include/linux/mm.h:1023 wp_page_copy+0x571/0x930 wp_page_copy at mm/memory.c:2615 do_wp_page+0x107/0x7b0 __handle_mm_fault+0xcb0/0xd00 handle_mm_fault+0xfc/0x2f0 do_page_fault+0x263/0x6f9 page_fault+0x34/0x40 Reported by Kernel Concurrency Sanitizer on: CPU: 69 PID: 94817 Comm: systemd-udevd Tainted: G W O L 5.5.0-next-20200204+ #6 Hardware name: HPE ProLiant DL385 Gen10/ProLiant DL385 Gen10, BIOS A40 07/10/2019 A page never changes its zone number. The zone number happens to be stored in the same word as other bits which are modified, but the zone number bits will never be modified by any other write, so it can accept a reload of the zone bits after an intervening write and it don't need to use READ_ONCE(). Thus, annotate this data race using ASSERT_EXCLUSIVE_BITS() to also assert that there are no concurrent writes to it. Suggested-by: Marco Elver Signed-off-by: Qian Cai Signed-off-by: Andrew Morton Cc: Paul E. 
McKenney Cc: David Hildenbrand Cc: Jan Kara Cc: John Hubbard Cc: Ira Weiny Cc: Dan Williams Link: http://lkml.kernel.org/r/1581619089-14472-1-git-send-email-cai@lca.pw Signed-off-by: Linus Torvalds --- include/linux/mm.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/mm.h b/include/linux/mm.h index 2e7ec3ee34cf..1983e08f5906 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1067,6 +1067,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); static inline enum zone_type page_zonenum(const struct page *page) { + ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT); return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK; } -- cgit v1.2.3 From 7f897acbe5d57995438c831670b7c400e9c0dc00 Mon Sep 17 00:00:00 2001 From: Romain Naour Date: Fri, 14 Aug 2020 17:31:57 -0700 Subject: include/asm-generic/vmlinux.lds.h: align ro_after_init Since the patch [1], building the kernel using a toolchain built with binutils 2.33.1 prevents booting a sh4 system under Qemu. Apply the patch provided by Alan Modra [2] that fix alignment of rodata. [1] https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=ebd2263ba9a9124d93bbc0ece63d7e0fae89b40e [2] https://www.sourceware.org/ml/binutils/2019-12/msg00112.html Signed-off-by: Romain Naour Signed-off-by: Andrew Morton Cc: Alan Modra Cc: Bin Meng Cc: Chen Zhou Cc: Geert Uytterhoeven Cc: John Paul Adrian Glaubitz Cc: Krzysztof Kozlowski Cc: Kuninori Morimoto Cc: Rich Felker Cc: Sam Ravnborg Cc: Yoshinori Sato Cc: Arnd Bergmann Cc: Link: https://marc.info/?l=linux-sh&m=158429470221261 Signed-off-by: Linus Torvalds --- include/asm-generic/vmlinux.lds.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 7616ff0b96ec..5430febd34be 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -394,6 +394,7 @@ */ #ifndef RO_AFTER_INIT_DATA #define RO_AFTER_INIT_DATA \ + . = ALIGN(8); \ __start_ro_after_init = .; \ *(.data..ro_after_init) \ JUMP_TABLE_DATA \ -- cgit v1.2.3 From 8f28ca6bd8211214faf717677bbffe375c2a6072 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Fri, 14 Aug 2020 17:32:07 -0700 Subject: iomap: constify ioreadX() iomem argument (as in generic implementation) Patch series "iomap: Constify ioreadX() iomem argument", v3. The ioread8/16/32() and others have inconsistent interface among the architectures: some taking address as const, some not. It seems there is nothing really stopping all of them to take pointer to const. This patch (of 4): The ioreadX() and ioreadX_rep() helpers have inconsistent interface. On some architectures void *__iomem address argument is a pointer to const, on some not. Implementations of ioreadX() do not modify the memory under the address so they can be converted to a "const" version for const-safety and consistency among architectures. [krzk@kernel.org: sh: clk: fix assignment from incompatible pointer type for ioreadX()] Link: http://lkml.kernel.org/r/20200723082017.24053-1-krzk@kernel.org [akpm@linux-foundation.org: fix drivers/mailbox/bcm-pdc-mailbox.c] Link: http://lkml.kernel.org/r/202007132209.Rxmv4QyS%25lkp@intel.com Suggested-by: Geert Uytterhoeven Signed-off-by: Krzysztof Kozlowski Signed-off-by: Andrew Morton Reviewed-by: Geert Uytterhoeven Reviewed-by: Arnd Bergmann Cc: Richard Henderson Cc: Ivan Kokshaysky Cc: Matt Turner Cc: "James E.J. 
Bottomley" Cc: Helge Deller Cc: Michael Ellerman Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Yoshinori Sato Cc: Rich Felker Cc: Kalle Valo Cc: "David S. Miller" Cc: Jakub Kicinski Cc: Dave Jiang Cc: Jon Mason Cc: Allen Hubbe Cc: "Michael S. Tsirkin" Cc: Jason Wang Link: http://lkml.kernel.org/r/20200709072837.5869-1-krzk@kernel.org Link: http://lkml.kernel.org/r/20200709072837.5869-2-krzk@kernel.org Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/core_apecs.h | 6 +-- arch/alpha/include/asm/core_cia.h | 6 +-- arch/alpha/include/asm/core_lca.h | 6 +-- arch/alpha/include/asm/core_marvel.h | 4 +- arch/alpha/include/asm/core_mcpcia.h | 6 +-- arch/alpha/include/asm/core_t2.h | 2 +- arch/alpha/include/asm/io.h | 12 +++--- arch/alpha/include/asm/io_trivial.h | 16 ++++---- arch/alpha/include/asm/jensen.h | 2 +- arch/alpha/include/asm/machvec.h | 6 +-- arch/alpha/kernel/core_marvel.c | 2 +- arch/alpha/kernel/io.c | 12 +++--- arch/parisc/include/asm/io.h | 4 +- arch/parisc/lib/iomap.c | 72 +++++++++++++++++------------------ arch/powerpc/kernel/iomap.c | 28 +++++++------- arch/sh/kernel/iomap.c | 22 +++++------ drivers/mailbox/bcm-pdc-mailbox.c | 2 +- drivers/sh/clk/cpg.c | 2 +- include/asm-generic/iomap.h | 28 +++++++------- include/linux/io-64-nonatomic-hi-lo.h | 4 +- include/linux/io-64-nonatomic-lo-hi.h | 4 +- lib/iomap.c | 30 +++++++-------- 22 files changed, 138 insertions(+), 138 deletions(-) (limited to 'include') diff --git a/arch/alpha/include/asm/core_apecs.h b/arch/alpha/include/asm/core_apecs.h index 0a07055bc0fe..2d9726fc02ef 100644 --- a/arch/alpha/include/asm/core_apecs.h +++ b/arch/alpha/include/asm/core_apecs.h @@ -384,7 +384,7 @@ struct el_apecs_procdata } \ } while (0) -__EXTERN_INLINE unsigned int apecs_ioread8(void __iomem *xaddr) +__EXTERN_INLINE unsigned int apecs_ioread8(const void __iomem *xaddr) { unsigned long addr = (unsigned long) xaddr; unsigned long result, base_and_type; @@ -420,7 +420,7 @@ __EXTERN_INLINE void apecs_iowrite8(u8 b, void __iomem *xaddr) *(vuip) ((addr << 5) + base_and_type) = w; } -__EXTERN_INLINE unsigned int apecs_ioread16(void __iomem *xaddr) +__EXTERN_INLINE unsigned int apecs_ioread16(const void __iomem *xaddr) { unsigned long addr = (unsigned long) xaddr; unsigned long result, base_and_type; @@ -456,7 +456,7 @@ __EXTERN_INLINE void apecs_iowrite16(u16 b, void __iomem *xaddr) *(vuip) ((addr << 5) + base_and_type) = w; } -__EXTERN_INLINE unsigned int apecs_ioread32(void __iomem *xaddr) +__EXTERN_INLINE unsigned int apecs_ioread32(const void __iomem *xaddr) { unsigned long addr = (unsigned long) xaddr; if (addr < APECS_DENSE_MEM) diff --git a/arch/alpha/include/asm/core_cia.h b/arch/alpha/include/asm/core_cia.h index c706a7f2b061..cb22991f6761 100644 --- a/arch/alpha/include/asm/core_cia.h +++ b/arch/alpha/include/asm/core_cia.h @@ -342,7 +342,7 @@ struct el_CIA_sysdata_mcheck { #define vuip volatile unsigned int __force * #define vulp volatile unsigned long __force * -__EXTERN_INLINE unsigned int cia_ioread8(void __iomem *xaddr) +__EXTERN_INLINE unsigned int cia_ioread8(const void __iomem *xaddr) { unsigned long addr = (unsigned long) xaddr; unsigned long result, base_and_type; @@ -374,7 +374,7 @@ __EXTERN_INLINE void cia_iowrite8(u8 b, void __iomem *xaddr) *(vuip) ((addr << 5) + base_and_type) = w; } -__EXTERN_INLINE unsigned int cia_ioread16(void __iomem *xaddr) +__EXTERN_INLINE unsigned int cia_ioread16(const void __iomem *xaddr) { unsigned long addr = (unsigned long) xaddr; unsigned long result, base_and_type; @@ -404,7 +404,7 @@ 
__EXTERN_INLINE void cia_iowrite16(u16 b, void __iomem *xaddr) *(vuip) ((addr << 5) + base_and_type) = w; } -__EXTERN_INLINE unsigned int cia_ioread32(void __iomem *xaddr) +__EXTERN_INLINE unsigned int cia_ioread32(const void __iomem *xaddr) { unsigned long addr = (unsigned long) xaddr; if (addr < CIA_DENSE_MEM) diff --git a/arch/alpha/include/asm/core_lca.h b/arch/alpha/include/asm/core_lca.h index 84d5e5b84f4f..ec86314418cb 100644 --- a/arch/alpha/include/asm/core_lca.h +++ b/arch/alpha/include/asm/core_lca.h @@ -230,7 +230,7 @@ union el_lca { } while (0) -__EXTERN_INLINE unsigned int lca_ioread8(void __iomem *xaddr) +__EXTERN_INLINE unsigned int lca_ioread8(const void __iomem *xaddr) { unsigned long addr = (unsigned long) xaddr; unsigned long result, base_and_type; @@ -266,7 +266,7 @@ __EXTERN_INLINE void lca_iowrite8(u8 b, void __iomem *xaddr) *(vuip) ((addr << 5) + base_and_type) = w; } -__EXTERN_INLINE unsigned int lca_ioread16(void __iomem *xaddr) +__EXTERN_INLINE unsigned int lca_ioread16(const void __iomem *xaddr) { unsigned long addr = (unsigned long) xaddr; unsigned long result, base_and_type; @@ -302,7 +302,7 @@ __EXTERN_INLINE void lca_iowrite16(u16 b, void __iomem *xaddr) *(vuip) ((addr << 5) + base_and_type) = w; } -__EXTERN_INLINE unsigned int lca_ioread32(void __iomem *xaddr) +__EXTERN_INLINE unsigned int lca_ioread32(const void __iomem *xaddr) { unsigned long addr = (unsigned long) xaddr; if (addr < LCA_DENSE_MEM) diff --git a/arch/alpha/include/asm/core_marvel.h b/arch/alpha/include/asm/core_marvel.h index cc6fd92d5fa9..b266e02e284b 100644 --- a/arch/alpha/include/asm/core_marvel.h +++ b/arch/alpha/include/asm/core_marvel.h @@ -332,10 +332,10 @@ struct io7 { #define vucp volatile unsigned char __force * #define vusp volatile unsigned short __force * -extern unsigned int marvel_ioread8(void __iomem *); +extern unsigned int marvel_ioread8(const void __iomem *); extern void marvel_iowrite8(u8 b, void __iomem *); -__EXTERN_INLINE unsigned int marvel_ioread16(void __iomem *addr) +__EXTERN_INLINE unsigned int marvel_ioread16(const void __iomem *addr) { return __kernel_ldwu(*(vusp)addr); } diff --git a/arch/alpha/include/asm/core_mcpcia.h b/arch/alpha/include/asm/core_mcpcia.h index b30dc128210d..cb24d1bd6141 100644 --- a/arch/alpha/include/asm/core_mcpcia.h +++ b/arch/alpha/include/asm/core_mcpcia.h @@ -267,7 +267,7 @@ extern inline int __mcpcia_is_mmio(unsigned long addr) return (addr & 0x80000000UL) == 0; } -__EXTERN_INLINE unsigned int mcpcia_ioread8(void __iomem *xaddr) +__EXTERN_INLINE unsigned int mcpcia_ioread8(const void __iomem *xaddr) { unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK; unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK; @@ -291,7 +291,7 @@ __EXTERN_INLINE void mcpcia_iowrite8(u8 b, void __iomem *xaddr) *(vuip) ((addr << 5) + hose + 0x00) = w; } -__EXTERN_INLINE unsigned int mcpcia_ioread16(void __iomem *xaddr) +__EXTERN_INLINE unsigned int mcpcia_ioread16(const void __iomem *xaddr) { unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK; unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK; @@ -315,7 +315,7 @@ __EXTERN_INLINE void mcpcia_iowrite16(u16 b, void __iomem *xaddr) *(vuip) ((addr << 5) + hose + 0x08) = w; } -__EXTERN_INLINE unsigned int mcpcia_ioread32(void __iomem *xaddr) +__EXTERN_INLINE unsigned int mcpcia_ioread32(const void __iomem *xaddr) { unsigned long addr = (unsigned long)xaddr; diff --git a/arch/alpha/include/asm/core_t2.h b/arch/alpha/include/asm/core_t2.h index e0b33d09e93a..12bb7addc789 
100644 --- a/arch/alpha/include/asm/core_t2.h +++ b/arch/alpha/include/asm/core_t2.h @@ -572,7 +572,7 @@ __EXTERN_INLINE int t2_is_mmio(const volatile void __iomem *addr) it doesn't make sense to merge the pio and mmio routines. */ #define IOPORT(OS, NS) \ -__EXTERN_INLINE unsigned int t2_ioread##NS(void __iomem *xaddr) \ +__EXTERN_INLINE unsigned int t2_ioread##NS(const void __iomem *xaddr) \ { \ if (t2_is_mmio(xaddr)) \ return t2_read##OS(xaddr); \ diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h index 640e1a2f57b4..1f6a909d1fa5 100644 --- a/arch/alpha/include/asm/io.h +++ b/arch/alpha/include/asm/io.h @@ -150,9 +150,9 @@ static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr) \ alpha_mv.mv_##NAME(b, addr); \ } -REMAP1(unsigned int, ioread8, /**/) -REMAP1(unsigned int, ioread16, /**/) -REMAP1(unsigned int, ioread32, /**/) +REMAP1(unsigned int, ioread8, const) +REMAP1(unsigned int, ioread16, const) +REMAP1(unsigned int, ioread32, const) REMAP1(u8, readb, const volatile) REMAP1(u16, readw, const volatile) REMAP1(u32, readl, const volatile) @@ -307,7 +307,7 @@ static inline int __is_mmio(const volatile void __iomem *addr) */ #if IO_CONCAT(__IO_PREFIX,trivial_io_bw) -extern inline unsigned int ioread8(void __iomem *addr) +extern inline unsigned int ioread8(const void __iomem *addr) { unsigned int ret; mb(); @@ -316,7 +316,7 @@ extern inline unsigned int ioread8(void __iomem *addr) return ret; } -extern inline unsigned int ioread16(void __iomem *addr) +extern inline unsigned int ioread16(const void __iomem *addr) { unsigned int ret; mb(); @@ -359,7 +359,7 @@ extern inline void outw(u16 b, unsigned long port) #endif #if IO_CONCAT(__IO_PREFIX,trivial_io_lq) -extern inline unsigned int ioread32(void __iomem *addr) +extern inline unsigned int ioread32(const void __iomem *addr) { unsigned int ret; mb(); diff --git a/arch/alpha/include/asm/io_trivial.h b/arch/alpha/include/asm/io_trivial.h index ba3d8f0cfe0c..a1a29cbe02fa 100644 --- a/arch/alpha/include/asm/io_trivial.h +++ b/arch/alpha/include/asm/io_trivial.h @@ -7,15 +7,15 @@ #if IO_CONCAT(__IO_PREFIX,trivial_io_bw) __EXTERN_INLINE unsigned int -IO_CONCAT(__IO_PREFIX,ioread8)(void __iomem *a) +IO_CONCAT(__IO_PREFIX,ioread8)(const void __iomem *a) { - return __kernel_ldbu(*(volatile u8 __force *)a); + return __kernel_ldbu(*(const volatile u8 __force *)a); } __EXTERN_INLINE unsigned int -IO_CONCAT(__IO_PREFIX,ioread16)(void __iomem *a) +IO_CONCAT(__IO_PREFIX,ioread16)(const void __iomem *a) { - return __kernel_ldwu(*(volatile u16 __force *)a); + return __kernel_ldwu(*(const volatile u16 __force *)a); } __EXTERN_INLINE void @@ -33,9 +33,9 @@ IO_CONCAT(__IO_PREFIX,iowrite16)(u16 b, void __iomem *a) #if IO_CONCAT(__IO_PREFIX,trivial_io_lq) __EXTERN_INLINE unsigned int -IO_CONCAT(__IO_PREFIX,ioread32)(void __iomem *a) +IO_CONCAT(__IO_PREFIX,ioread32)(const void __iomem *a) { - return *(volatile u32 __force *)a; + return *(const volatile u32 __force *)a; } __EXTERN_INLINE void @@ -73,14 +73,14 @@ IO_CONCAT(__IO_PREFIX,writew)(u16 b, volatile void __iomem *a) __EXTERN_INLINE u8 IO_CONCAT(__IO_PREFIX,readb)(const volatile void __iomem *a) { - void __iomem *addr = (void __iomem *)a; + const void __iomem *addr = (const void __iomem *)a; return IO_CONCAT(__IO_PREFIX,ioread8)(addr); } __EXTERN_INLINE u16 IO_CONCAT(__IO_PREFIX,readw)(const volatile void __iomem *a) { - void __iomem *addr = (void __iomem *)a; + const void __iomem *addr = (const void __iomem *)a; return IO_CONCAT(__IO_PREFIX,ioread16)(addr); } diff --git 
a/arch/alpha/include/asm/jensen.h b/arch/alpha/include/asm/jensen.h index 436dc905b6ad..916895155a88 100644 --- a/arch/alpha/include/asm/jensen.h +++ b/arch/alpha/include/asm/jensen.h @@ -305,7 +305,7 @@ __EXTERN_INLINE int jensen_is_mmio(const volatile void __iomem *addr) that it doesn't make sense to merge them. */ #define IOPORT(OS, NS) \ -__EXTERN_INLINE unsigned int jensen_ioread##NS(void __iomem *xaddr) \ +__EXTERN_INLINE unsigned int jensen_ioread##NS(const void __iomem *xaddr) \ { \ if (jensen_is_mmio(xaddr)) \ return jensen_read##OS(xaddr - 0x100000000ul); \ diff --git a/arch/alpha/include/asm/machvec.h b/arch/alpha/include/asm/machvec.h index a6b73c6d10ee..a4e96e2bec74 100644 --- a/arch/alpha/include/asm/machvec.h +++ b/arch/alpha/include/asm/machvec.h @@ -46,9 +46,9 @@ struct alpha_machine_vector void (*mv_pci_tbi)(struct pci_controller *hose, dma_addr_t start, dma_addr_t end); - unsigned int (*mv_ioread8)(void __iomem *); - unsigned int (*mv_ioread16)(void __iomem *); - unsigned int (*mv_ioread32)(void __iomem *); + unsigned int (*mv_ioread8)(const void __iomem *); + unsigned int (*mv_ioread16)(const void __iomem *); + unsigned int (*mv_ioread32)(const void __iomem *); void (*mv_iowrite8)(u8, void __iomem *); void (*mv_iowrite16)(u16, void __iomem *); diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c index 4c80d992a659..4485b77f8658 100644 --- a/arch/alpha/kernel/core_marvel.c +++ b/arch/alpha/kernel/core_marvel.c @@ -806,7 +806,7 @@ void __iomem *marvel_ioportmap (unsigned long addr) } unsigned int -marvel_ioread8(void __iomem *xaddr) +marvel_ioread8(const void __iomem *xaddr) { unsigned long addr = (unsigned long) xaddr; if (__marvel_is_port_kbd(addr)) diff --git a/arch/alpha/kernel/io.c b/arch/alpha/kernel/io.c index 938de13adfbf..838586abb1e0 100644 --- a/arch/alpha/kernel/io.c +++ b/arch/alpha/kernel/io.c @@ -14,7 +14,7 @@ "generic", which bumps through the machine vector. */ unsigned int -ioread8(void __iomem *addr) +ioread8(const void __iomem *addr) { unsigned int ret; mb(); @@ -23,7 +23,7 @@ ioread8(void __iomem *addr) return ret; } -unsigned int ioread16(void __iomem *addr) +unsigned int ioread16(const void __iomem *addr) { unsigned int ret; mb(); @@ -32,7 +32,7 @@ unsigned int ioread16(void __iomem *addr) return ret; } -unsigned int ioread32(void __iomem *addr) +unsigned int ioread32(const void __iomem *addr) { unsigned int ret; mb(); @@ -257,7 +257,7 @@ EXPORT_SYMBOL(readq_relaxed); /* * Read COUNT 8-bit bytes from port PORT into memory starting at SRC. */ -void ioread8_rep(void __iomem *port, void *dst, unsigned long count) +void ioread8_rep(const void __iomem *port, void *dst, unsigned long count) { while ((unsigned long)dst & 0x3) { if (!count) @@ -300,7 +300,7 @@ EXPORT_SYMBOL(insb); * the interfaces seems to be slow: just using the inlined version * of the inw() breaks things. */ -void ioread16_rep(void __iomem *port, void *dst, unsigned long count) +void ioread16_rep(const void __iomem *port, void *dst, unsigned long count) { if (unlikely((unsigned long)dst & 0x3)) { if (!count) @@ -340,7 +340,7 @@ EXPORT_SYMBOL(insw); * but the interfaces seems to be slow: just using the inlined version * of the inl() breaks things. 
*/ -void ioread32_rep(void __iomem *port, void *dst, unsigned long count) +void ioread32_rep(const void __iomem *port, void *dst, unsigned long count) { if (unlikely((unsigned long)dst & 0x3)) { while (count--) { diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h index 116effe26143..45e20d38dc59 100644 --- a/arch/parisc/include/asm/io.h +++ b/arch/parisc/include/asm/io.h @@ -303,8 +303,8 @@ extern void outsl (unsigned long port, const void *src, unsigned long count); #define ioread64be ioread64be #define iowrite64 iowrite64 #define iowrite64be iowrite64be -extern u64 ioread64(void __iomem *addr); -extern u64 ioread64be(void __iomem *addr); +extern u64 ioread64(const void __iomem *addr); +extern u64 ioread64be(const void __iomem *addr); extern void iowrite64(u64 val, void __iomem *addr); extern void iowrite64be(u64 val, void __iomem *addr); diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c index 0195aec657e2..ce400417d54e 100644 --- a/arch/parisc/lib/iomap.c +++ b/arch/parisc/lib/iomap.c @@ -43,13 +43,13 @@ #endif struct iomap_ops { - unsigned int (*read8)(void __iomem *); - unsigned int (*read16)(void __iomem *); - unsigned int (*read16be)(void __iomem *); - unsigned int (*read32)(void __iomem *); - unsigned int (*read32be)(void __iomem *); - u64 (*read64)(void __iomem *); - u64 (*read64be)(void __iomem *); + unsigned int (*read8)(const void __iomem *); + unsigned int (*read16)(const void __iomem *); + unsigned int (*read16be)(const void __iomem *); + unsigned int (*read32)(const void __iomem *); + unsigned int (*read32be)(const void __iomem *); + u64 (*read64)(const void __iomem *); + u64 (*read64be)(const void __iomem *); void (*write8)(u8, void __iomem *); void (*write16)(u16, void __iomem *); void (*write16be)(u16, void __iomem *); @@ -57,9 +57,9 @@ struct iomap_ops { void (*write32be)(u32, void __iomem *); void (*write64)(u64, void __iomem *); void (*write64be)(u64, void __iomem *); - void (*read8r)(void __iomem *, void *, unsigned long); - void (*read16r)(void __iomem *, void *, unsigned long); - void (*read32r)(void __iomem *, void *, unsigned long); + void (*read8r)(const void __iomem *, void *, unsigned long); + void (*read16r)(const void __iomem *, void *, unsigned long); + void (*read32r)(const void __iomem *, void *, unsigned long); void (*write8r)(void __iomem *, const void *, unsigned long); void (*write16r)(void __iomem *, const void *, unsigned long); void (*write32r)(void __iomem *, const void *, unsigned long); @@ -69,17 +69,17 @@ struct iomap_ops { #define ADDR2PORT(addr) ((unsigned long __force)(addr) & 0xffffff) -static unsigned int ioport_read8(void __iomem *addr) +static unsigned int ioport_read8(const void __iomem *addr) { return inb(ADDR2PORT(addr)); } -static unsigned int ioport_read16(void __iomem *addr) +static unsigned int ioport_read16(const void __iomem *addr) { return inw(ADDR2PORT(addr)); } -static unsigned int ioport_read32(void __iomem *addr) +static unsigned int ioport_read32(const void __iomem *addr) { return inl(ADDR2PORT(addr)); } @@ -99,17 +99,17 @@ static void ioport_write32(u32 datum, void __iomem *addr) outl(datum, ADDR2PORT(addr)); } -static void ioport_read8r(void __iomem *addr, void *dst, unsigned long count) +static void ioport_read8r(const void __iomem *addr, void *dst, unsigned long count) { insb(ADDR2PORT(addr), dst, count); } -static void ioport_read16r(void __iomem *addr, void *dst, unsigned long count) +static void ioport_read16r(const void __iomem *addr, void *dst, unsigned long count) { 
insw(ADDR2PORT(addr), dst, count); } -static void ioport_read32r(void __iomem *addr, void *dst, unsigned long count) +static void ioport_read32r(const void __iomem *addr, void *dst, unsigned long count) { insl(ADDR2PORT(addr), dst, count); } @@ -150,37 +150,37 @@ static const struct iomap_ops ioport_ops = { /* Legacy I/O memory ops */ -static unsigned int iomem_read8(void __iomem *addr) +static unsigned int iomem_read8(const void __iomem *addr) { return readb(addr); } -static unsigned int iomem_read16(void __iomem *addr) +static unsigned int iomem_read16(const void __iomem *addr) { return readw(addr); } -static unsigned int iomem_read16be(void __iomem *addr) +static unsigned int iomem_read16be(const void __iomem *addr) { return __raw_readw(addr); } -static unsigned int iomem_read32(void __iomem *addr) +static unsigned int iomem_read32(const void __iomem *addr) { return readl(addr); } -static unsigned int iomem_read32be(void __iomem *addr) +static unsigned int iomem_read32be(const void __iomem *addr) { return __raw_readl(addr); } -static u64 iomem_read64(void __iomem *addr) +static u64 iomem_read64(const void __iomem *addr) { return readq(addr); } -static u64 iomem_read64be(void __iomem *addr) +static u64 iomem_read64be(const void __iomem *addr) { return __raw_readq(addr); } @@ -220,7 +220,7 @@ static void iomem_write64be(u64 datum, void __iomem *addr) __raw_writel(datum, addr); } -static void iomem_read8r(void __iomem *addr, void *dst, unsigned long count) +static void iomem_read8r(const void __iomem *addr, void *dst, unsigned long count) { while (count--) { *(u8 *)dst = __raw_readb(addr); @@ -228,7 +228,7 @@ static void iomem_read8r(void __iomem *addr, void *dst, unsigned long count) } } -static void iomem_read16r(void __iomem *addr, void *dst, unsigned long count) +static void iomem_read16r(const void __iomem *addr, void *dst, unsigned long count) { while (count--) { *(u16 *)dst = __raw_readw(addr); @@ -236,7 +236,7 @@ static void iomem_read16r(void __iomem *addr, void *dst, unsigned long count) } } -static void iomem_read32r(void __iomem *addr, void *dst, unsigned long count) +static void iomem_read32r(const void __iomem *addr, void *dst, unsigned long count) { while (count--) { *(u32 *)dst = __raw_readl(addr); @@ -297,49 +297,49 @@ static const struct iomap_ops *iomap_ops[8] = { }; -unsigned int ioread8(void __iomem *addr) +unsigned int ioread8(const void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) return iomap_ops[ADDR_TO_REGION(addr)]->read8(addr); return *((u8 *)addr); } -unsigned int ioread16(void __iomem *addr) +unsigned int ioread16(const void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) return iomap_ops[ADDR_TO_REGION(addr)]->read16(addr); return le16_to_cpup((u16 *)addr); } -unsigned int ioread16be(void __iomem *addr) +unsigned int ioread16be(const void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) return iomap_ops[ADDR_TO_REGION(addr)]->read16be(addr); return *((u16 *)addr); } -unsigned int ioread32(void __iomem *addr) +unsigned int ioread32(const void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) return iomap_ops[ADDR_TO_REGION(addr)]->read32(addr); return le32_to_cpup((u32 *)addr); } -unsigned int ioread32be(void __iomem *addr) +unsigned int ioread32be(const void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) return iomap_ops[ADDR_TO_REGION(addr)]->read32be(addr); return *((u32 *)addr); } -u64 ioread64(void __iomem *addr) +u64 ioread64(const void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) return 
iomap_ops[ADDR_TO_REGION(addr)]->read64(addr); return le64_to_cpup((u64 *)addr); } -u64 ioread64be(void __iomem *addr) +u64 ioread64be(const void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) return iomap_ops[ADDR_TO_REGION(addr)]->read64be(addr); @@ -411,7 +411,7 @@ void iowrite64be(u64 datum, void __iomem *addr) /* Repeating interfaces */ -void ioread8_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count) { if (unlikely(INDIRECT_ADDR(addr))) { iomap_ops[ADDR_TO_REGION(addr)]->read8r(addr, dst, count); @@ -423,7 +423,7 @@ void ioread8_rep(void __iomem *addr, void *dst, unsigned long count) } } -void ioread16_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count) { if (unlikely(INDIRECT_ADDR(addr))) { iomap_ops[ADDR_TO_REGION(addr)]->read16r(addr, dst, count); @@ -435,7 +435,7 @@ void ioread16_rep(void __iomem *addr, void *dst, unsigned long count) } } -void ioread32_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count) { if (unlikely(INDIRECT_ADDR(addr))) { iomap_ops[ADDR_TO_REGION(addr)]->read32r(addr, dst, count); diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c index 5ac84efc6ede..9fe4fb3b08aa 100644 --- a/arch/powerpc/kernel/iomap.c +++ b/arch/powerpc/kernel/iomap.c @@ -15,23 +15,23 @@ * Here comes the ppc64 implementation of the IOMAP * interfaces. */ -unsigned int ioread8(void __iomem *addr) +unsigned int ioread8(const void __iomem *addr) { return readb(addr); } -unsigned int ioread16(void __iomem *addr) +unsigned int ioread16(const void __iomem *addr) { return readw(addr); } -unsigned int ioread16be(void __iomem *addr) +unsigned int ioread16be(const void __iomem *addr) { return readw_be(addr); } -unsigned int ioread32(void __iomem *addr) +unsigned int ioread32(const void __iomem *addr) { return readl(addr); } -unsigned int ioread32be(void __iomem *addr) +unsigned int ioread32be(const void __iomem *addr) { return readl_be(addr); } @@ -41,27 +41,27 @@ EXPORT_SYMBOL(ioread16be); EXPORT_SYMBOL(ioread32); EXPORT_SYMBOL(ioread32be); #ifdef __powerpc64__ -u64 ioread64(void __iomem *addr) +u64 ioread64(const void __iomem *addr) { return readq(addr); } -u64 ioread64_lo_hi(void __iomem *addr) +u64 ioread64_lo_hi(const void __iomem *addr) { return readq(addr); } -u64 ioread64_hi_lo(void __iomem *addr) +u64 ioread64_hi_lo(const void __iomem *addr) { return readq(addr); } -u64 ioread64be(void __iomem *addr) +u64 ioread64be(const void __iomem *addr) { return readq_be(addr); } -u64 ioread64be_lo_hi(void __iomem *addr) +u64 ioread64be_lo_hi(const void __iomem *addr) { return readq_be(addr); } -u64 ioread64be_hi_lo(void __iomem *addr) +u64 ioread64be_hi_lo(const void __iomem *addr) { return readq_be(addr); } @@ -139,15 +139,15 @@ EXPORT_SYMBOL(iowrite64be_hi_lo); * FIXME! We could make these do EEH handling if we really * wanted. Not clear if we do. 
*/ -void ioread8_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count) { readsb(addr, dst, count); } -void ioread16_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count) { readsw(addr, dst, count); } -void ioread32_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count) { readsl(addr, dst, count); } diff --git a/arch/sh/kernel/iomap.c b/arch/sh/kernel/iomap.c index ef9e2c97cbb7..0a0dff4e66de 100644 --- a/arch/sh/kernel/iomap.c +++ b/arch/sh/kernel/iomap.c @@ -8,31 +8,31 @@ #include #include -unsigned int ioread8(void __iomem *addr) +unsigned int ioread8(const void __iomem *addr) { return readb(addr); } EXPORT_SYMBOL(ioread8); -unsigned int ioread16(void __iomem *addr) +unsigned int ioread16(const void __iomem *addr) { return readw(addr); } EXPORT_SYMBOL(ioread16); -unsigned int ioread16be(void __iomem *addr) +unsigned int ioread16be(const void __iomem *addr) { return be16_to_cpu(__raw_readw(addr)); } EXPORT_SYMBOL(ioread16be); -unsigned int ioread32(void __iomem *addr) +unsigned int ioread32(const void __iomem *addr) { return readl(addr); } EXPORT_SYMBOL(ioread32); -unsigned int ioread32be(void __iomem *addr) +unsigned int ioread32be(const void __iomem *addr) { return be32_to_cpu(__raw_readl(addr)); } @@ -74,7 +74,7 @@ EXPORT_SYMBOL(iowrite32be); * convert to CPU byte order. We write in "IO byte * order" (we also don't have IO barriers). */ -static inline void mmio_insb(void __iomem *addr, u8 *dst, int count) +static inline void mmio_insb(const void __iomem *addr, u8 *dst, int count) { while (--count >= 0) { u8 data = __raw_readb(addr); @@ -83,7 +83,7 @@ static inline void mmio_insb(void __iomem *addr, u8 *dst, int count) } } -static inline void mmio_insw(void __iomem *addr, u16 *dst, int count) +static inline void mmio_insw(const void __iomem *addr, u16 *dst, int count) { while (--count >= 0) { u16 data = __raw_readw(addr); @@ -92,7 +92,7 @@ static inline void mmio_insw(void __iomem *addr, u16 *dst, int count) } } -static inline void mmio_insl(void __iomem *addr, u32 *dst, int count) +static inline void mmio_insl(const void __iomem *addr, u32 *dst, int count) { while (--count >= 0) { u32 data = __raw_readl(addr); @@ -125,19 +125,19 @@ static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count) } } -void ioread8_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count) { mmio_insb(addr, dst, count); } EXPORT_SYMBOL(ioread8_rep); -void ioread16_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count) { mmio_insw(addr, dst, count); } EXPORT_SYMBOL(ioread16_rep); -void ioread32_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count) { mmio_insl(addr, dst, count); } diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c index c10a9318a4b7..53945ca5d785 100644 --- a/drivers/mailbox/bcm-pdc-mailbox.c +++ b/drivers/mailbox/bcm-pdc-mailbox.c @@ -679,7 +679,7 @@ pdc_receive(struct pdc_state *pdcs) /* read last_rx_curr from register once */ pdcs->last_rx_curr = - (ioread32(&pdcs->rxregs_64->status0) & + (ioread32((const void __iomem *)&pdcs->rxregs_64->status0) & CRYPTO_D64_RS0_CD_MASK) / 
RING_ENTRY_SIZE; do { diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c index a5cacfe24a42..fd72d9088bdc 100644 --- a/drivers/sh/clk/cpg.c +++ b/drivers/sh/clk/cpg.c @@ -40,7 +40,7 @@ static int sh_clk_mstp_enable(struct clk *clk) { sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk); if (clk->status_reg) { - unsigned int (*read)(void __iomem *addr); + unsigned int (*read)(const void __iomem *addr); int i; void __iomem *mapped_status = (phys_addr_t)clk->status_reg - (phys_addr_t)clk->enable_reg + clk->mapped_reg; diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h index 9d28a5e82f73..649224664969 100644 --- a/include/asm-generic/iomap.h +++ b/include/asm-generic/iomap.h @@ -26,14 +26,14 @@ * in the low address range. Architectures for which this is not * true can't use this generic implementation. */ -extern unsigned int ioread8(void __iomem *); -extern unsigned int ioread16(void __iomem *); -extern unsigned int ioread16be(void __iomem *); -extern unsigned int ioread32(void __iomem *); -extern unsigned int ioread32be(void __iomem *); +extern unsigned int ioread8(const void __iomem *); +extern unsigned int ioread16(const void __iomem *); +extern unsigned int ioread16be(const void __iomem *); +extern unsigned int ioread32(const void __iomem *); +extern unsigned int ioread32be(const void __iomem *); #ifdef CONFIG_64BIT -extern u64 ioread64(void __iomem *); -extern u64 ioread64be(void __iomem *); +extern u64 ioread64(const void __iomem *); +extern u64 ioread64be(const void __iomem *); #endif #ifdef readq @@ -41,10 +41,10 @@ extern u64 ioread64be(void __iomem *); #define ioread64_hi_lo ioread64_hi_lo #define ioread64be_lo_hi ioread64be_lo_hi #define ioread64be_hi_lo ioread64be_hi_lo -extern u64 ioread64_lo_hi(void __iomem *addr); -extern u64 ioread64_hi_lo(void __iomem *addr); -extern u64 ioread64be_lo_hi(void __iomem *addr); -extern u64 ioread64be_hi_lo(void __iomem *addr); +extern u64 ioread64_lo_hi(const void __iomem *addr); +extern u64 ioread64_hi_lo(const void __iomem *addr); +extern u64 ioread64be_lo_hi(const void __iomem *addr); +extern u64 ioread64be_hi_lo(const void __iomem *addr); #endif extern void iowrite8(u8, void __iomem *); @@ -79,9 +79,9 @@ extern void iowrite64be_hi_lo(u64 val, void __iomem *addr); * memory across multiple ports, use "memcpy_toio()" * and friends. 
*/ -extern void ioread8_rep(void __iomem *port, void *buf, unsigned long count); -extern void ioread16_rep(void __iomem *port, void *buf, unsigned long count); -extern void ioread32_rep(void __iomem *port, void *buf, unsigned long count); +extern void ioread8_rep(const void __iomem *port, void *buf, unsigned long count); +extern void ioread16_rep(const void __iomem *port, void *buf, unsigned long count); +extern void ioread32_rep(const void __iomem *port, void *buf, unsigned long count); extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count); extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count); diff --git a/include/linux/io-64-nonatomic-hi-lo.h b/include/linux/io-64-nonatomic-hi-lo.h index ae21b72cce85..f32522bb3aa5 100644 --- a/include/linux/io-64-nonatomic-hi-lo.h +++ b/include/linux/io-64-nonatomic-hi-lo.h @@ -57,7 +57,7 @@ static inline void hi_lo_writeq_relaxed(__u64 val, volatile void __iomem *addr) #ifndef ioread64_hi_lo #define ioread64_hi_lo ioread64_hi_lo -static inline u64 ioread64_hi_lo(void __iomem *addr) +static inline u64 ioread64_hi_lo(const void __iomem *addr) { u32 low, high; @@ -79,7 +79,7 @@ static inline void iowrite64_hi_lo(u64 val, void __iomem *addr) #ifndef ioread64be_hi_lo #define ioread64be_hi_lo ioread64be_hi_lo -static inline u64 ioread64be_hi_lo(void __iomem *addr) +static inline u64 ioread64be_hi_lo(const void __iomem *addr) { u32 low, high; diff --git a/include/linux/io-64-nonatomic-lo-hi.h b/include/linux/io-64-nonatomic-lo-hi.h index faaa842dbdb9..448a21435dba 100644 --- a/include/linux/io-64-nonatomic-lo-hi.h +++ b/include/linux/io-64-nonatomic-lo-hi.h @@ -57,7 +57,7 @@ static inline void lo_hi_writeq_relaxed(__u64 val, volatile void __iomem *addr) #ifndef ioread64_lo_hi #define ioread64_lo_hi ioread64_lo_hi -static inline u64 ioread64_lo_hi(void __iomem *addr) +static inline u64 ioread64_lo_hi(const void __iomem *addr) { u32 low, high; @@ -79,7 +79,7 @@ static inline void iowrite64_lo_hi(u64 val, void __iomem *addr) #ifndef ioread64be_lo_hi #define ioread64be_lo_hi ioread64be_lo_hi -static inline u64 ioread64be_lo_hi(void __iomem *addr) +static inline u64 ioread64be_lo_hi(const void __iomem *addr) { u32 low, high; diff --git a/lib/iomap.c b/lib/iomap.c index e909ab71e995..fbaa3e8f19d6 100644 --- a/lib/iomap.c +++ b/lib/iomap.c @@ -70,27 +70,27 @@ static void bad_io_access(unsigned long port, const char *access) #define mmio_read64be(addr) swab64(readq(addr)) #endif -unsigned int ioread8(void __iomem *addr) +unsigned int ioread8(const void __iomem *addr) { IO_COND(addr, return inb(port), return readb(addr)); return 0xff; } -unsigned int ioread16(void __iomem *addr) +unsigned int ioread16(const void __iomem *addr) { IO_COND(addr, return inw(port), return readw(addr)); return 0xffff; } -unsigned int ioread16be(void __iomem *addr) +unsigned int ioread16be(const void __iomem *addr) { IO_COND(addr, return pio_read16be(port), return mmio_read16be(addr)); return 0xffff; } -unsigned int ioread32(void __iomem *addr) +unsigned int ioread32(const void __iomem *addr) { IO_COND(addr, return inl(port), return readl(addr)); return 0xffffffff; } -unsigned int ioread32be(void __iomem *addr) +unsigned int ioread32be(const void __iomem *addr) { IO_COND(addr, return pio_read32be(port), return mmio_read32be(addr)); return 0xffffffff; @@ -142,26 +142,26 @@ static u64 pio_read64be_hi_lo(unsigned long port) return lo | (hi << 32); } -u64 ioread64_lo_hi(void __iomem *addr) +u64 ioread64_lo_hi(const void __iomem *addr) { 
IO_COND(addr, return pio_read64_lo_hi(port), return readq(addr)); return 0xffffffffffffffffULL; } -u64 ioread64_hi_lo(void __iomem *addr) +u64 ioread64_hi_lo(const void __iomem *addr) { IO_COND(addr, return pio_read64_hi_lo(port), return readq(addr)); return 0xffffffffffffffffULL; } -u64 ioread64be_lo_hi(void __iomem *addr) +u64 ioread64be_lo_hi(const void __iomem *addr) { IO_COND(addr, return pio_read64be_lo_hi(port), return mmio_read64be(addr)); return 0xffffffffffffffffULL; } -u64 ioread64be_hi_lo(void __iomem *addr) +u64 ioread64be_hi_lo(const void __iomem *addr) { IO_COND(addr, return pio_read64be_hi_lo(port), return mmio_read64be(addr)); @@ -275,7 +275,7 @@ EXPORT_SYMBOL(iowrite64be_hi_lo); * order" (we also don't have IO barriers). */ #ifndef mmio_insb -static inline void mmio_insb(void __iomem *addr, u8 *dst, int count) +static inline void mmio_insb(const void __iomem *addr, u8 *dst, int count) { while (--count >= 0) { u8 data = __raw_readb(addr); @@ -283,7 +283,7 @@ static inline void mmio_insb(void __iomem *addr, u8 *dst, int count) dst++; } } -static inline void mmio_insw(void __iomem *addr, u16 *dst, int count) +static inline void mmio_insw(const void __iomem *addr, u16 *dst, int count) { while (--count >= 0) { u16 data = __raw_readw(addr); @@ -291,7 +291,7 @@ static inline void mmio_insw(void __iomem *addr, u16 *dst, int count) dst++; } } -static inline void mmio_insl(void __iomem *addr, u32 *dst, int count) +static inline void mmio_insl(const void __iomem *addr, u32 *dst, int count) { while (--count >= 0) { u32 data = __raw_readl(addr); @@ -325,15 +325,15 @@ static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count) } #endif -void ioread8_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count) { IO_COND(addr, insb(port,dst,count), mmio_insb(addr, dst, count)); } -void ioread16_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count) { IO_COND(addr, insw(port,dst,count), mmio_insw(addr, dst, count)); } -void ioread32_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count) { IO_COND(addr, insl(port,dst,count), mmio_insl(addr, dst, count)); } -- cgit v1.2.3 From 5f3e7503b97b3d068304ac8bb74faa10b804b24d Mon Sep 17 00:00:00 2001 From: Derek Basehore Date: Fri, 14 Aug 2020 00:56:06 +0300 Subject: drm/panel: Add helper for reading DT rotation This adds a helper function for reading the rotation (panel orientation) from the device tree. 
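As a usage illustration (not part of the patch), a panel driver could call the new of_drm_get_panel_orientation() helper, added in the diff below, roughly as follows while parsing its device tree node; the example_panel structure and function are hypothetical names.

#include <drm/drm_connector.h>
#include <drm/drm_panel.h>
#include <linux/of.h>

struct example_panel {
	struct drm_panel base;
	enum drm_panel_orientation orientation;
};

static int example_panel_parse_dt(struct example_panel *p,
				  const struct device_node *np)
{
	/* Returns 0 and leaves ORIENTATION_UNKNOWN when the "rotation"
	 * property is absent, -EINVAL for an unsupported angle, and other
	 * negative codes for read failures. */
	return of_drm_get_panel_orientation(np, &p->orientation);
}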
Reviewed-by: Sam Ravnborg Tested-by: Dmitry Osipenko Signed-off-by: Derek Basehore Signed-off-by: Dmitry Osipenko Signed-off-by: Sam Ravnborg Link: https://patchwork.freedesktop.org/patch/msgid/20200813215609.28643-2-digetx@gmail.com --- drivers/gpu/drm/drm_panel.c | 43 +++++++++++++++++++++++++++++++++++++++++++ include/drm/drm_panel.h | 10 ++++++++++ 2 files changed, 53 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c index ba11c3641bf3..f634371c717a 100644 --- a/drivers/gpu/drm/drm_panel.c +++ b/drivers/gpu/drm/drm_panel.c @@ -260,6 +260,49 @@ struct drm_panel *of_drm_find_panel(const struct device_node *np) return ERR_PTR(-EPROBE_DEFER); } EXPORT_SYMBOL(of_drm_find_panel); + +/** + * of_drm_get_panel_orientation - look up the orientation of the panel through + * the "rotation" binding from a device tree node + * @np: device tree node of the panel + * @orientation: orientation enum to be filled in + * + * Looks up the rotation of a panel in the device tree. The orientation of the + * panel is expressed as a property name "rotation" in the device tree. The + * rotation in the device tree is counter clockwise. + * + * Return: 0 when a valid rotation value (0, 90, 180, or 270) is read or the + * rotation property doesn't exist. Return a negative error code on failure. + */ +int of_drm_get_panel_orientation(const struct device_node *np, + enum drm_panel_orientation *orientation) +{ + int rotation, ret; + + ret = of_property_read_u32(np, "rotation", &rotation); + if (ret == -EINVAL) { + /* Don't return an error if there's no rotation property. */ + *orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN; + return 0; + } + + if (ret < 0) + return ret; + + if (rotation == 0) + *orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL; + else if (rotation == 90) + *orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP; + else if (rotation == 180) + *orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP; + else if (rotation == 270) + *orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP; + else + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL(of_drm_get_panel_orientation); #endif #if IS_REACHABLE(CONFIG_BACKLIGHT_CLASS_DEVICE) diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h index 45a1b5a2275d..33605c3f0eba 100644 --- a/include/drm/drm_panel.h +++ b/include/drm/drm_panel.h @@ -35,6 +35,8 @@ struct drm_device; struct drm_panel; struct display_timing; +enum drm_panel_orientation; + /** * struct drm_panel_funcs - perform operations on a given panel * @@ -188,11 +190,19 @@ int drm_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector #if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL) struct drm_panel *of_drm_find_panel(const struct device_node *np); +int of_drm_get_panel_orientation(const struct device_node *np, + enum drm_panel_orientation *orientation); #else static inline struct drm_panel *of_drm_find_panel(const struct device_node *np) { return ERR_PTR(-ENODEV); } + +static inline int of_drm_get_panel_orientation(const struct device_node *np, + enum drm_panel_orientation *orientation) +{ + return -ENODEV; +} #endif #if IS_ENABLED(CONFIG_DRM_PANEL) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \ -- cgit v1.2.3 From 53bf2b0e4e4c1ff0a957474237f9dcd20036ca54 Mon Sep 17 00:00:00 2001 From: Lokesh Vutla Date: Thu, 6 Aug 2020 13:18:16 +0530 Subject: firmware: ti_sci: Add support for getting resource with subtype With SYSFW ABI 3.0 changes, interrupts coming out of an interrupt controller is identified by a type and it is consistent 
across SoCs. Similarly global events for Interrupt aggregator. So add an API to get resource range using a resource type. Signed-off-by: Lokesh Vutla Signed-off-by: Marc Zyngier Acked-by: Nishanth Menon Link: https://lore.kernel.org/r/20200806074826.24607-4-lokeshvutla@ti.com --- drivers/firmware/ti_sci.c | 89 +++++++++++++++++++++++++--------- include/linux/soc/ti/ti_sci_protocol.h | 13 +++++ 2 files changed, 80 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c index 03bd01ba5fe7..722af9ee53d6 100644 --- a/drivers/firmware/ti_sci.c +++ b/drivers/firmware/ti_sci.c @@ -3208,61 +3208,50 @@ u32 ti_sci_get_num_resources(struct ti_sci_resource *res) EXPORT_SYMBOL_GPL(ti_sci_get_num_resources); /** - * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device + * devm_ti_sci_get_resource_sets() - Get a TISCI resources assigned to a device * @handle: TISCI handle * @dev: Device pointer to which the resource is assigned * @dev_id: TISCI device id to which the resource is assigned - * @of_prop: property name by which the resource are represented + * @sub_types: Array of sub_types assigned corresponding to device + * @sets: Number of sub_types * * Return: Pointer to ti_sci_resource if all went well else appropriate * error pointer. */ -struct ti_sci_resource * -devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle, - struct device *dev, u32 dev_id, char *of_prop) +static struct ti_sci_resource * +devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle, + struct device *dev, u32 dev_id, u32 *sub_types, + u32 sets) { struct ti_sci_resource *res; bool valid_set = false; - u32 resource_subtype; int i, ret; res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL); if (!res) return ERR_PTR(-ENOMEM); - ret = of_property_count_elems_of_size(dev_of_node(dev), of_prop, - sizeof(u32)); - if (ret < 0) { - dev_err(dev, "%s resource type ids not available\n", of_prop); - return ERR_PTR(ret); - } - res->sets = ret; - + res->sets = sets; res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc), GFP_KERNEL); if (!res->desc) return ERR_PTR(-ENOMEM); for (i = 0; i < res->sets; i++) { - ret = of_property_read_u32_index(dev_of_node(dev), of_prop, i, - &resource_subtype); - if (ret) - return ERR_PTR(-EINVAL); - ret = handle->ops.rm_core_ops.get_range(handle, dev_id, - resource_subtype, + sub_types[i], &res->desc[i].start, &res->desc[i].num); if (ret) { dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n", - dev_id, resource_subtype); + dev_id, sub_types[i]); res->desc[i].start = 0; res->desc[i].num = 0; continue; } dev_dbg(dev, "dev = %d, subtype = %d, start = %d, num = %d\n", - dev_id, resource_subtype, res->desc[i].start, + dev_id, sub_types[i], res->desc[i].start, res->desc[i].num); valid_set = true; @@ -3280,6 +3269,62 @@ devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle, return ERR_PTR(-EINVAL); } +/** + * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device + * @handle: TISCI handle + * @dev: Device pointer to which the resource is assigned + * @dev_id: TISCI device id to which the resource is assigned + * @of_prop: property name by which the resource are represented + * + * Return: Pointer to ti_sci_resource if all went well else appropriate + * error pointer. 
+ */ +struct ti_sci_resource * +devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle, + struct device *dev, u32 dev_id, char *of_prop) +{ + struct ti_sci_resource *res; + u32 *sub_types; + int sets; + + sets = of_property_count_elems_of_size(dev_of_node(dev), of_prop, + sizeof(u32)); + if (sets < 0) { + dev_err(dev, "%s resource type ids not available\n", of_prop); + return ERR_PTR(sets); + } + + sub_types = kcalloc(sets, sizeof(*sub_types), GFP_KERNEL); + if (!sub_types) + return ERR_PTR(-ENOMEM); + + of_property_read_u32_array(dev_of_node(dev), of_prop, sub_types, sets); + res = devm_ti_sci_get_resource_sets(handle, dev, dev_id, sub_types, + sets); + + kfree(sub_types); + return res; +} +EXPORT_SYMBOL_GPL(devm_ti_sci_get_of_resource); + +/** + * devm_ti_sci_get_resource() - Get a resource range assigned to the device + * @handle: TISCI handle + * @dev: Device pointer to which the resource is assigned + * @dev_id: TISCI device id to which the resource is assigned + * @suub_type: TISCI resource subytpe representing the resource. + * + * Return: Pointer to ti_sci_resource if all went well else appropriate + * error pointer. + */ +struct ti_sci_resource * +devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev, + u32 dev_id, u32 sub_type) +{ + return devm_ti_sci_get_resource_sets(handle, dev, dev_id, &sub_type, 1); +} +EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource); + static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode, void *cmd) { diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h index 49c5d29cd33c..cf27b080e148 100644 --- a/include/linux/soc/ti/ti_sci_protocol.h +++ b/include/linux/soc/ti/ti_sci_protocol.h @@ -220,6 +220,9 @@ struct ti_sci_rm_core_ops { u16 *range_start, u16 *range_num); }; +#define TI_SCI_RESASG_SUBTYPE_IR_OUTPUT 0 +#define TI_SCI_RESASG_SUBTYPE_IA_VINT 0xa +#define TI_SCI_RESASG_SUBTYPE_GLOBAL_EVENT_SEVT 0xd /** * struct ti_sci_rm_irq_ops: IRQ management operations * @set_irq: Set an IRQ route between the requested source @@ -556,6 +559,9 @@ u32 ti_sci_get_num_resources(struct ti_sci_resource *res); struct ti_sci_resource * devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle, struct device *dev, u32 dev_id, char *of_prop); +struct ti_sci_resource * +devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev, + u32 dev_id, u32 sub_type); #else /* CONFIG_TI_SCI_PROTOCOL */ @@ -609,6 +615,13 @@ devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle, { return ERR_PTR(-EINVAL); } + +static inline struct ti_sci_resource * +devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev, + u32 dev_id, u32 sub_type); +{ + return ERR_PTR(-EINVAL); +} #endif /* CONFIG_TI_SCI_PROTOCOL */ #endif /* __TISCI_PROTOCOL_H */ -- cgit v1.2.3 From 29e44f4535faa71a70827af3639b5e6762d8f02a Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 17 Aug 2020 11:07:28 +0100 Subject: watch_queue: Limit the number of watches a user can hold Impose a limit on the number of watches that a user can hold so that they can't use this mechanism to fill up all the available memory. This is done by putting a counter in user_struct that's incremented when a watch is allocated and decreased when it is released. If the number exceeds the RLIMIT_NOFILE limit, the watch is rejected with EAGAIN. 
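Illustrative sketch only (the real change is in the diff further below): the accounting follows a charge/check/roll-back pattern on an atomic per-user counter, so a failed attempt leaves the count untouched. The structure and helper names here are made up, not kernel APIs.

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/resource.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>

struct user_budget {
	atomic_t nr_items;	/* plays the role of user_struct::nr_watches */
};

static int user_budget_charge(struct user_budget *b)
{
	/* Charge optimistically, then undo the increment if the new value
	 * exceeds the caller's RLIMIT_NOFILE, as add_watch_to_object() does
	 * in the patch below. */
	if (atomic_inc_return(&b->nr_items) >
	    task_rlimit(current, RLIMIT_NOFILE)) {
		atomic_dec(&b->nr_items);
		return -EAGAIN;
	}
	return 0;
}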
This can be tested by the following means: (1) Create a watch queue and attach it to fd 5 in the program given - in this case, bash: keyctl watch_session /tmp/nlog /tmp/gclog 5 bash (2) In the shell, set the maximum number of files to, say, 99: ulimit -n 99 (3) Add 200 keyrings: for ((i=0; i<200; i++)); do keyctl newring a$i @s || break; done (4) Try to watch all of the keyrings: for ((i=0; i<200; i++)); do echo $i; keyctl watch_add 5 %:a$i || break; done This should fail when the number of watches belonging to the user hits 99. (5) Remove all the keyrings and all of those watches should go away: for ((i=0; i<200; i++)); do keyctl unlink %:a$i; done (6) Kill off the watch queue by exiting the shell spawned by watch_session. Fixes: c73be61cede5 ("pipe: Add general notification queue support") Reported-by: Linus Torvalds Signed-off-by: David Howells Reviewed-by: Jarkko Sakkinen Signed-off-by: Linus Torvalds --- include/linux/sched/user.h | 3 +++ kernel/watch_queue.c | 8 ++++++++ 2 files changed, 11 insertions(+) (limited to 'include') diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h index 917d88edb7b9..a8ec3b6093fc 100644 --- a/include/linux/sched/user.h +++ b/include/linux/sched/user.h @@ -36,6 +36,9 @@ struct user_struct { defined(CONFIG_NET) || defined(CONFIG_IO_URING) atomic_long_t locked_vm; #endif +#ifdef CONFIG_WATCH_QUEUE + atomic_t nr_watches; /* The number of watches this user currently has */ +#endif /* Miscellaneous per-user rate limit */ struct ratelimit_state ratelimit; diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c index f74020f6bd9d..0ef8f65bd2d7 100644 --- a/kernel/watch_queue.c +++ b/kernel/watch_queue.c @@ -393,6 +393,7 @@ static void free_watch(struct rcu_head *rcu) struct watch *watch = container_of(rcu, struct watch, rcu); put_watch_queue(rcu_access_pointer(watch->queue)); + atomic_dec(&watch->cred->user->nr_watches); put_cred(watch->cred); } @@ -452,6 +453,13 @@ int add_watch_to_object(struct watch *watch, struct watch_list *wlist) watch->cred = get_current_cred(); rcu_assign_pointer(watch->watch_list, wlist); + if (atomic_inc_return(&watch->cred->user->nr_watches) > + task_rlimit(current, RLIMIT_NOFILE)) { + atomic_dec(&watch->cred->user->nr_watches); + put_cred(watch->cred); + return -EAGAIN; + } + spin_lock_bh(&wqueue->lock); kref_get(&wqueue->usage); kref_get(&watch->usage); -- cgit v1.2.3 From 77ef38574beb3e0b414db48e9c0f04633df68ba6 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 14 Aug 2020 11:38:42 +0200 Subject: drm/modeset-lock: Take the modeset BKL for legacy drivers This fell off in the conversion in commit 9bcaa3fe58ab7559e71df798bcff6e0795158695 Author: Michal Orzel Date: Tue Apr 28 19:10:04 2020 +0200 drm: Replace drm_modeset_lock/unlock_all with DRM_MODESET_LOCK_ALL_* helpers but it's caught by the drm_warn_on_modeset_not_all_locked() that the legacy modeset code uses. Since this is the bkl and it's unclear what's all protected, play it safe and grab it again for legacy drivers. Unfortunately this means we need to sprinkle a few more #includes around. Also we need to add the drm_device as a parameter to the _END macro. Finally remove the mute_lock() from setcrtc, since that's now done by the macro. 
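For context, an illustration of the calling pattern the converted ioctls use (not taken from the patch; "some_legacy_ioctl" is a made-up name). After this change the BEGIN/END pair also takes and releases dev->mode_config.mutex for drivers that have not been converted to atomic modesetting, which is why the END macro now needs the drm_device pointer.

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_modeset_lock.h>

static int some_legacy_ioctl(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx,
				   DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);

	/* ... work that needs every modeset lock (and, for legacy drivers,
	 * the mode_config BKL) goes here ... */

	/* Drops all locks, retries on -EDEADLK, and now also releases
	 * dev->mode_config.mutex for non-atomic drivers. */
	DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);

	return ret;
}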
Cc: Alex Deucher References: https://gitlab.freedesktop.org/drm/amd/-/issues/1224 Fixes: 9bcaa3fe58ab ("drm: Replace drm_modeset_lock/unlock_all with DRM_MODESET_LOCK_ALL_* helpers") Cc: Michal Orzel Cc: Daniel Vetter Cc: Maarten Lankhorst Cc: Maxime Ripard Cc: Thomas Zimmermann Cc: David Airlie Cc: Daniel Vetter Cc: dri-devel@lists.freedesktop.org Cc: # v5.8+ Signed-off-by: Daniel Vetter Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher Link: https://patchwork.freedesktop.org/patch/msgid/20200814093842.3048472-1-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_atomic_helper.c | 7 ++++--- drivers/gpu/drm/drm_color_mgmt.c | 2 +- drivers/gpu/drm/drm_crtc.c | 4 +--- drivers/gpu/drm/drm_mode_object.c | 4 ++-- drivers/gpu/drm/drm_plane.c | 2 +- include/drm/drm_modeset_lock.h | 9 +++++++-- 6 files changed, 16 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 85d163f16801..b78e142a5620 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -3105,7 +3106,7 @@ void drm_atomic_helper_shutdown(struct drm_device *dev) if (ret) DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret); - DRM_MODESET_LOCK_ALL_END(ctx, ret); + DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); } EXPORT_SYMBOL(drm_atomic_helper_shutdown); @@ -3245,7 +3246,7 @@ struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev) } unlock: - DRM_MODESET_LOCK_ALL_END(ctx, err); + DRM_MODESET_LOCK_ALL_END(dev, ctx, err); if (err) return ERR_PTR(err); @@ -3326,7 +3327,7 @@ int drm_atomic_helper_resume(struct drm_device *dev, err = drm_atomic_helper_commit_duplicated_state(state, &ctx); - DRM_MODESET_LOCK_ALL_END(ctx, err); + DRM_MODESET_LOCK_ALL_END(dev, ctx, err); drm_atomic_state_put(state); return err; diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c index c93123ff7c21..138ff34b31db 100644 --- a/drivers/gpu/drm/drm_color_mgmt.c +++ b/drivers/gpu/drm/drm_color_mgmt.c @@ -294,7 +294,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev, crtc->gamma_size, &ctx); out: - DRM_MODESET_LOCK_ALL_END(ctx, ret); + DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); return ret; } diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 4936e1080e41..eb1c33e5d0f4 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -561,7 +561,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, if (crtc_req->mode_valid && !drm_lease_held(file_priv, plane->base.id)) return -EACCES; - mutex_lock(&crtc->dev->mode_config.mutex); DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret); @@ -728,8 +727,7 @@ out: fb = NULL; mode = NULL; - DRM_MODESET_LOCK_ALL_END(ctx, ret); - mutex_unlock(&crtc->dev->mode_config.mutex); + DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); return ret; } diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c index 901b078abf40..db05f386a709 100644 --- a/drivers/gpu/drm/drm_mode_object.c +++ b/drivers/gpu/drm/drm_mode_object.c @@ -428,7 +428,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data, out_unref: drm_mode_object_put(obj); out: - DRM_MODESET_LOCK_ALL_END(ctx, ret); + DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); return ret; } @@ -470,7 +470,7 @@ static int set_property_legacy(struct drm_mode_object *obj, break; } drm_property_change_valid_put(prop, ref); - 
DRM_MODESET_LOCK_ALL_END(ctx, ret); + DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); return ret; } diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index 4af173ced327..fdbafc2b8199 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -791,7 +791,7 @@ static int setplane_internal(struct drm_plane *plane, crtc_x, crtc_y, crtc_w, crtc_h, src_x, src_y, src_w, src_h, &ctx); - DRM_MODESET_LOCK_ALL_END(ctx, ret); + DRM_MODESET_LOCK_ALL_END(plane->dev, ctx, ret); return ret; } diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h index 4fc9a43ac45a..aafd07388eb7 100644 --- a/include/drm/drm_modeset_lock.h +++ b/include/drm/drm_modeset_lock.h @@ -164,6 +164,8 @@ int drm_modeset_lock_all_ctx(struct drm_device *dev, * is 0, so no error checking is necessary */ #define DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, flags, ret) \ + if (!drm_drv_uses_atomic_modeset(dev)) \ + mutex_lock(&dev->mode_config.mutex); \ drm_modeset_acquire_init(&ctx, flags); \ modeset_lock_retry: \ ret = drm_modeset_lock_all_ctx(dev, &ctx); \ @@ -172,6 +174,7 @@ modeset_lock_retry: \ /** * DRM_MODESET_LOCK_ALL_END - Helper to release and cleanup modeset locks + * @dev: drm device * @ctx: local modeset acquire context, will be dereferenced * @ret: local ret/err/etc variable to track error status * @@ -188,7 +191,7 @@ modeset_lock_retry: \ * to that failure. In both of these cases the code between BEGIN/END will not * be run, so the failure will reflect the inability to grab the locks. */ -#define DRM_MODESET_LOCK_ALL_END(ctx, ret) \ +#define DRM_MODESET_LOCK_ALL_END(dev, ctx, ret) \ modeset_lock_fail: \ if (ret == -EDEADLK) { \ ret = drm_modeset_backoff(&ctx); \ @@ -196,6 +199,8 @@ modeset_lock_fail: \ goto modeset_lock_retry; \ } \ drm_modeset_drop_locks(&ctx); \ - drm_modeset_acquire_fini(&ctx); + drm_modeset_acquire_fini(&ctx); \ + if (!drm_drv_uses_atomic_modeset(dev)) \ + mutex_unlock(&dev->mode_config.mutex); #endif /* DRM_MODESET_LOCK_H_ */ -- cgit v1.2.3 From 0b76e642f9ad2471e899e2dd71b9543b7e85e9f6 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sun, 16 Aug 2020 15:25:49 -0700 Subject: phylink: : fix function prototype kernel-doc warning Fix a kernel-doc warning for the pcs_config() function prototype: ../include/linux/phylink.h:406: warning: Excess function parameter 'permit_pause_to_mac' description in 'pcs_config' Fixes: 7137e18f6f88 ("net: phylink: add struct phylink_pcs") Signed-off-by: Randy Dunlap Cc: Russell King Cc: David S. Miller Cc: netdev@vger.kernel.org Signed-off-by: David S. Miller --- include/linux/phylink.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/phylink.h b/include/linux/phylink.h index a8e876317e25..c36fb41a7d90 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -402,7 +402,8 @@ void pcs_get_state(struct phylink_pcs *pcs, * For most 10GBASE-R, there is no advertisement. 
*/ int pcs_config(struct phylink_pcs *pcs, unsigned int mode, - phy_interface_t interface, const unsigned long *advertising); + phy_interface_t interface, const unsigned long *advertising, + bool permit_pause_to_mac); /** * pcs_an_restart() - restart 802.3z BaseX autonegotiation -- cgit v1.2.3 From bd05220c7be3356046861c317d9c287ca50445ba Mon Sep 17 00:00:00 2001 From: Jessica Clarke Date: Tue, 11 Aug 2020 19:24:57 +0100 Subject: arch/ia64: Restore arch-specific pgd_offset_k implementation IA-64 is special and treats pgd_offset_k() differently to pgd_offset(), using different formulae to calculate the indices into the kernel and user PGDs. The index into the user PGDs takes into account the region number, but the index into the kernel (init_mm) PGD always assumes a predefined kernel region number. Commit 974b9b2c68f3 ("mm: consolidate pte_index() and pte_offset_*() definitions") made IA-64 use a generic pgd_offset_k() which incorrectly used pgd_index() for kernel page tables. As a result, the index into the kernel PGD was going out of bounds and the kernel hung during early boot. Allow overrides of pgd_offset_k() and override it on IA-64 with the old implementation that will correctly index the kernel PGD. Fixes: 974b9b2c68f3 ("mm: consolidate pte_index() and pte_offset_*() definitions") Reported-by: John Paul Adrian Glaubitz Signed-off-by: Jessica Clarke Tested-by: John Paul Adrian Glaubitz Acked-by: Tony Luck Signed-off-by: Mike Rapoport --- arch/ia64/include/asm/pgtable.h | 9 +++++++++ include/linux/pgtable.h | 2 ++ 2 files changed, 11 insertions(+) (limited to 'include') diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h index 10850897a91c..779b6972aa84 100644 --- a/arch/ia64/include/asm/pgtable.h +++ b/arch/ia64/include/asm/pgtable.h @@ -366,6 +366,15 @@ pgd_index (unsigned long address) } #define pgd_index pgd_index +/* + * In the kernel's mapped region we know everything is in region number 5, so + * as an optimisation its PGD already points to the area for that region. + * However, this also means that we cannot use pgd_index() and we must + * never add the region here. + */ +#define pgd_offset_k(addr) \ + (init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))) + /* Look up a pgd entry in the gate area. On IA-64, the gate-area resides in the kernel-mapped segment, hence we use pgd_offset_k() here. */ diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index a124c21e3204..e8cbc2e795d5 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -117,7 +117,9 @@ static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address) * a shortcut which implies the use of the kernel's pgd, instead * of a process's */ +#ifndef pgd_offset_k #define pgd_offset_k(address) pgd_offset(&init_mm, (address)) +#endif /* * In many cases it is known that a virtual address is mapped at PMD or PTE -- cgit v1.2.3 From a0308938ec81cd0dca9d75833ec0dd1b8708917e Mon Sep 17 00:00:00 2001 From: David Stevens Date: Tue, 18 Aug 2020 16:13:41 +0900 Subject: virtio: add dma-buf support for exported objects This change adds a new flavor of dma-bufs that can be used by virtio drivers to share exported objects. A virtio dma-buf can be queried by virtio drivers to obtain the UUID which identifies the underlying exported object. Signed-off-by: David Stevens Acked-by: Michael S. 
Tsirkin Link: http://patchwork.freedesktop.org/patch/msgid/20200818071343.3461203-2-stevensd@chromium.org Signed-off-by: Gerd Hoffmann --- drivers/virtio/Makefile | 2 +- drivers/virtio/virtio.c | 6 +++ drivers/virtio/virtio_dma_buf.c | 85 +++++++++++++++++++++++++++++++++++++++++ include/linux/virtio.h | 1 + include/linux/virtio_dma_buf.h | 37 ++++++++++++++++++ 5 files changed, 130 insertions(+), 1 deletion(-) create mode 100644 drivers/virtio/virtio_dma_buf.c create mode 100644 include/linux/virtio_dma_buf.h (limited to 'include') diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile index 4d993791f2d7..49da768ee7fd 100644 --- a/drivers/virtio/Makefile +++ b/drivers/virtio/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o +obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o virtio_dma_buf.o obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index a977e32a88f2..5d46f0ded92d 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -357,6 +357,12 @@ out: } EXPORT_SYMBOL_GPL(register_virtio_device); +bool is_virtio_device(struct device *dev) +{ + return dev->bus == &virtio_bus; +} +EXPORT_SYMBOL_GPL(is_virtio_device); + void unregister_virtio_device(struct virtio_device *dev) { int index = dev->index; /* save for after device release */ diff --git a/drivers/virtio/virtio_dma_buf.c b/drivers/virtio/virtio_dma_buf.c new file mode 100644 index 000000000000..45d6e8647dcf --- /dev/null +++ b/drivers/virtio/virtio_dma_buf.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * dma-bufs for virtio exported objects + * + * Copyright (C) 2020 Google, Inc. + */ + +#include + +/** + * virtio_dma_buf_export - Creates a new dma-buf for a virtio exported object + * @exp_info: [in] see dma_buf_export(). ops MUST refer to a dma_buf_ops + * struct embedded in a virtio_dma_buf_ops. + * + * This wraps dma_buf_export() to allow virtio drivers to create a dma-buf + * for an virtio exported object that can be queried by other virtio drivers + * for the object's UUID. 
+ */ +struct dma_buf *virtio_dma_buf_export + (const struct dma_buf_export_info *exp_info) +{ + const struct virtio_dma_buf_ops *virtio_ops = + container_of(exp_info->ops, + const struct virtio_dma_buf_ops, ops); + + if (!exp_info->ops || + exp_info->ops->attach != &virtio_dma_buf_attach || + !virtio_ops->get_uuid) { + return ERR_PTR(-EINVAL); + } + + return dma_buf_export(exp_info); +} +EXPORT_SYMBOL(virtio_dma_buf_export); + +/** + * virtio_dma_buf_attach - mandatory attach callback for virtio dma-bufs + */ +int virtio_dma_buf_attach(struct dma_buf *dma_buf, + struct dma_buf_attachment *attach) +{ + int ret; + const struct virtio_dma_buf_ops *ops = + container_of(dma_buf->ops, + const struct virtio_dma_buf_ops, ops); + + if (ops->device_attach) { + ret = ops->device_attach(dma_buf, attach); + if (ret) + return ret; + } + return 0; +} +EXPORT_SYMBOL(virtio_dma_buf_attach); + +/** + * is_virtio_dma_buf - returns true if the given dma-buf is a virtio dma-buf + * @dma_buf: buffer to query + */ +bool is_virtio_dma_buf(struct dma_buf *dma_buf) +{ + return dma_buf->ops->attach == &virtio_dma_buf_attach; +} +EXPORT_SYMBOL(is_virtio_dma_buf); + +/** + * virtio_dma_buf_get_uuid - gets a virtio dma-buf's exported object's uuid + * @dma_buf: [in] buffer to query + * @uuid: [out] the uuid + * + * Returns: 0 on success, negative on failure. + */ +int virtio_dma_buf_get_uuid(struct dma_buf *dma_buf, + uuid_t *uuid) +{ + const struct virtio_dma_buf_ops *ops = + container_of(dma_buf->ops, + const struct virtio_dma_buf_ops, ops); + + if (!is_virtio_dma_buf(dma_buf)) + return -EINVAL; + + return ops->get_uuid(dma_buf, uuid); +} +EXPORT_SYMBOL(virtio_dma_buf_get_uuid); diff --git a/include/linux/virtio.h b/include/linux/virtio.h index a493eac08393..55ea329fe72a 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -127,6 +127,7 @@ static inline struct virtio_device *dev_to_virtio(struct device *_dev) void virtio_add_status(struct virtio_device *dev, unsigned int status); int register_virtio_device(struct virtio_device *dev); void unregister_virtio_device(struct virtio_device *dev); +bool is_virtio_device(struct device *dev); void virtio_break_device(struct virtio_device *dev); diff --git a/include/linux/virtio_dma_buf.h b/include/linux/virtio_dma_buf.h new file mode 100644 index 000000000000..a2fdf217ac62 --- /dev/null +++ b/include/linux/virtio_dma_buf.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * dma-bufs for virtio exported objects + * + * Copyright (C) 2020 Google, Inc. + */ + +#ifndef _LINUX_VIRTIO_DMA_BUF_H +#define _LINUX_VIRTIO_DMA_BUF_H + +#include +#include +#include + +/** + * struct virtio_dma_buf_ops - operations possible on exported object dma-buf + * @ops: the base dma_buf_ops. ops.attach MUST be virtio_dma_buf_attach. + * @device_attach: [optional] callback invoked by virtio_dma_buf_attach during + * all attach operations. + * @get_uid: [required] callback to get the uuid of the exported object. 
+ */ +struct virtio_dma_buf_ops { + struct dma_buf_ops ops; + int (*device_attach)(struct dma_buf *dma_buf, + struct dma_buf_attachment *attach); + int (*get_uuid)(struct dma_buf *dma_buf, uuid_t *uuid); +}; + +int virtio_dma_buf_attach(struct dma_buf *dma_buf, + struct dma_buf_attachment *attach); + +struct dma_buf *virtio_dma_buf_export + (const struct dma_buf_export_info *exp_info); +bool is_virtio_dma_buf(struct dma_buf *dma_buf); +int virtio_dma_buf_get_uuid(struct dma_buf *dma_buf, uuid_t *uuid); + +#endif /* _LINUX_VIRTIO_DMA_BUF_H */ -- cgit v1.2.3 From 592d9fba33c275b72cb4dae99c187444daafcd33 Mon Sep 17 00:00:00 2001 From: David Stevens Date: Tue, 18 Aug 2020 16:13:42 +0900 Subject: virtio-gpu: add VIRTIO_GPU_F_RESOURCE_UUID feature This feature allows the guest to request a UUID from the host for a particular virtio_gpu resource. The UUID can then be shared with other virtio devices, to allow the other host devices to access the virtio_gpu's corresponding host resource. Signed-off-by: David Stevens Acked-by: Michael S. Tsirkin Link: http://patchwork.freedesktop.org/patch/msgid/20200818071343.3461203-3-stevensd@chromium.org Signed-off-by: Gerd Hoffmann --- include/uapi/linux/virtio_gpu.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'include') diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h index 0c85914d9369..9721d58b4d58 100644 --- a/include/uapi/linux/virtio_gpu.h +++ b/include/uapi/linux/virtio_gpu.h @@ -50,6 +50,10 @@ * VIRTIO_GPU_CMD_GET_EDID */ #define VIRTIO_GPU_F_EDID 1 +/* + * VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID + */ +#define VIRTIO_GPU_F_RESOURCE_UUID 2 enum virtio_gpu_ctrl_type { VIRTIO_GPU_UNDEFINED = 0, @@ -66,6 +70,7 @@ enum virtio_gpu_ctrl_type { VIRTIO_GPU_CMD_GET_CAPSET_INFO, VIRTIO_GPU_CMD_GET_CAPSET, VIRTIO_GPU_CMD_GET_EDID, + VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID, /* 3d commands */ VIRTIO_GPU_CMD_CTX_CREATE = 0x0200, @@ -87,6 +92,7 @@ enum virtio_gpu_ctrl_type { VIRTIO_GPU_RESP_OK_CAPSET_INFO, VIRTIO_GPU_RESP_OK_CAPSET, VIRTIO_GPU_RESP_OK_EDID, + VIRTIO_GPU_RESP_OK_RESOURCE_UUID, /* error responses */ VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200, @@ -340,4 +346,17 @@ enum virtio_gpu_formats { VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM = 134, }; +/* VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID */ +struct virtio_gpu_resource_assign_uuid { + struct virtio_gpu_ctrl_hdr hdr; + __le32 resource_id; + __le32 padding; +}; + +/* VIRTIO_GPU_RESP_OK_RESOURCE_UUID */ +struct virtio_gpu_resp_resource_uuid { + struct virtio_gpu_ctrl_hdr hdr; + __u8 uuid[16]; +}; + #endif -- cgit v1.2.3 From e2d732fdb7a9e421720a644580cd6a9400f97f60 Mon Sep 17 00:00:00 2001 From: Luben Tuikov Date: Tue, 11 Aug 2020 19:59:58 -0400 Subject: drm/scheduler: Scheduler priority fixes (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove DRM_SCHED_PRIORITY_LOW, as it was used in only one place. Rename and separate by a line DRM_SCHED_PRIORITY_MAX to DRM_SCHED_PRIORITY_COUNT as it represents a (total) count of said priorities and it is used as such in loops throughout the code. (0-based indexing is the the count number.) Remove redundant word HIGH in priority names, and rename *KERNEL* to *HIGH*, as it really means that, high. v2: Add back KERNEL and remove SW and HW, in lieu of a single HIGH between NORMAL and KERNEL. 
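Reassembled from the gpu_scheduler.h hunk below (nothing new is added here), the enum reads as follows after this patch. DRM_SCHED_PRIORITY_COUNT is only a terminator used to size arrays such as sched_rq[] and num_jobs[], and descending scans now run from COUNT - 1 down to MIN.

enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,		/* 0: lowest valid priority */
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH,	/* single HIGH replaces HIGH_SW and HIGH_HW */
	DRM_SCHED_PRIORITY_KERNEL,	/* highest valid priority */

	DRM_SCHED_PRIORITY_COUNT,	/* number of priorities, not a priority itself */
	DRM_SCHED_PRIORITY_INVALID = -1,
	DRM_SCHED_PRIORITY_UNSET = -2
};

The INVALID entry is still present at this point; it is removed by the following patch in this series.
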
Signed-off-by: Luben Tuikov Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 6 +++--- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 +- drivers/gpu/drm/scheduler/sched_main.c | 4 ++-- include/drm/gpu_scheduler.h | 12 +++++++----- 8 files changed, 18 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 8842c55d4490..fc695126b6e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -46,7 +46,7 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = { static int amdgpu_ctx_priority_permit(struct drm_file *filp, enum drm_sched_priority priority) { - if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX) + if (priority < 0 || priority >= DRM_SCHED_PRIORITY_COUNT) return -EINVAL; /* NORMAL and below are accessible by everyone */ @@ -65,7 +65,7 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp, static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio) { switch (prio) { - case DRM_SCHED_PRIORITY_HIGH_HW: + case DRM_SCHED_PRIORITY_HIGH: case DRM_SCHED_PRIORITY_KERNEL: return AMDGPU_GFX_PIPE_PRIO_HIGH; default: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 937029ad5271..dcfe8a3b03ff 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -251,7 +251,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched) int i; /* Signal all jobs not yet scheduled */ - for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { + for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { struct drm_sched_rq *rq = &sched->sched_rq[i]; if (!rq) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 13ea8ebc421c..6d4fc79bf84a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -267,7 +267,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, &ring->sched; } - for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i) + for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; ++i) atomic_set(&ring->num_jobs[i], 0); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index da871d84b742..7112137689db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -243,7 +243,7 @@ struct amdgpu_ring { bool has_compute_vm_bug; bool no_scheduler; - atomic_t num_jobs[DRM_SCHED_PRIORITY_MAX]; + atomic_t num_jobs[DRM_SCHED_PRIORITY_COUNT]; struct mutex priority_mutex; /* protected by priority_mutex */ int priority; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index c799691dfa84..17661ede9488 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c @@ -36,14 +36,14 @@ enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority) { switch (amdgpu_priority) { case AMDGPU_CTX_PRIORITY_VERY_HIGH: - return DRM_SCHED_PRIORITY_HIGH_HW; + return DRM_SCHED_PRIORITY_HIGH; case AMDGPU_CTX_PRIORITY_HIGH: - return 
DRM_SCHED_PRIORITY_HIGH_SW; + return DRM_SCHED_PRIORITY_HIGH; case AMDGPU_CTX_PRIORITY_NORMAL: return DRM_SCHED_PRIORITY_NORMAL; case AMDGPU_CTX_PRIORITY_LOW: case AMDGPU_CTX_PRIORITY_VERY_LOW: - return DRM_SCHED_PRIORITY_LOW; + return DRM_SCHED_PRIORITY_MIN; case AMDGPU_CTX_PRIORITY_UNSET: return DRM_SCHED_PRIORITY_UNSET; default: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 2d502e98fad0..3d2712a4cf4d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -2109,7 +2109,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) ring = adev->mman.buffer_funcs_ring; sched = &ring->sched; r = drm_sched_entity_init(&adev->mman.entity, - DRM_SCHED_PRIORITY_KERNEL, &sched, + DRM_SCHED_PRIORITY_KERNEL, &sched, 1, NULL); if (r) { DRM_ERROR("Failed setting up TTM BO move entity (%d)\n", diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 2f319102ae9f..19f381e5e661 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -623,7 +623,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched) return NULL; /* Kernel run queue has higher priority than normal run queue*/ - for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { + for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { entity = drm_sched_rq_select_entity(&sched->sched_rq[i]); if (entity) break; @@ -851,7 +851,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, sched->name = name; sched->timeout = timeout; sched->hang_limit = hang_limit; - for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++) + for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++) drm_sched_rq_init(sched, &sched->sched_rq[i]); init_waitqueue_head(&sched->wake_up_worker); diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index a21b3b92135a..b30026ccd564 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -33,14 +33,16 @@ struct drm_gpu_scheduler; struct drm_sched_rq; +/* These are often used as an (initial) index + * to an array, and as such should start at 0. + */ enum drm_sched_priority { DRM_SCHED_PRIORITY_MIN, - DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN, DRM_SCHED_PRIORITY_NORMAL, - DRM_SCHED_PRIORITY_HIGH_SW, - DRM_SCHED_PRIORITY_HIGH_HW, + DRM_SCHED_PRIORITY_HIGH, DRM_SCHED_PRIORITY_KERNEL, - DRM_SCHED_PRIORITY_MAX, + + DRM_SCHED_PRIORITY_COUNT, DRM_SCHED_PRIORITY_INVALID = -1, DRM_SCHED_PRIORITY_UNSET = -2 }; @@ -274,7 +276,7 @@ struct drm_gpu_scheduler { uint32_t hw_submission_limit; long timeout; const char *name; - struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_MAX]; + struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_COUNT]; wait_queue_head_t wake_up_worker; wait_queue_head_t job_scheduled; atomic_t hw_rq_count; -- cgit v1.2.3 From 9af5e21dace795891544042abda877ada39abacc Mon Sep 17 00:00:00 2001 From: Luben Tuikov Date: Tue, 11 Aug 2020 20:56:58 -0400 Subject: drm/scheduler: Remove priority macro INVALID (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove DRM_SCHED_PRIORITY_INVALID. We no longer carry around an invalid priority and cut it off at the source. Backwards compatibility behaviour of AMDGPU CTX IOCTL passing in garbage for context priority from user space and then mapping that to DRM_SCHED_PRIORITY_NORMAL is preserved. 
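Condensed from the amdgpu_ctx_ioctl() hunk below (no new behaviour, just the resulting call pattern in one place, with args being the ioctl argument as in that hunk): the helper now reports failure through its return value, and the ioctl keeps the old tolerance for garbage priorities by falling back to NORMAL.

	enum drm_sched_priority priority;
	int r;

	r = amdgpu_to_sched_priority(args->in.priority, &priority);
	/* For backwards compatibility, garbage from user space is still
	 * accepted: it now shows up as -EINVAL instead of a special
	 * INVALID priority value.
	 */
	if (r == -EINVAL)
		priority = DRM_SCHED_PRIORITY_NORMAL;
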
v2: Revert "res" --> "r" and "prio" --> "priority". Signed-off-by: Luben Tuikov Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 5 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 40 +++++++++++++++++++++++-------- drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h | 3 ++- include/drm/gpu_scheduler.h | 1 - 4 files changed, 34 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index fc695126b6e7..ba243cc8f585 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -388,13 +388,12 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, struct amdgpu_device *adev = dev->dev_private; struct amdgpu_fpriv *fpriv = filp->driver_priv; - r = 0; id = args->in.ctx_id; - priority = amdgpu_to_sched_priority(args->in.priority); + r = amdgpu_to_sched_priority(args->in.priority, &priority); /* For backwards compatibility reasons, we need to accept * ioctls with garbage in the priority field */ - if (priority == DRM_SCHED_PRIORITY_INVALID) + if (r == -EINVAL) priority = DRM_SCHED_PRIORITY_NORMAL; switch (args->in.op) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index 17661ede9488..9581283a4c78 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c @@ -32,24 +32,32 @@ #include "amdgpu_vm.h" -enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority) +int amdgpu_to_sched_priority(int amdgpu_priority, + enum drm_sched_priority *prio) { switch (amdgpu_priority) { case AMDGPU_CTX_PRIORITY_VERY_HIGH: - return DRM_SCHED_PRIORITY_HIGH; + *prio = DRM_SCHED_PRIORITY_HIGH; + break; case AMDGPU_CTX_PRIORITY_HIGH: - return DRM_SCHED_PRIORITY_HIGH; + *prio = DRM_SCHED_PRIORITY_HIGH; + break; case AMDGPU_CTX_PRIORITY_NORMAL: - return DRM_SCHED_PRIORITY_NORMAL; + *prio = DRM_SCHED_PRIORITY_NORMAL; + break; case AMDGPU_CTX_PRIORITY_LOW: case AMDGPU_CTX_PRIORITY_VERY_LOW: - return DRM_SCHED_PRIORITY_MIN; + *prio = DRM_SCHED_PRIORITY_MIN; + break; case AMDGPU_CTX_PRIORITY_UNSET: - return DRM_SCHED_PRIORITY_UNSET; + *prio = DRM_SCHED_PRIORITY_UNSET; + break; default: WARN(1, "Invalid context priority %d\n", amdgpu_priority); - return DRM_SCHED_PRIORITY_INVALID; + return -EINVAL; } + + return 0; } static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev, @@ -119,9 +127,20 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data, enum drm_sched_priority priority; int r; - priority = amdgpu_to_sched_priority(args->in.priority); - if (priority == DRM_SCHED_PRIORITY_INVALID) + /* First check the op, then the op's argument. + */ + switch (args->in.op) { + case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE: + case AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE: + break; + default: + DRM_ERROR("Invalid sched op specified: %d\n", args->in.op); return -EINVAL; + } + + r = amdgpu_to_sched_priority(args->in.priority, &priority); + if (r) + return r; switch (args->in.op) { case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE: @@ -136,7 +155,8 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data, priority); break; default: - DRM_ERROR("Invalid sched op specified: %d\n", args->in.op); + /* Impossible. 
+ */ r = -EINVAL; break; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h index 12299fd95691..67e5b2472f6a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h @@ -30,7 +30,8 @@ enum drm_sched_priority; struct drm_device; struct drm_file; -enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority); +int amdgpu_to_sched_priority(int amdgpu_priority, + enum drm_sched_priority *prio); int amdgpu_sched_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index b30026ccd564..a33590e62108 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -43,7 +43,6 @@ enum drm_sched_priority { DRM_SCHED_PRIORITY_KERNEL, DRM_SCHED_PRIORITY_COUNT, - DRM_SCHED_PRIORITY_INVALID = -1, DRM_SCHED_PRIORITY_UNSET = -2 }; -- cgit v1.2.3 From 4ef1a7cb08e94da1f2f2a34ee6cefe7ae142dc98 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 17 Aug 2020 14:30:49 +0800 Subject: ipv6: some fixes for ipv6_dev_find() This patch is to do 3 things for ipv6_dev_find(): As David A. noticed, - rt6_lookup() is not really needed. Different from __ip_dev_find(), ipv6_dev_find() doesn't have a compatibility problem, so remove it. As Hideaki suggested, - "valid" (non-tentative) check for the address is also needed. ipv6_chk_addr() calls ipv6_chk_addr_and_flags(), which will traverse the address hash list, but it's heavy to be called inside ipv6_dev_find(). This patch is to reuse the code of ipv6_chk_addr_and_flags() for ipv6_dev_find(). - dev parameter is passed into ipv6_dev_find(), as link-local addresses from user space has sin6_scope_id set and the dev lookup needs it. Fixes: 81f6cb31222d ("ipv6: add ipv6_dev_find()") Suggested-by: YOSHIFUJI Hideaki Reported-by: David Ahern Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/addrconf.h | 3 ++- net/ipv6/addrconf.c | 60 +++++++++++++++++++------------------------------- net/tipc/udp_media.c | 8 +++---- 3 files changed, 28 insertions(+), 43 deletions(-) (limited to 'include') diff --git a/include/net/addrconf.h b/include/net/addrconf.h index ba3f6c15ad2b..18f783dcd55f 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -97,7 +97,8 @@ bool ipv6_chk_custom_prefix(const struct in6_addr *addr, int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev); -struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr); +struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr, + struct net_device *dev); struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr, diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 8e761b8c47c6..01146b66d666 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1893,12 +1893,13 @@ EXPORT_SYMBOL(ipv6_chk_addr); * 2. 
does the address exist on the specific device * (skip_dev_check = false) */ -int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr, - const struct net_device *dev, bool skip_dev_check, - int strict, u32 banned_flags) +static struct net_device * +__ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr, + const struct net_device *dev, bool skip_dev_check, + int strict, u32 banned_flags) { unsigned int hash = inet6_addr_hash(net, addr); - const struct net_device *l3mdev; + struct net_device *l3mdev, *ndev; struct inet6_ifaddr *ifp; u32 ifp_flags; @@ -1909,10 +1910,11 @@ int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr, dev = NULL; hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) { - if (!net_eq(dev_net(ifp->idev->dev), net)) + ndev = ifp->idev->dev; + if (!net_eq(dev_net(ndev), net)) continue; - if (l3mdev_master_dev_rcu(ifp->idev->dev) != l3mdev) + if (l3mdev_master_dev_rcu(ndev) != l3mdev) continue; /* Decouple optimistic from tentative for evaluation here. @@ -1923,15 +1925,23 @@ int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr, : ifp->flags; if (ipv6_addr_equal(&ifp->addr, addr) && !(ifp_flags&banned_flags) && - (!dev || ifp->idev->dev == dev || + (!dev || ndev == dev || !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) { rcu_read_unlock(); - return 1; + return ndev; } } rcu_read_unlock(); - return 0; + return NULL; +} + +int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr, + const struct net_device *dev, bool skip_dev_check, + int strict, u32 banned_flags) +{ + return __ipv6_chk_addr_and_flags(net, addr, dev, skip_dev_check, + strict, banned_flags) ? 1 : 0; } EXPORT_SYMBOL(ipv6_chk_addr_and_flags); @@ -1990,35 +2000,11 @@ EXPORT_SYMBOL(ipv6_chk_prefix); * * The caller should be protected by RCU, or RTNL. 
*/ -struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr) +struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr, + struct net_device *dev) { - unsigned int hash = inet6_addr_hash(net, addr); - struct inet6_ifaddr *ifp, *result = NULL; - struct net_device *dev = NULL; - - rcu_read_lock(); - hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) { - if (net_eq(dev_net(ifp->idev->dev), net) && - ipv6_addr_equal(&ifp->addr, addr)) { - result = ifp; - break; - } - } - - if (!result) { - struct rt6_info *rt; - - rt = rt6_lookup(net, addr, NULL, 0, NULL, 0); - if (rt) { - dev = rt->dst.dev; - ip6_rt_put(rt); - } - } else { - dev = result->idev->dev; - } - rcu_read_unlock(); - - return dev; + return __ipv6_chk_addr_and_flags(net, addr, dev, !dev, 1, + IFA_F_TENTATIVE); } EXPORT_SYMBOL(ipv6_dev_find); diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c index 53f0de0676b7..911d13cd2e67 100644 --- a/net/tipc/udp_media.c +++ b/net/tipc/udp_media.c @@ -660,6 +660,7 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b, struct udp_tunnel_sock_cfg tuncfg = {NULL}; struct nlattr *opts[TIPC_NLA_UDP_MAX + 1]; u8 node_id[NODE_ID_LEN] = {0,}; + struct net_device *dev; int rmcast = 0; ub = kzalloc(sizeof(*ub), GFP_ATOMIC); @@ -714,8 +715,6 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b, rcu_assign_pointer(ub->bearer, b); tipc_udp_media_addr_set(&b->addr, &local); if (local.proto == htons(ETH_P_IP)) { - struct net_device *dev; - dev = __ip_dev_find(net, local.ipv4.s_addr, false); if (!dev) { err = -ENODEV; @@ -738,9 +737,8 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b, b->mtu = b->media->mtu; #if IS_ENABLED(CONFIG_IPV6) } else if (local.proto == htons(ETH_P_IPV6)) { - struct net_device *dev; - - dev = ipv6_dev_find(net, &local.ipv6); + dev = ub->ifindex ? __dev_get_by_index(net, ub->ifindex) : NULL; + dev = ipv6_dev_find(net, &local.ipv6, dev); if (!dev) { err = -ENODEV; goto err; -- cgit v1.2.3 From 27bc446e2def38db3244a6eb4bb1d6312936610a Mon Sep 17 00:00:00 2001 From: brookxu Date: Mon, 17 Aug 2020 15:36:15 +0800 Subject: ext4: limit the length of per-inode prealloc list MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In the scenario of writing sparse files, the per-inode prealloc list may be very long, resulting in high overhead for ext4_mb_use_preallocated(). To circumvent this problem, we limit the maximum length of per-inode prealloc list to 512 and allow users to modify it. After patching, we observed that the sys ratio of cpu has dropped, and the system throughput has increased significantly. 
We created a process to write the sparse file, and the running time of the process on the fixed kernel was significantly reduced, as follows: Running time on unfixed kernel: [root@TENCENT64 ~]# time taskset 0x01 ./sparse /data1/sparce.dat real 0m2.051s user 0m0.008s sys 0m2.026s Running time on fixed kernel: [root@TENCENT64 ~]# time taskset 0x01 ./sparse /data1/sparce.dat real 0m0.471s user 0m0.004s sys 0m0.395s Signed-off-by: Chunguang Xu Link: https://lore.kernel.org/r/d7a98178-056b-6db5-6bce-4ead23f4a257@gmail.com Signed-off-by: Theodore Ts'o --- Documentation/admin-guide/ext4.rst | 3 ++ fs/ext4/ext4.h | 4 ++- fs/ext4/extents.c | 10 +++--- fs/ext4/file.c | 2 +- fs/ext4/indirect.c | 2 +- fs/ext4/inode.c | 6 ++-- fs/ext4/ioctl.c | 2 +- fs/ext4/mballoc.c | 74 +++++++++++++++++++++++++++++++++----- fs/ext4/mballoc.h | 4 +++ fs/ext4/move_extent.c | 4 +-- fs/ext4/super.c | 3 +- fs/ext4/sysfs.c | 2 ++ include/trace/events/ext4.h | 17 +++++---- 13 files changed, 104 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/Documentation/admin-guide/ext4.rst b/Documentation/admin-guide/ext4.rst index 7fc6a72920c9..59bcc4a92602 100644 --- a/Documentation/admin-guide/ext4.rst +++ b/Documentation/admin-guide/ext4.rst @@ -482,6 +482,9 @@ Files in /sys/fs/ext4/: multiple of this tuning parameter if the stripe size is not set in the ext4 superblock + mb_max_inode_prealloc + The maximum length of per-inode ext4_prealloc_space list. + mb_max_to_scan The maximum number of extents the multiblock allocator will search to find the best extent. diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 71b4370a3f91..523e00d7b392 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1070,6 +1070,7 @@ struct ext4_inode_info { struct timespec64 i_crtime; /* mballoc */ + atomic_t i_prealloc_active; struct list_head i_prealloc_list; spinlock_t i_prealloc_lock; @@ -1518,6 +1519,7 @@ struct ext4_sb_info { unsigned int s_mb_stats; unsigned int s_mb_order2_reqs; unsigned int s_mb_group_prealloc; + unsigned int s_mb_max_inode_prealloc; unsigned int s_max_dir_size_kb; /* where last allocation was done - for stream allocation */ unsigned long s_mb_last_group; @@ -2682,7 +2684,7 @@ extern int ext4_mb_release(struct super_block *); extern ext4_fsblk_t ext4_mb_new_blocks(handle_t *, struct ext4_allocation_request *, int *); extern int ext4_mb_reserve_blocks(struct super_block *, int); -extern void ext4_discard_preallocations(struct inode *); +extern void ext4_discard_preallocations(struct inode *, unsigned int); extern int __init ext4_init_mballoc(void); extern void ext4_exit_mballoc(void); extern ext4_group_t ext4_mb_prefetch(struct super_block *sb, diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 0eea09aa0f26..a0481582187a 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -100,7 +100,7 @@ static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped) * i_mutex. So we can safely drop the i_data_sem here. */ BUG_ON(EXT4_JOURNAL(inode) == NULL); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); up_write(&EXT4_I(inode)->i_data_sem); *dropped = 1; return 0; @@ -4266,7 +4266,7 @@ got_allocated_blocks: * not a good idea to call discard here directly, * but otherwise we'd need to call it every free(). 
*/ - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) fb_flags = EXT4_FREE_BLOCKS_NO_QUOT_UPDATE; ext4_free_blocks(handle, inode, NULL, newblock, @@ -5293,7 +5293,7 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) } down_write(&EXT4_I(inode)->i_data_sem); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); ret = ext4_es_remove_extent(inode, punch_start, EXT_MAX_BLOCKS - punch_start); @@ -5307,7 +5307,7 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) up_write(&EXT4_I(inode)->i_data_sem); goto out_stop; } - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); ret = ext4_ext_shift_extents(inode, handle, punch_stop, punch_stop - punch_start, SHIFT_LEFT); @@ -5439,7 +5439,7 @@ static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len) goto out_stop; down_write(&EXT4_I(inode)->i_data_sem); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); path = ext4_find_extent(inode, offset_lblk, NULL, 0); if (IS_ERR(path)) { diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 7a2720517bbb..e608ce3fb535 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -147,7 +147,7 @@ static int ext4_release_file(struct inode *inode, struct file *filp) (atomic_read(&inode->i_writecount) == 1) && !EXT4_I(inode)->i_reserved_data_blocks) { down_write(&EXT4_I(inode)->i_data_sem); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); up_write(&EXT4_I(inode)->i_data_sem); } if (is_dx(inode) && filp->private_data) diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index 433ca8415c5a..80c9f33800be 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c @@ -696,7 +696,7 @@ static int ext4_ind_trunc_restart_fn(handle_t *handle, struct inode *inode, * i_mutex. So we can safely drop the i_data_sem here. 
*/ BUG_ON(EXT4_JOURNAL(inode) == NULL); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); up_write(&EXT4_I(inode)->i_data_sem); *dropped = 1; return 0; diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 0b07576af3bf..77543f988258 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -383,7 +383,7 @@ void ext4_da_update_reserve_space(struct inode *inode, */ if ((ei->i_reserved_data_blocks == 0) && !inode_is_open_for_write(inode)) - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); } static int __check_block_validity(struct inode *inode, const char *func, @@ -4055,7 +4055,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) if (stop_block > first_block) { down_write(&EXT4_I(inode)->i_data_sem); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); ret = ext4_es_remove_extent(inode, first_block, stop_block - first_block); @@ -4210,7 +4210,7 @@ int ext4_truncate(struct inode *inode) down_write(&EXT4_I(inode)->i_data_sem); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) err = ext4_ext_truncate(handle, inode); diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 6e70a63dcca7..36eca3bc036a 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -202,7 +202,7 @@ static long swap_inode_boot_loader(struct super_block *sb, reset_inode_seed(inode); reset_inode_seed(inode_bl); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); err = ext4_mark_inode_dirty(handle, inode); if (err < 0) { diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 45ac6088b4ac..132c118d12e1 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -2878,6 +2878,7 @@ int ext4_mb_init(struct super_block *sb) sbi->s_mb_stats = MB_DEFAULT_STATS; sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD; sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS; + sbi->s_mb_max_inode_prealloc = MB_DEFAULT_MAX_INODE_PREALLOC; /* * The default group preallocation is 512, which for 4k block * sizes translates to 2 megabytes. However for bigalloc file @@ -3816,6 +3817,26 @@ void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, mb_debug(sb, "preallocated %d for group %u\n", preallocated, group); } +static void ext4_mb_mark_pa_deleted(struct super_block *sb, + struct ext4_prealloc_space *pa) +{ + struct ext4_inode_info *ei; + + if (pa->pa_deleted) { + ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n", + pa->pa_type, pa->pa_pstart, pa->pa_lstart, + pa->pa_len); + return; + } + + pa->pa_deleted = 1; + + if (pa->pa_type == MB_INODE_PA) { + ei = EXT4_I(pa->pa_inode); + atomic_dec(&ei->i_prealloc_active); + } +} + static void ext4_mb_pa_callback(struct rcu_head *head) { struct ext4_prealloc_space *pa; @@ -3848,7 +3869,7 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac, return; } - pa->pa_deleted = 1; + ext4_mb_mark_pa_deleted(sb, pa); spin_unlock(&pa->pa_lock); grp_blk = pa->pa_pstart; @@ -3972,6 +3993,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) spin_lock(pa->pa_obj_lock); list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list); spin_unlock(pa->pa_obj_lock); + atomic_inc(&ei->i_prealloc_active); } /* @@ -4182,7 +4204,7 @@ repeat: } /* seems this one can be freed ... */ - pa->pa_deleted = 1; + ext4_mb_mark_pa_deleted(sb, pa); /* we can trust pa_free ... */ free += pa->pa_free; @@ -4245,7 +4267,7 @@ out_dbg: * * FIXME!! 
Make sure it is valid at all the call sites */ -void ext4_discard_preallocations(struct inode *inode) +void ext4_discard_preallocations(struct inode *inode, unsigned int needed) { struct ext4_inode_info *ei = EXT4_I(inode); struct super_block *sb = inode->i_sb; @@ -4263,15 +4285,19 @@ void ext4_discard_preallocations(struct inode *inode) mb_debug(sb, "discard preallocation for inode %lu\n", inode->i_ino); - trace_ext4_discard_preallocations(inode); + trace_ext4_discard_preallocations(inode, + atomic_read(&ei->i_prealloc_active), needed); INIT_LIST_HEAD(&list); + if (needed == 0) + needed = UINT_MAX; + repeat: /* first, collect all pa's in the inode */ spin_lock(&ei->i_prealloc_lock); - while (!list_empty(&ei->i_prealloc_list)) { - pa = list_entry(ei->i_prealloc_list.next, + while (!list_empty(&ei->i_prealloc_list) && needed) { + pa = list_entry(ei->i_prealloc_list.prev, struct ext4_prealloc_space, pa_inode_list); BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock); spin_lock(&pa->pa_lock); @@ -4288,10 +4314,11 @@ repeat: } if (pa->pa_deleted == 0) { - pa->pa_deleted = 1; + ext4_mb_mark_pa_deleted(sb, pa); spin_unlock(&pa->pa_lock); list_del_rcu(&pa->pa_inode_list); list_add(&pa->u.pa_tmp_list, &list); + needed--; continue; } @@ -4592,7 +4619,7 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb, BUG_ON(pa->pa_type != MB_GROUP_PA); /* seems this one can be freed ... */ - pa->pa_deleted = 1; + ext4_mb_mark_pa_deleted(sb, pa); spin_unlock(&pa->pa_lock); list_del_rcu(&pa->pa_inode_list); @@ -4690,11 +4717,30 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) return ; } +/* + * if per-inode prealloc list is too long, trim some PA + */ +static void ext4_mb_trim_inode_pa(struct inode *inode) +{ + struct ext4_inode_info *ei = EXT4_I(inode); + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + int count, delta; + + count = atomic_read(&ei->i_prealloc_active); + delta = (sbi->s_mb_max_inode_prealloc >> 2) + 1; + if (count > sbi->s_mb_max_inode_prealloc + delta) { + count -= sbi->s_mb_max_inode_prealloc; + ext4_discard_preallocations(inode, count); + } +} + /* * release all resource we used in allocation */ static int ext4_mb_release_context(struct ext4_allocation_context *ac) { + struct inode *inode = ac->ac_inode; + struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); struct ext4_prealloc_space *pa = ac->ac_pa; if (pa) { @@ -4720,6 +4766,17 @@ static int ext4_mb_release_context(struct ext4_allocation_context *ac) ext4_mb_add_n_trim(ac); } } + + if (pa->pa_type == MB_INODE_PA) { + /* + * treat per-inode prealloc list as a lru list, then try + * to trim the least recently used PA. 
+ */ + spin_lock(pa->pa_obj_lock); + list_move(&pa->pa_inode_list, &ei->i_prealloc_list); + spin_unlock(pa->pa_obj_lock); + } + ext4_mb_put_pa(ac, ac->ac_sb, pa); } if (ac->ac_bitmap_page) @@ -4729,6 +4786,7 @@ static int ext4_mb_release_context(struct ext4_allocation_context *ac) if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) mutex_unlock(&ac->ac_lg->lg_mutex); ext4_mb_collect_stats(ac); + ext4_mb_trim_inode_pa(inode); return 0; } diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h index 6b4d17c2935d..e75b4749aa1c 100644 --- a/fs/ext4/mballoc.h +++ b/fs/ext4/mballoc.h @@ -73,6 +73,10 @@ */ #define MB_DEFAULT_GROUP_PREALLOC 512 +/* + * maximum length of inode prealloc list + */ +#define MB_DEFAULT_MAX_INODE_PREALLOC 512 struct ext4_free_data { /* this links the free block information from sb_info */ diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index 1ed86fb6c302..0d601b822875 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c @@ -686,8 +686,8 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk, out: if (*moved_len) { - ext4_discard_preallocations(orig_inode); - ext4_discard_preallocations(donor_inode); + ext4_discard_preallocations(orig_inode, 0); + ext4_discard_preallocations(donor_inode, 0); } ext4_ext_drop_refs(path); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index daa94c7f7271..13bdddc081e0 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1127,6 +1127,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) inode_set_iversion(&ei->vfs_inode, 1); spin_lock_init(&ei->i_raw_lock); INIT_LIST_HEAD(&ei->i_prealloc_list); + atomic_set(&ei->i_prealloc_active, 0); spin_lock_init(&ei->i_prealloc_lock); ext4_es_init_tree(&ei->i_es_tree); rwlock_init(&ei->i_es_lock); @@ -1220,7 +1221,7 @@ void ext4_clear_inode(struct inode *inode) { invalidate_inode_buffers(inode); clear_inode(inode); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS); dquot_drop(inode); if (EXT4_I(inode)->jinode) { diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c index 7fee11cc30e7..bfabb799fa45 100644 --- a/fs/ext4/sysfs.c +++ b/fs/ext4/sysfs.c @@ -218,6 +218,7 @@ EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan); EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs); EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request); EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc); +EXT4_RW_ATTR_SBI_UI(mb_max_inode_prealloc, s_mb_max_inode_prealloc); EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb); EXT4_ATTR(trigger_fs_error, 0200, trigger_test_error); EXT4_RW_ATTR_SBI_UI(err_ratelimit_interval_ms, s_err_ratelimit_state.interval); @@ -264,6 +265,7 @@ static struct attribute *ext4_attrs[] = { ATTR_LIST(mb_order2_req), ATTR_LIST(mb_stream_req), ATTR_LIST(mb_group_prealloc), + ATTR_LIST(mb_max_inode_prealloc), ATTR_LIST(max_writeback_mb_bump), ATTR_LIST(extent_max_zeroout_kb), ATTR_LIST(trigger_fs_error), diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index 8008d2e116b9..4c8b99ec8606 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -746,24 +746,29 @@ TRACE_EVENT(ext4_mb_release_group_pa, ); TRACE_EVENT(ext4_discard_preallocations, - TP_PROTO(struct inode *inode), + TP_PROTO(struct inode *inode, unsigned int len, unsigned int needed), - TP_ARGS(inode), + TP_ARGS(inode, len, needed), TP_STRUCT__entry( - __field( dev_t, dev ) - __field( ino_t, ino ) + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( unsigned 
int, len ) + __field( unsigned int, needed ) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; + __entry->len = len; + __entry->needed = needed; ), - TP_printk("dev %d,%d ino %lu", + TP_printk("dev %d,%d ino %lu len: %u needed %u", MAJOR(__entry->dev), MINOR(__entry->dev), - (unsigned long) __entry->ino) + (unsigned long) __entry->ino, __entry->len, + __entry->needed) ); TRACE_EVENT(ext4_mb_discard_preallocations, -- cgit v1.2.3 From cc5453a5b7e90c39f713091a7ebc53c1f87d1700 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 18 Aug 2020 16:15:58 +0200 Subject: netfilter: conntrack: allow sctp hearbeat after connection re-use If an sctp connection gets re-used, heartbeats are flagged as invalid because their vtag doesn't match. Handle this in a similar way as TCP conntrack when it suspects that the endpoints and conntrack are out-of-sync. When a HEARTBEAT request fails its vtag validation, flag this in the conntrack state and accept the packet. When a HEARTBEAT_ACK is received with an invalid vtag in the reverse direction after we allowed such a HEARTBEAT through, assume we are out-of-sync and re-set the vtag info. v2: remove left-over snippet from an older incarnation that moved new_state/old_state assignments, thats not needed so keep that as-is. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/nf_conntrack_sctp.h | 2 ++ net/netfilter/nf_conntrack_proto_sctp.c | 39 ++++++++++++++++++++++++++--- 2 files changed, 37 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h index 9a33f171aa82..625f491b95de 100644 --- a/include/linux/netfilter/nf_conntrack_sctp.h +++ b/include/linux/netfilter/nf_conntrack_sctp.h @@ -9,6 +9,8 @@ struct ip_ct_sctp { enum sctp_conntrack state; __be32 vtag[IP_CT_DIR_MAX]; + u8 last_dir; + u8 flags; }; #endif /* _NF_CONNTRACK_SCTP_H */ diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 4f897b14b606..810cca24b399 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -62,6 +62,8 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = { [SCTP_CONNTRACK_HEARTBEAT_ACKED] = 210 SECS, }; +#define SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1 + #define sNO SCTP_CONNTRACK_NONE #define sCL SCTP_CONNTRACK_CLOSED #define sCW SCTP_CONNTRACK_COOKIE_WAIT @@ -369,6 +371,7 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct, u_int32_t offset, count; unsigned int *timeouts; unsigned long map[256 / sizeof(unsigned long)] = { 0 }; + bool ignore = false; if (sctp_error(skb, dataoff, state)) return -NF_ACCEPT; @@ -427,15 +430,39 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct, /* Sec 8.5.1 (D) */ if (sh->vtag != ct->proto.sctp.vtag[dir]) goto out_unlock; - } else if (sch->type == SCTP_CID_HEARTBEAT || - sch->type == SCTP_CID_HEARTBEAT_ACK) { + } else if (sch->type == SCTP_CID_HEARTBEAT) { + if (ct->proto.sctp.vtag[dir] == 0) { + pr_debug("Setting %d vtag %x for dir %d\n", sch->type, sh->vtag, dir); + ct->proto.sctp.vtag[dir] = sh->vtag; + } else if (sh->vtag != ct->proto.sctp.vtag[dir]) { + if (test_bit(SCTP_CID_DATA, map) || ignore) + goto out_unlock; + + ct->proto.sctp.flags |= SCTP_FLAG_HEARTBEAT_VTAG_FAILED; + ct->proto.sctp.last_dir = dir; + ignore = true; + continue; + } else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) { + ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED; + } + } 
else if (sch->type == SCTP_CID_HEARTBEAT_ACK) { if (ct->proto.sctp.vtag[dir] == 0) { pr_debug("Setting vtag %x for dir %d\n", sh->vtag, dir); ct->proto.sctp.vtag[dir] = sh->vtag; } else if (sh->vtag != ct->proto.sctp.vtag[dir]) { - pr_debug("Verification tag check failed\n"); - goto out_unlock; + if (test_bit(SCTP_CID_DATA, map) || ignore) + goto out_unlock; + + if ((ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) == 0 || + ct->proto.sctp.last_dir == dir) + goto out_unlock; + + ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED; + ct->proto.sctp.vtag[dir] = sh->vtag; + ct->proto.sctp.vtag[!dir] = 0; + } else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) { + ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED; } } @@ -470,6 +497,10 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct, } spin_unlock_bh(&ct->lock); + /* allow but do not refresh timeout */ + if (ignore) + return NF_ACCEPT; + timeouts = nf_ct_timeout_lookup(ct); if (!timeouts) timeouts = nf_sctp_pernet(nf_ct_net(ct))->timeouts; -- cgit v1.2.3 From 4700c4d80b7bb171f6996016ef121e1508860b42 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 19 Aug 2020 23:29:16 +0100 Subject: rxrpc: Fix loss of RTT samples due to interposed ACK The Rx protocol has a mechanism to help generate RTT samples that works by a client transmitting a REQUESTED-type ACK when it receives a DATA packet that has the REQUEST_ACK flag set. The peer, however, may interpose other ACKs before transmitting the REQUESTED-ACK, as can be seen in the following trace excerpt: rxrpc_tx_data: c=00000044 DATA d0b5ece8:00000001 00000001 q=00000001 fl=07 rxrpc_rx_ack: c=00000044 00000001 PNG r=00000000 f=00000002 p=00000000 n=0 rxrpc_rx_ack: c=00000044 00000002 REQ r=00000001 f=00000002 p=00000001 n=0 ... DATA packet 1 (q=xx) has REQUEST_ACK set (bit 1 of fl=xx). The incoming ping (labelled PNG) hard-acks the request DATA packet (f=xx exceeds the sequence number of the DATA packet), causing it to be discarded from the Tx ring. The ACK that was requested (labelled REQ, r=xx references the serial of the DATA packet) comes after the ping, but the sk_buff holding the timestamp has gone and the RTT sample is lost. This is particularly noticeable on RPC calls used to probe the service offered by the peer. A lot of peers end up with an unknown RTT because we only ever sent a single RPC. This confuses the server rotation algorithm. Fix this by caching the information about the outgoing packet in RTT calculations in the rxrpc_call struct rather than looking in the Tx ring. A four-deep buffer is maintained and both REQUEST_ACK-flagged DATA and PING-ACK transmissions are recorded in there. When the appropriate response ACK is received, the buffer is checked for a match and, if found, an RTT sample is recorded. If a received ACK refers to a packet with a later serial number than an entry in the cache, that entry is presumed lost and the entry is made available to record a new transmission. ACKs types other than REQUESTED-type and PING-type cause any matching sample to be cancelled as they don't necessarily represent a useful measurement. If there's no space in the buffer on ping/data transmission, the sample base is discarded. 
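To make the buffer layout concrete, a simplified sketch follows. The struct name below is invented purely for illustration; in the actual patch these fields live directly in struct rxrpc_call (rtt_serial[4], rtt_sent_at[4] and the rtt_avail bitmask), as the ar-internal.h hunk below shows.

#include <linux/ktime.h>
#include <linux/types.h>

struct rtt_probe_cache {		/* hypothetical wrapper, for illustration only */
	unsigned long	avail;		/* bits 0-3: slot free for a new probe,
					 * bits 8-11: probe sent, reply pending */
	u32		serial[4];	/* serial of the REQUEST_ACK DATA or PING */
	ktime_t		sent_at[4];	/* when that packet was transmitted */
};

When an ACK arrives, a pending slot whose recorded serial matches the acked serial yields an RTT sample; a pending slot whose serial is older than the acked serial is treated as lost and handed back for reuse. Only REQUESTED-type and PING-response ACKs record a sample; any other ACK type merely cancels the matching slot.
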
Fixes: 50235c4b5a2f ("rxrpc: Obtain RTT data by requesting ACKs on DATA packets") Signed-off-by: David Howells --- include/trace/events/rxrpc.h | 27 ++++++++--- net/rxrpc/ar-internal.h | 13 +++--- net/rxrpc/call_object.c | 1 + net/rxrpc/input.c | 104 +++++++++++++++++++++++++------------------ net/rxrpc/output.c | 82 +++++++++++++++++++++++++--------- net/rxrpc/rtt.c | 3 +- 6 files changed, 154 insertions(+), 76 deletions(-) (limited to 'include') diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 059b6e45a028..c33079b986e8 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -138,11 +138,16 @@ enum rxrpc_recvmsg_trace { }; enum rxrpc_rtt_tx_trace { + rxrpc_rtt_tx_cancel, rxrpc_rtt_tx_data, + rxrpc_rtt_tx_no_slot, rxrpc_rtt_tx_ping, }; enum rxrpc_rtt_rx_trace { + rxrpc_rtt_rx_cancel, + rxrpc_rtt_rx_lost, + rxrpc_rtt_rx_obsolete, rxrpc_rtt_rx_ping_response, rxrpc_rtt_rx_requested_ack, }; @@ -339,10 +344,15 @@ enum rxrpc_tx_point { E_(rxrpc_recvmsg_wait, "WAIT") #define rxrpc_rtt_tx_traces \ + EM(rxrpc_rtt_tx_cancel, "CNCE") \ EM(rxrpc_rtt_tx_data, "DATA") \ + EM(rxrpc_rtt_tx_no_slot, "FULL") \ E_(rxrpc_rtt_tx_ping, "PING") #define rxrpc_rtt_rx_traces \ + EM(rxrpc_rtt_rx_cancel, "CNCL") \ + EM(rxrpc_rtt_rx_obsolete, "OBSL") \ + EM(rxrpc_rtt_rx_lost, "LOST") \ EM(rxrpc_rtt_rx_ping_response, "PONG") \ E_(rxrpc_rtt_rx_requested_ack, "RACK") @@ -1087,38 +1097,43 @@ TRACE_EVENT(rxrpc_recvmsg, TRACE_EVENT(rxrpc_rtt_tx, TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_tx_trace why, - rxrpc_serial_t send_serial), + int slot, rxrpc_serial_t send_serial), - TP_ARGS(call, why, send_serial), + TP_ARGS(call, why, slot, send_serial), TP_STRUCT__entry( __field(unsigned int, call ) __field(enum rxrpc_rtt_tx_trace, why ) + __field(int, slot ) __field(rxrpc_serial_t, send_serial ) ), TP_fast_assign( __entry->call = call->debug_id; __entry->why = why; + __entry->slot = slot; __entry->send_serial = send_serial; ), - TP_printk("c=%08x %s sr=%08x", + TP_printk("c=%08x [%d] %s sr=%08x", __entry->call, + __entry->slot, __print_symbolic(__entry->why, rxrpc_rtt_tx_traces), __entry->send_serial) ); TRACE_EVENT(rxrpc_rtt_rx, TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, + int slot, rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial, u32 rtt, u32 rto), - TP_ARGS(call, why, send_serial, resp_serial, rtt, rto), + TP_ARGS(call, why, slot, send_serial, resp_serial, rtt, rto), TP_STRUCT__entry( __field(unsigned int, call ) __field(enum rxrpc_rtt_rx_trace, why ) + __field(int, slot ) __field(rxrpc_serial_t, send_serial ) __field(rxrpc_serial_t, resp_serial ) __field(u32, rtt ) @@ -1128,14 +1143,16 @@ TRACE_EVENT(rxrpc_rtt_rx, TP_fast_assign( __entry->call = call->debug_id; __entry->why = why; + __entry->slot = slot; __entry->send_serial = send_serial; __entry->resp_serial = resp_serial; __entry->rtt = rtt; __entry->rto = rto; ), - TP_printk("c=%08x %s sr=%08x rr=%08x rtt=%u rto=%u", + TP_printk("c=%08x [%d] %s sr=%08x rr=%08x rtt=%u rto=%u", __entry->call, + __entry->slot, __print_symbolic(__entry->why, rxrpc_rtt_rx_traces), __entry->send_serial, __entry->resp_serial, diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 6d29a3603a3e..884cff7bb169 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -488,7 +488,6 @@ enum rxrpc_call_flag { RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */ RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */ RXRPC_CALL_SEND_PING, /* A ping will need to be sent 
*/ - RXRPC_CALL_PINGING, /* Ping in process */ RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */ RXRPC_CALL_BEGAN_RX_TIMER, /* We began the expect_rx_by timer */ RXRPC_CALL_RX_HEARD, /* The peer responded at least once to this call */ @@ -673,9 +672,13 @@ struct rxrpc_call { rxrpc_seq_t ackr_consumed; /* Highest packet shown consumed */ rxrpc_seq_t ackr_seen; /* Highest packet shown seen */ - /* ping management */ - rxrpc_serial_t ping_serial; /* Last ping sent */ - ktime_t ping_time; /* Time last ping sent */ + /* RTT management */ + rxrpc_serial_t rtt_serial[4]; /* Serial number of DATA or PING sent */ + ktime_t rtt_sent_at[4]; /* Time packet sent */ + unsigned long rtt_avail; /* Mask of available slots in bits 0-3, + * Mask of pending samples in 8-11 */ +#define RXRPC_CALL_RTT_AVAIL_MASK 0xf +#define RXRPC_CALL_RTT_PEND_SHIFT 8 /* transmission-phase ACK management */ ktime_t acks_latest_ts; /* Timestamp of latest ACK received */ @@ -1037,7 +1040,7 @@ static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call, /* * rtt.c */ -void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, +void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, int, rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t); unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *, bool); void rxrpc_peer_init_rtt(struct rxrpc_peer *); diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 38a46167523f..a40fae013942 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -153,6 +153,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp, call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1; call->rxnet = rxnet; + call->rtt_avail = RXRPC_CALL_RTT_AVAIL_MASK; atomic_inc(&rxnet->nr_calls); return call; diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index a7699e56eac8..19ddfc9807e8 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -608,36 +608,57 @@ unlock: } /* - * Process a requested ACK. + * See if there's a cached RTT probe to complete. */ -static void rxrpc_input_requested_ack(struct rxrpc_call *call, - ktime_t resp_time, - rxrpc_serial_t orig_serial, - rxrpc_serial_t ack_serial) +static void rxrpc_complete_rtt_probe(struct rxrpc_call *call, + ktime_t resp_time, + rxrpc_serial_t acked_serial, + rxrpc_serial_t ack_serial, + enum rxrpc_rtt_rx_trace type) { - struct rxrpc_skb_priv *sp; - struct sk_buff *skb; + rxrpc_serial_t orig_serial; + unsigned long avail; ktime_t sent_at; - int ix; + bool matched = false; + int i; - for (ix = 0; ix < RXRPC_RXTX_BUFF_SIZE; ix++) { - skb = call->rxtx_buffer[ix]; - if (!skb) - continue; + avail = READ_ONCE(call->rtt_avail); + smp_rmb(); /* Read avail bits before accessing data. */ - sent_at = skb->tstamp; - smp_rmb(); /* Read timestamp before serial. 
*/ - sp = rxrpc_skb(skb); - if (sp->hdr.serial != orig_serial) + for (i = 0; i < ARRAY_SIZE(call->rtt_serial); i++) { + if (!test_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &avail)) continue; - goto found; - } - return; + sent_at = call->rtt_sent_at[i]; + orig_serial = call->rtt_serial[i]; + + if (orig_serial == acked_serial) { + clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail); + smp_mb(); /* Read data before setting avail bit */ + set_bit(i, &call->rtt_avail); + if (type != rxrpc_rtt_rx_cancel) + rxrpc_peer_add_rtt(call, type, i, acked_serial, ack_serial, + sent_at, resp_time); + else + trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_cancel, i, + orig_serial, acked_serial, 0, 0); + matched = true; + } + + /* If a later serial is being acked, then mark this slot as + * being available. + */ + if (after(acked_serial, orig_serial)) { + trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_obsolete, i, + orig_serial, acked_serial, 0, 0); + clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail); + smp_wmb(); + set_bit(i, &call->rtt_avail); + } + } -found: - rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_requested_ack, - orig_serial, ack_serial, sent_at, resp_time); + if (!matched) + trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_lost, 9, 0, acked_serial, 0, 0); } /* @@ -682,27 +703,11 @@ static void rxrpc_input_check_for_lost_ack(struct rxrpc_call *call) */ static void rxrpc_input_ping_response(struct rxrpc_call *call, ktime_t resp_time, - rxrpc_serial_t orig_serial, + rxrpc_serial_t acked_serial, rxrpc_serial_t ack_serial) { - rxrpc_serial_t ping_serial; - ktime_t ping_time; - - ping_time = call->ping_time; - smp_rmb(); - ping_serial = READ_ONCE(call->ping_serial); - - if (orig_serial == call->acks_lost_ping) + if (acked_serial == call->acks_lost_ping) rxrpc_input_check_for_lost_ack(call); - - if (before(orig_serial, ping_serial) || - !test_and_clear_bit(RXRPC_CALL_PINGING, &call->flags)) - return; - if (after(orig_serial, ping_serial)) - return; - - rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_ping_response, - orig_serial, ack_serial, ping_time, resp_time); } /* @@ -869,12 +874,23 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) first_soft_ack, prev_pkt, summary.ack_reason, nr_acks); - if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE) + switch (buf.ack.reason) { + case RXRPC_ACK_PING_RESPONSE: rxrpc_input_ping_response(call, skb->tstamp, acked_serial, ack_serial); - if (buf.ack.reason == RXRPC_ACK_REQUESTED) - rxrpc_input_requested_ack(call, skb->tstamp, acked_serial, - ack_serial); + rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial, + rxrpc_rtt_rx_ping_response); + break; + case RXRPC_ACK_REQUESTED: + rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial, + rxrpc_rtt_rx_requested_ack); + break; + default: + if (acked_serial != 0) + rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial, + rxrpc_rtt_rx_cancel); + break; + } if (buf.ack.reason == RXRPC_ACK_PING) { _proto("Rx ACK %%%u PING Request", ack_serial); diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index 1ba43c3df4ad..3cfff7922ba8 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c @@ -123,6 +123,49 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn, return top - hard_ack + 3; } +/* + * Record the beginning of an RTT probe. 
+ */ +static int rxrpc_begin_rtt_probe(struct rxrpc_call *call, rxrpc_serial_t serial, + enum rxrpc_rtt_tx_trace why) +{ + unsigned long avail = call->rtt_avail; + int rtt_slot = 9; + + if (!(avail & RXRPC_CALL_RTT_AVAIL_MASK)) + goto no_slot; + + rtt_slot = __ffs(avail & RXRPC_CALL_RTT_AVAIL_MASK); + if (!test_and_clear_bit(rtt_slot, &call->rtt_avail)) + goto no_slot; + + call->rtt_serial[rtt_slot] = serial; + call->rtt_sent_at[rtt_slot] = ktime_get_real(); + smp_wmb(); /* Write data before avail bit */ + set_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail); + + trace_rxrpc_rtt_tx(call, why, rtt_slot, serial); + return rtt_slot; + +no_slot: + trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_no_slot, rtt_slot, serial); + return -1; +} + +/* + * Cancel an RTT probe. + */ +static void rxrpc_cancel_rtt_probe(struct rxrpc_call *call, + rxrpc_serial_t serial, int rtt_slot) +{ + if (rtt_slot != -1) { + clear_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail); + smp_wmb(); /* Clear pending bit before setting slot */ + set_bit(rtt_slot, &call->rtt_avail); + trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_cancel, rtt_slot, serial); + } +} + /* * Send an ACK call packet. */ @@ -136,7 +179,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping, rxrpc_serial_t serial; rxrpc_seq_t hard_ack, top; size_t len, n; - int ret; + int ret, rtt_slot = -1; u8 reason; if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags)) @@ -196,18 +239,8 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping, if (_serial) *_serial = serial; - if (ping) { - call->ping_serial = serial; - smp_wmb(); - /* We need to stick a time in before we send the packet in case - * the reply gets back before kernel_sendmsg() completes - but - * asking UDP to send the packet can take a relatively long - * time. 
- */ - call->ping_time = ktime_get_real(); - set_bit(RXRPC_CALL_PINGING, &call->flags); - trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_ping, serial); - } + if (ping) + rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_ping); ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len); conn->params.peer->last_tx_at = ktime_get_seconds(); @@ -221,8 +254,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping, if (call->state < RXRPC_CALL_COMPLETE) { if (ret < 0) { - if (ping) - clear_bit(RXRPC_CALL_PINGING, &call->flags); + rxrpc_cancel_rtt_probe(call, serial, rtt_slot); rxrpc_propose_ACK(call, pkt->ack.reason, ntohl(pkt->ack.serial), false, true, @@ -321,7 +353,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, struct kvec iov[2]; rxrpc_serial_t serial; size_t len; - int ret; + int ret, rtt_slot = -1; _enter(",{%d}", skb->len); @@ -397,6 +429,8 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, sp->hdr.serial = serial; smp_wmb(); /* Set serial before timestamp */ skb->tstamp = ktime_get_real(); + if (whdr.flags & RXRPC_REQUEST_ACK) + rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data); /* send the packet by UDP * - returns -EMSGSIZE if UDP would have to fragment the packet @@ -408,12 +442,15 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, conn->params.peer->last_tx_at = ktime_get_seconds(); up_read(&conn->params.local->defrag_sem); - if (ret < 0) + if (ret < 0) { + rxrpc_cancel_rtt_probe(call, serial, rtt_slot); trace_rxrpc_tx_fail(call->debug_id, serial, ret, rxrpc_tx_point_call_data_nofrag); - else + } else { trace_rxrpc_tx_packet(call->debug_id, &whdr, rxrpc_tx_point_call_data_nofrag); + } + rxrpc_tx_backoff(call, ret); if (ret == -EMSGSIZE) goto send_fragmentable; @@ -422,7 +459,6 @@ done: if (ret >= 0) { if (whdr.flags & RXRPC_REQUEST_ACK) { call->peer->rtt_last_req = skb->tstamp; - trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial); if (call->peer->rtt_count > 1) { unsigned long nowj = jiffies, ack_lost_at; @@ -469,6 +505,8 @@ send_fragmentable: sp->hdr.serial = serial; smp_wmb(); /* Set serial before timestamp */ skb->tstamp = ktime_get_real(); + if (whdr.flags & RXRPC_REQUEST_ACK) + rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data); switch (conn->params.local->srx.transport.family) { case AF_INET6: @@ -487,12 +525,14 @@ send_fragmentable: BUG(); } - if (ret < 0) + if (ret < 0) { + rxrpc_cancel_rtt_probe(call, serial, rtt_slot); trace_rxrpc_tx_fail(call->debug_id, serial, ret, rxrpc_tx_point_call_data_frag); - else + } else { trace_rxrpc_tx_packet(call->debug_id, &whdr, rxrpc_tx_point_call_data_frag); + } rxrpc_tx_backoff(call, ret); up_write(&conn->params.local->defrag_sem); diff --git a/net/rxrpc/rtt.c b/net/rxrpc/rtt.c index 928d8b34a3ee..1221b0637a7e 100644 --- a/net/rxrpc/rtt.c +++ b/net/rxrpc/rtt.c @@ -146,6 +146,7 @@ static void rxrpc_ack_update_rtt(struct rxrpc_peer *peer, long rtt_us) * exclusive access to the peer RTT data. 
*/ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, + int rtt_slot, rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial, ktime_t send_time, ktime_t resp_time) { @@ -162,7 +163,7 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, peer->rtt_count++; spin_unlock(&peer->rtt_input_lock); - trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, + trace_rxrpc_rtt_rx(call, why, rtt_slot, send_serial, resp_serial, peer->srtt_us >> 3, peer->rto_j); } -- cgit v1.2.3 From 1d4adfaf65746203861c72d9d78de349eb97d528 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 20 Aug 2020 15:13:00 +0100 Subject: rxrpc: Make rxrpc_kernel_get_srtt() indicate validity Fix rxrpc_kernel_get_srtt() to indicate the validity of the returned smoothed RTT. If we haven't had any valid samples yet, the SRTT isn't useful. Fixes: c410bf01933e ("rxrpc: Fix the excessive initial retransmission timeout") Signed-off-by: David Howells --- fs/afs/fs_probe.c | 4 ++-- fs/afs/vl_probe.c | 4 ++-- include/net/af_rxrpc.h | 2 +- net/rxrpc/peer_object.c | 16 +++++++++++++--- 4 files changed, 18 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c index 5d9ef517cf81..e7e98ad63a91 100644 --- a/fs/afs/fs_probe.c +++ b/fs/afs/fs_probe.c @@ -161,8 +161,8 @@ responded: } } - rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall); - if (rtt_us < server->probe.rtt) { + if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) && + rtt_us < server->probe.rtt) { server->probe.rtt = rtt_us; server->rtt = rtt_us; alist->preferred = index; diff --git a/fs/afs/vl_probe.c b/fs/afs/vl_probe.c index e3aa013c2177..081b7e5b13f5 100644 --- a/fs/afs/vl_probe.c +++ b/fs/afs/vl_probe.c @@ -92,8 +92,8 @@ responded: } } - rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall); - if (rtt_us < server->probe.rtt) { + if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) && + rtt_us < server->probe.rtt) { server->probe.rtt = rtt_us; alist->preferred = index; have_result = true; diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index 91eacbdcf33d..f6abcc0bbd6e 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h @@ -59,7 +59,7 @@ bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *, void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *); void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *, struct sockaddr_rxrpc *); -u32 rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *); +bool rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *, u32 *); int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t, rxrpc_user_attach_call_t, unsigned long, gfp_t, unsigned int); diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c index ca29976bb193..68396d052052 100644 --- a/net/rxrpc/peer_object.c +++ b/net/rxrpc/peer_object.c @@ -502,11 +502,21 @@ EXPORT_SYMBOL(rxrpc_kernel_get_peer); * rxrpc_kernel_get_srtt - Get a call's peer smoothed RTT * @sock: The socket on which the call is in progress. * @call: The call to query + * @_srtt: Where to store the SRTT value. * - * Get the call's peer smoothed RTT. + * Get the call's peer smoothed RTT in uS. 
*/ -u32 rxrpc_kernel_get_srtt(struct socket *sock, struct rxrpc_call *call) +bool rxrpc_kernel_get_srtt(struct socket *sock, struct rxrpc_call *call, + u32 *_srtt) { - return call->peer->srtt_us >> 3; + struct rxrpc_peer *peer = call->peer; + + if (peer->rtt_count == 0) { + *_srtt = 1000000; /* 1S */ + return false; + } + + *_srtt = call->peer->srtt_us >> 3; + return true; } EXPORT_SYMBOL(rxrpc_kernel_get_srtt); -- cgit v1.2.3 From 2ac6795fcc085e8d03649f1bbd0d70aaff612cad Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Mon, 17 Aug 2020 18:12:49 +0530 Subject: clocksource/drivers: Add CLINT timer driver We add a separate CLINT timer driver for Linux RISC-V M-mode (i.e. RISC-V NoMMU kernel). The CLINT MMIO device provides three things: 1. 64bit free running counter register 2. 64bit per-CPU time compare registers 3. 32bit per-CPU inter-processor interrupt registers Unlike other timer devices, CLINT provides IPI registers along with timer registers. To use CLINT IPI registers, the CLINT timer driver provides IPI related callbacks to arch/riscv. Signed-off-by: Anup Patel Tested-by: Emil Renner Berhing Acked-by: Daniel Lezcano Reviewed-by: Atish Patra Reviewed-by: Palmer Dabbelt Signed-off-by: Palmer Dabbelt --- drivers/clocksource/Kconfig | 9 ++ drivers/clocksource/Makefile | 1 + drivers/clocksource/timer-clint.c | 226 ++++++++++++++++++++++++++++++++++++++ include/linux/cpuhotplug.h | 1 + 4 files changed, 237 insertions(+) create mode 100644 drivers/clocksource/timer-clint.c (limited to 'include') diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 3576ad7bd380..d95cc7234a66 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -663,6 +663,15 @@ config RISCV_TIMER is accessed via both the SBI and the rdcycle instruction. This is required for all RISC-V systems. +config CLINT_TIMER + bool "CLINT Timer for the RISC-V platform" if COMPILE_TEST + depends on GENERIC_SCHED_CLOCK && RISCV + select TIMER_PROBE + select TIMER_OF + help + This option enables the CLINT timer for RISC-V systems. The CLINT + driver is usually used for NoMMU RISC-V systems. + config CSKY_MP_TIMER bool "SMP Timer for the C-SKY platform" if COMPILE_TEST depends on CSKY diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index eaedb7240ae7..1c444cc3bb44 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -89,6 +89,7 @@ obj-$(CONFIG_CLKSRC_ST_LPC) += clksrc_st_lpc.o obj-$(CONFIG_X86_NUMACHIP) += numachip.o obj-$(CONFIG_ATCPIT100_TIMER) += timer-atcpit100.o obj-$(CONFIG_RISCV_TIMER) += timer-riscv.o +obj-$(CONFIG_CLINT_TIMER) += timer-clint.o obj-$(CONFIG_CSKY_MP_TIMER) += timer-mp-csky.o obj-$(CONFIG_GX6605S_TIMER) += timer-gx6605s.o obj-$(CONFIG_HYPERV_TIMER) += hyperv_timer.o diff --git a/drivers/clocksource/timer-clint.c b/drivers/clocksource/timer-clint.c new file mode 100644 index 000000000000..8eeafa82c03d --- /dev/null +++ b/drivers/clocksource/timer-clint.c @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 Western Digital Corporation or its affiliates. + * + * Most of the M-mode (i.e. NoMMU) RISC-V systems usually have a + * CLINT MMIO timer device. 
+ */ + +#define pr_fmt(fmt) "clint: " fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CLINT_IPI_OFF 0 +#define CLINT_TIMER_CMP_OFF 0x4000 +#define CLINT_TIMER_VAL_OFF 0xbff8 + +/* CLINT manages IPI and Timer for RISC-V M-mode */ +static u32 __iomem *clint_ipi_base; +static u64 __iomem *clint_timer_cmp; +static u64 __iomem *clint_timer_val; +static unsigned long clint_timer_freq; +static unsigned int clint_timer_irq; + +static void clint_send_ipi(const struct cpumask *target) +{ + unsigned int cpu; + + for_each_cpu(cpu, target) + writel(1, clint_ipi_base + cpuid_to_hartid_map(cpu)); +} + +static void clint_clear_ipi(void) +{ + writel(0, clint_ipi_base + cpuid_to_hartid_map(smp_processor_id())); +} + +static struct riscv_ipi_ops clint_ipi_ops = { + .ipi_inject = clint_send_ipi, + .ipi_clear = clint_clear_ipi, +}; + +#ifdef CONFIG_64BIT +#define clint_get_cycles() readq_relaxed(clint_timer_val) +#else +#define clint_get_cycles() readl_relaxed(clint_timer_val) +#define clint_get_cycles_hi() readl_relaxed(((u32 *)clint_timer_val) + 1) +#endif + +#ifdef CONFIG_64BIT +static u64 notrace clint_get_cycles64(void) +{ + return clint_get_cycles(); +} +#else /* CONFIG_64BIT */ +static u64 notrace clint_get_cycles64(void) +{ + u32 hi, lo; + + do { + hi = clint_get_cycles_hi(); + lo = clint_get_cycles(); + } while (hi != clint_get_cycles_hi()); + + return ((u64)hi << 32) | lo; +} +#endif /* CONFIG_64BIT */ + +static u64 clint_rdtime(struct clocksource *cs) +{ + return clint_get_cycles64(); +} + +static struct clocksource clint_clocksource = { + .name = "clint_clocksource", + .rating = 300, + .mask = CLOCKSOURCE_MASK(64), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .read = clint_rdtime, +}; + +static int clint_clock_next_event(unsigned long delta, + struct clock_event_device *ce) +{ + void __iomem *r = clint_timer_cmp + + cpuid_to_hartid_map(smp_processor_id()); + + csr_set(CSR_IE, IE_TIE); + writeq_relaxed(clint_get_cycles64() + delta, r); + return 0; +} + +static DEFINE_PER_CPU(struct clock_event_device, clint_clock_event) = { + .name = "clint_clockevent", + .features = CLOCK_EVT_FEAT_ONESHOT, + .rating = 100, + .set_next_event = clint_clock_next_event, +}; + +static int clint_timer_starting_cpu(unsigned int cpu) +{ + struct clock_event_device *ce = per_cpu_ptr(&clint_clock_event, cpu); + + ce->cpumask = cpumask_of(cpu); + clockevents_config_and_register(ce, clint_timer_freq, 100, 0x7fffffff); + + enable_percpu_irq(clint_timer_irq, + irq_get_trigger_type(clint_timer_irq)); + return 0; +} + +static int clint_timer_dying_cpu(unsigned int cpu) +{ + disable_percpu_irq(clint_timer_irq); + return 0; +} + +static irqreturn_t clint_timer_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *evdev = this_cpu_ptr(&clint_clock_event); + + csr_clear(CSR_IE, IE_TIE); + evdev->event_handler(evdev); + + return IRQ_HANDLED; +} + +static int __init clint_timer_init_dt(struct device_node *np) +{ + int rc; + u32 i, nr_irqs; + void __iomem *base; + struct of_phandle_args oirq; + + /* + * Ensure that CLINT device interrupts are either RV_IRQ_TIMER or + * RV_IRQ_SOFT. If it's anything else then we ignore the device. 
+ */ + nr_irqs = of_irq_count(np); + for (i = 0; i < nr_irqs; i++) { + if (of_irq_parse_one(np, i, &oirq)) { + pr_err("%pOFP: failed to parse irq %d.\n", np, i); + continue; + } + + if ((oirq.args_count != 1) || + (oirq.args[0] != RV_IRQ_TIMER && + oirq.args[0] != RV_IRQ_SOFT)) { + pr_err("%pOFP: invalid irq %d (hwirq %d)\n", + np, i, oirq.args[0]); + return -ENODEV; + } + + /* Find parent irq domain and map timer irq */ + if (!clint_timer_irq && + oirq.args[0] == RV_IRQ_TIMER && + irq_find_host(oirq.np)) + clint_timer_irq = irq_of_parse_and_map(np, i); + } + + /* If CLINT timer irq not found then fail */ + if (!clint_timer_irq) { + pr_err("%pOFP: timer irq not found\n", np); + return -ENODEV; + } + + base = of_iomap(np, 0); + if (!base) { + pr_err("%pOFP: could not map registers\n", np); + return -ENODEV; + } + + clint_ipi_base = base + CLINT_IPI_OFF; + clint_timer_cmp = base + CLINT_TIMER_CMP_OFF; + clint_timer_val = base + CLINT_TIMER_VAL_OFF; + clint_timer_freq = riscv_timebase; + + pr_info("%pOFP: timer running at %ld Hz\n", np, clint_timer_freq); + + rc = clocksource_register_hz(&clint_clocksource, clint_timer_freq); + if (rc) { + pr_err("%pOFP: clocksource register failed [%d]\n", np, rc); + goto fail_iounmap; + } + + sched_clock_register(clint_get_cycles64, 64, clint_timer_freq); + + rc = request_percpu_irq(clint_timer_irq, clint_timer_interrupt, + "clint-timer", &clint_clock_event); + if (rc) { + pr_err("registering percpu irq failed [%d]\n", rc); + goto fail_iounmap; + } + + rc = cpuhp_setup_state(CPUHP_AP_CLINT_TIMER_STARTING, + "clockevents/clint/timer:starting", + clint_timer_starting_cpu, + clint_timer_dying_cpu); + if (rc) { + pr_err("%pOFP: cpuhp setup state failed [%d]\n", np, rc); + goto fail_free_irq; + } + + riscv_set_ipi_ops(&clint_ipi_ops); + clint_clear_ipi(); + + return 0; + +fail_free_irq: + free_irq(clint_timer_irq, &clint_clock_event); +fail_iounmap: + iounmap(base); + return rc; +} + +TIMER_OF_DECLARE(clint_timer, "riscv,clint0", clint_timer_init_dt); +TIMER_OF_DECLARE(clint_timer1, "sifive,clint0", clint_timer_init_dt); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index a2710e654b64..3215023d4852 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -132,6 +132,7 @@ enum cpuhp_state { CPUHP_AP_MIPS_GIC_TIMER_STARTING, CPUHP_AP_ARC_TIMER_STARTING, CPUHP_AP_RISCV_TIMER_STARTING, + CPUHP_AP_CLINT_TIMER_STARTING, CPUHP_AP_CSKY_TIMER_STARTING, CPUHP_AP_HYPERV_TIMER_STARTING, CPUHP_AP_KVM_STARTING, -- cgit v1.2.3 From da9125df854ea48a6240c66e8a67be06e2c12c03 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Thu, 20 Aug 2020 14:12:55 +0200 Subject: netfilter: nf_tables: incorrect enum nft_list_attributes definition This should be NFTA_LIST_UNSPEC instead of NFTA_LIST_UNPEC, all other similar attribute definitions are postfixed with _UNSPEC. 
Fixes: 96518518cc41 ("netfilter: add nftables") Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nf_tables.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 42f351c1f5c5..2b8e12f7a4a6 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -133,7 +133,7 @@ enum nf_tables_msg_types { * @NFTA_LIST_ELEM: list element (NLA_NESTED) */ enum nft_list_attributes { - NFTA_LIST_UNPEC, + NFTA_LIST_UNSPEC, NFTA_LIST_ELEM, __NFTA_LIST_MAX }; -- cgit v1.2.3 From 1e105e6afa6c3d32bfb52c00ffa393894a525c27 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 20 Aug 2020 21:05:50 +0200 Subject: netfilter: nf_tables: fix destination register zeroing Following bug was reported via irc: nft list ruleset set knock_candidates_ipv4 { type ipv4_addr . inet_service size 65535 elements = { 127.0.0.1 . 123, 127.0.0.1 . 123 } } .. udp dport 123 add @knock_candidates_ipv4 { ip saddr . 123 } udp dport 123 add @knock_candidates_ipv4 { ip saddr . udp dport } It should not have been possible to add a duplicate set entry. After some debugging it turned out that the problem is the immediate value (123) in the second-to-last rule. Concatenations use 32bit registers, i.e. the elements are 8 bytes each, not 6 and it turns out the kernel inserted inet firewall @knock_candidates_ipv4 element 0100007f ffff7b00 : 0 [end] element 0100007f 00007b00 : 0 [end] Note the non-zero upper bits of the first element. It turns out that nft_immediate doesn't zero the destination register, but this is needed when the length isn't a multiple of 4. Furthermore, the zeroing in nft_payload is broken. We can't use [len / 4] = 0 -- if len is a multiple of 4, index is off by one. Skip zeroing in this case and use a conditional instead of (len -1) / 4. Fixes: 49499c3e6e18 ("netfilter: nf_tables: switch registers to 32 bit addressing") Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tables.h | 2 ++ net/netfilter/nft_payload.c | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index bf9491b77d16..224d194ad29d 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -143,6 +143,8 @@ static inline u64 nft_reg_load64(const u32 *sreg) static inline void nft_data_copy(u32 *dst, const struct nft_data *src, unsigned int len) { + if (len % NFT_REG32_SIZE) + dst[len / NFT_REG32_SIZE] = 0; memcpy(dst, src, len); } diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c index ed7cb9f747f6..7a2e59638499 100644 --- a/net/netfilter/nft_payload.c +++ b/net/netfilter/nft_payload.c @@ -87,7 +87,9 @@ void nft_payload_eval(const struct nft_expr *expr, u32 *dest = ®s->data[priv->dreg]; int offset; - dest[priv->len / NFT_REG32_SIZE] = 0; + if (priv->len % NFT_REG32_SIZE) + dest[priv->len / NFT_REG32_SIZE] = 0; + switch (priv->base) { case NFT_PAYLOAD_LL_HEADER: if (!skb_mac_header_was_set(skb)) -- cgit v1.2.3 From b16fc097bc283184cde40e5b30d15705e1590410 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Fri, 21 Aug 2020 15:36:42 +0200 Subject: bpf: Fix two typos in uapi/linux/bpf.h Also remove trailing whitespaces in bpf_skb_get_tunnel_key example code. 
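For the nf_tables destination register zeroing fix above, the effect is easiest to see with a short, stand-alone sketch (plain user-space C, not nftables code; reg_copy and the sample values are hypothetical): a 2-byte selector such as a port fills only half of a 32-bit register, so the register has to be cleared before the memcpy(), while a 4-byte selector fills its register exactly, and indexing dest[len / 4] in that case would zero the following register instead.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define REG32_SIZE 4

/* Same shape as the fixed nft_data_copy(): clear the last, partially
 * filled 32-bit register only when the length is not a whole number of
 * registers; for a multiple of four bytes there is nothing to clear. */
static void reg_copy(uint32_t *dst, const void *src, unsigned int len)
{
	if (len % REG32_SIZE)
		dst[len / REG32_SIZE] = 0;
	memcpy(dst, src, len);
}

int main(void)
{
	uint32_t regs[4];
	uint32_t addr = 0x0100007f;     /* 127.0.0.1 as it appears in the element dump */
	uint16_t port = 0x7b00;         /* htons(123) on a little-endian host */

	memset(regs, 0xff, sizeof(regs));               /* stale data left by an earlier rule */

	reg_copy(&regs[0], &addr, sizeof(addr));        /* 4 bytes: fills register 0 exactly */
	reg_copy(&regs[1], &port, sizeof(port));        /* 2 bytes: upper half must be zeroed */

	printf("%08x %08x\n", regs[0], regs[1]);
	/* Prints "0100007f 00007b00" on a little-endian host; without the
	 * conditional zeroing the second word would keep its stale upper
	 * half and read back as "ffff7b00", i.e. the seemingly duplicate
	 * element from the report above. */
	return 0;
}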
Signed-off-by: Tobias Klauser Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200821133642.18870-1-tklauser@distanz.ch --- include/uapi/linux/bpf.h | 10 +++++----- tools/include/uapi/linux/bpf.h | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 0480f893facd..b6238b2209b7 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -767,7 +767,7 @@ union bpf_attr { * * Also, note that **bpf_trace_printk**\ () is slow, and should * only be used for debugging purposes. For this reason, a notice - * bloc (spanning several lines) is printed to kernel logs and + * block (spanning several lines) is printed to kernel logs and * states that the helper should not be used "for production use" * the first time this helper is used (or more precisely, when * **trace_printk**\ () buffers are allocated). For passing values @@ -1033,14 +1033,14 @@ union bpf_attr { * * int ret; * struct bpf_tunnel_key key = {}; - * + * * ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); * if (ret < 0) * return TC_ACT_SHOT; // drop packet - * + * * if (key.remote_ipv4 != 0x0a000001) * return TC_ACT_SHOT; // drop packet - * + * * return TC_ACT_OK; // accept packet * * This interface can also be used with all encapsulation devices @@ -1147,7 +1147,7 @@ union bpf_attr { * Description * Retrieve the realm or the route, that is to say the * **tclassid** field of the destination for the *skb*. The - * indentifier retrieved is a user-provided tag, similar to the + * identifier retrieved is a user-provided tag, similar to the * one used with the net_cls cgroup (see description for * **bpf_get_cgroup_classid**\ () helper), but here this tag is * held by a route (a destination entry), not by a task. diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 0480f893facd..b6238b2209b7 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -767,7 +767,7 @@ union bpf_attr { * * Also, note that **bpf_trace_printk**\ () is slow, and should * only be used for debugging purposes. For this reason, a notice - * bloc (spanning several lines) is printed to kernel logs and + * block (spanning several lines) is printed to kernel logs and * states that the helper should not be used "for production use" * the first time this helper is used (or more precisely, when * **trace_printk**\ () buffers are allocated). For passing values @@ -1033,14 +1033,14 @@ union bpf_attr { * * int ret; * struct bpf_tunnel_key key = {}; - * + * * ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); * if (ret < 0) * return TC_ACT_SHOT; // drop packet - * + * * if (key.remote_ipv4 != 0x0a000001) * return TC_ACT_SHOT; // drop packet - * + * * return TC_ACT_OK; // accept packet * * This interface can also be used with all encapsulation devices @@ -1147,7 +1147,7 @@ union bpf_attr { * Description * Retrieve the realm or the route, that is to say the * **tclassid** field of the destination for the *skb*. The - * indentifier retrieved is a user-provided tag, similar to the + * identifier retrieved is a user-provided tag, similar to the * one used with the net_cls cgroup (see description for * **bpf_get_cgroup_classid**\ () helper), but here this tag is * held by a route (a destination entry), not by a task. -- cgit v1.2.3 From df561f6688fef775baa341a0f5d960becd248b11 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. 
Silva" Date: Sun, 23 Aug 2020 17:36:59 -0500 Subject: treewide: Use fallthrough pseudo-keyword Replace the existing /* fall through */ comments and its variants with the new pseudo-keyword macro fallthrough[1]. Also, remove unnecessary fall-through markings when it is the case. [1] https://www.kernel.org/doc/html/v5.7/process/deprecated.html?highlight=fallthrough#implicit-switch-case-fall-through Signed-off-by: Gustavo A. R. Silva --- arch/alpha/kernel/module.c | 2 +- arch/alpha/kernel/signal.c | 2 +- arch/alpha/kernel/traps.c | 4 +- arch/arc/kernel/disasm.c | 2 +- arch/arc/kernel/signal.c | 2 +- arch/arc/kernel/unwind.c | 6 +- arch/arm/kernel/hw_breakpoint.c | 10 +- arch/arm/kernel/signal.c | 2 +- arch/arm/mach-ep93xx/crunch.c | 2 +- arch/arm/mach-mmp/pm-mmp2.c | 8 +- arch/arm/mach-mmp/pm-pxa910.c | 10 +- arch/arm/mach-omap2/id.c | 8 -- arch/arm/mach-omap2/omap_device.c | 2 +- arch/arm/mach-orion5x/dns323-setup.c | 2 +- arch/arm/mach-rpc/riscpc.c | 2 +- arch/arm/mach-tegra/reset.c | 2 +- arch/arm/mm/alignment.c | 4 +- arch/arm/mm/proc-v7-bugs.c | 2 +- arch/arm/plat-omap/dma.c | 6 +- arch/arm/probes/decode.c | 2 +- arch/arm/probes/kprobes/core.c | 2 +- arch/arm64/kernel/acpi.c | 2 +- arch/arm64/kernel/cpufeature.c | 2 +- arch/arm64/kernel/cpuinfo.c | 2 +- arch/arm64/kernel/hw_breakpoint.c | 6 +- arch/arm64/kernel/module.c | 8 +- arch/arm64/kernel/smp.c | 2 +- arch/arm64/kvm/handle_exit.c | 2 +- arch/arm64/kvm/hyp/include/hyp/debug-sr.h | 60 +++++------ arch/arm64/kvm/hyp/vgic-v3-sr.c | 16 +-- arch/arm64/mm/context.c | 2 +- arch/c6x/kernel/signal.c | 4 +- arch/csky/kernel/signal.c | 2 +- arch/h8300/kernel/signal.c | 2 +- arch/hexagon/kernel/module.c | 2 +- arch/hexagon/kernel/signal.c | 2 +- arch/ia64/kernel/crash.c | 2 +- arch/ia64/kernel/module.c | 2 +- arch/ia64/kernel/perfmon.c | 2 +- arch/ia64/kernel/signal.c | 2 +- arch/ia64/kernel/unaligned.c | 6 +- arch/ia64/kernel/unwind.c | 2 +- arch/m68k/atari/atakeyb.c | 2 +- arch/m68k/kernel/signal.c | 2 +- arch/m68k/mac/config.c | 2 +- arch/m68k/mac/via.c | 2 +- arch/m68k/mm/fault.c | 2 +- arch/microblaze/kernel/signal.c | 2 +- arch/mips/include/asm/unroll.h | 64 ++++++------ arch/nds32/kernel/fpu.c | 12 +-- arch/nds32/kernel/signal.c | 4 +- arch/openrisc/kernel/signal.c | 2 +- arch/parisc/kernel/signal.c | 2 +- arch/parisc/kernel/traps.c | 11 +- arch/parisc/mm/fault.c | 4 +- arch/powerpc/net/bpf_jit_comp.c | 2 +- arch/riscv/kernel/signal.c | 2 +- arch/riscv/net/bpf_jit_comp32.c | 4 +- arch/sh/drivers/platform_early.c | 2 +- arch/sh/kernel/disassemble.c | 4 +- arch/sh/kernel/kgdb.c | 2 +- arch/sh/kernel/signal_32.c | 2 +- arch/sparc/kernel/auxio_64.c | 1 - arch/sparc/kernel/central.c | 2 +- arch/sparc/kernel/kgdb_32.c | 2 +- arch/sparc/kernel/kgdb_64.c | 2 +- arch/sparc/kernel/pcr.c | 2 +- arch/sparc/kernel/prom_32.c | 2 +- arch/sparc/kernel/signal32.c | 4 +- arch/sparc/kernel/signal_32.c | 4 +- arch/sparc/kernel/signal_64.c | 4 +- arch/sparc/math-emu/math_32.c | 8 +- arch/sparc/net/bpf_jit_comp_32.c | 2 +- arch/um/kernel/signal.c | 2 +- arch/x86/boot/cmdline.c | 4 +- arch/x86/boot/compressed/kaslr.c | 2 +- arch/x86/events/intel/core.c | 6 +- arch/x86/events/intel/lbr.c | 2 +- arch/x86/kernel/alternative.c | 2 +- arch/x86/kernel/apic/io_apic.c | 4 +- arch/x86/kernel/apic/probe_32.c | 2 +- arch/x86/kernel/cpu/cacheinfo.c | 2 +- arch/x86/kernel/cpu/mce/inject.c | 2 +- arch/x86/kernel/cpu/mce/intel.c | 2 +- arch/x86/kernel/cpu/mtrr/cyrix.c | 2 +- arch/x86/kernel/hw_breakpoint.c | 2 +- arch/x86/kernel/kgdb.c | 4 +- arch/x86/kernel/mpparse.c | 4 +- 
arch/x86/kernel/ptrace.c | 2 +- arch/x86/kernel/reboot.c | 2 +- arch/x86/kernel/signal.c | 2 +- arch/x86/kernel/uprobes.c | 4 +- arch/x86/kvm/emulate.c | 2 +- arch/x86/kvm/hyperv.c | 2 +- arch/x86/kvm/irq_comm.c | 2 +- arch/x86/kvm/lapic.c | 6 +- arch/x86/kvm/mmu/mmu.c | 2 +- arch/x86/kvm/svm/svm.c | 2 +- arch/x86/kvm/vmx/vmx.c | 12 +-- arch/x86/kvm/x86.c | 11 +- arch/x86/lib/cmdline.c | 8 +- arch/x86/lib/insn-eval.c | 6 +- arch/x86/math-emu/errors.c | 2 +- arch/x86/math-emu/fpu_trig.c | 2 +- arch/x86/mm/ioremap.c | 2 +- arch/xtensa/kernel/signal.c | 2 +- block/badblocks.c | 2 +- block/bfq-iosched.c | 4 +- block/blk-wbt.c | 2 +- block/ioprio.c | 2 +- crypto/drbg.c | 2 +- crypto/tcrypt.c | 114 ++++++++++----------- drivers/accessibility/braille/braille_console.c | 2 +- drivers/ata/ahci_brcm.c | 2 +- drivers/ata/libahci_platform.c | 2 +- drivers/ata/libata-core.c | 16 +-- drivers/ata/libata-eh.c | 6 +- drivers/ata/libata-scsi.c | 4 +- drivers/ata/pata_atp867x.c | 4 +- drivers/ata/pata_serverworks.c | 2 +- drivers/ata/sata_mv.c | 12 +-- drivers/ata/sata_promise.c | 8 +- drivers/ata/sata_sx4.c | 2 +- drivers/atm/firestream.c | 2 +- drivers/atm/fore200e.c | 16 +-- drivers/atm/he.c | 4 +- drivers/atm/idt77105.c | 2 +- drivers/atm/lanai.c | 2 +- drivers/atm/zatm.c | 2 +- drivers/auxdisplay/panel.c | 6 +- drivers/base/firmware_loader/fallback.c | 4 +- drivers/block/aoe/aoecmd.c | 2 +- drivers/block/ataflop.c | 2 +- drivers/block/drbd/drbd_int.h | 2 +- drivers/block/drbd/drbd_main.c | 2 +- drivers/block/drbd/drbd_nl.c | 2 +- drivers/block/drbd/drbd_receiver.c | 12 +-- drivers/block/drbd/drbd_req.c | 4 +- drivers/block/floppy.c | 4 +- drivers/block/loop.c | 4 +- drivers/block/paride/pd.c | 4 +- drivers/block/pktcdvd.c | 2 +- drivers/block/rbd.c | 8 +- drivers/block/rsxx/core.c | 2 +- drivers/block/skd_main.c | 2 +- drivers/block/xen-blkback/blkback.c | 2 +- drivers/block/xen-blkback/xenbus.c | 2 +- drivers/block/xen-blkfront.c | 5 +- drivers/bus/ti-sysc.c | 2 +- drivers/char/agp/ali-agp.c | 2 +- drivers/char/ipmi/kcs_bmc.c | 2 +- drivers/char/lp.c | 4 +- drivers/char/mem.c | 2 +- drivers/char/nvram.c | 2 +- drivers/clocksource/timer-cadence-ttc.c | 4 +- drivers/cpufreq/p4-clockmod.c | 2 +- drivers/cpufreq/speedstep-lib.c | 2 +- drivers/cpufreq/ti-cpufreq.c | 4 +- drivers/crypto/axis/artpec6_crypto.c | 2 +- drivers/crypto/cavium/cpt/cptvf_reqmanager.c | 4 +- drivers/crypto/chelsio/chcr_ktls.c | 4 +- drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c | 4 +- drivers/crypto/qat/qat_common/adf_pf2vf_msg.c | 2 +- drivers/crypto/qat/qat_common/qat_uclo.c | 6 +- drivers/crypto/ux500/cryp/cryp.c | 12 +-- drivers/dma/amba-pl08x.c | 10 +- drivers/dma/fsldma.c | 2 +- drivers/dma/imx-dma.c | 2 +- drivers/dma/iop-adma.h | 12 +-- drivers/dma/nbpfaxi.c | 2 +- drivers/dma/pl330.c | 10 +- drivers/dma/sh/shdma-base.c | 2 +- drivers/edac/amd64_edac.c | 2 +- drivers/edac/pnd2_edac.c | 2 +- drivers/firewire/core-device.c | 2 +- drivers/firewire/core-iso.c | 2 +- drivers/firewire/core-topology.c | 2 +- drivers/firewire/core-transaction.c | 4 +- drivers/firewire/ohci.c | 4 +- drivers/gpio/gpio-aspeed-sgpio.c | 6 +- drivers/gpio/gpio-aspeed.c | 6 +- drivers/gpio/gpio-ath79.c | 2 +- drivers/gpio/gpio-eic-sprd.c | 4 +- drivers/gpio/gpio-stmpe.c | 4 +- drivers/gpio/gpiolib-acpi.c | 2 +- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/si_dpm.c | 2 +- drivers/gpu/drm/arm/malidp_hw.c | 6 +- 
drivers/gpu/drm/ast/ast_main.c | 2 +- drivers/gpu/drm/bridge/nwl-dsi.c | 2 - .../gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c | 4 +- drivers/gpu/drm/bridge/ti-sn65dsi86.c | 6 +- drivers/gpu/drm/drm_bufs.c | 2 +- drivers/gpu/drm/drm_dp_helper.c | 2 +- drivers/gpu/drm/drm_modes.c | 2 +- drivers/gpu/drm/exynos/exynos_drm_dsi.c | 10 +- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c | 6 +- drivers/gpu/drm/i915/display/icl_dsi.c | 6 +- drivers/gpu/drm/i915/display/intel_bios.c | 6 +- drivers/gpu/drm/i915/display/intel_cdclk.c | 10 +- drivers/gpu/drm/i915/display/intel_combo_phy.c | 6 +- drivers/gpu/drm/i915/display/intel_ddi.c | 4 +- drivers/gpu/drm/i915/display/intel_display.c | 20 ++-- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 8 +- drivers/gpu/drm/i915/display/intel_panel.c | 2 +- drivers/gpu/drm/i915/display/intel_sdvo.c | 12 +-- drivers/gpu/drm/i915/display/intel_sprite.c | 22 ++-- drivers/gpu/drm/i915/display/intel_tc.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_pages.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 6 +- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 2 +- drivers/gpu/drm/i915/gt/intel_ggtt.c | 2 +- drivers/gpu/drm/i915/gt/intel_ring_submission.c | 2 +- drivers/gpu/drm/i915/gvt/handlers.c | 2 +- drivers/gpu/drm/i915/i915_gpu_error.c | 2 +- drivers/gpu/drm/i915/i915_pmu.c | 2 +- drivers/gpu/drm/imx/ipuv3-plane.c | 2 +- drivers/gpu/drm/meson/meson_osd_afbcd.c | 2 +- drivers/gpu/drm/meson/meson_overlay.c | 4 +- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 4 +- drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 2 +- drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 2 +- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 2 +- drivers/gpu/drm/omapdrm/dss/venc.c | 2 +- drivers/gpu/drm/radeon/ci_dpm.c | 2 +- drivers/gpu/drm/radeon/r300.c | 4 +- drivers/gpu/drm/radeon/r420.c | 2 +- drivers/gpu/drm/radeon/r600_cs.c | 4 +- drivers/gpu/drm/radeon/radeon_uvd.c | 2 +- drivers/gpu/drm/radeon/si_dpm.c | 2 +- drivers/gpu/drm/radeon/uvd_v1_0.c | 2 +- drivers/gpu/drm/savage/savage_state.c | 10 +- drivers/gpu/drm/sti/sti_hdmi.c | 6 +- drivers/gpu/drm/sun4i/sun4i_tcon.c | 4 +- drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c | 2 +- drivers/gpu/drm/tegra/dc.c | 2 +- drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 2 +- drivers/gpu/drm/ttm/ttm_bo_vm.c | 2 +- drivers/gpu/drm/via/via_dmablit.c | 8 +- drivers/gpu/drm/xen/xen_drm_front.c | 3 - drivers/gpu/ipu-v3/ipu-dc.c | 2 +- drivers/hid/hid-lg-g15.c | 2 +- drivers/hid/hid-logitech-dj.c | 2 +- drivers/hid/hid-microsoft.c | 3 - drivers/hid/hid-rmi.c | 1 - drivers/hid/hid-roccat-kone.c | 2 +- drivers/hid/hid-uclogic-params.c | 2 +- drivers/hid/hid-wiimote-core.c | 2 - drivers/hid/usbhid/hiddev.c | 1 - drivers/hid/wacom_wac.c | 32 +++--- drivers/hsi/clients/ssi_protocol.c | 6 +- drivers/hsi/controllers/omap_ssi_core.c | 2 +- drivers/hv/hv_kvp.c | 2 +- drivers/hwmon/adt7462.c | 8 +- drivers/hwmon/emc1403.c | 4 +- drivers/hwmon/f71882fg.c | 4 +- drivers/hwmon/hwmon-vid.c | 4 +- drivers/hwmon/ina3221.c | 2 +- drivers/hwmon/nct6775.c | 2 +- drivers/hwmon/occ/common.c | 6 +- drivers/hwmon/w83627hf.c | 2 +- drivers/hwmon/w83781d.c | 2 +- drivers/hwmon/w83795.c | 2 +- drivers/hwtracing/coresight/coresight-cpu-debug.c | 4 +- drivers/hwtracing/coresight/coresight-etm4x.c | 1 - drivers/hwtracing/coresight/coresight-tmc.c | 2 - drivers/hwtracing/intel_th/sth.c | 4 +- drivers/i2c/busses/i2c-omap.c | 1 - drivers/i2c/busses/i2c-opal.c | 2 +- drivers/i3c/master/dw-i3c-master.c | 2 +- drivers/ide/hpt366.c | 6 +- drivers/ide/ide-cd.c | 4 +- drivers/ide/ide-floppy.c | 2 +- 
drivers/ide/ide-probe.c | 2 +- drivers/ide/ide-taskfile.c | 12 +-- drivers/ide/sis5513.c | 2 +- drivers/iio/accel/mma8452.c | 2 +- drivers/iio/adc/ab8500-gpadc.c | 2 +- drivers/iio/adc/cpcap-adc.c | 2 +- drivers/iio/chemical/sps30.c | 2 +- drivers/iio/dac/ad5592r-base.c | 2 - drivers/iio/dac/dpot-dac.c | 2 +- drivers/iio/health/max30102.c | 4 +- drivers/iio/imu/adis.c | 6 +- drivers/iio/industrialio-core.c | 4 +- drivers/iio/light/si1145.c | 2 +- drivers/iio/magnetometer/ak8974.c | 2 +- drivers/infiniband/core/cm.c | 12 +-- drivers/infiniband/core/cma.c | 3 +- drivers/infiniband/core/rw.c | 1 - drivers/infiniband/core/ucma.c | 4 +- drivers/infiniband/core/uverbs_ioctl.c | 4 +- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 2 +- drivers/infiniband/hw/bnxt_re/qplib_fp.c | 2 +- drivers/infiniband/hw/cxgb4/cm.c | 4 +- drivers/infiniband/hw/cxgb4/qp.c | 2 +- drivers/infiniband/hw/hfi1/pio_copy.c | 1 - drivers/infiniband/hw/i40iw/i40iw_cm.c | 2 +- drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 5 +- drivers/infiniband/hw/i40iw/i40iw_hw.c | 3 +- drivers/infiniband/hw/i40iw/i40iw_main.c | 22 ++-- drivers/infiniband/hw/i40iw/i40iw_puda.c | 4 +- drivers/infiniband/hw/i40iw/i40iw_utils.c | 8 +- drivers/infiniband/hw/i40iw/i40iw_verbs.c | 5 +- drivers/infiniband/hw/mlx4/cq.c | 4 +- drivers/infiniband/hw/mlx4/mcg.c | 2 +- drivers/infiniband/hw/mlx4/qp.c | 6 +- drivers/infiniband/hw/mlx5/cq.c | 4 +- drivers/infiniband/hw/mlx5/mad.c | 3 +- drivers/infiniband/hw/mlx5/main.c | 2 +- drivers/infiniband/hw/mlx5/qp.c | 4 +- drivers/infiniband/hw/mthca/mthca_av.c | 2 +- drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 4 +- drivers/infiniband/hw/qedr/verbs.c | 2 +- drivers/infiniband/hw/qib/qib_iba6120.c | 4 +- drivers/infiniband/hw/qib/qib_iba7220.c | 4 +- drivers/infiniband/hw/qib/qib_iba7322.c | 6 +- drivers/infiniband/hw/qib/qib_mad.c | 12 +-- drivers/infiniband/hw/qib/qib_rc.c | 18 ++-- drivers/infiniband/hw/qib/qib_sdma.c | 2 +- drivers/infiniband/hw/qib/qib_uc.c | 8 +- drivers/infiniband/hw/qib/qib_verbs.c | 2 +- drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c | 2 +- drivers/infiniband/sw/rdmavt/qp.c | 2 +- drivers/infiniband/sw/rxe/rxe_comp.c | 2 +- drivers/infiniband/sw/rxe/rxe_task.c | 2 +- drivers/infiniband/sw/rxe/rxe_verbs.c | 2 +- drivers/infiniband/sw/siw/siw_cm.c | 2 - drivers/infiniband/sw/siw/siw_qp_rx.c | 4 +- drivers/infiniband/sw/siw/siw_qp_tx.c | 4 +- drivers/infiniband/ulp/ipoib/ipoib_cm.c | 4 +- drivers/infiniband/ulp/ipoib/ipoib_main.c | 2 +- drivers/infiniband/ulp/iser/iser_verbs.c | 2 +- drivers/infiniband/ulp/isert/ib_isert.c | 10 +- drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c | 1 - drivers/input/joystick/fsia6b.c | 4 +- drivers/input/joystick/gamecon.c | 1 - drivers/input/tablet/wacom_serial4.c | 2 +- drivers/input/touchscreen/atmel_mxt_ts.c | 2 +- drivers/input/touchscreen/wm831x-ts.c | 2 +- drivers/iommu/amd/init.c | 2 +- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 11 +- drivers/iommu/intel/iommu.c | 1 - drivers/iommu/virtio-iommu.c | 2 +- drivers/irqchip/irq-gic-v3-its.c | 4 +- drivers/irqchip/irq-gic-v3.c | 8 +- drivers/irqchip/irq-imx-gpcv2.c | 2 +- drivers/irqchip/irq-mips-gic.c | 2 +- drivers/irqchip/irq-vic.c | 2 +- drivers/isdn/hardware/mISDN/avmfritz.c | 2 +- drivers/isdn/hardware/mISDN/hfc_multi_8xx.h | 1 - drivers/isdn/hardware/mISDN/hfcpci.c | 2 +- drivers/isdn/hardware/mISDN/hfcsusb.c | 2 +- drivers/isdn/hardware/mISDN/isdnhdlc.c | 2 +- drivers/isdn/hardware/mISDN/mISDNinfineon.c | 2 +- drivers/isdn/hardware/mISDN/mISDNisar.c | 8 +- drivers/isdn/mISDN/stack.c | 2 +- 
drivers/lightnvm/pblk-core.c | 2 +- drivers/macintosh/adbhid.c | 2 +- drivers/macintosh/smu.c | 2 +- drivers/md/bcache/journal.c | 2 +- drivers/md/bcache/util.c | 14 +-- drivers/md/dm-crypt.c | 2 +- drivers/md/dm-mpath.c | 2 +- drivers/md/dm.c | 2 +- drivers/md/md-autodetect.c | 4 +- drivers/md/md-bitmap.c | 2 +- drivers/md/raid5.c | 4 +- drivers/media/common/v4l2-tpg/v4l2-tpg-core.c | 36 +++---- drivers/media/dvb-core/dvb_net.c | 2 +- drivers/media/dvb-frontends/bcm3510.c | 2 +- drivers/media/dvb-frontends/dib0090.c | 2 +- drivers/media/dvb-frontends/dib3000mb.c | 2 +- drivers/media/dvb-frontends/dib7000p.c | 2 +- drivers/media/dvb-frontends/drx39xyj/drxj.c | 103 ++++++++++--------- drivers/media/dvb-frontends/drxd_hard.c | 12 +-- drivers/media/dvb-frontends/drxk_hard.c | 24 ++--- drivers/media/dvb-frontends/lgdt3306a.c | 2 +- drivers/media/dvb-frontends/mt352.c | 2 +- drivers/media/dvb-frontends/mxl5xx.c | 2 +- drivers/media/dvb-frontends/or51132.c | 2 +- drivers/media/dvb-frontends/s5h1411.c | 2 +- drivers/media/dvb-frontends/zl10353.c | 4 +- drivers/media/pci/cx23885/cx23885-cards.c | 4 +- drivers/media/pci/ddbridge/ddbridge-core.c | 23 +++-- drivers/media/pci/meye/meye.c | 2 +- drivers/media/pci/ttpci/av7110.c | 4 +- drivers/media/pci/ttpci/av7110_hw.c | 2 +- drivers/media/pci/ttpci/av7110_ipack.c | 2 +- drivers/media/pci/ttpci/budget-av.c | 2 +- drivers/media/pci/ttpci/budget.c | 4 +- drivers/media/platform/sh_vou.c | 4 +- drivers/media/radio/radio-si476x.c | 3 +- drivers/media/radio/tea575x.c | 2 +- drivers/media/rc/bpf-lirc.c | 2 +- drivers/media/rc/ir-rc6-decoder.c | 2 +- drivers/media/rc/ir-sony-decoder.c | 2 +- drivers/media/tuners/xc5000.c | 2 +- drivers/media/usb/b2c2/flexcop-usb.c | 2 +- drivers/media/usb/cpia2/cpia2_core.c | 36 +++---- drivers/media/usb/cx231xx/cx231xx-video.c | 2 +- drivers/media/usb/dvb-usb/dib0700_devices.c | 2 +- drivers/media/usb/dvb-usb/dw2102.c | 6 +- drivers/media/v4l2-core/v4l2-ctrls.c | 2 +- drivers/media/v4l2-core/v4l2-ioctl.c | 2 - drivers/media/v4l2-core/videobuf-core.c | 2 +- drivers/memory/omap-gpmc.c | 1 - drivers/memstick/core/ms_block.c | 12 +-- drivers/memstick/host/jmb38x_ms.c | 4 +- drivers/memstick/host/tifm_ms.c | 4 +- drivers/message/fusion/mptbase.c | 6 +- drivers/message/fusion/mptsas.c | 2 +- drivers/message/fusion/mptscsih.c | 4 +- drivers/mfd/db8500-prcmu.c | 4 +- drivers/mfd/iqs62x.c | 6 +- drivers/mfd/mxs-lradc.c | 2 +- drivers/mfd/omap-usb-host.c | 4 +- drivers/mfd/rave-sp.c | 4 +- drivers/mfd/syscon.c | 2 +- drivers/misc/eeprom/at25.c | 10 +- drivers/misc/mic/scif/scif_api.c | 4 +- drivers/misc/mic/scif/scif_rma.c | 2 +- drivers/misc/sgi-gru/grukservices.c | 4 +- drivers/misc/sgi-xp/xpc_main.c | 4 +- drivers/misc/sgi-xp/xpc_partition.c | 4 +- drivers/misc/sgi-xp/xpc_uv.c | 2 +- drivers/mmc/core/host.c | 2 +- drivers/mmc/host/atmel-mci.c | 8 +- drivers/mmc/host/davinci_mmc.c | 2 +- drivers/mmc/host/dw_mmc-k3.c | 2 +- drivers/mmc/host/dw_mmc.c | 6 +- drivers/mmc/host/jz4740_mmc.c | 4 +- drivers/mmc/host/meson-mx-sdio.c | 2 +- drivers/mmc/host/renesas_sdhi_core.c | 2 +- drivers/mmc/host/sdhci-esdhc-imx.c | 2 +- drivers/mmc/host/sdhci-s3c.c | 2 +- drivers/mmc/host/sdhci-sprd.c | 2 +- drivers/mmc/host/sdhci-xenon-phy.c | 2 +- drivers/mmc/host/sdhci.c | 2 +- drivers/mmc/host/tifm_sd.c | 2 +- drivers/mmc/host/usdhi6rol0.c | 6 +- drivers/mux/adgs1408.c | 2 +- drivers/net/appletalk/cops.c | 2 +- drivers/net/arcnet/arc-rimi.c | 6 +- drivers/net/arcnet/com20020-isa.c | 12 +-- drivers/net/arcnet/com90io.c | 4 +- 
drivers/net/arcnet/com90xx.c | 6 +- drivers/net/bonding/bond_3ad.c | 4 +- drivers/net/bonding/bond_main.c | 8 +- drivers/net/can/at91_can.c | 4 +- drivers/net/can/peak_canfd/peak_pciefd_main.c | 2 +- drivers/net/can/sja1000/sja1000_platform.c | 2 +- drivers/net/can/slcan.c | 4 +- drivers/net/can/spi/mcp251x.c | 2 +- drivers/net/can/usb/peak_usb/pcan_usb.c | 2 +- drivers/net/can/usb/peak_usb/pcan_usb_core.c | 2 +- drivers/net/can/usb/peak_usb/pcan_usb_pro.c | 4 +- drivers/net/dsa/b53/b53_common.c | 2 +- drivers/net/dsa/b53/b53_serdes.c | 2 +- drivers/net/dsa/bcm_sf2.c | 2 +- drivers/net/dsa/microchip/ksz9477.c | 2 +- drivers/net/dsa/mt7530.c | 2 +- drivers/net/dsa/mv88e6xxx/chip.c | 2 +- drivers/net/ethernet/3com/3c509.c | 4 +- drivers/net/ethernet/3com/3c574_cs.c | 2 +- drivers/net/ethernet/8390/axnet_cs.c | 2 +- drivers/net/ethernet/8390/pcnet_cs.c | 2 +- drivers/net/ethernet/alacritech/slicoss.c | 12 +-- drivers/net/ethernet/alteon/acenic.c | 2 +- drivers/net/ethernet/amd/amd8111e.c | 2 +- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 6 +- drivers/net/ethernet/broadcom/bgmac-bcma.c | 2 +- drivers/net/ethernet/broadcom/bgmac-platform.c | 2 +- drivers/net/ethernet/broadcom/bnx2.c | 14 +-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 14 +-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 4 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 4 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 4 +- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 16 +-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 4 +- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 4 +- drivers/net/ethernet/broadcom/cnic.c | 4 +- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 4 +- drivers/net/ethernet/broadcom/genet/bcmmii.c | 2 +- drivers/net/ethernet/broadcom/tg3.c | 54 +++++----- drivers/net/ethernet/brocade/bna/bfa_ioc.c | 6 +- drivers/net/ethernet/brocade/bna/bna_enet.c | 2 +- drivers/net/ethernet/brocade/bna/bna_tx_rx.c | 2 +- drivers/net/ethernet/cadence/macb_ptp.c | 2 +- drivers/net/ethernet/cavium/liquidio/lio_main.c | 29 +++--- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 25 +++-- .../net/ethernet/cavium/thunder/nicvf_ethtool.c | 2 +- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 4 +- drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 2 +- drivers/net/ethernet/chelsio/cxgb3/l2t.c | 2 +- drivers/net/ethernet/chelsio/cxgb4/l2t.c | 2 +- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 6 +- .../net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 2 +- drivers/net/ethernet/cisco/enic/enic_main.c | 2 +- drivers/net/ethernet/davicom/dm9000.c | 2 +- drivers/net/ethernet/dec/tulip/de4x5.c | 6 +- drivers/net/ethernet/dec/tulip/tulip_core.c | 2 +- drivers/net/ethernet/dec/tulip/winbond-840.c | 2 +- drivers/net/ethernet/emulex/benet/be_ethtool.c | 2 +- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 2 +- drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c | 2 +- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 4 +- drivers/net/ethernet/freescale/fman/fman_memac.c | 2 +- drivers/net/ethernet/freescale/fman/fman_port.c | 4 +- drivers/net/ethernet/freescale/ucc_geth.c | 2 +- drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 2 +- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 2 +- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 4 +- drivers/net/ethernet/ibm/ehea/ehea_main.c | 2 +- drivers/net/ethernet/ibm/emac/core.c | 2 +- drivers/net/ethernet/intel/e1000e/netdev.c | 1 - drivers/net/ethernet/intel/igb/igb_main.c | 1 - drivers/net/ethernet/marvell/mvneta.c | 4 +- 
drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c | 2 +- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 4 +- .../net/ethernet/marvell/octeontx2/af/rvu_nix.c | 2 +- drivers/net/ethernet/marvell/skge.c | 2 +- drivers/net/ethernet/marvell/sky2.c | 4 +- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 6 +- drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c | 4 +- drivers/net/ethernet/mellanox/mlxsw/core.c | 18 ++-- drivers/net/ethernet/mellanox/mlxsw/core_env.c | 6 +- drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c | 4 +- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 4 +- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 10 +- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 32 +++--- .../net/ethernet/mellanox/mlxsw/spectrum_span.c | 6 +- .../ethernet/mellanox/mlxsw/spectrum_switchdev.c | 12 +-- drivers/net/ethernet/microchip/lan743x_ethtool.c | 2 +- drivers/net/ethernet/mscc/ocelot.c | 2 +- drivers/net/ethernet/natsemi/natsemi.c | 2 +- drivers/net/ethernet/neterion/vxge/vxge-config.c | 6 +- drivers/net/ethernet/netronome/nfp/crypto/tls.c | 2 +- drivers/net/ethernet/netronome/nfp/flower/action.c | 2 +- drivers/net/ethernet/netronome/nfp/flower/cmsg.c | 2 +- .../net/ethernet/netronome/nfp/flower/offload.c | 2 +- drivers/net/ethernet/netronome/nfp/nfp_asm.c | 2 +- .../net/ethernet/netronome/nfp/nfp_net_common.c | 4 +- .../ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c | 4 +- .../net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c | 2 +- .../net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c | 2 +- drivers/net/ethernet/packetengines/yellowfin.c | 2 +- .../ethernet/qlogic/netxen/netxen_nic_ethtool.c | 4 +- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 2 +- drivers/net/ethernet/qlogic/qed/qed_dev.c | 4 +- drivers/net/ethernet/qlogic/qed/qed_main.c | 4 +- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 10 +- drivers/net/ethernet/qlogic/qla3xxx.c | 2 +- .../net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 4 +- drivers/net/ethernet/realtek/r8169_main.c | 4 +- drivers/net/ethernet/rocker/rocker_main.c | 8 +- drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c | 4 +- drivers/net/ethernet/sfc/falcon/ethtool.c | 2 +- drivers/net/ethernet/sfc/falcon/farch.c | 14 +-- drivers/net/ethernet/sfc/farch.c | 14 +-- drivers/net/ethernet/sfc/mcdi_filters.c | 2 +- drivers/net/ethernet/sfc/mcdi_port_common.c | 2 +- drivers/net/ethernet/sfc/rx.c | 2 +- drivers/net/ethernet/sis/sis900.c | 2 +- drivers/net/ethernet/smsc/smc911x.c | 2 +- drivers/net/ethernet/socionext/netsec.c | 4 +- .../net/ethernet/stmicro/stmmac/dwmac-anarion.c | 7 +- .../net/ethernet/stmicro/stmmac/stmmac_selftests.c | 4 +- drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 2 +- drivers/net/ethernet/sun/cassini.c | 2 +- drivers/net/ethernet/sun/niu.c | 4 +- drivers/net/ethernet/sun/sungem.c | 2 +- drivers/net/ethernet/ti/cpsw-phy-sel.c | 4 +- drivers/net/ethernet/ti/cpsw_priv.c | 4 +- drivers/net/ethernet/ti/tlan.c | 2 +- drivers/net/ethernet/toshiba/ps3_gelic_wireless.c | 2 +- drivers/net/ethernet/toshiba/spider_net.c | 28 ++--- drivers/net/ethernet/xircom/xirc2ps_cs.c | 2 +- drivers/net/fddi/skfp/pcmplc.c | 4 +- drivers/net/fjes/fjes_main.c | 2 +- drivers/net/hamradio/baycom_epp.c | 2 +- drivers/net/hamradio/mkiss.c | 5 +- drivers/net/macvlan.c | 2 +- drivers/net/mii.c | 2 +- drivers/net/netdevsim/bus.c | 2 +- drivers/net/netdevsim/fib.c | 6 +- drivers/net/phy/adin.c | 4 +- drivers/net/phy/dp83640.c | 8 +- drivers/net/phy/fixed_phy.c | 4 +- drivers/net/phy/phy.c | 4 +- drivers/net/phy/phy_device.c | 2 +- drivers/net/phy/phylink.c | 4 +- drivers/net/phy/sfp-bus.c | 4 +- 
drivers/net/phy/sfp.c | 12 +-- drivers/net/plip/plip.c | 26 ++--- drivers/net/tun.c | 6 +- drivers/net/usb/aqc111.c | 6 +- drivers/net/usb/catc.c | 2 +- drivers/net/usb/cdc-phonet.c | 2 +- drivers/net/usb/lan78xx.c | 4 +- drivers/net/usb/pegasus.c | 4 +- drivers/net/usb/r8152.c | 6 +- drivers/net/usb/rtl8150.c | 2 +- drivers/net/usb/usbnet.c | 6 +- drivers/net/veth.c | 8 +- drivers/net/virtio_net.c | 6 +- drivers/net/vmxnet3/vmxnet3_ethtool.c | 2 +- drivers/net/wan/lapbether.c | 2 +- drivers/net/wan/sdla.c | 2 +- drivers/net/wan/x25_asy.c | 2 +- drivers/net/wimax/i2400m/control.c | 2 +- drivers/net/wimax/i2400m/usb-fw.c | 2 +- drivers/net/wimax/i2400m/usb-tx.c | 2 +- drivers/net/wimax/i2400m/usb.c | 2 +- drivers/net/xen-netback/hash.c | 2 +- drivers/net/xen-netback/xenbus.c | 2 +- drivers/net/xen-netfront.c | 2 +- drivers/nfc/pn533/pn533.c | 4 +- drivers/nfc/st21nfca/dep.c | 2 +- drivers/nfc/trf7970a.c | 4 +- drivers/ntb/ntb_transport.c | 4 +- drivers/nvme/host/core.c | 12 +-- drivers/nvme/host/pci.c | 2 +- drivers/nvme/host/rdma.c | 2 +- drivers/nvme/host/tcp.c | 1 - drivers/nvme/target/core.c | 2 +- drivers/nvme/target/fcloop.c | 2 +- drivers/nvme/target/io-cmd-bdev.c | 1 - drivers/nvme/target/rdma.c | 4 +- drivers/parport/ieee1284.c | 6 +- drivers/parport/parport_pc.c | 2 +- drivers/pci/controller/dwc/pci-imx6.c | 6 +- drivers/pci/controller/pci-rcar-gen2.c | 2 +- drivers/pci/hotplug/ibmphp_res.c | 2 +- drivers/pci/hotplug/pciehp_ctrl.c | 4 +- drivers/pci/hotplug/shpchp_ctrl.c | 4 +- drivers/pci/pci.c | 4 +- drivers/pci/proc.c | 2 +- drivers/pci/quirks.c | 4 +- drivers/pci/setup-bus.c | 2 +- drivers/pci/xen-pcifront.c | 2 +- drivers/pcmcia/db1xxx_ss.c | 8 +- drivers/perf/arm-ccn.c | 2 +- drivers/perf/arm_spe_pmu.c | 4 +- drivers/phy/qualcomm/phy-qcom-usb-hs.c | 2 +- drivers/phy/rockchip/phy-rockchip-inno-usb2.c | 8 +- drivers/platform/olpc/olpc-xo175-ec.c | 2 +- drivers/platform/x86/acer-wmi.c | 8 +- drivers/platform/x86/dell-laptop.c | 4 +- drivers/platform/x86/surfacepro3_button.c | 8 +- drivers/platform/x86/thinkpad_acpi.c | 6 +- drivers/platform/x86/toshiba_acpi.c | 2 +- drivers/power/supply/ab8500_charger.c | 4 +- drivers/power/supply/ab8500_fg.c | 4 +- drivers/power/supply/abx500_chargalg.c | 26 ++--- drivers/power/supply/axp20x_usb_power.c | 2 +- drivers/power/supply/cros_usbpd-charger.c | 2 +- drivers/power/supply/max8925_power.c | 2 +- drivers/power/supply/wm831x_power.c | 2 +- drivers/power/supply/wm8350_power.c | 2 +- drivers/ps3/ps3av.c | 2 +- drivers/ps3/ps3av_cmd.c | 4 +- drivers/rapidio/devices/rio_mport_cdev.c | 2 +- drivers/regulator/axp20x-regulator.c | 8 +- drivers/regulator/core.c | 2 +- drivers/regulator/slg51000-regulator.c | 2 +- drivers/regulator/twl6030-regulator.c | 2 +- drivers/remoteproc/omap_remoteproc.c | 1 - drivers/reset/reset-imx7.c | 14 +-- drivers/rpmsg/qcom_glink_native.c | 4 +- drivers/rtc/rtc-m41t80.c | 2 +- drivers/rtc/rtc-pcf85063.c | 2 +- drivers/rtc/rtc-pcf8523.c | 2 +- drivers/rtc/rtc-stmp3xxx.c | 2 +- drivers/s390/net/ctcm_fsms.c | 2 +- drivers/s390/net/ctcm_mpc.c | 6 +- drivers/s390/net/qeth_core_main.c | 4 +- drivers/s390/net/qeth_ethtool.c | 6 +- drivers/s390/net/qeth_l2_main.c | 2 +- drivers/s390/net/qeth_l3_main.c | 2 +- drivers/scsi/53c700.c | 2 +- drivers/scsi/BusLogic.c | 2 +- drivers/scsi/FlashPoint.c | 9 +- drivers/scsi/NCR5380.c | 2 +- drivers/scsi/aacraid/aachba.c | 8 +- drivers/scsi/aacraid/commsup.c | 2 +- drivers/scsi/aacraid/linit.c | 2 +- drivers/scsi/aic7xxx/aic79xx_core.c | 40 ++++---- drivers/scsi/aic7xxx/aic79xx_osm.c | 2 
+- drivers/scsi/aic7xxx/aic7xxx_core.c | 28 ++--- drivers/scsi/aic94xx/aic94xx_scb.c | 10 +- drivers/scsi/aic94xx/aic94xx_tmf.c | 2 +- drivers/scsi/arcmsr/arcmsr_hba.c | 2 +- drivers/scsi/arm/fas216.c | 12 +-- drivers/scsi/be2iscsi/be_iscsi.c | 2 +- drivers/scsi/be2iscsi/be_main.c | 2 +- drivers/scsi/bfa/bfa_fcpim.c | 6 +- drivers/scsi/bfa/bfa_fcs_lport.c | 4 +- drivers/scsi/bfa/bfa_fcs_rport.c | 14 +-- drivers/scsi/bfa/bfa_ioc.c | 6 +- drivers/scsi/bfa/bfa_svc.c | 2 +- drivers/scsi/bnx2fc/bnx2fc_hwi.c | 1 - drivers/scsi/csiostor/csio_hw.c | 2 +- drivers/scsi/csiostor/csio_lnode.c | 1 - drivers/scsi/csiostor/csio_wr.c | 2 +- drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 2 +- drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 2 +- drivers/scsi/cxlflash/main.c | 28 ++--- drivers/scsi/cxlflash/superpipe.c | 10 +- drivers/scsi/device_handler/scsi_dh_hp_sw.c | 4 +- drivers/scsi/esas2r/esas2r_flash.c | 2 +- drivers/scsi/esas2r/esas2r_init.c | 4 +- drivers/scsi/esp_scsi.c | 4 +- drivers/scsi/fcoe/fcoe_ctlr.c | 8 +- drivers/scsi/g_NCR5380.c | 2 +- drivers/scsi/hisi_sas/hisi_sas_main.c | 2 +- drivers/scsi/hpsa.c | 10 +- drivers/scsi/ibmvscsi/ibmvfc.c | 6 +- drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 6 +- drivers/scsi/imm.c | 14 +-- drivers/scsi/isci/phy.c | 2 +- drivers/scsi/isci/remote_device.c | 4 +- drivers/scsi/isci/remote_node_context.c | 6 +- drivers/scsi/isci/request.c | 2 +- drivers/scsi/libfc/fc_exch.c | 4 +- drivers/scsi/libfc/fc_fcp.c | 8 +- drivers/scsi/libfc/fc_lport.c | 2 +- drivers/scsi/libfc/fc_rport.c | 2 +- drivers/scsi/libiscsi.c | 6 +- drivers/scsi/libiscsi_tcp.c | 2 +- drivers/scsi/libsas/sas_ata.c | 2 +- drivers/scsi/libsas/sas_discover.c | 2 +- drivers/scsi/libsas/sas_expander.c | 2 +- drivers/scsi/libsas/sas_scsi_host.c | 2 +- drivers/scsi/lpfc/lpfc_ct.c | 4 +- drivers/scsi/lpfc/lpfc_els.c | 2 +- drivers/scsi/lpfc/lpfc_hbadisc.c | 7 +- drivers/scsi/lpfc/lpfc_nportdisc.c | 2 +- drivers/scsi/lpfc/lpfc_nvme.c | 2 +- drivers/scsi/lpfc/lpfc_scsi.c | 8 +- drivers/scsi/lpfc/lpfc_sli.c | 28 ++--- drivers/scsi/megaraid.c | 12 +-- drivers/scsi/megaraid/megaraid_mbox.c | 2 +- drivers/scsi/megaraid/megaraid_sas_base.c | 2 +- drivers/scsi/megaraid/megaraid_sas_fusion.c | 2 +- drivers/scsi/mesh.c | 2 +- drivers/scsi/mpt3sas/mpt3sas_base.c | 2 +- drivers/scsi/mpt3sas/mpt3sas_ctl.c | 2 +- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 8 +- drivers/scsi/myrb.c | 8 +- drivers/scsi/ncr53c8xx.c | 14 +-- drivers/scsi/pcmcia/nsp_cs.c | 2 +- drivers/scsi/ppa.c | 10 +- drivers/scsi/qla2xxx/qla_gs.c | 2 +- drivers/scsi/qla2xxx/qla_init.c | 2 +- drivers/scsi/qla2xxx/qla_iocb.c | 2 +- drivers/scsi/qla2xxx/qla_isr.c | 10 +- drivers/scsi/qla2xxx/qla_sup.c | 6 +- drivers/scsi/qla2xxx/qla_target.c | 6 +- drivers/scsi/qla4xxx/ql4_os.c | 2 +- drivers/scsi/qlogicpti.c | 20 ++-- drivers/scsi/scsi_error.c | 28 ++--- drivers/scsi/scsi_ioctl.c | 4 +- drivers/scsi/scsi_lib.c | 2 +- drivers/scsi/smartpqi/smartpqi_init.c | 17 ++- drivers/scsi/sr.c | 4 +- drivers/scsi/st.c | 8 +- drivers/scsi/sun3_scsi.c | 4 +- drivers/scsi/sym53c8xx_2/sym_fw.c | 2 +- drivers/scsi/sym53c8xx_2/sym_hipd.c | 4 +- drivers/scsi/sym53c8xx_2/sym_nvram.c | 2 +- drivers/scsi/ufs/ufs_bsg.c | 2 +- drivers/scsi/ufs/ufshcd.c | 8 +- drivers/scsi/virtio_scsi.c | 2 +- drivers/scsi/vmw_pvscsi.c | 2 +- drivers/scsi/wd33c93.c | 2 +- drivers/scsi/xen-scsifront.c | 2 +- drivers/soc/qcom/socinfo.c | 22 ++-- drivers/soc/tegra/pmc.c | 2 +- drivers/spi/spi-bcm2835aux.c | 4 +- drivers/spi/spi-fsl-cpm.c | 4 +- drivers/spi/spi-sprd-adi.c | 2 +- drivers/ssb/driver_chipcommon.c | 2 +- 
drivers/ssb/driver_mipscore.c | 2 +- drivers/ssb/scan.c | 2 +- drivers/staging/media/atomisp/pci/atomisp_cmd.c | 2 +- .../media/atomisp/pci/atomisp_compat_css20.c | 8 +- drivers/staging/media/atomisp/pci/atomisp_ioctl.c | 1 - drivers/staging/media/atomisp/pci/atomisp_v4l2.c | 2 +- drivers/staging/media/atomisp/pci/hmm/hmm_bo.c | 2 +- drivers/staging/media/atomisp/pci/sh_css.c | 2 +- drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c | 2 +- .../staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c | 2 +- drivers/staging/media/imx/imx-media-csi.c | 2 +- drivers/staging/media/usbvision/usbvision-i2c.c | 6 +- drivers/target/iscsi/cxgbit/cxgbit_main.c | 2 +- drivers/target/iscsi/iscsi_target.c | 2 +- drivers/target/target_core_pr.c | 4 +- drivers/target/target_core_sbc.c | 2 +- drivers/target/target_core_transport.c | 4 +- drivers/target/tcm_fc/tfc_cmd.c | 2 +- drivers/thermal/qcom/tsens-v0_1.c | 8 +- drivers/thermal/qcom/tsens-v1.c | 4 +- drivers/thunderbolt/ctl.c | 2 +- drivers/thunderbolt/switch.c | 2 +- drivers/thunderbolt/tunnel.c | 4 +- drivers/tty/hvc/hvc_xen.c | 2 +- drivers/tty/mips_ejtag_fdc.c | 2 +- drivers/tty/n_gsm.c | 4 +- drivers/tty/n_hdlc.c | 2 +- drivers/tty/n_r3964.c | 1 - drivers/tty/serial/8250/8250_em.c | 2 +- drivers/tty/serial/8250/8250_fintek.c | 2 +- drivers/tty/serial/8250/8250_pci.c | 2 +- drivers/tty/serial/8250/8250_port.c | 2 +- drivers/tty/serial/8250/8250_uniphier.c | 6 +- drivers/tty/serial/atmel_serial.c | 2 +- drivers/tty/serial/omap-serial.c | 2 - drivers/tty/serial/rda-uart.c | 2 +- drivers/tty/serial/serial-tegra.c | 2 +- drivers/tty/serial/serial_core.c | 2 +- drivers/tty/serial/sunsu.c | 2 +- drivers/tty/serial/sunzilog.c | 2 +- drivers/tty/serial/xilinx_uartps.c | 2 +- drivers/tty/tty_ioctl.c | 2 +- drivers/tty/vt/vt.c | 6 +- drivers/usb/c67x00/c67x00-sched.c | 3 +- drivers/usb/core/hub.c | 2 +- drivers/usb/dwc3/core.c | 5 +- drivers/usb/gadget/function/f_mass_storage.c | 1 - drivers/usb/gadget/udc/atmel_usba_udc.c | 2 +- drivers/usb/gadget/udc/fsl_udc_core.c | 2 +- drivers/usb/gadget/udc/pxa25x_udc.c | 4 +- drivers/usb/host/isp116x-hcd.c | 6 +- drivers/usb/host/pci-quirks.c | 3 +- drivers/usb/host/xhci-dbgcap.c | 2 +- drivers/usb/host/xhci-hub.c | 2 +- drivers/usb/host/xhci-mem.c | 4 +- drivers/usb/host/xhci-ring.c | 2 +- drivers/usb/host/xhci.c | 2 +- drivers/usb/musb/cppi_dma.c | 2 +- drivers/usb/musb/musb_core.c | 13 ++- drivers/usb/musb/musb_dsps.c | 6 +- drivers/usb/musb/musb_gadget_ep0.c | 4 +- drivers/usb/musb/musb_host.c | 6 +- drivers/usb/musb/musb_virthub.c | 2 +- drivers/usb/musb/omap2430.c | 2 +- drivers/usb/musb/tusb6010.c | 2 +- drivers/usb/storage/sddr55.c | 2 +- drivers/usb/storage/uas.c | 2 +- drivers/usb/typec/tcpm/tcpci.c | 2 +- drivers/vfio/pci/vfio_pci.c | 2 +- drivers/vfio/vfio_iommu_type1.c | 2 +- drivers/video/backlight/adp8860_bl.c | 2 +- drivers/video/fbdev/acornfb.c | 2 +- drivers/video/fbdev/arcfb.c | 2 +- drivers/video/fbdev/atmel_lcdfb.c | 4 +- drivers/video/fbdev/aty/radeon_pm.c | 6 +- drivers/video/fbdev/cirrusfb.c | 4 +- drivers/video/fbdev/controlfb.c | 2 +- drivers/video/fbdev/core/fbmem.c | 2 +- drivers/video/fbdev/fsl-diu-fb.c | 4 +- drivers/video/fbdev/gxt4500.c | 2 +- drivers/video/fbdev/hyperv_fb.c | 4 +- drivers/video/fbdev/i740fb.c | 2 +- drivers/video/fbdev/mmp/fb/mmpfb.c | 2 - drivers/video/fbdev/nvidia/nv_hw.c | 2 +- drivers/video/fbdev/offb.c | 4 +- drivers/video/fbdev/omap/lcdc.c | 4 +- drivers/video/fbdev/omap/omapfb_main.c | 20 ++-- drivers/video/fbdev/omap2/omapfb/dss/dispc.c | 4 +- 
drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c | 2 +- drivers/video/fbdev/omap2/omapfb/omapfb-main.c | 2 +- drivers/video/fbdev/pm2fb.c | 4 +- drivers/video/fbdev/pxa168fb.c | 4 - drivers/video/fbdev/pxafb.c | 2 +- drivers/video/fbdev/riva/fbdev.c | 2 +- drivers/video/fbdev/s3c-fb.c | 6 +- drivers/video/fbdev/sa1100fb.c | 2 +- drivers/video/fbdev/savage/savagefb_driver.c | 2 +- drivers/video/fbdev/sh_mobile_lcdcfb.c | 4 +- drivers/video/fbdev/sis/sis_main.c | 8 +- drivers/video/fbdev/sm501fb.c | 2 +- drivers/video/fbdev/stifb.c | 4 +- drivers/video/fbdev/tdfxfb.c | 2 +- drivers/video/fbdev/via/lcd.c | 2 +- drivers/video/fbdev/xen-fbfront.c | 2 +- drivers/watchdog/sc1200wdt.c | 2 +- drivers/watchdog/wdrtas.c | 2 +- drivers/xen/pvcalls-front.c | 2 +- drivers/xen/xen-acpi-memhotplug.c | 2 +- drivers/xen/xen-pciback/xenbus.c | 2 +- drivers/xen/xen-scsiback.c | 2 +- drivers/xen/xenbus/xenbus_probe_frontend.c | 4 +- fs/9p/vfs_file.c | 2 +- fs/adfs/dir_f.c | 12 +-- fs/affs/inode.c | 2 +- fs/affs/super.c | 6 +- fs/afs/cmservice.c | 16 +-- fs/afs/file.c | 2 +- fs/afs/flock.c | 2 +- fs/afs/fsclient.c | 42 ++++---- fs/afs/misc.c | 18 ++-- fs/afs/rotate.c | 2 +- fs/afs/rxrpc.c | 6 +- fs/afs/vlclient.c | 24 ++--- fs/afs/write.c | 2 +- fs/afs/yfsclient.c | 50 ++++----- fs/aio.c | 2 +- fs/buffer.c | 2 +- fs/ceph/dir.c | 2 +- fs/ceph/file.c | 2 +- fs/cifs/cifssmb.c | 2 +- fs/cifs/connect.c | 10 +- fs/cifs/sess.c | 6 +- fs/cifs/smb2pdu.c | 2 +- fs/configfs/dir.c | 4 +- fs/dax.c | 2 +- fs/dlm/lock.c | 2 +- fs/erofs/zmap.c | 6 +- fs/ext2/inode.c | 4 +- fs/ext2/super.c | 2 +- fs/f2fs/f2fs.h | 2 +- fs/f2fs/node.c | 4 +- fs/fcntl.c | 4 +- fs/fs_context.c | 2 +- fs/fsopen.c | 2 +- fs/gfs2/bmap.c | 4 +- fs/gfs2/quota.c | 2 +- fs/hfsplus/wrapper.c | 2 +- fs/io_uring.c | 2 +- fs/iomap/seek.c | 4 +- fs/jffs2/fs.c | 2 +- fs/jffs2/readinode.c | 2 +- fs/libfs.c | 4 +- fs/locks.c | 6 +- fs/nfs/blocklayout/blocklayout.c | 2 +- fs/nfs/dir.c | 2 +- fs/nfs/filelayout/filelayout.c | 2 +- fs/nfs/flexfilelayout/flexfilelayout.c | 4 +- fs/nfs/fs_context.c | 22 ++-- fs/nfs/nfs3acl.c | 4 +- fs/nfs/nfs4file.c | 2 +- fs/nfs/nfs4idmap.c | 4 +- fs/nfs/nfs4proc.c | 32 +++--- fs/nfs/nfs4state.c | 14 +-- fs/nfs/pagelist.c | 2 +- fs/nfs/pnfs.c | 2 +- fs/nfs_common/nfsacl.c | 2 +- fs/nfsd/blocklayout.c | 4 +- fs/nfsd/nfs4callback.c | 2 +- fs/nfsd/nfs4layouts.c | 2 +- fs/nfsd/nfs4proc.c | 2 +- fs/nfsd/nfs4state.c | 12 +-- fs/nfsd/nfsfh.c | 4 +- fs/nfsd/nfsproc.c | 2 +- fs/nfsd/nfssvc.c | 2 +- fs/nfsd/vfs.c | 4 +- fs/nilfs2/bmap.c | 2 +- fs/nilfs2/recovery.c | 2 +- fs/nilfs2/segment.c | 19 ++-- fs/notify/fanotify/fanotify_user.c | 2 +- fs/ocfs2/cluster/quorum.c | 2 +- fs/pstore/zone.c | 1 - fs/quota/quota.c | 2 +- fs/seq_file.c | 2 +- fs/signalfd.c | 2 +- fs/ubifs/lprops.c | 4 +- fs/udf/symlink.c | 2 +- fs/ufs/util.h | 12 +-- fs/vboxsf/utils.c | 2 +- include/linux/compat.h | 6 +- include/linux/filter.h | 2 +- include/linux/jhash.h | 26 ++--- include/linux/mm.h | 9 +- include/linux/signal.h | 12 +-- include/linux/skbuff.h | 12 +-- include/math-emu/op-common.h | 10 +- ipc/sem.c | 4 +- ipc/shm.c | 4 +- kernel/auditfilter.c | 2 +- kernel/bpf/cgroup.c | 2 +- kernel/bpf/cpumap.c | 2 +- kernel/bpf/syscall.c | 2 +- kernel/bpf/verifier.c | 4 +- kernel/capability.c | 2 +- kernel/compat.c | 6 +- kernel/debug/gdbstub.c | 6 +- kernel/debug/kdb/kdb_keyboard.c | 4 +- kernel/debug/kdb/kdb_support.c | 6 +- kernel/events/core.c | 2 +- kernel/irq/handle.c | 2 +- kernel/irq/manage.c | 4 +- kernel/kallsyms.c | 4 +- kernel/power/hibernate.c | 2 +- 
kernel/power/qos.c | 4 +- kernel/sched/core.c | 2 +- kernel/sched/topology.c | 6 +- kernel/signal.c | 2 +- kernel/sys.c | 2 +- kernel/time/hrtimer.c | 2 +- kernel/time/posix-timers.c | 4 +- kernel/time/tick-broadcast.c | 2 +- kernel/time/timer.c | 2 +- kernel/trace/blktrace.c | 2 +- kernel/trace/trace_events_filter.c | 4 +- lib/asn1_decoder.c | 4 +- lib/assoc_array.c | 2 +- lib/bootconfig.c | 4 +- lib/cmdline.c | 10 +- lib/dim/net_dim.c | 2 +- lib/dim/rdma_dim.c | 4 +- lib/glob.c | 2 +- lib/siphash.c | 36 +++---- lib/ts_fsm.c | 2 +- lib/vsprintf.c | 15 +-- lib/xz/xz_dec_lzma2.c | 4 +- lib/xz/xz_dec_stream.c | 16 +-- lib/zstd/bitstream.h | 10 +- lib/zstd/compress.c | 2 +- lib/zstd/decompress.c | 12 +-- lib/zstd/huf_compress.c | 4 +- net/8021q/vlan_dev.c | 2 +- net/9p/trans_xen.c | 2 +- net/atm/common.c | 4 +- net/atm/lec.c | 2 +- net/atm/resources.c | 8 +- net/bpf/test_run.c | 2 +- net/can/j1939/socket.c | 2 +- net/can/j1939/transport.c | 20 ++-- net/ceph/ceph_hash.c | 20 ++-- net/ceph/crush/mapper.c | 2 +- net/ceph/messenger.c | 4 +- net/ceph/mon_client.c | 2 +- net/ceph/osd_client.c | 4 +- net/core/dev.c | 4 +- net/core/dev_ioctl.c | 6 +- net/core/devlink.c | 4 +- net/core/drop_monitor.c | 2 +- net/core/filter.c | 2 +- net/core/pktgen.c | 2 +- net/core/skmsg.c | 1 - net/core/sock.c | 2 +- net/dccp/ccids/ccid3.c | 2 +- net/dccp/feat.c | 3 +- net/dccp/input.c | 10 +- net/dccp/options.c | 2 +- net/dccp/output.c | 8 +- net/dccp/proto.c | 8 +- net/decnet/af_decnet.c | 6 +- net/decnet/dn_nsp_in.c | 2 +- net/decnet/dn_table.c | 2 +- net/decnet/sysctl_net_decnet.c | 2 +- net/dsa/slave.c | 2 +- net/ieee802154/6lowpan/reassembly.c | 2 +- net/ieee802154/6lowpan/rx.c | 4 +- net/iucv/af_iucv.c | 10 +- net/mpls/af_mpls.c | 2 +- net/mptcp/protocol.c | 3 +- net/ncsi/ncsi-manage.c | 4 +- net/netfilter/ipvs/ip_vs_proto_tcp.c | 2 +- net/netfilter/ipvs/ip_vs_proto_udp.c | 2 +- net/netlink/policy.c | 2 +- net/netrom/nr_in.c | 2 +- net/netrom/nr_route.c | 8 +- net/openvswitch/conntrack.c | 4 +- net/openvswitch/flow.c | 2 +- net/packet/af_packet.c | 2 +- net/phonet/pep.c | 10 +- net/rds/send.c | 2 +- net/rose/rose_in.c | 2 +- net/rose/rose_route.c | 4 +- net/rxrpc/af_rxrpc.c | 6 +- net/rxrpc/call_accept.c | 2 +- net/rxrpc/conn_client.c | 2 +- net/rxrpc/input.c | 6 +- net/rxrpc/local_object.c | 2 +- net/rxrpc/peer_event.c | 2 +- net/rxrpc/recvmsg.c | 2 +- net/rxrpc/sendmsg.c | 6 +- net/sched/sch_cake.c | 2 +- net/sctp/ipv6.c | 2 +- net/sctp/outqueue.c | 6 +- net/sctp/sm_make_chunk.c | 2 +- net/sctp/sm_sideeffect.c | 2 +- net/sctp/sm_statefuns.c | 2 +- net/smc/smc_close.c | 2 +- net/sunrpc/auth_gss/gss_krb5_wrap.c | 2 +- net/sunrpc/clnt.c | 22 ++-- net/sunrpc/xprt.c | 2 +- net/sunrpc/xprtrdma/verbs.c | 2 +- net/sunrpc/xprtsock.c | 8 +- net/tipc/bearer.c | 2 +- net/tipc/group.c | 2 +- net/tipc/link.c | 2 +- net/tipc/socket.c | 4 +- net/unix/af_unix.c | 2 +- net/wireless/chan.c | 4 +- net/wireless/mlme.c | 2 +- net/wireless/nl80211.c | 20 ++-- net/wireless/scan.c | 2 +- net/wireless/sme.c | 4 +- net/wireless/util.c | 4 +- net/wireless/wext-compat.c | 4 +- net/x25/x25_facilities.c | 2 +- net/x25/x25_in.c | 2 +- net/xfrm/xfrm_policy.c | 2 +- samples/bpf/hbm.c | 2 +- security/apparmor/domain.c | 2 +- security/apparmor/lib.c | 4 +- security/integrity/ima/ima_appraise.c | 4 +- security/integrity/ima/ima_policy.c | 8 +- security/integrity/ima/ima_template_lib.c | 2 +- security/keys/process_keys.c | 6 +- security/keys/request_key.c | 8 +- security/selinux/hooks.c | 8 +- security/selinux/ss/mls.c | 4 +- 
security/smack/smack_lsm.c | 2 +- security/tomoyo/common.c | 18 ++-- security/tomoyo/file.c | 2 +- sound/ppc/snd_ps3.c | 4 +- sound/soc/atmel/mchp-i2s-mcc.c | 2 +- sound/soc/codecs/jz4770.c | 2 +- sound/soc/codecs/pcm186x.c | 2 +- sound/soc/fsl/fsl_ssi.c | 2 +- sound/soc/hisilicon/hi6210-i2s.c | 4 +- sound/soc/intel/baytrail/sst-baytrail-pcm.c | 2 +- sound/soc/intel/boards/bytcht_es8316.c | 2 +- sound/soc/intel/boards/bytcr_rt5651.c | 4 +- sound/soc/intel/skylake/skl-pcm.c | 2 +- sound/soc/meson/axg-tdm-interface.c | 10 +- sound/soc/pxa/pxa-ssp.c | 2 +- sound/soc/rockchip/rockchip_pdm.c | 6 +- sound/soc/samsung/i2s.c | 2 +- sound/soc/soc-core.c | 2 +- sound/soc/soc-topology.c | 4 +- sound/soc/sof/intel/hda-dai.c | 4 +- sound/soc/sof/pcm.c | 4 +- sound/soc/ti/davinci-i2s.c | 2 +- sound/soc/ti/n810.c | 2 +- sound/soc/ti/omap-dmic.c | 4 +- sound/soc/ti/omap-mcpdm.c | 8 +- sound/soc/ti/rx51.c | 2 +- sound/soc/zte/zx-i2s.c | 4 +- sound/soc/zte/zx-spdif.c | 2 +- 1148 files changed, 2667 insertions(+), 2737 deletions(-) (limited to 'include') diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c index ac110ae8f978..5b60c248de9e 100644 --- a/arch/alpha/kernel/module.c +++ b/arch/alpha/kernel/module.c @@ -212,7 +212,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab, STO_ALPHA_STD_GPLOAD) /* Omit the prologue. */ value += 8; - /* FALLTHRU */ + fallthrough; case R_ALPHA_BRADDR: value -= (u64)location + 4; if (value & 3) diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c index a813020d2f11..15bc9d1e79f4 100644 --- a/arch/alpha/kernel/signal.c +++ b/arch/alpha/kernel/signal.c @@ -453,7 +453,7 @@ syscall_restart(unsigned long r0, unsigned long r19, regs->r0 = EINTR; break; } - /* fallthrough */ + fallthrough; case ERESTARTNOINTR: regs->r0 = r0; /* reset v0 and a3 and replay syscall */ regs->r19 = r19; diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index 49754e07e04f..921d4b6e4d95 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -883,7 +883,7 @@ do_entUnaUser(void __user * va, unsigned long opcode, case 0x26: /* sts */ fake_reg = s_reg_to_mem(alpha_read_fp_reg(reg)); - /* FALLTHRU */ + fallthrough; case 0x2c: /* stl */ __asm__ __volatile__( @@ -911,7 +911,7 @@ do_entUnaUser(void __user * va, unsigned long opcode, case 0x27: /* stt */ fake_reg = alpha_read_fp_reg(reg); - /* FALLTHRU */ + fallthrough; case 0x2d: /* stq */ __asm__ __volatile__( diff --git a/arch/arc/kernel/disasm.c b/arch/arc/kernel/disasm.c index d04837d91b40..03f8b1be0c3a 100644 --- a/arch/arc/kernel/disasm.c +++ b/arch/arc/kernel/disasm.c @@ -339,7 +339,7 @@ void __kprobes disasm_instr(unsigned long addr, struct disasm_state *state, case op_LDWX_S: /* LDWX_S c, [b, u6] */ state->x = 1; - /* intentional fall-through */ + fallthrough; case op_LDW_S: /* LDW_S c, [b, u6] */ state->zz = 2; diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c index 3d57ed0d8535..8222f8c54690 100644 --- a/arch/arc/kernel/signal.c +++ b/arch/arc/kernel/signal.c @@ -321,7 +321,7 @@ static void arc_restart_syscall(struct k_sigaction *ka, struct pt_regs *regs) regs->r0 = -EINTR; break; } - /* fallthrough */ + fallthrough; case -ERESTARTNOINTR: /* diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c index f87758a6851b..74ad4256022e 100644 --- a/arch/arc/kernel/unwind.c +++ b/arch/arc/kernel/unwind.c @@ -572,7 +572,7 @@ static unsigned long read_pointer(const u8 **pLoc, const void *end, #else BUILD_BUG_ON(sizeof(u32) != sizeof(value)); #endif - /* Fall 
through */ + fallthrough; case DW_EH_PE_native: if (end < (const void *)(ptr.pul + 1)) return 0; @@ -827,7 +827,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc, case DW_CFA_def_cfa: state->cfa.reg = get_uleb128(&ptr.p8, end); unw_debug("cfa_def_cfa: r%lu ", state->cfa.reg); - /* fall through */ + fallthrough; case DW_CFA_def_cfa_offset: state->cfa.offs = get_uleb128(&ptr.p8, end); unw_debug("cfa_def_cfa_offset: 0x%lx ", @@ -835,7 +835,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc, break; case DW_CFA_def_cfa_sf: state->cfa.reg = get_uleb128(&ptr.p8, end); - /* fall through */ + fallthrough; case DW_CFA_def_cfa_offset_sf: state->cfa.offs = get_sleb128(&ptr.p8, end) * state->dataAlign; diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 7fff88e61252..7a4853b1213a 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -547,7 +547,7 @@ static int arch_build_bp_info(struct perf_event *bp, if ((hw->ctrl.type != ARM_BREAKPOINT_EXECUTE) && max_watchpoint_len >= 8) break; - /* Else, fall through */ + fallthrough; default: return -EINVAL; } @@ -612,12 +612,12 @@ int hw_breakpoint_arch_parse(struct perf_event *bp, /* Allow halfword watchpoints and breakpoints. */ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2) break; - /* Else, fall through */ + fallthrough; case 3: /* Allow single byte watchpoint. */ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1) break; - /* Else, fall through */ + fallthrough; default: ret = -EINVAL; goto out; @@ -884,7 +884,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, break; case ARM_ENTRY_ASYNC_WATCHPOINT: WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n"); - /* Fall through */ + fallthrough; case ARM_ENTRY_SYNC_WATCHPOINT: watchpoint_handler(addr, fsr, regs); break; @@ -933,7 +933,7 @@ static bool core_has_os_save_restore(void) ARM_DBG_READ(c1, c1, 4, oslsr); if (oslsr & ARM_OSLSR_OSLM0) return true; - /* Else, fall through */ + fallthrough; default: return false; } diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index c9dc912b83f0..c1892f733f20 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -596,7 +596,7 @@ static int do_signal(struct pt_regs *regs, int syscall) switch (retval) { case -ERESTART_RESTARTBLOCK: restart -= 2; - /* Fall through */ + fallthrough; case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: diff --git a/arch/arm/mach-ep93xx/crunch.c b/arch/arm/mach-ep93xx/crunch.c index 1c05c5bf7e5c..757032d82f63 100644 --- a/arch/arm/mach-ep93xx/crunch.c +++ b/arch/arm/mach-ep93xx/crunch.c @@ -49,7 +49,7 @@ static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t) * FALLTHROUGH: Ensure we don't try to overwrite our newly * initialised state information on the first fault. 
*/ - /* Fall through */ + fallthrough; case THREAD_NOTIFY_EXIT: crunch_task_release(thread); diff --git a/arch/arm/mach-mmp/pm-mmp2.c b/arch/arm/mach-mmp/pm-mmp2.c index 2d86381e152d..7a6f74c32d42 100644 --- a/arch/arm/mach-mmp/pm-mmp2.c +++ b/arch/arm/mach-mmp/pm-mmp2.c @@ -123,19 +123,19 @@ void mmp2_pm_enter_lowpower_mode(int state) case POWER_MODE_SYS_SLEEP: apcr |= MPMU_PCR_PJ_SLPEN; /* set the SLPEN bit */ apcr |= MPMU_PCR_PJ_VCTCXOSD; /* set VCTCXOSD */ - /* fall through */ + fallthrough; case POWER_MODE_CHIP_SLEEP: apcr |= MPMU_PCR_PJ_SLPEN; - /* fall through */ + fallthrough; case POWER_MODE_APPS_SLEEP: apcr |= MPMU_PCR_PJ_APBSD; /* set APBSD */ - /* fall through */ + fallthrough; case POWER_MODE_APPS_IDLE: apcr |= MPMU_PCR_PJ_AXISD; /* set AXISDD bit */ apcr |= MPMU_PCR_PJ_DDRCORSD; /* set DDRCORSD bit */ idle_cfg |= APMU_PJ_IDLE_CFG_PJ_PWRDWN; /* PJ power down */ apcr |= MPMU_PCR_PJ_SPSD; - /* fall through */ + fallthrough; case POWER_MODE_CORE_EXTIDLE: idle_cfg |= APMU_PJ_IDLE_CFG_PJ_IDLE; /* set the IDLE bit */ idle_cfg &= ~APMU_PJ_IDLE_CFG_ISO_MODE_CNTRL_MASK; diff --git a/arch/arm/mach-mmp/pm-pxa910.c b/arch/arm/mach-mmp/pm-pxa910.c index 69ebe18ff209..1d71d73c1862 100644 --- a/arch/arm/mach-mmp/pm-pxa910.c +++ b/arch/arm/mach-mmp/pm-pxa910.c @@ -145,23 +145,23 @@ void pxa910_pm_enter_lowpower_mode(int state) case POWER_MODE_UDR: /* only shutdown APB in UDR */ apcr |= MPMU_APCR_STBYEN | MPMU_APCR_APBSD; - /* fall through */ + fallthrough; case POWER_MODE_SYS_SLEEP: apcr |= MPMU_APCR_SLPEN; /* set the SLPEN bit */ apcr |= MPMU_APCR_VCTCXOSD; /* set VCTCXOSD */ - /* fall through */ + fallthrough; case POWER_MODE_APPS_SLEEP: apcr |= MPMU_APCR_DDRCORSD; /* set DDRCORSD */ - /* fall through */ + fallthrough; case POWER_MODE_APPS_IDLE: apcr |= MPMU_APCR_AXISD; /* set AXISDD bit */ - /* fall through */ + fallthrough; case POWER_MODE_CORE_EXTIDLE: idle_cfg |= APMU_MOH_IDLE_CFG_MOH_IDLE; idle_cfg |= APMU_MOH_IDLE_CFG_MOH_PWRDWN; idle_cfg |= APMU_MOH_IDLE_CFG_MOH_PWR_SW(3) | APMU_MOH_IDLE_CFG_MOH_L2_PWR_SW(3); - /* fall through */ + fallthrough; case POWER_MODE_CORE_INTIDLE: break; } diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c index 1d119b974f5f..59755b5a1ad7 100644 --- a/arch/arm/mach-omap2/id.c +++ b/arch/arm/mach-omap2/id.c @@ -396,7 +396,6 @@ void __init omap3xxx_check_revision(void) cpu_rev = "3.1"; break; case 7: - /* FALLTHROUGH */ default: /* Use the latest known revision as default */ omap_revision = OMAP3430_REV_ES3_1_2; @@ -416,7 +415,6 @@ void __init omap3xxx_check_revision(void) cpu_rev = "1.0"; break; case 1: - /* FALLTHROUGH */ default: omap_revision = AM35XX_REV_ES1_1; cpu_rev = "1.1"; @@ -435,7 +433,6 @@ void __init omap3xxx_check_revision(void) cpu_rev = "1.1"; break; case 2: - /* FALLTHROUGH */ default: omap_revision = OMAP3630_REV_ES1_2; cpu_rev = "1.2"; @@ -456,7 +453,6 @@ void __init omap3xxx_check_revision(void) cpu_rev = "2.0"; break; case 3: - /* FALLTHROUGH */ default: omap_revision = TI8168_REV_ES2_1; cpu_rev = "2.1"; @@ -473,7 +469,6 @@ void __init omap3xxx_check_revision(void) cpu_rev = "2.0"; break; case 2: - /* FALLTHROUGH */ default: omap_revision = AM335X_REV_ES2_1; cpu_rev = "2.1"; @@ -491,7 +486,6 @@ void __init omap3xxx_check_revision(void) cpu_rev = "1.1"; break; case 2: - /* FALLTHROUGH */ default: omap_revision = AM437X_REV_ES1_2; cpu_rev = "1.2"; @@ -502,7 +496,6 @@ void __init omap3xxx_check_revision(void) case 0xb968: switch (rev) { case 0: - /* FALLTHROUGH */ case 1: omap_revision = TI8148_REV_ES1_0; cpu_rev = "1.0"; @@ 
-512,7 +505,6 @@ void __init omap3xxx_check_revision(void) cpu_rev = "2.0"; break; case 3: - /* FALLTHROUGH */ default: omap_revision = TI8148_REV_ES2_1; cpu_rev = "2.1"; diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c index 6b4548f3b57f..fc7bb2ca1672 100644 --- a/arch/arm/mach-omap2/omap_device.c +++ b/arch/arm/mach-omap2/omap_device.c @@ -240,7 +240,7 @@ static int _omap_device_notifier_call(struct notifier_block *nb, if (pdev->dev.of_node) omap_device_build_from_dt(pdev); omap_auxdata_legacy_init(dev); - /* fall through */ + fallthrough; default: od = to_omap_device(pdev); if (od) diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c index d13344b2ddcd..87cb47220e82 100644 --- a/arch/arm/mach-orion5x/dns323-setup.c +++ b/arch/arm/mach-orion5x/dns323-setup.c @@ -624,7 +624,7 @@ static void __init dns323_init(void) dns323ab_leds[0].active_low = 1; gpio_request(DNS323_GPIO_LED_POWER1, "Power Led Enable"); gpio_direction_output(DNS323_GPIO_LED_POWER1, 0); - /* Fall through */ + fallthrough; case DNS323_REV_B1: i2c_register_board_info(0, dns323ab_i2c_devices, ARRAY_SIZE(dns323ab_i2c_devices)); diff --git a/arch/arm/mach-rpc/riscpc.c b/arch/arm/mach-rpc/riscpc.c index ea2c84214bac..d23970bd638d 100644 --- a/arch/arm/mach-rpc/riscpc.c +++ b/arch/arm/mach-rpc/riscpc.c @@ -46,7 +46,7 @@ static int __init parse_tag_acorn(const struct tag *tag) switch (tag->u.acorn.vram_pages) { case 512: vram_size += PAGE_SIZE * 256; - /* Fall through - ??? */ + fallthrough; /* ??? */ case 256: vram_size += PAGE_SIZE * 256; default: diff --git a/arch/arm/mach-tegra/reset.c b/arch/arm/mach-tegra/reset.c index 76a65df42d10..d5c805adf7a8 100644 --- a/arch/arm/mach-tegra/reset.c +++ b/arch/arm/mach-tegra/reset.c @@ -70,7 +70,7 @@ static void __init tegra_cpu_reset_handler_enable(void) switch (err) { case -ENOSYS: tegra_cpu_reset_handler_set(reset_address); - /* fall through */ + fallthrough; case 0: is_enabled = true; break; diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index f4bfc1cac91a..ea81e89e7740 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -694,7 +694,7 @@ thumb2arm(u16 tinstr) return subset[(L<<1) | ((tinstr & (1<<8)) >> 8)] | (tinstr & 255); /* register_list */ } - /* Else, fall through - for illegal instruction case */ + fallthrough; /* for illegal instruction case */ default: return BAD_INSTR; @@ -750,7 +750,7 @@ do_alignment_t32_to_handler(u32 *pinstr, struct pt_regs *regs, case 0xe8e0: case 0xe9e0: poffset->un = (tinst2 & 0xff) << 2; - /* Fall through */ + fallthrough; case 0xe940: case 0xe9c0: diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c index c0fbfca5da8b..114c05ab4dd9 100644 --- a/arch/arm/mm/proc-v7-bugs.c +++ b/arch/arm/mm/proc-v7-bugs.c @@ -71,7 +71,7 @@ static void cpu_v7_spectre_init(void) /* Other ARM CPUs require no workaround */ if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) break; - /* fallthrough */ + fallthrough; /* Cortex A57/A72 require firmware workaround */ case ARM_CPU_PART_CORTEX_A57: case ARM_CPU_PART_CORTEX_A72: { diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c index b2e9e822426f..1eb59003bdec 100644 --- a/arch/arm/plat-omap/dma.c +++ b/arch/arm/plat-omap/dma.c @@ -309,14 +309,14 @@ void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode) * not supported by current hardware on OMAP1 * w |= (0x03 << 7); */ - /* fall through */ + fallthrough; case OMAP_DMA_DATA_BURST_16: if (dma_omap2plus()) { burst = 0x3; 
break; } /* OMAP1 don't support burst 16 */ - /* fall through */ + fallthrough; default: BUG(); } @@ -393,7 +393,7 @@ void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode) break; } /* OMAP1 don't support burst 16 */ - /* fall through */ + fallthrough; default: printk(KERN_ERR "Invalid DMA burst mode\n"); BUG(); diff --git a/arch/arm/probes/decode.c b/arch/arm/probes/decode.c index fe81a9c21f2d..c84053a81358 100644 --- a/arch/arm/probes/decode.c +++ b/arch/arm/probes/decode.c @@ -307,7 +307,7 @@ static bool __kprobes decode_regs(probes_opcode_t *pinsn, u32 regs, bool modify) case REG_TYPE_NOPCWB: if (!is_writeback(insn)) break; /* No writeback, so any register is OK */ - /* fall through... */ + fallthrough; case REG_TYPE_NOPC: case REG_TYPE_NOPCX: /* Reject PC (R15) */ diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c index 90b5bc723c83..feefa2055eba 100644 --- a/arch/arm/probes/kprobes/core.c +++ b/arch/arm/probes/kprobes/core.c @@ -280,7 +280,7 @@ void __kprobes kprobe_handler(struct pt_regs *regs) /* A nested probe was hit in FIQ, it is a BUG */ pr_warn("Unrecoverable kprobe detected.\n"); dump_kprobe(p); - /* fall through */ + fallthrough; default: /* impossible cases */ BUG(); diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c index 455966401102..a85174d05473 100644 --- a/arch/arm64/kernel/acpi.c +++ b/arch/arm64/kernel/acpi.c @@ -322,7 +322,7 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) */ if (memblock_is_map_memory(phys)) return (void __iomem *)__phys_to_virt(phys); - /* fall through */ + fallthrough; default: if (region->attribute & EFI_MEMORY_WB) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index a389b999482e..6424584be01e 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -686,7 +686,7 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, case FTR_HIGHER_OR_ZERO_SAFE: if (!cur || !new) break; - /* Fallthrough */ + fallthrough; case FTR_HIGHER_SAFE: ret = new > cur ? new : cur; break; diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 393c6fb1f1cb..1886a02c3f50 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -327,7 +327,7 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info) set_bit(ICACHEF_VPIPT, &__icache_flags); break; default: - /* Fallthrough */ + fallthrough; case ICACHE_POLICY_VIPT: /* Assume aliasing */ set_bit(ICACHEF_ALIASING, &__icache_flags); diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index af234a1e08b7..712e97c03e54 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -257,7 +257,7 @@ static int hw_breakpoint_control(struct perf_event *bp, * level. */ enable_debug_monitors(dbg_el); - /* Fall through */ + fallthrough; case HW_BREAKPOINT_RESTORE: /* Setup the address register. */ write_wb_reg(val_reg, i, info->address); @@ -541,13 +541,13 @@ int hw_breakpoint_arch_parse(struct perf_event *bp, if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2) break; - /* Fallthrough */ + fallthrough; case 3: /* Allow single byte watchpoint. 
*/ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1) break; - /* Fallthrough */ + fallthrough; default: return -EINVAL; } diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index 1cd1a4d0ed30..2a1ad95d9b2c 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -315,21 +315,21 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, /* MOVW instruction relocations. */ case R_AARCH64_MOVW_UABS_G0_NC: overflow_check = false; - /* Fall through */ + fallthrough; case R_AARCH64_MOVW_UABS_G0: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0, AARCH64_INSN_IMM_MOVKZ); break; case R_AARCH64_MOVW_UABS_G1_NC: overflow_check = false; - /* Fall through */ + fallthrough; case R_AARCH64_MOVW_UABS_G1: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16, AARCH64_INSN_IMM_MOVKZ); break; case R_AARCH64_MOVW_UABS_G2_NC: overflow_check = false; - /* Fall through */ + fallthrough; case R_AARCH64_MOVW_UABS_G2: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32, AARCH64_INSN_IMM_MOVKZ); @@ -397,7 +397,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, break; case R_AARCH64_ADR_PREL_PG_HI21_NC: overflow_check = false; - /* Fall through */ + fallthrough; case R_AARCH64_ADR_PREL_PG_HI21: ovf = reloc_insn_adrp(me, sechdrs, loc, val); if (ovf && ovf != -ERANGE) diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 03957a1ae6c0..355ee9eed4dd 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -151,7 +151,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) break; } pr_crit("CPU%u: may not have shut down cleanly\n", cpu); - /* Fall through */ + fallthrough; case CPU_STUCK_IN_KERNEL: pr_crit("CPU%u: is stuck in kernel\n", cpu); if (status & CPU_STUCK_REASON_52_BIT_VA) diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index fe6c7d79309d..5d690d60ccad 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -128,7 +128,7 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu) switch (ESR_ELx_EC(esr)) { case ESR_ELx_EC_WATCHPT_LOW: run->debug.arch.far = vcpu->arch.fault.far_el2; - /* fall through */ + fallthrough; case ESR_ELx_EC_SOFTSTP_LOW: case ESR_ELx_EC_BREAKPT_LOW: case ESR_ELx_EC_BKPT32: diff --git a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h index 0297dc63988c..5e28ea6aa097 100644 --- a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h +++ b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h @@ -21,70 +21,70 @@ #define save_debug(ptr,reg,nr) \ switch (nr) { \ case 15: ptr[15] = read_debug(reg, 15); \ - /* Fall through */ \ + fallthrough; \ case 14: ptr[14] = read_debug(reg, 14); \ - /* Fall through */ \ + fallthrough; \ case 13: ptr[13] = read_debug(reg, 13); \ - /* Fall through */ \ + fallthrough; \ case 12: ptr[12] = read_debug(reg, 12); \ - /* Fall through */ \ + fallthrough; \ case 11: ptr[11] = read_debug(reg, 11); \ - /* Fall through */ \ + fallthrough; \ case 10: ptr[10] = read_debug(reg, 10); \ - /* Fall through */ \ + fallthrough; \ case 9: ptr[9] = read_debug(reg, 9); \ - /* Fall through */ \ + fallthrough; \ case 8: ptr[8] = read_debug(reg, 8); \ - /* Fall through */ \ + fallthrough; \ case 7: ptr[7] = read_debug(reg, 7); \ - /* Fall through */ \ + fallthrough; \ case 6: ptr[6] = read_debug(reg, 6); \ - /* Fall through */ \ + fallthrough; \ case 5: ptr[5] = read_debug(reg, 5); \ - /* Fall through */ \ + fallthrough; \ case 4: ptr[4] = read_debug(reg, 4); \ - /* Fall through */ \ + fallthrough; \ case 3: ptr[3] = read_debug(reg, 3); \ - /* Fall through */ \ + fallthrough; \ case 
2: ptr[2] = read_debug(reg, 2); \ - /* Fall through */ \ + fallthrough; \ case 1: ptr[1] = read_debug(reg, 1); \ - /* Fall through */ \ + fallthrough; \ default: ptr[0] = read_debug(reg, 0); \ } #define restore_debug(ptr,reg,nr) \ switch (nr) { \ case 15: write_debug(ptr[15], reg, 15); \ - /* Fall through */ \ + fallthrough; \ case 14: write_debug(ptr[14], reg, 14); \ - /* Fall through */ \ + fallthrough; \ case 13: write_debug(ptr[13], reg, 13); \ - /* Fall through */ \ + fallthrough; \ case 12: write_debug(ptr[12], reg, 12); \ - /* Fall through */ \ + fallthrough; \ case 11: write_debug(ptr[11], reg, 11); \ - /* Fall through */ \ + fallthrough; \ case 10: write_debug(ptr[10], reg, 10); \ - /* Fall through */ \ + fallthrough; \ case 9: write_debug(ptr[9], reg, 9); \ - /* Fall through */ \ + fallthrough; \ case 8: write_debug(ptr[8], reg, 8); \ - /* Fall through */ \ + fallthrough; \ case 7: write_debug(ptr[7], reg, 7); \ - /* Fall through */ \ + fallthrough; \ case 6: write_debug(ptr[6], reg, 6); \ - /* Fall through */ \ + fallthrough; \ case 5: write_debug(ptr[5], reg, 5); \ - /* Fall through */ \ + fallthrough; \ case 4: write_debug(ptr[4], reg, 4); \ - /* Fall through */ \ + fallthrough; \ case 3: write_debug(ptr[3], reg, 3); \ - /* Fall through */ \ + fallthrough; \ case 2: write_debug(ptr[2], reg, 2); \ - /* Fall through */ \ + fallthrough; \ case 1: write_debug(ptr[1], reg, 1); \ - /* Fall through */ \ + fallthrough; \ default: write_debug(ptr[0], reg, 0); \ } diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index 5a0073511efb..452f4cacd674 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -340,10 +340,10 @@ void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if) case 7: cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3); cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2); - /* Fall through */ + fallthrough; case 6: cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1); - /* Fall through */ + fallthrough; default: cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0); } @@ -352,10 +352,10 @@ void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if) case 7: cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3); cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2); - /* Fall through */ + fallthrough; case 6: cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1); - /* Fall through */ + fallthrough; default: cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0); } @@ -373,10 +373,10 @@ void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if) case 7: __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3); __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2); - /* Fall through */ + fallthrough; case 6: __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1); - /* Fall through */ + fallthrough; default: __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0); } @@ -385,10 +385,10 @@ void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if) case 7: __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3); __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2); - /* Fall through */ + fallthrough; case 6: __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1); - /* Fall through */ + fallthrough; default: __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0); } diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index a206655a39a5..9b11c096a042 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -45,7 +45,7 @@ static u32 get_cpu_asid_bits(void) default: pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n", smp_processor_id(), fld); - /* Fallthrough */ + fallthrough; case 0: asid = 8; break; diff --git 
a/arch/c6x/kernel/signal.c b/arch/c6x/kernel/signal.c index e456652facce..d05c78eace1b 100644 --- a/arch/c6x/kernel/signal.c +++ b/arch/c6x/kernel/signal.c @@ -220,7 +220,7 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler) regs->a4 = -EINTR; break; } - /* fallthrough */ + fallthrough; case -ERESTARTNOINTR: do_restart: regs->a4 = regs->orig_a4; @@ -252,7 +252,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs, break; } - /* fallthrough */ + fallthrough; case -ERESTARTNOINTR: regs->a4 = regs->orig_a4; regs->pc -= 4; diff --git a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c index 9452d6570b7e..970895df75ec 100644 --- a/arch/csky/kernel/signal.c +++ b/arch/csky/kernel/signal.c @@ -194,7 +194,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) regs->a0 = -EINTR; break; } - /* fallthrough */ + fallthrough; case -ERESTARTNOINTR: regs->a0 = regs->orig_a0; regs->pc -= TRAP0_SIZE; diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c index 38d335488a54..69e68949787f 100644 --- a/arch/h8300/kernel/signal.c +++ b/arch/h8300/kernel/signal.c @@ -227,7 +227,7 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka) regs->er0 = -EINTR; break; } - /* fallthrough */ + fallthrough; case -ERESTARTNOINTR: do_restart: regs->er0 = regs->orig_er0; diff --git a/arch/hexagon/kernel/module.c b/arch/hexagon/kernel/module.c index cf99fb79a124..cb3bf19b0640 100644 --- a/arch/hexagon/kernel/module.c +++ b/arch/hexagon/kernel/module.c @@ -120,7 +120,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, } case R_HEXAGON_HI16: value = (value>>16) & 0xffff; - /* fallthrough */ + fallthrough; case R_HEXAGON_LO16: *location &= ~0x00c03fff; *location |= value & 0x3fff; diff --git a/arch/hexagon/kernel/signal.c b/arch/hexagon/kernel/signal.c index d48864c48e5a..94cc7ff52dce 100644 --- a/arch/hexagon/kernel/signal.c +++ b/arch/hexagon/kernel/signal.c @@ -155,7 +155,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) regs->r00 = -EINTR; break; } - /* Fall through */ + fallthrough; case -ERESTARTNOINTR: regs->r06 = regs->syscall_nr; pt_set_elr(regs, pt_elr(regs) - 4); diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c index bec762a9b418..fec70d662d0c 100644 --- a/arch/ia64/kernel/crash.c +++ b/arch/ia64/kernel/crash.c @@ -163,7 +163,7 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data) case DIE_INIT_MONARCH_LEAVE: if (!kdump_freeze_monarch) break; - /* fall through */ + fallthrough; case DIE_INIT_SLAVE_LEAVE: case DIE_INIT_MONARCH_ENTER: case DIE_MCA_RENDZVOUS_LEAVE: diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c index 1a42ba885188..00a496cb346f 100644 --- a/arch/ia64/kernel/module.c +++ b/arch/ia64/kernel/module.c @@ -654,7 +654,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend, } } else if (!is_internal(mod, val)) val = get_plt(mod, location, val, &ok); - /* FALL THROUGH */ + fallthrough; default: val -= bundle(location); break; diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 971f166873aa..0dc3611e7971 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -3472,7 +3472,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) break; case PFM_CTX_LOADED: if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break; - /* fall through */ + fallthrough; case PFM_CTX_UNLOADED: case PFM_CTX_ZOMBIE: DPRINT(("invalid state=%d\n", 
state)); diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index d07ed65c9c6e..e67b22fc3c60 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -374,7 +374,7 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall) /* note: scr->pt.r10 is already -1 */ break; } - /*FALLTHRU*/ + fallthrough; case ERESTARTNOINTR: ia64_decrement_ip(&scr->pt); restart = 0; /* don't restart twice if handle_signal() fails... */ diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c index 2d4e65ba5c3e..6c1a8951dfbb 100644 --- a/arch/ia64/kernel/unaligned.c +++ b/arch/ia64/kernel/unaligned.c @@ -1431,7 +1431,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) if (u.insn.x) /* oops, really a semaphore op (cmpxchg, etc) */ goto failure; - /*FALLTHRU*/ + fallthrough; case LDS_IMM_OP: case LDSA_IMM_OP: case LDFS_OP: @@ -1459,7 +1459,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) if (u.insn.x) /* oops, really a semaphore op (cmpxchg, etc) */ goto failure; - /*FALLTHRU*/ + fallthrough; case LD_IMM_OP: case LDA_IMM_OP: case LDBIAS_IMM_OP: @@ -1475,7 +1475,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) if (u.insn.x) /* oops, really a semaphore op (cmpxchg, etc) */ goto failure; - /*FALLTHRU*/ + fallthrough; case ST_IMM_OP: case STREL_IMM_OP: ret = emulate_store_int(ifa, u.insn, regs); diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c index 7601fe0622d2..6bd64c35e691 100644 --- a/arch/ia64/kernel/unwind.c +++ b/arch/ia64/kernel/unwind.c @@ -324,7 +324,7 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char return 0; } } - /* fall through */ + fallthrough; case UNW_NAT_NONE: dummy_nat = 0; nat_addr = &dummy_nat; diff --git a/arch/m68k/atari/atakeyb.c b/arch/m68k/atari/atakeyb.c index 37091898adb3..5e0e682f9c61 100644 --- a/arch/m68k/atari/atakeyb.c +++ b/arch/m68k/atari/atakeyb.c @@ -207,7 +207,7 @@ repeat: self_test_last_rcv = jiffies; break; } - /* FALL THROUGH */ + fallthrough; default: break_flag = scancode & BREAK_MASK; diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c index fc034fd19798..a98fca977073 100644 --- a/arch/m68k/kernel/signal.c +++ b/arch/m68k/kernel/signal.c @@ -1067,7 +1067,7 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler) regs->d0 = -EINTR; break; } - /* fallthrough */ + fallthrough; case -ERESTARTNOINTR: do_restart: regs->d0 = regs->orig_d0; diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c index 5c9f3a2d6538..a621fcc1a576 100644 --- a/arch/m68k/mac/config.c +++ b/arch/m68k/mac/config.c @@ -1018,7 +1018,7 @@ int __init mac_platform_init(void) */ platform_device_register_simple("mac_scsi", 1, mac_scsi_duo_rsrc, ARRAY_SIZE(mac_scsi_duo_rsrc)); - /* fall through */ + fallthrough; case MAC_SCSI_OLD: /* Addresses from Developer Notes for Duo System, * PowerBook 180 & 160, 140 & 170, Macintosh IIsi diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c index 1f0fad2a98a0..ac77d73af19a 100644 --- a/arch/m68k/mac/via.c +++ b/arch/m68k/mac/via.c @@ -370,7 +370,7 @@ void via_nubus_irq_startup(int irq) /* Allow NuBus slots 9 through F. 
*/ via2[vDirA] &= 0x80 | ~(1 << irq_idx); } - /* fall through */ + fallthrough; case MAC_VIA_IICI: via_irq_enable(irq); break; diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index 795f483b1050..ef46e77e97a5 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -118,7 +118,7 @@ good_area: pr_debug("do_page_fault: good_area\n"); switch (error_code & 3) { default: /* 3: write, present */ - /* fall through */ + fallthrough; case 2: /* write, not present */ if (!(vma->vm_flags & VM_WRITE)) goto acc_err; diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c index 65bf5fd8d473..4a96b59f0bee 100644 --- a/arch/microblaze/kernel/signal.c +++ b/arch/microblaze/kernel/signal.c @@ -249,7 +249,7 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler) regs->r3 = -EINTR; break; } - /* fallthrough */ + fallthrough; case -ERESTARTNOINTR: do_restart: /* offset of 4 bytes to re-execute trap (brki) instruction */ diff --git a/arch/mips/include/asm/unroll.h b/arch/mips/include/asm/unroll.h index 7dd4a80e05d6..6f4ac854b12d 100644 --- a/arch/mips/include/asm/unroll.h +++ b/arch/mips/include/asm/unroll.h @@ -28,38 +28,38 @@ BUILD_BUG_ON(!__builtin_constant_p(times)); \ \ switch (times) { \ - case 32: fn(__VA_ARGS__); /* fall through */ \ - case 31: fn(__VA_ARGS__); /* fall through */ \ - case 30: fn(__VA_ARGS__); /* fall through */ \ - case 29: fn(__VA_ARGS__); /* fall through */ \ - case 28: fn(__VA_ARGS__); /* fall through */ \ - case 27: fn(__VA_ARGS__); /* fall through */ \ - case 26: fn(__VA_ARGS__); /* fall through */ \ - case 25: fn(__VA_ARGS__); /* fall through */ \ - case 24: fn(__VA_ARGS__); /* fall through */ \ - case 23: fn(__VA_ARGS__); /* fall through */ \ - case 22: fn(__VA_ARGS__); /* fall through */ \ - case 21: fn(__VA_ARGS__); /* fall through */ \ - case 20: fn(__VA_ARGS__); /* fall through */ \ - case 19: fn(__VA_ARGS__); /* fall through */ \ - case 18: fn(__VA_ARGS__); /* fall through */ \ - case 17: fn(__VA_ARGS__); /* fall through */ \ - case 16: fn(__VA_ARGS__); /* fall through */ \ - case 15: fn(__VA_ARGS__); /* fall through */ \ - case 14: fn(__VA_ARGS__); /* fall through */ \ - case 13: fn(__VA_ARGS__); /* fall through */ \ - case 12: fn(__VA_ARGS__); /* fall through */ \ - case 11: fn(__VA_ARGS__); /* fall through */ \ - case 10: fn(__VA_ARGS__); /* fall through */ \ - case 9: fn(__VA_ARGS__); /* fall through */ \ - case 8: fn(__VA_ARGS__); /* fall through */ \ - case 7: fn(__VA_ARGS__); /* fall through */ \ - case 6: fn(__VA_ARGS__); /* fall through */ \ - case 5: fn(__VA_ARGS__); /* fall through */ \ - case 4: fn(__VA_ARGS__); /* fall through */ \ - case 3: fn(__VA_ARGS__); /* fall through */ \ - case 2: fn(__VA_ARGS__); /* fall through */ \ - case 1: fn(__VA_ARGS__); /* fall through */ \ + case 32: fn(__VA_ARGS__); fallthrough; \ + case 31: fn(__VA_ARGS__); fallthrough; \ + case 30: fn(__VA_ARGS__); fallthrough; \ + case 29: fn(__VA_ARGS__); fallthrough; \ + case 28: fn(__VA_ARGS__); fallthrough; \ + case 27: fn(__VA_ARGS__); fallthrough; \ + case 26: fn(__VA_ARGS__); fallthrough; \ + case 25: fn(__VA_ARGS__); fallthrough; \ + case 24: fn(__VA_ARGS__); fallthrough; \ + case 23: fn(__VA_ARGS__); fallthrough; \ + case 22: fn(__VA_ARGS__); fallthrough; \ + case 21: fn(__VA_ARGS__); fallthrough; \ + case 20: fn(__VA_ARGS__); fallthrough; \ + case 19: fn(__VA_ARGS__); fallthrough; \ + case 18: fn(__VA_ARGS__); fallthrough; \ + case 17: fn(__VA_ARGS__); fallthrough; \ + case 16: fn(__VA_ARGS__); fallthrough; \ + case 15: 
fn(__VA_ARGS__); fallthrough; \ + case 14: fn(__VA_ARGS__); fallthrough; \ + case 13: fn(__VA_ARGS__); fallthrough; \ + case 12: fn(__VA_ARGS__); fallthrough; \ + case 11: fn(__VA_ARGS__); fallthrough; \ + case 10: fn(__VA_ARGS__); fallthrough; \ + case 9: fn(__VA_ARGS__); fallthrough; \ + case 8: fn(__VA_ARGS__); fallthrough; \ + case 7: fn(__VA_ARGS__); fallthrough; \ + case 6: fn(__VA_ARGS__); fallthrough; \ + case 5: fn(__VA_ARGS__); fallthrough; \ + case 4: fn(__VA_ARGS__); fallthrough; \ + case 3: fn(__VA_ARGS__); fallthrough; \ + case 2: fn(__VA_ARGS__); fallthrough; \ + case 1: fn(__VA_ARGS__); fallthrough; \ case 0: break; \ \ default: \ diff --git a/arch/nds32/kernel/fpu.c b/arch/nds32/kernel/fpu.c index 62bdafbc53f4..9edd7ed7d7bf 100644 --- a/arch/nds32/kernel/fpu.c +++ b/arch/nds32/kernel/fpu.c @@ -45,7 +45,7 @@ void save_fpu(struct task_struct *tsk) : /* no output */ : "r" (&tsk->thread.fpu) : "memory"); - /* fall through */ + fallthrough; case SP32_DP16_reg: asm volatile ("fsdi $fd15, [%0+0x78]\n\t" "fsdi $fd14, [%0+0x70]\n\t" @@ -58,7 +58,7 @@ void save_fpu(struct task_struct *tsk) : /* no output */ : "r" (&tsk->thread.fpu) : "memory"); - /* fall through */ + fallthrough; case SP16_DP8_reg: asm volatile ("fsdi $fd7, [%0+0x38]\n\t" "fsdi $fd6, [%0+0x30]\n\t" @@ -67,7 +67,7 @@ void save_fpu(struct task_struct *tsk) : /* no output */ : "r" (&tsk->thread.fpu) : "memory"); - /* fall through */ + fallthrough; case SP8_DP4_reg: asm volatile ("fsdi $fd3, [%1+0x18]\n\t" "fsdi $fd2, [%1+0x10]\n\t" @@ -108,7 +108,7 @@ void load_fpu(const struct fpu_struct *fpregs) "fldi $fd16, [%0+0x80]\n\t" : /* no output */ : "r" (fpregs)); - /* fall through */ + fallthrough; case SP32_DP16_reg: asm volatile ("fldi $fd15, [%0+0x78]\n\t" "fldi $fd14, [%0+0x70]\n\t" @@ -120,7 +120,7 @@ void load_fpu(const struct fpu_struct *fpregs) "fldi $fd8, [%0+0x40]\n\t" : /* no output */ : "r" (fpregs)); - /* fall through */ + fallthrough; case SP16_DP8_reg: asm volatile ("fldi $fd7, [%0+0x38]\n\t" "fldi $fd6, [%0+0x30]\n\t" @@ -128,7 +128,7 @@ void load_fpu(const struct fpu_struct *fpregs) "fldi $fd4, [%0+0x20]\n\t" : /* no output */ : "r" (fpregs)); - /* fall through */ + fallthrough; case SP8_DP4_reg: asm volatile ("fldi $fd3, [%1+0x18]\n\t" "fldi $fd2, [%1+0x10]\n\t" diff --git a/arch/nds32/kernel/signal.c b/arch/nds32/kernel/signal.c index 330b19fcd990..36e25a410bb0 100644 --- a/arch/nds32/kernel/signal.c +++ b/arch/nds32/kernel/signal.c @@ -316,7 +316,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) regs->uregs[0] = -EINTR; break; } - /* Else, fall through */ + fallthrough; case -ERESTARTNOINTR: regs->uregs[0] = regs->orig_r0; regs->ipc -= 4; @@ -361,7 +361,7 @@ static void do_signal(struct pt_regs *regs) switch (regs->uregs[0]) { case -ERESTART_RESTARTBLOCK: regs->uregs[15] = __NR_restart_syscall; - /* Fall through */ + fallthrough; case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c index 97804f21a40c..c779364f0cd0 100644 --- a/arch/openrisc/kernel/signal.c +++ b/arch/openrisc/kernel/signal.c @@ -244,7 +244,7 @@ int do_signal(struct pt_regs *regs, int syscall) switch (retval) { case -ERESTART_RESTARTBLOCK: restart = -2; - /* Fall through */ + fallthrough; case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index 5df5d4cd5d4c..3c037fc96038 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ 
-502,7 +502,7 @@ syscall_restart(struct pt_regs *regs, struct k_sigaction *ka) regs->gr[28] = -EINTR; break; } - /* fallthrough */ + fallthrough; case -ERESTARTNOINTR: check_syscallno_in_delay_branch(regs); break; diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 43875c289723..a52c7abf2ca4 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -437,7 +437,6 @@ void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long o break; default: - /* Fall through */ break; } @@ -644,12 +643,12 @@ void notrace handle_interruption(int code, struct pt_regs *regs) case 15: /* Data TLB miss fault/Data page fault */ - /* Fall through */ + fallthrough; case 16: /* Non-access instruction TLB miss fault */ /* The instruction TLB entry needed for the target address of the FIC is absent, and hardware can't find it, so we get to cleanup */ - /* Fall through */ + fallthrough; case 17: /* Non-access data TLB miss fault/Non-access data page fault */ /* FIXME: @@ -673,7 +672,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) handle_unaligned(regs); return; } - /* Fall Through */ + fallthrough; case 26: /* PCXL: Data memory access rights trap */ fault_address = regs->ior; @@ -683,7 +682,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) case 19: /* Data memory break trap */ regs->gr[0] |= PSW_X; /* So we can single-step over the trap */ - /* fall thru */ + fallthrough; case 21: /* Page reference trap */ handle_gdb_break(regs, TRAP_HWBKPT); @@ -730,7 +729,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) } mmap_read_unlock(current->mm); } - /* Fall Through */ + fallthrough; case 27: /* Data memory protection ID trap */ if (code == 27 && !user_mode(regs) && diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 4bfe2da9fbe3..716960f5d92e 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -67,7 +67,7 @@ parisc_acctyp(unsigned long code, unsigned int inst) case 0x30000000: /* coproc2 */ if (bit22set(inst)) return VM_WRITE; - /* fall through */ + fallthrough; case 0x0: /* indexed/memory management */ if (bit22set(inst)) { @@ -370,7 +370,7 @@ bad_area: } /* probably address is outside of mapped file */ - /* fall through */ + fallthrough; case 17: /* NA data TLB miss / page fault */ case 18: /* Unaligned access - PCXS only */ signo = SIGBUS; diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index 16d09b36fe06..78d61f97371e 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -475,7 +475,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, case BPF_JMP | BPF_JSET | BPF_K: case BPF_JMP | BPF_JSET | BPF_X: true_cond = COND_NE; - /* Fall through */ + fallthrough; cond_branch: /* same targets, can avoid doing the test :) */ if (filter[i].jt == filter[i].jf) { diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c index 17ba190e84a5..e996e08f1061 100644 --- a/arch/riscv/kernel/signal.c +++ b/arch/riscv/kernel/signal.c @@ -250,7 +250,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) regs->a0 = -EINTR; break; } - /* fallthrough */ + fallthrough; case -ERESTARTNOINTR: regs->a0 = regs->orig_a0; regs->epc -= 0x4; diff --git a/arch/riscv/net/bpf_jit_comp32.c b/arch/riscv/net/bpf_jit_comp32.c index bc5f2204693f..579575f9cdae 100644 --- a/arch/riscv/net/bpf_jit_comp32.c +++ b/arch/riscv/net/bpf_jit_comp32.c @@ -1020,7 +1020,7 @@ int bpf_jit_emit_insn(const struct bpf_insn 
*insn, struct rv_jit_context *ctx, emit_zext64(dst, ctx); break; } - /* Fallthrough. */ + fallthrough; case BPF_ALU | BPF_ADD | BPF_X: case BPF_ALU | BPF_SUB | BPF_X: @@ -1079,7 +1079,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, case 16: emit(rv_slli(lo(rd), lo(rd), 16), ctx); emit(rv_srli(lo(rd), lo(rd), 16), ctx); - /* Fallthrough. */ + fallthrough; case 32: if (!ctx->prog->aux->verifier_zext) emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx); diff --git a/arch/sh/drivers/platform_early.c b/arch/sh/drivers/platform_early.c index f3dc3f25b3ff..143747c45206 100644 --- a/arch/sh/drivers/platform_early.c +++ b/arch/sh/drivers/platform_early.c @@ -246,7 +246,7 @@ static int __init sh_early_platform_driver_probe_id(char *class_str, case EARLY_PLATFORM_ID_ERROR: pr_warn("%s: unable to parse %s parameter\n", class_str, epdrv->pdrv->driver.name); - /* fall-through */ + fallthrough; case EARLY_PLATFORM_ID_UNSET: match = NULL; break; diff --git a/arch/sh/kernel/disassemble.c b/arch/sh/kernel/disassemble.c index 08e1af63edd9..34e25a439c81 100644 --- a/arch/sh/kernel/disassemble.c +++ b/arch/sh/kernel/disassemble.c @@ -486,7 +486,7 @@ static void print_sh_insn(u32 memaddr, u16 insn) pr_cont("xd%d", rn & ~1); break; } - /* else, fall through */ + fallthrough; case D_REG_N: pr_cont("dr%d", rn); break; @@ -495,7 +495,7 @@ static void print_sh_insn(u32 memaddr, u16 insn) pr_cont("xd%d", rm & ~1); break; } - /* else, fall through */ + fallthrough; case D_REG_M: pr_cont("dr%d", rm); break; diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c index 0d5f3c9d52f3..e4147efa9ec6 100644 --- a/arch/sh/kernel/kgdb.c +++ b/arch/sh/kernel/kgdb.c @@ -266,7 +266,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, ptr = &remcomInBuffer[1]; if (kgdb_hex2long(&ptr, &addr)) linux_regs->pc = addr; - /* fallthrough */ + fallthrough; case 'D': case 'k': atomic_set(&kgdb_cpu_doing_single_step, -1); diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c index a0fbb8427b39..4fe3f00137bc 100644 --- a/arch/sh/kernel/signal_32.c +++ b/arch/sh/kernel/signal_32.c @@ -418,7 +418,7 @@ handle_syscall_restart(unsigned long save_r0, struct pt_regs *regs, case -ERESTARTSYS: if (!(sa->sa_flags & SA_RESTART)) goto no_system_call_restart; - /* fallthrough */ + fallthrough; case -ERESTARTNOINTR: regs->regs[0] = save_r0; regs->pc -= instruction_size(__raw_readw(regs->pc - 4)); diff --git a/arch/sparc/kernel/auxio_64.c b/arch/sparc/kernel/auxio_64.c index 4843f48bfe85..774a82b0c649 100644 --- a/arch/sparc/kernel/auxio_64.c +++ b/arch/sparc/kernel/auxio_64.c @@ -87,7 +87,6 @@ void auxio_set_lte(int on) __auxio_sbus_set_lte(on); break; case AUXIO_TYPE_EBUS: - /* FALL-THROUGH */ default: break; } diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c index bfae98ab8638..23f8838dd96e 100644 --- a/arch/sparc/kernel/central.c +++ b/arch/sparc/kernel/central.c @@ -55,7 +55,7 @@ static int clock_board_calc_nslots(struct clock_board *p) else return 5; } - /* Fallthrough */ + fallthrough; default: return 4; } diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c index 7580775a14b9..58ad3f7de1fb 100644 --- a/arch/sparc/kernel/kgdb_32.c +++ b/arch/sparc/kernel/kgdb_32.c @@ -122,7 +122,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, linux_regs->pc = addr; linux_regs->npc = addr + 4; } - /* fall through */ + fallthrough; case 'D': case 'k': diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c index 
5d6c2d287e85..177746ae2c81 100644 --- a/arch/sparc/kernel/kgdb_64.c +++ b/arch/sparc/kernel/kgdb_64.c @@ -148,7 +148,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, linux_regs->tpc = addr; linux_regs->tnpc = addr + 4; } - /* fall through */ + fallthrough; case 'D': case 'k': diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c index c0886b400dad..2a12c86af956 100644 --- a/arch/sparc/kernel/pcr.c +++ b/arch/sparc/kernel/pcr.c @@ -359,7 +359,7 @@ int __init pcr_arch_init(void) * counter overflow interrupt so we can't make use of * their hardware currently. */ - /* fallthrough */ + fallthrough; default: err = -ENODEV; goto out_unregister; diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c index da8902295c8c..3df960c137f7 100644 --- a/arch/sparc/kernel/prom_32.c +++ b/arch/sparc/kernel/prom_32.c @@ -224,7 +224,7 @@ void __init of_console_init(void) case PROMDEV_TTYB: skip = 1; - /* FALLTHRU */ + fallthrough; case PROMDEV_TTYA: type = "serial"; diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index e2c6f0abda00..e9695a06492f 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c @@ -646,7 +646,7 @@ static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs case ERESTARTSYS: if (!(sa->sa_flags & SA_RESTART)) goto no_system_call_restart; - /* fallthrough */ + fallthrough; case ERESTARTNOINTR: regs->u_regs[UREG_I0] = orig_i0; regs->tpc -= 4; @@ -686,7 +686,7 @@ void do_signal32(struct pt_regs * regs) regs->tpc -= 4; regs->tnpc -= 4; pt_regs_clear_syscall(regs); - /* fall through */ + fallthrough; case ERESTART_RESTARTBLOCK: regs->u_regs[UREG_G1] = __NR_restart_syscall; regs->tpc -= 4; diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index f1f8c8ebe641..d0e0025ee3ba 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -440,7 +440,7 @@ static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, case ERESTARTSYS: if (!(sa->sa_flags & SA_RESTART)) goto no_system_call_restart; - /* fallthrough */ + fallthrough; case ERESTARTNOINTR: regs->u_regs[UREG_I0] = orig_i0; regs->pc -= 4; @@ -506,7 +506,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) regs->pc -= 4; regs->npc -= 4; pt_regs_clear_syscall(regs); - /* fall through */ + fallthrough; case ERESTART_RESTARTBLOCK: regs->u_regs[UREG_G1] = __NR_restart_syscall; regs->pc -= 4; diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 6937339a272c..255264bcb46a 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -461,7 +461,7 @@ static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, case ERESTARTSYS: if (!(sa->sa_flags & SA_RESTART)) goto no_system_call_restart; - /* fallthrough */ + fallthrough; case ERESTARTNOINTR: regs->u_regs[UREG_I0] = orig_i0; regs->tpc -= 4; @@ -532,7 +532,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) regs->tpc -= 4; regs->tnpc -= 4; pt_regs_clear_syscall(regs); - /* fall through */ + fallthrough; case ERESTART_RESTARTBLOCK: regs->u_regs[UREG_G1] = __NR_restart_syscall; regs->tpc -= 4; diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c index 72e560ef4a09..d5beec856146 100644 --- a/arch/sparc/math-emu/math_32.c +++ b/arch/sparc/math-emu/math_32.c @@ -359,7 +359,7 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs) *pfsr |= (6 << 14); return 0; /* simulate 
invalid_fp_register exception */ } - /* fall through */ + fallthrough; case 2: if (freg & 1) { /* doublewords must have bit 5 zeroed */ *pfsr |= (6 << 14); @@ -380,7 +380,7 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs) *pfsr |= (6 << 14); return 0; /* simulate invalid_fp_register exception */ } - /* fall through */ + fallthrough; case 2: if (freg & 1) { /* doublewords must have bit 5 zeroed */ *pfsr |= (6 << 14); @@ -408,13 +408,13 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs) *pfsr |= (6 << 14); return 0; /* simulate invalid_fp_register exception */ } - /* fall through */ + fallthrough; case 2: if (freg & 1) { /* doublewords must have bit 5 zeroed */ *pfsr |= (6 << 14); return 0; } - /* fall through */ + fallthrough; case 1: rd = (void *)&fregs[freg]; break; diff --git a/arch/sparc/net/bpf_jit_comp_32.c b/arch/sparc/net/bpf_jit_comp_32.c index c8eabb973b86..b1dbf2fa8c0a 100644 --- a/arch/sparc/net/bpf_jit_comp_32.c +++ b/arch/sparc/net/bpf_jit_comp_32.c @@ -491,7 +491,7 @@ void bpf_jit_compile(struct bpf_prog *fp) } else { emit_loadimm(K, r_A); } - /* Fallthrough */ + fallthrough; case BPF_RET | BPF_A: if (seen_or_pass0) { if (i != flen - 1) { diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c index 3d57c71c532e..88cd9b5c1b74 100644 --- a/arch/um/kernel/signal.c +++ b/arch/um/kernel/signal.c @@ -70,7 +70,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) PT_REGS_SYSCALL_RET(regs) = -EINTR; break; } - /* fallthrough */ + fallthrough; case -ERESTARTNOINTR: PT_REGS_RESTART_SYSCALL(regs); PT_REGS_ORIG_SYSCALL(regs) = PT_REGS_SYSCALL_NR(regs); diff --git a/arch/x86/boot/cmdline.c b/arch/x86/boot/cmdline.c index 4ff01176c1cc..21d56ae83cdf 100644 --- a/arch/x86/boot/cmdline.c +++ b/arch/x86/boot/cmdline.c @@ -54,7 +54,7 @@ int __cmdline_find_option(unsigned long cmdline_ptr, const char *option, char *b /* else */ state = st_wordcmp; opptr = option; - /* fall through */ + fallthrough; case st_wordcmp: if (c == '=' && !*opptr) { @@ -129,7 +129,7 @@ int __cmdline_find_option_bool(unsigned long cmdline_ptr, const char *option) state = st_wordcmp; opptr = option; wstart = pos; - /* fall through */ + fallthrough; case st_wordcmp: if (!*opptr) diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c index 0048269180d5..dde7cb3724df 100644 --- a/arch/x86/boot/compressed/kaslr.c +++ b/arch/x86/boot/compressed/kaslr.c @@ -178,7 +178,7 @@ parse_memmap(char *p, unsigned long long *start, unsigned long long *size, } *size = 0; } - /* Fall through */ + fallthrough; default: /* * If w/o offset, only size specified, memmap=nn[KMG] has the diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 50963472ee85..31e6887d24f1 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -4682,7 +4682,7 @@ __init int intel_pmu_init(void) case INTEL_FAM6_CORE2_MEROM: x86_add_quirk(intel_clovertown_quirk); - /* fall through */ + fallthrough; case INTEL_FAM6_CORE2_MEROM_L: case INTEL_FAM6_CORE2_PENRYN: @@ -5062,7 +5062,7 @@ __init int intel_pmu_init(void) case INTEL_FAM6_SKYLAKE_X: pmem = true; - /* fall through */ + fallthrough; case INTEL_FAM6_SKYLAKE_L: case INTEL_FAM6_SKYLAKE: case INTEL_FAM6_KABYLAKE_L: @@ -5114,7 +5114,7 @@ __init int intel_pmu_init(void) case INTEL_FAM6_ICELAKE_X: case INTEL_FAM6_ICELAKE_D: pmem = true; - /* fall through */ + fallthrough; case INTEL_FAM6_ICELAKE_L: case INTEL_FAM6_ICELAKE: case INTEL_FAM6_TIGERLAKE_L: diff 
--git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index 63f58bdf556c..8961653c5dd2 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -1268,7 +1268,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort) ret = X86_BR_ZERO_CALL; break; } - /* fall through */ + fallthrough; case 0x9a: /* call far absolute */ ret = X86_BR_CALL; break; diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index c3daf0aaa0ee..cdaab30880b9 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -239,7 +239,7 @@ void __init arch_init_ideal_nops(void) return; } - /* fall through */ + fallthrough; default: #ifdef CONFIG_X86_64 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 21325a4a78b9..779a89e31c4c 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -800,7 +800,7 @@ static int irq_polarity(int idx) return IOAPIC_POL_HIGH; case MP_IRQPOL_RESERVED: pr_warn("IOAPIC: Invalid polarity: 2, defaulting to low\n"); - /* fall through */ + fallthrough; case MP_IRQPOL_ACTIVE_LOW: default: /* Pointless default required due to do gcc stupidity */ return IOAPIC_POL_LOW; @@ -848,7 +848,7 @@ static int irq_trigger(int idx) return IOAPIC_EDGE; case MP_IRQTRIG_RESERVED: pr_warn("IOAPIC: Invalid trigger mode 2 defaulting to level\n"); - /* fall through */ + fallthrough; case MP_IRQTRIG_LEVEL: default: /* Pointless default required due to do gcc stupidity */ return IOAPIC_LEVEL; diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index 7bda71def557..99ee61c9ba54 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c @@ -149,7 +149,7 @@ void __init default_setup_apic_routing(void) break; } /* P4 and above */ - /* fall through */ + fallthrough; case X86_VENDOR_HYGON: case X86_VENDOR_AMD: def_to_bigsmp = 1; diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c index c7503be92f35..57074cf3ad7c 100644 --- a/arch/x86/kernel/cpu/cacheinfo.c +++ b/arch/x86/kernel/cpu/cacheinfo.c @@ -248,7 +248,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, switch (leaf) { case 1: l1 = &l1i; - /* fall through */ + fallthrough; case 0: if (!l1->val) return; diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c index 7843ab3fde09..3a44346f2276 100644 --- a/arch/x86/kernel/cpu/mce/inject.c +++ b/arch/x86/kernel/cpu/mce/inject.c @@ -199,7 +199,7 @@ static int raise_local(void) * calling irq_enter, but the necessary * machinery isn't exported currently. 
*/ - /*FALL THROUGH*/ + fallthrough; case MCJ_CTX_PROCESS: raise_exception(m, NULL); break; diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c index d8f9230d2034..abe9fe0fb851 100644 --- a/arch/x86/kernel/cpu/mce/intel.c +++ b/arch/x86/kernel/cpu/mce/intel.c @@ -193,7 +193,7 @@ unsigned long cmci_intel_adjust_timer(unsigned long interval) if (!atomic_sub_return(1, &cmci_storm_on_cpus)) pr_notice("CMCI storm subsided: switching to interrupt mode\n"); - /* FALLTHROUGH */ + fallthrough; case CMCI_STORM_SUBSIDED: /* diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c index 72182809b333..ca670919b561 100644 --- a/arch/x86/kernel/cpu/mtrr/cyrix.c +++ b/arch/x86/kernel/cpu/mtrr/cyrix.c @@ -98,7 +98,7 @@ cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg) case 7: if (size < 0x40) break; - /* Else, fall through */ + fallthrough; case 6: case 5: case 4: diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index 8cdf29ffd95f..b98ff620ba77 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -349,7 +349,7 @@ static int arch_build_bp_info(struct perf_event *bp, hw->len = X86_BREAKPOINT_LEN_X; return 0; } - /* fall through */ + fallthrough; default: return -EINVAL; } diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 68acd30c6b87..c2f02f308ecf 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -450,7 +450,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, ptr = &remcomInBuffer[1]; if (kgdb_hex2long(&ptr, &addr)) linux_regs->ip = addr; - /* fall through */ + fallthrough; case 'D': case 'k': /* clear the trace bit */ @@ -539,7 +539,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd) * a system call which should be ignored */ return NOTIFY_DONE; - /* fall through */ + fallthrough; default: if (user_mode(regs)) return NOTIFY_DONE; diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 411af4aa7b51..baa21090c9be 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -312,7 +312,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type) case 2: if (i == 0 || i == 13) continue; /* IRQ0 & IRQ13 not connected */ - /* fall through */ + fallthrough; default: if (i == 2) continue; /* IRQ2 is never connected */ @@ -356,7 +356,7 @@ static void __init construct_ioapic_table(int mpc_default_type) default: pr_err("???\nUnknown standard configuration %d\n", mpc_default_type); - /* fall through */ + fallthrough; case 1: case 5: memcpy(bus.bustype, "ISA ", 6); diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 5679aa3fdcb8..e7537c5440bb 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -204,7 +204,7 @@ static int set_segment_reg(struct task_struct *task, case offsetof(struct user_regs_struct, ss): if (unlikely(value == 0)) return -EIO; - /* Else, fall through */ + fallthrough; default: *pt_regs_access(task_pt_regs(task), offset) = value; diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 0ec7ced727fe..a515e2d230b7 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -654,7 +654,7 @@ static void native_machine_emergency_restart(void) case BOOT_CF9_FORCE: port_cf9_safe = true; - /* Fall through */ + fallthrough; case BOOT_CF9_SAFE: if (port_cf9_safe) { diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index d5fa494c2304..be0d7d4152ec 100644 --- 
a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -726,7 +726,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs) regs->ax = -EINTR; break; } - /* fallthrough */ + fallthrough; case -ERESTARTNOINTR: regs->ax = regs->orig_ax; regs->ip -= 2; diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 15e5aad8ac2c..3fdaa042823d 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -735,7 +735,7 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) * OPCODE1() of the "short" jmp which checks the same condition. */ opc1 = OPCODE2(insn) - 0x10; - /* fall through */ + fallthrough; default: if (!is_cond_jmp_opcode(opc1)) return -ENOSYS; @@ -892,7 +892,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, fix_ip_or_call = 0; break; } - /* fall through */ + fallthrough; default: riprel_analyze(auprobe, &insn); } diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index d0e2825ae617..5299ef5ff18d 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -3016,7 +3016,7 @@ static void string_registers_quirk(struct x86_emulate_ctxt *ctxt) case 0xa4: /* movsb */ case 0xa5: /* movsd/w */ *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1; - /* fall through */ + fallthrough; case 0xaa: /* stosb */ case 0xab: /* stosd/w */ *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1; diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 814d3aee5cef..1d330564eed8 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1779,7 +1779,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) ret = kvm_hvcall_signal_event(vcpu, fast, ingpa); if (ret != HV_STATUS_INVALID_PORT_ID) break; - /* fall through - maybe userspace knows this conn_id. */ + fallthrough; /* maybe userspace knows this conn_id */ case HVCALL_POST_MESSAGE: /* don't bother userspace if it has no way to handle it */ if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) { diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c index c47d2acec529..4aa1c2e00e2a 100644 --- a/arch/x86/kvm/irq_comm.c +++ b/arch/x86/kvm/irq_comm.c @@ -285,7 +285,7 @@ int kvm_set_routing_entry(struct kvm *kvm, switch (ue->u.irqchip.irqchip) { case KVM_IRQCHIP_PIC_SLAVE: e->irqchip.pin += PIC_NUM_PINS / 2; - /* fall through */ + fallthrough; case KVM_IRQCHIP_PIC_MASTER: if (ue->u.irqchip.pin >= PIC_NUM_PINS / 2) return -EINVAL; diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 5ccbee7165a2..35cca2e0c802 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1053,7 +1053,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, switch (delivery_mode) { case APIC_DM_LOWEST: vcpu->arch.apic_arb_prio++; - /* fall through */ + fallthrough; case APIC_DM_FIXED: if (unlikely(trig_mode && !level)) break; @@ -1341,7 +1341,7 @@ static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset) break; case APIC_TASKPRI: report_tpr_access(apic, false); - /* fall thru */ + fallthrough; default: val = kvm_lapic_get_reg(apic, offset); break; @@ -2027,7 +2027,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) case APIC_LVT0: apic_manage_nmi_watchdog(apic, val); - /* fall through */ + fallthrough; case APIC_LVTTHMR: case APIC_LVTPC: case APIC_LVT1: diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index a5d0207e7189..43fdb0c12a5d 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4422,7 +4422,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, rsvd_bits(maxphyaddr, 51); 
rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4]; - /* fall through */ + fallthrough; case PT64_ROOT_4LEVEL: rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd | nonleaf_bit8_rsvd | rsvd_bits(7, 7) | diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 03dd7bac8034..0194336b64a4 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2668,7 +2668,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) case MSR_IA32_APICBASE: if (kvm_vcpu_apicv_active(vcpu)) avic_update_vapic_bar(to_svm(vcpu), data); - /* Fall through */ + fallthrough; default: return kvm_set_msr_common(vcpu, msr); } diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 46ba2e03a892..819c185adf09 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -4654,7 +4654,7 @@ static bool rmode_exception(struct kvm_vcpu *vcpu, int vec) vmcs_read32(VM_EXIT_INSTRUCTION_LEN); if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) return false; - /* fall through */ + fallthrough; case DB_VECTOR: return !(vcpu->guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)); @@ -4827,7 +4827,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu) } kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM; kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); - /* fall through */ + fallthrough; case BP_VECTOR: /* * Update instruction length as we may reinject #BP from @@ -5257,7 +5257,7 @@ static int handle_task_switch(struct kvm_vcpu *vcpu) error_code = vmcs_read32(IDT_VECTORING_ERROR_CODE); } - /* fall through */ + fallthrough; case INTR_TYPE_SOFT_EXCEPTION: kvm_clear_exception_queue(vcpu); break; @@ -5610,7 +5610,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu) * keeping track of global entries in shadow page tables. */ - /* fall-through */ + fallthrough; case INVPCID_TYPE_ALL_INCL_GLOBAL: kvm_mmu_unload(vcpu); return kvm_skip_emulated_instruction(vcpu); @@ -6578,7 +6578,7 @@ static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu, break; case INTR_TYPE_SOFT_EXCEPTION: vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); - /* fall through */ + fallthrough; case INTR_TYPE_HARD_EXCEPTION: if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { u32 err = vmcs_read32(error_code_field); @@ -6588,7 +6588,7 @@ static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu, break; case INTR_TYPE_SOFT_INTR: vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); - /* fall through */ + fallthrough; case INTR_TYPE_EXT_INTR: kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR); break; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 33945283fe07..d39d6cf1d473 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1116,14 +1116,12 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) vcpu->arch.eff_db[dr] = val; break; case 4: - /* fall through */ case 6: if (!kvm_dr6_valid(val)) return -1; /* #GP */ vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); break; case 5: - /* fall through */ default: /* 7 */ if (!kvm_dr7_valid(val)) return -1; /* #GP */ @@ -1154,12 +1152,10 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) *val = vcpu->arch.db[array_index_nospec(dr, size)]; break; case 4: - /* fall through */ case 6: *val = vcpu->arch.dr6; break; case 5: - /* fall through */ default: /* 7 */ *val = vcpu->arch.dr7; break; @@ -3051,7 +3047,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_K7_PERFCTR0 ... 
MSR_K7_PERFCTR3: case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1: - pr = true; /* fall through */ + pr = true; + fallthrough; case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3: case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1: if (kvm_pmu_is_valid_msr(vcpu, msr)) @@ -4359,7 +4356,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, case KVM_CAP_HYPERV_SYNIC2: if (cap->args[0]) return -EINVAL; - /* fall through */ + fallthrough; case KVM_CAP_HYPERV_SYNIC: if (!irqchip_in_kernel(vcpu->kvm)) @@ -8672,7 +8669,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) vcpu->arch.pv.pv_unhalted = false; vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; - /* fall through */ + fallthrough; case KVM_MP_STATE_RUNNABLE: vcpu->arch.apf.halted = false; break; diff --git a/arch/x86/lib/cmdline.c b/arch/x86/lib/cmdline.c index 4f1719e22d3c..b6da09339308 100644 --- a/arch/x86/lib/cmdline.c +++ b/arch/x86/lib/cmdline.c @@ -58,7 +58,7 @@ __cmdline_find_option_bool(const char *cmdline, int max_cmdline_size, state = st_wordcmp; opptr = option; wstart = pos; - /* fall through */ + fallthrough; case st_wordcmp: if (!*opptr) { @@ -89,7 +89,7 @@ __cmdline_find_option_bool(const char *cmdline, int max_cmdline_size, break; } state = st_wordskip; - /* fall through */ + fallthrough; case st_wordskip: if (!c) @@ -151,7 +151,7 @@ __cmdline_find_option(const char *cmdline, int max_cmdline_size, state = st_wordcmp; opptr = option; - /* fall through */ + fallthrough; case st_wordcmp: if ((c == '=') && !*opptr) { @@ -172,7 +172,7 @@ __cmdline_find_option(const char *cmdline, int max_cmdline_size, break; } state = st_wordskip; - /* fall through */ + fallthrough; case st_wordskip: if (myisspace(c)) diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c index 31600d851fd8..5e69603ff63f 100644 --- a/arch/x86/lib/insn-eval.c +++ b/arch/x86/lib/insn-eval.c @@ -179,7 +179,7 @@ static int resolve_default_seg(struct insn *insn, struct pt_regs *regs, int off) if (insn->addr_bytes == 2) return -EINVAL; - /* fall through */ + fallthrough; case -EDOM: case offsetof(struct pt_regs, bx): @@ -362,7 +362,6 @@ static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx) case INAT_SEG_REG_GS: return vm86regs->gs; case INAT_SEG_REG_IGNORE: - /* fall through */ default: return -EINVAL; } @@ -386,7 +385,6 @@ static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx) */ return get_user_gs(regs); case INAT_SEG_REG_IGNORE: - /* fall through */ default: return -EINVAL; } @@ -786,7 +784,7 @@ int insn_get_code_seg_params(struct pt_regs *regs) */ return INSN_CODE_SEG_PARAMS(4, 8); case 3: /* Invalid setting. CS.L=1, CS.D=1 */ - /* fall through */ + fallthrough; default: return -EINVAL; } diff --git a/arch/x86/math-emu/errors.c b/arch/x86/math-emu/errors.c index 73dc66d887f3..ec071cbb0804 100644 --- a/arch/x86/math-emu/errors.c +++ b/arch/x86/math-emu/errors.c @@ -186,7 +186,7 @@ void FPU_printall(void) case TAG_Special: /* Update tagi for the printk below */ tagi = FPU_Special(r); - /* fall through */ + fallthrough; case TAG_Valid: printk("st(%d) %c .%04lx %04lx %04lx %04lx e%+-6d ", i, getsign(r) ? 
'-' : '+', diff --git a/arch/x86/math-emu/fpu_trig.c b/arch/x86/math-emu/fpu_trig.c index 127ea54122d7..4a9887851ad8 100644 --- a/arch/x86/math-emu/fpu_trig.c +++ b/arch/x86/math-emu/fpu_trig.c @@ -1352,7 +1352,7 @@ static void fyl2xp1(FPU_REG *st0_ptr, u_char st0_tag) case TW_Denormal: if (denormal_operand() < 0) return; - /* fall through */ + fallthrough; case TAG_Zero: case TAG_Valid: setsign(st0_ptr, getsign(st0_ptr) ^ getsign(st1_ptr)); diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 84d85dbd1dad..9e5ccc56f8e0 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -574,7 +574,7 @@ static bool memremap_should_map_decrypted(resource_size_t phys_addr, /* For SEV, these areas are encrypted */ if (sev_active()) break; - /* Fallthrough */ + fallthrough; case E820_TYPE_PRAM: return true; diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c index 76cee341507b..b3b17d6c50f0 100644 --- a/arch/xtensa/kernel/signal.c +++ b/arch/xtensa/kernel/signal.c @@ -448,7 +448,7 @@ static void do_signal(struct pt_regs *regs) regs->areg[2] = -EINTR; break; } - /* fallthrough */ + fallthrough; case -ERESTARTNOINTR: regs->areg[2] = regs->syscall; regs->pc -= 3; diff --git a/block/badblocks.c b/block/badblocks.c index 2e5f5697db35..d39056630d9c 100644 --- a/block/badblocks.c +++ b/block/badblocks.c @@ -525,7 +525,7 @@ ssize_t badblocks_store(struct badblocks *bb, const char *page, size_t len, case 3: if (newline != '\n') return -EINVAL; - /* fall through */ + fallthrough; case 2: if (length <= 0) return -EINVAL; diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index a4c0bec920cb..c34b090178a9 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -4980,7 +4980,7 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic) pr_err("bdi %s: bfq: bad prio class %d\n", bdi_dev_name(bfqq->bfqd->queue->backing_dev_info), ioprio_class); - /* fall through */ + fallthrough; case IOPRIO_CLASS_NONE: /* * No prio set, inherit CPU scheduling settings. 
@@ -5112,7 +5112,7 @@ static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd, return &bfqg->async_bfqq[0][ioprio]; case IOPRIO_CLASS_NONE: ioprio = IOPRIO_NORM; - /* fall through */ + fallthrough; case IOPRIO_CLASS_BE: return &bfqg->async_bfqq[1][ioprio]; case IOPRIO_CLASS_IDLE: diff --git a/block/blk-wbt.c b/block/blk-wbt.c index 0fa615eefd52..fd410086fe1d 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c @@ -528,7 +528,7 @@ static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio) if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) == (REQ_SYNC | REQ_IDLE)) return false; - /* fallthrough */ + fallthrough; case REQ_OP_DISCARD: return true; default: diff --git a/block/ioprio.c b/block/ioprio.c index 77bcab11dce5..04ebd37966f1 100644 --- a/block/ioprio.c +++ b/block/ioprio.c @@ -71,7 +71,7 @@ int ioprio_check_cap(int ioprio) case IOPRIO_CLASS_RT: if (!capable(CAP_SYS_ADMIN)) return -EPERM; - /* fall through */ + fallthrough; /* rt has prio field too */ case IOPRIO_CLASS_BE: if (data >= IOPRIO_BE_NR || data < 0) diff --git a/crypto/drbg.c b/crypto/drbg.c index e99fe34cfa00..3132967a1749 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1521,7 +1521,7 @@ static int drbg_prepare_hrng(struct drbg_state *drbg) case -EALREADY: err = 0; - /* fall through */ + fallthrough; default: drbg->random_ready.func = NULL; diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index ba0b7702f2e9..12e82a61b896 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -2348,121 +2348,121 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) test_hash_speed(alg, sec, generic_hash_speed_template); break; } - /* fall through */ + fallthrough; case 301: test_hash_speed("md4", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 302: test_hash_speed("md5", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 303: test_hash_speed("sha1", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 304: test_hash_speed("sha256", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 305: test_hash_speed("sha384", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 306: test_hash_speed("sha512", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 307: test_hash_speed("wp256", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 308: test_hash_speed("wp384", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 309: test_hash_speed("wp512", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 310: test_hash_speed("tgr128", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 311: test_hash_speed("tgr160", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 312: test_hash_speed("tgr192", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 313: test_hash_speed("sha224", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 314: 
test_hash_speed("rmd128", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 315: test_hash_speed("rmd160", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 316: test_hash_speed("rmd256", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 317: test_hash_speed("rmd320", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 318: test_hash_speed("ghash-generic", sec, hash_speed_template_16); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 319: test_hash_speed("crc32c", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 320: test_hash_speed("crct10dif", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 321: test_hash_speed("poly1305", sec, poly1305_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 322: test_hash_speed("sha3-224", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 323: test_hash_speed("sha3-256", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 324: test_hash_speed("sha3-384", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 325: test_hash_speed("sha3-512", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 326: test_hash_speed("sm3", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 327: test_hash_speed("streebog256", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 328: test_hash_speed("streebog512", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 399: break; @@ -2471,121 +2471,121 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) test_ahash_speed(alg, sec, generic_hash_speed_template); break; } - /* fall through */ + fallthrough; case 401: test_ahash_speed("md4", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 402: test_ahash_speed("md5", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 403: test_ahash_speed("sha1", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 404: test_ahash_speed("sha256", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 405: test_ahash_speed("sha384", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 406: test_ahash_speed("sha512", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 407: test_ahash_speed("wp256", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 408: test_ahash_speed("wp384", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 409: 
test_ahash_speed("wp512", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 410: test_ahash_speed("tgr128", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 411: test_ahash_speed("tgr160", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 412: test_ahash_speed("tgr192", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 413: test_ahash_speed("sha224", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 414: test_ahash_speed("rmd128", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 415: test_ahash_speed("rmd160", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 416: test_ahash_speed("rmd256", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 417: test_ahash_speed("rmd320", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 418: test_ahash_speed("sha3-224", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 419: test_ahash_speed("sha3-256", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 420: test_ahash_speed("sha3-384", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 421: test_ahash_speed("sha3-512", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 422: test_mb_ahash_speed("sha1", sec, generic_hash_speed_template, num_mb); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 423: test_mb_ahash_speed("sha256", sec, generic_hash_speed_template, num_mb); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 424: test_mb_ahash_speed("sha512", sec, generic_hash_speed_template, num_mb); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 425: test_mb_ahash_speed("sm3", sec, generic_hash_speed_template, num_mb); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 426: test_mb_ahash_speed("streebog256", sec, generic_hash_speed_template, num_mb); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 427: test_mb_ahash_speed("streebog512", sec, generic_hash_speed_template, num_mb); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 499: break; diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c index c2b452af6806..9861302cc7db 100644 --- a/drivers/accessibility/braille/braille_console.c +++ b/drivers/accessibility/braille/braille_console.c @@ -290,7 +290,7 @@ static int vt_notifier_call(struct notifier_block *blk, break; case '\t': c = ' '; - /* Fallthrough */ + fallthrough; default: if (c < 32) /* Ignore other control sequences */ diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c index 6853dbb4131d..49f7acbfcf01 100644 --- a/drivers/ata/ahci_brcm.c +++ b/drivers/ata/ahci_brcm.c @@ -470,7 +470,7 @@ static int brcm_ahci_probe(struct platform_device *pdev) switch 
(priv->version) { case BRCM_SATA_BCM7425: hpriv->flags |= AHCI_HFLAG_DELAY_ENGINE; - /* fall through */ + fallthrough; case BRCM_SATA_NSP: hpriv->flags |= AHCI_HFLAG_NO_NCQ; priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE; diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index 129556fcf6be..86261deeb4c5 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c @@ -326,7 +326,7 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port, node); break; } - /* fall through */ + fallthrough; case -ENODEV: /* continue normally */ hpriv->phys[port] = NULL; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index b1cd4d97bc2a..1a82058defdb 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -190,7 +190,7 @@ struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap, case ATA_LITER_PMP_FIRST: if (sata_pmp_attached(ap)) return ap->pmp_link; - /* fall through */ + fallthrough; case ATA_LITER_HOST_FIRST: return &ap->link; } @@ -201,11 +201,11 @@ struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap, case ATA_LITER_HOST_FIRST: if (sata_pmp_attached(ap)) return ap->pmp_link; - /* fall through */ + fallthrough; case ATA_LITER_PMP_FIRST: if (unlikely(ap->slave_link)) return ap->slave_link; - /* fall through */ + fallthrough; case ATA_LITER_EDGE: return NULL; } @@ -523,7 +523,7 @@ int atapi_cmd_type(u8 opcode) case ATA_12: if (atapi_passthru16) return ATAPI_PASS_THRU; - /* fall thru */ + fallthrough; default: return ATAPI_MISC; } @@ -1800,7 +1800,7 @@ retry: switch (class) { case ATA_DEV_SEMB: class = ATA_DEV_ATA; /* some hard drives report SEMB sig */ - /* fall through */ + fallthrough; case ATA_DEV_ATA: case ATA_DEV_ZAC: tf.command = ATA_CMD_ID_ATA; @@ -2907,7 +2907,7 @@ int ata_bus_probe(struct ata_port *ap) case -ENODEV: /* give it just one more chance */ tries[dev->devno] = min(tries[dev->devno], 1); - /* fall through */ + fallthrough; case -EIO: if (tries[dev->devno] == 1) { /* This is the last chance, better to slow @@ -3158,7 +3158,7 @@ int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel) case ATA_DNXFER_FORCE_PIO0: pio_mask &= 1; - /* fall through */ + fallthrough; case ATA_DNXFER_FORCE_PIO: mwdma_mask = 0; udma_mask = 0; @@ -4694,7 +4694,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc) qc->tf.feature != SETFEATURES_RA_ON && qc->tf.feature != SETFEATURES_RA_OFF) break; - /* fall through */ + fallthrough; case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */ case ATA_CMD_SET_MULTI: /* multi_count changed */ /* revalidate device */ diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 474c6c34fe02..d912eaa65c94 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -1576,7 +1576,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, case ATA_DEV_ZAC: if (stat & ATA_SENSE) ata_eh_request_sense(qc, qc->scsicmd); - /* fall through */ + fallthrough; case ATA_DEV_ATA: if (err & ATA_ICRC) qc->err_mask |= AC_ERR_ATA_BUS; @@ -3473,11 +3473,11 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err) case -ENODEV: /* device missing or wrong IDENTIFY data, schedule probing */ ehc->i.probe_mask |= (1 << dev->devno); - /* fall through */ + fallthrough; case -EINVAL: /* give it just one more chance */ ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); - /* fall through */ + fallthrough; case -EIO: if (ehc->tries[dev->devno] == 1) { /* This is the last chance, better to slow diff --git 
a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index ec233208585b..4ce4cd32508c 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -4162,7 +4162,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd) ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b6); break; } - /* Fallthrough */ + fallthrough; default: ata_scsi_set_invalid_field(dev, cmd, 2, 0xff); break; @@ -4198,7 +4198,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd) * turning this into a no-op. */ case SYNCHRONIZE_CACHE: - /* fall through */ + fallthrough; /* no-op's, complete with success */ case REZERO_UNIT: diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c index e01a3a6e4d46..2bc5fc81efe3 100644 --- a/drivers/ata/pata_atp867x.c +++ b/drivers/ata/pata_atp867x.c @@ -157,7 +157,7 @@ static int atp867x_get_active_clocks_shifted(struct ata_port *ap, default: printk(KERN_WARNING "ATP867X: active %dclk is invalid. " "Using 12clk.\n", clk); - /* fall through */ + fallthrough; case 9 ... 12: clocks = 7; /* 12 clk */ break; @@ -190,7 +190,7 @@ static int atp867x_get_recover_clocks_shifted(unsigned int clk) default: printk(KERN_WARNING "ATP867X: recover %dclk is invalid. " "Using default 12clk.\n", clk); - /* fall through */ + fallthrough; case 12: /* default 12 clk */ clocks = 0; break; diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c index 916bf024d737..7511e11eef4d 100644 --- a/drivers/ata/pata_serverworks.c +++ b/drivers/ata/pata_serverworks.c @@ -369,7 +369,7 @@ static int serverworks_fixup(struct pci_dev *pdev) break; case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE: ata_pci_bmdma_clear_simplex(pdev); - /* fall through */ + fallthrough; case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE: case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2: rc = serverworks_fixup_csb(pdev); diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index d7228f8e9297..664ef658a955 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -2010,7 +2010,7 @@ static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc) break; case ATA_CMD_WRITE_MULTI_FUA_EXT: tf->flags &= ~ATA_TFLAG_FUA; /* ugh */ - /* fall through */ + fallthrough; case ATA_CMD_WRITE_MULTI_EXT: tf->command = ATA_CMD_PIO_WRITE_EXT; break; @@ -2044,7 +2044,7 @@ static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc) case ATA_PROT_DMA: if (tf->command == ATA_CMD_DSM) return AC_ERR_OK; - /* fall-thru */ + fallthrough; case ATA_PROT_NCQ: break; /* continue below */ case ATA_PROT_PIO: @@ -2296,7 +2296,7 @@ static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc) switch (qc->tf.protocol) { case ATAPI_PROT_PIO: pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY; - /* fall through */ + fallthrough; case ATAPI_PROT_NODATA: ap->hsm_task_state = HSM_ST_FIRST; break; @@ -2347,7 +2347,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) return AC_ERR_OTHER; break; /* use bmdma for this */ } - /* fall thru */ + fallthrough; case ATA_PROT_NCQ: mv_start_edma(ap, port_mmio, pp, qc->tf.protocol); pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK; @@ -2376,7 +2376,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) ": attempting PIO w/multiple DRQ: " "this may fail due to h/w errata\n"); } - /* fall through */ + fallthrough; case ATA_PROT_NODATA: case ATAPI_PROT_PIO: case ATAPI_PROT_NODATA: @@ -3864,7 +3864,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) " and avoid the final two gigabytes on" " all RocketRAID BIOS initialized drives.\n"); } - /* 
fall through */ + fallthrough; case chip_6042: hpriv->ops = &mv6xxx_ops; hp_flags |= MV_HP_GEN_IIE; diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c index 8729f78cef5f..7815da8ef9e5 100644 --- a/drivers/ata/sata_promise.c +++ b/drivers/ata/sata_promise.c @@ -637,7 +637,7 @@ static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc) switch (qc->tf.protocol) { case ATA_PROT_DMA: pdc_fill_sg(qc); - /*FALLTHROUGH*/ + fallthrough; case ATA_PROT_NODATA: i = pdc_pkt_header(&qc->tf, qc->ap->bmdma_prd_dma, qc->dev->devno, pp->pkt); @@ -652,7 +652,7 @@ static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc) break; case ATAPI_PROT_DMA: pdc_fill_sg(qc); - /*FALLTHROUGH*/ + fallthrough; case ATAPI_PROT_NODATA: pdc_atapi_pkt(qc); break; @@ -1022,11 +1022,11 @@ static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc) case ATAPI_PROT_NODATA: if (qc->dev->flags & ATA_DFLAG_CDB_INTR) break; - /*FALLTHROUGH*/ + fallthrough; case ATA_PROT_NODATA: if (qc->tf.flags & ATA_TFLAG_POLLING) break; - /*FALLTHROUGH*/ + fallthrough; case ATAPI_PROT_DMA: case ATA_PROT_DMA: pdc_packet_start(qc); diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c index 2c7b30c5ea3d..4c01190a5e37 100644 --- a/drivers/ata/sata_sx4.c +++ b/drivers/ata/sata_sx4.c @@ -669,7 +669,7 @@ static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc) case ATA_PROT_NODATA: if (qc->tf.flags & ATA_TFLAG_POLLING) break; - /*FALLTHROUGH*/ + fallthrough; case ATA_PROT_DMA: pdc20621_packet_start(qc); return 0; diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index 2ca9ec802734..c798856e74a4 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -711,7 +711,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q) switch (STATUS_CODE (qe)) { case 0x01: /* This is for AAL0 where we put the chip in streaming mode */ - /* Fall through */ + fallthrough; case 0x02: /* Process a real txdone entry. 
*/ tmp = qe->p0; diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c index a81bc49c14ac..9a70bee84125 100644 --- a/drivers/atm/fore200e.c +++ b/drivers/atm/fore200e.c @@ -376,33 +376,33 @@ fore200e_shutdown(struct fore200e* fore200e) case FORE200E_STATE_COMPLETE: kfree(fore200e->stats); - /* fall through */ + fallthrough; case FORE200E_STATE_IRQ: free_irq(fore200e->irq, fore200e->atm_dev); - /* fall through */ + fallthrough; case FORE200E_STATE_ALLOC_BUF: fore200e_free_rx_buf(fore200e); - /* fall through */ + fallthrough; case FORE200E_STATE_INIT_BSQ: fore200e_uninit_bs_queue(fore200e); - /* fall through */ + fallthrough; case FORE200E_STATE_INIT_RXQ: fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.status); fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.rpd); - /* fall through */ + fallthrough; case FORE200E_STATE_INIT_TXQ: fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.status); fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.tpd); - /* fall through */ + fallthrough; case FORE200E_STATE_INIT_CMDQ: fore200e_dma_chunk_free(fore200e, &fore200e->host_cmdq.status); - /* fall through */ + fallthrough; case FORE200E_STATE_INITIALIZE: /* nothing to do for that state */ @@ -415,7 +415,7 @@ fore200e_shutdown(struct fore200e* fore200e) case FORE200E_STATE_MAP: fore200e->bus->unmap(fore200e); - /* fall through */ + fallthrough; case FORE200E_STATE_CONFIGURE: /* nothing to do for that state */ diff --git a/drivers/atm/he.c b/drivers/atm/he.c index 8af793f5e811..17f44abc9418 100644 --- a/drivers/atm/he.c +++ b/drivers/atm/he.c @@ -1944,14 +1944,14 @@ he_tasklet(unsigned long data) switch (type) { case ITYPE_RBRQ_THRESH: HPRINTK("rbrq%d threshold\n", group); - /* fall through */ + fallthrough; case ITYPE_RBRQ_TIMER: if (he_service_rbrq(he_dev, group)) he_service_rbpl(he_dev, group); break; case ITYPE_TBRQ_THRESH: HPRINTK("tbrq%d threshold\n", group); - /* fall through */ + fallthrough; case ITYPE_TPD_COMPLETE: he_service_tbrq(he_dev, group); break; diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c index 63871859e6e8..3c081b6171a8 100644 --- a/drivers/atm/idt77105.c +++ b/drivers/atm/idt77105.c @@ -192,7 +192,7 @@ static int idt77105_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) switch (cmd) { case IDT77105_GETSTATZ: if (!capable(CAP_NET_ADMIN)) return -EPERM; - /* fall through */ + fallthrough; case IDT77105_GETSTAT: return fetch_stats(dev, arg, cmd == IDT77105_GETSTATZ); case ATM_SETLOOP: diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c index 986c1313694c..ac811cfa6843 100644 --- a/drivers/atm/lanai.c +++ b/drivers/atm/lanai.c @@ -2019,7 +2019,7 @@ static int lanai_normalize_ci(struct lanai_dev *lanai, switch (*vpip) { case ATM_VPI_ANY: *vpip = 0; - /* FALLTHROUGH */ + fallthrough; case 0: break; default: diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c index ee059c77e3bb..cf5fffcf98a1 100644 --- a/drivers/atm/zatm.c +++ b/drivers/atm/zatm.c @@ -1447,7 +1447,7 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) switch (cmd) { case ZATM_GETPOOLZ: if (!capable(CAP_NET_ADMIN)) return -EPERM; - /* fall through */ + fallthrough; case ZATM_GETPOOL: { struct zatm_pool_info info; diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c index 99980aa3644b..1c82d824ae00 100644 --- a/drivers/auxdisplay/panel.c +++ b/drivers/auxdisplay/panel.c @@ -1365,7 +1365,7 @@ static void panel_process_inputs(void) break; input->rise_timer = 0; input->state = INPUT_ST_RISING; - /* fall through */ + fallthrough; case 
INPUT_ST_RISING: if ((phys_curr & input->mask) != input->value) { input->state = INPUT_ST_LOW; @@ -1378,11 +1378,11 @@ static void panel_process_inputs(void) } input->high_timer = 0; input->state = INPUT_ST_HIGH; - /* fall through */ + fallthrough; case INPUT_ST_HIGH: if (input_state_high(input)) break; - /* fall through */ + fallthrough; case INPUT_ST_FALLING: input_state_falling(input); } diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c index 5327bfc6ba71..283ca2de76d4 100644 --- a/drivers/base/firmware_loader/fallback.c +++ b/drivers/base/firmware_loader/fallback.c @@ -289,10 +289,10 @@ static ssize_t firmware_loading_store(struct device *dev, } break; } - /* fallthrough */ + fallthrough; default: dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading); - /* fallthrough */ + fallthrough; case -1: fw_load_abort(fw_sysfs); break; diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 3cf9bc5d8d95..6dba41395155 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -1135,7 +1135,7 @@ noskb: if (buf) break; } bvcpy(skb, f->buf->bio, f->iter, n); - /* fall through */ + fallthrough; case ATA_CMD_PIO_WRITE: case ATA_CMD_PIO_WRITE_EXT: spin_lock_irq(&d->lock); diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index 1553d41f0b91..a50e13af0305 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -1726,7 +1726,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, /* MSch: invalidate default_params */ default_params[drive].blocks = 0; set_capacity(floppy->disk, MAX_DISK_SIZE * 2); - /* Fall through */ + fallthrough; case FDFMTEND: case FDFLUSH: /* invalidate the buffer track to force a reread */ diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index fe6cb99eb917..740e93bad21f 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1733,7 +1733,7 @@ static inline void __drbd_chk_io_error_(struct drbd_device *device, _drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL); break; } - /* fall through - for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */ + fallthrough; /* for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */ case EP_DETACH: case EP_CALL_HELPER: /* Remember whether we saw a READ or WRITE error. 
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index cb687ccdbd96..04b6bde9419d 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -430,7 +430,7 @@ int drbd_thread_start(struct drbd_thread *thi) thi->t_state = RESTARTING; drbd_info(resource, "Restarting %s thread (from %s [%d])\n", thi->name, current->comm, current->pid); - /* fall through */ + fallthrough; case RUNNING: case RESTARTING: default: diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 28eb078f8b75..43c8ae4d9fca 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -3883,7 +3883,7 @@ static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device, if (nla_put_u32(skb, T_helper_exit_code, sib->helper_exit_code)) goto nla_put_failure; - /* fall through */ + fallthrough; case SIB_HELPER_PRE: if (nla_put_string(skb, T_helper, sib->helper_name)) goto nla_put_failure; diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 1d17593f5d2b..422363daa618 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -1797,7 +1797,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf break; else drbd_warn(connection, "Allocation of an epoch failed, slowing down\n"); - /* Fall through */ + fallthrough; case WO_BDEV_FLUSH: case WO_DRAIN_IO: @@ -2917,7 +2917,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet then we would do something smarter here than reading the block... */ peer_req->flags |= EE_RS_THIN_REQ; - /* fall through */ + fallthrough; case P_RS_DATA_REQUEST: peer_req->w.cb = w_e_end_rsdata_req; fault_type = DRBD_FAULT_RS_RD; @@ -3083,7 +3083,7 @@ static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold rv = 1; break; } - /* Else fall through - to one of the other strategies... */ + fallthrough; /* to one of the other strategies */ case ASB_DISCARD_OLDER_PRI: if (self == 0 && peer == 1) { rv = 1; @@ -3096,7 +3096,7 @@ static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold /* Else fall through to one of the other strategies... */ drbd_warn(device, "Discard younger/older primary did not find a decision\n" "Using discard-least-changes instead\n"); - /* fall through */ + fallthrough; case ASB_DISCARD_ZERO_CHG: if (ch_peer == 0 && ch_self == 0) { rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags) @@ -3108,7 +3108,7 @@ static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold } if (after_sb_0p == ASB_DISCARD_ZERO_CHG) break; - /* else, fall through */ + fallthrough; case ASB_DISCARD_LEAST_CHG: if (ch_self < ch_peer) rv = -1; @@ -3608,7 +3608,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device, switch (rr_conflict) { case ASB_CALL_HELPER: drbd_khelper(device, "pri-lost"); - /* fall through */ + fallthrough; case ASB_DISCONNECT: drbd_err(device, "I shall become SyncTarget, but I am primary!\n"); return C_MASK; diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 674be09b2da9..5c975af9c15f 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -611,7 +611,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, drbd_set_out_of_sync(device, req->i.sector, req->i.size); drbd_report_io_error(device, req); __drbd_chk_io_error(device, DRBD_READ_ERROR); - /* fall through. 
*/ + fallthrough; case READ_AHEAD_COMPLETED_WITH_ERROR: /* it is legal to fail read-ahead, no __drbd_chk_io_error in that case. */ mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); @@ -836,7 +836,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, } /* else: FIXME can this happen? */ break; } - /* else, fall through - to BARRIER_ACKED */ + fallthrough; /* to BARRIER_ACKED */ case BARRIER_ACKED: /* barrier ack for READ requests does not make sense */ diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 09079aee8dc4..a563b023458a 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -1680,7 +1680,7 @@ static void recal_interrupt(void) clear_bit(FD_DISK_NEWCHANGE_BIT, &drive_state[current_drive].flags); drive_state[current_drive].select_date = jiffies; - /* fall through */ + fallthrough; default: debugt(__func__, "default"); /* Recalibrate moves the head by at @@ -3592,7 +3592,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR) return -EINTR; process_fd_request(); - /* fall through */ + fallthrough; case FDGETDRVSTAT: outparam = &drive_state[drive]; break; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 2f137d6ce169..33cc6558f9d2 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1719,7 +1719,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode, case LOOP_SET_BLOCK_SIZE: if (!(mode & FMODE_WRITE) && !capable(CAP_SYS_ADMIN)) return -EPERM; - /* Fall through */ + fallthrough; default: err = lo_simple_ioctl(lo, cmd, arg); break; @@ -1867,7 +1867,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, case LOOP_SET_STATUS64: case LOOP_CONFIGURE: arg = (unsigned long) compat_ptr(arg); - /* fall through */ + fallthrough; case LOOP_SET_FD: case LOOP_CHANGE_FD: case LOOP_SET_BLOCK_SIZE: diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index c0967507d085..a7af4f27b7c3 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c @@ -440,7 +440,7 @@ static void run_fsm(void) pd_claimed = 1; if (!pi_schedule_claimed(pi_current, run_fsm)) return; - /* fall through */ + fallthrough; case 1: pd_claimed = 2; pi_current->proto->connect(pi_current); @@ -465,7 +465,7 @@ static void run_fsm(void) if (stop) return; } - /* fall through */ + fallthrough; case Hold: schedule_fsm(); return; diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 4becc1efe775..1034e445680c 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2641,7 +2641,7 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, */ if (pd->refcnt == 1) pkt_lock_door(pd, 0); - /* fall through */ + fallthrough; /* * forward selected CDROM ioctls to CD-ROM, for UDF */ diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index d9c0e7d154f9..011539039693 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3293,7 +3293,7 @@ again: case __RBD_OBJ_COPYUP_OBJECT_MAPS: if (!pending_result_dec(&obj_req->pending, result)) return false; - /* fall through */ + fallthrough; case RBD_OBJ_COPYUP_OBJECT_MAPS: if (*result) { rbd_warn(rbd_dev, "snap object map update failed: %d", @@ -3312,7 +3312,7 @@ again: case __RBD_OBJ_COPYUP_WRITE_OBJECT: if (!pending_result_dec(&obj_req->pending, result)) return false; - /* fall through */ + fallthrough; case RBD_OBJ_COPYUP_WRITE_OBJECT: return true; default: @@ -3399,7 +3399,7 @@ again: case __RBD_OBJ_WRITE_COPYUP: if 
(!rbd_obj_advance_copyup(obj_req, result)) return false; - /* fall through */ + fallthrough; case RBD_OBJ_WRITE_COPYUP: if (*result) { rbd_warn(rbd_dev, "copyup failed: %d", *result); @@ -3592,7 +3592,7 @@ again: case __RBD_IMG_OBJECT_REQUESTS: if (!pending_result_dec(&img_req->pending, result)) return false; - /* fall through */ + fallthrough; case RBD_IMG_OBJECT_REQUESTS: return true; default: diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c index 7e261224ff10..8799e3bab067 100644 --- a/drivers/block/rsxx/core.c +++ b/drivers/block/rsxx/core.c @@ -425,7 +425,7 @@ static void card_state_change(struct rsxx_cardinfo *card, * Fall through so the DMA devices can be attached and * the user can attempt to pull off their data. */ - /* fall through */ + fallthrough; case CARD_STATE_GOOD: st = rsxx_get_card_size8(card, &card->size8); if (st) diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index 3a476dc1d14f..ae6454c24594 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c @@ -1436,7 +1436,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev, blk_mq_requeue_request(req, true); break; } - /* fall through */ + fallthrough; case SKD_CHECK_STATUS_REPORT_ERROR: default: diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index c2f71265af4b..adfc9352351d 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -1260,7 +1260,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, break; case BLKIF_OP_WRITE_BARRIER: drain = true; - /* fall through */ + fallthrough; case BLKIF_OP_FLUSH_DISKCACHE: ring->st_f_req++; operation = REQ_OP_WRITE; diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 42944d41aea0..b9aa5d1ac10b 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -843,7 +843,7 @@ static void frontend_changed(struct xenbus_device *dev, xenbus_switch_state(dev, XenbusStateClosed); if (xenbus_dev_is_online(dev)) break; - /* fall through */ + fallthrough; /* if not online */ case XenbusStateUnknown: /* implies xen_blkif_disconnect() via xen_blkbk_remove() */ diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 3bb3dd8da9b0..91de2e0755ae 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1403,7 +1403,6 @@ static enum blk_req_status blkif_rsp_to_req_status(int rsp) case BLKIF_RSP_EOPNOTSUPP: return REQ_EOPNOTSUPP; case BLKIF_RSP_ERROR: - /* Fallthrough. 
*/ default: return REQ_ERROR; } @@ -1643,7 +1642,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) info->feature_flush = 0; xlvbd_flush(info); } - /* fall through */ + fallthrough; case BLKIF_OP_READ: case BLKIF_OP_WRITE: if (unlikely(bret->status != BLKIF_RSP_OKAY)) @@ -2484,7 +2483,7 @@ static void blkback_changed(struct xenbus_device *dev, case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; - /* fall through */ + fallthrough; case XenbusStateClosing: if (info) blkfront_closing(info); diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index fb5a901fd89e..efb088df1276 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -1849,7 +1849,7 @@ static int sysc_clockdomain_init(struct sysc *ddata) switch (ddata->nr_clocks) { case 2: ick = ddata->clocks[SYSC_ICK]; - /* fallthrough */ + fallthrough; case 1: fck = ddata->clocks[SYSC_FCK]; break; diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c index 89527bae4602..760d9a931289 100644 --- a/drivers/char/agp/ali-agp.c +++ b/drivers/char/agp/ali-agp.c @@ -357,7 +357,7 @@ found: default: break; } - /*FALLTHROUGH*/ + fallthrough; default: bridge->driver = &ali_generic_bridge; } diff --git a/drivers/char/ipmi/kcs_bmc.c b/drivers/char/ipmi/kcs_bmc.c index ed4dc3b1843e..f292e74bd4a5 100644 --- a/drivers/char/ipmi/kcs_bmc.c +++ b/drivers/char/ipmi/kcs_bmc.c @@ -99,7 +99,7 @@ static void kcs_bmc_handle_data(struct kcs_bmc *kcs_bmc) switch (kcs_bmc->phase) { case KCS_PHASE_WRITE_START: kcs_bmc->phase = KCS_PHASE_WRITE_DATA; - /* fall through */ + fallthrough; case KCS_PHASE_WRITE_DATA: if (kcs_bmc->data_in_idx < KCS_MSG_BUFSIZ) { diff --git a/drivers/char/lp.c b/drivers/char/lp.c index bd95aba1f9fe..45932f05fd67 100644 --- a/drivers/char/lp.c +++ b/drivers/char/lp.c @@ -734,7 +734,7 @@ static long lp_ioctl(struct file *file, unsigned int cmd, ret = lp_set_timeout32(minor, (void __user *)arg); break; } - /* fall through - for 64-bit */ + fallthrough; /* for 64-bit */ case LPSETTIMEOUT_NEW: ret = lp_set_timeout64(minor, (void __user *)arg); break; @@ -762,7 +762,7 @@ static long lp_compat_ioctl(struct file *file, unsigned int cmd, ret = lp_set_timeout32(minor, (void __user *)arg); break; } - /* fall through - for x32 mode */ + fallthrough; /* for x32 mode */ case LPSETTIMEOUT_NEW: ret = lp_set_timeout64(minor, (void __user *)arg); break; diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 687d4af6945d..abd4ffdc8cde 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -791,7 +791,7 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig) switch (orig) { case SEEK_CUR: offset += file->f_pos; - /* fall through */ + fallthrough; case SEEK_SET: /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */ if ((unsigned long long)offset >= -MAX_ERRNO) { diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c index 8206412d25ba..e9f694b36871 100644 --- a/drivers/char/nvram.c +++ b/drivers/char/nvram.c @@ -286,7 +286,7 @@ static long nvram_misc_ioctl(struct file *file, unsigned int cmd, #ifdef CONFIG_PPC case OBSOLETE_PMAC_NVRAM_GET_OFFSET: pr_warn("nvram: Using obsolete PMAC_NVRAM_GET_OFFSET ioctl\n"); - /* fall through */ + fallthrough; case IOC_NVRAM_GET_OFFSET: ret = -EINVAL; #ifdef CONFIG_PPC_PMAC diff --git a/drivers/clocksource/timer-cadence-ttc.c b/drivers/clocksource/timer-cadence-ttc.c index 38858e141731..80e960602030 100644 --- a/drivers/clocksource/timer-cadence-ttc.c +++ b/drivers/clocksource/timer-cadence-ttc.c @@ -309,7 +309,7 @@ static int 
ttc_rate_change_clocksource_cb(struct notifier_block *nb, /* restore original register value */ writel_relaxed(ttccs->scale_clk_ctrl_reg_old, ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET); - /* fall through */ + fallthrough; default: return NOTIFY_DONE; } @@ -392,7 +392,7 @@ static int ttc_rate_change_clockevent_cb(struct notifier_block *nb, clockevents_update_freq(&ttcce->ce, ndata->new_rate / PRESCALE); - /* fall through */ + fallthrough; case PRE_RATE_CHANGE: case ABORT_RATE_CHANGE: default: diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c index bb61677c11c7..ef0a3216a386 100644 --- a/drivers/cpufreq/p4-clockmod.c +++ b/drivers/cpufreq/p4-clockmod.c @@ -129,7 +129,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE); case 0x0D: /* Pentium M (Dothan) */ p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; - /* fall through */ + fallthrough; case 0x09: /* Pentium M (Banias) */ return speedstep_get_frequency(SPEEDSTEP_CPU_PM); } diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c index 5c4f8f07c5a6..a13a2d1e444e 100644 --- a/drivers/cpufreq/speedstep-lib.c +++ b/drivers/cpufreq/speedstep-lib.c @@ -366,7 +366,7 @@ enum speedstep_processor speedstep_detect_processor(void) } else return SPEEDSTEP_CPU_PIII_C; } - /* fall through */ + fallthrough; default: return 0; } diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index ab0de27539ad..8f9fdd864391 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c @@ -86,11 +86,11 @@ static unsigned long dra7_efuse_xlate(struct ti_cpufreq_data *opp_data, case DRA76_EFUSE_HAS_PLUS_MPU_OPP: case DRA76_EFUSE_HAS_ALL_MPU_OPP: calculated_efuse |= DRA76_EFUSE_PLUS_MPU_OPP; - /* Fall through */ + fallthrough; case DRA7_EFUSE_HAS_ALL_MPU_OPP: case DRA7_EFUSE_HAS_HIGH_MPU_OPP: calculated_efuse |= DRA7_EFUSE_HIGH_MPU_OPP; - /* Fall through */ + fallthrough; case DRA7_EFUSE_HAS_OD_MPU_OPP: calculated_efuse |= DRA7_EFUSE_OD_MPU_OPP; } diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index 1a46eeddf082..809c3033ca74 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -2310,7 +2310,7 @@ static int artpec6_crypto_prepare_submit_hash(struct ahash_request *req) case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START: ret = 0; - /* Fallthrough */ + fallthrough; default: artpec6_crypto_common_destroy(&req_ctx->common); diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c index dc5fda522719..4fe7898c8561 100644 --- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c +++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c @@ -90,11 +90,11 @@ static int setup_sgio_components(struct cpt_vf *cptvf, struct buf_ptr *list, case 3: sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size); sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr); - /* Fall through */ + fallthrough; case 2: sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size); sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr); - /* Fall through */ + fallthrough; case 1: sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size); sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr); diff --git a/drivers/crypto/chelsio/chcr_ktls.c b/drivers/crypto/chelsio/chcr_ktls.c index 91dee616d15e..c5cce024886a 100644 --- a/drivers/crypto/chelsio/chcr_ktls.c +++ b/drivers/crypto/chelsio/chcr_ktls.c @@ -135,7 +135,7 @@ static int chcr_ktls_update_connection_state(struct 
chcr_ktls_info *tx_info, break; /* update to the next state and also initialize TCB */ tx_info->connection_state = new_state; - /* FALLTHRU */ + fallthrough; case KTLS_CONN_ACT_OPEN_RPL: /* if we are stuck in this state, means tcb init might not * received by HW, try sending it again. @@ -150,7 +150,7 @@ static int chcr_ktls_update_connection_state(struct chcr_ktls_info *tx_info, break; /* update to the next state and check if l2t_state is valid */ tx_info->connection_state = new_state; - /* FALLTHRU */ + fallthrough; case KTLS_CONN_SET_TCB_RPL: /* Check if l2t state is valid, then move to ready state. */ if (cxgb4_check_l2t_valid(tx_info->l2te)) { diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c index cbc3d7869ebe..c80baf1ad90b 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c @@ -140,11 +140,11 @@ static inline int setup_sgio_components(struct pci_dev *pdev, case 3: sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size); sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr); - /* Fall through */ + fallthrough; case 2: sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size); sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr); - /* Fall through */ + fallthrough; case 1: sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size); sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr); diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c index 519fd5acf713..8b090b7ae8c6 100644 --- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c +++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c @@ -340,7 +340,7 @@ static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev) /* VF is newer than PF and decides whether it is compatible */ if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver) break; - /* fall through */ + fallthrough; case ADF_PF2VF_VF_INCOMPATIBLE: dev_err(&GET_DEV(accel_dev), "PF (vers %d) and VF (vers %d) are not compatible\n", diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index bff759e2f811..00c615f9f9a8 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -752,7 +752,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle, case ICP_GPA_ABS: case ICP_GPB_ABS: ctx_mask = 0; - /* fall through */ + fallthrough; case ICP_GPA_REL: case ICP_GPB_REL: return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type, @@ -762,7 +762,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle, case ICP_SR_RD_ABS: case ICP_DR_RD_ABS: ctx_mask = 0; - /* fall through */ + fallthrough; case ICP_SR_REL: case ICP_DR_REL: case ICP_SR_RD_REL: @@ -772,7 +772,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle, case ICP_SR_WR_ABS: case ICP_DR_WR_ABS: ctx_mask = 0; - /* fall through */ + fallthrough; case ICP_SR_WR_REL: case ICP_DR_WR_REL: return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type, diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c index f22f6fa612b3..9866c2a5e9a7 100644 --- a/drivers/crypto/ux500/cryp/cryp.c +++ b/drivers/crypto/ux500/cryp/cryp.c @@ -314,17 +314,17 @@ void cryp_save_device_context(struct cryp_device_data *device_data, case CRYP_KEY_SIZE_256: ctx->key_4_l = readl_relaxed(&src_reg->key_4_l); ctx->key_4_r = readl_relaxed(&src_reg->key_4_r); - /* Fall through */ + fallthrough; case CRYP_KEY_SIZE_192: ctx->key_3_l = 
readl_relaxed(&src_reg->key_3_l); ctx->key_3_r = readl_relaxed(&src_reg->key_3_r); - /* Fall through */ + fallthrough; case CRYP_KEY_SIZE_128: ctx->key_2_l = readl_relaxed(&src_reg->key_2_l); ctx->key_2_r = readl_relaxed(&src_reg->key_2_r); - /* Fall through */ + fallthrough; default: ctx->key_1_l = readl_relaxed(&src_reg->key_1_l); @@ -364,17 +364,17 @@ void cryp_restore_device_context(struct cryp_device_data *device_data, case CRYP_KEY_SIZE_256: writel_relaxed(ctx->key_4_l, &reg->key_4_l); writel_relaxed(ctx->key_4_r, &reg->key_4_r); - /* Fall through */ + fallthrough; case CRYP_KEY_SIZE_192: writel_relaxed(ctx->key_3_l, &reg->key_3_l); writel_relaxed(ctx->key_3_r, &reg->key_3_r); - /* Fall through */ + fallthrough; case CRYP_KEY_SIZE_128: writel_relaxed(ctx->key_2_l, &reg->key_2_l); writel_relaxed(ctx->key_2_r, &reg->key_2_r); - /* Fall through */ + fallthrough; default: writel_relaxed(ctx->key_1_l, &reg->key_1_l); @@ -1767,7 +1767,7 @@ static u32 pl08x_memcpy_cctl(struct pl08x_driver_data *pl08x) default: dev_err(&pl08x->adev->dev, "illegal burst size for memcpy, set to 1\n"); - /* Fall through */ + fallthrough; case PL08X_BURST_SZ_1: cctl |= PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT | PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT; @@ -1806,7 +1806,7 @@ static u32 pl08x_memcpy_cctl(struct pl08x_driver_data *pl08x) default: dev_err(&pl08x->adev->dev, "illegal bus width for memcpy, set to 8 bits\n"); - /* Fall through */ + fallthrough; case PL08X_BUS_WIDTH_8_BITS: cctl |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT | PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT; @@ -1850,7 +1850,7 @@ static u32 pl08x_ftdmac020_memcpy_cctl(struct pl08x_driver_data *pl08x) default: dev_err(&pl08x->adev->dev, "illegal bus width for memcpy, set to 8 bits\n"); - /* Fall through */ + fallthrough; case PL08X_BUS_WIDTH_8_BITS: cctl |= PL080_WIDTH_8BIT << FTDMAC020_LLI_SRC_WIDTH_SHIFT | PL080_WIDTH_8BIT << FTDMAC020_LLI_DST_WIDTH_SHIFT; @@ -2612,7 +2612,7 @@ static int pl08x_of_probe(struct amba_device *adev, switch (val) { default: dev_err(&adev->dev, "illegal burst size for memcpy, set to 1\n"); - /* Fall through */ + fallthrough; case 1: pd->memcpy_burst_size = PL08X_BURST_SZ_1; break; @@ -2647,7 +2647,7 @@ static int pl08x_of_probe(struct amba_device *adev, switch (val) { default: dev_err(&adev->dev, "illegal bus width for memcpy, set to 8 bits\n"); - /* Fall through */ + fallthrough; case 8: pd->memcpy_bus_width = PL08X_BUS_WIDTH_8_BITS; break; diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index ad72b3f42ffa..e342cf52d296 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -1163,7 +1163,7 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev, switch (chan->feature & FSL_DMA_IP_MASK) { case FSL_DMA_IP_85XX: chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; - /* Fall through */ + fallthrough; case FSL_DMA_IP_83XX: chan->toggle_ext_start = fsl_chan_toggle_ext_start; chan->set_src_loop_size = fsl_chan_set_src_loop_size; diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 5c0fb3134825..88717506c1f6 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -556,7 +556,7 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) * We fall-through here intentionally, since a 2D transfer is * similar to MEMCPY just adding the 2D slot configuration.
*/ - /* Fall through */ + fallthrough; case IMXDMA_DESC_MEMCPY: imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel)); imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel)); diff --git a/drivers/dma/iop-adma.h b/drivers/dma/iop-adma.h index c499c9578f00..d44eabb6f5eb 100644 --- a/drivers/dma/iop-adma.h +++ b/drivers/dma/iop-adma.h @@ -496,7 +496,7 @@ iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt, } hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr; src_cnt = 24; - /* fall through */ + fallthrough; case 17 ... 24: if (!u_desc_ctrl.field.blk_ctrl) { hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0; @@ -510,7 +510,7 @@ iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt, } hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr; src_cnt = 16; - /* fall through */ + fallthrough; case 9 ... 16: if (!u_desc_ctrl.field.blk_ctrl) u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */ @@ -522,7 +522,7 @@ iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt, } hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr; src_cnt = 8; - /* fall through */ + fallthrough; case 2 ... 8: shift = 1; for (i = 0; i < src_cnt; i++) { @@ -602,19 +602,19 @@ iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt, case 25 ... 32: u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */ hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0; - /* fall through */ + fallthrough; case 17 ... 24: if (!u_desc_ctrl.field.blk_ctrl) { hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0; u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */ } hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0; - /* fall through */ + fallthrough; case 9 ... 16: if (!u_desc_ctrl.field.blk_ctrl) u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */ hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0; - /* fall through */ + fallthrough; case 1 ... 
8: if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4) u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */ diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c index 74df621402e1..ca4e0930207a 100644 --- a/drivers/dma/nbpfaxi.c +++ b/drivers/dma/nbpfaxi.c @@ -483,7 +483,7 @@ static size_t nbpf_xfer_size(struct nbpf_device *nbpf, default: pr_warn("%s(): invalid bus width %u\n", __func__, width); - /* fall through */ + fallthrough; case DMA_SLAVE_BUSWIDTH_1_BYTE: size = burst; } diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 2c508ee672b9..9b69716172a4 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -1061,16 +1061,16 @@ static bool _start(struct pl330_thread *thrd) if (_state(thrd) == PL330_STATE_KILLING) UNTIL(thrd, PL330_STATE_STOPPED) - /* fall through */ + fallthrough; case PL330_STATE_FAULTING: _stop(thrd); - /* fall through */ + fallthrough; case PL330_STATE_KILLING: case PL330_STATE_COMPLETING: UNTIL(thrd, PL330_STATE_STOPPED) - /* fall through */ + fallthrough; case PL330_STATE_STOPPED: return _trigger(thrd); @@ -1121,7 +1121,6 @@ static u32 _emit_load(unsigned int dry_run, u8 buf[], switch (direction) { case DMA_MEM_TO_MEM: - /* fall through */ case DMA_MEM_TO_DEV: off += _emit_LD(dry_run, &buf[off], cond); break; @@ -1155,7 +1154,6 @@ static inline u32 _emit_store(unsigned int dry_run, u8 buf[], switch (direction) { case DMA_MEM_TO_MEM: - /* fall through */ case DMA_DEV_TO_MEM: off += _emit_ST(dry_run, &buf[off], cond); break; @@ -1216,7 +1214,6 @@ static int _bursts(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[], switch (pxs->desc->rqtype) { case DMA_MEM_TO_DEV: - /* fall through */ case DMA_DEV_TO_MEM: off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs, cyc, cond); @@ -1266,7 +1263,6 @@ static int _dregs(struct pl330_dmac *pl330, unsigned int dry_run, u8 buf[], switch (pxs->desc->rqtype) { case DMA_MEM_TO_DEV: - /* fall through */ case DMA_DEV_TO_MEM: off += _emit_MOV(dry_run, &buf[off], CCR, dregs_ccr); off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs, 1, diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 2deeaab078a4..788d696323bb 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -383,7 +383,7 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all) switch (desc->mark) { case DESC_COMPLETED: desc->mark = DESC_WAITING; - /* Fall through */ + fallthrough; case DESC_WAITING: if (head_acked) async_tx_ack(&desc->async_tx); diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 6262f6370c5d..fcc08bbf6945 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -3375,7 +3375,7 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt) pvt->ops = &family_types[F17_M70H_CPUS].ops; break; } - /* fall through */ + fallthrough; case 0x18: fam_type = &family_types[F17_CPUS]; pvt->ops = &family_types[F17_CPUS].ops; diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c index b8fc4b84fd86..928f63a374c7 100644 --- a/drivers/edac/pnd2_edac.c +++ b/drivers/edac/pnd2_edac.c @@ -198,7 +198,7 @@ static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *na switch (sz) { case 8: ret = _apl_rd_reg(port, off + 4, op, (u32 *)(data + 4)); - /* fall through */ + fallthrough; case 4: ret |= _apl_rd_reg(port, off, op, (u32 *)data); pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name, diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index b785e936244f..80db43a22069 100644 --- 
a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c @@ -957,7 +957,7 @@ static void set_broadcast_channel(struct fw_device *device, int generation) device->bc_implemented = BC_IMPLEMENTED; break; } - /* else, fall through - to case address error */ + fallthrough; /* to case address error */ case RCODE_ADDRESS_ERROR: device->bc_implemented = BC_UNIMPLEMENTED; } diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c index 185b0b78b3d6..af70e74f9a7e 100644 --- a/drivers/firewire/core-iso.c +++ b/drivers/firewire/core-iso.c @@ -277,7 +277,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation, if ((data[0] & bit) == (data[1] & bit)) continue; - /* fall through - It's a 1394-1995 IRM, retry. */ + fallthrough; /* It's a 1394-1995 IRM, retry */ default: if (retry) { retry--; diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c index 94a13fca8267..ec68ed27b0a5 100644 --- a/drivers/firewire/core-topology.c +++ b/drivers/firewire/core-topology.c @@ -54,7 +54,7 @@ static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count) switch (port_type) { case SELFID_PORT_CHILD: (*child_port_count)++; - /* fall through */ + fallthrough; case SELFID_PORT_PARENT: case SELFID_PORT_NCONN: (*total_port_count)++; diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index 439d918bbaaf..ac487c96bb71 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c @@ -1097,14 +1097,14 @@ static void handle_registers(struct fw_card *card, struct fw_request *request, rcode = RCODE_ADDRESS_ERROR; break; } - /* else fall through */ + fallthrough; case CSR_NODE_IDS: /* * per IEEE 1394-2008 8.3.22.3, not IEEE 1394.1-2004 3.2.8 * and 9.6, but interoperable with IEEE 1394.1-2004 bridges */ - /* fall through */ + fallthrough; case CSR_STATE_CLEAR: case CSR_STATE_SET: diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 7dde21b18b04..020cb15a4d8f 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -1495,7 +1495,7 @@ static int handle_at_packet(struct context *context, packet->ack = RCODE_GENERATION; break; } - /* fall through */ + fallthrough; default: packet->ack = RCODE_SEND_ERROR; @@ -3054,7 +3054,7 @@ static int ohci_start_iso(struct fw_iso_context *base, case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE; - /* fall through */ + fallthrough; case FW_ISO_CONTEXT_RECEIVE: index = ctx - ohci->ir_context_list; match = (tags << 28) | (sync << 8) | ctx->base.channel; diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c index d16645c1d8d9..3aa45934d60c 100644 --- a/drivers/gpio/gpio-aspeed-sgpio.c +++ b/drivers/gpio/gpio-aspeed-sgpio.c @@ -303,16 +303,16 @@ static int aspeed_sgpio_set_type(struct irq_data *d, unsigned int type) switch (type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_EDGE_BOTH: type2 |= bit; - /* fall through */ + fallthrough; case IRQ_TYPE_EDGE_RISING: type0 |= bit; - /* fall through */ + fallthrough; case IRQ_TYPE_EDGE_FALLING: handler = handle_edge_irq; break; case IRQ_TYPE_LEVEL_HIGH: type0 |= bit; - /* fall through */ + fallthrough; case IRQ_TYPE_LEVEL_LOW: type1 |= bit; handler = handle_level_irq; diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c index 879db23d8454..bf08b4561f36 100644 --- a/drivers/gpio/gpio-aspeed.c +++ b/drivers/gpio/gpio-aspeed.c @@ -611,16 +611,16 @@ static int aspeed_gpio_set_type(struct irq_data 
*d, unsigned int type) switch (type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_EDGE_BOTH: type2 |= bit; - /* fall through */ + fallthrough; case IRQ_TYPE_EDGE_RISING: type0 |= bit; - /* fall through */ + fallthrough; case IRQ_TYPE_EDGE_FALLING: handler = handle_edge_irq; break; case IRQ_TYPE_LEVEL_HIGH: type0 |= bit; - /* fall through */ + fallthrough; case IRQ_TYPE_LEVEL_LOW: type1 |= bit; handler = handle_level_irq; diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c index 53fae02c40ad..d5359341cc6b 100644 --- a/drivers/gpio/gpio-ath79.c +++ b/drivers/gpio/gpio-ath79.c @@ -129,7 +129,7 @@ static int ath79_gpio_irq_set_type(struct irq_data *data, case IRQ_TYPE_LEVEL_HIGH: polarity |= mask; - /* fall through */ + fallthrough; case IRQ_TYPE_LEVEL_LOW: type |= mask; break; diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c index 8c9757774010..ad61daf6c212 100644 --- a/drivers/gpio/gpio-eic-sprd.c +++ b/drivers/gpio/gpio-eic-sprd.c @@ -617,14 +617,12 @@ static int sprd_eic_probe(struct platform_device *pdev) sprd_eic->chip.free = sprd_eic_free; sprd_eic->chip.set_config = sprd_eic_set_config; sprd_eic->chip.set = sprd_eic_set; - /* fall-through */ + fallthrough; case SPRD_EIC_ASYNC: - /* fall-through */ case SPRD_EIC_SYNC: sprd_eic->chip.get = sprd_eic_get; break; case SPRD_EIC_LATCH: - /* fall-through */ default: break; } diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c index 6c48809d0505..b0155d6007c8 100644 --- a/drivers/gpio/gpio-stmpe.c +++ b/drivers/gpio/gpio-stmpe.c @@ -308,7 +308,7 @@ static void stmpe_dbg_show_one(struct seq_file *s, if (ret < 0) return; edge_det = !!(ret & mask); - /* fall through */ + fallthrough; case STMPE1801: rise_reg = stmpe->regs[STMPE_IDX_GPRER_LSB + bank]; fall_reg = stmpe->regs[STMPE_IDX_GPFER_LSB + bank]; @@ -321,7 +321,7 @@ static void stmpe_dbg_show_one(struct seq_file *s, if (ret < 0) return; fall = !!(ret & mask); - /* fall through */ + fallthrough; case STMPE801: case STMPE1600: irqen_reg = stmpe->regs[STMPE_IDX_IEGPIOR_LSB + bank]; diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 9276051663da..54ca3c18b291 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -1264,7 +1264,7 @@ static int acpi_gpio_package_count(const union acpi_object *obj) switch (element->type) { case ACPI_TYPE_LOCAL_REFERENCE: element += 3; - /* Fallthrough */ + fallthrough; case ACPI_TYPE_INTEGER: element++; count++; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c index 7e59e473a190..cdea1338c8dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c @@ -152,7 +152,7 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, dev_warn(adev->dev, "Invalid sdma engine id (%d), using engine id 0\n", engine_id); - /* fall through */ + fallthrough; case 0: sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 33f1c4a46ebe..88f63d7ea371 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -3250,7 +3250,7 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev) dev_warn(adev->dev, "Unknown chip type (%d) in function gfx_v8_0_tiling_mode_table_init() falling through to CHIP_CARRIZO\n", adev->asic_type); - /* fall through */ + 
fallthrough; case CHIP_CARRIZO: modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 6e4f3ff4810f..b67ba38a195f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1297,7 +1297,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) case CHIP_VEGA10: if (amdgpu_sriov_vf(adev)) break; - /* fall through */ + fallthrough; case CHIP_VEGA20: soc15_program_register_sequence(adev, golden_settings_mmhub_1_0_0, diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index ea914b256ebd..b5986d19dc08 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -6196,7 +6196,7 @@ static void si_request_link_speed_change_before_state_change(struct amdgpu_devic si_pi->force_pcie_gen = AMDGPU_PCIE_GEN2; if (current_link_speed == AMDGPU_PCIE_GEN2) break; - /* fall through */ + fallthrough; case AMDGPU_PCIE_GEN2: if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0) break; diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c index ca570b135478..e9de542f9b7c 100644 --- a/drivers/gpu/drm/arm/malidp_hw.c +++ b/drivers/gpu/drm/arm/malidp_hw.c @@ -532,7 +532,7 @@ static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev, malidp_hw_write(hwdev, lower_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_LOW); malidp_hw_write(hwdev, upper_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_HIGH); malidp_hw_write(hwdev, pitches[1], base + MALIDP_MW_P2_STRIDE); - /* fall through */ + fallthrough; case 1: malidp_hw_write(hwdev, lower_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_LOW); malidp_hw_write(hwdev, upper_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_HIGH); @@ -869,7 +869,7 @@ static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev, malidp_hw_write(hwdev, lower_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_LOW); malidp_hw_write(hwdev, upper_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_HIGH); malidp_hw_write(hwdev, pitches[1], base + MALIDP_MW_P2_STRIDE); - /* fall through */ + fallthrough; case 1: malidp_hw_write(hwdev, lower_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_LOW); malidp_hw_write(hwdev, upper_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_HIGH); @@ -1324,7 +1324,7 @@ static irqreturn_t malidp_se_irq(int irq, void *arg) break; case MW_RESTART: drm_writeback_signal_completion(&malidp->mw_connector, 0); - /* fall through - to a new start */ + fallthrough; /* to a new start */ case MW_START: /* writeback started, need to emulate one-shot mode */ hw->disable_memwrite(hwdev); diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index dd12b55d57a2..6a9fba051d13 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -238,7 +238,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) ast->dp501_fw_addr = NULL; } } - /* fallthrough */ + fallthrough; case 0x0c: ast->tx_chip_type = AST_TX_DP501; } diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c index ce94f797d090..66b67402f1ac 100644 --- a/drivers/gpu/drm/bridge/nwl-dsi.c +++ b/drivers/gpu/drm/bridge/nwl-dsi.c @@ -409,7 +409,6 @@ static bool nwl_dsi_read_packet(struct nwl_dsi *dsi, u32 status) switch (data_type) { case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE: - fallthrough; case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE: if (xfer->msg->rx_len > 1) { /* read second byte */ @@ -418,7 +417,6 
@@ static bool nwl_dsi_read_packet(struct nwl_dsi *dsi, u32 status) } fallthrough; case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE: - fallthrough; case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE: if (xfer->msg->rx_len > 0) { /* read first byte */ diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c index d7e65c869415..9fef6413741d 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c @@ -61,10 +61,10 @@ static int dw_hdmi_i2s_hw_params(struct device *dev, void *data, switch (hparms->channels) { case 7 ... 8: conf0 |= HDMI_AUD_CONF0_I2S_EN3; - /* Fall-thru */ + fallthrough; case 5 ... 6: conf0 |= HDMI_AUD_CONF0_I2S_EN2; - /* Fall-thru */ + fallthrough; case 3 ... 4: conf0 |= HDMI_AUD_CONF0_I2S_EN1; /* Fall-thru */ diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 86b9f0f87a14..5b6e19ecbc84 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -604,13 +604,13 @@ static void ti_sn_bridge_read_valid_rates(struct ti_sn_bridge *pdata, DRM_DEV_ERROR(pdata->dev, "Unexpected max rate (%#x); assuming 5.4 GHz\n", (int)dpcd_val); - /* fall through */ + fallthrough; case DP_LINK_BW_5_4: rate_valid[7] = 1; - /* fall through */ + fallthrough; case DP_LINK_BW_2_7: rate_valid[4] = 1; - /* fall through */ + fallthrough; case DP_LINK_BW_1_62: rate_valid[1] = 1; break; diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index a0735fbc144b..7a01d0918861 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -537,7 +537,7 @@ int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map) switch (map->type) { case _DRM_REGISTERS: iounmap(map->handle); - /* FALLTHROUGH */ + fallthrough; case _DRM_FRAME_BUFFER: arch_phys_wc_del(map->mtrr); break; diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index a3c82e726057..092c8c985911 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -492,7 +492,7 @@ int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE], case DP_DS_16BPC: return 16; } - /* fall through */ + fallthrough; default: return 0; } diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 14b6f7638728..501b4fe55a3d 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1930,7 +1930,7 @@ void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out, default: WARN(1, "Invalid aspect ratio (0%x) on mode\n", in->picture_aspect_ratio); - /* fall through */ + fallthrough; case HDMI_PICTURE_ASPECT_NONE: out->flags |= DRM_MODE_FLAG_PIC_AR_NONE; break; diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 7a6f6df5e954..b38e9b592b8a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -987,10 +987,10 @@ static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi, switch (length) { case 3: reg |= payload[2] << 16; - /* Fall through */ + fallthrough; case 2: reg |= payload[1] << 8; - /* Fall through */ + fallthrough; case 1: reg |= payload[0]; exynos_dsi_write(dsi, DSIM_PAYLOAD_REG, reg); @@ -1038,7 +1038,7 @@ static void exynos_dsi_read_from_fifo(struct exynos_dsi *dsi, payload[1] = reg >> 16; ++xfer->rx_done; } - /* Fall through */ + fallthrough; case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE: case 
MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE: payload[0] = reg >> 8; @@ -1082,10 +1082,10 @@ static void exynos_dsi_read_from_fifo(struct exynos_dsi *dsi, switch (length) { case 3: payload[2] = (reg >> 16) & 0xff; - /* Fall through */ + fallthrough; case 2: payload[1] = (reg >> 8) & 0xff; - /* Fall through */ + fallthrough; case 1: payload[0] = reg & 0xff; } diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c index 86fac677fe69..3c6d9f3913d5 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c @@ -101,19 +101,19 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane, break; case DRM_FORMAT_ARGB8888: alpha = DCU_LAYER_AB_WHOLE_FRAME; - /* fall-through */ + fallthrough; case DRM_FORMAT_XRGB8888: bpp = FSL_DCU_ARGB8888; break; case DRM_FORMAT_ARGB4444: alpha = DCU_LAYER_AB_WHOLE_FRAME; - /* fall-through */ + fallthrough; case DRM_FORMAT_XRGB4444: bpp = FSL_DCU_ARGB4444; break; case DRM_FORMAT_ARGB1555: alpha = DCU_LAYER_AB_WHOLE_FRAME; - /* fall-through */ + fallthrough; case DRM_FORMAT_XRGB1555: bpp = FSL_DCU_ARGB1555; break; diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 8c55f5bee9ab..f4053dd6bde9 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -712,7 +712,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, switch (intel_dsi->pixel_format) { default: MISSING_CASE(intel_dsi->pixel_format); - /* fallthrough */ + fallthrough; case MIPI_DSI_FMT_RGB565: tmp |= PIX_FMT_RGB565; break; @@ -739,7 +739,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, switch (intel_dsi->video_mode_format) { default: MISSING_CASE(intel_dsi->video_mode_format); - /* fallthrough */ + fallthrough; case VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS: tmp |= VIDEO_MODE_SYNC_EVENT; break; @@ -792,7 +792,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, switch (pipe) { default: MISSING_CASE(pipe); - /* fallthrough */ + fallthrough; case PIPE_A: tmp |= TRANS_DDI_EDP_INPUT_A_ON; break; diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index c53c85d38fa5..a0a41ec5c341 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -905,7 +905,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) drm_dbg_kms(&dev_priv->drm, "VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n", psr_table->tp1_wakeup_time); - /* fallthrough */ + fallthrough; case 2: dev_priv->vbt.psr.tp1_wakeup_time_us = 2500; break; @@ -925,7 +925,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) drm_dbg_kms(&dev_priv->drm, "VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n", psr_table->tp2_tp3_wakeup_time); - /* fallthrough */ + fallthrough; case 2: dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 2500; break; @@ -1775,7 +1775,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, switch (child->hdmi_max_data_rate) { default: MISSING_CASE(child->hdmi_max_data_rate); - /* fall through */ + fallthrough; case HDMI_MAX_DATA_RATE_PLATFORM: max_tmds_clock = 0; break; diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index bb91dace304a..91a8161e7c05 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ 
b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -326,7 +326,7 @@ static void pnv_get_cdclk(struct drm_i915_private *dev_priv, default: drm_err(&dev_priv->drm, "Unknown pnv display core clock 0x%04x\n", gcfgc); - /* fall through */ + fallthrough; case GC_DISPLAY_CLOCK_133_MHZ_PNV: cdclk_config->cdclk = 133333; break; @@ -766,7 +766,7 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv, switch (cdclk) { default: MISSING_CASE(cdclk); - /* fall through */ + fallthrough; case 337500: val |= LCPLL_CLK_FREQ_337_5_BDW; break; @@ -1042,7 +1042,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, drm_WARN_ON(&dev_priv->drm, cdclk != dev_priv->cdclk.hw.bypass); drm_WARN_ON(&dev_priv->drm, vco != 0); - /* fall through */ + fallthrough; case 308571: case 337500: freq_select = CDCLK_FREQ_337_308; @@ -1333,7 +1333,7 @@ static void icl_readout_refclk(struct drm_i915_private *dev_priv, switch (dssm) { default: MISSING_CASE(dssm); - /* fall through */ + fallthrough; case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz: cdclk_config->ref = 24000; break; @@ -1561,7 +1561,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, drm_WARN_ON(&dev_priv->drm, cdclk != dev_priv->cdclk.hw.bypass); drm_WARN_ON(&dev_priv->drm, vco != 0); - /* fall through */ + fallthrough; case 2: divider = BXT_CDCLK_CD2X_DIV_SEL_1; break; diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c index eccaa79cb4a9..6968de4f3477 100644 --- a/drivers/gpu/drm/i915/display/intel_combo_phy.c +++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c @@ -52,7 +52,7 @@ cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy) switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) { default: MISSING_CASE(val); - /* fall through */ + fallthrough; case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0: procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0]; break; @@ -320,7 +320,7 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv, break; default: MISSING_CASE(lane_count); - /* fall-through */ + fallthrough; case 4: lane_mask = PWR_UP_ALL_LANES; break; @@ -337,7 +337,7 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv, break; default: MISSING_CASE(lane_count); - /* fall-through */ + fallthrough; case 4: lane_mask = PWR_UP_ALL_LANES; break; diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 2c484b55bcdf..a49ff3a1a63c 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -1888,7 +1888,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { default: MISSING_CASE(tmp & TRANS_DDI_EDP_INPUT_MASK); - /* fallthrough */ + fallthrough; case TRANS_DDI_EDP_INPUT_A_ON: case TRANS_DDI_EDP_INPUT_A_ONOFF: *pipe_mask = BIT(PIPE_A); @@ -4268,7 +4268,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder, pipe_config->hdmi_scrambling = true; if (temp & TRANS_DDI_HIGH_TMDS_CHAR_RATE) pipe_config->hdmi_high_tmds_clock_ratio = true; - /* fall through */ + fallthrough; case TRANS_DDI_MODE_SELECT_DVI: pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI); pipe_config->lane_count = 4; diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index b2ec3a5141cc..68325678f5ef 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -2029,12 +2029,12 @@ intel_tile_width_bytes(const struct 
drm_framebuffer *fb, int color_plane) case I915_FORMAT_MOD_Y_TILED_CCS: if (is_ccs_plane(fb, color_plane)) return 128; - /* fall through */ + fallthrough; case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: if (is_ccs_plane(fb, color_plane)) return 64; - /* fall through */ + fallthrough; case I915_FORMAT_MOD_Y_TILED: if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv)) return 128; @@ -2043,7 +2043,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) case I915_FORMAT_MOD_Yf_TILED_CCS: if (is_ccs_plane(fb, color_plane)) return 128; - /* fall through */ + fallthrough; case I915_FORMAT_MOD_Yf_TILED: switch (cpp) { case 1: @@ -2185,7 +2185,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: if (is_semiplanar_uv_plane(fb, color_plane)) return intel_tile_row_size(fb, color_plane); - /* Fall-through */ + fallthrough; case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: return 16 * 1024; case I915_FORMAT_MOD_Y_TILED_CCS: @@ -2194,7 +2194,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, if (INTEL_GEN(dev_priv) >= 12 && is_semiplanar_uv_plane(fb, color_plane)) return intel_tile_row_size(fb, color_plane); - /* Fall-through */ + fallthrough; case I915_FORMAT_MOD_Yf_TILED: return 1 * 1024 * 1024; default: @@ -6211,7 +6211,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, case DRM_FORMAT_ARGB16161616F: if (INTEL_GEN(dev_priv) >= 11) break; - /* fall through */ + fallthrough; default: drm_dbg_kms(&dev_priv->drm, "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", @@ -10896,7 +10896,7 @@ static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, break; default: MISSING_CASE(ddi_pll_sel); - /* fall through */ + fallthrough; case PORT_CLK_SEL_NONE: return; } @@ -10956,10 +10956,10 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, drm_WARN(dev, 1, "unknown pipe linked to transcoder %s\n", transcoder_name(panel_transcoder)); - /* fall through */ + fallthrough; case TRANS_DDI_EDP_INPUT_A_ONOFF: force_thru = true; - /* fall through */ + fallthrough; case TRANS_DDI_EDP_INPUT_A_ON: trans_pipe = PIPE_A; break; @@ -13183,7 +13183,7 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state) case INTEL_OUTPUT_DDI: if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev)))) break; - /* else, fall through */ + fallthrough; case INTEL_OUTPUT_DP: case INTEL_OUTPUT_HDMI: case INTEL_OUTPUT_EDP: diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index aeb6ee395cce..afa7a378b31d 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -892,7 +892,7 @@ static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv, refclk = dev_priv->dpll.ref_clks.nssc; break; } - /* fall through */ + fallthrough; case WRPLL_REF_PCH_SSC: /* * We could calculate spread here, but our checking @@ -2977,7 +2977,7 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, switch (dev_priv->dpll.ref_clks.nssc) { default: MISSING_CASE(dev_priv->dpll.ref_clks.nssc); - /* fall-through */ + fallthrough; case 19200: *pll_params = tgl_tbt_pll_19_2MHz_values; break; @@ -2992,7 +2992,7 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, switch (dev_priv->dpll.ref_clks.nssc) { default: MISSING_CASE(dev_priv->dpll.ref_clks.nssc); - /* fall-through */ + fallthrough; case 19200: case 38400: 
*pll_params = icl_tbt_pll_19_2MHz_values; @@ -3120,7 +3120,7 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, switch (div1) { default: MISSING_CASE(div1); - /* fall through */ + fallthrough; case 2: hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2; break; diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index bbde3b12c311..4072d7062efd 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -229,7 +229,7 @@ int intel_pch_panel_fitting(struct intel_crtc_state *crtc_state, case DRM_MODE_SCALE_NONE: WARN_ON(adjusted_mode->crtc_hdisplay != crtc_state->pipe_src_w); WARN_ON(adjusted_mode->crtc_vdisplay != crtc_state->pipe_src_h); - /* fall through */ + fallthrough; case DRM_MODE_SCALE_FULLSCREEN: x = y = 0; width = adjusted_mode->crtc_hdisplay; diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 2da4388e1540..5e9fb349c829 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -1531,7 +1531,7 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state, default: drm_WARN(&dev_priv->drm, 1, "unknown pixel multiplier specified\n"); - /* fall through */ + fallthrough; case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break; case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break; @@ -2549,19 +2549,19 @@ intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo) switch (sdvo->controlled_output) { case SDVO_OUTPUT_LVDS1: mask |= SDVO_OUTPUT_LVDS1; - /* fall through */ + fallthrough; case SDVO_OUTPUT_LVDS0: mask |= SDVO_OUTPUT_LVDS0; - /* fall through */ + fallthrough; case SDVO_OUTPUT_TMDS1: mask |= SDVO_OUTPUT_TMDS1; - /* fall through */ + fallthrough; case SDVO_OUTPUT_TMDS0: mask |= SDVO_OUTPUT_TMDS0; - /* fall through */ + fallthrough; case SDVO_OUTPUT_RGB1: mask |= SDVO_OUTPUT_RGB1; - /* fall through */ + fallthrough; case SDVO_OUTPUT_RGB0: mask |= SDVO_OUTPUT_RGB0; break; diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index d03860fef2d7..c89f5f7ccb06 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -2147,7 +2147,7 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, case DRM_FORMAT_RGB565: if (INTEL_GEN(dev_priv) >= 11) break; - /* fall through */ + fallthrough; case DRM_FORMAT_C8: case DRM_FORMAT_XRGB16161616F: case DRM_FORMAT_XBGR16161616F: @@ -2702,7 +2702,7 @@ static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane, if (modifier == DRM_FORMAT_MOD_LINEAR || modifier == I915_FORMAT_MOD_X_TILED) return true; - /* fall through */ + fallthrough; default: return false; } @@ -2733,7 +2733,7 @@ static bool snb_sprite_format_mod_supported(struct drm_plane *_plane, if (modifier == DRM_FORMAT_MOD_LINEAR || modifier == I915_FORMAT_MOD_X_TILED) return true; - /* fall through */ + fallthrough; default: return false; } @@ -2768,7 +2768,7 @@ static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane, if (modifier == DRM_FORMAT_MOD_LINEAR || modifier == I915_FORMAT_MOD_X_TILED) return true; - /* fall through */ + fallthrough; default: return false; } @@ -2801,7 +2801,7 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_ABGR8888: if (is_ccs_modifier(modifier)) return true; - /* fall through */ + fallthrough; case DRM_FORMAT_RGB565: case 
DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: @@ -2819,7 +2819,7 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_XVYU2101010: if (modifier == I915_FORMAT_MOD_Yf_TILED) return true; - /* fall through */ + fallthrough; case DRM_FORMAT_C8: case DRM_FORMAT_XBGR16161616F: case DRM_FORMAT_ABGR16161616F: @@ -2834,7 +2834,7 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, modifier == I915_FORMAT_MOD_X_TILED || modifier == I915_FORMAT_MOD_Y_TILED) return true; - /* fall through */ + fallthrough; default: return false; } @@ -2860,7 +2860,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: if (!gen12_plane_supports_mc_ccs(dev_priv, plane->id)) return false; - /* fall through */ + fallthrough; case DRM_FORMAT_MOD_LINEAR: case I915_FORMAT_MOD_X_TILED: case I915_FORMAT_MOD_Y_TILED: @@ -2877,7 +2877,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_ABGR8888: if (is_ccs_modifier(modifier)) return true; - /* fall through */ + fallthrough; case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: case DRM_FORMAT_UYVY: @@ -2889,7 +2889,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_P016: if (modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS) return true; - /* fall through */ + fallthrough; case DRM_FORMAT_RGB565: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: @@ -2910,7 +2910,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, modifier == I915_FORMAT_MOD_X_TILED || modifier == I915_FORMAT_MOD_Y_TILED) return true; - /* fall through */ + fallthrough; default: return false; } diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 5b5dc86a5737..8f67aef18b2d 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -159,7 +159,7 @@ int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port) switch (lane_mask) { default: MISSING_CASE(lane_mask); - /* fall-through */ + fallthrough; case 0x1: case 0x2: case 0x4: diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index b23368529a40..753f82d87a31 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -209,7 +209,7 @@ static vm_fault_t i915_error_to_vmf_fault(int err) switch (err) { default: WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err); - /* fallthrough */ + fallthrough; case -EIO: /* shmemfs failure from swap device */ case -EFAULT: /* purged object */ case -ENODEV: /* bad object, how did you get here! 
*/ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 7050519c87a4..d15ff6748a50 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -276,7 +276,7 @@ static void *i915_gem_object_map(struct drm_i915_gem_object *obj, switch (type) { default: MISSING_CASE(type); - /* fallthrough - to use PAGE_KERNEL anyway */ + fallthrough; /* to use PAGE_KERNEL anyway */ case I915_MAP_WB: pgprot = PAGE_KERNEL; break; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index e0f21f12d3ce..0be5e8683337 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -249,7 +249,7 @@ static void vlv_get_stolen_reserved(struct drm_i915_private *i915, switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) { default: MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK); - /* fall through */ + fallthrough; case GEN7_STOLEN_RESERVED_1M: *size = 1024 * 1024; break; @@ -416,7 +416,7 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915) case 4: if (!IS_G4X(i915)) break; - /* fall through */ + fallthrough; case 5: g4x_get_stolen_reserved(i915, uncore, &reserved_base, &reserved_size); @@ -445,7 +445,7 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915) break; default: MISSING_CASE(INTEL_GEN(i915)); - /* fall-through */ + fallthrough; case 11: case 12: icl_get_stolen_reserved(i915, uncore, diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index dd1a42c4d344..26087dd79782 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -213,7 +213,7 @@ u32 intel_engine_context_size(struct intel_gt *gt, u8 class) break; default: MISSING_CASE(class); - /* fall through */ + fallthrough; case VIDEO_DECODE_CLASS: case VIDEO_ENHANCEMENT_CLASS: case COPY_ENGINE_CLASS: diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index 62979ea591f0..99e28d9021e8 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -1437,7 +1437,7 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma) switch (vma->ggtt_view.type) { default: GEM_BUG_ON(vma->ggtt_view.type); - /* fall through */ + fallthrough; case I915_GGTT_VIEW_NORMAL: vma->pages = vma->obj->mm.pages; return 0; diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index 94915f668715..898593ca4889 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -100,7 +100,7 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset) */ default: GEM_BUG_ON(engine->id); - /* fallthrough */ + fallthrough; case RCS0: hwsp = RENDER_HWS_PGA_GEN7; break; diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 63bba7b4bb2f..05f3bc98d242 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1226,7 +1226,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification) switch (notification) { case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE: root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY; - /* fall through */ + fallthrough; case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE: mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps); return PTR_ERR_OR_ZERO(mm); diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c 
b/drivers/gpu/drm/i915/i915_gpu_error.c index 6a3a2ce0b394..3e6cbb0d1150 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1159,7 +1159,7 @@ static void engine_record_registers(struct intel_engine_coredump *ee) switch (engine->id) { default: MISSING_CASE(engine->id); - /* fall through */ + fallthrough; case RCS0: mmio = RENDER_HWS_PGA_GEN7; break; diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 056994224c6b..69c0fa20eba1 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -474,7 +474,7 @@ config_status(struct drm_i915_private *i915, u64 config) if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) /* Requires a mutex for sampling! */ return -ENODEV; - /* Fall-through. */ + fallthrough; case I915_PMU_REQUESTED_FREQUENCY: if (INTEL_GEN(i915) < 6) return -ENODEV; diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index 6776ebb3246d..8a4235d9d9f1 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -447,7 +447,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, if (fb->pitches[1] != fb->pitches[2]) return -EINVAL; - /* fall-through */ + fallthrough; case DRM_FORMAT_NV12: case DRM_FORMAT_NV16: ubo = drm_plane_state_to_ubo(state); diff --git a/drivers/gpu/drm/meson/meson_osd_afbcd.c b/drivers/gpu/drm/meson/meson_osd_afbcd.c index f12e0271f166..ffc6b584dbf8 100644 --- a/drivers/gpu/drm/meson/meson_osd_afbcd.c +++ b/drivers/gpu/drm/meson/meson_osd_afbcd.c @@ -205,7 +205,7 @@ static int meson_g12a_afbcd_pixel_fmt(u64 modifier, uint32_t format) /* YTR is forbidden for non XBGR formats */ if (modifier & AFBC_FORMAT_MOD_YTR) return -EINVAL; - /* fall through */ + fallthrough; case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ABGR8888: return MAFBC_FMT_RGBA8888; diff --git a/drivers/gpu/drm/meson/meson_overlay.c b/drivers/gpu/drm/meson/meson_overlay.c index a8bcc70644df..1ffbbecafa22 100644 --- a/drivers/gpu/drm/meson/meson_overlay.c +++ b/drivers/gpu/drm/meson/meson_overlay.c @@ -654,7 +654,7 @@ static void meson_overlay_atomic_update(struct drm_plane *plane, priv->viu.vd1_addr2, priv->viu.vd1_stride2, priv->viu.vd1_height2); - /* fallthrough */ + fallthrough; case 2: gem = drm_fb_cma_get_gem_obj(fb, 1); priv->viu.vd1_addr1 = gem->paddr + fb->offsets[1]; @@ -666,7 +666,7 @@ static void meson_overlay_atomic_update(struct drm_plane *plane, priv->viu.vd1_addr1, priv->viu.vd1_stride1, priv->viu.vd1_height1); - /* fallthrough */ + fallthrough; case 1: gem = drm_fb_cma_get_gem_obj(fb, 0); priv->viu.vd1_addr0 = gem->paddr + fb->offsets[0]; diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 9e63a190642c..84a5d9c1f2a2 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -59,7 +59,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: if (priv->lastctx == ctx) break; - /* fall-thru */ + fallthrough; case MSM_SUBMIT_CMD_BUF: /* copy commands into RB: */ obj = submit->bos[submit->cmd[i].idx].obj; @@ -150,7 +150,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: if (priv->lastctx == ctx) break; - /* fall-thru */ + fallthrough; case MSM_SUBMIT_CMD_BUF: OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c 
b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index b67b38c8fadf..4c9bda19cbc4 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -602,7 +602,7 @@ static void a6xx_gmu_power_config(struct a6xx_gmu *gmu) gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE | A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE); - /* Fall through */ + fallthrough; case GMU_IDLE_STATE_SPTP: gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST, GMU_PWR_COL_HYST); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index c5a3e4d4c007..3966abd523cc 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -117,7 +117,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: if (priv->lastctx == ctx) break; - /* fall-thru */ + fallthrough; case MSM_SUBMIT_CMD_BUF: OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index e23641a5ec84..bb0b09790157 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -474,7 +474,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, /* ignore if there has not been a ctx switch: */ if (priv->lastctx == ctx) break; - /* fall-thru */ + fallthrough; case MSM_SUBMIT_CMD_BUF: OUT_PKT3(ring, adreno_is_a4xx(adreno_gpu) ? CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2); diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index e0817934ee16..bd12eae0cb31 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -597,7 +597,7 @@ static void venc_bridge_mode_set(struct drm_bridge *bridge, switch (venc_mode) { default: WARN_ON_ONCE(1); - /* Fall-through */ + fallthrough; case VENC_MODE_PAL: venc->config = &venc_config_pal_trm; break; diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index ba20c6f03719..886e9959496f 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -4856,7 +4856,7 @@ static void ci_request_link_speed_change_before_state_change(struct radeon_devic pi->force_pcie_gen = RADEON_PCIE_GEN2; if (current_link_speed == RADEON_PCIE_GEN2) break; - /* fall through */ + fallthrough; case RADEON_PCIE_GEN2: if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0) break; diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 3b7ead5be5bf..73f67bf222e1 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -820,7 +820,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, ((idx_value >> 21) & 0xF)); return -EINVAL; } - /* Fall through. */ + fallthrough; case 6: track->cb[i].cpp = 4; break; @@ -971,7 +971,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, return -EINVAL; } /* The same rules apply as for DXT3/5. */ - /* Fall through. 
*/ + fallthrough; case R300_TX_FORMAT_DXT3: case R300_TX_FORMAT_DXT5: track->textures[i].cpp = 1; diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 1d4c04e0a449..50b89b6d9a6c 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c @@ -115,7 +115,7 @@ void r420_pipes_init(struct radeon_device *rdev) default: /* force to 1 pipe */ num_pipes = 1; - /* fall through */ + fallthrough; case 1: tmp = (0 << 1); break; diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 49e8266461f8..390a9621604a 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -487,7 +487,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) return -EINVAL; } } - /* fall through */ + fallthrough; case V_0280A0_CLEAR_ENABLE: { uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]); @@ -1535,7 +1535,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, break; case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA: is_array = true; - /* fall through */ + fallthrough; case V_038000_SQ_TEX_DIM_2D_MSAA: array_check.nsamples = 1 << llevel; llevel = 0; diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 1ad5c3b86b64..57fb3eb3a4b4 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -454,7 +454,7 @@ static int radeon_uvd_validate_codec(struct radeon_cs_parser *p, if (p->rdev->family >= CHIP_PALM) return 0; - /* fall through */ + fallthrough; default: DRM_ERROR("UVD codec not supported by hardware %d!\n", stream_type); diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index a167e1c36d24..d1c73e9db889 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -5744,7 +5744,7 @@ static void si_request_link_speed_change_before_state_change(struct radeon_devic si_pi->force_pcie_gen = RADEON_PCIE_GEN2; if (current_link_speed == RADEON_PCIE_GEN2) break; - /* fall through */ + fallthrough; case RADEON_PCIE_GEN2: if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0) break; diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c index f858d8d06347..800721153d51 100644 --- a/drivers/gpu/drm/radeon/uvd_v1_0.c +++ b/drivers/gpu/drm/radeon/uvd_v1_0.c @@ -219,7 +219,7 @@ done: WREG32(RS_DQ_RD_RET_CONF, 0x3f); WREG32(MC_CONFIG, 0x1f); - /* fall through */ + fallthrough; case CHIP_RV670: case CHIP_RV635: diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c index a2ac25c11c90..e0d40ae67d54 100644 --- a/drivers/gpu/drm/savage/savage_state.c +++ b/drivers/gpu/drm/savage/savage_state.c @@ -306,7 +306,7 @@ static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv, case SAVAGE_PRIM_TRILIST_201: reorder = 1; prim = SAVAGE_PRIM_TRILIST; - /* fall through */ + fallthrough; case SAVAGE_PRIM_TRILIST: if (n % 3 != 0) { DRM_ERROR("wrong number of vertices %u in TRILIST\n", @@ -444,7 +444,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv, case SAVAGE_PRIM_TRILIST_201: reorder = 1; prim = SAVAGE_PRIM_TRILIST; - /* fall through */ + fallthrough; case SAVAGE_PRIM_TRILIST: if (n % 3 != 0) { DRM_ERROR("wrong number of vertices %u in TRILIST\n", @@ -566,7 +566,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv, case SAVAGE_PRIM_TRILIST_201: reorder = 1; prim = SAVAGE_PRIM_TRILIST; - /* fall through */ + fallthrough; case 
SAVAGE_PRIM_TRILIST: if (n % 3 != 0) { DRM_ERROR("wrong number of indices %u in TRILIST\n", n); @@ -705,7 +705,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv, case SAVAGE_PRIM_TRILIST_201: reorder = 1; prim = SAVAGE_PRIM_TRILIST; - /* fall through */ + fallthrough; case SAVAGE_PRIM_TRILIST: if (n % 3 != 0) { DRM_ERROR("wrong number of indices %u in TRILIST\n", n); @@ -1066,7 +1066,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_ ret = -EINVAL; goto done; } - /* fall through */ + fallthrough; case SAVAGE_CMD_DMA_PRIM: case SAVAGE_CMD_VB_PRIM: if (!first_draw_cmd) diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index 008f07923bbc..38a558768e53 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -850,13 +850,13 @@ static int hdmi_audio_configure(struct sti_hdmi *hdmi) switch (info->channels) { case 8: audio_cfg |= HDMI_AUD_CFG_CH78_VALID; - /* fall through */ + fallthrough; case 6: audio_cfg |= HDMI_AUD_CFG_CH56_VALID; - /* fall through */ + fallthrough; case 4: audio_cfg |= HDMI_AUD_CFG_CH34_VALID | HDMI_AUD_CFG_8CH; - /* fall through */ + fallthrough; case 2: audio_cfg |= HDMI_AUD_CFG_CH12_VALID; break; diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 359b56e43b83..ced9a8287dd8 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -195,7 +195,7 @@ void sun4i_tcon_set_status(struct sun4i_tcon *tcon, switch (encoder->encoder_type) { case DRM_MODE_ENCODER_LVDS: is_lvds = true; - /* Fallthrough */ + fallthrough; case DRM_MODE_ENCODER_DSI: case DRM_MODE_ENCODER_NONE: channel = 0; @@ -342,7 +342,7 @@ static void sun4i_tcon0_mode_set_dithering(struct sun4i_tcon *tcon, /* R and B components are only 5 bits deep */ val |= SUN4I_TCON0_FRM_CTL_MODE_R; val |= SUN4I_TCON0_FRM_CTL_MODE_B; - /* Fall through */ + fallthrough; case MEDIA_BUS_FMT_RGB666_1X18: case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG: /* Fall through: enable dithering */ diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c index aa67cb037e9d..7f13f4d715bf 100644 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c @@ -1027,7 +1027,7 @@ static ssize_t sun6i_dsi_transfer(struct mipi_dsi_host *host, ret = sun6i_dsi_dcs_read(dsi, msg); break; } - /* Else, fall through */ + fallthrough; default: ret = -EINVAL; diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 9a0b3240bc58..424ad60b4f38 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -135,7 +135,7 @@ static inline u32 compute_dda_inc(unsigned int in, unsigned int out, bool v, default: WARN_ON_ONCE(1); - /* fallthrough */ + fallthrough; case 4: max = 4; break; diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index 1856962411c7..518220bd092a 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -386,7 +386,7 @@ static void tilcdc_crtc_set_mode(struct drm_crtc *crtc) case DRM_FORMAT_XBGR8888: case DRM_FORMAT_XRGB8888: reg |= LCDC_V2_TFT_24BPP_UNPACK; - /* fallthrough */ + fallthrough; case DRM_FORMAT_BGR888: case DRM_FORMAT_RGB888: reg |= LCDC_V2_TFT_24BPP_MODE; diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 33526c5df0e8..4732dcc80e11 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -525,7 +525,7 @@ int 
ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, if (unlikely(ret != 0)) return ret; } - /* fall through */ + fallthrough; case TTM_PL_TT: ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write); break; diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c index 551fa31629af..5771bb53ce6a 100644 --- a/drivers/gpu/drm/via/via_dmablit.c +++ b/drivers/gpu/drm/via/via_dmablit.c @@ -179,21 +179,21 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg) switch (vsg->state) { case dr_via_device_mapped: via_unmap_blit_from_device(pdev, vsg); - /* fall through */ + fallthrough; case dr_via_desc_pages_alloc: for (i = 0; i < vsg->num_desc_pages; ++i) { if (vsg->desc_pages[i] != NULL) free_page((unsigned long)vsg->desc_pages[i]); } kfree(vsg->desc_pages); - /* fall through */ + fallthrough; case dr_via_pages_locked: unpin_user_pages_dirty_lock(vsg->pages, vsg->num_pages, (vsg->direction == DMA_FROM_DEVICE)); - /* fall through */ + fallthrough; case dr_via_pages_alloc: vfree(vsg->pages); - /* fall through */ + fallthrough; default: vsg->state = dr_via_sg_init; } diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c index 013c9e0e412c..cc93a8c9547b 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.c +++ b/drivers/gpu/drm/xen/xen_drm_front.c @@ -649,9 +649,7 @@ static void displback_changed(struct xenbus_device *xb_dev, switch (backend_state) { case XenbusStateReconfiguring: - /* fall through */ case XenbusStateReconfigured: - /* fall through */ case XenbusStateInitialised: break; @@ -701,7 +699,6 @@ static void displback_changed(struct xenbus_device *xb_dev, break; case XenbusStateUnknown: - /* fall through */ case XenbusStateClosed: if (xb_dev->state == XenbusStateClosed) break; diff --git a/drivers/gpu/ipu-v3/ipu-dc.c b/drivers/gpu/ipu-v3/ipu-dc.c index dbcc16721931..34b4075a6a8e 100644 --- a/drivers/gpu/ipu-v3/ipu-dc.c +++ b/drivers/gpu/ipu-v3/ipu-dc.c @@ -141,7 +141,7 @@ static int ipu_bus_format_to_map(u32 fmt) switch (fmt) { default: WARN_ON(1); - /* fall-through */ + fallthrough; case MEDIA_BUS_FMT_RGB888_1X24: return IPU_DC_MAP_RGB24; case MEDIA_BUS_FMT_RGB565_1X16: diff --git a/drivers/hid/hid-lg-g15.c b/drivers/hid/hid-lg-g15.c index ef0cbcd7540d..fcaf8466e627 100644 --- a/drivers/hid/hid-lg-g15.c +++ b/drivers/hid/hid-lg-g15.c @@ -680,7 +680,7 @@ static int lg_g15_register_led(struct lg_g15_data *g15, int i) * but it does have a separate power-on (reset) value. 
*/ g15->leds[i].cdev.name = "g15::power_on_backlight_val"; - /* fall through */ + fallthrough; case LG_G15_KBD_BRIGHTNESS: g15->leds[i].cdev.brightness_set_blocking = lg_g510_kbd_led_set; diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index a78c13cc9f47..38ee25a813b9 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -844,7 +844,7 @@ static void logi_dj_recv_queue_notification(struct dj_receiver_dev *djrcv_dev, workitem.type = WORKITEM_TYPE_EMPTY; break; } - /* fall-through */ + fallthrough; case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED: workitem.quad_id_msb = dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_MSB]; diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c index 2d8b589201a4..5576fed7bc15 100644 --- a/drivers/hid/hid-microsoft.c +++ b/drivers/hid/hid-microsoft.c @@ -163,16 +163,13 @@ static int ms_surface_dial_quirk(struct hid_input *hi, struct hid_field *field, { switch (usage->hid & HID_USAGE_PAGE) { case 0xff070000: - /* fall-through */ case HID_UP_DIGITIZER: /* ignore those axis */ return -1; case HID_UP_GENDESK: switch (usage->hid) { case HID_GD_X: - /* fall-through */ case HID_GD_Y: - /* fall-through */ case HID_GD_RFKILL_BTN: /* ignore those axis */ return -1; diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c index 8cffa84c9650..7f41213d5ae3 100644 --- a/drivers/hid/hid-rmi.c +++ b/drivers/hid/hid-rmi.c @@ -428,7 +428,6 @@ static void rmi_report(struct hid_device *hid, struct hid_report *report) switch (report->id) { case RMI_READ_DATA_REPORT_ID: - /* fall-through */ case RMI_ATTN_REPORT_ID: return; } diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c index 1a6e600197d0..2ff4c8e366ff 100644 --- a/drivers/hid/hid-roccat-kone.c +++ b/drivers/hid/hid-roccat-kone.c @@ -780,7 +780,7 @@ static void kone_keep_values_up_to_date(struct kone_device *kone, case kone_mouse_event_switch_profile: kone->actual_dpi = kone->profiles[event->value - 1]. 
startup_dpi; - /* fall through */ + fallthrough; case kone_mouse_event_osd_profile: kone->actual_profile = event->value; break; diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c index 78a364ae2f68..7d20d1fcf8d2 100644 --- a/drivers/hid/hid-uclogic-params.c +++ b/drivers/hid/hid-uclogic-params.c @@ -974,7 +974,7 @@ int uclogic_params_init(struct uclogic_params *params, } break; } - /* FALL THROUGH */ + fallthrough; case VID_PID(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET): case VID_PID(USB_VENDOR_ID_HUION, diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c index 679e142fc850..e484c3618dec 100644 --- a/drivers/hid/hid-wiimote-core.c +++ b/drivers/hid/hid-wiimote-core.c @@ -1672,7 +1672,6 @@ static ssize_t wiimote_ext_show(struct device *dev, case WIIMOTE_EXT_GUITAR: return sprintf(buf, "guitar\n"); case WIIMOTE_EXT_UNKNOWN: - /* fallthrough */ default: return sprintf(buf, "unknown\n"); } @@ -1722,7 +1721,6 @@ static ssize_t wiimote_dev_show(struct device *dev, case WIIMOTE_DEV_PENDING: return sprintf(buf, "pending\n"); case WIIMOTE_DEV_UNKNOWN: - /* fallthrough */ default: return sprintf(buf, "unknown\n"); } diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 4140dea693e9..7c0752fbfcf7 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -781,7 +781,6 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) break; case HIDIOCGUCODE: - /* fall through */ case HIDIOCGUSAGE: case HIDIOCSUSAGE: case HIDIOCGUSAGES: diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 1c96809b51c9..83dfec327c42 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -341,7 +341,7 @@ static int wacom_graphire_irq(struct wacom_wac *wacom) case 2: /* Mouse with wheel */ input_report_key(input, BTN_MIDDLE, data[1] & 0x04); - /* fall through */ + fallthrough; case 3: /* Mouse without wheel */ wacom->tool[0] = BTN_TOOL_MOUSE; @@ -1201,7 +1201,7 @@ static int wacom_intuos_bt_irq(struct wacom_wac *wacom, size_t len) case 0x04: wacom_intuos_bt_process_data(wacom, data + i); i += 10; - /* fall through */ + fallthrough; case 0x03: wacom_intuos_bt_process_data(wacom, data + i); i += 10; @@ -2148,7 +2148,7 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field for (i = 0; i < wacom->led.count; i++) wacom_update_led(wacom, features->numbered_buttons, value, i); - /* fall through*/ + fallthrough; default: do_report = true; break; @@ -3602,14 +3602,14 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev, switch (features->type) { case GRAPHIRE_BT: __clear_bit(ABS_MISC, input_dev->absbit); - /* fall through */ + fallthrough; case WACOM_MO: case WACOM_G4: input_set_abs_params(input_dev, ABS_DISTANCE, 0, features->distance_max, features->distance_fuzz, 0); - /* fall through */ + fallthrough; case GRAPHIRE: input_set_capability(input_dev, EV_REL, REL_WHEEL); @@ -3649,7 +3649,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev, case INTUOS4S: input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); input_abs_set_res(input_dev, ABS_Z, 287); - /* fall through */ + fallthrough; case INTUOS: wacom_setup_intuos(wacom_wac); @@ -3682,7 +3682,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev, case TABLETPC: case TABLETPCE: __clear_bit(ABS_MISC, input_dev->absbit); - /* fall through */ + fallthrough; case DTUS: case DTUSX: @@ -3696,7 +3696,7 @@ int wacom_setup_pen_input_capabilities(struct 
input_dev *input_dev, case PTU: __set_bit(BTN_STYLUS2, input_dev->keybit); - /* fall through */ + fallthrough; case PENPARTNER: __set_bit(BTN_TOOL_PEN, input_dev->keybit); @@ -3799,7 +3799,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev, input_abs_set_res(input_dev, ABS_MT_POSITION_X, 40); input_abs_set_res(input_dev, ABS_MT_POSITION_Y, 40); - /* fall through */ + fallthrough; case INTUOS5: case INTUOS5L: @@ -3817,7 +3817,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev, input_set_abs_params(input_dev, ABS_MT_WIDTH_MAJOR, 0, features->x_max, 0, 0); input_set_abs_params(input_dev, ABS_MT_WIDTH_MINOR, 0, features->y_max, 0, 0); input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0); - /* fall through */ + fallthrough; case WACOM_27QHDT: if (wacom_wac->shared->touch->product == 0x32C || @@ -3826,14 +3826,14 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev, __set_bit(SW_MUTE_DEVICE, input_dev->swbit); wacom_wac->shared->has_mute_touch_switch = true; } - /* fall through */ + fallthrough; case MTSCREEN: case MTTPC: case MTTPC_B: case TABLETPC2FG: input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_DIRECT); - /*fall through */ + fallthrough; case TABLETPC: case TABLETPCE: @@ -3843,7 +3843,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev, case INTUOSHT2: input_dev->evbit[0] |= BIT_MASK(EV_SW); __set_bit(SW_MUTE_DEVICE, input_dev->swbit); - /* fall through */ + fallthrough; case BAMBOO_PT: case BAMBOO_TOUCH: @@ -4099,7 +4099,7 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev, __set_bit(KEY_BUTTONCONFIG, input_dev->keybit); __set_bit(KEY_INFO, input_dev->keybit); - /* fall through */ + fallthrough; case WACOM_21UX2: case WACOM_BEE: @@ -4115,7 +4115,7 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev, case INTUOS3: case INTUOS3L: input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0); - /* fall through */ + fallthrough; case INTUOS3S: input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); @@ -4139,7 +4139,7 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev, * ID_INPUT_TABLET to be set. */ __set_bit(BTN_STYLUS, input_dev->keybit); - /* fall through */ + fallthrough; case INTUOS4: case INTUOS4L: diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c index 365b5d5967ac..96d0eccca3aa 100644 --- a/drivers/hsi/clients/ssi_protocol.c +++ b/drivers/hsi/clients/ssi_protocol.c @@ -291,7 +291,7 @@ static void ssip_set_rxstate(struct ssi_protocol *ssi, unsigned int state) /* CMT speech workaround */ if (atomic_read(&ssi->tx_usecnt)) break; - /* Else, fall through */ + fallthrough; case RECEIVING: mod_timer(&ssi->keep_alive, jiffies + msecs_to_jiffies(SSIP_KATOUT)); @@ -466,7 +466,7 @@ static void ssip_keep_alive(struct timer_list *t) case SEND_READY: if (atomic_read(&ssi->tx_usecnt) == 0) break; - /* Fall through */ + fallthrough; /* * Workaround for cmt-speech in that case * we relay on audio timers. 
@@ -668,7 +668,7 @@ static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd) case ACTIVE: dev_err(&cl->device, "Boot info req on active state\n"); ssip_error(cl); - /* Fall through */ + fallthrough; case INIT: case HANDSHAKE: spin_lock_bh(&ssi->lock); diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c index 4bc4a201f0f6..fa69b94debd9 100644 --- a/drivers/hsi/controllers/omap_ssi_core.c +++ b/drivers/hsi/controllers/omap_ssi_core.c @@ -296,7 +296,7 @@ static int ssi_clk_event(struct notifier_block *nb, unsigned long event, break; case ABORT_RATE_CHANGE: dev_dbg(&ssi->device, "abort rate change\n"); - /* Fall through */ + fallthrough; case POST_RATE_CHANGE: dev_dbg(&ssi->device, "post rate change (%lu -> %lu)\n", clk_data->old_rate, clk_data->new_rate); diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c index e74b144b8f3d..754d35a25a1c 100644 --- a/drivers/hv/hv_kvp.c +++ b/drivers/hv/hv_kvp.c @@ -354,7 +354,7 @@ static void process_ib_ipinfo(void *in_msg, void *out_msg, int op) out->body.kvp_ip_val.dhcp_enabled = in->kvp_ip_val.dhcp_enabled; - /* fallthrough */ + fallthrough; case KVP_OP_GET_IP_INFO: utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.adapter_id, diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c index 319a0519ebdb..208813158bb4 100644 --- a/drivers/hwmon/adt7462.c +++ b/drivers/hwmon/adt7462.c @@ -435,7 +435,7 @@ static const char *voltage_label(struct adt7462_data *data, int which) case 3: return "+1.5V"; } - /* fall through */ + fallthrough; case 2: if (!(data->pin_cfg[1] & ADT7462_PIN22_INPUT)) return "+12V3"; @@ -493,7 +493,7 @@ static const char *voltage_label(struct adt7462_data *data, int which) case 3: return "+1.5"; } - /* fall through */ + fallthrough; case 11: if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT == ADT7462_PIN28_VOLT && @@ -531,7 +531,7 @@ static int voltage_multiplier(struct adt7462_data *data, int which) case 3: return 7800; } - /* fall through */ + fallthrough; case 2: if (!(data->pin_cfg[1] & ADT7462_PIN22_INPUT)) return 62500; @@ -589,7 +589,7 @@ static int voltage_multiplier(struct adt7462_data *data, int which) case 3: return 7800; } - /* fall through */ + fallthrough; case 11: case 12: if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT == diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c index cf0962f7a020..e9c0bbc2caa9 100644 --- a/drivers/hwmon/emc1403.c +++ b/drivers/hwmon/emc1403.c @@ -406,10 +406,10 @@ static int emc1403_probe(struct i2c_client *client, switch (id->driver_data) { case emc1404: data->groups[2] = &emc1404_group; - /* fall through */ + fallthrough; case emc1403: data->groups[1] = &emc1403_group; - /* fall through */ + fallthrough; case emc1402: data->groups[0] = &emc1402_group; } diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c index d09deb409de7..4dec793fd07d 100644 --- a/drivers/hwmon/f71882fg.c +++ b/drivers/hwmon/f71882fg.c @@ -1285,7 +1285,7 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev) data->pwm_auto_point_pwm[nr][0] = f71882fg_read8(data, F71882FG_REG_POINT_PWM(nr, 0)); - /* Fall through */ + fallthrough; case f71862fg: data->pwm_auto_point_pwm[nr][1] = f71882fg_read8(data, @@ -2442,7 +2442,7 @@ static int f71882fg_probe(struct platform_device *pdev) case f71869a: /* These always have signed auto point temps */ data->auto_point_temp_signed = 1; - /* Fall through - to select correct fan/pwm reg bank! */ + fallthrough; /* to select correct fan/pwm reg bank! 
*/ case f71889fg: case f71889ed: case f71889a: diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c index eb72e390844e..6d1175a51832 100644 --- a/drivers/hwmon/hwmon-vid.c +++ b/drivers/hwmon/hwmon-vid.c @@ -96,7 +96,7 @@ int vid_from_reg(int val, u8 vrm) val &= 0x1f; if (val == 0x1f) return 0; - /* fall through */ + fallthrough; case 25: /* AMD NPT 0Fh */ val &= 0x3f; return (val < 32) ? 1550 - 25 * val @@ -122,7 +122,7 @@ int vid_from_reg(int val, u8 vrm) case 84: /* VRM 8.4 */ val &= 0x0f; - /* fall through */ + fallthrough; case 82: /* VRM 8.2 */ val &= 0x1f; return val == 0x1f ? 0 : diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c index 7fc5b065ad8b..81e155692aba 100644 --- a/drivers/hwmon/ina3221.c +++ b/drivers/hwmon/ina3221.c @@ -352,7 +352,7 @@ static int ina3221_read_curr(struct device *dev, u32 attr, if (ret) return ret; - /* fall through */ + fallthrough; case hwmon_curr_crit: case hwmon_curr_max: if (!resistance_uo) diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index 750b08713dee..5bd15622a85f 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -2669,7 +2669,7 @@ static void pwm_update_registers(struct nct6775_data *data, int nr) case thermal_cruise: nct6775_write_value(data, data->REG_TARGET[nr], data->target_temp[nr]); - /* fall through */ + fallthrough; default: reg = nct6775_read_value(data, data->REG_FAN_MODE[nr]); reg = (reg & ~data->tolerance_mask) | diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c index 30e18eb60da7..a71777990d49 100644 --- a/drivers/hwmon/occ/common.c +++ b/drivers/hwmon/occ/common.c @@ -752,7 +752,7 @@ static int occ_setup_sensor_attrs(struct occ *occ) switch (sensors->freq.version) { case 2: show_freq = occ_show_freq_2; - /* fall through */ + fallthrough; case 1: num_attrs += (sensors->freq.num_sensors * 2); break; @@ -763,7 +763,7 @@ static int occ_setup_sensor_attrs(struct occ *occ) switch (sensors->power.version) { case 2: show_power = occ_show_power_2; - /* fall through */ + fallthrough; case 1: num_attrs += (sensors->power.num_sensors * 4); break; @@ -781,7 +781,7 @@ static int occ_setup_sensor_attrs(struct occ *occ) break; case 3: show_caps = occ_show_caps_3; - /* fall through */ + fallthrough; case 2: num_attrs += (sensors->caps.num_sensors * 8); break; diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c index e1d10a6b7f7c..a07b97400cba 100644 --- a/drivers/hwmon/w83627hf.c +++ b/drivers/hwmon/w83627hf.c @@ -1213,7 +1213,7 @@ temp_type_store(struct device *dev, struct device_attribute *devattr, case W83781D_DEFAULT_BETA: dev_warn(dev, "Sensor type %d is deprecated, please use 4 " "instead\n", W83781D_DEFAULT_BETA); - /* fall through */ + fallthrough; case 4: /* thermistor */ tmp = w83627hf_read_value(data, W83781D_REG_SCFG1); w83627hf_write_value(data, W83781D_REG_SCFG1, diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c index 015f1ea31966..d833a4f16c47 100644 --- a/drivers/hwmon/w83781d.c +++ b/drivers/hwmon/w83781d.c @@ -814,7 +814,7 @@ store_sensor(struct device *dev, struct device_attribute *da, dev_warn(dev, "Sensor type %d is deprecated, please use 4 instead\n", W83781D_DEFAULT_BETA); - /* fall through */ + fallthrough; case 4: /* thermistor */ tmp = w83781d_read_value(data, W83781D_REG_SCFG1); w83781d_write_value(data, W83781D_REG_SCFG1, diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c index 44f68b965aec..6d52b530b429 100644 --- a/drivers/hwmon/w83795.c +++ b/drivers/hwmon/w83795.c @@ -2127,7 +2127,7 @@ static void 
w83795_apply_temp_config(struct w83795_data *data, u8 config, if (temp_chan >= 4) break; data->temp_mode |= 1 << temp_chan; - /* fall through */ + fallthrough; case 0x3: /* Thermistor */ data->has_temp |= 1 << temp_chan; break; diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c index 96544b348c27..7e642fb3ed15 100644 --- a/drivers/hwtracing/coresight/coresight-cpu-debug.c +++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c @@ -346,10 +346,10 @@ static void debug_init_arch_data(void *info) switch (mode) { case EDDEVID_IMPL_FULL: drvdata->edvidsr_present = true; - /* Fall through */ + fallthrough; case EDDEVID_IMPL_EDPCSR_EDCIDSR: drvdata->edcidsr_present = true; - /* Fall through */ + fallthrough; case EDDEVID_IMPL_EDPCSR: /* * In ARM DDI 0487A.k, the EDDEVID1.PCSROffset is used to diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c index 6d7d2169bfb2..96425e818fc2 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.c +++ b/drivers/hwtracing/coresight/coresight-etm4x.c @@ -1382,7 +1382,6 @@ static int etm4_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd, return NOTIFY_BAD; break; case CPU_PM_EXIT: - /* fallthrough */ case CPU_PM_ENTER_FAILED: if (drvdata->state_needs_restore) etm4_cpu_restore(drvdata); diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c index 7040d583bed9..9ca3aaafcfbc 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.c +++ b/drivers/hwtracing/coresight/coresight-tmc.c @@ -84,9 +84,7 @@ u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata) */ switch (drvdata->memwidth) { case TMC_MEM_INTF_WIDTH_32BITS: - /* fallthrough */ case TMC_MEM_INTF_WIDTH_64BITS: - /* fallthrough */ case TMC_MEM_INTF_WIDTH_128BITS: mask = GENMASK(31, 4); break; diff --git a/drivers/hwtracing/intel_th/sth.c b/drivers/hwtracing/intel_th/sth.c index a1529f571491..9ca8c4e045f8 100644 --- a/drivers/hwtracing/intel_th/sth.c +++ b/drivers/hwtracing/intel_th/sth.c @@ -84,11 +84,11 @@ static ssize_t notrace sth_stm_packet(struct stm_data *stm_data, /* Global packets (GERR, XSYNC, TRIG) are sent with register writes */ case STP_PACKET_GERR: reg += 4; - /* fall through */ + fallthrough; case STP_PACKET_XSYNC: reg += 8; - /* fall through */ + fallthrough; case STP_PACKET_TRIG: if (flags & STP_PACKET_TIMESTAMPED) diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 175c590b93b7..12ac4212aded 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -1425,7 +1425,6 @@ omap_i2c_probe(struct platform_device *pdev) major = OMAP_I2C_REV_SCHEME_0_MAJOR(omap->rev); break; case OMAP_I2C_SCHEME_1: - /* FALLTHROUGH */ default: omap->regs = (u8 *)reg_map_ip_v2; rev = (rev << 16) | diff --git a/drivers/i2c/busses/i2c-opal.c b/drivers/i2c/busses/i2c-opal.c index 1c4c9bb06a0b..6eb0f50c5d28 100644 --- a/drivers/i2c/busses/i2c-opal.c +++ b/drivers/i2c/busses/i2c-opal.c @@ -125,7 +125,7 @@ static int i2c_opal_smbus_xfer(struct i2c_adapter *adap, u16 addr, case I2C_SMBUS_BYTE: req.buffer_ra = cpu_to_be64(__pa(&data->byte)); req.size = cpu_to_be32(1); - /* Fall through */ + fallthrough; case I2C_SMBUS_QUICK: req.type = (read_write == I2C_SMBUS_READ) ? 
OPAL_I2C_RAW_READ : OPAL_I2C_RAW_WRITE; diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c index 5c5306cd50ec..8513bd353c05 100644 --- a/drivers/i3c/master/dw-i3c-master.c +++ b/drivers/i3c/master/dw-i3c-master.c @@ -603,7 +603,7 @@ static int dw_i3c_master_bus_init(struct i3c_master_controller *m) ret = dw_i2c_clk_cfg(master); if (ret) return ret; - /* fall through */ + fallthrough; case I3C_BUS_MODE_PURE: ret = dw_i3c_clk_cfg(master); if (ret) diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c index fd3b5da44619..50c9a41467c8 100644 --- a/drivers/ide/hpt366.c +++ b/drivers/ide/hpt366.c @@ -575,14 +575,14 @@ static u8 hpt3xx_udma_filter(ide_drive_t *drive) if (!HPT370_ALLOW_ATA100_5 || check_in_drive_list(drive, bad_ata100_5)) return ATA_UDMA4; - /* fall through */ + fallthrough; case HPT372 : case HPT372A: case HPT372N: case HPT374 : if (ata_id_is_sata(drive->id)) mask &= ~0x0e; - /* fall through */ + fallthrough; default: return mask; } @@ -602,7 +602,7 @@ static u8 hpt3xx_mdma_filter(ide_drive_t *drive) case HPT374 : if (ata_id_is_sata(drive->id)) return 0x00; - /* fall through */ + fallthrough; default: return 0x07; } diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 7f17f8303988..212bb2d8bf34 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -350,7 +350,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat) */ if (scsi_req(rq)->cmd[0] == GPCMD_START_STOP_UNIT) break; - /* fall-through */ + fallthrough; case DATA_PROTECT: /* * No point in retrying after an illegal request or data @@ -750,7 +750,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) case REQ_OP_DRV_IN: case REQ_OP_DRV_OUT: expiry = ide_cd_expiry; - /*FALLTHRU*/ + fallthrough; default: timeout = ATAPI_WAIT_PC; break; diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c index 1fe1f9d37a51..af7503b47dbe 100644 --- a/drivers/ide/ide-floppy.c +++ b/drivers/ide/ide-floppy.c @@ -428,7 +428,7 @@ static int ide_floppy_get_capacity(ide_drive_t *drive) * (maintains previous driver behaviour) */ break; - /* fall through */ + fallthrough; case CAPACITY_CURRENT: /* Normal Zip/LS-120 disks */ if (memcmp(cap_desc, &floppy->cap_desc, 8)) diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index e867129466b0..1ddc45a04418 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -143,7 +143,7 @@ static void ide_classify_atapi_dev(ide_drive_t *drive) } /* Early cdrom models used zero */ type = ide_cdrom; - /* fall through */ + fallthrough; case ide_cdrom: drive->dev_flags |= IDE_DFLAG_REMOVABLE; #ifdef CONFIG_PPC diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c index a26f85ab58a9..d016cbe68cba 100644 --- a/drivers/ide/ide-taskfile.c +++ b/drivers/ide/ide-taskfile.c @@ -129,7 +129,7 @@ ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd) return pre_task_out_intr(drive, cmd); } handler = task_pio_intr; - /* fall through */ + fallthrough; case ATA_PROT_NODATA: if (handler == NULL) handler = task_no_data_intr; @@ -141,7 +141,7 @@ ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd) hwif->expiry = dma_ops->dma_timer_expiry; ide_execute_command(drive, cmd, ide_dma_intr, 2 * WAIT_CMD); dma_ops->dma_start(drive); - /* fall through */ + fallthrough; default: return ide_started; } @@ -579,10 +579,10 @@ int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg) goto abort; } cmd.tf_flags |= IDE_TFLAG_MULTI_PIO; - /* fall through */ + fallthrough; case 
TASKFILE_OUT: cmd.protocol = ATA_PROT_PIO; - /* fall through */ + fallthrough; case TASKFILE_OUT_DMAQ: case TASKFILE_OUT_DMA: cmd.tf_flags |= IDE_TFLAG_WRITE; @@ -598,10 +598,10 @@ int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg) goto abort; } cmd.tf_flags |= IDE_TFLAG_MULTI_PIO; - /* fall through */ + fallthrough; case TASKFILE_IN: cmd.protocol = ATA_PROT_PIO; - /* fall through */ + fallthrough; case TASKFILE_IN_DMAQ: case TASKFILE_IN_DMA: nsect = taskin / SECTOR_SIZE; diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c index 024bc7ba49ee..1a700bef6c56 100644 --- a/drivers/ide/sis5513.c +++ b/drivers/ide/sis5513.c @@ -494,7 +494,7 @@ static int init_chipset_sis5513(struct pci_dev *dev) pci_read_config_byte(dev, 0x09, ®); if ((reg & 0x0f) != 0x00) pci_write_config_byte(dev, 0x09, reg&0xf0); - /* fall through */ + fallthrough; case ATA_16: /* force per drive recovery and active timings needed on ATA_33 and below chips */ diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index ba27f8673131..4e6e70250048 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -1580,7 +1580,7 @@ static int mma8452_probe(struct i2c_client *client, case FXLS8471_DEVICE_ID: if (ret == data->chip_info->chip_id) break; - /* fall through */ + fallthrough; default: ret = -ENODEV; goto disable_regulators; diff --git a/drivers/iio/adc/ab8500-gpadc.c b/drivers/iio/adc/ab8500-gpadc.c index 7fdc5d2d1d35..1bb987a4acba 100644 --- a/drivers/iio/adc/ab8500-gpadc.c +++ b/drivers/iio/adc/ab8500-gpadc.c @@ -484,7 +484,7 @@ static int ab8500_gpadc_read(struct ab8500_gpadc *gpadc, delay_max = 10000; /* large range optimises sleepmode */ break; } - /* Fall through */ + fallthrough; default: ctrl1 |= AB8500_GPADC_CTRL1_BUF_ENA; break; diff --git a/drivers/iio/adc/cpcap-adc.c b/drivers/iio/adc/cpcap-adc.c index 84a1733e5913..64c3cc382311 100644 --- a/drivers/iio/adc/cpcap-adc.c +++ b/drivers/iio/adc/cpcap-adc.c @@ -690,7 +690,7 @@ static void cpcap_adc_phase(struct cpcap_adc_request *req) break; case CPCAP_ADC_BATTI_PI17: index = req->bank_index; - /* fallthrough */ + fallthrough; default: req->result += conv_tbl[index].cal_offset; req->result += conv_tbl[index].align_offset; diff --git a/drivers/iio/chemical/sps30.c b/drivers/iio/chemical/sps30.c index 5a29e32c295f..2ea9a5c4d846 100644 --- a/drivers/iio/chemical/sps30.c +++ b/drivers/iio/chemical/sps30.c @@ -118,7 +118,7 @@ static int sps30_do_cmd(struct sps30_state *state, u16 cmd, u8 *data, int size) case SPS30_READ_AUTO_CLEANING_PERIOD: buf[0] = SPS30_AUTO_CLEANING_PERIOD >> 8; buf[1] = (u8)(SPS30_AUTO_CLEANING_PERIOD & 0xff); - /* fall through */ + fallthrough; case SPS30_READ_DATA_READY_FLAG: case SPS30_READ_DATA: case SPS30_READ_SERIAL: diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c index cc4875660a69..1fd75c02a7cd 100644 --- a/drivers/iio/dac/ad5592r-base.c +++ b/drivers/iio/dac/ad5592r-base.c @@ -220,7 +220,6 @@ static int ad5592r_set_channel_modes(struct ad5592r_state *st) break; case CH_MODE_UNUSED: - /* fall-through */ default: switch (st->channel_offstate[i]) { case CH_OFFSTATE_OUT_TRISTATE: @@ -237,7 +236,6 @@ static int ad5592r_set_channel_modes(struct ad5592r_state *st) break; case CH_OFFSTATE_PULLDOWN: - /* fall-through */ default: pulldown |= BIT(i); break; diff --git a/drivers/iio/dac/dpot-dac.c b/drivers/iio/dac/dpot-dac.c index b3835fb6b862..be61c3b01e8b 100644 --- a/drivers/iio/dac/dpot-dac.c +++ b/drivers/iio/dac/dpot-dac.c @@ -78,7 +78,7 @@ static int 
dpot_dac_read_raw(struct iio_dev *indio_dev, */ *val2 = 1; ret = IIO_VAL_FRACTIONAL; - /* fall through */ + fallthrough; case IIO_VAL_FRACTIONAL: *val *= regulator_get_voltage(dac->vref) / 1000; *val2 *= dac->max_ohms; diff --git a/drivers/iio/health/max30102.c b/drivers/iio/health/max30102.c index 9b47d9472a4f..d9b2ed80882a 100644 --- a/drivers/iio/health/max30102.c +++ b/drivers/iio/health/max30102.c @@ -273,10 +273,10 @@ static int max30102_read_measurement(struct max30102_data *data, switch (measurements) { case 3: MAX30102_COPY_DATA(2); - /* fall through */ + fallthrough; case 2: MAX30102_COPY_DATA(1); - /* fall through */ + fallthrough; case 1: MAX30102_COPY_DATA(0); break; diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c index c539dfa3b8d3..319b64b2fd88 100644 --- a/drivers/iio/imu/adis.c +++ b/drivers/iio/imu/adis.c @@ -97,11 +97,11 @@ int __adis_write_reg(struct adis *adis, unsigned int reg, adis->tx[9] = (value >> 24) & 0xff; adis->tx[6] = ADIS_WRITE_REG(reg + 2); adis->tx[7] = (value >> 16) & 0xff; - /* fall through */ + fallthrough; case 2: adis->tx[4] = ADIS_WRITE_REG(reg + 1); adis->tx[5] = (value >> 8) & 0xff; - /* fall through */ + fallthrough; case 1: adis->tx[2] = ADIS_WRITE_REG(reg); adis->tx[3] = value & 0xff; @@ -191,7 +191,7 @@ int __adis_read_reg(struct adis *adis, unsigned int reg, adis->tx[2] = ADIS_READ_REG(reg + 2); adis->tx[3] = 0; spi_message_add_tail(&xfers[1], &msg); - /* fall through */ + fallthrough; case 2: adis->tx[4] = ADIS_READ_REG(reg); adis->tx[5] = 0; diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 606d5e61c575..cdcd16f19500 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -599,7 +599,7 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type, return scnprintf(buf, len, "%d", vals[0]); case IIO_VAL_INT_PLUS_MICRO_DB: scale_db = true; - /* fall through */ + fallthrough; case IIO_VAL_INT_PLUS_MICRO: if (vals[1] < 0) return scnprintf(buf, len, "-%d.%06u%s", abs(vals[0]), @@ -918,7 +918,7 @@ static ssize_t iio_write_channel_info(struct device *dev, break; case IIO_VAL_INT_PLUS_MICRO_DB: scale_db = true; - /* fall through */ + fallthrough; case IIO_VAL_INT_PLUS_MICRO: fract_mult = 100000; break; diff --git a/drivers/iio/light/si1145.c b/drivers/iio/light/si1145.c index 155faaea8c72..8f5f857c2e7d 100644 --- a/drivers/iio/light/si1145.c +++ b/drivers/iio/light/si1145.c @@ -1042,7 +1042,7 @@ static int si1145_initialize(struct si1145_data *data) SI1145_LED_CURRENT_45mA); if (ret < 0) return ret; - /* fallthrough */ + fallthrough; case 2: ret = i2c_smbus_write_byte_data(client, SI1145_REG_PS_LED21, diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c index 6a8ae145f0c0..cbb44e401c0a 100644 --- a/drivers/iio/magnetometer/ak8974.c +++ b/drivers/iio/magnetometer/ak8974.c @@ -499,7 +499,7 @@ static int ak8974_detect(struct ak8974 *ak8974) switch (whoami) { case AK8974_WHOAMI_VALUE_AMI306: name = "ami306"; - /* fall-through */ + fallthrough; case AK8974_WHOAMI_VALUE_AMI305: ret = regmap_read(ak8974->map, AMI305_VER, &fw); if (ret) diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index dc0558b23158..fbc28f1a8b92 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -3034,7 +3034,7 @@ static int cm_rej_handler(struct cm_work *work) case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); - /* fall through */ + fallthrough; case 
IB_CM_REQ_RCVD: case IB_CM_MRA_REQ_SENT: if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_STALE_CONN) @@ -3044,7 +3044,7 @@ static int cm_rej_handler(struct cm_work *work) break; case IB_CM_DREQ_SENT: ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); - /* fall through */ + fallthrough; case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: cm_enter_timewait(cm_id_priv); @@ -3058,7 +3058,7 @@ static int cm_rej_handler(struct cm_work *work) cm_enter_timewait(cm_id_priv); break; } - /* fall through */ + fallthrough; default: pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n", __func__, be32_to_cpu(cm_id_priv->id.local_id), @@ -3116,7 +3116,7 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id, msg_response = CM_MSG_RESPONSE_OTHER; break; } - /* fall through */ + fallthrough; default: pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n", __func__, be32_to_cpu(cm_id_priv->id.local_id), @@ -3227,7 +3227,7 @@ static int cm_mra_handler(struct cm_work *work) case IB_CM_MRA_REP_RCVD: atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_MRA_COUNTER]); - /* fall through */ + fallthrough; default: pr_debug("%s local_id %d, cm_id_priv->id.state: %d\n", __func__, be32_to_cpu(cm_id_priv->id.local_id), @@ -4214,7 +4214,7 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv, qp_attr->retry_cnt = cm_id_priv->retry_count; qp_attr->rnr_retry = cm_id_priv->rnr_retry_count; qp_attr->max_rd_atomic = cm_id_priv->initiator_depth; - /* fall through */ + fallthrough; case IB_QPT_XRC_TGT: *qp_attr_mask |= IB_QP_TIMEOUT; qp_attr->timeout = cm_id_priv->av.timeout; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 26de0dab60bb..7f0e91e92968 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1985,7 +1985,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, event.event = RDMA_CM_EVENT_ESTABLISHED; break; case IB_CM_DREQ_ERROR: - event.status = -ETIMEDOUT; /* fall through */ + event.status = -ETIMEDOUT; + fallthrough; case IB_CM_DREQ_RECEIVED: case IB_CM_DREP_RECEIVED: if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT, diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c index 614cff89fc71..13f43ab7220b 100644 --- a/drivers/infiniband/core/rw.c +++ b/drivers/infiniband/core/rw.c @@ -510,7 +510,6 @@ struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp, switch (ctx->type) { case RDMA_RW_SIG_MR: case RDMA_RW_MR: - /* fallthrough */ for (i = 0; i < ctx->nr_ops; i++) { rdma_rw_update_lkey(&ctx->reg[i], ctx->reg[i].wr.wr.opcode != diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index d03dacaef788..1d184ea05eba 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -794,7 +794,7 @@ static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp, case 2: ib_copy_path_rec_to_user(&resp->ib_route[1], &route->path_rec[1]); - /* fall through */ + fallthrough; case 1: ib_copy_path_rec_to_user(&resp->ib_route[0], &route->path_rec[0]); @@ -820,7 +820,7 @@ static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp, case 2: ib_copy_path_rec_to_user(&resp->ib_route[1], &route->path_rec[1]); - /* fall through */ + fallthrough; case 1: ib_copy_path_rec_to_user(&resp->ib_route[0], &route->path_rec[0]); diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c index ef04a261097f..e47c5949013f 100644 --- a/drivers/infiniband/core/uverbs_ioctl.c +++ b/drivers/infiniband/core/uverbs_ioctl.c @@ 
-259,7 +259,7 @@ static int uverbs_process_attr(struct bundle_priv *pbundle, return -EOPNOTSUPP; e->ptr_attr.enum_id = uattr->attr_data.enum_data.elem_id; - /* fall through */ + fallthrough; case UVERBS_ATTR_TYPE_PTR_IN: /* Ensure that any data provided by userspace beyond the known * struct is zero. Userspace that knows how to use some future @@ -271,7 +271,7 @@ static int uverbs_process_attr(struct bundle_priv *pbundle, !uverbs_is_attr_cleared(uattr, val_spec->u.ptr.len)) return -EOPNOTSUPP; - /* fall through */ + fallthrough; case UVERBS_ATTR_TYPE_PTR_OUT: if (uattr->len < val_spec->u.ptr.min_len || (!val_spec->zero_trailing && diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 3f18efc0c297..5ee272d27aaa 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -2657,7 +2657,7 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr, default: break; } - /* fall through */ + fallthrough; case IB_WR_SEND_WITH_INV: rc = bnxt_re_build_send_wqe(qp, wr, &wqe); break; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 117b42349a28..d60e3dcea087 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -1779,7 +1779,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp, break; } - /* fall thru */ + fallthrough; case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM: case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV: { diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 77bc02a9228e..1f288c73ccfc 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -2885,7 +2885,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) case MORIBUND: case CLOSING: stop_ep_timer(ep); - /*FALLTHROUGH*/ + fallthrough; case FPDU_MODE: if (ep->com.qp && ep->com.qp->srq) { srqidx = ABORT_RSS_SRQIDX_G( @@ -3759,7 +3759,7 @@ static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, send_fw_act_open_req(ep, atid); return; } - /* fall through */ + fallthrough; case FW_EADDRINUSE: set_bit(ACT_RETRY_INUSE, &ep->com.history); if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index ac48012c992f..cbddb20c6121 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -1165,7 +1165,7 @@ int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, break; } fw_flags |= FW_RI_RDMA_WRITE_WITH_IMMEDIATE; - /*FALLTHROUGH*/ + fallthrough; case IB_WR_RDMA_WRITE: fw_opcode = FW_RI_RDMA_WRITE_WR; swsqe->opcode = FW_RI_RDMA_WRITE; diff --git a/drivers/infiniband/hw/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c index b12e4665c9ab..4a4ec2397857 100644 --- a/drivers/infiniband/hw/hfi1/pio_copy.c +++ b/drivers/infiniband/hw/hfi1/pio_copy.c @@ -209,7 +209,6 @@ static inline void jcopy(u8 *dest, const u8 *src, u32 n) fallthrough; case 1: *dest++ = *src++; - /* fall through */ } } diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index fa7a5ff498c7..a3b95805c154 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c @@ -2443,7 +2443,7 @@ static void i40iw_handle_rst_pkt(struct i40iw_cm_node *cm_node, case I40IW_CM_STATE_FIN_WAIT1: case I40IW_CM_STATE_LAST_ACK: cm_node->cm_id->rem_ref(cm_node->cm_id); - /* fall through */ + fallthrough; case 
I40IW_CM_STATE_TIME_WAIT: cm_node->state = I40IW_CM_STATE_CLOSED; i40iw_rem_ref_cm_node(cm_node); diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c index 688f19667221..86d3f8aff329 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c +++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c @@ -1964,7 +1964,6 @@ static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq, info->out_rdrsp = true; break; case I40IW_AE_SOURCE_RSVD: - /* fallthrough */ default: break; } @@ -3762,14 +3761,14 @@ static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp, LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID))); set_64bit_val(wqe, 56, info->entry[2].data); - /* fallthrough */ + fallthrough; case 2: set_64bit_val(wqe, 32, (LS_64(info->entry[1].cmd, I40IW_CQPSQ_UPESD_SDCMD) | LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID))); set_64bit_val(wqe, 40, info->entry[1].data); - /* fallthrough */ + fallthrough; case 1: set_64bit_val(wqe, 0, LS_64(info->entry[0].cmd, I40IW_CQPSQ_UPESD_SDCMD)); diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c index ae8b97c30665..e1085634b8d9 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_hw.c +++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c @@ -353,7 +353,6 @@ void i40iw_process_aeq(struct i40iw_device *iwdev) i40iw_cm_disconn(iwqp); break; case I40IW_AE_BAD_CLOSE: - /* fall through */ case I40IW_AE_RESET_SENT: i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 1, 0, 0); i40iw_cm_disconn(iwqp); @@ -413,7 +412,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev) case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG: case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT: ctx_info->err_rq_idx_valid = false; - /* fall through */ + fallthrough; default: if (!info->sq && ctx_info->err_rq_idx_valid) { ctx_info->err_rq_idx = info->wqe_idx; diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c index 9c96ece5e7f3..58a433135a03 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_main.c +++ b/drivers/infiniband/hw/i40iw/i40iw_main.c @@ -1489,36 +1489,35 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev) iwdev->iw_status = 0; i40iw_port_ibevent(iwdev); i40iw_destroy_rdma_device(iwdev->iwibdev); - /* fallthrough */ + fallthrough; case IP_ADDR_REGISTERED: if (!iwdev->reset) i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx); - /* fallthrough */ - /* fallthrough */ + fallthrough; case PBLE_CHUNK_MEM: i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc); - /* fallthrough */ + fallthrough; case CEQ_CREATED: i40iw_dele_ceqs(iwdev); - /* fallthrough */ + fallthrough; case AEQ_CREATED: i40iw_destroy_aeq(iwdev); - /* fallthrough */ + fallthrough; case IEQ_CREATED: i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset); - /* fallthrough */ + fallthrough; case ILQ_CREATED: i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset); - /* fallthrough */ + fallthrough; case CCQ_CREATED: i40iw_destroy_ccq(iwdev); - /* fallthrough */ + fallthrough; case HMC_OBJS_CREATED: i40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset); - /* fallthrough */ + fallthrough; case CQP_CREATED: i40iw_destroy_cqp(iwdev, true); - /* fallthrough */ + fallthrough; case INITIAL_STATE: i40iw_cleanup_cm_core(&iwdev->cm_core); if (iwdev->vsi.pestat) { @@ -1528,7 +1527,6 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev) i40iw_del_init_mem(iwdev); break; case INVALID_STATE: - /* fallthrough */ default: i40iw_pr_err("bad init_state = %d\n", iwdev->init_state); 
break; diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c index d9c7ae6a7030..924be4b03c9a 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_puda.c +++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c @@ -814,13 +814,13 @@ void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi, switch (rsrc->completion) { case PUDA_HASH_CRC_COMPLETE: i40iw_free_hash_desc(rsrc->hash_desc); - /* fall through */ + fallthrough; case PUDA_QP_CREATED: if (!reset) i40iw_puda_free_qp(rsrc); i40iw_free_dma_mem(dev->hw, &rsrc->qpmem); - /* fallthrough */ + fallthrough; case PUDA_CQ_CREATED: if (!reset) i40iw_puda_free_cq(rsrc); diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c index 016524683e17..e07fb37af086 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c @@ -190,9 +190,8 @@ int i40iw_inetaddr_event(struct notifier_block *notifier, switch (event) { case NETDEV_DOWN: action = I40IW_ARP_DELETE; - /* Fall through */ + fallthrough; case NETDEV_UP: - /* Fall through */ case NETDEV_CHANGEADDR: /* Just skip if no need to handle ARP cache */ @@ -247,9 +246,8 @@ int i40iw_inet6addr_event(struct notifier_block *notifier, switch (event) { case NETDEV_DOWN: action = I40IW_ARP_DELETE; - /* Fall through */ + fallthrough; case NETDEV_UP: - /* Fall through */ case NETDEV_CHANGEADDR: i40iw_manage_arp_cache(iwdev, netdev->dev_addr, @@ -344,7 +342,7 @@ int i40iw_netdevice_event(struct notifier_block *notifier, switch (event) { case NETDEV_DOWN: iwdev->iw_status = 0; - /* Fall through */ + fallthrough; case NETDEV_UP: i40iw_port_ibevent(iwdev); break; diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 6957e4f3404b..b51339328a51 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -810,7 +810,7 @@ void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp, case I40IW_QP_STATE_RTS: if (iwqp->iwarp_state == I40IW_QP_STATE_IDLE) i40iw_send_reset(iwqp->cm_node); - /* fall through */ + fallthrough; case I40IW_QP_STATE_IDLE: case I40IW_QP_STATE_TERMINATE: case I40IW_QP_STATE_CLOSING: @@ -2144,7 +2144,6 @@ static int i40iw_post_send(struct ib_qp *ibqp, switch (ib_wr->opcode) { case IB_WR_SEND: - /* fall-through */ case IB_WR_SEND_WITH_INV: if (ib_wr->opcode == IB_WR_SEND) { if (ib_wr->send_flags & IB_SEND_SOLICITED) @@ -2201,7 +2200,7 @@ static int i40iw_post_send(struct ib_qp *ibqp, break; case IB_WR_RDMA_READ_WITH_INV: inv_stag = true; - /* fall-through*/ + fallthrough; case IB_WR_RDMA_READ: if (ib_wr->num_sge > I40IW_MAX_SGE_RD) { err = -EINVAL; diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index f8b936b76dcd..8a3436994f80 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -765,13 +765,13 @@ repoll: switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) { case MLX4_OPCODE_RDMA_WRITE_IMM: wc->wc_flags |= IB_WC_WITH_IMM; - /* fall through */ + fallthrough; case MLX4_OPCODE_RDMA_WRITE: wc->opcode = IB_WC_RDMA_WRITE; break; case MLX4_OPCODE_SEND_IMM: wc->wc_flags |= IB_WC_WITH_IMM; - /* fall through */ + fallthrough; case MLX4_OPCODE_SEND: case MLX4_OPCODE_SEND_INVAL: wc->opcode = IB_WC_SEND; diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c index d844831179cf..5e4ec9786081 100644 --- a/drivers/infiniband/hw/mlx4/mcg.c +++ b/drivers/infiniband/hw/mlx4/mcg.c @@ -944,7 +944,7 @@ int 
mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port, switch (sa_mad->mad_hdr.method) { case IB_MGMT_METHOD_SET: may_create = 1; - /* fall through */ + fallthrough; case IB_SA_METHOD_DELETE: req = kzalloc(sizeof *req, GFP_KERNEL); if (!req) diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index f9ca6e000a81..2975f350b9fd 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -1578,12 +1578,12 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, pd = to_mxrcd(init_attr->xrcd)->pd; xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn; init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq; - /* fall through */ + fallthrough; case IB_QPT_XRC_INI: if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)) return ERR_PTR(-ENOSYS); init_attr->recv_cq = init_attr->send_cq; - /* fall through */ + fallthrough; case IB_QPT_RC: case IB_QPT_UC: case IB_QPT_RAW_PACKET: @@ -1592,7 +1592,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, return ERR_PTR(-ENOMEM); qp->pri.vid = 0xFFFF; qp->alt.vid = 0xFFFF; - /* fall through */ + fallthrough; case IB_QPT_UD: { err = create_qp_common(pd, init_attr, udata, 0, &qp); diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 0133ebb8d740..dceb0eb2bed1 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -121,13 +121,13 @@ static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe, switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) { case MLX5_OPCODE_RDMA_WRITE_IMM: wc->wc_flags |= IB_WC_WITH_IMM; - /* fall through */ + fallthrough; case MLX5_OPCODE_RDMA_WRITE: wc->opcode = IB_WC_RDMA_WRITE; break; case MLX5_OPCODE_SEND_IMM: wc->wc_flags |= IB_WC_WITH_IMM; - /* fall through */ + fallthrough; case MLX5_OPCODE_SEND: case MLX5_OPCODE_SEND_INVAL: wc->opcode = IB_WC_SEND; diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c index 454ce5de2de7..9bb9bb058932 100644 --- a/drivers/infiniband/hw/mlx5/mad.c +++ b/drivers/infiniband/hw/mlx5/mad.c @@ -250,9 +250,8 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, if (MLX5_CAP_GEN(dev->mdev, vport_counters) && method == IB_MGMT_METHOD_GET) return process_pma_cmd(dev, port_num, in, out); - /* fallthrough */ + fallthrough; case MLX5_IB_VENDOR_CLASS1: - /* fallthrough */ case MLX5_IB_VENDOR_CLASS2: case IB_MGMT_CLASS_CONG_MGMT: { if (method != IB_MGMT_METHOD_GET && diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index fbc45a5e76c5..d60d63221b14 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -2872,7 +2872,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work) break; case MLX5_EVENT_TYPE_GENERAL_EVENT: handle_general_event(ibdev, work->param, &ibev); - /* fall through */ + fallthrough; default: goto out; } diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 59fce5fac7a3..5758dbe64045 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -416,7 +416,7 @@ static int sq_overhead(struct ib_qp_init_attr *attr) switch (attr->qp_type) { case IB_QPT_XRC_INI: size += sizeof(struct mlx5_wqe_xrc_seg); - /* fall through */ + fallthrough; case IB_QPT_RC: size += sizeof(struct mlx5_wqe_ctrl_seg) + max(sizeof(struct mlx5_wqe_atomic_seg) + @@ -441,7 +441,7 @@ static int sq_overhead(struct ib_qp_init_attr *attr) if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) size += sizeof(struct 
mlx5_wqe_eth_pad) + sizeof(struct mlx5_wqe_eth_seg); - /* fall through */ + fallthrough; case IB_QPT_SMI: case MLX5_IB_QPT_HW_GSI: size += sizeof(struct mlx5_wqe_ctrl_seg) + diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c index 0823c0bc7e73..f051f4e06b53 100644 --- a/drivers/infiniband/hw/mthca/mthca_av.c +++ b/drivers/infiniband/hw/mthca/mthca_av.c @@ -115,7 +115,7 @@ static u8 ib_rate_to_memfree(u8 req_rate, u8 cur_rate) switch ((cur_rate - 1) / req_rate) { case 0: return MTHCA_RATE_MEMFREE_FULL; case 1: return MTHCA_RATE_MEMFREE_HALF; - case 2: /* fall through */ + case 2: case 3: return MTHCA_RATE_MEMFREE_QUARTER; default: return MTHCA_RATE_MEMFREE_EIGHTH; } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 6cdbec13756a..c1751c9a0f62 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -2134,7 +2134,7 @@ int ocrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, case IB_WR_SEND_WITH_IMM: hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT); hdr->immdt = ntohl(wr->ex.imm_data); - /* fall through */ + fallthrough; case IB_WR_SEND: hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT); ocrdma_build_send(qp, hdr, wr); @@ -2148,7 +2148,7 @@ int ocrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, case IB_WR_RDMA_WRITE_WITH_IMM: hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT); hdr->immdt = ntohl(wr->ex.imm_data); - /* fall through */ + fallthrough; case IB_WR_RDMA_WRITE: hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT); status = ocrdma_build_write(qp, hdr, wr); diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 4ce4e2eef6cc..b49bef94637e 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -3528,7 +3528,7 @@ static int __qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, break; case IB_WR_RDMA_READ_WITH_INV: SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1); - /* fallthrough -- same is identical to RDMA READ */ + fallthrough; /* same is identical to RDMA READ */ case IB_WR_RDMA_READ: wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD; diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index ca5ea734e3d0..44150be215bf 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c @@ -2973,11 +2973,11 @@ static u32 qib_6120_iblink_state(u64 ibcs) state = IB_PORT_ARMED; break; case IB_6120_L_STATE_ACTIVE: - /* fall through */ case IB_6120_L_STATE_ACT_DEFER: state = IB_PORT_ACTIVE; break; - default: /* fall through */ + default: + fallthrough; case IB_6120_L_STATE_DOWN: state = IB_PORT_DOWN; break; diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c index ea3ddb05cbad..0a6f26d4cb31 100644 --- a/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/drivers/infiniband/hw/qib/qib_iba7220.c @@ -3586,11 +3586,11 @@ static u32 qib_7220_iblink_state(u64 ibcs) state = IB_PORT_ARMED; break; case IB_7220_L_STATE_ACTIVE: - /* fall through */ case IB_7220_L_STATE_ACT_DEFER: state = IB_PORT_ACTIVE; break; - default: /* fall through */ + default: + fallthrough; case IB_7220_L_STATE_DOWN: state = IB_PORT_DOWN; break; diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 8bcbc884e5b6..a10eab89aee4 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ 
b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -5508,11 +5508,11 @@ static u32 qib_7322_iblink_state(u64 ibcs) state = IB_PORT_ARMED; break; case IB_7322_L_STATE_ACTIVE: - /* fall through */ case IB_7322_L_STATE_ACT_DEFER: state = IB_PORT_ACTIVE; break; - default: /* fall through */ + default: + fallthrough; case IB_7322_L_STATE_DOWN: state = IB_PORT_DOWN; break; @@ -6533,7 +6533,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd) "Invalid num_vls %u, using 4 VLs\n", qib_num_cfg_vls); qib_num_cfg_vls = 4; - /* fall through */ + fallthrough; case 4: ppd->vls_supported = IB_VL_VL0_3; break; diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c index 79bb83222e8d..e7789e724f56 100644 --- a/drivers/infiniband/hw/qib/qib_mad.c +++ b/drivers/infiniband/hw/qib/qib_mad.c @@ -433,7 +433,7 @@ static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags) /* Bad mkey not a violation below level 2 */ if (ibp->rvp.mkeyprot < 2) break; - /* fall through */ + fallthrough; case IB_MGMT_METHOD_SET: case IB_MGMT_METHOD_TRAP_REPRESS: if (ibp->rvp.mkey_violations != 0xFFFF) @@ -828,7 +828,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, case IB_PORT_NOP: if (lstate == 0) break; - /* FALLTHROUGH */ + fallthrough; case IB_PORT_DOWN: if (lstate == 0) lstate = QIB_IB_LINKDOWN_ONLY; @@ -1928,7 +1928,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags, ret = IB_MAD_RESULT_SUCCESS; goto bail; } - /* FALLTHROUGH */ + fallthrough; default: smp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply(smp); @@ -1962,7 +1962,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags, ret = IB_MAD_RESULT_SUCCESS; goto bail; } - /* FALLTHROUGH */ + fallthrough; default: smp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply(smp); @@ -2322,7 +2322,7 @@ static int process_cc(struct ib_device *ibdev, int mad_flags, ret = cc_get_congestion_control_table(ccp, ibdev, port); goto bail; - /* FALLTHROUGH */ + fallthrough; default: ccp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply((struct ib_smp *) ccp); @@ -2339,7 +2339,7 @@ static int process_cc(struct ib_device *ibdev, int mad_flags, ret = cc_set_congestion_control_table(ccp, ibdev, port); goto bail; - /* FALLTHROUGH */ + fallthrough; default: ccp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply((struct ib_smp *) ccp); diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index aaf7438258fa..3915e5b4a9bc 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c @@ -83,7 +83,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp, rvt_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } - /* FALLTHROUGH */ + fallthrough; case OP(ATOMIC_ACKNOWLEDGE): /* * We can increment the tail pointer now that the last @@ -92,7 +92,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp, */ if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC) qp->s_tail_ack_queue = 0; - /* FALLTHROUGH */ + fallthrough; case OP(SEND_ONLY): case OP(ACKNOWLEDGE): /* Check for no next entry in the queue. */ @@ -149,7 +149,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp, case OP(RDMA_READ_RESPONSE_FIRST): qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE); - /* FALLTHROUGH */ + fallthrough; case OP(RDMA_READ_RESPONSE_MIDDLE): qp->s_cur_sge = &qp->s_ack_rdma_sge; qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr; @@ -471,10 +471,10 @@ no_flow_control: * See qib_restart_rc(). 
*/ qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu); - /* FALLTHROUGH */ + fallthrough; case OP(SEND_FIRST): qp->s_state = OP(SEND_MIDDLE); - /* FALLTHROUGH */ + fallthrough; case OP(SEND_MIDDLE): bth2 = qp->s_psn++ & QIB_PSN_MASK; ss = &qp->s_sge; @@ -510,10 +510,10 @@ no_flow_control: * See qib_restart_rc(). */ qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu); - /* FALLTHROUGH */ + fallthrough; case OP(RDMA_WRITE_FIRST): qp->s_state = OP(RDMA_WRITE_MIDDLE); - /* FALLTHROUGH */ + fallthrough; case OP(RDMA_WRITE_MIDDLE): bth2 = qp->s_psn++ & QIB_PSN_MASK; ss = &qp->s_sge; @@ -1807,7 +1807,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr, if (!ret) goto rnr_nak; qp->r_rcv_len = 0; - /* FALLTHROUGH */ + fallthrough; case OP(SEND_MIDDLE): case OP(RDMA_WRITE_MIDDLE): send_middle: @@ -1839,7 +1839,7 @@ send_middle: qp->r_rcv_len = 0; if (opcode == OP(SEND_ONLY)) goto no_immediate_data; - /* fall through -- for SEND_ONLY_WITH_IMMEDIATE */ + fallthrough; /* for SEND_ONLY_WITH_IMMEDIATE */ case OP(SEND_LAST_WITH_IMMEDIATE): send_last_imm: wc.ex.imm_data = ohdr->u.imm_data; diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c index 99e11c347130..8f8d61736656 100644 --- a/drivers/infiniband/hw/qib/qib_sdma.c +++ b/drivers/infiniband/hw/qib/qib_sdma.c @@ -763,7 +763,7 @@ void __qib_sdma_process_event(struct qib_pportdata *ppd, * bringing the link up with traffic active on * 7220, e.g. */ ss->go_s99_running = 1; - /* fall through -- and start dma engine */ + fallthrough; /* and start dma engine */ case qib_sdma_event_e10_go_hw_start: /* This reference means the state machine is started */ sdma_get(&ppd->sdma_state); diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c index e17b91e2c22a..554af4273a13 100644 --- a/drivers/infiniband/hw/qib/qib_uc.c +++ b/drivers/infiniband/hw/qib/qib_uc.c @@ -161,7 +161,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags) case OP(SEND_FIRST): qp->s_state = OP(SEND_MIDDLE); - /* FALLTHROUGH */ + fallthrough; case OP(SEND_MIDDLE): len = qp->s_len; if (len > pmtu) { @@ -185,7 +185,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags) case OP(RDMA_WRITE_FIRST): qp->s_state = OP(RDMA_WRITE_MIDDLE); - /* FALLTHROUGH */ + fallthrough; case OP(RDMA_WRITE_MIDDLE): len = qp->s_len; if (len > pmtu) { @@ -351,7 +351,7 @@ send_first: goto no_immediate_data; else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE)) goto send_last_imm; - /* FALLTHROUGH */ + fallthrough; case OP(SEND_MIDDLE): /* Check for invalid length PMTU or posted rwqe len. */ if (unlikely(tlen != (hdrsize + pmtu + 4))) @@ -440,7 +440,7 @@ rdma_first: wc.ex.imm_data = ohdr->u.rc.imm_data; goto rdma_last_imm; } - /* FALLTHROUGH */ + fallthrough; case OP(RDMA_WRITE_MIDDLE): /* Check for invalid length PMTU or posted rwqe len. 
*/ if (unlikely(tlen != (hdrsize + pmtu + 4))) diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 7acf9ba5358a..f6c01bad5a74 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -237,7 +237,7 @@ static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr, case IB_QPT_GSI: if (ib_qib_disable_sma) break; - /* FALLTHROUGH */ + fallthrough; case IB_QPT_UD: qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp); break; diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c index afcc2abcf55c..9a8f2a9507be 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c @@ -238,7 +238,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, ret = -EINVAL; goto err_qp; } - /* fall through */ + fallthrough; case IB_QPT_RC: case IB_QPT_UD: qp = kzalloc(sizeof(*qp), GFP_KERNEL); diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 332a8ba94b81..ee48befc8978 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -1111,7 +1111,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, if (init_attr->port_num == 0 || init_attr->port_num > ibpd->device->phys_port_cnt) return ERR_PTR(-EINVAL); - /* fall through */ + fallthrough; case IB_QPT_UC: case IB_QPT_RC: case IB_QPT_UD: diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c index 4bc88708b355..7b4df0028388 100644 --- a/drivers/infiniband/sw/rxe/rxe_comp.c +++ b/drivers/infiniband/sw/rxe/rxe_comp.c @@ -282,7 +282,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp, if ((syn & AETH_TYPE_MASK) != AETH_ACK) return COMPST_ERROR; - /* fall through */ + fallthrough; /* (IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE doesn't have an AETH) */ case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE: diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c index 08f05ac5f5d5..ecdac3f8fcc9 100644 --- a/drivers/infiniband/sw/rxe/rxe_task.c +++ b/drivers/infiniband/sw/rxe/rxe_task.c @@ -71,7 +71,7 @@ void rxe_do_task(unsigned long data) case TASK_STATE_BUSY: task->state = TASK_STATE_ARMED; - /* fall through */ + fallthrough; case TASK_STATE_ARMED: spin_unlock_irqrestore(&task->state_lock, flags); return; diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index bb61e534e468..658939e5c34a 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -540,7 +540,7 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr, switch (wr->opcode) { case IB_WR_RDMA_WRITE_WITH_IMM: wr->ex.imm_data = ibwr->ex.imm_data; - /* fall through */ + fallthrough; case IB_WR_RDMA_READ: case IB_WR_RDMA_WRITE: wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr; diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c index 1662216be66d..66764f7ef072 100644 --- a/drivers/infiniband/sw/siw/siw_cm.c +++ b/drivers/infiniband/sw/siw/siw_cm.c @@ -1224,12 +1224,10 @@ static void siw_cm_llp_data_ready(struct sock *sk) switch (cep->state) { case SIW_EPSTATE_RDMA_MODE: - /* fall through */ case SIW_EPSTATE_LISTENING: break; case SIW_EPSTATE_AWAIT_MPAREQ: - /* fall through */ case SIW_EPSTATE_AWAIT_MPAREP: siw_cm_queue_work(cep, SIW_CM_WORK_READ_MPAHDR); break; diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c index 
857be5a7d0bd..4bd1f1f84057 100644 --- a/drivers/infiniband/sw/siw/siw_qp_rx.c +++ b/drivers/infiniband/sw/siw/siw_qp_rx.c @@ -1215,7 +1215,7 @@ static int siw_rdmap_complete(struct siw_qp *qp, int error) case RDMAP_SEND_SE: case RDMAP_SEND_SE_INVAL: wqe->rqe.flags |= SIW_WQE_SOLICITED; - /* Fall through */ + fallthrough; case RDMAP_SEND: case RDMAP_SEND_INVAL: @@ -1386,7 +1386,7 @@ int siw_tcp_rx_data(read_descriptor_t *rd_desc, struct sk_buff *skb, * DDP segment. */ qp->rx_fpdu->first_ddp_seg = 0; - /* Fall through */ + fallthrough; case SIW_GET_DATA_START: /* diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c index 9f53aa4feb87..d19d8325588b 100644 --- a/drivers/infiniband/sw/siw/siw_qp_tx.c +++ b/drivers/infiniband/sw/siw/siw_qp_tx.c @@ -1042,7 +1042,7 @@ next_wqe: case SIW_OP_SEND_REMOTE_INV: case SIW_OP_WRITE: siw_wqe_put_mem(wqe, tx_type); - /* Fall through */ + fallthrough; case SIW_OP_INVAL_STAG: case SIW_OP_REG_MR: @@ -1128,7 +1128,7 @@ next_wqe: case SIW_OP_READ: case SIW_OP_READ_LOCAL_INV: siw_wqe_put_mem(wqe, tx_type); - /* Fall through */ + fallthrough; case SIW_OP_INVAL_STAG: case SIW_OP_REG_MR: diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 9bf0fa30df28..7c41fb040f7c 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -512,13 +512,13 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id, return ipoib_cm_req_handler(cm_id, event); case IB_CM_DREQ_RECEIVED: ib_send_cm_drep(cm_id, NULL, 0); - /* Fall through */ + fallthrough; case IB_CM_REJ_RECEIVED: p = cm_id->context; priv = ipoib_priv(p->dev); if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE)) ipoib_warn(priv, "unable to move qp to error state\n"); - /* Fall through */ + fallthrough; default: return 0; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 752581a8627b..ab75b7f745d4 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -502,7 +502,7 @@ static struct net_device *ipoib_get_net_dev_by_params( default: dev_warn_ratelimited(&dev->dev, "duplicate IP address detected\n"); - /* Fall through */ + fallthrough; case 1: return net_dev; } diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 699e075ae1b3..2f3ebc0a75d9 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -711,7 +711,7 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve case RDMA_CM_EVENT_REJECTED: iser_info("Connection rejected: %s\n", rdma_reject_msg(cma_id, event->status)); - /* FALLTHROUGH */ + fallthrough; case RDMA_CM_EVENT_ADDR_ERROR: case RDMA_CM_EVENT_ROUTE_ERROR: case RDMA_CM_EVENT_CONNECT_ERROR: diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 61e2f7fc513d..e86acda3cf8c 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -664,8 +664,8 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) case RDMA_CM_EVENT_ESTABLISHED: isert_connected_handler(cma_id); break; - case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */ - case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */ + case RDMA_CM_EVENT_ADDR_CHANGE: + case RDMA_CM_EVENT_DISCONNECTED: case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */ ret = isert_disconnected_handler(cma_id, event->event); break; @@ 
-684,7 +684,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) case RDMA_CM_EVENT_REJECTED: isert_info("Connection rejected: %s\n", rdma_reject_msg(cma_id, event->status)); - /* fall through */ + fallthrough; case RDMA_CM_EVENT_UNREACHABLE: case RDMA_CM_EVENT_CONNECT_ERROR: ret = isert_connect_error(cma_id); @@ -1470,7 +1470,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err) transport_generic_free_cmd(&cmd->se_cmd, 0); break; } - /* fall through */ + fallthrough; default: iscsit_release_cmd(cmd); break; @@ -1648,7 +1648,7 @@ isert_do_control_comp(struct work_struct *work) switch (cmd->i_state) { case ISTATE_SEND_TASKMGTRSP: iscsit_tmr_post_handler(cmd, cmd->conn); - /* fall through */ + fallthrough; case ISTATE_SEND_REJECT: case ISTATE_SEND_TEXTRSP: cmd->i_state = ISTATE_SENT_STATUS; diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c index 874a8eb7638c..4933085a864a 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c @@ -547,7 +547,6 @@ static void vema_get(struct opa_vnic_vema_port *port, vema_get_mac_entries(port, recvd_mad, rsp_mad); break; case OPA_EM_ATTR_IFACE_UCAST_MACS: - /* fall through */ case OPA_EM_ATTR_IFACE_MCAST_MACS: vema_get_mac_list(port, recvd_mad, rsp_mad, attr_id); break; diff --git a/drivers/input/joystick/fsia6b.c b/drivers/input/joystick/fsia6b.c index e78c4c768990..76ffdec5c183 100644 --- a/drivers/input/joystick/fsia6b.c +++ b/drivers/input/joystick/fsia6b.c @@ -102,12 +102,12 @@ static irqreturn_t fsia6b_serio_irq(struct serio *serio, input_report_key(fsia6b->dev, sw_id++, sw_state == 0); - /* fall-through */ + fallthrough; case '2': input_report_key(fsia6b->dev, sw_id++, sw_state == 1); - /* fall-through */ + fallthrough; case '1': input_report_key(fsia6b->dev, sw_id++, diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c index 88df68cc4ac6..d37645e496ff 100644 --- a/drivers/input/joystick/gamecon.c +++ b/drivers/input/joystick/gamecon.c @@ -885,7 +885,6 @@ static int gc_setup_pad(struct gc *gc, int idx, int pad_type) case GC_MULTI: input_set_capability(input_dev, EV_KEY, BTN_TRIGGER); - /* fall through */ break; case GC_PSX: diff --git a/drivers/input/tablet/wacom_serial4.c b/drivers/input/tablet/wacom_serial4.c index 959c1d82aa66..1cedb45ba497 100644 --- a/drivers/input/tablet/wacom_serial4.c +++ b/drivers/input/tablet/wacom_serial4.c @@ -213,7 +213,7 @@ static void wacom_handle_model_response(struct wacom *wacom) case 0x3731: /* PL-710 */ wacom->res_x = 2540; wacom->res_y = 2540; - /* fall through */ + fallthrough; case 0x3535: /* PL-550 */ case 0x3830: /* PL-800 */ wacom->extra_z_bits = 2; diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index 6b71b0aff115..98f17fa3a892 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c @@ -477,7 +477,7 @@ static int mxt_lookup_bootloader_address(struct mxt_data *data, bool retry) bootloader = appmode - 0x24; break; } - /* Fall through - for normal case */ + fallthrough; /* for normal case */ case 0x4c: case 0x4d: case 0x5a: diff --git a/drivers/input/touchscreen/wm831x-ts.c b/drivers/input/touchscreen/wm831x-ts.c index 607d1aeb595d..bb1699e0d3c7 100644 --- a/drivers/input/touchscreen/wm831x-ts.c +++ b/drivers/input/touchscreen/wm831x-ts.c @@ -290,7 +290,7 @@ static int wm831x_ts_probe(struct platform_device *pdev) default: 
dev_err(&pdev->dev, "Unsupported ISEL setting: %d\n", pdata->isel); - /* Fall through */ + fallthrough; case 200: case 0: wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_2, diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 958050c213f9..c652f16eb702 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -2258,7 +2258,7 @@ static void iommu_enable_ga(struct amd_iommu *iommu) switch (amd_iommu_guest_ir) { case AMD_IOMMU_GUEST_IR_VAPIC: iommu_feature_enable(iommu, CONTROL_GAM_EN); - /* Fall through */ + fallthrough; case AMD_IOMMU_GUEST_IR_LEGACY_GA: iommu_feature_enable(iommu, CONTROL_GA_EN); iommu->irte_ops = &irte_128_ops; diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 7196207be7ea..c192544e874b 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -903,7 +903,7 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) break; case CMDQ_OP_CFGI_CD: cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SSID, ent->cfgi.ssid); - /* Fallthrough */ + fallthrough; case CMDQ_OP_CFGI_STE: cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid); cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_LEAF, ent->cfgi.leaf); @@ -936,7 +936,7 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) break; case CMDQ_OP_TLBI_NH_ASID: cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid); - /* Fallthrough */ + fallthrough; case CMDQ_OP_TLBI_S12_VMALL: cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid); break; @@ -1036,7 +1036,6 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu) */ return; case CMDQ_ERR_CERROR_ILL_IDX: - /* Fallthrough */ default: break; } @@ -3758,7 +3757,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) switch (FIELD_GET(IDR0_STALL_MODEL, reg)) { case IDR0_STALL_MODEL_FORCE: smmu->features |= ARM_SMMU_FEAT_STALL_FORCE; - /* Fallthrough */ + fallthrough; case IDR0_STALL_MODEL_STALL: smmu->features |= ARM_SMMU_FEAT_STALLS; } @@ -3778,7 +3777,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) switch (FIELD_GET(IDR0_TTF, reg)) { case IDR0_TTF_AARCH32_64: smmu->ias = 40; - /* Fallthrough */ + fallthrough; case IDR0_TTF_AARCH64: break; default: @@ -3875,7 +3874,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) default: dev_info(smmu->dev, "unknown output address size. 
Truncating to 48-bit\n"); - /* Fallthrough */ + fallthrough; case IDR5_OAS_48_BIT: smmu->oas = 48; } diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index e9864e52b0e9..f8177c59d229 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -5070,7 +5070,6 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) switch (type) { case IOMMU_DOMAIN_DMA: - /* fallthrough */ case IOMMU_DOMAIN_UNMANAGED: dmar_domain = alloc_domain(0); if (!dmar_domain) { diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index b4da396cce60..2bfdd5734844 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -440,7 +440,7 @@ static int viommu_add_resv_mem(struct viommu_endpoint *vdev, default: dev_warn(vdev->dev, "unknown resv mem subtype 0x%x\n", mem->subtype); - /* Fall-through */ + fallthrough; case VIRTIO_IOMMU_RESV_MEM_T_RESERVED: region = iommu_alloc_resv_region(start, size, 0, IOMMU_RESV_RESERVED); diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 95f097448f97..548de7538632 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -2737,7 +2737,7 @@ static bool allocate_vpe_l2_table(int cpu, u32 id) switch (gpsz) { default: WARN_ON(1); - /* fall through */ + fallthrough; case GIC_PAGE_SIZE_4K: psz = SZ_4K; break; @@ -2832,7 +2832,7 @@ static int allocate_vpe_l1_table(void) switch (gpsz) { default: gpsz = GIC_PAGE_SIZE_4K; - /* fall through */ + fallthrough; case GIC_PAGE_SIZE_4K: psz = SZ_4K; break; diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 324f280ff606..850842f27bee 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -965,10 +965,10 @@ static void gic_cpu_sys_reg_init(void) case 7: write_gicreg(0, ICC_AP0R3_EL1); write_gicreg(0, ICC_AP0R2_EL1); - /* Fall through */ + fallthrough; case 6: write_gicreg(0, ICC_AP0R1_EL1); - /* Fall through */ + fallthrough; case 5: case 4: write_gicreg(0, ICC_AP0R0_EL1); @@ -982,10 +982,10 @@ static void gic_cpu_sys_reg_init(void) case 7: write_gicreg(0, ICC_AP1R3_EL1); write_gicreg(0, ICC_AP1R2_EL1); - /* Fall through */ + fallthrough; case 6: write_gicreg(0, ICC_AP1R1_EL1); - /* Fall through */ + fallthrough; case 5: case 4: write_gicreg(0, ICC_AP1R0_EL1); diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c index 4f74c15c4755..7031ef44de4f 100644 --- a/drivers/irqchip/irq-imx-gpcv2.c +++ b/drivers/irqchip/irq-imx-gpcv2.c @@ -259,7 +259,7 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node, case 4: writel_relaxed(~0, reg + GPC_IMR1_CORE2); writel_relaxed(~0, reg + GPC_IMR1_CORE3); - /* fall through */ + fallthrough; case 2: writel_relaxed(~0, reg + GPC_IMR1_CORE0); writel_relaxed(~0, reg + GPC_IMR1_CORE1); diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index aacfa012c082..215885962bb0 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -480,7 +480,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq, case GIC_LOCAL_INT_TIMER: /* CONFIG_MIPS_CMP workaround (see __gic_init) */ map = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin; - /* fall-through */ + fallthrough; case GIC_LOCAL_INT_PERFCTR: case GIC_LOCAL_INT_FDC: /* diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c index bc235db8a4c5..e46036374227 100644 --- a/drivers/irqchip/irq-vic.c +++ b/drivers/irqchip/irq-vic.c @@ -455,7 +455,7 @@ static void __init 
__vic_init(void __iomem *base, int parent_irq, int irq_start, return; default: printk(KERN_WARNING "VIC: unknown vendor, continuing anyways\n"); - /* fall through */ + fallthrough; case AMBA_VENDOR_ARM: break; } diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c index ecc1ef6c386d..f68569bfef7a 100644 --- a/drivers/isdn/hardware/mISDN/avmfritz.c +++ b/drivers/isdn/hardware/mISDN/avmfritz.c @@ -348,7 +348,7 @@ modehdlc(struct bchannel *bch, int protocol) switch (protocol) { case -1: /* used for init */ bch->state = -1; - /* fall through */ + fallthrough; case ISDN_P_NONE: if (bch->state == ISDN_P_NONE) break; diff --git a/drivers/isdn/hardware/mISDN/hfc_multi_8xx.h b/drivers/isdn/hardware/mISDN/hfc_multi_8xx.h index b0d772340e16..448ded8f9d24 100644 --- a/drivers/isdn/hardware/mISDN/hfc_multi_8xx.h +++ b/drivers/isdn/hardware/mISDN/hfc_multi_8xx.h @@ -121,7 +121,6 @@ setup_embedded(struct hfc_multi *hc, struct hm_map *m) case HFC_IO_MODE_EMBSD: test_and_set_bit(HFC_CHIP_EMBSD, &hc->chip); hc->slots = 128; /* required */ - /* fall through */ hc->HFC_outb = HFC_outb_embsd; hc->HFC_inb = HFC_inb_embsd; hc->HFC_inw = HFC_inw_embsd; diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index 904a4f4c5ff9..56bd2e9db6ed 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -1280,7 +1280,7 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol) case (-1): /* used for init */ bch->state = -1; bch->nr = bc; - /* fall through */ + fallthrough; case (ISDN_P_NONE): if (bch->state == ISDN_P_NONE) return 0; diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c index 4274906f8654..70061991915a 100644 --- a/drivers/isdn/hardware/mISDN/hfcsusb.c +++ b/drivers/isdn/hardware/mISDN/hfcsusb.c @@ -695,7 +695,7 @@ hfcsusb_setup_bch(struct bchannel *bch, int protocol) switch (protocol) { case (-1): /* used for init */ bch->state = -1; - /* fall through */ + fallthrough; case (ISDN_P_NONE): if (bch->state == ISDN_P_NONE) return 0; /* already in idle state */ diff --git a/drivers/isdn/hardware/mISDN/isdnhdlc.c b/drivers/isdn/hardware/mISDN/isdnhdlc.c index 9fea16ed3dd8..985367e6711d 100644 --- a/drivers/isdn/hardware/mISDN/isdnhdlc.c +++ b/drivers/isdn/hardware/mISDN/isdnhdlc.c @@ -397,7 +397,7 @@ int isdnhdlc_encode(struct isdnhdlc_vars *hdlc, const u8 *src, u16 slen, dsize--; break; } - /* fall through */ + fallthrough; case HDLC_SENDFLAG_ONE: if (hdlc->bit_shift == 8) { hdlc->cbin = hdlc->ffvalue >> diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c index f4cb29766888..a16c7a2a7f3d 100644 --- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c +++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c @@ -875,7 +875,7 @@ release_card(struct inf_hw *card) { release_card(card->sc[i]); card->sc[i] = NULL; } - /* fall through */ + fallthrough; default: pci_disable_device(card->pdev); pci_set_drvdata(card->pdev, NULL); diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c index 11e8c7d8b6e8..56943409b60d 100644 --- a/drivers/isdn/hardware/mISDN/mISDNisar.c +++ b/drivers/isdn/hardware/mISDN/mISDNisar.c @@ -957,7 +957,7 @@ isar_pump_statev_fax(struct isar_ch *ch, u8 devt) { break; case PCTRL_CMD_FTM: p1 = 2; - /* fall through */ + fallthrough; case PCTRL_CMD_FTH: send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL, PCTRL_CMD_SILON, 1, &p1); @@ -1163,7 +1163,7 @@ setup_pump(struct isar_ch *ch) { 
send_mbox(ch->is, dps | ISAR_HIS_PUMPCFG, PMOD_DTMF, 1, param); } - /* fall through */ + fallthrough; case ISDN_P_B_MODEM_ASYNC: ctrl = PMOD_DATAMODEM; if (test_bit(FLG_ORIGIN, &ch->bch.Flags)) { @@ -1255,7 +1255,7 @@ setup_iom2(struct isar_ch *ch) { case ISDN_P_B_MODEM_ASYNC: case ISDN_P_B_T30_FAX: cmsb |= IOM_CTRL_RCV; - /* fall through */ + fallthrough; case ISDN_P_B_L2DTMF: if (test_bit(FLG_DTMFSEND, &ch->bch.Flags)) cmsb |= IOM_CTRL_RCV; @@ -1548,7 +1548,7 @@ isar_l2l1(struct mISDNchannel *ch, struct sk_buff *skb) ich->is->name, hh->id); ret = -EINVAL; } - /* fall through */ + fallthrough; default: pr_info("%s: %s unknown prim(%x,%x)\n", ich->is->name, __func__, hh->prim, hh->id); diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c index 27aa32914425..c2f76f398613 100644 --- a/drivers/isdn/mISDN/stack.c +++ b/drivers/isdn/mISDN/stack.c @@ -528,7 +528,7 @@ create_l2entity(struct mISDNdevice *dev, struct mISDNchannel *ch, rq.protocol = ISDN_P_NT_S0; if (dev->Dprotocols & (1 << ISDN_P_NT_E1)) rq.protocol = ISDN_P_NT_E1; - /* fall through */ + fallthrough; case ISDN_P_LAPD_TE: ch->recv = mISDN_queue_message; ch->peer = &dev->D.st->own; diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c index b413bafe93fd..97c68731406b 100644 --- a/drivers/lightnvm/pblk-core.c +++ b/drivers/lightnvm/pblk-core.c @@ -301,7 +301,7 @@ void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type) switch (type) { case PBLK_WRITE: kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap); - /* fall through */ + fallthrough; case PBLK_WRITE_INT: pool = &pblk->w_rq_pool; break; diff --git a/drivers/macintosh/adbhid.c b/drivers/macintosh/adbhid.c index 75482eeab2c4..994ba5cb3678 100644 --- a/drivers/macintosh/adbhid.c +++ b/drivers/macintosh/adbhid.c @@ -881,7 +881,7 @@ adbhid_input_register(int id, int default_id, int original_handler_id, } if (hid->name[0]) break; - /* else fall through */ + fallthrough; default: pr_info("Trying to register unknown ADB device to input layer.\n"); diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index 23f1f41c8602..96684581a25d 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c @@ -852,7 +852,7 @@ int smu_queue_i2c(struct smu_i2c_cmd *cmd) break; case SMU_I2C_TRANSFER_COMBINED: cmd->info.devaddr &= 0xfe; - /* fall through */ + fallthrough; case SMU_I2C_TRANSFER_STDSUB: if (cmd->info.sublen > 3) return -EINVAL; diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 77fbfd52edcf..c1227bdb57e7 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -608,7 +608,7 @@ static void do_journal_discard(struct cache *ca) ca->sb.njournal_buckets; atomic_set(&ja->discard_in_flight, DISCARD_READY); - /* fallthrough */ + fallthrough; case DISCARD_READY: if (ja->discard_idx == ja->last_idx) diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c index 62fb917f7a4f..ae380bc3992e 100644 --- a/drivers/md/bcache/util.c +++ b/drivers/md/bcache/util.c @@ -33,27 +33,27 @@ int bch_ ## name ## _h(const char *cp, type *res) \ case 'y': \ case 'z': \ u++; \ - /* fall through */ \ + fallthrough; \ case 'e': \ u++; \ - /* fall through */ \ + fallthrough; \ case 'p': \ u++; \ - /* fall through */ \ + fallthrough; \ case 't': \ u++; \ - /* fall through */ \ + fallthrough; \ case 'g': \ u++; \ - /* fall through */ \ + fallthrough; \ case 'm': \ u++; \ - /* fall through */ \ + fallthrough; \ case 'k': \ u++; \ if (e++ == cp) \ return -EINVAL; \ - /* fall through */ \ + fallthrough; \ 
case '\n': \ case '\0': \ if (*e == '\n') \ diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 148960721254..238cd80826a6 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1552,7 +1552,7 @@ static blk_status_t crypt_convert(struct crypt_config *cc, case -EBUSY: wait_for_completion(&ctx->restart); reinit_completion(&ctx->restart); - /* fall through */ + fallthrough; /* * The request is queued and processed asynchronously, * completion function kcryptd_async_done() will be called. diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 53645a6f474c..e3283d35c7fd 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -1554,7 +1554,7 @@ static void pg_init_done(void *data, int errors) case SCSI_DH_RETRY: /* Wait before retrying. */ delay_retry = true; - /* fall through */ + fallthrough; case SCSI_DH_IMM_RETRY: case SCSI_DH_RES_TEMP_UNAVAIL: if (pg_init_limit_reached(m, pgpath)) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 32fa6499739f..fb0255d25e4b 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1021,7 +1021,7 @@ static void clone_endio(struct bio *bio) switch (r) { case DM_ENDIO_REQUEUE: error = BLK_STS_DM_REQUEUE; - /*FALLTHRU*/ + fallthrough; case DM_ENDIO_DONE: break; case DM_ENDIO_INCOMPLETE: diff --git a/drivers/md/md-autodetect.c b/drivers/md/md-autodetect.c index 6bbec89976a7..2cf973722f59 100644 --- a/drivers/md/md-autodetect.c +++ b/drivers/md/md-autodetect.c @@ -102,10 +102,10 @@ static int __init md_setup(char *str) pername = "raid0"; break; } - /* FALL THROUGH */ + fallthrough; case 1: /* the first device is numeric */ str = str1; - /* FALL THROUGH */ + fallthrough; case 0: md_setup_args[ent].level = LEVEL_NONE; pername="super-block"; diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index d61b524ae440..b10c51988c8e 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -1433,7 +1433,7 @@ int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long s case 0: md_bitmap_file_set_bit(bitmap, offset); md_bitmap_count_page(&bitmap->counts, offset, 1); - /* fall through */ + fallthrough; case 1: *bmc = 2; } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index ef0fd4830803..8b743657b957 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -4083,7 +4083,7 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, break; } dev = &sh->dev[s->failed_num[0]]; - /* fall through */ + fallthrough; case check_state_compute_result: sh->check_state = check_state_idle; if (!dev) @@ -4214,7 +4214,7 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, /* we have 2-disk failure */ BUG_ON(s->failed != 2); - /* fall through */ + fallthrough; case check_state_compute_result: sh->check_state = check_state_idle; diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c index 630a75e0eeb1..7607b516a7c4 100644 --- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c +++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c @@ -210,7 +210,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc) tpg->vdownsampling[1] = 1; tpg->hdownsampling[1] = 1; tpg->planes = 2; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_RGB332: case V4L2_PIX_FMT_RGB565: case V4L2_PIX_FMT_RGB565X: @@ -271,7 +271,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc) case V4L2_PIX_FMT_YUV420M: case V4L2_PIX_FMT_YVU420M: tpg->buffers = 3; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_YUV420: case 
V4L2_PIX_FMT_YVU420: tpg->vdownsampling[1] = 2; @@ -284,7 +284,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc) case V4L2_PIX_FMT_YUV422M: case V4L2_PIX_FMT_YVU422M: tpg->buffers = 3; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_YUV422P: tpg->vdownsampling[1] = 1; tpg->vdownsampling[2] = 1; @@ -296,7 +296,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc) case V4L2_PIX_FMT_NV16M: case V4L2_PIX_FMT_NV61M: tpg->buffers = 2; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: tpg->vdownsampling[1] = 1; @@ -308,7 +308,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc) case V4L2_PIX_FMT_NV12M: case V4L2_PIX_FMT_NV21M: tpg->buffers = 2; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: tpg->vdownsampling[1] = 2; @@ -1275,7 +1275,7 @@ static void gen_twopix(struct tpg_data *tpg, case V4L2_PIX_FMT_RGB444: case V4L2_PIX_FMT_XRGB444: alpha = 0; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_YUV444: case V4L2_PIX_FMT_ARGB444: buf[0][offset] = (g_u_s << 4) | b_v; @@ -1283,21 +1283,21 @@ static void gen_twopix(struct tpg_data *tpg, break; case V4L2_PIX_FMT_RGBX444: alpha = 0; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_RGBA444: buf[0][offset] = (b_v << 4) | (alpha >> 4); buf[0][offset + 1] = (r_y_h << 4) | g_u_s; break; case V4L2_PIX_FMT_XBGR444: alpha = 0; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_ABGR444: buf[0][offset] = (g_u_s << 4) | r_y_h; buf[0][offset + 1] = (alpha & 0xf0) | b_v; break; case V4L2_PIX_FMT_BGRX444: alpha = 0; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_BGRA444: buf[0][offset] = (r_y_h << 4) | (alpha >> 4); buf[0][offset + 1] = (b_v << 4) | g_u_s; @@ -1305,7 +1305,7 @@ static void gen_twopix(struct tpg_data *tpg, case V4L2_PIX_FMT_RGB555: case V4L2_PIX_FMT_XRGB555: alpha = 0; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_YUV555: case V4L2_PIX_FMT_ARGB555: buf[0][offset] = (g_u_s << 5) | b_v; @@ -1314,7 +1314,7 @@ static void gen_twopix(struct tpg_data *tpg, break; case V4L2_PIX_FMT_RGBX555: alpha = 0; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_RGBA555: buf[0][offset] = (g_u_s << 6) | (b_v << 1) | ((alpha & 0x80) >> 7); @@ -1322,7 +1322,7 @@ static void gen_twopix(struct tpg_data *tpg, break; case V4L2_PIX_FMT_XBGR555: alpha = 0; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_ABGR555: buf[0][offset] = (g_u_s << 5) | r_y_h; buf[0][offset + 1] = (alpha & 0x80) | (b_v << 2) @@ -1330,7 +1330,7 @@ static void gen_twopix(struct tpg_data *tpg, break; case V4L2_PIX_FMT_BGRX555: alpha = 0; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_BGRA555: buf[0][offset] = (g_u_s << 6) | (r_y_h << 1) | ((alpha & 0x80) >> 7); @@ -1339,7 +1339,7 @@ static void gen_twopix(struct tpg_data *tpg, case V4L2_PIX_FMT_RGB555X: case V4L2_PIX_FMT_XRGB555X: alpha = 0; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_ARGB555X: buf[0][offset] = (alpha & 0x80) | (r_y_h << 2) | (g_u_s >> 3); buf[0][offset + 1] = (g_u_s << 5) | b_v; @@ -1366,7 +1366,7 @@ static void gen_twopix(struct tpg_data *tpg, case V4L2_PIX_FMT_HSV32: case V4L2_PIX_FMT_XYUV32: alpha = 0; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_YUV32: case V4L2_PIX_FMT_ARGB32: case V4L2_PIX_FMT_AYUV32: @@ -1377,7 +1377,7 @@ static void gen_twopix(struct tpg_data *tpg, break; case V4L2_PIX_FMT_RGBX32: alpha = 0; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_RGBA32: buf[0][offset] = r_y_h; buf[0][offset + 1] = g_u_s; @@ -1388,7 +1388,7 @@ static void gen_twopix(struct tpg_data 
*tpg, case V4L2_PIX_FMT_XBGR32: case V4L2_PIX_FMT_VUYX32: alpha = 0; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_ABGR32: case V4L2_PIX_FMT_VUYA32: buf[0][offset] = b_v; @@ -1398,7 +1398,7 @@ static void gen_twopix(struct tpg_data *tpg, break; case V4L2_PIX_FMT_BGRX32: alpha = 0; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_BGRA32: buf[0][offset] = alpha; buf[0][offset + 1] = b_v; diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c index 630509ecee20..89620da983ba 100644 --- a/drivers/media/dvb-core/dvb_net.c +++ b/drivers/media/dvb-core/dvb_net.c @@ -546,7 +546,7 @@ static int dvb_net_ule_new_payload(struct dvb_net_ule_handle *h) h->priv->ule_sndu_type_1 = 1; h->ts_remain -= 1; h->from_where += 1; - /* fallthrough */ + fallthrough; case 0: h->new_ts = 1; h->ts += TS_SZ; diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c index e92542b92d34..da0ff7b44da4 100644 --- a/drivers/media/dvb-frontends/bcm3510.c +++ b/drivers/media/dvb-frontends/bcm3510.c @@ -773,7 +773,7 @@ static int bcm3510_init(struct dvb_frontend* fe) deb_info("attempting to download firmware\n"); if ((ret = bcm3510_init_cold(st)) < 0) return ret; - /* fall-through */ + fallthrough; case JDEC_EEPROM_LOAD_WAIT: deb_info("firmware is loaded\n"); bcm3510_check_firmware_version(st); diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c index bc374750529b..08a85831e917 100644 --- a/drivers/media/dvb-frontends/dib0090.c +++ b/drivers/media/dvb-frontends/dib0090.c @@ -1693,7 +1693,7 @@ static int dib0090_dc_offset_calibration(struct dib0090_state *state, enum front if (state->identity.p1g) state->dc = dc_p1g_table; - /* fall through */ + fallthrough; case CT_TUNER_STEP_0: dprintk("Start/continue DC calibration for %s path\n", (state->dc->i == 1) ? 
"I" : "Q"); diff --git a/drivers/media/dvb-frontends/dib3000mb.c b/drivers/media/dvb-frontends/dib3000mb.c index 0f0480d8576d..a6c2fc4586eb 100644 --- a/drivers/media/dvb-frontends/dib3000mb.c +++ b/drivers/media/dvb-frontends/dib3000mb.c @@ -224,7 +224,7 @@ static int dib3000mb_set_frontend(struct dvb_frontend *fe, int tuner) switch (c->hierarchy) { case HIERARCHY_NONE: deb_setf("hierarchy: none\n"); - /* fall through */ + fallthrough; case HIERARCHY_1: deb_setf("hierarchy: alpha=1\n"); wr(DIB3000MB_REG_VIT_ALPHA, DIB3000_ALPHA_1); diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c index 0a7790c4bad3..55bee50aa871 100644 --- a/drivers/media/dvb-frontends/dib7000p.c +++ b/drivers/media/dvb-frontends/dib7000p.c @@ -276,7 +276,7 @@ static int dib7000p_set_power_mode(struct dib7000p_state *state, enum dib7000p_p if (state->version != SOC7090) reg_1280 &= ~((1 << 11)); reg_1280 &= ~(1 << 6); - /* fall-through */ + fallthrough; case DIB7000P_POWER_INTERFACE_ONLY: /* just leave power on the control-interfaces: GPIO and (I2C or SDIO) */ /* TODO power up either SDIO or I2C */ diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c index 5de016412c42..237b9d04c076 100644 --- a/drivers/media/dvb-frontends/drx39xyj/drxj.c +++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c @@ -2306,7 +2306,7 @@ hi_command(struct i2c_device_addr *dev_addr, const struct drxj_hi_cmd *cmd, u16 pr_err("error %d\n", rc); goto rw_error; } - /* fallthrough */ + fallthrough; case SIO_HI_RA_RAM_CMD_BRDCTRL: rc = drxj_dap_write_reg16(dev_addr, SIO_HI_RA_RAM_PAR_2__A, cmd->param2, 0); if (rc != 0) { @@ -2318,7 +2318,7 @@ hi_command(struct i2c_device_addr *dev_addr, const struct drxj_hi_cmd *cmd, u16 pr_err("error %d\n", rc); goto rw_error; } - /* fallthrough */ + fallthrough; case SIO_HI_RA_RAM_CMD_NULL: /* No parameters */ break; @@ -2841,7 +2841,7 @@ ctrl_set_cfg_mpeg_output(struct drx_demod_instance *demod, struct drx_cfg_mpeg_o /* coef = 188/204 */ max_bit_rate = (ext_attr->curr_symbol_rate / 8) * nr_bits * 188; - /* fall-through - as b/c Annex A/C need following settings */ + fallthrough; /* as b/c Annex A/C need following settings */ case DRX_STANDARD_ITU_B: rc = drxj_dap_write_reg16(dev_addr, FEC_OC_FCT_USAGE__A, FEC_OC_FCT_USAGE__PRE, 0); if (rc != 0) { @@ -3555,8 +3555,8 @@ static int ctrl_set_uio_cfg(struct drx_demod_instance *demod, struct drxuio_cfg if (!ext_attr->has_smatx) return -EIO; switch (uio_cfg->mode) { - case DRX_UIO_MODE_FIRMWARE_SMA: /* fall through */ - case DRX_UIO_MODE_FIRMWARE_SAW: /* fall through */ + case DRX_UIO_MODE_FIRMWARE_SMA: + case DRX_UIO_MODE_FIRMWARE_SAW: case DRX_UIO_MODE_READWRITE: ext_attr->uio_sma_tx_mode = uio_cfg->mode; break; @@ -3579,7 +3579,7 @@ static int ctrl_set_uio_cfg(struct drx_demod_instance *demod, struct drxuio_cfg if (!ext_attr->has_smarx) return -EIO; switch (uio_cfg->mode) { - case DRX_UIO_MODE_FIRMWARE0: /* fall through */ + case DRX_UIO_MODE_FIRMWARE0: case DRX_UIO_MODE_READWRITE: ext_attr->uio_sma_rx_mode = uio_cfg->mode; break; @@ -3603,7 +3603,7 @@ static int ctrl_set_uio_cfg(struct drx_demod_instance *demod, struct drxuio_cfg if (!ext_attr->has_gpio) return -EIO; switch (uio_cfg->mode) { - case DRX_UIO_MODE_FIRMWARE0: /* fall through */ + case DRX_UIO_MODE_FIRMWARE0: case DRX_UIO_MODE_READWRITE: ext_attr->uio_gpio_mode = uio_cfg->mode; break; @@ -3639,7 +3639,7 @@ static int ctrl_set_uio_cfg(struct drx_demod_instance *demod, struct drxuio_cfg } ext_attr->uio_irqn_mode = 
uio_cfg->mode; break; - case DRX_UIO_MODE_FIRMWARE0: /* fall through */ + case DRX_UIO_MODE_FIRMWARE0: default: return -EINVAL; break; @@ -4004,31 +4004,36 @@ static int scu_command(struct i2c_device_addr *dev_addr, struct drxjscu_cmd *cmd if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; - } /* fallthrough */ + } + fallthrough; case 4: rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_PARAM_3__A, *(cmd->parameter + 3), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; - } /* fallthrough */ + } + fallthrough; case 3: rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_PARAM_2__A, *(cmd->parameter + 2), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; - } /* fallthrough */ + } + fallthrough; case 2: rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_PARAM_1__A, *(cmd->parameter + 1), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; - } /* fallthrough */ + } + fallthrough; case 1: rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_PARAM_0__A, *(cmd->parameter + 0), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; - } /* fallthrough */ + } + fallthrough; case 0: /* do nothing */ break; @@ -4068,25 +4073,29 @@ static int scu_command(struct i2c_device_addr *dev_addr, struct drxjscu_cmd *cmd if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; - } /* fallthrough */ + } + fallthrough; case 3: rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_PARAM_2__A, cmd->result + 2, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; - } /* fallthrough */ + } + fallthrough; case 2: rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_PARAM_1__A, cmd->result + 1, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; - } /* fallthrough */ + } + fallthrough; case 1: rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_PARAM_0__A, cmd->result + 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; - } /* fallthrough */ + } + fallthrough; case 0: /* do nothing */ break; @@ -4791,7 +4800,7 @@ set_frequency(struct drx_demod_instance *demod, Sound carrier is already 3Mhz above centre frequency due to tuner setting so now add an extra shift of 1MHz... 
*/ fm_frequency_shift = 1000; - /*fall through */ + fallthrough; case DRX_STANDARD_ITU_B: case DRX_STANDARD_NTSC: case DRX_STANDARD_PAL_SECAM_BG: @@ -10475,11 +10484,11 @@ ctrl_set_channel(struct drx_demod_instance *demod, struct drx_channel *channel) (standard == DRX_STANDARD_NTSC)) { switch (channel->bandwidth) { case DRX_BANDWIDTH_6MHZ: - case DRX_BANDWIDTH_UNKNOWN: /* fall through */ + case DRX_BANDWIDTH_UNKNOWN: channel->bandwidth = DRX_BANDWIDTH_6MHZ; break; - case DRX_BANDWIDTH_8MHZ: /* fall through */ - case DRX_BANDWIDTH_7MHZ: /* fall through */ + case DRX_BANDWIDTH_8MHZ: + case DRX_BANDWIDTH_7MHZ: default: return -EINVAL; } @@ -10511,10 +10520,10 @@ ctrl_set_channel(struct drx_demod_instance *demod, struct drx_channel *channel) } switch (channel->constellation) { - case DRX_CONSTELLATION_QAM16: /* fall through */ - case DRX_CONSTELLATION_QAM32: /* fall through */ - case DRX_CONSTELLATION_QAM64: /* fall through */ - case DRX_CONSTELLATION_QAM128: /* fall through */ + case DRX_CONSTELLATION_QAM16: + case DRX_CONSTELLATION_QAM32: + case DRX_CONSTELLATION_QAM64: + case DRX_CONSTELLATION_QAM128: case DRX_CONSTELLATION_QAM256: bandwidth_temp = channel->symbolrate * bw_rolloff_factor; bandwidth = bandwidth_temp / 100; @@ -10628,8 +10637,8 @@ ctrl_set_channel(struct drx_demod_instance *demod, struct drx_channel *channel) } break; #ifndef DRXJ_VSB_ONLY - case DRX_STANDARD_ITU_A: /* fallthrough */ - case DRX_STANDARD_ITU_B: /* fallthrough */ + case DRX_STANDARD_ITU_A: + case DRX_STANDARD_ITU_B: case DRX_STANDARD_ITU_C: rc = set_qam_channel(demod, channel, tuner_freq_offset); if (rc != 0) { @@ -10820,7 +10829,7 @@ ctrl_lock_status(struct drx_demod_instance *demod, enum drx_lock_status *lock_st SCU_RAM_COMMAND_CMD_DEMOD_GET_LOCK; break; #endif - case DRX_STANDARD_UNKNOWN: /* fallthrough */ + case DRX_STANDARD_UNKNOWN: default: return -EIO; } @@ -10888,8 +10897,8 @@ ctrl_set_standard(struct drx_demod_instance *demod, enum drx_standard *standard) */ switch (prev_standard) { #ifndef DRXJ_VSB_ONLY - case DRX_STANDARD_ITU_A: /* fallthrough */ - case DRX_STANDARD_ITU_B: /* fallthrough */ + case DRX_STANDARD_ITU_A: + case DRX_STANDARD_ITU_B: case DRX_STANDARD_ITU_C: rc = power_down_qam(demod, false); if (rc != 0) { @@ -10908,7 +10917,7 @@ ctrl_set_standard(struct drx_demod_instance *demod, enum drx_standard *standard) case DRX_STANDARD_UNKNOWN: /* Do nothing */ break; - case DRX_STANDARD_AUTO: /* fallthrough */ + case DRX_STANDARD_AUTO: default: return -EINVAL; } @@ -10921,8 +10930,8 @@ ctrl_set_standard(struct drx_demod_instance *demod, enum drx_standard *standard) switch (*standard) { #ifndef DRXJ_VSB_ONLY - case DRX_STANDARD_ITU_A: /* fallthrough */ - case DRX_STANDARD_ITU_B: /* fallthrough */ + case DRX_STANDARD_ITU_A: + case DRX_STANDARD_ITU_B: case DRX_STANDARD_ITU_C: do { u16 dummy; @@ -11111,12 +11120,12 @@ ctrl_power_mode(struct drx_demod_instance *demod, enum drx_power_mode *mode) goto rw_error; } break; - case DRX_STANDARD_PAL_SECAM_BG: /* fallthrough */ - case DRX_STANDARD_PAL_SECAM_DK: /* fallthrough */ - case DRX_STANDARD_PAL_SECAM_I: /* fallthrough */ - case DRX_STANDARD_PAL_SECAM_L: /* fallthrough */ - case DRX_STANDARD_PAL_SECAM_LP: /* fallthrough */ - case DRX_STANDARD_NTSC: /* fallthrough */ + case DRX_STANDARD_PAL_SECAM_BG: + case DRX_STANDARD_PAL_SECAM_DK: + case DRX_STANDARD_PAL_SECAM_I: + case DRX_STANDARD_PAL_SECAM_L: + case DRX_STANDARD_PAL_SECAM_LP: + case DRX_STANDARD_NTSC: case DRX_STANDARD_FM: rc = power_down_atv(demod, ext_attr->standard, true); if (rc != 0) { @@ 
-11127,7 +11136,7 @@ ctrl_power_mode(struct drx_demod_instance *demod, enum drx_power_mode *mode) case DRX_STANDARD_UNKNOWN: /* Do nothing */ break; - case DRX_STANDARD_AUTO: /* fallthrough */ + case DRX_STANDARD_AUTO: default: return -EIO; } @@ -11220,8 +11229,8 @@ ctrl_set_cfg_pre_saw(struct drx_demod_instance *demod, struct drxj_cfg_pre_saw * ext_attr->vsb_pre_saw_cfg = *pre_saw; break; #ifndef DRXJ_VSB_ONLY - case DRX_STANDARD_ITU_A: /* fallthrough */ - case DRX_STANDARD_ITU_B: /* fallthrough */ + case DRX_STANDARD_ITU_A: + case DRX_STANDARD_ITU_B: case DRX_STANDARD_ITU_C: ext_attr->qam_pre_saw_cfg = *pre_saw; break; @@ -11264,10 +11273,10 @@ ctrl_set_cfg_afe_gain(struct drx_demod_instance *demod, struct drxj_cfg_afe_gain ext_attr = (struct drxj_data *) demod->my_ext_attr; switch (afe_gain->standard) { - case DRX_STANDARD_8VSB: /* fallthrough */ + case DRX_STANDARD_8VSB: fallthrough; #ifndef DRXJ_VSB_ONLY - case DRX_STANDARD_ITU_A: /* fallthrough */ - case DRX_STANDARD_ITU_B: /* fallthrough */ + case DRX_STANDARD_ITU_A: + case DRX_STANDARD_ITU_B: case DRX_STANDARD_ITU_C: #endif /* Do nothing */ @@ -11301,8 +11310,8 @@ ctrl_set_cfg_afe_gain(struct drx_demod_instance *demod, struct drxj_cfg_afe_gain ext_attr->vsb_pga_cfg = gain * 13 + 140; break; #ifndef DRXJ_VSB_ONLY - case DRX_STANDARD_ITU_A: /* fallthrough */ - case DRX_STANDARD_ITU_B: /* fallthrough */ + case DRX_STANDARD_ITU_A: + case DRX_STANDARD_ITU_B: case DRX_STANDARD_ITU_C: ext_attr->qam_pga_cfg = gain * 13 + 140; break; diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c index fae6f3763364..45f982863904 100644 --- a/drivers/media/dvb-frontends/drxd_hard.c +++ b/drivers/media/dvb-frontends/drxd_hard.c @@ -1512,14 +1512,14 @@ static int SetDeviceTypeId(struct drxd_state *state) switch (deviceId) { case 4: state->diversity = 1; - /* fall through */ + fallthrough; case 3: case 7: state->PGA = 1; break; case 6: state->diversity = 1; - /* fall through */ + fallthrough; case 5: case 8: break; @@ -1966,7 +1966,7 @@ static int DRX_Start(struct drxd_state *state, s32 off) switch (p->transmission_mode) { default: /* Not set, detect it automatically */ operationMode |= SC_RA_RAM_OP_AUTO_MODE__M; - /* fall through - try first guess DRX_FFTMODE_8K */ + fallthrough; /* try first guess DRX_FFTMODE_8K */ case TRANSMISSION_MODE_8K: transmissionParams |= SC_RA_RAM_OP_PARAM_MODE_8K; if (state->type_A) { @@ -2139,7 +2139,7 @@ static int DRX_Start(struct drxd_state *state, s32 off) switch (p->modulation) { default: operationMode |= SC_RA_RAM_OP_AUTO_CONST__M; - /* fall through - try first guess DRX_CONSTELLATION_QAM64 */ + fallthrough; /* try first guess DRX_CONSTELLATION_QAM64 */ case QAM_64: transmissionParams |= SC_RA_RAM_OP_PARAM_CONST_QAM64; if (state->type_A) { @@ -2266,7 +2266,7 @@ static int DRX_Start(struct drxd_state *state, s32 off) break; default: operationMode |= SC_RA_RAM_OP_AUTO_RATE__M; - /* fall through */ + fallthrough; case FEC_2_3: transmissionParams |= SC_RA_RAM_OP_PARAM_RATE_2_3; if (state->type_A) @@ -2301,7 +2301,7 @@ static int DRX_Start(struct drxd_state *state, s32 off) switch (p->bandwidth_hz) { case 0: p->bandwidth_hz = 8000000; - /* fall through */ + fallthrough; case 8000000: /* (64/7)*(8/8)*1000000 */ bandwidth = DRXD_BANDWIDTH_8MHZ_IN_HZ; diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c index 0ae9d8c72d8d..32f9346deb3e 100644 --- a/drivers/media/dvb-frontends/drxk_hard.c +++ b/drivers/media/dvb-frontends/drxk_hard.c @@ -1756,7 
+1756,7 @@ static int setoperation_mode(struct drxk_state *state, goto error; state->m_operation_mode = OM_NONE; break; - case OM_QAM_ITU_A: /* fallthrough */ + case OM_QAM_ITU_A: case OM_QAM_ITU_C: status = mpegts_stop(state); if (status < 0) @@ -1783,7 +1783,7 @@ static int setoperation_mode(struct drxk_state *state, if (status < 0) goto error; break; - case OM_QAM_ITU_A: /* fallthrough */ + case OM_QAM_ITU_A: case OM_QAM_ITU_C: dprintk(1, ": DVB-C Annex %c\n", (state->m_operation_mode == OM_QAM_ITU_A) ? 'A' : 'C'); @@ -2012,7 +2012,7 @@ static int mpegts_dto_setup(struct drxk_state *state, fec_oc_rcn_ctl_rate = 0xC00000; static_clk = state->m_dvbt_static_clk; break; - case OM_QAM_ITU_A: /* fallthrough */ + case OM_QAM_ITU_A: case OM_QAM_ITU_C: fec_oc_tmd_mode = 0x0004; fec_oc_rcn_ctl_rate = 0xD2B4EE; /* good for >63 Mb/s */ @@ -3249,11 +3249,11 @@ static int dvbt_sc_command(struct drxk_state *state, case OFDM_SC_RA_RAM_CMD_SET_PREF_PARAM: case OFDM_SC_RA_RAM_CMD_PROGRAM_PARAM: status |= write16(state, OFDM_SC_RA_RAM_PARAM1__A, param1); - /* fall through - All commands using 1 parameters */ + fallthrough; /* All commands using 1 parameters */ case OFDM_SC_RA_RAM_CMD_SET_ECHO_TIMING: case OFDM_SC_RA_RAM_CMD_USER_IO: status |= write16(state, OFDM_SC_RA_RAM_PARAM0__A, param0); - /* fall through - All commands using 0 parameters */ + fallthrough; /* All commands using 0 parameters */ case OFDM_SC_RA_RAM_CMD_GET_OP_PARAM: case OFDM_SC_RA_RAM_CMD_NULL: /* Write command */ @@ -3761,7 +3761,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz, case TRANSMISSION_MODE_AUTO: default: operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_MODE__M; - /* fall through - try first guess DRX_FFTMODE_8K */ + fallthrough; /* try first guess DRX_FFTMODE_8K */ case TRANSMISSION_MODE_8K: transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_MODE_8K; break; @@ -3775,7 +3775,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz, default: case GUARD_INTERVAL_AUTO: operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_GUARD__M; - /* fall through - try first guess DRX_GUARD_1DIV4 */ + fallthrough; /* try first guess DRX_GUARD_1DIV4 */ case GUARD_INTERVAL_1_4: transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_GUARD_4; break; @@ -3798,7 +3798,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz, operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_HIER__M; /* try first guess SC_RA_RAM_OP_PARAM_HIER_NO */ /* transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_HIER_NO; */ - /* fall through */ + fallthrough; case HIERARCHY_1: transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_HIER_A1; break; @@ -3816,7 +3816,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz, case QAM_AUTO: default: operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_CONST__M; - /* fall through - try first guess DRX_CONSTELLATION_QAM64 */ + fallthrough; /* try first guess DRX_CONSTELLATION_QAM64 */ case QAM_64: transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_CONST_QAM64; break; @@ -3841,7 +3841,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz, WR16(dev_addr, OFDM_EC_SB_PRIOR__A, OFDM_EC_SB_PRIOR_HI)); break; - case DRX_PRIORITY_UNKNOWN: /* fall through */ + case DRX_PRIORITY_UNKNOWN: default: status = -EINVAL; goto error; @@ -3859,7 +3859,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz, case FEC_AUTO: default: operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_RATE__M; - /* fall through - try first guess DRX_CODERATE_2DIV3 */ + fallthrough; /* try first guess DRX_CODERATE_2DIV3 
*/ case FEC_2_3: transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_RATE_2_3; break; @@ -3893,7 +3893,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz, switch (state->props.bandwidth_hz) { case 0: state->props.bandwidth_hz = 8000000; - /* fall through */ + fallthrough; case 8000000: bandwidth = DRXK_BANDWIDTH_8MHZ_IN_HZ; status = write16(state, OFDM_SC_RA_RAM_SRMM_FIX_FACT_8K__A, diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c index d3c330e035c4..722576f1732a 100644 --- a/drivers/media/dvb-frontends/lgdt3306a.c +++ b/drivers/media/dvb-frontends/lgdt3306a.c @@ -768,7 +768,7 @@ static int lgdt3306a_set_if(struct lgdt3306a_state *state, default: pr_warn("IF=%d KHz is not supported, 3250 assumed\n", if_freq_khz); - /* fallthrough */ + fallthrough; case 3250: /* 3.25Mhz */ nco1 = 0x34; nco2 = 0x00; diff --git a/drivers/media/dvb-frontends/mt352.c b/drivers/media/dvb-frontends/mt352.c index 881897583cf2..399d5c519027 100644 --- a/drivers/media/dvb-frontends/mt352.c +++ b/drivers/media/dvb-frontends/mt352.c @@ -201,7 +201,7 @@ static int mt352_set_parameters(struct dvb_frontend *fe) if (op->hierarchy == HIERARCHY_AUTO || op->hierarchy == HIERARCHY_NONE) break; - /* fall through */ + fallthrough; default: return -EINVAL; } diff --git a/drivers/media/dvb-frontends/mxl5xx.c b/drivers/media/dvb-frontends/mxl5xx.c index 290b9eab099f..4404ace82981 100644 --- a/drivers/media/dvb-frontends/mxl5xx.c +++ b/drivers/media/dvb-frontends/mxl5xx.c @@ -739,7 +739,7 @@ static int get_frontend(struct dvb_frontend *fe, default: break; } - /* Fall through */ + fallthrough; case SYS_DVBS: switch ((enum MXL_HYDRA_MODULATION_E) reg_data[DMD_MODULATION_SCHEME_ADDR]) { diff --git a/drivers/media/dvb-frontends/or51132.c b/drivers/media/dvb-frontends/or51132.c index 35a3e47497c2..24de1b115158 100644 --- a/drivers/media/dvb-frontends/or51132.c +++ b/drivers/media/dvb-frontends/or51132.c @@ -482,7 +482,7 @@ start: switch (reg&0xff) { case 0x06: if (reg & 0x1000) usK = 3 << 24; - /* fall through */ + fallthrough; case 0x43: /* QAM64 */ c = 150204167; break; diff --git a/drivers/media/dvb-frontends/s5h1411.c b/drivers/media/dvb-frontends/s5h1411.c index 89402916d301..c1334d7eb442 100644 --- a/drivers/media/dvb-frontends/s5h1411.c +++ b/drivers/media/dvb-frontends/s5h1411.c @@ -398,7 +398,7 @@ static int s5h1411_set_if_freq(struct dvb_frontend *fe, int KHz) default: dprintk("%s(%d KHz) Invalid, defaulting to 5380\n", __func__, KHz); - /* fall through */ + fallthrough; case 5380: case 44000: s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x1be4); diff --git a/drivers/media/dvb-frontends/zl10353.c b/drivers/media/dvb-frontends/zl10353.c index 2fc6aea580f9..2a2cf20a73d6 100644 --- a/drivers/media/dvb-frontends/zl10353.c +++ b/drivers/media/dvb-frontends/zl10353.c @@ -201,7 +201,7 @@ static int zl10353_set_parameters(struct dvb_frontend *fe) break; default: c->bandwidth_hz = 8000000; - /* fall through */ + fallthrough; case 8000000: zl10353_single_write(fe, MCLK_RATIO, 0x75); zl10353_single_write(fe, 0x64, 0x36); @@ -258,7 +258,7 @@ static int zl10353_set_parameters(struct dvb_frontend *fe) if (c->hierarchy == HIERARCHY_AUTO || c->hierarchy == HIERARCHY_NONE) break; - /* fall through */ + fallthrough; default: return -EINVAL; } diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c index 570a4a09c387..03eee606af91 100644 --- a/drivers/media/pci/cx23885/cx23885-cards.c +++ 
b/drivers/media/pci/cx23885/cx23885-cards.c @@ -2209,7 +2209,7 @@ void cx23885_card_setup(struct cx23885_dev *dev) ts2->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */ ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */ ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO; - /* fall-through */ + fallthrough; case CX23885_BOARD_DVICO_FUSIONHDTV_5_EXP: ts1->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */ ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */ @@ -2370,7 +2370,7 @@ void cx23885_card_setup(struct cx23885_dev *dev) /* Currently only enabled for the integrated IR controller */ if (!enable_885_ir) break; - /* fall-through */ + fallthrough; case CX23885_BOARD_HAUPPAUGE_HVR1250: case CX23885_BOARD_HAUPPAUGE_HVR1800: case CX23885_BOARD_HAUPPAUGE_IMPACTVCBE: diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c index 7cabb9e9ffe2..92fe051c672f 100644 --- a/drivers/media/pci/ddbridge/ddbridge-core.c +++ b/drivers/media/pci/ddbridge/ddbridge-core.c @@ -1310,7 +1310,7 @@ static void dvb_input_detach(struct ddb_input *input) dvb_unregister_frontend(dvb->fe2); if (dvb->fe) dvb_unregister_frontend(dvb->fe); - /* fallthrough */ + fallthrough; case 0x30: dvb_module_release(dvb->i2c_client[0]); dvb->i2c_client[0] = NULL; @@ -1321,22 +1321,22 @@ static void dvb_input_detach(struct ddb_input *input) dvb_frontend_detach(dvb->fe); dvb->fe = NULL; dvb->fe2 = NULL; - /* fallthrough */ + fallthrough; case 0x20: dvb_net_release(&dvb->dvbnet); - /* fallthrough */ + fallthrough; case 0x12: dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, &dvb->hw_frontend); dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, &dvb->mem_frontend); - /* fallthrough */ + fallthrough; case 0x11: dvb_dmxdev_release(&dvb->dmxdev); - /* fallthrough */ + fallthrough; case 0x10: dvb_dmx_release(&dvb->demux); - /* fallthrough */ + fallthrough; case 0x01: break; } @@ -1559,7 +1559,7 @@ static int dvb_input_attach(struct ddb_input *input) osc24 = 0; else osc24 = 1; - /* fall-through */ + fallthrough; case DDB_TUNER_DVBCT2_SONY_P: case DDB_TUNER_DVBC2T2_SONY_P: case DDB_TUNER_ISDBT_SONY_P: @@ -1575,7 +1575,7 @@ static int dvb_input_attach(struct ddb_input *input) break; case DDB_TUNER_DVBC2T2I_SONY: osc24 = 1; - /* fall-through */ + fallthrough; case DDB_TUNER_DVBCT2_SONY: case DDB_TUNER_DVBC2T2_SONY: case DDB_TUNER_ISDBT_SONY: @@ -2036,7 +2036,7 @@ static int ddb_port_attach(struct ddb_port *port) ret = ddb_ci_attach(port, ci_bitrate); if (ret < 0) break; - /* fall-through */ + fallthrough; case DDB_PORT_LOOP: ret = dvb_register_device(port->dvb[0].adap, &port->dvb[0].dev, @@ -2432,7 +2432,8 @@ void ddb_ports_init(struct ddb *dev) ddb_input_init(port, 4 + i, 1, 4 + i); ddb_output_init(port, i); break; - } /* fallthrough */ + } + fallthrough; case DDB_OCTOPUS: ddb_input_init(port, 2 * i, 0, 2 * i); ddb_input_init(port, 2 * i + 1, 1, 2 * i + 1); @@ -3417,7 +3418,7 @@ int ddb_exit_ddbridge(int stage, int error) default: case 2: destroy_workqueue(ddb_wq); - /* fall-through */ + fallthrough; case 1: ddb_class_destroy(); break; diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c index 7fb3b1853b87..8944e4bd4638 100644 --- a/drivers/media/pci/meye/meye.c +++ b/drivers/media/pci/meye/meye.c @@ -952,7 +952,7 @@ static int meyeioc_sync(struct file *file, void *fh, int *i) mutex_unlock(&meye.lock); return -EINTR; } - /* fall through */ + fallthrough; case MEYE_BUF_DONE: meye.grab_buffer[*i].state = MEYE_BUF_UNUSED; if (kfifo_out_locked(&meye.doneq, (unsigned char *)&unused, diff 
--git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c index bf36b1e22b63..45228f4f6fc6 100644 --- a/drivers/media/pci/ttpci/av7110.c +++ b/drivers/media/pci/ttpci/av7110.c @@ -637,7 +637,7 @@ static void gpioirq(unsigned long cookie) iwdebi(av7110, DEBINOSWAP, RX_BUFF, 0, 2); break; } - /* fall through */ + fallthrough; case DATA_TS_RECORD: case DATA_PES_RECORD: @@ -2176,7 +2176,7 @@ static int frontend_init(struct av7110 *av7110) break; } } - /* fall-thru */ + fallthrough; case 0x0008: // Hauppauge/TT DVB-T // Grundig 29504-401 diff --git a/drivers/media/pci/ttpci/av7110_hw.c b/drivers/media/pci/ttpci/av7110_hw.c index e8a8ec5405e2..93ca31e38ddd 100644 --- a/drivers/media/pci/ttpci/av7110_hw.c +++ b/drivers/media/pci/ttpci/av7110_hw.c @@ -1107,7 +1107,7 @@ int av7110_osd_cmd(struct av7110 *av7110, osd_cmd_t *dc) break; case OSD_SetRow: dc->y1 = dc->y0; - /* fall through */ + fallthrough; case OSD_SetBlock: ret = OSDSetBlock(av7110, dc->x0, dc->y0, dc->x1, dc->y1, dc->color, dc->data); break; diff --git a/drivers/media/pci/ttpci/av7110_ipack.c b/drivers/media/pci/ttpci/av7110_ipack.c index ec528fae7333..30330ed01ce8 100644 --- a/drivers/media/pci/ttpci/av7110_ipack.c +++ b/drivers/media/pci/ttpci/av7110_ipack.c @@ -182,7 +182,7 @@ int av7110_ipack_instant_repack (const u8 *buf, int count, struct ipack *p) case DSM_CC_STREAM : case ISO13522_STREAM: p->done = 1; - /* fall through */ + fallthrough; case PRIVATE_STREAM1: case VIDEO_STREAM_S ... VIDEO_STREAM_E: case AUDIO_STREAM_S ... AUDIO_STREAM_E: diff --git a/drivers/media/pci/ttpci/budget-av.c b/drivers/media/pci/ttpci/budget-av.c index 38cac508bd72..3cb83005cf09 100644 --- a/drivers/media/pci/ttpci/budget-av.c +++ b/drivers/media/pci/ttpci/budget-av.c @@ -1226,7 +1226,7 @@ static void frontend_init(struct budget_av *budget_av) * but so far it has been only confirmed for this type */ budget_av->reinitialise_demod = 1; - /* fall through */ + fallthrough; case SUBID_DVBS_KNC1_PLUS: case SUBID_DVBS_EASYWATCH_1: if (saa->pci->subsystem_vendor == 0x1894) { diff --git a/drivers/media/pci/ttpci/budget.c b/drivers/media/pci/ttpci/budget.c index 9c811272abfe..a88711a3ac7f 100644 --- a/drivers/media/pci/ttpci/budget.c +++ b/drivers/media/pci/ttpci/budget.c @@ -613,7 +613,7 @@ static void frontend_init(struct budget *budget) break; } } - /* fall through */ + fallthrough; case 0x1018: // TT Budget-S-1401 (philips tda10086/philips tda8262) { struct dvb_frontend *fe; @@ -638,7 +638,7 @@ static void frontend_init(struct budget *budget) break; } } - /* fall through */ + fallthrough; case 0x101c: { /* TT S2-1600 */ const struct stv6110x_devctl *ctl; diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c index 36e5f2ff4ef1..b22dc1d72527 100644 --- a/drivers/media/platform/sh_vou.c +++ b/drivers/media/platform/sh_vou.c @@ -220,7 +220,7 @@ static void sh_vou_stream_config(struct sh_vou_device *vou_dev) break; case V4L2_PIX_FMT_RGB565: dataswap ^= 1; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_RGB565X: row_coeff = 2; break; @@ -802,7 +802,7 @@ static u32 sh_vou_ntsc_mode(enum sh_vou_bus_fmt bus_fmt) default: pr_warn("%s(): Invalid bus-format code %d, using default 8-bit\n", __func__, bus_fmt); - /* fall through */ + fallthrough; case SH_VOU_BUS_8BIT: return 1; case SH_VOU_BUS_16BIT: diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c index b203296de977..7e2460263882 100644 --- a/drivers/media/radio/radio-si476x.c +++ b/drivers/media/radio/radio-si476x.c @@ -105,7 +105,8 @@ 
static inline enum phase_diversity_modes_idx si476x_phase_diversity_mode_to_idx(enum si476x_phase_diversity_mode mode) { switch (mode) { - default: /* FALLTHROUGH */ + default: + fallthrough; case SI476X_PHDIV_DISABLED: return SI476X_IDX_PHDIV_DISABLED; case SI476X_PHDIV_PRIMARY_COMBINING: diff --git a/drivers/media/radio/tea575x.c b/drivers/media/radio/tea575x.c index b0303cf00387..c37315226c42 100644 --- a/drivers/media/radio/tea575x.c +++ b/drivers/media/radio/tea575x.c @@ -249,7 +249,7 @@ int snd_tea575x_enum_freq_bands(struct snd_tea575x *tea, index = BAND_AM; break; } - /* Fall through */ + fallthrough; default: return -EINVAL; } diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c index 5bb144435c16..3fe3edd80876 100644 --- a/drivers/media/rc/bpf-lirc.c +++ b/drivers/media/rc/bpf-lirc.c @@ -112,7 +112,7 @@ lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) case BPF_FUNC_trace_printk: if (perfmon_capable()) return bpf_get_trace_printk_proto(); - /* fall through */ + fallthrough; default: return NULL; } diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c index 95727ca910f7..0cda78f72fd8 100644 --- a/drivers/media/rc/ir-rc6-decoder.c +++ b/drivers/media/rc/ir-rc6-decoder.c @@ -64,7 +64,7 @@ static enum rc6_mode rc6_mode(struct rc6_dec *data) case 6: if (!data->toggle) return RC6_MODE_6A; - /* fall through */ + fallthrough; default: return RC6_MODE_UNKNOWN; } diff --git a/drivers/media/rc/ir-sony-decoder.c b/drivers/media/rc/ir-sony-decoder.c index 9fa58d92eb09..7d9a7c000c75 100644 --- a/drivers/media/rc/ir-sony-decoder.c +++ b/drivers/media/rc/ir-sony-decoder.c @@ -102,7 +102,7 @@ static int ir_sony_decode(struct rc_dev *dev, struct ir_raw_event ev) } data->state = STATE_FINISHED; - /* Fall through */ + fallthrough; case STATE_FINISHED: if (ev.pulse) diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c index 734a92caad8d..7b7d9fe4f945 100644 --- a/drivers/media/tuners/xc5000.c +++ b/drivers/media/tuners/xc5000.c @@ -756,7 +756,7 @@ static int xc5000_set_digital_params(struct dvb_frontend *fe) if (!bw) bw = 6000000; /* fall to OFDM handling */ - /* fall through */ + fallthrough; case SYS_DMBTH: case SYS_DVBT: case SYS_DVBT2: diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c index 198ddfb8d2b1..e3234d169065 100644 --- a/drivers/media/usb/b2c2/flexcop-usb.c +++ b/drivers/media/usb/b2c2/flexcop-usb.c @@ -525,7 +525,7 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb) case USB_SPEED_HIGH: info("running at HIGH speed."); break; - case USB_SPEED_UNKNOWN: /* fall through */ + case USB_SPEED_UNKNOWN: default: err("cannot handle USB speed because it is unknown."); return -ENODEV; diff --git a/drivers/media/usb/cpia2/cpia2_core.c b/drivers/media/usb/cpia2/cpia2_core.c index 20c50c2d042e..e747548ab286 100644 --- a/drivers/media/usb/cpia2/cpia2_core.c +++ b/drivers/media/usb/cpia2/cpia2_core.c @@ -165,7 +165,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_VP_BRIGHTNESS: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_VP_BRIGHTNESS: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; @@ -176,7 +176,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_CONTRAST: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_CONTRAST: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; @@ 
-184,7 +184,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_VP_SATURATION: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_VP_SATURATION: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; @@ -195,7 +195,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_VP_GPIO_DATA: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_VP_GPIO_DATA: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; @@ -203,7 +203,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_VP_GPIO_DIRECTION: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_VP_GPIO_DIRECTION: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; @@ -211,7 +211,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_VC_MP_GPIO_DATA: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_VC_MP_GPIO_DATA: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC; cmd.reg_count = 1; @@ -219,7 +219,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_VC_MP_GPIO_DIRECTION: cmd.buffer.block_data[0] = param; - /*fall through */ + fallthrough; case CPIA2_CMD_GET_VC_MP_GPIO_DIRECTION: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC; cmd.reg_count = 1; @@ -234,7 +234,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_FLICKER_MODES: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_FLICKER_MODES: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; @@ -281,7 +281,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_USER_MODE: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_USER_MODE: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; @@ -301,7 +301,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_WAKEUP: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_WAKEUP: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC; cmd.reg_count = 1; @@ -309,7 +309,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_PW_CONTROL: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_PW_CONTROL: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC; cmd.reg_count = 1; @@ -322,7 +322,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_SYSTEM_CTRL: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_SYSTEM_CTRL: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM; @@ -331,7 +331,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_VP_SYSTEM_CTRL: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_VP_SYSTEM_CTRL: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; @@ -339,7 +339,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_VP_EXP_MODES: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_VP_EXP_MODES: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; @@ -347,7 +347,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_DEVICE_CONFIG: 
cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_DEVICE_CONFIG: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; @@ -368,7 +368,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_VC_CONTROL: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_VC_CONTROL: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC; cmd.reg_count = 1; @@ -403,7 +403,7 @@ int cpia2_do_command(struct camera_data *cam, this register can also affect flicker modes */ cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_USER_EFFECTS: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; @@ -1751,7 +1751,7 @@ int cpia2_set_fps(struct camera_data *cam, int framerate) CPIA2_VP_SENSOR_FLAGS_500) { return -EINVAL; } - /* Fall through */ + fallthrough; case CPIA2_VP_FRAMERATE_15: case CPIA2_VP_FRAMERATE_12_5: case CPIA2_VP_FRAMERATE_7_5: diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c index d9f953f2d088..425e470b0fd3 100644 --- a/drivers/media/usb/cx231xx/cx231xx-video.c +++ b/drivers/media/usb/cx231xx/cx231xx-video.c @@ -996,7 +996,7 @@ void cx231xx_v4l2_create_entities(struct cx231xx *dev) /* The DVB core will handle it */ if (dev->tuner_type == TUNER_ABSENT) continue; - /* fall through */ + fallthrough; default: /* just to shut up a gcc warning */ ent->function = MEDIA_ENT_F_CONN_RF; break; diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c index 4ef3fa98d20f..a6ae46567a31 100644 --- a/drivers/media/usb/dvb-usb/dib0700_devices.c +++ b/drivers/media/usb/dvb-usb/dib0700_devices.c @@ -1660,7 +1660,7 @@ static int dib8096_set_param_override(struct dvb_frontend *fe) switch (band) { default: deb_info("Warning : Rf frequency (%iHz) is not in the supported range, using VHF switch ", fe->dtv_property_cache.frequency); - /* fall through */ + fallthrough; case BAND_VHF: state->dib8000_ops.set_gpio(fe, 3, 0, 1); break; diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c index f96626fe2c0b..a27a68440325 100644 --- a/drivers/media/usb/dvb-usb/dw2102.c +++ b/drivers/media/usb/dvb-usb/dw2102.c @@ -1886,12 +1886,12 @@ static int dw2102_load_firmware(struct usb_device *dev, switch (le16_to_cpu(dev->descriptor.idProduct)) { case USB_PID_TEVII_S650: dw2104_properties.rc.core.rc_codes = RC_MAP_TEVII_NEC; - /* fall through */ + fallthrough; case USB_PID_DW2104: reset = 1; dw210x_op_rw(dev, 0xc4, 0x0000, 0, &reset, 1, DW210X_WRITE_MSG); - /* fall through */ + fallthrough; case USB_PID_DW3101: reset = 0; dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0, @@ -1924,7 +1924,7 @@ static int dw2102_load_firmware(struct usb_device *dev, break; } } - /* fall through */ + fallthrough; case 0x2101: dw210x_op_rw(dev, 0xbc, 0x0030, 0, &reset16[0], 2, DW210X_READ_MSG); diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index 3f3fbcd60cc6..45a2403aa039 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c @@ -2200,7 +2200,7 @@ static int check_range(enum v4l2_ctrl_type type, case V4L2_CTRL_TYPE_BOOLEAN: if (step != 1 || max > 1 || min < 0) return -ERANGE; - /* fall through */ + fallthrough; case V4L2_CTRL_TYPE_U8: case V4L2_CTRL_TYPE_U16: case V4L2_CTRL_TYPE_U32: diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 
a556880f225a..2a22e13a6303 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -782,7 +782,6 @@ static void v4l_print_frmsizeenum(const void *arg, bool write_only) p->stepwise.step_height); break; case V4L2_FRMSIZE_TYPE_CONTINUOUS: - /* fall through */ default: pr_cont("\n"); break; @@ -816,7 +815,6 @@ static void v4l_print_frmivalenum(const void *arg, bool write_only) p->stepwise.step.denominator); break; case V4L2_FRMIVAL_TYPE_CONTINUOUS: - /* fall through */ default: pr_cont("\n"); break; diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c index 5c91fc3e65b5..606a271bdd2d 100644 --- a/drivers/media/v4l2-core/videobuf-core.c +++ b/drivers/media/v4l2-core/videobuf-core.c @@ -354,7 +354,7 @@ static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b, break; case VIDEOBUF_ERROR: b->flags |= V4L2_BUF_FLAG_ERROR; - /* fall through */ + fallthrough; case VIDEOBUF_DONE: b->flags |= V4L2_BUF_FLAG_DONE; break; diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index f512cbc7a36c..ca0097664b12 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c @@ -313,7 +313,6 @@ static unsigned long gpmc_get_clk_period(int cs, enum gpmc_clk_domain cd) tick_ps *= div; break; case GPMC_CD_FCLK: - /* FALL-THROUGH */ default: break; } diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c index d9ee8e3dc72d..178954228631 100644 --- a/drivers/memstick/core/ms_block.c +++ b/drivers/memstick/core/ms_block.c @@ -371,7 +371,7 @@ again: serial mode), then just fall through */ if (msb_read_int_reg(msb, -1)) return 0; - /* fallthrough */ + fallthrough; case MSB_RP_RECEIVE_INT_REQ_RESULT: intreg = mrq->data[0]; @@ -403,7 +403,7 @@ again: case MSB_RP_RECEIVE_STATUS_REG: msb->regs.status = *(struct ms_status_register *)mrq->data; msb->state = MSB_RP_SEND_OOB_READ; - /* fallthrough */ + fallthrough; case MSB_RP_SEND_OOB_READ: if (!msb_read_regs(msb, @@ -418,7 +418,7 @@ again: msb->regs.extra_data = *(struct ms_extra_data_register *) mrq->data; msb->state = MSB_RP_SEND_READ_DATA; - /* fallthrough */ + fallthrough; case MSB_RP_SEND_READ_DATA: /* Skip that state if we only read the oob */ @@ -518,7 +518,7 @@ again: msb->state = MSB_WB_RECEIVE_INT_REQ; if (msb_read_int_reg(msb, -1)) return 0; - /* fallthrough */ + fallthrough; case MSB_WB_RECEIVE_INT_REQ: intreg = mrq->data[0]; @@ -549,7 +549,7 @@ again: msb->int_polling = false; msb->state = MSB_WB_SEND_WRITE_DATA; - /* fallthrough */ + fallthrough; case MSB_WB_SEND_WRITE_DATA: sg_init_table(sg, ARRAY_SIZE(sg)); @@ -628,7 +628,7 @@ again: msb->state = MSB_SC_RECEIVE_INT_REQ; if (msb_read_int_reg(msb, -1)) return 0; - /* fallthrough */ + fallthrough; case MSB_SC_RECEIVE_INT_REQ: intreg = mrq->data[0]; diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c index 4a6b866b0291..e83c3ada9389 100644 --- a/drivers/memstick/host/jmb38x_ms.c +++ b/drivers/memstick/host/jmb38x_ms.c @@ -255,11 +255,11 @@ static unsigned int jmb38x_ms_write_data(struct jmb38x_ms_host *host, case 3: host->io_word[0] |= buf[off + 2] << 16; host->io_pos++; - /* fall through */ + fallthrough; case 2: host->io_word[0] |= buf[off + 1] << 8; host->io_pos++; - /* fall through */ + fallthrough; case 1: host->io_word[0] |= buf[off]; host->io_pos++; diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c index fc35c7404429..786e46798da2 100644 --- a/drivers/memstick/host/tifm_ms.c +++ 
b/drivers/memstick/host/tifm_ms.c @@ -162,11 +162,11 @@ static unsigned int tifm_ms_write_data(struct tifm_ms *host, case 3: host->io_word |= buf[off + 2] << 16; host->io_pos++; - /* fall through */ + fallthrough; case 2: host->io_word |= buf[off + 1] << 8; host->io_pos++; - /* fall through */ + fallthrough; case 1: host->io_word |= buf[off]; host->io_pos++; diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 5216487db4fb..9903e9660a38 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -642,7 +642,7 @@ mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) freereq = 0; if (event != MPI_EVENT_EVENT_CHANGE) break; - /* fall through */ + fallthrough; case MPI_FUNCTION_CONFIG: case MPI_FUNCTION_SAS_IO_UNIT_CONTROL: ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD; @@ -1887,7 +1887,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) case MPI_MANUFACTPAGE_DEVICEID_FC939X: case MPI_MANUFACTPAGE_DEVICEID_FC949X: ioc->errata_flag_1064 = 1; - /* fall through */ + fallthrough; case MPI_MANUFACTPAGE_DEVICEID_FC909: case MPI_MANUFACTPAGE_DEVICEID_FC929: case MPI_MANUFACTPAGE_DEVICEID_FC919: @@ -1932,7 +1932,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) pcixcmd &= 0x8F; pci_write_config_byte(pdev, 0x6a, pcixcmd); } - /* fall through */ + fallthrough; case MPI_MANUFACTPAGE_DEVID_1030_53C1035: ioc->bus_type = SPI; diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 6a79cd0ebe2b..18b91ea1a353 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -4326,7 +4326,7 @@ mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event, } } mpt_findImVolumes(ioc); - /* fall through */ + fallthrough; case MPTSAS_ADD_DEVICE: memset(&sas_device, 0, sizeof(struct mptsas_devinfo)); diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index 1491561d2e5c..8543f0324d5a 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -784,7 +784,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) /* * Allow non-SAS & non-NEXUS_LOSS to drop into below code */ - /* Fall through */ + fallthrough; case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */ /* Linux handles an unsolicited DID_RESET better @@ -881,7 +881,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: /* 0x0044 */ scsi_set_resid(sc, 0); - /* Fall through */ + fallthrough; case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */ case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */ sc->result = (DID_OK << 16) | scsi_status; diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c index a9d9c1cdf546..a5983d515db0 100644 --- a/drivers/mfd/db8500-prcmu.c +++ b/drivers/mfd/db8500-prcmu.c @@ -1515,10 +1515,10 @@ static unsigned long dsiclk_rate(u8 n) switch (divsel) { case PRCM_DSI_PLLOUT_SEL_PHI_4: div *= 2; - /* Fall through */ + fallthrough; case PRCM_DSI_PLLOUT_SEL_PHI_2: div *= 2; - /* Fall through */ + fallthrough; case PRCM_DSI_PLLOUT_SEL_PHI: return pll_rate(PRCM_PLLDSI_FREQ, clock_rate(PRCMU_HDMICLK), PLL_RAW) / div; diff --git a/drivers/mfd/iqs62x.c b/drivers/mfd/iqs62x.c index af764bc87d7c..761b4ef3a381 100644 --- a/drivers/mfd/iqs62x.c +++ b/drivers/mfd/iqs62x.c @@ -136,7 +136,7 @@ static int iqs62x_dev_init(struct iqs62x_core *iqs62x) if (val & IQS620_PROX_SETTINGS_4_SAR_EN) iqs62x->ui_sel = IQS62X_UI_SAR1; - /* 
fall through */ + fallthrough; case IQS621_PROD_NUM: ret = regmap_write(iqs62x->regmap, IQS620_GLBL_EVENT_MASK, @@ -470,7 +470,7 @@ static irqreturn_t iqs62x_irq(int irq, void *context) case IQS62X_EVENT_UI_LO: event_data.ui_data = get_unaligned_le16(&event_map[i]); - /* fall through */ + fallthrough; case IQS62X_EVENT_UI_HI: case IQS62X_EVENT_NONE: @@ -491,7 +491,7 @@ static irqreturn_t iqs62x_irq(int irq, void *context) case IQS62X_EVENT_HYST: event_map[i] <<= iqs62x->dev_desc->hyst_shift; - /* fall through */ + fallthrough; case IQS62X_EVENT_WHEEL: case IQS62X_EVENT_HALL: diff --git a/drivers/mfd/mxs-lradc.c b/drivers/mfd/mxs-lradc.c index 5bef142c4835..111d11fd25aa 100644 --- a/drivers/mfd/mxs-lradc.c +++ b/drivers/mfd/mxs-lradc.c @@ -172,7 +172,7 @@ static int mxs_lradc_probe(struct platform_device *pdev) MXS_LRADC_TOUCHSCREEN_5WIRE; break; } - /* fall through - to an error message for i.MX23 */ + fallthrough; /* to an error message for i.MX23 */ default: dev_err(&pdev->dev, "Unsupported number of touchscreen wires (%d)\n" diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c index 1e6431cb8536..2a3a240b4619 100644 --- a/drivers/mfd/omap-usb-host.c +++ b/drivers/mfd/omap-usb-host.c @@ -308,7 +308,7 @@ static int usbhs_runtime_resume(struct device *dev) i, r); } } - /* Fall through - as HSIC mode needs utmi_clk */ + fallthrough; /* as HSIC mode needs utmi_clk */ case OMAP_EHCI_PORT_MODE_TLL: if (!IS_ERR(omap->utmi_clk[i])) { @@ -344,7 +344,7 @@ static int usbhs_runtime_suspend(struct device *dev) if (!IS_ERR(omap->hsic480m_clk[i])) clk_disable_unprepare(omap->hsic480m_clk[i]); - /* Fall through - as utmi_clks were used in HSIC mode */ + fallthrough; /* as utmi_clks were used in HSIC mode */ case OMAP_EHCI_PORT_MODE_TLL: if (!IS_ERR(omap->utmi_clk[i])) diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c index abaab541df19..545196c85b5c 100644 --- a/drivers/mfd/rave-sp.c +++ b/drivers/mfd/rave-sp.c @@ -270,7 +270,7 @@ static void *stuff(unsigned char *dest, const unsigned char *src, size_t n) case RAVE_SP_ETX: case RAVE_SP_DLE: *dest++ = RAVE_SP_DLE; - /* FALLTHROUGH */ + fallthrough; default: *dest++ = byte; } @@ -541,7 +541,7 @@ static int rave_sp_receive_buf(struct serdev_device *serdev, * deframer buffer */ - /* FALLTHROUGH */ + fallthrough; case RAVE_SP_EXPECT_ESCAPED_DATA: if (deframer->length == sizeof(deframer->data)) { diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c index 75859e492984..df5cebb372a5 100644 --- a/drivers/mfd/syscon.c +++ b/drivers/mfd/syscon.c @@ -95,7 +95,7 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk) break; default: pr_err("Failed to retrieve valid hwlock: %d\n", ret); - /* fall-through */ + fallthrough; case -EPROBE_DEFER: goto err_regmap; } diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c index cde9a2fc1325..ed8d38b09925 100644 --- a/drivers/misc/eeprom/at25.c +++ b/drivers/misc/eeprom/at25.c @@ -90,10 +90,10 @@ static int at25_ee_read(void *priv, unsigned int offset, switch (at25->addrlen) { default: /* case 3 */ *cp++ = offset >> 16; - /* fall through */ + fallthrough; case 2: *cp++ = offset >> 8; - /* fall through */ + fallthrough; case 1: case 0: /* can't happen: for better codegen */ *cp++ = offset >> 0; @@ -178,10 +178,10 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count) switch (at25->addrlen) { default: /* case 3 */ *cp++ = offset >> 16; - /* fall through */ + fallthrough; case 2: *cp++ = offset >> 8; - /* fall through */ + 
fallthrough; case 1: case 0: /* can't happen: for better codegen */ *cp++ = offset >> 0; @@ -278,7 +278,7 @@ static int at25_fw_to_chip(struct device *dev, struct spi_eeprom *chip) switch (val) { case 9: chip->flags |= EE_INSTR_BIT3_IS_ADDR; - /* fall through */ + fallthrough; case 8: chip->flags |= EE_ADDR1; break; diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c index 9cc6b2a6cf22..304d6c833712 100644 --- a/drivers/misc/mic/scif/scif_api.c +++ b/drivers/misc/mic/scif/scif_api.c @@ -178,7 +178,7 @@ int scif_close(scif_epd_t epd) case SCIFEP_ZOMBIE: dev_err(scif_info.mdev.this_device, "SCIFAPI close: zombie state unexpected\n"); - /* fall through */ + fallthrough; case SCIFEP_DISCONNECTED: spin_unlock(&ep->lock); scif_unregister_all_windows(epd); @@ -645,7 +645,7 @@ int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block) ep->port.port = err; ep->port.node = scif_info.nodeid; ep->conn_async_state = ASYNC_CONN_IDLE; - /* Fall through */ + fallthrough; case SCIFEP_BOUND: /* * If a non-blocking connect has been already initiated diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c index de8f61efaef5..2da3b474f486 100644 --- a/drivers/misc/mic/scif/scif_rma.c +++ b/drivers/misc/mic/scif/scif_rma.c @@ -657,7 +657,7 @@ int scif_unregister_window(struct scif_window *window) window->unreg_state = OP_IN_PROGRESS; send_msg = true; } - /* fall through */ + fallthrough; case OP_IN_PROGRESS: { scif_get_window(window, 1); diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c index f6e600bfac5d..0ea923fe6371 100644 --- a/drivers/misc/sgi-gru/grukservices.c +++ b/drivers/misc/sgi-gru/grukservices.c @@ -622,7 +622,7 @@ static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd, break; case CBSS_PAGE_OVERFLOW: STAT(mesq_noop_page_overflow); - /* fall through */ + fallthrough; default: BUG(); } @@ -780,7 +780,7 @@ static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd, break; case CBSS_PAGE_OVERFLOW: STAT(mesq_page_overflow); - /* fall through */ + fallthrough; default: BUG(); } diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index d5e097cd556d..8a495dc82f16 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c @@ -1173,7 +1173,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args) if (!xpc_kdebug_ignore) break; - /* fall through */ + fallthrough; case DIE_MCA_MONARCH_ENTER: case DIE_INIT_MONARCH_ENTER: xpc_arch_ops.offline_heartbeat(); @@ -1184,7 +1184,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args) if (!xpc_kdebug_ignore) break; - /* fall through */ + fallthrough; case DIE_MCA_MONARCH_LEAVE: case DIE_INIT_MONARCH_LEAVE: xpc_arch_ops.online_heartbeat(); diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c index 21a04bc97d40..099a53bdbb7d 100644 --- a/drivers/misc/sgi-xp/xpc_partition.c +++ b/drivers/misc/sgi-xp/xpc_partition.c @@ -441,10 +441,10 @@ xpc_discovery(void) switch (region_size) { case 128: max_regions *= 2; - /* fall through */ + fallthrough; case 64: max_regions *= 2; - /* fall through */ + fallthrough; case 32: max_regions *= 2; region_size = 16; diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index 98c60f11b76b..7791bde81a36 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c @@ -574,7 +574,7 @@ xpc_handle_activate_mq_msg_uv(struct 
xpc_partition *part, xpc_wakeup_channel_mgr(part); } - /* fall through */ + fallthrough; case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV: spin_lock_irqsave(&part_uv->flags_lock, irq_flags); part_uv->flags |= XPC_P_ENGAGED_UV; diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index ce43f7573d80..c8fae6611b73 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -191,7 +191,7 @@ int mmc_of_parse(struct mmc_host *host) switch (bus_width) { case 8: host->caps |= MMC_CAP_8_BIT_DATA; - /* fall through - Hosts capable of 8-bit can also do 4 bits */ + fallthrough; /* Hosts capable of 8-bit can also do 4 bits */ case 4: host->caps |= MMC_CAP_4_BIT_DATA; break; diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 300901415aa2..3fc3bbea8536 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -2418,7 +2418,7 @@ static void atmci_get_cap(struct atmel_mci *host) case 0x600: case 0x500: host->caps.has_odd_clk_div = 1; - /* Fall through */ + fallthrough; case 0x400: case 0x300: host->caps.has_dma_conf_reg = 1; @@ -2426,16 +2426,16 @@ static void atmci_get_cap(struct atmel_mci *host) host->caps.has_cfg_reg = 1; host->caps.has_cstor_reg = 1; host->caps.has_highspeed = 1; - /* Fall through */ + fallthrough; case 0x200: host->caps.has_rwproof = 1; host->caps.need_blksz_mul_4 = 0; host->caps.need_notbusy_for_read_ops = 1; - /* Fall through */ + fallthrough; case 0x100: host->caps.has_bad_data_ordering = 0; host->caps.need_reset_after_xfer = 0; - /* Fall through */ + fallthrough; case 0x0: break; default: diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index f01fecd75833..e50a08bce7ef 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c @@ -300,7 +300,7 @@ static void mmc_davinci_start_command(struct mmc_davinci_host *host, * then it's harmless for us to allow it. 
*/ cmd_reg |= MMCCMD_BSYEXP; - /* FALLTHROUGH */ + fallthrough; case MMC_RSP_R1: /* 48 bits, CRC */ cmd_reg |= MMCCMD_RSPFMT_R1456; break; diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c index 50977ff18074..db1a84b2ba61 100644 --- a/drivers/mmc/host/dw_mmc-k3.c +++ b/drivers/mmc/host/dw_mmc-k3.c @@ -238,7 +238,7 @@ static void dw_mci_hs_set_timing(struct dw_mci *host, int timing, if (smpl_phase >= USE_DLY_MIN_SMPL && smpl_phase <= USE_DLY_MAX_SMPL) use_smpl_dly = 1; - /* fallthrough */ + fallthrough; case MMC_TIMING_UHS_SDR50: if (smpl_phase >= ENABLE_SHIFT_MIN_SMPL && smpl_phase <= ENABLE_SHIFT_MAX_SMPL) diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 35ae5737c622..0fba940544ca 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -2030,7 +2030,7 @@ static void dw_mci_tasklet_func(unsigned long priv) } prev_state = state = STATE_SENDING_DATA; - /* fall through */ + fallthrough; case STATE_SENDING_DATA: /* @@ -2088,7 +2088,7 @@ static void dw_mci_tasklet_func(unsigned long priv) } prev_state = state = STATE_DATA_BUSY; - /* fall through */ + fallthrough; case STATE_DATA_BUSY: if (!dw_mci_clear_pending_data_complete(host)) { @@ -2141,7 +2141,7 @@ static void dw_mci_tasklet_func(unsigned long priv) */ prev_state = state = STATE_SENDING_STOP; - /* fall through */ + fallthrough; case STATE_SENDING_STOP: if (!dw_mci_clear_pending_cmd_complete(host)) diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c index 447552ac25c4..81d71010b474 100644 --- a/drivers/mmc/host/jz4740_mmc.c +++ b/drivers/mmc/host/jz4740_mmc.c @@ -739,7 +739,7 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid) break; jz_mmc_prepare_data_transfer(host); - /* fall through */ + fallthrough; case JZ4740_MMC_STATE_TRANSFER_DATA: if (host->use_dma) { @@ -774,7 +774,7 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid) break; } jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_DATA_TRAN_DONE); - /* fall through */ + fallthrough; case JZ4740_MMC_STATE_SEND_STOP: if (!req->stop) diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c index 9b2cf7afc246..703d5834f9a5 100644 --- a/drivers/mmc/host/meson-mx-sdio.c +++ b/drivers/mmc/host/meson-mx-sdio.c @@ -294,7 +294,7 @@ static void meson_mx_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) switch (ios->power_mode) { case MMC_POWER_OFF: vdd = 0; - /* fall through */ + fallthrough; case MMC_POWER_UP: if (!IS_ERR(mmc->supply.vmmc)) { host->error = mmc_regulator_set_ocr(mmc, diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index 15e21894bd44..904f5237d8f7 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -685,7 +685,7 @@ static int renesas_sdhi_write16_hook(struct tmio_mmc_host *host, int addr) case HOST_MODE: if (host->pdata->flags & TMIO_MMC_HAVE_CBSY) bit = TMIO_STAT_CMD_BUSY; - /* fallthrough */ + fallthrough; case CTL_SD_CARD_CLK_CTL: return renesas_sdhi_wait_idle(host, bit); } diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index a76b4513fbec..d738907a622f 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -1556,7 +1556,7 @@ static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev, "failed to request card-detect gpio!\n"); return err; } - /* fall through */ + fallthrough; case ESDHC_CD_CONTROLLER: /* we have a working card_detect back */ diff --git a/drivers/mmc/host/sdhci-s3c.c 
b/drivers/mmc/host/sdhci-s3c.c index 9194bb73e601..080ced1e63f0 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -609,7 +609,7 @@ static int sdhci_s3c_probe(struct platform_device *pdev) switch (pdata->max_width) { case 8: host->mmc->caps |= MMC_CAP_8_BIT_DATA; - /* Fall through */ + fallthrough; case 4: host->mmc->caps |= MMC_CAP_4_BIT_DATA; break; diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c index a910cb461ed7..bafa2e41c8b6 100644 --- a/drivers/mmc/host/sdhci-sprd.c +++ b/drivers/mmc/host/sdhci-sprd.c @@ -470,7 +470,7 @@ static int sdhci_sprd_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios) break; default: - /* fall-through */ + fallthrough; case MMC_SIGNAL_VOLTAGE_330: ret = pinctrl_select_state(sprd_host->pinctrl, sprd_host->pins_default); diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c index e6e9e286cc34..03ce57ef4585 100644 --- a/drivers/mmc/host/sdhci-xenon-phy.c +++ b/drivers/mmc/host/sdhci-xenon-phy.c @@ -527,7 +527,7 @@ static bool xenon_emmc_phy_slow_mode(struct sdhci_host *host, ret = true; break; } - /* fall through */ + fallthrough; default: reg &= ~XENON_TIMING_ADJUST_SLOW_MODE; ret = false; diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 3ad394b40eb1..592a55a34b58 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -2825,7 +2825,7 @@ int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) case MMC_TIMING_UHS_SDR50: if (host->flags & SDHCI_SDR50_NEEDS_TUNING) break; - /* FALLTHROUGH */ + fallthrough; default: goto out; diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c index 5987656e0474..fd8b72d3e02c 100644 --- a/drivers/mmc/host/tifm_sd.c +++ b/drivers/mmc/host/tifm_sd.c @@ -335,7 +335,7 @@ static unsigned int tifm_sd_op_flags(struct mmc_command *cmd) break; case MMC_RSP_R1B: rc |= TIFM_MMCSD_RSP_BUSY; - /* fall-through */ + fallthrough; case MMC_RSP_R1: rc |= TIFM_MMCSD_RSP_R1; break; diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c index 369b8dee2e3d..7666c90054ae 100644 --- a/drivers/mmc/host/usdhi6rol0.c +++ b/drivers/mmc/host/usdhi6rol0.c @@ -1343,7 +1343,7 @@ static int usdhi6_stop_cmd(struct usdhi6_host *host) host->wait = USDHI6_WAIT_FOR_STOP; return 0; } - /* fall through - Unsupported STOP command. */ + fallthrough; /* Unsupported STOP command */ default: dev_err(mmc_dev(host->mmc), "unsupported stop CMD%d for CMD%d\n", @@ -1691,7 +1691,7 @@ static void usdhi6_timeout_work(struct work_struct *work) switch (host->wait) { default: dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait); - /* fall through - mrq can be NULL, but is impossible. */ + fallthrough; /* mrq can be NULL, but is impossible */ case USDHI6_WAIT_FOR_CMD: usdhi6_error_code(host); if (mrq) @@ -1713,7 +1713,7 @@ static void usdhi6_timeout_work(struct work_struct *work) host->offset, data->blocks, data->blksz, data->sg_len, sg_dma_len(sg), sg->offset); usdhi6_sg_unmap(host, true); - /* fall through - page unmapped in USDHI6_WAIT_FOR_DATA_END. 
*/ + fallthrough; /* page unmapped in USDHI6_WAIT_FOR_DATA_END */ case USDHI6_WAIT_FOR_DATA_END: usdhi6_error_code(host); data->error = -ETIMEDOUT; diff --git a/drivers/mux/adgs1408.c b/drivers/mux/adgs1408.c index 12466b06692c..22ed051eb1a4 100644 --- a/drivers/mux/adgs1408.c +++ b/drivers/mux/adgs1408.c @@ -93,7 +93,7 @@ static int adgs1408_probe(struct spi_device *spi) mux->idle_state = idle_state; break; } - /* fall through */ + fallthrough; default: dev_err(dev, "invalid idle-state %d\n", idle_state); return -EINVAL; diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c index 18428e104445..1c6c27f35ac4 100644 --- a/drivers/net/appletalk/cops.c +++ b/drivers/net/appletalk/cops.c @@ -301,7 +301,7 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr) dev->irq = cops_irq(ioaddr, board); if (dev->irq) break; - /* fall through - Once no IRQ found on this port. */ + fallthrough; /* Once no IRQ found on this port */ case 1: retval = -EINVAL; goto err_out; diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c index 14a5fb378145..98df38fe553c 100644 --- a/drivers/net/arcnet/arc-rimi.c +++ b/drivers/net/arcnet/arc-rimi.c @@ -363,13 +363,13 @@ static int __init arcrimi_setup(char *s) switch (ints[0]) { default: /* ERROR */ pr_err("Too many arguments\n"); - /* Fall through */ + fallthrough; case 3: /* Node ID */ node = ints[3]; - /* Fall through */ + fallthrough; case 2: /* IRQ */ irq = ints[2]; - /* Fall through */ + fallthrough; case 1: /* IO address */ io = ints[1]; } diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c index cd27fdc1059b..f983c4ce6b07 100644 --- a/drivers/net/arcnet/com20020-isa.c +++ b/drivers/net/arcnet/com20020-isa.c @@ -197,22 +197,22 @@ static int __init com20020isa_setup(char *s) switch (ints[0]) { default: /* ERROR */ pr_info("Too many arguments\n"); - /* Fall through */ + fallthrough; case 6: /* Timeout */ timeout = ints[6]; - /* Fall through */ + fallthrough; case 5: /* CKP value */ clockp = ints[5]; - /* Fall through */ + fallthrough; case 4: /* Backplane flag */ backplane = ints[4]; - /* Fall through */ + fallthrough; case 3: /* Node ID */ node = ints[3]; - /* Fall through */ + fallthrough; case 2: /* IRQ */ irq = ints[2]; - /* Fall through */ + fallthrough; case 1: /* IO address */ io = ints[1]; } diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c index 186bbf87bc84..cf214b730671 100644 --- a/drivers/net/arcnet/com90io.c +++ b/drivers/net/arcnet/com90io.c @@ -363,10 +363,10 @@ static int __init com90io_setup(char *s) switch (ints[0]) { default: /* ERROR */ pr_err("Too many arguments\n"); - /* Fall through */ + fallthrough; case 2: /* IRQ */ irq = ints[2]; - /* Fall through */ + fallthrough; case 1: /* IO address */ io = ints[1]; } diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c index bd75d06ad7df..3dc3d533cb19 100644 --- a/drivers/net/arcnet/com90xx.c +++ b/drivers/net/arcnet/com90xx.c @@ -693,13 +693,13 @@ static int __init com90xx_setup(char *s) switch (ints[0]) { default: /* ERROR */ pr_err("Too many arguments\n"); - /* Fall through */ + fallthrough; case 3: /* Mem address */ shmem = ints[3]; - /* Fall through */ + fallthrough; case 2: /* IRQ */ irq = ints[2]; - /* Fall through */ + fallthrough; case 1: /* IO address */ io = ints[1]; } diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index cddaa43a9d52..aa001b16765a 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ 
-1149,7 +1149,7 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) port->actor_oper_port_state &= ~LACP_STATE_EXPIRED; port->sm_rx_state = AD_RX_PORT_DISABLED; - /* Fall Through */ + fallthrough; case AD_RX_PORT_DISABLED: port->sm_vars &= ~AD_PORT_MATCHED; break; @@ -1588,7 +1588,7 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best, if (__agg_active_ports(curr) < __agg_active_ports(best)) return best; - /*FALLTHROUGH*/ + fallthrough; case BOND_AD_STABLE: case BOND_AD_BANDWIDTH: if (__get_agg_bandwidth(curr) > __get_agg_bandwidth(best)) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index c5d3032dd1a2..42ef25ec0af5 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2274,7 +2274,7 @@ static int bond_miimon_inspect(struct bonding *bond) "active " : "backup ") : "", bond->params.downdelay * bond->params.miimon); } - /*FALLTHRU*/ + fallthrough; case BOND_LINK_FAIL: if (link_state) { /* recovered before downdelay expired */ @@ -2310,7 +2310,7 @@ static int bond_miimon_inspect(struct bonding *bond) bond->params.updelay * bond->params.miimon); } - /*FALLTHRU*/ + fallthrough; case BOND_LINK_BACK: if (!link_state) { bond_propose_link_state(slave, BOND_LINK_DOWN); @@ -3322,7 +3322,7 @@ static int bond_slave_netdev_event(unsigned long event, if (BOND_MODE(bond) == BOND_MODE_8023AD) bond_3ad_adapter_speed_duplex_changed(slave); - /* Fallthrough */ + fallthrough; case NETDEV_DOWN: /* Refresh slave-array if applicable! * If the setup does not use miimon or arpmon (mode-specific!), @@ -3760,7 +3760,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd return -EINVAL; mii->phy_id = 0; - /* Fall Through */ + fallthrough; case SIOCGMIIREG: /* We do this again just in case we were called by SIOCGMIIREG * instead of SIOCGMIIPHY. 
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index 9df2007b5e56..38e9f80ed1ef 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -898,7 +898,7 @@ static void at91_irq_err_state(struct net_device *dev, CAN_ERR_CRTL_TX_WARNING : CAN_ERR_CRTL_RX_WARNING; } - /* fall through */ + fallthrough; case CAN_STATE_ERROR_WARNING: /* * from: ERROR_ACTIVE, ERROR_WARNING @@ -948,7 +948,7 @@ static void at91_irq_err_state(struct net_device *dev, netdev_dbg(dev, "Error Active\n"); cf->can_id |= CAN_ERR_PROT; cf->data[2] = CAN_ERR_PROT_ACTIVE; - /* fall through */ + fallthrough; case CAN_STATE_ERROR_WARNING: reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF; reg_ier = AT91_IRQ_ERRP; diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c index 6ad83a881039..9469d4421afe 100644 --- a/drivers/net/can/peak_canfd/peak_pciefd_main.c +++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c @@ -659,7 +659,7 @@ static int pciefd_can_probe(struct pciefd_board *pciefd) pciefd_can_writereg(priv, CANFD_CLK_SEL_80MHZ, PCIEFD_REG_CAN_CLK_SEL); - /* fall through */ + fallthrough; case CANFD_CLK_SEL_80MHZ: priv->ucan.can.clock.freq = 80 * 1000 * 1000; break; diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c index d7222ba46622..d7c2ec529b8f 100644 --- a/drivers/net/can/sja1000/sja1000_platform.c +++ b/drivers/net/can/sja1000/sja1000_platform.c @@ -150,7 +150,7 @@ static void sp_populate_of(struct sja1000_priv *priv, struct device_node *of) priv->read_reg = sp_read_reg16; priv->write_reg = sp_write_reg16; break; - case 1: /* fallthrough */ + case 1: default: priv->read_reg = sp_read_reg8; priv->write_reg = sp_write_reg8; diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index 91cdc0a2b1a7..b4a39f0449ba 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -153,7 +153,7 @@ static void slc_bump(struct slcan *sl) switch (*cmd) { case 'r': cf.can_id = CAN_RTR_FLAG; - /* fallthrough */ + fallthrough; case 't': /* store dlc ASCII value and terminate SFF CAN ID string */ cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN]; @@ -163,7 +163,7 @@ static void slc_bump(struct slcan *sl) break; case 'R': cf.can_id = CAN_RTR_FLAG; - /* fallthrough */ + fallthrough; case 'T': cf.can_id |= CAN_EFF_FLAG; /* store dlc ASCII value and terminate EFF CAN ID string */ diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index 5009ff294941..d17608870f2d 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -864,7 +864,7 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id) if (new_state >= CAN_STATE_ERROR_WARNING && new_state <= CAN_STATE_BUS_OFF) priv->can.can_stats.error_warning++; - /* fall through */ + fallthrough; case CAN_STATE_ERROR_WARNING: if (new_state >= CAN_STATE_ERROR_PASSIVE && new_state <= CAN_STATE_BUS_OFF) diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index d2539c95adb6..66d0198e7834 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -415,7 +415,7 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n, new_state = CAN_STATE_ERROR_WARNING; break; } - /* fall through */ + fallthrough; case CAN_STATE_ERROR_WARNING: if (n & PCAN_USB_ERROR_BUS_HEAVY) { diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 
0b7766b715fd..d91df34e7fa8 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -345,7 +345,7 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb, default: netdev_warn(netdev, "tx urb submitting failed err=%d\n", err); - /* fall through */ + fallthrough; case -ENOENT: /* cable unplugged */ stats->tx_dropped++; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index 53cb2f72bdd0..1689ab387612 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c @@ -133,10 +133,10 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, int id, ...) switch (id) { case PCAN_USBPRO_TXMSG8: i += 4; - /* fall through */ + fallthrough; case PCAN_USBPRO_TXMSG4: i += 4; - /* fall through */ + fallthrough; case PCAN_USBPRO_TXMSG0: *pc++ = va_arg(ap, int); *pc++ = va_arg(ap, int); diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 0837ae0e0c5e..e731db900ee0 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1061,7 +1061,7 @@ static void b53_force_port_config(struct b53_device *dev, int port, switch (speed) { case 2000: reg |= PORT_OVERRIDE_SPEED_2000M; - /* fallthrough */ + fallthrough; case SPEED_1000: reg |= PORT_OVERRIDE_SPEED_1000M; break; diff --git a/drivers/net/dsa/b53/b53_serdes.c b/drivers/net/dsa/b53/b53_serdes.c index 629bf14128a2..5ae3d9783b68 100644 --- a/drivers/net/dsa/b53/b53_serdes.c +++ b/drivers/net/dsa/b53/b53_serdes.c @@ -170,7 +170,7 @@ void b53_serdes_phylink_validate(struct b53_device *dev, int port, switch (lane) { case 0: phylink_set(supported, 2500baseX_Full); - /* fallthrough */ + fallthrough; case 1: phylink_set(supported, 1000baseX_Full); break; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index bafddb35f3a9..5ebff986a1ac 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -566,7 +566,7 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port, switch (state->interface) { case PHY_INTERFACE_MODE_RGMII: id_mode_dis = 1; - /* fallthrough */ + fallthrough; case PHY_INTERFACE_MODE_RGMII_TXID: port_mode = EXT_GPHY; break; diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index dc999406ce86..3cb22d149813 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -1083,7 +1083,7 @@ static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port) interface = PHY_INTERFACE_MODE_GMII; if (gbit) break; - /* fall through */ + fallthrough; case 0: interface = PHY_INTERFACE_MODE_MII; break; diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 8dcb8a49ab67..0c37ddb58c1e 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -566,7 +566,7 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface) case P5_INTF_SEL_PHY_P0: /* MT7530_P5_MODE_GPHY_P0: 2nd GMAC -> P5 -> P0 */ val |= MHWTRAP_PHY0_SEL; - /* fall through */ + fallthrough; case P5_INTF_SEL_PHY_P4: /* MT7530_P5_MODE_GPHY_P4: 2nd GMAC -> P5 -> P4 */ val &= ~MHWTRAP_P5_MAC_SEL & ~MHWTRAP_P5_DIS; diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 7a71c9902e73..f0dbc05e30a4 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -875,7 +875,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip, break; case 
STATS_TYPE_BANK1: reg = bank1_select; - /* fall through */ + fallthrough; case STATS_TYPE_BANK0: reg |= s->reg | histogram; mv88e6xxx_g1_stats_read(chip, reg, &low); diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c index 139d0120f511..667f38c9e4c6 100644 --- a/drivers/net/ethernet/3com/3c509.c +++ b/drivers/net/ethernet/3com/3c509.c @@ -1259,14 +1259,14 @@ el3_up(struct net_device *dev) pr_cont("Forcing 3c5x9b full-duplex mode"); break; } - /* fall through */ + fallthrough; case 8: /* set full-duplex mode based on eeprom config setting */ if ((sw_info & 0x000f) && (sw_info & 0x8000)) { pr_cont("Setting 3c5x9b full-duplex mode (from EEPROM configuration bit)"); break; } - /* fall through */ + fallthrough; default: /* xcvr=(0 || 4) OR user has an old 3c5x9 non "B" model */ pr_cont("Setting 3c5x9/3c5x9B half-duplex mode"); diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c index bd0ada4e81b0..f66e7fb9a2bb 100644 --- a/drivers/net/ethernet/3com/3c574_cs.c +++ b/drivers/net/ethernet/3com/3c574_cs.c @@ -1046,7 +1046,7 @@ static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) switch(cmd) { case SIOCGMIIPHY: /* Get the address of the PHY in use. */ data->phy_id = phy; - /* fall through */ + fallthrough; case SIOCGMIIREG: /* Read the specified MII register. */ { int saved_window; diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c index 08db4c9da2fa..a00b36f91d9f 100644 --- a/drivers/net/ethernet/8390/axnet_cs.c +++ b/drivers/net/ethernet/8390/axnet_cs.c @@ -610,7 +610,7 @@ static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) switch (cmd) { case SIOCGMIIPHY: data->phy_id = info->phy_id; - /* Fall through */ + fallthrough; case SIOCGMIIREG: /* Read MII PHY register. */ data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f); return 0; diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c index 645efac6310d..164c3ed550bf 100644 --- a/drivers/net/ethernet/8390/pcnet_cs.c +++ b/drivers/net/ethernet/8390/pcnet_cs.c @@ -1108,7 +1108,7 @@ static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) switch (cmd) { case SIOCGMIIPHY: data->phy_id = info->phy_id; - /* fall through */ + fallthrough; case SIOCGMIIREG: /* Read MII PHY register. 
*/ data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f); return 0; diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c index 6234fcd844ee..696517eae77f 100644 --- a/drivers/net/ethernet/alacritech/slicoss.c +++ b/drivers/net/ethernet/alacritech/slicoss.c @@ -1712,13 +1712,13 @@ static bool slic_is_fiber(unsigned short subdev) { switch (subdev) { /* Mojave */ - case PCI_SUBDEVICE_ID_ALACRITECH_1000X1F: /* fallthrough */ - case PCI_SUBDEVICE_ID_ALACRITECH_SES1001F: /* fallthrough */ + case PCI_SUBDEVICE_ID_ALACRITECH_1000X1F: + case PCI_SUBDEVICE_ID_ALACRITECH_SES1001F: fallthrough; /* Oasis */ - case PCI_SUBDEVICE_ID_ALACRITECH_SEN2002XF: /* fallthrough */ - case PCI_SUBDEVICE_ID_ALACRITECH_SEN2001XF: /* fallthrough */ - case PCI_SUBDEVICE_ID_ALACRITECH_SEN2104EF: /* fallthrough */ - case PCI_SUBDEVICE_ID_ALACRITECH_SEN2102EF: /* fallthrough */ + case PCI_SUBDEVICE_ID_ALACRITECH_SEN2002XF: + case PCI_SUBDEVICE_ID_ALACRITECH_SEN2001XF: + case PCI_SUBDEVICE_ID_ALACRITECH_SEN2104EF: + case PCI_SUBDEVICE_ID_ALACRITECH_SEN2102EF: return true; } return false; diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c index ac86fcae1582..8470c836fa18 100644 --- a/drivers/net/ethernet/alteon/acenic.c +++ b/drivers/net/ethernet/alteon/acenic.c @@ -547,7 +547,7 @@ static int acenic_probe_one(struct pci_dev *pdev, ap->name); break; } - /* Fall through */ + fallthrough; case PCI_VENDOR_ID_SGI: printk(KERN_INFO "%s: SGI AceNIC ", ap->name); break; diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index b6c43b58ed3d..960d483e8997 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -1475,7 +1475,7 @@ static int amd8111e_ioctl(struct net_device *dev , struct ifreq *ifr, int cmd) case SIOCGMIIPHY: data->phy_id = lp->ext_phy_addr; - /* fallthru */ + fallthrough; case SIOCGMIIREG: spin_lock_irq(&lp->lock); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 43294a148f8a..4ba75551cb17 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1538,7 +1538,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, /* PTP v2, UDP, any kind of event packet */ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); - /* Fall through - to PTP v1, UDP, any kind of event packet */ + fallthrough; /* to PTP v1, UDP, any kind of event packet */ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); @@ -1549,7 +1549,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, /* PTP v2, UDP, Sync packet */ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); - /* Fall through - to PTP v1, UDP, Sync packet */ + fallthrough; /* to PTP v1, UDP, Sync packet */ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); @@ -1560,7 +1560,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, /* PTP v2, UDP, Delay_req packet */ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); - /* Fall through - to PTP v1, UDP, Delay_req packet */ + fallthrough; /* to PTP v1, UDP, Delay_req packet */ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: XGMAC_SET_BITS(mac_tscr, 
MAC_TSCR, TSIPV4ENA, 1); XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c index 34d18302b1a3..a5fd161ab5ee 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c @@ -217,7 +217,7 @@ static int bgmac_probe(struct bcma_device *core) /* BCM 471X/535X family */ case BCMA_CHIP_ID_BCM4716: bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; - /* fallthrough */ + fallthrough; case BCMA_CHIP_ID_BCM47162: bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL2; bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK; diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index 6795b6d95f54..f37f1c58f368 100644 --- a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -131,7 +131,7 @@ static void bgmac_nicpm_speed_set(struct net_device *net_dev) switch (bgmac->net_dev->phydev->speed) { default: netdev_err(net_dev, "Unsupported speed. Defaulting to 1000Mb\n"); - /* fall through */ + fallthrough; case SPEED_1000: val |= NICPM_IOMUX_CTRL_SPD_1000M << NICPM_IOMUX_CTRL_SPD_SHIFT; break; diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index c8cc14eadbb4..3e8a179f39db 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -1337,13 +1337,13 @@ bnx2_set_mac_link(struct bnx2 *bp) val |= BNX2_EMAC_MODE_PORT_MII_10M; break; } - /* fall through */ + fallthrough; case SPEED_100: val |= BNX2_EMAC_MODE_PORT_MII; break; case SPEED_2500: val |= BNX2_EMAC_MODE_25G_MODE; - /* fall through */ + fallthrough; case SPEED_1000: val |= BNX2_EMAC_MODE_PORT_GMII; break; @@ -1995,26 +1995,26 @@ bnx2_remote_phy_event(struct bnx2 *bp) switch (speed) { case BNX2_LINK_STATUS_10HALF: bp->duplex = DUPLEX_HALF; - /* fall through */ + fallthrough; case BNX2_LINK_STATUS_10FULL: bp->line_speed = SPEED_10; break; case BNX2_LINK_STATUS_100HALF: bp->duplex = DUPLEX_HALF; - /* fall through */ + fallthrough; case BNX2_LINK_STATUS_100BASE_T4: case BNX2_LINK_STATUS_100FULL: bp->line_speed = SPEED_100; break; case BNX2_LINK_STATUS_1000HALF: bp->duplex = DUPLEX_HALF; - /* fall through */ + fallthrough; case BNX2_LINK_STATUS_1000FULL: bp->line_speed = SPEED_1000; break; case BNX2_LINK_STATUS_2500HALF: bp->duplex = DUPLEX_HALF; - /* fall through */ + fallthrough; case BNX2_LINK_STATUS_2500FULL: bp->line_speed = SPEED_2500; break; @@ -7856,7 +7856,7 @@ bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCGMIIPHY: data->phy_id = bp->phy_addr; - /* fallthru */ + fallthrough; case SIOCGMIIREG: { u32 mii_regval; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 1426c691c7c4..4e85e7dbc2be 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -4712,14 +4712,14 @@ static void bnx2x_sync_link(struct link_params *params, LINK_STATUS_SPEED_AND_DUPLEX_MASK) { case LINK_10THD: vars->duplex = DUPLEX_HALF; - /* Fall thru */ + fallthrough; case LINK_10TFD: vars->line_speed = SPEED_10; break; case LINK_100TXHD: vars->duplex = DUPLEX_HALF; - /* Fall thru */ + fallthrough; case LINK_100T4: case LINK_100TXFD: vars->line_speed = SPEED_100; @@ -4727,14 +4727,14 @@ static void bnx2x_sync_link(struct link_params *params, case LINK_1000THD: vars->duplex = DUPLEX_HALF; - /* Fall thru */ + fallthrough; case 
LINK_1000TFD: vars->line_speed = SPEED_1000; break; case LINK_2500THD: vars->duplex = DUPLEX_HALF; - /* Fall thru */ + fallthrough; case LINK_2500TFD: vars->line_speed = SPEED_2500; break; @@ -6339,7 +6339,7 @@ int bnx2x_set_led(struct link_params *params, */ if (!vars->link_up) break; - /* fall through */ + fallthrough; case LED_MODE_ON: if (((params->phy[EXT_PHY1].type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) || @@ -12508,13 +12508,13 @@ static void bnx2x_phy_def_cfg(struct link_params *params, switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { case PORT_FEATURE_LINK_SPEED_10M_HALF: phy->req_duplex = DUPLEX_HALF; - /* fall through */ + fallthrough; case PORT_FEATURE_LINK_SPEED_10M_FULL: phy->req_line_speed = SPEED_10; break; case PORT_FEATURE_LINK_SPEED_100M_HALF: phy->req_duplex = DUPLEX_HALF; - /* fall through */ + fallthrough; case PORT_FEATURE_LINK_SPEED_100M_FULL: phy->req_line_speed = SPEED_100; break; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 7f24d2689fdd..3c543dd7a8f3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -8600,11 +8600,11 @@ int bnx2x_set_int_mode(struct bnx2x *bp) bp->num_queues, 1 + bp->num_cnic_queues); - /* fall through */ + fallthrough; case BNX2X_INT_MODE_MSI: bnx2x_enable_msi(bp); - /* fall through */ + fallthrough; case BNX2X_INT_MODE_INTX: bp->num_ethernet_queues = 1; bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 80d250a6d048..e26f4da5a6d7 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -3258,7 +3258,7 @@ static int bnx2x_mcast_validate_e2(struct bnx2x *bp, /* DEL command deletes all currently configured MACs */ case BNX2X_MCAST_CMD_DEL: o->set_registry_size(o, 0); - /* fall through */ + fallthrough; /* RESTORE command will restore the entire multicast configuration */ case BNX2X_MCAST_CMD_RESTORE: @@ -3592,7 +3592,7 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp, /* DEL command deletes all currently configured MACs */ case BNX2X_MCAST_CMD_DEL: o->set_registry_size(o, 0); - /* fall through */ + fallthrough; /* RESTORE command will restore the entire multicast configuration */ case BNX2X_MCAST_CMD_RESTORE: diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index b4476f44e386..9c2f51f23035 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1809,7 +1809,7 @@ get_vf: DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n", vf->abs_vfid, qidx); bnx2x_vf_handle_rss_update_eqe(bp, vf); - /* fall through */ + fallthrough; case EVENT_RING_OPCODE_VF_FLR: /* Do nothing for now */ return 0; @@ -2207,7 +2207,7 @@ int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf) rc = bnx2x_vf_close(bp, vf); if (rc) goto op_err; - /* Fall through - to release resources */ + fallthrough; /* to release resources */ case VF_ACQUIRED: DP(BNX2X_MSG_IOV, "about to free resources\n"); bnx2x_vf_free_resc(bp, vf); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 31fb5a28e1c4..f92fd8863f48 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1923,7 +1923,7 @@ u32 bnxt_fw_health_readl(struct bnxt 
*bp, int reg_idx) break; case BNXT_FW_HEALTH_REG_TYPE_GRC: reg_off = fw_health->mapped_regs[reg_idx]; - /* fall through */ + fallthrough; case BNXT_FW_HEALTH_REG_TYPE_BAR0: val = readl(bp->bar0 + reg_off); break; @@ -1966,11 +1966,11 @@ static int bnxt_async_event_process(struct bnxt *bp, } set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); } - /* fall through */ + fallthrough; case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE: set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event); - /* fall through */ + fallthrough; case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); break; @@ -9765,7 +9765,7 @@ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCGMIIPHY: mdio->phy_id = bp->link_info.phy_addr; - /* fallthru */ + fallthrough; case SIOCGMIIREG: { u16 mii_regval = 0; @@ -11022,7 +11022,7 @@ static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx) writel(reg_off & BNXT_GRC_BASE_MASK, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000; - /* fall through */ + fallthrough; case BNXT_FW_HEALTH_REG_TYPE_BAR0: writel(val, bp->bar0 + reg_off); break; @@ -11135,7 +11135,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) } bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; } - /* fall through */ + fallthrough; case BNXT_FW_RESET_STATE_RESET_FW: bnxt_reset_all(bp); bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; @@ -11158,7 +11158,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) } pci_set_master(bp->pdev); bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW; - /* fall through */ + fallthrough; case BNXT_FW_RESET_STATE_POLL_FW: bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; rc = __bnxt_hwrm_ver_get(bp, true); @@ -11173,7 +11173,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) } bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING; - /* fall through */ + fallthrough; case BNXT_FW_RESET_STATE_OPENING: while (!rtnl_trylock()) { bnxt_queue_fw_reset_work(bp, HZ / 10); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 64da654f1038..17934cd87a4a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1073,7 +1073,7 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd) if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4) cmd->data |= RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fall through */ + fallthrough; case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: case AH_V4_FLOW: @@ -1092,7 +1092,7 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd) if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6) cmd->data |= RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fall through */ + fallthrough; case SCTP_V6_FLOW: case AH_ESP_V6_FLOW: case AH_V6_FLOW: diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 2704a4709bc7..fcc262064766 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -201,10 +201,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, break; default: bpf_warn_invalid_xdp_action(act); - /* Fall thru */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(bp->dev, xdp_prog, act); - /* Fall thru */ + 
fallthrough; case XDP_DROP: bnxt_reuse_rx_data(rxr, cons, page); break; diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index c5cca63b8571..84536292b031 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -3311,7 +3311,7 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info) } case CNIC_CTL_FCOE_STATS_GET_CMD: ulp_type = CNIC_ULP_FCOE; - /* fall through */ + fallthrough; case CNIC_CTL_ISCSI_STATS_GET_CMD: cnic_hold(dev); cnic_copy_ulp_stats(dev, ulp_type); @@ -4044,7 +4044,7 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe) l4kcqe->status, l5kcqe->completion_status); opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; } - /* Fall through */ + fallthrough; case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: case L4_KCQE_OPCODE_VALUE_RESET_COMP: diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 1fecc25767bd..0ca8436d2e9d 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1185,10 +1185,10 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) continue; case BCMGENET_STAT_RUNT: offset += BCMGENET_STAT_OFFSET; - /* fall through */ + fallthrough; case BCMGENET_STAT_MIB_TX: offset += BCMGENET_STAT_OFFSET; - /* fall through */ + fallthrough; case BCMGENET_STAT_MIB_RX: val = bcmgenet_umac_readl(priv, UMAC_MIB_START + j + offset); diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 511d553a4d11..6fb6c3556285 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -192,7 +192,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) switch (priv->phy_interface) { case PHY_INTERFACE_MODE_INTERNAL: phy_name = "internal PHY"; - /* fall through */ + fallthrough; case PHY_INTERFACE_MODE_MOCA: /* Irrespective of the actually configured PHY speed (100 or * 1000) GENETv4 only has an internal GPHY so we will just end diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index ebff1fc0d8ce..9894594d957d 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -715,7 +715,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum) case TG3_APE_LOCK_GPIO: if (tg3_asic_rev(tp) == ASIC_REV_5761) return 0; - /* fall through */ + fallthrough; case TG3_APE_LOCK_GRC: case TG3_APE_LOCK_MEM: if (!tp->pci_fn) @@ -776,7 +776,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum) case TG3_APE_LOCK_GPIO: if (tg3_asic_rev(tp) == ASIC_REV_5761) return; - /* fall through */ + fallthrough; case TG3_APE_LOCK_GRC: case TG3_APE_LOCK_MEM: if (!tp->pci_fn) @@ -1586,7 +1586,7 @@ static int tg3_mdio_init(struct tg3 *tp) phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE; if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE; - /* fall through */ + fallthrough; case PHY_ID_RTL8211C: phydev->interface = PHY_INTERFACE_MODE_RGMII; break; @@ -2114,7 +2114,7 @@ static int tg3_phy_init(struct tg3 *tp) phy_support_asym_pause(phydev); break; } - /* fall through */ + fallthrough; case PHY_INTERFACE_MODE_MII: phy_set_max_speed(phydev, SPEED_100); phy_support_asym_pause(phydev); @@ -4390,7 +4390,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) MII_TG3_DSP_TAP26_RMRXSTO | MII_TG3_DSP_TAP26_OPCSINPT; 
tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); - /* Fall through */ + fallthrough; case ASIC_REV_5720: case ASIC_REV_5762: if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) @@ -4538,7 +4538,7 @@ static int tg3_phy_pull_config(struct tg3 *tp) tp->link_config.speed = SPEED_1000; break; } - /* Fall through */ + fallthrough; default: goto done; } @@ -5209,7 +5209,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp, if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) ap->state = ANEG_STATE_AN_ENABLE; - /* fall through */ + fallthrough; case ANEG_STATE_AN_ENABLE: ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); if (ap->flags & MR_AN_ENABLE) { @@ -5239,7 +5239,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp, ret = ANEG_TIMER_ENAB; ap->state = ANEG_STATE_RESTART; - /* fall through */ + fallthrough; case ANEG_STATE_RESTART: delta = ap->cur_time - ap->link_time; if (delta > ANEG_STATE_SETTLE_TIME) @@ -5282,7 +5282,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp, ap->state = ANEG_STATE_ACK_DETECT; - /* fall through */ + fallthrough; case ANEG_STATE_ACK_DETECT: if (ap->ack_match != 0) { if ((ap->rxconfig & ~ANEG_CFG_ACK) == @@ -10720,40 +10720,40 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) switch (limit) { case 16: tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0); - /* fall through */ + fallthrough; case 15: tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0); - /* fall through */ + fallthrough; case 14: tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0); - /* fall through */ + fallthrough; case 13: tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0); - /* fall through */ + fallthrough; case 12: tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0); - /* fall through */ + fallthrough; case 11: tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0); - /* fall through */ + fallthrough; case 10: tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0); - /* fall through */ + fallthrough; case 9: tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0); - /* fall through */ + fallthrough; case 8: tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0); - /* fall through */ + fallthrough; case 7: tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0); - /* fall through */ + fallthrough; case 6: tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0); - /* fall through */ + fallthrough; case 5: tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0); - /* fall through */ + fallthrough; case 4: /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */ case 3: @@ -13998,7 +13998,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCGMIIPHY: data->phy_id = tp->phy_addr; - /* fall through */ + fallthrough; case SIOCGMIIREG: { u32 mii_regval; @@ -17136,7 +17136,7 @@ static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val) val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE; break; } - /* fallthrough */ + fallthrough; case 128: default: val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; @@ -17151,28 +17151,28 @@ static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val) DMA_RWCTRL_WRITE_BNDRY_16); break; } - /* fallthrough */ + fallthrough; case 32: if (goal == BOUNDARY_SINGLE_CACHELINE) { val |= (DMA_RWCTRL_READ_BNDRY_32 | DMA_RWCTRL_WRITE_BNDRY_32); break; } - /* fallthrough */ + fallthrough; case 64: if (goal == BOUNDARY_SINGLE_CACHELINE) { val |= (DMA_RWCTRL_READ_BNDRY_64 | DMA_RWCTRL_WRITE_BNDRY_64); break; } - /* fallthrough */ + fallthrough; case 128: if (goal == BOUNDARY_SINGLE_CACHELINE) { val |= (DMA_RWCTRL_READ_BNDRY_128 | DMA_RWCTRL_WRITE_BNDRY_128); break; } - /* fallthrough */ + fallthrough; case 256: val 
|= (DMA_RWCTRL_READ_BNDRY_256 | DMA_RWCTRL_WRITE_BNDRY_256); diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index 49358d42a0e2..b9dd06b12945 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -321,7 +321,7 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event) case IOC_E_PFFAILED: case IOC_E_HWERROR: del_timer(&ioc->ioc_timer); - /* fall through */ + fallthrough; case IOC_E_TIMEOUT: ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); @@ -780,7 +780,7 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event) case IOCPF_E_INITFAIL: del_timer(&ioc->iocpf_timer); - /* fall through */ + fallthrough; case IOCPF_E_TIMEOUT: bfa_nw_ioc_hw_sem_release(ioc); @@ -849,7 +849,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event) case IOCPF_E_FAIL: del_timer(&ioc->iocpf_timer); - /* fall through*/ + fallthrough; case IOCPF_E_TIMEOUT: bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c index 40107a9bd120..a2c983f56b00 100644 --- a/drivers/net/ethernet/brocade/bna/bna_enet.c +++ b/drivers/net/ethernet/brocade/bna/bna_enet.c @@ -1084,7 +1084,7 @@ bna_enet_sm_cfg_wait(struct bna_enet *enet, case ENET_E_CHLD_STOPPED: bna_enet_rx_start(enet); - /* Fall through */ + fallthrough; case ENET_E_FWRESP_PAUSE: if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) { enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED; diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c index b5ecbfe13ab0..2623a0da4682 100644 --- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c +++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c @@ -1636,7 +1636,7 @@ bna_bfi_rx_enet_start(struct bna_rx *rx) &q1->qpt); cfg_req->q_cfg[i].qs.rx_buffer_size = htons((u16)q1->buffer_size); - /* Fall through */ + fallthrough; case BNA_RXP_SINGLE: /* Large/Single RxQ */ diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c index 31ebf3ee7ec0..283918aeb741 100644 --- a/drivers/net/ethernet/cadence/macb_ptp.c +++ b/drivers/net/ethernet/cadence/macb_ptp.c @@ -460,7 +460,7 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd) case HWTSTAMP_TX_ONESTEP_SYNC: if (gem_ptp_set_one_step_sync(bp, 1) != 0) return -ERANGE; - /* fall through */ + fallthrough; case HWTSTAMP_TX_ON: tx_bd_control = TSTAMP_ALL_FRAMES; break; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index e73bc211779a..8e0ed01e7f03 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -977,15 +977,14 @@ static void octeon_destroy_resources(struct octeon_device *oct) schedule_timeout_uninterruptible(HZ / 10); - /* fallthrough */ + fallthrough; case OCT_DEV_HOST_OK: - /* fallthrough */ case OCT_DEV_CONSOLE_INIT_DONE: /* Remove any consoles */ octeon_remove_consoles(oct); - /* fallthrough */ + fallthrough; case OCT_DEV_IO_QUEUES_DONE: if (lio_wait_for_instr_fetch(oct)) dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); @@ -1027,7 +1026,7 @@ static void octeon_destroy_resources(struct octeon_device *oct) octeon_free_sc_done_list(oct); octeon_free_sc_zombie_list(oct); - /* fallthrough */ + fallthrough; case OCT_DEV_INTR_SET_DONE: /* Disable interrupts */ 
oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); @@ -1062,17 +1061,17 @@ static void octeon_destroy_resources(struct octeon_device *oct) kfree(oct->irq_name_storage); oct->irq_name_storage = NULL; - /* fallthrough */ + fallthrough; case OCT_DEV_MSIX_ALLOC_VECTOR_DONE: if (OCTEON_CN23XX_PF(oct)) octeon_free_ioq_vector(oct); - /* fallthrough */ + fallthrough; case OCT_DEV_MBOX_SETUP_DONE: if (OCTEON_CN23XX_PF(oct)) oct->fn_list.free_mbox(oct); - /* fallthrough */ + fallthrough; case OCT_DEV_IN_RESET: case OCT_DEV_DROQ_INIT_DONE: /* Wait for any pending operations */ @@ -1095,11 +1094,11 @@ static void octeon_destroy_resources(struct octeon_device *oct) } } - /* fallthrough */ + fallthrough; case OCT_DEV_RESP_LIST_INIT_DONE: octeon_delete_response_list(oct); - /* fallthrough */ + fallthrough; case OCT_DEV_INSTR_QUEUE_INIT_DONE: for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { if (!(oct->io_qmask.iq & BIT_ULL(i))) @@ -1110,16 +1109,16 @@ static void octeon_destroy_resources(struct octeon_device *oct) if (oct->sriov_info.sriov_enabled) pci_disable_sriov(oct->pci_dev); #endif - /* fallthrough */ + fallthrough; case OCT_DEV_SC_BUFF_POOL_INIT_DONE: octeon_free_sc_buffer_pool(oct); - /* fallthrough */ + fallthrough; case OCT_DEV_DISPATCH_INIT_DONE: octeon_delete_dispatch_list(oct); cancel_delayed_work_sync(&oct->nic_poll_work.work); - /* fallthrough */ + fallthrough; case OCT_DEV_PCI_MAP_DONE: refcount = octeon_deregister_device(oct); @@ -1137,13 +1136,13 @@ static void octeon_destroy_resources(struct octeon_device *oct) octeon_unmap_pci_barx(oct, 0); octeon_unmap_pci_barx(oct, 1); - /* fallthrough */ + fallthrough; case OCT_DEV_PCI_ENABLE_DONE: pci_clear_master(oct->pci_dev); /* Disable the device, releasing the PCI INT */ pci_disable_device(oct->pci_dev); - /* fallthrough */ + fallthrough; case OCT_DEV_BEGIN_STATE: /* Nothing to be done here either */ break; @@ -2168,7 +2167,7 @@ static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) case SIOCSHWTSTAMP: if (lio->oct_dev->ptp_enable) return hwtstamp_ioctl(netdev, ifr); - /* fall through */ + fallthrough; default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 90ef21086f27..8c5879e31240 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -460,9 +460,8 @@ static void octeon_destroy_resources(struct octeon_device *oct) schedule_timeout_uninterruptible(HZ / 10); - /* fallthrough */ + fallthrough; case OCT_DEV_HOST_OK: - /* fallthrough */ case OCT_DEV_IO_QUEUES_DONE: if (lio_wait_for_instr_fetch(oct)) dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); @@ -504,7 +503,7 @@ static void octeon_destroy_resources(struct octeon_device *oct) octeon_free_sc_done_list(oct); octeon_free_sc_zombie_list(oct); - /* fall through */ + fallthrough; case OCT_DEV_INTR_SET_DONE: /* Disable interrupts */ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); @@ -533,15 +532,15 @@ static void octeon_destroy_resources(struct octeon_device *oct) else cn23xx_vf_ask_pf_to_do_flr(oct); - /* fallthrough */ + fallthrough; case OCT_DEV_MSIX_ALLOC_VECTOR_DONE: octeon_free_ioq_vector(oct); - /* fallthrough */ + fallthrough; case OCT_DEV_MBOX_SETUP_DONE: oct->fn_list.free_mbox(oct); - /* fallthrough */ + fallthrough; case OCT_DEV_IN_RESET: case OCT_DEV_DROQ_INIT_DONE: mdelay(100); @@ -551,11 +550,11 @@ static void octeon_destroy_resources(struct octeon_device *oct) 
octeon_delete_droq(oct, i); } - /* fallthrough */ + fallthrough; case OCT_DEV_RESP_LIST_INIT_DONE: octeon_delete_response_list(oct); - /* fallthrough */ + fallthrough; case OCT_DEV_INSTR_QUEUE_INIT_DONE: for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { if (!(oct->io_qmask.iq & BIT_ULL(i))) @@ -563,27 +562,27 @@ static void octeon_destroy_resources(struct octeon_device *oct) octeon_delete_instr_queue(oct, i); } - /* fallthrough */ + fallthrough; case OCT_DEV_SC_BUFF_POOL_INIT_DONE: octeon_free_sc_buffer_pool(oct); - /* fallthrough */ + fallthrough; case OCT_DEV_DISPATCH_INIT_DONE: octeon_delete_dispatch_list(oct); cancel_delayed_work_sync(&oct->nic_poll_work.work); - /* fallthrough */ + fallthrough; case OCT_DEV_PCI_MAP_DONE: octeon_unmap_pci_barx(oct, 0); octeon_unmap_pci_barx(oct, 1); - /* fallthrough */ + fallthrough; case OCT_DEV_PCI_ENABLE_DONE: pci_clear_master(oct->pci_dev); /* Disable the device, releasing the PCI INT */ pci_disable_device(oct->pci_dev); - /* fallthrough */ + fallthrough; case OCT_DEV_BEGIN_STATE: /* Nothing to be done here either */ break; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index 83dabcffc789..c7bdac79299a 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -522,7 +522,7 @@ static int nicvf_get_rss_hash_opts(struct nicvf *nic, case SCTP_V4_FLOW: case SCTP_V6_FLOW: info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* Fall through */ + fallthrough; case IPV4_FLOW: case IPV6_FLOW: info->data |= RXH_IP_SRC | RXH_IP_DST; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index c1378b5c780c..063e560d9c1b 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -594,10 +594,10 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, return true; default: bpf_warn_invalid_xdp_action(action); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(nic->netdev, prog, action); - /* fall through */ + fallthrough; case XDP_DROP: /* Check if it's a recycled page, if not * unmap the DMA mapping. 
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 42c6e9379882..387c357e1b8e 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -2543,7 +2543,7 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) !(data->phy_id & 0xe0e0)) data->phy_id = mdio_phy_id_c45(data->phy_id >> 8, data->phy_id & 0x1f); - /* FALLTHRU */ + fallthrough; case SIOCGMIIPHY: return mdio_mii_ioctl(&pi->phy.mdio, data, cmd); case SIOCCHIOCTL: diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c index b3e4118a15e7..9749d1239f58 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c @@ -136,7 +136,7 @@ again: if (e->state == L2T_STATE_STALE) e->state = L2T_STATE_VALID; spin_unlock_bh(&e->lock); - /* fall through */ + fallthrough; case L2T_STATE_VALID: /* fast-path, send the packet on */ return cxgb3_ofld_send(dev, skb); case L2T_STATE_RESOLVING: diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index c4864125fe02..a10a6862a9a4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -231,7 +231,7 @@ again: if (e->state == L2T_STATE_STALE) e->state = L2T_STATE_VALID; spin_unlock_bh(&e->lock); - /* fall through */ + fallthrough; case L2T_STATE_VALID: /* fast-path, send the packet on */ return t4_ofld_send(adap, skb); case L2T_STATE_RESOLVING: diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 8a56491bb034..fa3367966f4b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -7656,13 +7656,13 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, switch (nmac) { case 5: memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); - /* Fall through */ + fallthrough; case 4: memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); - /* Fall through */ + fallthrough; case 3: memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); - /* Fall through */ + fallthrough; case 2: memcpy(mac + 6, c.nmac0, sizeof(c.nmac0)); } diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index dbe8ee7e0e21..e2fe78e2e242 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -517,7 +517,7 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp, } cpl = (void *)p; } - /* Fall through */ + fallthrough; case CPL_SGE_EGR_UPDATE: { /* diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 6bc7e7ba38c3..552d89fdf54a 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -272,7 +272,7 @@ static netdev_features_t enic_features_check(struct sk_buff *skb, case ntohs(ETH_P_IPV6): if (!(enic->vxlan.flags & ENIC_VXLAN_INNER_IPV6)) goto out; - /* Fall through */ + fallthrough; case ntohs(ETH_P_IP): break; default: diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 7f7705138262..5c6c8c5ec747 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -385,7 +385,7 @@ static void dm9000_set_io(struct board_info *db, int byte_width) case 3: dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n"); - /* fall 
through */ + fallthrough; case 2: db->dumpblk = dm9000_dumpblk_16bit; db->outblk = dm9000_outblk_16bit; diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index 0ccd9994ad45..f9dd1aa9f2da 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -3203,7 +3203,7 @@ srom_map_media(struct net_device *dev) case SROM_10BASETF: if (!lp->params.fdx) return -1; lp->fdx = true; - /* fall through */ + fallthrough; case SROM_10BASET: if (lp->params.fdx && !lp->fdx) return -1; @@ -3225,7 +3225,7 @@ srom_map_media(struct net_device *dev) case SROM_100BASETF: if (!lp->params.fdx) return -1; lp->fdx = true; - /* fall through */ + fallthrough; case SROM_100BASET: if (lp->params.fdx && !lp->fdx) return -1; @@ -3239,7 +3239,7 @@ srom_map_media(struct net_device *dev) case SROM_100BASEFF: if (!lp->params.fdx) return -1; lp->fdx = true; - /* fall through */ + fallthrough; case SROM_100BASEF: if (lp->params.fdx && !lp->fdx) return -1; diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index 9db23527275a..3a8659c5da06 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -911,7 +911,7 @@ static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) data->phy_id = 1; else return -ENODEV; - /* Fall through */ + fallthrough; case SIOCGMIIREG: /* Read MII PHY register. */ if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) { diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index 5dcc66f60144..5a43be327f58 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -1443,7 +1443,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) switch(cmd) { case SIOCGMIIPHY: /* Get address of MII PHY in use. */ data->phy_id = ((struct netdev_private *)netdev_priv(dev))->phys[0] & 0x1f; - /* Fall Through */ + fallthrough; case SIOCGMIIREG: /* Read MII PHY register. 
*/ spin_lock_irq(&np->lock); diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index d6ed1d943762..99cc1c46fb30 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -571,7 +571,7 @@ static u32 convert_to_et_setting(struct be_adapter *adapter, u32 if_speeds) break; } } - /* fall through */ + fallthrough; case PHY_TYPE_SFP_PLUS_10GB: case PHY_TYPE_XFP_10GB: case PHY_TYPE_SFP_1GB: diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 43570f4911ea..fdff3b4723ba 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -945,7 +945,7 @@ static void dpaa_fq_setup(struct dpaa_priv *priv, break; case FQ_TYPE_TX_CONF_MQ: priv->conf_fqs[conf_cnt++] = &fq->fq_base; - /* fall through */ + fallthrough; case FQ_TYPE_TX_CONFIRM: dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq); break; diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 9db2a02fb531..1268996b7030 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -375,7 +375,7 @@ static int dpaa_get_hash_opts(struct net_device *dev, case UDP_V6_FLOW: if (priv->keygen_in_use) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* Fall through */ + fallthrough; case IPV4_FLOW: case IPV6_FLOW: case SCTP_V4_FLOW: diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 457106e761be..cf5383bb8331 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -376,10 +376,10 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv, break; default: bpf_warn_invalid_xdp_action(xdp_act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act); - /* fall through */ + fallthrough; case XDP_DROP: xdp_release_buf(priv, ch, addr); ch->stats.xdp_drop++; diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 645764abdaae..bb9887f98841 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -528,7 +528,7 @@ static void setup_sgmii_internal_phy(struct fman_mac *memac, case 100: tmp_reg16 |= IF_MODE_SGMII_SPEED_100M; break; - case 1000: /* fallthrough */ + case 1000: default: tmp_reg16 |= IF_MODE_SGMII_SPEED_1G; break; diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index c27df153f895..624b2eb6f01d 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -1344,10 +1344,10 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params) switch (port->port_type) { case FMAN_PORT_TYPE_RX: set_rx_dflt_cfg(port, params); - /* fall through */ + fallthrough; case FMAN_PORT_TYPE_TX: set_tx_dflt_cfg(port, params, &port->dts_params); - /* fall through */ + fallthrough; default: set_dflt_cfg(port, params); } diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index db791f60b884..714b501be7d0 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -1348,7 +1348,7 @@ static int 
adjust_enet_interface(struct ucc_geth_private *ugeth) switch (ugeth->max_speed) { case SPEED_10: upsmr |= UCC_GETH_UPSMR_R10M; - /* FALLTHROUGH */ + fallthrough; case SPEED_100: if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI) upsmr |= UCC_GETH_UPSMR_RMM; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 49624acf2473..4eb50296f653 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -305,7 +305,7 @@ static int __lb_setup(struct net_device *ndev, break; case MAC_LOOP_PHY_NONE: ret = hns_nic_config_phy_loopback(phy_dev, 0x0); - /* fall through */ + fallthrough; case MAC_LOOP_NONE: if (!ret && h->dev->ops->set_loopback) { if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 87776ce3539b..c2ea0348b2f7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2746,7 +2746,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, case HNS3_OL4_TYPE_MAC_IN_UDP: case HNS3_OL4_TYPE_NVGRE: skb->csum_level = 1; - /* fall through */ + fallthrough; case HNS3_OL4_TYPE_NO_TUN: l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 36575e72a915..d553ed7ee64c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3061,7 +3061,7 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data) * by first decoding the types of errors. 
*/ set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request); - /* fall through */ + fallthrough; case HCLGE_VECTOR0_EVENT_RST: hclge_reset_task_schedule(hdev); break; @@ -3686,12 +3686,10 @@ static int hclge_reset_prepare_up(struct hclge_dev *hdev) switch (hdev->reset_type) { case HNAE3_FUNC_RESET: - /* fall through */ case HNAE3_FLR_RESET: ret = hclge_set_all_vf_rst(hdev, false); break; case HNAE3_GLOBAL_RESET: - /* fall through */ case HNAE3_IMP_RESET: ret = hclge_set_rst_done(hdev); break; diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 0273fb7a9d01..3153d62cc73e 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -3247,7 +3247,7 @@ static int ehea_mem_notifier(struct notifier_block *nb, switch (action) { case MEM_CANCEL_OFFLINE: pr_info("memory offlining canceled"); - /* Fall through - re-add canceled memory block */ + fallthrough; /* re-add canceled memory block */ case MEM_ONLINE: pr_info("memory is going online"); diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 06248a7db7f2..c00b9097eeea 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -2319,7 +2319,7 @@ static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) switch (cmd) { case SIOCGMIIPHY: data->phy_id = dev->phy.address; - /* Fall through */ + fallthrough; case SIOCGMIIREG: data->val_out = emac_mdio_read(ndev, dev->phy.address, data->reg_num); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 63dde3bcf5bc..664e8ccc88d2 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -4079,7 +4079,6 @@ void e1000e_reset(struct e1000_adapter *adapter) case e1000_pch_lpt: case e1000_pch_spt: case e1000_pch_cnp: - fallthrough; case e1000_pch_tgp: case e1000_pch_adp: fc->refresh_time = 0xFFFF; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 4f05f6efe6af..d9c3a6b169f9 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -718,7 +718,6 @@ static void igb_cache_ring_register(struct igb_adapter *adapter) case e1000_i354: case e1000_i210: case e1000_i211: - fallthrough; default: for (; i < adapter->num_rx_queues; i++) adapter->rx_ring[i]->reg_idx = rbase_offset + i; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 832bbb8b05c8..dfcb1767acbb 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2205,10 +2205,10 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, break; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(pp->dev, prog, act); - /* fall through */ + fallthrough; case XDP_DROP: mvneta_xdp_put_buff(pp, rxq, xdp, sync, true); ret = MVNETA_XDP_DROPPED; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index d4a4e241333d..41d935d1aaf6 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -1638,7 +1638,7 @@ int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info) hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP; if (info->data & RXH_L4_B_2_3) hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP; - /* Fallthrough */ + 
fallthrough; case MVPP22_FLOW_IP4: case MVPP22_FLOW_IP6: if (info->data & RXH_L2DA) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 2a8a5842eaef..6e140d1b8967 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -5437,7 +5437,7 @@ static void mvpp2_phylink_validate(struct phylink_config *config, } if (state->interface != PHY_INTERFACE_MODE_NA) break; - /* Fall-through */ + fallthrough; case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_RXID: @@ -5451,7 +5451,7 @@ static void mvpp2_phylink_validate(struct phylink_config *config, phylink_set(mask, 1000baseX_Full); if (state->interface != PHY_INTERFACE_MODE_NA) break; - /* Fall-through */ + fallthrough; case PHY_INTERFACE_MODE_1000BASEX: case PHY_INTERFACE_MODE_2500BASEX: if (port->comphy || diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 36953d4f51c7..01a793105599 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -737,7 +737,7 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, else if (req->ctype == NIX_AQ_CTYPE_MCE) memcpy(mask, &req->mce_mask, sizeof(struct nix_rx_mce_s)); - /* Fall through */ + fallthrough; case NIX_AQ_INSTOP_INIT: if (req->ctype == NIX_AQ_CTYPE_RQ) memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s)); diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index b792f6306a64..6a930351cb23 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -2448,7 +2448,7 @@ static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCGMIIPHY: data->phy_id = hw->phy_addr; - /* fallthru */ + fallthrough; case SIOCGMIIREG: { u16 val = 0; spin_lock_bh(&hw->phy_lock); diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index cec8124301c7..344864275ed5 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -1376,7 +1376,7 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCGMIIPHY: data->phy_id = PHY_ADDR_MARV; - /* fallthru */ + fallthrough; case SIOCGMIIREG: { u16 val = 0; @@ -2764,7 +2764,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) case OP_RXCHKSVLAN: sky2_rx_tag(sky2, length); - /* fall through */ + fallthrough; case OP_RXCHKS: if (likely(dev->features & NETIF_F_RXCSUM)) sky2_rx_checksum(sky2, status); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 0870fe78ea38..6d2d60675ffd 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -228,7 +228,7 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode, if (!MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) goto err_phy; - /* fall through */ + fallthrough; case PHY_INTERFACE_MODE_RGMII_TXID: case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_ID: @@ -501,11 +501,11 @@ static void mtk_validate(struct phylink_config *config, case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: phylink_set(mask, 1000baseT_Half); - /* fall through */ + fallthrough; case PHY_INTERFACE_MODE_SGMII: phylink_set(mask, 1000baseT_Full); phylink_set(mask, 1000baseX_Full); 
- /* fall through */ + fallthrough; case PHY_INTERFACE_MODE_MII: case PHY_INTERFACE_MODE_RMII: case PHY_INTERFACE_MODE_REVMII: diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c index 7a04c626a2aa..bcd166911d44 100644 --- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c +++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c @@ -72,7 +72,7 @@ static int mlxfw_fsm_state_err(struct mlxfw_dev *mlxfw_dev, case MLXFW_FSM_STATE_ERR_BLOCKED_PENDING_RESET: MLXFW_ERR_MSG(mlxfw_dev, extack, "pending reset", err); break; - case MLXFW_FSM_STATE_ERR_OK: /* fall through */ + case MLXFW_FSM_STATE_ERR_OK: case MLXFW_FSM_STATE_ERR_MAX: MLXFW_ERR_MSG(mlxfw_dev, extack, "unknown error", err); break; @@ -155,7 +155,7 @@ mlxfw_fsm_reactivate_err(struct mlxfw_dev *mlxfw_dev, case MLXFW_FSM_REACTIVATE_STATUS_FW_ALREADY_ACTIVATED: MLXFW_REACT_ERR("fw already activated", err); break; - case MLXFW_FSM_REACTIVATE_STATUS_OK: /* fall through */ + case MLXFW_FSM_REACTIVATE_STATUS_OK: case MLXFW_FSM_REACTIVATE_STATUS_MAX: MLXFW_REACT_ERR("unexpected error", err); break; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 08d101138fbe..ec45a03140d7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -2289,21 +2289,21 @@ int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module) /* Here we need to get the module width according to the module type. */ switch (module_type) { - case MLXSW_REG_PMTM_MODULE_TYPE_C2C8X: /* fall through */ - case MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD: /* fall through */ + case MLXSW_REG_PMTM_MODULE_TYPE_C2C8X: + case MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD: case MLXSW_REG_PMTM_MODULE_TYPE_OSFP: return 8; - case MLXSW_REG_PMTM_MODULE_TYPE_C2C4X: /* fall through */ - case MLXSW_REG_PMTM_MODULE_TYPE_BP_4X: /* fall through */ + case MLXSW_REG_PMTM_MODULE_TYPE_C2C4X: + case MLXSW_REG_PMTM_MODULE_TYPE_BP_4X: case MLXSW_REG_PMTM_MODULE_TYPE_QSFP: return 4; - case MLXSW_REG_PMTM_MODULE_TYPE_C2C2X: /* fall through */ - case MLXSW_REG_PMTM_MODULE_TYPE_BP_2X: /* fall through */ - case MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD: /* fall through */ + case MLXSW_REG_PMTM_MODULE_TYPE_C2C2X: + case MLXSW_REG_PMTM_MODULE_TYPE_BP_2X: + case MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD: case MLXSW_REG_PMTM_MODULE_TYPE_DSFP: return 2; - case MLXSW_REG_PMTM_MODULE_TYPE_C2C1X: /* fall through */ - case MLXSW_REG_PMTM_MODULE_TYPE_BP_1X: /* fall through */ + case MLXSW_REG_PMTM_MODULE_TYPE_C2C1X: + case MLXSW_REG_PMTM_MODULE_TYPE_BP_1X: case MLXSW_REG_PMTM_MODULE_TYPE_SFP: return 1; default: diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index 44fa02cbb683..056eeb85be60 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -30,8 +30,8 @@ static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id, case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP: *qsfp = false; break; - case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP: /* fall-through */ - case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS: /* fall-through */ + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP: + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS: case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28: *qsfp = true; break; @@ -205,7 +205,7 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module, modinfo->type = ETH_MODULE_SFF_8436; modinfo->eeprom_len = 
ETH_MODULE_SFF_8436_MAX_LEN; break; - case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS: /* fall-through */ + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS: case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28: if (module_id == MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28 || module_rev_id >= diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c index 3fe878d7c94c..61719ec89808 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c @@ -259,8 +259,8 @@ static ssize_t mlxsw_hwmon_module_temp_fault_show(struct device *dev, */ fault = 1; break; - case MLXSW_REG_MTBR_NO_CONN: /* fall-through */ - case MLXSW_REG_MTBR_NO_TEMP_SENS: /* fall-through */ + case MLXSW_REG_MTBR_NO_CONN: + case MLXSW_REG_MTBR_NO_TEMP_SENS: case MLXSW_REG_MTBR_INDEX_NA: default: fault = 0; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index fdf9aa8314b2..4186e29119c2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -517,8 +517,8 @@ enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state) return MLXSW_REG_SPMS_STATE_FORWARDING; case BR_STATE_LEARNING: return MLXSW_REG_SPMS_STATE_LEARNING; - case BR_STATE_LISTENING: /* fall-through */ - case BR_STATE_DISABLED: /* fall-through */ + case BR_STATE_LISTENING: + case BR_STATE_DISABLED: case BR_STATE_BLOCKING: return MLXSW_REG_SPMS_STATE_DISCARDING; default: diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index f9ba59641b4d..5240bf11b6c4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -636,11 +636,11 @@ static inline unsigned int mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type) { switch (type) { - case MLXSW_SP_KVDL_ENTRY_TYPE_ADJ: /* fall through */ - case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET: /* fall through */ - case MLXSW_SP_KVDL_ENTRY_TYPE_PBS: /* fall through */ - case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR: /* fall through */ - case MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT: /* fall through */ + case MLXSW_SP_KVDL_ENTRY_TYPE_ADJ: + case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET: + case MLXSW_SP_KVDL_ENTRY_TYPE_PBS: + case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR: + case MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT: default: return 1; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 0521e9d48c45..24f1fd1f8d56 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1164,7 +1164,7 @@ mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id, addr_len = 4; addr_prefix_len = 32; break; - case MLXSW_SP_L3_PROTO_IPV6: /* fall through */ + case MLXSW_SP_L3_PROTO_IPV6: default: WARN_ON(1); return NULL; @@ -4555,14 +4555,14 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp, fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP; return 0; } - /* fall through */ + fallthrough; case RTN_BROADCAST: fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; return 0; case RTN_BLACKHOLE: fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE; return 0; - case RTN_UNREACHABLE: /* fall through */ + case RTN_UNREACHABLE: case RTN_PROHIBIT: /* Packets hitting these routes need to be trapped, but * can do so with a lower priority than packets directed @@ -5990,7 +5990,7 @@ static void 
mlxsw_sp_router_fib4_event_work(struct work_struct *work) mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info); fib_info_put(fib_work->fen_info.fi); break; - case FIB_EVENT_NH_ADD: /* fall through */ + case FIB_EVENT_NH_ADD: case FIB_EVENT_NH_DEL: mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event, fib_work->fnh_info.fib_nh); @@ -6050,7 +6050,7 @@ static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work) rtnl_lock(); mutex_lock(&mlxsw_sp->router->lock); switch (fib_work->event) { - case FIB_EVENT_ENTRY_REPLACE: /* fall through */ + case FIB_EVENT_ENTRY_REPLACE: case FIB_EVENT_ENTRY_ADD: replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE; @@ -6089,7 +6089,7 @@ static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work, struct fib_nh_notifier_info *fnh_info; switch (fib_work->event) { - case FIB_EVENT_ENTRY_REPLACE: /* fall through */ + case FIB_EVENT_ENTRY_REPLACE: case FIB_EVENT_ENTRY_DEL: fen_info = container_of(info, struct fib_entry_notifier_info, info); @@ -6099,7 +6099,7 @@ static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work, */ fib_info_hold(fib_work->fen_info.fi); break; - case FIB_EVENT_NH_ADD: /* fall through */ + case FIB_EVENT_NH_ADD: case FIB_EVENT_NH_DEL: fnh_info = container_of(info, struct fib_nh_notifier_info, info); @@ -6116,8 +6116,8 @@ static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work, int err; switch (fib_work->event) { - case FIB_EVENT_ENTRY_REPLACE: /* fall through */ - case FIB_EVENT_ENTRY_APPEND: /* fall through */ + case FIB_EVENT_ENTRY_REPLACE: + case FIB_EVENT_ENTRY_APPEND: case FIB_EVENT_ENTRY_DEL: fen6_info = container_of(info, struct fib6_entry_notifier_info, info); @@ -6136,13 +6136,13 @@ mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work, struct fib_notifier_info *info) { switch (fib_work->event) { - case FIB_EVENT_ENTRY_REPLACE: /* fall through */ - case FIB_EVENT_ENTRY_ADD: /* fall through */ + case FIB_EVENT_ENTRY_REPLACE: + case FIB_EVENT_ENTRY_ADD: case FIB_EVENT_ENTRY_DEL: memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info)); mr_cache_hold(fib_work->men_info.mfc); break; - case FIB_EVENT_VIF_ADD: /* fall through */ + case FIB_EVENT_VIF_ADD: case FIB_EVENT_VIF_DEL: memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info)); dev_hold(fib_work->ven_info.dev); @@ -6215,13 +6215,13 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, router = container_of(nb, struct mlxsw_sp_router, fib_nb); switch (event) { - case FIB_EVENT_RULE_ADD: /* fall through */ + case FIB_EVENT_RULE_ADD: case FIB_EVENT_RULE_DEL: err = mlxsw_sp_router_fib_rule_event(event, info, router->mlxsw_sp); return notifier_from_errno(err); - case FIB_EVENT_ENTRY_ADD: /* fall through */ - case FIB_EVENT_ENTRY_REPLACE: /* fall through */ + case FIB_EVENT_ENTRY_ADD: + case FIB_EVENT_ENTRY_REPLACE: case FIB_EVENT_ENTRY_APPEND: if (router->aborted) { NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. 
Not configuring route"); @@ -7277,7 +7277,7 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev, goto out; switch (event) { - case NETDEV_CHANGEMTU: /* fall through */ + case NETDEV_CHANGEMTU: case NETDEV_CHANGEADDR: err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif); break; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index 5c959a995199..1d18e41ab255 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -1523,12 +1523,12 @@ mlxsw_sp_span_trigger_ops_set(struct mlxsw_sp_span_trigger_entry *trigger_entry) enum mlxsw_sp_span_trigger_type type; switch (trigger_entry->trigger) { - case MLXSW_SP_SPAN_TRIGGER_INGRESS: /* fall-through */ + case MLXSW_SP_SPAN_TRIGGER_INGRESS: case MLXSW_SP_SPAN_TRIGGER_EGRESS: type = MLXSW_SP_SPAN_TRIGGER_TYPE_PORT; break; - case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP: /* fall-through */ - case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP: /* fall-through */ + case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP: + case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP: case MLXSW_SP_SPAN_TRIGGER_ECN: type = MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL; break; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index a26162b08b7d..72912afa6f72 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1297,7 +1297,7 @@ static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp, uip = be32_to_cpu(addr->addr4); sfd_proto = MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4; break; - case MLXSW_SP_L3_PROTO_IPV6: /* fall through */ + case MLXSW_SP_L3_PROTO_IPV6: default: WARN_ON(1); return -EOPNOTSUPP; @@ -2870,7 +2870,7 @@ static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work) fdb_info = &switchdev_work->fdb_info; mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false); break; - case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */ + case SWITCHDEV_FDB_ADD_TO_BRIDGE: case SWITCHDEV_FDB_DEL_TO_BRIDGE: /* These events are only used to potentially update an existing * SPAN mirror. 
@@ -3116,9 +3116,9 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused, switchdev_work->event = event; switch (event) { - case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */ - case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */ - case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */ + case SWITCHDEV_FDB_ADD_TO_DEVICE: + case SWITCHDEV_FDB_DEL_TO_DEVICE: + case SWITCHDEV_FDB_ADD_TO_BRIDGE: case SWITCHDEV_FDB_DEL_TO_BRIDGE: fdb_info = container_of(info, struct switchdev_notifier_fdb_info, @@ -3138,7 +3138,7 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused, */ dev_hold(dev); break; - case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE: /* fall through */ + case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE: case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE: INIT_WORK(&switchdev_work->work, mlxsw_sp_switchdev_vxlan_fdb_event_work); diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c index c533d06fbe3a..dcde496da7fb 100644 --- a/drivers/net/ethernet/microchip/lan743x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -548,7 +548,7 @@ static int lan743x_ethtool_get_rxnfc(struct net_device *netdev, case TCP_V4_FLOW:case UDP_V4_FLOW: case TCP_V6_FLOW:case UDP_V6_FLOW: rxnfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fall through */ + fallthrough; case IPV4_FLOW: case IPV6_FLOW: rxnfc->data |= RXH_IP_SRC | RXH_IP_DST; return 0; diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 867c680f5917..5abb7d2b0a9e 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -859,7 +859,7 @@ void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state) switch (state) { case BR_STATE_FORWARDING: ocelot->bridge_fwd_mask |= BIT(port); - /* Fallthrough */ + fallthrough; case BR_STATE_LEARNING: port_cfg |= ANA_PORT_PORT_CFG_LEARN_ENA; break; diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index c2867fe995bc..3de8430ee8c5 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -3081,7 +3081,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) switch(cmd) { case SIOCGMIIPHY: /* Get address of MII PHY in use. */ data->phy_id = np->phy_addr_external; - /* Fall Through */ + fallthrough; case SIOCGMIIREG: /* Read MII PHY register. 
*/ /* The phy_id is not enough to uniquely identify diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c index 4f1f90f5e178..78eba10300ae 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c @@ -3768,20 +3768,20 @@ vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1, VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA( itable[j]); - /* fall through */ + fallthrough; case 2: *data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)| VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA( itable[j]); - /* fall through */ + fallthrough; case 3: *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)| VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN | VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA( itable[j]); - /* fall through */ + fallthrough; case 4: *data1 |= VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)| diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c index 7c50e3dfb9d5..76c51da5b66f 100644 --- a/drivers/net/ethernet/netronome/nfp/crypto/tls.c +++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c @@ -296,7 +296,7 @@ nfp_net_tls_add(struct net_device *netdev, struct sock *sk, break; } #endif - /* fall through */ + fallthrough; case AF_INET: req_sz = sizeof(struct nfp_crypto_req_add_v4); ipv6 = false; diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index ff844e5cc41f..1cbe2c9f3959 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -297,7 +297,7 @@ nfp_fl_get_tun_from_act(struct nfp_app *app, case htons(GENEVE_UDP_PORT): if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE) return NFP_FL_TUNNEL_GENEVE; - /* FALLTHROUGH */ + fallthrough; default: return NFP_FL_TUNNEL_NONE; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index a050cb898782..f21cf1f40f98 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -289,7 +289,7 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) skb_stored = nfp_flower_lag_unprocessed_msg(app, skb); break; } - /* fall through */ + fallthrough; default: err_default: nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n", diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 4651fe417b7f..36356f96661d 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -784,7 +784,7 @@ nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len, case NFP_FL_ACTION_OPCODE_PRE_TUNNEL: if (tunnel_act) *tunnel_act = true; - /* fall through */ + fallthrough; case NFP_FL_ACTION_OPCODE_PRE_LAG: memcpy(act_dst + act_off, act_src + act_off, act_len); break; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c index b04b83687fe2..2643ea5948f4 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c @@ -137,7 +137,7 @@ static u16 nfp_swreg_to_unreg(swreg reg, bool is_dst) val; case NN_LM_MOD_DEC: lm_dec = true; - /* 
fall through */ + fallthrough; case NN_LM_MOD_INC: if (val) { pr_err("LM offset in inc/dev mode\n"); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 39ee23e8c0bf..21ea22694e47 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1940,10 +1940,10 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) continue; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(dp->netdev, xdp_prog, act); - /* fall through */ + fallthrough; case XDP_DROP: nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index a486008eb80a..252fe06f58aa 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -340,12 +340,12 @@ static int matching_bar(struct nfp_bar *bar, u32 tgt, u32 act, u32 tok, switch (maptype) { case NFP_PCIE_BAR_PCIE2CPP_MapType_TARGET: bartok = -1; - /* FALLTHROUGH */ + fallthrough; case NFP_PCIE_BAR_PCIE2CPP_MapType_BULK: baract = NFP_CPP_ACTION_RW; if (act == 0) act = NFP_CPP_ACTION_RW; - /* FALLTHROUGH */ + fallthrough; case NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED: break; default: diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c index 75f012444796..2260c2403a83 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c @@ -213,7 +213,7 @@ u64 nfp_rtsym_size(const struct nfp_rtsym *sym) return 0; default: pr_warn("rtsym '%s': unknown type: %d\n", sym->name, sym->type); - /* fall through */ + fallthrough; case NFP_RTSYM_TYPE_OBJECT: case NFP_RTSYM_TYPE_FUNCTION: return sym->size; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c index a26966fa40b9..dceec80fd642 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c @@ -410,7 +410,7 @@ static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter) case SPEED_1000 + HALF_DUPLEX: netdev_dbg(adapter->netdev, "Half Duplex is not supported at 1000 Mbps\n"); - /* fall through */ + fallthrough; case SPEED_1000 + FULL_DUPLEX: full_duplex_only: netdev_dbg(adapter->netdev, diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c index 647a1431b359..3da075307178 100644 --- a/drivers/net/ethernet/packetengines/yellowfin.c +++ b/drivers/net/ethernet/packetengines/yellowfin.c @@ -1356,7 +1356,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) switch(cmd) { case SIOCGMIIPHY: /* Get address of MII PHY in use. */ data->phy_id = np->phys[0] & 0x1f; - /* Fall Through */ + fallthrough; case SIOCGMIIREG: /* Read MII PHY register. 
*/ data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f); diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c index 66f45fce90fa..c3f50ddbe824 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c @@ -153,7 +153,7 @@ skip: case NETXEN_BRDTYPE_P3_4_GB_MM: supported |= SUPPORTED_Autoneg; advertising |= ADVERTISED_Autoneg; - /* fall through */ + fallthrough; case NETXEN_BRDTYPE_P2_SB31_10G_CX4: case NETXEN_BRDTYPE_P3_10G_CX4: case NETXEN_BRDTYPE_P3_10G_CX4_LP: @@ -182,7 +182,7 @@ skip: supported |= SUPPORTED_TP; check_sfp_module = netif_running(dev) && adapter->has_link_events; - /* fall through */ + fallthrough; case NETXEN_BRDTYPE_P2_SB31_10G: case NETXEN_BRDTYPE_P3_10G_XFP: supported |= SUPPORTED_FIBRE; diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 876743a79c1f..0e4cd8890cff 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -2046,7 +2046,7 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks) rdma_tasks); /* no need for break since RoCE coexist with Ethernet */ } - /* fall through */ + fallthrough; case QED_PCI_ETH: { struct qed_eth_pf_params *p_params = diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index b3c9ebaf2280..b8f076e4e6b8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -3109,14 +3109,14 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) p_hwfn->hw_info.hw_mode); if (rc) break; - /* Fall through */ + fallthrough; case FW_MSG_CODE_DRV_LOAD_PORT: rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt, p_hwfn->hw_info.hw_mode); if (rc) break; - /* Fall through */ + fallthrough; case FW_MSG_CODE_DRV_LOAD_FUNCTION: rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt, p_params->p_tunn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 2558cb680db3..f39f629242a1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -761,7 +761,7 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode) kfree(int_params->msix_table); if (force_mode) goto out; - /* Fallthrough */ + fallthrough; case QED_INT_MODE_MSI: if (cdev->num_hwfns == 1) { @@ -775,7 +775,7 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode) if (force_mode) goto out; } - /* Fallthrough */ + fallthrough; case QED_INT_MODE_INTA: int_params->out.int_mode = QED_INT_MODE_INTA; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 5be08f83e0aa..cd882c453394 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1085,7 +1085,7 @@ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) DP_NOTICE(p_hwfn, "Unknown WoL configuration %02x\n", p_hwfn->cdev->wol_config); - /* Fallthrough */ + fallthrough; case QED_OV_WOL_DEFAULT: wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP; } @@ -1365,7 +1365,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, break; case LINK_STATUS_SPEED_AND_DUPLEX_1000THD: p_link->full_duplex = false; - /* Fall-through */ + fallthrough; case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD: p_link->speed = 1000; break; @@ -2451,7 +2451,7 @@ 
qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn, break; case FUNC_MF_CFG_PROTOCOL_ROCE: DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n"); - /* Fallthrough */ + fallthrough; default: rc = -EINVAL; } @@ -3546,7 +3546,7 @@ qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn, switch (p_in_params->cmd) { case DRV_MSG_SET_RESOURCE_VALUE_MSG: mfw_resc_info.size = p_in_params->resc_max_val; - /* Fallthrough */ + fallthrough; case DRV_MSG_GET_RESOURCE_ALLOC_MSG: break; default: @@ -3823,7 +3823,7 @@ qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn, DP_INFO(p_hwfn, "Resource unlock request for an already released resource [%d]\n", p_params->resource); - /* Fallthrough */ + fallthrough; case RESOURCE_OPCODE_RELEASED: p_params->b_released = true; break; diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 0d0e38debbc2..569e2a7a64e5 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -1542,7 +1542,7 @@ static void ql_link_state_machine_work(struct work_struct *work) if (test_bit(QL_LINK_MASTER, &qdev->flags)) ql_port_start(qdev); qdev->port_link_state = LS_DOWN; - /* Fall Through */ + fallthrough; case LS_DOWN: if (curr_link_state == LS_UP) { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 5c2a3acf1e89..b9894d54469c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -353,7 +353,7 @@ skip: case QLCNIC_BRDTYPE_P3P_4_GB_MM: supported |= SUPPORTED_Autoneg; advertising |= ADVERTISED_Autoneg; - /* fall through */ + fallthrough; case QLCNIC_BRDTYPE_P3P_10G_CX4: case QLCNIC_BRDTYPE_P3P_10G_CX4_LP: case QLCNIC_BRDTYPE_P3P_10000_BASE_T: @@ -377,7 +377,7 @@ skip: supported |= SUPPORTED_TP; check_sfp_module = netif_running(adapter->netdev) && ahw->has_link_events; - /* fall through */ + fallthrough; case QLCNIC_BRDTYPE_P3P_10G_XFP: supported |= SUPPORTED_FIBRE; advertising |= ADVERTISED_FIBRE; diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index d1da92ac7fbe..fc9e6626db55 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -4994,7 +4994,7 @@ static int rtl_alloc_irq(struct rtl8169_private *tp) rtl_unlock_config_regs(tp); RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable); rtl_lock_config_regs(tp); - /* fall through */ + fallthrough; case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_17: flags = PCI_IRQ_LEGACY; break; @@ -5137,7 +5137,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp) switch (tp->mac_version) { case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52: rtl8168ep_stop_cmac(tp); - /* fall through */ + fallthrough; case RTL_GIGA_MAC_VER_40 ... 
RTL_GIGA_MAC_VER_48: rtl_hw_init_8168g(tp); break; diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index fc99e7118e49..42458a46ffaf 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -2169,7 +2169,7 @@ static void rocker_router_fib_event_work(struct work_struct *work) rocker_world_fib4_del(rocker, &fib_work->fen_info); fib_info_put(fib_work->fen_info.fi); break; - case FIB_EVENT_RULE_ADD: /* fall through */ + case FIB_EVENT_RULE_ADD: case FIB_EVENT_RULE_DEL: rule = fib_work->fr_info.rule; if (!fib4_rule_default(rule)) @@ -2201,7 +2201,7 @@ static int rocker_router_fib_event(struct notifier_block *nb, fib_work->event = event; switch (event) { - case FIB_EVENT_ENTRY_REPLACE: /* fall through */ + case FIB_EVENT_ENTRY_REPLACE: case FIB_EVENT_ENTRY_DEL: if (info->family == AF_INET) { struct fib_entry_notifier_info *fen_info = ptr; @@ -2224,7 +2224,7 @@ static int rocker_router_fib_event(struct notifier_block *nb, */ fib_info_hold(fib_work->fen_info.fi); break; - case FIB_EVENT_RULE_ADD: /* fall through */ + case FIB_EVENT_RULE_ADD: case FIB_EVENT_RULE_DEL: memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info)); fib_rule_get(fib_work->fr_info.rule); @@ -2811,7 +2811,7 @@ static int rocker_switchdev_event(struct notifier_block *unused, switchdev_work->event = event; switch (event) { - case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */ + case SWITCHDEV_FDB_ADD_TO_DEVICE: case SWITCHDEV_FDB_DEL_TO_DEVICE: memcpy(&switchdev_work->fdb_info, ptr, sizeof(switchdev_work->fdb_info)); diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c index 21465cb3d60a..7f8b10c49660 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c @@ -316,7 +316,7 @@ static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv, case TCP_V4_FLOW: case UDP_V4_FLOW: cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* Fall through */ + fallthrough; case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: case AH_V4_FLOW: @@ -327,7 +327,7 @@ static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv, case TCP_V6_FLOW: case UDP_V6_FLOW: cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* Fall through */ + fallthrough; case SCTP_V6_FLOW: case AH_ESP_V6_FLOW: case AH_V6_FLOW: diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c index db90d94e24c9..a6bae6a234ba 100644 --- a/drivers/net/ethernet/sfc/falcon/ethtool.c +++ b/drivers/net/ethernet/sfc/falcon/ethtool.c @@ -957,7 +957,7 @@ ef4_ethtool_get_rxnfc(struct net_device *net_dev, switch (info->flow_type) { case TCP_V4_FLOW: info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* Fall through */ + fallthrough; case UDP_V4_FLOW: case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: diff --git a/drivers/net/ethernet/sfc/falcon/farch.c b/drivers/net/ethernet/sfc/falcon/farch.c index 332183280a45..fa1ade856b10 100644 --- a/drivers/net/ethernet/sfc/falcon/farch.c +++ b/drivers/net/ethernet/sfc/falcon/farch.c @@ -1049,10 +1049,10 @@ ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event) switch (rx_ev_hdr_type) { case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP: flags |= EF4_RX_PKT_TCP; - /* fall through */ + fallthrough; case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP: flags |= EF4_RX_PKT_CSUMMED; - /* fall through */ + fallthrough; case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: case FSE_AZ_RX_EV_HDR_TYPE_OTHER: break; @@ -1310,7 +1310,7 @@ int 
ef4_farch_ev_process(struct ef4_channel *channel, int budget) if (efx->type->handle_global_event && efx->type->handle_global_event(channel, &event)) break; - /* else fall through */ + fallthrough; default: netif_err(channel->efx, hw, channel->efx->net_dev, "channel %d unknown event type %d (data " @@ -1983,7 +1983,7 @@ ef4_farch_filter_from_gen_spec(struct ef4_farch_filter_spec *spec, EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT | EF4_FILTER_MATCH_REM_HOST | EF4_FILTER_MATCH_REM_PORT): is_full = true; - /* fall through */ + fallthrough; case (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO | EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT): { __be32 rhost, host1, host2; @@ -2034,7 +2034,7 @@ ef4_farch_filter_from_gen_spec(struct ef4_farch_filter_spec *spec, case EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_OUTER_VID: is_full = true; - /* fall through */ + fallthrough; case EF4_FILTER_MATCH_LOC_MAC: spec->type = (is_full ? EF4_FARCH_FILTER_MAC_FULL : EF4_FARCH_FILTER_MAC_WILD); @@ -2081,7 +2081,7 @@ ef4_farch_filter_to_gen_spec(struct ef4_filter_spec *gen_spec, case EF4_FARCH_FILTER_TCP_FULL: case EF4_FARCH_FILTER_UDP_FULL: is_full = true; - /* fall through */ + fallthrough; case EF4_FARCH_FILTER_TCP_WILD: case EF4_FARCH_FILTER_UDP_WILD: { __be32 host1, host2; @@ -2125,7 +2125,7 @@ ef4_farch_filter_to_gen_spec(struct ef4_filter_spec *gen_spec, case EF4_FARCH_FILTER_MAC_FULL: is_full = true; - /* fall through */ + fallthrough; case EF4_FARCH_FILTER_MAC_WILD: gen_spec->match_flags = EF4_FILTER_MATCH_LOC_MAC; if (is_full) diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index d07eeaad9bdf..4002f9a3ae90 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -1038,10 +1038,10 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) switch (rx_ev_hdr_type) { case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP: flags |= EFX_RX_PKT_TCP; - /* fall through */ + fallthrough; case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP: flags |= EFX_RX_PKT_CSUMMED; - /* fall through */ + fallthrough; case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: case FSE_AZ_RX_EV_HDR_TYPE_OTHER: break; @@ -1316,7 +1316,7 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget) if (efx->type->handle_global_event && efx->type->handle_global_event(channel, &event)) break; - /* else fall through */ + fallthrough; default: netif_err(channel->efx, hw, channel->efx->net_dev, "channel %d unknown event type %d (data " @@ -2043,7 +2043,7 @@ efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec, EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT): is_full = true; - /* fall through */ + fallthrough; case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): { __be32 rhost, host1, host2; @@ -2094,7 +2094,7 @@ efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec, case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID: is_full = true; - /* fall through */ + fallthrough; case EFX_FILTER_MATCH_LOC_MAC: spec->type = (is_full ? 
EFX_FARCH_FILTER_MAC_FULL : EFX_FARCH_FILTER_MAC_WILD); @@ -2141,7 +2141,7 @@ efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec, case EFX_FARCH_FILTER_TCP_FULL: case EFX_FARCH_FILTER_UDP_FULL: is_full = true; - /* fall through */ + fallthrough; case EFX_FARCH_FILTER_TCP_WILD: case EFX_FARCH_FILTER_UDP_WILD: { __be32 host1, host2; @@ -2185,7 +2185,7 @@ efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec, case EFX_FARCH_FILTER_MAC_FULL: is_full = true; - /* fall through */ + fallthrough; case EFX_FARCH_FILTER_MAC_WILD: gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC; if (is_full) diff --git a/drivers/net/ethernet/sfc/mcdi_filters.c b/drivers/net/ethernet/sfc/mcdi_filters.c index 5a74d880b733..1523be77b9db 100644 --- a/drivers/net/ethernet/sfc/mcdi_filters.c +++ b/drivers/net/ethernet/sfc/mcdi_filters.c @@ -140,7 +140,7 @@ efx_mcdi_filter_push_prep_set_match_fields(struct efx_nic *efx, switch (encap_type & EFX_ENCAP_TYPES_MASK) { case EFX_ENCAP_TYPE_VXLAN: vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN; - /* fallthrough */ + fallthrough; case EFX_ENCAP_TYPE_GENEVE: COPY_VALUE(ether_type, ETHER_TYPE); outer_ip_proto = IPPROTO_UDP; diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c index 56af8b54a864..714d7f937212 100644 --- a/drivers/net/ethernet/sfc/mcdi_port_common.c +++ b/drivers/net/ethernet/sfc/mcdi_port_common.c @@ -282,7 +282,7 @@ void efx_mcdi_phy_decode_link(struct efx_nic *efx, break; default: WARN_ON(1); - /* Fall through */ + fallthrough; case MC_CMD_FCNTL_OFF: link_state->fc = 0; break; diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 59a43d586967..aaa112877561 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -358,7 +358,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel, case XDP_ABORTED: trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act); - /* Fall through */ + fallthrough; case XDP_DROP: efx_free_rx_buffers(rx_queue, rx_buf, 1); channel->n_rx_xdp_drops++; diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index 336105f77313..cfa460c7db23 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -2228,7 +2228,7 @@ static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd) switch(cmd) { case SIOCGMIIPHY: /* Get address of MII PHY in use. */ data->phy_id = sis_priv->mii->phy_addr; - /* Fall Through */ + fallthrough; case SIOCGMIIREG: /* Read MII PHY register. 
*/ data->val_out = mdio_read(net_dev, data->phy_id & 0x1f, data->reg_num & 0x1f); diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index 186c0bddbe5f..01069dfaf75c 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -712,7 +712,7 @@ static void smc911x_phy_detect(struct net_device *dev) /* Found an external PHY */ break; } - /* Else, fall through */ + fallthrough; default: /* Internal media only */ SMC_GET_PHY_ID1(lp, 1, id1); diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 25db667fa879..806eb651cea3 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -919,10 +919,10 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog, break; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(priv->ndev, prog, act); - /* fall through -- handle aborts by dropping packet */ + fallthrough; /* handle aborts by dropping packet */ case XDP_DROP: ret = NETSEC_XDP_CONSUMED; page = virt_to_head_page(xdp->data); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c index d0d2d0fc5f0a..08c76636c164 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c @@ -84,9 +84,10 @@ static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev) return ERR_PTR(err); switch (phy_mode) { - case PHY_INTERFACE_MODE_RGMII: /* Fall through */ - case PHY_INTERFACE_MODE_RGMII_ID /* Fall through */: - case PHY_INTERFACE_MODE_RGMII_RXID: /* Fall through */ + case PHY_INTERFACE_MODE_RGMII: + fallthrough; + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: gmac->phy_intf_sel = GMAC_CONFIG_INTF_RGMII; break; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c index e113b1376fdd..bf195adee393 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c @@ -1985,7 +1985,7 @@ void stmmac_selftest_run(struct net_device *dev, ret = phy_loopback(dev->phydev, true); if (!ret) break; - /* Fallthrough */ + fallthrough; case STMMAC_LOOPBACK_MAC: ret = stmmac_set_mac_loopback(priv, priv->ioaddr, true); break; @@ -2018,7 +2018,7 @@ void stmmac_selftest_run(struct net_device *dev, ret = phy_loopback(dev->phydev, false); if (!ret) break; - /* Fallthrough */ + fallthrough; case STMMAC_LOOPBACK_MAC: stmmac_set_mac_loopback(priv, priv->ioaddr, false); break; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 3d747846f482..cc27d660a818 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -228,7 +228,7 @@ static int tc_setup_cls_u32(struct stmmac_priv *priv, switch (cls->command) { case TC_CLSU32_REPLACE_KNODE: tc_unfill_entry(priv, cls); - /* Fall through */ + fallthrough; case TC_CLSU32_NEW_KNODE: return tc_config_knode(priv, cls); case TC_CLSU32_DELETE_KNODE: diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index e2bc7a25f6d1..b624e177ec71 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -4759,7 +4759,7 @@ static int cas_ioctl(struct net_device 
*dev, struct ifreq *ifr, int cmd) switch (cmd) { case SIOCGMIIPHY: /* Get address of MII PHY in use. */ data->phy_id = cp->phy_addr; - /* Fallthrough... */ + fallthrough; case SIOCGMIIREG: /* Read MII PHY register. */ spin_lock_irqsave(&cp->lock, flags); diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 9b5effb72657..68695d4afacd 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -8835,7 +8835,7 @@ static int walk_phys(struct niu *np, struct niu_parent *parent) else goto unknown_vg_1g_port; - /* fallthru */ + fallthrough; case 0x22: val = (phy_encode(PORT_TYPE_10G, 0) | phy_encode(PORT_TYPE_10G, 1) | @@ -8860,7 +8860,7 @@ static int walk_phys(struct niu *np, struct niu_parent *parent) else goto unknown_vg_1g_port; - /* fallthru */ + fallthrough; case 0x13: if ((lowest_10g & 0x7) == 0) val = (phy_encode(PORT_TYPE_10G, 0) | diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index eeb8518c8a84..8deb943ca5de 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -2712,7 +2712,7 @@ static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) switch (cmd) { case SIOCGMIIPHY: /* Get address of MII PHY in use. */ data->phy_id = gp->mii_phy_addr; - /* Fallthrough... */ + fallthrough; case SIOCGMIIREG: /* Read MII PHY register. */ data->val_out = __sungem_phy_read(gp, data->phy_id & 0x1f, diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c index 4e184eecc8e1..6e72ecbe5cf7 100644 --- a/drivers/net/ethernet/ti/cpsw-phy-sel.c +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c @@ -67,7 +67,7 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv, dev_warn(priv->dev, "Unsupported PHY mode: \"%s\". Defaulting to MII.\n", phy_modes(phy_mode)); - /* fallthrough */ + fallthrough; case PHY_INTERFACE_MODE_MII: mode = AM33XX_GMII_SEL_MODE_MII; break; @@ -122,7 +122,7 @@ static void cpsw_gmii_sel_dra7xx(struct cpsw_phy_sel_priv *priv, dev_warn(priv->dev, "Unsupported PHY mode: \"%s\". Defaulting to MII.\n", phy_modes(phy_mode)); - /* fallthrough */ + fallthrough; case PHY_INTERFACE_MODE_MII: mode = AM33XX_GMII_SEL_MODE_MII; break; diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c index d6d7a7d9c7ad..482a1a451e43 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.c +++ b/drivers/net/ethernet/ti/cpsw_priv.c @@ -1371,10 +1371,10 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp, break; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(ndev, prog, act); - /* fall through -- handle aborts by dropping packet */ + fallthrough; /* handle aborts by dropping packet */ case XDP_DROP: goto drop; } diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index 58623e974a0c..76a342ea3797 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -948,7 +948,7 @@ static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) switch (cmd) { case SIOCGMIIPHY: /* get address of MII PHY in use. */ data->phy_id = phy; - /* fall through */ + fallthrough; case SIOCGMIIREG: /* read MII PHY register. 
*/ diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c index 2db546b27ee0..dc14a66583ff 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c @@ -877,7 +877,7 @@ static int gelic_wl_set_auth(struct net_device *netdev, case IW_AUTH_KEY_MGMT: if (param->value & IW_AUTH_KEY_MGMT_PSK) break; - /* intentionally fall through */ + fallthrough; default: ret = -EOPNOTSUPP; break; diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 07389702a540..5f5b33e6653b 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -786,7 +786,7 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal) /* fallthrough, if we release the descriptors * brutally (then we don't care about * SPIDER_NET_DESCR_CARDOWNED) */ - /* Fall through */ + fallthrough; case SPIDER_NET_DESCR_RESPONSE_ERROR: case SPIDER_NET_DESCR_PROTECTION_ERROR: @@ -1397,9 +1397,9 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg, show_error = 0; break; - case SPIDER_NET_GDDDEN0INT: /* fallthrough */ - case SPIDER_NET_GDCDEN0INT: /* fallthrough */ - case SPIDER_NET_GDBDEN0INT: /* fallthrough */ + case SPIDER_NET_GDDDEN0INT: + case SPIDER_NET_GDCDEN0INT: + case SPIDER_NET_GDBDEN0INT: case SPIDER_NET_GDADEN0INT: /* someone has set RX_DMA_EN to 0 */ show_error = 0; @@ -1449,10 +1449,10 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg, * Logging is not needed. */ show_error = 0; break; - case SPIDER_NET_GRFDFLLINT: /* fallthrough */ - case SPIDER_NET_GRFCFLLINT: /* fallthrough */ - case SPIDER_NET_GRFBFLLINT: /* fallthrough */ - case SPIDER_NET_GRFAFLLINT: /* fallthrough */ + case SPIDER_NET_GRFDFLLINT: + case SPIDER_NET_GRFCFLLINT: + case SPIDER_NET_GRFBFLLINT: + case SPIDER_NET_GRFAFLLINT: case SPIDER_NET_GRMFLLINT: /* Could happen when rx chain is full */ if (card->ignore_rx_ramfull == 0) { @@ -1473,9 +1473,9 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg, break; /* chain end */ - case SPIDER_NET_GDDDCEINT: /* fallthrough */ - case SPIDER_NET_GDCDCEINT: /* fallthrough */ - case SPIDER_NET_GDBDCEINT: /* fallthrough */ + case SPIDER_NET_GDDDCEINT: + case SPIDER_NET_GDCDCEINT: + case SPIDER_NET_GDBDCEINT: case SPIDER_NET_GDADCEINT: spider_net_resync_head_ptr(card); spider_net_refill_rx_chain(card); @@ -1486,9 +1486,9 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg, break; /* invalid descriptor */ - case SPIDER_NET_GDDINVDINT: /* fallthrough */ - case SPIDER_NET_GDCINVDINT: /* fallthrough */ - case SPIDER_NET_GDBINVDINT: /* fallthrough */ + case SPIDER_NET_GDDINVDINT: + case SPIDER_NET_GDCINVDINT: + case SPIDER_NET_GDBINVDINT: case SPIDER_NET_GDAINVDINT: /* Could happen when rx chain is full */ spider_net_resync_head_ptr(card); diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c index 3e3883ad88b0..3e337142b516 100644 --- a/drivers/net/ethernet/xircom/xirc2ps_cs.c +++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c @@ -1434,7 +1434,7 @@ do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) switch(cmd) { case SIOCGMIIPHY: /* Get the address of the PHY in use. */ data->phy_id = 0; /* we have only this address */ - /* fall through */ + fallthrough; case SIOCGMIIREG: /* Read the specified MII register. 
*/ data->val_out = mii_rd(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f); diff --git a/drivers/net/fddi/skfp/pcmplc.c b/drivers/net/fddi/skfp/pcmplc.c index 1be039579d70..554cde8d6073 100644 --- a/drivers/net/fddi/skfp/pcmplc.c +++ b/drivers/net/fddi/skfp/pcmplc.c @@ -847,7 +847,7 @@ static void pcm_fsm(struct s_smc *smc, struct s_phy *phy, int cmd) case ACTIONS(PC5_SIGNAL) : ACTIONS_DONE() ; - /* fall through */ + fallthrough; case PC5_SIGNAL : if ((cmd != PC_SIGNAL) && (cmd != PC_TIMEOUT_LCT)) break ; @@ -946,7 +946,7 @@ static void pcm_fsm(struct s_smc *smc, struct s_phy *phy, int cmd) SETMASK(PLC(np,PL_CNTRL_B),PL_PC_JOIN,PL_PC_JOIN) ; ACTIONS_DONE() ; cmd = 0 ; - /* fall thru */ + fallthrough; case PC6_JOIN : switch (plc->p_state) { case PS_ACTIVE: diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index 8c810edece86..466622664424 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -974,7 +974,7 @@ static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid) FJES_RX_STOP_REQ_DONE; spin_unlock_irqrestore(&hw->rx_status_lock, flags); clear_bit(src_epid, &hw->txrx_stop_req_bit); - /* fall through */ + fallthrough; case EP_PARTNER_UNSHARE: case EP_PARTNER_COMPLETE: default: diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index 4476491b58f9..e4e4981ac1d2 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c @@ -500,7 +500,7 @@ static int transmit(struct baycom_state *bc, int cnt, unsigned char stat) } break; } - /* fall through */ + fallthrough; default: if (bc->hdlctx.calibrate <= 0) diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index deef14215110..17be2bb2985c 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -482,7 +482,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len) case CRC_MODE_SMACK_TEST: ax->crcmode = CRC_MODE_FLEX_TEST; printk(KERN_INFO "mkiss: %s: Trying crc-smack\n", ax->dev->name); - // fall through + fallthrough; case CRC_MODE_SMACK: *p |= 0x80; crc = swab16(crc16(0, p, len)); @@ -491,7 +491,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len) case CRC_MODE_FLEX_TEST: ax->crcmode = CRC_MODE_NONE; printk(KERN_INFO "mkiss: %s: Trying crc-flexnet\n", ax->dev->name); - // fall through + fallthrough; case CRC_MODE_FLEX: *p |= 0x20; crc = calc_crc_flex(p, len); @@ -744,7 +744,6 @@ static int mkiss_open(struct tty_struct *tty) ax->dev->name); break; case 0: - /* fall through */ default: crc_force = 0; printk(KERN_INFO "mkiss: %s: crc mode is auto.\n", diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 5da04e997989..c8d803d3616c 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -842,7 +842,7 @@ static int macvlan_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCSHWTSTAMP: if (!net_eq(dev_net(dev), &init_net)) break; - /* fall through */ + fallthrough; case SIOCGHWTSTAMP: if (netif_device_present(real_dev) && ops->ndo_do_ioctl) err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd); diff --git a/drivers/net/mii.c b/drivers/net/mii.c index 44612122338b..f6a97c859f3a 100644 --- a/drivers/net/mii.c +++ b/drivers/net/mii.c @@ -597,7 +597,7 @@ int generic_mii_ioctl(struct mii_if_info *mii_if, switch(cmd) { case SIOCGMIIPHY: mii_data->phy_id = mii_if->phy_id; - /* fall through */ + fallthrough; case SIOCGMIIREG: mii_data->val_out = diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c 
index 7971dc4f54f1..0e9511661601 100644 --- a/drivers/net/netdevsim/bus.c +++ b/drivers/net/netdevsim/bus.c @@ -193,7 +193,7 @@ new_device_store(struct bus_type *bus, const char *buf, size_t count) switch (err) { case 1: port_count = 1; - /* fall through */ + fallthrough; case 2: if (id > INT_MAX) { pr_err("Value of \"id\" is too big.\n"); diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c index f32d56ac3e80..deea17a0e79c 100644 --- a/drivers/net/netdevsim/fib.c +++ b/drivers/net/netdevsim/fib.c @@ -760,14 +760,14 @@ static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event, spin_lock_bh(&data->fib_lock); switch (event) { - case FIB_EVENT_RULE_ADD: /* fall through */ + case FIB_EVENT_RULE_ADD: case FIB_EVENT_RULE_DEL: err = nsim_fib_rule_event(data, info, event == FIB_EVENT_RULE_ADD); break; - case FIB_EVENT_ENTRY_REPLACE: /* fall through */ - case FIB_EVENT_ENTRY_APPEND: /* fall through */ + case FIB_EVENT_ENTRY_REPLACE: + case FIB_EVENT_ENTRY_APPEND: case FIB_EVENT_ENTRY_DEL: err = nsim_fib_event(data, info, event); break; diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c index 7471a8b90873..307f0ac1287b 100644 --- a/drivers/net/phy/adin.c +++ b/drivers/net/phy/adin.c @@ -366,10 +366,10 @@ static int adin_set_edpd(struct phy_device *phydev, u16 tx_interval) switch (tx_interval) { case 1000: /* 1 second */ - /* fallthrough */ + fallthrough; case ETHTOOL_PHY_EDPD_DFLT_TX_MSECS: val |= ADIN1300_NRG_PD_TX_EN; - /* fallthrough */ + fallthrough; case ETHTOOL_PHY_EDPD_NO_TX: break; default: diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 50fb7d16b75a..79e67f2fe00a 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -766,13 +766,13 @@ static int decode_evnt(struct dp83640_private *dp83640, switch (words) { case 3: dp83640->edata.sec_hi = phy_txts->sec_hi; - /* fall through */ + fallthrough; case 2: dp83640->edata.sec_lo = phy_txts->sec_lo; - /* fall through */ + fallthrough; case 1: dp83640->edata.ns_hi = phy_txts->ns_hi; - /* fall through */ + fallthrough; case 0: dp83640->edata.ns_lo = phy_txts->ns_lo; } @@ -1409,7 +1409,7 @@ static void dp83640_txtstamp(struct mii_timestamper *mii_ts, kfree_skb(skb); return; } - /* fall through */ + fallthrough; case HWTSTAMP_TX_ON: skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT; diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index c4641b1704d6..18d81f43f2a8 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@ -279,13 +279,13 @@ static struct phy_device *__fixed_phy_register(unsigned int irq, phy->supported); linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, phy->supported); - /* fall through */ + fallthrough; case SPEED_100: linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, phy->supported); linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, phy->supported); - /* fall through */ + fallthrough; case SPEED_10: default: linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 79b4f35d151e..735a806045ac 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -355,7 +355,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) switch (cmd) { case SIOCGMIIPHY: mii_data->phy_id = phydev->mdio.addr; - /* fall through */ + fallthrough; case SIOCGMIIREG: if (mdio_phy_id_is_c45(mii_data->phy_id)) { @@ -433,7 +433,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq 
*ifr, int cmd) case SIOCSHWTSTAMP: if (phydev->mii_ts && phydev->mii_ts->hwtstamp) return phydev->mii_ts->hwtstamp(phydev->mii_ts, ifr); - /* fall through */ + fallthrough; default: return -EOPNOTSUPP; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 57d44648c8dd..8adfbad0a1e8 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1979,7 +1979,7 @@ static int genphy_setup_master_slave(struct phy_device *phydev) break; case MASTER_SLAVE_CFG_MASTER_FORCE: ctl |= CTL1000_AS_MASTER; - /* fallthrough */ + fallthrough; case MASTER_SLAVE_CFG_SLAVE_FORCE: ctl |= CTL1000_ENABLE_MASTER; break; diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 32b4bd6a5b55..32f4e8ec96cf 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1905,7 +1905,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd) switch (cmd) { case SIOCGMIIPHY: mii->phy_id = pl->phydev->mdio.addr; - /* fall through */ + fallthrough; case SIOCGMIIREG: ret = phylink_phy_read(pl, mii->phy_id, mii->reg_num); @@ -1928,7 +1928,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd) switch (cmd) { case SIOCGMIIPHY: mii->phy_id = 0; - /* fall through */ + fallthrough; case SIOCGMIIREG: ret = phylink_mii_read(pl, mii->phy_id, mii->reg_num); diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index 6900c68260e0..58014feedf6c 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -149,7 +149,7 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id, port = PORT_TP; break; } - /* fallthrough */ + fallthrough; case SFF8024_CONNECTOR_SG: /* guess */ case SFF8024_CONNECTOR_HSSDC_II: case SFF8024_CONNECTOR_NOSEPARATE: @@ -301,7 +301,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, break; case SFF8024_ECC_100GBASE_CR4: phylink_set(modes, 100000baseCR4_Full); - /* fallthrough */ + fallthrough; case SFF8024_ECC_25GBASE_CR_S: case SFF8024_ECC_25GBASE_CR_N: phylink_set(modes, 25000baseCR_Full); diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index c24b0e83dd32..cf83314c8591 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -552,7 +552,7 @@ static umode_t sfp_hwmon_is_visible(const void *data, case hwmon_temp_crit: if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) return 0; - /* fall through */ + fallthrough; case hwmon_temp_input: case hwmon_temp_label: return 0444; @@ -571,7 +571,7 @@ static umode_t sfp_hwmon_is_visible(const void *data, case hwmon_in_crit: if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) return 0; - /* fall through */ + fallthrough; case hwmon_in_input: case hwmon_in_label: return 0444; @@ -590,7 +590,7 @@ static umode_t sfp_hwmon_is_visible(const void *data, case hwmon_curr_crit: if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) return 0; - /* fall through */ + fallthrough; case hwmon_curr_input: case hwmon_curr_label: return 0444; @@ -618,7 +618,7 @@ static umode_t sfp_hwmon_is_visible(const void *data, case hwmon_power_crit: if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) return 0; - /* fall through */ + fallthrough; case hwmon_power_input: case hwmon_power_label: return 0444; @@ -1872,7 +1872,7 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event) dev_warn(sfp->dev, "hwmon probe failed: %d\n", err); sfp_sm_mod_next(sfp, SFP_MOD_WAITDEV, 0); - /* fall through */ + fallthrough; case SFP_MOD_WAITDEV: /* Ensure that the device is attached before proceeding */ if 
(sfp->sm_dev_state < SFP_DEV_DOWN) @@ -1890,7 +1890,7 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event) goto insert; sfp_sm_mod_next(sfp, SFP_MOD_HPOWER, 0); - /* fall through */ + fallthrough; case SFP_MOD_HPOWER: /* Enable high power mode */ err = sfp_sm_mod_hpower(sfp, true); diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c index d82016dcde3b..4406b353123e 100644 --- a/drivers/net/plip/plip.c +++ b/drivers/net/plip/plip.c @@ -498,7 +498,7 @@ plip_receive(unsigned short nibble_timeout, struct net_device *dev, *data_p = (c0 >> 3) & 0x0f; write_data (dev, 0x10); /* send ACK */ *ns_p = PLIP_NB_1; - /* fall through */ + fallthrough; case PLIP_NB_1: cx = nibble_timeout; @@ -594,7 +594,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl, printk(KERN_DEBUG "%s: receive start\n", dev->name); rcv->state = PLIP_PK_LENGTH_LSB; rcv->nibble = PLIP_NB_BEGIN; - /* fall through */ + fallthrough; case PLIP_PK_LENGTH_LSB: if (snd->state != PLIP_PK_DONE) { @@ -615,7 +615,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl, return TIMEOUT; } rcv->state = PLIP_PK_LENGTH_MSB; - /* fall through */ + fallthrough; case PLIP_PK_LENGTH_MSB: if (plip_receive(nibble_timeout, dev, @@ -638,7 +638,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl, rcv->state = PLIP_PK_DATA; rcv->byte = 0; rcv->checksum = 0; - /* fall through */ + fallthrough; case PLIP_PK_DATA: lbuf = rcv->skb->data; @@ -651,7 +651,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl, rcv->checksum += lbuf[--rcv->byte]; } while (rcv->byte); rcv->state = PLIP_PK_CHECKSUM; - /* fall through */ + fallthrough; case PLIP_PK_CHECKSUM: if (plip_receive(nibble_timeout, dev, @@ -664,7 +664,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl, return ERROR; } rcv->state = PLIP_PK_DONE; - /* fall through */ + fallthrough; case PLIP_PK_DONE: /* Inform the upper layer for the arrival of a packet. 
*/ @@ -710,7 +710,7 @@ plip_send(unsigned short nibble_timeout, struct net_device *dev, case PLIP_NB_BEGIN: write_data (dev, data & 0x0f); *ns_p = PLIP_NB_1; - /* fall through */ + fallthrough; case PLIP_NB_1: write_data (dev, 0x10 | (data & 0x0f)); @@ -725,7 +725,7 @@ plip_send(unsigned short nibble_timeout, struct net_device *dev, } write_data (dev, 0x10 | (data >> 4)); *ns_p = PLIP_NB_2; - /* fall through */ + fallthrough; case PLIP_NB_2: write_data (dev, (data >> 4)); @@ -814,7 +814,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl, &snd->nibble, snd->length.b.lsb)) return TIMEOUT; snd->state = PLIP_PK_LENGTH_MSB; - /* fall through */ + fallthrough; case PLIP_PK_LENGTH_MSB: if (plip_send(nibble_timeout, dev, @@ -823,7 +823,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl, snd->state = PLIP_PK_DATA; snd->byte = 0; snd->checksum = 0; - /* fall through */ + fallthrough; case PLIP_PK_DATA: do { @@ -835,7 +835,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl, snd->checksum += lbuf[--snd->byte]; } while (snd->byte); snd->state = PLIP_PK_CHECKSUM; - /* fall through */ + fallthrough; case PLIP_PK_CHECKSUM: if (plip_send(nibble_timeout, dev, @@ -846,7 +846,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl, dev_kfree_skb(snd->skb); dev->stats.tx_packets++; snd->state = PLIP_PK_DONE; - /* fall through */ + fallthrough; case PLIP_PK_DONE: /* Close the connection */ @@ -935,7 +935,7 @@ plip_interrupt(void *dev_id) switch (nl->connection) { case PLIP_CN_CLOSING: netif_wake_queue (dev); - /* fall through */ + fallthrough; case PLIP_CN_NONE: case PLIP_CN_SEND: rcv->state = PLIP_PK_TRIGGER; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 3c11a77f5709..7959b5c2d11f 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1590,10 +1590,10 @@ static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog, break; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(tun->dev, xdp_prog, act); - /* fall through */ + fallthrough; case XDP_DROP: this_cpu_inc(tun->pcpu_stats->rx_dropped); break; @@ -2417,7 +2417,7 @@ static int tun_xdp_one(struct tun_struct *tun, switch (err) { case XDP_REDIRECT: *flush = true; - /* fall through */ + fallthrough; case XDP_TX: return 0; case XDP_PASS: diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c index 7e44110746dd..0717c18015c9 100644 --- a/drivers/net/usb/aqc111.c +++ b/drivers/net/usb/aqc111.c @@ -333,13 +333,13 @@ static void aqc111_set_phy_speed(struct usbnet *dev, u8 autoneg, u16 speed) switch (speed) { case SPEED_5000: aqc111_data->phy_cfg |= AQ_ADV_5G; - /* fall-through */ + fallthrough; case SPEED_2500: aqc111_data->phy_cfg |= AQ_ADV_2G5; - /* fall-through */ + fallthrough; case SPEED_1000: aqc111_data->phy_cfg |= AQ_ADV_1G; - /* fall-through */ + fallthrough; case SPEED_100: aqc111_data->phy_cfg |= AQ_ADV_100M; /* fall-through */ diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c index d387bc7ac1b6..97ba67042d12 100644 --- a/drivers/net/usb/catc.c +++ b/drivers/net/usb/catc.c @@ -858,7 +858,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id default: dev_warn(&intf->dev, "Couldn't detect memory size, assuming 32k\n"); - /* fall through */ + fallthrough; case 0x87654321: catc_set_reg(catc, TxBufCount, 4); catc_set_reg(catc, RxBufCount, 16); diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index 9bdbd7b472a0..dba847f28096 100644 --- 
a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -97,7 +97,7 @@ static void tx_complete(struct urb *req) case -ECONNRESET: case -ESHUTDOWN: dev->stats.tx_aborted_errors++; - /* fall through */ + fallthrough; default: dev->stats.tx_errors++; dev_dbg(&dev->dev, "TX error (%d)\n", status); diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 442507f25aad..65b315bc60ab 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -3192,7 +3192,7 @@ static void rx_complete(struct urb *urb) case -EPIPE: dev->net->stats.rx_errors++; lan78xx_defer_kevent(dev, EVENT_RX_HALT); - /* FALLTHROUGH */ + fallthrough; case -ECONNRESET: /* async unlink */ case -ESHUTDOWN: /* hardware gone */ netif_dbg(dev, ifdown, dev->net, @@ -3213,7 +3213,7 @@ static void rx_complete(struct urb *urb) /* data overrun ... flush fifo? */ case -EOVERFLOW: dev->net->stats.rx_over_errors++; - /* FALLTHROUGH */ + fallthrough; default: state = rx_cleanup; diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 0ef7e1f443e3..e92cb51a2c77 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -629,7 +629,7 @@ static void write_bulk_callback(struct urb *urb) return; default: netif_info(pegasus, tx_err, net, "TX status %d\n", status); - /* FALL THROUGH */ + fallthrough; case 0: break; } @@ -1009,7 +1009,7 @@ static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd) switch (cmd) { case SIOCDEVPRIVATE: data[0] = pegasus->phy; - /* fall through */ + fallthrough; case SIOCDEVPRIVATE + 1: read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]); res = 0; diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 2b02fefd094d..b1770489aca5 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1682,7 +1682,7 @@ static void intr_callback(struct urb *urb) case -ECONNRESET: /* unlink */ case -ESHUTDOWN: netif_device_detach(tp->netdev); - /* fall through */ + fallthrough; case -ENOENT: case -EPROTO: netif_info(tp, intr, tp->netdev, @@ -3251,7 +3251,7 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable) r8152_mdio_write(tp, MII_BMCR, data); data = r8153_phy_status(tp, PHY_STAT_LAN_ON); - /* fall through */ + fallthrough; default: if (data != PHY_STAT_LAN_ON) @@ -4849,7 +4849,7 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex, tp->ups_info.speed_duplex = NWAY_1000M_FULL; break; } - /* fall through */ + fallthrough; default: ret = -EINVAL; goto out; diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index e7c630d37589..733f120c852b 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -843,7 +843,7 @@ static int rtl8150_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) switch (cmd) { case SIOCDEVPRIVATE: data[0] = dev->phy; - /* fall through */ + fallthrough; case SIOCDEVPRIVATE + 1: read_mii_word(dev, dev->phy, (data[1] & 0x1f), &data[3]); break; diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index e45935a5856a..2b2a841cd938 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -110,7 +110,7 @@ int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf) if (!usb_endpoint_dir_in(&e->desc)) continue; intr = 1; - /* FALLTHROUGH */ + fallthrough; case USB_ENDPOINT_XFER_BULK: break; default: @@ -628,7 +628,7 @@ block: /* data overrun ... flush fifo? 
*/ case -EOVERFLOW: dev->net->stats.rx_over_errors++; - // FALLTHROUGH + fallthrough; default: state = rx_cleanup; @@ -1530,7 +1530,7 @@ static void usbnet_bh (struct timer_list *t) continue; case tx_done: kfree(entry->urb->sg); - /* fall through */ + fallthrough; case rx_cleanup: usb_free_urb (entry->urb); dev_kfree_skb (skb); diff --git a/drivers/net/veth.c b/drivers/net/veth.c index e56cd562a664..a475f48d43c4 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -610,10 +610,10 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq, goto xdp_xmit; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(rq->dev, xdp_prog, act); - /* fall through */ + fallthrough; case XDP_DROP: stats->xdp_drops++; goto err_xdp; @@ -745,10 +745,10 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, goto xdp_xmit; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(rq->dev, xdp_prog, act); - /* fall through */ + fallthrough; case XDP_DROP: stats->xdp_drops++; goto xdp_drop; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 0ada48edf749..263b005981bd 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -724,7 +724,7 @@ static struct sk_buff *receive_small(struct net_device *dev, goto xdp_xmit; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(vi->dev, xdp_prog, act); case XDP_DROP: @@ -922,10 +922,10 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, goto xdp_xmit; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(vi->dev, xdp_prog, act); - /* fall through */ + fallthrough; case XDP_DROP: if (unlikely(xdp_page != page)) __free_pages(xdp_page, 0); diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index def27afa1c69..1014693a5ceb 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -743,7 +743,7 @@ vmxnet3_get_rss_hash_opts(struct vmxnet3_adapter *adapter, case ESP_V4_FLOW: if (rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fallthrough */ + fallthrough; case SCTP_V4_FLOW: case IPV4_FLOW: info->data |= RXH_IP_SRC | RXH_IP_DST; diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 1ea15f2123ed..8ccd086e06cb 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -173,7 +173,7 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb, case X25_IFACE_DISCONNECT: if ((err = lapb_disconnect_request(dev)) != LAPB_OK) pr_err("lapb_disconnect_request err: %d\n", err); - /* Fall thru */ + fallthrough; default: goto drop; } diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c index 77ccf3672ede..bc2c1c7fb1a4 100644 --- a/drivers/net/wan/sdla.c +++ b/drivers/net/wan/sdla.c @@ -413,7 +413,7 @@ static void sdla_errors(struct net_device *dev, int cmd, int dlci, int ret, int case SDLA_RET_NO_BUFS: if (cmd == SDLA_INFORMATION_WRITE) break; - /* Else, fall through */ + fallthrough; default: netdev_dbg(dev, "Cmd 0x%02X generated return code 0x%02X\n", diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c index de7984463595..7ee980575208 100644 --- a/drivers/net/wan/x25_asy.c +++ b/drivers/net/wan/x25_asy.c @@ -330,7 +330,7 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb, if (err != LAPB_OK) 
netdev_err(dev, "lapb_disconnect_request error: %d\n", err); - /* fall through */ + fallthrough; default: kfree_skb(skb); return NETDEV_TX_OK; diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c index 4fe7c7e132c4..9afed3b133d3 100644 --- a/drivers/net/wimax/i2400m/control.c +++ b/drivers/net/wimax/i2400m/control.c @@ -352,7 +352,7 @@ void i2400m_report_tlv_system_state(struct i2400m *i2400m, case I2400M_SS_IDLE: d_printf(1, dev, "entering BS-negotiated idle mode\n"); - /* Fall through */ + fallthrough; case I2400M_SS_DISCONNECTING: case I2400M_SS_DATA_PATH_CONNECTED: wimax_state_change(wimax_dev, WIMAX_ST_CONNECTED); diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c index 1f7709d24f35..27ab233650d5 100644 --- a/drivers/net/wimax/i2400m/usb-fw.c +++ b/drivers/net/wimax/i2400m/usb-fw.c @@ -135,7 +135,7 @@ retry: msleep(10); /* give the device some time */ goto retry; } - /* fall through */ + fallthrough; case -EINVAL: /* while removing driver */ case -ENODEV: /* dev disconnect ... */ case -ENOENT: /* just ignore it */ diff --git a/drivers/net/wimax/i2400m/usb-tx.c b/drivers/net/wimax/i2400m/usb-tx.c index 3a0e7226768a..3ba9d70cca1b 100644 --- a/drivers/net/wimax/i2400m/usb-tx.c +++ b/drivers/net/wimax/i2400m/usb-tx.c @@ -136,7 +136,7 @@ retry: msleep(10); /* give the device some time */ goto retry; } - /* fall through */ + fallthrough; case -EINVAL: /* while removing driver */ case -ENODEV: /* dev disconnect ... */ case -ENOENT: /* just ignore it */ diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c index 9659f9e1aaa6..b684e97ac976 100644 --- a/drivers/net/wimax/i2400m/usb.c +++ b/drivers/net/wimax/i2400m/usb.c @@ -195,7 +195,7 @@ retry: msleep(10); /* give the device some time */ goto retry; } - /* fall through */ + fallthrough; case -EINVAL: /* while removing driver */ case -ENODEV: /* dev disconnect ... */ case -ENOENT: /* just ignore it */ diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c index 6b7532f7c936..ff96f22648ef 100644 --- a/drivers/net/xen-netback/hash.c +++ b/drivers/net/xen-netback/hash.c @@ -393,7 +393,7 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m) case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE: seq_puts(m, "Hash Algorithm: NONE\n"); - /* FALLTHRU */ + fallthrough; default: return; } diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 7e62a6ee7622..f1c1624cec8f 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -448,7 +448,7 @@ static void frontend_changed(struct xenbus_device *dev, set_backend_state(be, XenbusStateClosed); if (xenbus_dev_is_online(dev)) break; - /* fall through - if not online */ + fallthrough; /* if not online */ case XenbusStateUnknown: set_backend_state(be, XenbusStateClosed); device_unregister(&dev->dev); diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 458be6882b98..3e9895bec15f 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -2341,7 +2341,7 @@ static void netback_changed(struct xenbus_device *dev, case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; - /* Fall through - Missed the backend's CLOSING state. 
*/ + fallthrough; /* Missed the backend's CLOSING state */ case XenbusStateClosing: xenbus_frontend_closed(dev); break; diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c index 346e084387f7..f7464bd6d57c 100644 --- a/drivers/nfc/pn533/pn533.c +++ b/drivers/nfc/pn533/pn533.c @@ -2321,7 +2321,7 @@ static int pn533_transceive(struct nfc_dev *nfc_dev, break; } - /* fall through */ + fallthrough; default: /* jumbo frame ? */ if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) { @@ -2448,7 +2448,7 @@ static void pn533_wq_mi_recv(struct work_struct *work) break; } - /* fall through */ + fallthrough; default: skb_put_u8(skb, 1); /*TG*/ diff --git a/drivers/nfc/st21nfca/dep.c b/drivers/nfc/st21nfca/dep.c index 0b9ca6d20ffa..8874d605b14f 100644 --- a/drivers/nfc/st21nfca/dep.c +++ b/drivers/nfc/st21nfca/dep.c @@ -611,7 +611,7 @@ static void st21nfca_im_recv_dep_res_cb(void *context, struct sk_buff *skb, switch (ST21NFCA_NFC_DEP_PFB_TYPE(dep_res->pfb)) { case ST21NFCA_NFC_DEP_PFB_ACK_NACK_PDU: pr_err("Received a ACK/NACK PDU\n"); - /* fall through */ + fallthrough; case ST21NFCA_NFC_DEP_PFB_I_PDU: info->dep_info.curr_nfc_dep_pni = ST21NFCA_NFC_DEP_PFB_PNI(dep_res->pfb + 1); diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c index e46adaac1c63..3bd97c73f983 100644 --- a/drivers/nfc/trf7970a.c +++ b/drivers/nfc/trf7970a.c @@ -1153,7 +1153,7 @@ static int trf7970a_switch_rf(struct nfc_digital_dev *ddev, bool on) dev_err(trf->dev, "%s - Invalid request: %d %d\n", __func__, trf->state, on); ret = -EINVAL; - /* FALLTHROUGH */ + fallthrough; case TRF7970A_ST_IDLE: case TRF7970A_ST_IDLE_RX_BLOCKED: case TRF7970A_ST_WAIT_FOR_RX_DATA: @@ -1960,7 +1960,7 @@ static void trf7970a_shutdown(struct trf7970a *trf) case TRF7970A_ST_WAIT_TO_ISSUE_EOF: case TRF7970A_ST_LISTENING: trf7970a_send_err_upstream(trf, -ECANCELED); - /* FALLTHROUGH */ + fallthrough; case TRF7970A_ST_IDLE: case TRF7970A_ST_IDLE_RX_BLOCKED: trf7970a_switch_rf_off(trf); diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index e6d1f5b298f3..4a02561cfb96 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -1483,7 +1483,7 @@ static void ntb_rx_copy_callback(void *data, case DMA_TRANS_READ_FAILED: case DMA_TRANS_WRITE_FAILED: entry->errors++; - /* fall through */ + fallthrough; case DMA_TRANS_ABORTED: { struct ntb_transport_qp *qp = entry->qp; @@ -1739,7 +1739,7 @@ static void ntb_tx_copy_callback(void *data, case DMA_TRANS_READ_FAILED: case DMA_TRANS_WRITE_FAILED: entry->errors++; - /* fall through */ + fallthrough; case DMA_TRANS_ABORTED: { void __iomem *offset = diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 88cff309d8e4..96ee5a476c4d 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -330,7 +330,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, case NVME_CTRL_RESETTING: case NVME_CTRL_CONNECTING: changed = true; - /* FALLTHRU */ + fallthrough; default: break; } @@ -340,7 +340,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, case NVME_CTRL_NEW: case NVME_CTRL_LIVE: changed = true; - /* FALLTHRU */ + fallthrough; default: break; } @@ -350,7 +350,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, case NVME_CTRL_NEW: case NVME_CTRL_RESETTING: changed = true; - /* FALLTHRU */ + fallthrough; default: break; } @@ -361,7 +361,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, case NVME_CTRL_RESETTING: case NVME_CTRL_CONNECTING: changed = true; - /* FALLTHRU */ + fallthrough; default: break; } @@ -371,7 +371,7 @@ bool 
nvme_change_ctrl_state(struct nvme_ctrl *ctrl, case NVME_CTRL_DELETING: case NVME_CTRL_DEAD: changed = true; - /* FALLTHRU */ + fallthrough; default: break; } @@ -380,7 +380,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, switch (old_state) { case NVME_CTRL_DELETING: changed = true; - /* FALLTHRU */ + fallthrough; default: break; } diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index ba725ae47305..24d174a0623b 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1244,7 +1244,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) switch (dev->ctrl.state) { case NVME_CTRL_CONNECTING: nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); - /* fall through */ + fallthrough; case NVME_CTRL_DELETING: dev_warn_ratelimited(dev->ctrl.device, "I/O %d QID %d timeout, disable controller\n", diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 44c76ffbb264..610e0b6bb6e4 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -1915,7 +1915,7 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, case RDMA_CM_EVENT_CONNECT_ERROR: case RDMA_CM_EVENT_UNREACHABLE: nvme_rdma_destroy_queue_ib(queue); - /* fall through */ + fallthrough; case RDMA_CM_EVENT_ADDR_ERROR: dev_dbg(queue->ctrl->ctrl.device, "CM error event %d\n", ev->event); diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 62fbaecdc960..f0c4bb47d22d 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -866,7 +866,6 @@ static void nvme_tcp_state_change(struct sock *sk) case TCP_LAST_ACK: case TCP_FIN_WAIT1: case TCP_FIN_WAIT2: - /* fallthrough */ nvme_tcp_error_recovery(&queue->ctrl->ctrl); break; default: diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index b92f45f5cd5b..ca7a58dae275 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -73,7 +73,7 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno) status = NVME_SC_ACCESS_DENIED; break; case -EIO: - /* FALLTHRU */ + fallthrough; default: req->error_loc = offsetof(struct nvme_common_command, opcode); status = NVME_SC_INTERNAL | NVME_SC_DNR; diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index c97e60b71bbc..3da067a8311e 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -812,7 +812,7 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport, break; /* Fall-Thru to RSP handling */ - /* FALLTHRU */ + fallthrough; case NVMET_FCOP_RSP: if (fcpreq) { diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index 3dd6f566a240..125dde3f410e 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -139,7 +139,6 @@ static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts) req->error_loc = offsetof(struct nvme_rw_command, nsid); break; case BLK_STS_IOERR: - /* fallthru */ default: status = NVME_SC_INTERNAL | NVME_SC_DNR; req->error_loc = offsetof(struct nvme_common_command, opcode); diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 3ccb59260b4a..ae6620489457 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -1758,7 +1758,7 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id, schedule_delayed_work(&port->repair_work, 0); break; } - /* FALLTHROUGH */ + fallthrough; case RDMA_CM_EVENT_DISCONNECTED: case RDMA_CM_EVENT_TIMEWAIT_EXIT: nvmet_rdma_queue_disconnect(queue); @@ -1769,7 +1769,7 @@ static int 
nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id, case RDMA_CM_EVENT_REJECTED: pr_debug("Connection rejected: %s\n", rdma_reject_msg(cm_id, event->status)); - /* FALLTHROUGH */ + fallthrough; case RDMA_CM_EVENT_UNREACHABLE: case RDMA_CM_EVENT_CONNECT_ERROR: nvmet_rdma_queue_connect_fail(cm_id, queue); diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c index f28d6a3c5a68..4547ac44c8d4 100644 --- a/drivers/parport/ieee1284.c +++ b/drivers/parport/ieee1284.c @@ -260,7 +260,7 @@ static void parport_ieee1284_terminate (struct parport *port) port->ieee1284.phase = IEEE1284_PH_FWD_IDLE; } - /* fall through */ + fallthrough; default: /* Terminate from all other modes. */ @@ -598,7 +598,7 @@ ssize_t parport_write (struct parport *port, const void *buffer, size_t len) case IEEE1284_MODE_NIBBLE: case IEEE1284_MODE_BYTE: parport_negotiate (port, IEEE1284_MODE_COMPAT); - /* fall through */ + fallthrough; case IEEE1284_MODE_COMPAT: pr_debug("%s: Using compatibility mode\n", port->name); fn = port->ops->compat_write_data; @@ -702,7 +702,7 @@ ssize_t parport_read (struct parport *port, void *buffer, size_t len) if (parport_negotiate (port, IEEE1284_MODE_NIBBLE)) { return -EIO; } - /* fall through - to NIBBLE */ + fallthrough; /* to NIBBLE */ case IEEE1284_MODE_NIBBLE: pr_debug("%s: Using nibble mode\n", port->name); fn = port->ops->nibble_read_data; diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index 77e37e3cb3a0..eda4ded4d5e5 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c @@ -1647,7 +1647,7 @@ static int parport_ECP_supported(struct parport *pb) break; default: pr_warn("0x%lx: Unknown implementation ID\n", pb->base); - /* Fall through - Assume 1 */ + fallthrough; /* Assume 1 */ case 1: pword = 1; } diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 90df28c7cb0c..5fef2613b223 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -439,7 +439,7 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0); break; - case IMX6QP: /* FALLTHROUGH */ + case IMX6QP: case IMX6Q: /* power up core phy and enable ref clock */ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, @@ -642,7 +642,7 @@ static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie) regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_RX_EQ_MASK, IMX6SX_GPR12_PCIE_RX_EQ_2); - /* FALLTHROUGH */ + fallthrough; default: regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6Q_GPR12_PCIE_CTL_2, 0 << 10); @@ -1105,7 +1105,7 @@ static int imx6_pcie_probe(struct platform_device *pdev) dev_err(dev, "pcie_aux clock source missing or invalid\n"); return PTR_ERR(imx6_pcie->pcie_aux); } - /* fall through */ + fallthrough; case IMX7D: if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR) imx6_pcie->controller_id = 1; diff --git a/drivers/pci/controller/pci-rcar-gen2.c b/drivers/pci/controller/pci-rcar-gen2.c index c9530038ca9a..afde4aa8f6dc 100644 --- a/drivers/pci/controller/pci-rcar-gen2.c +++ b/drivers/pci/controller/pci-rcar-gen2.c @@ -223,7 +223,7 @@ static void rcar_pci_setup(struct rcar_pci_priv *priv) pr_warn("unknown window size %ld - defaulting to 256M\n", window_size); window_size = SZ_256M; - /* fall-through */ + fallthrough; case SZ_256M: val |= RCAR_USBCTR_PCIAHB_WIN1_256M; break; diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c 
index 5c93aa14f0de..ae9acc77d14f 100644 --- a/drivers/pci/hotplug/ibmphp_res.c +++ b/drivers/pci/hotplug/ibmphp_res.c @@ -1941,7 +1941,7 @@ static int __init update_bridge_ranges(struct bus_node **bus) break; case PCI_HEADER_TYPE_BRIDGE: function = 0x8; - /* fall through */ + fallthrough; case PCI_HEADER_TYPE_MULTIBRIDGE: /* We assume here that only 1 bus behind the bridge TO DO: add functionality for several: diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index 6503d15effbb..9f85815b4f53 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c @@ -236,7 +236,7 @@ void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events) switch (ctrl->state) { case BLINKINGOFF_STATE: cancel_delayed_work(&ctrl->button_work); - /* fall through */ + fallthrough; case ON_STATE: ctrl->state = POWEROFF_STATE; mutex_unlock(&ctrl->state_lock); @@ -265,7 +265,7 @@ void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events) switch (ctrl->state) { case BLINKINGON_STATE: cancel_delayed_work(&ctrl->button_work); - /* fall through */ + fallthrough; case OFF_STATE: ctrl->state = POWERON_STATE; mutex_unlock(&ctrl->state_lock); diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c index afdc52d1cae7..65502e3f7b4f 100644 --- a/drivers/pci/hotplug/shpchp_ctrl.c +++ b/drivers/pci/hotplug/shpchp_ctrl.c @@ -642,7 +642,7 @@ int shpchp_sysfs_enable_slot(struct slot *p_slot) switch (p_slot->state) { case BLINKINGON_STATE: cancel_delayed_work(&p_slot->work); - /* fall through */ + fallthrough; case STATIC_STATE: p_slot->state = POWERON_STATE; mutex_unlock(&p_slot->lock); @@ -678,7 +678,7 @@ int shpchp_sysfs_disable_slot(struct slot *p_slot) switch (p_slot->state) { case BLINKINGOFF_STATE: cancel_delayed_work(&p_slot->work); - /* fall through */ + fallthrough; case STATIC_STATE: p_slot->state = POWEROFF_STATE; mutex_unlock(&p_slot->lock); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index a458c46d7e39..e39c5499770f 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1049,7 +1049,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) need_restore = true; - /* Fall-through - force to D0 */ + fallthrough; /* force to D0 */ default: pmcsr = 0; break; @@ -2541,7 +2541,7 @@ static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup) case PCI_D2: if (pci_no_d1d2(dev)) break; - /* else, fall through */ + fallthrough; default: target_state = state; } diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index bd2b691fa7a3..d35186b01d98 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c @@ -231,7 +231,7 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd, } /* If arch decided it can't, fall through... 
*/ #endif /* HAVE_PCI_MMAP */ - /* fall through */ + fallthrough; default: ret = -EINVAL; break; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index bdf9b52567e0..2a589b6d6ed8 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -1730,7 +1730,7 @@ static void quirk_jmicron_ata(struct pci_dev *pdev) case PCI_DEVICE_ID_JMICRON_JMB366: /* Redirect IDE second PATA port to the right spot */ conf5 |= (1 << 24); - /* Fall through */ + fallthrough; case PCI_DEVICE_ID_JMICRON_JMB361: case PCI_DEVICE_ID_JMICRON_JMB363: case PCI_DEVICE_ID_JMICRON_JMB369: @@ -2224,7 +2224,7 @@ static void quirk_netmos(struct pci_dev *dev) if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM && dev->subsystem_device == 0x0299) return; - /* else, fall through */ + fallthrough; case PCI_DEVICE_ID_NETMOS_9735: case PCI_DEVICE_ID_NETMOS_9745: case PCI_DEVICE_ID_NETMOS_9845: diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 3951e02b7ded..2ce636937c6e 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -1253,7 +1253,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head) additional_mmio_size = pci_hotplug_mmio_size; additional_mmio_pref_size = pci_hotplug_mmio_pref_size; } - /* Fall through */ + fallthrough; default: pbus_size_io(bus, realloc_head ? 0 : additional_io_size, additional_io_size, realloc_head); diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c index fab267e359e7..c0e85be598c1 100644 --- a/drivers/pci/xen-pcifront.c +++ b/drivers/pci/xen-pcifront.c @@ -1096,7 +1096,7 @@ static void __ref pcifront_backend_changed(struct xenbus_device *xdev, case XenbusStateClosed: if (xdev->state == XenbusStateClosed) break; - /* fall through - Missed the backend's CLOSING state. */ + fallthrough; /* Missed the backend's CLOSING state */ case XenbusStateClosing: dev_warn(&xdev->dev, "backend going away!\n"); pcifront_try_disconnect(pdev); diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c index 590e594092f2..a7c7c7cd2326 100644 --- a/drivers/pcmcia/db1xxx_ss.c +++ b/drivers/pcmcia/db1xxx_ss.c @@ -255,10 +255,10 @@ static int db1x_pcmcia_configure(struct pcmcia_socket *skt, switch (state->Vcc) { case 50: ++v; - /* fall through */ + fallthrough; case 33: ++v; - /* fall through */ + fallthrough; case 0: break; default: @@ -269,11 +269,11 @@ static int db1x_pcmcia_configure(struct pcmcia_socket *skt, switch (state->Vpp) { case 12: ++p; - /* fall through */ + fallthrough; case 33: case 50: ++p; - /* fall through */ + fallthrough; case 0: break; default: diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c index 7b7d23f25713..a0a71c1df042 100644 --- a/drivers/perf/arm-ccn.c +++ b/drivers/perf/arm-ccn.c @@ -1404,7 +1404,7 @@ static int arm_ccn_init_nodes(struct arm_ccn *ccn, int region, break; case CCN_TYPE_SBAS: ccn->sbas_present = 1; - /* Fall-through */ + fallthrough; default: component = &ccn->node[id]; break; diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index e51ddb6d63ed..cc00915ad6d1 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -1002,7 +1002,7 @@ static void __arm_spe_pmu_dev_probe(void *info) default: dev_warn(dev, "unknown PMSIDR_EL1.Interval [%d]; assuming 8\n", fld); - /* Fallthrough */ + fallthrough; case 8: spe_pmu->min_period = 4096; } @@ -1021,7 +1021,7 @@ static void __arm_spe_pmu_dev_probe(void *info) default: dev_warn(dev, "unknown PMSIDR_EL1.CountSize [%d]; assuming 2\n", fld); - /* Fallthrough */ + fallthrough; case 2: spe_pmu->counter_sz 
= 12; } diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hs.c b/drivers/phy/qualcomm/phy-qcom-usb-hs.c index 61054272a7c8..327df1a99f77 100644 --- a/drivers/phy/qualcomm/phy-qcom-usb-hs.c +++ b/drivers/phy/qualcomm/phy-qcom-usb-hs.c @@ -53,7 +53,7 @@ static int qcom_usb_hs_phy_set_mode(struct phy *phy, case PHY_MODE_USB_OTG: case PHY_MODE_USB_HOST: val |= ULPI_INT_IDGRD; - /* fall through */ + fallthrough; case PHY_MODE_USB_DEVICE: val |= ULPI_INT_SESS_VALID; default: diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c index a84e9f027fc4..46ebdb1460a3 100644 --- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c +++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c @@ -546,7 +546,7 @@ static void rockchip_usb2phy_otg_sm_work(struct work_struct *work) rport->state = OTG_STATE_B_IDLE; if (!vbus_attach) rockchip_usb2phy_power_off(rport->phy); - /* fall through */ + fallthrough; case OTG_STATE_B_IDLE: if (extcon_get_state(rphy->edev, EXTCON_USB_HOST) > 0) { dev_dbg(&rport->phy->dev, "usb otg host connect\n"); @@ -754,11 +754,11 @@ static void rockchip_chg_detect_work(struct work_struct *work) rphy->chg_type = POWER_SUPPLY_TYPE_USB_DCP; else rphy->chg_type = POWER_SUPPLY_TYPE_USB_CDP; - /* fall through */ + fallthrough; case USB_CHG_STATE_SECONDARY_DONE: rphy->chg_state = USB_CHG_STATE_DETECTED; delay = 0; - /* fall through */ + fallthrough; case USB_CHG_STATE_DETECTED: /* put the controller in normal mode */ property_enable(base, &rphy->phy_cfg->chg_det.opmode, true); @@ -835,7 +835,7 @@ static void rockchip_usb2phy_sm_work(struct work_struct *work) dev_dbg(&rport->phy->dev, "FS/LS online\n"); break; } - /* fall through */ + fallthrough; case PHY_STATE_CONNECT: if (rport->suspended) { dev_dbg(&rport->phy->dev, "Connected\n"); diff --git a/drivers/platform/olpc/olpc-xo175-ec.c b/drivers/platform/olpc/olpc-xo175-ec.c index 5e1d14e35f20..0d46706afd2d 100644 --- a/drivers/platform/olpc/olpc-xo175-ec.c +++ b/drivers/platform/olpc/olpc-xo175-ec.c @@ -431,7 +431,7 @@ static void olpc_xo175_ec_complete(void *arg) input_sync(priv->pwrbtn); input_report_key(priv->pwrbtn, KEY_POWER, 0); input_sync(priv->pwrbtn); - /* fall through */ + fallthrough; case EVENT_POWER_PRESS_WAKE: case EVENT_TIMED_HOST_WAKE: pm_wakeup_event(priv->pwrbtn->dev.parent, diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index 60c18f21588d..49f4b73be513 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -1001,7 +1001,7 @@ static acpi_status WMID_get_u32(u32 *value, u32 cap) *value = tmp & 0x1; return 0; } - /* fall through */ + fallthrough; default: return AE_ERROR; } @@ -1328,7 +1328,7 @@ static acpi_status get_u32(u32 *value, u32 cap) status = AMW0_get_u32(value, cap); break; } - /* fall through */ + fallthrough; case ACER_WMID: status = WMID_get_u32(value, cap); break; @@ -1371,7 +1371,7 @@ static acpi_status set_u32(u32 value, u32 cap) return AMW0_set_u32(value, cap); } - /* fall through */ + fallthrough; case ACER_WMID: return WMID_set_u32(value, cap); case ACER_WMID_v2: @@ -1381,7 +1381,7 @@ static acpi_status set_u32(u32 value, u32 cap) return wmid_v2_set_u32(value, cap); else if (wmi_has_guid(WMID_GUID2)) return WMID_set_u32(value, cap); - /* fall through */ + fallthrough; default: return AE_BAD_PARAMETER; } diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index 5e9c2296931c..70edc5bb3a14 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ 
-1587,10 +1587,10 @@ static ssize_t kbd_led_timeout_store(struct device *dev, switch (unit) { case KBD_TIMEOUT_DAYS: value *= 24; - /* fall through */ + fallthrough; case KBD_TIMEOUT_HOURS: value *= 60; - /* fall through */ + fallthrough; case KBD_TIMEOUT_MINUTES: value *= 60; unit = KBD_TIMEOUT_SECONDS; diff --git a/drivers/platform/x86/surfacepro3_button.c b/drivers/platform/x86/surfacepro3_button.c index ec515223f654..d8afed5db94c 100644 --- a/drivers/platform/x86/surfacepro3_button.c +++ b/drivers/platform/x86/surfacepro3_button.c @@ -84,28 +84,28 @@ static void surface_button_notify(struct acpi_device *device, u32 event) /* Power button press,release handle */ case SURFACE_BUTTON_NOTIFY_PRESS_POWER: pressed = true; - /*fall through*/ + fallthrough; case SURFACE_BUTTON_NOTIFY_RELEASE_POWER: key_code = KEY_POWER; break; /* Home button press,release handle */ case SURFACE_BUTTON_NOTIFY_PRESS_HOME: pressed = true; - /*fall through*/ + fallthrough; case SURFACE_BUTTON_NOTIFY_RELEASE_HOME: key_code = KEY_LEFTMETA; break; /* Volume up button press,release handle */ case SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_UP: pressed = true; - /*fall through*/ + fallthrough; case SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_UP: key_code = KEY_VOLUMEUP; break; /* Volume down button press,release handle */ case SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN: pressed = true; - /*fall through*/ + fallthrough; case SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN: key_code = KEY_VOLUMEDOWN; break; diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 4864a5c189d4..9c4df41687a3 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -4060,7 +4060,7 @@ static bool hotkey_notify_6xxx(const u32 hkey, * AC status changed; can be triggered by plugging or * unplugging AC adapter, docking or undocking. 
*/ - /* fallthrough */ + fallthrough; case TP_HKEY_EV_KEY_NUMLOCK: case TP_HKEY_EV_KEY_FN: @@ -4176,7 +4176,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event) known_ev = true; break; } - /* fallthrough - to default */ + fallthrough; /* to default */ default: known_ev = false; } @@ -6266,7 +6266,7 @@ static int thermal_get_sensor(int idx, s32 *value) idx -= 8; } #endif - /* fallthrough */ + fallthrough; case TPACPI_THERMAL_TPEC_8: if (idx <= 7) { if (!acpi_ec_read(t + idx, &tmp)) diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 36fff00af9eb..e557d757c647 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -2748,7 +2748,7 @@ static void toshiba_acpi_process_hotkeys(struct toshiba_acpi_dev *dev) result = hci_write(dev, HCI_SYSTEM_EVENT, 1); if (result == TOS_SUCCESS) pr_notice("Re-enabled hotkeys\n"); - /* Fall through */ + fallthrough; default: retries--; break; diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c index 9469fe182d02..db65be026920 100644 --- a/drivers/power/supply/ab8500_charger.c +++ b/drivers/power/supply/ab8500_charger.c @@ -748,7 +748,7 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di, USB_CH_IP_CUR_LVL_1P5; break; } - /* else, fall through */ + fallthrough; case USB_STAT_HM_IDGND: dev_err(di->dev, "USB Type - Charging not allowed\n"); di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P05; @@ -2410,7 +2410,7 @@ static void ab8500_charger_usb_state_changed_work(struct work_struct *work) * of 1sec for enabling charging */ msleep(1000); - /* Intentional fall through */ + fallthrough; case AB8500_BM_USB_STATE_CONFIGURED: /* * USB is configured, enable charging with the charging diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c index 751c4f6c7487..7eec415c82a3 100644 --- a/drivers/power/supply/ab8500_fg.c +++ b/drivers/power/supply/ab8500_fg.c @@ -1542,7 +1542,7 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di) ab8500_fg_discharge_state_to(di, AB8500_FG_DISCHARGE_INITMEASURING); - /* Intentional fallthrough */ + fallthrough; case AB8500_FG_DISCHARGE_INITMEASURING: /* * Discard a number of samples during startup. 
@@ -1572,7 +1572,7 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di) ab8500_fg_discharge_state_to(di, AB8500_FG_DISCHARGE_RECOVERY); - /* Intentional fallthrough */ + fallthrough; case AB8500_FG_DISCHARGE_RECOVERY: sleep_time = di->bm->fg_params->recovery_sleep_timer; diff --git a/drivers/power/supply/abx500_chargalg.c b/drivers/power/supply/abx500_chargalg.c index 2fb33a07879a..175c4f3d7955 100644 --- a/drivers/power/supply/abx500_chargalg.c +++ b/drivers/power/supply/abx500_chargalg.c @@ -1419,7 +1419,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di) abx500_chargalg_stop_charging(di); di->charge_status = POWER_SUPPLY_STATUS_DISCHARGING; abx500_chargalg_state_to(di, STATE_HANDHELD); - /* Intentional fallthrough */ + fallthrough; case STATE_HANDHELD: break; @@ -1435,7 +1435,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di) di->maintenance_chg = false; abx500_chargalg_state_to(di, STATE_SUSPENDED); power_supply_changed(di->chargalg_psy); - /* Intentional fallthrough */ + fallthrough; case STATE_SUSPENDED: /* CHARGING is suspended */ @@ -1444,7 +1444,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di) case STATE_BATT_REMOVED_INIT: abx500_chargalg_stop_charging(di); abx500_chargalg_state_to(di, STATE_BATT_REMOVED); - /* Intentional fallthrough */ + fallthrough; case STATE_BATT_REMOVED: if (!di->events.batt_rem) @@ -1454,7 +1454,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di) case STATE_HW_TEMP_PROTECT_INIT: abx500_chargalg_stop_charging(di); abx500_chargalg_state_to(di, STATE_HW_TEMP_PROTECT); - /* Intentional fallthrough */ + fallthrough; case STATE_HW_TEMP_PROTECT: if (!di->events.main_thermal_prot && @@ -1465,7 +1465,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di) case STATE_OVV_PROTECT_INIT: abx500_chargalg_stop_charging(di); abx500_chargalg_state_to(di, STATE_OVV_PROTECT); - /* Intentional fallthrough */ + fallthrough; case STATE_OVV_PROTECT: if (!di->events.vbus_ovv && @@ -1479,7 +1479,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di) case STATE_CHG_NOT_OK_INIT: abx500_chargalg_stop_charging(di); abx500_chargalg_state_to(di, STATE_CHG_NOT_OK); - /* Intentional fallthrough */ + fallthrough; case STATE_CHG_NOT_OK: if (!di->events.mainextchnotok && @@ -1490,7 +1490,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di) case STATE_SAFETY_TIMER_EXPIRED_INIT: abx500_chargalg_stop_charging(di); abx500_chargalg_state_to(di, STATE_SAFETY_TIMER_EXPIRED); - /* Intentional fallthrough */ + fallthrough; case STATE_SAFETY_TIMER_EXPIRED: /* We exit this state when charger is removed */ @@ -1537,7 +1537,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di) case STATE_WAIT_FOR_RECHARGE_INIT: abx500_chargalg_hold_charging(di); abx500_chargalg_state_to(di, STATE_WAIT_FOR_RECHARGE); - /* Intentional fallthrough */ + fallthrough; case STATE_WAIT_FOR_RECHARGE: if (di->batt_data.percent <= @@ -1558,7 +1558,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di) di->bm->batt_id].maint_a_cur_lvl); abx500_chargalg_state_to(di, STATE_MAINTENANCE_A); power_supply_changed(di->chargalg_psy); - /* Intentional fallthrough*/ + fallthrough; case STATE_MAINTENANCE_A: if (di->events.maintenance_timer_expired) { @@ -1578,7 +1578,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di) di->bm->batt_id].maint_b_cur_lvl); abx500_chargalg_state_to(di, STATE_MAINTENANCE_B); power_supply_changed(di->chargalg_psy); - /* 
Intentional fallthrough*/ + fallthrough; case STATE_MAINTENANCE_B: if (di->events.maintenance_timer_expired) { @@ -1597,7 +1597,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di) di->charge_status = POWER_SUPPLY_STATUS_CHARGING; abx500_chargalg_state_to(di, STATE_TEMP_LOWHIGH); power_supply_changed(di->chargalg_psy); - /* Intentional fallthrough */ + fallthrough; case STATE_TEMP_LOWHIGH: if (!di->events.btemp_lowhigh) @@ -1607,7 +1607,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di) case STATE_WD_EXPIRED_INIT: abx500_chargalg_stop_charging(di); abx500_chargalg_state_to(di, STATE_WD_EXPIRED); - /* Intentional fallthrough */ + fallthrough; case STATE_WD_EXPIRED: if (!di->events.ac_wd_expired && @@ -1618,7 +1618,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di) case STATE_TEMP_UNDEROVER_INIT: abx500_chargalg_stop_charging(di); abx500_chargalg_state_to(di, STATE_TEMP_UNDEROVER); - /* Intentional fallthrough */ + fallthrough; case STATE_TEMP_UNDEROVER: if (!di->events.btemp_underover) diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c index d01dc0332edc..0eaa86c52874 100644 --- a/drivers/power/supply/axp20x_usb_power.c +++ b/drivers/power/supply/axp20x_usb_power.c @@ -349,7 +349,7 @@ static int axp20x_usb_power_set_current_max(struct axp20x_usb_power *power, case 100000: if (power->axp20x_id == AXP221_ID) return -EINVAL; - /* fall through */ + fallthrough; case 500000: case 900000: val = (900000 - intval) / 400000; diff --git a/drivers/power/supply/cros_usbpd-charger.c b/drivers/power/supply/cros_usbpd-charger.c index 2a45e84447fe..d89e08efd2ad 100644 --- a/drivers/power/supply/cros_usbpd-charger.c +++ b/drivers/power/supply/cros_usbpd-charger.c @@ -383,7 +383,7 @@ static int cros_usbpd_charger_get_prop(struct power_supply *psy, */ if (ec_device->mkbp_event_supported || port->psy_online) break; - /* fall through */ + fallthrough; case POWER_SUPPLY_PROP_CURRENT_MAX: case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: case POWER_SUPPLY_PROP_VOLTAGE_NOW: diff --git a/drivers/power/supply/max8925_power.c b/drivers/power/supply/max8925_power.c index 5fca4960f440..8878f9131184 100644 --- a/drivers/power/supply/max8925_power.c +++ b/drivers/power/supply/max8925_power.c @@ -121,7 +121,7 @@ static irqreturn_t max8925_charger_handler(int irq, void *data) case MAX8925_IRQ_VCHG_THM_OK_F: /* Battery is not ready yet */ dev_dbg(chip->dev, "Battery temperature is out of range\n"); - /* Fall through */ + fallthrough; case MAX8925_IRQ_VCHG_DC_OVP: dev_dbg(chip->dev, "Error detection\n"); __set_charger(info, 0); diff --git a/drivers/power/supply/wm831x_power.c b/drivers/power/supply/wm831x_power.c index 65832bc229f6..18b33f14dfee 100644 --- a/drivers/power/supply/wm831x_power.c +++ b/drivers/power/supply/wm831x_power.c @@ -665,7 +665,7 @@ static int wm831x_power_probe(struct platform_device *pdev) break; default: dev_err(&pdev->dev, "Failed to find USB phy: %d\n", ret); - /* fall-through */ + fallthrough; case -EPROBE_DEFER: goto err_bat_irq; break; diff --git a/drivers/power/supply/wm8350_power.c b/drivers/power/supply/wm8350_power.c index 26923af574f4..e05cee457471 100644 --- a/drivers/power/supply/wm8350_power.c +++ b/drivers/power/supply/wm8350_power.c @@ -227,7 +227,7 @@ static irqreturn_t wm8350_charger_handler(int irq, void *data) case WM8350_IRQ_EXT_USB_FB: case WM8350_IRQ_EXT_WALL_FB: wm8350_charger_config(wm8350, policy); - /* Fall through */ + fallthrough; case WM8350_IRQ_EXT_BAT_FB: 
power_supply_changed(power->battery); power_supply_changed(power->usb); diff --git a/drivers/ps3/ps3av.c b/drivers/ps3/ps3av.c index 24f04ffdd986..9d66257e1da5 100644 --- a/drivers/ps3/ps3av.c +++ b/drivers/ps3/ps3av.c @@ -769,7 +769,7 @@ static int ps3av_auto_videomode(struct ps3av_pkt_av_get_hw_conf *av_hw_conf) switch (info->monitor_type) { case PS3AV_MONITOR_TYPE_DVI: dvi = PS3AV_MODE_DVI; - /* fall through */ + fallthrough; case PS3AV_MONITOR_TYPE_HDMI: id = ps3av_hdmi_get_id(info); break; diff --git a/drivers/ps3/ps3av_cmd.c b/drivers/ps3/ps3av_cmd.c index f0e650cc866e..c22206652f06 100644 --- a/drivers/ps3/ps3av_cmd.c +++ b/drivers/ps3/ps3av_cmd.c @@ -693,11 +693,11 @@ void ps3av_cmd_set_audio_mode(struct ps3av_pkt_audio_mode *audio, u32 avport, switch (ch) { case PS3AV_CMD_AUDIO_NUM_OF_CH_8: audio->audio_enable[3] = 1; - /* fall through */ + fallthrough; case PS3AV_CMD_AUDIO_NUM_OF_CH_6: audio->audio_enable[2] = 1; audio->audio_enable[1] = 1; - /* fall through */ + fallthrough; case PS3AV_CMD_AUDIO_NUM_OF_CH_2: default: audio->audio_enable[0] = 1; diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index c07ceec3c6d4..a30342942e26 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -2150,7 +2150,7 @@ static void mport_release_mapping(struct kref *ref) switch (map->dir) { case MAP_INBOUND: rio_unmap_inb_region(mport, map->phys_addr); - /* fall through */ + fallthrough; case MAP_DMA: dma_free_coherent(mport->dev.parent, map->size, map->virt_addr, map->phys_addr); diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index fbc95cadaf53..1bacb37e8a99 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -399,7 +399,7 @@ static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp) if (rate_count > 0) break; - /* fall through */ + fallthrough; default: /* Not supported for this regulator */ return -ENOTSUPP; @@ -1022,7 +1022,7 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq) * (See include/linux/mfd/axp20x.h) */ reg = AXP803_DCDC_FREQ_CTRL; - /* Fall through - to the check below.*/ + fallthrough; /* to the check below */ case AXP806_ID: /* * AXP806 also have DCDC work frequency setting register at a @@ -1030,7 +1030,7 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq) */ if (axp20x->variant == AXP806_ID) reg = AXP806_DCDC_FREQ_CTRL; - /* Fall through */ + fallthrough; case AXP221_ID: case AXP223_ID: case AXP809_ID: @@ -1118,7 +1118,7 @@ static int axp20x_set_dcdc_workmode(struct regulator_dev *rdev, int id, u32 work * (See include/linux/mfd/axp20x.h) */ reg = AXP806_DCDC_MODE_CTRL2; - /* Fall through - to the check below. 
*/ + fallthrough; /* to the check below */ case AXP221_ID: case AXP223_ID: case AXP809_ID: diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 75ff7c563c5d..3fd359914690 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1895,7 +1895,7 @@ struct regulator *_regulator_get(struct device *dev, const char *id, case EXCLUSIVE_GET: dev_warn(dev, "dummy supplies not allowed for exclusive requests\n"); - /* fall through */ + fallthrough; default: return ERR_PTR(-ENODEV); diff --git a/drivers/regulator/slg51000-regulator.c b/drivers/regulator/slg51000-regulator.c index 44e4cecbf6de..87b020d0b958 100644 --- a/drivers/regulator/slg51000-regulator.c +++ b/drivers/regulator/slg51000-regulator.c @@ -319,7 +319,7 @@ static int slg51000_regulator_init(struct slg51000 *chip) rdesc->linear_min_sel = 0; break; } - /* Fall through - to the check below.*/ + fallthrough; /* to the check below */ default: rdesc->linear_min_sel = vsel_range[0]; diff --git a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c index f7db250a7583..430265c404d6 100644 --- a/drivers/regulator/twl6030-regulator.c +++ b/drivers/regulator/twl6030-regulator.c @@ -312,7 +312,7 @@ static int twl6030smps_list_voltage(struct regulator_dev *rdev, unsigned index) switch (info->flags) { case SMPS_OFFSET_EN: voltage = 100000; - /* fall through */ + fallthrough; case 0: switch (index) { case 0: diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c index 6955fab0a78b..d94b7391bf9d 100644 --- a/drivers/remoteproc/omap_remoteproc.c +++ b/drivers/remoteproc/omap_remoteproc.c @@ -511,7 +511,6 @@ static void omap_rproc_mbox_callback(struct mbox_client *client, void *data) dev_info(dev, "received echo reply from %s\n", name); break; case RP_MBOX_SUSPEND_ACK: - /* Fall through */ case RP_MBOX_SUSPEND_CANCEL: oproc->suspend_acked = msg == RP_MBOX_SUSPEND_ACK; complete(&oproc->pm_comp); diff --git a/drivers/reset/reset-imx7.c b/drivers/reset/reset-imx7.c index d170fe663210..e8aa8691deb2 100644 --- a/drivers/reset/reset-imx7.c +++ b/drivers/reset/reset-imx7.c @@ -222,7 +222,7 @@ static int imx8mq_reset_set(struct reset_controller_dev *rcdev, switch (id) { case IMX8MQ_RESET_PCIEPHY: - case IMX8MQ_RESET_PCIEPHY2: /* fallthrough */ + case IMX8MQ_RESET_PCIEPHY2: /* * wait for more than 10us to release phy g_rst and * btnrst @@ -232,12 +232,12 @@ static int imx8mq_reset_set(struct reset_controller_dev *rcdev, break; case IMX8MQ_RESET_PCIE_CTRL_APPS_EN: - case IMX8MQ_RESET_PCIE2_CTRL_APPS_EN: /* fallthrough */ - case IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N: /* fallthrough */ - case IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N: /* fallthrough */ - case IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N: /* fallthrough */ - case IMX8MQ_RESET_MIPI_DSI_RESET_N: /* fallthrough */ - case IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N: /* fallthrough */ + case IMX8MQ_RESET_PCIE2_CTRL_APPS_EN: + case IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N: + case IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N: + case IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N: + case IMX8MQ_RESET_MIPI_DSI_RESET_N: + case IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N: value = assert ? 
0 : bit; break; } diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index 1995f5b3ea67..f40312b16da0 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -553,7 +553,7 @@ static void qcom_glink_receive_version(struct qcom_glink *glink, break; case GLINK_VERSION_1: glink->features &= features; - /* FALLTHROUGH */ + fallthrough; default: qcom_glink_send_version_ack(glink); break; @@ -584,7 +584,7 @@ static void qcom_glink_receive_version_ack(struct qcom_glink *glink, break; glink->features &= features; - /* FALLTHROUGH */ + fallthrough; default: qcom_glink_send_version(glink); break; diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index 9b70b371bd0c..8a89bc52b0d4 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -740,7 +740,7 @@ static int wdt_ioctl(struct file *file, unsigned int cmd, return -EINVAL; wdt_margin = new_margin; wdt_ping(); - /* Fall through */ + fallthrough; case WDIOC_GETTIMEOUT: return put_user(wdt_margin, (int __user *)arg); diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c index ca55ba975aeb..f8b99cb72959 100644 --- a/drivers/rtc/rtc-pcf85063.c +++ b/drivers/rtc/rtc-pcf85063.c @@ -353,7 +353,7 @@ static int pcf85063_load_capacitance(struct pcf85063 *pcf85063, default: dev_warn(&pcf85063->rtc->dev, "Unknown quartz-load-femtofarads value: %d. Assuming 7000", load); - /* fall through */ + fallthrough; case 7000: break; case 12500: diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c index 47e0f411dd5c..57d351dfe272 100644 --- a/drivers/rtc/rtc-pcf8523.c +++ b/drivers/rtc/rtc-pcf8523.c @@ -108,7 +108,7 @@ static int pcf8523_load_capacitance(struct i2c_client *client) default: dev_warn(&client->dev, "Unknown quartz-load-femtofarads value: %d. Assuming 12500", load); - /* fall through */ + fallthrough; case 12500: value |= REG_CONTROL1_CAP_SEL; break; diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c index c9bc3d4a1e66..0a969af80af7 100644 --- a/drivers/rtc/rtc-stmp3xxx.c +++ b/drivers/rtc/rtc-stmp3xxx.c @@ -331,7 +331,7 @@ static int stmp3xxx_rtc_probe(struct platform_device *pdev) default: dev_warn(&pdev->dev, "invalid crystal-freq specified in device-tree. 
Assuming no crystal\n"); - /* fall-through */ + fallthrough; case 0: /* keep XTAL on in low-power mode */ pers0_set = STMP3XXX_RTC_PERSISTENT0_XTAL24MHZ_PWRUP; diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c index 3ce99e4db44d..661d2a49bce9 100644 --- a/drivers/s390/net/ctcm_fsms.c +++ b/drivers/s390/net/ctcm_fsms.c @@ -1695,7 +1695,7 @@ static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg) grp->changed_side = 2; break; } - /* Else, fall through */ + fallthrough; case MPCG_STATE_XID0IOWAIX: case MPCG_STATE_XID7INITW: case MPCG_STATE_XID7INITX: diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c index ab316baa8284..85a1a4533cbe 100644 --- a/drivers/s390/net/ctcm_mpc.c +++ b/drivers/s390/net/ctcm_mpc.c @@ -357,7 +357,7 @@ int ctc_mpc_alloc_channel(int port_num, void (*callback)(int, int)) /*fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);*/ if (callback) grp->send_qllc_disc = 1; - /* Else, fall through */ + fallthrough; case MPCG_STATE_XID0IOWAIT: fsm_deltimer(&grp->timer); grp->outstanding_xid2 = 0; @@ -1470,7 +1470,7 @@ static void mpc_action_timeout(fsm_instance *fi, int event, void *arg) if ((fsm_getstate(rch->fsm) == CH_XID0_PENDING) && (fsm_getstate(wch->fsm) == CH_XID0_PENDING)) break; - /* Else, fall through */ + fallthrough; default: fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); } @@ -2089,7 +2089,7 @@ static int mpc_send_qllc_discontact(struct net_device *dev) grp->estconnfunc = NULL; break; } - /* Else, fall through */ + fallthrough; case MPCG_STATE_FLOWC: case MPCG_STATE_READY: grp->send_qllc_disc = 2; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index bba1b54b8aa3..6a7398251423 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -1071,7 +1071,7 @@ static void qeth_issue_next_read_cb(struct qeth_card *card, break; case -EIO: qeth_schedule_recovery(card); - /* fall through */ + fallthrough; default: qeth_clear_ipacmd_list(card); goto err_idx; @@ -2886,7 +2886,7 @@ void qeth_print_status_message(struct qeth_card *card) card->info.mcl_level[3]); break; } - /* fallthrough */ + fallthrough; case QETH_CARD_TYPE_IQD: if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) { card->info.mcl_level[0] = (char) _ebcasc[(__u8) diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c index ebdc03210608..f870c5322bfe 100644 --- a/drivers/s390/net/qeth_ethtool.c +++ b/drivers/s390/net/qeth_ethtool.c @@ -356,7 +356,7 @@ static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd, 10000baseT_Full); ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full); - /* fall through */ + fallthrough; case SPEED_1000: ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); @@ -366,7 +366,7 @@ static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd, 1000baseT_Half); ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Half); - /* fall through */ + fallthrough; case SPEED_100: ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full); @@ -376,7 +376,7 @@ static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd, 100baseT_Half); ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half); - /* fall through */ + fallthrough; case SPEED_10: ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full); diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 8b342a88ff5c..3a94f6cad167 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ 
b/drivers/s390/net/qeth_l2_main.c @@ -488,7 +488,7 @@ static void qeth_l2_rx_mode_work(struct work_struct *work) kfree(mac); break; } - /* fall through */ + fallthrough; default: /* for next call to set_rx_mode(): */ mac->disp_flag = QETH_DISP_ADDR_DELETE; diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index fe44b0249e34..4d461960370d 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1235,7 +1235,7 @@ static void qeth_l3_rx_mode_work(struct work_struct *work) break; } addr->ref_counter = 1; - /* fall through */ + fallthrough; default: /* for next call to set_rx_mode(): */ addr->disp_flag = QETH_DISP_ADDR_DELETE; diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index 461b3babb601..84b57a8f86bf 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c @@ -1832,7 +1832,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *) case REQUEST_SENSE: /* clear the internal sense magic */ SCp->cmnd[6] = 0; - /* fall through */ + fallthrough; default: /* OK, get it from the command */ switch(SCp->sc_data_direction) { diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c index bb49d83cadc7..ccb061ab0a0a 100644 --- a/drivers/scsi/BusLogic.c +++ b/drivers/scsi/BusLogic.c @@ -2635,7 +2635,7 @@ static int blogic_resultcode(struct blogic_adapter *adapter, case BLOGIC_BAD_CMD_PARAM: blogic_warn("BusLogic Driver Protocol Error 0x%02X\n", adapter, adapter_status); - /* fall through */ + fallthrough; case BLOGIC_DATA_UNDERRUN: case BLOGIC_DATA_OVERRUN: case BLOGIC_NOEXPECT_BUSFREE: diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c index 0f17bd51088a..24ace1824048 100644 --- a/drivers/scsi/FlashPoint.c +++ b/drivers/scsi/FlashPoint.c @@ -1034,11 +1034,14 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo) temp6 >>= 1; switch (temp & 0x3) { case AUTO_RATE_20: /* Synchronous, 20 mega-transfers/second */ - temp6 |= 0x8000; /* Fall through */ + temp6 |= 0x8000; + fallthrough; case AUTO_RATE_10: /* Synchronous, 10 mega-transfers/second */ - temp5 |= 0x8000; /* Fall through */ + temp5 |= 0x8000; + fallthrough; case AUTO_RATE_05: /* Synchronous, 5 mega-transfers/second */ - temp2 |= 0x8000; /* Fall through */ + temp2 |= 0x8000; + fallthrough; case AUTO_RATE_00: /* Asynchronous */ break; } diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index f2f7e6e76c07..d654a6cc4162 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -1943,7 +1943,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) return; /* Reject message */ - /* Fall through */ + fallthrough; default: /* * If we get something weird that we aren't expecting, diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 769af4ca9ca9..fd6ae5c38086 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -2809,7 +2809,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) !(dev->raw_io_64) || ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16)) break; - /* fall through */ + fallthrough; case INQUIRY: case READ_CAPACITY: case TEST_UNIT_READY: @@ -2884,7 +2884,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) /* Issue FIB to tell Firmware to flush it's cache */ if ((aac_cache & 6) != 2) return aac_synchronize(scsicmd); - /* fall through */ + fallthrough; case INQUIRY: { struct inquiry_data inq_data; @@ -3240,7 +3240,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) SCSI_SENSE_BUFFERSIZE)); break; } - /* fall through */ + 
fallthrough; case RESERVE: case RELEASE: case REZERO_UNIT: @@ -3253,7 +3253,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) case START_STOP: return aac_start_stop(scsicmd); - /* FALLTHRU */ + fallthrough; default: /* * Unhandled commands diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index adbdc3b7c7a7..383e74fea6ed 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -1431,7 +1431,7 @@ retry_next: "enclosure services event"); scsi_device_set_state(device, SDEV_RUNNING); } - /* FALLTHRU */ + fallthrough; case CHANGE: if ((channel == CONTAINER_CHANNEL) && (!dev->fsa_dev[container].valid)) { diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 8588da0a0655..a3aee146537b 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -765,7 +765,7 @@ static int aac_eh_abort(struct scsi_cmnd* cmd) !(aac->raw_io_64) || ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16)) break; - /* fall through */ + fallthrough; case INQUIRY: case READ_CAPACITY: /* diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c index c912d29b8bdf..1c617c0d5899 100644 --- a/drivers/scsi/aic7xxx/aic79xx_core.c +++ b/drivers/scsi/aic7xxx/aic79xx_core.c @@ -2274,7 +2274,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat) switch (scb->hscb->task_management) { case SIU_TASKMGMT_ABORT_TASK: tag = SCB_GET_TAG(scb); - /* fall through */ + fallthrough; case SIU_TASKMGMT_ABORT_TASK_SET: case SIU_TASKMGMT_CLEAR_TASK_SET: lun = scb->hscb->lun; @@ -2285,7 +2285,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat) break; case SIU_TASKMGMT_LUN_RESET: lun = scb->hscb->lun; - /* fall through */ + fallthrough; case SIU_TASKMGMT_TARGET_RESET: { struct ahd_devinfo devinfo; @@ -3791,7 +3791,7 @@ ahd_validate_width(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo, *bus_width = MSG_EXT_WDTR_BUS_16_BIT; break; } - /* FALLTHROUGH */ + fallthrough; case MSG_EXT_WDTR_BUS_8_BIT: *bus_width = MSG_EXT_WDTR_BUS_8_BIT; break; @@ -5104,7 +5104,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) break; case MSG_MESSAGE_REJECT: response = ahd_handle_msg_reject(ahd, devinfo); - /* FALLTHROUGH */ + fallthrough; case MSG_NOOP: done = MSGLOOP_MSGCOMPLETE; break; @@ -5454,7 +5454,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) ahd_name(ahd), ahd_inb(ahd, SCSISIGI)); #endif ahd->msg_flags |= MSG_FLAG_EXPECT_QASREJ_BUSFREE; - /* FALLTHROUGH */ + fallthrough; case MSG_TERM_IO_PROC: default: reject = TRUE; @@ -6117,17 +6117,17 @@ ahd_free(struct ahd_softc *ahd) default: case 5: ahd_shutdown(ahd); - /* FALLTHROUGH */ + fallthrough; case 4: ahd_dmamap_unload(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap); - /* FALLTHROUGH */ + fallthrough; case 3: ahd_dmamem_free(ahd, ahd->shared_data_dmat, ahd->qoutfifo, ahd->shared_data_map.dmamap); ahd_dmamap_destroy(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap); - /* FALLTHROUGH */ + fallthrough; case 2: ahd_dma_tag_destroy(ahd, ahd->shared_data_dmat); case 1: @@ -6513,7 +6513,7 @@ ahd_fini_scbdata(struct ahd_softc *ahd) } ahd_dma_tag_destroy(ahd, scb_data->sense_dmat); } - /* fall through */ + fallthrough; case 6: { struct map_node *sg_map; @@ -6528,7 +6528,7 @@ ahd_fini_scbdata(struct ahd_softc *ahd) } ahd_dma_tag_destroy(ahd, scb_data->sg_dmat); } - /* fall through */ + fallthrough; case 5: { struct map_node *hscb_map; @@ -7171,7 +7171,7 @@ ahd_init(struct ahd_softc *ahd) case FLX_CSTAT_OVER: case 
FLX_CSTAT_UNDER: warn_user++; - /* fall through */ + fallthrough; case FLX_CSTAT_INVALID: case FLX_CSTAT_OKAY: if (warn_user == 0 && bootverbose == 0) @@ -8175,12 +8175,12 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel, if ((scb->flags & SCB_ACTIVE) == 0) printk("Inactive SCB in qinfifo\n"); ahd_done_with_status(ahd, scb, status); - /* FALLTHROUGH */ + fallthrough; case SEARCH_REMOVE: break; case SEARCH_PRINT: printk(" 0x%x", ahd->qinfifo[qinpos]); - /* FALLTHROUGH */ + fallthrough; case SEARCH_COUNT: ahd_qinfifo_requeue(ahd, prev_scb, scb); prev_scb = scb; @@ -8271,7 +8271,7 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel, if ((mk_msg_scb->flags & SCB_ACTIVE) == 0) printk("Inactive SCB pending MK_MSG\n"); ahd_done_with_status(ahd, mk_msg_scb, status); - /* FALLTHROUGH */ + fallthrough; case SEARCH_REMOVE: { u_int tail_offset; @@ -8295,7 +8295,7 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel, } case SEARCH_PRINT: printk(" 0x%x", SCB_GET_TAG(scb)); - /* FALLTHROUGH */ + fallthrough; case SEARCH_COUNT: break; } @@ -8376,7 +8376,7 @@ ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel, if ((scb->flags & SCB_ACTIVE) == 0) printk("Inactive SCB in Waiting List\n"); ahd_done_with_status(ahd, scb, status); - /* fall through */ + fallthrough; case SEARCH_REMOVE: ahd_rem_wscb(ahd, scbid, prev, next, tid); *list_tail = prev; @@ -8385,7 +8385,7 @@ ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel, break; case SEARCH_PRINT: printk("0x%x ", scbid); - /* fall through */ + fallthrough; case SEARCH_COUNT: prev = scbid; break; @@ -9023,7 +9023,7 @@ ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb) case SCSI_STATUS_OK: printk("%s: Interrupted for status of 0???\n", ahd_name(ahd)); - /* FALLTHROUGH */ + fallthrough; default: ahd_done(ahd, scb); break; @@ -9512,7 +9512,7 @@ ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts) fmt3_ins = &instr.format3; fmt3_ins->address = ahd_resolve_seqaddr(ahd, fmt3_ins->address); } - /* fall through */ + fallthrough; case AIC_OP_OR: case AIC_OP_AND: case AIC_OP_XOR: @@ -9523,7 +9523,7 @@ ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts) fmt1_ins->immediate = dconsts[fmt1_ins->immediate]; } fmt1_ins->parity = 0; - /* fall through */ + fallthrough; case AIC_OP_ROL: { int i, count; diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c index d019e3f2bb9b..7c321303969e 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm.c +++ b/drivers/scsi/aic7xxx/aic79xx_osm.c @@ -2035,7 +2035,7 @@ ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd) break; case CAM_AUTOSENSE_FAIL: new_status = DID_ERROR; - /* Fallthrough */ + fallthrough; case CAM_SCSI_STATUS_ERROR: scsi_status = ahd_cmd_get_scsi_status(cmd); diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c index 3d4df906fa4f..2231c4afa531 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_core.c +++ b/drivers/scsi/aic7xxx/aic7xxx_core.c @@ -2404,7 +2404,7 @@ ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, *bus_width = MSG_EXT_WDTR_BUS_16_BIT; break; } - /* FALLTHROUGH */ + fallthrough; case MSG_EXT_WDTR_BUS_8_BIT: *bus_width = MSG_EXT_WDTR_BUS_8_BIT; break; @@ -3599,7 +3599,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) break; case MSG_MESSAGE_REJECT: response = ahc_handle_msg_reject(ahc, devinfo); - /* FALLTHROUGH */ + fallthrough; case MSG_NOOP: done = 
MSGLOOP_MSGCOMPLETE; break; @@ -4465,17 +4465,17 @@ ahc_free(struct ahc_softc *ahc) default: case 5: ahc_shutdown(ahc); - /* FALLTHROUGH */ + fallthrough; case 4: ahc_dmamap_unload(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap); - /* FALLTHROUGH */ + fallthrough; case 3: ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo, ahc->shared_data_dmamap); ahc_dmamap_destroy(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap); - /* FALLTHROUGH */ + fallthrough; case 2: ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat); case 1: @@ -4893,30 +4893,30 @@ ahc_fini_scbdata(struct ahc_softc *ahc) } ahc_dma_tag_destroy(ahc, scb_data->sg_dmat); } - /* fall through */ + fallthrough; case 6: ahc_dmamap_unload(ahc, scb_data->sense_dmat, scb_data->sense_dmamap); - /* fall through */ + fallthrough; case 5: ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense, scb_data->sense_dmamap); ahc_dmamap_destroy(ahc, scb_data->sense_dmat, scb_data->sense_dmamap); - /* fall through */ + fallthrough; case 4: ahc_dma_tag_destroy(ahc, scb_data->sense_dmat); - /* fall through */ + fallthrough; case 3: ahc_dmamap_unload(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap); - /* fall through */ + fallthrough; case 2: ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs, scb_data->hscb_dmamap); ahc_dmamap_destroy(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap); - /* fall through */ + fallthrough; case 1: ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat); break; @@ -5981,7 +5981,7 @@ ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel, printk("Inactive SCB in Waiting List\n"); ahc_done(ahc, scb); } - /* fall through */ + fallthrough; case SEARCH_REMOVE: next = ahc_rem_wscb(ahc, next, prev); break; @@ -6987,7 +6987,7 @@ ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts) address -= address_offset; fmt3_ins->address = address; } - /* fall through */ + fallthrough; case AIC_OP_OR: case AIC_OP_AND: case AIC_OP_XOR: @@ -7013,7 +7013,7 @@ ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts) fmt1_ins->opcode = AIC_OP_AND; fmt1_ins->immediate = 0xff; } - /* fall through */ + fallthrough; case AIC_OP_ROL: if ((ahc->features & AHC_ULTRA2) != 0) { int i, count; diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c index c264b4b56970..e2d880a5f391 100644 --- a/drivers/scsi/aic94xx/aic94xx_scb.c +++ b/drivers/scsi/aic94xx/aic94xx_scb.c @@ -706,11 +706,11 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd) switch (pd->max_sas_lrate) { case SAS_LINK_RATE_6_0_GBPS: *speed_mask &= ~SAS_SPEED_60_DIS; - /* fall through*/ + fallthrough; default: case SAS_LINK_RATE_3_0_GBPS: *speed_mask &= ~SAS_SPEED_30_DIS; - /* fall through*/ + fallthrough; case SAS_LINK_RATE_1_5_GBPS: *speed_mask &= ~SAS_SPEED_15_DIS; } @@ -718,7 +718,7 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd) switch (pd->min_sas_lrate) { case SAS_LINK_RATE_6_0_GBPS: *speed_mask |= SAS_SPEED_30_DIS; - /* fall through*/ + fallthrough; case SAS_LINK_RATE_3_0_GBPS: *speed_mask |= SAS_SPEED_15_DIS; default: @@ -730,7 +730,7 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd) switch (pd->max_sata_lrate) { case SAS_LINK_RATE_3_0_GBPS: *speed_mask &= ~SATA_SPEED_30_DIS; - /* fall through*/ + fallthrough; default: case SAS_LINK_RATE_1_5_GBPS: *speed_mask &= ~SATA_SPEED_15_DIS; @@ -789,7 +789,7 @@ void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc) /* link reset retries, this should be nominal */ 
control_phy->link_reset_retries = 10; - /* fall through */ + fallthrough; case RELEASE_SPINUP_HOLD: /* 0x02 */ /* decide the func_mask */ diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c index 1fcee65193a3..0eb6e206a2b4 100644 --- a/drivers/scsi/aic94xx/aic94xx_tmf.c +++ b/drivers/scsi/aic94xx/aic94xx_tmf.c @@ -490,7 +490,7 @@ int asd_abort_task(struct sas_task *task) switch (tcs.dl_opcode) { default: res = asd_clear_nexus(task); - /* fallthrough */ + fallthrough; case TC_NO_ERROR: break; /* The task hasn't been sent to the device xor diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index fa562a085600..ec895d0319f0 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -4470,7 +4470,7 @@ static const char *arcmsr_info(struct Scsi_Host *host) case PCI_DEVICE_ID_ARECA_1202: case PCI_DEVICE_ID_ARECA_1210: raid6 = 0; - /*FALLTHRU*/ + fallthrough; case PCI_DEVICE_ID_ARECA_1120: case PCI_DEVICE_ID_ARECA_1130: case PCI_DEVICE_ID_ARECA_1160: diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c index 6c68c2303638..2e687ce60753 100644 --- a/drivers/scsi/arm/fas216.c +++ b/drivers/scsi/arm/fas216.c @@ -603,7 +603,7 @@ static void fas216_handlesync(FAS216_Info *info, char *msg) msgqueue_flush(&info->scsi.msgs); msgqueue_addmsg(&info->scsi.msgs, 1, MESSAGE_REJECT); info->scsi.phase = PHASE_MSGOUT_EXPECT; - /* fall through */ + fallthrough; case async: dev->period = info->ifcfg.asyncperiod / 4; @@ -916,7 +916,7 @@ static void fas216_disconnect_intr(FAS216_Info *info) fas216_done(info, DID_ABORT); break; } - /* else, fall through */ + fallthrough; default: /* huh? */ printk(KERN_ERR "scsi%d.%c: unexpected disconnect in phase %s\n", @@ -1413,7 +1413,7 @@ static void fas216_busservice_intr(FAS216_Info *info, unsigned int stat, unsigne case STATE(STAT_STATUS, PHASE_DATAOUT): /* Data Out -> Status */ case STATE(STAT_STATUS, PHASE_DATAIN): /* Data In -> Status */ fas216_stoptransfer(info); - /* fall through */ + fallthrough; case STATE(STAT_STATUS, PHASE_SELSTEPS):/* Sel w/ steps -> Status */ case STATE(STAT_STATUS, PHASE_MSGOUT): /* Message Out -> Status */ @@ -1426,7 +1426,7 @@ static void fas216_busservice_intr(FAS216_Info *info, unsigned int stat, unsigne case STATE(STAT_MESGIN, PHASE_DATAOUT): /* Data Out -> Message In */ case STATE(STAT_MESGIN, PHASE_DATAIN): /* Data In -> Message In */ fas216_stoptransfer(info); - /* fall through */ + fallthrough; case STATE(STAT_MESGIN, PHASE_COMMAND): /* Command -> Message In */ case STATE(STAT_MESGIN, PHASE_SELSTEPS):/* Sel w/ steps -> Message In */ @@ -1581,7 +1581,7 @@ static void fas216_funcdone_intr(FAS216_Info *info, unsigned int stat, unsigned fas216_message(info); break; } - /* else, fall through */ + fallthrough; default: fas216_log(info, 0, "internal phase %s for function done?" 
@@ -1964,7 +1964,7 @@ static void fas216_kick(FAS216_Info *info) switch (where_from) { case TYPE_QUEUE: fas216_allocate_tag(info, SCpnt); - /* fall through */ + fallthrough; case TYPE_OTHER: fas216_start_command(info, SCpnt); break; diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index 93da6344424d..a13c203ef7a9 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c @@ -677,7 +677,7 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn, case ISCSI_PARAM_MAX_XMIT_DLENGTH: if (conn->max_xmit_dlength > 65536) conn->max_xmit_dlength = 65536; - /* fall through */ + fallthrough; default: return 0; } diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 8dc2e0824ad7..5c3513a4b450 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -1532,7 +1532,7 @@ beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn, break; case UNSOL_DATA_DIGEST_ERROR_NOTIFY: error = 1; - /* fall through */ + fallthrough; case UNSOL_DATA_NOTIFY: pasync_handle = pasync_ctx->async_entry[ci].data; break; diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c index 29f99561dfc3..38d1c453074d 100644 --- a/drivers/scsi/bfa/bfa_fcpim.c +++ b/drivers/scsi/bfa/bfa_fcpim.c @@ -2572,7 +2572,7 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim) case FCP_IODIR_RW: bfa_stats(itnim, input_reqs); bfa_stats(itnim, output_reqs); - /* fall through */ + fallthrough; default: bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa)); } @@ -2807,7 +2807,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) case BFI_IOIM_STS_TIMEDOUT: bfa_stats(ioim->itnim, iocomp_timedout); - /* fall through */ + fallthrough; case BFI_IOIM_STS_ABORTED: rsp->io_status = BFI_IOIM_STS_ABORTED; bfa_stats(ioim->itnim, iocomp_aborted); @@ -3203,7 +3203,7 @@ bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, switch (event) { case BFA_TSKIM_SM_DONE: bfa_reqq_wcancel(&tskim->reqq_wait); - /* fall through */ + fallthrough; case BFA_TSKIM_SM_QRESUME: bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup); bfa_tskim_send_abort(tskim); diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index 297a77f5806c..3486e402bfc1 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -6422,7 +6422,7 @@ bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport, switch (event) { case BFA_FCS_VPORT_SM_OFFLINE: bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE); - /* fall through */ + fallthrough; case BFA_FCS_VPORT_SM_RSP_OK: case BFA_FCS_VPORT_SM_RSP_ERROR: @@ -6448,7 +6448,7 @@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport, switch (event) { case BFA_FCS_VPORT_SM_OFFLINE: bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE); - /* fall through */ + fallthrough; case BFA_FCS_VPORT_SM_RSP_OK: case BFA_FCS_VPORT_SM_RSP_ERROR: diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c index 143c35bd668c..c21aa37b8adb 100644 --- a/drivers/scsi/bfa/bfa_fcs_rport.c +++ b/drivers/scsi/bfa/bfa_fcs_rport.c @@ -419,13 +419,13 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event) case RPSM_EVENT_LOGO_RCVD: bfa_fcs_rport_send_logo_acc(rport); - /* fall through */ + fallthrough; case RPSM_EVENT_PRLO_RCVD: if (rport->prlo == BFA_TRUE) bfa_fcs_rport_send_prlo_acc(rport); bfa_fcxp_discard(rport->fcxp); - /* fall through */ + fallthrough; case RPSM_EVENT_FAILED: if (rport->plogi_retries < BFA_FCS_RPORT_MAX_RETRIES) { rport->plogi_retries++; @@ 
-856,7 +856,7 @@ bfa_fcs_rport_sm_adisc_online(struct bfa_fcs_rport_s *rport, * At least go offline when a PLOGI is received. */ bfa_fcxp_discard(rport->fcxp); - /* fall through */ + fallthrough; case RPSM_EVENT_FAILED: case RPSM_EVENT_ADDRESS_CHANGE: @@ -1042,7 +1042,7 @@ bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport, case RPSM_EVENT_LOGO_RCVD: bfa_fcs_rport_send_logo_acc(rport); - /* fall through */ + fallthrough; case RPSM_EVENT_PRLO_RCVD: if (rport->prlo == BFA_TRUE) bfa_fcs_rport_send_prlo_acc(rport); @@ -1131,7 +1131,7 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport, bfa_fcs_rport_send_plogiacc(rport, NULL); break; } - /* fall through */ + fallthrough; case RPSM_EVENT_ADDRESS_CHANGE: if (!bfa_fcs_lport_is_online(rport->port)) { @@ -1288,7 +1288,7 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport, case RPSM_EVENT_LOGO_RCVD: bfa_fcs_rport_send_logo_acc(rport); - /* fall through */ + fallthrough; case RPSM_EVENT_PRLO_RCVD: if (rport->prlo == BFA_TRUE) bfa_fcs_rport_send_prlo_acc(rport); @@ -1332,7 +1332,7 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport, case RPSM_EVENT_LOGO_RCVD: bfa_fcs_rport_send_logo_acc(rport); - /* fall through */ + fallthrough; case RPSM_EVENT_PRLO_RCVD: if (rport->prlo == BFA_TRUE) bfa_fcs_rport_send_prlo_acc(rport); diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index dd5821dfcac2..325ad8a592bb 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c @@ -969,7 +969,7 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event) case IOCPF_E_INITFAIL: bfa_iocpf_timer_stop(ioc); - /* fall through */ + fallthrough; case IOCPF_E_TIMEOUT: writel(1, ioc->ioc_regs.ioc_sem_reg); @@ -1045,7 +1045,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event) case IOCPF_E_FAIL: bfa_iocpf_timer_stop(ioc); - /* fall through */ + fallthrough; case IOCPF_E_TIMEOUT: bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); @@ -5988,7 +5988,7 @@ bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf, case BFA_DCONF_SM_IOCDISABLE: case BFA_DCONF_SM_FLASH_COMP: bfa_timer_stop(&dconf->timer); - /* fall through */ + fallthrough; case BFA_DCONF_SM_TIMEOUT: bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c index 1e266c1ef793..11c0c3e6f014 100644 --- a/drivers/scsi/bfa/bfa_svc.c +++ b/drivers/scsi/bfa/bfa_svc.c @@ -6397,7 +6397,7 @@ bfa_dport_sm_starting(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) dport->test_state = BFA_DPORT_ST_INP; bfa_dport_result_start(dport, BFA_DPORT_OPMODE_MANU); } - /* fall thru */ + fallthrough; case BFA_DPORT_SM_REQFAIL: bfa_sm_set_state(dport, bfa_dport_sm_enabled); diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index e72d7bb7f4f4..08992095ce7a 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c @@ -1404,7 +1404,6 @@ void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[], break; case FCOE_KCQE_OPCODE_FCOE_ERROR: - /* fall thru */ default: printk(KERN_ERR PFX "unknown opcode 0x%x\n", kcqe->op_code); diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index 98d4d39aaa57..7fa20609d5e7 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c @@ -2939,7 +2939,7 @@ csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt) case CSIO_HWE_FW_DLOAD: csio_set_state(&hw->sm, csio_hws_resetting); 
/* Download firmware */ - /* Fall through */ + fallthrough; case CSIO_HWE_HBA_RESET: csio_set_state(&hw->sm, csio_hws_resetting); diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c index 61cf54208451..dc98f51f466f 100644 --- a/drivers/scsi/csiostor/csio_lnode.c +++ b/drivers/scsi/csiostor/csio_lnode.c @@ -1187,7 +1187,6 @@ csio_lns_online(struct csio_lnode *ln, enum csio_ln_ev evt) break; case CSIO_LNE_LINK_DOWN: - /* Fall through */ case CSIO_LNE_DOWN_LINK: csio_set_state(&ln->sm, csio_lns_uninit); if (csio_is_phys_ln(ln)) { diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c index 0ca695110f54..9010cb6045dc 100644 --- a/drivers/scsi/csiostor/csio_wr.c +++ b/drivers/scsi/csiostor/csio_wr.c @@ -808,7 +808,7 @@ csio_wr_destroy_queues(struct csio_hw *hw, bool cmd) csio_q_eqid(hw, i) = CSIO_MAX_QID; } - /* fall through */ + fallthrough; case CSIO_INGRESS: if (csio_q_iqid(hw, i) != CSIO_MAX_QID) { csio_wr_cleanup_iq_ftr(hw, i); diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index 2b48954b6b1e..37d99357120f 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -643,7 +643,7 @@ static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason, int *need_rst) { switch (abort_reason) { - case CPL_ERR_BAD_SYN: /* fall through */ + case CPL_ERR_BAD_SYN: case CPL_ERR_CONN_RESET: return csk->state > CTP_ESTABLISHED ? -EPIPE : -ECONNRESET; case CPL_ERR_XMIT_TIMEDOUT: diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 4e82c14cb795..2c3491528d42 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -1133,7 +1133,7 @@ static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason, int *need_rst) { switch (abort_reason) { - case CPL_ERR_BAD_SYN: /* fall through */ + case CPL_ERR_BAD_SYN: case CPL_ERR_CONN_RESET: return csk->state > CTP_ESTABLISHED ? 
-EPIPE : -ECONNRESET; diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 94250ebe9e80..e72440d919d2 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -748,16 +748,16 @@ static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level, /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */ if (index == PRIMARY_HWQ) cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq); - /* fall through */ + fallthrough; case UNMAP_TWO: cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq); - /* fall through */ + fallthrough; case UNMAP_ONE: cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq); - /* fall through */ + fallthrough; case FREE_IRQ: cfg->ops->free_afu_irqs(hwq->ctx_cookie); - /* fall through */ + fallthrough; case UNDO_NOOP: /* No action required */ break; @@ -971,18 +971,18 @@ static void cxlflash_remove(struct pci_dev *pdev) switch (cfg->init_state) { case INIT_STATE_CDEV: cxlflash_release_chrdev(cfg); - /* fall through */ + fallthrough; case INIT_STATE_SCSI: cxlflash_term_local_luns(cfg); scsi_remove_host(cfg->host); - /* fall through */ + fallthrough; case INIT_STATE_AFU: term_afu(cfg); - /* fall through */ + fallthrough; case INIT_STATE_PCI: cfg->ops->destroy_afu(cfg->afu_cookie); pci_disable_device(pdev); - /* fall through */ + fallthrough; case INIT_STATE_NONE: free_mem(cfg); scsi_host_put(cfg->host); @@ -2355,11 +2355,11 @@ retry: cxlflash_schedule_async_reset(cfg); break; } - /* fall through - to retry */ + fallthrough; /* to retry */ case -EAGAIN: if (++nretry < 2) goto retry; - /* fall through - to exit */ + fallthrough; /* to exit */ default: break; } @@ -2533,12 +2533,12 @@ static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp) cfg->state = STATE_NORMAL; wake_up_all(&cfg->reset_waitq); ssleep(1); - /* fall through */ + fallthrough; case STATE_RESET: wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); if (cfg->state == STATE_NORMAL) break; - /* fall through */ + fallthrough; default: rc = FAILED; break; @@ -3019,7 +3019,7 @@ retry: wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); if (cfg->state == STATE_NORMAL) goto retry; - /* else, fall through */ + fallthrough; default: /* Ideally should not happen */ dev_err(dev, "%s: Device is not ready, state=%d\n", @@ -3531,7 +3531,7 @@ static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd, if (likely(do_ioctl)) break; - /* fall through */ + fallthrough; default: rc = -EINVAL; goto out; diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c index 593669ac3669..5dddf67dfa24 100644 --- a/drivers/scsi/cxlflash/superpipe.c +++ b/drivers/scsi/cxlflash/superpipe.c @@ -375,14 +375,13 @@ retry: switch (sshdr.sense_key) { case NO_SENSE: case RECOVERED_ERROR: - /* fall through */ case NOT_READY: result &= ~SAM_STAT_CHECK_CONDITION; break; case UNIT_ATTENTION: switch (sshdr.asc) { case 0x29: /* Power on Reset or Device Reset */ - /* fall through */ + fallthrough; case 0x2A: /* Device capacity changed */ case 0x3F: /* Report LUNs changed */ /* Retry the command once more */ @@ -1791,13 +1790,12 @@ static int process_sense(struct scsi_device *sdev, switch (sshdr.sense_key) { case NO_SENSE: case RECOVERED_ERROR: - /* fall through */ case NOT_READY: break; case UNIT_ATTENTION: switch (sshdr.asc) { case 0x29: /* Power on Reset or Device Reset */ - /* fall through */ + fallthrough; case 0x2A: /* Device settings/capacity changed */ rc = read_cap16(sdev, lli); if (rc) { @@ -2157,7 +2155,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, unsigned 
int cmd, void __user *arg) if (unlikely(rc)) goto cxlflash_ioctl_exit; - /* fall through */ + fallthrough; case DK_CXLFLASH_MANAGE_LUN: known_ioctl = true; @@ -2168,7 +2166,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg) if (likely(do_ioctl)) break; - /* fall through */ + fallthrough; default: rc = -EINVAL; goto cxlflash_ioctl_exit; diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c index 8acd4bb9fefb..4a3f7831a2d6 100644 --- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c +++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c @@ -60,7 +60,7 @@ static int tur_done(struct scsi_device *sdev, struct hp_sw_dh_data *h, ret = SCSI_DH_OK; break; } - /* Fallthrough */ + fallthrough; default: sdev_printk(KERN_WARNING, sdev, "%s: sending tur failed, sense %x/%x/%x\n", @@ -147,7 +147,7 @@ retry: rc = SCSI_DH_RETRY; break; } - /* fall through */ + fallthrough; default: sdev_printk(KERN_WARNING, sdev, "%s: sending start_stop_unit failed, " diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c index b02ac389e6c6..429d64299fe9 100644 --- a/drivers/scsi/esas2r/esas2r_flash.c +++ b/drivers/scsi/esas2r/esas2r_flash.c @@ -1500,7 +1500,7 @@ bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi, return complete_fmapi_req(a, rq, FI_STAT_SUCCESS); } - /* fall through */ + fallthrough; case FI_ACT_UP: /* Upload the components */ default: diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c index eb7d139ffc00..09c5c24bf391 100644 --- a/drivers/scsi/esas2r/esas2r_init.c +++ b/drivers/scsi/esas2r/esas2r_init.c @@ -1236,7 +1236,7 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a, a->init_msg = ESAS2R_INIT_MSG_GET_INIT; break; } - /* fall through */ + fallthrough; case ESAS2R_INIT_MSG_GET_INIT: if (msg == ESAS2R_INIT_MSG_GET_INIT) { @@ -1250,7 +1250,7 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a, esas2r_hdebug("FAILED"); } } - /* fall through */ + fallthrough; default: rq->req_stat = RS_SUCCESS; diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c index 89afa31e33cb..43a1fd11df5e 100644 --- a/drivers/scsi/esp_scsi.c +++ b/drivers/scsi/esp_scsi.c @@ -307,7 +307,7 @@ static void esp_reset_esp(struct esp *esp) case FASHME: esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB); - /* fallthrough... 
*/ + fallthrough; case FAS236: case PCSCSI: @@ -1741,7 +1741,7 @@ again: case ESP_EVENT_DATA_IN: write = 1; - /* fallthru */ + fallthrough; case ESP_EVENT_DATA_OUT: { struct esp_cmd_entry *ent = esp->active_cmd; diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 1409c7687853..5ea426effa60 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -450,10 +450,10 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip) switch (fip->mode) { default: LIBFCOE_FIP_DBG(fip, "invalid mode %d\n", fip->mode); - /* fall-through */ + fallthrough; case FIP_MODE_AUTO: LIBFCOE_FIP_DBG(fip, "%s", "setting AUTO mode.\n"); - /* fall-through */ + fallthrough; case FIP_MODE_FABRIC: case FIP_MODE_NON_FIP: mutex_unlock(&fip->ctlr_mutex); @@ -773,7 +773,7 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, fc_fcoe_set_mac(mac, fh->fh_d_id); fip->update_mac(lport, mac); } - /* fall through */ + fallthrough; case ELS_LS_RJT: op = fr_encaps(fp); if (op) @@ -2439,7 +2439,7 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip, frport->enode_mac, 0); break; } - /* fall through */ + fallthrough; case FIP_ST_VNMP_START: LIBFCOE_FIP_DBG(fip, "vn_probe_req: " "restart VN2VN negotiation\n"); diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c index 2cc676e3df6a..29e4cdcade72 100644 --- a/drivers/scsi/g_NCR5380.c +++ b/drivers/scsi/g_NCR5380.c @@ -340,7 +340,7 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt, break; case BOARD_DTC3181E: hostdata->io_width = 2; /* 16-bit PDMA */ - /* fall through */ + fallthrough; case BOARD_NCR53C400A: case BOARD_HP_C2502: hostdata->c400_ctl_status = 9; diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 11caa4b0d797..d9d21d23372e 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -1144,7 +1144,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, hisi_hba->hw->get_events(hisi_hba, phy_no); break; } - /* fallthru */ + fallthrough; case PHY_FUNC_RELEASE_SPINUP_HOLD: default: return -EOPNOTSUPP; diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 91794a50b31f..48d5da59262b 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -4697,7 +4697,7 @@ static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len) case WRITE_6: case WRITE_12: is_write = 1; - /* fall through */ + fallthrough; case READ_6: case READ_12: if (*cdb_len == 6) { @@ -5147,7 +5147,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, switch (cmd->cmnd[0]) { case WRITE_6: is_write = 1; - /* fall through */ + fallthrough; case READ_6: first_block = (((cmd->cmnd[1] & 0x1F) << 16) | (cmd->cmnd[2] << 8) | @@ -5158,7 +5158,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, break; case WRITE_10: is_write = 1; - /* fall through */ + fallthrough; case READ_10: first_block = (((u64) cmd->cmnd[2]) << 24) | @@ -5171,7 +5171,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, break; case WRITE_12: is_write = 1; - /* fall through */ + fallthrough; case READ_12: first_block = (((u64) cmd->cmnd[2]) << 24) | @@ -5186,7 +5186,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, break; case WRITE_16: is_write = 1; - /* fall through */ + fallthrough; case READ_16: first_block = (((u64) cmd->cmnd[2]) << 56) | diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 77f4d37d5bd6..ea7c8930592d 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ 
b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -1866,7 +1866,7 @@ static int ibmvfc_bsg_request(struct bsg_job *job) port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) | (bsg_request->rqst_data.h_els.port_id[1] << 8) | bsg_request->rqst_data.h_els.port_id[2]; - /* fall through */ + fallthrough; case FC_BSG_RPT_ELS: fc_flags = IBMVFC_FC_ELS; break; @@ -1875,7 +1875,7 @@ static int ibmvfc_bsg_request(struct bsg_job *job) port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) | (bsg_request->rqst_data.h_ct.port_id[1] << 8) | bsg_request->rqst_data.h_ct.port_id[2]; - /* fall through */ + fallthrough; case FC_BSG_RPT_CT: fc_flags = IBMVFC_FC_CT_IU; break; @@ -4122,7 +4122,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) return; case IBMVFC_MAD_CRQ_ERROR: ibmvfc_retry_host_init(vhost); - /* fall through */ + fallthrough; case IBMVFC_MAD_DRIVER_FAILED: ibmvfc_free_event(evt); return; diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index d9e94e81da01..cc3908c2d2f9 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c @@ -1581,7 +1581,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi, case H_PERMISSION: if (connection_broken(vscsi)) flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED); - /* Fall through */ + fallthrough; default: dev_err(&vscsi->dev, "adapter_info: h_copy_rdma to client failed, rc %ld\n", rc); @@ -2489,10 +2489,10 @@ static long ibmvscsis_ping_response(struct scsi_info *vscsi) break; case H_CLOSED: vscsi->flags |= CLIENT_FAILED; - /* Fall through */ + fallthrough; case H_DROPPED: vscsi->flags |= RESPONSE_Q_DOWN; - /* Fall through */ + fallthrough; case H_REMOTE_PARM: dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n", rc); diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c index 1459b1467027..862d35a098cf 100644 --- a/drivers/scsi/imm.c +++ b/drivers/scsi/imm.c @@ -801,7 +801,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd) case 1: /* Phase 1 - Connected */ imm_connect(dev, CONNECT_EPP_MAYBE); cmd->SCp.phase++; - /* fall through */ + fallthrough; case 2: /* Phase 2 - We are now talking to the scsi bus */ if (!imm_select(dev, scmd_id(cmd))) { @@ -809,7 +809,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd) return 0; } cmd->SCp.phase++; - /* fall through */ + fallthrough; case 3: /* Phase 3 - Ready to accept a command */ w_ctr(ppb, 0x0c); @@ -819,7 +819,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd) if (!imm_send_command(cmd)) return 0; cmd->SCp.phase++; - /* fall through */ + fallthrough; case 4: /* Phase 4 - Setup scatter/gather buffers */ if (scsi_bufflen(cmd)) { @@ -835,7 +835,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd) cmd->SCp.phase++; if (cmd->SCp.this_residual & 0x01) cmd->SCp.this_residual++; - /* fall through */ + fallthrough; case 5: /* Phase 5 - Pre-Data transfer stage */ /* Spin lock for BUSY */ @@ -852,7 +852,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd) if (imm_negotiate(dev)) return 0; cmd->SCp.phase++; - /* fall through */ + fallthrough; case 6: /* Phase 6 - Data transfer stage */ /* Spin lock for BUSY */ @@ -868,7 +868,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd) return 1; } cmd->SCp.phase++; - /* fall through */ + fallthrough; case 7: /* Phase 7 - Post data transfer stage */ if ((dev->dp) && (dev->rd)) { @@ -880,7 +880,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd) } } cmd->SCp.phase++; - /* 
fall through */ + fallthrough; case 8: /* Phase 8 - Read status/message */ /* Check for data overrun */ diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c index 7f9b3f20e5e4..4cacb800b530 100644 --- a/drivers/scsi/isci/phy.c +++ b/drivers/scsi/isci/phy.c @@ -778,7 +778,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) break; case SCU_EVENT_LINK_FAILURE: scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); - /* fall through */ + fallthrough; case SCU_EVENT_HARD_RESET_RECEIVED: /* Start the oob/sn state machine over again */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index cd1e4b4d95bb..c3f540b55689 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -310,7 +310,7 @@ static void isci_remote_device_not_ready(struct isci_host *ihost, /* Kill all outstanding requests for the device. */ sci_remote_device_terminate_requests(idev); - /* Fall through - into the default case... */ + fallthrough; /* into the default case */ default: clear_bit(IDEV_IO_READY, &idev->flags); break; @@ -593,7 +593,7 @@ enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev, break; } - /* fall through - and treat as unhandled... */ + fallthrough; /* and treat as unhandled */ default: dev_dbg(scirdev_to_dev(idev), "%s: device: %p event code: %x: %s\n", diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c index 474a43460963..68333f523b35 100644 --- a/drivers/scsi/isci/remote_node_context.c +++ b/drivers/scsi/isci/remote_node_context.c @@ -225,7 +225,7 @@ static void sci_remote_node_context_continue_state_transitions(struct sci_remote case RNC_DEST_READY: case RNC_DEST_SUSPENDED_RESUME: rnc->destination_state = RNC_DEST_READY; - /* Fall through... */ + fallthrough; case RNC_DEST_FINAL: sci_remote_node_context_resume(rnc, rnc->user_callback, rnc->user_cookie); @@ -601,9 +601,9 @@ enum sci_status sci_remote_node_context_suspend( __func__, sci_rnc); return SCI_FAILURE_INVALID_STATE; } - /* Fall through - and handle like SCI_RNC_POSTING */ + fallthrough; /* and handle like SCI_RNC_POSTING */ case SCI_RNC_RESUMING: - /* Fall through - and handle like SCI_RNC_POSTING */ + fallthrough; /* and handle like SCI_RNC_POSTING */ case SCI_RNC_POSTING: /* Set the destination state to AWAIT - this signals the * entry into the SCI_RNC_READY state that a suspension diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 6561a07db189..6e0817941fa7 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -894,7 +894,7 @@ sci_io_request_terminate(struct isci_request *ireq) * and don't wait for the task response. */ sci_change_state(&ireq->sm, SCI_REQ_ABORTING); - /* Fall through - and handle like ABORTING... 
*/ + fallthrough; /* and handle like ABORTING */ case SCI_REQ_ABORTING: if (!isci_remote_device_is_safe_to_abort(ireq->target_device)) set_bit(IREQ_PENDING_ABORT, &ireq->flags); diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 16eb3b60ed58..96a2952cf626 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -2108,7 +2108,7 @@ static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg) switch (op) { case ELS_LS_RJT: FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ\n"); - /* fall through */ + fallthrough; case ELS_LS_ACC: goto cleanup; default: @@ -2622,7 +2622,7 @@ void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp) case FC_EOF_T: if (f_ctl & FC_FC_END_SEQ) skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl)); - /* fall through */ + fallthrough; case FC_EOF_N: if (fh->fh_type == FC_TYPE_BLS) fc_exch_recv_bls(ema->mp, fp); diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index e11d4f002bd4..7cfeb6886237 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -752,7 +752,7 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) brp = fc_frame_payload_get(fp, sizeof(*brp)); if (brp && brp->br_reason == FC_BA_RJT_LOG_ERR) break; - /* fall thru */ + fallthrough; default: /* * we will let the command timeout @@ -1536,7 +1536,7 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) "device %x invalid REC reject %d/%d\n", fsp->rport->port_id, rjt->er_reason, rjt->er_explan); - /* fall through */ + fallthrough; case ELS_RJT_UNSUP: FC_FCP_DBG(fsp, "device does not support REC\n"); rpriv = fsp->rport->dd_data; @@ -1668,7 +1668,7 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) FC_FCP_DBG(fsp, "REC %p fid %6.6x error unexpected error %d\n", fsp, fsp->rport->port_id, error); fsp->status_code = FC_CMD_PLOGO; - /* fall through */ + fallthrough; case -FC_EX_TIMEOUT: /* @@ -1830,7 +1830,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) break; case -FC_EX_CLOSED: /* e.g., link failure */ FC_FCP_DBG(fsp, "SRR error, exchange closed\n"); - /* fall through */ + fallthrough; default: fc_fcp_retry_cmd(fsp, FC_ERROR); break; diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index b84dbc316df1..6557fda85c5c 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -1578,7 +1578,7 @@ static void fc_lport_timeout(struct work_struct *work) case LPORT_ST_DPRT: FC_LPORT_DBG(lport, "Skipping lport state %s to SCR\n", fc_lport_state(lport)); - /* fall thru */ + fallthrough; case LPORT_ST_SCR: fc_lport_enter_scr(lport); break; diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 18663a82865f..a60b228d13f1 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -1723,7 +1723,7 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp) kref_put(&rdata->kref, fc_rport_destroy); goto busy; } - /* fall through */ + fallthrough; default: FC_RPORT_DBG(rdata, "Reject ELS 0x%02x while in state %s\n", diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 49c8a1818baf..1e9c3171fa9f 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -248,7 +248,7 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode) hdr_lun = scsilun_to_int(&tmf->lun); if (hdr_lun != task->sc->device->lun) return 0; - /* fall through */ + fallthrough; case 
ISCSI_TM_FUNC_TARGET_WARM_RESET: /* * Fail all SCSI cmd PDUs @@ -1674,7 +1674,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc) sc->result = DID_NO_CONNECT << 16; break; } - /* fall through */ + fallthrough; case ISCSI_STATE_IN_RECOVERY: reason = FAILURE_SESSION_IN_RECOVERY; sc->result = DID_IMM_RETRY << 16; @@ -2239,7 +2239,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) "progress\n"); goto success; } - /* fall through */ + fallthrough; default: conn->tmf_state = TMF_INITIAL; goto failed; diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index 6ef93c7af954..37e5d4e48c2f 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c @@ -772,7 +772,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) iscsi_tcp_data_recv_prep(tcp_conn); return 0; } - /* fall through */ + fallthrough; case ISCSI_OP_LOGOUT_RSP: case ISCSI_OP_NOOP_IN: case ISCSI_OP_SCSI_TMFUNC_RSP: diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 1b93332daa6b..6a521ba7a616 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -324,7 +324,7 @@ static int smp_ata_check_ready(struct ata_link *link) case SAS_END_DEVICE: if (ex_phy->attached_sata_dev) return sas_ata_clear_pending(dev, ex_phy); - /* fall through */ + fallthrough; default: return -ENODEV; } diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index daf951b0b3f5..cd7c7d269f6f 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -108,7 +108,7 @@ static int sas_get_port_device(struct asd_sas_port *port) rphy = NULL; break; } - /* fall through */ + fallthrough; case SAS_END_DEVICE: rphy = sas_end_device_alloc(port->port); break; diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index b7d1b1ea185d..8d6bcc19359f 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -1096,7 +1096,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) } else memcpy(dev->port->disc.fanout_sas_addr, ex_phy->attached_sas_addr, SAS_ADDR_SIZE); - /* fallthrough */ + fallthrough; case SAS_EDGE_EXPANDER_DEVICE: child = sas_ex_discover_expander(dev, phy_id); break; diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 9e0975e55c27..1bf939818c98 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -622,7 +622,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head * sas_scsi_clear_queue_lu(work_q, cmd); goto Again; } - /* fallthrough */ + fallthrough; case TASK_IS_NOT_AT_LU: case TASK_ABORT_FAILED: pr_notice("task 0x%p is not at LU: I_T recover\n", diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index ef2015fad2d5..d0141a23a833 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -3202,7 +3202,7 @@ port_out: case SLI_MGMT_GHAT: case SLI_MGMT_GRPL: rsp_size = FC_MAX_NS_RSP; - /* fall through */ + fallthrough; case SLI_MGMT_DHBA: case SLI_MGMT_DHAT: pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID; @@ -3215,7 +3215,7 @@ port_out: case SLI_MGMT_GPAT: case SLI_MGMT_GPAS: rsp_size = FC_MAX_NS_RSP; - /* fall through */ + fallthrough; case SLI_MGMT_DPRT: case SLI_MGMT_DPA: pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID; diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 48dc63f22cca..6aae61d6ee16 100644 --- 
a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -9134,7 +9134,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) lpfc_nlp_put(ndlp); return; } - /* fall through */ + fallthrough; default: /* Try to recover from this error */ if (phba->sli_rev == LPFC_SLI_REV4) diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 142a02114479..d32c7e7ab09d 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -4728,15 +4728,14 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba, case CMD_GEN_REQUEST64_CR: if (iocb->context_un.ndlp == ndlp) return 1; - /* fall through */ + fallthrough; case CMD_ELS_REQUEST64_CR: if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID) return 1; - /* fall through */ + fallthrough; case CMD_XMIT_ELS_RSP64_CX: if (iocb->context1 == (uint8_t *) ndlp) return 1; - /* fall through */ } } else if (pring->ringno == LPFC_FCP_RING) { /* Skip match check if waiting to relogin to FCP target */ @@ -6055,7 +6054,7 @@ restart_disc: case LPFC_LINK_UP: lpfc_issue_clear_la(phba, vport); - /* fall through */ + fallthrough; case LPFC_LINK_UNKNOWN: case LPFC_WARM_START: case LPFC_INIT_START: diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index cad53d19cb25..92d6e7b98770 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -464,7 +464,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, case NLP_STE_NPR_NODE: if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) break; - /* fall through */ + fallthrough; case NLP_STE_REG_LOGIN_ISSUE: case NLP_STE_PRLI_ISSUE: case NLP_STE_UNMAPPED_NODE: diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index e5be334d6a11..0c39ed50998c 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -1225,7 +1225,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, lpfc_ncmd, nCmd, lpfc_ncmd->cur_iocbq.sli4_xritag, bf_get(lpfc_wcqe_c_xb, wcqe)); - /* fall through */ + fallthrough; default: out_err: lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 5e802c8b22a9..983eeb0e3d07 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1093,7 +1093,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, break; } - /* fall through */ + fallthrough; case SCSI_PROT_WRITE_INSERT: /* * For WRITE_INSERT, force the error @@ -1213,7 +1213,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, rc = BG_ERR_TGT | BG_ERR_CHECK; break; } - /* fall through */ + fallthrough; case SCSI_PROT_WRITE_INSERT: /* * For WRITE_INSERT, force the @@ -1295,7 +1295,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, switch (op) { case SCSI_PROT_WRITE_PASS: rc = BG_ERR_CHECK; - /* fall through */ + fallthrough; case SCSI_PROT_WRITE_INSERT: /* @@ -3980,7 +3980,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, lpfc_cmd->cur_iocbq.sli4_lxritag, 0, 0); } - /* fall through */ + fallthrough; default: cmd->result = DID_ERROR << 16; break; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 4cd7ded656b7..e158cd77d387 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -9339,7 +9339,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, */ if (piocb->iocb_cmpl) piocb->iocb_cmpl = NULL; - /*FALLTHROUGH*/ + 
fallthrough; case CMD_CREATE_XRI_CR: case CMD_CLOSE_XRI_CN: case CMD_CLOSE_XRI_CX: @@ -9653,7 +9653,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, cmnd = CMD_XMIT_SEQUENCE64_CR; if (phba->link_flag & LS_LOOPBACK_MODE) bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); - /* fall through */ + fallthrough; case CMD_XMIT_SEQUENCE64_CR: /* word3 iocb=io_tag32 wqe=reserved */ wqe->xmit_sequence.rsvd3 = 0; @@ -13630,7 +13630,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) case FC_STATUS_RQ_BUF_LEN_EXCEEDED: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2537 Receive Frame Truncated!!\n"); - /* fall through */ + fallthrough; case FC_STATUS_RQ_SUCCESS: spin_lock_irqsave(&phba->hbalock, iflags); lpfc_sli4_rq_release(hrq, drq); @@ -13678,7 +13678,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) atomic_read(&tgtp->rcv_fcp_cmd_out), atomic_read(&tgtp->xmt_fcp_release)); } - /* fallthrough */ + fallthrough; case FC_STATUS_INSUFF_BUF_NEED_BUF: hrq->RQ_no_posted_buf++; @@ -14162,7 +14162,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, case FC_STATUS_RQ_BUF_LEN_EXCEEDED: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6126 Receive Frame Truncated!!\n"); - /* fall through */ + fallthrough; case FC_STATUS_RQ_SUCCESS: spin_lock_irqsave(&phba->hbalock, iflags); lpfc_sli4_rq_release(hrq, drq); @@ -14209,7 +14209,7 @@ drop: atomic_read(&tgtp->rcv_fcp_cmd_out), atomic_read(&tgtp->xmt_fcp_release)); } - /* fallthrough */ + fallthrough; case FC_STATUS_INSUFF_BUF_NEED_BUF: hrq->RQ_no_posted_buf++; @@ -15096,7 +15096,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) status = -EINVAL; goto out; } - /* fall through - otherwise default to smallest count */ + fallthrough; /* otherwise default to smallest count */ case 256: bf_set(lpfc_eq_context_count, &eq_create->u.request.context, LPFC_EQ_CNT_256); @@ -15238,7 +15238,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, LPFC_CQ_CNT_WORD7); break; } - /* fall through */ + fallthrough; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "0361 Unsupported CQ count: " @@ -15249,7 +15249,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, status = -EINVAL; goto out; } - /* fall through - otherwise default to smallest count */ + fallthrough; /* otherwise default to smallest count */ case 256: bf_set(lpfc_cq_context_count, &cq_create->u.request.context, LPFC_CQ_CNT_256); @@ -15417,7 +15417,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, LPFC_CQ_CNT_WORD7); break; } - /* fall through */ + fallthrough; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3118 Bad CQ count. 
(%d)\n", @@ -15426,7 +15426,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, status = -EINVAL; goto out; } - /* fall through - otherwise default to smallest */ + fallthrough; /* otherwise default to smallest */ case 256: bf_set(lpfc_mbx_cq_create_set_cqe_cnt, &cq_set->u.request, LPFC_CQ_CNT_256); @@ -15702,7 +15702,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, status = -EINVAL; goto out; } - /* fall through - otherwise default to smallest count */ + fallthrough; /* otherwise default to smallest count */ case 16: bf_set(lpfc_mq_context_ring_size, &mq_create_ext->u.request.context, @@ -16123,7 +16123,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, status = -EINVAL; goto out; } - /* fall through - otherwise default to smallest count */ + fallthrough; /* otherwise default to smallest count */ case 512: bf_set(lpfc_rq_context_rqe_count, &rq_create->u.request.context, @@ -16260,7 +16260,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, status = -EINVAL; goto out; } - /* fall through - otherwise default to smallest count */ + fallthrough; /* otherwise default to smallest count */ case 512: bf_set(lpfc_rq_context_rqe_count, &rq_create->u.request.context, diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 0484ee52ae80..ac406049e7c8 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -491,9 +491,9 @@ mega_get_ldrv_num(adapter_t *adapter, struct scsi_cmnd *cmd, int channel) if (adapter->support_random_del && adapter->read_ldidmap ) switch (cmd->cmnd[0]) { - case READ_6: /* fall through */ - case WRITE_6: /* fall through */ - case READ_10: /* fall through */ + case READ_6: + case WRITE_6: + case READ_10: case WRITE_10: ldrv_num += 0x80; } @@ -852,7 +852,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy) return scb; #if MEGA_HAVE_CLUSTERING - case RESERVE: /* Fall through */ + case RESERVE: case RELEASE: /* @@ -987,7 +987,7 @@ mega_prepare_passthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd, adapter->flag |= (1L << cmd->device->channel); } - /* Fall through */ + fallthrough; default: pthru->numsgelements = mega_build_sglist(adapter, scb, &pthru->dataxferaddr, &pthru->dataxferlen); @@ -1050,7 +1050,7 @@ mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb, adapter->flag |= (1L << cmd->device->channel); } - /* Fall through */ + fallthrough; default: epthru->numsgelements = mega_build_sglist(adapter, scb, &epthru->dataxferaddr, &epthru->dataxferlen); diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index 19469a2c0ea3..4a27ac869f2e 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -1581,7 +1581,7 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy) return NULL; } - /* Fall through */ + fallthrough; case READ_CAPACITY: /* diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 861f7140f52e..2b7e7b5f38ed 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -3522,7 +3522,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, megasas_complete_int_cmd(instance, cmd); break; } - /* fall through */ + fallthrough; case MFI_CMD_LD_READ: case MFI_CMD_LD_WRITE: diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 0824410f78f8..883cccb59c2d 100644 --- 
a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -3534,7 +3534,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex, atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]); cmd_fusion->scmd->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; } - /* Fall through - and complete IO */ + fallthrough; /* and complete IO */ case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */ atomic_dec(&instance->fw_outstanding); if (cmd_fusion->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) { diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c index fd1d03064079..0a9f4e44ab2c 100644 --- a/drivers/scsi/mesh.c +++ b/drivers/scsi/mesh.c @@ -1457,7 +1457,7 @@ static void cmd_complete(struct mesh_state *ms) /* huh? we expected a phase mismatch */ ms->n_msgin = 0; ms->msgphase = msg_in; - /* fall through */ + fallthrough; case msg_in: /* should have some message bytes in fifo */ diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 1d64524cd863..5730f32496b6 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -4681,7 +4681,7 @@ _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc) ioc_info(ioc, "performance mode: balanced\n"); return; } - /* Fall through */ + fallthrough; case MPT_PERF_MODE_LATENCY: /* * Enable interrupt coalescing on all reply queues diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index 43260306668c..7c119b904834 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -1002,7 +1002,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, } /* drop to default case for posting the request */ } - /* fall through */ + fallthrough; default: ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, data_in_dma, data_in_sz); diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 08fc4b381056..2e2756d8a49b 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -5470,7 +5470,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: scsi_set_resid(scmd, 0); - /* fall through */ + fallthrough; case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: case MPI2_IOCSTATUS_SUCCESS: scmd->result = (DID_OK << 16) | scsi_status; @@ -6480,7 +6480,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc, if (!test_bit(handle, ioc->pend_os_device_add)) break; - /* fall through */ + fallthrough; case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: @@ -7208,7 +7208,7 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc, event_data->PortEntry[i].PortStatus &= 0xF0; event_data->PortEntry[i].PortStatus |= MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED; - /* fall through */ + fallthrough; case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED: if (ioc->shost_recovery) break; @@ -10653,7 +10653,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) case MPI26_MFGPAGE_DEVID_CFG_SEC_3916: dev_info(&pdev->dev, "HBA is in Configurable Secure mode\n"); - /* fall through */ + fallthrough; case MPI26_MFGPAGE_DEVID_HARD_SEC_3816: case MPI26_MFGPAGE_DEVID_HARD_SEC_3916: ioc->is_aero_ioc = ioc->is_gen35_ioc = 1; diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c index d4bd31a75b9d..b2869c5dd7fb 100644 --- a/drivers/scsi/myrb.c +++ b/drivers/scsi/myrb.c @@ -650,7 +650,7 @@ static void myrb_bgi_control(struct myrb_hba *cb) if (sdev && 
cb->bgi_status.status == MYRB_BGI_INPROGRESS) sdev_printk(KERN_INFO, sdev, "Background Initialization Aborted\n"); - /* Fallthrough */ + fallthrough; case MYRB_STATUS_NO_BGI_INPROGRESS: cb->bgi_status.status = MYRB_BGI_INVALID; break; @@ -1528,7 +1528,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost, scmd->scsi_done(scmd); return 0; } - /* fall through */ + fallthrough; case WRITE_6: lba = (((scmd->cmnd[1] & 0x1F) << 16) | (scmd->cmnd[2] << 8) | @@ -1545,7 +1545,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost, scmd->scsi_done(scmd); return 0; } - /* fall through */ + fallthrough; case WRITE_10: case VERIFY: /* 0x2F */ case WRITE_VERIFY: /* 0x2E */ @@ -1562,7 +1562,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost, scmd->scsi_done(scmd); return 0; } - /* fall through */ + fallthrough; case WRITE_12: case VERIFY_12: /* 0xAF */ case WRITE_VERIFY_12: /* 0xAE */ diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c index f88adab3f913..03d70138ad58 100644 --- a/drivers/scsi/ncr53c8xx.c +++ b/drivers/scsi/ncr53c8xx.c @@ -3640,7 +3640,7 @@ ncr_script_copy_and_bind (struct ncb *np, ncrcmd *src, ncrcmd *dst, int len) new = old; break; } - /* fall through */ + fallthrough; default: panic("ncr_script_copy_and_bind: weird relocation %x\n", old); break; @@ -3910,14 +3910,14 @@ static void __init ncr_prepare_setting(struct ncb *np) np->scsi_mode = SMODE_HVD; break; } - /* fall through */ + fallthrough; case 3: /* SYMBIOS controllers report HVD through GPIO3 */ if (INB(nc_gpreg) & 0x08) break; - /* fall through */ + fallthrough; case 2: /* Set HVD unconditionally */ np->scsi_mode = SMODE_HVD; - /* fall through */ + fallthrough; case 1: /* Trust previous settings for HVD */ if (np->sv_stest2 & 0x20) np->scsi_mode = SMODE_HVD; @@ -4296,7 +4296,7 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd) break; cp->phys.header.wgoalp = cpu_to_scr(goalp); cp->phys.header.wlastp = cpu_to_scr(lastp); - /* fall through */ + fallthrough; case DMA_FROM_DEVICE: goalp = NCB_SCRIPT_PHYS (np, data_in2) + 8; if (segments <= MAX_SCATTERL) @@ -6717,7 +6717,7 @@ void ncr_int_sir (struct ncb *np) OUTL_DSP (scr_to_cpu(tp->lp[0]->jump_ccb[0])); return; } - /* fall through */ + fallthrough; case SIR_RESEL_BAD_TARGET: /* Will send a TARGET RESET message */ case SIR_RESEL_BAD_LUN: /* Will send a TARGET RESET message */ case SIR_RESEL_BAD_I_T_L_Q: /* Will send an ABORT TAG message */ @@ -6825,7 +6825,7 @@ void ncr_int_sir (struct ncb *np) */ OUTB (HS_PRT, HS_BUSY); - /* fall through */ + fallthrough; case SIR_NEGO_PROTO: /*------------------------------------------------------- diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c index 8655ff1249bb..bc5a623519e7 100644 --- a/drivers/scsi/pcmcia/nsp_cs.c +++ b/drivers/scsi/pcmcia/nsp_cs.c @@ -1113,7 +1113,7 @@ static irqreturn_t nspintr(int irq, void *dev_id) nsp_scsi_done(tmpSC); return IRQ_HANDLED; } - /* fall thru */ + fallthrough; default: if ((irq_status & (IRQSTATUS_SCSI | IRQSTATUS_FIFO)) == 0) { return IRQ_HANDLED; diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c index 0ae800c5b739..aa41f7ac91cb 100644 --- a/drivers/scsi/ppa.c +++ b/drivers/scsi/ppa.c @@ -717,7 +717,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd) } cmd->SCp.phase++; } - /* fall through */ + fallthrough; case 2: /* Phase 2 - We are now talking to the scsi bus */ if (!ppa_select(dev, scmd_id(cmd))) { @@ -725,7 +725,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd) return 0; } 
cmd->SCp.phase++; - /* fall through */ + fallthrough; case 3: /* Phase 3 - Ready to accept a command */ w_ctr(ppb, 0x0c); @@ -735,7 +735,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd) if (!ppa_send_command(cmd)) return 0; cmd->SCp.phase++; - /* fall through */ + fallthrough; case 4: /* Phase 4 - Setup scatter/gather buffers */ if (scsi_bufflen(cmd)) { @@ -749,7 +749,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd) } cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1; cmd->SCp.phase++; - /* fall through */ + fallthrough; case 5: /* Phase 5 - Data transfer stage */ w_ctr(ppb, 0x0c); @@ -762,7 +762,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd) if (retv == 0) return 1; cmd->SCp.phase++; - /* fall through */ + fallthrough; case 6: /* Phase 6 - Read status/message */ cmd->result = DID_OK << 16; diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index de9fd7f688d0..b569fd6e96d6 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -177,7 +177,7 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt, break; case CS_TIMEOUT: rval = QLA_FUNCTION_TIMEOUT; - /* fall through */ + fallthrough; default: ql_dbg(ql_dbg_disc, vha, 0x2033, "%s failed, completion status (%x) on port_id: " diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 57a2d76aa691..507919d4ab36 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -857,7 +857,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, fcport); break; } - /* fall through */ + fallthrough; default: if (fcport_is_smaller(fcport)) { /* local adapter is bigger */ diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index e3d2dea0b057..0954fa41911c 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -2874,7 +2874,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) &vha->dpc_flags); qla2xxx_wake_dpc(vha); } - /* fall through */ + fallthrough; default: ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n", diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index ab5275dbc338..25e0a1684763 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1580,11 +1580,11 @@ global_port_update: qla2xxx_wake_dpc(vha); } } - /* fall through */ + fallthrough; case MBA_IDC_COMPLETE: if (ha->notify_lb_portup_comp && !vha->vp_idx) complete(&ha->lb_portup_comp); - /* Fallthru */ + fallthrough; case MBA_IDC_TIME_EXT: if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) @@ -2188,7 +2188,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } - /* fall through */ + fallthrough; default: data[0] = MBS_COMMAND_ERROR; break; @@ -2368,7 +2368,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, case CS_PORT_UNAVAILABLE: case CS_PORT_LOGGED_OUT: fcport->nvme_flag |= NVME_FLAG_RESETTING; - /* fall through */ + fallthrough; case CS_ABORTED: case CS_PORT_BUSY: fd->transferred_length = 0; @@ -3485,7 +3485,7 @@ process_err: } else { qlt_24xx_process_atio_queue(vha, 1); } - /* fall through */ + fallthrough; case ABTS_RESP_24XX: case CTIO_TYPE7: case CTIO_CRC2: diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index e161c05d7d82..411b8a9ff393 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ 
b/drivers/scsi/qla2xxx/qla_sup.c @@ -2457,7 +2457,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, void *buf, sec_mask = 0x10000; break; } - /* Fall through... */ + fallthrough; case 0x1f: /* Atmel flash. */ /* 512k sector size. */ @@ -2466,7 +2466,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, void *buf, sec_mask = 0x80000000; break; } - /* Fall through... */ + fallthrough; case 0x01: /* AMD flash. */ if (flash_id == 0x38 || flash_id == 0x40 || @@ -2499,7 +2499,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, void *buf, sec_mask = 0x1e000; break; } - /* fall through */ + fallthrough; default: /* Default to 16 kb sector size. */ rest_addr = 0x3fff; diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 90289162dbd4..2d445bdb2129 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -442,7 +442,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, ql_dbg(ql_dbg_tgt, vha, 0xe073, "qla_target(%d):%s: CRC2 Response pkt\n", vha->vp_idx, __func__); - /* fall through */ + fallthrough; case CTIO_TYPE7: { struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; @@ -4423,7 +4423,7 @@ static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun, case QLA_TGT_CLEAR_TS: case QLA_TGT_ABORT_TS: abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id); - /* fall through */ + fallthrough; case QLA_TGT_CLEAR_ACA: h = qlt_find_qphint(vha, mcmd->unpacked_lun); mcmd->qpair = h->qpair; @@ -5057,7 +5057,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, res = 1; break; } - /* fall through */ + fallthrough; case ELS_LOGO: case ELS_PRLO: spin_lock_irqsave(&ha->tgt.sess_lock, flags); diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index bab87e47b238..676778cbc550 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -2907,7 +2907,7 @@ static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess, chap_tbl.secret_len); } } - /* fall through */ + fallthrough; default: return iscsi_session_get_param(cls_sess, param, buf); } diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index 3790e8b70bba..48ff7d88af86 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c @@ -200,15 +200,15 @@ static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int f /* Write mailbox command registers. */ switch (mbox_param[param[0]] >> 4) { case 6: sbus_writew(param[5], qpti->qregs + MBOX5); - /* Fall through */ + fallthrough; case 5: sbus_writew(param[4], qpti->qregs + MBOX4); - /* Fall through */ + fallthrough; case 4: sbus_writew(param[3], qpti->qregs + MBOX3); - /* Fall through */ + fallthrough; case 3: sbus_writew(param[2], qpti->qregs + MBOX2); - /* Fall through */ + fallthrough; case 2: sbus_writew(param[1], qpti->qregs + MBOX1); - /* Fall through */ + fallthrough; case 1: sbus_writew(param[0], qpti->qregs + MBOX0); } @@ -259,15 +259,15 @@ static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int f /* Read back output parameters. 
*/ switch (mbox_param[param[0]] & 0xf) { case 6: param[5] = sbus_readw(qpti->qregs + MBOX5); - /* Fall through */ + fallthrough; case 5: param[4] = sbus_readw(qpti->qregs + MBOX4); - /* Fall through */ + fallthrough; case 4: param[3] = sbus_readw(qpti->qregs + MBOX3); - /* Fall through */ + fallthrough; case 3: param[2] = sbus_readw(qpti->qregs + MBOX2); - /* Fall through */ + fallthrough; case 2: param[1] = sbus_readw(qpti->qregs + MBOX1); - /* Fall through */ + fallthrough; case 1: param[0] = sbus_readw(qpti->qregs + MBOX0); } diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 927b1e641842..7d3571a2bd89 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -599,7 +599,7 @@ int scsi_check_sense(struct scsi_cmnd *scmd) set_host_byte(scmd, DID_ALLOC_FAILURE); return SUCCESS; } - /* FALLTHROUGH */ + fallthrough; case COPY_ABORTED: case VOLUME_OVERFLOW: case MISCOMPARE: @@ -621,7 +621,7 @@ int scsi_check_sense(struct scsi_cmnd *scmd) return ADD_TO_MLQUEUE; else set_host_byte(scmd, DID_TARGET_FAILURE); - /* FALLTHROUGH */ + fallthrough; case ILLEGAL_REQUEST: if (sshdr.asc == 0x20 || /* Invalid command operation code */ @@ -734,7 +734,7 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd) switch (status_byte(scmd->result)) { case GOOD: scsi_handle_queue_ramp_up(scmd->device); - /* FALLTHROUGH */ + fallthrough; case COMMAND_TERMINATED: return SUCCESS; case CHECK_CONDITION: @@ -755,7 +755,7 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd) return FAILED; case QUEUE_FULL: scsi_handle_queue_full(scmd->device); - /* fall through */ + fallthrough; case BUSY: return NEEDS_RETRY; default: @@ -1302,7 +1302,7 @@ retry_tur: case NEEDS_RETRY: if (retry_cnt--) goto retry_tur; - /*FALLTHRU*/ + fallthrough; case SUCCESS: return 0; default: @@ -1739,7 +1739,7 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd) if (msg_byte(scmd->result) == COMMAND_COMPLETE && status_byte(scmd->result) == RESERVATION_CONFLICT) return 0; - /* fall through */ + fallthrough; case DID_SOFT_ERROR: return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER); } @@ -1810,7 +1810,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) set_host_byte(scmd, DID_TIME_OUT); return SUCCESS; } - /* FALLTHROUGH */ + fallthrough; case DID_NO_CONNECT: case DID_BAD_TARGET: /* @@ -1854,7 +1854,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) * lower down */ break; - /* fallthrough */ + fallthrough; case DID_BUS_BUSY: case DID_PARITY: goto maybe_retry; @@ -1892,7 +1892,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) * the case of trying to send too many commands to a * tagged queueing device. */ - /* FALLTHROUGH */ + fallthrough; case BUSY: /* * device can't talk to us at the moment. 
Should only @@ -1905,7 +1905,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) if (scmd->cmnd[0] == REPORT_LUNS) scmd->device->sdev_target->expecting_lun_change = 0; scsi_handle_queue_ramp_up(scmd->device); - /* FALLTHROUGH */ + fallthrough; case COMMAND_TERMINATED: return SUCCESS; case TASK_ABORTED: @@ -2376,22 +2376,22 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg) rtn = scsi_try_bus_device_reset(scmd); if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE)) break; - /* FALLTHROUGH */ + fallthrough; case SG_SCSI_RESET_TARGET: rtn = scsi_try_target_reset(scmd); if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE)) break; - /* FALLTHROUGH */ + fallthrough; case SG_SCSI_RESET_BUS: rtn = scsi_try_bus_reset(scmd); if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE)) break; - /* FALLTHROUGH */ + fallthrough; case SG_SCSI_RESET_HOST: rtn = scsi_try_host_reset(scmd); if (rtn == SUCCESS) break; - /* FALLTHROUGH */ + fallthrough; default: rtn = FAILED; break; diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index 45d04b7b2643..14872c9dc78c 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c @@ -117,14 +117,14 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd, case NOT_READY: /* This happens if there is no disc in drive */ if (sdev->removable) break; - /* FALLTHROUGH */ + fallthrough; case UNIT_ATTENTION: if (sdev->removable) { sdev->changed = 1; result = 0; /* This is no longer considered an error */ break; } - /* FALLTHROUGH -- for non-removable media */ + fallthrough; /* for non-removable media */ default: sdev_printk(KERN_INFO, sdev, "ioctl_internal_command return code = %x\n", diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 7c6dd6f75190..7affaaf8b98e 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -795,7 +795,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result) } if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req))) return; - /*FALLTHRU*/ + fallthrough; case ACTION_REPREP: scsi_io_completion_reprep(cmd, q); break; diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index bd38c8cea56e..ca1e6cf6a38e 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -516,7 +516,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, break; case BMIC_SENSE_DIAG_OPTIONS: cdb_length = 0; - /* fall through */ + fallthrough; case BMIC_IDENTIFY_CONTROLLER: case BMIC_IDENTIFY_PHYSICAL_DEVICE: case BMIC_SENSE_SUBSYSTEM_INFORMATION: @@ -527,7 +527,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, break; case BMIC_SET_DIAG_OPTIONS: cdb_length = 0; - /* fall through */ + fallthrough; case BMIC_WRITE_HOST_WELLNESS: request->data_direction = SOP_WRITE_FLAG; cdb[0] = BMIC_WRITE; @@ -2324,7 +2324,7 @@ static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, switch (scmd->cmnd[0]) { case WRITE_6: is_write = true; - /* fall through */ + fallthrough; case READ_6: first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) | (scmd->cmnd[2] << 8) | scmd->cmnd[3]); @@ -2334,21 +2334,21 @@ static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, break; case WRITE_10: is_write = true; - /* fall through */ + fallthrough; case READ_10: first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]); break; case WRITE_12: is_write = true; - /* fall through */ + fallthrough; 
case READ_12: first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); block_cnt = get_unaligned_be32(&scmd->cmnd[6]); break; case WRITE_16: is_write = true; - /* fall through */ + fallthrough; case READ_16: first_block = get_unaligned_be64(&scmd->cmnd[2]); block_cnt = get_unaligned_be32(&scmd->cmnd[10]); @@ -2948,7 +2948,7 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS: if (io_request->scmd) io_request->scmd->result = 0; - /* fall through */ + fallthrough; case PQI_RESPONSE_IU_GENERAL_MANAGEMENT: break; case PQI_RESPONSE_IU_VENDOR_GENERAL: @@ -3115,12 +3115,11 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info, switch (reset_status) { case RESET_INITIATE_DRIVER: - /* fall through */ case RESET_TIMEDOUT: dev_info(&ctrl_info->pci_dev->dev, "resetting controller %u\n", ctrl_info->ctrl_id); sis_soft_reset(ctrl_info); - /* fall through */ + fallthrough; case RESET_INITIATE_FIRMWARE: rc = pqi_ofa_ctrl_restart(ctrl_info); pqi_ofa_free_host_buffer(ctrl_info); diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 0c4aa4665a2f..3b3a53c6a0de 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -877,10 +877,10 @@ static void get_sectorsize(struct scsi_cd *cd) case 2340: case 2352: sector_size = 2048; - /* fall through */ + fallthrough; case 2048: cd->capacity *= 4; - /* fall through */ + fallthrough; case 512: break; default: diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 87fbc0ea350b..e2e5356a997d 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -339,14 +339,14 @@ static void st_analyze_sense(struct st_request *SRpnt, struct st_cmdstatus *s) switch (sense[0] & 0x7f) { case 0x71: s->deferred = 1; - /* fall through */ + fallthrough; case 0x70: s->fixed_format = 1; s->flags = sense[2] & 0xe0; break; case 0x73: s->deferred = 1; - /* fall through */ + fallthrough; case 0x72: s->fixed_format = 0; ucp = scsi_sense_desc_find(sense, SCSI_SENSE_BUFFERSIZE, 4); @@ -2723,7 +2723,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon switch (cmd_in) { case MTFSFM: chg_eof = 0; /* Changed from the FSF after this */ - /* fall through */ + fallthrough; case MTFSF: cmd[0] = SPACE; cmd[1] = 0x01; /* Space FileMarks */ @@ -2738,7 +2738,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon break; case MTBSFM: chg_eof = 0; /* Changed from the FSF after this */ - /* fall through */ + fallthrough; case MTBSF: cmd[0] = SPACE; cmd[1] = 0x01; /* Space FileMarks */ diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index 701b842296f0..2e3fbc2fae97 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c @@ -397,12 +397,12 @@ static int sun3scsi_dma_finish(int write_flag) case CSR_LEFT_3: *vaddr = (dregs->bpack_lo & 0xff00) >> 8; vaddr--; - /* Fall through */ + fallthrough; case CSR_LEFT_2: *vaddr = (dregs->bpack_hi & 0x00ff); vaddr--; - /* Fall through */ + fallthrough; case CSR_LEFT_1: *vaddr = (dregs->bpack_hi & 0xff00) >> 8; diff --git a/drivers/scsi/sym53c8xx_2/sym_fw.c b/drivers/scsi/sym53c8xx_2/sym_fw.c index 6d7651a7847e..c6db61b61de3 100644 --- a/drivers/scsi/sym53c8xx_2/sym_fw.c +++ b/drivers/scsi/sym53c8xx_2/sym_fw.c @@ -523,7 +523,7 @@ void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len) new = old; break; } - /* fall through */ + fallthrough; default: new = 0; panic("sym_fw_bind_script: " diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index 
8410117d5aa4..cc11daa1222b 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c @@ -3059,7 +3059,7 @@ static void sym_sir_bad_scsi_status(struct sym_hcb *np, int num, struct sym_ccb sym_print_addr(cp->cmd, "%s\n", s_status == S_BUSY ? "BUSY" : "QUEUE FULL\n"); } - /* fall through */ + fallthrough; default: /* S_INT, S_INT_COND_MET, S_CONFLICT */ sym_complete_error (np, cp); break; @@ -4620,7 +4620,7 @@ static void sym_int_sir(struct sym_hcb *np) * Negotiation failed. * Target does not want answer message. */ - /* fall through */ + fallthrough; case SIR_NEGO_PROTO: sym_nego_default(np, tp, cp); goto out; diff --git a/drivers/scsi/sym53c8xx_2/sym_nvram.c b/drivers/scsi/sym53c8xx_2/sym_nvram.c index d37e2a69136a..e13d5351f155 100644 --- a/drivers/scsi/sym53c8xx_2/sym_nvram.c +++ b/drivers/scsi/sym53c8xx_2/sym_nvram.c @@ -695,7 +695,7 @@ static int sym_read_Tekram_nvram (struct sym_device *np, Tekram_nvram *nvram) data, len); if (!x) break; - /* fall through */ + fallthrough; default: x = sym_read_T93C46_nvram(np, nvram); break; diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c index bcfbbd0d5c45..5b2bc1a6f922 100644 --- a/drivers/scsi/ufs/ufs_bsg.c +++ b/drivers/scsi/ufs/ufs_bsg.c @@ -110,7 +110,7 @@ static int ufs_bsg_request(struct bsg_job *job) goto out; } - /* fall through */ + fallthrough; case UPIU_TRANSACTION_NOP_OUT: case UPIU_TRANSACTION_TASK_REQ: ret = ufshcd_exec_raw_upiu_cmd(hba, &bsg_request->upiu_req, diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index da199fa7a3e0..1d157ff58d81 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -1612,7 +1612,7 @@ start: * currently running. Hence, fall through to cancel gating * work and to enable clocks. */ - /* fallthrough */ + fallthrough; case CLKS_OFF: ufshcd_scsi_block_requests(hba); hba->clk_gating.state = REQ_CLKS_ON; @@ -1624,7 +1624,7 @@ start: * fall through to check if we should wait for this * work to be done or not. 
*/ - /* fallthrough */ + fallthrough; case REQ_CLKS_ON: if (async) { rc = -EAGAIN; @@ -4737,7 +4737,7 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status) switch (scsi_status) { case SAM_STAT_CHECK_CONDITION: ufshcd_copy_sense_data(lrbp); - /* fallthrough */ + fallthrough; case SAM_STAT_GOOD: result |= DID_OK << 16 | COMMAND_COMPLETE << 8 | @@ -6277,7 +6277,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, switch (msgcode) { case UPIU_TRANSACTION_NOP_OUT: cmd_type = DEV_CMD_TYPE_NOP; - /* fall through */ + fallthrough; case UPIU_TRANSACTION_QUERY_REQ: ufshcd_hold(hba, false); mutex_lock(&hba->dev_cmd.lock); diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index ca1c39b6f631..3b1803432090 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -148,7 +148,7 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf) default: scmd_printk(KERN_WARNING, sc, "Unknown response %d", resp->response); - /* fall through */ + fallthrough; case VIRTIO_SCSI_S_FAILURE: set_host_byte(sc, DID_ERROR); break; diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index 8dbb4db6831a..081f54ab7d86 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c @@ -607,7 +607,7 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter, case BTSTAT_TAGREJECT: case BTSTAT_BADMSG: cmd->result = (DRIVER_INVALID << 24); - /* fall through */ + fallthrough; case BTSTAT_HAHARDWARE: case BTSTAT_INVPHASE: diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c index f81046f0e68a..87dafbc942d3 100644 --- a/drivers/scsi/wd33c93.c +++ b/drivers/scsi/wd33c93.c @@ -1854,7 +1854,7 @@ round_4(unsigned int x) case 1: --x; break; case 2: ++x; - /* fall through */ + fallthrough; case 3: ++x; } return x; diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c index f0068e96a177..259fc248d06c 100644 --- a/drivers/scsi/xen-scsifront.c +++ b/drivers/scsi/xen-scsifront.c @@ -1111,7 +1111,7 @@ static void scsifront_backend_changed(struct xenbus_device *dev, case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; - /* fall through - Missed the backend's Closing state */ + fallthrough; /* Missed the backend's Closing state */ case XenbusStateClosing: scsifront_disconnect(info); break; diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index e19102f46302..b25d0f7dac9e 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -353,7 +353,7 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo, debugfs_create_u32("nmodem_supported", 0400, qcom_socinfo->dbg_root, &qcom_socinfo->info.nmodem_supported); - /* Fall through */ + fallthrough; case SOCINFO_VERSION(0, 14): qcom_socinfo->info.num_clusters = __le32_to_cpu(info->num_clusters); qcom_socinfo->info.ncluster_array_offset = __le32_to_cpu(info->ncluster_array_offset); @@ -368,14 +368,14 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo, &qcom_socinfo->info.num_defective_parts); debugfs_create_u32("ndefective_parts_array_offset", 0400, qcom_socinfo->dbg_root, &qcom_socinfo->info.ndefective_parts_array_offset); - /* Fall through */ + fallthrough; case SOCINFO_VERSION(0, 13): qcom_socinfo->info.nproduct_id = __le32_to_cpu(info->nproduct_id); debugfs_create_u32("nproduct_id", 0400, qcom_socinfo->dbg_root, &qcom_socinfo->info.nproduct_id); DEBUGFS_ADD(info, chip_id); - /* Fall through */ + fallthrough; case SOCINFO_VERSION(0, 12): qcom_socinfo->info.chip_family = 
__le32_to_cpu(info->chip_family); @@ -392,7 +392,7 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo, debugfs_create_x32("raw_device_number", 0400, qcom_socinfo->dbg_root, &qcom_socinfo->info.raw_device_num); - /* Fall through */ + fallthrough; case SOCINFO_VERSION(0, 11): case SOCINFO_VERSION(0, 10): case SOCINFO_VERSION(0, 9): @@ -400,12 +400,12 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo, debugfs_create_u32("foundry_id", 0400, qcom_socinfo->dbg_root, &qcom_socinfo->info.foundry_id); - /* Fall through */ + fallthrough; case SOCINFO_VERSION(0, 8): case SOCINFO_VERSION(0, 7): DEBUGFS_ADD(info, pmic_model); DEBUGFS_ADD(info, pmic_die_rev); - /* Fall through */ + fallthrough; case SOCINFO_VERSION(0, 6): qcom_socinfo->info.hw_plat_subtype = __le32_to_cpu(info->hw_plat_subtype); @@ -413,7 +413,7 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo, debugfs_create_u32("hardware_platform_subtype", 0400, qcom_socinfo->dbg_root, &qcom_socinfo->info.hw_plat_subtype); - /* Fall through */ + fallthrough; case SOCINFO_VERSION(0, 5): qcom_socinfo->info.accessory_chip = __le32_to_cpu(info->accessory_chip); @@ -421,27 +421,27 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo, debugfs_create_u32("accessory_chip", 0400, qcom_socinfo->dbg_root, &qcom_socinfo->info.accessory_chip); - /* Fall through */ + fallthrough; case SOCINFO_VERSION(0, 4): qcom_socinfo->info.plat_ver = __le32_to_cpu(info->plat_ver); debugfs_create_u32("platform_version", 0400, qcom_socinfo->dbg_root, &qcom_socinfo->info.plat_ver); - /* Fall through */ + fallthrough; case SOCINFO_VERSION(0, 3): qcom_socinfo->info.hw_plat = __le32_to_cpu(info->hw_plat); debugfs_create_u32("hardware_platform", 0400, qcom_socinfo->dbg_root, &qcom_socinfo->info.hw_plat); - /* Fall through */ + fallthrough; case SOCINFO_VERSION(0, 2): qcom_socinfo->info.raw_ver = __le32_to_cpu(info->raw_ver); debugfs_create_u32("raw_version", 0400, qcom_socinfo->dbg_root, &qcom_socinfo->info.raw_ver); - /* Fall through */ + fallthrough; case SOCINFO_VERSION(0, 1): DEBUGFS_ADD(info, build_id); break; diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index 42cf37a0556b..d332e5d9abac 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -2229,7 +2229,7 @@ static int tegra_pmc_clk_notify_cb(struct notifier_block *nb, case POST_RATE_CHANGE: pmc->rate = data->new_rate; - /* fall through */ + fallthrough; case ABORT_RATE_CHANGE: mutex_unlock(&pmc->powergates_lock); diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c index 2f717812c766..03b034c15d2b 100644 --- a/drivers/spi/spi-bcm2835aux.c +++ b/drivers/spi/spi-bcm2835aux.c @@ -164,10 +164,10 @@ static inline void bcm2835aux_rd_fifo(struct bcm2835aux_spi *bs) switch (count) { case 3: *bs->rx_buf++ = (data >> 16) & 0xff; - /* fallthrough */ + fallthrough; case 2: *bs->rx_buf++ = (data >> 8) & 0xff; - /* fallthrough */ + fallthrough; case 1: *bs->rx_buf++ = (data >> 0) & 0xff; /* fallthrough - no default */ diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c index 54ad0ac121e5..ee905880769e 100644 --- a/drivers/spi/spi-fsl-cpm.c +++ b/drivers/spi/spi-fsl-cpm.c @@ -226,7 +226,7 @@ static void fsl_spi_free_dummy_rx(void) case 1: kfree(fsl_dummy_rx); fsl_dummy_rx = NULL; - /* fall through */ + fallthrough; default: fsl_dummy_rx_refcnt--; break; @@ -294,7 +294,7 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi) switch (mspi->subblock) { default: dev_warn(dev, "cell-index unspecified, assuming 
SPI1\n"); - /* fall through */ + fallthrough; case 0: mspi->subblock = QE_CR_SUBBLOCK_SPI1; break; diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c index bd23c4689b46..127b8bd25831 100644 --- a/drivers/spi/spi-sprd-adi.c +++ b/drivers/spi/spi-sprd-adi.c @@ -506,7 +506,7 @@ static int sprd_adi_probe(struct platform_device *pdev) default: dev_err(&pdev->dev, "failed to find hwlock id, %d\n", ret); - /* fall-through */ + fallthrough; case -EPROBE_DEFER: goto put_ctlr; } diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c index 823dc99be46f..a8d2525e7af9 100644 --- a/drivers/ssb/driver_chipcommon.c +++ b/drivers/ssb/driver_chipcommon.c @@ -425,7 +425,7 @@ void ssb_chipco_get_clockcontrol(struct ssb_chipcommon *cc, *m = chipco_read32(cc, SSB_CHIPCO_CLOCK_M2); break; } - /* Fall through */ + fallthrough; default: *m = chipco_read32(cc, SSB_CHIPCO_CLOCK_SB); } diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c index 1ca2ac5ef2b8..354486b7ed3a 100644 --- a/drivers/ssb/driver_mipscore.c +++ b/drivers/ssb/driver_mipscore.c @@ -342,7 +342,7 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore) set_irq(dev, irq++); break; } - /* fallthrough */ + fallthrough; case SSB_DEV_EXTIF: set_irq(dev, 0); break; diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c index b97a5c32d44a..f49ab1aa2149 100644 --- a/drivers/ssb/scan.c +++ b/drivers/ssb/scan.c @@ -228,7 +228,7 @@ static void __iomem *ssb_ioremap(struct ssb_bus *bus, switch (bus->bustype) { case SSB_BUSTYPE_SSB: /* Only map the first core for now. */ - /* fallthrough... */ + fallthrough; case SSB_BUSTYPE_PCMCIA: mmio = ioremap(baseaddr, SSB_CORE_SIZE); break; diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c index 8ea65bef35d2..a4e4eef55f35 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c +++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c @@ -4984,7 +4984,7 @@ enum mipi_port_id __get_mipi_port(struct atomisp_device *isp, if (MIPI_PORT1_ID + 1 != N_MIPI_PORT_ID) { return MIPI_PORT1_ID + 1; } - /* fall through */ + fallthrough; default: dev_err(isp->dev, "unsupported port: %d\n", port); return MIPI_PORT0_ID; diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c index cccc5bfa1057..1b2b2c68025b 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c +++ b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c @@ -704,14 +704,14 @@ static bool is_pipe_valid_to_current_run_mode(struct atomisp_sub_device *asd, return false; } - /* fall-through */ + fallthrough; case ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE: if (pipe_id == IA_CSS_PIPE_ID_CAPTURE || pipe_id == IA_CSS_PIPE_ID_PREVIEW) return true; return false; - /* fall-through */ + fallthrough; case ATOMISP_RUN_MODE_VIDEO: if (!asd->continuous_mode->val) { if (pipe_id == IA_CSS_PIPE_ID_VIDEO || @@ -720,7 +720,7 @@ static bool is_pipe_valid_to_current_run_mode(struct atomisp_sub_device *asd, else return false; } - /* fall through */ + fallthrough; case ATOMISP_RUN_MODE_SDV: if (pipe_id == IA_CSS_PIPE_ID_CAPTURE || pipe_id == IA_CSS_PIPE_ID_VIDEO) @@ -2765,7 +2765,7 @@ static unsigned int atomisp_get_pipe_index(struct atomisp_sub_device *asd, if (!atomisp_is_mbuscode_raw(asd->fmt[asd->capture_pad].fmt.code)) { return IA_CSS_PIPE_ID_CAPTURE; } - /* fall through */ + fallthrough; case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW: if (asd->yuvpp_mode) return IA_CSS_PIPE_ID_YUVPP; diff --git 
a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c index f8d616f08b51..65b0c8a662a0 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c +++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c @@ -1467,7 +1467,6 @@ enum ia_css_pipe_id atomisp_get_css_pipe_id(struct atomisp_sub_device *asd) case ATOMISP_RUN_MODE_VIDEO: return IA_CSS_PIPE_ID_VIDEO; case ATOMISP_RUN_MODE_STILL_CAPTURE: - /* fall through */ default: return IA_CSS_PIPE_ID_CAPTURE; } diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c index a000a1e316f7..0114b040247b 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c +++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c @@ -1086,7 +1086,7 @@ static int atomisp_subdev_probe(struct atomisp_device *isp) case RAW_CAMERA: dev_dbg(isp->dev, "raw_index: %d\n", raw_index); raw_index = isp->input_cnt; - /* fall through */ + fallthrough; case SOC_CAMERA: dev_dbg(isp->dev, "SOC_INDEX: %d\n", isp->input_cnt); if (isp->input_cnt >= ATOM_ISP_MAX_INPUTS) { diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c index 4fb9bfdd2f4c..f13af2329f48 100644 --- a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c +++ b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c @@ -660,7 +660,7 @@ static void free_private_bo_pages(struct hmm_buffer_object *bo, break; } - /* fall through */ + fallthrough; /* * if dynamic memory pool doesn't exist, need to free diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c index 54434c2dbaf9..a68cbb4995f0 100644 --- a/drivers/staging/media/atomisp/pci/sh_css.c +++ b/drivers/staging/media/atomisp/pci/sh_css.c @@ -4510,7 +4510,7 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe, #endif pipe->stop_requested = false; } - /* fall through */ + fallthrough; case IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME: case IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME: frame = (struct ia_css_frame *)HOST_ADDRESS(ddr_buffer.kernel_ptr); diff --git a/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c b/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c index 24041849384a..6386a3989bfe 100644 --- a/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c +++ b/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c @@ -110,7 +110,7 @@ hantro_g1_mpeg2_dec_set_buffers(struct hantro_dev *vpu, struct hantro_ctx *ctx, case V4L2_MPEG2_PICTURE_CODING_TYPE_B: backward_addr = hantro_get_ref(ctx, slice_params->backward_ref_ts); - /* fall-through */ + fallthrough; case V4L2_MPEG2_PICTURE_CODING_TYPE_P: forward_addr = hantro_get_ref(ctx, slice_params->forward_ref_ts); diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c b/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c index 7e9aad671489..f610fa5b4335 100644 --- a/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c +++ b/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c @@ -112,7 +112,7 @@ rk3399_vpu_mpeg2_dec_set_buffers(struct hantro_dev *vpu, case V4L2_MPEG2_PICTURE_CODING_TYPE_B: backward_addr = hantro_get_ref(ctx, slice_params->backward_ref_ts); - /* fall-through */ + fallthrough; case V4L2_MPEG2_PICTURE_CODING_TYPE_P: forward_addr = hantro_get_ref(ctx, slice_params->forward_ref_ts); diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c index d92fd804488e..21ebf7769696 100644 --- a/drivers/staging/media/imx/imx-media-csi.c +++ b/drivers/staging/media/imx/imx-media-csi.c @@ 
-488,7 +488,7 @@ static int csi_idmac_setup_channel(struct csi_priv *priv) passthrough_cycles = incc->cycles; break; } - /* fallthrough - non-passthrough RGB565 (CSI-2 bus) */ + fallthrough; /* non-passthrough RGB565 (CSI-2 bus) */ default: burst_size = (image.pix.width & 0xf) ? 8 : 16; passthrough_bits = 16; diff --git a/drivers/staging/media/usbvision/usbvision-i2c.c b/drivers/staging/media/usbvision/usbvision-i2c.c index 6e4df3335b1b..aa3ff67a3cb1 100644 --- a/drivers/staging/media/usbvision/usbvision-i2c.c +++ b/drivers/staging/media/usbvision/usbvision-i2c.c @@ -303,13 +303,13 @@ usbvision_i2c_read_max4(struct usb_usbvision *usbvision, unsigned char addr, switch (len) { case 4: buf[3] = usbvision_read_reg(usbvision, USBVISION_SER_DAT4); - /* fall through */ + fallthrough; case 3: buf[2] = usbvision_read_reg(usbvision, USBVISION_SER_DAT3); - /* fall through */ + fallthrough; case 2: buf[1] = usbvision_read_reg(usbvision, USBVISION_SER_DAT2); - /* fall through */ + fallthrough; case 1: buf[0] = usbvision_read_reg(usbvision, USBVISION_SER_DAT1); break; diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c index 30ea37e1a3f5..bd37f2afadea 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_main.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c @@ -444,7 +444,7 @@ cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp, case CPL_RX_ISCSI_DDP: case CPL_FW4_ACK: lro_flush = false; - /* fall through */ + fallthrough; case CPL_ABORT_RPL_RSS: case CPL_PASS_ESTABLISH: case CPL_PEER_CLOSE: diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index c9689610e186..cd045dc75a58 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -3740,7 +3740,7 @@ check_rsp_state: case ISTATE_SEND_LOGOUTRSP: if (!iscsit_logout_post_handler(cmd, conn)) return -ECONNRESET; - /* fall through */ + fallthrough; case ISTATE_SEND_STATUS: case ISTATE_SEND_ASYNCMSG: case ISTATE_SEND_NOPIN: diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 8fc88654bff6..5f79ea05f9b8 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -345,7 +345,7 @@ static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type, break; case PR_TYPE_WRITE_EXCLUSIVE_REGONLY: we = 1; - /* fall through */ + fallthrough; case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY: /* * Some commands are only allowed for registered I_T Nexuses. @@ -354,7 +354,7 @@ static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type, break; case PR_TYPE_WRITE_EXCLUSIVE_ALLREG: we = 1; - /* fall through */ + fallthrough; case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG: /* * Each registered I_T Nexus is a reservation holder. 
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index f1e81886122d..6e8b8d30938f 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -734,7 +734,7 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, } if (!protect) return TCM_NO_SENSE; - /* Fallthrough */ + fallthrough; default: pr_err("Unable to determine pi_prot_type for CDB: 0x%02x " "PROTECT: 0x%02x\n", cdb[0], protect); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 9fb0be0aa620..590eac2df909 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -2236,7 +2236,7 @@ static void transport_complete_qf(struct se_cmd *cmd) ret = cmd->se_tfo->queue_data_in(cmd); break; } - /* fall through */ + fallthrough; case DMA_NONE: queue_status: trace_target_cmd_complete(cmd); @@ -2431,7 +2431,7 @@ queue_rsp: goto queue_full; break; } - /* fall through */ + fallthrough; case DMA_NONE: queue_status: trace_target_cmd_complete(cmd); diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index e9f0dda5ff92..a7ed56602c6c 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -537,7 +537,7 @@ static void ft_send_work(struct work_struct *work) case FCP_PTA_ACA: task_attr = TCM_ACA_TAG; break; - case FCP_PTA_SIMPLE: /* Fallthrough */ + case FCP_PTA_SIMPLE: default: task_attr = TCM_SIMPLE_TAG; } diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c index e64db5f80d90..4ffa2e2c0145 100644 --- a/drivers/thermal/qcom/tsens-v0_1.c +++ b/drivers/thermal/qcom/tsens-v0_1.c @@ -220,7 +220,7 @@ static int calibrate_8916(struct tsens_priv *priv) p2[4] = (qfprom_cdata[1] & MSM8916_S4_P2_MASK) >> MSM8916_S4_P2_SHIFT; for (i = 0; i < priv->num_sensors; i++) p2[i] = ((base1 + p2[i]) << 3); - /* Fall through */ + fallthrough; case ONE_PT_CALIB2: base0 = (qfprom_cdata[0] & MSM8916_BASE0_MASK); p1[0] = (qfprom_cdata[0] & MSM8916_S0_P1_MASK) >> MSM8916_S0_P1_SHIFT; @@ -355,7 +355,7 @@ static int calibrate_8974(struct tsens_priv *priv) p2[8] = (calib[5] & S8_P2_BKP_MASK) >> S8_P2_BKP_SHIFT; p2[9] = (calib[5] & S9_P2_BKP_MASK) >> S9_P2_BKP_SHIFT; p2[10] = (calib[5] & S10_P2_BKP_MASK) >> S10_P2_BKP_SHIFT; - /* Fall through */ + fallthrough; case ONE_PT_CALIB: case ONE_PT_CALIB2: base1 = bkp[0] & BASE1_MASK; @@ -390,7 +390,7 @@ static int calibrate_8974(struct tsens_priv *priv) p2[8] = (calib[4] & S8_P2_MASK) >> S8_P2_SHIFT; p2[9] = (calib[4] & S9_P2_MASK) >> S9_P2_SHIFT; p2[10] = (calib[4] & S10_P2_MASK) >> S10_P2_SHIFT; - /* Fall through */ + fallthrough; case ONE_PT_CALIB: case ONE_PT_CALIB2: base1 = calib[0] & BASE1_MASK; @@ -420,7 +420,7 @@ static int calibrate_8974(struct tsens_priv *priv) p2[i] <<= 2; p2[i] |= BIT_APPEND; } - /* Fall through */ + fallthrough; case ONE_PT_CALIB2: for (i = 0; i < priv->num_sensors; i++) { p1[i] += base1; diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c index b682a4df0081..3c19a3800c6d 100644 --- a/drivers/thermal/qcom/tsens-v1.c +++ b/drivers/thermal/qcom/tsens-v1.c @@ -202,7 +202,7 @@ static int calibrate_v1(struct tsens_priv *priv) p2[9] = (qfprom_cdata[3] & S9_P2_MASK) >> S9_P2_SHIFT; for (i = 0; i < priv->num_sensors; i++) p2[i] = ((base1 + p2[i]) << 2); - /* Fall through */ + fallthrough; case ONE_PT_CALIB2: base0 = (qfprom_cdata[4] & BASE0_MASK) >> BASE0_SHIFT; p1[0] = (qfprom_cdata[0] & S0_P1_MASK) >> S0_P1_SHIFT; @@ -263,7 +263,7 @@ 
static int calibrate_8976(struct tsens_priv *priv) for (i = 0; i < priv->num_sensors; i++) p2[i] = ((base1 + p2[i]) << 2); - /* Fall through */ + fallthrough; case ONE_PT_CALIB2: base0 = qfprom_cdata[0] & MSM8976_BASE0_MASK; p1[0] = (qfprom_cdata[0] & MSM8976_S0_P1_MASK) >> MSM8976_S0_P1_SHIFT; diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c index f77ceae5c7d7..394a23ce6ca4 100644 --- a/drivers/thunderbolt/ctl.c +++ b/drivers/thunderbolt/ctl.c @@ -453,7 +453,7 @@ static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame, "RX: checksum mismatch, dropping packet\n"); goto rx; } - /* Fall through */ + fallthrough; case TB_CFG_PKG_ICM_EVENT: if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size)) goto rx; diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 712395f518b8..3845db569e4c 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -2092,7 +2092,7 @@ static int tb_switch_add_dma_port(struct tb_switch *sw) if (tb_route(sw)) return 0; - /* fallthrough */ + fallthrough; case 3: ret = tb_switch_set_uuid(sw); if (ret) diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c index 2aae2c76d880..1a7e849840b2 100644 --- a/drivers/thunderbolt/tunnel.c +++ b/drivers/thunderbolt/tunnel.c @@ -315,7 +315,7 @@ static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate) switch (rate) { default: WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate); - /* Fallthrough */ + fallthrough; case 1620: val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT; break; @@ -355,7 +355,7 @@ static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes) default: WARN(1, "invalid number of lanes %u passed, defaulting to 1\n", lanes); - /* Fallthrough */ + fallthrough; case 1: val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT; break; diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c index 2a0e51a20e34..92c9a476defc 100644 --- a/drivers/tty/hvc/hvc_xen.c +++ b/drivers/tty/hvc/hvc_xen.c @@ -492,7 +492,7 @@ static void xencons_backend_changed(struct xenbus_device *dev, case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; - /* fall through - Missed the backend's CLOSING state. 
*/ + fallthrough; /* Missed the backend's CLOSING state */ case XenbusStateClosing: xenbus_frontend_closed(dev); break; diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c index 21e76a2ec182..a8e19b4833bf 100644 --- a/drivers/tty/mips_ejtag_fdc.c +++ b/drivers/tty/mips_ejtag_fdc.c @@ -243,7 +243,7 @@ done: /* Fall back to a 3 byte encoding */ word.bytes = 3; word.word &= 0x00ffffff; - /* Fall through */ + fallthrough; case 3: /* 3 byte encoding */ word.word |= 0x82000000; diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 0a29a94ec438..35cf12147e39 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -1584,7 +1584,7 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen) gsm_process_modem(tty, dlci, modem, clen); tty_kref_put(tty); } - /* Fall through */ + fallthrough; case 1: /* Line state will go via DLCI 0 controls only */ default: tty_insert_flip_string(port, data, len); @@ -1986,7 +1986,7 @@ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c) gsm->address = 0; gsm->state = GSM_ADDRESS; gsm->fcs = INIT_FCS; - /* Fall through */ + fallthrough; case GSM_ADDRESS: /* Address continuation */ gsm->fcs = gsm_fcs_add(gsm->fcs, c); if (gsm_read_ea(&gsm->address, c)) diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c index b09eac4b6d64..8e975cb29833 100644 --- a/drivers/tty/n_hdlc.c +++ b/drivers/tty/n_hdlc.c @@ -602,7 +602,7 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file, case TCOFLUSH: flush_tx_queue(tty); } - /* fall through - to default */ + fallthrough; /* to default */ default: error = n_tty_ioctl_helper(tty, file, cmd, arg); diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c index f75696f0ee2d..934dd2fb2ec8 100644 --- a/drivers/tty/n_r3964.c +++ b/drivers/tty/n_r3964.c @@ -605,7 +605,6 @@ static void receive_char(struct r3964_info *pInfo, const unsigned char c) } break; case R3964_WAIT_FOR_RX_REPEAT: - /* FALLTHROUGH */ case R3964_IDLE: if (c == STX) { /* Prevent rx_queue from overflow: */ diff --git a/drivers/tty/serial/8250/8250_em.c b/drivers/tty/serial/8250/8250_em.c index db88dee3a399..f8e99995eee9 100644 --- a/drivers/tty/serial/8250/8250_em.c +++ b/drivers/tty/serial/8250/8250_em.c @@ -39,7 +39,7 @@ static void serial8250_em_serial_out(struct uart_port *p, int offset, int value) break; case UART_IER: /* IER @ 0x04 */ value &= 0x0f; /* only 4 valid bits - not Xscale */ - /* fall-through */ + fallthrough; case UART_DLL_EM: /* DLL @ 0x24 (+9) */ case UART_DLM_EM: /* DLM @ 0x28 (+9) */ writel(value, p->membase + (offset << 2)); diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c index d1d253c4b518..31c9e83ea3cb 100644 --- a/drivers/tty/serial/8250/8250_fintek.c +++ b/drivers/tty/serial/8250/8250_fintek.c @@ -255,7 +255,7 @@ static void fintek_8250_set_irq_mode(struct fintek_8250 *pdata, bool is_level) case CHIP_ID_F81866: sio_write_mask_reg(pdata, F81866_FIFO_CTRL, F81866_IRQ_MODE1, 0); - /* fall through */ + fallthrough; case CHIP_ID_F81865: sio_write_mask_reg(pdata, F81866_IRQ_MODE, F81866_IRQ_SHARE, F81866_IRQ_SHARE); diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 1a74d511b02a..3eb2d485eaeb 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -631,7 +631,7 @@ pci_timedia_setup(struct serial_private *priv, break; case 3: offset = board->uart_offset; - /* FALLTHROUGH */ + fallthrough; case 4: /* BAR 2 */ case 5: /* BAR 3 */ case 6: /* BAR 4 */ diff --git 
a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 09475695effd..06028da9bc6a 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1872,7 +1872,7 @@ static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir) switch (iir & 0x3f) { case UART_IIR_RX_TIMEOUT: serial8250_rx_dma_flush(up); - /* fall-through */ + fallthrough; case UART_IIR_RLSI: return true; } diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c index e0b73a5402db..a2978abab0db 100644 --- a/drivers/tty/serial/8250/8250_uniphier.c +++ b/drivers/tty/serial/8250/8250_uniphier.c @@ -75,7 +75,7 @@ static unsigned int uniphier_serial_in(struct uart_port *p, int offset) break; case UART_LCR: valshift = 8; - /* fall through */ + fallthrough; case UART_MCR: offset = UNIPHIER_UART_LCR_MCR; break; @@ -101,7 +101,7 @@ static void uniphier_serial_out(struct uart_port *p, int offset, int value) case UART_SCR: /* No SCR for this hardware. Use CHAR as a scratch register */ valshift = 8; - /* fall through */ + fallthrough; case UART_FCR: offset = UNIPHIER_UART_CHAR_FCR; break; @@ -109,7 +109,7 @@ static void uniphier_serial_out(struct uart_port *p, int offset, int value) valshift = 8; /* Divisor latch access bit does not exist. */ value &= ~UART_LCR_DLAB; - /* fall through */ + fallthrough; case UART_MCR: offset = UNIPHIER_UART_LCR_MCR; break; diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index e43471b33710..bb5fc8bdd57a 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -1845,7 +1845,7 @@ static void atmel_get_ip_name(struct uart_port *port) version = atmel_uart_readl(port, ATMEL_US_VERSION); switch (version) { case 0x814: /* sama5d2 */ - /* fall through */ + fallthrough; case 0x701: /* sama5d4 */ atmel_port->fidi_min = 3; atmel_port->fidi_max = 65535; diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index 8573fc9cb0cd..76b94d0ff586 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -587,7 +587,6 @@ static irqreturn_t serial_omap_irq(int irq, void *dev_id) transmit_chars(up, lsr); break; case UART_IIR_RX_TIMEOUT: - /* FALLTHROUGH */ case UART_IIR_RDI: serial_omap_rdi(up, lsr); break; @@ -598,7 +597,6 @@ static irqreturn_t serial_omap_irq(int irq, void *dev_id) /* simply try again */ break; case UART_IIR_XOFF: - /* FALLTHROUGH */ default: break; } diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c index b5ef86ae2746..85366e059258 100644 --- a/drivers/tty/serial/rda-uart.c +++ b/drivers/tty/serial/rda-uart.c @@ -259,7 +259,7 @@ static void rda_uart_set_termios(struct uart_port *port, case CS5: case CS6: dev_warn(port->dev, "bit size not supported, using 7 bits\n"); - /* Fall through */ + fallthrough; case CS7: ctrl &= ~RDA_UART_DBITS_8; break; diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c index b87914ae6da8..bd13014a1c53 100644 --- a/drivers/tty/serial/serial-tegra.c +++ b/drivers/tty/serial/serial-tegra.c @@ -876,7 +876,7 @@ static irqreturn_t tegra_uart_isr(int irq, void *data) tegra_uart_write(tup, ier, UART_IER); break; } - /* Fall through */ + fallthrough; case 2: /* Receive */ if (!tup->use_rx_pio) { is_rx_start = tup->rx_in_progress; diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 3403dd790517..f797c971cd82 100644 --- a/drivers/tty/serial/serial_core.c +++ 
b/drivers/tty/serial/serial_core.c @@ -2101,7 +2101,7 @@ uart_set_options(struct uart_port *port, struct console *co, switch (parity) { case 'o': case 'O': termios.c_cflag |= PARODD; - /*fall through*/ + fallthrough; case 'e': case 'E': termios.c_cflag |= PARENB; break; diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c index 8ce9a7a256e5..319e5ceb6130 100644 --- a/drivers/tty/serial/sunsu.c +++ b/drivers/tty/serial/sunsu.c @@ -514,7 +514,7 @@ static void receive_kbd_ms_chars(struct uart_sunsu_port *up, int is_break) switch (ret) { case 2: sunsu_change_mouse_baud(up); - /* fallthru */ + fallthrough; case 1: break; diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c index 7ea06bbc6197..001e19d7c17d 100644 --- a/drivers/tty/serial/sunzilog.c +++ b/drivers/tty/serial/sunzilog.c @@ -306,7 +306,7 @@ static void sunzilog_kbdms_receive_chars(struct uart_sunzilog_port *up, switch (ret) { case 2: sunzilog_change_mouse_baud(up); - /* fallthru */ + fallthrough; case 1: break; diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index 2833f1418d6d..a9b1ee27183a 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -544,7 +544,7 @@ static int cdns_uart_clk_notifier_cb(struct notifier_block *nb, cdns_uart->baud = cdns_uart_set_baud_rate(cdns_uart->port, cdns_uart->baud); - /* fall through */ + fallthrough; case ABORT_RATE_CHANGE: if (!locked) spin_lock_irqsave(&cdns_uart->port->lock, flags); diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c index 9245fffdbceb..e18f318586ab 100644 --- a/drivers/tty/tty_ioctl.c +++ b/drivers/tty/tty_ioctl.c @@ -866,7 +866,7 @@ static int __tty_perform_flush(struct tty_struct *tty, unsigned long arg) ld->ops->flush_buffer(tty); tty_unthrottle(tty); } - /* fall through */ + fallthrough; case TCOFLUSH: tty_driver_flush_buffer(tty); break; diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index ccb533fd00a2..d8b9363b81c1 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -1553,7 +1553,7 @@ static void csi_J(struct vc_data *vc, int vpar) break; case 3: /* include scrollback */ flush_scrollback(vc); - /* fallthrough */ + fallthrough; case 2: /* erase whole display */ vc_uniscr_clear_lines(vc, 0, vc->vc_rows); count = vc->vc_cols * vc->vc_rows; @@ -2167,7 +2167,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) lf(vc); if (!is_kbd(vc, lnm)) return; - /* fall through */ + fallthrough; case 13: cr(vc); return; @@ -2306,7 +2306,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) return; } vc->vc_priv = EPecma; - /* fall through */ + fallthrough; case ESgetpars: if (c == ';' && vc->vc_npar < NPAR - 1) { vc->vc_npar++; diff --git a/drivers/usb/c67x00/c67x00-sched.c b/drivers/usb/c67x00/c67x00-sched.c index f7f6229082ca..60f4711717d2 100644 --- a/drivers/usb/c67x00/c67x00-sched.c +++ b/drivers/usb/c67x00/c67x00-sched.c @@ -710,7 +710,8 @@ static int c67x00_add_ctrl_urb(struct c67x00_hcd *c67x00, struct urb *urb) if (ret) return ret; break; - } /* else fallthrough */ + } + fallthrough; case STATUS_STAGE: pid = !usb_pipeout(urb->pipe) ? 
USB_PID_OUT : USB_PID_IN; ret = c67x00_create_td(c67x00, urb, NULL, 0, pid, 1, diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 052d5accfe9b..5b768b80d1ee 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -727,7 +727,7 @@ static void hub_irq(struct urb *urb) if ((++hub->nerrors < 10) || hub->error) goto resubmit; hub->error = status; - /* FALL THROUGH */ + fallthrough; /* let hub_wq handle things */ case 0: /* we got data: port status changed */ diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 422aea24afcd..2eb34c8b4065 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -646,9 +646,8 @@ static int dwc3_phy_setup(struct dwc3 *dwc) if (!(reg & DWC3_GUSB2PHYCFG_ULPI_UTMI)) break; } - /* FALLTHROUGH */ + fallthrough; case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI: - /* FALLTHROUGH */ default: break; } @@ -1411,7 +1410,7 @@ static void dwc3_check_params(struct dwc3 *dwc) default: dev_err(dev, "invalid maximum_speed parameter %d\n", dwc->maximum_speed); - /* fall through */ + fallthrough; case USB_SPEED_UNKNOWN: /* default to superspeed */ dwc->maximum_speed = USB_SPEED_SUPER; diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index 331c951d72dc..950c9435beec 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -2039,7 +2039,6 @@ static int do_scsi_command(struct fsg_common *common) case RELEASE: case RESERVE: case SEND_DIAGNOSTIC: - fallthrough; default: unknown_cmnd: diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index fa6793065c7c..a6426dd1cfef 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -328,7 +328,7 @@ static int usba_config_fifo_table(struct usba_udc *udc) switch (fifo_mode) { default: fifo_mode = 0; - /* fall through */ + fallthrough; case 0: udc->fifo_cfg = NULL; n = 0; diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c index b2638e83bb49..a6f7b2594c09 100644 --- a/drivers/usb/gadget/udc/fsl_udc_core.c +++ b/drivers/usb/gadget/udc/fsl_udc_core.c @@ -250,7 +250,7 @@ static int dr_controller_setup(struct fsl_udc *udc) break; case FSL_USB2_PHY_UTMI_WIDE: portctrl |= PORTSCX_PTW_16BIT; - /* fall through */ + fallthrough; case FSL_USB2_PHY_UTMI: case FSL_USB2_PHY_UTMI_DUAL: if (udc->pdata->have_sysif_regs) { diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c index cfafdd92c2a8..10324a7334fe 100644 --- a/drivers/usb/gadget/udc/pxa25x_udc.c +++ b/drivers/usb/gadget/udc/pxa25x_udc.c @@ -2340,12 +2340,12 @@ static int pxa25x_udc_probe(struct platform_device *pdev) case PXA250_A0: case PXA250_A1: /* A0/A1 "not released"; ep 13, 15 unusable */ - /* fall through */ + fallthrough; case PXA250_B2: case PXA210_B2: case PXA250_B1: case PXA210_B1: case PXA250_B0: case PXA210_B0: /* OUT-DMA is broken ... 
*/ - /* fall through */ + fallthrough; case PXA250_C0: case PXA210_C0: break; #elif defined(CONFIG_ARCH_IXP4XX) diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c index a87c0b26279e..3055d9abfec3 100644 --- a/drivers/usb/host/isp116x-hcd.c +++ b/drivers/usb/host/isp116x-hcd.c @@ -1019,7 +1019,7 @@ static int isp116x_hub_control(struct usb_hcd *hcd, spin_lock_irqsave(&isp116x->lock, flags); isp116x_write_reg32(isp116x, HCRHSTATUS, RH_HS_OCIC); spin_unlock_irqrestore(&isp116x->lock, flags); - /* fall through */ + fallthrough; case C_HUB_LOCAL_POWER: DBG("C_HUB_LOCAL_POWER\n"); break; @@ -1421,10 +1421,10 @@ static int isp116x_bus_suspend(struct usb_hcd *hcd) isp116x_write_reg32(isp116x, HCCONTROL, (val & ~HCCONTROL_HCFS) | HCCONTROL_USB_RESET); - /* fall through */ + fallthrough; case HCCONTROL_USB_RESET: ret = -EBUSY; - /* fall through */ + fallthrough; default: /* HCCONTROL_USB_SUSPEND */ spin_unlock_irqrestore(&isp116x->lock, flags); break; diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index b8961c0381cf..8c1bbac6d136 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -957,7 +957,8 @@ static void quirk_usb_disable_ehci(struct pci_dev *pdev) ehci_bios_handoff(pdev, op_reg_base, cap, offset); break; case 0: /* Illegal reserved cap, set cap=0 so we exit */ - cap = 0; /* fall through */ + cap = 0; + fallthrough; default: dev_warn(&pdev->dev, "EHCI: unrecognized capability %02x\n", diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c index fcc5ac5ce8b1..ccb0156fcebe 100644 --- a/drivers/usb/host/xhci-dbgcap.c +++ b/drivers/usb/host/xhci-dbgcap.c @@ -699,7 +699,7 @@ static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event) switch (comp_code) { case COMP_SUCCESS: remain_length = 0; - /* FALLTHROUGH */ + fallthrough; case COMP_SHORT_PACKET: status = 0; break; diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index c3554e37e09f..fe27dfc45270 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -1483,7 +1483,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, break; case USB_PORT_FEAT_C_SUSPEND: bus_state->port_c_suspend &= ~(1 << wIndex); - /* fall through */ + fallthrough; case USB_PORT_FEAT_C_RESET: case USB_PORT_FEAT_C_BH_PORT_RESET: case USB_PORT_FEAT_C_CONNECTION: diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 696fad50b478..fe405cd38dbc 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1311,7 +1311,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev, interval = xhci_parse_microframe_interval(udev, ep); break; } - /* Fall through - SS and HS isoc/int have same decoding */ + fallthrough; /* SS and HS isoc/int have same decoding */ case USB_SPEED_SUPER_PLUS: case USB_SPEED_SUPER: @@ -1331,7 +1331,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev, * since it uses the same rules as low speed interrupt * endpoints. 
*/ - /* fall through */ + fallthrough; case USB_SPEED_LOW: if (usb_endpoint_xfer_int(&ep->desc) || diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 2c255d0620b0..a741a38a4c69 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -2103,7 +2103,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, break; xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n", trb_comp_code, ep_index); - /* else fall through */ + fallthrough; case COMP_STALL_ERROR: /* Did we transfer part of the data (middle) phase? */ if (trb_type == TRB_DATA || trb_type == TRB_NORMAL) diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 3c41b14ecce7..627c4c510446 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -4618,7 +4618,7 @@ static unsigned long long xhci_calculate_intel_u1_timeout( break; } /* Otherwise the calculation is the same as isoc eps */ - /* fall through */ + fallthrough; case USB_ENDPOINT_XFER_ISOC: timeout_ns = xhci_service_interval_to_ns(desc); timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100); diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c index c545b27ea568..edb5b63d7063 100644 --- a/drivers/usb/musb/cppi_dma.c +++ b/drivers/usb/musb/cppi_dma.c @@ -975,7 +975,7 @@ static int cppi_channel_program(struct dma_channel *ch, musb_dbg(musb, "%cX DMA%d not allocated!", cppi_ch->transmit ? 'T' : 'R', cppi_ch->index); - /* FALLTHROUGH */ + fallthrough; case MUSB_DMA_STATUS_FREE: break; } diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 5a56a03996b1..849e0b770130 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -852,7 +852,7 @@ static void musb_handle_intr_suspend(struct musb *musb, u8 devctl) case OTG_STATE_B_IDLE: if (!musb->is_active) break; - /* fall through */ + fallthrough; case OTG_STATE_B_PERIPHERAL: musb_g_suspend(musb); musb->is_active = musb->g.b_hnp_enable; @@ -972,9 +972,8 @@ static void musb_handle_intr_disconnect(struct musb *musb, u8 devctl) case OTG_STATE_A_PERIPHERAL: musb_hnp_stop(musb); musb_root_disconnect(musb); - /* FALLTHROUGH */ + fallthrough; case OTG_STATE_B_WAIT_ACON: - /* FALLTHROUGH */ case OTG_STATE_B_PERIPHERAL: case OTG_STATE_B_IDLE: musb_g_disconnect(musb); @@ -1009,7 +1008,7 @@ static void musb_handle_intr_reset(struct musb *musb) switch (musb->xceiv->otg->state) { case OTG_STATE_A_SUSPEND: musb_g_reset(musb); - /* FALLTHROUGH */ + fallthrough; case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */ /* never use invalid T(a_wait_bcon) */ musb_dbg(musb, "HNP: in %s, %d msec timeout", @@ -1030,7 +1029,7 @@ static void musb_handle_intr_reset(struct musb *musb) break; case OTG_STATE_B_IDLE: musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL; - /* FALLTHROUGH */ + fallthrough; case OTG_STATE_B_PERIPHERAL: musb_g_reset(musb); break; @@ -1471,7 +1470,7 @@ static int ep_config_from_table(struct musb *musb) switch (fifo_mode) { default: fifo_mode = 0; - /* FALLTHROUGH */ + fallthrough; case 0: cfg = mode_0_cfg; n = ARRAY_SIZE(mode_0_cfg); @@ -2018,7 +2017,7 @@ static void musb_pm_runtime_check_session(struct musb *musb) musb->quirk_retries--; return; } - /* fall through */ + fallthrough; case MUSB_QUIRK_A_DISCONNECT_19: if (musb->quirk_retries && !musb->flush_irq_work) { musb_dbg(musb, diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index 19556c1a8ae8..30085b2be7b9 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c @@ -232,7 +232,7 @@ static 
int dsps_check_status(struct musb *musb, void *unused) dsps_mod_timer_optional(glue); break; } - /* fall through */ + fallthrough; case OTG_STATE_A_WAIT_BCON: /* keep VBUS on for host-only mode */ @@ -242,7 +242,7 @@ static int dsps_check_status(struct musb *musb, void *unused) } musb_writeb(musb->mregs, MUSB_DEVCTL, 0); skip_session = 1; - /* fall through */ + fallthrough; case OTG_STATE_A_IDLE: case OTG_STATE_B_IDLE: @@ -793,7 +793,7 @@ static int dsps_create_musb_pdev(struct dsps_glue *glue, case USB_SPEED_SUPER: dev_warn(dev, "ignore incorrect maximum_speed " "(super-speed) setting in dts"); - /* fall through */ + fallthrough; default: config->maximum_speed = USB_SPEED_HIGH; } diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c index 0ae3e0be043e..44d3cb02fa76 100644 --- a/drivers/usb/musb/musb_gadget_ep0.c +++ b/drivers/usb/musb/musb_gadget_ep0.c @@ -735,7 +735,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb) musb_writeb(mbase, MUSB_TESTMODE, musb->test_mode_nr); } - /* FALLTHROUGH */ + fallthrough; case MUSB_EP0_STAGE_STATUSOUT: /* end of sequence #1: write to host (TX state) */ @@ -767,7 +767,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb) */ retval = IRQ_HANDLED; musb->ep0_state = MUSB_EP0_STAGE_SETUP; - /* FALLTHROUGH */ + fallthrough; case MUSB_EP0_STAGE_SETUP: setup: diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 8b7d22a0c0fb..30c5e7de0761 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -360,7 +360,7 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb, qh = first_qh(head); break; } - /* fall through */ + fallthrough; case USB_ENDPOINT_XFER_ISOC: case USB_ENDPOINT_XFER_INT: @@ -1019,7 +1019,7 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb) musb->ep0_stage = MUSB_EP0_OUT; more = true; } - /* FALLTHROUGH */ + fallthrough; case MUSB_EP0_OUT: fifo_count = min_t(size_t, qh->maxpacket, urb->transfer_buffer_length - @@ -2222,7 +2222,7 @@ static int musb_urb_enqueue( interval = max_t(u8, epd->bInterval, 1); break; } - /* FALLTHROUGH */ + fallthrough; case USB_ENDPOINT_XFER_ISOC: /* ISO always uses logarithmic encoding */ interval = min_t(u8, epd->bInterval, 16); diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c index cb7ae297a3af..cafc69536e1d 100644 --- a/drivers/usb/musb/musb_virthub.c +++ b/drivers/usb/musb/musb_virthub.c @@ -211,7 +211,7 @@ void musb_root_disconnect(struct musb *musb) musb->g.is_a_peripheral = 1; break; } - /* FALLTHROUGH */ + fallthrough; case OTG_STATE_A_HOST: musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON; musb->is_active = 0; diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index d62c78b97cad..4232f1ce3fbf 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c @@ -104,7 +104,7 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue) if (error) break; musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE; - /* Fall through */ + fallthrough; case OTG_STATE_A_WAIT_VRISE: case OTG_STATE_A_WAIT_BCON: case OTG_STATE_A_HOST: diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c index 99890d1bbfcb..c26683a2702b 100644 --- a/drivers/usb/musb/tusb6010.c +++ b/drivers/usb/musb/tusb6010.c @@ -464,7 +464,7 @@ static void musb_do_idle(struct timer_list *t) dev_dbg(musb->controller, "Nothing connected %s, turning off VBUS\n", usb_otg_state_string(musb->xceiv->otg->state)); } - /* FALLTHROUGH */ + fallthrough; case OTG_STATE_A_IDLE: 
tusb_musb_set_vbus(musb, 0); default: diff --git a/drivers/usb/storage/sddr55.c b/drivers/usb/storage/sddr55.c index c8a988d2cfdd..15dc25801cdc 100644 --- a/drivers/usb/storage/sddr55.c +++ b/drivers/usb/storage/sddr55.c @@ -592,7 +592,7 @@ static unsigned long sddr55_get_capacity(struct us_data *us) { case 0x64: info->pageshift = 8; info->smallpageshift = 1; - /* fall through */ + fallthrough; case 0x5d: // 5d is a ROM card with pagesize 512. return 0x00200000; diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index d592071119ba..08f9296431e9 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -688,7 +688,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd, break; case DMA_BIDIRECTIONAL: cmdinfo->state |= ALLOC_DATA_IN_URB | SUBMIT_DATA_IN_URB; - /* fall through */ + fallthrough; case DMA_TO_DEVICE: cmdinfo->state |= ALLOC_DATA_OUT_URB | SUBMIT_DATA_OUT_URB; case DMA_NONE: diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c index f57d91fd0e09..bd80e03b2b6f 100644 --- a/drivers/usb/typec/tcpm/tcpci.c +++ b/drivers/usb/typec/tcpm/tcpci.c @@ -157,7 +157,7 @@ static enum typec_cc_status tcpci_to_typec_cc(unsigned int cc, bool sink) case 0x3: if (sink) return TYPEC_CC_RP_3_0; - /* fall through */ + fallthrough; case 0x0: default: return TYPEC_CC_OPEN; diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 620465c2a1da..1ab1f5cda4ac 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -990,7 +990,7 @@ static long vfio_pci_ioctl(void *device_data, case VFIO_PCI_ERR_IRQ_INDEX: if (pci_is_pcie(vdev->pdev)) break; - /* fall through */ + fallthrough; default: return -EINVAL; } diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index c992973cc2d5..5fbf0c1f7433 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -2439,7 +2439,7 @@ static void *vfio_iommu_type1_open(unsigned long arg) break; case VFIO_TYPE1_NESTING_IOMMU: iommu->nesting = true; - /* fall through */ + fallthrough; case VFIO_TYPE1v2_IOMMU: iommu->v2 = true; break; diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c index ddc7f5f0401f..8ec19425671f 100644 --- a/drivers/video/backlight/adp8860_bl.c +++ b/drivers/video/backlight/adp8860_bl.c @@ -681,7 +681,7 @@ static int adp8860_probe(struct i2c_client *client, switch (ADP8860_MANID(reg_val)) { case ADP8863_MANUFID: data->gdwn_dis = !!pdata->gdwn_dis; - /* fall through */ + fallthrough; case ADP8860_MANUFID: data->en_ambl_sens = !!pdata->en_ambl_sens; break; diff --git a/drivers/video/fbdev/acornfb.c b/drivers/video/fbdev/acornfb.c index 09a9ad901dad..bcc92aecf666 100644 --- a/drivers/video/fbdev/acornfb.c +++ b/drivers/video/fbdev/acornfb.c @@ -857,7 +857,7 @@ static void acornfb_parse_dram(char *opt) case 'M': case 'm': size *= 1024; - /* Fall through */ + fallthrough; case 'K': case 'k': size *= 1024; diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c index 6f7838979f0a..ae3d8e8b8d33 100644 --- a/drivers/video/fbdev/arcfb.c +++ b/drivers/video/fbdev/arcfb.c @@ -419,7 +419,7 @@ static int arcfb_ioctl(struct fb_info *info, schedule(); finish_wait(&arcfb_waitq, &wait); } - /* fall through */ + fallthrough; case FBIO_GETCONTROL2: { diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c index 1e252192569a..bfd2f00b403b 100644 --- a/drivers/video/fbdev/atmel_lcdfb.c +++ b/drivers/video/fbdev/atmel_lcdfb.c @@ -508,7 +508,7 @@ static 
int atmel_lcdfb_check_var(struct fb_var_screeninfo *var, case 32: var->transp.offset = 24; var->transp.length = 8; - /* fall through */ + fallthrough; case 24: if (pdata->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) { /* RGB:888 mode */ @@ -633,7 +633,7 @@ static int atmel_lcdfb_set_par(struct fb_info *info) case 2: value |= ATMEL_LCDC_PIXELSIZE_2; break; case 4: value |= ATMEL_LCDC_PIXELSIZE_4; break; case 8: value |= ATMEL_LCDC_PIXELSIZE_8; break; - case 15: /* fall through */ + case 15: case 16: value |= ATMEL_LCDC_PIXELSIZE_16; break; case 24: value |= ATMEL_LCDC_PIXELSIZE_24; break; case 32: value |= ATMEL_LCDC_PIXELSIZE_32; break; diff --git a/drivers/video/fbdev/aty/radeon_pm.c b/drivers/video/fbdev/aty/radeon_pm.c index 7c4483c7f313..f3d8123d7f36 100644 --- a/drivers/video/fbdev/aty/radeon_pm.c +++ b/drivers/video/fbdev/aty/radeon_pm.c @@ -1208,11 +1208,11 @@ static void radeon_pm_enable_dll_m10(struct radeonfb_info *rinfo) case 1: if (mc & 0x4) break; - /* fall through */ + fallthrough; case 2: dll_sleep_mask |= MDLL_R300_RDCK__MRDCKB_SLEEP; dll_reset_mask |= MDLL_R300_RDCK__MRDCKB_RESET; - /* fall through */ + fallthrough; case 0: dll_sleep_mask |= MDLL_R300_RDCK__MRDCKA_SLEEP; dll_reset_mask |= MDLL_R300_RDCK__MRDCKA_RESET; @@ -1221,7 +1221,7 @@ static void radeon_pm_enable_dll_m10(struct radeonfb_info *rinfo) case 1: if (!(mc & 0x4)) break; - /* fall through */ + fallthrough; case 2: dll_sleep_mask |= MDLL_R300_RDCK__MRDCKD_SLEEP; dll_reset_mask |= MDLL_R300_RDCK__MRDCKD_RESET; diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c index 3df64a973194..15a9ee7cd734 100644 --- a/drivers/video/fbdev/cirrusfb.c +++ b/drivers/video/fbdev/cirrusfb.c @@ -1476,11 +1476,11 @@ static void init_vgachip(struct fb_info *info) mdelay(100); /* mode */ vga_wgfx(cinfo->regbase, CL_GR31, 0x00); - /* fall through */ + fallthrough; case BT_GD5480: /* from Klaus' NetBSD driver: */ vga_wgfx(cinfo->regbase, CL_GR2F, 0x00); - /* fall through */ + fallthrough; case BT_ALPINE: /* put blitter into 542x compat */ vga_wgfx(cinfo->regbase, CL_GR33, 0x00); diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c index 9c4f1be856ec..a88dcb63eeb4 100644 --- a/drivers/video/fbdev/controlfb.c +++ b/drivers/video/fbdev/controlfb.c @@ -713,7 +713,7 @@ static int controlfb_blank(int blank_mode, struct fb_info *info) break; case FB_BLANK_POWERDOWN: ctrl &= ~0x33; - /* fall through */ + fallthrough; case FB_BLANK_NORMAL: ctrl |= 0x400; break; diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index da7c88ffaa6a..6815bfb7f572 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -1306,7 +1306,7 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd, case FBIOGET_CON2FBMAP: case FBIOPUT_CON2FBMAP: arg = (unsigned long) compat_ptr(arg); - /* fall through */ + fallthrough; case FBIOBLANK: ret = do_fb_ioctl(info, cmd, arg); break; diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c index 67ebfe5c9f1d..a547c21c7e92 100644 --- a/drivers/video/fbdev/fsl-diu-fb.c +++ b/drivers/video/fbdev/fsl-diu-fb.c @@ -1287,7 +1287,7 @@ static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd, dev_warn(info->dev, "MFB_SET_PIXFMT value of 0x%08x is deprecated.\n", MFB_SET_PIXFMT_OLD); - /* fall through */ + fallthrough; case MFB_SET_PIXFMT: if (copy_from_user(&pix_fmt, buf, sizeof(pix_fmt))) return -EFAULT; @@ -1297,7 +1297,7 @@ static int fsl_diu_ioctl(struct fb_info *info, unsigned int 
cmd, dev_warn(info->dev, "MFB_GET_PIXFMT value of 0x%08x is deprecated.\n", MFB_GET_PIXFMT_OLD); - /* fall through */ + fallthrough; case MFB_GET_PIXFMT: pix_fmt = ad->pix_fmt; if (copy_to_user(buf, &pix_fmt, sizeof(pix_fmt))) diff --git a/drivers/video/fbdev/gxt4500.c b/drivers/video/fbdev/gxt4500.c index 13ded3a10708..e5475ae1e158 100644 --- a/drivers/video/fbdev/gxt4500.c +++ b/drivers/video/fbdev/gxt4500.c @@ -534,7 +534,7 @@ static int gxt4500_setcolreg(unsigned int reg, unsigned int red, break; case DFA_PIX_32BIT: val |= (reg << 24); - /* fall through */ + fallthrough; case DFA_PIX_24BIT: val |= (reg << 16) | (reg << 8); break; diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c index e4c3c8b65da4..02411d89cb46 100644 --- a/drivers/video/fbdev/hyperv_fb.c +++ b/drivers/video/fbdev/hyperv_fb.c @@ -648,13 +648,13 @@ static int synthvid_connect_vsp(struct hv_device *hdev) ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN10); if (!ret) break; - /* Fallthrough */ + fallthrough; case VERSION_WIN8: case VERSION_WIN8_1: ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN8); if (!ret) break; - /* Fallthrough */ + fallthrough; case VERSION_WS2008: case VERSION_WIN7: ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN7); diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c index c65ec7386e87..e6f35f8feefc 100644 --- a/drivers/video/fbdev/i740fb.c +++ b/drivers/video/fbdev/i740fb.c @@ -430,7 +430,7 @@ static int i740fb_decode_var(const struct fb_var_screeninfo *var, break; case 9 ... 15: bpp = 15; - /* fall through */ + fallthrough; case 16: if ((1000000 / var->pixclock) > DACSPEED16) { dev_err(info->device, "requested pixclock %i MHz out of range (max. %i MHz at 15/16bpp)\n", diff --git a/drivers/video/fbdev/mmp/fb/mmpfb.c b/drivers/video/fbdev/mmp/fb/mmpfb.c index 01c75c031cb6..39ebbe026ddf 100644 --- a/drivers/video/fbdev/mmp/fb/mmpfb.c +++ b/drivers/video/fbdev/mmp/fb/mmpfb.c @@ -90,8 +90,6 @@ static int var_to_pixfmt(struct fb_var_screeninfo *var) else return PIXFMT_BGR888UNPACK; } - - /* fall through */ } return -EINVAL; diff --git a/drivers/video/fbdev/nvidia/nv_hw.c b/drivers/video/fbdev/nvidia/nv_hw.c index 8335da4ca30e..9b0a324bb1b4 100644 --- a/drivers/video/fbdev/nvidia/nv_hw.c +++ b/drivers/video/fbdev/nvidia/nv_hw.c @@ -896,7 +896,7 @@ void NVCalcStateExt(struct nvidia_par *par, if (!par->FlatPanel) state->control = NV_RD32(par->PRAMDAC0, 0x0580) & 0xeffffeff; - /* fallthrough */ + fallthrough; case NV_ARCH_10: case NV_ARCH_20: case NV_ARCH_30: diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c index 5cd0f5f6a4ae..4501e848a36f 100644 --- a/drivers/video/fbdev/offb.c +++ b/drivers/video/fbdev/offb.c @@ -141,7 +141,7 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, /* Clear PALETTE_ACCESS_CNTL in DAC_CNTL */ out_le32(par->cmap_adr + 0x58, in_le32(par->cmap_adr + 0x58) & ~0x20); - /* fall through */ + fallthrough; case cmap_r128: /* Set palette index & data */ out_8(par->cmap_adr + 0xb0, regno); @@ -211,7 +211,7 @@ static int offb_blank(int blank, struct fb_info *info) /* Clear PALETTE_ACCESS_CNTL in DAC_CNTL */ out_le32(par->cmap_adr + 0x58, in_le32(par->cmap_adr + 0x58) & ~0x20); - /* fall through */ + fallthrough; case cmap_r128: /* Set palette index & data */ out_8(par->cmap_adr + 0xb0, i); diff --git a/drivers/video/fbdev/omap/lcdc.c b/drivers/video/fbdev/omap/lcdc.c index fa73acfc1371..7317c9aad677 100644 --- a/drivers/video/fbdev/omap/lcdc.c +++ 
b/drivers/video/fbdev/omap/lcdc.c @@ -328,13 +328,13 @@ static int omap_lcdc_setup_plane(int plane, int channel_out, lcdc.bpp = 12; break; } - /* fallthrough */ + fallthrough; case OMAPFB_COLOR_YUV422: if (lcdc.ext_mode) { lcdc.bpp = 16; break; } - /* fallthrough */ + fallthrough; default: /* FIXME: other BPPs. * bpp1: code 0, size 256 diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c index 0cbcc74fa943..3d090d2d9ed9 100644 --- a/drivers/video/fbdev/omap/omapfb_main.c +++ b/drivers/video/fbdev/omap/omapfb_main.c @@ -253,7 +253,7 @@ static int _setcolreg(struct fb_info *info, u_int regno, u_int red, u_int green, if (fbdev->ctrl->setcolreg) r = fbdev->ctrl->setcolreg(regno, red, green, blue, transp, update_hw_pal); - /* Fallthrough */ + fallthrough; case OMAPFB_COLOR_RGB565: case OMAPFB_COLOR_RGB444: if (r != 0) @@ -443,7 +443,7 @@ static int set_color_mode(struct omapfb_plane_struct *plane, return 0; case 12: var->bits_per_pixel = 16; - /* fall through */ + fallthrough; case 16: if (plane->fbdev->panel->bpp == 12) plane->color_mode = OMAPFB_COLOR_RGB444; @@ -1531,27 +1531,27 @@ static void omapfb_free_resources(struct omapfb_device *fbdev, int state) case OMAPFB_ACTIVE: for (i = 0; i < fbdev->mem_desc.region_cnt; i++) unregister_framebuffer(fbdev->fb_info[i]); - /* fall through */ + fallthrough; case 7: omapfb_unregister_sysfs(fbdev); - /* fall through */ + fallthrough; case 6: if (fbdev->panel->disable) fbdev->panel->disable(fbdev->panel); - /* fall through */ + fallthrough; case 5: omapfb_set_update_mode(fbdev, OMAPFB_UPDATE_DISABLED); - /* fall through */ + fallthrough; case 4: planes_cleanup(fbdev); - /* fall through */ + fallthrough; case 3: ctrl_cleanup(fbdev); - /* fall through */ + fallthrough; case 2: if (fbdev->panel->cleanup) fbdev->panel->cleanup(fbdev->panel); - /* fall through */ + fallthrough; case 1: dev_set_drvdata(fbdev->dev, NULL); kfree(fbdev); @@ -1854,7 +1854,7 @@ static int __init omapfb_setup(char *options) case 'm': case 'M': vram *= 1024; - /* Fall through */ + fallthrough; case 'k': case 'K': vram *= 1024; diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c index 3920a0db0390..b2d6e6df2161 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c @@ -1861,7 +1861,7 @@ static void calc_vrfb_rotation_offset(u8 rotation, bool mirror, if (color_mode == OMAP_DSS_COLOR_YUV2 || color_mode == OMAP_DSS_COLOR_UYVY) width = width >> 1; - /* fall through */ + fallthrough; case OMAP_DSS_ROT_90: case OMAP_DSS_ROT_270: *offset1 = 0; @@ -1884,7 +1884,7 @@ static void calc_vrfb_rotation_offset(u8 rotation, bool mirror, if (color_mode == OMAP_DSS_COLOR_YUV2 || color_mode == OMAP_DSS_COLOR_UYVY) width = width >> 1; - /* fall through */ + fallthrough; case OMAP_DSS_ROT_90 + 4: case OMAP_DSS_ROT_270 + 4: *offset1 = 0; diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c index f40be68d5aac..ea8c88aa4477 100644 --- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c +++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c @@ -760,7 +760,7 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg) r = -ENODEV; break; } - /* FALLTHROUGH */ + fallthrough; case OMAPFB_WAITFORVSYNC: DBG("ioctl WAITFORVSYNC\n"); diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c index 836e7b1639ce..a3decc7fadde 100644 --- 
a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c +++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c @@ -882,7 +882,7 @@ int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl, / (var->bits_per_pixel >> 2); break; } - /* fall through */ + fallthrough; default: screen_width = fix->line_length / (var->bits_per_pixel >> 3); break; diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c index c7c98d8e2359..0642555289e0 100644 --- a/drivers/video/fbdev/pm2fb.c +++ b/drivers/video/fbdev/pm2fb.c @@ -233,10 +233,10 @@ static u32 to3264(u32 timing, int bpp, int is64) switch (bpp) { case 24: timing *= 3; - /* fall through */ + fallthrough; case 8: timing >>= 1; - /* fall through */ + fallthrough; case 16: timing >>= 1; case 32: diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c index eedfbd3572a8..47e6a1d0d229 100644 --- a/drivers/video/fbdev/pxa168fb.c +++ b/drivers/video/fbdev/pxa168fb.c @@ -60,8 +60,6 @@ static int determine_best_pix_fmt(struct fb_var_screeninfo *var) else return PIX_FMT_BGR1555; } - - /* fall through */ } /* @@ -87,8 +85,6 @@ static int determine_best_pix_fmt(struct fb_var_screeninfo *var) else return PIX_FMT_BGR888UNPACK; } - - /* fall through */ } return -EINVAL; diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c index a53d24fb7183..f1551e00eb12 100644 --- a/drivers/video/fbdev/pxafb.c +++ b/drivers/video/fbdev/pxafb.c @@ -1614,7 +1614,7 @@ static void set_ctrlr_state(struct pxafb_info *fbi, u_int state) */ if (old_state != C_DISABLE_PM) break; - /* fall through */ + fallthrough; case C_ENABLE: /* diff --git a/drivers/video/fbdev/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c index 9b3493846f4d..ce55b9d2e862 100644 --- a/drivers/video/fbdev/riva/fbdev.c +++ b/drivers/video/fbdev/riva/fbdev.c @@ -1093,7 +1093,7 @@ static int rivafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) break; case 9 ... 
15: var->green.length = 5; - /* fall through */ + fallthrough; case 16: var->bits_per_pixel = 16; /* The Riva128 supports RGB555 only */ diff --git a/drivers/video/fbdev/s3c-fb.c b/drivers/video/fbdev/s3c-fb.c index 9dc925054930..ba316bd56efd 100644 --- a/drivers/video/fbdev/s3c-fb.c +++ b/drivers/video/fbdev/s3c-fb.c @@ -284,7 +284,7 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var, /* 666 with one bit alpha/transparency */ var->transp.offset = 18; var->transp.length = 1; - /* fall through */ + fallthrough; case 18: var->bits_per_pixel = 32; @@ -312,7 +312,7 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var, case 25: var->transp.length = var->bits_per_pixel - 24; var->transp.offset = 24; - /* fall through */ + fallthrough; case 24: /* our 24bpp is unpacked, so 32bpp */ var->bits_per_pixel = 32; @@ -809,7 +809,7 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info) case FB_BLANK_POWERDOWN: wincon &= ~WINCONx_ENWIN; sfb->enabled &= ~(1 << index); - /* fall through - to FB_BLANK_NORMAL */ + fallthrough; /* to FB_BLANK_NORMAL */ case FB_BLANK_NORMAL: /* disable the DMA and display 0x0 (black) */ diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c index bda6cc313c8b..e31cf63b0a62 100644 --- a/drivers/video/fbdev/sa1100fb.c +++ b/drivers/video/fbdev/sa1100fb.c @@ -935,7 +935,7 @@ static void set_ctrlr_state(struct sa1100fb_info *fbi, u_int state) */ if (old_state != C_DISABLE_PM) break; - /* fall through */ + fallthrough; case C_ENABLE: /* diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c index 3fd87aeb6c79..a2442aae7e12 100644 --- a/drivers/video/fbdev/savage/savagefb_driver.c +++ b/drivers/video/fbdev/savage/savagefb_driver.c @@ -1860,7 +1860,7 @@ static int savage_init_hw(struct savagefb_par *par) if ((vga_in8(0x3d5, par) & 0xC0) == (0x01 << 6)) RamSavage4[1] = 8; - /*FALLTHROUGH*/ + fallthrough; case S3_SAVAGE2000: videoRam = RamSavage4[(config1 & 0xE0) >> 5] * 1024; diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c index 8a27d12e6ea8..c1043420dbd3 100644 --- a/drivers/video/fbdev/sh_mobile_lcdcfb.c +++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c @@ -1594,7 +1594,7 @@ sh_mobile_lcdc_overlay_fb_init(struct sh_mobile_lcdc_overlay *ovl) case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: info->fix.ypanstep = 2; - /* Fall through */ + fallthrough; case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: info->fix.xpanstep = 2; @@ -2085,7 +2085,7 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch, case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: info->fix.ypanstep = 2; - /* Fall through */ + fallthrough; case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: info->fix.xpanstep = 2; diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c index ac140962b1bf..03c736f6f3d0 100644 --- a/drivers/video/fbdev/sis/sis_main.c +++ b/drivers/video/fbdev/sis/sis_main.c @@ -1739,7 +1739,7 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd, if(ivideo->warncount++ < 10) printk(KERN_INFO "sisfb: Deprecated ioctl call received - update your application!\n"); - /* fall through */ + fallthrough; case SISFB_GET_INFO: /* For communication with X driver */ ivideo->sisfb_infoblock.sisfb_id = SISFB_ID; ivideo->sisfb_infoblock.sisfb_version = VER_MAJOR; @@ -1793,7 +1793,7 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd, if(ivideo->warncount++ < 10) printk(KERN_INFO "sisfb: Deprecated ioctl call received - update your 
application!\n"); - /* fall through */ + fallthrough; case SISFB_GET_VBRSTATUS: if(sisfb_CheckVBRetrace(ivideo)) return put_user((u32)1, argp); @@ -1804,7 +1804,7 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd, if(ivideo->warncount++ < 10) printk(KERN_INFO "sisfb: Deprecated ioctl call received - update your application!\n"); - /* fall through */ + fallthrough; case SISFB_GET_AUTOMAXIMIZE: if(ivideo->sisfb_max) return put_user((u32)1, argp); @@ -1815,7 +1815,7 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd, if(ivideo->warncount++ < 10) printk(KERN_INFO "sisfb: Deprecated ioctl call received - update your application!\n"); - /* fall through */ + fallthrough; case SISFB_SET_AUTOMAXIMIZE: if(get_user(gpu32, argp)) return -EFAULT; diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c index 3dd1b1d76e98..6a52eba64559 100644 --- a/drivers/video/fbdev/sm501fb.c +++ b/drivers/video/fbdev/sm501fb.c @@ -1005,7 +1005,7 @@ static int sm501fb_blank_crt(int blank_mode, struct fb_info *info) case FB_BLANK_POWERDOWN: ctrl &= ~SM501_DC_CRT_CONTROL_ENABLE; sm501_misc_control(fbi->dev->parent, SM501_MISC_DAC_POWER, 0); - /* fall through */ + fallthrough; case FB_BLANK_NORMAL: ctrl |= SM501_DC_CRT_CONTROL_BLANK; diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c index de953ddb6312..265865610edc 100644 --- a/drivers/video/fbdev/stifb.c +++ b/drivers/video/fbdev/stifb.c @@ -999,7 +999,7 @@ stifb_blank(int blank_mode, struct fb_info *info) case S9000_ID_HCRX: HYPER_ENABLE_DISABLE_DISPLAY(fb, enable); break; - case S9000_ID_A1659A: /* fall through */ + case S9000_ID_A1659A: case S9000_ID_TIMBER: case CRX24_OVERLAY_PLANES: default: @@ -1157,7 +1157,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) dev_name); goto out_err0; } - /* fall through */ + fallthrough; case S9000_ID_ARTIST: case S9000_ID_HCRX: case S9000_ID_TIMBER: diff --git a/drivers/video/fbdev/tdfxfb.c b/drivers/video/fbdev/tdfxfb.c index f73e26c18c09..f056d80f6359 100644 --- a/drivers/video/fbdev/tdfxfb.c +++ b/drivers/video/fbdev/tdfxfb.c @@ -523,7 +523,7 @@ static int tdfxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) case 32: var->transp.offset = 24; var->transp.length = 8; - /* fall through */ + fallthrough; case 24: var->red.offset = 16; var->green.offset = 8; diff --git a/drivers/video/fbdev/via/lcd.c b/drivers/video/fbdev/via/lcd.c index 3fea01db58d6..4a869402d120 100644 --- a/drivers/video/fbdev/via/lcd.c +++ b/drivers/video/fbdev/via/lcd.c @@ -744,7 +744,7 @@ static void set_lcd_output_path(int set_iga, int output_interface) viaparinfo->chip_info->gfx_chip_name)) viafb_write_reg_mask(CR97, VIACR, 0x84, BIT7 + BIT2 + BIT1 + BIT0); - /* fall through */ + fallthrough; case INTERFACE_DVP0: case INTERFACE_DVP1: case INTERFACE_DFP_HIGH: diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c index 00307b8693bf..5ec51445bee8 100644 --- a/drivers/video/fbdev/xen-fbfront.c +++ b/drivers/video/fbdev/xen-fbfront.c @@ -677,7 +677,7 @@ static void xenfb_backend_changed(struct xenbus_device *dev, case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; - /* fall through - Missed the backend's CLOSING state. 
*/ + fallthrough; /* Missed the backend's CLOSING state */ case XenbusStateClosing: xenbus_frontend_closed(dev); break; diff --git a/drivers/watchdog/sc1200wdt.c b/drivers/watchdog/sc1200wdt.c index 9673eb12dacd..f22ebe89fe13 100644 --- a/drivers/watchdog/sc1200wdt.c +++ b/drivers/watchdog/sc1200wdt.c @@ -234,7 +234,7 @@ static long sc1200wdt_ioctl(struct file *file, unsigned int cmd, return -EINVAL; timeout = new_timeout; sc1200wdt_write_data(WDTO, timeout); - /* fall through - and return the new timeout */ + fallthrough; /* and return the new timeout */ case WDIOC_GETTIMEOUT: return put_user(timeout * 60, p); diff --git a/drivers/watchdog/wdrtas.c b/drivers/watchdog/wdrtas.c index 184a06a74f83..c00627825de8 100644 --- a/drivers/watchdog/wdrtas.c +++ b/drivers/watchdog/wdrtas.c @@ -332,7 +332,7 @@ static long wdrtas_ioctl(struct file *file, unsigned int cmd, wdrtas_interval = i; else wdrtas_interval = wdrtas_get_interval(i); - /* fallthrough */ + fallthrough; case WDIOC_GETTIMEOUT: return put_user(wdrtas_interval, argp); diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c index b43b5595e988..72d725a0ab5c 100644 --- a/drivers/xen/pvcalls-front.c +++ b/drivers/xen/pvcalls-front.c @@ -1263,7 +1263,7 @@ static void pvcalls_front_changed(struct xenbus_device *dev, if (dev->state == XenbusStateClosed) break; /* Missed the backend's CLOSING state */ - /* fall through */ + fallthrough; case XenbusStateClosing: xenbus_frontend_closed(dev); break; diff --git a/drivers/xen/xen-acpi-memhotplug.c b/drivers/xen/xen-acpi-memhotplug.c index 7457213b8915..f914b72557ef 100644 --- a/drivers/xen/xen-acpi-memhotplug.c +++ b/drivers/xen/xen-acpi-memhotplug.c @@ -229,7 +229,7 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data) case ACPI_NOTIFY_BUS_CHECK: ACPI_DEBUG_PRINT((ACPI_DB_INFO, "\nReceived BUS CHECK notification for device\n")); - /* Fall Through */ + fallthrough; case ACPI_NOTIFY_DEVICE_CHECK: if (event == ACPI_NOTIFY_DEVICE_CHECK) ACPI_DEBUG_PRINT((ACPI_DB_INFO, diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c index f2115587855f..b500466a6c37 100644 --- a/drivers/xen/xen-pciback/xenbus.c +++ b/drivers/xen/xen-pciback/xenbus.c @@ -545,7 +545,7 @@ static void xen_pcibk_frontend_changed(struct xenbus_device *xdev, xenbus_switch_state(xdev, XenbusStateClosed); if (xenbus_dev_is_online(xdev)) break; - /* fall through - if not online */ + fallthrough; /* if not online */ case XenbusStateUnknown: dev_dbg(&xdev->dev, "frontend is gone! 
unregister device\n"); device_unregister(&xdev->dev); diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c index 75c0a2e9a6db..1e8cfd80a4e6 100644 --- a/drivers/xen/xen-scsiback.c +++ b/drivers/xen/xen-scsiback.c @@ -1185,7 +1185,7 @@ static void scsiback_frontend_changed(struct xenbus_device *dev, xenbus_switch_state(dev, XenbusStateClosed); if (xenbus_dev_is_online(dev)) break; - /* fall through - if not online */ + fallthrough; /* if not online */ case XenbusStateUnknown: device_unregister(&dev->dev); break; diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c index 15379089853b..480944606a3c 100644 --- a/drivers/xen/xenbus/xenbus_probe_frontend.c +++ b/drivers/xen/xenbus/xenbus_probe_frontend.c @@ -401,12 +401,12 @@ static void xenbus_reset_frontend(char *fe, char *be, int be_state) case XenbusStateConnected: xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosing); xenbus_reset_wait_for_backend(be, XenbusStateClosing); - /* fall through */ + fallthrough; case XenbusStateClosing: xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosed); xenbus_reset_wait_for_backend(be, XenbusStateClosed); - /* fall through */ + fallthrough; case XenbusStateClosed: xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateInitialising); diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index 92cd1d80218d..3576123d8299 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c @@ -213,7 +213,7 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl) break; default: WARN_ONCE(1, "unknown lock status code: %d\n", status); - /* fall through */ + fallthrough; case P9_LOCK_ERROR: case P9_LOCK_GRACE: res = -ENOLCK; diff --git a/fs/adfs/dir_f.c b/fs/adfs/dir_f.c index 30d526fecc3f..05e963402e25 100644 --- a/fs/adfs/dir_f.c +++ b/fs/adfs/dir_f.c @@ -18,11 +18,11 @@ static inline unsigned int adfs_readval(unsigned char *p, int len) switch (len) { case 4: val |= p[3] << 24; - /* fall through */ + fallthrough; case 3: val |= p[2] << 16; - /* fall through */ + fallthrough; case 2: val |= p[1] << 8; - /* fall through */ + fallthrough; default: val |= p[0]; } return val; @@ -32,11 +32,11 @@ static inline void adfs_writeval(unsigned char *p, int len, unsigned int val) { switch (len) { case 4: p[3] = val >> 24; - /* fall through */ + fallthrough; case 3: p[2] = val >> 16; - /* fall through */ + fallthrough; case 2: p[1] = val >> 8; - /* fall through */ + fallthrough; default: p[0] = val; } } diff --git a/fs/affs/inode.c b/fs/affs/inode.c index a346cf7659f1..044412110b52 100644 --- a/fs/affs/inode.c +++ b/fs/affs/inode.c @@ -93,7 +93,7 @@ struct inode *affs_iget(struct super_block *sb, unsigned long ino) case ST_ROOT: inode->i_uid = sbi->s_uid; inode->i_gid = sbi->s_gid; - /* fall through */ + fallthrough; case ST_USERDIR: if (be32_to_cpu(tail->stype) == ST_USERDIR || affs_test_opt(sbi->s_flags, SF_SETMODE)) { diff --git a/fs/affs/super.c b/fs/affs/super.c index 47107c6712a6..a100cd9950c8 100644 --- a/fs/affs/super.c +++ b/fs/affs/super.c @@ -474,7 +474,7 @@ got_root: case MUFS_INTLFFS: case MUFS_DCFFS: affs_set_opt(sbi->s_flags, SF_MUFS); - /* fall thru */ + fallthrough; case FS_INTLFFS: case FS_DCFFS: affs_set_opt(sbi->s_flags, SF_INTL); @@ -486,7 +486,7 @@ got_root: break; case MUFS_OFS: affs_set_opt(sbi->s_flags, SF_MUFS); - /* fall through */ + fallthrough; case FS_OFS: affs_set_opt(sbi->s_flags, SF_OFS); sb->s_flags |= SB_NOEXEC; @@ -494,7 +494,7 @@ got_root: case MUFS_DCOFS: case MUFS_INTLOFS: affs_set_opt(sbi->s_flags, 
SF_MUFS); - /* fall through */ + fallthrough; case FS_DCOFS: case FS_INTLOFS: affs_set_opt(sbi->s_flags, SF_INTL); diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index bef413818af7..a4e9e6e07e93 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c @@ -252,7 +252,7 @@ static int afs_deliver_cb_callback(struct afs_call *call) call->unmarshall++; /* extract the FID array and its count in two steps */ - /* fall through */ + fallthrough; case 1: _debug("extract FID count"); ret = afs_extract_data(call, true); @@ -271,7 +271,7 @@ static int afs_deliver_cb_callback(struct afs_call *call) afs_extract_to_buf(call, call->count * 3 * 4); call->unmarshall++; - /* Fall through */ + fallthrough; case 2: _debug("extract FID array"); ret = afs_extract_data(call, true); @@ -297,7 +297,7 @@ static int afs_deliver_cb_callback(struct afs_call *call) call->unmarshall++; /* extract the callback array and its count in two steps */ - /* fall through */ + fallthrough; case 3: _debug("extract CB count"); ret = afs_extract_data(call, true); @@ -312,7 +312,7 @@ static int afs_deliver_cb_callback(struct afs_call *call) iov_iter_discard(&call->def_iter, READ, call->count2 * 3 * 4); call->unmarshall++; - /* Fall through */ + fallthrough; case 4: _debug("extract discard %zu/%u", iov_iter_count(call->iter), call->count2 * 3 * 4); @@ -391,7 +391,7 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call) afs_extract_to_buf(call, 11 * sizeof(__be32)); call->unmarshall++; - /* Fall through */ + fallthrough; case 1: _debug("extract UUID"); ret = afs_extract_data(call, false); @@ -503,7 +503,7 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call) afs_extract_to_buf(call, 11 * sizeof(__be32)); call->unmarshall++; - /* Fall through */ + fallthrough; case 1: _debug("extract UUID"); ret = afs_extract_data(call, false); @@ -618,7 +618,7 @@ static int afs_deliver_yfs_cb_callback(struct afs_call *call) call->unmarshall++; /* extract the FID array and its count in two steps */ - /* Fall through */ + fallthrough; case 1: _debug("extract FID count"); ret = afs_extract_data(call, true); @@ -637,7 +637,7 @@ static int afs_deliver_yfs_cb_callback(struct afs_call *call) afs_extract_to_buf(call, size); call->unmarshall++; - /* Fall through */ + fallthrough; case 2: _debug("extract FID array"); ret = afs_extract_data(call, false); diff --git a/fs/afs/file.c b/fs/afs/file.c index 6f6ed1605cfe..371d1488cc54 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -311,7 +311,7 @@ int afs_page_filler(void *data, struct page *page) case -ENOBUFS: _debug("cache said ENOBUFS"); - /* fall through */ + fallthrough; default: go_on: req = kzalloc(struct_size(req, array, 1), GFP_KERNEL); diff --git a/fs/afs/flock.c b/fs/afs/flock.c index ffb8575345ca..eff82a6839e4 100644 --- a/fs/afs/flock.c +++ b/fs/afs/flock.c @@ -376,7 +376,7 @@ again: spin_unlock(&vnode->lock); return; - /* Fall through */ + fallthrough; default: /* Looks like a lock request was withdrawn. 
*/ spin_unlock(&vnode->lock); diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index acb4d0ca2649..1d95ed9dd86e 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c @@ -320,7 +320,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) call->tmp_u = htonl(0); afs_extract_to_tmp(call); } - /* Fall through */ + fallthrough; /* extract the returned data length */ case 1: @@ -348,7 +348,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) call->bvec[0].bv_page = req->pages[req->index]; iov_iter_bvec(&call->def_iter, READ, call->bvec, 1, size); ASSERTCMP(size, <=, PAGE_SIZE); - /* Fall through */ + fallthrough; /* extract the returned data */ case 2: @@ -375,7 +375,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) /* Discard any excess data the server gave us */ afs_extract_discard(call, req->actual_len - req->len); call->unmarshall = 3; - /* Fall through */ + fallthrough; case 3: _debug("extract discard %zu/%llu", @@ -388,7 +388,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) no_more_data: call->unmarshall = 4; afs_extract_to_buf(call, (21 + 3 + 6) * 4); - /* Fall through */ + fallthrough; /* extract the metadata */ case 4: @@ -1343,7 +1343,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) case 0: call->unmarshall++; afs_extract_to_buf(call, 12 * 4); - /* Fall through */ + fallthrough; /* extract the returned status record */ case 1: @@ -1356,7 +1356,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) xdr_decode_AFSFetchVolumeStatus(&bp, &op->volstatus.vs); call->unmarshall++; afs_extract_to_tmp(call); - /* Fall through */ + fallthrough; /* extract the volume name length */ case 2: @@ -1371,7 +1371,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the volume name */ case 3: @@ -1385,7 +1385,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) _debug("volname '%s'", p); afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the offline message length */ case 4: @@ -1400,7 +1400,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the offline message */ case 5: @@ -1415,7 +1415,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the message of the day length */ case 6: @@ -1430,7 +1430,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the message of the day */ case 7: @@ -1682,7 +1682,7 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call) case 0: afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* Extract the capabilities word count */ case 1: @@ -1696,7 +1696,7 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call) call->count2 = count; afs_extract_discard(call, count * sizeof(__be32)); call->unmarshall++; - /* Fall through */ + fallthrough; /* Extract capabilities words */ case 2: @@ -1776,7 +1776,7 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) 
case 0: afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* Extract the file status count and array in two steps */ case 1: @@ -1794,7 +1794,7 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) call->unmarshall++; more_counts: afs_extract_to_buf(call, 21 * sizeof(__be32)); - /* Fall through */ + fallthrough; case 2: _debug("extract status array %u", call->count); @@ -1824,7 +1824,7 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) call->count = 0; call->unmarshall++; afs_extract_to_tmp(call); - /* Fall through */ + fallthrough; /* Extract the callback count and array in two steps */ case 3: @@ -1841,7 +1841,7 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) call->unmarshall++; more_cbs: afs_extract_to_buf(call, 3 * sizeof(__be32)); - /* Fall through */ + fallthrough; case 4: _debug("extract CB array"); @@ -1870,7 +1870,7 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) afs_extract_to_buf(call, 6 * sizeof(__be32)); call->unmarshall++; - /* Fall through */ + fallthrough; case 5: ret = afs_extract_data(call, false); @@ -1974,7 +1974,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call) case 0: afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the returned data length */ case 1: @@ -1992,7 +1992,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call) acl->size = call->count2; afs_extract_begin(call, acl->data, size); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the returned data */ case 2: @@ -2002,7 +2002,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call) afs_extract_to_buf(call, (21 + 6) * 4); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the metadata */ case 3: diff --git a/fs/afs/misc.c b/fs/afs/misc.c index 5334f1bd2bca..1d1a8debe472 100644 --- a/fs/afs/misc.c +++ b/fs/afs/misc.c @@ -120,42 +120,42 @@ void afs_prioritise_error(struct afs_error *e, int error, u32 abort_code) if (e->error == -ETIMEDOUT || e->error == -ETIME) return; - /* Fall through */ + fallthrough; case -ETIMEDOUT: case -ETIME: if (e->error == -ENOMEM || e->error == -ENONET) return; - /* Fall through */ + fallthrough; case -ENOMEM: case -ENONET: if (e->error == -ERFKILL) return; - /* Fall through */ + fallthrough; case -ERFKILL: if (e->error == -EADDRNOTAVAIL) return; - /* Fall through */ + fallthrough; case -EADDRNOTAVAIL: if (e->error == -ENETUNREACH) return; - /* Fall through */ + fallthrough; case -ENETUNREACH: if (e->error == -EHOSTUNREACH) return; - /* Fall through */ + fallthrough; case -EHOSTUNREACH: if (e->error == -EHOSTDOWN) return; - /* Fall through */ + fallthrough; case -EHOSTDOWN: if (e->error == -ECONNREFUSED) return; - /* Fall through */ + fallthrough; case -ECONNREFUSED: if (e->error == -ECONNRESET) return; - /* Fall through */ + fallthrough; case -ECONNRESET: /* Responded, but call expired. 
*/ if (e->responded) return; diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c index 6a0935cb822f..d83f13c44b92 100644 --- a/fs/afs/rotate.c +++ b/fs/afs/rotate.c @@ -281,7 +281,7 @@ bool afs_select_fileserver(struct afs_operation *op) case -ETIME: if (op->error != -EDESTADDRREQ) goto iterate_address; - /* Fall through */ + fallthrough; case -ERFKILL: case -EADDRNOTAVAIL: case -ENETUNREACH: diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 8fc8fb406a5a..8be709cb8542 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -568,7 +568,7 @@ static void afs_deliver_to_call(struct afs_call *call) case -EIO: pr_err("kAFS: Call %u in bad state %u\n", call->debug_id, state); - /* Fall through */ + fallthrough; case -ENODATA: case -EBADMSG: case -EMSGSIZE: @@ -669,7 +669,7 @@ long afs_wait_for_call_to_complete(struct afs_call *call, ret = call->ret0; call->ret0 = 0; - /* Fall through */ + fallthrough; case -ECONNABORTED: ac->responded = true; break; @@ -872,7 +872,7 @@ void afs_send_empty_reply(struct afs_call *call) _debug("oom"); rxrpc_kernel_abort_call(net->socket, call->rxcall, RX_USER_ABORT, -ENOMEM, "KOO"); - /* Fall through */ + fallthrough; default: _leave(" [error]"); return; diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c index fd82850cd424..dc9327332f06 100644 --- a/fs/afs/vlclient.c +++ b/fs/afs/vlclient.c @@ -196,7 +196,7 @@ static int afs_deliver_vl_get_addrs_u(struct afs_call *call) /* Extract the returned uuid, uniquifier, nentries and * blkaddrs size */ - /* Fall through */ + fallthrough; case 1: ret = afs_extract_data(call, true); if (ret < 0) @@ -221,7 +221,7 @@ static int afs_deliver_vl_get_addrs_u(struct afs_call *call) count = min(call->count, 4U); afs_extract_to_buf(call, count * sizeof(__be32)); - /* Fall through - and extract entries */ + fallthrough; /* and extract entries */ case 2: ret = afs_extract_data(call, call->count > 4); if (ret < 0) @@ -324,7 +324,7 @@ static int afs_deliver_vl_get_capabilities(struct afs_call *call) afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through - and extract the capabilities word count */ + fallthrough; /* and extract the capabilities word count */ case 1: ret = afs_extract_data(call, true); if (ret < 0) @@ -337,7 +337,7 @@ static int afs_deliver_vl_get_capabilities(struct afs_call *call) call->unmarshall++; afs_extract_discard(call, count * sizeof(__be32)); - /* Fall through - and extract capabilities words */ + fallthrough; /* and extract capabilities words */ case 2: ret = afs_extract_data(call, false); if (ret < 0) @@ -436,7 +436,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call) /* Extract the returned uuid, uniquifier, fsEndpoints count and * either the first fsEndpoint type or the volEndpoints * count if there are no fsEndpoints. */ - /* Fall through */ + fallthrough; case 1: ret = afs_extract_data(call, true); if (ret < 0) @@ -475,7 +475,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call) afs_extract_to_buf(call, size); call->unmarshall = 2; - /* Fall through - and extract fsEndpoints[] entries */ + fallthrough; /* and extract fsEndpoints[] entries */ case 2: ret = afs_extract_data(call, true); if (ret < 0) @@ -526,7 +526,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call) * extract the type of the next endpoint when we extract the * data of the current one, but this is the first... 
*/ - /* Fall through */ + fallthrough; case 3: ret = afs_extract_data(call, true); if (ret < 0) @@ -552,7 +552,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call) afs_extract_to_buf(call, size); call->unmarshall = 4; - /* Fall through - and extract volEndpoints[] entries */ + fallthrough; /* and extract volEndpoints[] entries */ case 4: ret = afs_extract_data(call, true); if (ret < 0) @@ -587,7 +587,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call) afs_extract_discard(call, 0); call->unmarshall = 5; - /* Fall through - Done */ + fallthrough; /* Done */ case 5: ret = afs_extract_data(call, false); if (ret < 0) @@ -663,7 +663,7 @@ static int afs_deliver_yfsvl_get_cell_name(struct afs_call *call) afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through - and extract the cell name length */ + fallthrough; /* and extract the cell name length */ case 1: ret = afs_extract_data(call, true); if (ret < 0) @@ -685,7 +685,7 @@ static int afs_deliver_yfsvl_get_cell_name(struct afs_call *call) afs_extract_begin(call, cell_name, namesz); call->unmarshall++; - /* Fall through - and extract cell name */ + fallthrough; /* and extract cell name */ case 2: ret = afs_extract_data(call, true); if (ret < 0) @@ -694,7 +694,7 @@ static int afs_deliver_yfsvl_get_cell_name(struct afs_call *call) afs_extract_discard(call, call->count2); call->unmarshall++; - /* Fall through - and extract padding */ + fallthrough; /* and extract padding */ case 3: ret = afs_extract_data(call, false); if (ret < 0) diff --git a/fs/afs/write.c b/fs/afs/write.c index a121c247d95a..4b2265cb1891 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -609,7 +609,7 @@ no_more: default: pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret); - /* Fall through */ + fallthrough; case -EACCES: case -EPERM: case -ENOKEY: diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c index 8c24fdc899e3..3b1239b7e90d 100644 --- a/fs/afs/yfsclient.c +++ b/fs/afs/yfsclient.c @@ -373,7 +373,7 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) req->offset = req->pos & (PAGE_SIZE - 1); afs_extract_to_tmp64(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the returned data length */ case 1: @@ -401,7 +401,7 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) call->bvec[0].bv_page = req->pages[req->index]; iov_iter_bvec(&call->def_iter, READ, call->bvec, 1, size); ASSERTCMP(size, <=, PAGE_SIZE); - /* Fall through */ + fallthrough; /* extract the returned data */ case 2: @@ -428,7 +428,7 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) /* Discard any excess data the server gave us */ afs_extract_discard(call, req->actual_len - req->len); call->unmarshall = 3; - /* Fall through */ + fallthrough; case 3: _debug("extract discard %zu/%llu", @@ -444,7 +444,7 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSCallBack) + sizeof(struct yfs_xdr_YFSVolSync)); - /* Fall through */ + fallthrough; /* extract the metadata */ case 4: @@ -461,7 +461,7 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) req->file_size = vp->scb.status.size; call->unmarshall++; - /* Fall through */ + fallthrough; case 5: break; @@ -1262,7 +1262,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) case 0: call->unmarshall++; afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchVolumeStatus)); - /* Fall through */ + fallthrough; /* extract the returned status record */ 
case 1: @@ -1275,7 +1275,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) xdr_decode_YFSFetchVolumeStatus(&bp, &op->volstatus.vs); call->unmarshall++; afs_extract_to_tmp(call); - /* Fall through */ + fallthrough; /* extract the volume name length */ case 2: @@ -1290,7 +1290,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the volume name */ case 3: @@ -1304,7 +1304,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) _debug("volname '%s'", p); afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the offline message length */ case 4: @@ -1319,7 +1319,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the offline message */ case 5: @@ -1334,7 +1334,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the message of the day length */ case 6: @@ -1349,7 +1349,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the message of the day */ case 7: @@ -1363,7 +1363,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) _debug("motd '%s'", p); call->unmarshall++; - /* Fall through */ + fallthrough; case 8: break; @@ -1622,7 +1622,7 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) case 0: afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* Extract the file status count and array in two steps */ case 1: @@ -1640,7 +1640,7 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) call->unmarshall++; more_counts: afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchStatus)); - /* Fall through */ + fallthrough; case 2: _debug("extract status array %u", call->count); @@ -1670,7 +1670,7 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) call->count = 0; call->unmarshall++; afs_extract_to_tmp(call); - /* Fall through */ + fallthrough; /* Extract the callback count and array in two steps */ case 3: @@ -1687,7 +1687,7 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) call->unmarshall++; more_cbs: afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSCallBack)); - /* Fall through */ + fallthrough; case 4: _debug("extract CB array"); @@ -1716,7 +1716,7 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSVolSync)); call->unmarshall++; - /* Fall through */ + fallthrough; case 5: ret = afs_extract_data(call, false); @@ -1727,7 +1727,7 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) xdr_decode_YFSVolSync(&bp, &op->volsync); call->unmarshall++; - /* Fall through */ + fallthrough; case 6: break; @@ -1804,7 +1804,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) case 0: afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* Extract the file ACL length */ case 1: @@ -1826,7 +1826,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) 
afs_extract_discard(call, size); } call->unmarshall++; - /* Fall through */ + fallthrough; /* Extract the file ACL */ case 2: @@ -1836,7 +1836,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* Extract the volume ACL length */ case 3: @@ -1858,7 +1858,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) afs_extract_discard(call, size); } call->unmarshall++; - /* Fall through */ + fallthrough; /* Extract the volume ACL */ case 4: @@ -1871,7 +1871,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the metadata */ case 5: @@ -1886,7 +1886,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) xdr_decode_YFSVolSync(&bp, &op->volsync); call->unmarshall++; - /* Fall through */ + fallthrough; case 6: break; diff --git a/fs/aio.c b/fs/aio.c index 5736bff48e9e..d5ec30385566 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -1511,7 +1511,7 @@ static inline void aio_rw_done(struct kiocb *req, ssize_t ret) * may be already running. Just fail this IO with EINTR. */ ret = -EINTR; - /*FALLTHRU*/ + fallthrough; default: req->ki_complete(req, ret, 0); } diff --git a/fs/buffer.c b/fs/buffer.c index d468ed9981e0..50bbc99e3d96 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -1958,7 +1958,7 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh, */ set_buffer_new(bh); set_buffer_unwritten(bh); - /* FALLTHRU */ + fallthrough; case IOMAP_MAPPED: if ((iomap->flags & IOMAP_F_NEW) || offset >= i_size_read(inode)) diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 060bdcc5ce32..9ff9d10a60ff 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -1745,7 +1745,7 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) case -ENOENT: if (d_really_is_negative(dentry)) valid = 1; - /* Fallthrough */ + fallthrough; default: break; } diff --git a/fs/ceph/file.c b/fs/ceph/file.c index d51c3f2fdca0..30cd00265181 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -252,7 +252,7 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode) case S_IFREG: ceph_fscache_register_inode_cookie(inode); ceph_fscache_file_set_cookie(inode, file); - /* fall through */ + fallthrough; case S_IFDIR: ret = ceph_init_file_info(inode, file, fmode, S_ISDIR(inode->i_mode)); diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 0e763d2dcf16..0496934feecb 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -581,7 +581,7 @@ should_set_ext_sec_flag(enum securityEnum sectype) if (global_secflags & (CIFSSEC_MAY_KRB5 | CIFSSEC_MAY_NTLMSSP)) return true; - /* Fallthrough */ + fallthrough; default: return false; } diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index a275ee399dce..51c6b7880a70 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1378,25 +1378,25 @@ static int cifs_parse_security_flavors(char *value, return 1; case Opt_sec_krb5i: vol->sign = true; - /* Fallthrough */ + fallthrough; case Opt_sec_krb5: vol->sectype = Kerberos; break; case Opt_sec_ntlmsspi: vol->sign = true; - /* Fallthrough */ + fallthrough; case Opt_sec_ntlmssp: vol->sectype = RawNTLMSSP; break; case Opt_sec_ntlmi: vol->sign = true; - /* Fallthrough */ + fallthrough; case Opt_ntlm: vol->sectype = NTLM; break; case Opt_sec_ntlmv2i: vol->sign = true; - /* Fallthrough */ + fallthrough; case Opt_sec_ntlmv2: vol->sectype = 
NTLMv2; break; @@ -2187,7 +2187,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, vol->password = NULL; break; } - /* Fallthrough - to Opt_pass below.*/ + fallthrough; /* to Opt_pass below */ case Opt_pass: /* Obtain the value string */ value = strchr(data, '='); diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 69cd5856621b..de564368a887 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -798,7 +798,7 @@ cifs_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) if ((server->sec_kerberos || server->sec_mskerberos) && (global_secflags & CIFSSEC_MAY_KRB5)) return Kerberos; - /* Fallthrough */ + fallthrough; default: return Unspecified; } @@ -815,7 +815,7 @@ cifs_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) default: break; } - /* Fallthrough - to attempt LANMAN authentication next */ + fallthrough; /* to attempt LANMAN authentication next */ case CIFS_NEGFLAVOR_LANMAN: switch (requested) { case LANMAN: @@ -823,7 +823,7 @@ cifs_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) case Unspecified: if (global_secflags & CIFSSEC_MAY_LANMAN) return LANMAN; - /* Fallthrough */ + fallthrough; default: return Unspecified; } diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 667d70aa335f..96c172d94fba 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -1101,7 +1101,7 @@ smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) if ((server->sec_kerberos || server->sec_mskerberos) && (global_secflags & CIFSSEC_MAY_KRB5)) return Kerberos; - /* Fallthrough */ + fallthrough; default: return Unspecified; } diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index cb733652ecca..ca2273727225 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -1688,11 +1688,11 @@ static loff_t configfs_dir_lseek(struct file *file, loff_t offset, int whence) switch (whence) { case 1: offset += file->f_pos; - /* fall through */ + fallthrough; case 0: if (offset >= 0) break; - /* fall through */ + fallthrough; default: return -EINVAL; } diff --git a/fs/dax.c b/fs/dax.c index 95341af1a966..994ab66a9907 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -1367,7 +1367,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp, ret = dax_load_hole(&xas, mapping, &entry, vmf); goto finish_iomap; } - /*FALLTHRU*/ + fallthrough; default: WARN_ON_ONCE(1); error = -EIO; diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 18d81599522f..002123efc6b0 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -5817,7 +5817,7 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua, break; case -EAGAIN: error = 0; - /* fall through */ + fallthrough; default: __put_lkb(ls, lkb); goto out; diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c index 7d40d78ea864..ae325541884e 100644 --- a/fs/erofs/zmap.c +++ b/fs/erofs/zmap.c @@ -359,7 +359,7 @@ static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m, return z_erofs_extent_lookback(m, m->delta[0]); case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: map->m_flags &= ~EROFS_MAP_ZIPPED; - /* fallthrough */ + fallthrough; case Z_EROFS_VLE_CLUSTER_TYPE_HEAD: map->m_la = (lcn << lclusterbits) | m->clusterofs; break; @@ -416,7 +416,7 @@ int z_erofs_map_blocks_iter(struct inode *inode, case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: if (endoff >= m.clusterofs) map->m_flags &= ~EROFS_MAP_ZIPPED; - /* fallthrough */ + fallthrough; case Z_EROFS_VLE_CLUSTER_TYPE_HEAD: if (endoff >= m.clusterofs) { map->m_la = (m.lcn << lclusterbits) | m.clusterofs; @@ -433,7 +433,7 @@ int 
z_erofs_map_blocks_iter(struct inode *inode, end = (m.lcn << lclusterbits) | m.clusterofs; map->m_flags |= EROFS_MAP_FULL_MAPPED; m.delta[0] = 1; - /* fallthrough */ + fallthrough; case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: /* get the correspoinding first chunk */ err = z_erofs_extent_lookback(&m, m.delta[0]); diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 80662e1f7889..415c21f0e750 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -1241,7 +1241,7 @@ do_indirects: mark_inode_dirty(inode); ext2_free_branches(inode, &nr, &nr+1, 1); } - /* fall through */ + fallthrough; case EXT2_IND_BLOCK: nr = i_data[EXT2_DIND_BLOCK]; if (nr) { @@ -1249,7 +1249,7 @@ do_indirects: mark_inode_dirty(inode); ext2_free_branches(inode, &nr, &nr+1, 2); } - /* fall through */ + fallthrough; case EXT2_DIND_BLOCK: nr = i_data[EXT2_TIND_BLOCK]; if (nr) { diff --git a/fs/ext2/super.c b/fs/ext2/super.c index dda860562ca3..7fab2b3b5b39 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -587,7 +587,7 @@ static int parse_options(char *options, struct super_block *sb, case Opt_xip: ext2_msg(sb, KERN_INFO, "use dax instead of xip"); set_opt(opts->s_mount_opt, XIP); - /* Fall through */ + fallthrough; case Opt_dax: #ifdef CONFIG_FS_DAX ext2_msg(sb, KERN_WARNING, diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 16322ea5b463..d9e52a7f3702 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -2646,7 +2646,7 @@ static inline void __mark_inode_dirty_flag(struct inode *inode, case FI_NEW_INODE: if (set) return; - /* fall through */ + fallthrough; case FI_DATA_EXIST: case FI_INLINE_DOTS: case FI_PIN_FILE: diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 9bbaa2614679..3ad7bdbda5ca 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -618,10 +618,10 @@ pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs) switch (dn->max_level) { case 3: base += 2 * indirect_blks; - /* fall through */ + fallthrough; case 2: base += 2 * direct_blks; - /* fall through */ + fallthrough; case 1: base += direct_index; break; diff --git a/fs/fcntl.c b/fs/fcntl.c index 2e4c0fa2074b..19ac5baad50f 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -362,7 +362,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg, case F_OFD_SETLK: case F_OFD_SETLKW: #endif - /* Fallthrough */ + fallthrough; case F_SETLK: case F_SETLKW: if (copy_from_user(&flock, argp, sizeof(flock))) @@ -771,7 +771,7 @@ static void send_sigio_to_task(struct task_struct *p, if (!do_send_sig_info(signum, &si, p, type)) break; } - /* fall-through - fall back on the old plain SIGIO signal */ + fallthrough; /* fall back on the old plain SIGIO signal */ case 0: do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, type); } diff --git a/fs/fs_context.c b/fs/fs_context.c index 7d5c5dd2b1d5..2834d1afa6e8 100644 --- a/fs/fs_context.c +++ b/fs/fs_context.c @@ -521,7 +521,7 @@ static int legacy_parse_param(struct fs_context *fc, struct fs_parameter *param) switch (param->type) { case fs_value_is_string: len = 1 + param->size; - /* Fall through */ + fallthrough; case fs_value_is_flag: len += strlen(param->key); break; diff --git a/fs/fsopen.c b/fs/fsopen.c index 2fa3f241b762..27a890aa493a 100644 --- a/fs/fsopen.c +++ b/fs/fsopen.c @@ -412,7 +412,7 @@ SYSCALL_DEFINE5(fsconfig, break; case FSCONFIG_SET_PATH_EMPTY: lookup_flags = LOOKUP_EMPTY; - /* fallthru */ + fallthrough; case FSCONFIG_SET_PATH: param.type = fs_value_is_filename; param.name = getname_flags(_value, lookup_flags, NULL); diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 770f3a720db9..0f69fbd4af66 100644 --- 
a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -746,7 +746,7 @@ static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap, } if (n == 0) break; - /* fall through - To branching from existing tree */ + fallthrough; /* To branching from existing tree */ case ALLOC_GROW_DEPTH: if (i > 1 && i < mp->mp_fheight) gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]); @@ -757,7 +757,7 @@ static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap, state = ALLOC_DATA; if (n == 0) break; - /* fall through - To tree complete, adding data blocks */ + fallthrough; /* To tree complete, adding data blocks */ case ALLOC_DATA: BUG_ON(n > dblks); BUG_ON(mp->mp_bh[end_of_metadata] == NULL); diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index 4b67d47a7e00..6e173ae378c4 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -1599,7 +1599,7 @@ static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state) case GFS2_QUOTA_ON: state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED; state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED; - /*FALLTHRU*/ + fallthrough; case GFS2_QUOTA_ACCOUNT: state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED | QCI_SYSFILE; diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c index 61eec628805d..0350dc7821bf 100644 --- a/fs/hfsplus/wrapper.c +++ b/fs/hfsplus/wrapper.c @@ -195,7 +195,7 @@ reread: switch (sbi->s_vhdr->signature) { case cpu_to_be16(HFSPLUS_VOLHEAD_SIGX): set_bit(HFSPLUS_SB_HFSX, &sbi->flags); - /*FALLTHRU*/ + fallthrough; case cpu_to_be16(HFSPLUS_VOLHEAD_SIG): break; case cpu_to_be16(HFSP_WRAP_MAGIC): diff --git a/fs/io_uring.c b/fs/io_uring.c index 91e2cc8414f9..8a53af8e5fe2 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2563,7 +2563,7 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret) * IO with EINTR. */ ret = -EINTR; - /* fall through */ + fallthrough; default: kiocb->ki_complete(kiocb, ret, 0); } diff --git a/fs/iomap/seek.c b/fs/iomap/seek.c index 89f61d93c0bc..107ee80c3568 100644 --- a/fs/iomap/seek.c +++ b/fs/iomap/seek.c @@ -127,7 +127,7 @@ iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length, SEEK_HOLE); if (offset < 0) return length; - /* fall through */ + fallthrough; case IOMAP_HOLE: *(loff_t *)data = offset; return 0; @@ -175,7 +175,7 @@ iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length, SEEK_DATA); if (offset < 0) return length; - /*FALLTHRU*/ + fallthrough; default: *(loff_t *)data = offset; return 0; diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c index ab8cdd9e9325..78858f6e9583 100644 --- a/fs/jffs2/fs.c +++ b/fs/jffs2/fs.c @@ -341,7 +341,7 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino) rdev = old_decode_dev(je16_to_cpu(jdev.old_id)); else rdev = new_decode_dev(je32_to_cpu(jdev.new_id)); - /* fall through */ + fallthrough; case S_IFSOCK: case S_IFIFO: diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c index bccfc40b3a74..2f6f0b140c05 100644 --- a/fs/jffs2/readinode.c +++ b/fs/jffs2/readinode.c @@ -1273,7 +1273,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, dbg_readinode("symlink's target '%s' cached\n", f->target); } - /* fall through... 
*/ + fallthrough; case S_IFBLK: case S_IFCHR: diff --git a/fs/libfs.c b/fs/libfs.c index 4d08edf19c78..e0d42e977d9a 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -137,11 +137,11 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence) switch (whence) { case 1: offset += file->f_pos; - /* fall through */ + fallthrough; case 0: if (offset >= 0) break; - /* fall through */ + fallthrough; default: return -EINVAL; } diff --git a/fs/locks.c b/fs/locks.c index 8fc0542f5132..1f84a03601fe 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1499,7 +1499,7 @@ static void lease_clear_pending(struct file_lock *fl, int arg) switch (arg) { case F_UNLCK: fl->fl_flags &= ~FL_UNLOCK_PENDING; - /* fall through */ + fallthrough; case F_RDLCK: fl->fl_flags &= ~FL_DOWNGRADE_PENDING; } @@ -2525,7 +2525,7 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd, cmd = F_SETLKW; file_lock->fl_flags |= FL_OFDLCK; file_lock->fl_owner = filp; - /* Fallthrough */ + fallthrough; case F_SETLKW: file_lock->fl_flags |= FL_SLEEP; } @@ -2656,7 +2656,7 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd, cmd = F_SETLKW64; file_lock->fl_flags |= FL_OFDLCK; file_lock->fl_owner = filp; - /* Fallthrough */ + fallthrough; case F_SETLKW64: file_lock->fl_flags |= FL_SLEEP; } diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index d1a0e2c8b1b4..08108b6d2fa1 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c @@ -753,7 +753,7 @@ out: case -ENODEV: /* Our extent block devices are unavailable */ set_bit(NFS_LSEG_UNAVAILABLE, &lseg->pls_flags); - /* Fall through */ + fallthrough; case 0: return lseg; default: diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index a12f42e7d8c7..e732580fe47b 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1181,7 +1181,7 @@ int nfs_lookup_verify_inode(struct inode *inode, unsigned int flags) /* A NFSv4 OPEN will revalidate later */ if (server->caps & NFS_CAP_ATOMIC_OPEN) goto out; - /* Fallthrough */ + fallthrough; case S_IFDIR: if (server->flags & NFS_MOUNT_NOCTO) break; diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index a13e69009f19..7f5aa0403e16 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -187,7 +187,7 @@ static int filelayout_async_handle_error(struct rpc_task *task, pnfs_error_mark_layout_for_return(inode, lseg); pnfs_set_lo_fail(lseg); rpc_wake_up(&tbl->slot_tbl_waitq); - /* fall through */ + fallthrough; default: reset: dprintk("%s Retry through MDS. 
Error %d\n", __func__, diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index 965145592750..ff8965d1a4d4 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -1133,7 +1133,7 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task, nfs4_delete_deviceid(devid->ld, devid->nfs_client, &devid->deviceid); rpc_wake_up(&tbl->slot_tbl_waitq); - /* fall through */ + fallthrough; default: if (ff_layout_avoid_mds_available_ds(lseg)) return -NFS4ERR_RESET_TO_PNFS; @@ -1260,7 +1260,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg, */ if (opnum == OP_READ) break; - /* Fallthrough */ + fallthrough; default: pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg); diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c index 66949da0e827..524812984e2d 100644 --- a/fs/nfs/fs_context.c +++ b/fs/nfs/fs_context.c @@ -651,21 +651,21 @@ static int nfs_fs_context_parse_param(struct fs_context *fc, switch (lookup_constant(nfs_xprt_protocol_tokens, param->string, -1)) { case Opt_xprt_udp6: protofamily = AF_INET6; - /* fall through */ + fallthrough; case Opt_xprt_udp: ctx->flags &= ~NFS_MOUNT_TCP; ctx->nfs_server.protocol = XPRT_TRANSPORT_UDP; break; case Opt_xprt_tcp6: protofamily = AF_INET6; - /* fall through */ + fallthrough; case Opt_xprt_tcp: ctx->flags |= NFS_MOUNT_TCP; ctx->nfs_server.protocol = XPRT_TRANSPORT_TCP; break; case Opt_xprt_rdma6: protofamily = AF_INET6; - /* fall through */ + fallthrough; case Opt_xprt_rdma: /* vector side protocols to TCP */ ctx->flags |= NFS_MOUNT_TCP; @@ -684,13 +684,13 @@ static int nfs_fs_context_parse_param(struct fs_context *fc, switch (lookup_constant(nfs_xprt_protocol_tokens, param->string, -1)) { case Opt_xprt_udp6: mountfamily = AF_INET6; - /* fall through */ + fallthrough; case Opt_xprt_udp: ctx->mount_server.protocol = XPRT_TRANSPORT_UDP; break; case Opt_xprt_tcp6: mountfamily = AF_INET6; - /* fall through */ + fallthrough; case Opt_xprt_tcp: ctx->mount_server.protocol = XPRT_TRANSPORT_TCP; break; @@ -899,9 +899,11 @@ static int nfs23_parse_monolithic(struct fs_context *fc, ctx->version = NFS_DEFAULT_VERSION; switch (data->version) { case 1: - data->namlen = 0; /* fall through */ + data->namlen = 0; + fallthrough; case 2: - data->bsize = 0; /* fall through */ + data->bsize = 0; + fallthrough; case 3: if (data->flags & NFS_MOUNT_VER3) goto out_no_v3; @@ -909,14 +911,14 @@ static int nfs23_parse_monolithic(struct fs_context *fc, memcpy(data->root.data, data->old_root.data, NFS2_FHSIZE); /* Turn off security negotiation */ extra_flags |= NFS_MOUNT_SECFLAVOUR; - /* fall through */ + fallthrough; case 4: if (data->flags & NFS_MOUNT_SECFLAVOUR) goto out_no_sec; - /* fall through */ + fallthrough; case 5: memset(data->context, 0, sizeof(data->context)); - /* fall through */ + fallthrough; case 6: if (data->flags & NFS_MOUNT_VER3) { if (data->root.size > NFS3_FHSIZE || data->root.size == 0) diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c index 26c94b32d6f4..c6c863382f37 100644 --- a/fs/nfs/nfs3acl.c +++ b/fs/nfs/nfs3acl.c @@ -108,7 +108,7 @@ struct posix_acl *nfs3_get_acl(struct inode *inode, int type) case -EPROTONOSUPPORT: dprintk("NFS_V3_ACL extension not supported; disabling\n"); server->caps &= ~NFS_CAP_ACLS; - /* fall through */ + fallthrough; case -ENOTSUPP: status = -EOPNOTSUPP; default: @@ -228,7 +228,7 @@ static int __nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl, dprintk("NFS_V3_ACL SETACL RPC not 
supported" "(will not retry)\n"); server->caps &= ~NFS_CAP_ACLS; - /* fall through */ + fallthrough; case -ENOTSUPP: status = -EOPNOTSUPP; } diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index a33970765467..fdfc77486ace 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -211,7 +211,7 @@ static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence) ret = nfs42_proc_llseek(filep, offset, whence); if (ret != -ENOTSUPP) return ret; - /* Fall through */ + fallthrough; default: return nfs_file_llseek(filep, offset, whence); } diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c index 1e7296395d71..62e6eea5c516 100644 --- a/fs/nfs/nfs4idmap.c +++ b/fs/nfs/nfs4idmap.c @@ -520,7 +520,7 @@ static int nfs_idmap_prepare_message(char *desc, struct idmap *idmap, switch (token) { case Opt_find_uid: im->im_type = IDMAP_TYPE_USER; - /* Fall through */ + fallthrough; case Opt_find_gid: im->im_conv = IDMAP_CONV_NAMETOID; ret = match_strlcpy(im->im_name, &substr, IDMAP_NAMESZ); @@ -528,7 +528,7 @@ static int nfs_idmap_prepare_message(char *desc, struct idmap *idmap, case Opt_find_user: im->im_type = IDMAP_TYPE_USER; - /* Fall through */ + fallthrough; case Opt_find_group: im->im_conv = IDMAP_CONV_IDTONAME; ret = match_int(&substr, &im->im_id); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index dbd01548335b..f8946b9468ef 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -483,7 +483,7 @@ static int nfs4_do_handle_exception(struct nfs_server *server, stateid); goto wait_on_recovery; } - /* Fall through */ + fallthrough; case -NFS4ERR_OPENMODE: if (inode) { int err; @@ -534,10 +534,10 @@ static int nfs4_do_handle_exception(struct nfs_server *server, ret = -EBUSY; break; } - /* Fall through */ + fallthrough; case -NFS4ERR_DELAY: nfs_inc_server_stats(server, NFSIOS_DELAY); - /* Fall through */ + fallthrough; case -NFS4ERR_GRACE: case -NFS4ERR_LAYOUTTRYLATER: case -NFS4ERR_RECALLCONFLICT: @@ -1505,7 +1505,7 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode, case NFS4_OPEN_CLAIM_PREVIOUS: if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags)) break; - /* Fall through */ + fallthrough; default: return 0; } @@ -2439,7 +2439,7 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata) case NFS4_OPEN_CLAIM_DELEG_CUR_FH: case NFS4_OPEN_CLAIM_DELEG_PREV_FH: data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0]; - /* Fall through */ + fallthrough; case NFS4_OPEN_CLAIM_FH: task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; } @@ -3545,11 +3545,11 @@ static void nfs4_close_done(struct rpc_task *task, void *data) nfs4_free_revoked_stateid(server, &calldata->arg.stateid, task->tk_msg.rpc_cred); - /* Fallthrough */ + fallthrough; case -NFS4ERR_BAD_STATEID: if (calldata->arg.fmode == 0) break; - /* Fallthrough */ + fallthrough; default: task->tk_status = nfs4_async_handle_exception(task, server, task->tk_status, &exception); @@ -6294,7 +6294,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) nfs4_free_revoked_stateid(data->res.server, data->args.stateid, task->tk_msg.rpc_cred); - /* Fallthrough */ + fallthrough; case -NFS4ERR_BAD_STATEID: case -NFS4ERR_STALE_STATEID: case -ETIMEDOUT: @@ -6314,7 +6314,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) data->res.fattr = NULL; goto out_restart; } - /* Fallthrough */ + fallthrough; default: task->tk_status = nfs4_async_handle_exception(task, data->res.server, task->tk_status, @@ -6622,13 +6622,13 @@ static void 
nfs4_locku_done(struct rpc_task *task, void *data) if (nfs4_update_lock_stateid(calldata->lsp, &calldata->res.stateid)) break; - /* Fall through */ + fallthrough; case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_EXPIRED: nfs4_free_revoked_stateid(calldata->server, &calldata->arg.stateid, task->tk_msg.rpc_cred); - /* Fall through */ + fallthrough; case -NFS4ERR_BAD_STATEID: case -NFS4ERR_STALE_STATEID: if (nfs4_sync_lock_stateid(&calldata->arg.stateid, @@ -8665,7 +8665,7 @@ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); rpc_delay(task, NFS4_POLL_RETRY_MIN); task->tk_status = 0; - /* fall through */ + fallthrough; case -NFS4ERR_RETRY_UNCACHED_REP: rpc_restart_call_prepare(task); return; @@ -9113,13 +9113,13 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf switch(task->tk_status) { case 0: wake_up_all(&clp->cl_lock_waitq); - /* Fallthrough */ + fallthrough; case -NFS4ERR_COMPLETE_ALREADY: case -NFS4ERR_WRONG_CRED: /* What to do here? */ break; case -NFS4ERR_DELAY: rpc_delay(task, NFS4_POLL_RETRY_MAX); - /* fall through */ + fallthrough; case -NFS4ERR_RETRY_UNCACHED_REP: return -EAGAIN; case -NFS4ERR_BADSESSION: @@ -9434,10 +9434,10 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) &lrp->args.range, lrp->args.inode)) goto out_restart; - /* Fallthrough */ + fallthrough; default: task->tk_status = 0; - /* Fallthrough */ + fallthrough; case 0: break; case -NFS4ERR_DELAY: diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index b1dba24918f8..4bf10792cb5b 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1530,7 +1530,7 @@ restart: default: pr_err("NFS: %s: unhandled error %d\n", __func__, status); - /* Fall through */ + fallthrough; case -ENOMEM: case -NFS4ERR_DENIED: case -NFS4ERR_RECLAIM_BAD: @@ -1667,7 +1667,7 @@ restart: break; } printk(KERN_ERR "NFS: %s: unhandled error %d\n", __func__, status); - /* Fall through */ + fallthrough; case -ENOENT: case -ENOMEM: case -EACCES: @@ -1683,7 +1683,7 @@ restart: set_bit(ops->state_flag_bit, &state->flags); break; } - /* Fall through */ + fallthrough; case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_STALE_STATEID: case -NFS4ERR_OLD_STATEID: @@ -1695,7 +1695,7 @@ restart: case -NFS4ERR_EXPIRED: case -NFS4ERR_NO_GRACE: nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state); - /* Fall through */ + fallthrough; case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: @@ -2273,11 +2273,11 @@ again: case -ETIMEDOUT: if (clnt->cl_softrtry) break; - /* Fall through */ + fallthrough; case -NFS4ERR_DELAY: case -EAGAIN: ssleep(1); - /* Fall through */ + fallthrough; case -NFS4ERR_STALE_CLIENTID: dprintk("NFS: %s after status %d, retrying\n", __func__, status); @@ -2289,7 +2289,7 @@ again: } if (clnt->cl_auth->au_flavor == RPC_AUTH_UNIX) break; - /* Fall through */ + fallthrough; case -NFS4ERR_CLID_INUSE: case -NFS4ERR_WRONGSEC: /* No point in retrying if we already used RPC_AUTH_UNIX */ diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 6ea4cac41e46..6985cacf4700 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -711,7 +711,7 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr, case FLUSH_COND_STABLE: if (nfs_reqs_to_commit(cinfo)) break; - /* fall through */ + fallthrough; default: hdr->args.stable = NFS_FILE_SYNC; } diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 40332c758d84..71f7741126b6 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1541,7 +1541,7 
@@ void pnfs_roc_release(struct nfs4_layoutreturn_args *args, case 0: if (res->lrs_present) res_stateid = &res->stateid; - /* Fallthrough */ + fallthrough; default: arg_stateid = &args->stateid; } diff --git a/fs/nfs_common/nfsacl.c b/fs/nfs_common/nfsacl.c index 8ceb6425e01a..d056ad2fdefd 100644 --- a/fs/nfs_common/nfsacl.c +++ b/fs/nfs_common/nfsacl.c @@ -237,7 +237,7 @@ posix_acl_from_nfsacl(struct posix_acl *acl) break; case ACL_MASK: mask = pa; - /* fall through */ + fallthrough; case ACL_OTHER: break; } diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c index 9bbaa671c079..311e5ce80cfc 100644 --- a/fs/nfsd/blocklayout.c +++ b/fs/nfsd/blocklayout.c @@ -83,13 +83,13 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp, bex->soff = iomap.addr; break; } - /*FALLTHRU*/ + fallthrough; case IOMAP_HOLE: if (seg->iomode == IOMODE_READ) { bex->es = PNFS_BLOCK_NONE_DATA; break; } - /*FALLTHRU*/ + fallthrough; case IOMAP_DELALLOC: default: WARN(1, "pnfsd: filesystem returned %d extent\n", iomap.type); diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 7fbe9840a03e..052be5bf9ef5 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -1119,7 +1119,7 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback break; case -ESERVERFAULT: ++session->se_cb_seq_nr; - /* Fall through */ + fallthrough; case 1: case -NFS4ERR_BADSESSION: nfsd4_mark_cb_fault(cb->cb_clp, cb->cb_seq_status); diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c index e12409eca7cc..a97873f2d22b 100644 --- a/fs/nfsd/nfs4layouts.c +++ b/fs/nfsd/nfs4layouts.c @@ -681,7 +681,7 @@ nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task) rpc_delay(task, HZ/100); /* 10 mili-seconds */ return 0; } - /* Fallthrough */ + fallthrough; default: /* * Unknown error or non-responding client, we'll need to fence. 
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index a527da3d8052..eaf50eafa935 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -428,7 +428,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, goto out; open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; reclaim = true; - /* fall through */ + fallthrough; case NFS4_OPEN_CLAIM_FH: case NFS4_OPEN_CLAIM_DELEG_CUR_FH: status = do_open_fhandle(rqstp, cstate, open); diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 81ed8e8bab3f..49a604b1c6a6 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -3117,7 +3117,7 @@ nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, break; default: /* checked by xdr code */ WARN_ON_ONCE(1); - /* fall through */ + fallthrough; case SP4_SSV: status = nfserr_encr_alg_unsupp; goto out_nolock; @@ -4532,7 +4532,7 @@ static int nfsd4_cb_recall_done(struct nfsd4_callback *cb, rpc_delay(task, 2 * HZ); return 0; } - /*FALLTHRU*/ + fallthrough; default: return 1; } @@ -5652,7 +5652,7 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) break; default: printk("unknown stateid type %x\n", s->sc_type); - /* Fallthrough */ + fallthrough; case NFS4_CLOSED_STID: case NFS4_CLOSED_DELEG_STID: status = nfserr_bad_stateid; @@ -6742,7 +6742,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, case NFS4_READW_LT: if (nfsd4_has_session(cstate)) fl_flags |= FL_SLEEP; - /* Fallthrough */ + fallthrough; case NFS4_READ_LT: spin_lock(&fp->fi_lock); nf = find_readable_file_locked(fp); @@ -6754,7 +6754,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, case NFS4_WRITEW_LT: if (nfsd4_has_session(cstate)) fl_flags |= FL_SLEEP; - /* Fallthrough */ + fallthrough; case NFS4_WRITE_LT: spin_lock(&fp->fi_lock); nf = find_writeable_file_locked(fp); @@ -6816,7 +6816,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, break; case FILE_LOCK_DEFERRED: nbl = NULL; - /* Fallthrough */ + fallthrough; case -EAGAIN: /* conflock holds conflicting lock */ status = nfserr_denied; dprintk("NFSD: nfsd4_lock: conflicting lock found!\n"); diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index 37bc8f5f4514..c81dbbad8792 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c @@ -459,7 +459,7 @@ static bool fsid_type_ok_for_exp(u8 fsid_type, struct svc_export *exp) case FSID_DEV: if (!old_valid_dev(exp_sb(exp)->s_dev)) return false; - /* FALL THROUGH */ + fallthrough; case FSID_MAJOR_MINOR: case FSID_ENCODE_DEV: return exp_sb(exp)->s_type->fs_flags & FS_REQUIRES_DEV; @@ -469,7 +469,7 @@ static bool fsid_type_ok_for_exp(u8 fsid_type, struct svc_export *exp) case FSID_UUID16: if (!is_root_export(exp)) return false; - /* fall through */ + fallthrough; case FSID_UUID4_INUM: case FSID_UUID16_INUM: return exp->ex_uuid != NULL; diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c index 543bbe0a556e..6e0b066480c5 100644 --- a/fs/nfsd/nfsproc.c +++ b/fs/nfsd/nfsproc.c @@ -314,7 +314,7 @@ nfsd_proc_create(struct svc_rqst *rqstp) rdev = inode->i_rdev; attr->ia_valid |= ATTR_SIZE; - /* FALLTHROUGH */ + fallthrough; case S_IFIFO: /* this is probably a permission check.. 
* at least IRIX implements perm checking on diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index b603dfcdd361..f7f6473578af 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -221,7 +221,7 @@ int nfsd_vers(struct nfsd_net *nn, int vers, enum vers_op change) case NFSD_TEST: if (nn->nfsd_versions) return nn->nfsd_versions[vers]; - /* Fallthrough */ + fallthrough; case NFSD_AVAIL: return nfsd_support_version(vers); } diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 7d2933b85b65..aba5af9df328 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1456,7 +1456,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, *created = true; break; } - /* fall through */ + fallthrough; case NFS4_CREATE_EXCLUSIVE4_1: if ( d_inode(dchild)->i_mtime.tv_sec == v_mtime && d_inode(dchild)->i_atime.tv_sec == v_atime @@ -1465,7 +1465,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, *created = true; goto set_attr; } - /* fall through */ + fallthrough; case NFS3_CREATE_GUARDED: err = nfserr_exist; } diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c index fb5a9a8a13cf..e516ae389ca5 100644 --- a/fs/nilfs2/bmap.c +++ b/fs/nilfs2/bmap.c @@ -519,7 +519,7 @@ int nilfs_bmap_read(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode) break; case NILFS_IFILE_INO: lockdep_set_class(&bmap->b_sem, &nilfs_bmap_mdt_lock_key); - /* Fall through */ + fallthrough; default: bmap->b_ptr_type = NILFS_BMAP_PTR_VM; bmap->b_last_allocated_key = 0; diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c index 0b453ef8fae5..2217f904a7cf 100644 --- a/fs/nilfs2/recovery.c +++ b/fs/nilfs2/recovery.c @@ -626,7 +626,7 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs, !(flags & NILFS_SS_SYNDT)) goto try_next_pseg; state = RF_DSYNC_ST; - /* Fall through */ + fallthrough; case RF_DSYNC_ST: if (!(flags & NILFS_SS_SYNDT)) goto confused; diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index a651e821c2de..e3726aca28ed 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -1138,7 +1138,8 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode) nilfs_sc_cstage_set(sci, NILFS_ST_DAT); goto dat_stage; } - nilfs_sc_cstage_inc(sci); /* Fall through */ + nilfs_sc_cstage_inc(sci); + fallthrough; case NILFS_ST_GC: if (nilfs_doing_gc()) { head = &sci->sc_gc_inodes; @@ -1159,7 +1160,8 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode) } sci->sc_stage.gc_inode_ptr = NULL; } - nilfs_sc_cstage_inc(sci); /* Fall through */ + nilfs_sc_cstage_inc(sci); + fallthrough; case NILFS_ST_FILE: head = &sci->sc_dirty_files; ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head, @@ -1186,7 +1188,7 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode) } nilfs_sc_cstage_inc(sci); sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED; - /* Fall through */ + fallthrough; case NILFS_ST_IFILE: err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile, &nilfs_sc_file_ops); @@ -1197,13 +1199,14 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode) err = nilfs_segctor_create_checkpoint(sci); if (unlikely(err)) break; - /* Fall through */ + fallthrough; case NILFS_ST_CPFILE: err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile, &nilfs_sc_file_ops); if (unlikely(err)) break; - nilfs_sc_cstage_inc(sci); /* Fall through */ + nilfs_sc_cstage_inc(sci); + fallthrough; case NILFS_ST_SUFILE: err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs, sci->sc_nfreesegs, &ndone); @@ -1219,7 +1222,8 @@ static int nilfs_segctor_collect_blocks(struct 
nilfs_sc_info *sci, int mode) &nilfs_sc_file_ops); if (unlikely(err)) break; - nilfs_sc_cstage_inc(sci); /* Fall through */ + nilfs_sc_cstage_inc(sci); + fallthrough; case NILFS_ST_DAT: dat_stage: err = nilfs_segctor_scan_file(sci, nilfs->ns_dat, @@ -1230,7 +1234,8 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode) nilfs_sc_cstage_set(sci, NILFS_ST_DONE); return 0; } - nilfs_sc_cstage_inc(sci); /* Fall through */ + nilfs_sc_cstage_inc(sci); + fallthrough; case NILFS_ST_SR: if (mode == SC_LSEG_SR) { /* Appending a super root */ diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 559de311deca..3e01d8f2ab90 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -1147,7 +1147,7 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask, } switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) { - case FAN_MARK_ADD: /* fallthrough */ + case FAN_MARK_ADD: case FAN_MARK_REMOVE: if (!mask) return -EINVAL; diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c index 1ef24574f481..cea739be77c4 100644 --- a/fs/ocfs2/cluster/quorum.c +++ b/fs/ocfs2/cluster/quorum.c @@ -67,7 +67,7 @@ static void o2quo_fence_self(void) default: WARN_ON(o2nm_single_cluster->cl_fence_method >= O2NM_FENCE_METHODS); - /* fall through */ + fallthrough; case O2NM_FENCE_RESET: printk(KERN_ERR "*** ocfs2 is very sorry to be fencing this " "system by restarting ***\n"); diff --git a/fs/pstore/zone.c b/fs/pstore/zone.c index 819428dfa32f..3ce89216670c 100644 --- a/fs/pstore/zone.c +++ b/fs/pstore/zone.c @@ -1081,7 +1081,6 @@ next_zone: readop = psz_ftrace_read; break; case PSTORE_TYPE_CONSOLE: - fallthrough; case PSTORE_TYPE_PMSG: readop = psz_record_read; break; diff --git a/fs/quota/quota.c b/fs/quota/quota.c index 5444d3c4d93f..47f9e151988b 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c @@ -38,7 +38,7 @@ static int check_quotactl_permission(struct super_block *sb, int type, int cmd, if ((type == USRQUOTA && uid_eq(current_euid(), make_kuid(current_user_ns(), id))) || (type == GRPQUOTA && in_egroup_p(make_kgid(current_user_ns(), id)))) break; - /*FALLTHROUGH*/ + fallthrough; default: if (!capable(CAP_SYS_ADMIN)) return -EPERM; diff --git a/fs/seq_file.c b/fs/seq_file.c index 4e6239f33c06..31219c1db17d 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -295,7 +295,7 @@ loff_t seq_lseek(struct file *file, loff_t offset, int whence) switch (whence) { case SEEK_CUR: offset += file->f_pos; - /* fall through */ + fallthrough; case SEEK_SET: if (offset < 0) break; diff --git a/fs/signalfd.c b/fs/signalfd.c index 5b78719be445..456046e15873 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c @@ -176,7 +176,7 @@ static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, kernel_siginfo_t *info if (!nonblock) break; ret = -EAGAIN; - /* fall through */ + fallthrough; default: spin_unlock_irq(&current->sighand->siglock); return ret; diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c index 22bfda158f7f..6d6cd85c2b4c 100644 --- a/fs/ubifs/lprops.c +++ b/fs/ubifs/lprops.c @@ -269,7 +269,7 @@ void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops, break; /* No more room on heap so make it un-categorized */ cat = LPROPS_UNCAT; - /* Fall through */ + fallthrough; case LPROPS_UNCAT: list_add(&lprops->list, &c->uncat_list); break; @@ -313,7 +313,7 @@ static void ubifs_remove_from_cat(struct ubifs_info *c, case LPROPS_FREEABLE: c->freeable_cnt -= 1; ubifs_assert(c, c->freeable_cnt >= 0); - /*
Fall through */ + fallthrough; case LPROPS_UNCAT: case LPROPS_EMPTY: case LPROPS_FRDI_IDX: diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c index 6023c97c6da2..25ff91c7e94a 100644 --- a/fs/udf/symlink.c +++ b/fs/udf/symlink.c @@ -52,7 +52,7 @@ static int udf_pc_to_char(struct super_block *sb, unsigned char *from, elen += pc->lengthComponentIdent; break; } - /* Fall through */ + fallthrough; case 2: if (tolen == 0) return -ENAMETOOLONG; diff --git a/fs/ufs/util.h b/fs/ufs/util.h index e1f1b2e868a7..4931bec1a01c 100644 --- a/fs/ufs/util.h +++ b/fs/ufs/util.h @@ -42,7 +42,7 @@ ufs_get_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1, case UFS_ST_SUNOS: if (fs32_to_cpu(sb, usb3->fs_postblformat) == UFS_42POSTBLFMT) return fs32_to_cpu(sb, usb1->fs_u0.fs_sun.fs_state); - /* Fall Through - to UFS_ST_SUN */ + fallthrough; /* to UFS_ST_SUN */ case UFS_ST_SUN: return fs32_to_cpu(sb, usb3->fs_un2.fs_sun.fs_state); case UFS_ST_SUNx86: @@ -63,7 +63,7 @@ ufs_set_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1, usb1->fs_u0.fs_sun.fs_state = cpu_to_fs32(sb, value); break; } - /* Fall Through - to UFS_ST_SUN */ + fallthrough; /* to UFS_ST_SUN */ case UFS_ST_SUN: usb3->fs_un2.fs_sun.fs_state = cpu_to_fs32(sb, value); break; @@ -197,7 +197,7 @@ ufs_get_inode_uid(struct super_block *sb, struct ufs_inode *inode) case UFS_UID_EFT: if (inode->ui_u1.oldids.ui_suid == 0xFFFF) return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_uid); - /* Fall through */ + fallthrough; default: return fs16_to_cpu(sb, inode->ui_u1.oldids.ui_suid); } @@ -215,7 +215,7 @@ ufs_set_inode_uid(struct super_block *sb, struct ufs_inode *inode, u32 value) inode->ui_u3.ui_sun.ui_uid = cpu_to_fs32(sb, value); if (value > 0xFFFF) value = 0xFFFF; - /* Fall through */ + fallthrough; default: inode->ui_u1.oldids.ui_suid = cpu_to_fs16(sb, value); break; @@ -231,7 +231,7 @@ ufs_get_inode_gid(struct super_block *sb, struct ufs_inode *inode) case UFS_UID_EFT: if (inode->ui_u1.oldids.ui_sgid == 0xFFFF) return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_gid); - /* Fall through */ + fallthrough; default: return fs16_to_cpu(sb, inode->ui_u1.oldids.ui_sgid); } @@ -249,7 +249,7 @@ ufs_set_inode_gid(struct super_block *sb, struct ufs_inode *inode, u32 value) inode->ui_u3.ui_sun.ui_gid = cpu_to_fs32(sb, value); if (value > 0xFFFF) value = 0xFFFF; - /* Fall through */ + fallthrough; default: inode->ui_u1.oldids.ui_sgid = cpu_to_fs16(sb, value); break; diff --git a/fs/vboxsf/utils.c b/fs/vboxsf/utils.c index 96bd160da48b..018057546067 100644 --- a/fs/vboxsf/utils.c +++ b/fs/vboxsf/utils.c @@ -226,7 +226,7 @@ int vboxsf_getattr(const struct path *path, struct kstat *kstat, break; case AT_STATX_FORCE_SYNC: sf_i->force_restat = 1; - /* fall-through */ + fallthrough; default: err = vboxsf_inode_revalidate(dentry); } diff --git a/include/linux/compat.h b/include/linux/compat.h index d38c4d7e83bd..b354ce58966e 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -429,11 +429,11 @@ put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set, compat_sigset_t v; switch (_NSIG_WORDS) { case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3]; - /* fall through */ + fallthrough; case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2]; - /* fall through */ + fallthrough; case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1]; - /* fall through */ + fallthrough; case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0]; } return copy_to_user(compat, &v, size) ? 
-EFAULT : 0; diff --git a/include/linux/filter.h b/include/linux/filter.h index 0a355b005bf4..ebfb7cfb65f1 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1200,7 +1200,7 @@ static inline u16 bpf_anc_helper(const struct sock_filter *ftest) BPF_ANCILLARY(RANDOM); BPF_ANCILLARY(VLAN_TPID); } - /* Fallthrough. */ + fallthrough; default: return ftest->code; } diff --git a/include/linux/jhash.h b/include/linux/jhash.h index 19ddd43aee68..cfb62e9f37be 100644 --- a/include/linux/jhash.h +++ b/include/linux/jhash.h @@ -86,17 +86,17 @@ static inline u32 jhash(const void *key, u32 length, u32 initval) } /* Last block: affect all 32 bits of (c) */ switch (length) { - case 12: c += (u32)k[11]<<24; /* fall through */ - case 11: c += (u32)k[10]<<16; /* fall through */ - case 10: c += (u32)k[9]<<8; /* fall through */ - case 9: c += k[8]; /* fall through */ - case 8: b += (u32)k[7]<<24; /* fall through */ - case 7: b += (u32)k[6]<<16; /* fall through */ - case 6: b += (u32)k[5]<<8; /* fall through */ - case 5: b += k[4]; /* fall through */ - case 4: a += (u32)k[3]<<24; /* fall through */ - case 3: a += (u32)k[2]<<16; /* fall through */ - case 2: a += (u32)k[1]<<8; /* fall through */ + case 12: c += (u32)k[11]<<24; fallthrough; + case 11: c += (u32)k[10]<<16; fallthrough; + case 10: c += (u32)k[9]<<8; fallthrough; + case 9: c += k[8]; fallthrough; + case 8: b += (u32)k[7]<<24; fallthrough; + case 7: b += (u32)k[6]<<16; fallthrough; + case 6: b += (u32)k[5]<<8; fallthrough; + case 5: b += k[4]; fallthrough; + case 4: a += (u32)k[3]<<24; fallthrough; + case 3: a += (u32)k[2]<<16; fallthrough; + case 2: a += (u32)k[1]<<8; fallthrough; case 1: a += k[0]; __jhash_final(a, b, c); case 0: /* Nothing left to add */ @@ -132,8 +132,8 @@ static inline u32 jhash2(const u32 *k, u32 length, u32 initval) /* Handle the last 3 u32's */ switch (length) { - case 3: c += k[2]; /* fall through */ - case 2: b += k[1]; /* fall through */ + case 3: c += k[2]; fallthrough; + case 2: b += k[1]; fallthrough; case 1: a += k[0]; __jhash_final(a, b, c); case 0: /* Nothing left to add */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 1983e08f5906..97c83773b6f0 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -157,11 +157,14 @@ static inline void __mm_zero_struct_page(struct page *page) switch (sizeof(struct page)) { case 80: - _pp[9] = 0; /* fallthrough */ + _pp[9] = 0; + fallthrough; case 72: - _pp[8] = 0; /* fallthrough */ + _pp[8] = 0; + fallthrough; case 64: - _pp[7] = 0; /* fallthrough */ + _pp[7] = 0; + fallthrough; case 56: _pp[6] = 0; _pp[5] = 0; diff --git a/include/linux/signal.h b/include/linux/signal.h index 6bb1a3f0258c..7bbc0e9cf084 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -137,11 +137,11 @@ static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \ b3 = b->sig[3]; b2 = b->sig[2]; \ r->sig[3] = op(a3, b3); \ r->sig[2] = op(a2, b2); \ - /* fall through */ \ + fallthrough; \ case 2: \ a1 = a->sig[1]; b1 = b->sig[1]; \ r->sig[1] = op(a1, b1); \ - /* fall through */ \ + fallthrough; \ case 1: \ a0 = a->sig[0]; b0 = b->sig[0]; \ r->sig[0] = op(a0, b0); \ @@ -171,9 +171,9 @@ static inline void name(sigset_t *set) \ switch (_NSIG_WORDS) { \ case 4: set->sig[3] = op(set->sig[3]); \ set->sig[2] = op(set->sig[2]); \ - /* fall through */ \ + fallthrough; \ case 2: set->sig[1] = op(set->sig[1]); \ - /* fall through */ \ + fallthrough; \ case 1: set->sig[0] = op(set->sig[0]); \ break; \ default: \ @@ -194,7 +194,7 @@ static inline void 
sigemptyset(sigset_t *set) memset(set, 0, sizeof(sigset_t)); break; case 2: set->sig[1] = 0; - /* fall through */ + fallthrough; case 1: set->sig[0] = 0; break; } @@ -207,7 +207,7 @@ static inline void sigfillset(sigset_t *set) memset(set, -1, sizeof(sigset_t)); break; case 2: set->sig[1] = -1; - /* fall through */ + fallthrough; case 1: set->sig[0] = -1; break; } diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 46881d902124..ab57cf787c1f 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3745,19 +3745,19 @@ static inline bool __skb_metadata_differs(const struct sk_buff *skb_a, #define __it(x, op) (x -= sizeof(u##op)) #define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op)) case 32: diffs |= __it_diff(a, b, 64); - /* fall through */ + fallthrough; case 24: diffs |= __it_diff(a, b, 64); - /* fall through */ + fallthrough; case 16: diffs |= __it_diff(a, b, 64); - /* fall through */ + fallthrough; case 8: diffs |= __it_diff(a, b, 64); break; case 28: diffs |= __it_diff(a, b, 64); - /* fall through */ + fallthrough; case 20: diffs |= __it_diff(a, b, 64); - /* fall through */ + fallthrough; case 12: diffs |= __it_diff(a, b, 64); - /* fall through */ + fallthrough; case 4: diffs |= __it_diff(a, b, 32); break; } diff --git a/include/math-emu/op-common.h b/include/math-emu/op-common.h index adcc6a97db61..143568d64b20 100644 --- a/include/math-emu/op-common.h +++ b/include/math-emu/op-common.h @@ -308,7 +308,7 @@ do { \ \ case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \ R##_e = X##_e; \ - /* Fall through */ \ + fallthrough; \ case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NORMAL): \ case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF): \ case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO): \ @@ -319,7 +319,7 @@ do { \ \ case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NORMAL): \ R##_e = Y##_e; \ - /* Fall through */ \ + fallthrough; \ case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_NAN): \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN): \ case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN): \ @@ -417,7 +417,7 @@ do { \ case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF): \ case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO): \ R##_s = X##_s; \ - /* Fall through */ \ + fallthrough; \ \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_INF): \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \ @@ -431,7 +431,7 @@ do { \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN): \ case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN): \ R##_s = Y##_s; \ - /* Fall through */ \ + fallthrough; \ \ case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_INF): \ case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \ @@ -497,7 +497,7 @@ do { \ \ case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \ FP_SET_EXCEPTION(FP_EX_DIVZERO); \ - /* Fall through */ \ + fallthrough; \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_ZERO): \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \ R##_c = FP_CLS_INF; \ diff --git a/ipc/sem.c b/ipc/sem.c index 8c0244e0365e..f6c30a85dadf 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -1691,7 +1691,7 @@ static long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg, int v case IPC_SET: if (copy_semid_from_user(&semid64, p, version)) return -EFAULT; - /* fall through */ + fallthrough; case IPC_RMID: return semctl_down(ns, semid, cmd, &semid64); default: @@ -1805,7 +1805,7 @@ static long compat_ksys_semctl(int semid, int semnum, int cmd, int arg, int vers case IPC_SET: if (copy_compat_semid_from_user(&semid64, p, version)) return -EFAULT; - /* fallthru */ + fallthrough; case IPC_RMID: return semctl_down(ns, semid, cmd, &semid64); default: diff 
--git a/ipc/shm.c b/ipc/shm.c index f1ed36e3ac9f..e25c7c6106bc 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -1179,7 +1179,7 @@ static long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf, int ver case IPC_SET: if (copy_shmid_from_user(&sem64, buf, version)) return -EFAULT; - /* fallthru */ + fallthrough; case IPC_RMID: return shmctl_down(ns, shmid, cmd, &sem64); case SHM_LOCK: @@ -1374,7 +1374,7 @@ static long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr, int versio case IPC_SET: if (copy_compat_shmid_from_user(&sem64, uptr, version)) return -EFAULT; - /* fallthru */ + fallthrough; case IPC_RMID: return shmctl_down(ns, shmid, cmd, &sem64); case SHM_LOCK: diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index a10e2997aa6c..333b3bcfc545 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c @@ -681,7 +681,7 @@ static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule) data->values[i] = AUDIT_UID_UNSET; break; } - /* fall through - if set */ + fallthrough; /* if set */ default: data->values[i] = f->val; } diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 83ff127ef7ae..e21de4f1754c 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -1794,7 +1794,7 @@ static bool cg_sockopt_is_valid_access(int off, int size, return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT; case offsetof(struct bpf_sockopt, optname): - /* fallthrough */ + fallthrough; case offsetof(struct bpf_sockopt, level): if (size != size_default) return false; diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index f1c46529929b..6386b7bb98f2 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -279,7 +279,7 @@ static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu, break; default: bpf_warn_invalid_xdp_action(act); - /* fallthrough */ + fallthrough; case XDP_DROP: xdp_return_frame(xdpf); stats->drop++; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 86299a292214..1bf960aa615c 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -2029,7 +2029,7 @@ bpf_prog_load_check_attach(enum bpf_prog_type prog_type, case BPF_PROG_TYPE_EXT: if (expected_attach_type) return -EINVAL; - /* fallthrough */ + fallthrough; default: return 0; } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index ef938f17b944..47e74f09fa37 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -5236,7 +5236,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, off_reg == dst_reg ? dst : src); return -EACCES; } - /* fall-through */ + fallthrough; default: break; } @@ -10988,7 +10988,7 @@ static int check_attach_btf_id(struct bpf_verifier_env *env) default: if (!prog_extension) return -EINVAL; - /* fallthrough */ + fallthrough; case BPF_MODIFY_RETURN: case BPF_LSM_MAC: case BPF_TRACE_FENTRY: diff --git a/kernel/capability.c b/kernel/capability.c index 1444f3954d75..7c59b096c98a 100644 --- a/kernel/capability.c +++ b/kernel/capability.c @@ -93,7 +93,7 @@ static int cap_validate_magic(cap_user_header_t header, unsigned *tocopy) break; case _LINUX_CAPABILITY_VERSION_2: warn_deprecated_v2(); - /* fall through - v3 is otherwise equivalent to v2. 
*/ + fallthrough; /* v3 is otherwise equivalent to v2 */ case _LINUX_CAPABILITY_VERSION_3: *tocopy = _LINUX_CAPABILITY_U32S_3; break; diff --git a/kernel/compat.c b/kernel/compat.c index b8d2800bb4b7..05adfd6fa8bf 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -255,11 +255,11 @@ get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat) return -EFAULT; switch (_NSIG_WORDS) { case 4: set->sig[3] = v.sig[6] | (((long)v.sig[7]) << 32 ); - /* fall through */ + fallthrough; case 3: set->sig[2] = v.sig[4] | (((long)v.sig[5]) << 32 ); - /* fall through */ + fallthrough; case 2: set->sig[1] = v.sig[2] | (((long)v.sig[3]) << 32 ); - /* fall through */ + fallthrough; case 1: set->sig[0] = v.sig[0] | (((long)v.sig[1]) << 32 ); } #else diff --git a/kernel/debug/gdbstub.c b/kernel/debug/gdbstub.c index a790026e42d0..cc3c43dfec44 100644 --- a/kernel/debug/gdbstub.c +++ b/kernel/debug/gdbstub.c @@ -1046,14 +1046,14 @@ int gdb_serial_stub(struct kgdb_state *ks) return DBG_PASS_EVENT; } #endif - /* Fall through */ + fallthrough; case 'C': /* Exception passing */ tmp = gdb_cmd_exception_pass(ks); if (tmp > 0) goto default_handle; if (tmp == 0) break; - /* Fall through - on tmp < 0 */ + fallthrough; /* on tmp < 0 */ case 'c': /* Continue packet */ case 's': /* Single step packet */ if (kgdb_contthread && kgdb_contthread != current) { @@ -1062,7 +1062,7 @@ int gdb_serial_stub(struct kgdb_state *ks) break; } dbg_activate_sw_breakpoints(); - /* Fall through - to default processing */ + fallthrough; /* to default processing */ default: default_handle: error = kgdb_arch_handle_exception(ks->ex_vector, diff --git a/kernel/debug/kdb/kdb_keyboard.c b/kernel/debug/kdb/kdb_keyboard.c index 750497b0003a..f877a0a0d7cf 100644 --- a/kernel/debug/kdb/kdb_keyboard.c +++ b/kernel/debug/kdb/kdb_keyboard.c @@ -173,11 +173,11 @@ int kdb_get_kbd_char(void) case KT_LATIN: if (isprint(keychar)) break; /* printable characters */ - /* fall through */ + fallthrough; case KT_SPEC: if (keychar == K_ENTER) break; - /* fall through */ + fallthrough; default: return -1; /* ignore unprintables */ } diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c index 004c5b6c87f8..6226502ce049 100644 --- a/kernel/debug/kdb/kdb_support.c +++ b/kernel/debug/kdb/kdb_support.c @@ -432,7 +432,7 @@ int kdb_getphysword(unsigned long *word, unsigned long addr, size_t size) *word = w8; break; } - /* fall through */ + fallthrough; default: diag = KDB_BADWIDTH; kdb_printf("kdb_getphysword: bad width %ld\n", (long) size); @@ -481,7 +481,7 @@ int kdb_getword(unsigned long *word, unsigned long addr, size_t size) *word = w8; break; } - /* fall through */ + fallthrough; default: diag = KDB_BADWIDTH; kdb_printf("kdb_getword: bad width %ld\n", (long) size); @@ -525,7 +525,7 @@ int kdb_putword(unsigned long addr, unsigned long word, size_t size) diag = kdb_putarea(addr, w8); break; } - /* fall through */ + fallthrough; default: diag = KDB_BADWIDTH; kdb_printf("kdb_putword: bad width %ld\n", (long) size); diff --git a/kernel/events/core.c b/kernel/events/core.c index 5bfe8e3c6e44..7ed5248f0445 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -10034,7 +10034,7 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr, case IF_SRC_KERNELADDR: case IF_SRC_KERNEL: kernel = 1; - /* fall through */ + fallthrough; case IF_SRC_FILEADDR: case IF_SRC_FILE: diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index a8e14c80b405..762a928e18f9 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -173,7 
+173,7 @@ irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags __irq_wake_thread(desc, action); - /* Fall through - to add to randomness */ + fallthrough; /* to add to randomness */ case IRQ_HANDLED: *flags |= action->flags; break; diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 52ac5391dcc6..5df903fccb60 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -271,7 +271,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, case IRQ_SET_MASK_OK: case IRQ_SET_MASK_OK_DONE: cpumask_copy(desc->irq_common_data.affinity, mask); - /* fall through */ + fallthrough; case IRQ_SET_MASK_OK_NOCOPY: irq_validate_effective_affinity(data); irq_set_thread_affinity(desc); @@ -868,7 +868,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned long flags) case IRQ_SET_MASK_OK_DONE: irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); irqd_set(&desc->irq_data, flags); - /* fall through */ + fallthrough; case IRQ_SET_MASK_OK_NOCOPY: flags = irqd_get_trigger_type(&desc->irq_data); diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index 95cb74f73292..4fb15fa96734 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -684,12 +684,12 @@ bool kallsyms_show_value(const struct cred *cred) case 0: if (kallsyms_for_perf()) return true; - /* fallthrough */ + fallthrough; case 1: if (security_capable(cred, &init_user_ns, CAP_SYSLOG, CAP_OPT_NOAUDIT) == 0) return true; - /* fallthrough */ + fallthrough; default: return false; } diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index f33769f97aca..e7aa57fb2fdc 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -659,7 +659,7 @@ static void power_down(void) break; case HIBERNATION_PLATFORM: hibernation_platform_enter(); - /* Fall through */ + fallthrough; case HIBERNATION_SHUTDOWN: if (pm_power_off) kernel_power_off(); diff --git a/kernel/power/qos.c b/kernel/power/qos.c index db0bed2cae26..ec7e1e85923e 100644 --- a/kernel/power/qos.c +++ b/kernel/power/qos.c @@ -119,7 +119,7 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, * and add, then see if the aggregate has changed. 
*/ plist_del(node, &c->list); - /* fall through */ + fallthrough; case PM_QOS_ADD_REQ: plist_node_init(node, new_value); plist_add(node, &c->list); @@ -188,7 +188,7 @@ bool pm_qos_update_flags(struct pm_qos_flags *pqf, break; case PM_QOS_UPDATE_REQ: pm_qos_flags_remove_req(pqf, req); - /* fall through */ + fallthrough; case PM_QOS_ADD_REQ: req->flags = val; INIT_LIST_HEAD(&req->node); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 8471a0f7eb32..2d95dc3f4644 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2320,7 +2320,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p) state = possible; break; } - /* Fall-through */ + fallthrough; case possible: do_set_cpus_allowed(p, cpu_possible_mask); state = fail; diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 007b0a6b0152..1bd7e3af904f 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -1219,13 +1219,13 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what, case sa_rootdomain: if (!atomic_read(&d->rd->refcount)) free_rootdomain(&d->rd->rcu); - /* Fall through */ + fallthrough; case sa_sd: free_percpu(d->sd); - /* Fall through */ + fallthrough; case sa_sd_storage: __sdt_free(cpu_map); - /* Fall through */ + fallthrough; case sa_none: break; } diff --git a/kernel/signal.c b/kernel/signal.c index 42b67d2cea37..a38b3edc6851 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -851,7 +851,7 @@ static int check_kill_permission(int sig, struct kernel_siginfo *info, */ if (!sid || sid == task_session(current)) break; - /* fall through */ + fallthrough; default: return -EPERM; } diff --git a/kernel/sys.c b/kernel/sys.c index ca11af9d815d..ab6c409b1159 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1753,7 +1753,7 @@ void getrusage(struct task_struct *p, int who, struct rusage *r) if (who == RUSAGE_CHILDREN) break; - /* fall through */ + fallthrough; case RUSAGE_SELF: thread_group_cputime_adjusted(p, &tgutime, &tgstime); diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index c4038511d5c9..95b6a708b040 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -377,7 +377,7 @@ static bool hrtimer_fixup_activate(void *addr, enum debug_obj_state state) switch (state) { case ODEBUG_STATE_ACTIVE: WARN_ON(1); - /* fall through */ + fallthrough; default: return false; } diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c index 07709ac30439..bf540f5a4115 100644 --- a/kernel/time/posix-timers.c +++ b/kernel/time/posix-timers.c @@ -439,12 +439,12 @@ static struct pid *good_sigevent(sigevent_t * event) rtn = pid_task(pid, PIDTYPE_PID); if (!rtn || !same_thread_group(rtn, current)) return NULL; - /* FALLTHRU */ + fallthrough; case SIGEV_SIGNAL: case SIGEV_THREAD: if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX) return NULL; - /* FALLTHRU */ + fallthrough; case SIGEV_NONE: return pid; default: diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index e51778c312f1..36d7464c8962 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -381,7 +381,7 @@ void tick_broadcast_control(enum tick_broadcast_mode mode) switch (mode) { case TICK_BROADCAST_FORCE: tick_broadcast_forced = 1; - /* fall through */ + fallthrough; case TICK_BROADCAST_ON: cpumask_set_cpu(cpu, tick_broadcast_on); if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) { diff --git a/kernel/time/timer.c b/kernel/time/timer.c index a16764b0116e..a50364df1054 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ 
-666,7 +666,7 @@ static bool timer_fixup_activate(void *addr, enum debug_obj_state state) case ODEBUG_STATE_ACTIVE: WARN_ON(1); - /* fall through */ + fallthrough; default: return false; } diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 7ba62d68885a..4b3a42fc3b24 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -745,7 +745,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) #endif case BLKTRACESTART: start = 1; - /* fall through */ + fallthrough; case BLKTRACESTOP: ret = __blk_trace_startstop(q, start); break; diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index bf44f6bbd0c3..78a678eeb140 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -499,7 +499,7 @@ predicate_parse(const char *str, int nr_parens, int nr_preds, ptr++; break; } - /* fall through */ + fallthrough; default: parse_error(pe, FILT_ERR_TOO_MANY_PREDS, next - str); @@ -1273,7 +1273,7 @@ static int parse_pred(const char *str, void *data, switch (op) { case OP_NE: pred->not = 1; - /* Fall through */ + fallthrough; case OP_GLOB: case OP_EQ: break; diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c index 58f72b25f8e9..13da529e2e72 100644 --- a/lib/asn1_decoder.c +++ b/lib/asn1_decoder.c @@ -381,7 +381,7 @@ next_op: case ASN1_OP_END_SET_ACT: if (unlikely(!(flags & FLAG_MATCHED))) goto tag_mismatch; - /* fall through */ + fallthrough; case ASN1_OP_END_SEQ: case ASN1_OP_END_SET_OF: @@ -448,7 +448,7 @@ next_op: pc += asn1_op_lengths[op]; goto next_op; } - /* fall through */ + fallthrough; case ASN1_OP_ACT: ret = actions[machine[pc + 1]](context, hdr, tag, data + tdp, len); diff --git a/lib/assoc_array.c b/lib/assoc_array.c index 6f4bcf524554..04c98799c3ba 100644 --- a/lib/assoc_array.c +++ b/lib/assoc_array.c @@ -1113,7 +1113,7 @@ struct assoc_array_edit *assoc_array_delete(struct assoc_array *array, index_key)) goto found_leaf; } - /* fall through */ + fallthrough; case assoc_array_walk_tree_empty: case assoc_array_walk_found_wrong_shortcut: default: diff --git a/lib/bootconfig.c b/lib/bootconfig.c index a5f701161f6b..1b5de2a45b27 100644 --- a/lib/bootconfig.c +++ b/lib/bootconfig.c @@ -817,7 +817,7 @@ int __init xbc_init(char *buf, const char **emsg, int *epos) q - 2); break; } - /* Fall through */ + fallthrough; case '=': ret = xbc_parse_kv(&p, q, c); break; @@ -826,7 +826,7 @@ int __init xbc_init(char *buf, const char **emsg, int *epos) break; case '#': q = skip_comment(q); - /* fall through */ + fallthrough; case ';': case '\n': ret = xbc_parse_key(&p, q); diff --git a/lib/cmdline.c b/lib/cmdline.c index fbb9981a04a4..55768b4f3f58 100644 --- a/lib/cmdline.c +++ b/lib/cmdline.c @@ -132,23 +132,23 @@ unsigned long long memparse(const char *ptr, char **retptr) case 'E': case 'e': ret <<= 10; - /* fall through */ + fallthrough; case 'P': case 'p': ret <<= 10; - /* fall through */ + fallthrough; case 'T': case 't': ret <<= 10; - /* fall through */ + fallthrough; case 'G': case 'g': ret <<= 10; - /* fall through */ + fallthrough; case 'M': case 'm': ret <<= 10; - /* fall through */ + fallthrough; case 'K': case 'k': ret <<= 10; diff --git a/lib/dim/net_dim.c b/lib/dim/net_dim.c index a4db51c21266..06811d866775 100644 --- a/lib/dim/net_dim.c +++ b/lib/dim/net_dim.c @@ -233,7 +233,7 @@ void net_dim(struct dim *dim, struct dim_sample end_sample) schedule_work(&dim->work); break; } - /* fall through */ + fallthrough; case DIM_START_MEASURE: dim_update_sample(end_sample.event_ctr, 
end_sample.pkt_ctr, end_sample.byte_ctr, &dim->start_sample); diff --git a/lib/dim/rdma_dim.c b/lib/dim/rdma_dim.c index f7e26c7b4749..15462d54758d 100644 --- a/lib/dim/rdma_dim.c +++ b/lib/dim/rdma_dim.c @@ -59,7 +59,7 @@ static bool rdma_dim_decision(struct dim_stats *curr_stats, struct dim *dim) break; case DIM_STATS_WORSE: dim_turn(dim); - /* fall through */ + fallthrough; case DIM_STATS_BETTER: step_res = rdma_dim_step(dim); if (step_res == DIM_ON_EDGE) @@ -94,7 +94,7 @@ void rdma_dim(struct dim *dim, u64 completions) schedule_work(&dim->work); break; } - /* fall through */ + fallthrough; case DIM_START_MEASURE: dim->state = DIM_MEASURE_IN_PROGRESS; dim_update_sample_with_comps(curr_sample->event_ctr, 0, 0, diff --git a/lib/glob.c b/lib/glob.c index 0ba3ea86b546..85ecbda45cd8 100644 --- a/lib/glob.c +++ b/lib/glob.c @@ -102,7 +102,7 @@ bool __pure glob_match(char const *pat, char const *str) break; case '\\': d = *pat++; - /*FALLTHROUGH*/ + fallthrough; default: /* Literal character */ literal: if (c == d) { diff --git a/lib/siphash.c b/lib/siphash.c index c47bb6ff2149..a90112ee72a1 100644 --- a/lib/siphash.c +++ b/lib/siphash.c @@ -68,11 +68,11 @@ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key) bytemask_from_count(left))); #else switch (left) { - case 7: b |= ((u64)end[6]) << 48; /* fall through */ - case 6: b |= ((u64)end[5]) << 40; /* fall through */ - case 5: b |= ((u64)end[4]) << 32; /* fall through */ + case 7: b |= ((u64)end[6]) << 48; fallthrough; + case 6: b |= ((u64)end[5]) << 40; fallthrough; + case 5: b |= ((u64)end[4]) << 32; fallthrough; case 4: b |= le32_to_cpup(data); break; - case 3: b |= ((u64)end[2]) << 16; /* fall through */ + case 3: b |= ((u64)end[2]) << 16; fallthrough; case 2: b |= le16_to_cpup(data); break; case 1: b |= end[0]; } @@ -101,11 +101,11 @@ u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key) bytemask_from_count(left))); #else switch (left) { - case 7: b |= ((u64)end[6]) << 48; /* fall through */ - case 6: b |= ((u64)end[5]) << 40; /* fall through */ - case 5: b |= ((u64)end[4]) << 32; /* fall through */ + case 7: b |= ((u64)end[6]) << 48; fallthrough; + case 6: b |= ((u64)end[5]) << 40; fallthrough; + case 5: b |= ((u64)end[4]) << 32; fallthrough; case 4: b |= get_unaligned_le32(end); break; - case 3: b |= ((u64)end[2]) << 16; /* fall through */ + case 3: b |= ((u64)end[2]) << 16; fallthrough; case 2: b |= get_unaligned_le16(end); break; case 1: b |= end[0]; } @@ -268,11 +268,11 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) bytemask_from_count(left))); #else switch (left) { - case 7: b |= ((u64)end[6]) << 48; /* fall through */ - case 6: b |= ((u64)end[5]) << 40; /* fall through */ - case 5: b |= ((u64)end[4]) << 32; /* fall through */ + case 7: b |= ((u64)end[6]) << 48; fallthrough; + case 6: b |= ((u64)end[5]) << 40; fallthrough; + case 5: b |= ((u64)end[4]) << 32; fallthrough; case 4: b |= le32_to_cpup(data); break; - case 3: b |= ((u64)end[2]) << 16; /* fall through */ + case 3: b |= ((u64)end[2]) << 16; fallthrough; case 2: b |= le16_to_cpup(data); break; case 1: b |= end[0]; } @@ -301,11 +301,11 @@ u32 __hsiphash_unaligned(const void *data, size_t len, bytemask_from_count(left))); #else switch (left) { - case 7: b |= ((u64)end[6]) << 48; /* fall through */ - case 6: b |= ((u64)end[5]) << 40; /* fall through */ - case 5: b |= ((u64)end[4]) << 32; /* fall through */ + case 7: b |= ((u64)end[6]) << 48; fallthrough; + case 6: b |= ((u64)end[5]) << 40; 
fallthrough; + case 5: b |= ((u64)end[4]) << 32; fallthrough; case 4: b |= get_unaligned_le32(end); break; - case 3: b |= ((u64)end[2]) << 16; /* fall through */ + case 3: b |= ((u64)end[2]) << 16; fallthrough; case 2: b |= get_unaligned_le16(end); break; case 1: b |= end[0]; } @@ -431,7 +431,7 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) v0 ^= m; } switch (left) { - case 3: b |= ((u32)end[2]) << 16; /* fall through */ + case 3: b |= ((u32)end[2]) << 16; fallthrough; case 2: b |= le16_to_cpup(data); break; case 1: b |= end[0]; } @@ -454,7 +454,7 @@ u32 __hsiphash_unaligned(const void *data, size_t len, v0 ^= m; } switch (left) { - case 3: b |= ((u32)end[2]) << 16; /* fall through */ + case 3: b |= ((u32)end[2]) << 16; fallthrough; case 2: b |= get_unaligned_le16(end); break; case 1: b |= end[0]; } diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c index ab749ec10ab5..64fd9015ad80 100644 --- a/lib/ts_fsm.c +++ b/lib/ts_fsm.c @@ -193,7 +193,7 @@ startover: TOKEN_MISMATCH(); block_idx++; - /* fall through */ + fallthrough; case TS_FSM_ANY: if (next == NULL) diff --git a/lib/vsprintf.c b/lib/vsprintf.c index c155769559ab..19ebe1b257ec 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -1265,7 +1265,7 @@ char *mac_address_string(char *buf, char *end, u8 *addr, case 'R': reversed = true; - /* fall through */ + fallthrough; default: separator = ':'; @@ -1681,7 +1681,8 @@ char *uuid_string(char *buf, char *end, const u8 *addr, switch (*(++fmt)) { case 'L': - uc = true; /* fall-through */ + uc = true; + fallthrough; case 'l': index = guid_index; break; @@ -2218,7 +2219,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, case 'S': case 's': ptr = dereference_symbol_descriptor(ptr); - /* Fallthrough */ + fallthrough; case 'B': return symbol_string(buf, end, ptr, spec, fmt); case 'R': @@ -2449,7 +2450,7 @@ qualifier: case 'x': spec->flags |= SMALL; - /* fall through */ + fallthrough; case 'X': spec->base = 16; @@ -2467,7 +2468,7 @@ qualifier: * utility, treat it as any other invalid or * unsupported format specifier. */ - /* Fall-through */ + fallthrough; default: WARN_ONCE(1, "Please remove unsupported %%%c in format string\n", *fmt); @@ -3410,10 +3411,10 @@ int vsscanf(const char *buf, const char *fmt, va_list args) break; case 'i': base = 0; - /* fall through */ + fallthrough; case 'd': is_sign = true; - /* fall through */ + fallthrough; case 'u': break; case '%': diff --git a/lib/xz/xz_dec_lzma2.c b/lib/xz/xz_dec_lzma2.c index 9f336bc07ed6..ca2603abee08 100644 --- a/lib/xz/xz_dec_lzma2.c +++ b/lib/xz/xz_dec_lzma2.c @@ -1043,7 +1043,7 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s, s->lzma2.sequence = SEQ_LZMA_PREPARE; - /* Fall through */ + fallthrough; case SEQ_LZMA_PREPARE: if (s->lzma2.compressed < RC_INIT_BYTES) @@ -1055,7 +1055,7 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s, s->lzma2.compressed -= RC_INIT_BYTES; s->lzma2.sequence = SEQ_LZMA_RUN; - /* Fall through */ + fallthrough; case SEQ_LZMA_RUN: /* diff --git a/lib/xz/xz_dec_stream.c b/lib/xz/xz_dec_stream.c index bd1d182419d7..fea86deaaa01 100644 --- a/lib/xz/xz_dec_stream.c +++ b/lib/xz/xz_dec_stream.c @@ -583,7 +583,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) if (ret != XZ_OK) return ret; - /* Fall through */ + fallthrough; case SEQ_BLOCK_START: /* We need one byte of input to continue. 
*/ @@ -608,7 +608,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) s->temp.pos = 0; s->sequence = SEQ_BLOCK_HEADER; - /* Fall through */ + fallthrough; case SEQ_BLOCK_HEADER: if (!fill_temp(s, b)) @@ -620,7 +620,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) s->sequence = SEQ_BLOCK_UNCOMPRESS; - /* Fall through */ + fallthrough; case SEQ_BLOCK_UNCOMPRESS: ret = dec_block(s, b); @@ -629,7 +629,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) s->sequence = SEQ_BLOCK_PADDING; - /* Fall through */ + fallthrough; case SEQ_BLOCK_PADDING: /* @@ -651,7 +651,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) s->sequence = SEQ_BLOCK_CHECK; - /* Fall through */ + fallthrough; case SEQ_BLOCK_CHECK: if (s->check_type == XZ_CHECK_CRC32) { @@ -675,7 +675,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) s->sequence = SEQ_INDEX_PADDING; - /* Fall through */ + fallthrough; case SEQ_INDEX_PADDING: while ((s->index.size + (b->in_pos - s->in_start)) @@ -699,7 +699,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) s->sequence = SEQ_INDEX_CRC32; - /* Fall through */ + fallthrough; case SEQ_INDEX_CRC32: ret = crc32_validate(s, b); @@ -709,7 +709,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) s->temp.size = STREAM_HEADER_SIZE; s->sequence = SEQ_STREAM_FOOTER; - /* Fall through */ + fallthrough; case SEQ_STREAM_FOOTER: if (!fill_temp(s, b)) diff --git a/lib/zstd/bitstream.h b/lib/zstd/bitstream.h index 3a49784d5c61..7c65c66e41fd 100644 --- a/lib/zstd/bitstream.h +++ b/lib/zstd/bitstream.h @@ -259,15 +259,15 @@ ZSTD_STATIC size_t BIT_initDStream(BIT_DStream_t *bitD, const void *srcBuffer, s bitD->bitContainer = *(const BYTE *)(bitD->start); switch (srcSize) { case 7: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[6]) << (sizeof(bitD->bitContainer) * 8 - 16); - /* fall through */ + fallthrough; case 6: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[5]) << (sizeof(bitD->bitContainer) * 8 - 24); - /* fall through */ + fallthrough; case 5: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[4]) << (sizeof(bitD->bitContainer) * 8 - 32); - /* fall through */ + fallthrough; case 4: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[3]) << 24; - /* fall through */ + fallthrough; case 3: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[2]) << 16; - /* fall through */ + fallthrough; case 2: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[1]) << 8; default:; } diff --git a/lib/zstd/compress.c b/lib/zstd/compress.c index 5e0b67003e55..b080264ed3ad 100644 --- a/lib/zstd/compress.c +++ b/lib/zstd/compress.c @@ -3182,7 +3182,7 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream *zcs, void *dst, size_t * zcs->outBuffFlushedSize = 0; zcs->stage = zcss_flush; /* pass-through to flush stage */ } - /* fall through */ + fallthrough; case zcss_flush: { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; diff --git a/lib/zstd/decompress.c b/lib/zstd/decompress.c index 269ee9a796c1..66cd487a326a 100644 --- a/lib/zstd/decompress.c +++ b/lib/zstd/decompress.c @@ -442,7 +442,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx *dctx, const void *src, size_t srcSize case set_repeat: if (dctx->litEntropy == 0) return ERROR(dictionary_corrupted); - /* fall-through */ + fallthrough; case set_compressed: if (srcSize < 5) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */ @@ 
-1768,7 +1768,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, c return 0; } dctx->expected = 0; /* not necessary to copy more */ - /* fall through */ + fallthrough; case ZSTDds_decodeFrameHeader: memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected); @@ -2309,7 +2309,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inB switch (zds->stage) { case zdss_init: ZSTD_resetDStream(zds); /* transparent reset on starting decoding a new frame */ - /* fall-through */ + fallthrough; case zdss_loadHeader: { size_t const hSize = ZSTD_getFrameParams(&zds->fParams, zds->headerBuffer, zds->lhSize); @@ -2376,7 +2376,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inB } zds->stage = zdss_read; } - /* fall through */ + fallthrough; case zdss_read: { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx); @@ -2405,7 +2405,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inB zds->stage = zdss_load; /* pass-through */ } - /* fall through */ + fallthrough; case zdss_load: { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx); @@ -2438,7 +2438,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inB /* pass-through */ } } - /* fall through */ + fallthrough; case zdss_flush: { size_t const toFlushSize = zds->outEnd - zds->outStart; diff --git a/lib/zstd/huf_compress.c b/lib/zstd/huf_compress.c index e727812d12aa..08b4ae80aed4 100644 --- a/lib/zstd/huf_compress.c +++ b/lib/zstd/huf_compress.c @@ -556,9 +556,9 @@ size_t HUF_compress1X_usingCTable(void *dst, size_t dstSize, const void *src, si n = srcSize & ~3; /* join to mod 4 */ switch (srcSize & 3) { case 3: HUF_encodeSymbol(&bitC, ip[n + 2], CTable); HUF_FLUSHBITS_2(&bitC); - /* fall through */ + fallthrough; case 2: HUF_encodeSymbol(&bitC, ip[n + 1], CTable); HUF_FLUSHBITS_1(&bitC); - /* fall through */ + fallthrough; case 1: HUF_encodeSymbol(&bitC, ip[n + 0], CTable); HUF_FLUSHBITS(&bitC); case 0: default:; diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 3dd7c972677b..ec8408d1638f 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -367,7 +367,7 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCSHWTSTAMP: if (!net_eq(dev_net(dev), &init_net)) break; - /* fall through */ + fallthrough; case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c index 3debad93be1a..bc8807d9281f 100644 --- a/net/9p/trans_xen.c +++ b/net/9p/trans_xen.c @@ -520,7 +520,7 @@ static void xen_9pfs_front_changed(struct xenbus_device *dev, case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; - /* fall through - Missed the backend's CLOSING state */ + fallthrough; /* Missed the backend's CLOSING state */ case XenbusStateClosing: xenbus_frontend_closed(dev); break; diff --git a/net/atm/common.c b/net/atm/common.c index 84367b844b14..1cfa9bf1d187 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -297,7 +297,7 @@ static int adjust_tp(struct atm_trafprm *tp, unsigned char aal) break; default: pr_warn("AAL problems ... 
(%d)\n", aal); - /* fall through */ + fallthrough; case ATM_AAL5: max_sdu = ATM_MAX_AAL5_PDU; } @@ -417,7 +417,7 @@ static int __vcc_connect(struct atm_vcc *vcc, struct atm_dev *dev, short vpi, case ATM_NO_AAL: /* ATM_AAL5 is also used in the "0 for default" case */ vcc->qos.aal = ATM_AAL5; - /* fall through */ + fallthrough; case ATM_AAL5: error = atm_init_aal5(vcc); vcc->stats = &dev->stats.aal5; diff --git a/net/atm/lec.c b/net/atm/lec.c index 875fc0bc1780..b570ef919c28 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -380,7 +380,7 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb) if (mesg->content.normal.no_source_le_narp) break; - /* FALL THROUGH */ + fallthrough; case l_arp_update: lec_arp_update(priv, mesg->content.normal.mac_addr, mesg->content.normal.atm_addr, diff --git a/net/atm/resources.c b/net/atm/resources.c index 94bdc6527ee8..53236986dfe0 100644 --- a/net/atm/resources.c +++ b/net/atm/resources.c @@ -266,7 +266,7 @@ int atm_dev_ioctl(unsigned int cmd, void __user *buf, int __user *sioc_len, goto done; } } - /* fall through */ + fallthrough; case ATM_SETESIF: { unsigned char esi[ESI_LEN]; @@ -288,7 +288,7 @@ int atm_dev_ioctl(unsigned int cmd, void __user *buf, int __user *sioc_len, error = -EPERM; goto done; } - /* fall through */ + fallthrough; case ATM_GETSTAT: size = sizeof(struct atm_dev_stats); error = fetch_stats(dev, buf, cmd == ATM_GETSTATZ); @@ -361,7 +361,7 @@ int atm_dev_ioctl(unsigned int cmd, void __user *buf, int __user *sioc_len, error = -EINVAL; goto done; } - /* fall through */ + fallthrough; case ATM_SETCIRANGE: case SONET_GETSTATZ: case SONET_SETDIAG: @@ -371,7 +371,7 @@ int atm_dev_ioctl(unsigned int cmd, void __user *buf, int __user *sioc_len, error = -EPERM; goto done; } - /* fall through */ + fallthrough; default: if (IS_ENABLED(CONFIG_COMPAT) && compat) { #ifdef CONFIG_COMPAT diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 99eb8c6c0fbc..a66f211726e7 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -425,7 +425,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, case BPF_PROG_TYPE_SCHED_CLS: case BPF_PROG_TYPE_SCHED_ACT: is_l2 = true; - /* fall through */ + fallthrough; case BPF_PROG_TYPE_LWT_IN: case BPF_PROG_TYPE_LWT_OUT: case BPF_PROG_TYPE_LWT_XMIT: diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c index b93876c57fc4..1be4c898b2fa 100644 --- a/net/can/j1939/socket.c +++ b/net/can/j1939/socket.c @@ -1086,7 +1086,7 @@ static int j1939_sk_send_loop(struct j1939_priv *priv, struct sock *sk, break; case -ERESTARTSYS: ret = -EINTR; - /* fall through */ + fallthrough; case -EAGAIN: /* OK */ if (todo_size != size) ret = size - todo_size; diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c index a8dd956b5e8e..0cec4152f979 100644 --- a/net/can/j1939/transport.c +++ b/net/can/j1939/transport.c @@ -860,7 +860,7 @@ static int j1939_xtp_txnext_transmiter(struct j1939_session *session) return ret; } - /* fall through */ + fallthrough; case J1939_TP_CMD_CTS: case 0xff: /* did some data */ case J1939_ETP_CMD_DPO: @@ -1764,12 +1764,12 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session, case J1939_ETP_CMD_DPO: if (skcb->addr.type == J1939_ETP) break; - /* fall through */ - case J1939_TP_CMD_BAM: /* fall through */ + fallthrough; + case J1939_TP_CMD_BAM: case J1939_TP_CMD_CTS: /* fall through */ if (skcb->addr.type != J1939_ETP) break; - /* fall through */ + fallthrough; default: netdev_info(priv->ndev, "%s: 0x%p: last %02x\n", __func__, session, 
session->last_cmd); @@ -1965,8 +1965,8 @@ static void j1939_tp_cmd_recv(struct j1939_priv *priv, struct sk_buff *skb) switch (cmd) { case J1939_ETP_CMD_RTS: extd = J1939_ETP; - /* fall through */ - case J1939_TP_CMD_BAM: /* fall through */ + fallthrough; + case J1939_TP_CMD_BAM: case J1939_TP_CMD_RTS: /* fall through */ if (skcb->addr.type != extd) return; @@ -1987,7 +1987,7 @@ static void j1939_tp_cmd_recv(struct j1939_priv *priv, struct sk_buff *skb) case J1939_ETP_CMD_CTS: extd = J1939_ETP; - /* fall through */ + fallthrough; case J1939_TP_CMD_CTS: if (skcb->addr.type != extd) return; @@ -2014,7 +2014,7 @@ static void j1939_tp_cmd_recv(struct j1939_priv *priv, struct sk_buff *skb) case J1939_ETP_CMD_EOMA: extd = J1939_ETP; - /* fall through */ + fallthrough; case J1939_TP_CMD_EOMA: if (skcb->addr.type != extd) return; @@ -2050,14 +2050,14 @@ int j1939_tp_recv(struct j1939_priv *priv, struct sk_buff *skb) switch (skcb->addr.pgn) { case J1939_ETP_PGN_DAT: skcb->addr.type = J1939_ETP; - /* fall through */ + fallthrough; case J1939_TP_PGN_DAT: j1939_xtp_rx_dat(priv, skb); break; case J1939_ETP_PGN_CTL: skcb->addr.type = J1939_ETP; - /* fall through */ + fallthrough; case J1939_TP_PGN_CTL: if (skb->len < 8) return 0; /* Don't care. Nothing to extract here */ diff --git a/net/ceph/ceph_hash.c b/net/ceph/ceph_hash.c index 81e1e006c540..16a47c0eef37 100644 --- a/net/ceph/ceph_hash.c +++ b/net/ceph/ceph_hash.c @@ -50,35 +50,35 @@ unsigned int ceph_str_hash_rjenkins(const char *str, unsigned int length) switch (len) { case 11: c = c + ((__u32)k[10] << 24); - /* fall through */ + fallthrough; case 10: c = c + ((__u32)k[9] << 16); - /* fall through */ + fallthrough; case 9: c = c + ((__u32)k[8] << 8); /* the first byte of c is reserved for the length */ - /* fall through */ + fallthrough; case 8: b = b + ((__u32)k[7] << 24); - /* fall through */ + fallthrough; case 7: b = b + ((__u32)k[6] << 16); - /* fall through */ + fallthrough; case 6: b = b + ((__u32)k[5] << 8); - /* fall through */ + fallthrough; case 5: b = b + k[4]; - /* fall through */ + fallthrough; case 4: a = a + ((__u32)k[3] << 24); - /* fall through */ + fallthrough; case 3: a = a + ((__u32)k[2] << 16); - /* fall through */ + fallthrough; case 2: a = a + ((__u32)k[1] << 8); - /* fall through */ + fallthrough; case 1: a = a + k[0]; /* case 0: nothing left to add */ diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c index 07e5614eb3f1..7057f8db4f99 100644 --- a/net/ceph/crush/mapper.c +++ b/net/ceph/crush/mapper.c @@ -987,7 +987,7 @@ int crush_do_rule(const struct crush_map *map, case CRUSH_RULE_CHOOSELEAF_FIRSTN: case CRUSH_RULE_CHOOSE_FIRSTN: firstn = 1; - /* fall through */ + fallthrough; case CRUSH_RULE_CHOOSELEAF_INDEP: case CRUSH_RULE_CHOOSE_INDEP: if (wsize == 0) diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 27d6ab11f9ee..bdfd66ba3843 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -412,7 +412,7 @@ static void ceph_sock_state_change(struct sock *sk) switch (sk->sk_state) { case TCP_CLOSE: dout("%s TCP_CLOSE\n", __func__); - /* fall through */ + fallthrough; case TCP_CLOSE_WAIT: dout("%s TCP_CLOSE_WAIT\n", __func__); con_sock_state_closing(con); @@ -2751,7 +2751,7 @@ more: switch (ret) { case -EBADMSG: con->error_msg = "bad crc/signature"; - /* fall through */ + fallthrough; case -EBADE: ret = -EIO; break; diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c index 3d8c8015e976..d633a0aeaa55 100644 --- a/net/ceph/mon_client.c +++ b/net/ceph/mon_client.c @@ -1307,7 +1307,7 @@ static 
struct ceph_msg *mon_alloc_msg(struct ceph_connection *con, * request had a non-zero tid. Work around this weirdness * by allocating a new message. */ - /* fall through */ + fallthrough; case CEPH_MSG_MON_MAP: case CEPH_MSG_MDS_MAP: case CEPH_MSG_OSD_MAP: diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index e4fbcad6e7d8..7901ab6c79fd 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -3854,7 +3854,7 @@ static void scan_requests(struct ceph_osd *osd, if (!force_resend && !force_resend_writes) break; - /* fall through */ + fallthrough; case CALC_TARGET_NEED_RESEND: cancel_linger_map_check(lreq); /* @@ -3891,7 +3891,7 @@ static void scan_requests(struct ceph_osd *osd, !force_resend_writes)) break; - /* fall through */ + fallthrough; case CALC_TARGET_NEED_RESEND: cancel_map_check(req); unlink_request(osd, req); diff --git a/net/core/dev.c b/net/core/dev.c index d42c9ea0c3c0..b9c6f31ae96e 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4690,10 +4690,10 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb, break; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(skb->dev, xdp_prog, act); - /* fall through */ + fallthrough; case XDP_DROP: do_drop: kfree_skb(skb); diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index b2cf9b7bb7b8..205e92e604ef 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -322,7 +322,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) err = net_hwtstamp_validate(ifr); if (err) return err; - /* fall through */ + fallthrough; /* * Unknown or private ioctl @@ -478,7 +478,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c case SIOCSIFTXQLEN: if (!capable(CAP_NET_ADMIN)) return -EPERM; - /* fall through */ + fallthrough; /* * These ioctl calls: * - require local superuser power. 
@@ -503,7 +503,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c case SIOCSHWTSTAMP: if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; - /* fall through */ + fallthrough; case SIOCBONDSLAVEINFOQUERY: case SIOCBONDINFOQUERY: dev_load(net, ifr->ifr_name); diff --git a/net/core/devlink.c b/net/core/devlink.c index e5feb87beca7..80ec1cd81c64 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -6196,8 +6196,8 @@ devlink_trap_action_get_from_info(struct genl_info *info, val = nla_get_u8(info->attrs[DEVLINK_ATTR_TRAP_ACTION]); switch (val) { - case DEVLINK_TRAP_ACTION_DROP: /* fall-through */ - case DEVLINK_TRAP_ACTION_TRAP: /* fall-through */ + case DEVLINK_TRAP_ACTION_DROP: + case DEVLINK_TRAP_ACTION_TRAP: case DEVLINK_TRAP_ACTION_MIRROR: *p_trap_action = val; break; diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index b09bebeadf0b..9704522b0872 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -1189,7 +1189,7 @@ static int net_dm_alert_mode_get_from_info(struct genl_info *info, val = nla_get_u8(info->attrs[NET_DM_ATTR_ALERT_MODE]); switch (val) { - case NET_DM_ALERT_MODE_SUMMARY: /* fall-through */ + case NET_DM_ALERT_MODE_SUMMARY: case NET_DM_ALERT_MODE_PACKET: *p_alert_mode = val; break; diff --git a/net/core/filter.c b/net/core/filter.c index b2df52086445..1f647ab986b6 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -9223,7 +9223,7 @@ sk_reuseport_is_valid_access(int off, int size, case bpf_ctx_range(struct sk_reuseport_md, eth_protocol): if (size < sizeof_field(struct sk_buff, protocol)) return false; - /* fall through */ + fallthrough; case bpf_ctx_range(struct sk_reuseport_md, ip_protocol): case bpf_ctx_range(struct sk_reuseport_md, bind_inany): case bpf_ctx_range(struct sk_reuseport_md, len): diff --git a/net/core/pktgen.c b/net/core/pktgen.c index b53b6d38c4df..95f4c6b8f51a 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -3430,7 +3430,7 @@ xmit_more: net_info_ratelimited("%s xmit error: %d\n", pkt_dev->odevname, ret); pkt_dev->errors++; - /* fall through */ + fallthrough; case NETDEV_TX_BUSY: /* Retry it next time */ refcount_dec(&(pkt_dev->skb->users)); diff --git a/net/core/skmsg.c b/net/core/skmsg.c index 6a32a1fd34f8..649583158983 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -772,7 +772,6 @@ static void sk_psock_verdict_apply(struct sk_psock *psock, sk_psock_skb_redirect(skb); break; case __SK_DROP: - /* fall-through */ default: out_free: kfree_skb(skb); diff --git a/net/core/sock.c b/net/core/sock.c index e4f40b175acb..f8e5ccc45272 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1008,7 +1008,7 @@ set_sndbuf: break; case SO_TIMESTAMPING_NEW: sock_set_flag(sk, SOCK_TSTAMP_NEW); - /* fall through */ + fallthrough; case SO_TIMESTAMPING_OLD: if (val & ~SOF_TIMESTAMPING_MASK) { ret = -EINVAL; diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index aef72f6a2829..b9ee1a4a8955 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c @@ -608,7 +608,7 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk, */ if (hc->rx_x_recv > 0) break; - /* fall through */ + fallthrough; case CCID3_FBACK_PERIODIC: delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback); if (delta <= 0) diff --git a/net/dccp/feat.c b/net/dccp/feat.c index afc071ea1271..788dd629c420 100644 --- a/net/dccp/feat.c +++ b/net/dccp/feat.c @@ -1407,7 +1407,8 @@ int dccp_feat_parse_options(struct sock *sk, struct dccp_request_sock *dreq, * Negotiation during connection setup */ case 
DCCP_LISTEN: - server = true; /* fall through */ + server = true; + fallthrough; case DCCP_REQUESTING: switch (opt) { case DCCPO_CHANGE_L: diff --git a/net/dccp/input.c b/net/dccp/input.c index bd9cfdb67436..2cbb757a894f 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c @@ -64,7 +64,7 @@ static int dccp_rcv_close(struct sock *sk, struct sk_buff *skb) */ if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) break; - /* fall through */ + fallthrough; case DCCP_REQUESTING: case DCCP_ACTIVE_CLOSEREQ: dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED); @@ -76,7 +76,7 @@ static int dccp_rcv_close(struct sock *sk, struct sk_buff *skb) queued = 1; dccp_fin(sk, skb); dccp_set_state(sk, DCCP_PASSIVE_CLOSE); - /* fall through */ + fallthrough; case DCCP_PASSIVE_CLOSE: /* * Retransmitted Close: we have already enqueued the first one. @@ -113,7 +113,7 @@ static int dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb) queued = 1; dccp_fin(sk, skb); dccp_set_state(sk, DCCP_PASSIVE_CLOSEREQ); - /* fall through */ + fallthrough; case DCCP_PASSIVE_CLOSEREQ: sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); } @@ -530,7 +530,7 @@ static int dccp_rcv_respond_partopen_state_process(struct sock *sk, case DCCP_PKT_DATA: if (sk->sk_state == DCCP_RESPOND) break; - /* fall through */ + fallthrough; case DCCP_PKT_DATAACK: case DCCP_PKT_ACK: /* @@ -684,7 +684,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, /* Step 8: if using Ack Vectors, mark packet acknowledgeable */ dccp_handle_ackvec_processing(sk, skb); dccp_deliver_input_to_ccids(sk, skb); - /* fall through */ + fallthrough; case DCCP_RESPOND: queued = dccp_rcv_respond_partopen_state_process(sk, skb, dh, len); diff --git a/net/dccp/options.c b/net/dccp/options.c index 51aaba7a5d45..d24cad05001e 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c @@ -225,7 +225,7 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq, * interested. The RX CCID need not parse Ack Vectors, * since it is only interested in clearing old state. */ - /* fall through */ + fallthrough; case DCCPO_MIN_TX_CCID_SPECIFIC ... DCCPO_MAX_TX_CCID_SPECIFIC: if (ccid_hc_tx_parse_options(dp->dccps_hc_tx_ccid, sk, pkt_type, opt, value, len)) diff --git a/net/dccp/output.c b/net/dccp/output.c index 6433187a5cc4..50e6d5699bb2 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c @@ -62,7 +62,7 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) switch (dcb->dccpd_type) { case DCCP_PKT_DATA: set_ack = 0; - /* fall through */ + fallthrough; case DCCP_PKT_DATAACK: case DCCP_PKT_RESET: break; @@ -72,12 +72,12 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) /* Use ISS on the first (non-retransmitted) Request. 
*/ if (icsk->icsk_retransmits == 0) dcb->dccpd_seq = dp->dccps_iss; - /* fall through */ + fallthrough; case DCCP_PKT_SYNC: case DCCP_PKT_SYNCACK: ackno = dcb->dccpd_ack_seq; - /* fall through */ + fallthrough; default: /* * Set owner/destructor: some skbs are allocated via @@ -481,7 +481,7 @@ struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *rcv_skb) case DCCP_RESET_CODE_PACKET_ERROR: dhr->dccph_reset_data[0] = rxdh->dccph_type; break; - case DCCP_RESET_CODE_OPTION_ERROR: /* fall through */ + case DCCP_RESET_CODE_OPTION_ERROR: case DCCP_RESET_CODE_MANDATORY_ERROR: memcpy(dhr->dccph_reset_data, dcb->dccpd_reset_data, 3); break; diff --git a/net/dccp/proto.c b/net/dccp/proto.c index d148ab1530e5..6d705d90c614 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -101,7 +101,7 @@ void dccp_set_state(struct sock *sk, const int state) if (inet_csk(sk)->icsk_bind_hash != NULL && !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) inet_put_port(sk); - /* fall through */ + fallthrough; default: if (oldstate == DCCP_OPEN) DCCP_DEC_STATS(DCCP_MIB_CURRESTAB); @@ -834,7 +834,7 @@ int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, case DCCP_PKT_CLOSEREQ: if (!(flags & MSG_PEEK)) dccp_finish_passive_close(sk); - /* fall through */ + fallthrough; case DCCP_PKT_RESET: dccp_pr_debug("found fin (%s) ok!\n", dccp_packet_name(dh->dccph_type)); @@ -960,7 +960,7 @@ static void dccp_terminate_connection(struct sock *sk) case DCCP_PARTOPEN: dccp_pr_debug("Stop PARTOPEN timer (%p)\n", sk); inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); - /* fall through */ + fallthrough; case DCCP_OPEN: dccp_send_close(sk, 1); @@ -969,7 +969,7 @@ static void dccp_terminate_connection(struct sock *sk) next_state = DCCP_ACTIVE_CLOSEREQ; else next_state = DCCP_CLOSING; - /* fall through */ + fallthrough; default: dccp_set_state(sk, next_state); } diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 3b53d766789d..5dbd45dc35ad 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -623,12 +623,12 @@ static void dn_destroy_sock(struct sock *sk) goto disc_reject; case DN_RUN: scp->state = DN_DI; - /* fall through */ + fallthrough; case DN_DI: case DN_DR: disc_reject: dn_nsp_send_disc(sk, NSP_DISCINIT, 0, sk->sk_allocation); - /* fall through */ + fallthrough; case DN_NC: case DN_NR: case DN_RJ: @@ -642,7 +642,7 @@ disc_reject: break; default: printk(KERN_DEBUG "DECnet: dn_destroy_sock passed socket in invalid state\n"); - /* fall through */ + fallthrough; case DN_O: dn_stop_slow_timer(sk); diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c index c68503a18025..c97bdca5ec30 100644 --- a/net/decnet/dn_nsp_in.c +++ b/net/decnet/dn_nsp_in.c @@ -483,7 +483,7 @@ static void dn_nsp_disc_conf(struct sock *sk, struct sk_buff *skb) break; case DN_RUN: sk->sk_shutdown |= SHUTDOWN_MASK; - /* fall through */ + fallthrough; case DN_CC: scp->state = DN_CN; } diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c index 33fefb0aebca..4086f9c746af 100644 --- a/net/decnet/dn_table.c +++ b/net/decnet/dn_table.c @@ -156,7 +156,7 @@ static void dn_rehash_zone(struct dn_zone *dz) default: printk(KERN_DEBUG "DECnet: dn_rehash_zone: BUG! 
%d\n", old_divisor); - /* fall through */ + fallthrough; case 256: new_divisor = 1024; new_hashmask = 0x3FF; diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c index deae519bdeec..67b5ab2657b7 100644 --- a/net/decnet/sysctl_net_decnet.c +++ b/net/decnet/sysctl_net_decnet.c @@ -75,7 +75,7 @@ static void strip_it(char *str) case '\r': case ':': *str = 0; - /* Fallthrough */ + fallthrough; case 0: return; } diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 41d60eeefdbd..9af1a2d0cec4 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -2009,7 +2009,7 @@ static int dsa_slave_switchdev_event(struct notifier_block *unused, switchdev_work->event = event; switch (event) { - case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */ + case SWITCHDEV_FDB_ADD_TO_DEVICE: case SWITCHDEV_FDB_DEL_TO_DEVICE: if (dsa_slave_switchdev_fdb_work_init(switchdev_work, ptr)) goto err_fdb_work_init; diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c index bbe9b3b2d395..be6f06adefe0 100644 --- a/net/ieee802154/6lowpan/reassembly.c +++ b/net/ieee802154/6lowpan/reassembly.c @@ -195,7 +195,7 @@ static int lowpan_frag_rx_handlers_result(struct sk_buff *skb, net_warn_ratelimited("%s: received unknown dispatch\n", __func__); - /* fall-through */ + fallthrough; default: /* all others failure */ return NET_RX_DROP; diff --git a/net/ieee802154/6lowpan/rx.c b/net/ieee802154/6lowpan/rx.c index b34d050c9687..517e6493f5d1 100644 --- a/net/ieee802154/6lowpan/rx.c +++ b/net/ieee802154/6lowpan/rx.c @@ -35,11 +35,11 @@ static int lowpan_rx_handlers_result(struct sk_buff *skb, lowpan_rx_result res) net_warn_ratelimited("%s: received unknown dispatch\n", __func__); - /* fall-through */ + fallthrough; case RX_DROP_UNUSABLE: kfree_skb(skb); - /* fall-through */ + fallthrough; case RX_DROP: return NET_RX_DROP; case RX_QUEUED: diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 6ee9851ac7c6..a95af62acb52 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -418,7 +418,7 @@ static void iucv_sock_close(struct sock *sk) sk->sk_state = IUCV_DISCONN; sk->sk_state_change(sk); } - /* fall through */ + fallthrough; case IUCV_DISCONN: sk->sk_state = IUCV_CLOSING; @@ -433,7 +433,7 @@ static void iucv_sock_close(struct sock *sk) iucv_sock_in_state(sk, IUCV_CLOSED, 0), timeo); } - /* fall through */ + fallthrough; case IUCV_CLOSING: sk->sk_state = IUCV_CLOSED; @@ -444,7 +444,7 @@ static void iucv_sock_close(struct sock *sk) skb_queue_purge(&iucv->send_skb_q); skb_queue_purge(&iucv->backlog_skb_q); - /* fall through */ + fallthrough; default: iucv_sever_path(sk, 1); @@ -2111,10 +2111,10 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, kfree_skb(skb); break; } - /* fall through - and receive non-zero length data */ + fallthrough; /* and receive non-zero length data */ case (AF_IUCV_FLAG_SHT): /* shutdown request */ - /* fall through - and receive zero length data */ + fallthrough; /* and receive zero length data */ case 0: /* plain data frame */ IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class; diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index 6fdd0c9f865a..f2868a8a50c3 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -1516,7 +1516,7 @@ static void mpls_ifdown(struct net_device *dev, int event) case NETDEV_DOWN: case NETDEV_UNREGISTER: nh_flags |= RTNH_F_DEAD; - /* fall through */ + fallthrough; case NETDEV_CHANGE: nh_flags |= RTNH_F_LINKDOWN; break; diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 
1aad411a0e46..49b815023986 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -193,7 +193,6 @@ static void mptcp_check_data_fin_ack(struct sock *sk) sk->sk_state_change(sk); break; case TCP_CLOSING: - fallthrough; case TCP_LAST_ACK: inet_sk_state_store(sk, TCP_CLOSE); sk->sk_state_change(sk); @@ -1541,7 +1540,7 @@ static void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how) case TCP_LISTEN: if (!(how & RCV_SHUTDOWN)) break; - /* fall through */ + fallthrough; case TCP_SYN_SENT: tcp_disconnect(ssk, O_NONBLOCK); break; diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c index 1f387be7827b..f1be3e3f6425 100644 --- a/net/ncsi/ncsi-manage.c +++ b/net/ncsi/ncsi-manage.c @@ -474,7 +474,7 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp) switch (nd->state) { case ncsi_dev_state_suspend: nd->state = ncsi_dev_state_suspend_select; - /* Fall through */ + fallthrough; case ncsi_dev_state_suspend_select: ndp->pending_req_num = 1; @@ -1302,7 +1302,7 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp) switch (nd->state) { case ncsi_dev_state_probe: nd->state = ncsi_dev_state_probe_deselect; - /* Fall through */ + fallthrough; case ncsi_dev_state_probe_deselect: ndp->pending_req_num = 8; diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c index 32b028853a7c..dc2e7da2742a 100644 --- a/net/netfilter/ipvs/ip_vs_proto_tcp.c +++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c @@ -315,7 +315,7 @@ tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp) switch (skb->ip_summed) { case CHECKSUM_NONE: skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); - /* fall through */ + fallthrough; case CHECKSUM_COMPLETE: #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) { diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c index 153d89647c87..68260d91c988 100644 --- a/net/netfilter/ipvs/ip_vs_proto_udp.c +++ b/net/netfilter/ipvs/ip_vs_proto_udp.c @@ -318,7 +318,7 @@ udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp) case CHECKSUM_NONE: skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0); - /* fall through */ + fallthrough; case CHECKSUM_COMPLETE: #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) { diff --git a/net/netlink/policy.c b/net/netlink/policy.c index 2b3e26f7496f..641ffbdd977a 100644 --- a/net/netlink/policy.c +++ b/net/netlink/policy.c @@ -188,7 +188,7 @@ send_attribute: goto next; case NLA_NESTED: type = NL_ATTR_TYPE_NESTED; - /* fall through */ + fallthrough; case NLA_NESTED_ARRAY: if (pt->type == NLA_NESTED_ARRAY) type = NL_ATTR_TYPE_NESTED_ARRAY; diff --git a/net/netrom/nr_in.c b/net/netrom/nr_in.c index 2bef3779f893..69e58906c32b 100644 --- a/net/netrom/nr_in.c +++ b/net/netrom/nr_in.c @@ -122,7 +122,7 @@ static int nr_state2_machine(struct sock *sk, struct sk_buff *skb, case NR_DISCREQ: nr_write_internal(sk, NR_DISCACK); - /* fall through */ + fallthrough; case NR_DISCACK: nr_disconnect(sk, 0); break; diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c index 0891ee02ca4f..78da5eab252a 100644 --- a/net/netrom/nr_route.c +++ b/net/netrom/nr_route.c @@ -263,7 +263,7 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic, case 3: re_sort_routes(nr_node, 0, 1); re_sort_routes(nr_node, 1, 2); - /* fall through */ + fallthrough; case 2: re_sort_routes(nr_node, 0, 1); case 1: @@ -356,7 +356,7 @@ static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct n switch (i) { case 0: nr_node->routes[0] = 
nr_node->routes[1]; - /* fall through */ + fallthrough; case 1: nr_node->routes[1] = nr_node->routes[2]; case 2: @@ -479,7 +479,7 @@ static int nr_dec_obs(void) switch (i) { case 0: s->routes[0] = s->routes[1]; - /* Fallthrough */ + fallthrough; case 1: s->routes[1] = s->routes[2]; case 2: @@ -526,7 +526,7 @@ void nr_rt_device_down(struct net_device *dev) switch (i) { case 0: t->routes[0] = t->routes[1]; - /* fall through */ + fallthrough; case 1: t->routes[1] = t->routes[2]; case 2: diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 98d393e70de3..a3f1204f1ed2 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -778,7 +778,7 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct, } } /* Non-ICMP, fall thru to initialize if needed. */ - /* fall through */ + fallthrough; case IP_CT_NEW: /* Seen it before? This can happen for loopback, retrans, * or local packets. @@ -1540,7 +1540,7 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info, switch (type) { case OVS_CT_ATTR_FORCE_COMMIT: info->force = true; - /* fall through. */ + fallthrough; case OVS_CT_ATTR_COMMIT: info->commit = true; break; diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 03942c30d83e..b03d142ec82e 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -675,7 +675,7 @@ static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key) case -EINVAL: memset(&key->ip, 0, sizeof(key->ip)); memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr)); - /* fall-through */ + fallthrough; case -EPROTO: skb->transport_header = skb->network_header; error = 0; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 479c257ded73..da8254e680f9 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -4061,7 +4061,7 @@ static int packet_notifier(struct notifier_block *this, case NETDEV_UNREGISTER: if (po->mclist) packet_dev_mclist_delete(dev, &po->mclist); - /* fallthrough */ + fallthrough; case NETDEV_DOWN: if (dev->ifindex == po->ifindex) { diff --git a/net/phonet/pep.c b/net/phonet/pep.c index e47d09aca4af..a1525916885a 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c @@ -368,7 +368,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) err = -EINVAL; goto out; } - /* fall through */ + fallthrough; case PNS_PEP_DISABLE_REQ: atomic_set(&pn->tx_credits, 0); pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC); @@ -385,7 +385,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) case PNS_PIPE_ALIGNED_DATA: __skb_pull(skb, 1); - /* fall through */ + fallthrough; case PNS_PIPE_DATA: __skb_pull(skb, 3); /* Pipe data header */ if (!pn_flow_safe(pn->rx_fc)) { @@ -417,11 +417,11 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) err = pipe_rcv_created(sk, skb); if (err) break; - /* fall through */ + fallthrough; case PNS_PIPE_RESET_IND: if (!pn->init_enable) break; - /* fall through */ + fallthrough; case PNS_PIPE_ENABLED_IND: if (!pn_flow_safe(pn->tx_fc)) { atomic_set(&pn->tx_credits, 1); @@ -555,7 +555,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb) switch (hdr->message_id) { case PNS_PIPE_ALIGNED_DATA: __skb_pull(skb, 1); - /* fall through */ + fallthrough; case PNS_PIPE_DATA: __skb_pull(skb, 3); /* Pipe data header */ if (!pn_flow_safe(pn->rx_fc)) { diff --git a/net/rds/send.c b/net/rds/send.c index 9a529a01cdc6..985d0b7713ac 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -934,7 +934,7 @@ static int rds_rm_size(struct 
msghdr *msg, int num_sgs, case RDS_CMSG_ZCOPY_COOKIE: zcopy_cookie = true; - /* fall through */ + fallthrough; case RDS_CMSG_RDMA_DEST: case RDS_CMSG_RDMA_MAP: diff --git a/net/rose/rose_in.c b/net/rose/rose_in.c index 0d4fab2be82b..6af786d66b03 100644 --- a/net/rose/rose_in.c +++ b/net/rose/rose_in.c @@ -216,7 +216,7 @@ static int rose_state4_machine(struct sock *sk, struct sk_buff *skb, int framety switch (frametype) { case ROSE_RESET_REQUEST: rose_write_internal(sk, ROSE_RESET_CONFIRMATION); - /* fall through */ + fallthrough; case ROSE_RESET_CONFIRMATION: rose_stop_timer(sk); rose_start_idletimer(sk); diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c index 5277631fa14c..6e35703ff353 100644 --- a/net/rose/rose_route.c +++ b/net/rose/rose_route.c @@ -343,7 +343,7 @@ static int rose_del_node(struct rose_route_struct *rose_route, case 0: rose_node->neighbour[0] = rose_node->neighbour[1]; - /* fall through */ + fallthrough; case 1: rose_node->neighbour[1] = rose_node->neighbour[2]; @@ -505,7 +505,7 @@ void rose_rt_device_down(struct net_device *dev) switch (i) { case 0: t->neighbour[0] = t->neighbour[1]; - /* fall through */ + fallthrough; case 1: t->neighbour[1] = t->neighbour[2]; case 2: diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index e6725a6de015..186c8a889b16 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -246,7 +246,7 @@ static int rxrpc_listen(struct socket *sock, int backlog) ret = 0; break; } - /* Fall through */ + fallthrough; default: ret = -EBUSY; break; @@ -545,7 +545,7 @@ static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len) rx->local = local; rx->sk.sk_state = RXRPC_CLIENT_BOUND; - /* Fall through */ + fallthrough; case RXRPC_CLIENT_BOUND: if (!m->msg_name && @@ -553,7 +553,7 @@ static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len) m->msg_name = &rx->connect_srx; m->msg_namelen = sizeof(rx->connect_srx); } - /* Fall through */ + fallthrough; case RXRPC_SERVER_BOUND: case RXRPC_SERVER_LISTENING: ret = rxrpc_do_sendmsg(rx, m, len); diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index 032ed76c0166..ef160566aa9a 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -622,7 +622,7 @@ int rxrpc_reject_call(struct rxrpc_sock *rx) case RXRPC_CALL_SERVER_ACCEPTING: __rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED); abort = true; - /* fall through */ + fallthrough; case RXRPC_CALL_COMPLETE: ret = call->error; goto out_discard; diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index f2a1a5dbb5a7..159e3eda7914 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c @@ -881,7 +881,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call) conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE; rxrpc_activate_channels_locked(conn); } - /* fall through */ + fallthrough; case RXRPC_CONN_CLIENT_ACTIVE: if (list_empty(&conn->waiting_calls)) { rxrpc_deactivate_one_channel(conn, channel); diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 767579328a06..fbde8b824e23 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -1084,7 +1084,7 @@ static void rxrpc_input_implicit_end_call(struct rxrpc_sock *rx, switch (READ_ONCE(call->state)) { case RXRPC_CALL_SERVER_AWAIT_ACK: rxrpc_call_completed(call); - /* Fall through */ + fallthrough; case RXRPC_CALL_COMPLETE: break; default: @@ -1243,12 +1243,12 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) case RXRPC_PACKET_TYPE_BUSY: if (rxrpc_to_server(sp)) goto discard; - /* Fall 
through */ + fallthrough; case RXRPC_PACKET_TYPE_ACK: case RXRPC_PACKET_TYPE_ACKALL: if (sp->hdr.callNumber == 0) goto bad_message; - /* Fall through */ + fallthrough; case RXRPC_PACKET_TYPE_ABORT: break; diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index c8b2097f499c..ede058f9cc15 100644 --- a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c @@ -162,7 +162,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) /* Fall through and set IPv4 options too otherwise we don't get * errors from IPv4 packets sent through the IPv6 socket. */ - /* Fall through */ + fallthrough; case AF_INET: /* we want to receive ICMP errors */ ip_sock_set_recverr(local->socket->sk); diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c index a852f46d5234..be032850ae8c 100644 --- a/net/rxrpc/peer_event.c +++ b/net/rxrpc/peer_event.c @@ -273,7 +273,7 @@ static void rxrpc_store_error(struct rxrpc_peer *peer, case SO_EE_ORIGIN_ICMP6: if (err == EACCES) err = EHOSTUNREACH; - /* Fall through */ + fallthrough; default: _proto("Rx Received error report { orig=%u }", ee->ee_origin); break; diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index efecc5a8f67d..c4684dde1f16 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -776,7 +776,7 @@ out: case RXRPC_ACK_DELAY: if (ret != -EAGAIN) break; - /* Fall through */ + fallthrough; default: rxrpc_send_ack_packet(call, false, NULL); } diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index f3f6da6e4ad2..0824e103d037 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -241,7 +241,7 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now); if (!last) break; - /* Fall through */ + fallthrough; case RXRPC_CALL_SERVER_SEND_REPLY: call->state = RXRPC_CALL_SERVER_AWAIT_ACK; rxrpc_notify_end_tx(rx, call, notify_end_tx); @@ -721,13 +721,13 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) if (p.call.timeouts.normal > 0 && j == 0) j = 1; WRITE_ONCE(call->next_rx_timo, j); - /* Fall through */ + fallthrough; case 2: j = msecs_to_jiffies(p.call.timeouts.idle); if (p.call.timeouts.idle > 0 && j == 0) j = 1; WRITE_ONCE(call->next_req_timo, j); - /* Fall through */ + fallthrough; case 1: if (p.call.timeouts.hard > 0) { j = msecs_to_jiffies(p.call.timeouts.hard); diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 0618b63f87c4..7d37638ee1c7 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -1670,7 +1670,7 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t, case TC_ACT_QUEUED: case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; - /* fall through */ + fallthrough; case TC_ACT_SHOT: return 0; } diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index aea2a982984d..8a58f42d6d19 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -875,7 +875,7 @@ static int sctp_inet6_af_supported(sa_family_t family, struct sctp_sock *sp) case AF_INET: if (!__ipv6_only_sock(sctp_opt2sk(sp))) return 1; - /* fallthru */ + fallthrough; default: return 0; } diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 577e3bc4ee6f..3fd06a27105d 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -912,7 +912,7 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx) case SCTP_CID_ABORT: if (sctp_test_T_bit(chunk)) ctx->packet->vtag = ctx->asoc->c.my_vtag; - /* fallthru */ + fallthrough; /* The following chunks are "response" chunks, i.e. 
* they are generated in response to something we @@ -927,7 +927,7 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx) case SCTP_CID_ECN_CWR: case SCTP_CID_ASCONF_ACK: one_packet = 1; - /* Fall through */ + fallthrough; case SCTP_CID_SACK: case SCTP_CID_HEARTBEAT: @@ -1030,7 +1030,7 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx, if (!ctx->packet || !ctx->packet->has_cookie_echo) return; - /* fall through */ + fallthrough; case SCTP_STATE_ESTABLISHED: case SCTP_STATE_SHUTDOWN_PENDING: case SCTP_STATE_SHUTDOWN_RECEIVED: diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 47910470e532..c11c24524652 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -2077,7 +2077,7 @@ static enum sctp_ierror sctp_process_unk_param( break; case SCTP_PARAM_ACTION_DISCARD_ERR: retval = SCTP_IERROR_ERROR; - /* Fall through */ + fallthrough; case SCTP_PARAM_ACTION_SKIP_ERR: /* Make an ERROR chunk, preparing enough room for * returning multiple unknown parameters. diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 9f36fe911d08..aa821e71f05e 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -1516,7 +1516,7 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type, if (timer_pending(timer)) break; - /* fall through */ + fallthrough; case SCTP_CMD_TIMER_START: timer = &asoc->timers[cmd->obj.to]; diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index e86620fbd90f..c669f8bd1eab 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -4315,7 +4315,7 @@ enum sctp_disposition sctp_sf_eat_auth(struct net *net, sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(err_chunk)); } - /* Fall Through */ + fallthrough; case SCTP_IERROR_AUTH_BAD_KEYID: case SCTP_IERROR_BAD_SIG: return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c index 290270c821ca..3b5c374c6d2c 100644 --- a/net/smc/smc_close.c +++ b/net/smc/smc_close.c @@ -372,7 +372,7 @@ static void smc_close_passive_work(struct work_struct *work) case SMC_PEERCLOSEWAIT1: if (rxflags->peer_done_writing) sk->sk_state = SMC_PEERCLOSEWAIT2; - /* fall through */ + fallthrough; /* to check for closing */ case SMC_PEERCLOSEWAIT2: if (!smc_cdc_rxed_any_close(conn)) diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index 90b8329fef82..8b300b74a722 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c @@ -137,7 +137,7 @@ gss_krb5_make_confounder(char *p, u32 conflen) switch (conflen) { case 16: *q++ = i++; - /* fall through */ + fallthrough; case 8: *q++ = i++; break; diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index a91d1cdad9d7..62e0b6c1e8cf 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1702,7 +1702,7 @@ call_reserveresult(struct rpc_task *task) switch (status) { case -ENOMEM: rpc_delay(task, HZ >> 2); - /* fall through */ + fallthrough; case -EAGAIN: /* woken up; retry */ task->tk_action = call_retry_reserve; return; @@ -1759,13 +1759,13 @@ call_refreshresult(struct rpc_task *task) /* Use rate-limiting and a max number of retries if refresh * had status 0 but failed to update the cred. 
*/ - /* fall through */ + fallthrough; case -ETIMEDOUT: rpc_delay(task, 3*HZ); - /* fall through */ + fallthrough; case -EAGAIN: status = -EACCES; - /* fall through */ + fallthrough; case -EKEYEXPIRED: if (!task->tk_cred_retry) break; @@ -2132,7 +2132,7 @@ call_connect_status(struct rpc_task *task) rpc_force_rebind(clnt); goto out_retry; } - /* fall through */ + fallthrough; case -ECONNRESET: case -ECONNABORTED: case -ENETDOWN: @@ -2146,7 +2146,7 @@ call_connect_status(struct rpc_task *task) break; /* retry with existing socket, after a delay */ rpc_delay(task, 3*HZ); - /* fall through */ + fallthrough; case -EADDRINUSE: case -ENOTCONN: case -EAGAIN: @@ -2228,7 +2228,7 @@ call_transmit_status(struct rpc_task *task) */ case -ENOBUFS: rpc_delay(task, HZ>>2); - /* fall through */ + fallthrough; case -EBADSLT: case -EAGAIN: task->tk_action = call_transmit; @@ -2247,7 +2247,7 @@ call_transmit_status(struct rpc_task *task) rpc_call_rpcerror(task, task->tk_status); return; } - /* fall through */ + fallthrough; case -ECONNRESET: case -ECONNABORTED: case -EADDRINUSE: @@ -2313,7 +2313,7 @@ call_bc_transmit_status(struct rpc_task *task) break; case -ENOBUFS: rpc_delay(task, HZ>>2); - /* fall through */ + fallthrough; case -EBADSLT: case -EAGAIN: task->tk_status = 0; @@ -2380,7 +2380,7 @@ call_status(struct rpc_task *task) * were a timeout. */ rpc_delay(task, 3*HZ); - /* fall through */ + fallthrough; case -ETIMEDOUT: break; case -ECONNREFUSED: @@ -2391,7 +2391,7 @@ call_status(struct rpc_task *task) break; case -EADDRINUSE: rpc_delay(task, 3*HZ); - /* fall through */ + fallthrough; case -EPIPE: case -EAGAIN: break; diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 6ba9d5842629..5a8e47bbfb9f 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -1623,7 +1623,7 @@ void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task) case -EAGAIN: xprt_add_backlog(xprt, task); dprintk("RPC: waiting for request slot\n"); - /* fall through */ + fallthrough; default: task->tk_status = -EAGAIN; } diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 75c646743df3..3f86d039875c 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -268,7 +268,7 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event) case RDMA_CM_EVENT_DEVICE_REMOVAL: pr_info("rpcrdma: removing device %s for %pISpc\n", ep->re_id->device->name, sap); - /* fall through */ + fallthrough; case RDMA_CM_EVENT_ADDR_CHANGE: ep->re_connect_status = -ENODEV; goto disconnected; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index c57aef829403..554e1bb4c1c7 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -885,7 +885,7 @@ static int xs_local_send_request(struct rpc_rqst *req) default: dprintk("RPC: sendmsg returned unrecognized error %d\n", -status); - /* fall through */ + fallthrough; case -EPIPE: xs_close(xprt); status = -ENOTCONN; @@ -1436,7 +1436,7 @@ static void xs_tcp_state_change(struct sock *sk) xprt->connect_cookie++; clear_bit(XPRT_CONNECTED, &xprt->state); xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT); - /* fall through */ + fallthrough; case TCP_CLOSING: /* * If the server closed down the connection, make sure that @@ -2202,7 +2202,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) switch (ret) { case 0: xs_set_srcport(transport, sock); - /* fall through */ + fallthrough; case -EINPROGRESS: /* SYN_SENT! 
*/ if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) @@ -2255,7 +2255,7 @@ static void xs_tcp_setup_socket(struct work_struct *work) default: printk("%s: connect returned unhandled error %d\n", __func__, status); - /* fall through */ + fallthrough; case -EADDRNOTAVAIL: /* We're probably in TIME_WAIT. Get rid of existing socket, * and retry diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 808b147df7d5..650414110452 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -652,7 +652,7 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt, test_and_set_bit_lock(0, &b->up); break; } - /* fall through */ + fallthrough; case NETDEV_GOING_DOWN: clear_bit_unlock(0, &b->up); tipc_reset_bearer(net, b); diff --git a/net/tipc/group.c b/net/tipc/group.c index 89257e2a980d..588c2d2b0c69 100644 --- a/net/tipc/group.c +++ b/net/tipc/group.c @@ -536,7 +536,7 @@ void tipc_group_filter_msg(struct tipc_group *grp, struct sk_buff_head *inputq, update = true; deliver = false; } - /* Fall thru */ + fallthrough; case TIPC_GRP_BCAST_MSG: m->bc_rcv_nxt++; ack = msg_grp_bc_ack_req(hdr); diff --git a/net/tipc/link.c b/net/tipc/link.c index 107578122973..b7362556da95 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1239,7 +1239,7 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb, skb_queue_tail(mc_inputq, skb); return true; } - /* fall through */ + fallthrough; case CONN_MANAGER: skb_queue_tail(inputq, skb); return true; diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 07419f36116a..2679e97e0389 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -783,7 +783,7 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock, case TIPC_ESTABLISHED: if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk)) revents |= EPOLLOUT; - /* fall through */ + fallthrough; case TIPC_LISTEN: case TIPC_CONNECTING: if (!skb_queue_empty_lockless(&sk->sk_receive_queue)) @@ -2597,7 +2597,7 @@ static int tipc_connect(struct socket *sock, struct sockaddr *dest, * case is EINPROGRESS, rather than EALREADY. 
*/ res = -EINPROGRESS; - /* fall through */ + fallthrough; case TIPC_CONNECTING: if (!timeout) { if (previous == TIPC_CONNECTING) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 181ea6fb56a6..92784e51ee7d 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -837,7 +837,7 @@ static int unix_create(struct net *net, struct socket *sock, int protocol, */ case SOCK_RAW: sock->type = SOCK_DGRAM; - /* fall through */ + fallthrough; case SOCK_DGRAM: sock->ops = &unix_dgram_ops; break; diff --git a/net/wireless/chan.c b/net/wireless/chan.c index 90f0f82cd9ca..e97a4f0c32a3 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -957,7 +957,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy, if (!ht_cap->ht_supported && chandef->chan->band != NL80211_BAND_6GHZ) return false; - /* fall through */ + fallthrough; case NL80211_CHAN_WIDTH_20_NOHT: prohibited_flags |= IEEE80211_CHAN_NO_20MHZ; width = 20; @@ -983,7 +983,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy, if (chandef->chan->band != NL80211_BAND_6GHZ && cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) return false; - /* fall through */ + fallthrough; case NL80211_CHAN_WIDTH_80: prohibited_flags |= IEEE80211_CHAN_NO_80MHZ; width = 80; diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index a6c61a2e6569..db7333e20dd7 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -941,7 +941,7 @@ void cfg80211_cac_event(struct net_device *netdev, sizeof(struct cfg80211_chan_def)); queue_work(cfg80211_wq, &rdev->propagate_cac_done_wk); cfg80211_sched_dfs_chan_update(rdev); - /* fall through */ + fallthrough; case NL80211_RADAR_CAC_ABORTED: wdev->cac_started = false; break; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index c04fc6cf6583..fde420af3f00 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -2107,7 +2107,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, state->split_start++; if (state->split) break; - /* fall through */ + fallthrough; case 1: if (nla_put(msg, NL80211_ATTR_CIPHER_SUITES, sizeof(u32) * rdev->wiphy.n_cipher_suites, @@ -2154,7 +2154,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, state->split_start++; if (state->split) break; - /* fall through */ + fallthrough; case 2: if (nl80211_put_iftypes(msg, NL80211_ATTR_SUPPORTED_IFTYPES, rdev->wiphy.interface_modes)) @@ -2162,7 +2162,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, state->split_start++; if (state->split) break; - /* fall through */ + fallthrough; case 3: nl_bands = nla_nest_start_noflag(msg, NL80211_ATTR_WIPHY_BANDS); @@ -2189,7 +2189,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, state->chan_start++; if (state->split) break; - /* fall through */ + fallthrough; default: /* add frequencies */ nl_freqs = nla_nest_start_noflag(msg, @@ -2244,7 +2244,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, state->split_start++; if (state->split) break; - /* fall through */ + fallthrough; case 4: nl_cmds = nla_nest_start_noflag(msg, NL80211_ATTR_SUPPORTED_COMMANDS); @@ -2273,7 +2273,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, state->split_start++; if (state->split) break; - /* fall through */ + fallthrough; case 5: if (rdev->ops->remain_on_channel && (rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) && @@ -2291,7 +2291,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, state->split_start++; if (state->split) 
break; - /* fall through */ + fallthrough; case 6: #ifdef CONFIG_PM if (nl80211_send_wowlan(msg, rdev, state->split)) @@ -2302,7 +2302,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, #else state->split_start++; #endif - /* fall through */ + fallthrough; case 7: if (nl80211_put_iftypes(msg, NL80211_ATTR_SOFTWARE_IFTYPES, rdev->wiphy.software_iftypes)) @@ -2315,7 +2315,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, state->split_start++; if (state->split) break; - /* fall through */ + fallthrough; case 8: if ((rdev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) && nla_put_u32(msg, NL80211_ATTR_DEVICE_AP_SME, @@ -5207,7 +5207,7 @@ bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info, int attr) break; default: WARN_ON(1); - /* fall through */ + fallthrough; case RATE_INFO_BW_20: rate_flg = 0; break; diff --git a/net/wireless/scan.c b/net/wireless/scan.c index e67a74488bbe..04f2d198c215 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -1433,7 +1433,7 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy, switch (ftype) { case CFG80211_BSS_FTYPE_BEACON: ies->from_beacon = true; - /* fall through */ + fallthrough; case CFG80211_BSS_FTYPE_UNKNOWN: rcu_assign_pointer(tmp.pub.beacon_ies, ies); break; diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 985f3c23f054..079ce320dc1e 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -205,7 +205,7 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev, return err; case CFG80211_CONN_ASSOC_FAILED_TIMEOUT: *treason = NL80211_TIMEOUT_ASSOC; - /* fall through */ + fallthrough; case CFG80211_CONN_ASSOC_FAILED: cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, NULL, 0, @@ -215,7 +215,7 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev, cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, NULL, 0, WLAN_REASON_DEAUTH_LEAVING, false); - /* fall through */ + fallthrough; case CFG80211_CONN_ABANDON: /* free directly, disconnected event already sent */ cfg80211_sme_free(wdev); diff --git a/net/wireless/util.c b/net/wireless/util.c index dfad1c0f57ad..7c5d5365a5eb 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -198,7 +198,7 @@ static void set_mandatory_flags_band(struct ieee80211_supported_band *sband) sband->bitrates[i].flags |= IEEE80211_RATE_MANDATORY_G; want--; - /* fall through */ + fallthrough; default: sband->bitrates[i].flags |= IEEE80211_RATE_ERP_G; @@ -1008,7 +1008,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, case NL80211_IFTYPE_STATION: if (dev->ieee80211_ptr->use_4addr) break; - /* fall through */ + fallthrough; case NL80211_IFTYPE_OCB: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_ADHOC: diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index aa918d7ff6bd..4d2160c989a3 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c @@ -1334,7 +1334,7 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev) wstats.qual.qual = sig + 110; break; } - /* fall through */ + fallthrough; case CFG80211_SIGNAL_TYPE_UNSPEC: if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_SIGNAL)) { wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED; @@ -1343,7 +1343,7 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev) wstats.qual.qual = sinfo.signal; break; } - /* fall through */ + fallthrough; default: wstats.qual.updated |= IW_QUAL_LEVEL_INVALID; wstats.qual.updated |= IW_QUAL_QUAL_INVALID; diff --git a/net/x25/x25_facilities.c 
b/net/x25/x25_facilities.c index 7fb327632272..8e1a49b0c0dc 100644 --- a/net/x25/x25_facilities.c +++ b/net/x25/x25_facilities.c @@ -98,7 +98,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, *vc_fac_mask |= X25_MASK_REVERSE; break; } - /*fall through */ + fallthrough; case X25_FAC_THROUGHPUT: facilities->throughput = p[1]; *vc_fac_mask |= X25_MASK_THROUGHPUT; diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c index 4d3bb46aaae0..e1c4197af468 100644 --- a/net/x25/x25_in.c +++ b/net/x25/x25_in.c @@ -349,7 +349,7 @@ static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametyp case X25_RESET_REQUEST: x25_write_internal(sk, X25_RESET_CONFIRMATION); - /* fall through */ + fallthrough; case X25_RESET_CONFIRMATION: { x25_stop_timer(sk); x25->condition = 0x00; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index d5280fd6f9c1..d622c2548d22 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -3410,7 +3410,7 @@ decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse) switch (nexthdr) { case NEXTHDR_FRAGMENT: onlyproto = 1; - /* fall through */ + fallthrough; case NEXTHDR_ROUTING: case NEXTHDR_HOP: case NEXTHDR_DEST: diff --git a/samples/bpf/hbm.c b/samples/bpf/hbm.c index 7d7153777678..4b22ace52f80 100644 --- a/samples/bpf/hbm.c +++ b/samples/bpf/hbm.c @@ -483,7 +483,7 @@ int main(int argc, char **argv) "Option -%c requires an argument.\n\n", optopt); case 'h': - // fallthrough + fallthrough; default: Usage(); return 0; diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c index 7b0e13ce7dc7..f919ebd042fd 100644 --- a/security/apparmor/domain.c +++ b/security/apparmor/domain.c @@ -577,7 +577,7 @@ static struct aa_label *x_to_label(struct aa_profile *profile, stack = NULL; break; } - /* fall through - to X_NAME */ + fallthrough; /* to X_NAME */ case AA_X_NAME: if (xindex & AA_X_CHILD) /* released by caller */ diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c index 30c246a9d440..fa49b81eb54c 100644 --- a/security/apparmor/lib.c +++ b/security/apparmor/lib.c @@ -292,13 +292,13 @@ void aa_apply_modes_to_perms(struct aa_profile *profile, struct aa_perms *perms) switch (AUDIT_MODE(profile)) { case AUDIT_ALL: perms->audit = ALL_PERMS_MASK; - /* fall through */ + fallthrough; case AUDIT_NOQUIET: perms->quiet = 0; break; case AUDIT_QUIET: perms->audit = 0; - /* fall through */ + fallthrough; case AUDIT_QUIET_DENIED: perms->quiet = ALL_PERMS_MASK; break; diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c index 372d16382960..b8848f53c8cc 100644 --- a/security/integrity/ima/ima_appraise.c +++ b/security/integrity/ima/ima_appraise.c @@ -223,7 +223,7 @@ static int xattr_verify(enum ima_hooks func, struct integrity_iint_cache *iint, case IMA_XATTR_DIGEST_NG: /* first byte contains algorithm id */ hash_start = 1; - /* fall through */ + fallthrough; case IMA_XATTR_DIGEST: if (iint->flags & IMA_DIGSIG_REQUIRED) { *cause = "IMA-signature-required"; @@ -395,7 +395,7 @@ int ima_appraise_measurement(enum ima_hooks func, /* It's fine not to have xattrs when using a modsig. */ if (try_modsig) break; - /* fall through */ + fallthrough; case INTEGRITY_NOLABEL: /* No security.evm xattr. 
*/ cause = "missing-HMAC"; goto out; diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index 07f033634b27..b4de33074b37 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c @@ -1279,12 +1279,12 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) case Opt_uid_gt: case Opt_euid_gt: entry->uid_op = &uid_gt; - /* fall through */ + fallthrough; case Opt_uid_lt: case Opt_euid_lt: if ((token == Opt_uid_lt) || (token == Opt_euid_lt)) entry->uid_op = &uid_lt; - /* fall through */ + fallthrough; case Opt_uid_eq: case Opt_euid_eq: uid_token = (token == Opt_uid_eq) || @@ -1313,11 +1313,11 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) break; case Opt_fowner_gt: entry->fowner_op = &uid_gt; - /* fall through */ + fallthrough; case Opt_fowner_lt: if (token == Opt_fowner_lt) entry->fowner_op = &uid_lt; - /* fall through */ + fallthrough; case Opt_fowner_eq: ima_log_string_op(ab, "fowner", args[0].from, entry->fowner_op); diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c index 41a5f435b793..c022ee9e2a4e 100644 --- a/security/integrity/ima/ima_template_lib.c +++ b/security/integrity/ima/ima_template_lib.c @@ -77,7 +77,7 @@ static void ima_show_template_data_ascii(struct seq_file *m, /* skip ':' and '\0' */ buf_ptr += 2; buflen -= buf_ptr - field_data->data; - /* fall through */ + fallthrough; case DATA_FMT_DIGEST: case DATA_FMT_HEX: if (!buflen) diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c index 7e0232db1707..1fe8b934f656 100644 --- a/security/keys/process_keys.c +++ b/security/keys/process_keys.c @@ -465,7 +465,7 @@ key_ref_t search_cred_keyrings_rcu(struct keyring_search_context *ctx) case -EAGAIN: /* no key */ if (ret) break; - /* fall through */ + fallthrough; case -ENOKEY: /* negative key */ ret = key_ref; break; @@ -487,7 +487,7 @@ key_ref_t search_cred_keyrings_rcu(struct keyring_search_context *ctx) case -EAGAIN: /* no key */ if (ret) break; - /* fall through */ + fallthrough; case -ENOKEY: /* negative key */ ret = key_ref; break; @@ -509,7 +509,7 @@ key_ref_t search_cred_keyrings_rcu(struct keyring_search_context *ctx) case -EAGAIN: /* no key */ if (ret) break; - /* fall through */ + fallthrough; case -ENOKEY: /* negative key */ ret = key_ref; break; diff --git a/security/keys/request_key.c b/security/keys/request_key.c index e1b9f1a80676..2da4404276f0 100644 --- a/security/keys/request_key.c +++ b/security/keys/request_key.c @@ -295,26 +295,26 @@ static int construct_get_dest_keyring(struct key **_dest_keyring) } } - /* fall through */ + fallthrough; case KEY_REQKEY_DEFL_THREAD_KEYRING: dest_keyring = key_get(cred->thread_keyring); if (dest_keyring) break; - /* fall through */ + fallthrough; case KEY_REQKEY_DEFL_PROCESS_KEYRING: dest_keyring = key_get(cred->process_keyring); if (dest_keyring) break; - /* fall through */ + fallthrough; case KEY_REQKEY_DEFL_SESSION_KEYRING: dest_keyring = key_get(cred->session_keyring); if (dest_keyring) break; - /* fall through */ + fallthrough; case KEY_REQKEY_DEFL_USER_SESSION_KEYRING: ret = look_up_user_keyrings(NULL, &dest_keyring); if (ret < 0) diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index ca901025802a..a340986aa92e 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -3606,26 +3606,20 @@ static int selinux_file_ioctl(struct file *file, unsigned int cmd, switch (cmd) { case FIONREAD: - /* fall through */ case FIBMAP: - /* 
fall through */ case FIGETBSZ: - /* fall through */ case FS_IOC_GETFLAGS: - /* fall through */ case FS_IOC_GETVERSION: error = file_has_perm(cred, file, FILE__GETATTR); break; case FS_IOC_SETFLAGS: - /* fall through */ case FS_IOC_SETVERSION: error = file_has_perm(cred, file, FILE__SETATTR); break; /* sys_ioctl() checks */ case FIONBIO: - /* fall through */ case FIOASYNC: error = file_has_perm(cred, file, 0); break; @@ -3783,7 +3777,7 @@ static int selinux_file_fcntl(struct file *file, unsigned int cmd, err = file_has_perm(cred, file, FILE__WRITE); break; } - /* fall through */ + fallthrough; case F_SETOWN: case F_SETSIG: case F_GETFL: diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c index 408d306895f8..d338962fb0c4 100644 --- a/security/selinux/ss/mls.c +++ b/security/selinux/ss/mls.c @@ -535,7 +535,7 @@ int mls_compute_sid(struct policydb *p, scontext, tcontext); } - /* Fallthrough */ + fallthrough; case AVTAB_CHANGE: if ((tclass == p->process_class) || sock) /* Use the process MLS attributes. */ @@ -546,8 +546,6 @@ int mls_compute_sid(struct policydb *p, case AVTAB_MEMBER: /* Use the process effective MLS attributes. */ return mls_context_cpy_low(newcontext, scontext); - - /* fall through */ } return -EINVAL; } diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 8ffbf951b7ed..8c0893eb5aa8 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -3365,7 +3365,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode) * to set mount options simulate setting the * superblock default. */ - /* Fall through */ + fallthrough; default: /* * This isn't an understood special case. diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index c16b8c1b03e7..4bee32bfe16d 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c @@ -1240,7 +1240,7 @@ static bool tomoyo_print_condition(struct tomoyo_io_buffer *head, tomoyo_set_space(head); tomoyo_set_string(head, cond->transit->name); } - /* fall through */ + fallthrough; case 1: { const u16 condc = cond->condc; @@ -1345,12 +1345,12 @@ static bool tomoyo_print_condition(struct tomoyo_io_buffer *head, } } head->r.cond_step++; - /* fall through */ + fallthrough; case 2: if (!tomoyo_flush(head)) break; head->r.cond_step++; - /* fall through */ + fallthrough; case 3: if (cond->grant_log != TOMOYO_GRANTLOG_AUTO) tomoyo_io_printf(head, " grant_log=%s", @@ -1639,7 +1639,7 @@ static void tomoyo_read_domain(struct tomoyo_io_buffer *head) tomoyo_set_string(head, tomoyo_dif[i]); head->r.index = 0; head->r.step++; - /* fall through */ + fallthrough; case 1: while (head->r.index < TOMOYO_MAX_ACL_GROUPS) { i = head->r.index++; @@ -1652,14 +1652,14 @@ static void tomoyo_read_domain(struct tomoyo_io_buffer *head) head->r.index = 0; head->r.step++; tomoyo_set_lf(head); - /* fall through */ + fallthrough; case 2: if (!tomoyo_read_domain2(head, &domain->acl_info_list)) return; head->r.step++; if (!tomoyo_set_lf(head)) return; - /* fall through */ + fallthrough; case 3: head->r.step = 0; if (head->r.print_this_domain_only) @@ -2088,7 +2088,7 @@ int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...) /* Check max_learning_entry parameter. 
*/ if (tomoyo_domain_quota_is_ok(r)) break; - /* fall through */ + fallthrough; default: return 0; } @@ -2710,13 +2710,13 @@ ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head, case TOMOYO_DOMAINPOLICY: if (tomoyo_select_domain(head, cp0)) continue; - /* fall through */ + fallthrough; case TOMOYO_EXCEPTIONPOLICY: if (!strcmp(cp0, "select transition_only")) { head->r.print_transition_related_only = true; continue; } - /* fall through */ + fallthrough; default: if (!tomoyo_manager()) { error = -EPERM; diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c index 86f7d1b90212..051f7297877c 100644 --- a/security/tomoyo/file.c +++ b/security/tomoyo/file.c @@ -927,7 +927,7 @@ int tomoyo_path2_perm(const u8 operation, const struct path *path1, case TOMOYO_TYPE_LINK: if (!d_is_dir(path1->dentry)) break; - /* fall through */ + fallthrough; case TOMOYO_TYPE_PIVOT_ROOT: tomoyo_add_slash(&buf1); tomoyo_add_slash(&buf2); diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c index b8161a08f2ca..58bb49fff184 100644 --- a/sound/ppc/snd_ps3.c +++ b/sound/ppc/snd_ps3.c @@ -227,14 +227,14 @@ static int snd_ps3_program_dma(struct snd_ps3_card_info *card, switch (filltype) { case SND_PS3_DMA_FILLTYPE_SILENT_FIRSTFILL: silent = 1; - /* intentionally fall thru */ + fallthrough; case SND_PS3_DMA_FILLTYPE_FIRSTFILL: ch0_kick_event = PS3_AUDIO_KICK_EVENT_ALWAYS; break; case SND_PS3_DMA_FILLTYPE_SILENT_RUNNING: silent = 1; - /* intentionally fall thru */ + fallthrough; case SND_PS3_DMA_FILLTYPE_RUNNING: ch0_kick_event = PS3_AUDIO_KICK_EVENT_SERIALOUT0_EMPTY; break; diff --git a/sound/soc/atmel/mchp-i2s-mcc.c b/sound/soc/atmel/mchp-i2s-mcc.c index 3cb63886195f..04acc18f2d72 100644 --- a/sound/soc/atmel/mchp-i2s-mcc.c +++ b/sound/soc/atmel/mchp-i2s-mcc.c @@ -536,7 +536,7 @@ static int mchp_i2s_mcc_hw_params(struct snd_pcm_substream *substream, /* cpu is BCLK master */ mrb |= MCHP_I2SMCC_MRB_CLKSEL_INT; set_divs = 1; - /* fall through */ + fallthrough; case SND_SOC_DAIFMT_CBM_CFM: /* cpu is slave */ mra |= MCHP_I2SMCC_MRA_MODE_SLAVE; diff --git a/sound/soc/codecs/jz4770.c b/sound/soc/codecs/jz4770.c index c0a28f06b09a..298689a07168 100644 --- a/sound/soc/codecs/jz4770.c +++ b/sound/soc/codecs/jz4770.c @@ -202,7 +202,7 @@ static int jz4770_codec_set_bias_level(struct snd_soc_component *codec, REG_CR_VIC_SB_SLEEP, REG_CR_VIC_SB_SLEEP); regmap_update_bits(regmap, JZ4770_CODEC_REG_CR_VIC, REG_CR_VIC_SB, REG_CR_VIC_SB); - /* fall-through */ + fallthrough; default: break; } diff --git a/sound/soc/codecs/pcm186x.c b/sound/soc/codecs/pcm186x.c index f0da55901dcb..b8845f45549e 100644 --- a/sound/soc/codecs/pcm186x.c +++ b/sound/soc/codecs/pcm186x.c @@ -401,7 +401,7 @@ static int pcm186x_set_fmt(struct snd_soc_dai *dai, unsigned int format) break; case SND_SOC_DAIFMT_DSP_A: priv->tdm_offset += 1; - /* fall through */ + fallthrough; /* DSP_A uses the same basic config as DSP_B * except we need to shift the TDM output by one BCK cycle */ diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index d8b9c6547142..404be27c15fe 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c @@ -898,7 +898,7 @@ static int _fsl_ssi_set_dai_fmt(struct fsl_ssi *ssi, unsigned int fmt) "missing baudclk for master mode\n"); return -EINVAL; } - /* fall through */ + fallthrough; case SND_SOC_DAIFMT_CBM_CFS: ssi->i2s_net |= SSI_SCR_I2S_MODE_MASTER; break; diff --git a/sound/soc/hisilicon/hi6210-i2s.c b/sound/soc/hisilicon/hi6210-i2s.c index fd5dcd6b9f85..907f5f1f7b44 100644 --- a/sound/soc/hisilicon/hi6210-i2s.c +++ 
b/sound/soc/hisilicon/hi6210-i2s.c @@ -261,13 +261,13 @@ static int hi6210_i2s_hw_params(struct snd_pcm_substream *substream, switch (params_format(params)) { case SNDRV_PCM_FORMAT_U16_LE: signed_data = HII2S_I2S_CFG__S2_CODEC_DATA_FORMAT; - /* fall through */ + fallthrough; case SNDRV_PCM_FORMAT_S16_LE: bits = HII2S_BITS_16; break; case SNDRV_PCM_FORMAT_U24_LE: signed_data = HII2S_I2S_CFG__S2_CODEC_DATA_FORMAT; - /* fall through */ + fallthrough; case SNDRV_PCM_FORMAT_S24_LE: bits = HII2S_BITS_24; break; diff --git a/sound/soc/intel/baytrail/sst-baytrail-pcm.c b/sound/soc/intel/baytrail/sst-baytrail-pcm.c index 54a66cc6db89..d2cda33b65d5 100644 --- a/sound/soc/intel/baytrail/sst-baytrail-pcm.c +++ b/sound/soc/intel/baytrail/sst-baytrail-pcm.c @@ -181,7 +181,7 @@ static int sst_byt_pcm_trigger(struct snd_soc_component *component, break; case SNDRV_PCM_TRIGGER_SUSPEND: pdata->restore_stream = false; - /* fallthrough */ + fallthrough; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: sst_byt_stream_pause(byt, pcm_data->stream); break; diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c index 414ae4bb5224..7ae34b49815c 100644 --- a/sound/soc/intel/boards/bytcht_es8316.c +++ b/sound/soc/intel/boards/bytcht_es8316.c @@ -573,7 +573,7 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev) break; default: dev_err(dev, "get speaker GPIO failed: %d\n", ret); - /* fall through */ + fallthrough; case -EPROBE_DEFER: return ret; } diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c index 4e2897596cea..688b5e0a49e3 100644 --- a/sound/soc/intel/boards/bytcr_rt5651.c +++ b/sound/soc/intel/boards/bytcr_rt5651.c @@ -1009,7 +1009,7 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev) default: dev_err(&pdev->dev, "Failed to get ext-amp-enable GPIO: %d\n", ret_val); - /* fall through */ + fallthrough; case -EPROBE_DEFER: put_device(codec_dev); return ret_val; @@ -1029,7 +1029,7 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev) default: dev_err(&pdev->dev, "Failed to get hp-detect GPIO: %d\n", ret_val); - /* fall through */ + fallthrough; case -EPROBE_DEFER: put_device(codec_dev); return ret_val; diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c index 5dee55e9546b..bbe8d782e0af 100644 --- a/sound/soc/intel/skylake/skl-pcm.c +++ b/sound/soc/intel/skylake/skl-pcm.c @@ -488,7 +488,7 @@ static int skl_pcm_trigger(struct snd_pcm_substream *substream, int cmd, stream->lpib); snd_hdac_ext_stream_set_lpib(stream, stream->lpib); } - /* fall through */ + fallthrough; case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: diff --git a/sound/soc/meson/axg-tdm-interface.c b/sound/soc/meson/axg-tdm-interface.c index 36df30915378..c8664ab80d45 100644 --- a/sound/soc/meson/axg-tdm-interface.c +++ b/sound/soc/meson/axg-tdm-interface.c @@ -58,17 +58,17 @@ int axg_tdm_set_tdm_slots(struct snd_soc_dai *dai, u32 *tx_mask, switch (slot_width) { case 0: slot_width = 32; - /* Fall-through */ + fallthrough; case 32: fmt |= SNDRV_PCM_FMTBIT_S32_LE; - /* Fall-through */ + fallthrough; case 24: fmt |= SNDRV_PCM_FMTBIT_S24_LE; fmt |= SNDRV_PCM_FMTBIT_S20_LE; - /* Fall-through */ + fallthrough; case 16: fmt |= SNDRV_PCM_FMTBIT_S16_LE; - /* Fall-through */ + fallthrough; case 8: fmt |= SNDRV_PCM_FMTBIT_S8; break; @@ -133,7 +133,7 @@ static int axg_tdm_iface_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) case SND_SOC_DAIFMT_CBS_CFM: case SND_SOC_DAIFMT_CBM_CFS: dev_err(dai->dev, 
"only CBS_CFS and CBM_CFM are supported\n"); - /* Fall-through */ + fallthrough; default: return -EINVAL; } diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c index d1e09ade0190..c4e7307a4437 100644 --- a/sound/soc/pxa/pxa-ssp.c +++ b/sound/soc/pxa/pxa-ssp.c @@ -488,7 +488,7 @@ static int pxa_ssp_configure_dai_fmt(struct ssp_priv *priv) case SND_SOC_DAIFMT_DSP_A: sspsp |= SSPSP_FSRT; - /* fall through */ + fallthrough; case SND_SOC_DAIFMT_DSP_B: sscr0 |= SSCR0_MOD | SSCR0_PSP; sscr1 |= SSCR1_TRAIL | SSCR1_RWOT; diff --git a/sound/soc/rockchip/rockchip_pdm.c b/sound/soc/rockchip/rockchip_pdm.c index 1707414cfa92..5adb293d0435 100644 --- a/sound/soc/rockchip/rockchip_pdm.c +++ b/sound/soc/rockchip/rockchip_pdm.c @@ -229,13 +229,13 @@ static int rockchip_pdm_hw_params(struct snd_pcm_substream *substream, switch (params_channels(params)) { case 8: val |= PDM_PATH3_EN; - /* fallthrough */ + fallthrough; case 6: val |= PDM_PATH2_EN; - /* fallthrough */ + fallthrough; case 4: val |= PDM_PATH1_EN; - /* fallthrough */ + fallthrough; case 2: val |= PDM_PATH0_EN; break; diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c index 80ecb5c7fed0..df53d4ea808f 100644 --- a/sound/soc/samsung/i2s.c +++ b/sound/soc/samsung/i2s.c @@ -733,7 +733,7 @@ static int i2s_hw_params(struct snd_pcm_substream *substream, switch (params_channels(params)) { case 6: val |= MOD_DC2_EN; - /* Fall through */ + fallthrough; case 4: val |= MOD_DC1_EN; break; diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 2fe1b2ec7c8f..663e3839f251 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -618,7 +618,7 @@ int snd_soc_suspend(struct device *dev) "ASoC: idle_bias_off CODEC on over suspend\n"); break; } - /* fall through */ + fallthrough; case SND_SOC_BIAS_OFF: snd_soc_component_suspend(component); diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index cee998671318..5b60379237bf 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c @@ -1057,7 +1057,7 @@ static int soc_tplg_denum_create(struct soc_tplg *tplg, unsigned int count, ec->hdr.name); goto err_denum; } - /* fall through */ + fallthrough; case SND_SOC_TPLG_CTL_ENUM: case SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE: case SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT: @@ -1445,7 +1445,7 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_denum_create( ec->hdr.name); goto err_se; } - /* fall through */ + fallthrough; case SND_SOC_TPLG_CTL_ENUM: case SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE: case SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT: diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c index df1c6997cb4e..c6cb8c212eca 100644 --- a/sound/soc/sof/intel/hda-dai.c +++ b/sound/soc/sof/intel/hda-dai.c @@ -310,7 +310,7 @@ static int hda_link_pcm_trigger(struct snd_pcm_substream *substream, return ret; } - /* fallthrough */ + fallthrough; case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: snd_hdac_ext_link_stream_start(link_dev); @@ -333,7 +333,7 @@ static int hda_link_pcm_trigger(struct snd_pcm_substream *substream, link_dev->link_prepared = 0; - /* fallthrough */ + fallthrough; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: snd_hdac_ext_link_stream_clear(link_dev); break; diff --git a/sound/soc/sof/pcm.c b/sound/soc/sof/pcm.c index d730e437e4ba..71c3f29057a7 100644 --- a/sound/soc/sof/pcm.c +++ b/sound/soc/sof/pcm.c @@ -361,7 +361,7 @@ static int sof_pcm_trigger(struct snd_soc_component *component, return ret; } - /* fallthrough */ + fallthrough; case SNDRV_PCM_TRIGGER_START: if 
(spcm->stream[substream->stream].suspend_ignored) { /* @@ -386,7 +386,7 @@ static int sof_pcm_trigger(struct snd_soc_component *component, spcm->stream[substream->stream].suspend_ignored = true; return 0; } - /* fallthrough */ + fallthrough; case SNDRV_PCM_TRIGGER_STOP: stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_STOP; ipc_first = true; diff --git a/sound/soc/ti/davinci-i2s.c b/sound/soc/ti/davinci-i2s.c index d89b5c928c4d..dd34504c09ba 100644 --- a/sound/soc/ti/davinci-i2s.c +++ b/sound/soc/ti/davinci-i2s.c @@ -289,7 +289,7 @@ static int davinci_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai, * rate is lowered. */ inv_fs = true; - /* fall through */ + fallthrough; case SND_SOC_DAIFMT_DSP_A: dev->mode = MOD_DSP_A; break; diff --git a/sound/soc/ti/n810.c b/sound/soc/ti/n810.c index 2802a33b9c5f..ed217b34f846 100644 --- a/sound/soc/ti/n810.c +++ b/sound/soc/ti/n810.c @@ -46,7 +46,7 @@ static void n810_ext_control(struct snd_soc_dapm_context *dapm) switch (n810_jack_func) { case N810_JACK_HS: line1l = 1; - /* fall through */ + fallthrough; case N810_JACK_HP: hp = 1; break; diff --git a/sound/soc/ti/omap-dmic.c b/sound/soc/ti/omap-dmic.c index 01abf1be5d78..a26588e9c3bc 100644 --- a/sound/soc/ti/omap-dmic.c +++ b/sound/soc/ti/omap-dmic.c @@ -203,10 +203,10 @@ static int omap_dmic_dai_hw_params(struct snd_pcm_substream *substream, switch (channels) { case 6: dmic->ch_enabled |= OMAP_DMIC_UP3_ENABLE; - /* fall through */ + fallthrough; case 4: dmic->ch_enabled |= OMAP_DMIC_UP2_ENABLE; - /* fall through */ + fallthrough; case 2: dmic->ch_enabled |= OMAP_DMIC_UP1_ENABLE; break; diff --git a/sound/soc/ti/omap-mcpdm.c b/sound/soc/ti/omap-mcpdm.c index d482b62f314a..fafb2998ad0d 100644 --- a/sound/soc/ti/omap-mcpdm.c +++ b/sound/soc/ti/omap-mcpdm.c @@ -309,19 +309,19 @@ static int omap_mcpdm_dai_hw_params(struct snd_pcm_substream *substream, /* up to 3 channels for capture */ return -EINVAL; link_mask |= 1 << 4; - /* fall through */ + fallthrough; case 4: if (stream == SNDRV_PCM_STREAM_CAPTURE) /* up to 3 channels for capture */ return -EINVAL; link_mask |= 1 << 3; - /* fall through */ + fallthrough; case 3: link_mask |= 1 << 2; - /* fall through */ + fallthrough; case 2: link_mask |= 1 << 1; - /* fall through */ + fallthrough; case 1: link_mask |= 1 << 0; break; diff --git a/sound/soc/ti/rx51.c b/sound/soc/ti/rx51.c index 2176a95201bf..a2629ccc1dc8 100644 --- a/sound/soc/ti/rx51.c +++ b/sound/soc/ti/rx51.c @@ -55,7 +55,7 @@ static void rx51_ext_control(struct snd_soc_dapm_context *dapm) break; case RX51_JACK_HS: hs = 1; - /* fall through */ + fallthrough; case RX51_JACK_HP: hp = 1; break; diff --git a/sound/soc/zte/zx-i2s.c b/sound/soc/zte/zx-i2s.c index 568cde64ff8b..1c1a44e08a67 100644 --- a/sound/soc/zte/zx-i2s.c +++ b/sound/soc/zte/zx-i2s.c @@ -294,7 +294,7 @@ static int zx_i2s_trigger(struct snd_pcm_substream *substream, int cmd, zx_i2s_rx_dma_en(zx_i2s->reg_base, true); else zx_i2s_tx_dma_en(zx_i2s->reg_base, true); - /* fall thru */ + fallthrough; case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: if (capture) @@ -308,7 +308,7 @@ static int zx_i2s_trigger(struct snd_pcm_substream *substream, int cmd, zx_i2s_rx_dma_en(zx_i2s->reg_base, false); else zx_i2s_tx_dma_en(zx_i2s->reg_base, false); - /* fall thru */ + fallthrough; case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: if (capture) diff --git a/sound/soc/zte/zx-spdif.c b/sound/soc/zte/zx-spdif.c index a3a07c0730e6..b4168bd532b7 100644 --- a/sound/soc/zte/zx-spdif.c +++ b/sound/soc/zte/zx-spdif.c @@ -218,7 +218,7 @@ 
static int zx_spdif_trigger(struct snd_pcm_substream *substream, int cmd, val = readl_relaxed(zx_spdif->reg_base + ZX_FIFOCTRL); val |= ZX_FIFOCTRL_TX_FIFO_RST; writel_relaxed(val, zx_spdif->reg_base + ZX_FIFOCTRL); - /* fall thru */ + fallthrough; case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: zx_spdif_cfg_tx(zx_spdif->reg_base, true); -- cgit v1.2.3 From 12564485ed8caac3c18572793ec01330792c7191 Mon Sep 17 00:00:00 2001 From: Shawn Anastasio Date: Fri, 21 Aug 2020 13:55:56 -0500 Subject: Revert "powerpc/64s: Remove PROT_SAO support" This reverts commit 5c9fa16e8abd342ce04dc830c1ebb2a03abf6c05. Since PROT_SAO can still be useful for certain classes of software, reintroduce it. Concerns about guest migration for LPARs using SAO will be addressed next. Signed-off-by: Shawn Anastasio Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200821185558.35561-2-shawn@anastas.io --- arch/powerpc/include/asm/book3s/64/pgtable.h | 8 ++--- arch/powerpc/include/asm/cputable.h | 10 +++---- arch/powerpc/include/asm/mman.h | 26 ++++++++++++++--- arch/powerpc/include/asm/nohash/64/pgtable.h | 2 ++ arch/powerpc/include/uapi/asm/mman.h | 2 +- arch/powerpc/kernel/dt_cpu_ftrs.c | 2 +- arch/powerpc/mm/book3s64/hash_utils.c | 2 ++ include/linux/mm.h | 2 ++ include/trace/events/mmflags.h | 2 ++ mm/ksm.c | 4 +++ tools/testing/selftests/powerpc/mm/.gitignore | 1 + tools/testing/selftests/powerpc/mm/Makefile | 4 ++- tools/testing/selftests/powerpc/mm/prot_sao.c | 42 +++++++++++++++++++++++++++ 13 files changed, 90 insertions(+), 17 deletions(-) create mode 100644 tools/testing/selftests/powerpc/mm/prot_sao.c (limited to 'include') diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 6de56c3b33c4..495fc0ccb453 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -20,13 +20,9 @@ #define _PAGE_RW (_PAGE_READ | _PAGE_WRITE) #define _PAGE_RWX (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC) #define _PAGE_PRIVILEGED 0x00008 /* kernel access only */ - -#define _PAGE_CACHE_CTL 0x00030 /* Bits for the folowing cache modes */ - /* No bits set is normal cacheable memory */ - /* 0x00010 unused, is SAO bit on radix POWER9 */ +#define _PAGE_SAO 0x00010 /* Strong access order */ #define _PAGE_NON_IDEMPOTENT 0x00020 /* non idempotent memory */ #define _PAGE_TOLERANT 0x00030 /* tolerant memory, cache inhibited */ - #define _PAGE_DIRTY 0x00080 /* C: page changed */ #define _PAGE_ACCESSED 0x00100 /* R: page referenced */ /* @@ -828,6 +824,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, return hash__set_pte_at(mm, addr, ptep, pte, percpu); } +#define _PAGE_CACHE_CTL (_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT) + #define pgprot_noncached pgprot_noncached static inline pgprot_t pgprot_noncached(pgprot_t prot) { diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index e005b4581023..32a15dc49e8c 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -196,7 +196,7 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTR_SPURR LONG_ASM_CONST(0x0000000001000000) #define CPU_FTR_DSCR LONG_ASM_CONST(0x0000000002000000) #define CPU_FTR_VSX LONG_ASM_CONST(0x0000000004000000) -// Free LONG_ASM_CONST(0x0000000008000000) +#define CPU_FTR_SAO LONG_ASM_CONST(0x0000000008000000) #define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0000000010000000) #define CPU_FTR_UNALIGNED_LD_STD 
LONG_ASM_CONST(0x0000000020000000) #define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0000000040000000) @@ -441,7 +441,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ - CPU_FTR_DSCR | CPU_FTR_ASYM_SMT | \ + CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_CFAR | CPU_FTR_HVMODE | \ CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX ) @@ -450,7 +450,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ - CPU_FTR_DSCR | \ + CPU_FTR_DSCR | CPU_FTR_SAO | \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \ @@ -461,7 +461,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ - CPU_FTR_DSCR | \ + CPU_FTR_DSCR | CPU_FTR_SAO | \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \ @@ -479,7 +479,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ - CPU_FTR_DSCR | \ + CPU_FTR_DSCR | CPU_FTR_SAO | \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \ diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h index 7c07728af300..4ba303ea27f5 100644 --- a/arch/powerpc/include/asm/mman.h +++ b/arch/powerpc/include/asm/mman.h @@ -13,20 +13,38 @@ #include #include -#ifdef CONFIG_PPC_MEM_KEYS static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, unsigned long pkey) { - return pkey_to_vmflag_bits(pkey); +#ifdef CONFIG_PPC_MEM_KEYS + return (((prot & PROT_SAO) ? VM_SAO : 0) | pkey_to_vmflag_bits(pkey)); +#else + return ((prot & PROT_SAO) ? VM_SAO : 0); +#endif } #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey) static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags) { - return __pgprot(vmflag_to_pte_pkey_bits(vm_flags)); +#ifdef CONFIG_PPC_MEM_KEYS + return (vm_flags & VM_SAO) ? + __pgprot(_PAGE_SAO | vmflag_to_pte_pkey_bits(vm_flags)) : + __pgprot(0 | vmflag_to_pte_pkey_bits(vm_flags)); +#else + return (vm_flags & VM_SAO) ? 
__pgprot(_PAGE_SAO) : __pgprot(0); +#endif } #define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags) -#endif + +static inline bool arch_validate_prot(unsigned long prot, unsigned long addr) +{ + if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO)) + return false; + if ((prot & PROT_SAO) && !cpu_has_feature(CPU_FTR_SAO)) + return false; + return true; +} +#define arch_validate_prot arch_validate_prot #endif /* CONFIG_PPC64 */ #endif /* _ASM_POWERPC_MMAN_H */ diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h index 59ee9fa4ae09..6cb8aa357191 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h @@ -82,6 +82,8 @@ */ #include +#define _PAGE_SAO 0 + #define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1)) /* diff --git a/arch/powerpc/include/uapi/asm/mman.h b/arch/powerpc/include/uapi/asm/mman.h index 3a700351feca..c0c737215b00 100644 --- a/arch/powerpc/include/uapi/asm/mman.h +++ b/arch/powerpc/include/uapi/asm/mman.h @@ -11,7 +11,7 @@ #include -#define PROT_SAO 0x10 /* Unsupported since v5.9 */ +#define PROT_SAO 0x10 /* Strong Access Ordering */ #define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */ #define MAP_NORESERVE 0x40 /* don't reserve swap pages */ diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 8dc46f38680b..f204ad79b6b5 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -653,7 +653,7 @@ static struct dt_cpu_feature_match __initdata {"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL}, {"processor-utilization-of-resources-register", feat_enable_purr, 0}, {"no-execute", feat_enable, 0}, - /* strong-access-ordering is unused */ + {"strong-access-ordering", feat_enable, CPU_FTR_SAO}, {"cache-inhibited-large-page", feat_enable_large_ci, 0}, {"coprocessor-icswx", feat_enable, 0}, {"hypervisor-virtualization-interrupt", feat_enable_hvi, 0}, diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 890a71c5293e..c663e7ba801f 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -232,6 +232,8 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags) rflags |= HPTE_R_I; else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT) rflags |= (HPTE_R_I | HPTE_R_G); + else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO) + rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M); else /* * Add memory coherence if cache inhibited is not set diff --git a/include/linux/mm.h b/include/linux/mm.h index 1983e08f5906..5abe6df4247e 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -321,6 +321,8 @@ extern unsigned int kobjsize(const void *objp); #if defined(CONFIG_X86) # define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */ +#elif defined(CONFIG_PPC) +# define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */ #elif defined(CONFIG_PARISC) # define VM_GROWSUP VM_ARCH_1 #elif defined(CONFIG_IA64) diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index 939092dbcb8b..5fb752034386 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -114,6 +114,8 @@ IF_HAVE_PG_IDLE(PG_idle, "idle" ) #if defined(CONFIG_X86) #define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" } +#elif defined(CONFIG_PPC) +#define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" } #elif defined(CONFIG_PARISC) || defined(CONFIG_IA64) #define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, 
"growsup" } #elif !defined(CONFIG_MMU) diff --git a/mm/ksm.c b/mm/ksm.c index 0aa2247bddd7..90a625b02a1d 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2453,6 +2453,10 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start, if (vma_is_dax(vma)) return 0; +#ifdef VM_SAO + if (*vm_flags & VM_SAO) + return 0; +#endif #ifdef VM_SPARC_ADI if (*vm_flags & VM_SPARC_ADI) return 0; diff --git a/tools/testing/selftests/powerpc/mm/.gitignore b/tools/testing/selftests/powerpc/mm/.gitignore index 91c775c23c66..aac4a59f9e28 100644 --- a/tools/testing/selftests/powerpc/mm/.gitignore +++ b/tools/testing/selftests/powerpc/mm/.gitignore @@ -2,6 +2,7 @@ hugetlb_vs_thp_test subpage_prot tempfile +prot_sao segv_errors wild_bctr large_vm_fork_separation diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile index 250ce172e0da..defe488d6bf1 100644 --- a/tools/testing/selftests/powerpc/mm/Makefile +++ b/tools/testing/selftests/powerpc/mm/Makefile @@ -2,7 +2,7 @@ noarg: $(MAKE) -C ../ -TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot segv_errors wild_bctr \ +TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors wild_bctr \ large_vm_fork_separation bad_accesses pkey_exec_prot \ pkey_siginfo stack_expansion_signal stack_expansion_ldst @@ -14,6 +14,8 @@ include ../../lib.mk $(TEST_GEN_PROGS): ../harness.c ../utils.c +$(OUTPUT)/prot_sao: ../utils.c + $(OUTPUT)/wild_bctr: CFLAGS += -m64 $(OUTPUT)/large_vm_fork_separation: CFLAGS += -m64 $(OUTPUT)/bad_accesses: CFLAGS += -m64 diff --git a/tools/testing/selftests/powerpc/mm/prot_sao.c b/tools/testing/selftests/powerpc/mm/prot_sao.c new file mode 100644 index 000000000000..e2eed65b7735 --- /dev/null +++ b/tools/testing/selftests/powerpc/mm/prot_sao.c @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2016, Michael Ellerman, IBM Corp. + */ + +#include +#include +#include +#include + +#include + +#include "utils.h" + +#define SIZE (64 * 1024) + +int test_prot_sao(void) +{ + char *p; + + /* 2.06 or later should support SAO */ + SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06)); + + /* + * Ensure we can ask for PROT_SAO. + * We can't really verify that it does the right thing, but at least we + * confirm the kernel will accept it. + */ + p = mmap(NULL, SIZE, PROT_READ | PROT_WRITE | PROT_SAO, + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + FAIL_IF(p == MAP_FAILED); + + /* Write to the mapping, to at least cause a fault */ + memset(p, 0xaa, SIZE); + + return 0; +} + +int main(void) +{ + return test_harness(test_prot_sao, "prot-sao"); +} -- cgit v1.2.3 From ebb21aa1882f418b436ee23463683790c553a447 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 11 Aug 2020 17:46:58 +1000 Subject: drm/ttm: drop bus.size from bus placement. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is always calculated the same, and only used in a couple of places. 
Signed-off-by: Dave Airlie Reviewed-by: Christian König Link: https://patchwork.freedesktop.org/patch/msgid/20200811074658.58309-2-airlied@gmail.com --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 3 ++- drivers/gpu/drm/radeon/radeon_ttm.c | 7 ++++--- drivers/gpu/drm/ttm/ttm_bo_util.c | 7 ++++--- include/drm/ttm/ttm_resource.h | 2 -- 4 files changed, 10 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 924c19ce3f5a..fc5f7ac53d0a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -751,6 +751,7 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso { struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); struct drm_mm_node *mm_node = mem->mm_node; + size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT; switch (mem->mem_type) { case TTM_PL_SYSTEM: @@ -761,7 +762,7 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso case TTM_PL_VRAM: mem->bus.offset = mem->start << PAGE_SHIFT; /* check if it's visible */ - if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size) + if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size) return -EINVAL; /* Only physically contiguous buffers apply. In a contiguous * buffer, size of the first mm_node would match the number of diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 8ca2253f6fbf..3a4372ea74b9 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -362,6 +362,7 @@ memcpy: static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem) { struct radeon_device *rdev = radeon_get_rdev(bdev); + size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT; switch (mem->mem_type) { case TTM_PL_SYSTEM: @@ -380,7 +381,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso case TTM_PL_VRAM: mem->bus.offset = mem->start << PAGE_SHIFT; /* check if it's visible */ - if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size) + if ((mem->bus.offset + bus_size) > rdev->mc.visible_vram_size) return -EINVAL; mem->bus.base = rdev->mc.aper_base; mem->bus.is_iomem = true; @@ -392,11 +393,11 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso if (mem->placement & TTM_PL_FLAG_WC) mem->bus.addr = ioremap_wc(mem->bus.base + mem->bus.offset, - mem->bus.size); + bus_size); else mem->bus.addr = ioremap(mem->bus.base + mem->bus.offset, - mem->bus.size); + bus_size); if (!mem->bus.addr) return -ENOMEM; diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index eaed29b81d9e..ee04716b2603 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -140,7 +140,6 @@ int ttm_mem_io_reserve(struct ttm_bo_device *bdev, mem->bus.addr = NULL; mem->bus.offset = 0; - mem->bus.size = mem->num_pages << PAGE_SHIFT; mem->bus.base = 0; mem->bus.is_iomem = false; retry: @@ -214,12 +213,14 @@ static int ttm_resource_ioremap(struct ttm_bo_device *bdev, if (mem->bus.addr) { addr = mem->bus.addr; } else { + size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT; + if (mem->placement & TTM_PL_FLAG_WC) addr = ioremap_wc(mem->bus.base + mem->bus.offset, - mem->bus.size); + bus_size); else addr = ioremap(mem->bus.base + mem->bus.offset, - mem->bus.size); + bus_size); if (!addr) { (void) ttm_mem_io_lock(man, false); ttm_mem_io_free(bdev, mem); diff 
--git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h index bac22a56f6cd..6d4226190480 100644 --- a/include/drm/ttm/ttm_resource.h +++ b/include/drm/ttm/ttm_resource.h @@ -162,7 +162,6 @@ struct ttm_resource_manager { * @addr: mapped virtual address * @base: bus base address * @is_iomem: is this io memory ? - * @size: size in byte * @offset: offset from the base address * @io_reserved_vm: The VM system has a refcount in @io_reserved_count * @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve @@ -172,7 +171,6 @@ struct ttm_resource_manager { struct ttm_bus_placement { void *addr; phys_addr_t base; - unsigned long size; unsigned long offset; bool is_iomem; bool io_reserved_vm; -- cgit v1.2.3 From f062f025fc3a4fae3e6a50d13fb1fafb11900fa7 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Wed, 19 Aug 2020 10:50:08 +0200 Subject: libceph: add __maybe_unused to DEFINE_CEPH_FEATURE Avoid -Wunused-const-variable warnings for "make W=1". Reported-by: Leon Romanovsky Signed-off-by: Ilya Dryomov Reviewed-by: Leon Romanovsky --- include/linux/ceph/ceph_features.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h index fcd84e8d88f4..999636d53cf2 100644 --- a/include/linux/ceph/ceph_features.h +++ b/include/linux/ceph/ceph_features.h @@ -11,14 +11,14 @@ #define CEPH_FEATURE_INCARNATION_2 (1ull<<57) // CEPH_FEATURE_SERVER_JEWEL #define DEFINE_CEPH_FEATURE(bit, incarnation, name) \ - static const uint64_t CEPH_FEATURE_##name = (1ULL< Date: Mon, 24 Aug 2020 13:46:22 +0200 Subject: ipv6: ndisc: adjust ndisc_ifinfo_sysctl_change prototype Commit 32927393dc1c ("sysctl: pass kernel pointers to ->proc_handler") changed ndisc_ifinfo_sysctl_change to take a kernel pointer. Adjust its prototype in net/ndisc.h as well to fix the following sparse warning: net/ipv6/ndisc.c:1838:5: error: symbol 'ndisc_ifinfo_sysctl_change' redeclared with different type (incompatible argument 3 (different address spaces)): net/ipv6/ndisc.c:1838:5: int extern [addressable] [signed] [toplevel] ndisc_ifinfo_sysctl_change( ... ) net/ipv6/ndisc.c: note: in included file (through include/net/ipv6.h): ./include/net/ndisc.h:496:5: note: previously declared as: ./include/net/ndisc.h:496:5: int extern [addressable] [signed] [toplevel] ndisc_ifinfo_sysctl_change( ... ) net/ipv6/ndisc.c: note: in included file (through include/net/ip6_route.h): Fixes: 32927393dc1c ("sysctl: pass kernel pointers to ->proc_handler") Cc: Christoph Hellwig Signed-off-by: Tobias Klauser Signed-off-by: David S. Miller --- include/net/ndisc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/ndisc.h b/include/net/ndisc.h index 9205a76d967a..38e4094960ce 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h @@ -494,7 +494,7 @@ int igmp6_event_report(struct sk_buff *skb); #ifdef CONFIG_SYSCTL int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, - void __user *buffer, size_t *lenp, loff_t *ppos); + void *buffer, size_t *lenp, loff_t *ppos); int ndisc_ifinfo_sysctl_strategy(struct ctl_table *ctl, void __user *oldval, size_t __user *oldlenp, void __user *newval, size_t newlen); -- cgit v1.2.3 From be769db2f95861cc8c7c8fedcc71a8c39b803b10 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 22 Aug 2020 08:23:29 +1000 Subject: net: Get rid of consume_skb when tracing is off The function consume_skb is only meaningful when tracing is enabled. 
This patch makes it conditional on CONFIG_TRACEPOINTS. Signed-off-by: Herbert Xu Signed-off-by: David S. Miller --- include/linux/skbuff.h | 9 +++++++++ net/core/skbuff.c | 2 ++ 2 files changed, 11 insertions(+) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 46881d902124..e8bca74857a3 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1056,7 +1056,16 @@ void kfree_skb(struct sk_buff *skb); void kfree_skb_list(struct sk_buff *segs); void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt); void skb_tx_error(struct sk_buff *skb); + +#ifdef CONFIG_TRACEPOINTS void consume_skb(struct sk_buff *skb); +#else +static inline void consume_skb(struct sk_buff *skb) +{ + return kfree_skb(skb); +} +#endif + void __consume_stateless_skb(struct sk_buff *skb); void __kfree_skb(struct sk_buff *skb); extern struct kmem_cache *skbuff_head_cache; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index e18184ffa9c3..6faf73d6a0f7 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -820,6 +820,7 @@ void skb_tx_error(struct sk_buff *skb) } EXPORT_SYMBOL(skb_tx_error); +#ifdef CONFIG_TRACEPOINTS /** * consume_skb - free an skbuff * @skb: buffer to free @@ -837,6 +838,7 @@ void consume_skb(struct sk_buff *skb) __kfree_skb(skb); } EXPORT_SYMBOL(consume_skb); +#endif /** * consume_stateless_skb - free an skbuff, assuming it is stateless -- cgit v1.2.3 From c94a88f341c9b8f05d8639f62bb5d95936f881cd Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 20 Aug 2020 19:20:46 +0200 Subject: sched: Use __always_inline on is_idle_task() is_idle_task() may be used from noinstr functions such as irqentry_enter(). Since the compiler is free to not inline regular inline functions, switch to using __always_inline. Signed-off-by: Marco Elver Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200820172046.GA177701@elver.google.com --- include/linux/sched.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index 93ecd930efd3..afe01e232935 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1666,7 +1666,7 @@ extern struct task_struct *idle_task(int cpu); * * Return: 1 if @p is an idle task. 0 otherwise. */ -static inline bool is_idle_task(const struct task_struct *p) +static __always_inline bool is_idle_task(const struct task_struct *p) { return !!(p->flags & PF_IDLE); } -- cgit v1.2.3 From fddf9055a60dfcc97bda5ef03c8fa4108ed555c5 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 20 Aug 2020 09:13:30 +0200 Subject: lockdep: Use raw_cpu_*() for per-cpu variables Sven reported that commit a21ee6055c30 ("lockdep: Change hardirq{s_enabled,_context} to per-cpu variables") caused trouble on s390 because their this_cpu_*() primitives disable preemption which then lands back tracing. On the one hand, per-cpu ops should use preempt_*able_notrace() and raw_local_irq_*(), on the other hand, we can trivialy use raw_cpu_*() ops for this. Fixes: a21ee6055c30 ("lockdep: Change hardirq{s_enabled,_context} to per-cpu variables") Reported-by: Sven Schnelle Reviewed-by: Steven Rostedt (VMware) Reviewed-by: Thomas Gleixner Acked-by: Rafael J. 
Wysocki Tested-by: Marco Elver Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200821085348.192346882@infradead.org --- include/linux/irqflags.h | 6 +++--- include/linux/lockdep.h | 18 +++++++++++++----- kernel/locking/lockdep.c | 4 ++-- 3 files changed, 18 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index bd5c55755447..d7e50a215ea9 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -53,13 +53,13 @@ DECLARE_PER_CPU(int, hardirq_context); extern void trace_hardirqs_off_finish(void); extern void trace_hardirqs_on(void); extern void trace_hardirqs_off(void); -# define lockdep_hardirq_context() (this_cpu_read(hardirq_context)) +# define lockdep_hardirq_context() (raw_cpu_read(hardirq_context)) # define lockdep_softirq_context(p) ((p)->softirq_context) # define lockdep_hardirqs_enabled() (this_cpu_read(hardirqs_enabled)) # define lockdep_softirqs_enabled(p) ((p)->softirqs_enabled) # define lockdep_hardirq_enter() \ do { \ - if (this_cpu_inc_return(hardirq_context) == 1) \ + if (__this_cpu_inc_return(hardirq_context) == 1)\ current->hardirq_threaded = 0; \ } while (0) # define lockdep_hardirq_threaded() \ @@ -68,7 +68,7 @@ do { \ } while (0) # define lockdep_hardirq_exit() \ do { \ - this_cpu_dec(hardirq_context); \ + __this_cpu_dec(hardirq_context); \ } while (0) # define lockdep_softirq_enter() \ do { \ diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 62a382d1845b..6a584b3e5c74 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -535,19 +535,27 @@ do { \ DECLARE_PER_CPU(int, hardirqs_enabled); DECLARE_PER_CPU(int, hardirq_context); +/* + * The below lockdep_assert_*() macros use raw_cpu_read() to access the above + * per-cpu variables. This is required because this_cpu_read() will potentially + * call into preempt/irq-disable and that obviously isn't right. This is also + * correct because when IRQs are enabled, it doesn't matter if we accidentally + * read the value from our previous CPU. 
+ */ + #define lockdep_assert_irqs_enabled() \ do { \ - WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirqs_enabled)); \ + WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirqs_enabled)); \ } while (0) #define lockdep_assert_irqs_disabled() \ do { \ - WARN_ON_ONCE(debug_locks && this_cpu_read(hardirqs_enabled)); \ + WARN_ON_ONCE(debug_locks && raw_cpu_read(hardirqs_enabled)); \ } while (0) #define lockdep_assert_in_irq() \ do { \ - WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirq_context)); \ + WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirq_context)); \ } while (0) #define lockdep_assert_preemption_enabled() \ @@ -555,7 +563,7 @@ do { \ WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \ debug_locks && \ (preempt_count() != 0 || \ - !this_cpu_read(hardirqs_enabled))); \ + !raw_cpu_read(hardirqs_enabled))); \ } while (0) #define lockdep_assert_preemption_disabled() \ @@ -563,7 +571,7 @@ do { \ WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \ debug_locks && \ (preempt_count() == 0 && \ - this_cpu_read(hardirqs_enabled))); \ + raw_cpu_read(hardirqs_enabled))); \ } while (0) #else diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 2fad21d345b0..c872e95e6e4d 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -3756,7 +3756,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip) skip_checks: /* we'll do an OFF -> ON transition: */ - this_cpu_write(hardirqs_enabled, 1); + __this_cpu_write(hardirqs_enabled, 1); trace->hardirq_enable_ip = ip; trace->hardirq_enable_event = ++trace->irq_events; debug_atomic_inc(hardirqs_on_events); @@ -3795,7 +3795,7 @@ void noinstr lockdep_hardirqs_off(unsigned long ip) /* * We have done an ON -> OFF transition: */ - this_cpu_write(hardirqs_enabled, 0); + __this_cpu_write(hardirqs_enabled, 0); trace->hardirq_disable_ip = ip; trace->hardirq_disable_event = ++trace->irq_events; debug_atomic_inc(hardirqs_off_events); -- cgit v1.2.3 From bf9282dc26e7fe2a0736edc568762f0f05d12416 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 12 Aug 2020 12:22:17 +0200 Subject: cpuidle: Make CPUIDLE_FLAG_TLB_FLUSHED generic This allows moving the leave_mm() call into generic code before rcu_idle_enter(). Gets rid of more trace_*_rcuidle() users. Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Steven Rostedt (VMware) Reviewed-by: Thomas Gleixner Acked-by: Rafael J. Wysocki Tested-by: Marco Elver Link: https://lkml.kernel.org/r/20200821085348.369441600@infradead.org --- arch/x86/include/asm/mmu.h | 1 + arch/x86/mm/tlb.c | 13 ++----------- drivers/cpuidle/cpuidle.c | 4 ++++ drivers/idle/intel_idle.c | 16 ---------------- include/linux/cpuidle.h | 13 +++++++------ include/linux/mmu_context.h | 5 +++++ 6 files changed, 19 insertions(+), 33 deletions(-) (limited to 'include') diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h index 0a301ad0b02f..9257667d13c5 100644 --- a/arch/x86/include/asm/mmu.h +++ b/arch/x86/include/asm/mmu.h @@ -59,5 +59,6 @@ typedef struct { } void leave_mm(int cpu); +#define leave_mm leave_mm #endif /* _ASM_X86_MMU_H */ diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 1a3569b43aa5..0951b47e64c1 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -555,21 +555,12 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); load_new_mm_cr3(next->pgd, new_asid, true); - /* - * NB: This gets called via leave_mm() in the idle path - * where RCU functions differently. 
Tracing normally - * uses RCU, so we need to use the _rcuidle variant. - * - * (There is no good reason for this. The idle code should - * be rearranged to call this before rcu_idle_enter().) - */ - trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); + trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); } else { /* The new ASID is already up to date. */ load_new_mm_cr3(next->pgd, new_asid, false); - /* See above wrt _rcuidle. */ - trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0); + trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0); } /* Make sure we write CR3 before loaded_mm. */ diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 9bcda4153d3b..04becd70cc41 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include "cpuidle.h" @@ -228,6 +229,9 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, broadcast = false; } + if (target_state->flags & CPUIDLE_FLAG_TLB_FLUSHED) + leave_mm(dev->cpu); + /* Take note of the planned idle state. */ sched_idle_set_state(target_state); diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 8e0fb1a5bdbd..9a810e4a7946 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -89,14 +89,6 @@ static unsigned int mwait_substates __initdata; */ #define CPUIDLE_FLAG_ALWAYS_ENABLE BIT(15) -/* - * Set this flag for states where the HW flushes the TLB for us - * and so we don't need cross-calls to keep it consistent. - * If this flag is set, SW flushes the TLB, so even if the - * HW doesn't do the flushing, this flag is safe to use. - */ -#define CPUIDLE_FLAG_TLB_FLUSHED BIT(16) - /* * MWAIT takes an 8-bit "hint" in EAX "suggesting" * the C-state (top nibble) and sub-state (bottom nibble) @@ -131,14 +123,6 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev, unsigned long eax = flg2MWAIT(state->flags); unsigned long ecx = 1; /* break on interrupt flag */ bool tick; - int cpu = smp_processor_id(); - - /* - * leave_mm() to avoid costly and often unnecessary wakeups - * for flushing the user TLB's associated with the active mm. 
- */ - if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED) - leave_mm(cpu); if (!static_cpu_has(X86_FEATURE_ARAT)) { /* diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index b65909ae4e20..75895e6363b8 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -75,12 +75,13 @@ struct cpuidle_state { }; /* Idle State Flags */ -#define CPUIDLE_FLAG_NONE (0x00) -#define CPUIDLE_FLAG_POLLING BIT(0) /* polling state */ -#define CPUIDLE_FLAG_COUPLED BIT(1) /* state applies to multiple cpus */ -#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */ -#define CPUIDLE_FLAG_UNUSABLE BIT(3) /* avoid using this state */ -#define CPUIDLE_FLAG_OFF BIT(4) /* disable this state by default */ +#define CPUIDLE_FLAG_NONE (0x00) +#define CPUIDLE_FLAG_POLLING BIT(0) /* polling state */ +#define CPUIDLE_FLAG_COUPLED BIT(1) /* state applies to multiple cpus */ +#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */ +#define CPUIDLE_FLAG_UNUSABLE BIT(3) /* avoid using this state */ +#define CPUIDLE_FLAG_OFF BIT(4) /* disable this state by default */ +#define CPUIDLE_FLAG_TLB_FLUSHED BIT(5) /* idle-state flushes TLBs */ struct cpuidle_device_kobj; struct cpuidle_state_kobj; diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h index c51a84132d7c..03dee12d2b61 100644 --- a/include/linux/mmu_context.h +++ b/include/linux/mmu_context.h @@ -3,10 +3,15 @@ #define _LINUX_MMU_CONTEXT_H #include +#include /* Architectures that care about IRQ state in switch_mm can override this. */ #ifndef switch_mm_irqs_off # define switch_mm_irqs_off switch_mm #endif +#ifndef leave_mm +static inline void leave_mm(int cpu) { } +#endif + #endif -- cgit v1.2.3 From 00b0ed2d4997af6d0a93edef820386951fd66d94 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 12 Aug 2020 19:28:06 +0200 Subject: locking/lockdep: Cleanup Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Steven Rostedt (VMware) Reviewed-by: Thomas Gleixner Acked-by: Rafael J. 
Wysocki Tested-by: Marco Elver Link: https://lkml.kernel.org/r/20200821085348.546087214@infradead.org --- include/linux/irqflags.h | 54 +++++++++++++++++++++++++++--------------------- 1 file changed, 30 insertions(+), 24 deletions(-) (limited to 'include') diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index d7e50a215ea9..00d553d77911 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -49,10 +49,11 @@ struct irqtrace_events { DECLARE_PER_CPU(int, hardirqs_enabled); DECLARE_PER_CPU(int, hardirq_context); - extern void trace_hardirqs_on_prepare(void); - extern void trace_hardirqs_off_finish(void); - extern void trace_hardirqs_on(void); - extern void trace_hardirqs_off(void); +extern void trace_hardirqs_on_prepare(void); +extern void trace_hardirqs_off_finish(void); +extern void trace_hardirqs_on(void); +extern void trace_hardirqs_off(void); + # define lockdep_hardirq_context() (raw_cpu_read(hardirq_context)) # define lockdep_softirq_context(p) ((p)->softirq_context) # define lockdep_hardirqs_enabled() (this_cpu_read(hardirqs_enabled)) @@ -120,17 +121,17 @@ do { \ #else # define trace_hardirqs_on_prepare() do { } while (0) # define trace_hardirqs_off_finish() do { } while (0) -# define trace_hardirqs_on() do { } while (0) -# define trace_hardirqs_off() do { } while (0) -# define lockdep_hardirq_context() 0 -# define lockdep_softirq_context(p) 0 -# define lockdep_hardirqs_enabled() 0 -# define lockdep_softirqs_enabled(p) 0 -# define lockdep_hardirq_enter() do { } while (0) -# define lockdep_hardirq_threaded() do { } while (0) -# define lockdep_hardirq_exit() do { } while (0) -# define lockdep_softirq_enter() do { } while (0) -# define lockdep_softirq_exit() do { } while (0) +# define trace_hardirqs_on() do { } while (0) +# define trace_hardirqs_off() do { } while (0) +# define lockdep_hardirq_context() 0 +# define lockdep_softirq_context(p) 0 +# define lockdep_hardirqs_enabled() 0 +# define lockdep_softirqs_enabled(p) 0 +# define lockdep_hardirq_enter() do { } while (0) +# define lockdep_hardirq_threaded() do { } while (0) +# define lockdep_hardirq_exit() do { } while (0) +# define lockdep_softirq_enter() do { } while (0) +# define lockdep_softirq_exit() do { } while (0) # define lockdep_hrtimer_enter(__hrtimer) false # define lockdep_hrtimer_exit(__context) do { } while (0) # define lockdep_posixtimer_enter() do { } while (0) @@ -181,17 +182,25 @@ do { \ * if !TRACE_IRQFLAGS. 
*/ #ifdef CONFIG_TRACE_IRQFLAGS -#define local_irq_enable() \ - do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0) -#define local_irq_disable() \ - do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0) + +#define local_irq_enable() \ + do { \ + trace_hardirqs_on(); \ + raw_local_irq_enable(); \ + } while (0) + +#define local_irq_disable() \ + do { \ + raw_local_irq_disable(); \ + trace_hardirqs_off(); \ + } while (0) + #define local_irq_save(flags) \ do { \ raw_local_irq_save(flags); \ trace_hardirqs_off(); \ } while (0) - #define local_irq_restore(flags) \ do { \ if (raw_irqs_disabled_flags(flags)) { \ @@ -214,10 +223,7 @@ do { \ #define local_irq_enable() do { raw_local_irq_enable(); } while (0) #define local_irq_disable() do { raw_local_irq_disable(); } while (0) -#define local_irq_save(flags) \ - do { \ - raw_local_irq_save(flags); \ - } while (0) +#define local_irq_save(flags) do { raw_local_irq_save(flags); } while (0) #define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0) #define safe_halt() do { raw_safe_halt(); } while (0) -- cgit v1.2.3 From 044d0d6de9f50192f9697583504a382347ee95ca Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 23 Jul 2020 20:56:14 +1000 Subject: lockdep: Only trace IRQ edges Problem: raw_local_irq_save(); // software state on local_irq_save(); // software state off ... local_irq_restore(); // software state still off, because we don't enable IRQs raw_local_irq_restore(); // software state still off, *whoopsie* existing instances: - lock_acquire() raw_local_irq_save() __lock_acquire() arch_spin_lock(&graph_lock) pv_wait() := kvm_wait() (same or worse for Xen/HyperV) local_irq_save() - trace_clock_global() raw_local_irq_save() arch_spin_lock() pv_wait() := kvm_wait() local_irq_save() - apic_retrigger_irq() raw_local_irq_save() apic->send_IPI() := default_send_IPI_single_phys() local_irq_save() Possible solutions: A) make it work by enabling the tracing inside raw_*() B) make it work by keeping tracing disabled inside raw_*() C) call it broken and clean it up now Now, given that the only reason to use the raw_* variant is because you don't want tracing. Therefore A) seems like a weird option (although it can be done). C) is tempting, but OTOH it ends up converting a _lot_ of code to raw just because there is one raw user, this strips the validation/tracing off for all the other users. So we pick B) and declare any code that ends up doing: raw_local_irq_save() local_irq_save() lockdep_assert_irqs_disabled(); broken. AFAICT this problem has existed forever, the only reason it came up is because commit: 859d069ee1dd ("lockdep: Prepare for NMI IRQ state tracking") changed IRQ tracing vs lockdep recursion and the first instance is fairly common, the other cases hardly ever happen. Signed-off-by: Nicholas Piggin [rewrote changelog] Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Steven Rostedt (VMware) Reviewed-by: Thomas Gleixner Acked-by: Rafael J. 
Wysocki Tested-by: Marco Elver Link: https://lkml.kernel.org/r/20200723105615.1268126-1-npiggin@gmail.com --- arch/powerpc/include/asm/hw_irq.h | 11 ++++------- include/linux/irqflags.h | 15 +++++++-------- 2 files changed, 11 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index 3a0db7b0b46e..35060be09073 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -200,17 +200,14 @@ static inline bool arch_irqs_disabled(void) #define powerpc_local_irq_pmu_save(flags) \ do { \ raw_local_irq_pmu_save(flags); \ - trace_hardirqs_off(); \ + if (!raw_irqs_disabled_flags(flags)) \ + trace_hardirqs_off(); \ } while(0) #define powerpc_local_irq_pmu_restore(flags) \ do { \ - if (raw_irqs_disabled_flags(flags)) { \ - raw_local_irq_pmu_restore(flags); \ - trace_hardirqs_off(); \ - } else { \ + if (!raw_irqs_disabled_flags(flags)) \ trace_hardirqs_on(); \ - raw_local_irq_pmu_restore(flags); \ - } \ + raw_local_irq_pmu_restore(flags); \ } while(0) #else #define powerpc_local_irq_pmu_save(flags) \ diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index 00d553d77911..3ed4e8771b64 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -191,25 +191,24 @@ do { \ #define local_irq_disable() \ do { \ + bool was_disabled = raw_irqs_disabled();\ raw_local_irq_disable(); \ - trace_hardirqs_off(); \ + if (!was_disabled) \ + trace_hardirqs_off(); \ } while (0) #define local_irq_save(flags) \ do { \ raw_local_irq_save(flags); \ - trace_hardirqs_off(); \ + if (!raw_irqs_disabled_flags(flags)) \ + trace_hardirqs_off(); \ } while (0) #define local_irq_restore(flags) \ do { \ - if (raw_irqs_disabled_flags(flags)) { \ - raw_local_irq_restore(flags); \ - trace_hardirqs_off(); \ - } else { \ + if (!raw_irqs_disabled_flags(flags)) \ trace_hardirqs_on(); \ - raw_local_irq_restore(flags); \ - } \ + raw_local_irq_restore(flags); \ } while (0) #define safe_halt() \ -- cgit v1.2.3 From 9d3004bf7ba32c89873bb8422671d52b4bb52ce1 Mon Sep 17 00:00:00 2001 From: Simon Leiner Date: Tue, 25 Aug 2020 11:31:53 +0200 Subject: arm/xen: Add misuse warning to virt_to_gfn As virt_to_gfn uses virt_to_phys, it will return invalid addresses when used with vmalloc'd addresses. This patch introduces a warning, when virt_to_gfn is used in this way. Signed-off-by: Simon Leiner Reviewed-by: Stefano Stabellini Link: https://lore.kernel.org/r/20200825093153.35500-2-simon@leiner.me Signed-off-by: Juergen Gross --- include/xen/arm/page.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/xen/arm/page.h b/include/xen/arm/page.h index d7f6af50e200..39df751d0dc4 100644 --- a/include/xen/arm/page.h +++ b/include/xen/arm/page.h @@ -76,7 +76,11 @@ static inline unsigned long bfn_to_pfn(unsigned long bfn) #define bfn_to_local_pfn(bfn) bfn_to_pfn(bfn) /* VIRT <-> GUEST conversion */ -#define virt_to_gfn(v) (pfn_to_gfn(virt_to_phys(v) >> XEN_PAGE_SHIFT)) +#define virt_to_gfn(v) \ + ({ \ + WARN_ON_ONCE(!virt_addr_valid(v)); \ + pfn_to_gfn(virt_to_phys(v) >> XEN_PAGE_SHIFT); \ + }) #define gfn_to_virt(m) (__va(gfn_to_pfn(m) << XEN_PAGE_SHIFT)) /* Only used in PV code. But ARM guests are always HVM. 
*/ -- cgit v1.2.3 From aac544c3553d98ebe150dda19a25aa253f7ad3fe Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Tue, 25 Aug 2020 01:25:11 +0200 Subject: Compiler Attributes: remove comment about sparse not supporting __has_attribute Sparse supports __has_attribute() since 2018-08-31, so the comment is not true anymore but more importantly is rather confusing. So remove it. Signed-off-by: Luc Van Oostenryck Signed-off-by: Miguel Ojeda --- include/linux/compiler_attributes.h | 6 ------ 1 file changed, 6 deletions(-) (limited to 'include') diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index 6122efdad6ad..af7a58c19e20 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -24,12 +24,6 @@ * __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17. * In the meantime, to support 4.6 <= gcc < 5, we implement __has_attribute * by hand. - * - * sparse does not support __has_attribute (yet) and defines __GNUC_MINOR__ - * depending on the compiler used to build it; however, these attributes have - * no semantic effects for sparse, so it does not matter. Also note that, - * in order to avoid sparse's warnings, even the unsupported ones must be - * defined to 0. */ #ifndef __has_attribute # define __has_attribute(x) __GCC4_has_attribute_##x -- cgit v1.2.3 From 5861af92ff2a2e002449191413c35f3ec5f721fe Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Tue, 25 Aug 2020 01:25:26 +0200 Subject: Compiler Attributes: fix comment concerning GCC 4.6 GCC 4.6 is not supported anymore, so remove a reference to it, leaving just the part about version prior GCC 5. Signed-off-by: Luc Van Oostenryck Signed-off-by: Miguel Ojeda --- include/linux/compiler_attributes.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index af7a58c19e20..ea7b756b1c8f 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -22,7 +22,7 @@ /* * __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17. - * In the meantime, to support 4.6 <= gcc < 5, we implement __has_attribute + * In the meantime, to support gcc < 5, we implement __has_attribute * by hand. */ #ifndef __has_attribute -- cgit v1.2.3 From 30b8e6b22fd0f7a56911e69c681e92532e72e3b6 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 27 Aug 2020 10:54:16 +0530 Subject: cpufreq: Use WARN_ON_ONCE() for invalid relation The relation can't be invalid here, so if it turns out to be invalid, just WARN_ON_ONCE() and return 0. Signed-off-by: Viresh Kumar [ rjw: Subject and changelog edits ] Signed-off-by: Rafael J. 
Wysocki --- include/linux/cpufreq.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 8f141d4c859c..a911e5d06845 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -956,8 +956,8 @@ static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy, case CPUFREQ_RELATION_C: return cpufreq_table_find_index_c(policy, target_freq); default: - pr_err("%s: Invalid relation: %d\n", __func__, relation); - return -EINVAL; + WARN_ON_ONCE(1); + return 0; } } -- cgit v1.2.3 From 645f08975f49441b3e753d8dc5b740cbcb226594 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Thu, 27 Aug 2020 07:27:49 -0400 Subject: net: Fix some comments Fix some comments, including wrong function name, duplicated word and so on. Signed-off-by: Miaohe Lin Signed-off-by: David S. Miller --- include/linux/skbuff.h | 4 ++-- include/uapi/linux/in.h | 2 +- net/core/sock.c | 2 +- net/ipv4/raw.c | 2 +- net/l3mdev/l3mdev.c | 2 +- net/socket.c | 4 ++-- 6 files changed, 8 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index e8bca74857a3..8d9ab50b08c9 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -71,7 +71,7 @@ * NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain * TCP or UDP packets over IPv6. These are specifically * unencapsulated packets of the form IPv6|TCP or - * IPv4|UDP where the Next Header field in the IPv6 + * IPv6|UDP where the Next Header field in the IPv6 * header is either TCP or UDP. IPv6 extension headers * are not supported with this feature. This feature * cannot be set in features for a device with @@ -2667,7 +2667,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) * * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS) * to reduce average number of cache lines per packet. - * get_rps_cpus() for example only access one 64 bytes aligned block : + * get_rps_cpu() for example only access one 64 bytes aligned block : * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8) */ #ifndef NET_SKB_PAD diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h index 3d0d8231dc19..7d6687618d80 100644 --- a/include/uapi/linux/in.h +++ b/include/uapi/linux/in.h @@ -135,7 +135,7 @@ struct in_addr { * this socket to prevent accepting spoofed ones. */ #define IP_PMTUDISC_INTERFACE 4 -/* weaker version of IP_PMTUDISC_INTERFACE, which allos packets to get +/* weaker version of IP_PMTUDISC_INTERFACE, which allows packets to get * fragmented if they exeed the interface mtu */ #define IP_PMTUDISC_OMIT 5 diff --git a/net/core/sock.c b/net/core/sock.c index e4f40b175acb..8eb2c924805a 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -3254,7 +3254,7 @@ void sk_common_release(struct sock *sk) sk->sk_prot->destroy(sk); /* - * Observation: when sock_common_release is called, processes have + * Observation: when sk_common_release is called, processes have * no access to socket. But net still has. 
* Step one, detach it from networking: * diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 6fd4330287c2..407956be7deb 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -610,7 +610,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) } else if (!ipc.oif) { ipc.oif = inet->uc_index; } else if (ipv4_is_lbcast(daddr) && inet->uc_index) { - /* oif is set, packet is to local broadcast and + /* oif is set, packet is to local broadcast * and uc_index is set. oif is most likely set * by sk_bound_dev_if. If uc_index != oif check if the * oif is an L3 master and uc_index is an L3 slave. diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c index e71ca5aec684..864326f150e2 100644 --- a/net/l3mdev/l3mdev.c +++ b/net/l3mdev/l3mdev.c @@ -154,7 +154,7 @@ int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex) EXPORT_SYMBOL_GPL(l3mdev_master_upper_ifindex_by_index_rcu); /** - * l3mdev_fib_table - get FIB table id associated with an L3 + * l3mdev_fib_table_rcu - get FIB table id associated with an L3 * master interface * @dev: targeted interface */ diff --git a/net/socket.c b/net/socket.c index dbbe8ea7d395..0c0144604f81 100644 --- a/net/socket.c +++ b/net/socket.c @@ -3610,7 +3610,7 @@ int kernel_getsockname(struct socket *sock, struct sockaddr *addr) EXPORT_SYMBOL(kernel_getsockname); /** - * kernel_peername - get the address which the socket is connected (kernel space) + * kernel_getpeername - get the address which the socket is connected (kernel space) * @sock: socket * @addr: address holder * @@ -3671,7 +3671,7 @@ int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset, EXPORT_SYMBOL(kernel_sendpage_locked); /** - * kernel_shutdown - shut down part of a full-duplex connection (kernel space) + * kernel_sock_shutdown - shut down part of a full-duplex connection (kernel space) * @sock: socket * @how: connection part * -- cgit v1.2.3 From 89d29997f103d08264b0685796b420d911658b96 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Mon, 24 Aug 2020 12:10:33 -0700 Subject: irqchip/eznps: Fix build error for !ARC700 builds eznps driver is supposed to be platform independent however it ends up including stuff from inside arch/arc headers leading to rand config build errors. The quick hack to fix this (proper fix is too much chrun for non active user-base) is to add following to nps platform agnostic header. 
- copy AUX_IENABLE from arch/arc header - move CTOP_AUX_IACK from arch/arc/plat-eznps/*/** Reported-by: kernel test robot Reported-by: Sebastian Andrzej Siewior Link: https://lkml.kernel.org/r/20200824095831.5lpkmkafelnvlpi2@linutronix.de Signed-off-by: Vineet Gupta --- arch/arc/plat-eznps/include/plat/ctop.h | 1 - include/soc/nps/common.h | 6 ++++++ 2 files changed, 6 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h index a4a61531c7fb..77712c5ffe84 100644 --- a/arch/arc/plat-eznps/include/plat/ctop.h +++ b/arch/arc/plat-eznps/include/plat/ctop.h @@ -33,7 +33,6 @@ #define CTOP_AUX_DPC (CTOP_AUX_BASE + 0x02C) #define CTOP_AUX_LPC (CTOP_AUX_BASE + 0x030) #define CTOP_AUX_EFLAGS (CTOP_AUX_BASE + 0x080) -#define CTOP_AUX_IACK (CTOP_AUX_BASE + 0x088) #define CTOP_AUX_GPA1 (CTOP_AUX_BASE + 0x08C) #define CTOP_AUX_UDMC (CTOP_AUX_BASE + 0x300) diff --git a/include/soc/nps/common.h b/include/soc/nps/common.h index 9b1d43d671a3..8c18dc6d3fde 100644 --- a/include/soc/nps/common.h +++ b/include/soc/nps/common.h @@ -45,6 +45,12 @@ #define CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST 0x5B60 #define CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM 0x00010422 +#ifndef AUX_IENABLE +#define AUX_IENABLE 0x40c +#endif + +#define CTOP_AUX_IACK (0xFFFFF800 + 0x088) + #ifndef __ASSEMBLY__ /* In order to increase compilation test coverage */ -- cgit v1.2.3 From ee921183557af39c1a0475f982d43b0fcac25e2e Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Sun, 23 Aug 2020 13:55:36 +0200 Subject: netfilter: nfnetlink: nfnetlink_unicast() reports EAGAIN instead of ENOBUFS Frontend callback reports EAGAIN to nfnetlink to retry a command, this is used to signal that module autoloading is required. Unfortunately, nlmsg_unicast() reports EAGAIN in case the receiver socket buffer gets full, so it enters a busy-loop. This patch updates nfnetlink_unicast() to turn EAGAIN into ENOBUFS and to use nlmsg_unicast(). Remove the flags field in nfnetlink_unicast() since this is always MSG_DONTWAIT in the existing code which is exactly what nlmsg_unicast() passes to netlink_unicast() as parameter. 
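For readability, the new nfnetlink_unicast() body from the diff below, consolidated into one place with the reasoning spelled out in comments (a restatement of the patch, not additional behaviour):

int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid)
{
	int err;

	/* nlmsg_unicast() reports -EAGAIN when the receiver's socket buffer
	 * is full; translate it to -ENOBUFS so that -EAGAIN stays reserved
	 * for the "retry this command after module autoload" path.
	 */
	err = nlmsg_unicast(net->nfnl, skb, portid);
	if (err == -EAGAIN)
		err = -ENOBUFS;

	return err;
}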
Fixes: 96518518cc41 ("netfilter: add nftables") Reported-by: Phil Sutter Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/nfnetlink.h | 3 +- net/netfilter/nf_tables_api.c | 61 ++++++++++++++++++------------------- net/netfilter/nfnetlink.c | 11 +++++-- net/netfilter/nfnetlink_log.c | 3 +- net/netfilter/nfnetlink_queue.c | 2 +- 5 files changed, 40 insertions(+), 40 deletions(-) (limited to 'include') diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 851425c3178f..89016d08f6a2 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -43,8 +43,7 @@ int nfnetlink_has_listeners(struct net *net, unsigned int group); int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid, unsigned int group, int echo, gfp_t flags); int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error); -int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid, - int flags); +int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid); static inline u16 nfnl_msg_type(u8 subsys, u8 msg_type) { diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 71e501c5ad21..b7dc1cbf40ea 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -815,11 +815,11 @@ static int nf_tables_gettable(struct net *net, struct sock *nlsk, nlh->nlmsg_seq, NFT_MSG_NEWTABLE, 0, family, table); if (err < 0) - goto err; + goto err_fill_table_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); -err: +err_fill_table_info: kfree_skb(skb2); return err; } @@ -1563,11 +1563,11 @@ static int nf_tables_getchain(struct net *net, struct sock *nlsk, nlh->nlmsg_seq, NFT_MSG_NEWCHAIN, 0, family, table, chain); if (err < 0) - goto err; + goto err_fill_chain_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); -err: +err_fill_chain_info: kfree_skb(skb2); return err; } @@ -3008,11 +3008,11 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk, nlh->nlmsg_seq, NFT_MSG_NEWRULE, 0, family, table, chain, rule, NULL); if (err < 0) - goto err; + goto err_fill_rule_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); -err: +err_fill_rule_info: kfree_skb(skb2); return err; } @@ -3968,11 +3968,11 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk, err = nf_tables_fill_set(skb2, &ctx, set, NFT_MSG_NEWSET, 0); if (err < 0) - goto err; + goto err_fill_set_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); -err: +err_fill_set_info: kfree_skb(skb2); return err; } @@ -4860,24 +4860,18 @@ static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set, err = -ENOMEM; skb = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); if (skb == NULL) - goto err1; + return err; err = nf_tables_fill_setelem_info(skb, ctx, ctx->seq, ctx->portid, NFT_MSG_NEWSETELEM, 0, set, &elem); if (err < 0) - goto err2; + goto err_fill_setelem; - err = nfnetlink_unicast(skb, ctx->net, ctx->portid, MSG_DONTWAIT); - /* This avoids a loop in nfnetlink. */ - if (err < 0) - goto err1; + return nfnetlink_unicast(skb, ctx->net, ctx->portid); - return 0; -err2: +err_fill_setelem: kfree_skb(skb); -err1: - /* this avoids a loop in nfnetlink. */ - return err == -EAGAIN ? 
-ENOBUFS : err; + return err; } /* called with rcu_read_lock held */ @@ -6182,10 +6176,11 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk, nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0, family, table, obj, reset); if (err < 0) - goto err; + goto err_fill_obj_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); -err: + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); + +err_fill_obj_info: kfree_skb(skb2); return err; } @@ -7045,10 +7040,11 @@ static int nf_tables_getflowtable(struct net *net, struct sock *nlsk, NFT_MSG_NEWFLOWTABLE, 0, family, flowtable, &flowtable->hook_list); if (err < 0) - goto err; + goto err_fill_flowtable_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); -err: + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); + +err_fill_flowtable_info: kfree_skb(skb2); return err; } @@ -7234,10 +7230,11 @@ static int nf_tables_getgen(struct net *net, struct sock *nlsk, err = nf_tables_fill_gen_info(skb2, net, NETLINK_CB(skb).portid, nlh->nlmsg_seq); if (err < 0) - goto err; + goto err_fill_gen_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); -err: + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); + +err_fill_gen_info: kfree_skb(skb2); return err; } diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 5f24edf95830..3a2e64e13b22 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -149,10 +149,15 @@ int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error) } EXPORT_SYMBOL_GPL(nfnetlink_set_err); -int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid, - int flags) +int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid) { - return netlink_unicast(net->nfnl, skb, portid, flags); + int err; + + err = nlmsg_unicast(net->nfnl, skb, portid); + if (err == -EAGAIN) + err = -ENOBUFS; + + return err; } EXPORT_SYMBOL_GPL(nfnetlink_unicast); diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index f02992419850..b35e8d9a5b37 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -356,8 +356,7 @@ __nfulnl_send(struct nfulnl_instance *inst) goto out; } } - nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid, - MSG_DONTWAIT); + nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid); out: inst->qlen = 0; inst->skb = NULL; diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index dadfc06245a3..d1d8bca03b4f 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -681,7 +681,7 @@ __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue, *packet_id_ptr = htonl(entry->id); /* nfnetlink_unicast will either free the nskb or add it to a socket */ - err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT); + err = nfnetlink_unicast(nskb, net, queue->peer_portid); if (err < 0) { if (queue->flags & NFQA_CFG_F_FAIL_OPEN) { failopen = 1; -- cgit v1.2.3 From ef91bb196b0db1013ef8705367bc2d7944ef696b Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 28 Aug 2020 17:11:25 +1000 Subject: kernel.h: Silence sparse warning in lower_32_bits I keep getting sparse warnings in crypto such as: CHECK drivers/crypto/ccree/cc_hash.c drivers/crypto/ccree/cc_hash.c:49:9: warning: cast truncates bits from constant value (47b5481dbefa4fa4 becomes befa4fa4) drivers/crypto/ccree/cc_hash.c:49:26: warning: cast truncates bits from constant value (db0c2e0d64f98fa7 becomes 64f98fa7) [.. many more ..] 
This patch removes the warning by adding a mask to keep sparse happy. Signed-off-by: Herbert Xu Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 500def620d8f..c25b8e41c0ea 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -186,7 +186,7 @@ * lower_32_bits - return bits 0-31 of a number * @n: the number we're accessing */ -#define lower_32_bits(n) ((u32)(n)) +#define lower_32_bits(n) ((u32)((n) & 0xffffffff)) struct completion; struct pt_regs; -- cgit v1.2.3 From e5fc436f06eef54ef512ea55a9db8eb9f2e76959 Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Fri, 28 Aug 2020 10:53:01 +0200 Subject: sparse: use static inline for __chk_{user,io}_ptr() __chk_user_ptr() & __chk_io_ptr() are dummy extern functions which only exist to enforce the typechecking of __user or __iomem pointers in macros when using sparse. This typechecking is done by inserting a call to these functions. But the presence of these calls can inhibit some simplifications and so influence the result of sparse's analysis of context/locking. Fix this by changing these calls into static inline calls with an empty body. Signed-off-by: Luc Van Oostenryck Signed-off-by: Miguel Ojeda --- include/linux/compiler_types.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 4b33cb385f96..6e390d58a9f8 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -11,8 +11,8 @@ # define __iomem __attribute__((noderef, address_space(__iomem))) # define __percpu __attribute__((noderef, address_space(__percpu))) # define __rcu __attribute__((noderef, address_space(__rcu))) -extern void __chk_user_ptr(const volatile void __user *); -extern void __chk_io_ptr(const volatile void __iomem *); +static inline void __chk_user_ptr(const volatile void __user *ptr) { } +static inline void __chk_io_ptr(const volatile void __iomem *ptr) { } /* context/locking */ # define __must_hold(x) __attribute__((context(x,1,1))) # define __acquires(x) __attribute__((context(x,0,1))) -- cgit v1.2.3 From 55977744f9d862512a524fea93fc5226b09e76a9 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Fri, 28 Aug 2020 18:50:42 -0400 Subject: drm/amdkfd: Add GPU reset SMI event Add support for reporting GPU reset events through SMI. KFD would report both pre and post GPU reset events. 
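A hedged userspace sketch of consuming these records (obtaining the SMI event fd via the KFD SMI-events ioctl is elided and stdin stands in for it here; the record layout "<hex event id> <hex payload>" is assumed from the snprintf() format in the diff below, and the event ids 3 and 4 are taken from the kfd_ioctl.h hunk):

#include <stdio.h>

int main(void)
{
	char line[64];
	unsigned int event, seq;

	while (fgets(line, sizeof(line), stdin)) {
		/* every reset record carries the event id and a sequence number */
		if (sscanf(line, "%x %x", &event, &seq) != 2)
			continue;
		if (event == 3)			/* KFD_SMI_EVENT_GPU_PRE_RESET */
			printf("GPU reset pending, seq %u\n", seq);
		else if (event == 4)		/* KFD_SMI_EVENT_GPU_POST_RESET */
			printf("GPU reset done, seq %u\n", seq);
	}
	return 0;
}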
Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 4 ++++ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 ++ drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c | 34 +++++++++++++++++++++++++++-- drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h | 1 + include/uapi/linux/kfd_ioctl.h | 2 ++ 5 files changed, 41 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index e1cd6599529f..0e71a0543f98 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -812,6 +812,8 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd) if (!kfd->init_complete) return 0; + kfd_smi_event_update_gpu_reset(kfd, false); + kfd->dqm->ops.pre_reset(kfd->dqm); kgd2kfd_suspend(kfd, false); @@ -840,6 +842,8 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd) atomic_set(&kfd->sram_ecc_flag, 0); + kfd_smi_event_update_gpu_reset(kfd, true); + return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index f14beb93acb4..023629f28495 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -312,6 +312,8 @@ struct kfd_dev { /* Clients watching SMI events */ struct list_head smi_clients; spinlock_t smi_lock; + + uint32_t reset_seq_num; }; enum kfd_mempool { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c index 4d4b6e3ab697..17d1736367ea 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c @@ -174,6 +174,36 @@ static void add_event_to_kfifo(struct kfd_dev *dev, unsigned int smi_event, rcu_read_unlock(); } +void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset) +{ + /* + * GpuReset msg = Reset seq number (incremented for + * every reset message sent before GPU reset). 
+ * 1 byte event + 1 byte space + 8 bytes seq num + + * 1 byte \n + 1 byte \0 = 12 + */ + char fifo_in[12]; + int len; + unsigned int event; + + if (list_empty(&dev->smi_clients)) + return; + + memset(fifo_in, 0x0, sizeof(fifo_in)); + + if (post_reset) { + event = KFD_SMI_EVENT_GPU_POST_RESET; + } else { + event = KFD_SMI_EVENT_GPU_PRE_RESET; + ++(dev->reset_seq_num); + } + + len = snprintf(fifo_in, sizeof(fifo_in), "%x %x\n", event, + dev->reset_seq_num); + + add_event_to_kfifo(dev, event, fifo_in, len); +} + void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev, uint32_t throttle_bitmask) { @@ -191,7 +221,7 @@ void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev, if (list_empty(&dev->smi_clients)) return; - len = snprintf(fifo_in, 29, "%x %x:%llx\n", + len = snprintf(fifo_in, sizeof(fifo_in), "%x %x:%llx\n", KFD_SMI_EVENT_THERMAL_THROTTLE, throttle_bitmask, atomic64_read(&adev->smu.throttle_int_counter)); @@ -218,7 +248,7 @@ void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid) if (!task_info.pid) return; - len = snprintf(fifo_in, 29, "%x %x:%s\n", KFD_SMI_EVENT_VMFAULT, + len = snprintf(fifo_in, sizeof(fifo_in), "%x %x:%s\n", KFD_SMI_EVENT_VMFAULT, task_info.pid, task_info.task_name); add_event_to_kfifo(dev, KFD_SMI_EVENT_VMFAULT, fifo_in, len); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h index 15537b2cccb5..b9b0438202e2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h @@ -27,5 +27,6 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd); void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid); void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev, uint32_t throttle_bitmask); +void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset); #endif diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index cb1f963a84e0..8b7368bfbd84 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -453,6 +453,8 @@ enum kfd_smi_event { KFD_SMI_EVENT_NONE = 0, /* not used */ KFD_SMI_EVENT_VMFAULT = 1, /* event start counting at 1 */ KFD_SMI_EVENT_THERMAL_THROTTLE = 2, + KFD_SMI_EVENT_GPU_PRE_RESET = 3, + KFD_SMI_EVENT_GPU_POST_RESET = 4, }; #define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1)) -- cgit v1.2.3 From 5dc1a0bcb758c343b873e8330ee986417f5a1727 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Fri, 28 Aug 2020 19:53:08 -0400 Subject: include/uapi/linux: Fix indentation in kfd_smi_event enum Replace spaces with Tabs to fix indentation in kfd_smi_event enum. 
Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- include/uapi/linux/kfd_ioctl.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index 8b7368bfbd84..695b606da4b1 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -450,9 +450,9 @@ struct kfd_ioctl_import_dmabuf_args { * KFD SMI(System Management Interface) events */ enum kfd_smi_event { - KFD_SMI_EVENT_NONE = 0, /* not used */ - KFD_SMI_EVENT_VMFAULT = 1, /* event start counting at 1 */ - KFD_SMI_EVENT_THERMAL_THROTTLE = 2, + KFD_SMI_EVENT_NONE = 0, /* not used */ + KFD_SMI_EVENT_VMFAULT = 1, /* event start counting at 1 */ + KFD_SMI_EVENT_THERMAL_THROTTLE = 2, KFD_SMI_EVENT_GPU_PRE_RESET = 3, KFD_SMI_EVENT_GPU_POST_RESET = 4, }; -- cgit v1.2.3 From 4b4659128e634ce65dc02acf297d623f5bc97497 Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Wed, 26 Aug 2020 14:24:45 -0400 Subject: drm/i915/dp: Extract drm_dp_read_mst_cap() Just a tiny drive-by cleanup, we can consolidate i915's code for checking for MST support into a helper to be shared across drivers. v5: * Drop !!() * Move drm_dp_has_mst() out of header * Change name from drm_dp_has_mst() to drm_dp_read_mst_cap() Signed-off-by: Lyude Paul Reviewed-by: Sean Paul Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20200826182456.322681-10-lyude@redhat.com --- drivers/gpu/drm/drm_dp_mst_topology.c | 22 ++++++++++++++++++++++ drivers/gpu/drm/i915/display/intel_dp.c | 18 ++---------------- include/drm/drm_dp_mst_helper.h | 3 +-- 3 files changed, 25 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index b23cb2fec3f3..61755c26fac0 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -3486,6 +3486,28 @@ static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count) return dp_link_bw * dp_link_count / 2; } +/** + * drm_dp_read_mst_cap() - check whether or not a sink supports MST + * @aux: The DP AUX channel to use + * @dpcd: A cached copy of the DPCD capabilities for this sink + * + * Returns: %True if the sink supports MST, %false otherwise + */ +bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, + const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + u8 mstm_cap; + + if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12) + return false; + + if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1) + return false; + + return mstm_cap & DP_MST_CAP; +} +EXPORT_SYMBOL(drm_dp_read_mst_cap); + /** * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager * @mgr: manager to set state for diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 79c27f91f42c..4c7314b7a84e 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -4699,20 +4699,6 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) return true; } -static bool -intel_dp_sink_can_mst(struct intel_dp *intel_dp) -{ - u8 mstm_cap; - - if (intel_dp->dpcd[DP_DPCD_REV] < 0x12) - return false; - - if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1) - return false; - - return mstm_cap & DP_MST_CAP; -} - static bool intel_dp_can_mst(struct intel_dp *intel_dp) { @@ -4720,7 +4706,7 @@ intel_dp_can_mst(struct intel_dp *intel_dp) return i915->params.enable_dp_mst && intel_dp->can_mst && - intel_dp_sink_can_mst(intel_dp); 
+ drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd); } static void @@ -4729,7 +4715,7 @@ intel_dp_configure_mst(struct intel_dp *intel_dp) struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - bool sink_can_mst = intel_dp_sink_can_mst(intel_dp); + bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd); drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n", diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index 8b9eb4db3381..6ae5860d8644 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -728,10 +728,9 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr); - +bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]); int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state); - int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled); -- cgit v1.2.3 From 3d3721ccb18a3dcec874c44120e2df7ec1c1db99 Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Wed, 26 Aug 2020 14:24:49 -0400 Subject: drm/i915/dp: Extract drm_dp_read_downstream_info() We're going to be doing the same probing process in nouveau for determining downstream DP port capabilities, so let's deduplicate the work by moving i915's code for handling this into a shared helper: drm_dp_read_downstream_info(). Note that when we do this, we also do make some functional changes while we're at it: * We always clear the downstream port info before trying to read it, just to make things easier for the caller * We skip reading downstream port info if the DPCD indicates that we don't support downstream port info * We only read as many bytes as needed for the reported number of downstream ports, no sense in reading the whole thing every time v2: * Fixup logic for calculating the downstream port length to account for the fact that downstream port caps can be either 1 byte or 4 bytes long. We can actually skip fixing the max_clock/max_bpc helpers here since they all check for DP_DETAILED_CAP_INFO_AVAILABLE anyway. 
* Fix ret code check for drm_dp_dpcd_read v5: * Change name from drm_dp_downstream_read_info() to drm_dp_read_downstream_info() * Also, add "See Also" sections for the various downstream info functions (drm_dp_read_downstream_info(), drm_dp_downstream_max_clock(), drm_dp_downstream_max_bpc()) Reviewed-by: Sean Paul Signed-off-by: Lyude Paul Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20200826182456.322681-14-lyude@redhat.com --- drivers/gpu/drm/drm_dp_helper.c | 62 +++++++++++++++++++++++++++++++-- drivers/gpu/drm/i915/display/intel_dp.c | 14 ++------ include/drm/drm_dp_helper.h | 3 ++ 3 files changed, 65 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 4c21cf69dad5..f3643894ad95 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -423,6 +423,56 @@ bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux, } EXPORT_SYMBOL(drm_dp_send_real_edid_checksum); +static u8 drm_dp_downstream_port_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + u8 port_count = dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_PORT_COUNT_MASK; + + if (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE && port_count > 4) + port_count = 4; + + return port_count; +} + +/** + * drm_dp_read_downstream_info() - read DPCD downstream port info if available + * @aux: DisplayPort AUX channel + * @dpcd: A cached copy of the port's DPCD + * @downstream_ports: buffer to store the downstream port info in + * + * See also: + * drm_dp_downstream_max_clock() + * drm_dp_downstream_max_bpc() + * + * Returns: 0 if either the downstream port info was read successfully or + * there was no downstream info to read, or a negative error code otherwise. + */ +int drm_dp_read_downstream_info(struct drm_dp_aux *aux, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]) +{ + int ret; + u8 len; + + memset(downstream_ports, 0, DP_MAX_DOWNSTREAM_PORTS); + + /* No downstream info to read */ + if (!drm_dp_is_branch(dpcd) || + dpcd[DP_DPCD_REV] < DP_DPCD_REV_10 || + !(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT)) + return 0; + + len = drm_dp_downstream_port_count(dpcd); + if (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) + len *= 4; + + ret = drm_dp_dpcd_read(aux, DP_DOWNSTREAM_PORT_0, downstream_ports, len); + if (ret < 0) + return ret; + + return ret == len ? 
0 : -EIO; +} +EXPORT_SYMBOL(drm_dp_read_downstream_info); + /** * drm_dp_downstream_max_clock() - extract branch device max * pixel rate for legacy VGA @@ -431,7 +481,11 @@ EXPORT_SYMBOL(drm_dp_send_real_edid_checksum); * @dpcd: DisplayPort configuration data * @port_cap: port capabilities * - * Returns max clock in kHz on success or 0 if max clock not defined + * See also: + * drm_dp_read_downstream_info() + * drm_dp_downstream_max_bpc() + * + * Returns: Max clock in kHz on success or 0 if max clock not defined */ int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4]) @@ -462,7 +516,11 @@ EXPORT_SYMBOL(drm_dp_downstream_max_clock); * @dpcd: DisplayPort configuration data * @port_cap: port capabilities * - * Returns max bpc on success or 0 if max bpc not defined + * See also: + * drm_dp_read_downstream_info() + * drm_dp_downstream_max_clock() + * + * Returns: Max bpc on success or 0 if max bpc not defined */ int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4]) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 4c7314b7a84e..9c4b806af8c7 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -4685,18 +4685,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) return false; } - if (!drm_dp_is_branch(intel_dp->dpcd)) - return true; /* native DP sink */ - - if (intel_dp->dpcd[DP_DPCD_REV] == 0x10) - return true; /* no per-port downstream info */ - - if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0, - intel_dp->downstream_ports, - DP_MAX_DOWNSTREAM_PORTS) < 0) - return false; /* downstream port status fetch failed */ - - return true; + return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd, + intel_dp->downstream_ports) == 0; } static bool diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 5c2819924862..b8716b200666 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -1613,6 +1613,9 @@ int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux, bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux, u8 real_edid_checksum); +int drm_dp_read_downstream_info(struct drm_dp_aux *aux, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]); int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4]); int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE], -- cgit v1.2.3 From 693c3ec5976eb4b66cbd4f3f1c701a6f0ae1c9b9 Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Wed, 26 Aug 2020 14:24:51 -0400 Subject: drm/i915/dp: Extract drm_dp_read_sink_count_cap() Since other drivers are also going to need to be aware of the sink count in order to do proper dongle detection, we might as well steal i915's DP_SINK_COUNT helpers and move them into DRM helpers so that other dirvers can use them as well. Note that this also starts using intel_dp_has_sink_count() in intel_dp_detect_dpcd(), which is a functional change. 
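A hedged sketch of how another driver might use this during detection (the driver-side function is invented for illustration; drm_dp_read_sink_count() is the companion helper added in a follow-up patch below):

static enum drm_connector_status
foo_dp_detect_branch(struct drm_connector *connector, struct drm_dp_aux *aux,
		     const u8 dpcd[DP_RECEIVER_CAP_SIZE],
		     const struct drm_dp_desc *desc)
{
	int count;

	/* eDP, pre-1.1 DPCD, no downstream port, or the NO_SINK_COUNT quirk:
	 * nothing meaningful to probe */
	if (!drm_dp_read_sink_count_cap(connector, dpcd, desc))
		return connector_status_connected;

	count = drm_dp_read_sink_count(aux);
	if (count < 0)
		return connector_status_unknown;

	return count ? connector_status_connected :
		       connector_status_disconnected;
}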
v5: * Change name from drm_dp_has_sink_count() to drm_dp_read_sink_count_cap() Signed-off-by: Lyude Paul Reviewed-by: Sean Paul Link: https://patchwork.freedesktop.org/patch/msgid/20200826182456.322681-16-lyude@redhat.com --- drivers/gpu/drm/drm_dp_helper.c | 22 ++++++++++++++++++++++ drivers/gpu/drm/i915/display/intel_dp.c | 21 ++++++++++++--------- include/drm/drm_dp_helper.h | 8 +++++++- 3 files changed, 41 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index f3643894ad95..65ff21ae0c27 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -726,6 +726,28 @@ void drm_dp_set_subconnector_property(struct drm_connector *connector, } EXPORT_SYMBOL(drm_dp_set_subconnector_property); +/** + * drm_dp_read_sink_count_cap() - Check whether a given connector has a valid sink + * count + * @connector: The DRM connector to check + * @dpcd: A cached copy of the connector's DPCD RX capabilities + * @desc: A cached copy of the connector's DP descriptor + * + * Returns: %True if the (e)DP connector has a valid sink count that should + * be probed, %false otherwise. + */ +bool drm_dp_read_sink_count_cap(struct drm_connector *connector, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const struct drm_dp_desc *desc) +{ + /* Some eDP panels don't set a valid value for the sink count */ + return connector->connector_type != DRM_MODE_CONNECTOR_eDP && + dpcd[DP_DPCD_REV] >= DP_DPCD_REV_11 && + dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT && + !drm_dp_has_quirk(desc, 0, DP_DPCD_QUIRK_NO_SINK_COUNT); +} +EXPORT_SYMBOL(drm_dp_read_sink_count_cap); + /* * I2C-over-AUX implementation */ diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 9c4b806af8c7..38318ae935f8 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -4634,6 +4634,16 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) return true; } +static bool +intel_dp_has_sink_count(struct intel_dp *intel_dp) +{ + if (!intel_dp->attached_connector) + return false; + + return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base, + intel_dp->dpcd, + &intel_dp->desc); +} static bool intel_dp_get_dpcd(struct intel_dp *intel_dp) @@ -4653,13 +4663,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) intel_dp_set_common_rates(intel_dp); } - /* - * Some eDP panels do not set a valid value for sink count, that is why - * it don't care about read it here and in intel_edp_init_dpcd(). - */ - if (!intel_dp_is_edp(intel_dp) && - !drm_dp_has_quirk(&intel_dp->desc, 0, - DP_DPCD_QUIRK_NO_SINK_COUNT)) { + if (intel_dp_has_sink_count(intel_dp)) { u8 count; ssize_t r; @@ -5939,9 +5943,8 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp) return connector_status_connected; /* If we're HPD-aware, SINK_COUNT changes dynamically */ - if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && + if (intel_dp_has_sink_count(intel_dp) && intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) { - return intel_dp->sink_count ? 
connector_status_connected : connector_status_disconnected; } diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index b8716b200666..4c56ce4dc54f 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -1631,6 +1631,11 @@ void drm_dp_set_subconnector_property(struct drm_connector *connector, const u8 *dpcd, const u8 port_cap[4]); +struct drm_dp_desc; +bool drm_dp_read_sink_count_cap(struct drm_connector *connector, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const struct drm_dp_desc *desc); + void drm_dp_remote_aux_init(struct drm_dp_aux *aux); void drm_dp_aux_init(struct drm_dp_aux *aux); int drm_dp_aux_register(struct drm_dp_aux *aux); @@ -1689,7 +1694,8 @@ enum drm_dp_quirk { * @DP_DPCD_QUIRK_NO_SINK_COUNT: * * The device does not set SINK_COUNT to a non-zero value. - * The driver should ignore SINK_COUNT during detection. + * The driver should ignore SINK_COUNT during detection. Note that + * drm_dp_read_sink_count_cap() automatically checks for this quirk. */ DP_DPCD_QUIRK_NO_SINK_COUNT, /** -- cgit v1.2.3 From 4778ff052812029bc076f5e78eff3ba1851cbea7 Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Wed, 26 Aug 2020 14:24:52 -0400 Subject: drm/i915/dp: Extract drm_dp_read_sink_count() And of course, we'll also need to read the sink count from other drivers as well if we're checking whether or not it's supported. So, let's extract the code for this into another helper. v2: * Fix drm_dp_dpcd_readb() ret check * Add back comment and move back sink_count assignment in intel_dp_get_dpcd() v5: * Change name from drm_dp_get_sink_count() to drm_dp_read_sink_count() * Also, add "See also:" section to kdocs Signed-off-by: Lyude Paul Reviewed-by: Sean Paul Link: https://patchwork.freedesktop.org/patch/msgid/20200826182456.322681-17-lyude@redhat.com --- drivers/gpu/drm/drm_dp_helper.c | 26 ++++++++++++++++++++++++++ drivers/gpu/drm/i915/display/intel_dp.c | 11 +++++------ include/drm/drm_dp_helper.h | 1 + 3 files changed, 32 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 65ff21ae0c27..46d88ef4f59a 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -733,6 +733,8 @@ EXPORT_SYMBOL(drm_dp_set_subconnector_property); * @dpcd: A cached copy of the connector's DPCD RX capabilities * @desc: A cached copy of the connector's DP descriptor * + * See also: drm_dp_read_sink_count() + * * Returns: %True if the (e)DP connector has a valid sink count that should * be probed, %false otherwise. */ @@ -748,6 +750,30 @@ bool drm_dp_read_sink_count_cap(struct drm_connector *connector, } EXPORT_SYMBOL(drm_dp_read_sink_count_cap); +/** + * drm_dp_read_sink_count() - Retrieve the sink count for a given sink + * @aux: The DP AUX channel to use + * + * See also: drm_dp_read_sink_count_cap() + * + * Returns: The current sink count reported by @aux, or a negative error code + * otherwise. 
+ */ +int drm_dp_read_sink_count(struct drm_dp_aux *aux) +{ + u8 count; + int ret; + + ret = drm_dp_dpcd_readb(aux, DP_SINK_COUNT, &count); + if (ret < 0) + return ret; + if (ret != 1) + return -EIO; + + return DP_GET_SINK_COUNT(count); +} +EXPORT_SYMBOL(drm_dp_read_sink_count); + /* * I2C-over-AUX implementation */ diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 38318ae935f8..0de94fc6289b 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -4648,6 +4648,8 @@ intel_dp_has_sink_count(struct intel_dp *intel_dp) static bool intel_dp_get_dpcd(struct intel_dp *intel_dp) { + int ret; + if (!intel_dp_read_dpcd(intel_dp)) return false; @@ -4664,11 +4666,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) } if (intel_dp_has_sink_count(intel_dp)) { - u8 count; - ssize_t r; - - r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count); - if (r < 1) + ret = drm_dp_read_sink_count(&intel_dp->aux); + if (ret < 0) return false; /* @@ -4676,7 +4675,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) * a member variable in intel_dp will track any changes * between short pulse interrupts. */ - intel_dp->sink_count = DP_GET_SINK_COUNT(count); + intel_dp->sink_count = ret; /* * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 4c56ce4dc54f..bc5cb8c503fb 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -1635,6 +1635,7 @@ struct drm_dp_desc; bool drm_dp_read_sink_count_cap(struct drm_connector *connector, const u8 dpcd[DP_RECEIVER_CAP_SIZE], const struct drm_dp_desc *desc); +int drm_dp_read_sink_count(struct drm_dp_aux *aux); void drm_dp_remote_aux_init(struct drm_dp_aux *aux); void drm_dp_aux_init(struct drm_dp_aux *aux); -- cgit v1.2.3 From b9936121d95b0127d34fa6c25678994582d1b17c Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Wed, 26 Aug 2020 14:24:55 -0400 Subject: drm/i915/dp: Extract drm_dp_read_dpcd_caps() Since DP 1.3, it's been possible for DP receivers to specify an additional set of DPCD capabilities, which can take precedence over the capabilities reported at DP_DPCD_REV. Basically any device supporting DP is going to need to read these in an identical manner, in particular nouveau, so let's go ahead and just move this code out of i915 into a shared DRM DP helper that we can use in other drivers. 
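A hedged sketch of the probe sequence these helpers make possible outside i915 (struct foo_dp and its fields are invented for illustration; the helper signatures are the ones added in this series):

struct foo_dp {
	struct drm_dp_aux aux;
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
	bool mst;
};

static int foo_dp_probe_caps(struct foo_dp *dp)
{
	int ret;

	/* base caps, transparently replaced by the extended receiver caps at
	 * DP_DP13_DPCD_REV when the sink advertises them */
	ret = drm_dp_read_dpcd_caps(&dp->aux, dp->dpcd);
	if (ret)
		return ret;

	/* per-port downstream caps; left cleared when there is nothing to read */
	ret = drm_dp_read_downstream_info(&dp->aux, dp->dpcd,
					  dp->downstream_ports);
	if (ret)
		return ret;

	dp->mst = drm_dp_read_mst_cap(&dp->aux, dp->dpcd);
	return 0;
}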
v2: * Remove redundant dpcd[DP_DPCD_REV] == 0 check * Fix drm_dp_dpcd_read() ret checks Signed-off-by: Lyude Paul Reviewed-by: Sean Paul Link: https://patchwork.freedesktop.org/patch/msgid/20200826182456.322681-20-lyude@redhat.com --- drivers/gpu/drm/drm_dp_helper.c | 77 +++++++++++++++++++++++++++++ drivers/gpu/drm/i915/display/intel_dp.c | 60 +--------------------- drivers/gpu/drm/i915/display/intel_dp.h | 1 - drivers/gpu/drm/i915/display/intel_lspcon.c | 2 +- include/drm/drm_dp_helper.h | 3 ++ 5 files changed, 83 insertions(+), 60 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 46d88ef4f59a..9ca88e6c7882 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -433,6 +433,83 @@ static u8 drm_dp_downstream_port_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) return port_count; } +static int drm_dp_read_extended_dpcd_caps(struct drm_dp_aux *aux, + u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + u8 dpcd_ext[6]; + int ret; + + /* + * Prior to DP1.3 the bit represented by + * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved. + * If it is set DP_DPCD_REV at 0000h could be at a value less than + * the true capability of the panel. The only way to check is to + * then compare 0000h and 2200h. + */ + if (!(dpcd[DP_TRAINING_AUX_RD_INTERVAL] & + DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT)) + return 0; + + ret = drm_dp_dpcd_read(aux, DP_DP13_DPCD_REV, &dpcd_ext, + sizeof(dpcd_ext)); + if (ret < 0) + return ret; + if (ret != sizeof(dpcd_ext)) + return -EIO; + + if (dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) { + DRM_DEBUG_KMS("%s: Extended DPCD rev less than base DPCD rev (%d > %d)\n", + aux->name, dpcd[DP_DPCD_REV], + dpcd_ext[DP_DPCD_REV]); + return 0; + } + + if (!memcmp(dpcd, dpcd_ext, sizeof(dpcd_ext))) + return 0; + + DRM_DEBUG_KMS("%s: Base DPCD: %*ph\n", + aux->name, DP_RECEIVER_CAP_SIZE, dpcd); + + memcpy(dpcd, dpcd_ext, sizeof(dpcd_ext)); + + return 0; +} + +/** + * drm_dp_read_dpcd_caps() - read DPCD caps and extended DPCD caps if + * available + * @aux: DisplayPort AUX channel + * @dpcd: Buffer to store the resulting DPCD in + * + * Attempts to read the base DPCD caps for @aux. Additionally, this function + * checks for and reads the extended DPRX caps (%DP_DP13_DPCD_REV) if + * present. + * + * Returns: %0 if the DPCD was read successfully, negative error code + * otherwise. 
+ */ +int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux, + u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + int ret; + + ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, DP_RECEIVER_CAP_SIZE); + if (ret < 0) + return ret; + if (ret != DP_RECEIVER_CAP_SIZE || dpcd[DP_DPCD_REV] == 0) + return -EIO; + + ret = drm_dp_read_extended_dpcd_caps(aux, dpcd); + if (ret < 0) + return ret; + + DRM_DEBUG_KMS("%s: DPCD: %*ph\n", + aux->name, DP_RECEIVER_CAP_SIZE, dpcd); + + return ret; +} +EXPORT_SYMBOL(drm_dp_read_dpcd_caps); + /** * drm_dp_read_downstream_info() - read DPCD downstream port info if available * @aux: DisplayPort AUX channel diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 0de94fc6289b..284b15f84592 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -4449,62 +4449,6 @@ intel_dp_link_down(struct intel_encoder *encoder, } } -static void -intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp) -{ - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - u8 dpcd_ext[6]; - - /* - * Prior to DP1.3 the bit represented by - * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved. - * if it is set DP_DPCD_REV at 0000h could be at a value less than - * the true capability of the panel. The only way to check is to - * then compare 0000h and 2200h. - */ - if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] & - DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT)) - return; - - if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV, - &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) { - drm_err(&i915->drm, - "DPCD failed read at extended capabilities\n"); - return; - } - - if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) { - drm_dbg_kms(&i915->drm, - "DPCD extended DPCD rev less than base DPCD rev\n"); - return; - } - - if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext))) - return; - - drm_dbg_kms(&i915->drm, "Base DPCD: %*ph\n", - (int)sizeof(intel_dp->dpcd), intel_dp->dpcd); - - memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)); -} - -bool -intel_dp_read_dpcd(struct intel_dp *intel_dp) -{ - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - - if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd, - sizeof(intel_dp->dpcd)) < 0) - return false; /* aux transfer failed */ - - intel_dp_extended_receiver_capabilities(intel_dp); - - drm_dbg_kms(&i915->drm, "DPCD: %*ph\n", (int)sizeof(intel_dp->dpcd), - intel_dp->dpcd); - - return intel_dp->dpcd[DP_DPCD_REV] != 0; -} - bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) { u8 dprx = 0; @@ -4563,7 +4507,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) /* this function is meant to be called only once */ drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0); - if (!intel_dp_read_dpcd(intel_dp)) + if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0) return false; drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, @@ -4650,7 +4594,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) { int ret; - if (!intel_dp_read_dpcd(intel_dp)) + if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) return false; /* diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index b901ab850cbd..0a3af3410d52 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -99,7 +99,6 @@ bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp); bool intel_dp_get_link_status(struct intel_dp *intel_dp, u8 *link_status); -bool intel_dp_read_dpcd(struct intel_dp 
*intel_dp); bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp); int intel_dp_link_required(int pixel_clock, int bpp); int intel_dp_max_data_rate(int max_link_clock, int max_lanes); diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c index b781bf469644..dc1b35559afd 100644 --- a/drivers/gpu/drm/i915/display/intel_lspcon.c +++ b/drivers/gpu/drm/i915/display/intel_lspcon.c @@ -571,7 +571,7 @@ bool lspcon_init(struct intel_digital_port *dig_port) return false; } - if (!intel_dp_read_dpcd(dp)) { + if (drm_dp_read_dpcd_caps(&dp->aux, dp->dpcd) != 0) { DRM_ERROR("LSPCON DPCD read failed\n"); return false; } diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index bc5cb8c503fb..85513eeb2196 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -1607,6 +1607,9 @@ static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux, return drm_dp_dpcd_write(aux, offset, &value, 1); } +int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux, + u8 dpcd[DP_RECEIVER_CAP_SIZE]); + int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux, u8 status[DP_LINK_STATUS_SIZE]); -- cgit v1.2.3 From 35556bed836f8dc07ac55f69c8d17dce3e7f0e25 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 1 Sep 2020 10:52:33 +0100 Subject: HID: core: Sanitize event code and type when mapping input When calling into hid_map_usage(), the passed event code is blindly stored as is, even if it doesn't fit in the associated bitmap. This event code can come from a variety of sources, including devices masquerading as input devices, only a bit more "programmable". Instead of taking the event code at face value, check that it actually fits the corresponding bitmap, and if it doesn't: - spit out a warning so that we know which device is acting up - NULLify the bitmap pointer so that we catch unexpected uses Code paths that can make use of untrusted inputs can now check that the mapping was indeed correct and bail out if not. 
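As a sketch only (the callback name and button mapping below are invented for illustration, not taken from an existing driver), an input_mapping hook would now treat a NULL bitmap pointer after hid_map_usage() as a failed mapping:

static int example_input_mapping(struct hid_device *hdev, struct hid_input *hi,
				 struct hid_field *field, struct hid_usage *usage,
				 unsigned long **bit, int *max)
{
	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_BUTTON)
		return 0;

	hid_map_usage(hi, usage, bit, max, EV_KEY,
		      BTN_MISC + (usage->hid & HID_USAGE));

	/* The code did not fit in the EV_KEY bitmap: refuse the mapping. */
	if (!*bit)
		return -1;

	return 1;
}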
Cc: stable@vger.kernel.org Signed-off-by: Marc Zyngier Signed-off-by: Benjamin Tissoires --- drivers/hid/hid-input.c | 4 ++++ drivers/hid/hid-multitouch.c | 2 ++ include/linux/hid.h | 42 +++++++++++++++++++++++++++++------------- 3 files changed, 35 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index b8eabf206e74..88e19996427e 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -1132,6 +1132,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel } mapped: + /* Mapping failed, bail out */ + if (!bit) + return; + if (device->driver->input_mapped && device->driver->input_mapped(device, hidinput, field, usage, &bit, &max) < 0) { diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 3f94b4954225..e3152155c4b8 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -856,6 +856,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi, code = BTN_0 + ((usage->hid - 1) & HID_USAGE); hid_map_usage(hi, usage, bit, max, EV_KEY, code); + if (!*bit) + return -1; input_set_capability(hi->input, EV_KEY, code); return 1; diff --git a/include/linux/hid.h b/include/linux/hid.h index 875f71132b14..c7044a14200e 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -959,34 +959,49 @@ static inline void hid_device_io_stop(struct hid_device *hid) { * @max: maximal valid usage->code to consider later (out parameter) * @type: input event type (EV_KEY, EV_REL, ...) * @c: code which corresponds to this usage and type + * + * The value pointed to by @bit will be set to NULL if either @type is + * an unhandled event type, or if @c is out of range for @type. This + * can be used as an error condition. */ static inline void hid_map_usage(struct hid_input *hidinput, struct hid_usage *usage, unsigned long **bit, int *max, - __u8 type, __u16 c) + __u8 type, unsigned int c) { struct input_dev *input = hidinput->input; - - usage->type = type; - usage->code = c; + unsigned long *bmap = NULL; + unsigned int limit = 0; switch (type) { case EV_ABS: - *bit = input->absbit; - *max = ABS_MAX; + bmap = input->absbit; + limit = ABS_MAX; break; case EV_REL: - *bit = input->relbit; - *max = REL_MAX; + bmap = input->relbit; + limit = REL_MAX; break; case EV_KEY: - *bit = input->keybit; - *max = KEY_MAX; + bmap = input->keybit; + limit = KEY_MAX; break; case EV_LED: - *bit = input->ledbit; - *max = LED_MAX; + bmap = input->ledbit; + limit = LED_MAX; break; } + + if (unlikely(c > limit || !bmap)) { + pr_warn_ratelimited("%s: Invalid code %d type %d\n", + input->name, c, type); + *bit = NULL; + return; + } + + usage->type = type; + usage->code = c; + *max = limit; + *bit = bmap; } /** @@ -1000,7 +1015,8 @@ static inline void hid_map_usage_clear(struct hid_input *hidinput, __u8 type, __u16 c) { hid_map_usage(hidinput, usage, bit, max, type, c); - clear_bit(c, *bit); + if (*bit) + clear_bit(usage->code, *bit); } /** -- cgit v1.2.3 From 9ab57658a608f879469ffa22b723c4539c05a58f Mon Sep 17 00:00:00 2001 From: Sean Paul Date: Tue, 18 Aug 2020 11:38:49 -0400 Subject: drm/i915: Fix sha_text population code This patch fixes a few bugs: 1- We weren't taking into account sha_leftovers when adding multiple ksvs to sha_text. 
As such, we were or'ing the end of ksv[j - 1] with the beginning of ksv[j] 2- In the sha_leftovers == 2 and sha_leftovers == 3 case, bstatus was being placed on the wrong half of sha_text, overlapping the leftover ksv value 3- In the sha_leftovers == 2 case, we need to manually terminate the byte stream with 0x80 since the hardware doesn't have enough room to add it after writing M0 The upside is that all of the HDCP supported HDMI repeaters I could find on Amazon just strip HDCP anyways, so it turns out to be _really_ hard to hit any of these cases without an MST hub, which is not (yet) supported. Oh, and the sha_leftovers == 1 case works perfectly! Fixes: ee5e5e7a5e0f ("drm/i915: Add HDCP framework + base implementation") Cc: Chris Wilson Cc: Ramalingam C Cc: Daniel Vetter Cc: Sean Paul Cc: Jani Nikula Cc: Joonas Lahtinen Cc: Rodrigo Vivi Cc: intel-gfx@lists.freedesktop.org Cc: # v4.17+ Reviewed-by: Ramalingam C Signed-off-by: Sean Paul Signed-off-by: Ramalingam C Link: https://patchwork.freedesktop.org/patch/msgid/20200818153910.27894-2-sean@poorly.run (cherry picked from commit 1f0882214fd0037b74f245d9be75c31516fed040) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_hdcp.c | 26 ++++++++++++++++++++------ include/drm/drm_hdcp.h | 3 +++ 2 files changed, 23 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 89a4d294822d..6189b7583277 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -336,8 +336,10 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, /* Fill up the empty slots in sha_text and write it out */ sha_empty = sizeof(sha_text) - sha_leftovers; - for (j = 0; j < sha_empty; j++) - sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8); + for (j = 0; j < sha_empty; j++) { + u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8); + sha_text |= ksv[j] << off; + } ret = intel_write_sha_text(dev_priv, sha_text); if (ret < 0) @@ -435,7 +437,7 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, /* Write 32 bits of text */ intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); - sha_text |= bstatus[0] << 24 | bstatus[1] << 16; + sha_text |= bstatus[0] << 8 | bstatus[1]; ret = intel_write_sha_text(dev_priv, sha_text); if (ret < 0) return ret; @@ -450,17 +452,29 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, return ret; sha_idx += sizeof(sha_text); } + + /* + * Terminate the SHA-1 stream by hand. For the other leftover + * cases this is appended by the hardware. 
+ */ + intel_de_write(dev_priv, HDCP_REP_CTL, + rep_ctl | HDCP_SHA1_TEXT_32); + sha_text = DRM_HDCP_SHA1_TERMINATOR << 24; + ret = intel_write_sha_text(dev_priv, sha_text); + if (ret < 0) + return ret; + sha_idx += sizeof(sha_text); } else if (sha_leftovers == 3) { - /* Write 32 bits of text */ + /* Write 32 bits of text (filled from LSB) */ intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); - sha_text |= bstatus[0] << 24; + sha_text |= bstatus[0]; ret = intel_write_sha_text(dev_priv, sha_text); if (ret < 0) return ret; sha_idx += sizeof(sha_text); - /* Write 8 bits of text, 24 bits of M0 */ + /* Write 8 bits of text (filled from LSB), 24 bits of M0 */ intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8); ret = intel_write_sha_text(dev_priv, bstatus[1]); diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h index c6bab4986a65..fe58dbb46962 100644 --- a/include/drm/drm_hdcp.h +++ b/include/drm/drm_hdcp.h @@ -29,6 +29,9 @@ /* Slave address for the HDCP registers in the receiver */ #define DRM_HDCP_DDC_ADDR 0x3A +/* Value to use at the end of the SHA-1 bytestream used for repeaters */ +#define DRM_HDCP_SHA1_TERMINATOR 0x80 + /* HDCP register offsets for HDMI/DVI devices */ #define DRM_HDCP_DDC_BKSV 0x00 #define DRM_HDCP_DDC_RI_PRIME 0x08 -- cgit v1.2.3 From 3b5455636fe26ea21b4189d135a424a6da016418 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 2 Sep 2020 12:32:45 -0400 Subject: libata: implement ATA_HORKAGE_MAX_TRIM_128M and apply to Sandisks All three generations of Sandisk SSDs lock up hard intermittently. Experiments showed that disabling NCQ lowered the failure rate significantly and the kernel has been disabling NCQ for some models of SD7's and 8's, which is obviously undesirable. Karthik worked with Sandisk to root cause the hard lockups to trim commands larger than 128M. This patch implements ATA_HORKAGE_MAX_TRIM_128M which limits max trim size to 128M and applies it to all three generations of Sandisk SSDs. Signed-off-by: Tejun Heo Cc: Karthik Shivaram Cc: stable@vger.kernel.org Signed-off-by: Jens Axboe --- drivers/ata/libata-core.c | 5 ++--- drivers/ata/libata-scsi.c | 8 +++++++- include/linux/libata.h | 1 + 3 files changed, 10 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index b1cd4d97bc2a..1be73d29119a 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -3868,9 +3868,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, - /* Some Sandisk SSDs lock up hard with NCQ enabled. 
Reported on - SD7SN6S256G and SD8SN8U256G */ - { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, }, + /* Sandisk SD7/8/9s lock up hard on large trims */ + { "SanDisk SD[789]*", NULL, ATA_HORKAGE_MAX_TRIM_128M, }, /* devices which puke on READ_NATIVE_MAX */ { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index ec233208585b..c7b5049b42d1 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -2080,6 +2080,7 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf) static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf) { + struct ata_device *dev = args->dev; u16 min_io_sectors; rbuf[1] = 0xb0; @@ -2105,7 +2106,12 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf) * with the unmap bit set. */ if (ata_id_has_trim(args->id)) { - put_unaligned_be64(65535 * ATA_MAX_TRIM_RNUM, &rbuf[36]); + u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM; + + if (dev->horkage & ATA_HORKAGE_MAX_TRIM_128M) + max_blocks = 128 << (20 - SECTOR_SHIFT); + + put_unaligned_be64(max_blocks, &rbuf[36]); put_unaligned_be32(1, &rbuf[28]); } diff --git a/include/linux/libata.h b/include/linux/libata.h index 77ccf040a128..5f550eb27f81 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -421,6 +421,7 @@ enum { ATA_HORKAGE_NO_DMA_LOG = (1 << 23), /* don't use DMA for log read */ ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */ ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */ + ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */ /* DMA mask for user DMA control: User visible values; DO NOT renumber */ -- cgit v1.2.3 From 7e24969022cbd61ddc586f14824fc205661bb124 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Mon, 17 Aug 2020 18:00:55 +0800 Subject: block: allow for_each_bvec to support zero len bvec Block layer usually doesn't support or allow zero-length bvec. Since commit 1bdc76aea115 ("iov_iter: use bvec iterator to implement iterate_bvec()"), iterate_bvec() switches to bvec iterator. However, Al mentioned that 'Zero-length segments are not disallowed' in iov_iter. Fixes for_each_bvec() so that it can move on after seeing one zero length bvec. Fixes: 1bdc76aea115 ("iov_iter: use bvec iterator to implement iterate_bvec()") Reported-by: syzbot Signed-off-by: Ming Lei Tested-by: Tetsuo Handa Cc: Al Viro Cc: Matthew Wilcox Cc: Link: https://www.mail-archive.com/linux-kernel@vger.kernel.org/msg2262077.html Signed-off-by: Jens Axboe --- include/linux/bvec.h | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/bvec.h b/include/linux/bvec.h index ac0c7299d5b8..dd74503f7e5e 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -117,11 +117,18 @@ static inline bool bvec_iter_advance(const struct bio_vec *bv, return true; } +static inline void bvec_iter_skip_zero_bvec(struct bvec_iter *iter) +{ + iter->bi_bvec_done = 0; + iter->bi_idx++; +} + #define for_each_bvec(bvl, bio_vec, iter, start) \ for (iter = (start); \ (iter).bi_size && \ ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \ - bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len)) + (bvl).bv_len ? 
(void)bvec_iter_advance((bio_vec), &(iter), \ + (bvl).bv_len) : bvec_iter_skip_zero_bvec(&(iter))) /* for iterating one bio from start to end */ #define BVEC_ITER_ALL_INIT (struct bvec_iter) \ -- cgit v1.2.3 From aecb2016c90a1b620e21c9e143afbdc9666cce52 Mon Sep 17 00:00:00 2001 From: Roger Pau Monne Date: Tue, 1 Sep 2020 10:33:24 +0200 Subject: xen/balloon: add header guard MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In order to protect against the header being included multiple times on the same compilation unit. Signed-off-by: Roger Pau Monné Reviewed-by: Boris Ostrovsky Link: https://lore.kernel.org/r/20200901083326.21264-2-roger.pau@citrix.com Signed-off-by: Juergen Gross --- include/xen/balloon.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/xen/balloon.h b/include/xen/balloon.h index 6fb95aa19405..6dbdb0b3fd03 100644 --- a/include/xen/balloon.h +++ b/include/xen/balloon.h @@ -2,6 +2,8 @@ /****************************************************************************** * Xen balloon functionality */ +#ifndef _XEN_BALLOON_H +#define _XEN_BALLOON_H #define RETRY_UNLIMITED 0 @@ -34,3 +36,5 @@ static inline void xen_balloon_init(void) { } #endif + +#endif /* _XEN_BALLOON_H */ -- cgit v1.2.3 From 4533d3aed857c558d6aabd00d0cb04100c5a2258 Mon Sep 17 00:00:00 2001 From: Roger Pau Monne Date: Tue, 1 Sep 2020 10:33:25 +0200 Subject: memremap: rename MEMORY_DEVICE_DEVDAX to MEMORY_DEVICE_GENERIC MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is in preparation for the logic behind MEMORY_DEVICE_DEVDAX also being used by non DAX devices. No functional change intended. Signed-off-by: Roger Pau Monné Reviewed-by: Ira Weiny Acked-by: Andrew Morton Reviewed-by: Pankaj Gupta Link: https://lore.kernel.org/r/20200901083326.21264-3-roger.pau@citrix.com Signed-off-by: Juergen Gross --- drivers/dax/device.c | 2 +- include/linux/memremap.h | 9 ++++----- mm/memremap.c | 2 +- 3 files changed, 6 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/dax/device.c b/drivers/dax/device.c index 4c0af2eb7e19..1e89513f3c59 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -429,7 +429,7 @@ int dev_dax_probe(struct device *dev) return -EBUSY; } - dev_dax->pgmap.type = MEMORY_DEVICE_DEVDAX; + dev_dax->pgmap.type = MEMORY_DEVICE_GENERIC; addr = devm_memremap_pages(dev, &dev_dax->pgmap); if (IS_ERR(addr)) return PTR_ERR(addr); diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 5f5b2df06e61..e5862746751b 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -46,11 +46,10 @@ struct vmem_altmap { * wakeup is used to coordinate physical address space management (ex: * fs truncate/hole punch) vs pinned pages (ex: device dma). * - * MEMORY_DEVICE_DEVDAX: + * MEMORY_DEVICE_GENERIC: * Host memory that has similar access semantics as System RAM i.e. DMA - * coherent and supports page pinning. In contrast to - * MEMORY_DEVICE_FS_DAX, this memory is access via a device-dax - * character device. + * coherent and supports page pinning. This is for example used by DAX devices + * that expose memory using a character device. 
* * MEMORY_DEVICE_PCI_P2PDMA: * Device memory residing in a PCI BAR intended for use with Peer-to-Peer @@ -60,7 +59,7 @@ enum memory_type { /* 0 is reserved to catch uninitialized type fields */ MEMORY_DEVICE_PRIVATE = 1, MEMORY_DEVICE_FS_DAX, - MEMORY_DEVICE_DEVDAX, + MEMORY_DEVICE_GENERIC, MEMORY_DEVICE_PCI_P2PDMA, }; diff --git a/mm/memremap.c b/mm/memremap.c index 03e38b7a38f1..006dace60b1a 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -216,7 +216,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid) return ERR_PTR(-EINVAL); } break; - case MEMORY_DEVICE_DEVDAX: + case MEMORY_DEVICE_GENERIC: need_devmap_managed = false; break; case MEMORY_DEVICE_PCI_P2PDMA: -- cgit v1.2.3 From 9e2369c06c8a181478039258a4598c1ddd2cadfa Mon Sep 17 00:00:00 2001 From: Roger Pau Monne Date: Tue, 1 Sep 2020 10:33:26 +0200 Subject: xen: add helpers to allocate unpopulated memory MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To be used in order to create foreign mappings. This is based on the ZONE_DEVICE facility which is used by persistent memory devices in order to create struct pages and kernel virtual mappings for the IOMEM areas of such devices. Note that on kernels without support for ZONE_DEVICE Xen will fallback to use ballooned pages in order to create foreign mappings. The newly added helpers use the same parameters as the existing {alloc/free}_xenballooned_pages functions, which allows for in-place replacement of the callers. Once a memory region has been added to be used as scratch mapping space it will no longer be released, and pages returned are kept in a linked list. This allows to have a buffer of pages and prevents resorting to frequent additions and removals of regions. If enabled (because ZONE_DEVICE is supported) the usage of the new functionality untangles Xen balloon and RAM hotplug from the usage of unpopulated physical memory ranges to map foreign pages, which is the correct thing to do in order to avoid mappings of foreign pages depend on memory hotplug. Note the driver is currently not enabled on Arm platforms because it would interfere with the identity mapping required on some platforms. 
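As a drop-in illustration of that replacement (the page count and error handling are made up for the example), a caller that used ballooned pages to back foreign mappings would now do:

	struct page *pages[8];
	int ret;

	/* Same calling convention as alloc_xenballooned_pages(). */
	ret = xen_alloc_unpopulated_pages(ARRAY_SIZE(pages), pages);
	if (ret < 0)
		return ret;

	/* ... map foreign frames into the returned pages ... */

	/* Pages are returned to the driver's internal list, not to the balloon. */
	xen_free_unpopulated_pages(ARRAY_SIZE(pages), pages);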
Signed-off-by: Roger Pau Monné Reviewed-by: Juergen Gross Link: https://lore.kernel.org/r/20200901083326.21264-4-roger.pau@citrix.com Signed-off-by: Juergen Gross --- drivers/gpu/drm/xen/xen_drm_front_gem.c | 9 +- drivers/xen/Kconfig | 10 ++ drivers/xen/Makefile | 1 + drivers/xen/balloon.c | 4 +- drivers/xen/grant-table.c | 4 +- drivers/xen/privcmd.c | 4 +- drivers/xen/unpopulated-alloc.c | 183 ++++++++++++++++++++++++++++++++ drivers/xen/xenbus/xenbus_client.c | 6 +- drivers/xen/xlate_mmu.c | 4 +- include/xen/xen.h | 9 ++ 10 files changed, 219 insertions(+), 15 deletions(-) create mode 100644 drivers/xen/unpopulated-alloc.c (limited to 'include') diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c index 39ff95b75357..534daf37c97e 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_gem.c +++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c @@ -18,6 +18,7 @@ #include #include +#include #include "xen_drm_front.h" #include "xen_drm_front_gem.h" @@ -99,8 +100,8 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size) * allocate ballooned pages which will be used to map * grant references provided by the backend */ - ret = alloc_xenballooned_pages(xen_obj->num_pages, - xen_obj->pages); + ret = xen_alloc_unpopulated_pages(xen_obj->num_pages, + xen_obj->pages); if (ret < 0) { DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n", xen_obj->num_pages, ret); @@ -152,8 +153,8 @@ void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj) } else { if (xen_obj->pages) { if (xen_obj->be_alloc) { - free_xenballooned_pages(xen_obj->num_pages, - xen_obj->pages); + xen_free_unpopulated_pages(xen_obj->num_pages, + xen_obj->pages); gem_free_pages_array(xen_obj); } else { drm_gem_put_pages(&xen_obj->base, diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index 46e7fd099904..0ab54df82520 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -324,4 +324,14 @@ config XEN_HAVE_VPMU config XEN_FRONT_PGDIR_SHBUF tristate +config XEN_UNPOPULATED_ALLOC + bool "Use unpopulated memory ranges for guest mappings" + depends on X86 && ZONE_DEVICE + default XEN_BACKEND || XEN_GNTDEV || XEN_DOM0 + help + Use unpopulated memory ranges in order to create mappings for guest + memory regions, including grant maps and foreign pages. This avoids + having to balloon out RAM regions in order to obtain physical memory + space to create such mappings. 
+ endmenu diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index 0d322f3d90cd..3cca2be28824 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile @@ -42,3 +42,4 @@ xen-gntdev-$(CONFIG_XEN_GNTDEV_DMABUF) += gntdev-dmabuf.o xen-gntalloc-y := gntalloc.o xen-privcmd-y := privcmd.o privcmd-buf.o obj-$(CONFIG_XEN_FRONT_PGDIR_SHBUF) += xen-front-pgdir-shbuf.o +obj-$(CONFIG_XEN_UNPOPULATED_ALLOC) += unpopulated-alloc.o diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index b1d8b028bf80..4bfbe71705e4 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -654,7 +654,7 @@ void free_xenballooned_pages(int nr_pages, struct page **pages) } EXPORT_SYMBOL(free_xenballooned_pages); -#ifdef CONFIG_XEN_PV +#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC) static void __init balloon_add_region(unsigned long start_pfn, unsigned long pages) { @@ -708,7 +708,7 @@ static int __init balloon_init(void) register_sysctl_table(xen_root); #endif -#ifdef CONFIG_XEN_PV +#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC) { int i; diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 8d06bf1cc347..523dcdf39cc9 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -801,7 +801,7 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages) { int ret; - ret = alloc_xenballooned_pages(nr_pages, pages); + ret = xen_alloc_unpopulated_pages(nr_pages, pages); if (ret < 0) return ret; @@ -836,7 +836,7 @@ EXPORT_SYMBOL_GPL(gnttab_pages_clear_private); void gnttab_free_pages(int nr_pages, struct page **pages) { gnttab_pages_clear_private(nr_pages, pages); - free_xenballooned_pages(nr_pages, pages); + xen_free_unpopulated_pages(nr_pages, pages); } EXPORT_SYMBOL_GPL(gnttab_free_pages); diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index 095d683ad574..8bcb0ce223a5 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -425,7 +425,7 @@ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs) if (pages == NULL) return -ENOMEM; - rc = alloc_xenballooned_pages(numpgs, pages); + rc = xen_alloc_unpopulated_pages(numpgs, pages); if (rc != 0) { pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__, numpgs, rc); @@ -896,7 +896,7 @@ static void privcmd_close(struct vm_area_struct *vma) rc = xen_unmap_domain_gfn_range(vma, numgfns, pages); if (rc == 0) - free_xenballooned_pages(numpgs, pages); + xen_free_unpopulated_pages(numpgs, pages); else pr_crit("unable to unmap MFN range: leaking %d pages. 
rc=%d\n", numpgs, rc); diff --git a/drivers/xen/unpopulated-alloc.c b/drivers/xen/unpopulated-alloc.c new file mode 100644 index 000000000000..3b98dc921426 --- /dev/null +++ b/drivers/xen/unpopulated-alloc.c @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +static DEFINE_MUTEX(list_lock); +static LIST_HEAD(page_list); +static unsigned int list_count; + +static int fill_list(unsigned int nr_pages) +{ + struct dev_pagemap *pgmap; + void *vaddr; + unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION); + int ret; + + pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL); + if (!pgmap) + return -ENOMEM; + + pgmap->type = MEMORY_DEVICE_GENERIC; + pgmap->res.name = "Xen scratch"; + pgmap->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; + + ret = allocate_resource(&iomem_resource, &pgmap->res, + alloc_pages * PAGE_SIZE, 0, -1, + PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL); + if (ret < 0) { + pr_err("Cannot allocate new IOMEM resource\n"); + kfree(pgmap); + return ret; + } + +#ifdef CONFIG_XEN_HAVE_PVMMU + /* + * memremap will build page tables for the new memory so + * the p2m must contain invalid entries so the correct + * non-present PTEs will be written. + * + * If a failure occurs, the original (identity) p2m entries + * are not restored since this region is now known not to + * conflict with any devices. + */ + if (!xen_feature(XENFEAT_auto_translated_physmap)) { + xen_pfn_t pfn = PFN_DOWN(pgmap->res.start); + + for (i = 0; i < alloc_pages; i++) { + if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) { + pr_warn("set_phys_to_machine() failed, no memory added\n"); + release_resource(&pgmap->res); + kfree(pgmap); + return -ENOMEM; + } + } + } +#endif + + vaddr = memremap_pages(pgmap, NUMA_NO_NODE); + if (IS_ERR(vaddr)) { + pr_err("Cannot remap memory range\n"); + release_resource(&pgmap->res); + kfree(pgmap); + return PTR_ERR(vaddr); + } + + for (i = 0; i < alloc_pages; i++) { + struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i); + + BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i)); + list_add(&pg->lru, &page_list); + list_count++; + } + + return 0; +} + +/** + * xen_alloc_unpopulated_pages - alloc unpopulated pages + * @nr_pages: Number of pages + * @pages: pages returned + * @return 0 on success, error otherwise + */ +int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages) +{ + unsigned int i; + int ret = 0; + + mutex_lock(&list_lock); + if (list_count < nr_pages) { + ret = fill_list(nr_pages - list_count); + if (ret) + goto out; + } + + for (i = 0; i < nr_pages; i++) { + struct page *pg = list_first_entry_or_null(&page_list, + struct page, + lru); + + BUG_ON(!pg); + list_del(&pg->lru); + list_count--; + pages[i] = pg; + +#ifdef CONFIG_XEN_HAVE_PVMMU + if (!xen_feature(XENFEAT_auto_translated_physmap)) { + ret = xen_alloc_p2m_entry(page_to_pfn(pg)); + if (ret < 0) { + unsigned int j; + + for (j = 0; j <= i; j++) { + list_add(&pages[j]->lru, &page_list); + list_count++; + } + goto out; + } + } +#endif + } + +out: + mutex_unlock(&list_lock); + return ret; +} +EXPORT_SYMBOL(xen_alloc_unpopulated_pages); + +/** + * xen_free_unpopulated_pages - return unpopulated pages + * @nr_pages: Number of pages + * @pages: pages to return + */ +void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages) +{ + unsigned int i; + + mutex_lock(&list_lock); + for (i = 0; i < nr_pages; i++) { + list_add(&pages[i]->lru, &page_list); + list_count++; + } + 
mutex_unlock(&list_lock); +} +EXPORT_SYMBOL(xen_free_unpopulated_pages); + +#ifdef CONFIG_XEN_PV +static int __init init(void) +{ + unsigned int i; + + if (!xen_domain()) + return -ENODEV; + + if (!xen_pv_domain()) + return 0; + + /* + * Initialize with pages from the extra memory regions (see + * arch/x86/xen/setup.c). + */ + for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) { + unsigned int j; + + for (j = 0; j < xen_extra_mem[i].n_pfns; j++) { + struct page *pg = + pfn_to_page(xen_extra_mem[i].start_pfn + j); + + list_add(&pg->lru, &page_list); + list_count++; + } + } + + return 0; +} +subsys_initcall(init); +#endif diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index 907bcbb93afb..2690318ad50f 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c @@ -621,7 +621,7 @@ static int xenbus_map_ring_hvm(struct xenbus_device *dev, bool leaked = false; unsigned int nr_pages = XENBUS_PAGES(nr_grefs); - err = alloc_xenballooned_pages(nr_pages, node->hvm.pages); + err = xen_alloc_unpopulated_pages(nr_pages, node->hvm.pages); if (err) goto out_err; @@ -662,7 +662,7 @@ static int xenbus_map_ring_hvm(struct xenbus_device *dev, addr, nr_pages); out_free_ballooned_pages: if (!leaked) - free_xenballooned_pages(nr_pages, node->hvm.pages); + xen_free_unpopulated_pages(nr_pages, node->hvm.pages); out_err: return err; } @@ -858,7 +858,7 @@ static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr) info.addrs); if (!rv) { vunmap(vaddr); - free_xenballooned_pages(nr_pages, node->hvm.pages); + xen_free_unpopulated_pages(nr_pages, node->hvm.pages); } else WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages); diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c index 7b1077f0abcb..34742c6e189e 100644 --- a/drivers/xen/xlate_mmu.c +++ b/drivers/xen/xlate_mmu.c @@ -232,7 +232,7 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt, kfree(pages); return -ENOMEM; } - rc = alloc_xenballooned_pages(nr_pages, pages); + rc = xen_alloc_unpopulated_pages(nr_pages, pages); if (rc) { pr_warn("%s Couldn't balloon alloc %ld pages rc:%d\n", __func__, nr_pages, rc); @@ -249,7 +249,7 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt, if (!vaddr) { pr_warn("%s Couldn't map %ld pages rc:%d\n", __func__, nr_pages, rc); - free_xenballooned_pages(nr_pages, pages); + xen_free_unpopulated_pages(nr_pages, pages); kfree(pages); kfree(pfns); return -ENOMEM; diff --git a/include/xen/xen.h b/include/xen/xen.h index 19a72f591e2b..43efba045acc 100644 --- a/include/xen/xen.h +++ b/include/xen/xen.h @@ -52,4 +52,13 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, extern u64 xen_saved_max_mem_size; #endif +#ifdef CONFIG_XEN_UNPOPULATED_ALLOC +int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages); +void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages); +#else +#define xen_alloc_unpopulated_pages alloc_xenballooned_pages +#define xen_free_unpopulated_pages free_xenballooned_pages +#include +#endif + #endif /* _XEN_XEN_H */ -- cgit v1.2.3 From 4facb95b7adaf77e2da73aafb9ba60996fe42a12 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 2 Sep 2020 01:50:54 +0200 Subject: x86/entry: Unbreak 32bit fast syscall Andy reported that the syscall tracing for 32bit fast syscall fails: # ./tools/testing/selftests/x86/ptrace_syscall_32 ... [RUN] SYSEMU [FAIL] Initial args are wrong (nr=224, args=10 11 12 13 14 4289172732) ...
[RUN] SYSCALL [FAIL] Initial args are wrong (nr=29, args=0 0 0 0 0 4289172732) The reason is that the conversion to generic entry code moved the retrieval of the sixth argument (EBP) after the point where the syscall entry work runs, i.e. ptrace, seccomp, audit... Unbreak it by providing a split up version of syscall_enter_from_user_mode(). - syscall_enter_from_user_mode_prepare() establishes state and enables interrupts - syscall_enter_from_user_mode_work() runs the entry work Replace the call to syscall_enter_from_user_mode() in the 32bit fast syscall C-entry with the split functions and stick the EBP retrieval between them. Fixes: 27d6b4d14f5c ("x86/entry: Use generic syscall entry function") Reported-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/87k0xdjbtt.fsf@nanos.tec.linutronix.de --- arch/x86/entry/common.c | 29 +++++++++++++++++-------- include/linux/entry-common.h | 51 ++++++++++++++++++++++++++++++++++++-------- kernel/entry/common.c | 35 ++++++++++++++++++++++++------ 3 files changed, 91 insertions(+), 24 deletions(-) (limited to 'include') diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 48512c7944e7..2f84c7ca74ea 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -60,16 +60,10 @@ __visible noinstr void do_syscall_64(unsigned long nr, struct pt_regs *regs) #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) static __always_inline unsigned int syscall_32_enter(struct pt_regs *regs) { - unsigned int nr = (unsigned int)regs->orig_ax; - if (IS_ENABLED(CONFIG_IA32_EMULATION)) current_thread_info()->status |= TS_COMPAT; - /* - * Subtlety here: if ptrace pokes something larger than 2^32-1 into - * orig_ax, the unsigned int return value truncates it. This may - * or may not be necessary, but it matches the old asm behavior. - */ - return (unsigned int)syscall_enter_from_user_mode(regs, nr); + + return (unsigned int)regs->orig_ax; } /* @@ -91,15 +85,29 @@ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs) { unsigned int nr = syscall_32_enter(regs); + /* + * Subtlety here: if ptrace pokes something larger than 2^32-1 into + * orig_ax, the unsigned int return value truncates it. This may + * or may not be necessary, but it matches the old asm behavior. + */ + nr = (unsigned int)syscall_enter_from_user_mode(regs, nr); + do_syscall_32_irqs_on(regs, nr); syscall_exit_to_user_mode(regs); } static noinstr bool __do_fast_syscall_32(struct pt_regs *regs) { - unsigned int nr = syscall_32_enter(regs); + unsigned int nr = syscall_32_enter(regs); int res; + /* + * This cannot use syscall_enter_from_user_mode() as it has to + * fetch EBP before invoking any of the syscall entry work + * functions. + */ + syscall_enter_from_user_mode_prepare(regs); + instrumentation_begin(); /* Fetch EBP from where the vDSO stashed it. */ if (IS_ENABLED(CONFIG_X86_64)) { @@ -122,6 +130,9 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs) return false; } + /* The case truncates any ptrace induced syscall nr > 2^32 -1 */ + nr = (unsigned int)syscall_enter_from_user_mode_work(regs, nr); + /* Now this is just like a normal syscall.
*/ do_syscall_32_irqs_on(regs, nr); syscall_exit_to_user_mode(regs); diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index efebbffcd5cc..159c7476b11b 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -110,15 +110,30 @@ static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs #endif /** - * syscall_enter_from_user_mode - Check and handle work before invoking - * a syscall + * syscall_enter_from_user_mode_prepare - Establish state and enable interrupts * @regs: Pointer to currents pt_regs - * @syscall: The syscall number * * Invoked from architecture specific syscall entry code with interrupts * disabled. The calling code has to be non-instrumentable. When the - * function returns all state is correct and the subsequent functions can be - * instrumented. + * function returns all state is correct, interrupts are enabled and the + * subsequent functions can be instrumented. + * + * This handles lockdep, RCU (context tracking) and tracing state. + * + * This is invoked when there is extra architecture specific functionality + * to be done between establishing state and handling user mode entry work. + */ +void syscall_enter_from_user_mode_prepare(struct pt_regs *regs); + +/** + * syscall_enter_from_user_mode_work - Check and handle work before invoking + * a syscall + * @regs: Pointer to currents pt_regs + * @syscall: The syscall number + * + * Invoked from architecture specific syscall entry code with interrupts + * enabled after invoking syscall_enter_from_user_mode_prepare() and extra + * architecture specific work. * * Returns: The original or a modified syscall number * @@ -127,12 +142,30 @@ static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs * syscall_set_return_value() first. If neither of those are called and -1 * is returned, then the syscall will fail with ENOSYS. * - * The following functionality is handled here: + * It handles the following work items: * - * 1) Establish state (lockdep, RCU (context tracking), tracing) - * 2) TIF flag dependent invocations of arch_syscall_enter_tracehook(), + * 1) TIF flag dependent invocations of arch_syscall_enter_tracehook(), * __secure_computing(), trace_sys_enter() - * 3) Invocation of audit_syscall_entry() + * 2) Invocation of audit_syscall_entry() + */ +long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall); + +/** + * syscall_enter_from_user_mode - Establish state and check and handle work + * before invoking a syscall + * @regs: Pointer to currents pt_regs + * @syscall: The syscall number + * + * Invoked from architecture specific syscall entry code with interrupts + * disabled. The calling code has to be non-instrumentable. When the + * function returns all state is correct, interrupts are enabled and the + * subsequent functions can be instrumented. + * + * This is combination of syscall_enter_from_user_mode_prepare() and + * syscall_enter_from_user_mode_work(). + * + * Returns: The original or a modified syscall number. See + * syscall_enter_from_user_mode_work() for further explanation. */ long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall); diff --git a/kernel/entry/common.c b/kernel/entry/common.c index fcae019158ca..18683598edbc 100644 --- a/kernel/entry/common.c +++ b/kernel/entry/common.c @@ -69,22 +69,45 @@ static long syscall_trace_enter(struct pt_regs *regs, long syscall, return ret ? 
: syscall_get_nr(current, regs); } -noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall) +static __always_inline long +__syscall_enter_from_user_work(struct pt_regs *regs, long syscall) { unsigned long ti_work; - enter_from_user_mode(regs); - instrumentation_begin(); - - local_irq_enable(); ti_work = READ_ONCE(current_thread_info()->flags); if (ti_work & SYSCALL_ENTER_WORK) syscall = syscall_trace_enter(regs, syscall, ti_work); - instrumentation_end(); return syscall; } +long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall) +{ + return __syscall_enter_from_user_work(regs, syscall); +} + +noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall) +{ + long ret; + + enter_from_user_mode(regs); + + instrumentation_begin(); + local_irq_enable(); + ret = __syscall_enter_from_user_work(regs, syscall); + instrumentation_end(); + + return ret; +} + +noinstr void syscall_enter_from_user_mode_prepare(struct pt_regs *regs) +{ + enter_from_user_mode(regs); + instrumentation_begin(); + local_irq_enable(); + instrumentation_end(); +} + /** * exit_to_user_mode - Fixup state when exiting to user mode * -- cgit v1.2.3 From 1a0cf26323c80e2f1c58fc04f15686de61bfab0c Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Fri, 21 Aug 2020 19:49:56 -0400 Subject: mm/ksm: Remove reuse_ksm_page() Remove the function as the last reference has gone away with the do_wp_page() changes. Signed-off-by: Peter Xu Signed-off-by: Linus Torvalds --- include/linux/ksm.h | 7 ------- mm/ksm.c | 25 ------------------------- 2 files changed, 32 deletions(-) (limited to 'include') diff --git a/include/linux/ksm.h b/include/linux/ksm.h index e48b1e453ff5..161e8164abcf 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -53,8 +53,6 @@ struct page *ksm_might_need_to_copy(struct page *page, void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc); void ksm_migrate_page(struct page *newpage, struct page *oldpage); -bool reuse_ksm_page(struct page *page, - struct vm_area_struct *vma, unsigned long address); #else /* !CONFIG_KSM */ @@ -88,11 +86,6 @@ static inline void rmap_walk_ksm(struct page *page, static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage) { } -static inline bool reuse_ksm_page(struct page *page, - struct vm_area_struct *vma, unsigned long address) -{ - return false; -} #endif /* CONFIG_MMU */ #endif /* !CONFIG_KSM */ diff --git a/mm/ksm.c b/mm/ksm.c index 4102034cd55a..12958287fac3 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2660,31 +2660,6 @@ again: goto again; } -bool reuse_ksm_page(struct page *page, - struct vm_area_struct *vma, - unsigned long address) -{ -#ifdef CONFIG_DEBUG_VM - if (WARN_ON(is_zero_pfn(page_to_pfn(page))) || - WARN_ON(!page_mapped(page)) || - WARN_ON(!PageLocked(page))) { - dump_page(page, "reuse_ksm_page"); - return false; - } -#endif - - if (PageSwapCache(page) || !page_stable_node(page)) - return false; - /* Prohibit parallel get_ksm_page() */ - if (!page_ref_freeze(page, 1)) - return false; - - page_move_anon_rmap(page, vma); - page->index = linear_page_index(vma, address); - page_ref_unfreeze(page, 1); - - return true; -} #ifdef CONFIG_MIGRATION void ksm_migrate_page(struct page *newpage, struct page *oldpage) { -- cgit v1.2.3 From 798a6b87ecd72828a6c6b5469aaa2032a57e92b7 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Fri, 21 Aug 2020 19:49:58 -0400 Subject: mm: Add PGREUSE counter This accounts for wp_page_reuse() case, where we reused a page for COW. 
Signed-off-by: Peter Xu Signed-off-by: Linus Torvalds --- include/linux/vm_event_item.h | 1 + mm/memory.c | 1 + mm/vmstat.c | 1 + 3 files changed, 3 insertions(+) (limited to 'include') diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 24fc7c3ae7d6..58d3f91baad1 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -30,6 +30,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, PGFAULT, PGMAJFAULT, PGLAZYFREED, PGREFILL, + PGREUSE, PGSTEAL_KSWAPD, PGSTEAL_DIRECT, PGSCAN_KSWAPD, diff --git a/mm/memory.c b/mm/memory.c index 56ae945c6845..20d93001ef93 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2619,6 +2619,7 @@ static inline void wp_page_reuse(struct vm_fault *vmf) if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) update_mmu_cache(vma, vmf->address, vmf->pte); pte_unmap_unlock(vmf->pte, vmf->ptl); + count_vm_event(PGREUSE); } /* diff --git a/mm/vmstat.c b/mm/vmstat.c index 3fb23a21f6dd..907a5045bfa3 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1198,6 +1198,7 @@ const char * const vmstat_text[] = { "pglazyfreed", "pgrefill", + "pgreuse", "pgsteal_kswapd", "pgsteal_direct", "pgscan_kswapd", -- cgit v1.2.3 From 428fc0aff4e59399ec719ffcc1f7a5d29a4ee476 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Fri, 4 Sep 2020 16:36:19 -0700 Subject: include/linux/log2.h: add missing () around n in roundup_pow_of_two() Otherwise gcc generates warnings if the expression is complicated. Fixes: 312a0c170945 ("[PATCH] LOG2: Alter roundup_pow_of_two() so that it can use a ilog2() on a constant") Signed-off-by: Jason Gunthorpe Signed-off-by: Andrew Morton Link: https://lkml.kernel.org/r/0-v1-8a2697e3c003+41165-log_brackets_jgg@nvidia.com Signed-off-by: Linus Torvalds --- include/linux/log2.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/log2.h b/include/linux/log2.h index 83a4a3ca3e8a..c619ec6eff4a 100644 --- a/include/linux/log2.h +++ b/include/linux/log2.h @@ -173,7 +173,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) #define roundup_pow_of_two(n) \ ( \ __builtin_constant_p(n) ? ( \ - (n == 1) ? 1 : \ + ((n) == 1) ? 1 : \ (1UL << (ilog2((n) - 1) + 1)) \ ) : \ __roundup_pow_of_two(n) \ -- cgit v1.2.3
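To make the precedence problem fixed by the roundup_pow_of_two() change above concrete (the variable names are illustrative), consider a compound, non-trivial argument:

	/*
	 * With the old body, "(n == 1)" expanded for n = "nr ? nr : 1" into
	 *
	 *     nr ? nr : 1 == 1        which parses as  nr ? nr : (1 == 1)
	 *
	 * so the comparison bound only to the trailing operand, triggering
	 * gcc warnings and mis-evaluating constant arguments. With the added
	 * parentheses it expands to ((nr ? nr : 1) == 1) as intended.
	 */
	unsigned long buckets = roundup_pow_of_two(nr ? nr : 1);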